content
stringlengths
0
1.55M
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test that the driver can build tests effectively."""<import_stmt>os<import_stmt>unittest<import_from_stmt>gabbi driver<line_sep>TESTS_DIR='test_gabbits'<class_stmt>DriverTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>super(DriverTest self).setUp()<line_sep>self.loader=unittest.defaultTestLoader<line_sep>self.test_dir=os.path.join(os.path.dirname(__file__) TESTS_DIR)<block_end><def_stmt>test_driver_loads_three_tests self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' port=8001)<line_sep>self.assertEqual(1 len(suite._tests) 'top level suite contains one suite')<line_sep>self.assertEqual(3 len(suite._tests[0]._tests) 'contained suite contains three tests')<line_sep>the_one_test=suite._tests[0]._tests[0]<line_sep>self.assertEqual('test_driver_sample_one' the_one_test.__class__.__name__ 'test class name maps')<line_sep>self.assertEqual('one' the_one_test.test_data['name'])<line_sep>self.assertEqual('/' the_one_test.test_data['url'])<block_end><def_stmt>test_driver_prefix self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' port=8001 prefix='/mountpoint')<line_sep>the_one_test=suite._tests[0]._tests[0]<line_sep>the_two_test=suite._tests[0]._tests[1]<line_sep>self.assertEqual('/mountpoint' the_one_test.prefix)<line_sep>self.assertEqual('/mountpoint' the_two_test.prefix)<block_end><def_stmt>test_build_requires_host_or_intercept 
self<block_start><with_stmt>self.assertRaises(AssertionError)<block_start>driver.build_tests(self.test_dir self.loader)<block_end><block_end><def_stmt>test_build_with_url_provides_host self<block_start>"""This confirms that url provides the required host."""<line_sep>suite=driver.build_tests(self.test_dir self.loader url='https://foo.example.com')<line_sep>first_test=suite._tests[0]._tests[0]<line_sep>full_url=first_test._parse_url(first_test.test_data['url'])<line_sep>ssl=first_test.test_data['ssl']<line_sep>self.assertEqual('https://foo.example.com/' full_url)<line_sep>self.assertTrue(ssl)<block_end><def_stmt>test_build_require_ssl self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' require_ssl=<true>)<line_sep>first_test=suite._tests[0]._tests[0]<line_sep>full_url=first_test._parse_url(first_test.test_data['url'])<line_sep>self.assertEqual('https://localhost:8001/' full_url)<line_sep>suite=driver.build_tests(self.test_dir self.loader host='localhost' require_ssl=<false>)<line_sep>first_test=suite._tests[0]._tests[0]<line_sep>full_url=first_test._parse_url(first_test.test_data['url'])<line_sep>self.assertEqual('http://localhost:8001/' full_url)<block_end><def_stmt>test_build_url_target self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' port='999' url='https://example.com:1024/theend')<line_sep>first_test=suite._tests[0]._tests[0]<line_sep>full_url=first_test._parse_url(first_test.test_data['url'])<line_sep>self.assertEqual('https://example.com:1024/theend/' full_url)<block_end><def_stmt>test_build_url_target_forced_ssl self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' port='999' url='http://example.com:1024/theend' require_ssl=<true>)<line_sep>first_test=suite._tests[0]._tests[0]<line_sep>full_url=first_test._parse_url(first_test.test_data['url'])<line_sep>self.assertEqual('https://example.com:1024/theend/' 
full_url)<block_end><def_stmt>test_build_url_use_prior_test self<block_start>suite=driver.build_tests(self.test_dir self.loader host='localhost' use_prior_test=<true>)<for_stmt>test suite._tests[0]._tests<block_start><if_stmt>test.test_data['name']<ne>'use_prior_false'<block_start>expected_use_prior=<true><block_end><else_stmt><block_start>expected_use_prior=<false><block_end>self.assertEqual(expected_use_prior test.test_data['use_prior_test'])<block_end>suite=driver.build_tests(self.test_dir self.loader host='localhost' use_prior_test=<false>)<for_stmt>test suite._tests[0]._tests<block_start>self.assertEqual(<false> test.test_data['use_prior_test'])<block_end><block_end><block_end>
<import_from_stmt>django.db migrations<def_stmt>operation_make_labels_unique apps schema_editor<block_start>WebLink=apps.get_model(app_label='web_links' model_name='WebLink')<for_stmt>web_link WebLink.objects.using(schema_editor.connection.alias).all()# Look for instances with the same label <block_start>duplicate_queryset=WebLink.objects.using(schema_editor.connection.alias).filter(label=web_link.label).exclude(pk=web_link.pk)<if_stmt>duplicate_queryset# If a duplicate is found, append the id to the original instance # label <block_start>web_link.label='{}__{}'.format(web_link.label web_link.pk)<line_sep>web_link.save()<block_end><block_end><block_end><def_stmt>operation_make_labels_unique_reverse apps schema_editor<block_start>WebLink=apps.get_model(app_label='web_links' model_name='WebLink')<for_stmt>web_link WebLink.objects.using(schema_editor.connection.alias).all()<block_start><if_stmt>web_link.label.endswith('__{}'.format(web_link.pk))<block_start>web_link.label=web_link.label.replace('__{}'.format(web_link.pk) '')<line_sep>web_link.save()<block_end><block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('web_links' '0003_auto_20191211_0233') ]<line_sep>operations=[migrations.RunPython(code=operation_make_labels_unique reverse_code=operation_make_labels_unique_reverse) ]<block_end>
<try_stmt><block_start><import_from_stmt>DeepRTS Engine<block_end><except_stmt>ImportError<block_start><import_stmt>Engine<block_end><try_stmt><block_start><import_from_stmt>DeepRTS.Engine Map UnitManager Constants Player<import_from_stmt>DeepRTS.Engine Constants<block_end><except_stmt>ImportError<block_start><import_from_stmt>Engine Map UnitManager Constants Player Constants<block_end>
<import_stmt>sys<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_from_stmt>gym spaces<import_from_stmt>pyrep.const RenderMode<import_from_stmt>pyrep.objects.dummy Dummy<import_from_stmt>pyrep.objects.vision_sensor VisionSensor<import_from_stmt>rlbench.environment Environment<import_from_stmt>rlbench.action_modes ArmActionMode ActionMode<import_from_stmt>rlbench.observation_config ObservationConfig<import_from_stmt>rlbench.tasks *<line_sep># Don't forget to add: export PYTHONPATH=PATH_TO_YOUR_LOCAL_RLBENCH_REPO # list of state types state_types=['left_shoulder_rgb' 'left_shoulder_depth' 'left_shoulder_mask' 'right_shoulder_rgb' 'right_shoulder_depth' 'right_shoulder_mask' 'wrist_rgb' 'wrist_depth' 'wrist_mask' 'joint_velocities' 'joint_velocities_noise' 'joint_positions' 'joint_positions_noise' 'joint_forces' 'joint_forces_noise' 'gripper_pose' 'gripper_touch_forces' 'task_low_dim_state']<class_stmt>RLBenchEnv()<block_start>""" make RLBench env to have same interfaces as openai.gym """<def_stmt>__init__ self task_name:str state_type:list='state' # render_mode=None): <block_start>""" create RL Bench environment :param task_name: task names can be found in rlbench.tasks :param state_type: state or vision or a sub list of state_types list like ['left_shoulder_rgb'] """<if_stmt>state_type<eq>'state'<or>state_type<eq>'vision'<or>isinstance(state_type list)<block_start>self._state_type=state_type<block_end><else_stmt><block_start><raise>ValueError('State type value error, your value is {}'.format(state_type))<block_end># self._render_mode = render_mode self._render_mode=<none><line_sep>obs_config=ObservationConfig()<line_sep>obs_config.set_all(<true>)<line_sep>action_mode=ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)<line_sep>self.env=Environment(action_mode obs_config=obs_config headless=<true>)<line_sep>self.env.launch()<try_stmt><block_start>self.task=self.env.get_task(getattr(sys.modules[__name__] 
task_name))<block_end><except_stmt><block_start><raise>NotImplementedError<block_end>_,obs=self.task.reset()<line_sep>self.spec=Spec(task_name)<if_stmt>self._state_type<eq>'state'<block_start>self.observation_space=spaces.Box(low=-np.inf high=np.inf shape=obs.get_low_dim_data().shape)<block_end><elif_stmt>self._state_type<eq>'vision'<block_start>space_dict=OrderedDict()<line_sep>space_dict["state"]=spaces.Box(low=-np.inf high=np.inf shape=obs.get_low_dim_data().shape)<for_stmt>i ["left_shoulder_rgb" "right_shoulder_rgb" "wrist_rgb" "front_rgb"]<block_start>space_dict[i]=spaces.Box(low=0 high=1 shape=getattr(obs i).shape)<block_end>self.observation_space=spaces.Dict(space_dict)<block_end><else_stmt><block_start>space_dict=OrderedDict()<for_stmt>name self._state_type<block_start><if_stmt>name.split('_')[-1]<in>('rgb' 'depth' 'mask')<block_start>space_dict[name]=spaces.Box(low=0 high=1 shape=getattr(obs name).shape)<block_end><else_stmt><block_start>space_dict[name]=spaces.Box(low=-np.inf high=np.inf shape=getattr(obs name).shape)<block_end>self.observation_space=spaces.Dict(space_dict)<block_end><block_end>self.action_space=spaces.Box(low=-1.0 high=1.0 shape=(self.env.action_size ) dtype=np.float32)<line_sep># if render_mode is not None: # # Add the camera to the scene # cam_placeholder = Dummy('cam_cinematic_placeholder') # self._gym_cam = VisionSensor.create([640, 360]) # self._gym_cam.set_pose(cam_placeholder.get_pose()) # if render_mode == 'human': # self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED) # else: # self._gym_cam.set_render_mode(RenderMode.OPENGL3) <block_end><def_stmt>_extract_obs self obs<block_start><if_stmt>self._state_type<eq>'state'<block_start><return>np.array(obs.get_low_dim_data() np.float32)<block_end><elif_stmt>self._state_type<eq>'vision'<block_start><return>np.array([np.array(obs.get_low_dim_data() np.float32) np.array(obs.left_shoulder_rgb np.float32) np.array(obs.right_shoulder_rgb np.float32) np.array(obs.wrist_rgb np.float32) 
np.array(obs.front_rgb np.float32) ])<block_end><else_stmt><block_start>result=['tag']<for_stmt>name self._state_type<block_start>result.append(np.array(getattr(obs name) np.float32))<block_end><return>np.delete(np.array(result ) 0 0)<block_end><block_end><def_stmt>seed self seed_value# set seed as in openai.gym env <block_start><pass><block_end><def_stmt>render self mode='human'# todo render available at any time <block_start><if_stmt>self._render_mode<is><none><block_start>self._render_mode=mode<line_sep># Add the camera to the scene cam_placeholder=Dummy('cam_cinematic_placeholder')<line_sep>self._gym_cam=VisionSensor.create([640 360])<line_sep>self._gym_cam.set_pose(cam_placeholder.get_pose())<if_stmt>mode<eq>'human'<block_start>self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)<block_end><else_stmt><block_start>self._gym_cam.set_render_mode(RenderMode.OPENGL3)<block_end><block_end><if_stmt>mode<ne>self._render_mode<block_start><raise>ValueError('The render mode must match the render mode selected in the '<concat>'constructor. \nI.e. if you want "human" render mode, then '<concat>'create the env by calling: '<concat>'gym.make("reach_target-state-v0", render_mode="human").\n'<concat>'You passed in mode %s, but expected %s.'%(mode self._render_mode))<block_end><if_stmt>mode<eq>'rgb_array'<block_start><return>self._gym_cam.capture_rgb()<block_end><block_end><def_stmt>reset self<block_start>descriptions,obs=self.task.reset()<line_sep><return>self._extract_obs(obs)<block_end><def_stmt>step self action<block_start>obs,reward,terminate=self.task.step(action)<line_sep><return>self._extract_obs(obs) reward terminate <none><block_end><def_stmt>close self<block_start>self.env.shutdown()<block_end><block_end><class_stmt>Spec()<block_start>""" a fake spec """<def_stmt>__init__ self id_name<block_start>self.id=id_name<block_end><block_end>
# Copyright 2019-2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """utils"""<line_sep># input format begin DEFAULT="DefaultFormat"<line_sep>NCHW="NCHW"<line_sep>NHWC="NHWC"<line_sep>HWCN="HWCN"<line_sep>NC1HWC0="NC1HWC0"<line_sep>FRAC_Z="FracZ"<line_sep># input format end # fusion type begin ELEMWISE="ELEMWISE"<line_sep>CONVLUTION="CONVLUTION"<line_sep>COMMREDUCE="COMMREDUCE"<line_sep>SEGMENT="SEGMENT"<line_sep>OPAQUE="OPAQUE"<line_sep># fusion type end BINDS="binds"<line_sep>
<import_stmt>requests<import_from_stmt>django.conf settings<import_from_stmt>visualize.utils.api Client<class_stmt>GetUserInfo(object)<block_start>""" GetUserInfo : params : username response : { "login": "torvalds", "id": 1024025, "avatar_url": "https://avatars0.githubusercontent.com/u/1024025?v=4", "gravatar_id": "", "url": "https://api.github.com/users/torvalds", "html_url": "https://github.com/torvalds", "followers_url": "https://api.github.com/users/torvalds/followers", "following_url": "https://api.github.com/users/torvalds/following{/other_user}", "gists_url": "https://api.github.com/users/torvalds/gists{/gist_id}", "starred_url": "https://api.github.com/users/torvalds/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/torvalds/subscriptions", "organizations_url": "https://api.github.com/users/torvalds/orgs", "repos_url": "https://api.github.com/users/torvalds/repos", "events_url": "https://api.github.com/users/torvalds/events{/privacy}", "received_events_url": "https://api.github.com/users/torvalds/received_events", "type": "User", "site_admin": false, "name": "<NAME>", "company": "Linux Foundation", "blog": "", "location": "Portland, OR", "email": null, "hireable": null, "bio": null, "public_repos": 6, "public_gists": 0, "followers": 72049, "following": 0, "created_at": "2011-09-03T15:26:22Z", "updated_at": "2017-11-14T16:54:03Z" } """<def_stmt>_extract_infos self data<block_start><return>{"id":data["id"] "name":data["name"] "username":data["login"] "html_url":data["html_url"] "url":data["url"] "avatar":data["avatar_url"] "total_repos":data["public_repos"] "followers":data["followers"] "following":data["following"] "created_at":data["created_at"] "company":data["company"] "bio":data["bio"] "email":data["email"] "location":data["location"] }<block_end><def_stmt>validate self username<block_start><if_stmt><not>username<block_start><raise>Exception("Invalid username")<block_end><block_end><def_stmt>execute self 
username<block_start>self.validate(username)<line_sep>api_response=Client().user_info(url_params={"username":username})<if_stmt>"message"<in>api_response<block_start><return><false><block_end>response=self._extract_infos(api_response)<line_sep><return>response<block_end><block_end>
"""Sequence generation framework. Recurrent networks are often used to generate/model sequences. Examples include language modelling, machine translation, handwriting synthesis, etc.. A typical pattern in this context is that sequence elements are generated one often another, and every generated element is fed back into the recurrent network state. Sometimes also an attention mechanism is used to condition sequence generation on some structured input like another sequence or an image. This module provides :class:`SequenceGenerator` that builds a sequence generating network from three main components: * a core recurrent transition, e.g. :class:`~blocks.bricks.recurrent.LSTM` or :class:`~blocks.bricks.recurrent.GatedRecurrent` * a readout component that can produce sequence elements using the network state and the information from the attention mechanism * an attention mechanism (see :mod:`~blocks.bricks.attention` for more information) Implementation-wise :class:`SequenceGenerator` fully relies on :class:`BaseSequenceGenerator`. At the level of the latter an attention is mandatory, moreover it must be a part of the recurrent transition (see :class:`~blocks.bricks.attention.AttentionRecurrent`). To simulate optional attention, :class:`SequenceGenerator` wraps the pure recurrent network in :class:`FakeAttentionRecurrent`. 
"""<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>six add_metaclass<import_from_stmt>theano tensor<import_from_stmt>blocks.bricks Initializable Random Bias NDimensionalSoftmax<import_from_stmt>blocks.bricks.base application Brick lazy<import_from_stmt>blocks.bricks.parallel Fork Merge<import_from_stmt>blocks.bricks.lookup LookupTable<import_from_stmt>blocks.bricks.recurrent recurrent<import_from_stmt>blocks.bricks.attention AbstractAttentionRecurrent AttentionRecurrent <import_from_stmt>blocks.roles add_role COST<import_from_stmt>blocks.utils dict_union dict_subset<class_stmt>BaseSequenceGenerator(Initializable)<block_start>r"""A generic sequence generator. This class combines two components, a readout network and an attention-equipped recurrent transition, into a context-dependent sequence generator. Third component must be also given which forks feedback from the readout network to obtain inputs for the transition. The class provides two methods: :meth:`generate` and :meth:`cost`. The former is to actually generate sequences and the latter is to compute the cost of generating given sequences. The generation algorithm description follows. **Definitions and notation:** * States :math:`s_i` of the generator are the states of the transition as specified in `transition.state_names`. * Contexts of the generator are the contexts of the transition as specified in `transition.context_names`. * Glimpses :math:`g_i` are intermediate entities computed at every generation step from states, contexts and the previous step glimpses. They are computed in the transition's `apply` method when not given or by explicitly calling the transition's `take_glimpses` method. The set of glimpses considered is specified in `transition.glimpse_names`. * Outputs :math:`y_i` are produced at every step and form the output sequence. A generation cost :math:`c_i` is assigned to each output. **Algorithm:** 1. Initialization. .. 
math:: y_0 = readout.initial\_outputs(contexts)\\ s_0, g_0 = transition.initial\_states(contexts)\\ i = 1\\ By default all recurrent bricks from :mod:`~blocks.bricks.recurrent` have trainable initial states initialized with zeros. Subclass them or :class:`~blocks.bricks.recurrent.BaseRecurrent` directly to get custom initial states. 2. New glimpses are computed: .. math:: g_i = transition.take\_glimpses( s_{i-1}, g_{i-1}, contexts) 3. A new output is generated by the readout and its cost is computed: .. math:: f_{i-1} = readout.feedback(y_{i-1}) \\ r_i = readout.readout(f_{i-1}, s_{i-1}, g_i, contexts) \\ y_i = readout.emit(r_i) \\ c_i = readout.cost(r_i, y_i) Note that the *new* glimpses and the *old* states are used at this step. The reason for not merging all readout methods into one is to make an efficient implementation of :meth:`cost` possible. 4. New states are computed and iteration is done: .. math:: f_i = readout.feedback(y_i) \\ s_i = transition.compute\_states(s_{i-1}, g_i, fork.apply(f_i), contexts) \\ i = i + 1 5. Back to step 2 if the desired sequence length has not been yet reached. | A scheme of the algorithm described above follows. .. image:: /_static/sequence_generator_scheme.png :height: 500px :width: 500px .. Parameters ---------- readout : instance of :class:`AbstractReadout` The readout component of the sequence generator. transition : instance of :class:`AbstractAttentionRecurrent` The transition component of the sequence generator. fork : :class:`~.bricks.Brick` The brick to compute the transition's inputs from the feedback. 
See Also -------- :class:`.Initializable` : for initialization parameters :class:`SequenceGenerator` : more user friendly interface to this\ brick """<line_sep>@lazy()<def_stmt>__init__ self readout transition fork **kwargs<block_start>self.readout=readout<line_sep>self.transition=transition<line_sep>self.fork=fork<line_sep>children=[self.readout self.fork self.transition]<line_sep>kwargs.setdefault('children' []).extend(children)<line_sep>super(BaseSequenceGenerator self).__init__(**kwargs)<block_end>@property<def_stmt>_state_names self<block_start><return>self.transition.compute_states.outputs<block_end>@property<def_stmt>_context_names self<block_start><return>self.transition.apply.contexts<block_end>@property<def_stmt>_glimpse_names self<block_start><return>self.transition.take_glimpses.outputs<block_end><def_stmt>_push_allocation_config self# Configure readout. That involves `get_dim` requests # to the transition. To make sure that it answers # correctly we should finish its configuration first. <block_start>self.transition.push_allocation_config()<line_sep>transition_sources=(self._state_names+self._context_names+self._glimpse_names)<line_sep>self.readout.source_dims=[self.transition.get_dim(name)<if>name<in>transition_sources<else>self.readout.get_dim(name)<for>name self.readout.source_names]<line_sep># Configure fork. For similar reasons as outlined above, # first push `readout` configuration. self.readout.push_allocation_config()<line_sep>feedback_name,=self.readout.feedback.outputs<line_sep>self.fork.input_dim=self.readout.get_dim(feedback_name)<line_sep>self.fork.output_dims=self.transition.get_dims(self.fork.apply.outputs)<block_end>@application<def_stmt>cost self application_call outputs mask=<none> **kwargs<block_start>"""Returns the average cost over the minibatch. The cost is computed by averaging the sum of per token costs for each sequence over the minibatch. .. 
warning:: Note that, the computed cost can be problematic when batches consist of vastly different sequence lengths. Parameters ---------- outputs : :class:`~tensor.TensorVariable` The 3(2) dimensional tensor containing output sequences. The axis 0 must stand for time, the axis 1 for the position in the batch. mask : :class:`~tensor.TensorVariable` The binary matrix identifying fake outputs. Returns ------- cost : :class:`~tensor.Variable` Theano variable for cost, computed by summing over timesteps and then averaging over the minibatch. Notes ----- The contexts are expected as keyword arguments. Adds average cost per sequence element `AUXILIARY` variable to the computational graph with name ``per_sequence_element``. """<line_sep># Compute the sum of costs costs=self.cost_matrix(outputs mask=mask **kwargs)<line_sep>cost=tensor.mean(costs.sum(axis=0))<line_sep>add_role(cost COST)<line_sep># Add auxiliary variable for per sequence element cost application_call.add_auxiliary_variable((costs.sum()/mask.sum())<if>mask<is><not><none><else>costs.mean() name='per_sequence_element')<line_sep><return>cost<block_end>@application<def_stmt>cost_matrix self application_call outputs mask=<none> **kwargs<block_start>"""Returns generation costs for output sequences. See Also -------- :meth:`cost` : Scalar cost. """<line_sep># We assume the data has axes (time, batch, features, ...) batch_size=outputs.shape[1]<line_sep># Prepare input for the iterative part states=dict_subset(kwargs self._state_names must_have=<false>)<line_sep># masks in context are optional (e.g. `attended_mask`) contexts=dict_subset(kwargs self._context_names must_have=<false>)<line_sep>feedback=self.readout.feedback(outputs)<line_sep>inputs=self.fork.apply(feedback as_dict=<true>)<line_sep># Run the recurrent network results=self.transition.apply(mask=mask return_initial_states=<true> as_dict=<true> **dict_union(inputs states contexts))<line_sep># Separate the deliverables. 
The last states are discarded: they # are not used to predict any output symbol. The initial glimpses # are discarded because they are not used for prediction. # Remember, glimpses are computed _before_ output stage, states are # computed after. states={name:results[name][:-1]<for>name self._state_names}<line_sep>glimpses={name:results[name][1:]<for>name self._glimpse_names}<line_sep># Compute the cost feedback=tensor.roll(feedback 1 0)<line_sep>feedback=tensor.set_subtensor(feedback[0] self.readout.feedback(self.readout.initial_outputs(batch_size)))<line_sep>readouts=self.readout.readout(feedback=feedback **dict_union(states glimpses contexts))<line_sep>costs=self.readout.cost(readouts outputs)<if_stmt>mask<is><not><none><block_start>costs<augmul>mask<block_end><for_stmt>name,variable list(glimpses.items())+list(states.items())<block_start>application_call.add_auxiliary_variable(variable.copy() name=name)<block_end># This variables can be used to initialize the initial states of the # next batch using the last states of the current batch. <for_stmt>name self._state_names+self._glimpse_names<block_start>application_call.add_auxiliary_variable(results[name][-1].copy() name=name+"_final_value")<block_end><return>costs<block_end>@recurrent<def_stmt>generate self outputs **kwargs<block_start>"""A sequence generation step. Parameters ---------- outputs : :class:`~tensor.TensorVariable` The outputs from the previous step. Notes ----- The contexts, previous states and glimpses are expected as keyword arguments. """<line_sep>states=dict_subset(kwargs self._state_names)<line_sep># masks in context are optional (e.g. 
`attended_mask`) contexts=dict_subset(kwargs self._context_names must_have=<false>)<line_sep>glimpses=dict_subset(kwargs self._glimpse_names)<line_sep>next_glimpses=self.transition.take_glimpses(as_dict=<true> **dict_union(states glimpses contexts))<line_sep>next_readouts=self.readout.readout(feedback=self.readout.feedback(outputs) **dict_union(states next_glimpses contexts))<line_sep>next_outputs=self.readout.emit(next_readouts)<line_sep>next_costs=self.readout.cost(next_readouts next_outputs)<line_sep>next_feedback=self.readout.feedback(next_outputs)<line_sep>next_inputs=(self.fork.apply(next_feedback as_dict=<true>)<if>self.fork<else>{'feedback':next_feedback})<line_sep>next_states=self.transition.compute_states(as_list=<true> **dict_union(next_inputs states next_glimpses contexts))<line_sep><return>(next_states+[next_outputs]+list(next_glimpses.values())+[next_costs])<block_end>@generate.delegate<def_stmt>generate_delegate self<block_start><return>self.transition.apply<block_end>@generate.property('states')<def_stmt>generate_states self<block_start><return>self._state_names+['outputs']+self._glimpse_names<block_end>@generate.property('outputs')<def_stmt>generate_outputs self<block_start><return>(self._state_names+['outputs']+self._glimpse_names+['costs'])<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<in>(self._state_names+self._context_names+self._glimpse_names)<block_start><return>self.transition.get_dim(name)<block_end><elif_stmt>name<eq>'outputs'<block_start><return>self.readout.get_dim(name)<block_end><return>super(BaseSequenceGenerator self).get_dim(name)<block_end>@application<def_stmt>initial_states self batch_size *args **kwargs# TODO: support dict of outputs for application methods # to simplify this code. 
<block_start>state_dict=dict(self.transition.initial_states(batch_size as_dict=<true> *args **kwargs) outputs=self.readout.initial_outputs(batch_size))<line_sep><return>[state_dict[state_name]<for>state_name self.generate.states]<block_end>@initial_states.property('outputs')<def_stmt>initial_states_outputs self<block_start><return>self.generate.states<block_end><block_end>@add_metaclass(ABCMeta)<class_stmt>AbstractReadout(Initializable)<block_start>"""The interface for the readout component of a sequence generator. The readout component of a sequence generator is a bridge between the core recurrent network and the output sequence. Parameters ---------- source_names : list A list of the source names (outputs) that are needed for the readout part e.g. ``['states']`` or ``['states', 'weighted_averages']`` or ``['states', 'feedback']``. readout_dim : int The dimension of the readout. Attributes ---------- source_names : list readout_dim : int See Also -------- :class:`BaseSequenceGenerator` : see how exactly a readout is used :class:`Readout` : the typically used readout brick """<line_sep>@lazy(allocation=['source_names' 'readout_dim'])<def_stmt>__init__ self source_names readout_dim **kwargs<block_start>self.source_names=source_names<line_sep>self.readout_dim=readout_dim<line_sep>super(AbstractReadout self).__init__(**kwargs)<block_end>@abstractmethod<def_stmt>emit self readouts<block_start>"""Produce outputs from readouts. Parameters ---------- readouts : :class:`~theano.Variable` Readouts produced by the :meth:`readout` method of a `(batch_size, readout_dim)` shape. """<line_sep><pass><block_end>@abstractmethod<def_stmt>cost self readouts outputs<block_start>"""Compute generation cost of outputs given readouts. Parameters ---------- readouts : :class:`~theano.Variable` Readouts produced by the :meth:`readout` method of a `(..., readout dim)` shape. outputs : :class:`~theano.Variable` Outputs whose cost should be computed. 
Should have as many or one less dimensions compared to `readout`. If readout has `n` dimensions, first `n - 1` dimensions of `outputs` should match with those of `readouts`. """<line_sep><pass><block_end>@abstractmethod<def_stmt>initial_outputs self batch_size<block_start>"""Compute initial outputs for the generator's first step. In the notation from the :class:`BaseSequenceGenerator` documentation this method should compute :math:`y_0`. """<line_sep><pass><block_end>@abstractmethod<def_stmt>readout self **kwargs<block_start>r"""Compute the readout vector from states, glimpses, etc. Parameters ---------- \*\*kwargs: dict Contains sequence generator states, glimpses, contexts and feedback from the previous outputs. """<line_sep><pass><block_end>@abstractmethod<def_stmt>feedback self outputs<block_start>"""Feeds outputs back to be used as inputs of the transition."""<line_sep><pass><block_end><block_end><class_stmt>Readout(AbstractReadout)<block_start>r"""Readout brick with separated emitter and feedback parts. :class:`Readout` combines a few bits and pieces into an object that can be used as the readout component in :class:`BaseSequenceGenerator`. This includes an emitter brick, to which :meth:`emit`, :meth:`cost` and :meth:`initial_outputs` calls are delegated, a feedback brick to which :meth:`feedback` functionality is delegated, and a pipeline to actually compute readouts from all the sources (see the `source_names` attribute of :class:`AbstractReadout`). The readout computation pipeline is constructed from `merge` and `post_merge` brick, whose responsibilites are described in the respective docstrings. Parameters ---------- emitter : an instance of :class:`AbstractEmitter` The emitter component. feedback_brick : an instance of :class:`AbstractFeedback` The feedback component. merge : :class:`~.bricks.Brick`, optional A brick that takes the sources given in `source_names` as an input and combines them into a single output. 
If given, `merge_prototype` cannot be given. merge_prototype : :class:`.FeedForward`, optional If `merge` isn't given, the transformation given by `merge_prototype` is applied to each input before being summed. By default a :class:`.Linear` transformation without biases is used. If given, `merge` cannot be given. post_merge : :class:`.Feedforward`, optional This transformation is applied to the merged inputs. By default :class:`.Bias` is used. merged_dim : int, optional The input dimension of `post_merge` i.e. the output dimension of `merge` (or `merge_prototype`). If not give, it is assumed to be the same as `readout_dim` (i.e. `post_merge` is assumed to not change dimensions). \*\*kwargs : dict Passed to the parent's constructor. See Also -------- :class:`BaseSequenceGenerator` : see how exactly a readout is used :class:`AbstractEmitter`, :class:`AbstractFeedback` """<def_stmt>__init__ self emitter=<none> feedback_brick=<none> merge=<none> merge_prototype=<none> post_merge=<none> merged_dim=<none> **kwargs<block_start><if_stmt><not>emitter<block_start>emitter=TrivialEmitter(kwargs['readout_dim'])<block_end><if_stmt><not>feedback_brick<block_start>feedback_brick=TrivialFeedback(kwargs['readout_dim'])<block_end><if_stmt><not>merge<block_start>merge=Merge(input_names=kwargs['source_names'] prototype=merge_prototype)<block_end><if_stmt><not>post_merge<block_start>post_merge=Bias(dim=kwargs['readout_dim'])<block_end><if_stmt><not>merged_dim<block_start>merged_dim=kwargs['readout_dim']<block_end>self.emitter=emitter<line_sep>self.feedback_brick=feedback_brick<line_sep>self.merge=merge<line_sep>self.post_merge=post_merge<line_sep>self.merged_dim=merged_dim<line_sep>children=[self.emitter self.feedback_brick self.merge self.post_merge]<line_sep>kwargs.setdefault('children' []).extend(children)<line_sep>super(Readout self).__init__(**kwargs)<block_end><def_stmt>_push_allocation_config 
self<block_start>self.emitter.readout_dim=self.get_dim('readouts')<line_sep>self.feedback_brick.output_dim=self.get_dim('outputs')<line_sep>self.merge.input_names=self.source_names<line_sep>self.merge.input_dims=self.source_dims<line_sep>self.merge.output_dim=self.merged_dim<line_sep>self.post_merge.input_dim=self.merged_dim<line_sep>self.post_merge.output_dim=self.readout_dim<block_end>@application<def_stmt>readout self **kwargs<block_start>merged=self.merge.apply(**{name:kwargs[name]<for>name self.merge.input_names})<line_sep>merged=self.post_merge.apply(merged)<line_sep><return>merged<block_end>@application<def_stmt>emit self readouts<block_start><return>self.emitter.emit(readouts)<block_end>@application<def_stmt>cost self readouts outputs<block_start><return>self.emitter.cost(readouts outputs)<block_end>@application<def_stmt>initial_outputs self batch_size<block_start><return>self.emitter.initial_outputs(batch_size)<block_end>@application(outputs=['feedback'])<def_stmt>feedback self outputs<block_start><return>self.feedback_brick.feedback(outputs)<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<eq>'outputs'<block_start><return>self.emitter.get_dim(name)<block_end><elif_stmt>name<eq>'feedback'<block_start><return>self.feedback_brick.get_dim(name)<block_end><elif_stmt>name<eq>'readouts'<block_start><return>self.readout_dim<block_end><return>super(Readout self).get_dim(name)<block_end><block_end>@add_metaclass(ABCMeta)<class_stmt>AbstractEmitter(Brick)<block_start>"""The interface for the emitter component of a readout. Attributes ---------- readout_dim : int The dimension of the readout. Is given by the :class:`Readout` brick when allocation configuration is pushed. See Also -------- :class:`Readout` :class:`SoftmaxEmitter` : for integer outputs Notes ----- An important detail about the emitter cost is that it will be evaluated with inputs of different dimensions so it has to be flexible enough to handle this. 
The two ways in which it can be applied are: 1. In :meth:BaseSequenceGenerator.cost_matrix where it will be applied to the whole sequence at once. 2. In :meth:BaseSequenceGenerator.generate where it will be applied to only one step of the sequence. """<line_sep>@abstractmethod<def_stmt>emit self readouts<block_start>"""Implements the respective method of :class:`Readout`."""<line_sep><pass><block_end>@abstractmethod<def_stmt>cost self readouts outputs<block_start>"""Implements the respective method of :class:`Readout`."""<line_sep><pass><block_end>@abstractmethod<def_stmt>initial_outputs self batch_size<block_start>"""Implements the respective method of :class:`Readout`."""<line_sep><pass><block_end><block_end>@add_metaclass(ABCMeta)<class_stmt>AbstractFeedback(Brick)<block_start>"""The interface for the feedback component of a readout. See Also -------- :class:`Readout` :class:`LookupFeedback` for integer outputs """<line_sep>@abstractmethod<def_stmt>feedback self outputs<block_start>"""Implements the respective method of :class:`Readout`."""<line_sep><pass><block_end><block_end><class_stmt>TrivialEmitter(AbstractEmitter)<block_start>"""An emitter for the trivial case when readouts are outputs. Parameters ---------- readout_dim : int The dimension of the readout. Notes ----- By default :meth:`cost` always returns zero tensor. 
"""<line_sep>@lazy(allocation=['readout_dim'])<def_stmt>__init__ self readout_dim **kwargs<block_start>super(TrivialEmitter self).__init__(**kwargs)<line_sep>self.readout_dim=readout_dim<block_end>@application<def_stmt>emit self readouts<block_start><return>readouts<block_end>@application<def_stmt>cost self readouts outputs<block_start><return>tensor.zeros_like(outputs)<block_end>@application<def_stmt>initial_outputs self batch_size<block_start><return>tensor.zeros((batch_size self.readout_dim))<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<eq>'outputs'<block_start><return>self.readout_dim<block_end><return>super(TrivialEmitter self).get_dim(name)<block_end><block_end><class_stmt>SoftmaxEmitter(AbstractEmitter Initializable Random)<block_start>"""A softmax emitter for the case of integer outputs. Interprets readout elements as energies corresponding to their indices. Parameters ---------- initial_output : int or a scalar :class:`~theano.Variable` The initial output. """<def_stmt>__init__ self initial_output=0 **kwargs<block_start>self.initial_output=initial_output<line_sep>self.softmax=NDimensionalSoftmax()<line_sep>children=[self.softmax]<line_sep>kwargs.setdefault('children' []).extend(children)<line_sep>super(SoftmaxEmitter self).__init__(**kwargs)<block_end>@application<def_stmt>probs self readouts<block_start><return>self.softmax.apply(readouts extra_ndim=readouts.ndim-2)<block_end>@application<def_stmt>emit self readouts<block_start>probs=self.probs(readouts)<line_sep>batch_size=probs.shape[0]<line_sep>pvals_flat=probs.reshape((batch_size -1))<line_sep>generated=self.theano_rng.multinomial(pvals=pvals_flat)<line_sep><return>generated.reshape(probs.shape).argmax(axis=-1)<block_end>@application<def_stmt>cost self readouts outputs# WARNING: unfortunately this application method works # just fine when `readouts` and `outputs` have # different dimensions. Be careful! 
<block_start><return>self.softmax.categorical_cross_entropy(outputs readouts extra_ndim=readouts.ndim-2)<block_end>@application<def_stmt>initial_outputs self batch_size<block_start><return>self.initial_output<times>tensor.ones((batch_size ) dtype='int64')<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<eq>'outputs'<block_start><return>0<block_end><return>super(SoftmaxEmitter self).get_dim(name)<block_end><block_end><class_stmt>TrivialFeedback(AbstractFeedback)<block_start>"""A feedback brick for the case when readout are outputs."""<line_sep>@lazy(allocation=['output_dim'])<def_stmt>__init__ self output_dim **kwargs<block_start>super(TrivialFeedback self).__init__(**kwargs)<line_sep>self.output_dim=output_dim<block_end>@application(outputs=['feedback'])<def_stmt>feedback self outputs<block_start><return>outputs<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<eq>'feedback'<block_start><return>self.output_dim<block_end><return>super(TrivialFeedback self).get_dim(name)<block_end><block_end><class_stmt>LookupFeedback(AbstractFeedback Initializable)<block_start>"""A feedback brick for the case when readout are integers. Stores and retrieves distributed representations of integers. 
"""<def_stmt>__init__ self num_outputs=<none> feedback_dim=<none> **kwargs<block_start>self.num_outputs=num_outputs<line_sep>self.feedback_dim=feedback_dim<line_sep>self.lookup=LookupTable(num_outputs feedback_dim)<line_sep>children=[self.lookup]<line_sep>kwargs.setdefault('children' []).extend(children)<line_sep>super(LookupFeedback self).__init__(**kwargs)<block_end><def_stmt>_push_allocation_config self<block_start>self.lookup.length=self.num_outputs<line_sep>self.lookup.dim=self.feedback_dim<block_end>@application<def_stmt>feedback self outputs<block_start><assert_stmt>self.output_dim<eq>0<line_sep><return>self.lookup.apply(outputs)<block_end><def_stmt>get_dim self name<block_start><if_stmt>name<eq>'feedback'<block_start><return>self.feedback_dim<block_end><return>super(LookupFeedback self).get_dim(name)<block_end><block_end><class_stmt>FakeAttentionRecurrent(AbstractAttentionRecurrent Initializable)<block_start>"""Adds fake attention interface to a transition. :class:`BaseSequenceGenerator` requires its transition brick to support :class:`~blocks.bricks.attention.AbstractAttentionRecurrent` interface, that is to have an embedded attention mechanism. For the cases when no attention is required (e.g. language modeling or encoder-decoder models), :class:`FakeAttentionRecurrent` is used to wrap a usual recurrent brick. The resulting brick has no glimpses and simply passes all states and contexts to the wrapped one. .. todo:: Get rid of this brick and support attention-less transitions in :class:`BaseSequenceGenerator`. 
"""<def_stmt>__init__ self transition **kwargs<block_start>self.transition=transition<line_sep>self.state_names=transition.apply.states<line_sep>self.context_names=transition.apply.contexts<line_sep>self.glimpse_names=[]<line_sep>children=[self.transition]<line_sep>kwargs.setdefault('children' []).extend(children)<line_sep>super(FakeAttentionRecurrent self).__init__(**kwargs)<block_end>@application<def_stmt>apply self *args **kwargs<block_start><return>self.transition.apply(*args **kwargs)<block_end>@apply.delegate<def_stmt>apply_delegate self<block_start><return>self.transition.apply<block_end>@application<def_stmt>compute_states self *args **kwargs<block_start><return>self.transition.apply(iterate=<false> *args **kwargs)<block_end>@compute_states.delegate<def_stmt>compute_states_delegate self<block_start><return>self.transition.apply<block_end>@application(outputs=[])<def_stmt>take_glimpses self *args **kwargs<block_start><return><none><block_end>@application<def_stmt>initial_states self batch_size *args **kwargs<block_start><return>self.transition.initial_states(batch_size *args **kwargs)<block_end>@initial_states.property('outputs')<def_stmt>initial_states_outputs self<block_start><return>self.transition.apply.states<block_end><def_stmt>get_dim self name<block_start><return>self.transition.get_dim(name)<block_end><block_end><class_stmt>SequenceGenerator(BaseSequenceGenerator)<block_start>r"""A more user-friendly interface for :class:`BaseSequenceGenerator`. Parameters ---------- readout : instance of :class:`AbstractReadout` The readout component for the sequence generator. transition : instance of :class:`.BaseRecurrent` The recurrent transition to be used in the sequence generator. Will be combined with `attention`, if that one is given. attention : object, optional The attention mechanism to be added to ``transition``, an instance of :class:`~blocks.bricks.attention.AbstractAttention`. 
add_contexts : bool If ``True``, the :class:`.AttentionRecurrent` wrapping the `transition` will add additional contexts for the attended and its mask. \*\*kwargs : dict All keywords arguments are passed to the base class. If `fork` keyword argument is not provided, :class:`.Fork` is created that forks all transition sequential inputs without a "mask" substring in them. """<def_stmt>__init__ self readout transition attention=<none> add_contexts=<true> **kwargs<block_start>normal_inputs=[name<for>name transition.apply.sequences<if>'mask'<not><in>name]<line_sep>kwargs.setdefault('fork' Fork(normal_inputs))<if_stmt>attention<block_start>transition=AttentionRecurrent(transition attention add_contexts=add_contexts name="att_trans")<block_end><else_stmt><block_start>transition=FakeAttentionRecurrent(transition name="with_fake_attention")<block_end>super(SequenceGenerator self).__init__(readout transition **kwargs)<block_end><block_end>
import infra.basetest
from tests.init.base import InitSystemBase as InitSystemBase


class InitSystemBusyboxBase(InitSystemBase):
    """Common base for busybox-init tests: basic toolchain, no tar rootfs."""
    config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
        """
# BR2_TARGET_ROOTFS_TAR is not set
"""

    def check_init(self):
        # The init process must be busybox.
        super(InitSystemBusyboxBase, self).check_init("/bin/busybox")


class TestInitSystemBusyboxRo(InitSystemBusyboxBase):
    """Read-only squashfs rootfs, no DHCP configured."""
    config = InitSystemBusyboxBase.config + \
        """
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""

    def test_run(self):
        self.start_emulator("squashfs")
        self.check_init()
        # Only the loopback-style default interface is expected.
        self.check_network("eth0", 1)


class TestInitSystemBusyboxRw(InitSystemBusyboxBase):
    """Read-write ext2 rootfs, no DHCP configured."""
    config = InitSystemBusyboxBase.config + \
        """
BR2_TARGET_ROOTFS_EXT2=y
"""

    def test_run(self):
        self.start_emulator("ext2")
        self.check_init()
        self.check_network("eth0", 1)


class TestInitSystemBusyboxRoNet(InitSystemBusyboxBase):
    """Read-only squashfs rootfs with DHCP on eth0."""
    config = InitSystemBusyboxBase.config + \
        """
BR2_SYSTEM_DHCP="eth0"
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""

    def test_run(self):
        self.start_emulator("squashfs")
        self.check_init()
        self.check_network("eth0")


class TestInitSystemBusyboxRwNet(InitSystemBusyboxBase):
    """Read-write ext2 rootfs with DHCP on eth0."""
    config = InitSystemBusyboxBase.config + \
        """
BR2_SYSTEM_DHCP="eth0"
BR2_TARGET_ROOTFS_EXT2=y
"""

    def test_run(self):
        self.start_emulator("ext2")
        self.check_init()
        self.check_network("eth0")
import argparse
import os

import lib.config as config
import lib.utils as utils


def count_present_and_missing(cls, directory, metadata):
    """
    Count present and missing videos for a class based on metadata.

    :param cls:        The class. If None, count all videos (used for
                       testing videos - no classes).
    :param directory:  Directory containing the videos.
    :param metadata:   Kinetics metadata json.
    :return:           Tuple: number present videos, number of missing videos
    """
    # Keys whose annotated label matches the class (or all keys if no
    # class was given).
    relevant = (
        key for key in metadata
        if cls is None or metadata[key]["annotations"]["label"] == cls)
    present = 0
    missing = 0
    for key in relevant:
        video_path = os.path.join(directory, "{}.mp4".format(key))
        if os.path.isfile(video_path):
            present += 1
        else:
            missing += 1
    return present, missing


def main(args):
    """Print per-class and aggregate download statistics."""

    # load video classes
    classes = utils.load_json(config.CLASSES_PATH)
    # load lists of videos
    train_metadata = utils.load_json(config.TRAIN_METADATA_PATH)
    val_metadata = utils.load_json(config.VAL_METADATA_PATH)
    test_metadata = utils.load_json(config.TEST_METADATA_PATH)

    num_found = 0
    total = 0

    total_train_present = 0
    total_train_missing = 0
    total_val_present = 0
    total_val_missing = 0

    # load subset (optional restriction to a list of classes)
    subset = utils.load_json(args.subset) if args.subset else None

    # count train and validation videos
    for cls in classes:
        if subset is not None and cls not in subset:
            continue

        total += 1
        cls_train_path = os.path.join(config.TRAIN_ROOT,
                                      cls.replace(" ", "_"))
        cls_valid_path = os.path.join(config.VALID_ROOT,
                                      cls.replace(" ", "_"))

        train_found = False
        valid_found = False

        if os.path.isdir(cls_train_path):
            train_present, train_missing = count_present_and_missing(
                cls, cls_train_path, train_metadata)
            train_found = True
            total_train_present += train_present
            total_train_missing += train_missing

        if os.path.isdir(cls_valid_path):
            valid_present, valid_missing = count_present_and_missing(
                cls, cls_valid_path, val_metadata)
            valid_found = True
            total_val_present += valid_present
            total_val_missing += valid_missing

        if train_found or valid_found:
            num_found += 1

            if args.details:
                print("class {}".format(cls))
                if train_found:
                    print("train: {} / {}".format(
                        train_present, train_present + train_missing))
                if valid_found:
                    print("valid: {} / {}".format(
                        valid_present, valid_present + valid_missing))
                print()

    # count test videos (no classes)
    test_present, test_missing = count_present_and_missing(
        None, config.TEST_ROOT, test_metadata)

    # print
    train_percent_found = 0
    if total_train_present > 0:
        train_percent_found = (total_train_present * 100) / (
            total_train_present + total_train_missing)

    valid_percent_found = 0
    if total_val_present > 0:
        valid_percent_found = (total_val_present * 100) / (
            total_val_present + total_val_missing)

    test_percent_found = 0
    if test_present > 0:
        test_percent_found = (test_present * 100) / (
            test_present + test_missing)

    print("class stats:")
    print("\t{:d} / {:d} classes found".format(num_found, total))
    print()
    print("video stats (only for found classes):")
    print("\t{:d} / {:d} ({:.2f}%) train videos found".format(
        total_train_present,
        total_train_present + total_train_missing,
        train_percent_found))
    print("\t{:d} / {:d} ({:.2f}%) valid videos found".format(
        total_val_present,
        total_val_present + total_val_missing,
        valid_percent_found))
    print("\t{:d} / {:d} ({:.2f}%) test videos found".format(
        test_present,
        test_present + test_missing,
        test_percent_found))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        "Print statistics about downloaded videos.")
    parser.add_argument("-d", "--details", action="store_true",
                        default=False,
                        help="detailed stats for each found class")
    parser.add_argument("-s", "--subset",
                        help="path to a JSON file containing a subset "
                             "of Kinetics classes")
    parsed = parser.parse_args()
    main(parsed)
import numpy as np

from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict

__all__ = [
    "MatchingResult",
    "normalized_mutual_information",
    "overlapping_normalized_mutual_information_LFK",
    "overlapping_normalized_mutual_information_MGH",
    "omega",
    "f1",
    "nf1",
    "adjusted_rand_index",
    "adjusted_mutual_information",
    "variation_of_information",
    "partition_closeness_simple",
]

# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)


def __check_partition_coverage(first_partition: object, second_partition: object):
    """Raise ValueError unless both partitions cover the same node set."""
    nodes_first = {
        node: None for community in first_partition.communities
        for node in community
    }
    nodes_second = {
        node: None for community in second_partition.communities
        for node in community
    }

    if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
        raise ValueError("Both partitions should cover the same node set")


def __check_partition_overlap(first_partition: object, second_partition: object):
    """Raise ValueError if either partition is overlapping."""
    if first_partition.overlap or second_partition.overlap:
        raise ValueError("Not defined for overlapping partitions")


def __communities_to_labels(partition: object) -> list:
    """Flatten a crisp partition into a node-ordered list of community ids.

    Nodes are sorted so that both partitions produce label vectors
    aligned on the same node order.
    """
    return [
        x[1]
        for x in sorted(
            [
                (node, nid)
                for nid, cluster in enumerate(partition.communities)
                for node in cluster
            ],
            key=lambda x: x[0],
        )
    ]


def normalized_mutual_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """
    Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) is an normalization of the Mutual
    Information (MI) score to scale the results between 0 (no mutual
    information) and 1 (perfect correlation). In this function, mutual
    information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    first_partition_c = __communities_to_labels(first_partition)
    second_partition_c = __communities_to_labels(second_partition)

    # Imported lazily so sklearn stays an optional dependency.
    from sklearn.metrics import normalized_mutual_info_score

    return MatchingResult(
        score=normalized_mutual_info_score(first_partition_c, second_partition_c)
    )


def overlapping_normalized_mutual_information_LFK(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings.

    Extension of the Normalized Mutual Information (NMI) score to cope
    with overlapping partitions. This is the version proposed by
    Lancichinetti et al. (1)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)

    :Reference:

    1. Lancichinetti, A., Fortunato, S., & Kertesz, J. (2009).
    Detecting the overlapping and hierarchical community structure in
    complex networks. New Journal of Physics, 11(3), 033015.
    """
    return MatchingResult(
        score=onmi.onmi(
            [set(x) for x in first_partition.communities],
            [set(x) for x in second_partition.communities],
        )
    )


def overlapping_normalized_mutual_information_MGH(
    first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
    """
    Overlapping Normalized Mutual Information between two clusterings.

    Extension of the Normalized Mutual Information (NMI) score to cope
    with overlapping partitions. This is the version proposed by McDaid
    et al. using a different normalization than the original LFR one. See
    ref. for more details.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :param normalization: one of "max" or "LFK". Default "max"
        (corresponds to the main method described in the article)
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)

    :Reference:

    1. McDaid, A., Greene, D., & Hurley, N. (2011).
    Normalized mutual information to evaluate overlapping community
    finding algorithms. arXiv preprint arXiv:1110.2515. Chicago
    """
    if normalization == "max":
        variant = "MGH"
    elif normalization == "LFK":
        variant = "MGH_LFK"
    else:
        raise ValueError(
            "Wrong 'normalization' value. Please specify one among [max, LFK]."
        )

    return MatchingResult(
        score=onmi.onmi(
            [set(x) for x in first_partition.communities],
            [set(x) for x in second_partition.communities],
            variant=variant,
        )
    )


def omega(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Index of resemblance for overlapping, complete coverage, network
    clusterings.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.omega(louvain_communities,leiden_communities)

    :Reference:

    1. Gabriel Murray, Giuseppe Carenini, and Raymond Ng. 2012.
    `Using the omega index for evaluating abstractive algorithms
    detection.
    <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_
    In Proceedings of Workshop on Evaluation Metrics and System
    Comparison for Automatic Summarization. Association for
    Computational Linguistics, Stroudsburg, PA, USA, 10-18.
    """
    __check_partition_coverage(first_partition, second_partition)

    first_partition = {k: v for k, v in enumerate(first_partition.communities)}
    second_partition = {k: v for k, v in enumerate(second_partition.communities)}

    om_idx = Omega(first_partition, second_partition)
    return MatchingResult(score=om_idx.omega_score)


def f1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Compute the average F1 score of the optimal algorithms matches among
    the partitions in input. Works on overlapping/non-overlapping
    complete/partial coverage partitions.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.f1(louvain_communities,leiden_communities)

    :Reference:

    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016).
    `A novel approach to evaluate algorithms detection internal on
    ground truth.
    <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_
    In Complex Networks VII (pp. 133-144). Springer, Cham.
    """
    nf = NF1(first_partition.communities, second_partition.communities)
    results = nf.summary()
    return MatchingResult(
        score=results["details"]["F1 mean"][0],
        std=results["details"]["F1 std"][0],
    )


def nf1(first_partition: object, second_partition: object) -> MatchingResult:
    """
    Compute the Normalized F1 score of the optimal algorithms matches
    among the partitions in input. Works on overlapping/non-overlapping
    complete/partial coverage partitions.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.nf1(louvain_communities,leiden_communities)

    :Reference:

    1. Rossetti, G., Pappalardo, L., & Rinzivillo, S. (2016).
    `A novel approach to evaluate algorithms detection internal on
    ground truth.
    <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_

    2. Rossetti, G. (2017). : `RDyn: graph benchmark handling
    algorithms dynamics. Journal of Complex Networks.
    <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_
    5(6), 893-912.
    """
    nf = NF1(first_partition.communities, second_partition.communities)
    results = nf.summary()
    return MatchingResult(score=results["scores"].loc["NF1"][0])


def adjusted_rand_index(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Rand index adjusted for chance.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and true
    clusterings.

    The raw RI score is then "adjusted for chance" into the ARI score
    using the following scheme::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The adjusted Rand index is thus ensured to have a value close to 0.0
    for random labeling independently of the number of clusters and
    samples and exactly 1.0 when the clusterings are identical (up to a
    permutation).

    ARI is a symmetric measure::

        adjusted_rand_index(a, b) == adjusted_rand_index(b, a)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)

    :Reference:

    1. Hubert, L., & Arabie, P. (1985). `Comparing partitions.
    <https://link.springer.com/article/10.1007/BF01908075/>`_
    Journal of classification, 2(1), 193-218.
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    first_partition_c = __communities_to_labels(first_partition)
    second_partition_c = __communities_to_labels(second_partition)

    from sklearn.metrics import adjusted_rand_score

    return MatchingResult(
        score=adjusted_rand_score(first_partition_c, second_partition_c)
    )


def adjusted_mutual_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the
    fact that the MI is generally higher for two clusterings with a
    larger number of clusters, regardless of whether there is actually
    more information shared. For two clusterings :math:`U` and
    :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels: a
    permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching ``label_true`` with
    ``label_pred`` will return the same score value. This can be useful
    to measure the agreement of two independent label assignments
    strategies on the same dataset when the real ground truth is not
    known.

    Be mindful that this function is an order of magnitude slower than
    other metrics, such as the Adjusted Rand Index.

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)

    :Reference:

    1. Vinh, N. X., Epps, J., & Bailey, J. (2010). `Information
    theoretic measures for clusterings comparison: Variants, properties,
    normalization and correction for chance.
    <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_
    Journal of Machine Learning Research, 11(Oct), 2837-2854.
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    first_partition_c = __communities_to_labels(first_partition)
    second_partition_c = __communities_to_labels(second_partition)

    from sklearn.metrics import adjusted_mutual_info_score

    return MatchingResult(
        score=adjusted_mutual_info_score(first_partition_c, second_partition_c)
    )


def variation_of_information(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Variation of Information among two nodes partitions.

    $$ H(p)+H(q)-2MI(p, q) $$

    where MI is the mutual information, H the partition entropy and p,q
    are the algorithms sets

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.variation_of_information(louvain_communities,leiden_communities)

    :Reference:

    1. Meila, M. (2007). `Comparing clusterings - an information based
    distance.
    <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_
    Journal of Multivariate Analysis, 98, 873-895.
    doi:10.1016/j.jmva.2006.11.013
    """
    __check_partition_coverage(first_partition, second_partition)
    __check_partition_overlap(first_partition, second_partition)

    n = float(sum([len(c1) for c1 in first_partition.communities]))
    sigma = 0.0
    for c1 in first_partition.communities:
        p = len(c1) / n
        for c2 in second_partition.communities:
            q = len(c2) / n
            r = len(set(c1) & set(c2)) / n
            if r > 0.0:
                sigma += r * (np.log2(r / p) + np.log2(r / q))

    return MatchingResult(score=abs(sigma))


def partition_closeness_simple(
    first_partition: object, second_partition: object
) -> MatchingResult:
    """Community size density closeness.
    Simple implementation that does not leverage kernel density estimator.

    $$ S_G(A,B) = \\frac{1}{2} \\Sum_{i=1}^{r}\\Sum_{j=1}^{s}
    min(\\frac{n^a(x^a_i)}{N^a}, \\frac{n^b_j(x^b_j)}{N^b})
    \\delta(x_i^a,x_j^b) $$

    where:

    $$ N^a $$ total number of communities in A of any size;
    $$ x^a $$ ordered list of community sizes for A;
    $$ n^a $$ multiplicity of community sizes for A.

    (symmetrically for B)

    :param first_partition: NodeClustering object
    :param second_partition: NodeClustering object
    :return: MatchingResult object

    :Example:

    >>> from cdlib import evaluation, algorithms
    >>> g = nx.karate_club_graph()
    >>> louvain_communities = algorithms.louvain(g)
    >>> leiden_communities = algorithms.leiden(g)
    >>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)

    :Reference:

    1. Dao, Vinh-Loc, Cecile Bothorel, and Philippe Lenca. "Estimating
    the similarity of community detection methods based on cluster size
    distribution." International Conference on Complex Networks and
    their Applications. Springer, Cham, 2018.
    """
    # BUGFIX: size multiplicities must be counted over *all* community
    # sizes. The previous implementation counted over the deduplicated
    # size list, which made every frequency identically 1 and ignored
    # the multiplicities n^a, n^b required by the formula above.
    sizes_a = [len(c) for c in first_partition.communities]
    coms_a = sorted(set(sizes_a))
    counts_a = defaultdict(int)
    for a in sizes_a:
        counts_a[a] += 1
    freq_a = [counts_a[a] for a in coms_a]
    n_a = sum(coms_a[i] * freq_a[i] for i in range(0, len(coms_a)))

    sizes_b = [len(c) for c in second_partition.communities]
    coms_b = sorted(set(sizes_b))
    counts_b = defaultdict(int)
    for b in sizes_b:
        counts_b[b] += 1
    freq_b = [counts_b[b] for b in coms_b]
    n_b = sum(coms_b[i] * freq_b[i] for i in range(0, len(coms_b)))

    closeness = 0
    for i in range(0, len(coms_a)):
        for j in range(0, len(coms_b)):
            if coms_a[i] == coms_b[j]:
                # delta(x_i^a, x_j^b): only matching sizes contribute.
                closeness += min(
                    (coms_a[i] * freq_a[i]) / n_a,
                    (coms_b[j] * freq_b[j]) / n_b,
                )

    closeness *= 0.5
    return MatchingResult(score=closeness)
<import_stmt>torch<import_from_stmt>transformers BertTokenizer<import_from_stmt>.base_tester Tester<import_from_stmt>textclf.utils.raw_data create_tokenizer<import_from_stmt>textclf.utils.create create_instance<import_from_stmt>textclf.config DLTesterConfig<import_from_stmt>textclf.data.dictionary Dictionary<class_stmt>DLTester(Tester)<block_start>"""负责Deep Learning model的测试"""<def_stmt>__init__ self config:DLTesterConfig<block_start>super().__init__(config)<line_sep>self.tokenizer=create_tokenizer(self.config.tokenizer)<line_sep>self.use_cuda=self.config.use_cuda<and>torch.cuda.is_available()<line_sep>print(f"Load checkpoint from {self.config.model_path}..")<line_sep>checkpoint=torch.load(self.config.model_path)<line_sep>self.model_conf,self.dictionary,self.label2id=checkpoint["info_for_test"]<line_sep>self.model=create_instance(self.model_conf)<line_sep>self.model.load_state_dict(checkpoint['model_state_dict'])<line_sep>self.classes=sorted(self.label2id key=self.label2id.get)<block_end><def_stmt>_preprocess self text<block_start>text_tokenized=self.tokenizer(text)<if_stmt>isinstance(self.dictionary Dictionary)<block_start>text_processed=self.dictionary.tokens_to_tensor(text_tokenized max_len=self.config.max_len)<line_sep>text_len=(text_processed<ne>self.dictionary.pad()).sum()<block_end><elif_stmt>isinstance(self.dictionary BertTokenizer)<block_start>text_processed=torch.LongTensor(self.dictionary.encode(text_tokenized add_special_tokens=<true>)[:-1])<line_sep>max_len=self.config.max_len<line_sep>pad_id=self.dictionary.pad_token_id<if_stmt>len(text_processed)<ge>max_len<block_start>text_processed=text_processed[:max_len]<block_end><else_stmt><block_start>text_processed=torch.cat([text_processed 
torch.ones(max_len-len(text_processed)).long()<times>pad_id])<block_end>text_len=(text_processed<ne>pad_id).sum()<block_end><if_stmt>self.use_cuda<block_start>text_processed=text_processed.cuda()<line_sep>text_len=text_len.cuda()<block_end><return>text_processed.unsqueeze(0) text_len.unsqueeze(0)<block_end><def_stmt>predict_label self text<block_start>text_processed,text_len=self._preprocess(text)<line_sep>self.model.eval()<with_stmt>torch.no_grad()<block_start>logits=self.model(text_processed text_len)<block_end>label_id=torch.argmax(logits)<line_sep><return>self.classes[label_id]<block_end><def_stmt>predict_prob self text<block_start>text_processed,text_len=self._preprocess(text)<line_sep>self.model.eval()<with_stmt>torch.no_grad()<block_start>logits=self.model(text_processed text_len)[0]<block_end><return>torch.softmax(logits dim=0).tolist()<block_end><def_stmt>get_all_labels self<block_start><return>self.classes<block_end><block_end>
<import_from_stmt>._base *<line_sep>DEBUG=<false><line_sep>WEBSITE_URL="https://example.com"# without trailing slash MEDIA_URL=f"{WEBSITE_URL}/media/"<line_sep>
<import_stmt>os<import_stmt>subprocess<line_sep>PAGES_DIR="../../pages"<line_sep>keyword_note={'tutorials':'' 'demos':'' 'quizzes':'' 'mathematics':'' 'algorithms':'' 'programming':'By the way, if you prefer books to blogs, <a href="https://wordsandbuttons.online/SYTYKC.pdf">there is a free book</a> that was originally made from this section.'}<line_sep>index_title='Hello, world!'<line_sep>index_description='This is <i>Words and Buttons Online</i> — a growing collection of&nbsp;interactive tutorials, demos, and quizzes about maths, algorithms, and programming.'<line_sep>all_span_ids=[]<def_stmt>read_index_spans path<block_start><global>all_span_ids<line_sep>index_spans=[]<for_stmt>file_name os.listdir(path)<block_start><if_stmt>os.path.isfile(path+'/'+file_name)<block_start><if_stmt>file_name.endswith('.html')<block_start>html=open(path+'/'+file_name 'r')<line_sep>text=html.read()<line_sep>html.close()<line_sep>spans=text.split('<span id="index_')<if_stmt>spans<ne>[]<block_start>spans=spans[1:]<block_end>Spans=text.split('<Span id="index_')<if_stmt>Spans<ne>[]<block_start>Spans=Spans[1:]<block_end>span_ids=['index_'+s.split('"')[0]<for>s spans]<line_sep>span_titles=[s.split('>')[1].split('<')[0].lower()<for>s spans]<line_sep>span_ids<augadd>['index_'+s.split('"')[0]<for>s Spans]<line_sep>span_titles<augadd>[s.split('>')[1].split('<')[0]<for>s Spans]<for_stmt>i range(0 len(span_ids))<block_start>index_spans<augadd>[(file_name span_ids[i] span_titles[i])]<block_end><for_stmt>span_id span_ids<block_start><if_stmt>span_id<in>all_span_ids<block_start>print('Duplicated index span id: '+span_id+" in "+file_name)<block_end>all_span_ids<augadd>[span_id]<block_end><block_end><block_end><block_end><return>index_spans<block_end>date_link_title_description_keywords=[]<line_sep>all_keywords=set()<for_stmt>filename 
os.listdir(PAGES_DIR)<block_start><if_stmt>filename<eq>'index.html'<block_start><continue><block_end><if_stmt>filename<eq>'faq.html'<block_start><continue><block_end><if_stmt>filename.endswith(".html")<block_start>f=open(PAGES_DIR+"/"+filename 'rt')<line_sep>content=f.read()<line_sep>f.close<if_stmt>content.find("meta name=\"keywords\"")<eq>-1<block_start><continue><block_end>date_from_git=subprocess.run(["git" "log" "--reverse" "--date=iso" "--format=%cd" "--" filename] cwd=PAGES_DIR stdout=subprocess.PIPE)<line_sep>full_date=date_from_git.stdout.decode('utf-8')<line_sep>date=full_date.split(' ')[0]<line_sep>title=content.split("<title>")[1].split("</title>")[0]<line_sep>description=content.split('<meta name="description" content="')[1].split('">')[0]<line_sep>keywords=content.split('<meta name="keywords" content="')[1].split('">')[0].split(', ')<if_stmt>keywords[0]<eq>""<block_start><continue><block_end>date_link_title_description_keywords<augadd>[(date filename title description keywords)]<line_sep>all_keywords.update(keywords)<block_end><block_end>date_link_title_description_keywords.sort()<line_sep># index f=open('index.template')<line_sep>template=f.read()<line_sep>f.close()<line_sep>index='%s'%template<line_sep>f=open('links.txt')<line_sep>links=f.readlines()<line_sep>f.close()<line_sep>links_html='<h1>More interactive learning</h1>'<for_stmt>link links<block_start><if_stmt>link.strip().find(' ')<ne>-1<block_start>url=link.split(' ')[0]<line_sep>title_chunks=link.split(' ')[1:]<line_sep>title=title_chunks[0]<for_stmt>chunk title_chunks[1:]# no hanging short words <block_start><if_stmt>len(chunk)<l>2<block_start>title<augadd>'&nbsp;'+chunk<block_end><else_stmt><block_start>title<augadd>' '+chunk<block_end><block_end>links_html<augadd>'<p style="margin-bottom: 12pt;">'+title+'<br><a href="'+url+'">'+url+'</a></p>\n'<block_end><block_end>menu='<p class="links" style="width: 555pt;">'<for_stmt>(kw _) keyword_note.items()<block_start>menu<augadd>'<nobr><a 
style="padding-right: 4pt;" href="all_'+kw+'.html">#'+kw+'</a></nobr> '<block_end>menu<augadd>'</p>'<line_sep># index is now real index not a timeline the_index='<h1 title="A real index on index.html! How cool is that!">Index</h1>'<line_sep>spans=read_index_spans(PAGES_DIR)<line_sep>cur_letter=''<for_stmt>(f i t) sorted(spans key=<lambda>fit:fit[2].upper())<block_start>letter=t[0].upper()<if_stmt>cur_letter<ne>letter<block_start><if_stmt>cur_letter<ne>''<block_start>the_index<augadd>'</p>\n'<block_end>the_index<augadd>'<h2>'+letter+'</h2>\n'<line_sep>the_index<augadd>'<p class="index_items">\n'<line_sep>cur_letter=letter<block_end>the_index<augadd>'<nobr><a style="padding-right: 24pt;" href="'+f+'#'+i+'">'+t+'</a></nobr>\n'<block_end>the_index<augadd>'</p>\n'<line_sep>index=index.replace('<h1>Title</h1>' '<h1>'+index_title+'</h1>')<line_sep>index=index.replace('<p>Description</p>' '<p style="width: 555pt;">'+index_description+'</p>')<line_sep>index=index.replace('<div id="menu"></div>' '\n'+menu+'\n')<line_sep>index=index.replace('<p>Note</p>' '')<line_sep>index=index.replace('<div id="timeline"></div>' '\n'+the_index+'\n')<line_sep>index=index.replace('<div id="links"></div>' '\n'+links_html+'\n')<line_sep>f=open('../../pages/'+'index.html' 'w')<line_sep>f.write(index)<line_sep>f.close<line_sep># tag's all_* pages <for_stmt>title list(all_keywords)<block_start>page='%s'%template<line_sep>timeline=''<line_sep>menu='<p class="links" style="width: 555pt;">'<for_stmt>(kw _) keyword_note.items()<block_start><if_stmt>kw<eq>title<block_start>menu<augadd>'<nobr><span style="padding-right: 4pt; color: #999;">#'+kw+'</span></nobr> '<block_end><else_stmt><block_start>menu<augadd>'<nobr><a style="padding-right: 4pt;" href="all_'+kw+'.html">#'+kw+'</a></nobr> '<block_end><block_end>menu<augadd>'</p>'<for_stmt>(d l t desc kwds) date_link_title_description_keywords[::-1]<block_start><if_stmt><not>title<in>kwds<block_start><continue><block_end>timeline<augadd>'<p 
class="title">'+'<a href="'+l+'">'+t+'</a></p>\n'<line_sep>timeline<augadd>'<p class="description">'+desc+'</p>\n'<line_sep>timeline<augadd>'<p class="links">'<for_stmt>kw sorted(list(kwds))<block_start><if_stmt>kw<eq>title<block_start>timeline<augadd>'<span style="padding-right: 8pt; color: #999;">#'+kw+'</span> '<block_end><else_stmt><block_start>timeline<augadd>'<a style="padding-right: 8pt;" href="all_'+kw+'.html">#'+kw+'</a> '<block_end><block_end>timeline<augadd>'</p>\n'<block_end>page=page.replace('<h1>Title</h1>' '<h1><a href="index.html">Words and Buttons</a>: '+title+'</h1>')<line_sep>page=page.replace('<p>Description</p>' '')<line_sep>page=page.replace('<div id="menu"></div>' '\n'+menu+'\n')<line_sep>page=page.replace('<p>Note</p>' '<p style="width: 555pt;">'+keyword_note[title]+'</p>')<line_sep>page=page.replace('<div id="timeline"></div>' '\n'+timeline+'\n')<line_sep>page=page.replace('<div id="links"></div>' '')<line_sep>f=open('../../pages/all_'+title+'.html' 'w')<line_sep>f.write(page)<line_sep>f.close<block_end>
<import_stmt>json<import_from_stmt>textwrap dedent<import_stmt>pytest<line_sep>@pytest.mark.usefixtures("add_host_with_interface")<class_stmt>TestAddHostBonded<block_start><def_stmt>test_no_hosts self host<block_start>result=host.run('stack add host bonded')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "host" argument is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_no_matching_hosts self host<block_start>result=host.run('stack add host bonded a:test')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "host" argument is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_multiple_hosts self host<block_start>result=host.run('stack add host bonded frontend-0-0 backend-0-0')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "host" argument must be unique {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_no_channel self host<block_start>result=host.run('stack add host bonded backend-0-0')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "channel" parameter is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_no_interfaces self host<block_start>result=host.run('stack add host bonded backend-0-0 channel=bond0')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "interfaces" parameter is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_no_ip self host<block_start>result=host.run('stack add host bonded backend-0-0 channel=bond0 
'<concat>'interfaces=eth0,eth1')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "ip" parameter is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_no_network self host<block_start>result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces=eth0,eth1 ip=192.168.0.1')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>dedent('''\ error - "network" parameter is required {host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string] ''')<block_end><def_stmt>test_invalid_network self host# Add a second interface to our backend <block_start>result=host.run('stack add host interface backend-0-0 interface=eth1')<assert_stmt>result.rc<eq>0<line_sep># Add the bonded interface result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces=eth0,eth1 ip=192.168.0.1 network=test')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>'error - network "test" does not exist\n'<block_end><def_stmt>test_missing_interface self host<block_start>result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces=eth0,eth1 ip=192.168.0.1 network=private')<assert_stmt>result.rc<eq>255<assert_stmt>result.stderr<eq>'error - interface "eth1" does not exist for host "backend-0-0"\n'<block_end><def_stmt>test_comma_seperated_interfaces self host# Add a second interface to our backend <block_start>result=host.run('stack add host interface backend-0-0 interface=eth1')<assert_stmt>result.rc<eq>0<line_sep># Add the bonded interface result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces=eth0,eth1 ip=192.168.0.1 network=private')<assert_stmt>result.rc<eq>0<line_sep># Check the interface is in the database now result=host.run('stack list host interface backend-0-0 
output-format=json')<assert_stmt>result.rc<eq>0<assert_stmt>json.loads(result.stdout)<eq>[{'channel':<none> 'default':<none> 'host':'backend-0-0' 'interface':'bond0' 'ip':'192.168.0.1' 'mac':<none> 'module':'bonding' 'name':'backend-0-0' 'network':'private' 'options':<none> 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth0' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth1' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>}]<block_end><def_stmt>test_space_seperated_interfaces self host# Add a second interface to our backend <block_start>result=host.run('stack add host interface backend-0-0 interface=eth1')<assert_stmt>result.rc<eq>0<line_sep># Add the bonded interface result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces="eth0 eth1" ip=192.168.0.1 network=private')<assert_stmt>result.rc<eq>0<line_sep># Check the interface is in the database now result=host.run('stack list host interface backend-0-0 output-format=json')<assert_stmt>result.rc<eq>0<assert_stmt>json.loads(result.stdout)<eq>[{'channel':<none> 'default':<none> 'host':'backend-0-0' 'interface':'bond0' 'ip':'192.168.0.1' 'mac':<none> 'module':'bonding' 'name':'backend-0-0' 'network':'private' 'options':<none> 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth0' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth1' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>}]<block_end><def_stmt>test_default_with_options self host# Add a second interface to our backend <block_start>result=host.run('stack add host interface 
backend-0-0 interface=eth1 default=true')<assert_stmt>result.rc<eq>0<line_sep># Add the bonded interface result=host.run('stack add host bonded backend-0-0 channel=bond0 '<concat>'interfaces=eth0,eth1 ip=192.168.0.1 network=private options=test_options')<assert_stmt>result.rc<eq>0<line_sep># Check the interface is in the database now result=host.run('stack list host interface backend-0-0 output-format=json')<assert_stmt>result.rc<eq>0<assert_stmt>json.loads(result.stdout)<eq>[{'channel':<none> 'default':<true> 'host':'backend-0-0' 'interface':'bond0' 'ip':'192.168.0.1' 'mac':<none> 'module':'bonding' 'name':'backend-0-0' 'network':'private' 'options':'bonding-opts="test_options"' 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth0' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>} {'channel':'bond0' 'default':<none> 'host':'backend-0-0' 'interface':'eth1' 'ip':<none> 'mac':<none> 'module':<none> 'name':'backend-0-0' 'network':<none> 'options':<none> 'vlan':<none>}]<block_end><block_end>
# # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>socket<import_stmt>edgedb<import_from_stmt>edgedb _testbase<as>tb<class_stmt>TestConnect(tb.AsyncQueryTestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>super().setUpClass()<line_sep>cls.port=cls._get_free_port()<block_end>@classmethod<def_stmt>_get_free_port cls<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_STREAM)<try_stmt><block_start>sock.bind(('127.0.0.1' 0))<line_sep><return>sock.getsockname()[1]<block_end><except_stmt>Exception<block_start><return><none><block_end><finally_stmt><block_start>sock.close()<block_end><block_end><async_keyword><def_stmt>test_connect_async_01 self<block_start>orig_conn_args=self.get_connect_args()<line_sep>conn_args=orig_conn_args.copy()<line_sep>conn_args['port']=self.port<line_sep>conn_args['wait_until_available']=0<with_stmt>self.assertRaisesRegex(edgedb.ClientConnectionError f'(?s).*Is the server running.*port {self.port}.*')<block_start>conn_args['host']='127.0.0.1'<line_sep><await>edgedb.async_connect(**conn_args)<block_end><with_stmt>self.assertRaisesRegex(edgedb.ClientConnectionError f'(?s).*Is the server running.*port {self.port}.*')<block_start>conn_args['host']=orig_conn_args['host']<line_sep><await>edgedb.async_connect(**conn_args)<block_end><block_end><def_stmt>test_connect_sync_01 
self<block_start>orig_conn_args=self.get_connect_args()<line_sep>conn_args=orig_conn_args.copy()<line_sep>conn_args['port']=self.port<line_sep>conn_args['wait_until_available']=0<with_stmt>self.assertRaisesRegex(edgedb.ClientConnectionError f'(?s).*Is the server running.*port {self.port}.*')<block_start>conn_args['host']='127.0.0.1'<line_sep>edgedb.connect(**conn_args)<block_end><with_stmt>self.assertRaisesRegex(edgedb.ClientConnectionError f'(?s).*Is the server running.*port {self.port}.*')<block_start>conn_args['host']=orig_conn_args['host']<line_sep>edgedb.connect(**conn_args)<block_end><block_end><block_end>
<import_stmt>sys<line_sep>sys.path.insert(1 "../../")<import_stmt>h2o<import_from_stmt>tests pyunit_utils<import_from_stmt>h2o.estimators.gbm H2OGradientBoostingEstimator<import_from_stmt>h2o.utils.model_utils reset_model_threshold<def_stmt>test_reset_threshold <block_start>""" Test the model threshold can be reset. Performance metric should be recalculated and also predictions should be changed based on the new threshold. """<line_sep># import data airlines=h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv"))<line_sep># convert columns to factors airlines["Year"]=airlines["Year"].asfactor()<line_sep>airlines["Month"]=airlines["Month"].asfactor()<line_sep>airlines["DayOfWeek"]=airlines["DayOfWeek"].asfactor()<line_sep>airlines["Cancelled"]=airlines["Cancelled"].asfactor()<line_sep>airlines['FlightNum']=airlines['FlightNum'].asfactor()<line_sep># set the predictor names and the response column name predictors=["Origin" "Dest" "Year" "UniqueCarrier" "DayOfWeek" "Month" "Distance" "FlightNum"]<line_sep>response="IsDepDelayed"<line_sep># split into train and validation sets train,valid=airlines.split_frame(ratios=[.8] seed=1234)<line_sep># initialize the estimator model=H2OGradientBoostingEstimator(seed=1234 ntrees=5)<line_sep># train the model model.train(x=predictors y=response training_frame=train)<line_sep>old_threshold=model._model_json['output']['default_threshold']<line_sep># predict preds=model.predict(airlines)<line_sep># reset the threshold and get the old one new_threshold=0.6917189903082518<line_sep>old_returned=reset_model_threshold(model new_threshold)<line_sep>reset_model=h2o.get_model(model.model_id)<line_sep>reset_threshold=reset_model._model_json['output']['default_threshold']<line_sep># predict with reset model preds_reset=reset_model.predict(airlines)<line_sep># compare thresholds 
<assert_stmt>old_threshold<eq>old_returned<assert_stmt>new_threshold<eq>reset_threshold<assert_stmt>reset_threshold<ne>old_threshold<line_sep># compare predictions preds_local=preds.as_data_frame()<line_sep>preds_reset_local=preds_reset.as_data_frame()<line_sep>print("old threshold:" old_threshold "new_threshold:" new_threshold)<for_stmt>i range(airlines.nrow)<block_start><if_stmt>old_threshold<le>preds_local.iloc[i 2]<l>new_threshold<block_start><assert_stmt>preds_local.iloc[i 0]<ne>preds_reset_local.iloc[i 0]<block_end><else_stmt><block_start><assert_stmt>preds_local.iloc[i 0]<eq>preds_reset_local.iloc[i 0]<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>pyunit_utils.standalone_test(test_reset_threshold)<block_end><else_stmt><block_start>test_reset_threshold()<block_end>
<import_stmt>pytest<import_stmt>sqlalchemy<as>sa<class_stmt>ThreeLevelDeepOneToOne(object)<block_start>@pytest.fixture<def_stmt>Catalog self Base Category<block_start><class_stmt>Catalog(Base)<block_start>__tablename__='catalog'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>category=sa.orm.relationship(Category uselist=<false> backref='catalog')<block_end><return>Catalog<block_end>@pytest.fixture<def_stmt>Category self Base SubCategory<block_start><class_stmt>Category(Base)<block_start>__tablename__='category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>catalog_id=sa.Column('_catalog_id' sa.Integer sa.ForeignKey('catalog._id'))<line_sep>sub_category=sa.orm.relationship(SubCategory uselist=<false> backref='category')<block_end><return>Category<block_end>@pytest.fixture<def_stmt>SubCategory self Base Product<block_start><class_stmt>SubCategory(Base)<block_start>__tablename__='sub_category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>category_id=sa.Column('_category_id' sa.Integer sa.ForeignKey('category._id'))<line_sep>product=sa.orm.relationship(Product uselist=<false> backref='sub_category')<block_end><return>SubCategory<block_end>@pytest.fixture<def_stmt>Product self Base<block_start><class_stmt>Product(Base)<block_start>__tablename__='product'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>price=sa.Column(sa.Integer)<line_sep>sub_category_id=sa.Column('_sub_category_id' sa.Integer sa.ForeignKey('sub_category._id'))<block_end><return>Product<block_end>@pytest.fixture<def_stmt>init_models self Catalog Category SubCategory Product<block_start><pass><block_end><block_end><class_stmt>ThreeLevelDeepOneToMany(object)<block_start>@pytest.fixture<def_stmt>Catalog self Base Category<block_start><class_stmt>Catalog(Base)<block_start>__tablename__='catalog'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>categories=sa.orm.relationship(Category 
backref='catalog')<block_end><return>Catalog<block_end>@pytest.fixture<def_stmt>Category self Base SubCategory<block_start><class_stmt>Category(Base)<block_start>__tablename__='category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>catalog_id=sa.Column('_catalog_id' sa.Integer sa.ForeignKey('catalog._id'))<line_sep>sub_categories=sa.orm.relationship(SubCategory backref='category')<block_end><return>Category<block_end>@pytest.fixture<def_stmt>SubCategory self Base Product<block_start><class_stmt>SubCategory(Base)<block_start>__tablename__='sub_category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>category_id=sa.Column('_category_id' sa.Integer sa.ForeignKey('category._id'))<line_sep>products=sa.orm.relationship(Product backref='sub_category')<block_end><return>SubCategory<block_end>@pytest.fixture<def_stmt>Product self Base<block_start><class_stmt>Product(Base)<block_start>__tablename__='product'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>price=sa.Column(sa.Numeric)<line_sep>sub_category_id=sa.Column('_sub_category_id' sa.Integer sa.ForeignKey('sub_category._id'))<def_stmt>__repr__ self<block_start><return>'<Product id=%r>'%self.id<block_end><block_end><return>Product<block_end>@pytest.fixture<def_stmt>init_models self Catalog Category SubCategory Product<block_start><pass><block_end><block_end><class_stmt>ThreeLevelDeepManyToMany(object)<block_start>@pytest.fixture<def_stmt>Catalog self Base Category<block_start>catalog_category=sa.Table('catalog_category' Base.metadata sa.Column('catalog_id' sa.Integer sa.ForeignKey('catalog._id')) sa.Column('category_id' sa.Integer sa.ForeignKey('category._id')))<class_stmt>Catalog(Base)<block_start>__tablename__='catalog'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>categories=sa.orm.relationship(Category backref='catalogs' secondary=catalog_category)<block_end><return>Catalog<block_end>@pytest.fixture<def_stmt>Category self Base 
SubCategory<block_start>category_subcategory=sa.Table('category_subcategory' Base.metadata sa.Column('category_id' sa.Integer sa.ForeignKey('category._id')) sa.Column('subcategory_id' sa.Integer sa.ForeignKey('sub_category._id')))<class_stmt>Category(Base)<block_start>__tablename__='category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>sub_categories=sa.orm.relationship(SubCategory backref='categories' secondary=category_subcategory)<block_end><return>Category<block_end>@pytest.fixture<def_stmt>SubCategory self Base Product<block_start>subcategory_product=sa.Table('subcategory_product' Base.metadata sa.Column('subcategory_id' sa.Integer sa.ForeignKey('sub_category._id')) sa.Column('product_id' sa.Integer sa.ForeignKey('product._id')))<class_stmt>SubCategory(Base)<block_start>__tablename__='sub_category'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>products=sa.orm.relationship(Product backref='sub_categories' secondary=subcategory_product)<block_end><return>SubCategory<block_end>@pytest.fixture<def_stmt>Product self Base<block_start><class_stmt>Product(Base)<block_start>__tablename__='product'<line_sep>id=sa.Column('_id' sa.Integer primary_key=<true>)<line_sep>price=sa.Column(sa.Numeric)<block_end><return>Product<block_end>@pytest.fixture<def_stmt>init_models self Catalog Category SubCategory Product<block_start><pass><block_end><block_end>
<import_from_stmt>triage.experiments ExperimentBase<class_stmt>SingleThreadedExperiment(ExperimentBase)<block_start><def_stmt>process_query_tasks self query_tasks<block_start>self.feature_generator.process_table_tasks(query_tasks)<block_end><def_stmt>process_matrix_build_tasks self matrix_build_tasks<block_start>self.matrix_builder.build_all_matrices(matrix_build_tasks)<block_end><def_stmt>process_train_test_batches self batches<block_start>self.model_train_tester.process_all_batches(batches)<block_end><def_stmt>process_subset_tasks self subset_tasks<block_start>self.subsetter.process_all_tasks(subset_tasks)<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>cctbx.array_family flex<import_from_stmt>cctbx adptbx<import_from_stmt>mmtbx bulk_solvent<import_from_stmt>cctbx.array_family flex<import_from_stmt>cctbx adptbx<import_stmt>mmtbx<import_from_stmt>libtbx group_args<import_stmt>mmtbx.arrays<import_stmt>mmtbx.bulk_solvent.scaler<import_from_stmt>libtbx.test_utils approx_equal<import_from_stmt>libtbx.math_utils ifloor iceil<import_stmt>mmtbx.f_model<import_stmt>mmtbx.bulk_solvent.bulk_solvent_and_scaling<as>bss<import_from_stmt>six.moves zip range<class_stmt>run(mmtbx.f_model.manager)<block_start>""" This is a very specialized routine to perform complex protocols of updating all scales of fmodel, including case of twininng, presence of H and lileky more. Inside it pretends to be fmodel proper (done by dictionary updates before and after - any better ideas of how to do it nicer are welcome!). """<def_stmt>__init__ self fmodel apply_back_trace remove_outliers fast params refine_hd_scattering log### Must be first thing here <block_start>self.__dict__.update(fmodel.__dict__)<line_sep># From this point on: self = fmodel ### russ=self.compute(apply_back_trace=apply_back_trace remove_outliers=remove_outliers fast=fast params=params refine_hd_scattering=refine_hd_scattering log=log)<line_sep>### Must be next to last... 
fmodel.__dict__.update(self.__dict__)<line_sep>### ...and this one is last self.russ=russ<block_end><def_stmt>compute self apply_back_trace remove_outliers fast params refine_hd_scattering log<block_start><assert_stmt>[self.arrays.core_twin self.twin_law].count(<none>)<in>[0 2]<line_sep>self.show(prefix="start" log=log)<line_sep>self.reset_all_scales()<line_sep>self.show(prefix="re-set all scales" log=log)<if_stmt>(remove_outliers<and><not>self.twinned())<block_start><for_stmt>iii range(5)<block_start>self.remove_outliers(use_model=<false> log=<none>)# XXX <block_end>self.show(prefix="remove outliers" log=log)<block_end>result=<none><if_stmt>(self.twinned())<block_start><for_stmt>cycle range(2)<block_start><if_stmt>(log<is><not><none>)<block_start>print("cycle %d:"%cycle file=log)<block_end>self.update_twin_fraction()<line_sep>self.show(prefix="update twin fraction" log=log)<line_sep>result=self.update_solvent_and_scale_twin(log=log refine_hd_scattering=refine_hd_scattering)<block_end><block_end><else_stmt><block_start>result=self.update_solvent_and_scale_2(fast=fast params=params apply_back_trace=apply_back_trace refine_hd_scattering=refine_hd_scattering log=log)<block_end>#XXX if(remove_outliers and not self.twinned()): #XXX self.remove_outliers(use_model = True, log = None) # XXX <if_stmt>(remove_outliers<and><not>self.twinned())<block_start><for_stmt>iii range(5)<block_start>self.remove_outliers(use_model=<true> log=<none>)# XXX <block_end>self.show(prefix="remove outliers" log=log)<block_end><return>result<block_end><def_stmt>reset_all_scales self<block_start>size=self.f_obs().data().size()<line_sep>zero_c=flex.complex_double(size 0)<line_sep>zero_d=flex.double(size 0)<line_sep>one_d=flex.double(size 
1)<line_sep>f_part1_twin=self.f_calc_twin()<line_sep>f_part2_twin=self.f_calc_twin()<if_stmt>(f_part1_twin<is><not><none>)<block_start>f_part1_twin=self.f_calc_twin().array(data=zero_c)<line_sep>f_part2_twin=self.f_calc_twin().array(data=zero_c)<block_end>self.update_core(f_part1=self.f_calc().array(data=zero_c) f_part2=self.f_calc().array(data=zero_c) f_part1_twin=f_part1_twin f_part2_twin=f_part2_twin k_isotropic=one_d k_anisotropic=one_d k_mask=[zero_d]<times>len(self.k_masks()))<block_end><def_stmt>show self prefix log r=<none><block_start><if_stmt>(log<is><none>)<block_start><return><block_end><if_stmt>(r<is><none>)<block_start>r=self.r_all()<block_end>m="%24s: r(all,work,free)=%6.4f %6.4f %6.4f n_refl.: %d"%(prefix r self.r_work() self.r_free() self.f_obs().data().size())<if_stmt>(<not>self.twinned())<block_start>print(m file=log)<block_end><else_stmt><block_start>print(m+" twin_fraction=%4.2f"%self.twin_fraction file=log)<block_end><block_end><def_stmt>need_to_refine_hd_scattering_contribution self<block_start><if_stmt>(self.xray_structure<is><none>)<block_start><return><false><block_end>refine_hd_scattering=<true><line_sep>hd_selection=self.xray_structure.hd_selection()<line_sep>occ_h_all_zero=self.xray_structure.select(hd_selection).scatterers().extract_occupancies().all_eq(0.0)<line_sep># riding H <if_stmt>(self.xray_structure.guess_scattering_type_neutron()<or>hd_selection.count(<true>)<eq>0<or><not>occ_h_all_zero)<block_start>refine_hd_scattering=<false><block_end><return>refine_hd_scattering<block_end><def_stmt>update_solvent_and_scale_2 self fast params apply_back_trace refine_hd_scattering log<block_start><if_stmt>(params<is><none>)<block_start>params=bss.master_params.extract()<block_end><if_stmt>(self.xray_structure<is><not><none>)# Figure out Fcalc and Fmask based on presence of H 
<block_start>hd_selection=self.xray_structure.hd_selection()<line_sep>xrs_no_h=self.xray_structure.select(~hd_selection)<line_sep>xrs_h=self.xray_structure.select(hd_selection)<block_end># Create data container for scalers. If H scattering is refined then it is # assumed that self.f_calc() does not contain H contribution at all. fmodel_kbu=mmtbx.f_model.manager_kbu(f_obs=self.f_obs() f_calc=self.f_calc() f_masks=self.f_masks() ss=self.ss)<line_sep># Compute k_total and k_mask using one of the two methods (anal or min). # Note: this intentionally ignores previously existing f_part1 and f_part2. # k_sol,b_sol,b_cart,b_adj=[<none> ]<times>4<if_stmt>(fast)# analytical <block_start><assert_stmt>len(fmodel_kbu.f_masks)<eq>1<line_sep>result=mmtbx.bulk_solvent.scaler.run_simple(fmodel_kbu=fmodel_kbu r_free_flags=self.r_free_flags() bulk_solvent=params.bulk_solvent aniso_scale=params.anisotropic_scaling bin_selections=self.bin_selections)<line_sep>r_all_from_scaler=result.r_all()# must be here, before apply_back_trace <block_end><else_stmt># using minimization: exp solvent and scale model (k_sol,b_sol,b_cart) <block_start>result=bss.bulk_solvent_and_scales(fmodel_kbu=fmodel_kbu params=params)<line_sep>k_sol,b_sol,b_cart=result.k_sols() result.b_sols() result.b_cart()<line_sep>r_all_from_scaler=result.r_all()# must be here, before apply_back_trace <block_end><if_stmt>(apply_back_trace<and>len(fmodel_kbu.f_masks)<eq>1<and>self.xray_structure<is><not><none>)<block_start>o=result.apply_back_trace_of_overall_exp_scale_matrix(xray_structure=self.xray_structure)<line_sep>b_adj=o.b_adj<if_stmt>(<not>fast)<block_start>b_sol,b_cart=[o.b_sol] o.b_cart<block_end>self.update_xray_structure(xray_structure=o.xray_structure update_f_calc=<true>)<line_sep>fmodel_kbu=fmodel_kbu.update(f_calc=self.f_calc())<line_sep>self.show(prefix="overall B=%s to atoms"%str("%7.2f"%o.b_adj).strip() log=log)<block_end># Update self with new arrays so that H correction knows current R factor. 
# If no H to account for, then this is the final result. k_masks=result.k_masks()<line_sep>k_anisotropic=result.k_anisotropic()<line_sep>k_isotropic=result.k_isotropic()<line_sep>self.update_core(k_mask=k_masks k_anisotropic=k_anisotropic k_isotropic=k_isotropic)<line_sep>self.show(prefix="bulk-solvent and scaling" log=log)<line_sep># Consistency check <if_stmt>(<not>apply_back_trace)<block_start><assert_stmt>approx_equal(self.r_all() r_all_from_scaler)<block_end># Add contribution from H (if present and riding). This goes to f_part2. kh,bh=0 0<if_stmt>(refine_hd_scattering<and>self.need_to_refine_hd_scattering_contribution())# Obsolete previous contribution f_part2 <block_start>f_part2=fmodel_kbu.f_calc.array(data=fmodel_kbu.f_calc.data()<times>0)<line_sep>self.update_core(f_part2=f_part2)<line_sep>xrs_h=xrs_h.set_occupancies(value=1).set_b_iso(value=0)<line_sep>f_h=self.compute_f_calc(xray_structure=xrs_h)<line_sep># Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N data=fmodel_kbu.f_calc.data()<for_stmt>k_mask_,f_mask_ zip(k_masks fmodel_kbu.f_masks)<block_start>data=data+k_mask_<times>f_mask_.data()<block_end>f_calc_plus_f_bulk_no_scales=fmodel_kbu.f_calc.array(data=data)<line_sep># Consistency check <assert_stmt>approx_equal(self.f_model().data() f_calc_plus_f_bulk_no_scales.data()<times>k_isotropic<times>k_anisotropic)<assert_stmt>approx_equal(self.f_model_no_scales().data() f_calc_plus_f_bulk_no_scales.data())<line_sep># # Compute contribution from H (F_H) # # Coarse sampling b_mean=flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())<times>adptbx.u_as_b(1.)<line_sep>b_min=int(max(0 b_mean)<times>0.5)<line_sep>b_max=int(b_mean<times>1.5)<line_sep>sc=1000.<line_sep>kr=[i/sc<for>i range(ifloor(0<times>sc) iceil(1.5<times>sc)+1 int(0.1<times>sc))]<line_sep>br=[i/sc<for>i range(ifloor(b_min<times>sc) iceil(b_max<times>sc)+1 int(5.<times>sc))]<line_sep>o=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=fmodel_kbu.f_obs.data() 
f_calc=f_calc_plus_f_bulk_no_scales.data() f_mask=f_h.data() k_total=k_isotropic<times>k_anisotropic ss=fmodel_kbu.ss k_sol_range=flex.double(kr) b_sol_range=flex.double(br) r_ref=self.r_work())<if_stmt>(o.updated())<block_start>f_part2=f_h.array(data=o.k_mask()<times>f_h.data())<line_sep>kh,bh=o.k_sol() o.b_sol()<line_sep>self.show(prefix="add H (%4.2f, %6.2f)"%(kh bh) log=log r=o.r())<block_end># Fine sampling k_min=max(0 o.k_sol()-0.1)<line_sep>k_max=o.k_sol()+0.1<line_sep>b_min=max(0 o.b_sol()-5.)<line_sep>b_max=o.b_sol()+5.<line_sep>kr=[i/sc<for>i range(ifloor(k_min<times>sc) iceil(k_max<times>sc)+1 int(0.01<times>sc))]<line_sep>br=[i/sc<for>i range(ifloor(b_min<times>sc) iceil(b_max<times>sc)+1 int(1.<times>sc))]<line_sep>o=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=fmodel_kbu.f_obs.data() f_calc=f_calc_plus_f_bulk_no_scales.data() f_mask=f_h.data() k_total=k_isotropic<times>k_anisotropic ss=fmodel_kbu.ss k_sol_range=flex.double(kr) b_sol_range=flex.double(br) r_ref=o.r())<if_stmt>(o.updated())<block_start>f_part2=f_h.array(data=o.k_mask()<times>f_h.data())<line_sep>kh,bh=o.k_sol() o.b_sol()<line_sep>self.show(prefix="add H (%4.2f, %6.2f)"%(kh bh) log=log r=o.r())<block_end># THIS HELPS if fast=true is used, see how it works in reality # <if_stmt>(fast)<block_start>fmodel_kbu_=mmtbx.f_model.manager_kbu(f_obs=self.f_obs() f_calc=f_calc_plus_f_bulk_no_scales f_masks=[f_part2] ss=self.ss)<line_sep>result=mmtbx.bulk_solvent.scaler.run_simple(fmodel_kbu=fmodel_kbu_ r_free_flags=self.r_free_flags() bulk_solvent=params.bulk_solvent aniso_scale=params.anisotropic_scaling bin_selections=self.bin_selections)<line_sep>f_part2=f_part2.array(data=result.core.k_mask()<times>f_part2.data())<line_sep>k_isotropic=result.core.k_isotropic<times>result.core.k_isotropic_exp<line_sep>k_anisotropic=result.core.k_anisotropic<block_end># Update self with final scales self.update_core(k_mask=k_masks k_anisotropic=k_anisotropic k_isotropic=k_isotropic 
f_part2=f_part2)<line_sep># Make sure what came out of scaling matches what self thinks it really is # It must match at least up to 1.e-6. self.show(prefix="add H (%4.2f, %6.2f)"%(kh bh) log=log)<if_stmt>(fast)<block_start><assert_stmt>approx_equal(result.r_work() self.r_work() 1.e-4)<block_end><else_stmt><block_start><assert_stmt>approx_equal(self.r_all() o.r()) [self.r_all() o.r()]<block_end><block_end><return>group_args(k_sol=k_sol b_sol=b_sol b_cart=b_cart k_h=kh b_h=bh b_adj=b_adj)<block_end><def_stmt>update_solvent_and_scale_twin self refine_hd_scattering log<block_start><if_stmt>(<not>self.twinned())<block_start><return><block_end><assert_stmt>len(self.f_masks())<eq>1<line_sep># Re-set all scales to unit or zero self.show(prefix="update scales twin start" log=log)<line_sep>self.reset_all_scales()<line_sep>self.show(prefix="reset f_part, k_(total,mask)" log=log)<line_sep>f_calc_data=self.f_calc().data()<line_sep>f_calc_data_twin=self.f_calc_twin().data()<line_sep># Initial trial set sc=1000.<line_sep>ksr=[i/sc<for>i range(ifloor(0<times>sc) iceil(0.6<times>sc)+1 int(0.05<times>sc))]<line_sep>bsr=[i/sc<for>i range(ifloor(0<times>sc) iceil(150.<times>sc)+1 int(10.<times>sc))]<line_sep>o_kbu_sol=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=self.f_obs().data() f_calc_1=f_calc_data f_calc_2=f_calc_data_twin f_mask_1=self.arrays.core.f_masks[0].data() f_mask_2=self.arrays.core_twin.f_masks[0].data() ss=self.ss twin_fraction=self.twin_fraction k_sol_range=flex.double(ksr) b_sol_range=flex.double(bsr) miller_indices=self.f_obs().indices() #XXX ??? What about twin-related? 
unit_cell=self.f_obs().unit_cell() r_ref=self.r_all())<if_stmt>(o_kbu_sol.updated())<block_start>self.update(k_mask=o_kbu_sol.k_mask() k_anisotropic=o_kbu_sol.k_anisotropic())<block_end># Second (finer) trial set k_min=max(o_kbu_sol.k_sol()-0.05 0)<line_sep>k_max=min(o_kbu_sol.k_sol()+0.05 0.6)<line_sep>ksr=[i/sc<for>i range(ifloor(k_min<times>sc) iceil(k_max<times>sc)+1 int(0.01<times>sc))]<line_sep>b_min=max(o_kbu_sol.b_sol()-10 0)<line_sep>b_max=min(o_kbu_sol.b_sol()+10 150)<line_sep>bsr=[i/sc<for>i range(ifloor(b_min<times>sc) iceil(b_max<times>sc)+1 int(1.<times>sc))]<line_sep>o_kbu_sol=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=self.f_obs().data() f_calc_1=f_calc_data f_calc_2=f_calc_data_twin f_mask_1=self.arrays.core.f_masks[0].data() f_mask_2=self.arrays.core_twin.f_masks[0].data() ss=self.ss twin_fraction=self.twin_fraction k_sol_range=flex.double(ksr) b_sol_range=flex.double(bsr) miller_indices=self.f_obs().indices() #XXX ??? What about twin-related? unit_cell=self.f_obs().unit_cell() r_ref=o_kbu_sol.r())<if_stmt>(o_kbu_sol.updated())<block_start>self.update(k_mask=o_kbu_sol.k_mask() k_anisotropic=o_kbu_sol.k_anisotropic())<line_sep># Disable due to rare failures. Technically they should always match. But # since different routines are used tiny disagreements are possible. 
# See examples in : /net/anaconda/raid1/afonine/work/bugs/twin_refinement #assert approx_equal(self.r_all(), o_kbu_sol.r(), 1.e-5) ############## # use apply_back_trace in if below <if_stmt>(self.xray_structure<is><not><none>)<block_start>o=mmtbx.bulk_solvent.scaler.tmp(xray_structure=self.xray_structure k_anisotropic=o_kbu_sol.k_anisotropic() k_masks=[o_kbu_sol.k_mask()] ss=self.ss)<line_sep>self.update_xray_structure(xray_structure=o.xray_structure update_f_calc=<true>)<line_sep>############# self.update(k_mask=o.k_masks k_anisotropic=o.k_anisotropic)<block_end><block_end>self.show(prefix="bulk-solvent and scaling" log=log)<line_sep># # Add contribution from H (if present and riding). This goes to f_part2. # kh,bh=0 0<if_stmt>(refine_hd_scattering<and>self.need_to_refine_hd_scattering_contribution())<block_start>hd_selection=self.xray_structure.hd_selection()<line_sep>xrs_no_h=self.xray_structure.select(~hd_selection)<line_sep>xrs_h=self.xray_structure.select(hd_selection)<line_sep># Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N data=self.f_calc().data()+self.f_masks()[0].data()<times>self.k_masks()[0]<line_sep>f_calc_plus_f_bulk_no_scales=self.f_calc().array(data=data)<line_sep>data=self.f_calc_twin().data()+self.f_masks_twin()[0].data()<times>self.k_masks_twin()[0]<line_sep>f_calc_plus_f_bulk_no_scales_twin=self.f_calc_twin().array(data=data)<line_sep># Initial FH contribution xrs_h=xrs_h.set_occupancies(value=1).set_b_iso(value=0)<line_sep>f_h=self.compute_f_calc(xray_structure=xrs_h)<line_sep>f_h_twin=self.compute_f_calc(xray_structure=xrs_h miller_array=self.f_calc_twin())<line_sep># Coarse sampling b_mean=flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())<times>adptbx.u_as_b(1.)<line_sep>b_min=int(max(0 b_mean)<times>0.5)<line_sep>b_max=int(b_mean<times>1.5)<line_sep>sc=1000.<line_sep>kr=[i/sc<for>i range(ifloor(0<times>sc) iceil(1.5<times>sc)+1 int(0.1<times>sc))]<line_sep>br=[i/sc<for>i range(ifloor(b_min<times>sc) iceil(b_max<times>sc)+1 
int(5.<times>sc))]<line_sep>obj=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=self.f_obs().data() f_calc_1=f_calc_plus_f_bulk_no_scales.data() f_calc_2=f_calc_plus_f_bulk_no_scales_twin.data() f_mask_1=f_h.data() f_mask_2=f_h_twin.data() ss=self.ss twin_fraction=self.twin_fraction k_sol_range=flex.double(kr) b_sol_range=flex.double(br) miller_indices=self.f_obs().indices() # XXX What about twin-related? unit_cell=self.f_obs().unit_cell() r_ref=self.r_work())<if_stmt>(obj.updated())<block_start>f_part2=f_h.array(data=obj.k_mask()<times>f_h.data())<line_sep>f_part2_twin=f_h_twin.array(data=obj.k_mask()<times>f_h_twin.data())<line_sep>kh,bh=obj.k_sol() obj.b_sol()<block_end># Fine sampling k_min=max(0 obj.k_sol()-0.1)<line_sep>k_max=obj.k_sol()+0.1<line_sep>b_min=max(0 obj.b_sol()-5.)<line_sep>b_max=obj.b_sol()+5.<line_sep>kr=[i/sc<for>i range(ifloor(k_min<times>sc) iceil(k_max<times>sc)+1 int(0.01<times>sc))]<line_sep>br=[i/sc<for>i range(ifloor(b_min<times>sc) iceil(b_max<times>sc)+1 int(5.<times>sc))]<line_sep>obj=bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(f_obs=self.f_obs().data() f_calc_1=f_calc_plus_f_bulk_no_scales.data() f_calc_2=f_calc_plus_f_bulk_no_scales_twin.data() f_mask_1=f_h.data() f_mask_2=f_h_twin.data() ss=self.ss twin_fraction=self.twin_fraction k_sol_range=flex.double(kr) b_sol_range=flex.double(br) miller_indices=self.f_obs().indices() # XXX What about twin-related? 
unit_cell=self.f_obs().unit_cell() r_ref=obj.r())<if_stmt>(obj.updated())<block_start>f_part2=f_h.array(data=obj.k_mask()<times>f_h.data())<line_sep>f_part2_twin=f_h_twin.array(data=obj.k_mask()<times>f_h_twin.data())<line_sep>kh,bh=obj.k_sol() obj.b_sol()<block_end>self.update_core(f_part2=f_part2 f_part2_twin=f_part2_twin k_anisotropic=obj.k_anisotropic())<line_sep>self.show(prefix="add H (%4.2f, %6.2f)"%(kh bh) log=log)<block_end>b_cart=adptbx.u_as_b(adptbx.u_star_as_u_cart(self.f_obs().unit_cell() o_kbu_sol.u_star()))<line_sep><return>group_args(k_sol=o_kbu_sol.k_sol() b_sol=o_kbu_sol.b_sol() b_cart=b_cart k_h=kh b_h=bh)<block_end><block_end>
import panel as pn


def main():
    """Serve a small demo contrasting indented vs. non-indented Markdown text."""
    # The leading indentation inside this triple-quoted string is intentional:
    # Markdown treats indented text as a code block, so it renders "wrong".
    text_error = """
    This is not formatted correctly by Markdown due to the indentation!"""

    text_ok = """
This is formatted correctly by Markdown!
"""

    divider = pn.pane.HTML(
        "<hr>",
        sizing_mode="stretch_width",
    )
    demo = pn.Column(
        pn.pane.Markdown(text_error),
        divider,
        pn.pane.Markdown(text_ok),
    )
    demo.servable()


main()
from django.conf.urls import url, patterns

from .views import tutorial_email, tutorial_message

# NOTE(review): `patterns()` is the legacy (pre-Django 1.10) URLconf helper;
# it is kept because this project imports and relies on it explicitly.
urlpatterns = patterns(
    "",  # flake8: noqa
    url(
        r"^mail/(?P<pk>\d+)/(?P<pks>[0-9,]+)/$",
        tutorial_email,
        name="tutorial_email",
    ),
    url(
        r"^message/(?P<pk>\d+)/$",
        tutorial_message,
        name="tutorial_message",
    ),
)
""" Pure-Python implementation of a Python 2-like str object for Python 3. """<import_from_stmt>numbers Integral<import_from_stmt>past.utils PY2 with_metaclass<if_stmt>PY2<block_start><import_from_stmt>collections Iterable<block_end><else_stmt><block_start><import_from_stmt>collections.abc Iterable<block_end>_builtin_bytes=bytes<class_stmt>BaseOldStr(type)<block_start><def_stmt>__instancecheck__ cls instance<block_start><return>isinstance(instance _builtin_bytes)<block_end><block_end><def_stmt>unescape s<block_start>r""" Interprets strings with escape sequences Example: >>> s = unescape(r'abc\\def') # i.e. 'abc\\\\def' >>> print(s) 'abc\def' >>> s2 = unescape('abc\\ndef') >>> len(s2) 8 >>> print(s2) abc def """<line_sep><return>s.encode().decode('unicode_escape')<block_end><class_stmt>oldstr(with_metaclass(BaseOldStr _builtin_bytes))<block_start>""" A forward port of the Python 2 8-bit string object to Py3 """<line_sep># Python 2 strings have no __iter__ method: @property<def_stmt>__iter__ self<block_start><raise>AttributeError<block_end><def_stmt>__dir__ self<block_start><return>[thing<for>thing dir(_builtin_bytes)<if>thing<ne>'__iter__']<block_end># def __new__(cls, *args, **kwargs): # """ # From the Py3 bytes docstring: # bytes(iterable_of_ints) -> bytes # bytes(string, encoding[, errors]) -> bytes # bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer # bytes(int) -> bytes object of size given by the parameter initialized with null bytes # bytes() -> empty bytes object # # Construct an immutable array of bytes from: # - an iterable yielding integers in range(256) # - a text string encoded using the specified encoding # - any object implementing the buffer API. # - an integer # """ # # if len(args) == 0: # return super(newbytes, cls).__new__(cls) # # Was: elif isinstance(args[0], newbytes): # # We use type() instead of the above because we're redefining # # this to be True for all unicode string subclasses. 
Warning: # # This may render newstr un-subclassable. # elif type(args[0]) == newbytes: # return args[0] # elif isinstance(args[0], _builtin_bytes): # value = args[0] # elif isinstance(args[0], unicode): # if 'encoding' not in kwargs: # raise TypeError('unicode string argument without an encoding') # ### # # Was: value = args[0].encode(**kwargs) # # Python 2.6 string encode() method doesn't take kwargs: # # Use this instead: # newargs = [kwargs['encoding']] # if 'errors' in kwargs: # newargs.append(kwargs['errors']) # value = args[0].encode(*newargs) # ### # elif isinstance(args[0], Iterable): # if len(args[0]) == 0: # # What is this? # raise ValueError('unknown argument type') # elif len(args[0]) > 0 and isinstance(args[0][0], Integral): # # It's a list of integers # value = b''.join([chr(x) for x in args[0]]) # else: # raise ValueError('item cannot be interpreted as an integer') # elif isinstance(args[0], Integral): # if args[0] < 0: # raise ValueError('negative count') # value = b'\x00' * args[0] # else: # value = args[0] # return super(newbytes, cls).__new__(cls, value) <def_stmt>__repr__ self<block_start>s=super(oldstr self).__repr__()# e.g. b'abc' on Py3, b'abc' on Py3 <return>s[1:]<block_end><def_stmt>__str__ self<block_start>s=super(oldstr self).__str__()# e.g. "b'abc'" or "b'abc\\ndef' # TODO: fix this: <assert_stmt>s[:2]<eq>"b'"<and>s[-1]<eq>"'"<line_sep><return>unescape(s[2:-1])# e.g. 
'abc' or 'abc\ndef' <block_end><def_stmt>__getitem__ self y<block_start><if_stmt>isinstance(y Integral)<block_start><return>super(oldstr self).__getitem__(slice(y y+1))<block_end><else_stmt><block_start><return>super(oldstr self).__getitem__(y)<block_end><block_end><def_stmt>__getslice__ self *args<block_start><return>self.__getitem__(slice(*args))<block_end><def_stmt>__contains__ self key<block_start><if_stmt>isinstance(key int)<block_start><return><false><block_end><block_end><def_stmt>__native__ self<block_start><return>bytes(self)<block_end><block_end>__all__=['oldstr']<line_sep>
"""Falcon Quick Scan API Interface Class _______ __ _______ __ __ __ | _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----. |. 1___| _| _ | | | | _ | 1___| _| _| | <| -__| |. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____| |: 1 | |: 1 | |::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy `-------' `-------' OAuth2 API - Customer SDK This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
For more information, please refer to <https://unlicense.org> """<import_from_stmt>._util force_default process_service_request handle_single_argument<import_from_stmt>._payload generic_payload_list aggregate_payload<import_from_stmt>._service_class ServiceClass<import_from_stmt>._endpoint._quick_scan _quick_scan_endpoints<as>Endpoints<class_stmt>QuickScan(ServiceClass)<block_start>"""The only requirement to instantiate an instance of this class is one of the following: - a valid client_id and client_secret provided as keywords. - a credential dictionary with client_id and client_secret containing valid API credentials { "client_id": "CLIENT_ID_HERE", "client_secret": "CLIENT_SECRET_HERE" } - a previously-authenticated instance of the authentication service class (oauth2.py) - a valid token provided by the authentication service class (oauth2.py) """<line_sep>@force_default(defaults=["body"] default_types=["dict"])<def_stmt>get_scans_aggregates self:object body:dict=<none> **kwargs<arrow>dict<block_start>"""Get scans aggregations as specified via json in request body. Keyword arguments: body -- full body payload, not required when using other keywords. { "date_ranges": [ { "from": "string", "to": "string" } ], "field": "string", "filter": "string", "interval": "string", "min_doc_count": 0, "missing": "string", "name": "string", "q": "string", "ranges": [ { "From": 0, "To": 0 } ], "size": 0, "sort": "string", "sub_aggregates": [ null ], "time_zone": "string", "type": "string" } date_ranges -- List of dictionaries. field -- String. filter -- FQL syntax. String. interval -- String. min_doc_count -- Minimum number of documents required to match. Integer. missing -- String. name -- Scan name. String. q -- FQL syntax. String. ranges -- List of dictionaries. size -- Integer. sort -- FQL syntax. String. sub_aggregates -- List of strings. time_zone -- String. type -- String. This method only supports keywords for providing arguments. 
This method does not support body payload validation. Returns: dict object containing API response. HTTP Method: POST Swagger URL https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScansAggregates """<if_stmt><not>body<block_start>body=aggregate_payload(submitted_keywords=kwargs)<block_end><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="GetScansAggregates" body=body)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>get_scans self:object *args parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Check the status of a volume scan. Time required for analysis increases with the number of samples in a volume but usually it should take less than 1 minute. Keyword arguments: ids -- One or more remediation IDs. String or list of strings. parameters - full parameters payload, not required if ids is provided as a keyword. Arguments: When not specified, the first argument to this method is assumed to be 'ids'. All others are ignored. Returns: dict object containing API response. HTTP Method: GET Swagger URL https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScans """<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="GetScans" keywords=kwargs params=handle_single_argument(args parameters "ids"))<block_end>@force_default(defaults=["body"] default_types=["dict"])<def_stmt>scan_samples self:object *args body:dict=<none> **kwargs<arrow>dict<block_start>"""Get scans aggregations as specified via json in request body. Keyword arguments: body -- full body payload, not required when samples keyword is provided. { "samples": [ "string" ] } samples -- SHA256(s) of the samples to scan. Must have been previously submitted using SampleUploadV3 (SampleUploads class). String or list of strings. Arguments: When not specified, the first argument to this method is assumed to be 'samples'. All others are ignored. 
Returns: dict object containing API response. HTTP Method: POST Swagger URL https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/ScanSamples """<if_stmt><not>body<block_start>body=generic_payload_list(submitted_arguments=args submitted_keywords=kwargs payload_value="samples")<block_end><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="ScanSamples" body=body body_validator={"samples":list}<if>self.validate_payloads<else><none> body_required=["samples"]<if>self.validate_payloads<else><none>)<block_end>@force_default(defaults=["parameters"] default_types=["dict"])<def_stmt>query_submissions self:object parameters:dict=<none> **kwargs<arrow>dict<block_start>"""Find IDs for submitted scans by providing an FQL filter and paging details. Returns a set of volume IDs that match your criteria. Keyword arguments: filter -- The filter expression that should be used to limit the results. FQL syntax. limit -- The maximum number of records to return. [integer, 1-5000] offset -- The integer offset to start retrieving records from. parameters - full parameters payload, not required if using other keywords. sort -- The property to sort by. FQL syntax. This method only supports keywords for providing arguments. Returns: dict object containing API response. 
HTTP Method: GET Swagger URL https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/QuerySubmissionsMixin0 """<line_sep><return>process_service_request(calling_object=self endpoints=Endpoints operation_id="QuerySubmissionsMixin0" keywords=kwargs params=parameters)<block_end># These method names align to the operation IDs in the API but # do not conform to snake_case / PEP8 and are defined here for # backwards compatibility / ease of use purposes GetScansAggregates=get_scans_aggregates<line_sep>GetScans=get_scans<line_sep>ScanSamples=scan_samples<line_sep>QuerySubmissionsMixin0=query_submissions<block_end># The legacy name for this class does not conform to PascalCase / PEP8 # It is defined here for backwards compatibility purposes only. Quick_Scan=QuickScan# pylint: disable=C0103
import pytest
from oauthlib.oauth2 import InvalidClientError, MissingTokenError

from test import configure_mendeley, cassette


def test_should_get_authenticated_session():
    """A client-credentials flow replayed from the cassette yields a usable session."""
    sdk = configure_mendeley()
    flow = sdk.start_client_credentials_flow()

    with cassette('fixtures/auth/client_credentials/get_authenticated_session.yaml'):
        session = flow.authenticate()

        assert session.token['access_token']
        assert session.host == 'https://api.mendeley.com'


def test_should_throw_exception_on_incorrect_credentials():
    """Invalid credentials never yield a token, and the OAuth library raises."""
    sdk = configure_mendeley()
    sdk.client_secret += '-invalid'
    flow = sdk.start_client_credentials_flow()

    # We should never get an access token back,
    # and the OAuth library should be unhappy about that.
    with cassette('fixtures/auth/client_credentials/incorrect_credentials.yaml'), \
            pytest.raises(MissingTokenError):
        flow.authenticate()
# coding=utf-8
"""Configuration constants for the firefly test environment."""

# Debug / test switches
DEBUG = True
TESTING = True

SECRET_KEY = 'secret_key for test'

# mongodb
MONGODB_SETTINGS = {
    'db': 'firefly_test',
    'username': '',
    'password': '',
    'host': '127.0.0.1',
    'port': 27017,
}

# redis cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = '127.0.0.1'
CACHE_REDIS_PORT = 6379
CACHE_REDIS_DB = 9
CACHE_REDIS_PASSWORD = ''

# mail sender
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = 'MAIL_USERNAME'
MAIL_PASSWORD = '<PASSWORD>'
MAIL_DEFAULT_SENDER = '<EMAIL>'

SECURITY_PASSWORD_SALT = "abc"
SECURITY_PASSWORD_HASH = "<PASSWORD>"
# SECURITY_PASSWORD_HASH = "<PASSWORD>"
SECURITY_EMAIL_SENDER = "<EMAIL>"
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"

# Set secret keys for CSRF protection
CSRF_ENABLED = False
WTF_CSRF_ENABLED = False

SERVER_EMAIL = 'Python-China <<EMAIL>>'

# Flask-SocialBlueprint
SOCIAL_BLUEPRINT = {
    # https://developers.facebook.com/apps/
    "flask_social_blueprint.providers.Facebook": {
        # App ID
        'consumer_key': '197…',
        # App Secret
        'consumer_secret': 'c956c1…',
    },
    # https://apps.twitter.com/app/new
    "flask_social_blueprint.providers.Twitter": {
        # Your access token from API Keys tab
        'consumer_key': 'bkp…',
        # access token secret
        'consumer_secret': 'pHUx…',
    },
    # https://console.developers.google.com/project
    "flask_social_blueprint.providers.Google": {
        # Client ID
        'consumer_key': '797….apps.googleusercontent.com',
        # Client secret
        'consumer_secret': 'bDG…',
    },
    # https://github.com/settings/applications/new
    "flask_social_blueprint.providers.Github": {
        # Client ID
        'consumer_key': '6f6…',
        # Client Secret
        'consumer_secret': '1a9…',
    },
}
import tensorflow as tf

"""Class for KDD10 percent GAN architecture.

Generator and discriminator.
"""

learning_rate = 0.00001
batch_size = 50
layer = 1
latent_dim = 32
dis_inter_layer_dim = 128
init_kernel = tf.contrib.layers.xavier_initializer()


def generator(z_inp, is_training=False, getter=None, reuse=False):
    """Generator architecture in tensorflow.

    Generates data from the latent space.

    Args:
        z_inp (tensor): variable in the latent space
        is_training (bool): unused here; kept for a uniform builder signature
        getter: custom variable getter (e.g. for EMA-averaged weights)
        reuse (bool): sharing variables or not

    Returns:
        (tensor): last activation layer of the generator
    """
    with tf.variable_scope('generator', reuse=reuse, custom_getter=getter):
        h = z_inp
        # Two ReLU hidden layers; scope names are kept identical to the
        # original layout so checkpoints keep loading.
        for scope_name, width in (('layer_1', 64), ('layer_2', 128)):
            with tf.variable_scope(scope_name):
                h = tf.layers.dense(h,
                                    units=width,
                                    kernel_initializer=init_kernel,
                                    name='fc')
                h = tf.nn.relu(h, name='relu')
        # Linear output layer (121 = KDD feature width).  The scope is
        # intentionally named 'layer_4' to match the original naming.
        with tf.variable_scope('layer_4'):
            h = tf.layers.dense(h,
                                units=121,
                                kernel_initializer=init_kernel,
                                name='fc')
        return h


def discriminator(x_inp, is_training=False, getter=None, reuse=False):
    """Discriminator architecture in tensorflow.

    Discriminates between real data and generated data.

    Args:
        x_inp (tensor): input data for the encoder.
        is_training (bool): enables dropout when True
        getter: custom variable getter (e.g. for EMA-averaged weights)
        reuse (bool): sharing variables or not

    Returns:
        logits (tensor): last activation layer of the discriminator (shape 1)
        intermediate_layer (tensor): intermediate layer for feature matching
    """
    with tf.variable_scope('discriminator', reuse=reuse, custom_getter=getter):
        h = x_inp
        # Three leaky-ReLU + dropout blocks; scope names preserved.
        for scope_name, width in (('layer_1', 256),
                                  ('layer_2', 128),
                                  ('layer_3', dis_inter_layer_dim)):
            with tf.variable_scope(scope_name):
                h = tf.layers.dense(h,
                                    units=width,
                                    kernel_initializer=init_kernel,
                                    name='fc')
                h = leakyReLu(h)
                h = tf.layers.dropout(h,
                                      rate=0.2,
                                      name='dropout',
                                      training=is_training)
        # Features used for feature matching are taken just before the
        # final linear projection.
        intermediate_layer = h
        with tf.variable_scope('layer_4'):
            h = tf.layers.dense(h,
                                units=1,
                                kernel_initializer=init_kernel,
                                name='fc')
        logits = tf.squeeze(h)
        return logits, intermediate_layer


def leakyReLu(x, alpha=0.1, name=None):
    """Leaky ReLU activation, optionally wrapped in a named variable scope."""
    if not name:
        return _leakyReLu_impl(x, alpha)
    with tf.variable_scope(name):
        return _leakyReLu_impl(x, alpha)


def _leakyReLu_impl(x, alpha):
    # relu(x) - alpha * relu(-x)  ==  x for x >= 0, alpha * x for x < 0
    return tf.nn.relu(x) - (alpha * tf.nn.relu(-x))
# Copyright (c) cocotb contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Backports and compatibility shims for newer python features.

These are for internal use - users should use a third party library
like `six` if they want to use these shims in their own code
"""
import sys


# backport of Python 3.7's contextlib.nullcontext
class nullcontext:
    """Context manager that performs no additional processing.

    Stands in for a real context manager when one is only sometimes needed:

        cm = optional_cm if condition else nullcontext()
        with cm:
            # Perform operation, using optional_cm if condition is True
    """

    def __init__(self, enter_result=None):
        # Value handed back by ``with ... as``; defaults to None.
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *excinfo):
        # Implicitly returns None, so exceptions are never suppressed.
        pass


# `dict` is guaranteed to preserve insertion order from Python 3.7 onwards
# and is slightly faster than OrderedDict, so prefer it where the
# interpreter makes the guarantee.
if sys.version_info[:2] < (3, 7):
    import collections
    insertion_ordered_dict = collections.OrderedDict
else:
    insertion_ordered_dict = dict
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. <import_from_future_stmt> absolute_import<import_from_stmt>.images ImageInferenceJob<import_from_stmt>.job InferenceJob<line_sep>__all__=['InferenceJob' 'ImageInferenceJob' ]<line_sep>
<import_from_stmt>.generator generate<import_from_stmt>.init init<line_sep>
# Copyright 2015 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Common code for unit tests of the interoperability test code."""<import_from_stmt>tests.interop methods<class_stmt>IntraopTestCase(object)<block_start>"""Unit test methods. This class must be mixed in with unittest.TestCase and a class that defines setUp and tearDown methods that manage a stub attribute. """<def_stmt>testEmptyUnary self<block_start>methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub <none>)<block_end><def_stmt>testLargeUnary self<block_start>methods.TestCase.LARGE_UNARY.test_interoperability(self.stub <none>)<block_end><def_stmt>testServerStreaming self<block_start>methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub <none>)<block_end><def_stmt>testClientStreaming self<block_start>methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub <none>)<block_end><def_stmt>testPingPong self<block_start>methods.TestCase.PING_PONG.test_interoperability(self.stub <none>)<block_end><def_stmt>testCancelAfterBegin self<block_start>methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub <none>)<block_end><def_stmt>testCancelAfterFirstResponse self<block_start>methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(self.stub <none>)<block_end><def_stmt>testTimeoutOnSleepingServer self<block_start>methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(self.stub <none>)<block_end><block_end>
<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<import_stmt>datetime<import_stmt>pytest<import_from_stmt>moto mock_sagemaker<import_from_stmt>moto.sts.models ACCOUNT_ID<line_sep>FAKE_ROLE_ARN="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)<line_sep>TEST_REGION_NAME="us-east-1"<class_stmt>MyProcessingJobModel(object)<block_start><def_stmt>__init__ self processing_job_name role_arn container=<none> bucket=<none> prefix=<none> app_specification=<none> network_config=<none> processing_inputs=<none> processing_output_config=<none> processing_resources=<none> stopping_condition=<none> <block_start>self.processing_job_name=processing_job_name<line_sep>self.role_arn=role_arn<line_sep>self.container=(container<or>"683313688378.dkr.ecr.us-east-1.amazonaws.com/sagemaker-scikit-learn:0.23-1-cpu-py3")<line_sep>self.bucket=bucket<or>"my-bucket"<line_sep>self.prefix=prefix<or>"sagemaker"<line_sep>self.app_specification=app_specification<or>{"ImageUri":self.container "ContainerEntrypoint":["python3" ] }<line_sep>self.network_config=network_config<or>{"EnableInterContainerTrafficEncryption":<false> "EnableNetworkIsolation":<false> }<line_sep>self.processing_inputs=processing_inputs<or>[{"InputName":"input" "AppManaged":<false> "S3Input":{"S3Uri":"s3://{}/{}/processing/".format(self.bucket self.prefix) "LocalPath":"/opt/ml/processing/input" "S3DataType":"S3Prefix" "S3InputMode":"File" "S3DataDistributionType":"FullyReplicated" "S3CompressionType":"None" } }]<line_sep>self.processing_output_config=processing_output_config<or>{"Outputs":[{"OutputName":"output" "S3Output":{"S3Uri":"s3://{}/{}/processing/".format(self.bucket self.prefix) "LocalPath":"/opt/ml/processing/output" "S3UploadMode":"EndOfJob" } "AppManaged":<false> }]}<line_sep>self.processing_resources=processing_resources<or>{"ClusterConfig":{"InstanceCount":1 "InstanceType":"ml.m5.large" "VolumeSizeInGB":10 } }<line_sep>self.stopping_condition=stopping_condition<or>{"MaxRuntimeInSeconds":3600 
}<block_end><def_stmt>save self<block_start>sagemaker=boto3.client("sagemaker" region_name=TEST_REGION_NAME)<line_sep>params={"AppSpecification":self.app_specification "NetworkConfig":self.network_config "ProcessingInputs":self.processing_inputs "ProcessingJobName":self.processing_job_name "ProcessingOutputConfig":self.processing_output_config "ProcessingResources":self.processing_resources "RoleArn":self.role_arn "StoppingCondition":self.stopping_condition }<line_sep><return>sagemaker.create_processing_job(**params)<block_end><block_end>@mock_sagemaker<def_stmt>test_create_processing_job <block_start>sagemaker=boto3.client("sagemaker" region_name=TEST_REGION_NAME)<line_sep>processing_job_name="MyProcessingJob"<line_sep>role_arn="arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)<line_sep>container="382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1"<line_sep>bucket="my-bucket"<line_sep>prefix="my-prefix"<line_sep>app_specification={"ImageUri":container "ContainerEntrypoint":["python3" "app.py"] }<line_sep>processing_resources={"ClusterConfig":{"InstanceCount":2 "InstanceType":"ml.m5.xlarge" "VolumeSizeInGB":20 } }<line_sep>stopping_condition={"MaxRuntimeInSeconds":60<times>60}<line_sep>job=MyProcessingJobModel(processing_job_name role_arn container=container bucket=bucket prefix=prefix app_specification=app_specification processing_resources=processing_resources stopping_condition=stopping_condition 
)<line_sep>resp=job.save()<line_sep>resp["ProcessingJobArn"].should.match(r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name))<line_sep>resp=sagemaker.describe_processing_job(ProcessingJobName=processing_job_name)<line_sep>resp["ProcessingJobName"].should.equal(processing_job_name)<line_sep>resp["ProcessingJobArn"].should.match(r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name))<assert_stmt>"python3"<in>resp["AppSpecification"]["ContainerEntrypoint"]<assert_stmt>"app.py"<in>resp["AppSpecification"]["ContainerEntrypoint"]<assert_stmt>resp["RoleArn"]<eq>role_arn<assert_stmt>resp["ProcessingJobStatus"]<eq>"Completed"<assert_stmt>isinstance(resp["CreationTime"] datetime.datetime)<assert_stmt>isinstance(resp["LastModifiedTime"] datetime.datetime)<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<line_sep>name="blah"<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"<line_sep>test_processing_job=MyProcessingJobModel(processing_job_name=name role_arn=arn)<line_sep>test_processing_job.save()<line_sep>processing_jobs=client.list_processing_jobs()<assert_stmt>len(processing_jobs["ProcessingJobSummaries"]).should.equal(1)<assert_stmt>processing_jobs["ProcessingJobSummaries"][0]["ProcessingJobName"].should.equal(name)<assert_stmt>processing_jobs["ProcessingJobSummaries"][0]["ProcessingJobArn"].should.match(r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(name))<assert_stmt>processing_jobs.get("NextToken")<is><none><block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_multiple <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<line_sep>name_job_1="blah"<line_sep>arn_job_1="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"<line_sep>test_processing_job_1=MyProcessingJobModel(processing_job_name=name_job_1 
role_arn=arn_job_1)<line_sep>test_processing_job_1.save()<line_sep>name_job_2="blah2"<line_sep>arn_job_2="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar2"<line_sep>test_processing_job_2=MyProcessingJobModel(processing_job_name=name_job_2 role_arn=arn_job_2)<line_sep>test_processing_job_2.save()<line_sep>processing_jobs_limit=client.list_processing_jobs(MaxResults=1)<assert_stmt>len(processing_jobs_limit["ProcessingJobSummaries"]).should.equal(1)<line_sep>processing_jobs=client.list_processing_jobs()<assert_stmt>len(processing_jobs["ProcessingJobSummaries"]).should.equal(2)<assert_stmt>processing_jobs.get("NextToken").should.be.none<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_none <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<line_sep>processing_jobs=client.list_processing_jobs()<assert_stmt>len(processing_jobs["ProcessingJobSummaries"]).should.equal(0)<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_should_validate_input <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<line_sep>junk_status_equals="blah"<with_stmt>pytest.raises(ClientError)<as>ex<block_start>client.list_processing_jobs(StatusEquals=junk_status_equals)<block_end>expected_error=f"1 validation errors detected: Value '{junk_status_equals}' at 'statusEquals' failed to satisfy constraint: Member must satisfy enum value set: ['Completed', 'Stopped', 'InProgress', 'Stopping', 'Failed']"<assert_stmt>ex.value.response["Error"]["Code"]<eq>"ValidationException"<assert_stmt>ex.value.response["Error"]["Message"]<eq>expected_error<line_sep>junk_next_token="<PASSWORD>"<with_stmt>pytest.raises(ClientError)<as>ex<block_start>client.list_processing_jobs(NextToken=junk_next_token)<block_end><assert_stmt>ex.value.response["Error"]["Code"]<eq>"ValidationException"<assert_stmt>(ex.value.response["Error"]["Message"]<eq>'Invalid pagination token because "{0}".')<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_with_name_filters 
<block_start>client=boto3.client("sagemaker" region_name="us-east-1")<for_stmt>i range(5)<block_start>name="xgboost-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end><for_stmt>i range(5)<block_start>name="vgg-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end>xgboost_processing_jobs=client.list_processing_jobs(NameContains="xgboost")<assert_stmt>len(xgboost_processing_jobs["ProcessingJobSummaries"]).should.equal(5)<line_sep>processing_jobs_with_2=client.list_processing_jobs(NameContains="2")<assert_stmt>len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_paginated <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<for_stmt>i range(5)<block_start>name="xgboost-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end>xgboost_processing_job_1=client.list_processing_jobs(NameContains="xgboost" MaxResults=1)<assert_stmt>len(xgboost_processing_job_1["ProcessingJobSummaries"]).should.equal(1)<assert_stmt>xgboost_processing_job_1["ProcessingJobSummaries"][0]["ProcessingJobName"].should.equal("xgboost-0")<assert_stmt>xgboost_processing_job_1.get("NextToken").should_not.be.none<line_sep>xgboost_processing_job_next=client.list_processing_jobs(NameContains="xgboost" MaxResults=1 NextToken=xgboost_processing_job_1.get("NextToken") 
)<assert_stmt>len(xgboost_processing_job_next["ProcessingJobSummaries"]).should.equal(1)<assert_stmt>xgboost_processing_job_next["ProcessingJobSummaries"][0]["ProcessingJobName"].should.equal("xgboost-1")<assert_stmt>xgboost_processing_job_next.get("NextToken").should_not.be.none<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_paginated_with_target_in_middle <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<for_stmt>i range(5)<block_start>name="xgboost-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end><for_stmt>i range(5)<block_start>name="vgg-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end>vgg_processing_job_1=client.list_processing_jobs(NameContains="vgg" MaxResults=1)<assert_stmt>len(vgg_processing_job_1["ProcessingJobSummaries"]).should.equal(0)<assert_stmt>vgg_processing_job_1.get("NextToken").should_not.be.none<line_sep>vgg_processing_job_6=client.list_processing_jobs(NameContains="vgg" MaxResults=6)<assert_stmt>len(vgg_processing_job_6["ProcessingJobSummaries"]).should.equal(1)<assert_stmt>vgg_processing_job_6["ProcessingJobSummaries"][0]["ProcessingJobName"].should.equal("vgg-0")<assert_stmt>vgg_processing_job_6.get("NextToken").should_not.be.none<line_sep>vgg_processing_job_10=client.list_processing_jobs(NameContains="vgg" MaxResults=10)<assert_stmt>len(vgg_processing_job_10["ProcessingJobSummaries"]).should.equal(5)<assert_stmt>vgg_processing_job_10["ProcessingJobSummaries"][-1]["ProcessingJobName"].should.equal("vgg-4")<assert_stmt>vgg_processing_job_10.get("NextToken").should.be.none<block_end>@mock_sagemaker<def_stmt>test_list_processing_jobs_paginated_with_fragmented_targets <block_start>client=boto3.client("sagemaker" region_name="us-east-1")<for_stmt>i 
range(5)<block_start>name="xgboost-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end><for_stmt>i range(5)<block_start>name="vgg-{}".format(i)<line_sep>arn="arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)<line_sep>MyProcessingJobModel(processing_job_name=name role_arn=arn).save()<block_end>processing_jobs_with_2=client.list_processing_jobs(NameContains="2" MaxResults=8)<assert_stmt>len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)<assert_stmt>processing_jobs_with_2.get("NextToken").should_not.be.none<line_sep>processing_jobs_with_2_next=client.list_processing_jobs(NameContains="2" MaxResults=1 NextToken=processing_jobs_with_2.get("NextToken") )<assert_stmt>len(processing_jobs_with_2_next["ProcessingJobSummaries"]).should.equal(0)<assert_stmt>processing_jobs_with_2_next.get("NextToken").should_not.be.none<line_sep>processing_jobs_with_2_next_next=client.list_processing_jobs(NameContains="2" MaxResults=1 NextToken=processing_jobs_with_2_next.get("NextToken") )<assert_stmt>len(processing_jobs_with_2_next_next["ProcessingJobSummaries"]).should.equal(0)<assert_stmt>processing_jobs_with_2_next_next.get("NextToken").should.be.none<block_end>
<import_stmt>pytest<import_stmt>datetime<import_stmt>json<import_stmt>functools<import_from_stmt>urllib.parse urlencode parse_qs<import_from_stmt>descarteslabs.common.graft client<as>graft_client<import_from_stmt>... types<import_from_stmt>.. tile_url<def_stmt>test_url <block_start>base="foo"<line_sep>base_q=base+"?"<line_sep>url=functools.partial(tile_url.tile_url base types.Image.from_id(""))<assert_stmt>url()<eq>base<assert_stmt>url(session_id="foo")<eq>base_q+urlencode({"session_id":"foo"})<assert_stmt>url(colormap="foo")<eq>base_q+urlencode({"colormap":"foo"})<assert_stmt>url(colormap="")<eq>base_q+urlencode({"colormap":""})<assert_stmt>url(reduction="mean")<eq>base_q+urlencode({"reduction":"mean"})<assert_stmt>url(checkerboard=<true>)<eq>base_q+urlencode({"checkerboard":"true"})<assert_stmt>url(checkerboard=<false>)<eq>base_q+urlencode({"checkerboard":"false"})<assert_stmt>url(bands=["red"])<eq>base_q+urlencode({"band":"red"})<assert_stmt>url(bands=["red" "green"])<eq>base_q+urlencode({"band":["red" "green"]} doseq=<true>)<with_stmt>pytest.raises(ValueError match="Up to 3 bands may be specified, not 4")<block_start>url(bands=["a" "b" "c" "d"])<block_end># 1-band scales are normalized <assert_stmt>url(scales=[0 1])<eq>base_q+urlencode({"scales":"[[0.0, 1.0]]"})<line_sep># If all none scales, not included <assert_stmt>url(scales=[<none> <none>])<eq>base_q+urlencode({"scales":"null"})<line_sep># test everything gets added together correctly got_base,params=url(session_id="foo" colormap="bar" bands=["red" "green"]).split("?")<assert_stmt>got_base<eq>base<line_sep>query=parse_qs(params strict_parsing=<true> keep_blank_values=<true>)<assert_stmt>query<eq>{# `parse_qs` returns all values wrapped in lists "session_id":["foo"] "colormap":["bar"] "band":["red" "green"] }<block_end>@pytest.mark.parametrize("args" [{"p1":"2021-01-20" "p2":2.2 "p3":1 } {"p1":datetime.datetime(2020 1 20) "p2":types.Float(1.1)+1 "p3":1 } {"p1":types.Datetime(2021 1 20) 
"p2":types.Float(1.1)+1 "p3":types.Int(1) } ] )<def_stmt>test_url_arguments args<block_start>func=types.Function[dict(p1=types.Datetime p2=types.Float p3=types.Int) types.Image]("x")<line_sep>base="http://base.net"<line_sep>url=functools.partial(tile_url.tile_url base func)<with_stmt>pytest.raises(TypeError match="missing a required argument")<block_start>url()<block_end><with_stmt>pytest.raises(TypeError match="got an unexpected keyword argument 'blah'")<block_start>url(**args blah="bad")<block_end><with_stmt>graft_client.consistent_guid()<block_start>got_base,params=url(**args).split("?")<block_end><assert_stmt>got_base<eq>base<line_sep>query=parse_qs(params strict_parsing=<true> keep_blank_values=<true>)<assert_stmt>query.keys()<eq>args.keys()<with_stmt>graft_client.consistent_guid()<block_start>p1_graft=types.Datetime._promote(args["p1"]).graft<block_end><assert_stmt>query["p1"]<eq>[json.dumps(p1_graft)]<if_stmt>isinstance(args["p2"] float)<block_start><assert_stmt>query["p2"]<eq>["2.2"]<block_end><else_stmt><block_start><assert_stmt>query["p2"]<eq>[json.dumps(args["p2"].graft)]<block_end><assert_stmt>query["p3"]<eq>["1"]<block_end><def_stmt>test_no_url_for_positional_only_function <block_start><with_stmt>pytest.raises(TypeError match="cannot use Functions with positional-only arguments")<block_start>tile_url.tile_url("" types.Function[types.Int {} types.Image]("x"))<block_end><block_end><def_stmt>test_validate_scales <block_start><assert_stmt>tile_url.validate_scales([[0.0 1.0] [0.0 2.0] [-1.0 1.0]])<eq>[[0.0 1.0] [0.0 2.0] [-1.0 1.0] ]<assert_stmt>tile_url.validate_scales([[0.0 1.0]])<eq>[[0.0 1.0]]<line_sep># ints -> floats <assert_stmt>tile_url.validate_scales([[0 1]])<eq>[[0.0 1.0]]<line_sep># 1-band convenience <assert_stmt>tile_url.validate_scales([0 1])<eq>[[0.0 1.0]]<line_sep># no scalings <assert_stmt>tile_url.validate_scales(<none>)<eq>[]<assert_stmt>tile_url.validate_scales([])<eq>[]<with_stmt>pytest.raises(TypeError match="Expected a list or tuple 
of scales")<block_start>tile_url.validate_scales(0)<block_end><with_stmt>pytest.raises(TypeError match="Expected a list or tuple of scales")<block_start>tile_url.validate_scales("foo")<block_end><with_stmt>pytest.raises(TypeError match="Scaling 0: expected a 2-item list or tuple")<block_start>tile_url.validate_scales([1 2 3])<block_end><with_stmt>pytest.raises(TypeError match="Scaling 0: items in scaling must be numbers")<block_start>tile_url.validate_scales([1 "foo"])<block_end><with_stmt>pytest.raises(ValueError match="expected up to 3 scales, but got 4")<block_start>tile_url.validate_scales([[0.0 1.0] [0.0 1.0] [0.0 1.0] [0.0 1.0]])<block_end><with_stmt>pytest.raises(ValueError match="but length was 3")<block_start>tile_url.validate_scales([[0.0 1.0 2.0]])<block_end><with_stmt>pytest.raises(ValueError match="but length was 1")<block_start>tile_url.validate_scales([[0.0]])<block_end><with_stmt>pytest.raises(ValueError match="one number and one None in scales")<block_start>tile_url.validate_scales([[<none> 1.0]])<block_end><block_end>
#! /usr/bin/env python # -*- coding: utf-8 -* <import_stmt>collections<import_from_stmt>census_data_downloader.core.tables BaseTableConfig<import_from_stmt>census_data_downloader.core.decorators register<line_sep>@register<class_stmt>MedianAgeDownloader(BaseTableConfig)<block_start>PROCESSED_TABLE_NAME='medianage'<line_sep>UNIVERSE="total population"<line_sep>RAW_TABLE_NAME='B01002'<line_sep>RAW_FIELD_CROSSWALK=collections.OrderedDict({"001":"median" "002":"male" "003":"female"})<block_end>
<import_stmt>os<import_stmt>time<import_stmt>random<import_stmt>scipy.sparse<as>sp<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>argparse<import_from_stmt>models SpHGAT<import_from_stmt>utils process<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--dataset' help='Dataset.' default='imdb' type=str)<line_sep>parser.add_argument('--epochs' help='Epochs.' default=100000 type=int)<line_sep>parser.add_argument('--patience' help='Patience for early stopping.' default=100 type=int)<line_sep>parser.add_argument('--lr' help='Learning rate.' default=0.005 type=float)<line_sep>parser.add_argument('--l2_coef' help='Weight decay.' default=0.0005 type=float)<line_sep>parser.add_argument('--dropout' help='Dropout.' default=0.6 type=float)<line_sep>parser.add_argument('--train_rate' help='Label rate for training.' default=0.1 type=float)<line_sep>parser.add_argument('--seed' help='Random seed for data splitting.' default=<none> type=int)<line_sep>parser.add_argument('--layers' help='Number of layers.' default=2 type=int)<line_sep>parser.add_argument('--hid' help='Number of hidden units per head in each layer.' nargs='*' default=[8 8] type=int)<line_sep>parser.add_argument('--heads' help='Number of attention heads in each layer.' nargs='*' default=[8 1] type=int)<line_sep>parser.add_argument('--residue' help='Using residue.' action='store_true')<line_sep>parser.add_argument('--repeat' help='Repeat.' default=10 type=int)<line_sep>parser.add_argument('--random_feature' help='Random features' action='store_true')<line_sep>parser.add_argument('--target_node' help='index of target nodes for classification.' 
nargs='*' default=[0 1] type=int)<line_sep>parser.add_argument('--target_is_multilabels' help='each type of target node for classification is multi-labels or not.(0 means not else means yes)' nargs='*' default=[0 1] type=int)<line_sep>parser.add_argument('--saved_model_suffix' help='to splite checkpoint by suffix' default="" type=str)<line_sep>parser.add_argument('--no_attn_reg' help='Do not use edge direction regularization' action='store_true')<line_sep>parser.add_argument('--simple_inner' help='Use original inner product' action='store_true')<line_sep>parser.add_argument('--loop_coef' help='Coefficient for regularization.' default=1e-3 type=float)<line_sep>parser.add_argument('--inv_coef' help='Coefficient for regularization.' default=1e-3 type=float)<line_sep>config=tf.ConfigProto(allow_soft_placement=<true>)<line_sep>config.gpu_options.allow_growth=<true><line_sep>args=parser.parse_args()<line_sep>dataset=args.dataset<line_sep>checkpt_file='pre_trained/{}/{}/{}.ckpt'.format(dataset args.saved_model_suffix dataset)<line_sep>checkpt_file=checkpt_file.replace('//' '/')<line_sep>process.mkdir(os.path.split(checkpt_file)[0])<line_sep># training params batch_size=1<line_sep>train_rate=args.train_rate<line_sep>seed=args.seed<line_sep>nb_epochs=args.epochs<line_sep>patience=args.patience<line_sep>lr=args.lr# learning rate l2_coef=args.l2_coef# weight decay dropout=args.dropout<line_sep>repeat=args.repeat<line_sep>random_feature=args.random_feature<line_sep>target_node=args.target_node<line_sep>is_multilabel=[<false><if>t<eq>0<else><true><for>t 
args.target_is_multilabels]<line_sep>loop_coef=args.loop_coef<line_sep>inv_coef=args.inv_coef<line_sep>layers=args.layers<line_sep>hid=args.hid<if_stmt>len(hid)<eq>1<block_start>hid_units=hid<times>layers<block_end><elif_stmt>len(hid)<eq>layers<block_start>hid_units=hid<block_end>heads=args.heads<if_stmt>len(heads)<eq>1<block_start>n_heads=heads<times>layers<block_end><elif_stmt>len(heads)<eq>2<block_start>n_heads=[heads[0]]<times>(layers-1)+[heads[1]]<block_end><elif_stmt>len(heads)<eq>layers<block_start>n_heads=heads<block_end>residual=args.residue# False nonlinearity=tf.nn.elu<line_sep>model=SpHGAT<line_sep>no_attn_reg=args.no_attn_reg<line_sep>simple_inner=args.simple_inner<line_sep>random.seed(seed)# random seed for random data split only print('Dataset: '+dataset)<line_sep>print('Train rate: '+str(train_rate))<line_sep>print('----- Opt. hyperparams -----')<line_sep>print('lr: '+str(lr))<line_sep>print('l2_coef: '+str(l2_coef))<line_sep>print('----- Archi. hyperparams -----')<line_sep>print('nb. layers: '+str(len(hid_units)))<line_sep>print('nb. units per layer: '+str(hid_units))<line_sep>print('nb. 
attention heads: '+str(n_heads))<line_sep>print('residual: '+str(residual))<line_sep>print('nonlinearity: '+str(nonlinearity))<line_sep>print('model: '+str(model))<line_sep>print('target nodes: ' target_node)<line_sep>print('is_multilabel: ' is_multilabel)<line_sep>print('loop_coef:' loop_coef)<line_sep>print('inv_coef:' inv_coef)<line_sep>sparse=<true><line_sep>metr_num=2<line_sep>total_vl_acc=np.array([0.]<times>(len(target_node)<times>metr_num))# should be array total_ts_acc=np.array([0.]<times>(len(target_node)<times>metr_num))# should be array <def_stmt>get_loss_acc logits labels msk is_multilabel=<false><block_start><global>model<line_sep>class_num=labels.shape[-1]<line_sep>log_resh=tf.reshape(logits [-1 class_num])<line_sep>lab_resh=tf.reshape(labels [-1 class_num])<line_sep>msk_resh=tf.reshape(msk [-1])<if_stmt>is_multilabel<block_start>loss=model.masked_sigmoid_cross_entropy(log_resh lab_resh msk_resh)<line_sep>accuracy=[model.micro_f1(log_resh lab_resh msk_resh) model.macro_f1(log_resh lab_resh msk_resh)]<line_sep>acc_name=['if1' 'af1']<line_sep>acc_full_name=['micro f1' 'macro f1']<block_end><else_stmt><block_start>loss=model.masked_softmax_cross_entropy(log_resh lab_resh msk_resh)<line_sep>accuracy=[model.micro_f1_onelabel(log_resh lab_resh msk_resh) model.macro_f1_onelabel(log_resh lab_resh msk_resh)]<line_sep>acc_name=['if1' 'af1']<line_sep>acc_full_name=['micro f1' 'macro f1']<block_end><return>loss accuracy acc_name acc_full_name<block_end><def_stmt>print_eachclass_info train_loss_each train_acc_each val_loss_each val_acc_each acc_name<block_start>tl_average=np.mean(np.array(train_loss_each) axis=0)<line_sep>ta_average=np.mean(np.array(train_acc_each) axis=0)<line_sep>vl_average=np.mean(np.array(val_loss_each) axis=0)<line_sep>va_average=np.mean(np.array(val_acc_each) axis=0)<line_sep>metric_num=int(len(ta_average)/len(tl_average))<for_stmt>i range(len(tl_average))<block_start>line='\t\t target %s: loss = %.3f, '%(i tl_average[i])<for_stmt>j 
range(metric_num)<block_start>line<augadd>'%s = %.5f, '%(acc_name[i<times>metric_num+j] ta_average[i<times>metric_num+j])<block_end>line<augadd>'| Val: loss = %.3f, '%(vl_average[i])<for_stmt>j range(metric_num)<block_start>line<augadd>'%s = %.5f, '%(acc_name[i<times>metric_num+j] va_average[i<times>metric_num+j])<block_end>print(line)<block_end><block_end><for_stmt>repeat_i range(repeat)<block_start>print('Run #'+str(repeat_i)+':')<line_sep>adj,adj_type,edge_list,features,y_train,y_val,y_test,train_mask,val_mask,test_mask=process.load_heterogeneous_data(dataset train_rate=train_rate target_node=target_node)<line_sep>features=[process.preprocess_features(feature)[0]<for>feature features]<line_sep>nb_nodes=[feature.shape[0]<for>feature features]<line_sep>ft_size=[feature.shape[1]<for>feature features]<line_sep>nb_classes=[y.shape[1]<for>y y_train]<line_sep>features=[feature[np.newaxis]<for>feature features]<line_sep>y_train=[y[np.newaxis]<for>y y_train]<line_sep>y_val=[y[np.newaxis]<for>y y_val]<line_sep>y_test=[y[np.newaxis]<for>y y_test]<line_sep>train_mask=[m[np.newaxis]<for>m train_mask]<line_sep>val_mask=[m[np.newaxis]<for>m val_mask]<line_sep>test_mask=[m[np.newaxis]<for>m test_mask]<if_stmt>random_feature<block_start>features[0]=np.random.standard_normal(features[0].shape)<block_end><if_stmt>sparse<block_start>biases=[process.preprocess_adj_hete(a)<for>a adj]# transposed here <block_end><else_stmt><block_start>biases=[]<for_stmt>a adj<block_start>a=a.todense()<line_sep>a=a[np.newaxis]<block_end><block_end><if_stmt>no_attn_reg<block_start>edge_list=[(i )<for>i range(len(adj_type))]<block_end><if_stmt>simple_inner<block_start>edge_list=[]<block_end><with_stmt>tf.Graph().as_default()<block_start><with_stmt>tf.name_scope('input')<block_start>ftr_in=[tf.placeholder(dtype=tf.float32 shape=(batch_size nb ft))<for>nb,ft zip(nb_nodes ft_size)]<if_stmt>sparse<block_start>bias_in=[tf.sparse_placeholder(dtype=tf.float32)<for>_ 
biases]<block_end><else_stmt><block_start>bias_in=<none><block_end>lbl_in=[tf.placeholder(dtype=tf.int32 shape=(batch_size nb_nodes[target_node[i]] nb_classes[i]))<for>i range(len(nb_classes))]<line_sep>msk_in=[tf.placeholder(dtype=tf.int32 shape=(batch_size nb_nodes[target_node[i]]))<for>i range(len(nb_classes))]<line_sep>attn_drop=tf.placeholder(dtype=tf.float32 shape=())<line_sep>ffd_drop=tf.placeholder(dtype=tf.float32 shape=())<line_sep>is_train=tf.placeholder(dtype=tf.bool shape=())<block_end>logits=model.inference(ftr_in nb_classes nb_nodes is_train attn_drop ffd_drop target_nodes=target_node bias_mat=bias_in adj_type=adj_type edge_list=edge_list hid_units=hid_units n_heads=n_heads residual=residual activation=nonlinearity)<with_stmt>tf.name_scope('loss_acc')<block_start>loss,accuracy,acc_name,acc_full_name=[] [] [] []<line_sep>all_class_loss=0.0<for_stmt>tn range(len(target_node))<block_start>tn_logits=logits[tn]<line_sep>tn_labels=lbl_in[tn]<line_sep>tn_masks=msk_in[tn]<line_sep>tn_is_multilabel=is_multilabel[tn]<line_sep>tn_loss,tn_accuracy,tn_acc_name,tn_acc_full_name=get_loss_acc(tn_logits tn_labels tn_masks is_multilabel=tn_is_multilabel)<line_sep>loss.append(tn_loss)<line_sep>accuracy.extend(tn_accuracy)<line_sep>acc_name.extend(tn_acc_name)<line_sep>acc_full_name.extend(tn_acc_full_name)<line_sep>all_class_loss<augadd>tn_loss<block_end>loss_loop=tf.add_n(tf.get_collection('loss_loop'))<times>loop_coef<line_sep>loss_inv=tf.add_n(tf.get_collection('loss_inv'))<times>inv_coef<block_end>train_op=model.training(all_class_loss+loss_loop+loss_inv lr l2_coef)<line_sep>saver=tf.train.Saver()<line_sep>init_op=tf.group(tf.global_variables_initializer() 
tf.local_variables_initializer())<line_sep>vlss_mn=np.inf<line_sep>vacc_mx=0.0<line_sep>curr_step=0<with_stmt>tf.Session(config=config)<as>sess<block_start>sess.run(init_op)<line_sep>vacc_early_model=0.0<line_sep>vlss_early_model=0.0<line_sep>vacc_each_early_model=np.array([0.]<times>(len(target_node)<times>metr_num))<for_stmt>epoch range(nb_epochs)# summary information <block_start>train_loss_avg=0<line_sep>train_acc_avg=0<line_sep>val_loss_avg=0<line_sep>val_acc_avg=0<line_sep># for each class information train_loss_each=[]<line_sep>train_acc_each=[]<line_sep>val_loss_each=[]<line_sep>val_acc_each=[]<line_sep>tr_step=0<line_sep>tr_size=features[0].shape[0]<while_stmt>tr_step<times>batch_size<l>tr_size<block_start><if_stmt>sparse<block_start>fd={i:d<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d<for>i,d zip(bias_in biases)})<block_end><else_stmt><block_start>fd={i:d[tr_step<times>batch_size:(tr_step+1)<times>batch_size]<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d[tr_step<times>batch_size:(tr_step+1)<times>batch_size]<for>i,d zip(bias_in biases)})<block_end>fd.update({i:d[tr_step<times>batch_size:(tr_step+1)<times>batch_size]<for>i,d zip(lbl_in y_train)})<line_sep>fd.update({i:d[tr_step<times>batch_size:(tr_step+1)<times>batch_size]<for>i,d zip(msk_in train_mask)})<line_sep>fd.update({is_train:<true>})<line_sep>fd.update({attn_drop:dropout ffd_drop:dropout})<line_sep>_,loss_list_tr,acc_list_tr,loss_loop_tr,loss_inv_tr=sess.run([train_op loss accuracy loss_loop loss_inv] feed_dict=fd)<line_sep>train_loss_each.append(np.array(loss_list_tr))<line_sep>train_acc_each.append(np.array(acc_list_tr))<line_sep>train_loss_avg<augadd>np.sum(np.array(loss_list_tr))<line_sep>train_acc_avg<augadd>np.sum(np.array(acc_list_tr))<line_sep>tr_step<augadd>1<block_end>vl_step=0<line_sep>vl_size=features[0].shape[0]<while_stmt>vl_step<times>batch_size<l>vl_size<block_start><if_stmt>sparse<block_start>fd={i:d<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d<for>i,d 
zip(bias_in biases)})<block_end><else_stmt><block_start>fd={i:d[vl_step<times>batch_size:(vl_step+1)<times>batch_size]<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d[vl_step<times>batch_size:(vl_step+1)<times>batch_size]<for>i,d zip(bias_in biases)})<block_end>fd.update({i:d[vl_step<times>batch_size:(vl_step+1)<times>batch_size]<for>i,d zip(lbl_in y_val)})<line_sep>fd.update({i:d[vl_step<times>batch_size:(vl_step+1)<times>batch_size]<for>i,d zip(msk_in val_mask)})<line_sep>fd.update({is_train:<false>})<line_sep>fd.update({attn_drop:0.0 ffd_drop:0.0})<line_sep>loss_list_vl,acc_list_vl=sess.run([loss accuracy] feed_dict=fd)<line_sep>acc_list_vl=[0.<if>np.isnan(acc_vl)<else>acc_vl<for>acc_vl acc_list_vl]<line_sep>val_loss_each.append(np.array(loss_list_vl))<line_sep>val_acc_each.append(np.array(acc_list_vl))<line_sep>val_loss_avg<augadd>np.sum(np.array(loss_list_vl))<line_sep>val_acc_avg<augadd>np.sum(np.array(acc_list_vl))<line_sep>vl_step<augadd>1<block_end>print('Training %s: loss = %.5f, %s = %.5f, loss_loop = %.5f, loss_inv = %.5f | Val: loss = %.5f, %s = %.5f'%(epoch train_loss_avg/tr_step 'acc/F1' train_acc_avg/tr_step loss_loop_tr loss_inv_tr val_loss_avg/vl_step 'acc/F1' val_acc_avg/vl_step))<line_sep>print_eachclass_info(train_loss_each train_acc_each val_loss_each val_acc_each acc_name)<if_stmt>val_acc_avg/vl_step<g>vacc_mx<or>val_loss_avg/vl_step<l>vlss_mn<block_start><if_stmt>val_acc_avg/vl_step<g>vacc_mx<and>val_loss_avg/vl_step<l>vlss_mn<block_start>vacc_early_model=val_acc_avg/vl_step<line_sep>vlss_early_model=val_loss_avg/vl_step<line_sep>vacc_each_early_model=np.mean(np.array(val_acc_each) axis=0)<line_sep>saver.save(sess checkpt_file)<line_sep>print("saved model as %s"%checkpt_file)<block_end>vacc_mx=np.max((val_acc_avg/vl_step vacc_mx))<line_sep>vlss_mn=np.min((val_loss_avg/vl_step vlss_mn))<line_sep>curr_step=0<block_end><else_stmt><block_start>curr_step<augadd>1<if_stmt>curr_step<eq>patience<block_start>print('Early stop! 
Min loss: ' vlss_mn ', Max' 'acc/F1' ': ' vacc_mx)<line_sep>print('Early stop model validation loss: ' vlss_early_model ', ' 'acc/F1' ': ' vacc_early_model)<line_sep>total_vl_acc<augadd>vacc_each_early_model<line_sep><break><block_end><block_end><block_end><if_stmt>curr_step<l>patience<block_start>print('Min loss: ' vlss_mn ', Max' 'acc/F1' ': ' vacc_mx)<line_sep>print('model validation loss: ' vlss_early_model ', ' 'acc/F1' ': ' vacc_early_model)<line_sep>total_vl_acc<augadd>vacc_each_early_model<block_end>saver.restore(sess checkpt_file)<line_sep>ts_size=features[0].shape[0]<line_sep>ts_step=0<line_sep>test_loss_each=[]<line_sep>test_acc_each=[]<while_stmt>ts_step<times>batch_size<l>ts_size<block_start><if_stmt>sparse<block_start>fd={i:d<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d<for>i,d zip(bias_in biases)})<block_end><else_stmt><block_start>fd={i:d[ts_step<times>batch_size:(ts_step+1)<times>batch_size]<for>i,d zip(ftr_in features)}<line_sep>fd.update({i:d[ts_step<times>batch_size:(ts_step+1)<times>batch_size]<for>i,d zip(bias_in biases)})<block_end>fd.update({i:d[ts_step<times>batch_size:(ts_step+1)<times>batch_size]<for>i,d zip(lbl_in y_test)})<line_sep>fd.update({i:d[ts_step<times>batch_size:(ts_step+1)<times>batch_size]<for>i,d zip(msk_in test_mask)})<line_sep>fd.update({is_train:<false>})<line_sep>fd.update({attn_drop:0.0 ffd_drop:0.0})<line_sep>loss_list_ts,acc_list_ts=sess.run([loss accuracy] feed_dict=fd)<line_sep>test_loss_each.append(np.array(loss_list_ts))<line_sep>test_acc_each.append(np.array(acc_list_ts))<line_sep>ts_step<augadd>1<block_end>test_loss_each=np.mean(np.array(test_loss_each) axis=0)<line_sep>test_acc_each=np.mean(np.array(test_acc_each) axis=0)<line_sep>print('*'<times>10 'Test information:' '*'<times>10)<for_stmt>e range(len(target_node))<block_start>print('target %s: loss: %.3f, %s:%.5f, %s:%.5f'%(e test_loss_each[e] acc_full_name[e<times>metr_num] test_acc_each[e<times>metr_num] acc_full_name[e<times>metr_num+1] 
test_acc_each[e<times>metr_num+1]))<block_end>total_ts_acc<augadd>test_acc_each<line_sep>sess.close()<block_end><block_end><block_end>print('Validation:' total_vl_acc/repeat 'Test:' total_ts_acc/repeat)<line_sep>
def test():
    """Exercise checker: confirm the solution loads the German small model and prints the doc text.

    NOTE(review): relies on the exercise harness globals ``__solution__`` (the
    submitted source as a string), ``nlp`` (the loaded pipeline) and ``__msg__``
    (the feedback reporter) — all provided by the surrounding framework.
    """
    # The solution must call spacy.load explicitly.
    assert "spacy.load" in __solution__, "Rufst du spacy.load auf?"
    # The loaded pipeline must be de_core_news_sm (checked via its metadata).
    assert nlp.meta["lang"] == "de", "Lädst du das korrekte Modell?"
    assert nlp.meta["name"] == "core_news_sm", "Lädst du das korrekte Modell?"
    # The text must be processed and the resulting doc printed.
    assert "nlp(text)" in __solution__, "Verarbeitest du den Text korrekt?"
    assert "print(doc.text)" in __solution__, "Druckst du den Text des Doc?"
    __msg__.good(
        "Gut gemacht! Jetzt wo du das Laden von Modellen geübt hast, lass uns "
        "mal ein paar ihrer Vorhersagen anschauen."
    )
"""Rabbit–fox predator–prey agent-based model on the unit square (pycxsimulator GUI)."""

import pycxsimulator
from pylab import *
import copy as cp

nr = 500.     # carrying capacity of rabbits
r_init = 100  # initial rabbit population
mr = 0.03     # magnitude of movement of rabbits
dr = 1.0      # death rate of rabbits when it faces foxes
rr = 0.1      # reproduction rate of rabbits

f_init = 30   # initial fox population
mf = 0.05     # magnitude of movement of foxes
df = 0.1      # death rate of foxes when there is no food
rf = 0.5      # reproduction rate of foxes

cd = 0.02     # radius for collision detection
cdsq = cd ** 2  # squared radius, avoids sqrt in the distance test


class agent:
    """Bare record type; fields (type, x, y) are attached dynamically."""
    pass


def initialize():
    """Create r_init rabbits followed by f_init foxes at uniform random positions."""
    global agents
    agents = []
    for i in range(r_init + f_init):
        ag = agent()
        ag.type = 'r' if i < r_init else 'f'
        ag.x = random()
        ag.y = random()
        agents.append(ag)


def observe():
    """Plot rabbits as blue dots and foxes as red circles on the unit square."""
    global agents
    cla()
    rabbits = [ag for ag in agents if ag.type == 'r']
    if rabbits:
        plot([ag.x for ag in rabbits], [ag.y for ag in rabbits], 'b.')
    foxes = [ag for ag in agents if ag.type == 'f']
    if foxes:
        plot([ag.x for ag in foxes], [ag.y for ag in foxes], 'ro')
    axis('image')
    axis([0, 1, 0, 1])


def update_one_agent():
    """Move one randomly chosen agent and apply death/reproduction rules.

    Rabbits may die near foxes and reproduce logistically (capacity nr);
    foxes starve with no rabbit nearby and reproduce when food is close.
    """
    global agents
    if not agents:
        return
    ag = choice(agents)

    # simulating random movement, clamped to the unit square
    m = mr if ag.type == 'r' else mf
    ag.x += uniform(-m, m)
    ag.y += uniform(-m, m)
    ag.x = min(1, max(0, ag.x))
    ag.y = min(1, max(0, ag.y))

    # detecting collision (opposite species within radius cd) and
    # simulating death or birth
    neighbors = [nb for nb in agents
                 if nb.type != ag.type
                 and (ag.x - nb.x) ** 2 + (ag.y - nb.y) ** 2 < cdsq]

    if ag.type == 'r':
        if neighbors:  # if there are foxes nearby
            if random() < dr:
                agents.remove(ag)
                return
        # logistic reproduction: rate shrinks as the population nears nr
        num_rabbits = sum(1 for x in agents if x.type == 'r')
        if random() < rr * (1 - num_rabbits / nr):
            agents.append(cp.copy(ag))
    else:
        if not neighbors:  # if there are no rabbits nearby
            if random() < df:
                agents.remove(ag)
                return
        elif random() < rf:  # rabbits nearby: chance to reproduce
            agents.append(cp.copy(ag))


def update():
    """Advance one unit of model time: on average one event per living agent."""
    global agents
    t = 0.
    while t < 1. and len(agents) > 0:
        t += 1. / len(agents)
        update_one_agent()


pycxsimulator.GUI().start(func=[initialize, observe, update])
# Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""Tests for ml_collections.FrozenConfigDict."""

from collections import abc as collections_abc
import copy
import pickle

from absl.testing import absltest
import ml_collections

# Shared fixture covering every container kind FrozenConfigDict must handle.
_TEST_DICT = {
    'int': 2,
    'list': [1, 2],
    'nested_list': [[1, [2]]],
    'set': {1, 2},
    'tuple': (1, 2),
    'frozenset': frozenset({1, 2}),
    'dict': {
        'float': -1.23,
        'list': [1, 2],
        'dict': {},
        'tuple_containing_list': (1, 2, (3, [4, 5], (6, 7))),
        'list_containing_tuple': [1, 2, [3, 4], (5, 6)],
    },
    'ref': ml_collections.FieldReference({'int': 0}),
}


def _test_dict_deepcopy():
    """Fresh deep copy of the fixture, safe to mutate in a test."""
    return copy.deepcopy(_TEST_DICT)


def _test_configdict():
    """Fixture wrapped in a mutable ConfigDict."""
    return ml_collections.ConfigDict(_TEST_DICT)


def _test_frozenconfigdict():
    """Fixture wrapped in a FrozenConfigDict."""
    return ml_collections.FrozenConfigDict(_TEST_DICT)


class FrozenConfigDictTest(absltest.TestCase):
    """Tests FrozenConfigDict in config flags library."""

    def assertFrozenRaisesValueError(self, input_list):
        """Assert initialization on all elements of input_list raise ValueError."""
        for initial_dictionary in input_list:
            with self.assertRaises(ValueError):
                _ = ml_collections.FrozenConfigDict(initial_dictionary)

    def testBasicEquality(self):
        """Tests basic equality with different types of initialization."""
        fcd = _test_frozenconfigdict()
        fcd_cd = ml_collections.FrozenConfigDict(_test_configdict())
        fcd_fcd = ml_collections.FrozenConfigDict(fcd)
        self.assertEqual(fcd, fcd_cd)
        self.assertEqual(fcd, fcd_fcd)

    def testImmutability(self):
        """Tests immutability of frozen config."""
        fcd = _test_frozenconfigdict()
        # Mutable containers come back as their immutable counterparts.
        self.assertEqual(fcd.list, tuple(_TEST_DICT['list']))
        self.assertEqual(fcd.tuple, _TEST_DICT['tuple'])
        self.assertEqual(fcd.set, frozenset(_TEST_DICT['set']))
        self.assertEqual(fcd.frozenset, _TEST_DICT['frozenset'])
        # Must manually check set to frozenset conversion, since Python == does not
        self.assertIsInstance(fcd.set, frozenset)

        self.assertEqual(fcd.dict.list, tuple(_TEST_DICT['dict']['list']))
        self.assertNotEqual(fcd.dict.tuple_containing_list,
                            _TEST_DICT['dict']['tuple_containing_list'])
        self.assertEqual(
            fcd.dict.tuple_containing_list[2][1],
            tuple(_TEST_DICT['dict']['tuple_containing_list'][2][1]))
        self.assertIsInstance(fcd.dict, ml_collections.FrozenConfigDict)

        # All mutation paths must be rejected.
        with self.assertRaises(AttributeError):
            fcd.newitem = 0
        with self.assertRaises(AttributeError):
            fcd.dict.int = 0
        with self.assertRaises(AttributeError):
            fcd['newitem'] = 0
        with self.assertRaises(AttributeError):
            del fcd.int
        with self.assertRaises(AttributeError):
            del fcd['int']

    def testLockAndFreeze(self):
        """Ensures .lock() and .freeze() raise errors."""
        fcd = _test_frozenconfigdict()
        self.assertFalse(fcd.is_locked)
        self.assertFalse(fcd.as_configdict().is_locked)
        with self.assertRaises(AttributeError):
            fcd.lock()
        with self.assertRaises(AttributeError):
            fcd.unlock()
        with self.assertRaises(AttributeError):
            fcd.freeze()
        with self.assertRaises(AttributeError):
            fcd.unfreeze()

    def testInitConfigDict(self):
        """Tests that ConfigDict initialization handles FrozenConfigDict.

        Initializing a ConfigDict on a dictionary with FrozenConfigDict values
        should unfreeze these values.
        """
        dict_without_fcd_node = _test_dict_deepcopy()
        dict_without_fcd_node.pop('ref')
        dict_with_fcd_node = copy.deepcopy(dict_without_fcd_node)
        dict_with_fcd_node['dict'] = ml_collections.FrozenConfigDict(
            dict_with_fcd_node['dict'])
        cd_without_fcd_node = ml_collections.ConfigDict(dict_without_fcd_node)
        cd_with_fcd_node = ml_collections.ConfigDict(dict_with_fcd_node)
        fcd_without_fcd_node = ml_collections.FrozenConfigDict(
            dict_without_fcd_node)
        fcd_with_fcd_node = ml_collections.FrozenConfigDict(dict_with_fcd_node)

        self.assertEqual(cd_without_fcd_node, cd_with_fcd_node)
        self.assertEqual(fcd_without_fcd_node, fcd_with_fcd_node)

    def testInitCopying(self):
        """Tests that initialization copies when and only when necessary.

        Ensures copying only occurs when converting mutable type to immutable
        type, regardless of whether the FrozenConfigDict is initialized by a
        dict or a FrozenConfigDict. Also ensures no copying occurs when
        converting from FrozenConfigDict back to ConfigDict.
        """
        fcd = _test_frozenconfigdict()

        # These should be uncopied when creating fcd
        fcd_unchanged_from_test_dict = [
            (_TEST_DICT['tuple'], fcd.tuple),
            (_TEST_DICT['frozenset'], fcd.frozenset),
            (_TEST_DICT['dict']['tuple_containing_list'][2][2],
             fcd.dict.tuple_containing_list[2][2]),
            (_TEST_DICT['dict']['list_containing_tuple'][3],
             fcd.dict.list_containing_tuple[3]),
        ]

        # These should be copied when creating fcd
        fcd_different_from_test_dict = [
            (_TEST_DICT['list'], fcd.list),
            (_TEST_DICT['dict']['tuple_containing_list'][2][1],
             fcd.dict.tuple_containing_list[2][1]),
        ]

        for (x, y) in fcd_unchanged_from_test_dict:
            self.assertEqual(id(x), id(y))
        for (x, y) in fcd_different_from_test_dict:
            self.assertNotEqual(id(x), id(y))

        # Also make sure that converting back to ConfigDict makes no copies
        self.assertEqual(
            id(_TEST_DICT['dict']['tuple_containing_list']),
            id(ml_collections.ConfigDict(fcd).dict.tuple_containing_list))

    def testAsConfigDict(self):
        """Tests that converting FrozenConfigDict to ConfigDict works correctly.

        In particular, ensures that FrozenConfigDict does the inverse of
        ConfigDict regarding type_safe, lock, and attribute mutability.
        """
        # First ensure conversion to ConfigDict works on empty FrozenConfigDict
        self.assertEqual(
            ml_collections.ConfigDict(ml_collections.FrozenConfigDict()),
            ml_collections.ConfigDict())

        cd = _test_configdict()
        cd_fcd_cd = ml_collections.ConfigDict(
            ml_collections.FrozenConfigDict(cd))
        self.assertEqual(cd, cd_fcd_cd)

        # Make sure locking is respected
        cd.lock()
        self.assertEqual(
            cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))

        # Make sure type_safe is respected
        cd = ml_collections.ConfigDict(_TEST_DICT, type_safe=False)
        self.assertEqual(
            cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))

    def testInitSelfReferencing(self):
        """Ensure initialization fails on self-referencing dicts."""
        self_ref = {}
        self_ref['self'] = self_ref
        parent_ref = {'dict': {}}
        parent_ref['dict']['parent'] = parent_ref
        tuple_parent_ref = {'dict': {}}
        tuple_parent_ref['dict']['tuple'] = (1, 2, tuple_parent_ref)
        attribute_cycle = {'dict': copy.deepcopy(self_ref)}

        self.assertFrozenRaisesValueError(
            [self_ref, parent_ref, tuple_parent_ref, attribute_cycle])

    def testInitCycles(self):
        """Ensure initialization fails if an attribute of input is cyclic."""
        inner_cyclic_list = [1, 2]
        cyclic_list = [3, inner_cyclic_list]
        inner_cyclic_list.append(cyclic_list)
        cyclic_tuple = tuple(cyclic_list)

        test_dict_cyclic_list = _test_dict_deepcopy()
        test_dict_cyclic_tuple = _test_dict_deepcopy()
        test_dict_cyclic_list['cyclic_list'] = cyclic_list
        test_dict_cyclic_tuple['dict']['cyclic_tuple'] = cyclic_tuple

        self.assertFrozenRaisesValueError(
            [test_dict_cyclic_list, test_dict_cyclic_tuple])

    def testInitDictInList(self):
        """Ensure initialization fails on dict and ConfigDict in lists/tuples."""
        list_containing_dict = {'list': [1, 2, 3, {'a': 4, 'b': 5}]}
        tuple_containing_dict = {'tuple': (1, 2, 3, {'a': 4, 'b': 5})}
        list_containing_cd = {'list': [1, 2, 3, _test_configdict()]}
        tuple_containing_cd = {'tuple': (1, 2, 3, _test_configdict())}
        fr_containing_list_containing_dict = {
            'fr': ml_collections.FieldReference([1, {'a': 2}])}

        self.assertFrozenRaisesValueError([
            list_containing_dict, tuple_containing_dict, list_containing_cd,
            tuple_containing_cd, fr_containing_list_containing_dict])

    def testInitFieldReferenceInList(self):
        """Ensure initialization fails on FieldReferences in lists/tuples."""
        list_containing_fr = {'list': [1, 2, 3, ml_collections.FieldReference(4)]}
        tuple_containing_fr = {
            'tuple': (1, 2, 3, ml_collections.FieldReference('a'))}

        self.assertFrozenRaisesValueError(
            [list_containing_fr, tuple_containing_fr])

    def testInitInvalidAttributeName(self):
        """Ensure initialization fails on attributes with invalid names."""
        dot_name = {'dot.name': None}
        immutable_name = {'__hash__': None}

        with self.assertRaises(ValueError):
            ml_collections.FrozenConfigDict(dot_name)
        with self.assertRaises(AttributeError):
            ml_collections.FrozenConfigDict(immutable_name)

    def testFieldReferenceResolved(self):
        """Tests that FieldReferences are resolved."""
        cfg = ml_collections.ConfigDict({'fr': ml_collections.FieldReference(1)})
        frozen_cfg = ml_collections.FrozenConfigDict(cfg)
        self.assertNotIsInstance(frozen_cfg._fields['fr'],
                                 ml_collections.FieldReference)
        hash(frozen_cfg)  # with FieldReference resolved, frozen_cfg is hashable

    def testFieldReferenceCycle(self):
        """Tests that FieldReferences may not contain reference cycles."""
        frozenset_fr = {'frozenset': frozenset({1, 2})}
        frozenset_fr['fr'] = ml_collections.FieldReference(
            frozenset_fr['frozenset'])
        list_fr = {'list': [1, 2]}
        list_fr['fr'] = ml_collections.FieldReference(list_fr['list'])

        cyclic_fr = {'a': 1}
        cyclic_fr['fr'] = ml_collections.FieldReference(cyclic_fr)
        cyclic_fr_parent = {'dict': {}}
        cyclic_fr_parent['dict']['fr'] = ml_collections.FieldReference(
            cyclic_fr_parent)

        # FieldReference is allowed to point to non-cyclic objects:
        _ = ml_collections.FrozenConfigDict(frozenset_fr)
        _ = ml_collections.FrozenConfigDict(list_fr)
        # But not cycles:
        self.assertFrozenRaisesValueError([cyclic_fr, cyclic_fr_parent])

    def testDeepCopy(self):
        """Ensure deepcopy works and does not affect equality."""
        fcd = _test_frozenconfigdict()
        fcd_deepcopy = copy.deepcopy(fcd)
        self.assertEqual(fcd, fcd_deepcopy)

    def testEquals(self):
        """Tests that __eq__() respects hidden mutability."""
        fcd = _test_frozenconfigdict()

        # First, ensure __eq__() returns False when comparing to other types
        self.assertNotEqual(fcd, (1, 2))
        self.assertNotEqual(fcd, fcd.as_configdict())

        list_to_tuple = _test_dict_deepcopy()
        list_to_tuple['list'] = tuple(list_to_tuple['list'])
        fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)

        set_to_frozenset = _test_dict_deepcopy()
        set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
        fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)

        self.assertNotEqual(fcd, fcd_list_to_tuple)
        # Because set == frozenset in Python:
        self.assertEqual(fcd, fcd_set_to_frozenset)

        # Items are not affected by hidden mutability
        self.assertCountEqual(fcd.items(), fcd_list_to_tuple.items())
        self.assertCountEqual(fcd.items(), fcd_set_to_frozenset.items())

    def testEqualsAsConfigDict(self):
        """Tests that eq_as_configdict respects hidden mutability but not type."""
        fcd = _test_frozenconfigdict()

        # First, ensure eq_as_configdict() returns True with an equal ConfigDict
        # but False for other types.
        self.assertFalse(fcd.eq_as_configdict([1, 2]))
        self.assertTrue(fcd.eq_as_configdict(fcd.as_configdict()))
        empty_fcd = ml_collections.FrozenConfigDict()
        self.assertTrue(empty_fcd.eq_as_configdict(ml_collections.ConfigDict()))

        # Now, ensure it has the same immutability detection as __eq__().
        list_to_tuple = _test_dict_deepcopy()
        list_to_tuple['list'] = tuple(list_to_tuple['list'])
        fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)

        set_to_frozenset = _test_dict_deepcopy()
        set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
        fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)

        self.assertFalse(fcd.eq_as_configdict(fcd_list_to_tuple))
        # Because set == frozenset in Python:
        self.assertTrue(fcd.eq_as_configdict(fcd_set_to_frozenset))

    def testHash(self):
        """Ensures __hash__() respects hidden mutability."""
        list_to_tuple = _test_dict_deepcopy()
        list_to_tuple['list'] = tuple(list_to_tuple['list'])

        self.assertEqual(
            hash(_test_frozenconfigdict()),
            hash(ml_collections.FrozenConfigDict(_test_dict_deepcopy())))
        self.assertNotEqual(
            hash(_test_frozenconfigdict()),
            hash(ml_collections.FrozenConfigDict(list_to_tuple)))

        # Ensure Python realizes FrozenConfigDict is hashable
        self.assertIsInstance(_test_frozenconfigdict(),
                              collections_abc.Hashable)

    def testUnhashableType(self):
        """Ensures __hash__() fails if FrozenConfigDict has unhashable value."""
        unhashable_fcd = ml_collections.FrozenConfigDict(
            {'unhashable': bytearray()})
        with self.assertRaises(TypeError):
            hash(unhashable_fcd)

    def testToDict(self):
        """Ensure to_dict() does not care about hidden mutability."""
        list_to_tuple = _test_dict_deepcopy()
        list_to_tuple['list'] = tuple(list_to_tuple['list'])
        self.assertEqual(
            _test_frozenconfigdict().to_dict(),
            ml_collections.FrozenConfigDict(list_to_tuple).to_dict())

    def testPickle(self):
        """Make sure FrozenConfigDict can be dumped and loaded with pickle."""
        fcd = _test_frozenconfigdict()
        locked_fcd = ml_collections.FrozenConfigDict(_test_configdict().lock())

        unpickled_fcd = pickle.loads(pickle.dumps(fcd))
        unpickled_locked_fcd = pickle.loads(pickle.dumps(locked_fcd))

        self.assertEqual(fcd, unpickled_fcd)
        self.assertEqual(locked_fcd, unpickled_locked_fcd)


if __name__ == '__main__':
    absltest.main()
# -*- coding: utf-8 -*-
"""Unit tests for the pagseguro validation helpers (email, CPF, CNPJ, dates)."""
import datetime

from pagseguro.utils import (
    is_valid_cpf,
    is_valid_cnpj,
    is_valid_email,
    parse_date,
)
from pagseguro.exceptions import PagSeguroValidationError
import pytest
from dateutil.tz import tzutc


def test_is_valid_email():
    """Valid addresses pass through; malformed ones raise PagSeguroValidationError."""
    valid = '<EMAIL>'
    valid2 = u'<EMAIL>'
    not_valid = '@asd.com'
    not_valid2 = 'bad'
    not_valid3 = u'user@росси́я'

    for bad_address in (not_valid, not_valid2, not_valid3):
        with pytest.raises(PagSeguroValidationError):
            is_valid_email(bad_address)

    assert is_valid_email(valid) == '<EMAIL>'
    assert is_valid_email(valid2) == u'<EMAIL>'


def test_parse_date():
    """parse_date reads DATETIME_FORMAT '%Y-%m-%dT%H:%M:%S' as a UTC-aware datetime."""
    date_str = '2016-10-10T10:10:10'
    expected = datetime.datetime(2016, 10, 10, 10, 10, 10, tzinfo=tzutc())
    assert parse_date(date_str) == expected


def test_is_valid_cpf():
    """CPF: punctuation is tolerated; junk, over-long, and bad check digits raise."""
    valid = '041.684.826-50'
    valid2 = '04168482650'
    bad = 'bla///'
    max_digits = '1111111111111111111111111'
    invalid_cpf = '040.684.826-50'

    for rejected in (bad, max_digits, invalid_cpf):
        with pytest.raises(PagSeguroValidationError):
            is_valid_cpf(rejected)

    assert is_valid_cpf(valid) == valid
    assert is_valid_cpf(valid2) == '04168482650'


def test_is_valid_cnpj():
    """CNPJ: accepts bare or punctuated form, returns digits only; rejects bad input."""
    valid = '31331052000174'
    valid2 = '72.168.117/0001-90'
    invalid = '///'
    digits = '1111111'
    wrong_number = '31331052000175'

    for rejected in (invalid, digits, wrong_number):
        with pytest.raises(PagSeguroValidationError):
            is_valid_cnpj(rejected)

    assert is_valid_cnpj(valid) == '31331052000174'
    assert is_valid_cnpj(valid2) == '72168117000190'
def test(iterations=10_000_000):
    """Micro-benchmark: repeatedly overwrite an existing dict key.

    Measures the cost of a dict store on a small dict with several keys
    (10 stores per iteration, manually unrolled).

    :param iterations: number of loop iterations; default matches the
        original hard-coded 1e7 bound, so existing callers are unchanged.
    :return: the dict after the run (returned for testability; the
        original returned None, which no caller used).
    """
    obj = {'xxx1': 1, 'xxx2': 2, 'xxx3': 4, 'xxx4': 4, 'foo': 123}
    # range() replaces the original `while i < 1e7` float-bound counter loop;
    # the iteration count is identical.
    for _ in range(iterations):
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
        obj['foo'] = 234
    return obj


if __name__ == "__main__":
    # Guarded so importing this module no longer runs the full benchmark.
    test()
from itertools import chain

import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals

from ..progress import Progress
from ..result import QueryResult


class NumpyQueryResult(QueryResult):
    """
    Stores query result from multiple blocks as numpy arrays.
    """

    def store(self, packet):
        """Accumulate rows/columns from one packet's data block, if any."""
        block = getattr(packet, 'block', None)
        if block is None:
            return

        if block.num_rows:
            if self.columnar:
                self.data.append(block.get_columns())
            else:
                self.data.extend(block.get_rows())
        elif not self.columns_with_types:
            # Header block contains no rows. Pick columns from it.
            self.columns_with_types = block.columns_with_types

    def get_result(self):
        """
        :return: stored query result.
        """
        for packet in self.packet_generator:
            self.store(packet)

        if not self.columnar:
            data = self.data
        else:
            data = []
            # Transpose to a list of columns, each column is list of chunks
            for column_chunks in zip(*self.data):
                # Concatenate chunks for each column
                first_chunk = column_chunks[0]
                if isinstance(first_chunk, np.ndarray):
                    merged = np.concatenate(column_chunks)
                elif isinstance(first_chunk, pd.Categorical):
                    merged = union_categoricals(column_chunks)
                else:
                    merged = tuple(chain.from_iterable(column_chunks))
                data.append(merged)

        if self.with_column_types:
            return data, self.columns_with_types
        return data


class NumpyProgressQueryResult(NumpyQueryResult):
    """
    Stores query result and progress information from multiple blocks.
    Provides iteration over query progress.
    """

    def __init__(self, *args, **kwargs):
        self.progress_totals = Progress()
        super(NumpyProgressQueryResult, self).__init__(*args, **kwargs)

    def __iter__(self):
        return self

    def __next__(self):
        # Yield running (rows, total_rows) totals for each progress packet;
        # data blocks encountered along the way are stored for get_result().
        while True:
            packet = next(self.packet_generator)
            progress_packet = getattr(packet, 'progress', None)
            if not progress_packet:
                self.store(packet)
                continue
            self.progress_totals.increment(progress_packet)
            return (
                self.progress_totals.rows,
                self.progress_totals.total_rows,
            )

    def get_result(self):
        # Read all progress packets.
        for _ in self:
            pass

        return super(NumpyProgressQueryResult, self).get_result()


class NumpyIterQueryResult(object):
    """
    Provides iteration over returned data by chunks (streaming by chunks).
    """

    def __init__(self, packet_generator, with_column_types=False):
        self.packet_generator = packet_generator
        self.with_column_types = with_column_types
        # Column types are prepended to the first yielded chunk only.
        self.first_block = True
        super(NumpyIterQueryResult, self).__init__()

    def __iter__(self):
        return self

    def __next__(self):
        packet = next(self.packet_generator)
        block = getattr(packet, 'block', None)
        if block is None:
            return []

        if not (self.first_block and self.with_column_types):
            return block.get_rows()

        self.first_block = False
        chunk = [block.columns_with_types]
        chunk.extend(block.get_rows())
        return chunk
""" LibriParty Dataset creation by using official metadata. Author ------ <NAME>, 2020 <NAME>, 2020 """<import_stmt>os<import_stmt>sys<import_stmt>speechbrain<as>sb<import_from_stmt>hyperpyyaml load_hyperpyyaml<import_from_stmt>speechbrain.utils.data_utils download_file<import_from_stmt>local.create_mixtures_from_metadata create_mixture<import_stmt>json<import_from_stmt>tqdm tqdm<line_sep>URL_METADATA=("https://www.dropbox.com/s/0u6x6ndyedb4rl7/LibriParty_metadata.zip?dl=1")<line_sep># Load hyperparameters file with command-line overrides params_file,run_opts,overrides=sb.core.parse_arguments(sys.argv[1:])<with_stmt>open(params_file)<as>fin<block_start>params=load_hyperpyyaml(fin overrides)<block_end>metadata_folder=params["metadata_folder"]<if_stmt><not>os.path.exists(metadata_folder)<block_start>os.makedirs(metadata_folder)<block_end># Download meta data from the web download_file(URL_METADATA metadata_folder+"/meta.zip" unpack=<true> dest_unpack=metadata_folder )<for_stmt>data_split ["train" "dev" "eval"]<block_start><with_stmt>open(os.path.join(metadata_folder data_split+".json") "r")<as>f<block_start>metadata=json.load(f)<block_end>print("Creating data for {} set".format(data_split))<line_sep>c_folder=os.path.join(params["out_folder"] data_split)<line_sep>os.makedirs(c_folder exist_ok=<true>)<for_stmt>sess tqdm(metadata.keys())<block_start>create_mixture(sess c_folder params metadata[sess])<block_end><block_end>
from typing import Any, Optional

import pyarrow as pa

from fugue.column.expressions import (
    ColumnExpr,
    _FuncExpr,
    _to_col,
    function,
)
from triad import Schema


def coalesce(*args: Any) -> ColumnExpr:
    """SQL ``COALESCE`` function.

    Arguments that are not :class:`~fugue.column.expressions.ColumnExpr`
    are converted to literal columns. Neither output type nor alias can
    be inferred for the result.

    :param args: columns or literal values to coalesce
    :return: the ``COALESCE`` column expression
    """
    return function("COALESCE", *[_to_col(a) for a in args])


def min(col: ColumnExpr) -> ColumnExpr:  # pylint: disable=redefined-builtin
    """SQL ``MIN`` function (aggregation).

    Output type and alias are inferred from ``col`` when possible; use
    ``cast``/``alias`` on the result to set them explicitly.

    :param col: the column to aggregate
    :return: the ``MIN`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _SameTypeUnaryAggFuncExpr("MIN", col)


def max(col: ColumnExpr) -> ColumnExpr:  # pylint: disable=redefined-builtin
    """SQL ``MAX`` function (aggregation).

    Output type and alias are inferred from ``col`` when possible; use
    ``cast``/``alias`` on the result to set them explicitly.

    :param col: the column to aggregate
    :return: the ``MAX`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _SameTypeUnaryAggFuncExpr("MAX", col)


def count(col: ColumnExpr) -> ColumnExpr:
    """SQL ``COUNT`` function (aggregation).

    Only the alias (not the type) can be inferred from ``col``.

    :param col: the column to count
    :return: the ``COUNT`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _UnaryAggFuncExpr("COUNT", col)


def count_distinct(col: ColumnExpr) -> ColumnExpr:
    """SQL ``COUNT DISTINCT`` function (aggregation).

    Only the alias (not the type) can be inferred from ``col``.

    :param col: the column whose distinct elements are counted
    :return: the ``COUNT(DISTINCT ...)`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _UnaryAggFuncExpr("COUNT", col, arg_distinct=True)


def avg(col: ColumnExpr) -> ColumnExpr:
    """SQL ``AVG`` function (aggregation).

    Only the alias (not the type) can be inferred from ``col``.

    :param col: the column to average
    :return: the ``AVG`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _UnaryAggFuncExpr("AVG", col)


def sum(col: ColumnExpr) -> ColumnExpr:  # pylint: disable=redefined-builtin
    """SQL ``SUM`` function (aggregation).

    Only the alias (not the type) can be inferred from ``col``.

    :param col: the column to sum
    :return: the ``SUM`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _UnaryAggFuncExpr("SUM", col)


def first(col: ColumnExpr) -> ColumnExpr:
    """SQL ``FIRST`` function (aggregation).

    Output type and alias are inferred from ``col`` when possible; use
    ``cast``/``alias`` on the result to set them explicitly.

    :param col: the column to take the first value of
    :return: the ``FIRST`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _SameTypeUnaryAggFuncExpr("FIRST", col)


def last(col: ColumnExpr) -> ColumnExpr:
    """SQL ``LAST`` function (aggregation).

    Output type and alias are inferred from ``col`` when possible; use
    ``cast``/``alias`` on the result to set them explicitly.

    :param col: the column to take the last value of
    :return: the ``LAST`` column expression
    """
    assert isinstance(col, ColumnExpr)
    return _SameTypeUnaryAggFuncExpr("LAST", col)


def is_agg(column: Any) -> bool:
    """Tell whether ``column`` is a column expression that contains at
    least one aggregation operation.

    :param column: the object to inspect
    :return: True only when ``column`` is a
      :class:`~fugue.column.expressions.ColumnExpr` containing
      aggregations; False for anything else
    """
    if isinstance(column, _UnaryAggFuncExpr):
        return True
    if not isinstance(column, _FuncExpr):
        return False
    # An expression aggregates if any positional or keyword child does.
    children = list(column.args) + list(column.kwargs.values())
    return any(is_agg(child) for child in children)


class _UnaryAggFuncExpr(_FuncExpr):
    """A single-argument aggregation function expression."""

    def __init__(self, func: str, col: ColumnExpr, arg_distinct: bool = False):
        super().__init__(func, col, arg_distinct=arg_distinct)

    def infer_alias(self) -> ColumnExpr:
        # Without an explicit output name, borrow the inferred alias of
        # the wrapped column.
        if self.output_name != "":
            return self
        return self.alias(self.args[0].infer_alias().output_name)

    def _copy(self) -> _FuncExpr:
        return _UnaryAggFuncExpr(self.func, *self.args, **self.kwargs)


class _SameTypeUnaryAggFuncExpr(_UnaryAggFuncExpr):
    """An aggregation whose output type matches its input column type."""

    def _copy(self) -> _FuncExpr:
        return _SameTypeUnaryAggFuncExpr(self.func, *self.args, **self.kwargs)

    def infer_type(self, schema: Schema) -> Optional[pa.DataType]:
        # An explicit cast wins; otherwise fall back to the input type.
        return self.as_type or self.args[0].infer_type(schema)
import unittest

from minos.common import MinosException
from minos.networks import MinosNetworkException


class TestExceptions(unittest.TestCase):
    """Checks on the networks exception hierarchy."""

    def test_type(self):
        # Network errors must remain catchable as generic Minos errors.
        self.assertTrue(issubclass(MinosNetworkException, MinosException))


if __name__ == "__main__":
    unittest.main()
# Copyright (c) 2020-2021, Lawrence Livermore National Security, LLC.
# See top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause

import setuptools
from codecs import open
from os import path

here = path.abspath(path.dirname(__file__))

# Use the README as the long description shown on PyPI.
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

# Load the version without importing the package itself, per
# https://packaging.python.org/guides/single-sourcing-package-version/
version = {}
with open("./caliperreader/version.py") as fp:
    exec(fp.read(), version)

setuptools.setup(
    name="caliper-reader",
    version=version["__version__"],
    description="A Python library for reading Caliper .cali files",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/LLNL/Caliper",
    author="<NAME>",
    author_email="<EMAIL>",
    license="BSD-3-Clause",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "License :: OSI Approved :: BSD License",
    ],
    packages=setuptools.find_packages(),
)
#! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Programming Fundamentals
Lesson: Python Part 2
Author: <NAME> <<EMAIL>>

common_vars.py
Shared sample data, imported by the other lesson examples to illustrate
code reuse.
"""

# Sample shape names used by the lesson examples.
shapes = ["square", "triangle", "circle"]

# A tiny library catalog: each book records its title, its shelf number,
# and whether it is currently available for checkout.
books = [
    {"title": "War and Peace", "shelf": 3, "available": True},
    {"title": "Hamlet", "shelf": 1, "available": False},
    {"title": "Harold and the Purple Crayon", "shelf": 2, "available": True},
]

# Sample color names used by the lesson examples.
colors = ["blue", "green", "red"]
# -*- coding: utf-8 -*-
from hamcrest import *

from test.base import BaseTestCase
from amplify.agent.common.util.text import (
    decompose_format,
    parse_line,
    parse_line_split,
)

__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"

COMBINED_FORMAT = (
    '$remote_addr - $remote_user [$time_local] "$request" '
    '$status $body_bytes_sent "$http_referer" "$http_user_agent"'
)

# Combined format extended with upstream timing/cache variables.
UPSTREAM_FORMAT = (
    COMBINED_FORMAT +
    ' rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
)

# Same as UPSTREAM_FORMAT but with the cs/ut variables swapped.
REORDERED_UPSTREAM_FORMAT = (
    COMBINED_FORMAT +
    ' rt=$request_time cs=$upstream_cache_status ut="$upstream_response_time"'
)

PYTHON_UA = 'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'

BASIC_LINE = (
    '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] '
    '"GET /basic_status HTTP/1.1" 200 110 "-" "' + PYTHON_UA + '"'
)

PROPFIND_LINE = (
    '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] '
    '"PROPFIND /basic_status HTTP/1.1" 200 110 "-" "' + PYTHON_UA + '"'
)

RAMBLER_PREFIX = (
    '1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 '
    '"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" '
)

UPSTREAM_LINE = RAMBLER_PREFIX + 'rt=0.010 ut="2.001, 0.345" cs=MISS'
EMPTY_UPSTREAM_LINE = RAMBLER_PREFIX + 'rt=0.010 cs=- ut="-"'
PART_EMPTY_UPSTREAM_LINE = RAMBLER_PREFIX + 'rt=0.010 ut="-" cs=MISS'


class UtilTextTestCase(BaseTestCase):
    def _assert_all_keys_parsed(self, keys, results):
        """Every expected key must be present and parsed to a value."""
        assert_that(results, not_none())
        for key in keys:
            assert_that(results, has_item(key))
            assert_that(results[key], not_none())

    def _parse(self, log_format, line):
        """Parse *line* with the trie-based parser and sanity-check it."""
        keys, trie = decompose_format(log_format)
        results = parse_line(line, keys=keys, trie=trie)
        self._assert_all_keys_parsed(keys, results)
        return results

    def _parse_split(self, log_format, line):
        """Parse *line* with the split-based parser and sanity-check it."""
        keys, _, non_key_patterns, first_value_is_key = decompose_format(
            log_format, full=True
        )
        results = parse_line_split(
            line,
            keys=keys,
            non_key_patterns=non_key_patterns,
            first_value_is_key=first_value_is_key,
        )
        self._assert_all_keys_parsed(keys, results)
        return results

    def test_decompose_format_regular(self):
        keys, trie, non_key_patterns, first_value_is_key = decompose_format(
            COMBINED_FORMAT, full=True
        )

        assert_that(keys, not_none())
        assert_that(trie, not_none())
        assert_that(non_key_patterns, not_none())
        assert_that(first_value_is_key, equal_to(True))

        assert_that(keys, equal_to([
            'remote_addr', 'remote_user', 'time_local', 'request',
            'status', 'body_bytes_sent', 'http_referer', 'http_user_agent'
        ]))
        assert_that(non_key_patterns, equal_to(
            [' - ', ' [', '] "', '" ', ' ', ' "', '" "', '"']
        ))

    def test_decompose_format_different(self):
        keys, trie, non_key_patterns, first_value_is_key = decompose_format(
            UPSTREAM_FORMAT, full=True
        )

        assert_that(keys, not_none())
        assert_that(trie, not_none())
        assert_that(non_key_patterns, not_none())
        assert_that(first_value_is_key, equal_to(True))

        assert_that(keys, equal_to([
            'remote_addr', 'remote_user', 'time_local', 'request',
            'status', 'body_bytes_sent', 'http_referer', 'http_user_agent',
            'request_time', 'upstream_response_time', 'upstream_cache_status'
        ]))
        assert_that(non_key_patterns, equal_to([
            ' - ', ' [', '] "', '" ', ' ', ' "', '" "',
            '" rt=', ' ut="', '" cs='
        ]))

    def test_parse_line(self):
        results = self._parse(COMBINED_FORMAT, BASIC_LINE)
        # check the last value to make sure complete parse
        assert_that(results['http_user_agent'], equal_to(PYTHON_UA))

    def test_parse_line_split(self):
        results = self._parse_split(COMBINED_FORMAT, BASIC_LINE)
        # check the last value to make sure complete parse
        assert_that(results['http_user_agent'], equal_to(PYTHON_UA))

    def test_parse_line_non_standard_http_method(self):
        results = self._parse(COMBINED_FORMAT, PROPFIND_LINE)
        # check the last value to make sure complete parse
        assert_that(results['http_user_agent'], equal_to(PYTHON_UA))

    def test_parse_line_split_non_standard_http_method(self):
        results = self._parse_split(COMBINED_FORMAT, PROPFIND_LINE)
        # check the last value to make sure complete parse
        assert_that(results['http_user_agent'], equal_to(PYTHON_UA))

    def test_parse_line_upstream_log_format(self):
        results = self._parse(UPSTREAM_FORMAT, UPSTREAM_LINE)
        # check the last value to make sure complete parse
        assert_that(results['upstream_cache_status'], equal_to('MISS'))
        # check some complicated values
        assert_that(results['request_time'], equal_to('0.010'))
        assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))

    def test_parse_line_split_upstream_log_format(self):
        results = self._parse_split(UPSTREAM_FORMAT, UPSTREAM_LINE)
        # check the last value to make sure complete parse
        assert_that(results['upstream_cache_status'], equal_to('MISS'))
        # check some complicated values
        assert_that(results['request_time'], equal_to('0.010'))
        assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))

    def test_parse_line_upstream_log_format_empty_upstreams(self):
        results = self._parse(REORDERED_UPSTREAM_FORMAT, EMPTY_UPSTREAM_LINE)
        # check the last value to make sure complete parse
        assert_that(results['upstream_response_time'], equal_to('-'))
        assert_that(results['upstream_cache_status'], equal_to('-'))

    def test_parse_line_split_upstream_log_format_empty_upstreams(self):
        results = self._parse_split(
            REORDERED_UPSTREAM_FORMAT, EMPTY_UPSTREAM_LINE
        )
        # check the last value to make sure complete parse
        assert_that(results['upstream_response_time'], equal_to('-'))
        assert_that(results['upstream_cache_status'], equal_to('-'))

    def test_parse_line_upstream_log_format_part_empty_upstreams(self):
        results = self._parse(UPSTREAM_FORMAT, PART_EMPTY_UPSTREAM_LINE)
        # check the last value to make sure complete parse
        assert_that(results['upstream_cache_status'], equal_to('MISS'))

    def test_parse_line_split_upstream_log_format_part_empty_upstreams(self):
        results = self._parse_split(UPSTREAM_FORMAT, PART_EMPTY_UPSTREAM_LINE)
        # check the last value to make sure complete parse
        assert_that(results['upstream_cache_status'], equal_to('MISS'))
import unittest
import uuid

from memory.client import MemoryClient

from hailtop.aiocloud.aiogoogle import GoogleStorageAsyncFS
from hailtop.config import get_user_config
from hailtop.utils import async_to_blocking

from gear.cloud_config import get_gcp_config

PROJECT = get_gcp_config().project


class BlockingMemoryClient:
    """Synchronous facade over the async ``MemoryClient``."""

    def __init__(self, gcs_project=None, fs=None, deploy_config=None,
                 session=None, headers=None, _token=None):
        self._client = MemoryClient(
            gcs_project, fs, deploy_config, session, headers, _token)
        async_to_blocking(self._client.async_init())

    def _get_file_if_exists(self, filename):
        return async_to_blocking(self._client._get_file_if_exists(filename))

    def read_file(self, filename):
        return async_to_blocking(self._client.read_file(filename))

    def write_file(self, filename, data):
        return async_to_blocking(self._client.write_file(filename, data))

    def close(self):
        return async_to_blocking(self._client.close())


class Tests(unittest.TestCase):
    def setUp(self):
        # Unique per-test GCS prefix so concurrent runs cannot collide.
        bucket_name = get_user_config().get('batch', 'bucket')
        token = uuid.uuid4()
        self.test_path = f'gs://{bucket_name}/memory-tests/{token}'
        self.fs = GoogleStorageAsyncFS(project=PROJECT)
        self.client = BlockingMemoryClient(fs=self.fs)
        self.temp_files = set()

    def tearDown(self):
        # Remove everything written under the per-test prefix.
        async_to_blocking(self.fs.rmtree(None, self.test_path))
        self.client.close()

    async def add_temp_file_from_string(self, name: str, str_value: bytes):
        """Write *str_value* to ``<test_path>/<name>`` and return the URL."""
        handle = f'{self.test_path}/{name}'

        async with await self.fs.create(handle) as f:
            await f.write(str_value)

        return handle

    def test_non_existent(self):
        for _ in range(3):
            self.assertIsNone(
                self.client._get_file_if_exists(f'{self.test_path}/nonexistent'))

    def test_small_write_around(self):
        async def read(url):
            async with await self.fs.open(url) as f:
                return await f.read()

        cases = [('empty_file', b''), ('null', b'\0'), ('small', b'hello world')]
        for file, data in cases:
            handle = async_to_blocking(self.add_temp_file_from_string(file, data))
            expected = async_to_blocking(read(handle))
            self.assertEqual(expected, data)

            # The cache is populated asynchronously, so poll a few times.
            attempts = 0
            cached = self.client._get_file_if_exists(handle)
            while cached is None and attempts < 10:
                cached = self.client._get_file_if_exists(handle)
                attempts += 1
            self.assertEqual(cached, expected)

    def test_small_write_through(self):
        cases = [('empty_file2', b''), ('null2', b'\0'), ('small2', b'hello world')]
        for file, data in cases:
            filename = f'{self.test_path}/{file}'
            self.client.write_file(filename, data)
            # A write-through must be visible in the cache immediately.
            cached = self.client._get_file_if_exists(filename)
            self.assertEqual(cached, data)
import librosa
import numpy as np

from . import base
from . import spectral


class OnsetStrength(base.Computation):
    """Compute a spectral flux onset strength envelope.

    Based on
    http://librosa.github.io/librosa/generated/librosa.onset.onset_strength.html

    Args:
        n_mels (int): Number of mel bands to generate.
    """

    def __init__(self, n_mels=128, parent=None, name=None):
        # One frame of left context is needed for the spectral-flux diff.
        super(OnsetStrength, self).__init__(
            left_context=1, right_context=0, parent=parent, name=name)
        self.n_mels = n_mels

    def compute(self, chunk, sampling_rate, corpus=None, utterance=None):
        # Mel power spectrogram of the chunk (frames along columns).
        power_spec = np.abs(spectral.stft_from_frames(chunk.data.T)) ** 2
        mel = np.abs(librosa.feature.melspectrogram(
            S=power_spec, n_mels=self.n_mels, sr=sampling_rate))
        mel_power = librosa.power_to_db(mel)

        # Per-frame spectral-flux onset strengths.
        oenv = librosa.onset.onset_strength(S=mel_power, center=False)

        # One feature per frame: transpose, then add a feature dimension.
        oenv = oenv.T.reshape(oenv.shape[0], -1)

        # Drop the context frames before returning.
        return oenv[chunk.left_context:oenv.shape[0] - chunk.right_context]
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited and the Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Building blocks for VDVAE."""
from typing import Optional, Tuple

import chex
from flax import linen as nn
import jax

# Number of convolutions in one ResBlock (bottleneck: 1x1, 3x3, 3x3, 1x1).
_NUM_CONV_LAYER_PER_BLOCK = 4


def get_vdvae_convolution(output_channels,
                          kernel_shape,
                          weights_scale=1.,
                          name=None,
                          precision=None):
  """Builds a 2D convolution.

  Args:
    output_channels: number of output channels.
    kernel_shape: shape of convolutional kernel.
    weights_scale: scale of initial weights in the convolution.
    name: name of the module.
    precision: jax precision.

  Returns:
    a nn.Conv2D.
  """
  # Truncated-normal fan-in initialization; `weights_scale` rescales it
  # (a scale of 0. makes the kernel start at exactly zero).
  kernel_init = nn.initializers.variance_scaling(
      scale=weights_scale,
      mode='fan_in',
      distribution='truncated_normal')
  # Stride-1 SAME convolution: spatial size is preserved.
  return nn.Conv(
      features=output_channels,
      kernel_size=kernel_shape,
      strides=(1, 1),
      padding='SAME',
      use_bias=True,
      kernel_init=kernel_init,
      name=name,
      precision=precision)


class ResBlock(nn.Module):
  """Residual block from the VDVAE paper.

  This block is made of four convolutions, followed by an optional residual
  connection and an optional average pooling to downsample the image.

  Compared to the paper, it uses the same gelu non-linearity but no batch
  normalization.

  It also accepts as an optional input an auxiliary batch of context vectors
  to be processed by 1x1 convolutions. This is typically useful to condition
  a VAE on an embedded context.
  """

  # Channel count used by the three inner convolutions.
  internal_channels: int
  # Channel count produced by the final convolution.
  output_channels: int
  # Spatial downsampling factor applied by the trailing average pool (1 = none).
  downsampling_rate: int = 1
  # If True, adds inputs to the convolutional stack's output (requires
  # matching channel counts).
  use_residual_connection: bool = False
  # Initial-weight scale of the last convolution only.
  last_weights_scale: float = 1.
  precision: Optional[jax.lax.Precision] = None

  @nn.compact
  def __call__(
      self,
      inputs,
      context_vectors=None,
  ):
    """Applies the res block to input images.

    Args:
      inputs: a rank-4 array of input images of shape (B, H, W, C).
      context_vectors: optional auxiliary inputs, typically used for
        conditioning. If set, they should be of rank 2, and their first
        (batch) dimension should match that of `inputs`. Their number of
        features is arbitrary. They will be reshaped from (B, D) to
        (B, 1, 1, D) and a 1x1 convolution will be applied to them.

    Returns:
      a the rank-4 output of the block.
    """
    if self.downsampling_rate < 1:
      raise ValueError('downsampling_rate should be >= 1, but got '
                       f'{self.downsampling_rate}.')

    def build_layers(inputs):
      """Build layers of the ResBlock given a batch of inputs."""
      resolution = inputs.shape[1]
      # At resolutions <= 2 a 3x3 kernel would mostly see padding, so all
      # four convolutions fall back to 1x1.
      if resolution > 2:
        kernel_shapes = ((1, 1), (3, 3), (3, 3), (1, 1))
      else:
        kernel_shapes = ((1, 1), (1, 1), (1, 1), (1, 1))
      conv_layers = []
      aux_conv_layers = []
      for layer_idx, kernel_shape in enumerate(kernel_shapes):
        is_last = layer_idx == _NUM_CONV_LAYER_PER_BLOCK - 1
        # Only the last convolution maps to `output_channels`, and only
        # it uses `last_weights_scale`.
        num_channels = (
            self.output_channels if is_last else self.internal_channels)
        weights_scale = self.last_weights_scale if is_last else 1.
        conv_layers.append(
            get_vdvae_convolution(
                num_channels,
                kernel_shape,
                weights_scale,
                name=f'c{layer_idx}',
                precision=self.precision))
        # Context convolutions are 1x1 and start at zero weights, so at
        # initialization the context has no effect on the outputs.
        aux_conv_layers.append(
            get_vdvae_convolution(
                num_channels, (1, 1),
                0.,
                name=f'aux_c{layer_idx}',
                precision=self.precision))
      return conv_layers, aux_conv_layers

    chex.assert_rank(inputs, 4)
    if inputs.shape[1] != inputs.shape[2]:
      raise ValueError('VDVAE only works with square images, but got '
                       f'rectangular images of shape {inputs.shape[1:3]}.')

    if context_vectors is not None:
      chex.assert_rank(context_vectors, 2)
      inputs_batch_dim = inputs.shape[0]
      aux_batch_dim = context_vectors.shape[0]
      if inputs_batch_dim != aux_batch_dim:
        raise ValueError('Context vectors batch dimension is incompatible '
                         'with inputs batch dimension. Got '
                         f'{aux_batch_dim} vs {inputs_batch_dim}.')
      # (B, D) -> (B, 1, 1, D) so the 1x1 context convolutions broadcast
      # over the spatial dimensions.
      context_vectors = context_vectors[:, None, None, :]

    conv_layers, aux_conv_layers = build_layers(inputs)

    outputs = inputs
    for conv, auxiliary_conv in zip(conv_layers, aux_conv_layers):
      outputs = conv(jax.nn.gelu(outputs))
      if context_vectors is not None:
        outputs += auxiliary_conv(context_vectors)

    if self.use_residual_connection:
      in_channels = inputs.shape[-1]
      out_channels = outputs.shape[-1]
      if in_channels != out_channels:
        raise AssertionError('Cannot apply residual connection because the '
                             'number of output channels differs from the '
                             'number of input channels: '
                             f'{out_channels} vs {in_channels}.')
      outputs += inputs

    if self.downsampling_rate > 1:
      # Average pooling with window == stride halves (etc.) H and W.
      shape = (self.downsampling_rate, self.downsampling_rate)
      outputs = nn.avg_pool(
          outputs, window_shape=shape, strides=shape, padding='VALID')

    return outputs
""" Discovery: broadcasts a query, attempting to discover all running RPyC servers over the local network/specific subnet. """<import_stmt>socket<import_stmt>select<import_stmt>struct<line_sep>__all__=["discover_servers"]<line_sep>UDP_DISCOVERY_PORT=18813<line_sep>QUERY_MAGIC="RPYC_QUERY"<line_sep>MAX_DGRAM_SIZE=100<def_stmt>discover_servers subnet="255.255.255.255" timeout=1<block_start>"""broadcasts a query and returns a list of (addr, port) of running servers"""<line_sep># broadcast s=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>s.setsockopt(socket.SOL_SOCKET socket.SO_BROADCAST 1)<line_sep>s.sendto(QUERY_MAGIC (subnet UDP_DISCOVERY_PORT))<line_sep># wait for replies replies=[]<while_stmt><true><block_start>rlist,dummy,dummy=select.select([s] [] [] timeout)<if_stmt><not>rlist<block_start><break><block_end>data,(addr port)=s.recvfrom(MAX_DGRAM_SIZE)<line_sep>rpyc_port,=struct.unpack("<H" data)<line_sep>replies.append((addr rpyc_port))<block_end><return>list(set(replies))<block_end>
<import_from_future_stmt> annotations<import_stmt>re<import_from_stmt>typing Union<import_stmt>warp.yul.ast<as>ast<import_from_stmt>warp.yul.AstVisitor AstVisitor<import_from_stmt>warp.yul.WarpException WarpException<class_stmt>AstParser<block_start><def_stmt>__init__ self text:str<block_start>self.lines=text.splitlines()<if_stmt>len(self.lines)<eq>0<block_start><raise>WarpException("Text should not be empty")<block_end>self.pos=0<block_end><def_stmt>parse_typed_name self<arrow>ast.TypedName<block_start>tabs=self.get_tabs()<line_sep>node_type_name=self.get_word(tabs)<assert_stmt>node_type_name<eq>"TypedName:" "This node should be of type TypedNode"<line_sep>self.pos<augadd>1<assert_stmt>self.get_tabs()<eq>tabs+1 "Wrong indentation"<line_sep>node_name,node_type=self.get_word(tabs+1).split(":")<line_sep>self.pos<augadd>1<line_sep><return>ast.TypedName(name=node_name type=node_type)<block_end><def_stmt>parse_literal self<arrow>ast.Literal<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs).startswith("Literal:") "This node should be of type Literal"<line_sep>value=self.get_word(tabs+8)<line_sep>self.pos<augadd>1<try_stmt><block_start>value=int(value)<block_end><except_stmt>ValueError<block_start><pass><block_end><return>ast.Literal(value=value)<block_end><def_stmt>parse_identifier self<arrow>ast.Identifier<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs).startswith("Identifier:") "This node should be of type Identifier"<line_sep>name=self.get_word(tabs+11)<line_sep>self.pos<augadd>1<line_sep><return>ast.Identifier(name=name)<block_end><def_stmt>parse_assignment self<arrow>ast.Assignment<block_start>tabs=self.get_tabs()<assert_stmt>(self.get_word(tabs)<eq>"Assignment:") "This node should be of type Assignment"<line_sep>self.pos<augadd>1<assert_stmt>self.get_word(tabs+1)<eq>"Variables:"<line_sep>self.pos<augadd>1<line_sep>variables_list=self.parse_list(tabs+1 
self.parse_identifier)<assert_stmt>self.get_word(tabs+1)<eq>"Value:"<line_sep>self.pos<augadd>1<line_sep><return>ast.Assignment(variable_names=variables_list value=self.parse_expression())<block_end><def_stmt>parse_function_call self<arrow>ast.FunctionCall<block_start>tabs=self.get_tabs()<assert_stmt>(self.get_word(tabs)<eq>"FunctionCall:") "This node should be of type FunctionCall"<line_sep>self.pos<augadd>1<line_sep><return>ast.FunctionCall(function_name=self.parse_identifier() arguments=self.parse_list(tabs self.parse_expression) )<block_end><def_stmt>parse_expression_statement self<arrow>ast.Statement<block_start>tabs=self.get_tabs()<assert_stmt>(self.get_word(tabs)<eq>"ExpressionStatement:") "This node should be of type ExpressionStatement"<line_sep>self.pos<augadd>1<line_sep><return>ast.ExpressionStatement(expression=self.parse_expression())<block_end><def_stmt>parse_variable_declaration self<arrow>ast.VariableDeclaration<block_start>tabs=self.get_tabs()<assert_stmt>(self.get_word(tabs)<eq>"VariableDeclaration:") "This node should be of type VariableDeclaration"<line_sep>self.pos<augadd>1<assert_stmt>self.get_tabs()<eq>tabs+1<assert_stmt>self.get_word(tabs+1)<eq>"Variables:"<line_sep>self.pos<augadd>1<line_sep>variables=self.parse_list(tabs+1 self.parse_typed_name)<assert_stmt>self.get_tabs()<eq>tabs+1<line_sep>word=self.get_word(tabs+1)<line_sep>self.pos<augadd>1<assert_stmt>word.startswith("Value")<if_stmt>word.endswith("None")<block_start>value=<none><block_end><else_stmt><block_start>value=self.parse_expression()<block_end><return>ast.VariableDeclaration(variables=variables value=value)<block_end><def_stmt>parse_block self<arrow>ast.Block<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Block:" "This node should be of type Block"<line_sep>self.pos<augadd>1<line_sep><return>ast.Block(statements=tuple(self.parse_list(tabs self.parse_statement)))<block_end><def_stmt>parse_function_definition 
self<arrow>ast.FunctionDefinition<block_start>tabs=self.get_tabs()<assert_stmt>(self.get_word(tabs)<eq>"FunctionDefinition:") "This node should be of type FunctionDefinition"<line_sep>self.pos<augadd>1<assert_stmt>self.get_tabs()<eq>tabs+1<and>self.get_word(tabs+1).startswith("Name:")<line_sep>fun_name=self.get_word(tabs+7)<line_sep>self.pos<augadd>1<assert_stmt>self.get_tabs()<eq>tabs+1<and>self.get_word(tabs+1)<eq>"Parameters:"<line_sep>self.pos<augadd>1<line_sep>params=self.parse_list(tabs+1 self.parse_typed_name)<assert_stmt>(self.get_tabs()<eq>tabs+1<and>self.get_word(tabs+1)<eq>"Return Variables:")<line_sep>self.pos<augadd>1<line_sep>returns=self.parse_list(tabs+1 self.parse_typed_name)<assert_stmt>self.get_tabs()<eq>tabs+1<and>self.get_word(tabs+1)<eq>"Body:"<line_sep>self.pos<augadd>1<line_sep>body=self.parse_block()<line_sep><return>ast.FunctionDefinition(name=fun_name parameters=params return_variables=returns body=body)<block_end><def_stmt>parse_if self<arrow>ast.If<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"If:" "This node should be of type If"<line_sep>self.pos<augadd>1<line_sep>condition=self.parse_expression()<line_sep>body=self.parse_block()<line_sep>else_body=<none><if_stmt>self.get_tabs()<g>tabs<block_start>else_body=self.parse_block()<block_end><return>ast.If(condition=condition body=body else_body=else_body)<block_end><def_stmt>parse_case self<arrow>ast.Case<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Case:" "This node should be of type Case"<line_sep>self.pos<augadd>1<try_stmt><block_start>value=self.parse_literal()<block_end><except_stmt>AssertionError<block_start><assert_stmt>(self.get_tabs()<eq>tabs+1<and>self.get_word(tabs+1)<eq>"Default") "The value must be a literal or None (when it's the default case)"<line_sep>value=<none><line_sep>self.pos<augadd>1<block_end><return>ast.Case(value=value body=self.parse_block())<block_end><def_stmt>parse_switch 
self<arrow>ast.Switch<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Switch:" "This node should be of type Switch"<line_sep>self.pos<augadd>1<line_sep><return>ast.Switch(expression=self.parse_expression() cases=self.parse_list(tabs self.parse_case) )<block_end><def_stmt>parse_for_loop self<arrow>ast.ForLoop<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"ForLoop:" "This node should be of type ForLoop"<line_sep>self.pos<augadd>1<line_sep><return>ast.ForLoop(pre=self.parse_block() condition=self.parse_expression() post=self.parse_block() body=self.parse_block() )<block_end><def_stmt>parse_break self<arrow>ast.Break<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Break" "This node should be of type Break"<line_sep>self.pos<augadd>1<line_sep><return>ast.Break()<block_end><def_stmt>parse_continue self<arrow>ast.Continue<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Continue" "This node should be of type Continue"<line_sep>self.pos<augadd>1<line_sep><return>ast.Continue()<block_end><def_stmt>parse_leave self<arrow>ast.Leave<block_start>tabs=self.get_tabs()<assert_stmt>self.get_word(tabs)<eq>"Leave" "This node should be of type Leave"<line_sep>self.pos<augadd>1<line_sep><return>ast.LEAVE<block_end><def_stmt>parse_node self<arrow>ast.Node<block_start>tabs=self.get_tabs()<line_sep>node_type_name=self.get_word(tabs).split(":")[0]<line_sep>parser_name=f"parse_{self.get_name(node_type_name)}"<line_sep>parser=getattr(self parser_name <none>)<if_stmt>parser<is><none><block_start><raise>WarpException("Wrong node type name!")<block_end><return>parser()<block_end><def_stmt>parse_statement self<arrow>ast.Statement<block_start>statements=["ExpressionStatement" "Assignment" "VariableDeclaration" "FunctionDefinition" "If" "Switch" "ForLoop" "Break" "Continue" "Leave" "Block" 
]<line_sep>tabs=self.get_tabs()<line_sep>node_type_name=self.get_word(tabs).split(":")[0]<assert_stmt>node_type_name<in>statements "Not a valid statement"<line_sep><return>ast.assert_statement(self.parse_node())<block_end><def_stmt>parse_expression self<arrow>ast.Expression<block_start>tabs=self.get_tabs()<line_sep>node_type_name=self.get_word(tabs).split(":")[0]<assert_stmt>node_type_name<in>["Literal" "Identifier" "FunctionCall" ] "Node type must be an expression"<line_sep><return>ast.assert_expression(self.parse_node())<block_end><def_stmt>parse_list self tabs parser<block_start>items=[]<while_stmt>self.pos<l>len(self.lines)<and>self.get_tabs()<g>tabs<block_start>item=parser()<line_sep>items.append(item)<block_end><return>items<block_end><def_stmt>get_tabs self<block_start>tabs=0<if_stmt>self.pos<l>len(self.lines)<block_start><for_stmt>c self.lines[self.pos]<block_start><if_stmt><not>c<eq>"\t"<block_start><break><block_end>tabs<augadd>1<block_end><else_stmt><block_start><raise>WarpException("Lines are not supposed to be filled only with tabs")<block_end><block_end><return>tabs<block_end><def_stmt>get_word self start:int<arrow>str<block_start><return>self.lines[self.pos][start:]<block_end><def_stmt>get_name self name<block_start>name="_".join(re.findall("[A-Z][^A-Z]*" name))<line_sep><return>name.lower()<block_end><block_end><class_stmt>YulPrinter(AstVisitor)<block_start><def_stmt>format self node:ast.Node tabs:int=0<arrow>str<block_start><return>self.visit(node tabs)<block_end><def_stmt>visit_typed_name self node:ast.TypedName tabs:int=0<arrow>str<block_start><return>f"{node.name}"<block_end><def_stmt>visit_literal self node:ast.Literal tabs:int=0<arrow>str<block_start><return>f"{node.value}"<block_end><def_stmt>visit_identifier self node:ast.Identifier tabs:int=0<arrow>str<block_start><return>f"{node.name}"<block_end><def_stmt>visit_assignment self node:ast.Assignment tabs:int=0<arrow>str<block_start>variables=", 
".join(self.visit_list(node.variable_names))<line_sep>value=self.visit(node.value 0)<line_sep><return>f"{variables} := {value}"<block_end><def_stmt>visit_function_call self node:ast.FunctionCall tabs:int=0<arrow>str<block_start>name=self.visit(node.function_name)<line_sep>args=", ".join(self.visit_list(node.arguments))<line_sep><return>f"{name}({args})"<block_end><def_stmt>visit_expression_statement self node:ast.ExpressionStatement tabs:int=0<arrow>str<block_start><return>self.visit(node.expression tabs)<block_end><def_stmt>visit_variable_declaration self node:ast.VariableDeclaration tabs:int=0<arrow>str<block_start>variables=", ".join(self.visit_list(node.variables))<line_sep>value=""<if_stmt>node.value<is><not><none><block_start>value=f" := {self.visit(node.value)}"<block_end><return>f"let {variables}{value}"<block_end><def_stmt>visit_block self node:ast.Block tabs:int=0<arrow>str<block_start>open_block="{"<line_sep>close_block="}"<if_stmt>self.is_short(node.statements)<block_start>statements="".join(self.visit_list(node.statements))<line_sep><return>" ".join([open_block statements close_block])<block_end>statements=self.visit_list(node.statements tabs+1)<line_sep>statements=["\t"<times>(tabs+1)+stmt<for>stmt statements]<line_sep>statements="\n".join(statements)<line_sep>close_block="\t"<times>tabs+close_block<line_sep>res="\n".join([open_block statements close_block])<line_sep><return>res<block_end><def_stmt>visit_function_definition self node:ast.FunctionDefinition tabs:int=0<arrow>str<block_start>parameters=", ".join(self.visit_list(node.parameters 0))<line_sep>ret_vars=", ".join(self.visit_list(node.return_variables 0))<line_sep>body=self.visit(node.body tabs)<line_sep>res=f"function {node.name}({parameters})"<if_stmt>len(node.return_variables)<g>0<block_start>res<augadd>f" -> {ret_vars}"<block_end>res<augadd>f" {body}"<line_sep><return>res<block_end><def_stmt>visit_if self node:ast.If tabs:int=0<arrow>str<block_start>res=f"if {self.visit(node.condition)} 
"<line_sep>res<augadd>self.visit(node.body tabs)<if_stmt>node.else_body<is><not><none><block_start>res<augadd>"\n"+"\t"<times>tabs+"else "<line_sep>res<augadd>self.visit(node.else_body tabs)<block_end><return>res<block_end><def_stmt>visit_case self node:ast.Case tabs:int=0<arrow>str<block_start>res="\t"<times>tabs<if_stmt>node.value<is><not><none><block_start>res<augadd>f"case {self.visit(node.value)} "<block_end><else_stmt><block_start>res<augadd>"default "<block_end>res<augadd>self.visit(node.body tabs)<line_sep><return>res<block_end><def_stmt>visit_switch self node:ast.Switch tabs:int=0<arrow>str<block_start>res=f"switch {self.visit(node.expression)}\n"<line_sep>res<augadd>"\n".join(self.visit_list(node.cases tabs))<line_sep><return>res<block_end><def_stmt>visit_for_loop self node:ast.ForLoop tabs:int=0<arrow>str<block_start>res="for "<line_sep>res<augadd>self.visit(node.pre tabs)<line_sep>res<augadd>f" {self.visit(node.condition)} "<line_sep>res<augadd>self.visit(node.post tabs)<line_sep>res<augadd>f"\n{self.visit(node.body tabs)}"<line_sep><return>res<block_end><def_stmt>visit_break self node:ast.Break tabs:int=0<arrow>str<block_start><return>"break"<block_end><def_stmt>visit_continue self node:ast.Continue tabs:int=0<arrow>str<block_start><return>"continue"<block_end><def_stmt>visit_leave self node:ast.Leave tabs:int=0<arrow>str<block_start><return>"leave"<block_end><def_stmt>is_short self stmts:tuple<arrow>bool<block_start><if_stmt>len(stmts)<eq>0<block_start><return><true><block_end><return>len(stmts)<eq>1<and>type(stmts[0]).__name__<not><in>["Block" "FunctionDefinition" "If" "Switch" "ForLoop" ]<block_end><block_end>
<import_stmt>theano<import_stmt>theano.tensor<as>T<import_from_stmt>theano.sandbox.rng_mrg MRG_RandomStreams<import_from_stmt>theano.tensor.nnet.conv conv2d<import_from_stmt>theano.tensor.signal.downsample max_pool_2d<import_from_stmt>theano.tensor.shared_randomstreams RandomStreams<import_stmt>numpy<as>np<import_from_stmt>toolbox *<import_from_stmt>modelbase *<import_stmt>itertools<class_stmt>FFN_ace(ModelSLBase)<block_start>""" Auto-classifier-encoder (Georgiev, 2015) """<def_stmt>save self<block_start><if_stmt><not>os.path.exists('savedmodels\\')<block_start>os.makedirs('savedmodels\\')<block_end>self.params.save(self.filename)<block_end><def_stmt>__init__ self data hp<block_start>super(FFN_ace self).__init__(self.__class__.__name__ data hp)<line_sep># batch_size: 10000; learning_rate = 0.0015; lr_halflife = 200, 500 self.epsilon=0.0001<line_sep>self.params=Parameters()<line_sep>self.shared_vars=Parameters()<line_sep>n_x=self.data['n_x']<line_sep>n_y=self.data['n_y']<line_sep>n_h1=1200<line_sep>n_h2=1000<line_sep>n_h3=800<line_sep>n_h4=800<line_sep>scale=hp.init_scale<if_stmt>hp.load_model<and>os.path.isfile(self.filename)<block_start>self.params.load(self.filename)<block_end><else_stmt><block_start><with_stmt>self.params<block_start>w_h=shared_normal((n_x n_h1) scale=scale)<line_sep>b_h=shared_zeros((n_h1 ))<line_sep>w_h2=shared_normal((n_h1 n_h2) scale=scale)<line_sep>b_h2=shared_zeros((n_h2 ))<line_sep>w_h3=shared_normal((n_h2 n_h3) scale=scale)<line_sep>b_h3=shared_zeros((n_h3 ))<line_sep>w_h4=shared_normal((n_h3 n_h4) scale=scale)<line_sep>b_h4=shared_zeros((n_h4 ))<line_sep>w_o=shared_normal((n_h4 n_y) scale=scale)<block_end><block_end><def_stmt>batch_norm h<block_start>m=T.mean(h axis=0 keepdims=<true>)<line_sep>std=T.sqrt(T.var(h axis=0 keepdims=<true>)+self.epsilon)<line_sep>h=(h-m)/std<line_sep><return>h<block_end><def_stmt>model X params p_drop_input p_drop_hidden<block_start>X_noise=X+gaussian(X.shape 
p_drop_input)<line_sep>h=batch_norm(dropout(rectify(T.dot(X_noise params.w_h)+params.b_h) p_drop_hidden))<line_sep># Dual reconstruction error phx=T.nnet.sigmoid(T.dot(h T.dot(h.T X_noise))/self.hp.batch_size)<line_sep>log_phx=T.nnet.binary_crossentropy(phx X_noise).sum()<line_sep>h2=dropout(rectify(T.dot(h params.w_h2)+params.b_h2) p_drop_hidden)<line_sep>h3=batch_norm(dropout(rectify(T.dot(h2 params.w_h3)+params.b_h3) p_drop_hidden))<line_sep>h4=dropout(rectify(T.dot(h3 params.w_h4)+params.b_h4) p_drop_hidden)<line_sep>py_x=softmax(T.dot(h4 params.w_o))<line_sep><return>[py_x log_phx]<block_end>noise_py_x,cost_recon=model(self.X self.params 0.2 0.5)<line_sep>cost_y2=-T.sum(self.Y<times>T.log(noise_py_x))<line_sep>cost=cost_y2+cost_recon<line_sep>pyx,_=model(self.X self.params 0. 0.)<line_sep>map_pyx=T.argmax(pyx axis=1)<line_sep>error_map_pyx=T.sum(T.neq(map_pyx T.argmax(self.Y axis=1)))<line_sep>self.compile(cost error_map_pyx)<block_end><block_end>
<import_stmt>pygame<line_sep>FPS=60<line_sep>BLOCK_SIZE=48<line_sep>COLOR_BACKGROUND=pygame.Color(0 0 0)<line_sep>
<import_stmt>openmc<import_stmt>openmc.deplete<import_stmt>matplotlib.pyplot<as>plt<line_sep>############################################################################### # Load previous simulation results ############################################################################### # Load geometry from statepoint statepoint='statepoint.100.h5'<with_stmt>openmc.StatePoint(statepoint)<as>sp<block_start>geometry=sp.summary.geometry<block_end># Load previous depletion results previous_results=openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")<line_sep>############################################################################### # Transport calculation settings ############################################################################### # Instantiate a Settings object, set all runtime parameters settings=openmc.Settings()<line_sep>settings.batches=100<line_sep>settings.inactive=10<line_sep>settings.particles=10000<line_sep># Create an initial uniform spatial source distribution over fissionable zones bounds=[-0.62992 -0.62992 -1 0.62992 0.62992 1]<line_sep>uniform_dist=openmc.stats.Box(bounds[:3] bounds[3:] only_fissionable=<true>)<line_sep>settings.source=openmc.source.Source(space=uniform_dist)<line_sep>entropy_mesh=openmc.RegularMesh()<line_sep>entropy_mesh.lower_left=[-0.39218 -0.39218 -1.e50]<line_sep>entropy_mesh.upper_right=[0.39218 0.39218 1.e50]<line_sep>entropy_mesh.dimension=[10 10 1]<line_sep>settings.entropy_mesh=entropy_mesh<line_sep>############################################################################### # Initialize and run depletion calculation ############################################################################### # Create depletion "operator" chain_file='./chain_simple.xml'<line_sep>op=openmc.deplete.Operator(geometry settings chain_file previous_results)<line_sep># Perform simulation using the predictor algorithm time_steps=[1.0 1.0 1.0 1.0 1.0]# days power=174# W/cm, for 2D simulations only (use W for 3D) 
integrator=openmc.deplete.PredictorIntegrator(op time_steps power timestep_units='d')<line_sep>integrator.integrate()<line_sep>############################################################################### # Read depletion calculation results ############################################################################### # Open results file results=openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")<line_sep># Obtain K_eff as a function of time time,keff=results.get_eigenvalue()<line_sep># Obtain U235 concentration as a function of time time,n_U235=results.get_atoms('1' 'U235')<line_sep># Obtain Xe135 capture reaction rate as a function of time time,Xe_capture=results.get_reaction_rate('1' 'Xe135' '(n,gamma)')<line_sep>############################################################################### # Generate plots ############################################################################### days=24<times>60<times>60<line_sep>plt.figure()<line_sep>plt.plot(time/days keff label="K-effective")<line_sep>plt.xlabel("Time (days)")<line_sep>plt.ylabel("Keff")<line_sep>plt.show()<line_sep>plt.figure()<line_sep>plt.plot(time/days n_U235 label="U 235")<line_sep>plt.xlabel("Time (days)")<line_sep>plt.ylabel("n U5 (-)")<line_sep>plt.show()<line_sep>plt.figure()<line_sep>plt.plot(time/days Xe_capture label="Xe135 capture")<line_sep>plt.xlabel("Time (days)")<line_sep>plt.ylabel("RR (-)")<line_sep>plt.show()<line_sep>plt.close('all')<line_sep>
""" STATEMENT Divide two integers without using multiplication, division and mod operator. CLARIFICATIONS - Do I have to handle 32-bit integer overflow? Yes, return the MAX_INT in that case. - Can the divisor be zero? Yes, return the MAX_INT. EXAMPLES 34/3 -> 11 COMMENTS - This solution is by tusizi in Leetcode (picked up from https://discuss.leetcode.com/topic/8714/clear-python-code) """<def_stmt>divide dividend divisor<block_start>""" :type dividend: int :type divisor: int :rtype: int """<line_sep>sign=(dividend<l>0)<is>(divisor<l>0)<line_sep>dividend,divisor=abs(dividend) abs(divisor)<line_sep>INT_MIN,INT_MAX=-2147483648 2147483647<if_stmt>(<not>divisor)<or>(dividend<l>INT_MIN<and>divisor<eq>-1)<block_start><return>INT_MAX<block_end>to_return=0<while_stmt>dividend<ge>divisor<block_start>temp,i=divisor 1<while_stmt>dividend<ge>temp<block_start>dividend<augsub>temp<line_sep>to_return<augadd>i<line_sep>i<auglshift>1<line_sep>temp<auglshift>1<block_end><block_end><if_stmt><not>sign<block_start>to_return=-to_return<block_end><return>min(max(INT_MIN to_return) INT_MAX)<block_end>
<import_from_stmt>...app.models App<import_from_stmt>...webhook.event_types WebhookEventType<def_stmt>test_qs_for_event_type payment_app<block_start>qs=App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)<assert_stmt>len(qs)<eq>1<assert_stmt>qs[0]<eq>payment_app<block_end><def_stmt>test_qs_for_event_type_no_payment_permissions payment_app<block_start>payment_app.permissions.first().delete()<line_sep>qs=App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)<assert_stmt>len(qs)<eq>0<block_end><def_stmt>test_qs_for_event_type_inactive_app payment_app<block_start>payment_app.is_active=<false><line_sep>payment_app.save()<line_sep>qs=App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)<assert_stmt>len(qs)<eq>0<block_end><def_stmt>test_qs_for_event_type_no_webhook_event payment_app<block_start>webhook=payment_app.webhooks.first()<line_sep>event=webhook.events.filter(event_type=WebhookEventType.PAYMENT_AUTHORIZE).first()<line_sep>event.delete()<line_sep>qs=App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)<assert_stmt>len(qs)<eq>0<block_end><def_stmt>test_qs_for_event_type_inactive_webhook payment_app<block_start>webhook=payment_app.webhooks.first()<line_sep>webhook.is_active=<false><line_sep>webhook.save()<line_sep>qs=App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)<assert_stmt>len(qs)<eq>0<block_end>
<import_stmt>sys<import_from_stmt>ctypes *<def_stmt>test_getattr <block_start><class_stmt>Stuff(Union)<block_start>_fields_=[('x' c_char) ('y' c_int)]<block_end>stuff=Stuff()<line_sep>stuff.y=ord('x')|(ord('z')<lshift>24)<if_stmt>sys.byteorder<eq>'little'<block_start><assert_stmt>stuff.x<eq>b'x'<block_end><else_stmt><block_start><assert_stmt>stuff.x<eq>b'z'<block_end><block_end><def_stmt>test_union_of_structures <block_start><class_stmt>Stuff(Structure)<block_start>_fields_=[('x' c_int)]<block_end><class_stmt>Stuff2(Structure)<block_start>_fields_=[('x' c_int)]<block_end><class_stmt>UnionofStuff(Union)<block_start>_fields_=[('one' Stuff) ('two' Stuff2)]<block_end>u=UnionofStuff()<line_sep>u.one.x=3<assert_stmt>u.two.x<eq>3<block_end>
""" Helios Election Workflows """<import_from_stmt>helios.datatypes LDObjectContainer<class_stmt>WorkflowObject(LDObjectContainer)<block_start><pass><block_end>
"""route.py Linux parsers for the following commands: * route """<line_sep># python <import_stmt>re<line_sep># metaparser <import_from_stmt>genie.metaparser MetaParser<import_from_stmt>genie.metaparser.util.schemaengine Schema Any Optional<import_from_stmt>netaddr IPAddress IPNetwork<line_sep># ======================================================= # Schema for 'route' # ======================================================= <class_stmt>RouteSchema(MetaParser)<block_start>"""Schema for route"""<line_sep># Destination Gateway Genmask Flags Metric Ref Use Iface # 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 wlo1 schema={'routes':{Any():{# 'destination' 'mask':{Any():{'nexthop':{Any():{# index: 1, 2, 3, etc 'interface':str Optional('flags'):str Optional('gateway'):str Optional('metric'):int Optional('ref'):int Optional('use'):int Optional('scope'):str Optional('proto'):str Optional('src'):str Optional('broadcast'):bool Optional('table'):str Optional('local'):bool}}}}}}}<block_end># ======================================================= # Parser for 'route' # ======================================================= <class_stmt>Route(RouteSchema)<block_start>"""Parser for * route * route -4 -n * route -4n * route -n4 * route -n -4 """<line_sep>cli_command=['route' 'route {flag}']<def_stmt>cli self flag=<none> output=<none><block_start><if_stmt>output<is><none><block_start>cmd=self.cli_command[0]<if_stmt>flag<in>['-4 -n' '-4n' '-n4']<block_start>command=self.cli_command[1].replace('{flag}' flag)<block_end>out=self.device.execute(cmd)<block_end><else_stmt><block_start>out=output<block_end># Destination Gateway Genmask Flags Metric Ref Use Iface # 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1 p1=re.compile(r'(?P<destination>[a-z0-9\.\:]+)'<concat>' +(?P<gateway>[a-z0-9\.\:_]+)'<concat>' +(?P<mask>[a-z0-9\.\:]+)'<concat>' +(?P<flags>[a-zA-Z]+)'<concat>' +(?P<metric>(\d+))'<concat>' +(?P<ref>(\d+))'<concat>' +(?P<use>(\d+))'<concat>' +(?P<interface>\S+)')<line_sep># Initializes 
the Python dictionary variable parsed_dict={}<line_sep># Defines the "for" loop, to pattern match each line of output <for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1 m=p1.match(line)<if_stmt>m<block_start><if_stmt>'routes'<not><in>parsed_dict<block_start>parsed_dict.setdefault('routes' {})<block_end>group=m.groupdict()<line_sep>destination=group['destination']<line_sep>mask=group['mask']<line_sep>index_dict={}<for_stmt>str_k ['interface' 'flags' 'gateway']<block_start>index_dict[str_k]=group[str_k]<block_end><for_stmt>int_k ['metric' 'ref' 'use']<block_start>index_dict[int_k]=int(group[int_k])<block_end><if_stmt>destination<in>parsed_dict['routes']<block_start><if_stmt>mask<in>parsed_dict['routes'][destination]['mask']<block_start>parsed_dict['routes'][destination]['mask'][mask].setdefault('nexthop' {index+1:index_dict})<block_end><else_stmt><block_start>index=1<line_sep>parsed_dict['routes'][destination]['mask'].setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end><block_end><else_stmt><block_start>index=1<line_sep>parsed_dict['routes'].setdefault(destination {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end><continue><block_end><block_end><return>parsed_dict<block_end><block_end># ======================================================= # Parser for 'netstat -rn' # ======================================================= <class_stmt>ShowNetworkStatusRoute(Route RouteSchema)<block_start>"""Parser for * netstat -rn """<line_sep>cli_command=['netstat -rn']<def_stmt>cli self output=<none><block_start><if_stmt>output<is><none><block_start>cmd=self.cli_command[0]<line_sep>out=self.device.execute(cmd)<block_end><else_stmt><block_start>out=output<block_end><return>super().cli(output=out)<block_end><block_end># ===================================================== # Parser for ip route show table all # 
===================================================== <class_stmt>IpRouteShowTableAll(RouteSchema)<block_start>""" Parser for * ip route show table all """<line_sep>cli_command=['ip route show table all']<def_stmt>cli self output=<none><block_start><if_stmt>output<is><none><block_start>cmd=self.cli_command[0]<line_sep>out=self.device.execute(cmd)<block_end><else_stmt><block_start>out=output<block_end># default via 192.168.1.1 dev enp7s0 proto dhcp metric 100 p1=re.compile(r'default via (?P<gateway>[a-z0-9\.\:]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' proto (?P<proto>[a-z]+)'<concat>' metric (?P<metric>[\d]+)')<line_sep># 169.254.0.0/16 dev enp7s0 scope link metric 1000 p2=re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' scope (?P<scope>\w+)'<concat>' metric (?P<metric>[\d]+)')<line_sep># 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 p3=re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' proto (?P<proto>\w+)'<concat>' scope (?P<scope>\w+)'<concat>' src (?P<src>[a-z0-9\.\:\/]+)')<line_sep># 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown p4=re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' proto (?P<proto>\w+)'<concat>' scope (?P<scope>\w+)'<concat>' src (?P<src>[a-z0-9\.\:\/]+)'<concat>' linkdown ')<line_sep># 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100 p5=re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' proto (?P<proto>\w+)'<concat>' scope (?P<scope>\w+)'<concat>' src (?P<src>[a-z0-9\.\:\/]+)'<concat>' metric (?P<metric>[\d]+)')<line_sep># broadcast 127.0.0.0 dev lo table local proto kernel scope link src 127.0.0.1 p6=re.compile(r'broadcast (?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' table (?P<table>\w+)'<concat>' proto (?P<proto>\w+)'<concat>' 
scope (?P<scope>\w+)'<concat>' src (?P<src>[a-z0-9\.\:\/]+)')<line_sep># local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70 p7=re.compile(r'local (?P<destination>[a-z0-9\.\:\/]+)'<concat>' dev (?P<device>[a-z0-9\.\-]+)'<concat>' table (?P<table>\w+)'<concat>' proto (?P<proto>\w+)'<concat>' scope (?P<scope>\w+)'<concat>' src (?P<src>[a-z0-9\.\:\/]+)')<line_sep># Initializes the Python dictionary variable parsed_dict={}<line_sep># Defines the "for" loop, to pattern match each line of output <for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># default via 192.168.1.1 dev enp7s0 proto dhcp metric 100 m=p1.match(line)<if_stmt>m<block_start><if_stmt>'routes'<not><in>parsed_dict<block_start>parsed_dict.setdefault('routes' {})<block_end>group=m.groupdict()<line_sep>gateway=group['gateway']<line_sep>interface=group['device']<line_sep>metric=int(group['metric'])<if_stmt>gateway<block_start>parsed_dict['routes']={'0.0.0.0':{'mask':{'0.0.0.0':{'nexthop':{1:{'gateway':gateway 'interface':interface 'metric':metric}}}}}}<block_end><block_end># 169.254.0.0/16 dev enp7s0 scope link metric 1000 m=p2.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>metric=int(group['metric'])<line_sep>scope=group['scope']<line_sep>index_dict={'interface':interface 'scope':scope 'metric':metric}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end># 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1 
m=p3.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>scope=group['scope']<line_sep>proto=group['proto']<line_sep>src=group['src']<line_sep>index_dict={'interface':interface 'scope':scope 'proto':proto 'src':src}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end># 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown m=p4.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>scope=group['scope']<line_sep>proto=group['proto']<line_sep>src=group['src']<line_sep>index_dict={'interface':interface 'scope':scope 'proto':proto 'src':src}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end># 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100 m=p5.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>scope=group['scope']<line_sep>proto=group['proto']<line_sep>metric=group['metric']<line_sep>src=group['src']<line_sep>index_dict={'interface':interface 'scope':scope 'proto':proto 'src':src 'metric':metric}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end># broadcast 127.0.0.0 dev lo table local proto kernel scope link 
src 127.0.0.1 m=p6.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>scope=group['scope']<line_sep>proto=group['proto']<line_sep>src=group['src']<line_sep>table=group['table']<line_sep>index_dict={'interface':interface 'scope':scope 'proto':proto 'src':src 'broadcast':<true> 'table':table}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end># local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70 m=p7.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>destination=IPNetwork(group['destination'])<line_sep>mask=str(destination.netmask)<line_sep>destination_addr=str(destination.ip)<line_sep>interface=group['device']<line_sep>scope=group['scope']<line_sep>proto=group['proto']<line_sep>src=group['src']<line_sep>table=group['table']<line_sep>index_dict={'interface':interface 'scope':scope 'proto':proto 'src':src 'local':<true> 'table':table}<line_sep>index=1<line_sep>parsed_dict['routes'].setdefault(destination_addr {}).setdefault('mask' {}).setdefault(mask {}).setdefault('nexthop' {index:index_dict})<block_end><block_end><return>parsed_dict<block_end><block_end>
import json
import datetime
import requests
from nameko.web.handlers import http
from nameko.timer import timer
from statsd import StatsClient
from circuitbreaker import circuit


class DemoChassisService:
    """Demo HTTP service illustrating the service-chassis pattern:
    statsd timing metrics plus circuit breakers around failure-prone
    endpoints."""

    name = "demo_chassis_service"

    # NOTE(review): statsd host/port are hard-coded; presumably fine for a demo.
    statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')

    @http('GET', '/health')
    @statsd.timer('health')
    def health(self, _request):
        """Liveness probe: return the current UTC timestamp as JSON."""
        return json.dumps({'ok': str(datetime.datetime.utcnow())})

    @http('GET', '/external')
    @circuit(failure_threshold=5, expected_exception=ConnectionError)
    @statsd.timer('external')
    def external_request(self, _request):
        """Proxy a call to an external API; the circuit opens after five
        consecutive ConnectionErrors."""
        response = requests.get('https://jsonplaceholder.typicode.com/posts/1')
        return json.dumps({'code': response.status_code, 'body': response.text})

    @http('GET', '/error')
    @circuit(failure_threshold=5, expected_exception=ZeroDivisionError)
    @statsd.timer('http_error')
    def error_http_request(self, _request):
        """Deliberately failing endpoint used to demonstrate the breaker.

        BUG FIX: nameko's @http entrypoint invokes the handler with the
        request object, so the `_request` parameter was required; without it
        the call raised TypeError instead of the ZeroDivisionError the
        circuit breaker is configured to trip on.  The 1/0 below raises
        ZeroDivisionError on purpose.
        """
        return json.dumps({1 / 0})


class HealthCheckService:
    """Timer-driven service that polls DemoChassisService's /health endpoint."""

    name = "health_check_service"
    statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')

    @timer(interval=10)
    @statsd.timer('check_demo_service')
    def check_demo_service(self):
        """Poll the demo service every 10 seconds and print its status."""
        response = requests.get('http://0.0.0.0:8000/health')
        print("DemoChassisService HEALTH CHECK: status_code {}, response: {}".format(
            response.status_code, response.text))
"""Tests for frontend's FAST detector class. Authors: <NAME> """<import_stmt>unittest<import_stmt>tests.frontend.detector.test_detector_base<as>test_detector_base<import_from_stmt>gtsfm.frontend.detector.fast Fast<class_stmt>TestFast(test_detector_base.TestDetectorBase)<block_start>"""Test class for FAST detector class in frontend. All unit test functions defined in TestDetectorBase are run automatically. """<def_stmt>setUp self<block_start>super().setUp()<line_sep>self.detector=Fast()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
"""Unit tests for pynlpir's cli.py file."""<import_stmt>os<import_stmt>shutil<import_stmt>stat<import_stmt>unittest<try_stmt><block_start><import_from_stmt>urllib.error URLError<import_from_stmt>urllib.request urlopen<block_end><except_stmt>ImportError<block_start><import_from_stmt>urllib2 URLError urlopen<block_end><import_from_stmt>click.testing CliRunner<import_from_stmt>pynlpir cli<line_sep>TEST_DIR=os.path.abspath(os.path.dirname(__file__))<line_sep>LICENSE_FILE=os.path.join(TEST_DIR 'data' 'NLPIR.user')<def_stmt>can_reach_github <block_start>"""Check if we can reach GitHub's website."""<try_stmt><block_start>urlopen('http://github.com')<line_sep><return><true><block_end><except_stmt>URLError<block_start><return><false><block_end><block_end>@unittest.skipIf(can_reach_github()<is><false> 'Unable to reach GitHub')<class_stmt>TestCLI(unittest.TestCase)<block_start>"""Unit tests for the PyNLPIR CLI."""<def_stmt>setUp self<block_start>self.runner=CliRunner()<block_end><def_stmt>tearDown self<block_start>self.runner=<none><block_end><def_stmt>test_initial_license_download self<block_start>"""Tests that an initial license download works correctly."""<with_stmt>self.runner.isolated_filesystem()<block_start>result=self.runner.invoke(cli.cli ('update' '-d.'))<line_sep>self.assertEqual(0 result.exit_code)<line_sep>self.assertEqual('License updated.\n' result.output)<block_end><block_end><def_stmt>test_license_update self<block_start>"Test that a regular license update works correctly."<concat>""<with_stmt>self.runner.isolated_filesystem()<block_start>shutil.copyfile(LICENSE_FILE os.path.basename(LICENSE_FILE))<line_sep>result=self.runner.invoke(cli.cli ('update' '-d.'))<line_sep>self.assertEqual(0 result.exit_code)<line_sep>self.assertEqual('License updated.\n' result.output)<line_sep>result=self.runner.invoke(cli.cli ('update' '-d.'))<line_sep>self.assertEqual(0 result.exit_code)<line_sep>self.assertEqual('Your license is already up-to-date.\n' 
result.output)<block_end><block_end><def_stmt>test_license_write_fail self<block_start>"""Test tha writing a license file fails appropriately."""<with_stmt>self.runner.isolated_filesystem()<block_start>cwd=os.getcwd()<line_sep>os.chmod(cwd stat.S_IREAD)<with_stmt>self.assertRaises((IOError OSError))<block_start>cli.update_license_file(cwd)<block_end><block_end><block_end><block_end>
# Set random number generator np.random.seed(2020)<line_sep># Initialize step_end, n, t_range, v and i step_end=int(t_max/dt)<line_sep>n=50<line_sep>t_range=np.linspace(0 t_max num=step_end)<line_sep>v_n=el<times>np.ones([n step_end])<line_sep>i=i_mean<times>(1+0.1<times>(t_max/dt)<power>(0.5)<times>(2<times>np.random.random([n step_end])-1))<line_sep># Loop for step_end - 1 steps <for_stmt>step range(1 step_end)# Compute v_n <block_start>v_n[: step]=v_n[: step-1]+(dt/tau)<times>(el-v_n[: step-1]+r<times>i[: step])<block_end># Plot figure <with_stmt>plt.xkcd()<block_start>plt.figure()<line_sep>plt.title('Multiple realizations of $V_m$')<line_sep>plt.xlabel('time (s)')<line_sep>plt.ylabel('$V_m$ (V)')<line_sep>plt.plot(t_range v_n.T 'k' alpha=0.3)<line_sep>plt.show()<block_end>
class PaginatorOptions:
    """Pagination and sorting options for a query.

    Attributes:
        page_number: page index, or None when not paginating.
        page_size: number of rows per page, or None when not paginating.
        sort_column: column to sort by; defaults to 'id' (descending)
            when not supplied.
        sort_descending: sort direction accompanying ``sort_column``.

    Raises:
        AssertionError: if only one of page_number/page_size is given.
    """

    def __init__(self, page_number: int, page_size: int,
                 sort_column: str = None, sort_descending: bool = None):
        self.sort_column = sort_column
        self.sort_descending = sort_descending
        self.page_number = page_number
        self.page_size = page_size

        # BUG FIX: the original check
        #   (page_number is not None and page_size) or
        #   (page_number is not None and not page_size)
        # reduces to just `page_number is not None`, so a page_number with no
        # page_size slipped through despite the error message.  Enforce what
        # the message says: the two are specified together (or neither is).
        assert (page_number is None) == (page_size is None), \
            'Specify both page_number and page_size'

        # Default ordering: descending by primary key when no sort column
        # was requested.
        if not sort_column:
            self.sort_column = 'id'
            self.sort_descending = True


__all__ = ['PaginatorOptions']
"""Sigv4 Signing Support"""<line_sep># Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy # of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS # OF ANY KIND, express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_stmt>boto3<import_stmt>botocore<import_stmt>json<def_stmt>sigv4_auth method host path querys body headers<block_start>"Adds authorization headers for sigv4 to headers parameter."<line_sep>endpoint=host.replace('https://' '').replace('http://' '')<line_sep>_api_id,_service,region,_domain=endpoint.split('.' maxsplit=3)<line_sep>request_parameters='&'.join([f"{k}={v}"<for>k,v querys])<line_sep>url=f"{host}{path}?{request_parameters}"<line_sep>session=botocore.session.Session()<line_sep>request=botocore.awsrequest.AWSRequest(method=method url=url data=json.dumps(body)<if>body<else><none>)<line_sep>botocore.auth.SigV4Auth(session.get_credentials() "execute-api" region).add_auth(request)<line_sep>prepared_request=request.prepare()<line_sep>headers['host']=endpoint.split('/' maxsplit=1)[0]<for_stmt>k,value prepared_request.headers.items()<block_start>headers[k]=value<block_end><block_end>
# We can transition on native options using this
# //command_line_option:<option-name> syntax
_BUILD_SETTING = "//command_line_option:test_arg"

def _test_arg_transition_impl(settings, attr):
    """Transition implementation: force --test_arg to ["new arg"]."""
    _ignore = (settings, attr)
    return {_BUILD_SETTING: ["new arg"]}

# Starlark transition that rewrites --test_arg for the attached attribute.
_test_arg_transition = transition(
    implementation = _test_arg_transition_impl,
    inputs = [],
    outputs = [_BUILD_SETTING],
)

def _test_transition_rule_impl(ctx):
    """Wrap the transitioned test's executable so this rule can be a test."""
    # We need to copy the executable because starlark doesn't allow
    # providing an executable not created by the rule
    executable_src = ctx.executable.actual_test
    executable_dst = ctx.actions.declare_file(ctx.label.name)
    ctx.actions.run_shell(
        tools = [executable_src],
        outputs = [executable_dst],
        command = "cp %s %s" % (executable_src.path, executable_dst.path),
    )

    # actual_test resolves to a one-element list because its attribute
    # carries a (split-capable) transition.
    runfiles = ctx.attr.actual_test[0][DefaultInfo].default_runfiles
    return [DefaultInfo(runfiles = runfiles, executable = executable_dst)]

transition_rule_test = rule(
    implementation = _test_transition_rule_impl,
    attrs = {
        # The wrapped test target, built under the --test_arg transition.
        "actual_test": attr.label(cfg = _test_arg_transition, executable = True),
        # Mandatory allowlist attribute for rules using Starlark transitions.
        "_allowlist_function_transition": attr.label(
            default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
        ),
    },
    test = True,
)

def test_arg_cc_test(name, **kwargs):
    """Macro: declare a cc_test plus a wrapper that runs it with --test_arg set."""
    cc_test_name = name + "_native_test"
    transition_rule_test(
        name = name,
        actual_test = ":%s" % cc_test_name,
    )
    native.cc_test(name = cc_test_name, **kwargs)
# -*- coding: utf-8 -*-
"""MySQL backend for Django with connection pooling.

(Translated from the original Chinese docstring.)
Reading the django.db.backends.mysql.base source shows that Django does not
pool its MySQL connections: every database operation opens a new connection
and closes it after the query.  Worse, setting the officially documented
CONN_MAX_AGE option to reuse connections means each new connection is *not*
closed after the query but held open indefinitely, so under high concurrency
the server easily hits "too many connections".  This module therefore
overrides the MySQL backend to implement a connection pool.
"""
from django.core.exceptions import ImproperlyConfigured
import queue
import threading

try:
    import MySQLdb as Database
except ImportError as err:
    raise ImproperlyConfigured('Error loading MySQLdb module.\n'
                               'Did you install mysqlclient?') from err

from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as _DatabaseWrapper

# Pool size used when the DATABASES entry carries no DB_POOL_SIZE key.
DEFAULT_DB_POOL_SIZE = 5


class DatabaseWrapper(_DatabaseWrapper):
    """Pooled replacement for Django's MySQL DatabaseWrapper.

    (Translated:) Never set CONN_MAX_AGE together with this backend;
    otherwise connections are not promptly released back to the pool after
    use, which ends up blocking the pool.
    """

    # alias -> ConnectPool, shared class-wide across wrapper instances.
    connect_pools = {}
    pool_size = None
    # Guards lazy pool creation plus connection checkout/return.
    mutex = threading.Lock()

    def get_new_connection(self, conn_params):
        """Check a connection out of this alias's pool, creating it lazily."""
        with self.mutex:
            # Read the DB_POOL_SIZE option from the DATABASES settings dict.
            if not self.pool_size:
                self.pool_size = self.settings_dict.get('DB_POOL_SIZE') or DEFAULT_DB_POOL_SIZE
            if self.alias not in self.connect_pools:
                self.connect_pools[self.alias] = ConnectPool(conn_params, self.pool_size)
            return self.connect_pools[self.alias].get_connection()

    def _close(self):
        """Release the connection back to the pool instead of closing it.

        (Translated:) Overrides the original close behaviour so that once a
        query finishes the connection is returned to the pool.
        """
        with self.mutex:
            if self.connection is not None:
                with self.wrap_database_errors:
                    return self.connect_pools[self.alias].release_connection(self.connection)


class ConnectPool(object):
    """A simple bounded pool of MySQLdb connections for one database alias."""

    def __init__(self, conn_params,
                 pool_size):
        self.conn_params = conn_params    # kwargs forwarded to Database.connect
        self.pool_size = pool_size        # cap on connections ever created
        self.connect_count = 0            # connections created so far
        self.connects = queue.Queue()     # idle connections awaiting reuse

    def get_connection(self):
        """Hand out a connection.

        Creates a fresh connection while under the size cap; otherwise
        blocks until a released connection becomes available.
        """
        if self.connect_count < self.pool_size:
            self.connect_count = self.connect_count + 1
            return Database.connect(**self.conn_params)
        conn = self.connects.get()
        try:
            # (Translated:) Verify the connection is still alive; removing
            # this check is faster, but keeping it is recommended.
            conn.ping()
        except Exception:
            conn = Database.connect(**self.conn_params)
        return conn

    def release_connection(self, conn):
        """Put a connection back into the idle queue for reuse."""
        self.connects.put(conn)
# ----------------------------------------------------------------------------
#  CLASSES: nightly
#
#  Test Case:  missingdata.py
#
#  Tests:      missing data
#
#  Programmer: <NAME>
#  Date:       Thu Jan 19 09:49:15 PST 2012
#
#  Modifications:
#
# ----------------------------------------------------------------------------
# NOTE(review): OpenDatabase, AddPlot, Test, pjoin, data_path, Exit, etc. are
# presumably injected by the VisIt test harness running this script -- they
# are not defined here.


def SetTheView():
    # Shared 2D view: raise the bottom of the viewport so annotations fit.
    v = GetView2D()
    v.viewportCoords = (0.02, 0.98, 0.25, 1)
    SetView2D(v)


def test0(datapath):
    """Baseline checks: plot variables containing missing data, then pick."""
    TestSection("Missing data")
    OpenDatabase(pjoin(datapath, "earth.nc"))
    AddPlot("Pseudocolor", "height")
    DrawPlots()
    SetTheView()
    Test("missingdata_0_00")
    # Cycle through the other variables of the same database.
    ChangeActivePlotsVar("carbon_particulates")
    Test("missingdata_0_01")
    ChangeActivePlotsVar("seatemp")
    Test("missingdata_0_02")
    ChangeActivePlotsVar("population")
    Test("missingdata_0_03")
    # Pick on higher zone numbers to make sure pick works.
    PickByNode(domain=0, element=833621)
    TestText("missingdata_0_04", GetPickOutput())
    DeleteAllPlots()


def test1(datapath):
    """Missing data flowing through expressions (scalar and color vector)."""
    TestSection("Expressions and missing data")
    OpenDatabase(pjoin(datapath, "earth.nc"))
    DefineScalarExpression("meaningless", "carbon_particulates + seatemp")
    AddPlot("Pseudocolor", "meaningless")
    DrawPlots()
    SetTheView()
    Test("missingdata_1_00")
    DeleteAllPlots()
    DefineVectorExpression("color", "color(red,green,blue)")
    AddPlot("Truecolor", "color")
    DrawPlots()
    ResetView()
    SetTheView()
    Test("missingdata_1_01")
    DefineVectorExpression("color2", "color(population*0.364,green,blue)")
    ChangeActivePlotsVar("color2")
    # Zoom to a fixed window so the baseline image comparison is stable.
    v1 = GetView2D()
    v1.viewportCoords = (0.02, 0.98, 0.02, 0.98)
    v1.windowCoords = (259.439, 513.299, 288.93, 540)  # 25.466)
    SetView2D(v1)
    Test("missingdata_1_02")


def main():
    # Run both test groups against the bundled NetCDF sample data.
    datapath = data_path("netcdf_test_data")
    test0(datapath)
    test1(datapath)


main()
Exit()
import FWCore.ParameterSet.Config as cms

# EDProducer 'HEBRecHitGPUtoSoA' consuming the collection produced by
# 'HEBRecHitGPUProd' via the HEBRecHitGPUTok input tag.
# NOTE(review): presumably converts HEB rec-hits from the GPU product into
# an SoA layout (inferred from the module names) -- confirm against the
# C++ plugin.
HEBRecHitGPUtoSoAProd = cms.EDProducer('HEBRecHitGPUtoSoA',
                                       HEBRecHitGPUTok=cms.InputTag('HEBRecHitGPUProd'))
# -*- coding: utf-8 -*- <import_from_stmt>validator Validator<class_stmt>VimVint(Validator)<block_start>__filetype__='vim'<line_sep>checker='vint'<line_sep>args='-w --no-color'<line_sep>regex=r""" .+?: (?P<lnum>\d+): (?P<col>\d+): \s(?P<text>.+)"""<block_end>
# # Integrating `pytket` into Qiskit software # In this tutorial, we will focus on: # - Using `pytket` for compilation or providing devices/simulators within Qiskit workflows; # - Adapting Qiskit code to use `pytket` directly. # This example assumes some familiarity with the Qiskit algorithms library. We have chosen a small variational quantum eigensolver (VQE) for our example, but the same principles apply to a wide range of quantum algorithms. # # To run this example, you will need `pytket-qiskit`, as well as the separate `qiskit-optimization` package. You will also need IBMQ credentials stored on your local machine. # # Qiskit has risen to prominence as the most popular platform for the development of quantum software, providing an open source, full-stack solution with a large feature list and extensive examples from the developers and community. For many researchers who have already invested in building a large codebase built on top of Qiskit, the idea of switching entirely to a new platform can look like a time-sink and may require reversion to take advantage of the new tools that get regularly added to Qiskit. # # The interoperability provided by `pytket-qiskit` allows Qiskit users to start taking advantage of some of the unique features of `pytket` without having to completely rewrite their software. # Let's take as an example an ansatz for computing the ground-state energy of a hydrogen molecule. 
from qiskit.opflow.primitive_ops import PauliSumOp

# Two-qubit H2 Hamiltonian expressed as a weighted sum of Pauli strings.
H2_op = PauliSumOp.from_list(
    [
        ("II", -1.052373245772859),
        ("IZ", 0.39793742484318045),
        ("ZI", -0.39793742484318045),
        ("ZZ", -0.01128010425623538),
        ("XX", 0.18093119978423156),
    ]
)

# First let's use qiskit's NumPyEigensolver to compute the exact answer:

from qiskit.algorithms import NumPyEigensolver

es = NumPyEigensolver(k=1)
exact_result = es.compute_eigenvalues(H2_op).eigenvalues[0].real
print("Exact result:", exact_result)

# The following function will attempt to find an approximation to this using
# VQE, given a qiskit QuantumInstance on which to run circuits:

from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit.library import EfficientSU2


def vqe_solve(op, maxiter, quantum_instance):
    """Approximate the minimum eigenvalue of `op` with VQE.

    op: operator whose ground-state energy is sought.
    maxiter: iteration budget handed to the SPSA optimizer.
    quantum_instance: qiskit QuantumInstance used to execute circuits.
    """
    optimizer = SPSA(maxiter=maxiter)
    ansatz = EfficientSU2(op.num_qubits, entanglement="linear")
    vqe = VQE(ansatz=ansatz, optimizer=optimizer, quantum_instance=quantum_instance)
    return vqe.compute_minimum_eigenvalue(op).eigenvalue


# We will run this on a pytket `IBMQEmulatorBackend`. This is a noisy
# simulator whose characteristics match those of the real device, in this
# case "ibmq_belem" (a 5-qubit machine). The characteristics are retrieved
# from the device when the backend is constructed, so we must first load our
# IBMQ account. Circuits will be compiled to match the connectivity of the
# device and simulated using a basic noise model
# [constructed from the device parameters](https://qiskit.org/documentation/apidoc/aer_noise.html).

from pytket.extensions.qiskit import IBMQEmulatorBackend
from qiskit import IBMQ

IBMQ.load_account()
b_emu = IBMQEmulatorBackend("ibmq_belem", hub="ibm-q", group="open", project="main")

# Most qiskit algorithms require a qiskit `QuantumInstance` as input; this in
# turn is constructed from a `qiskit.providers.Backend`. The `TketBackend`
# class wraps a pytket backend as a `qiskit.providers.Backend`.

from pytket.extensions.qiskit.tket_backend import TketBackend
from qiskit.utils import QuantumInstance

qis_backend = TketBackend(b_emu)
qi = QuantumInstance(qis_backend, shots=8192, wait=0.1)

# Note that we could have used any other pytket shots backend instead of
# `b_emu` here. The `pytket` extension modules provide an interface to a wide
# variety of devices and simulators from different quantum software platforms.
#
# We can now run the VQE algorithm. In this example we use only 50 iterations,
# but greater accuracy may be achieved by increasing this number:

print("VQE result:", vqe_solve(H2_op, 50, qi))

# Another way to improve the accuracy of results is to apply optimisations to
# the circuit in an attempt to reduce the overall noise. When we construct our
# qiskit backend, we can pass in a pytket compilation pass as an additional
# parameter. There is a wide range of options here; we recommend the
# device-specific default compilation pass, provided by each tket backend.
# This pass will ensure that all the hardware constraints of the device are
# met. We can enable tket's most aggressive optimisation level by setting the
# parameter `optimisation_level=2`.

qis_backend2 = TketBackend(b_emu, b_emu.default_compilation_pass(optimisation_level=2))
qi2 = QuantumInstance(qis_backend2, shots=8192, wait=0.1)

# Let's run the optimisation again:

print("VQE result (with optimisation):", vqe_solve(H2_op, 50, qi2))

# These are small two-qubit circuits, so the improvement may be small, but
# with larger, more complex circuits, the reduction in noise from compilation
# will make a greater difference and allow VQE experiments to converge with
# fewer iterations.
import os

from .config_voc import *  # noqa
from .exps.darknet19_exp1 import *  # noqa


def mkdir(path, max_depth=3):
    """Create `path`, creating up to `max_depth - 1` missing ancestors first."""
    parent, child = os.path.split(path)
    if not os.path.exists(parent) and max_depth > 1:
        mkdir(parent, max_depth - 1)
    if not os.path.exists(path):
        os.mkdir(path)


# input and output size
############################
# BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24 (it was
# only an alias for the builtin `int`), so `dtype=np.int` now raises
# AttributeError; use the builtin directly -- the resulting dtype is the same.
multi_scale_inp_size = [
    np.array([320, 320], dtype=int),
    np.array([352, 352], dtype=int),
    np.array([384, 384], dtype=int),
    np.array([416, 416], dtype=int),
    np.array([448, 448], dtype=int),
    np.array([480, 480], dtype=int),
    np.array([512, 512], dtype=int),
    np.array([544, 544], dtype=int),
    np.array([576, 576], dtype=int),
    # np.array([608, 608], dtype=int),
]  # w, h
# Output feature-map sizes: the network downsamples each input size by 32.
multi_scale_out_size = [size / 32 for size in multi_scale_inp_size]  # w, h

inp_size = np.array([416, 416], dtype=int)  # w, h
out_size = inp_size / 32


# for display
############################
def _to_color(indx, base):
    """ return (b, r, g) tuple"""
    base2 = base * base
    b = 2 - indx / base2
    r = 2 - (indx % base2) / base
    g = 2 - (indx % base2) % base
    return b * 127, r * 127, g * 127


# One distinct color per class; `num_classes` comes from the wildcard imports.
base = int(np.ceil(pow(num_classes, 1. / 3)))
colors = [_to_color(x, base) for x in range(num_classes)]

# detection config
############################
thresh = 0.3

# dir config
############################
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
TRAIN_DIR = os.path.join(MODEL_DIR, 'training')
TEST_DIR = os.path.join(MODEL_DIR, 'testing')

# h5_fname, pretrained_fname, exp_name and imdb_test are supplied by the
# experiment config imported above.
trained_model = os.path.join(MODEL_DIR, h5_fname)
pretrained_model = os.path.join(MODEL_DIR, pretrained_fname)
train_output_dir = os.path.join(TRAIN_DIR, exp_name)
test_output_dir = os.path.join(TEST_DIR, imdb_test, h5_fname)
mkdir(train_output_dir, max_depth=3)
mkdir(test_output_dir, max_depth=4)

rand_seed = 1024
use_tensorboard = True
log_interval = 50
disp_interval = 10
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_from_stmt>http HTTPStatus<import_from_stmt>unittest mock<import_stmt>ddt<import_from_stmt>cinder.api.contrib volume_encryption_metadata<import_from_stmt>cinder.api.contrib volume_tenant_attribute<import_from_stmt>cinder.api.v3 volumes<import_from_stmt>cinder exception<import_from_stmt>cinder.policies volumes<as>volume_policies<import_from_stmt>cinder.tests.unit.api fakes<as>fake_api<import_from_stmt>cinder.tests.unit fake_constants<import_from_stmt>cinder.tests.unit.policies base<import_from_stmt>cinder.tests.unit.policies test_base<import_from_stmt>cinder.tests.unit utils<as>test_utils<import_from_stmt>cinder.volume api<as>volume_api<line_sep># TODO(yikun): The below policy test cases should be added: # * HOST_ATTRIBUTE_POLICY # * MIG_ATTRIBUTE_POLICY <class_stmt>VolumePolicyTests(test_base.CinderPolicyTests)<block_start><def_stmt>test_admin_can_create_volume self<block_start>admin_context=self.admin_context<line_sep>path='/v3/%(project_id)s/volumes'%{'project_id':admin_context.project_id}<line_sep>body={"volume":{"size":1}}<line_sep>response=self._get_request_response(admin_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end><def_stmt>test_nonadmin_user_can_create_volume 
self<block_start>user_context=self.user_context<line_sep>path='/v3/%(project_id)s/volumes'%{'project_id':user_context.project_id}<line_sep>body={"volume":{"size":1}}<line_sep>response=self._get_request_response(user_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end><def_stmt>test_admin_can_create_volume_from_image self<block_start>admin_context=self.admin_context<line_sep>path='/v3/%(project_id)s/volumes'%{'project_id':admin_context.project_id}<line_sep>body={"volume":{"size":1 "image_id":fake_constants.IMAGE_ID}}<line_sep>response=self._get_request_response(admin_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end><def_stmt>test_nonadmin_user_can_create_volume_from_image self<block_start>user_context=self.user_context<line_sep>path='/v3/%(project_id)s/volumes'%{'project_id':user_context.project_id}<line_sep>body={"volume":{"size":1 "image_id":fake_constants.IMAGE_ID}}<line_sep>response=self._get_request_response(user_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end>@mock.patch.object(volume_api.API 'get_volume')<def_stmt>test_admin_can_show_volumes self mock_volume# Make sure administrators are authorized to list volumes <block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(admin_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>self.assertEqual(response.json_body['volume']['id'] volume.id)<block_end>@mock.patch.object(volume_api.API 'get_volume')<def_stmt>test_owner_can_show_volumes self mock_volume# Make sure owners are authorized to list their volumes 
<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(user_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>self.assertEqual(response.json_body['volume']['id'] volume.id)<block_end>@mock.patch.object(volume_api.API 'get_volume')<def_stmt>test_owner_cannot_show_volumes_for_others self mock_volume# Make sure volumes are only exposed to their owners <block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(non_owner_context path 'GET')<line_sep># NOTE(lbragstad): Technically, this user isn't supposed to see this # volume, because they didn't create it and it lives in a different # project. Does cinder return a 404 in cases like this? Or is a 403 # expected? 
self.assertEqual(HTTPStatus.NOT_FOUND response.status_int)<block_end><def_stmt>test_admin_can_get_all_volumes_detail self# Make sure administrators are authorized to list volumes <block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context)<line_sep>path='/v3/%(project_id)s/volumes/detail'%{'project_id':admin_context.project_id}<line_sep>response=self._get_request_response(admin_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volumes'][0]<line_sep>self.assertEqual(volume.id res_vol['id'])<block_end><def_stmt>test_owner_can_get_all_volumes_detail self# Make sure owners are authorized to list volumes <block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context)<line_sep>path='/v3/%(project_id)s/volumes/detail'%{'project_id':user_context.project_id}<line_sep>response=self._get_request_response(user_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volumes'][0]<line_sep>self.assertEqual(volume.id res_vol['id'])<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_admin_can_update_volumes self mock_volume<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>body={"volume":{"name":"update_name"}}<line_sep>response=self._get_request_response(admin_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_can_update_volumes self 
mock_volume<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>body={"volume":{"name":"update_name"}}<line_sep>response=self._get_request_response(user_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_update_volumes_for_others self mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>body={"volume":{"name":"update_name"}}<line_sep>response=self._get_request_response(non_owner_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_can_delete_volumes self mock_volume<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(user_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_admin_can_delete_volumes self mock_volume<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':admin_context.project_id 
'volume_id':volume.id}<line_sep>response=self._get_request_response(admin_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.ACCEPTED response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_delete_volumes_for_others self mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(non_owner_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end>@mock.patch.object(volume_api.API 'get_volume')<def_stmt>test_admin_can_show_tenant_id_in_volume self mock_volume# Make sure administrators are authorized to show tenant_id <block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(admin_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volume']<line_sep>self.assertEqual(admin_context.project_id res_vol['os-vol-tenant-attr:tenant_id'])<block_end>@mock.patch.object(volume_api.API 'get_volume')<def_stmt>test_owner_can_show_tenant_id_in_volume self mock_volume# Make sure owners are authorized to show tenant_id in volume <block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context)<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(user_context path 
'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volume']<line_sep>self.assertEqual(user_context.project_id res_vol['os-vol-tenant-attr:tenant_id'])<block_end><def_stmt>test_admin_can_show_tenant_id_in_volume_detail self# Make sure admins are authorized to show tenant_id in volume detail <block_start>admin_context=self.admin_context<line_sep>self._create_fake_volume(admin_context)<line_sep>path='/v3/%(project_id)s/volumes/detail'%{'project_id':admin_context.project_id}<line_sep>response=self._get_request_response(admin_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volumes'][0]<line_sep># Make sure owners are authorized to show tenant_id self.assertEqual(admin_context.project_id res_vol['os-vol-tenant-attr:tenant_id'])<block_end><def_stmt>test_owner_can_show_tenant_id_in_volume_detail self# Make sure owners are authorized to show tenant_id in volume detail <block_start>user_context=self.user_context<line_sep>self._create_fake_volume(user_context)<line_sep>path='/v3/%(project_id)s/volumes/detail'%{'project_id':user_context.project_id}<line_sep>response=self._get_request_response(user_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_vol=response.json_body['volumes'][0]<line_sep># Make sure owners are authorized to show tenant_id self.assertEqual(user_context.project_id res_vol['os-vol-tenant-attr:tenant_id'])<block_end><def_stmt>test_admin_can_create_metadata self<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>body={"metadata":{"k1":"v1"}}<line_sep>response=self._get_request_response(admin_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.OK 
response.status_int)<block_end><def_stmt>test_admin_can_get_metadata self<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(admin_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_meta=response.json_body['metadata']<line_sep>self.assertIn('k' res_meta)<line_sep>self.assertEqual('v' res_meta['k'])<block_end><def_stmt>test_admin_can_update_metadata self<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':admin_context.project_id 'volume_id':volume.id}<line_sep>body={"metadata":{"k":"v2"}}<line_sep>response=self._get_request_response(admin_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_meta=response.json_body['metadata']<line_sep>self.assertIn('k' res_meta)<line_sep>self.assertEqual('v2' res_meta['k'])<block_end><def_stmt>test_admin_can_delete_metadata self<block_start>admin_context=self.admin_context<line_sep>volume=self._create_fake_volume(admin_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s'%{'project_id':admin_context.project_id 'volume_id':volume.id 'key':'k'}<line_sep>response=self._get_request_response(admin_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<block_end><def_stmt>test_owner_can_create_metadata self<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':user_context.project_id 
'volume_id':volume.id}<line_sep>body={"metadata":{"k1":"v1"}}<line_sep>response=self._get_request_response(user_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<block_end><def_stmt>test_owner_can_get_metadata self<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(user_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_meta=response.json_body['metadata']<line_sep>self.assertIn('k' res_meta)<line_sep>self.assertEqual('v' res_meta['k'])<block_end><def_stmt>test_owner_can_update_metadata self<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':user_context.project_id 'volume_id':volume.id}<line_sep>body={"metadata":{"k":"v2"}}<line_sep>response=self._get_request_response(user_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<line_sep>res_meta=response.json_body['metadata']<line_sep>self.assertIn('k' res_meta)<line_sep>self.assertEqual('v2' res_meta['k'])<block_end><def_stmt>test_owner_can_delete_metadata self<block_start>user_context=self.user_context<line_sep>volume=self._create_fake_volume(user_context metadata={"k":"v"})<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s'%{'project_id':user_context.project_id 'volume_id':volume.id 'key':'k'}<line_sep>response=self._get_request_response(user_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.OK response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_create_metadata_for_others self 
mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context metadata={"k":"v"})<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>body={"metadata":{"k1":"v1"}}<line_sep>response=self._get_request_response(non_owner_context path 'POST' body=body)<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_get_metadata_for_others self mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context metadata={"k":"v"})<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>response=self._get_request_response(non_owner_context path 'GET')<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_update_metadata_for_others self mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context metadata={"k":"v"})<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata'%{'project_id':non_owner_context.project_id 'volume_id':volume.id}<line_sep>body={"metadata":{"k":"v2"}}<line_sep>response=self._get_request_response(non_owner_context path 'PUT' body=body)<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end>@mock.patch.object(volume_api.API 'get')<def_stmt>test_owner_cannot_delete_metadata_for_others self 
mock_volume<block_start>owner_context=self.user_context<line_sep>non_owner_context=self.other_user_context<line_sep>volume=self._create_fake_volume(owner_context metadata={"k":"v"})<line_sep>mock_volume.return_value=volume<line_sep>path='/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s'%{'project_id':non_owner_context.project_id 'volume_id':volume.id 'key':'k'}<line_sep>response=self._get_request_response(non_owner_context path 'DELETE')<line_sep>self.assertEqual(HTTPStatus.FORBIDDEN response.status_int)<block_end><block_end>@ddt.ddt<class_stmt>VolumesPolicyTest(base.BasePolicyTest)<block_start>authorized_readers=['legacy_admin' 'legacy_owner' 'system_admin' 'project_admin' 'project_member' 'project_reader' 'project_foo' ]<line_sep>unauthorized_readers=['system_member' 'system_reader' 'system_foo' 'other_project_member' 'other_project_reader' ]<line_sep>authorized_members=['legacy_admin' 'legacy_owner' 'system_admin' 'project_admin' 'project_member' 'project_reader' 'project_foo' ]<line_sep>unauthorized_members=['system_member' 'system_reader' 'system_foo' 'other_project_member' 'other_project_reader' ]<line_sep>create_authorized_users=['legacy_admin' 'legacy_owner' 'system_admin' 'project_admin' 'project_member' 'project_reader' 'project_foo' # The other_* users are allowed because we don't have any check # mechanism in the code to validate this, these are validated on # the WSGI layer 'other_project_member' 'other_project_reader' ]<line_sep>create_unauthorized_users=['system_member' 'system_reader' 'system_foo' ]<line_sep># Basic policy test is without enforcing scope (which cinder doesn't # yet support) and deprecated rules enabled. 
<def_stmt>setUp self enforce_scope=<false> enforce_new_defaults=<false> *args **kwargs<block_start>super().setUp(enforce_scope enforce_new_defaults *args **kwargs)<line_sep>self.controller=volumes.VolumeController(mock.MagicMock())<line_sep>self.api_path='/v3/%s/volumes'%(self.project_id)<block_end><def_stmt>_create_volume self<block_start>vol_type=test_utils.create_volume_type(self.project_admin_context name='fake_vol_type' testcase_instance=self)<line_sep>volume=test_utils.create_volume(self.project_member_context volume_type_id=vol_type.id testcase_instance=self)<line_sep><return>volume<block_end>@ddt.data(*base.all_users)<def_stmt>test_create_volume_policy self user_id<block_start>rule_name=volume_policies.CREATE_POLICY<line_sep>url=self.api_path<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.method='POST'<line_sep>body={"volume":{"size":1}}<line_sep>unauthorized_exceptions=[]<line_sep>self.common_policy_check(user_id self.create_authorized_users self.create_unauthorized_users unauthorized_exceptions rule_name self.controller.create req body=body)<block_end>@ddt.data(*base.all_users)@mock.patch('cinder.api.v3.volumes.VolumeController._image_uuid_from_ref' return_value=fake_constants.IMAGE_ID)@mock.patch('cinder.api.v3.volumes.VolumeController._get_image_snapshot' return_value=<none>)@mock.patch('cinder.volume.flows.api.create_volume.'<concat>'ExtractVolumeRequestTask._get_image_metadata' return_value=<none>)<def_stmt>test_create_volume_from_image_policy self user_id mock_image_from_ref mock_image_snap mock_img_meta<block_start>rule_name=volume_policies.CREATE_FROM_IMAGE_POLICY<line_sep>url=self.api_path<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.method='POST'<line_sep>body={"volume":{"size":1 "image_id":fake_constants.IMAGE_ID}}<line_sep>unauthorized_exceptions=[]<line_sep>self.common_policy_check(user_id self.create_authorized_users self.create_unauthorized_users unauthorized_exceptions rule_name self.controller.create req 
body=body)<block_end>@ddt.data(*base.all_users)<def_stmt>test_create_multiattach_volume_policy self user_id<block_start>vol_type=test_utils.create_volume_type(self.project_admin_context name='multiattach_type' extra_specs={'multiattach':'<is> True'})<line_sep>rule_name=volume_policies.MULTIATTACH_POLICY<line_sep>url=self.api_path<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.method='POST'<line_sep>body={"volume":{"size":1 "volume_type":vol_type.id}}<line_sep># Relax the CREATE_POLICY in order to get past that check. self.policy.set_rules({volume_policies.CREATE_POLICY:""} overwrite=<false>)<line_sep>unauthorized_exceptions=[]<line_sep>self.common_policy_check(user_id self.create_authorized_users self.create_unauthorized_users unauthorized_exceptions rule_name self.controller.create req body=body)<block_end>@ddt.data(*base.all_users)<def_stmt>test_get_volume_policy self user_id<block_start>volume=self._create_volume()<line_sep>rule_name=volume_policies.GET_POLICY<line_sep>url='%s/%s'%(self.api_path volume.id)<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>unauthorized_exceptions=[exception.VolumeNotFound ]<line_sep>self.common_policy_check(user_id self.authorized_readers self.unauthorized_readers unauthorized_exceptions rule_name self.controller.show req id=volume.id)<block_end>@ddt.data(*base.all_users)<def_stmt>test_get_all_volumes_policy self user_id<block_start>self._create_volume()<line_sep>rule_name=volume_policies.GET_ALL_POLICY<line_sep>url=self.api_path<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep># Generally, any logged in user can list all volumes. authorized_users=[user_id]<line_sep>unauthorized_users=[]<line_sep># The exception is when deprecated rules are disabled, in which case # roles are enforced. Users without the 'reader' role should be # blocked. 
<if_stmt>self.enforce_new_defaults<block_start>context=self.create_context(user_id)<if_stmt>'reader'<not><in>context.roles<block_start>authorized_users=[]<line_sep>unauthorized_users=[user_id]<block_end><block_end>response=self.common_policy_check(user_id authorized_users unauthorized_users [] rule_name self.controller.index req)<line_sep># For some users, even if they're authorized, the list of volumes # will be empty if they are not in the volume's project. empty_response_users=[*self.unauthorized_readers # legacy_admin and system_admin do not have a project_id, and # so the list of volumes returned will be empty. 'legacy_admin' 'system_admin' ]<line_sep>volumes=response['volumes']<if>response<else>[]<line_sep>volume_count=0<if>user_id<in>empty_response_users<else>1<line_sep>self.assertEqual(volume_count len(volumes))<block_end>@ddt.data(*base.all_users)@mock.patch('cinder.db.volume_encryption_metadata_get')<def_stmt>test_get_volume_encryption_meta_policy self user_id mock_encrypt_meta<block_start>encryption_key_id=fake_constants.ENCRYPTION_KEY_ID<line_sep>mock_encrypt_meta.return_value=({'encryption_key_id':encryption_key_id})<line_sep>controller=(volume_encryption_metadata.VolumeEncryptionMetadataController())<line_sep>volume=self._create_volume()<line_sep>rule_name=volume_policies.ENCRYPTION_METADATA_POLICY<line_sep>url='%s/%s/encryption'%(self.api_path volume.id)<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>unauthorized_exceptions=[exception.VolumeNotFound ]<line_sep>resp=self.common_policy_check(user_id self.authorized_readers self.unauthorized_readers unauthorized_exceptions rule_name controller.index req volume.id)<if_stmt>user_id<in>self.authorized_readers<block_start>self.assertEqual(encryption_key_id resp['encryption_key_id'])<block_end><block_end>@ddt.data(*base.all_users)<def_stmt>test_get_volume_tenant_attr_policy self 
user_id<block_start>controller=volume_tenant_attribute.VolumeTenantAttributeController()<line_sep>volume=self._create_volume()<line_sep>volume=volume.obj_to_primitive()['versioned_object.data']<line_sep>rule_name=volume_policies.TENANT_ATTRIBUTE_POLICY<line_sep>url='%s/%s'%(self.api_path volume['id'])<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.get_db_volume=mock.MagicMock()<line_sep>req.get_db_volume.return_value=volume<line_sep>resp_obj=mock.MagicMock(obj={'volume':volume})<line_sep>unauthorized_exceptions=[exception.VolumeNotFound ]<line_sep>self.assertNotIn('os-vol-tenant-attr:tenant_id' volume.keys())<line_sep>self.common_policy_check(user_id self.authorized_readers self.unauthorized_readers unauthorized_exceptions rule_name controller.show req resp_obj volume['id'] fatal=<false>)<if_stmt>user_id<in>self.authorized_readers<block_start>self.assertIn('os-vol-tenant-attr:tenant_id' volume.keys())<block_end><block_end>@ddt.data(*base.all_users)<def_stmt>test_update_volume_policy self user_id<block_start>volume=self._create_volume()<line_sep>rule_name=volume_policies.UPDATE_POLICY<line_sep>url='%s/%s'%(self.api_path volume.id)<line_sep>body={"volume":{"name":"update_name"}}<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.method='PUT'<line_sep>unauthorized_exceptions=[exception.VolumeNotFound ]<line_sep>self.common_policy_check(user_id self.authorized_members self.unauthorized_members unauthorized_exceptions rule_name self.controller.update req id=volume.id body=body)<block_end>@ddt.data(*base.all_users)<def_stmt>test_delete_volume_policy self user_id<block_start>volume=self._create_volume()<line_sep>rule_name=volume_policies.DELETE_POLICY<line_sep>url='%s/%s'%(self.api_path volume.id)<line_sep>req=fake_api.HTTPRequest.blank(url)<line_sep>req.method='DELETE'<line_sep>unauthorized_exceptions=[exception.VolumeNotFound ]<line_sep>self.common_policy_check(user_id self.authorized_members self.unauthorized_members unauthorized_exceptions rule_name 
self.controller.delete req id=volume.id)<block_end><block_end><class_stmt>VolumesPolicySecureRbacTest(VolumesPolicyTest)<block_start>create_authorized_users=['legacy_admin' 'system_admin' 'project_admin' 'project_member' 'other_project_member' ]<line_sep>create_unauthorized_users=['legacy_owner' 'system_member' 'system_reader' 'system_foo' 'other_project_reader' 'project_foo' 'project_reader' ]<line_sep>authorized_readers=['legacy_admin' 'system_admin' 'project_admin' 'project_member' 'project_reader' ]<line_sep>unauthorized_readers=['legacy_owner' 'system_member' 'system_reader' 'system_foo' 'project_foo' 'other_project_member' 'other_project_reader' ]<line_sep>authorized_members=['legacy_admin' 'system_admin' 'project_admin' 'project_member' ]<line_sep>unauthorized_members=['legacy_owner' 'system_member' 'system_reader' 'system_foo' 'project_reader' 'project_foo' 'other_project_member' 'other_project_reader' ]<def_stmt>setUp self *args **kwargs# Test secure RBAC by disabling deprecated policy rules (scope # is still not enabled). <block_start>super().setUp(enforce_scope=<false> enforce_new_defaults=<true> *args **kwargs)<block_end><block_end>
# Copyright 2014 PDFium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>os<line_sep>path=os.path.abspath(os.path.split(__file__)[0])<line_sep>execfile(os.path.join(path 'gyp_pdfium'))<line_sep>
# -*- coding: utf-8 -*- """Serializer tests for the Mendeley addon."""<import_stmt>pytest<import_from_stmt>addons.base.tests.serializers CitationAddonSerializerTestSuiteMixin<import_from_stmt>addons.base.tests.utils MockFolder<import_from_stmt>addons.mendeley.tests.factories MendeleyAccountFactory<import_from_stmt>addons.mendeley.serializer MendeleySerializer<import_from_stmt>tests.base OsfTestCase<line_sep>pytestmark=pytest.mark.django_db<class_stmt>TestMendeleySerializer(CitationAddonSerializerTestSuiteMixin OsfTestCase)<block_start>addon_short_name='mendeley'<line_sep>Serializer=MendeleySerializer<line_sep>ExternalAccountFactory=MendeleyAccountFactory<line_sep>folder=MockFolder()<block_end>
# -*- coding: utf-8 -*- r""" Information-set decoding for linear codes Information-set decoding is a probabilistic decoding strategy that essentially tries to guess `k` correct positions in the received word, where `k` is the dimension of the code. A codeword agreeing with the received word on the guessed position can easily be computed, and their difference is one possible error vector. A "correct" guess is assumed when this error vector has low Hamming weight. This simple algorithm is not very efficient in itself, but there are numerous refinements to the strategy that make it very capable over rather large codes. Still, the decoding algorithm is exponential in dimension of the code and the log of the field size. The ISD strategy requires choosing how many errors is deemed acceptable. One choice could be `d/2`, where `d` is the minimum distance of the code, but sometimes `d` is not known, or sometimes more errors are expected. If one chooses anything above `d/2`, the algorithm does not guarantee to return a nearest codeword. AUTHORS: - <NAME>, <NAME>, <NAME> (2016-02, 2017-06): initial version """<line_sep>#****************************************************************************** # Copyright (C) 2017 <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # http://www.gnu.org/licenses/ #****************************************************************************** <import_from_stmt>sage.all ZZ Integer vector SageObject binomial<import_from_stmt>.decoder Decoder<def_stmt>_format_decoding_interval decoding_interval<block_start>r""" Format the decoding interval of an ISD decoder when calling ``_repr_`` or ``_latex_``. 
EXAMPLES:: sage: from sage.coding.information_set_decoder import _format_decoding_interval sage: _format_decoding_interval((0,3)) 'up to 3' sage: _format_decoding_interval((2,3)) 'between 2 and 3' sage: _format_decoding_interval((3,3)) 'exactly 3' """<if_stmt>decoding_interval[0]<eq>0<block_start><return>"up to {0}".format(decoding_interval[1])<block_end><if_stmt>decoding_interval[0]<eq>decoding_interval[1]<block_start><return>"exactly {0}".format(decoding_interval[0])<block_end><return>"between {0} and {1}".format(decoding_interval[0] decoding_interval[1])<block_end><class_stmt>InformationSetAlgorithm(SageObject)<block_start>r""" Abstract class for algorithms for :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`. To sub-class this class, override ``decode`` and ``calibrate``, and call the super constructor from ``__init__``. INPUT: - ``code`` -- A linear code for which to decode. - ``number_errors`` -- an integer, the maximal number of errors to accept as correct decoding. An interval can also be specified by giving a pair of integers, where both end values are taken to be in the interval. - ``algorithm_name`` -- A name for the specific ISD algorithm used (used for printing). - ``parameters`` -- (optional) A dictionary for setting the parameters of this ISD algorithm. Note that sanity checking this dictionary for the individual sub-classes should be done in the sub-class constructor. 
EXAMPLES:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4)) ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors A minimal working example of how to sub-class:: sage: from sage.coding.information_set_decoder import InformationSetAlgorithm sage: from sage.coding.decoder import DecodingError sage: class MinimalISD(InformationSetAlgorithm): ....: def __init__(self, code, decoding_interval): ....: super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD") ....: def calibrate(self): ....: self._parameters = { } # calibrate parameters here ....: self._time_estimate = 10.0 # calibrated time estimate ....: def decode(self, r): ....: # decoding algorithm here ....: raise DecodingError("I failed") sage: MinimalISD(codes.GolayCode(GF(2)), (0,4)) ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors """<def_stmt>__init__ self code decoding_interval algorithm_name parameters=<none><block_start>r""" TESTS:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4)) ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors """<line_sep>self._code=code<line_sep>self._decoding_interval=decoding_interval<line_sep>self._algorithm_name=algorithm_name<if_stmt>parameters<block_start>self._parameters=parameters<line_sep>self._parameters_specified=<true><block_end><else_stmt><block_start>self._parameters_specified=<false><block_end><block_end><def_stmt>name self<block_start>r""" Return the name of this ISD algorithm. 
EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,2)) sage: A.name() 'Lee-Brickell' """<line_sep><return>self._algorithm_name<block_end><def_stmt>decode self r<block_start>r""" Decode a received word using this ISD decoding algorithm. Must be overridden by sub-classes. EXAMPLES:: sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\ [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\ [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\ [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]]) sage: C = codes.LinearCode(M) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (2,2)) sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) sage: A.decode(r) (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) """<line_sep><raise>NotImplementedError<block_end><def_stmt>time_estimate self<block_start>""" Estimate for how long this ISD algorithm takes to perform a single decoding. The estimate is for a received word whose number of errors is within the decoding interval of this ISD algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,2)) sage: A.time_estimate() #random 0.0008162108571427874 """<if_stmt><not>hasattr(self "_time_estimate")<block_start>self.calibrate()<block_end><return>self._time_estimate<block_end><def_stmt>calibrate self<block_start>""" Uses test computations to estimate optimal values for any parameters this ISD algorithm may take. Must be overridden by sub-classes. If ``self._parameters_specified`` is ``False``, this method shall set ``self._parameters`` to the best parameters estimated. It shall always set ``self._time_estimate`` to the time estimate of using ``self._parameters``. 
EXAMPLES:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: C = codes.GolayCode(GF(2)) sage: A = LeeBrickellISDAlgorithm(C, (0,3)) sage: A.calibrate() sage: A.parameters() #random {'search_size': 1} """<line_sep><raise>NotImplementedError<block_end><def_stmt>code self<block_start>r""" Return the code associated to this ISD algorithm. EXAMPLES:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: C = codes.GolayCode(GF(2)) sage: A = LeeBrickellISDAlgorithm(C, (0,3)) sage: A.code() [24, 12, 8] Extended Golay code over GF(2) """<line_sep><return>self._code<block_end><def_stmt>decoding_interval self<block_start>r""" A pair of integers specifying the interval of number of errors this ISD algorithm will attempt to correct. The interval includes both end values. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,2)) sage: A.decoding_interval() (0, 2) """<line_sep><return>self._decoding_interval<block_end><def_stmt>parameters self<block_start>""" Return any parameters this ISD algorithm uses. If the parameters have not already been set, efficient values will first be calibrated and returned. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3) sage: A.parameters() {'search_size': 3} If not set, calibration will determine a sensible value:: sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: A.parameters() #random {'search_size': 1} """<if_stmt><not>hasattr(self "_parameters")<block_start>self.calibrate()<block_end><return>self._parameters<block_end><def_stmt>__eq__ self other<block_start>r""" Tests equality between ISD algorithm objects. 
EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: A == LeeBrickellISDAlgorithm(C, (0,4)) True sage: A == LeeBrickellISDAlgorithm(C, (0,5)) False sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2 sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search) False ISD Algorithm objects can be equal only if they have both calibrated the parameters, or if they both had it set and to the same value:: sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size']) sage: A == A2 False sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size']) True """<line_sep><return>isinstance(other self.__class__)<and>self.code()<eq>other.code()<and>self.decoding_interval()<eq>other.decoding_interval()<and>self._parameters_specified<eq>other._parameters_specified<and>(<not>self._parameters_specified<or>self.parameters()<eq>other.parameters())<block_end><def_stmt>__hash__ self<block_start>r""" Returns the hash value of ``self``. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: hash(A) #random 5884357732955478461 sage: C2 = codes.GolayCode(GF(3)) sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4)) sage: hash(A) != hash(A2) True """<line_sep><return>hash(str(self))<block_end><def_stmt>_repr_ self<block_start>r""" Returns a string representation of this ISD algorithm. 
EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors """<line_sep><return>"ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name self.code() _format_decoding_interval(self.decoding_interval()))<block_end><def_stmt>_latex_ self<block_start>r""" Returns a latex representation of this ISD algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)) sage: latex(A) \textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors} """<line_sep><return>"\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name self.code()._latex_() _format_decoding_interval(self.decoding_interval()))<block_end><block_end><class_stmt>LeeBrickellISDAlgorithm(InformationSetAlgorithm)<block_start>r""" The Lee-Brickell algorithm for information-set decoding. For a description of the information-set decoding paradigm (ISD), see :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`. This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the original binary case, and [Pet2010]_ for the `q`-ary extension. Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}` be a received word in a transmission. We seek the codeword whose Hamming distance from `r` is minimal. Let `p` and `w` be integers, such that `0\leq p\leq w`, Let `G` be a generator matrix of `C`, and for any set of indices `I`, we write `G_{I}` for the matrix formed by the columns of `G` indexed by `I`. The Lee-Brickell ISD loops the following until it is successful: 1. Choose an information set `I` of `C`. 2. 
Compute `r' = r - r_{I}\times G_I^{-1} \times G` 3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`. For each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute the error vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`, 4. If `e` has a Hamming weight at most `w`, return `r-e`. INPUT: - ``code`` -- A linear code for which to decode. - ``decoding_interval`` -- a pair of integers specifying an interval of number of errors to correct. Includes both end values. - ``search_size`` -- (optional) the size of subsets to use on step 3 of the algorithm as described above. Usually a small number. It has to be at most the largest allowed number of errors. A good choice will be approximated if this option is not set; see :meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate` for details. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors sage: C = codes.GolayCode(GF(2)) sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors """<def_stmt>__init__ self code decoding_interval search_size=<none><block_start>r""" TESTS: If ``search_size`` is not a positive integer, or is bigger than the decoding radius, an error will be raised:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1) Traceback (most recent call last): ... ValueError: The search size parameter has to be a positive integer sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4) Traceback (most recent call last): ... 
ValueError: The search size parameter has to be at most the maximal number of allowed errors """<if_stmt>search_size<is><not><none><block_start><if_stmt><not>isinstance(search_size (Integer int))<or>search_size<l>0<block_start><raise>ValueError("The search size parameter has to be a positive integer")<block_end><if_stmt>search_size<g>decoding_interval[1]<block_start><raise>ValueError("The search size parameter has to be at most"<concat>" the maximal number of allowed errors")<block_end>super(LeeBrickellISDAlgorithm self).__init__(code decoding_interval "Lee-Brickell" parameters={'search_size':search_size})<line_sep>self._parameters_specified=<true><block_end><else_stmt><block_start>self._parameters_specified=<false><line_sep>super(LeeBrickellISDAlgorithm self).__init__(code decoding_interval "Lee-Brickell")<block_end><block_end><def_stmt>decode self r<block_start>r""" The Lee-Brickell algorithm as described in the class doc. Note that either parameters must be given at construction time or :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()` should be called before calling this method. INPUT: - `r` -- a received word, i.e. a vector in the ambient space of :meth:`decoder.Decoder.code`. OUTPUT: A codeword whose distance to `r` satisfies ``self.decoding_interval()``. 
EXAMPLES:: sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\ [0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\ [0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\ [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]]) sage: C = codes.LinearCode(M) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (2,2)) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: c_out = A.decode(r) sage: (r - c).hamming_weight() == 2 True """<import_stmt>itertools<import_from_stmt>sage.misc.prandom sample<line_sep>C=self.code()<line_sep>n,k=C.length() C.dimension()<line_sep>tau=self.decoding_interval()<line_sep>p=self.parameters()['search_size']<line_sep>F=C.base_ring()<line_sep>G=C.generator_matrix()<line_sep>Fstar=F.list()[1:]<while_stmt><true># step 1. <block_start>I=sample(range(n) k)<line_sep>Gi=G.matrix_from_columns(I)<try_stmt><block_start>Gi_inv=Gi.inverse()<block_end><except_stmt>ZeroDivisionError# I was not an information set <block_start><continue><block_end>Gt=Gi_inv<times>G<line_sep>#step 2. y=r-vector([r[i]<for>i I])<times>Gt<line_sep>g=Gt.rows()<line_sep>#step 3. <for_stmt>pi range(p+1)<block_start><for_stmt>A itertools.combinations(range(k) pi)<block_start><for_stmt>m itertools.product(Fstar repeat=pi)<block_start>e=y-sum(m[i]<times>g[A[i]]<for>i range(pi))<line_sep>errs=e.hamming_weight()<if_stmt>errs<ge>tau[0]<and>errs<le>tau[1]<block_start><return>r-e<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>calibrate self<block_start>r""" Run some test computations to estimate the optimal search size. Let `p` be the search size. We should simply choose `p` such that the average expected time is minimal. The algorithm succeeds when it chooses an information set with at least `k - p` correct positions, where `k` is the dimension of the code and `p` the search size. 
The expected number of trials we need before this occurs is: .. MATH:: \binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i}) Here `\rho` is the fraction of `k` subsets of indices which are information sets. If `T` is the average time for steps 1 and 2 (including selecting `I` until an information set is found), while `P(i)` is the time for the body of the ``for``-loop in step 3 for `m` of weight `i`, then each information set trial takes roughly time `T + \sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the base field. The values `T` and `P` are here estimated by running a few test computations similar to those done by the decoding algorithm. We don't explicitly estimate `\rho`. OUTPUT: Does not output anything but sets private fields used by :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()` and :meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``. EXAMPLES:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: C = codes.GolayCode(GF(2)) sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors sage: A.calibrate() sage: A.parameters() #random {'search_size': 1} sage: A.time_estimate() #random 0.0008162108571427874 If we specify the parameter at construction time, calibrate does not override this choice:: sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors sage: A.parameters() {'search_size': 2} sage: A.calibrate() sage: A.parameters() {'search_size': 2} sage: A.time_estimate() #random 0.0008162108571427874 """<import_from_stmt>sage.matrix.special random_matrix<import_from_stmt>sage.misc.prandom sample randint<import_from_stmt>sage.modules.free_module_element random_vector<import_from_stmt>time 
process_time<line_sep>C=self.code()<line_sep>G=C.generator_matrix()<line_sep>n,k=C.length() C.dimension()<line_sep>tau=self.decoding_interval()[1]<line_sep>F=C.base_ring()<line_sep>q=F.cardinality()<line_sep>Fstar=F.list()[1:]<def_stmt>time_information_set_steps <block_start>before=process_time()<while_stmt><true><block_start>I=sample(range(n) k)<line_sep>Gi=G.matrix_from_columns(I)<try_stmt><block_start>Gi_inv=Gi.inverse()<block_end><except_stmt>ZeroDivisionError<block_start><continue><block_end><return>process_time()-before<block_end><block_end><def_stmt>time_search_loop p<block_start>y=random_vector(F n)<line_sep>g=random_matrix(F p n).rows()<line_sep>scalars=[[Fstar[randint(0 q-2)]<for>i range(p)]<for>s range(100)]<line_sep>before=process_time()<for_stmt>m scalars<block_start>e=y-sum(m[i]<times>g[i]<for>i range(p))<block_end><return>(process_time()-before)/100.<block_end>T=sum([time_information_set_steps()<for>s range(5)])/5.<line_sep>P=[time_search_loop(p)<for>p range(tau+1)]<def_stmt>compute_estimate p<block_start>iters=1.<times>binomial(n k)/sum(binomial(n-tau k-i)<times>binomial(tau i)<for>i range(p+1))<line_sep>estimate=iters<times>(T+sum(P[pi]<times>(q-1)<power>pi<times>binomial(k pi)<for>pi range(p+1)))<line_sep><return>estimate<block_end><if_stmt>self._parameters_specified<block_start>self._time_estimate=compute_estimate(self._parameters['search_size'])<block_end><else_stmt><block_start>self._calibrate_select([compute_estimate(p)<for>p range(tau+1)])<block_end><block_end><def_stmt>_calibrate_select self estimates<block_start>r""" Internal method used by ``self.calibrate()``. Given the timing estimates, select the best parameter and set the appropriate private fields. INPUT: - `estimates` - list of time estimates, for the search size set to the index of the list entry. OUTPUT: None, but sets the private fields `self._parameters` and `self._time_estimate`. 
TESTS:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: C = codes.GolayCode(GF(2)) sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ]) sage: A._time_estimate 0.500000000000000 sage: A._parameters {'search_size': 3} """<line_sep>search_size=0<for_stmt>p range(1 len(estimates))<block_start><if_stmt>estimates[p]<l>estimates[search_size]<block_start>search_size=p<block_end><block_end>self._parameters={'search_size':search_size}<line_sep>self._time_estimate=estimates[search_size]<block_end><block_end><class_stmt>LinearCodeInformationSetDecoder(Decoder)<block_start>r""" Information-set decoder for any linear code. Information-set decoding is a probabilistic decoding strategy that essentially tries to guess `k` correct positions in the received word, where `k` is the dimension of the code. A codeword agreeing with the received word on the guessed position can easily be computed, and their difference is one possible error vector. A "correct" guess is assumed when this error vector has low Hamming weight. The ISD strategy requires choosing how many errors is deemed acceptable. One choice could be `d/2`, where `d` is the minimum distance of the code, but sometimes `d` is not known, or sometimes more errors are expected. If one chooses anything above `d/2`, the algorithm does not guarantee to return a nearest codeword. This simple algorithm is not very efficient in itself, but there are numerous refinements to the strategy. Specifying which strategy to use among those that Sage knows is done using the ``algorithm`` keyword. If this is not set, an efficient choice will be made for you. The various ISD algorithms all need to select a number of parameters. If you choose a specific algorithm to use, you can pass these parameters as named parameters directly to this class' constructor. 
If you don't, efficient choices will be calibrated for you. .. WARNING:: If there is no codeword within the specified decoding distance, then the decoder may never terminate, or it may raise a :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD algorithm used. INPUT: - ``code`` -- A linear code for which to decode. - ``number_errors`` -- an integer, the maximal number of errors to accept as correct decoding. An interval can also be specified by giving a pair of integers, where both end values are taken to be in the interval. - ``algorithm`` -- (optional) the string name of the ISD algorithm to employ. If this is not set, an appropriate one will be chosen. A constructed :class:`sage.coding.information_set_decoder.InformationSetAlgorithm` object may also be given. In this case ``number_errors`` must match that of the passed algorithm. - ``**kwargs`` -- (optional) any number of named arguments passed on to the ISD algorithm. Such are usually not required, and they can only be set if ``algorithm`` is set to a specific algorithm. See the documentation for each individual ISD algorithm class for information on any named arguments they may accept. The easiest way to access this documentation is to first construct the decoder without passing any named arguments, then accessing the ISD algorithm using :meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`, and then reading the `?` help on the constructed object. 
EXAMPLES: The principal way to access this class is through the :meth:`sage.code.linear_code.AbstractLinearCode.decoder` method:: sage: C = codes.GolayCode(GF(3)) sage: D = C.decoder("InformationSet", 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors You can specify which algorithm you wish to use, and you should do so in order to pass special parameters to it:: sage: C = codes.GolayCode(GF(3)) sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2 Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: D2.algorithm() ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: D2.algorithm().parameters() {'search_size': 2} If you specify an algorithm which is not known, you get a friendly error message:: sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing") Traceback (most recent call last): ... ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell']. You can also construct an ISD algorithm separately and pass that. This is mostly useful if you write your own ISD algorithms:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, algorithm=A); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors When passing an already constructed ISD algorithm, you can't also pass parameters to the ISD algorithm when constructing the decoder:: sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2) Traceback (most recent call last): ... 
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm We can also information-set decode non-binary codes:: sage: C = codes.GolayCode(GF(3)) sage: D = C.decoder("InformationSet", 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors There are two other ways to access this class:: sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder sage: D = LinearCodeInformationSetDecoder(C, 2); D Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors """<def_stmt>__init__ self code number_errors algorithm=<none> **kwargs<block_start>r""" TESTS: ``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints, or an Integer/int:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", "aa") Traceback (most recent call last): ... ValueError: number_errors should be an integer or a pair of integers If ``number_errors`` is passed as a list/tuple, it has to contain only two values, the first one being at most the second one:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", (4, 2)) Traceback (most recent call last): ... ValueError: number_errors should be a positive integer or a valid interval within the positive integers You cannot ask the decoder to correct more errors than the code length:: sage: D = C.decoder("InformationSet", 25) Traceback (most recent call last): ... ValueError: The provided number of errors should be at most the code's length If ``algorithm`` is not set, additional parameters cannot be passed to the ISD algorithm:: sage: D = C.decoder("InformationSet", 2, search_size=2) Traceback (most recent call last): ... 
ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword If ``algorithm`` is set to a constructed ISD algorithm, additional parameters cannot be passed to the ISD algorithm:: sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, A, search_size=3) Traceback (most recent call last): ... ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm If ``algorithm`` is set to a constructed :class:`sage.coding.information_set_decoder.InformationSetAlgorithm`, then ``number_errors`` must match that of the algorithm:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: A = LeeBrickellISDAlgorithm(C, (0, 2)) sage: D = C.decoder("InformationSet", 2, A); D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors sage: D = C.decoder("InformationSet", (0,2), A); D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors sage: D = C.decoder("InformationSet", 3, A); D Traceback (most recent call last): ... 
ValueError: number_errors must match that of the passed ISD algorithm """<if_stmt>isinstance(number_errors (Integer int))<block_start>number_errors=(0 number_errors)<block_end><if_stmt>isinstance(number_errors (tuple list))<and>len(number_errors)<eq>2<and>number_errors[0]<in>ZZ<and>number_errors[1]<in>ZZ<block_start><if_stmt>0<g>number_errors[0]<or>number_errors[0]<g>number_errors[1]<block_start><raise>ValueError("number_errors should be a positive integer or"<concat>" a valid interval within the positive integers")<block_end><if_stmt>number_errors[1]<g>code.length()<block_start><raise>ValueError("The provided number of errors should be at"<concat>" most the code's length")<block_end><block_end><else_stmt><block_start><raise>ValueError("number_errors should be an integer or a pair of integers")<block_end>self._number_errors=number_errors<line_sep>super(LinearCodeInformationSetDecoder self).__init__(code code.ambient_space() code._default_encoder_name)<if_stmt>algorithm<is><none><block_start><if_stmt>kwargs<block_start><raise>ValueError("Additional arguments to an information-set decoder"<concat>" algorithm are only allowed if a specific"<concat>" algorithm is selected by setting the algorithm"<concat>" keyword")<block_end>algorithm="Lee-Brickell"<block_end>algorithm_names=LinearCodeInformationSetDecoder.known_algorithms(dictionary=<true>)<if_stmt>isinstance(algorithm InformationSetAlgorithm)<block_start><if_stmt>kwargs<block_start><raise>ValueError("ISD algorithm arguments are not allowed when"<concat>" supplying a constructed ISD algorithm")<block_end><if_stmt>number_errors<ne>algorithm.decoding_interval()<block_start><raise>ValueError("number_errors must match that of the passed"<concat>" ISD algorithm")<block_end>self._algorithm=algorithm<block_end><elif_stmt>algorithm<in>algorithm_names<block_start>self._algorithm=algorithm_names[algorithm](code number_errors **kwargs)<block_end><else_stmt><block_start><raise>ValueError("Unknown ISD algorithm '{}'."<concat>" 
The known algorithms are {}.".format(algorithm sorted(algorithm_names)))<block_end><block_end>_known_algorithms={"Lee-Brickell":LeeBrickellISDAlgorithm}<line_sep>@staticmethod<def_stmt>known_algorithms dictionary=<false><block_start>r""" Return the list of ISD algorithms that Sage knows. Passing any of these to the constructor of :class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder` will make the ISD decoder use that algorithm. INPUT: - ``dictionary`` - optional. If set to ``True``, return a ``dict`` mapping decoding algorithm name to its class. OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class. EXAMPLES:: sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder sage: sorted(LinearCodeInformationSetDecoder.known_algorithms()) ['Lee-Brickell'] """<if_stmt>dictionary<block_start><return>LinearCodeInformationSetDecoder._known_algorithms<block_end><else_stmt><block_start><return>LinearCodeInformationSetDecoder._known_algorithms.keys()<block_end><block_end><def_stmt>algorithm self<block_start>r""" Return the ISD algorithm used by this ISD decoder. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell") sage: D.algorithm() ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors """<line_sep><return>self._algorithm<block_end><def_stmt>decode_to_code self r<block_start>r""" Decodes a received word with respect to the associated code of this decoder. .. WARNING:: If there is no codeword within the decoding radius of this decoder, this method may never terminate, or it may raise a :exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD algorithm used. INPUT: - ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`. OUTPUT: a codeword of :meth:`decoder.Decoder.code`. 
EXAMPLES:: sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\ [0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\ [0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\ [0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\ [0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]]) sage: C = LinearCode(M) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 2) sage: c == D.decode_to_code(r) True Information-set decoding a non-binary code:: sage: C = codes.GolayCode(GF(3)); C [12, 6, 6] Extended Golay code over GF(3) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 2) sage: c == D.decode_to_code(r) True Let's take a bigger example, for which syndrome decoding or nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic Residue code over `\GF{3}` has true minimum distance 17, so we can correct 8 errors:: sage: C = codes.QuadraticResidueCode(59, GF(3)) sage: c = C.random_element() sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2) sage: r = Chan(c) sage: D = C.decoder('InformationSet', 8) sage: c == D.decode_to_code(r) # long time True """<line_sep>C=self.code()<if_stmt>r<in>C<block_start><return>r<block_end><return>self.algorithm().decode(r)<block_end><def_stmt>decoding_radius self<block_start>r""" Return the maximal number of errors this decoder can decode. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D.decoding_radius() 2 """<line_sep><return>self._number_errors[1]<block_end><def_stmt>decoding_interval self<block_start>r""" A pair of integers specifying the interval of number of errors this decoder will attempt to correct. The interval includes both end values. 
EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D.decoding_interval() (0, 2) """<line_sep><return>self._number_errors<block_end><def_stmt>_repr_ self<block_start>r""" Returns a string representation of this decoding algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: D = C.decoder("InformationSet", 2) sage: D Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors """<line_sep><return>"Information-set decoder ({}) for {} decoding {} errors ".format(self.algorithm().name() self.code() _format_decoding_interval(self.decoding_interval()))<block_end><def_stmt>_latex_ self<block_start>r""" Returns a latex representation of this decoding algorithm. EXAMPLES:: sage: C = codes.GolayCode(GF(2)) sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm sage: D = C.decoder("InformationSet", 2) sage: latex(D) \textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors} """<line_sep><return>"\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name() self.code()._latex_() _format_decoding_interval(self.decoding_interval()))<block_end><block_end>LinearCodeInformationSetDecoder._decoder_type={"hard-decision" "probabilistic" "not-always-closest" "bounded-distance" "might-fail"}<line_sep>
import math

import tensorflow as tf

from mayo.log import log
from mayo.util import (
    Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase


class Train(SessionBase):
    """Training session.

    Builds the per-tower loss and gradient graph, the train/update
    operations, and drives the epoch loop with periodic checkpoint saves.

    NOTE(review): attributes such as ``self.run``, ``self.estimator``,
    ``self.change``, ``self.task``, ``self.num_epochs``,
    ``self.extra_train_ops`` and the checkpoint helpers are inherited from
    ``SessionBase`` and are not visible in this file — confirm their exact
    semantics against that class.
    """

    mode = 'train'

    def __init__(self, config):
        super().__init__(config)
        # When False, once() runs no training ops and only reads the
        # epoch counter (see once()).
        self._run_train_ops = True
        self._setup_train_operation()
        self._init()
        # Epoch of the most recent checkpoint save ('' means none yet).
        self._checkpoint_epoch = ''

    @memoize_property
    def learning_rate(self):
        """Instantiate the configured learning-rate schedule (memoized)."""
        params = self.config.train.learning_rate
        lr_class, params = object_from_params(params)
        if lr_class is tf.train.piecewise_constant:
            # `tf.train.piecewise_constant` uses argument name 'x' instead
            # just to make life more difficult
            step_name = 'x'
        else:
            step_name = 'global_step'
        # The schedule is driven by the epoch counter, not a raw step.
        params[step_name] = self.num_epochs
        log.debug(
            'Using learning rate {!r} with params {}.'.format(
                lr_class.__name__, params))
        return lr_class(**params)

    @memoize_property
    def optimizer(self):
        """Instantiate the configured optimizer (memoized)."""
        params = self.config.train.optimizer
        optimizer_class, params = object_from_params(params)
        log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
        return optimizer_class(self.learning_rate, **params)

    @staticmethod
    def _average_gradients(tower_grads):
        """Average gradients across towers.

        ``tower_grads`` is an iterable of per-tower lists of
        ``(gradient, variable)`` pairs; returns one such list with each
        gradient averaged element-wise over the towers.  Raises
        ``ValueError`` if any gradient is ``None`` (disconnected graph).
        """
        tower_grads = list(tower_grads)
        # Single tower: nothing to average.
        if len(tower_grads) == 1:
            return tower_grads[0]
        average_grads = []
        for grad_and_vars in zip(*tower_grads):
            grads = []
            for g, v in grad_and_vars:
                # add 0 dimension to the gradients to represent the tower
                if g is None:
                    raise ValueError(
                        'Gradient for variable {} is None, please check '
                        'connection.'.format(v))
                g = tf.expand_dims(g, 0)
                grads.append(g)
            # average over the 'tower' dimension.
            grad = tf.concat(axis=0, values=grads)
            grad = tf.reduce_mean(grad, 0)
            # simply return the first tower's pointer to the Variable
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)
        return average_grads

    @staticmethod
    def _loss_formatter(key, name):
        """Return a formatter rendering the estimator statistic ``key``
        as ``"<name>: <mean>±<std%>"``; raises on a NaN mean (diverged
        model)."""
        def formatter(estimator):
            loss_mean, loss_std = estimator.get_mean_std(key)
            if math.isnan(loss_mean):
                raise ValueError('Model diverged with a nan-valued loss.')
            loss_std = '±{}'.format(Percent(loss_std / loss_mean))
            return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
        return formatter

    @memoize_method
    def _losses_and_gradients(self):
        """Build per-tower losses (task loss + regularization) and the
        cross-tower averaged gradients.  Memoized: the graph is built
        once."""
        formatter = self._loss_formatter('regularization', 'regu')
        regularization = self.get_collection(
            tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
        if regularization:
            self.estimator.register(
                tf.add_n(regularization), 'regularization',
                formatter=formatter)

        def gradient(net, prediction, truth):
            # Total loss for one tower: task loss plus all regularizers.
            loss = [self.task.train(net, prediction, truth)] + regularization
            loss = tf.add_n(loss)
            return loss, self.optimizer.compute_gradients(loss)

        tower_losses, tower_grads = zip(*self.task.map(gradient))
        return tower_losses, self._average_gradients(tower_grads)

    def _setup_train_operation(self):
        """Create the dict of ops run each step: gradient application,
        collected update ops, and any extra train ops."""
        ops = {}
        self._losses, gradients = self._losses_and_gradients()
        self._mean_loss = tf.reduce_mean(self._losses)
        ops['app_grad'] = self.optimizer.apply_gradients(gradients)
        # update ops
        update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
        ops['update'] = tf.group(*update_ops, name='update')
        log.debug('Using update operations: {}'.format(update_ops))
        log.debug('Using training operations: {}'.format(ops))
        if self.extra_train_ops:
            ops['extra'] = self.extra_train_ops
        self._train_op = ops

    def _init(self):
        """Load the configured checkpoint and register the mean-loss
        statistic with the estimator."""
        self.load_checkpoint(self.config.system.checkpoint.load)
        formatter = self._loss_formatter('loss', 'loss')
        self.estimator.register(self._mean_loss, 'loss', formatter=formatter)

    def reset_num_epochs(self):
        """Reset the epoch counter and related change trackers.

        NOTE(review): 'Reseting' typo below is in a runtime log string and
        is deliberately left unchanged here.
        """
        log.info('Reseting number of training epochs of the model...')
        self.run(self.imgs_seen.initializer)
        self.change.reset('checkpoint.epoch')
        self.change.reset('step')

    def once(self):
        """Run one training step (or a no-op when ``_run_train_ops`` is
        False) and return the current epoch count."""
        train_op = self._train_op if self._run_train_ops else []
        tasks = [train_op, self.num_epochs]
        _, num_epochs = self.run(tasks, batch=True)
        return num_epochs

    def overriders_assign(self):
        """Assign overridden parameter values back onto the parameters."""
        log.info('Assigning overridden values of parameters to parameters...')
        self._overriders_call('assign')

    def overriders_update(self):
        """Update overrider internal variables."""
        log.info('Updating overrider internal variables...')
        self._overriders_call('update')

    def overriders_reset(self):
        """Reset overrider internal variables."""
        log.info('Resetting overriders internal variables...')
        self._overriders_call('reset')

    def _iteration(self, max_epochs=None):
        """Run one loop iteration; return False when training should stop.

        Saves a checkpoint every ``save.interval`` whole epochs, and a
        final checkpoint on reaching ``max_epochs`` when the last save is
        older than the current floor epoch.
        """
        system = self.config.system
        epoch = self.once()
        floor_epoch = math.floor(epoch)
        cp_interval = system.checkpoint.get('save.interval', 0)
        if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
            log.info(
                'Saving checkpoint at epoch {}...'.format(epoch), update=True)
            with log.demote():
                self.save_checkpoint(floor_epoch)
            self._checkpoint_epoch = floor_epoch
        max_epochs = max_epochs or system.max_epochs
        if max_epochs and epoch >= max_epochs:
            log.info('Maximum epoch count {} reached.'.format(max_epochs))
            if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
                log.info('Saving final checkpoint...')
                self.save_checkpoint(floor_epoch)
            return False
        return True

    def train(self, max_epochs=None):
        """Run the training loop until ``max_epochs`` (or the configured
        maximum) is reached; on Ctrl-C, optionally save a 'latest'
        checkpoint after a countdown."""
        # final debug outputs
        lr = self.run(self.learning_rate)
        log.info('Training start with a learning rate {}.'.format(lr))
        try:
            # train iterations
            while self._iteration(max_epochs=max_epochs):
                pass
        except KeyboardInterrupt:
            log.info('Stopped.')
            save = self.config.system.checkpoint.get('save', {})
            if save:
                countdown = save.get('countdown', 0)
                if log.countdown('Saving checkpoint', countdown):
                    self.save_checkpoint('latest')
from django.apps import AppConfig


class ClxQueryConfig(AppConfig):
    """Django application configuration for the ``clxquery`` app."""

    # App module name Django registers this application under.
    name = "clxquery"
import copy
import logging
import warnings

from kolibri.plugins.registry import registered_plugins

logger = logging.getLogger(__name__)


def __validate_config_option(
    section, name, base_config_spec, plugin_specs, module_path
):
    """Validate one plugin-supplied option and record which plugin set it.

    Raises ``ValueError`` when a plugin tries to redefine a core option;
    warns (but still records) when two plugins define the same option.
    Mutates ``plugin_specs`` in place: ``plugin_specs[section][name]`` is
    the list of module paths that have set this option, in order.
    """
    # Raise an error if someone tries to overwrite a base option
    # except for the default value.
    if section in base_config_spec:
        if name in base_config_spec[section]:
            raise ValueError("Cannot overwrite a core Kolibri options spec option")
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_specs:
        if name in plugin_specs[section]:
            warnings.warn(
                "{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_specs[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_specs[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_specs[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_specs[section] = {name: [module_path]}


def __process_config_spec(
    option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
    """Merge one plugin's ``option_spec`` into ``final_spec``.

    Every ``final_spec[section][name]`` is set to the plugin's attrs dict
    after validation; a later plugin overwrites an earlier one (with a
    warning from ``__validate_config_option``).
    """
    for section, opts in option_spec.items():
        for name, attrs in opts.items():
            __validate_config_option(
                section, name, base_config_spec, plugin_specs, module_path
            )
            if section not in final_spec:
                final_spec[section] = {}
            final_spec[section][name] = attrs


def __validate_option_default(section, name, plugin_default_overrides, module_path):
    """Record that ``module_path`` overrides the default of an option,
    warning when another plugin already overrode the same one.

    Mutates ``plugin_default_overrides`` in place, mirroring the tracking
    structure used by ``__validate_config_option``.
    """
    # Warn if a plugin tries to add an option that another plugin has already added
    if section in plugin_default_overrides:
        if name in plugin_default_overrides[section]:
            warnings.warn(
                "{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
                    plugin=module_path,
                    plugins=", ".join(plugin_default_overrides[section][name]),
                    option=name,
                    section=section,
                )
            )
            plugin_default_overrides[section][name].append(module_path)
        else:
            # If not create the list for this option name
            # to track this and future modifications
            plugin_default_overrides[section][name] = [module_path]
    else:
        # If not create the dict for the section
        # and the list for this option name
        plugin_default_overrides[section] = {name: [module_path]}


def __process_option_defaults(
    option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
    """Apply one plugin's default-value overrides onto ``final_spec``.

    Unknown sections/options are logged as errors and skipped rather than
    raising.
    """
    for section, opts in option_defaults.items():
        for name, default in opts.items():
            __validate_option_default(
                section, name, plugin_default_overrides, module_path
            )
            if section not in final_spec:
                logger.error(
                    "Tried to set a new default in section {}, but this is not a valid section".format(
                        section
                    )
                )
                continue
            if name in final_spec[section]:
                # This is valid, so set a default
                # Note that we do not do validation here for now,
                # so it is up to the user to ensure the default value
                # is kosher.
                final_spec[section][name]["default"] = default
            else:
                logger.error(
                    "Tried to set a new default in section {}, for option {} but this is not a valid option".format(
                        section, name
                    )
                )


def extend_config_spec(base_config_spec):
    """Return a deep copy of ``base_config_spec`` extended with every
    registered plugin's option specs and default-value overrides.

    Spec additions are applied first, then default overrides, so plugins
    may override defaults of options contributed by other plugins.
    """
    plugin_specs = {}
    final_spec = copy.deepcopy(base_config_spec)
    # First process options config spec additions
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.options_module
        if plugin_options and hasattr(plugin_options, "option_spec"):
            module_path = plugin_instance.module_path
            option_spec = plugin_options.option_spec
            __process_config_spec(
                option_spec, base_config_spec, plugin_specs, module_path, final_spec
            )
    # Now process default value overrides, do this second in order to allow plugins
    # to override default values for other plugins!
    plugin_default_overrides = {}
    for plugin_instance in registered_plugins:
        plugin_options = plugin_instance.option_defaults_module
        if plugin_options and hasattr(plugin_options, "option_defaults"):
            module_path = plugin_instance.module_path
            option_defaults = plugin_options.option_defaults
            __process_option_defaults(
                option_defaults,
                base_config_spec,
                plugin_default_overrides,
                module_path,
                final_spec,
            )
    return final_spec
<import_from_stmt>.pydeps pydeps<line_sep>pydeps()<line_sep>
<def_stmt>extract_smiles <block_start><return>["c1ccccc1" "Cc1ccccc1" "c1ccccc1" "CCO"]<block_end>
"""Tests for data.queue.WorkQueue against a real (fixture-provided) database."""
import json
import time

import pytest

from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import wraps

from data.database import QueueItem
from data.queue import (
    WorkQueue,
    MINIMUM_EXTENSION,
    queue_items_locked,
    queue_items_available,
    queue_items_available_unlocked,
)
from test.fixtures import *

QUEUE_NAME = "testqueuename"


class AutoUpdatingQueue(object):
    """Proxy around a WorkQueue that refreshes metrics after every method call.

    Keeps the prometheus gauges in sync so tests can assert on them directly.
    """

    def __init__(self, queue_to_wrap):
        self._queue = queue_to_wrap

    def _wrapper(self, func):
        # Wrap a queue method so update_metrics() runs after it returns.
        @wraps(func)
        def wrapper(*args, **kwargs):
            to_return = func(*args, **kwargs)
            self._queue.update_metrics()
            return to_return

        return wrapper

    def __getattr__(self, attr_name):
        # Delegate to the wrapped queue; wrap callables, pass attributes through.
        method_or_attr = getattr(self._queue, attr_name)
        if callable(method_or_attr):
            return self._wrapper(method_or_attr)
        else:
            return method_or_attr


TEST_MESSAGE_1 = json.dumps({"data": 1})
TEST_MESSAGE_2 = json.dumps({"data": 2})
TEST_MESSAGES = [json.dumps({"data": str(i)}) for i in range(1, 101)]


@contextmanager
def fake_transaction(arg):
    # No-op transaction used in place of a real DB transaction factory.
    yield


@pytest.fixture()
def transaction_factory():
    return fake_transaction


def gauge_value(g):
    # Extract the current numeric value of a prometheus gauge.
    return g.collect()[0].samples[0].value


@pytest.fixture()
def queue(transaction_factory, initialized_db):
    return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory))


def test_get_single_item(queue, transaction_factory):
    """Only one of two concurrent claimants may claim a single item."""
    # Add a single item to the queue.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)

    # Have two "instances" retrieve an item to claim. Since there is only one, both calls should
    # return the same item.
    now = datetime.utcnow()
    first_item = queue._select_available_item(False, now)
    second_item = queue._select_available_item(False, now)

    assert first_item.id == second_item.id
    assert first_item.state_id == second_item.state_id

    # Have both "instances" now try to claim the item. Only one should succeed.
    first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
    second_claimed = queue._attempt_to_claim_item(first_item, now, 300)

    assert first_claimed
    assert not second_claimed

    # Ensure the item is no longer available.
    assert queue.get() is None

    # Ensure the item's state ID has changed.
    assert first_item.state_id != QueueItem.get().state_id


def test_extend_processing(queue, transaction_factory):
    """extend_processing only takes effect past MINIMUM_EXTENSION or with new data."""
    # Add and retrieve a queue item.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue_item = queue.get(processing_time=10)
    assert queue_item is not None

    existing_db_item = QueueItem.get(id=queue_item.id)

    # Call extend processing with a timedelta less than the minimum and ensure its
    # processing_expires and state_id do not change.
    changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
    assert not changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires == updated_db_item.processing_expires
    assert existing_db_item.state_id == updated_db_item.state_id

    # Call extend processing with a timedelta greater than the minimum and ensure its
    # processing_expires and state_id are changed.
    changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
    assert changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires != updated_db_item.processing_expires
    assert existing_db_item.state_id != updated_db_item.state_id

    # Call extend processing with a timedelta less than the minimum but also with new data and
    # ensure its processing_expires and state_id are changed.
    changed = queue.extend_processing(
        queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1, updated_data="newbody"
    )
    assert changed

    updated_db_item = QueueItem.get(id=queue_item.id)

    assert existing_db_item.processing_expires != updated_db_item.processing_expires
    assert existing_db_item.state_id != updated_db_item.state_id
    assert updated_db_item.body == "newbody"


def test_same_canonical_names(queue, transaction_factory):
    """Items with the same canonical name are serialized when ordering is required."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    id_1 = int(queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1))
    id_2 = int(queue.put(["abc", "def"], TEST_MESSAGE_2, available_after=-1))
    assert id_1 + 1 == id_2
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1
    assert queue._currently_processing
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    # The second item shares the canonical name, so it must wait for the first.
    two_fail = queue.get(ordering_required=True)
    assert two_fail is None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    queue.complete(one)
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    two = queue.get(ordering_required=True)
    assert two is not None
    assert queue._currently_processing
    assert two.body == TEST_MESSAGE_2
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1


def test_different_canonical_names(queue, transaction_factory):
    """Items with different canonical names can be processed concurrently."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-1)
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2

    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2

    two = queue.get(ordering_required=True)
    assert two is not None
    assert two.body == TEST_MESSAGE_2
    assert gauge_value(queue_items_locked) == 2
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2


def test_canonical_name(queue, transaction_factory):
    """Retrieved items are queue items, not canonical-name strings."""
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["abc", "def", "ghi"], TEST_MESSAGE_1, available_after=-1)

    one = queue.get(ordering_required=True)
    assert QUEUE_NAME + "/abc/def/" != one

    two = queue.get(ordering_required=True)
    assert QUEUE_NAME + "/abc/def/ghi/" != two


def test_expiration(queue, transaction_factory):
    """An item whose processing window lapses becomes available again."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one = queue.get(processing_time=0.5, ordering_required=True)
    assert one is not None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one_fail = queue.get(ordering_required=True)
    assert one_fail is None

    # Let the 0.5s processing window expire.
    time.sleep(1)
    queue.update_metrics()
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    one_again = queue.get(ordering_required=True)
    assert one_again is not None
    assert gauge_value(queue_items_locked) == 1
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1


def test_alive(queue, transaction_factory):
    """alive() reflects the full lifecycle: absent, queued, processing, completed."""
    # No queue item = not alive.
    assert not queue.alive(["abc", "def"])

    # Add a queue item.
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    assert queue.alive(["abc", "def"])

    # Retrieve the queue item.
    queue_item = queue.get()
    assert queue_item is not None
    assert queue.alive(["abc", "def"])

    # Make sure it is running by trying to retrieve it again.
    assert queue.get() is None

    # Delete the queue item.
    queue.complete(queue_item)
    assert not queue.alive(["abc", "def"])


def test_specialized_queue(queue, transaction_factory):
    """A queue restricted to a prefix only sees matching items."""
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
    queue.put(["def", "def"], TEST_MESSAGE_2, available_after=-1)

    my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ["def"]))

    two = my_queue.get(ordering_required=True)
    assert two is not None
    assert two.body == TEST_MESSAGE_2

    one_fail = my_queue.get(ordering_required=True)
    assert one_fail is None

    one = queue.get(ordering_required=True)
    assert one is not None
    assert one.body == TEST_MESSAGE_1


def test_random_queue_no_duplicates(queue, transaction_factory):
    """Unordered retrieval returns every item exactly once."""
    for msg in TEST_MESSAGES:
        queue.put(["abc", "def"], msg, available_after=-1)
    seen = set()

    for _ in range(1, 101):
        item = queue.get()
        json_body = json.loads(item.body)
        msg = str(json_body["data"])
        assert msg not in seen
        seen.add(msg)

    for body in TEST_MESSAGES:
        json_body = json.loads(body)
        msg = str(json_body["data"])
        assert msg in seen


def test_bulk_insert(queue, transaction_factory):
    """batch_insert groups items; shared canonical names count once for metrics."""
    queue_items_locked.labels(queue._queue_name).set(0)
    queue_items_available.labels(queue._queue_name).set(0)
    queue_items_available_unlocked.labels(queue._queue_name).set(0)

    with queue.batch_insert() as queue_put:
        queue_put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
        queue_put(["abc", "def"], TEST_MESSAGE_2, available_after=-1)

    queue.update_metrics()
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1

    with queue.batch_insert() as queue_put:
        queue_put(["abd", "def"], TEST_MESSAGE_1, available_after=-1)
        queue_put(["abd", "ghi"], TEST_MESSAGE_2, available_after=-1)

    queue.update_metrics()
    assert not queue._currently_processing
    assert gauge_value(queue_items_locked) == 0
    assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 3


def test_num_available_between(queue, transaction_factory):
    """num_available_jobs_between filters by availability window and prefix."""
    now = datetime.utcnow()
    queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-5)

    # Partial results
    count = queue.num_available_jobs_between(now - timedelta(seconds=8), now, ["abc"])
    assert count == 1

    # All results
    count = queue.num_available_jobs_between(now - timedelta(seconds=20), now, ["/abc"])
    assert count == 2

    # No results
    count = queue.num_available_jobs_between(now, now, "abc")
    assert count == 0


def test_incomplete(queue, transaction_factory):
    """incomplete() requeues an item so it can be retrieved again."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Retrieve it.
    item = queue.get()
    assert item is not None
    assert queue._currently_processing

    # Mark it as incomplete.
    queue.incomplete(item, retry_after=-1)
    assert not queue._currently_processing

    # Retrieve again to ensure it is once again available.
    same_item = queue.get()
    assert same_item is not None
    assert queue._currently_processing

    assert item.id == same_item.id


def test_complete(queue, transaction_factory):
    """complete() finishes processing and releases the queue."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Retrieve it.
    item = queue.get()
    assert item is not None
    assert queue._currently_processing

    # Mark it as complete.
    queue.complete(item)
    assert not queue._currently_processing


def test_cancel(queue, transaction_factory):
    """cancel() removes a pending item; cancelling twice fails the second time."""
    # Add an item.
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_2, available_after=-5)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 2

    # Retrieve it.
    item = queue.get()
    assert item is not None

    # Make sure we can cancel it.
    assert queue.cancel(item.id)

    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 1

    # Make sure it is gone.
    assert not queue.cancel(item.id)


def test_deleted_namespaced_items(queue, transaction_factory):
    """delete_namespaced_items removes only items under the given namespace."""
    queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, has_namespace=True))
    queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
    queue.put(["somenamespace", "abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
    queue.put(["anothernamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)

    # Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
    now = datetime.utcnow()
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 2

    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/anothernamespace"])
    assert count == 1

    # Delete all `somenamespace` items.
    queue.delete_namespaced_items("somenamespace")

    # Check the updated counts.
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 0

    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/anothernamespace"])
    assert count == 1

    # Delete all `anothernamespace` items.
    queue.delete_namespaced_items("anothernamespace")

    # Check the updated counts.
    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
    assert count == 0

    count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/anothernamespace"])
    assert count == 0
#! /usr/bin/env python # Copyright (c) 2017, Cuichaowen. All rights reserved. # -*- coding: utf-8 -*- # ops helper dictionary <class_stmt>Dictionary(object)<block_start>""" Dictionary for op param which needs to be combined """<def_stmt>__init__ self<block_start>self.__dict__={}<block_end><def_stmt>set_attr self **kwargs<block_start>""" set dict from kwargs """<for_stmt>key kwargs.keys()<block_start><if_stmt>type(kwargs[key])<eq>type(dict())<block_start><for_stmt>key_inner kwargs[key].keys()<block_start>self.__dict__[key_inner]=kwargs[key][key_inner]<block_end><block_end><else_stmt><block_start>self.__dict__[key]=kwargs[key]<block_end><block_end><return>self<block_end><def_stmt>__call__ self<block_start>""" call class function to generate dictionary param """<line_sep>ret={key:self.__dict__[key]<for>key self.__dict__.keys()}<line_sep><return>ret<block_end><block_end>########### Object track and detection helper (for adu(caffe layer type)) Op io define ############# # NMSSSDParameter nms_param=Dictionary().set_attr(need_nms=bool() overlap_ratio=list() top_n=list() add_score=bool() max_candidate_n=list() use_soft_nms=list() nms_among_classes=bool() voting=list() vote_iou=list() nms_gpu_max_n_per_time=int())<line_sep># BBoxRegParameter bbox_reg_param=Dictionary().set_attr(bbox_mean=list() bbox_std=list())<line_sep># GenerateAnchorParameter gen_anchor_param=Dictionary().set_attr(base_size=float() ratios=list() scales=list() anchor_width=list() anchor_height=list() anchor_x1=list() anchor_y1=list() anchor_x2=list() anchor_y2=list() zero_anchor_center=bool())<line_sep># KPTSParameter kpts_param=Dictionary().set_attr(kpts_exist_bottom_idx=int() kpts_reg_bottom_idx=int() kpts_reg_as_classify=bool() kpts_classify_width=int() kpts_classify_height=int() kpts_reg_norm_idx_st=int() kpts_st_for_each_class=list() kpts_ed_for_each_class=list() kpts_classify_pad_ratio=float())<line_sep># ATRSParameter # enum NormType { # NONE, # WIDTH, # HEIGHT, # WIDTH_LOG, # HEIGHT_LOG # } 
# ATRS regression params (see NormType enum comment above).
atrs_param = Dictionary().set_attr(atrs_reg_bottom_idx=int(),
                                   atrs_reg_norm_idx_st=int(),
                                   atrs_norm_type=str())

# FTRSParameter
ftrs_param = Dictionary().set_attr(ftrs_bottom_idx=int())

# SPMPParameter
spmp_param = Dictionary().set_attr(spmp_bottom_idx=int(),
                                   spmp_class_aware=list(),
                                   spmp_label_width=list(),
                                   spmp_label_height=list(),
                                   spmp_pad_ratio=list())

# Cam3dParameter
cam3d_param = Dictionary().set_attr(cam3d_bottom_idx=int())

# DetectionOutputSSDParameter
# enum MIN_SIZE_MODE {
#     HEIGHT_AND_WIDTH,
#     HEIGHT_OR_WIDTH
# }
# Combines the nms/anchor/kpts/atrs/ftrs/spmp/cam3d sub-params defined above.
detection_output_ssd_param = Dictionary().set_attr(nms=nms_param(),
                                                   threshold=list(),
                                                   channel_per_scale=int(),
                                                   class_name_list=str(),
                                                   num_class=int(),
                                                   refine_out_of_map_bbox=bool(),
                                                   class_indexes=list(),
                                                   heat_map_a=list(),
                                                   heat_map_b=list(),
                                                   threshold_objectness=float(),
                                                   proposal_min_sqrt_area=list(),
                                                   proposal_max_sqrt_area=list(),
                                                   bg_as_one_of_softmax=bool(),
                                                   use_target_type_rcnn=bool(),
                                                   im_width=float(),
                                                   im_height=float(),
                                                   rpn_proposal_output_score=bool(),
                                                   regress_agnostic=bool(),
                                                   gen_anchor=gen_anchor_param(),
                                                   allow_border=float(),
                                                   allow_border_ratio=float(),
                                                   bbox_size_add_one=bool(),
                                                   read_width_scale=float(),
                                                   read_height_scale=float(),
                                                   read_height_offset=int(),
                                                   min_size_h=float(),
                                                   min_size_w=float(),
                                                   min_size_mode="HEIGHT_AND_WIDTH",
                                                   kpts=kpts_param(),
                                                   atrs=atrs_param(),
                                                   ftrs=ftrs_param(),
                                                   spmp=spmp_param(),
                                                   cam3d=cam3d_param())

# DFMBPSROIPoolingParameter
dfmb_psroi_pooling_param = Dictionary().set_attr(heat_map_a=float(),
                                                 heat_map_b=float(),
                                                 pad_ratio=float(),
                                                 output_dim=int(),
                                                 trans_std=float(),
                                                 sample_per_part=int(),
                                                 group_height=int(),
                                                 group_width=int(),
                                                 pooled_height=int(),
                                                 pooled_width=int(),
                                                 part_height=int(),
                                                 part_width=int())

# ProposalImgScaleToCamCoordsParameter
#
# enum NormType {
#     HEIGHT,
#     HEIGHT_LOG
# }
#
# enum OrienType {
#     PI,
#     PI2
# }
proposal_img_scale_to_cam_coords_param = Dictionary().set_attr(num_class=int(),
                                                               sub_class_num_class=list(),
                                                               sub_class_bottom_idx=list(),
                                                               prj_h_norm_type=str(),
                                                               has_size3d_and_orien3d=bool(),
                                                               orien_type=str(),
                                                               cls_ids_zero_size3d_w=list(),
                                                               cls_ids_zero_size3d_l=list(),
                                                               cls_ids_zero_orien3d=list(),
                                                               cmp_pts_corner_3d=bool(),
                                                               cmp_pts_corner_2d=bool(),
                                                               ctr_2d_means=list(),
                                                               ctr_2d_stds=list(),
                                                               prj_h_means=list(),
                                                               prj_h_stds=list(),
                                                               real_h_means=list(),
                                                               real_h_stds=list(),
                                                               real_w_means=list(),
                                                               real_w_stds=list(),
                                                               real_l_means=list(),
                                                               real_l_stds=list(),
                                                               sin_means=list(),
                                                               sin_stds=list(),
                                                               cos_means=list(),
                                                               cos_stds=list(),
                                                               cam_info_idx_st_in_im_info=int(),
                                                               im_width_scale=float(),
                                                               im_height_scale=float(),
                                                               cords_offset_x=float(),
                                                               cords_offset_y=float(),
                                                               bbox_size_add_one=bool(),
                                                               rotate_coords_by_pitch=bool(),
                                                               #refine_coords_by_bbox=bool(),
                                                               #refine_min_dist=float(),
                                                               #refine_dist_for_height_ratio_one=float(),
                                                               #max_3d2d_height_ratio_for_min_dist=float(),
                                                               with_trunc_ratio=bool(),
                                                               regress_ph_rh_as_whole=bool(),
                                                               real_h_means_as_whole=list(),
                                                               real_h_stds_as_whole=list())

# RPNProposalSSD parameter
RPNProposalSSD_param = Dictionary().set_attr(detection_output_ssd=detection_output_ssd_param(),
                                             bbox_reg=bbox_reg_param())
<import_stmt>dns<import_stmt>dns.resolver<import_stmt>dns.rdatatype<def_stmt>dns_resolve domain:str<arrow>list<block_start>addrs=[]<line_sep>resolver=dns.resolver.Resolver(configure=<false>)<line_sep># Default to Google DNS resolver.nameservers=['8.8.8.8' '8.8.4.4']<try_stmt><block_start><for_stmt>answer resolver.resolve(domain 'A').response.answer<block_start><for_stmt>item answer<block_start><if_stmt>item.rdtype<eq>dns.rdatatype.A<block_start>addrs.append(item.address)<block_end><block_end><block_end><block_end><except_stmt>dns.resolver.NoAnswer<block_start><pass><block_end><try_stmt><block_start><for_stmt>answer resolver.resolve(domain 'AAAA').response.answer<block_start><for_stmt>item answer<block_start><if_stmt>item.rdtype<eq>dns.rdatatype.AAAA<block_start>addrs.append(item.address)<block_end><block_end><block_end><block_end><except_stmt>dns.resolver.NoAnswer<block_start><pass><block_end><return>addrs<block_end>
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import platform
from _thread import interrupt_main
from contextlib import contextmanager
from glob import glob
from os import path
from threading import Timer
from typing import Optional

import torch

from monai.utils.module import get_torch_version_tuple, optional_import

# Directory containing this file; extension sources live in subdirectories of it.
dir_path = path.dirname(path.realpath(__file__))


@contextmanager
def timeout(time, message):
    """Context manager that raises TimeoutError if the body runs longer than ``time`` seconds.

    Implemented by arming a daemon Timer that calls ``_thread.interrupt_main``,
    which surfaces as KeyboardInterrupt in the main thread. A genuine user
    Ctrl-C is distinguished by the timer still being alive when the interrupt
    arrives.
    """
    timer = None
    try:
        timer = Timer(time, interrupt_main)
        timer.daemon = True
        # Timer.start() returns None; the yield just hands control to the body
        # once the watchdog is armed.
        yield timer.start()
    except KeyboardInterrupt as e:
        if timer is not None and timer.is_alive():
            raise e  # interrupt from user?
        raise TimeoutError(message) from e
    finally:
        if timer is not None:
            try:
                timer.cancel()
            finally:
                pass


def load_module(
    module_name: str, defines: Optional[dict] = None, verbose_build: bool = False, build_timeout: int = 300
):
    """
    Handles the loading of c++ extension modules.

    Args:
        module_name: Name of the module to load.
            Must match the name of the relevant source directory in the `_extensions` directory.
        defines: Dictionary containing names and values of compilation defines.
        verbose_build: Set to true to enable build logging.
        build_timeout: Time in seconds before the build will throw an exception to prevent hanging.
    """
    # Ensuring named module exists in _extensions directory.
    module_dir = path.join(dir_path, module_name)
    if not path.exists(module_dir):
        raise ValueError(f"No extension module named {module_name}")

    # Cache key components: OS, Python version and torch major/minor version,
    # so rebuilt artifacts do not collide across environments.
    platform_str = f"_{platform.system()}_{platform.python_version()}_"
    platform_str += "".join(f"{v}" for v in get_torch_version_tuple()[:2])

    # Adding configuration to module name.
    if defines is not None:
        module_name = "_".join([module_name] + [f"{v}" for v in defines.values()])

    # Gathering source files.
    source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True)
    if torch.cuda.is_available():
        source += glob(path.join(module_dir, "**", "*.cu"), recursive=True)
        platform_str += f"_{torch.version.cuda}"

    # Constructing compilation argument list.
    define_args = [] if not defines else [f"-D {key}={defines[key]}" for key in defines]

    # Ninja may be blocked by something out of our control.
    # This will error if the build takes longer than expected.
    with timeout(build_timeout, "Build appears to be blocked. Is there a stopped process building the same extension?"):
        load, _ = optional_import("torch.utils.cpp_extension", name="load")  # main trigger some JIT config in pytorch
        # This will either run the build or return the existing .so object.
        name = module_name + platform_str.replace(".", "_")
        module = load(
            name=name, sources=source, extra_cflags=define_args, extra_cuda_cflags=define_args, verbose=verbose_build
        )

    return module
# -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from textwrap import dedent

from hyde.generator import Generator
from hyde.site import Site

from fswrap import File

# Scratch site directory used for every test; created/deleted per test.
TEST_SITE = File(__file__).parent.parent.child_folder('_test')


class TestPaginator(object):
    """Tests for hyde's paginator plugin against the test_paginator fixture site."""

    def setUp(self):
        # Copy the fixture site into a scratch folder and generate it fully.
        TEST_SITE.make()
        TEST_SITE.parent.child_folder('sites/test_paginator').copy_contents_to(TEST_SITE)
        self.s = Site(TEST_SITE)
        self.deploy = TEST_SITE.child_folder('deploy')

        self.gen = Generator(self.s)
        self.gen.load_site_if_needed()
        self.gen.load_template_if_needed()
        self.gen.generate_all()

    def tearDown(self):
        TEST_SITE.delete()

    def test_page_no_paginator(self):
        # A page with no paginator should still be generated as-is.
        f = File(self.deploy.child('empty.txt'))
        assert f.exists

    def test_pages_of_one(self):
        # Four posts at one-per-page yields exactly four pages.
        pages = ['pages_of_one.txt', 'page2/pages_of_one.txt',
                 'page3/pages_of_one.txt', 'page4/pages_of_one.txt']
        files = [File(self.deploy.child(p)) for p in pages]
        for f in files:
            assert f.exists

        page5 = File(self.deploy.child('page5/pages_of_one.txt'))
        assert not page5.exists

    def test_pages_of_one_content(self):
        # Each page lists its post plus previous/next page links.
        # NOTE(review): exact blank-line layout inside these fixtures should be
        # confirmed against the template output.
        expected_page1_content = dedent('''\
            Another Sad Post
            /page2/pages_of_one.txt''')
        expected_page2_content = dedent('''\
            A Happy Post
            /pages_of_one.txt
            /page3/pages_of_one.txt''')
        expected_page3_content = dedent('''\
            An Angry Post
            /page2/pages_of_one.txt
            /page4/pages_of_one.txt''')
        expected_page4_content = dedent('''\
            A Sad Post
            /page3/pages_of_one.txt
            ''')

        page1 = self.deploy.child('pages_of_one.txt')
        content = File(page1).read_all()
        assert expected_page1_content == content

        page2 = self.deploy.child('page2/pages_of_one.txt')
        content = File(page2).read_all()
        assert expected_page2_content == content

        page3 = self.deploy.child('page3/pages_of_one.txt')
        content = File(page3).read_all()
        assert expected_page3_content == content

        page4 = self.deploy.child('page4/pages_of_one.txt')
        content = File(page4).read_all()
        assert expected_page4_content == content

    def test_pages_of_ten(self):
        # Four posts at ten-per-page fit on a single page.
        page1 = self.deploy.child('pages_of_ten.txt')
        page2 = self.deploy.child('page2/pages_of_ten.txt')

        assert File(page1).exists
        assert not File(page2).exists

    def test_pages_of_ten_depends(self):
        # The generated page must depend on all four blog posts.
        depends = self.gen.deps['pages_of_ten.txt']

        assert depends
        assert len(depends) == 4
        assert 'blog/sad-post.html' in depends
        assert 'blog/another-sad-post.html' in depends
        assert 'blog/angry-post.html' in depends
        assert 'blog/happy-post.html' in depends

    def test_pages_of_ten_content(self):
        expected_content = dedent('''\
            Another Sad Post
            A Happy Post
            An Angry Post
            A Sad Post
            ''')

        page = self.deploy.child('pages_of_ten.txt')
        content = File(page).read_all()
        assert expected_content == content

    def test_pages_of_one_depends(self):
        # Even paginated output depends on every source post.
        depends = self.gen.deps['pages_of_one.txt']

        assert depends
        assert len(depends) == 4
        assert 'blog/sad-post.html' in depends
        assert 'blog/another-sad-post.html' in depends
        assert 'blog/angry-post.html' in depends
        assert 'blog/happy-post.html' in depends

    def test_custom_file_pattern(self):
        # Paginator honours a custom file-naming pattern for page 2+.
        page1 = self.deploy.child('custom_file_pattern.txt')
        page2 = self.deploy.child('custom_file_pattern-2.txt')

        assert File(page1).exists
        assert File(page2).exists
<import_from_stmt>enum Enum<class_stmt>ArgType(Enum)<block_start>""" Represents data types that can be used as arguments in different script functions. """<line_sep>Undefined=-1<line_sep>Empty=0<line_sep>String=1<line_sep>Numeric=2<line_sep>Mixed=3<block_end><class_stmt>ReturnType(Enum)<block_start>""" Represents return types that can be used in script evaluation. """<line_sep>Undefined=-1<line_sep>String=0<line_sep>Numeric=1<line_sep>Dual=2<block_end><class_stmt>FunctionType(Enum)<block_start>""" Represents function types. """<line_sep>Scalar=0<line_sep>Aggregation=1<line_sep>Tensor=2<block_end>
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Functions to create different kinds of systems."""

from typing import Sequence
import attr
from ferminet.utils import elements
from ferminet.utils import units as unit_conversion
import numpy as np


# Default bond lengths in angstrom for some diatomics.
# Bond lengths from either the G3 dataset:
# 1. http://www.cse.anl.gov/OldCHMwebsiteContent/compmat/comptherm.htm
# 2. <NAME>, <NAME>, <NAME>, and <NAME>,
#    J. Chem. Phys, 109, 42 (1998).
# or from NIST (https://cccbdb.nist.gov/diatomicexpbondx.asp).
diatomic_bond_lengths = {
    'BeH': 1.348263,
    'CN': 1.134797,
    'ClF': 1.659091,
    'F2': 1.420604,
    'H2': 0.737164,
    'HCl': 1.2799799,
    'Li2': 2.77306,
    'LiH': 1.639999,
    'N2': 1.129978,
    'NH': 1.039428,
    'CO': 1.150338,
    'BH': 1.2324,
    'PN': 1.491,
    'AlH': 1.648,
    'AlN': 1.786,
}


# Default spin polarisation for a few diatomics of interest.
# Otherwise default to either singlet (doublet) for even (odd) numbers of
# electrons. Units: number of unpaired electrons.
diatomic_spin_polarisation = {
    'B2': 2,
    'O2': 2,
    'NH': 2,
    'AlN': 2,
}


@attr.s
class Atom:  # pytype: disable=invalid-function-definition
  """Atom information for Hamiltonians.

  The nuclear charge is inferred from the symbol if not given, in which case the
  symbol must be the IUPAC symbol of the desired element.

  Attributes:
    symbol: Element symbol.
    coords: An iterable of atomic coordinates. Always a list of floats and in
      bohr after initialisation. Default: place atom at origin.
    charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of
      the given name.
    atomic_number: Atomic number associated with element. Default: atomic
      number of element of the given symbol. Should match charge unless
      fractional nuclear charges are being used.
    units: String giving units of coords. Either bohr or angstrom. Default:
      bohr. If angstrom, coords are converted to be in bohr and units to the
      string 'bohr'.
    coords_angstrom: list of atomic coordinates in angstrom.
    coords_array: Numpy array of atomic coordinates in bohr.
    element: elements.Element corresponding to the symbol.
  """
  symbol = attr.ib()
  # Coordinates are normalised to a tuple of floats by the converter.
  coords = attr.ib(
      converter=lambda xs: tuple(float(x) for x in xs),
      default=(0.0, 0.0, 0.0))  # type: Sequence[float]
  charge = attr.ib(converter=float)
  atomic_number = attr.ib(converter=int)
  units = attr.ib(
      default='bohr', validator=attr.validators.in_(['bohr', 'angstrom']))

  @charge.default
  def _set_default_charge(self):
    # Default nuclear charge comes from the element's atomic number.
    return self.element.atomic_number

  @atomic_number.default
  def _set_default_atomic_number(self):
    return self.element.atomic_number

  def __attrs_post_init__(self):
    # Normalise to bohr immediately so all downstream code sees one unit.
    if self.units == 'angstrom':
      self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]
      self.units = 'bohr'

  @property
  def coords_angstrom(self):
    # Coordinates converted back to angstrom on demand.
    return [unit_conversion.bohr2angstrom(x) for x in self.coords]

  @property
  def coords_array(self):
    # Lazily-built, cached numpy array of the (bohr) coordinates.
    if not hasattr(self, '_coords_arr'):
      self._coords_arr = np.array(self.coords)
    return self._coords_arr

  @property
  def element(self):
    return elements.SYMBOLS[self.symbol]


def atom(symbol, spins=None, charge=0):
  """Return configuration for a single atom.

  Args:
    symbol: The atomic symbol from the periodic table
    spins (optional): A tuple with the number of spin-up and spin-down electrons
    charge (optional): If zero (default), create a neutral atom, otherwise
      create an anion if charge is negative or cation if charge is positive.

  Returns:
    A list with a single Atom object located at zero, and a tuple with the spin
    configuration of the electrons.
  """
  atomic_number = elements.SYMBOLS[symbol].atomic_number
  if charge > atomic_number:
    raise ValueError('Cannot have a cation with charge larger than the '
                     'atomic number. Charge: {}, Atomic Number{}'.format(
                         charge, atomic_number))
  if spins is None:
    # Default to the element's ground-state spin configuration.
    spin_polarisation = elements.ATOMIC_NUMS[atomic_number - charge].spin_config
    nalpha = (atomic_number + spin_polarisation) // 2
    spins = (nalpha, atomic_number - charge - nalpha)
  return [Atom(symbol=symbol, coords=(0.0, 0.0, 0.0))], spins


def diatomic(symbol1, symbol2, bond_length, spins=None, charge=0, units='bohr'):
  """Return configuration for a diatomic molecule."""
  if spins is None:
    atomic_number_1 = elements.SYMBOLS[symbol1].atomic_number
    atomic_number_2 = elements.SYMBOLS[symbol2].atomic_number
    total_charge = atomic_number_1 + atomic_number_2 - charge
    # Singlet for an even electron count, doublet for odd.
    if total_charge % 2 == 0:
      spins = (total_charge // 2, total_charge // 2)
    else:
      spins = ((total_charge + 1) // 2, (total_charge - 1) // 2)

  # Atoms are placed symmetrically about the origin along z.
  return [
      Atom(symbol=symbol1, coords=(0.0, 0.0, bond_length / 2.0), units=units),
      Atom(symbol=symbol2, coords=(0.0, 0.0, -bond_length / 2.0), units=units)
  ], spins


def molecule(symbol, bond_length=0.0, units='bohr'):
  """Hardcoded molecular geometries from the original Fermi Net paper."""
  if symbol in diatomic_bond_lengths:
    if symbol[-1] == '2':
      symbs = [symbol[:-1], symbol[:-1]]
    else:  # Split a
camel-case string on the second capital letter <block_start>split_idx=<none><for_stmt>i range(1 len(symbol))<block_start><if_stmt>split_idx<is><none><and>symbol[i].isupper()<block_start>split_idx=i<block_end><block_end><if_stmt>split_idx<is><none><block_start><raise>ValueError('Cannot find second atomic symbol: {}'.format(symbol))<block_end>symbs=[symbol[:split_idx] symbol[split_idx:]]<block_end>atomic_number_1=elements.SYMBOLS[symbs[0]].atomic_number<line_sep>atomic_number_2=elements.SYMBOLS[symbs[1]].atomic_number<line_sep>total_charge=atomic_number_1+atomic_number_2<if_stmt>symbol<in>diatomic_spin_polarisation<block_start>spin_pol=diatomic_spin_polarisation[symbol]<line_sep>spins=((total_charge+spin_pol)<floordiv>2 (total_charge+spin_pol)<floordiv>2)<block_end><elif_stmt>total_charge%2<eq>0<block_start>spins=(total_charge<floordiv>2 total_charge<floordiv>2)<block_end><else_stmt><block_start>spins=((total_charge+1)<floordiv>2 (total_charge-1)<floordiv>2)<block_end><if_stmt>bond_length<eq>0.0<block_start>bond_length=diatomic_bond_lengths[symbol]<line_sep>units='angstrom'<block_end><return>diatomic(symbs[0] symbs[1] bond_length units=units spins=spins)<block_end><if_stmt>bond_length<ne>0.0<block_start><raise>ValueError('Bond length argument only appropriate for diatomics.')<block_end><if_stmt>symbol<eq>'CH4'<block_start><return>[Atom(symbol='C' coords=(0.0 0.0 0.0) units='bohr') Atom(symbol='H' coords=(1.18886 1.18886 1.18886) units='bohr') Atom(symbol='H' coords=(-1.18886 -1.18886 1.18886) units='bohr') Atom(symbol='H' coords=(1.18886 -1.18886 -1.18886) units='bohr') Atom(symbol='H' coords=(-1.18886 1.18886 -1.18886) units='bohr') ] (5 5)<block_end><if_stmt>symbol<eq>'NH3'<block_start><return>[Atom(symbol='N' coords=(0.0 0.0 0.22013) units='bohr') Atom(symbol='H' coords=(0.0 1.77583 -0.51364) units='bohr') Atom(symbol='H' coords=(1.53791 -0.88791 -0.51364) units='bohr') Atom(symbol='H' coords=(-1.53791 -0.88791 -0.51364) units='bohr') ] (5 
5)<block_end><if_stmt>symbol<in>('C2H4' 'ethene' 'ethylene')<block_start><return>[Atom(symbol='C' coords=(0.0 0.0 1.26135) units='bohr') Atom(symbol='C' coords=(0.0 0.0 -1.26135) units='bohr') Atom(symbol='H' coords=(0.0 1.74390 2.33889) units='bohr') Atom(symbol='H' coords=(0.0 -1.74390 2.33889) units='bohr') Atom(symbol='H' coords=(0.0 1.74390 -2.33889) units='bohr') Atom(symbol='H' coords=(0.0 -1.74390 -2.33889) units='bohr') ] (8 8)<block_end><if_stmt>symbol<in>('C4H6' 'bicyclobutane')<block_start><return>[Atom(symbol='C' coords=(0.0 2.13792 0.58661) units='bohr') Atom(symbol='C' coords=(0.0 -2.13792 0.58661) units='bohr') Atom(symbol='C' coords=(1.41342 0.0 -0.58924) units='bohr') Atom(symbol='C' coords=(-1.41342 0.0 -0.58924) units='bohr') Atom(symbol='H' coords=(0.0 2.33765 2.64110) units='bohr') Atom(symbol='H' coords=(0.0 3.92566 -0.43023) units='bohr') Atom(symbol='H' coords=(0.0 -2.33765 2.64110) units='bohr') Atom(symbol='H' coords=(0.0 -3.92566 -0.43023) units='bohr') Atom(symbol='H' coords=(2.67285 0.0 -2.19514) units='bohr') Atom(symbol='H' coords=(-2.67285 0.0 -2.19514) units='bohr') ] (15 15)<block_end><raise>ValueError('Not a recognized molecule: {}'.format(symbol))<block_end><def_stmt>hn n r charge=0 units='bohr'<block_start>"""Return a hydrogen chain with n atoms and separation r."""<line_sep>m=n-charge# number of electrons <if_stmt>m%2<eq>0<block_start>spins=(m<floordiv>2 m<floordiv>2)<block_end><else_stmt><block_start>spins=((m+1)<floordiv>2 (m-1)<floordiv>2)<block_end>lim=r<times>(n-1)/2.0<line_sep><return>[Atom(symbol='H' coords=(0.0 0.0 z) units=units)<for>z np.linspace(-lim lim n)] spins<block_end><def_stmt>h4_circle r theta units='bohr'<block_start>"""Return 4 hydrogen atoms arranged in a circle, a failure case of CCSD(T)."""<line_sep><return>[Atom(symbol='H' coords=(r<times>np.cos(theta) r<times>np.sin(theta) 0.0) units=units) Atom(symbol='H' coords=(-r<times>np.cos(theta) r<times>np.sin(theta) 0.0) units=units) Atom(symbol='H' 
coords=(r<times>np.cos(theta) -r<times>np.sin(theta) 0.0) units=units) Atom(symbol='H' coords=(-r<times>np.cos(theta) -r<times>np.sin(theta) 0.0) units=units)] (2 2)<block_end>
from awx.main import signals


class TestCleanupDetachedLabels:

    def test_cleanup_detached_labels_on_deleted_parent(self, mocker):
        """Only labels that report themselves as detach candidates are deleted."""
        detachable = mocker.MagicMock()
        detachable.is_candidate_for_detach.return_value = True
        kept = mocker.MagicMock()
        kept.is_candidate_for_detach.return_value = False

        parent = mocker.MagicMock()
        parent.labels.all = mocker.MagicMock(return_value=[detachable, kept])

        signals.cleanup_detached_labels_on_deleted_parent(None, parent)

        # Both labels are consulted, but only the detach candidate is removed.
        detachable.is_candidate_for_detach.assert_called_with()
        kept.is_candidate_for_detach.assert_called_with()
        detachable.delete.assert_called_with()
        kept.delete.assert_not_called()
# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.

Training images are sampled using the provided bounding boxes, and subsequently
cropped to the sampled bounding box. Images are additionally flipped randomly,
then resized to the target output size (without aspect-ratio preservation).

Images used during evaluation are resized (with aspect-ratio preservation) and
centrally cropped.

All images undergo mean color subtraction.

Note that these steps are colloquially referred to as "ResNet preprocessing,"
and they differ from "VGG preprocessing," which does not use bounding boxes
and instead does an aspect-preserving resize followed by random crop during
training. (These both differ from "Inception preprocessing," which introduces
color distortion steps.)
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
from tensorflow.python.ops import control_flow_ops

from preprocessing import autoaugment

_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
_MEAN = [0.485, 0.456, 0.406]
_STD = [0.229, 0.224, 0.225]

# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 256


def central_crop(image, crop_height, crop_width):
  """Performs central crops of the given image list.

  Args:
    image: a 3-D image tensor
    crop_height: the height of the image following the crop.
    crop_width: the width of the image following the crop.

  Returns:
    3-D tensor with cropped image.
  """
  shape = tf.shape(image)
  height, width = shape[0], shape[1]

  amount_to_be_cropped_h = (height - crop_height)
  crop_top = amount_to_be_cropped_h // 2
  amount_to_be_cropped_w = (width - crop_width)
  crop_left = amount_to_be_cropped_w // 2
  return tf.slice(
      image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])


def _mean_image_subtraction(image, means, num_channels):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    num_channels: number of color channels in the image that will be distorted.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  # Note(b/130245863): we explicitly call `broadcast` instead of simply
  # expanding dimensions for better performance.
  means = tf.broadcast_to(means, tf.shape(image))

  return image - means


def _normalization(image, means, stds, num_channels):
  """Normalizes an image: subtracts channel means and divides by channel stds.

  For example:
    means = [0.485, 0.456, 0.406]
    stds = [0.229, 0.224, 0.225]
    image = _normalization(image, means, stds, 3)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
    stds: a C-vector of values each (mean-subtracted) channel is divided by.
    num_channels: number of color channels in the image that will be distorted.

  Returns:
    the normalized image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')

  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  # Note(b/130245863): we explicitly call `broadcast` instead of simply
  # expanding dimensions for better performance.
  means = tf.broadcast_to(means, tf.shape(image))
  stds = tf.broadcast_to(stds, tf.shape(image))

  return (image - means) / stds


def _smallest_size_at_least(height, width, resize_min):
  """Computes new shape with the smallest side equal to `smallest_side`.

  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.

  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    resize_min: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  resize_min = tf.cast(resize_min, tf.float32)

  # Convert to floats to make subsequent calculations go smoothly.
  height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)

  smaller_dim = tf.minimum(height, width)
  scale_ratio = resize_min / smaller_dim

  # Convert back to ints to make heights and widths that TF ops will accept.
  new_height = tf.cast(height * scale_ratio, tf.int32)
  new_width = tf.cast(width * scale_ratio, tf.int32)

  return new_height, new_width


def _aspect_preserving_resize(image, resize_min):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    resize_min: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  shape = tf.shape(image)
  height, width = shape[0], shape[1]

  new_height, new_width = _smallest_size_at_least(height, width, resize_min)

  return _resize_image(image, new_height, new_width)


def _resize_image(image, height, width):
  """Simple wrapper around tf.resize_images.

  This is primarily to make sure we use the same `ResizeMethod` and other
  details each time.

  Args:
    image: A 3-D image `Tensor`.
    height: The target height for the resized image.
    width: The target width for the resized image.

  Returns:
    resized_image: A 3-D tensor containing the resized image. The first two
      dimensions have the shape [height, width].
  """
  return tf.image.resize_images(
      image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
      align_corners=False)


def _ten_crop(image, crop_h, crop_w):
  """Returns the standard ten-crop: 4 corners + center, plus their mirrors."""

  # Renamed from `_crop` to avoid shadowing the module-level `_crop` helper.
  def _crop_glimpse(img, center_offset):
    # input img shape is [h,w,c]
    img = tf.image.extract_glimpse(
        [img], [crop_w, crop_h], offsets=tf.to_float([center_offset]),
        centered=False, normalized=False)
    return tf.squeeze(img, 0)

  def _crop5(img):
    # img shape is [h,w,c]
    im_shape = tf.shape(image)
    height, width = im_shape[0], im_shape[1]
    ch, cw = tf.to_int32(height / 2), tf.to_int32(width / 2)  # center offset
    hh, hw = tf.to_int32(crop_h / 2), tf.to_int32(crop_w / 2)  # half crop size
    ct = _crop_glimpse(img, [ch, cw])
    lu = _crop_glimpse(img, [hh, hw])
    ld = _crop_glimpse(img, [height - hh, hw])
    ru = _crop_glimpse(img, [hh, width - hw])
    rd = _crop_glimpse(img, [height - hh, width - hw])
    return tf.stack([lu, ru, ld, rd, ct])

  lhs = _crop5(image)
  rhs = tf.image.flip_left_right(lhs)
  return tf.concat([lhs, rhs], axis=0)


def preprocess_image_ten_crop(image_buffer, output_height, output_width,
                              num_channels):
  """Decodes, resizes and ten-crops a JPEG buffer for evaluation."""
  image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
  image = _aspect_preserving_resize(image, _RESIZE_MIN)
  images = _ten_crop(image, output_height, output_width)
  images.set_shape([10, output_height, output_width, num_channels])
  images = tf.map_fn(
      lambda x: _mean_image_subtraction(x, _CHANNEL_MEANS, num_channels),
      images)
  return images


def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    the cropped (and resized) image.

  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  return tf.reshape(image, cropped_shape)


def _random_crop_offsets(image_list, crop_height, crop_width):
  """Draws a random crop offset valid for every image in `image_list`.

  This is the shared assertion/offset logic previously duplicated verbatim in
  `_get_random_crop_coord` and `_random_crop`.

  Args:
    image_list: a list of rank-3 image tensors with identical height/width.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    (offset_height, offset_width) int32 scalar tensors.
  """
  # Compute the rank assertions.
  rank_assertions = []
  for i in range(len(image_list)):
    image_rank = tf.rank(image_list[i])
    rank_assert = tf.Assert(
        tf.equal(image_rank, 3),
        ['Wrong rank for tensor %s [expected] [actual]',
         image_list[i].name, 3, image_rank])
    rank_assertions.append(rank_assert)

  image_shape = control_flow_ops.with_dependencies(
      [rank_assertions[0]], tf.shape(image_list[0]))
  image_height = image_shape[0]
  image_width = image_shape[1]
  crop_size_assert = tf.Assert(
      tf.logical_and(
          tf.greater_equal(image_height, crop_height),
          tf.greater_equal(image_width, crop_width)),
      ['Crop size greater than the image size.'])

  asserts = [rank_assertions[0], crop_size_assert]

  for i in range(1, len(image_list)):
    image = image_list[i]
    asserts.append(rank_assertions[i])
    shape = control_flow_ops.with_dependencies([rank_assertions[i]],
                                               tf.shape(image))
    height = shape[0]
    width = shape[1]

    height_assert = tf.Assert(
        tf.equal(height, image_height),
        ['Wrong height for tensor %s [expected][actual]',
         image.name, height, image_height])
    width_assert = tf.Assert(
        tf.equal(width, image_width),
        ['Wrong width for tensor %s [expected][actual]',
         image.name, width, image_width])
    asserts.extend([height_assert, width_assert])

  # Create a random bounding box.
  #
  # Use tf.random_uniform and not numpy.random.rand as doing the former would
  # generate random numbers at graph eval time, unlike the latter which
  # generates random numbers at graph definition time.
  max_offset_height = control_flow_ops.with_dependencies(
      asserts, tf.reshape(image_height - crop_height + 1, []))
  max_offset_width = control_flow_ops.with_dependencies(
      asserts, tf.reshape(image_width - crop_width + 1, []))
  offset_height = tf.random_uniform(
      [], maxval=max_offset_height, dtype=tf.int32)
  offset_width = tf.random_uniform(
      [], maxval=max_offset_width, dtype=tf.int32)
  return offset_height, offset_width


def _get_random_crop_coord(image_list, crop_height, crop_width):
  """Computes random crop coordinates shared by all images in the list.

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    A stacked int32 tensor [offset_height, offset_width, crop_height,
    crop_width].

  Raises:
    ValueError: if there are multiple image inputs provided with different
      size or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')
  offset_height, offset_width = _random_crop_offsets(
      image_list, crop_height, crop_width)
  return tf.stack([offset_height, offset_width, crop_height, crop_width])


def _random_crop(image_list, crop_height, crop_width):
  """Crops the given list of images.

  The function applies the same crop to each image in the list. This can be
  effectively applied when there are multiple image inputs of the same
  dimension such as:

    image, depths, normals = _random_crop([image, depths, normals], 120, 150)

  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the new height.
    crop_width: the new width.

  Returns:
    the image_list with cropped images.

  Raises:
    ValueError: if there are multiple image inputs provided with different
      size or the images are smaller than the crop dimensions.
  """
  if not image_list:
    raise ValueError('Empty image_list.')
  offset_height, offset_width = _random_crop_offsets(
      image_list, crop_height, crop_width)
  return [_crop(image, offset_height, offset_width,
                crop_height, crop_width) for image in image_list]


def pad_shorter(image):
  """Zero-pads the shorter side so the image becomes square."""
  shape = tf.shape(image)
  height, width = shape[0], shape[1]
  larger_dim = tf.maximum(height, width)
  h1 = (larger_dim - height) // 2
  h2 = (larger_dim - height) - h1
  w1 = tf.maximum((larger_dim - width) // 2, 0)
  w2 = (larger_dim - width) - w1
  pad_shape = [[h1, h2], [w1, w2], [0, 0]]
  return tf.pad(image, pad_shape)


def apply_with_random_selector(x, func, num_cases):
  """Computes func(x, sel), with sel sampled from [0...num_cases-1].

  Args:
    x: input Tensor.
    func: Python function to apply.
    num_cases: Python int32, number of cases to sample sel from.

  Returns:
    The result of func(x, sel), where func receives the value of the selector
    as a python integer, but sel is sampled dynamically.
  """
  sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
  # Pass the real x only to one of the func calls.
  return control_flow_ops.merge(
      [func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
       for case in range(num_cases)])[0]


def resize_func(image, size, method):
  """Resizes either via resize-then-random-crop (method 0) or direct resize."""
  if method == 0:
    image = _resize_image(image, _RESIZE_MIN, _RESIZE_MIN)
    image = _random_crop([image], size[0], size[1])[0]
  else:
    image = _resize_image(image, size[0], size[1])
  return image


def preprocess_image(image_buffer, output_height, output_width, num_channels,
                     dct_method='', is_training=False, autoaugment_type=None,
                     eval_large_resolution=True):
  """Decodes and preprocesses a JPEG buffer for training or evaluation."""
  if is_training:
    image = tf.image.decode_jpeg(image_buffer, channels=num_channels,
                                 dct_method=dct_method)
    image = apply_with_random_selector(
        image,
        lambda x, method: resize_func(x, [output_height, output_width],
                                      method),
        num_cases=2)
    image.set_shape([output_height, output_width, 3])
    image = tf.to_float(image)
    image = tf.image.random_flip_left_right(image)
    if autoaugment_type:
      tf.logging.info('Apply AutoAugment policy {}'.format(autoaugment_type))
      image = tf.clip_by_value(image, 0.0, 255.0)
      # AutoAugment ops expect uint8 pixels; round-trip through uint8.
      dtype = image.dtype
      image = tf.cast(image, dtype=tf.uint8)
      image = autoaugment.distort_image_with_autoaugment(
          image, autoaugment_type)
      image = tf.cast(image, dtype=dtype)
    image.set_shape([output_height, output_width, num_channels])
  else:
    if eval_large_resolution:
      # Evaluate at a larger resolution (1/0.875 of the target size).
      output_height = int(output_height * (1.0 / 0.875))
      output_width = int(output_width * (1.0 / 0.875))
    # For validation, we want to decode, resize, then just crop the middle.
    image = tf.image.decode_jpeg(image_buffer, channels=num_channels,
                                 dct_method=dct_method)
    image = _resize_image(image, output_height, output_width)
    image = tf.to_float(image)
    image.set_shape([output_height, output_width, num_channels])

  return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
# Copyright 2008-2015 Nokia Networks
# Copyright 2016-    Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os.path

from robot.running import ArgInfo, ArgumentSpec
from robot.errors import DataError

from .model import LibraryDoc, KeywordDoc


class JsonDocBuilder:
    """Builds a LibraryDoc model from a libdoc JSON spec file."""

    def build(self, path):
        # Parse the spec file and delegate model construction.
        return self.build_from_dict(self._parse_spec_json(path))

    def build_from_dict(self, spec):
        """Create a LibraryDoc (with inits and keywords) from a spec dict."""
        libdoc = LibraryDoc(name=spec['name'],
                            doc=spec['doc'],
                            version=spec['version'],
                            type=spec['type'],
                            scope=spec['scope'],
                            doc_format=spec['docFormat'],
                            source=spec['source'],
                            lineno=int(spec.get('lineno', -1)))
        for type_kind in ('enums', 'typedDicts'):
            libdoc.data_types.update(spec['dataTypes'].get(type_kind, []))
        libdoc.inits = [self._create_keyword(init) for init in spec['inits']]
        libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
        return libdoc

    def _parse_spec_json(self, path):
        # Fail early with a DataError rather than a raw OSError.
        if not os.path.isfile(path):
            raise DataError("Spec file '%s' does not exist." % path)
        with open(path) as json_source:
            return json.load(json_source)

    def _create_keyword(self, kw):
        return KeywordDoc(name=kw.get('name'),
                          args=self._create_arguments(kw['args']),
                          doc=kw['doc'],
                          shortdoc=kw['shortdoc'],
                          tags=kw['tags'],
                          source=kw['source'],
                          lineno=int(kw.get('lineno', -1)))

    def _create_arguments(self, arguments):
        """Reconstruct an ArgumentSpec from serialized argument dicts."""
        spec = ArgumentSpec()
        # Map each argument kind to the action that records it on the spec;
        # marker kinds carry no name and are simply ignored.
        ignore = lambda value: None
        handlers = {
            ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
            ArgInfo.POSITIONAL_ONLY_MARKER: ignore,
            ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
            ArgInfo.VAR_POSITIONAL:
                lambda value: setattr(spec, 'var_positional', value),
            ArgInfo.NAMED_ONLY_MARKER: ignore,
            ArgInfo.NAMED_ONLY: spec.named_only.append,
            ArgInfo.VAR_NAMED:
                lambda value: setattr(spec, 'var_named', value),
        }
        for arg in arguments:
            name = arg['name']
            handlers[arg['kind']](name)
            default = arg.get('defaultValue')
            if default is not None:
                spec.defaults[name] = default
            if not spec.types:
                spec.types = {}
            spec.types[name] = tuple(arg['types'])
        return spec
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Converter to convert a problem with inequality constraints to unconstrained with penalty terms."""

import logging
from typing import Optional, Union, Tuple, List, Dict

import numpy as np

from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.constraint import Constraint, ConstraintSense
from ..problems.quadratic_objective import QuadraticObjective
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable

logger = logging.getLogger(__name__)


class LinearInequalityToPenalty(QuadraticProgramConverter):
    r"""Convert linear inequality constraints to penalty terms of the objective function.

    There are some linear constraints which do not require slack variables to
    construct penalty terms [1]. This class supports the following inequality
    constraints.

    .. math::

        \begin{array}{}
        \text { Inequality constraint } & & \text { Penalty term } \\
        x \leq y & \rightarrow  & P(x-x y) \\
        x \geq y & \rightarrow  & P(y-x y) \\
        \sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
        \sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
        \end{array}

    Note that x, y, z and :math:`x_i` are binary variables, and P is a penalty factor,
    where the value of P is automatically determined or supplied by users.

    If constraints match with any of the patterns, they are converted into penalty terms and added
    to the objective function. Otherwise, constraints are kept as is.

    References:
        [1]: <NAME>, et al. (2019),
             A Tutorial on Formulating and Using QUBO Models,
             `arXiv:1811.11538 <https://arxiv.org/abs/1811.11538>`_.
    """

    def __init__(self, penalty: Optional[float] = None) -> None:
        """
        Args:
            penalty: Penalty factor to scale equality constraints that are added to objective.
                     If None is passed, a penalty factor will be automatically calculated on
                     every conversion.
        """
        # Bookkeeping used by ``interpret`` and the conversion itself.
        self._src_num_vars: Optional[int] = None
        self._dst: Optional[QuadraticProgram] = None
        self._penalty: Optional[float] = penalty
        # When no penalty is supplied we recompute one per conversion.
        self._should_define_penalty: bool = penalty is None

    def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
        r"""Convert inequality constraints into penalty terms of the objective function.

        This methods converts the following patterns where x, y, and :math:`x_i` are binary
        variables and P is a penalty factor.

        .. math::

            \begin{array}{}
            \text { Inequality constraint } & & \text { Penalty term } \\
            x \leq y & \rightarrow  & P(x-x y) \\
            x \geq y & \rightarrow  & P(y-x y) \\
            \sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
            \sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow
            & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
            \end{array}

        Args:
            problem: The problem to be solved.

        Returns:
            The converted problem

        Raises:
            QiskitOptimizationError: If an unsupported-type variable exists.
        """
        # create empty QuadraticProgram model
        self._src_num_vars = problem.get_num_vars()
        self._dst = QuadraticProgram(name=problem.name)

        # If no penalty was given, set the penalty coefficient by _auto_define_penalty()
        if self._should_define_penalty:
            penalty = self._auto_define_penalty(problem)
        else:
            penalty = self._penalty

        # Set variables
        for x in problem.variables:
            if x.vartype == Variable.Type.CONTINUOUS:
                self._dst.continuous_var(x.lowerbound, x.upperbound, x.name)
            elif x.vartype == Variable.Type.BINARY:
                self._dst.binary_var(x.name)
            elif x.vartype == Variable.Type.INTEGER:
                self._dst.integer_var(x.lowerbound, x.upperbound, x.name)
            else:
                raise QiskitOptimizationError(f"Unsupported vartype: {x.vartype}")

        # get original objective terms
        offset = problem.objective.constant
        linear = problem.objective.linear.to_dict()
        quadratic = problem.objective.quadratic.to_dict()
        sense = problem.objective.sense.value

        # convert linear constraints into penalty terms
        for constraint in problem.linear_constraints:
            # special constraint check function here
            if not self._is_matched_constraint(problem, constraint):
                # Not one of the supported patterns: keep the constraint as-is.
                self._dst.linear_constraint(
                    constraint.linear.coefficients,
                    constraint.sense,
                    constraint.rhs,
                    constraint.name,
                )
                continue

            conv_offset, conv_linear, conv_quadratic, varmap = self._conversion_table(constraint)

            # constant part
            offset += sense * penalty * conv_offset

            # linear parts of penalty
            for j, j_2 in varmap.items():
                # if j already exists in the linear terms dic, add a penalty term
                # into existing value else create new key and value in the linear_term dict
                if conv_linear[j] != 0:
                    linear[j_2] = linear.get(j_2, 0.0) + sense * penalty * conv_linear[j]

            # quadratic parts of penalty
            for j, j_2 in varmap.items():
                for k in range(j, len(varmap)):
                    # if j and k already exist in the quadratic terms dict,
                    # add a penalty term into existing value
                    # else create new key and value in the quadratic term dict
                    if conv_quadratic[j][k] != 0:
                        tup = (j_2, varmap[k])
                        quadratic[tup] = (
                            quadratic.get(tup, 0.0) + sense * penalty * conv_quadratic[j][k]
                        )

        # Copy quadratic_constraints
        for quadratic_constraint in problem.quadratic_constraints:
            self._dst.quadratic_constraint(
                quadratic_constraint.linear.coefficients,
                quadratic_constraint.quadratic.coefficients,
                quadratic_constraint.sense,
                quadratic_constraint.rhs,
                quadratic_constraint.name,
            )

        if problem.objective.sense == QuadraticObjective.Sense.MINIMIZE:
            self._dst.minimize(offset, linear, quadratic)
        else:
            self._dst.maximize(offset, linear, quadratic)

        # Update the penalty to the one just used
        self._penalty = penalty

        return self._dst

    @staticmethod
    def _conversion_table(constraint) -> Tuple[int, np.ndarray, np.ndarray, Dict[int, int]]:
        """Construct conversion matrix for special constraint.

        Returns:
            Return conversion table which is used to construct penalty term in main function.

        Raises:
            QiskitOptimizationError: if the constraint is invalid.
        """
        vars_dict = constraint.linear.to_dict()
        coeffs = list(vars_dict.values())
        # Map local (0-based) indices to the problem's variable indices.
        varmap = dict(enumerate(vars_dict.keys()))
        rhs = constraint.rhs
        sense = constraint.sense
        num_vars = len(vars_dict)

        # initialize return values, these are used for converted offset, linear
        # and quadratic terms
        offset = 0
        linear = np.zeros(num_vars, dtype=int)
        quadratic = np.zeros((num_vars, num_vars), dtype=int)

        # rhs = num_vars - 1 correspond to multiple variable with >= n - 1 case.
        if sense == ConstraintSense.GE and rhs == num_vars - 1:
            # x_1 + ... + x_n >= n - 1
            # The number of offset is combination ( nC2 )
            offset = num_vars * (num_vars - 1) // 2
            linear = np.full(num_vars, 1 - num_vars, dtype=int)
            quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
        elif sense == ConstraintSense.LE and rhs == 1:
            # x_1 + ... + x_n <= 1
            quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
        elif rhs == 0:
            if num_vars != 2:
                raise QiskitOptimizationError(
                    f"Internal error: invalid number of variables {num_vars} {constraint.name}"
                )
            quadratic = np.array([[0, -1], [0, 0]])
            if sense == ConstraintSense.GE:
                # x >= y case
                if coeffs[0] < 0.0:
                    linear[0] = 1
                else:
                    linear[1] = 1
            elif sense == ConstraintSense.LE:
                # x <= y case
                if coeffs[0] > 0.0:
                    linear[0] = 1
                else:
                    linear[1] = 1
        else:
            raise QiskitOptimizationError(f"Internal error: invalid constraint {constraint.name}")

        return offset, linear, quadratic, varmap

    @staticmethod
    def _is_matched_constraint(problem, constraint) -> bool:
        """Determine if constraint is special or not.

        Returns:
            True: when constraint is special
            False: when constraint is not special
        """
        params = constraint.linear.to_dict()
        num_vars = len(params)
        rhs = constraint.rhs
        sense = constraint.sense
        coeff_array = np.array(list(params.values()))

        # Binary parameter?
        if any(problem.variables[i].vartype != Variable.Type.BINARY for i in params.keys()):
            return False

        if num_vars == 2 and rhs == 0:
            if sense in (Constraint.Sense.LE, Constraint.Sense.GE):
                # x-y<=0
                # x-y>=0
                return coeff_array.min() == -1.0 and coeff_array.max() == 1.0
        elif num_vars >= 2:
            if sense == Constraint.Sense.LE and rhs == 1:
                if all(i == 1 for i in params.values()):
                    # x1+x2+...<=1
                    return True
            elif sense == Constraint.Sense.GE and rhs == num_vars - 1:
                if all(i == 1 for i in params.values()):
                    # x1+x2+...>=n-1
                    return True

        return False

    @staticmethod
    def _auto_define_penalty(problem) -> float:
        """Automatically define the penalty coefficient.

        Returns:
            Return the minimum valid penalty factor calculated
            from the upper bound and the lower bound of the objective function.
            If a constraint has a float coefficient,
            return the default value for the penalty factor.
        """
        default_penalty = 1e5

        # Check coefficients of constraints.
        # If a constraint has a float coefficient, return the default value for the penalty factor.
        terms = []
        for constraint in problem.linear_constraints:
            terms.append(constraint.rhs)
            terms.extend(constraint.linear.to_array().tolist())
        if any(isinstance(term, float) and not term.is_integer() for term in terms):
            logger.warning(
                "Warning: Using %f for the penalty coefficient because "
                "a float coefficient exists in constraints. \n"
                "The value could be too small. "
                "If so, set the penalty coefficient manually.",
                default_penalty,
            )
            return default_penalty

        lin_b = problem.objective.linear.bounds
        quad_b = problem.objective.quadratic.bounds
        return 1.0 + (lin_b.upperbound - lin_b.lowerbound) + (quad_b.upperbound - quad_b.lowerbound)

    def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
        """Convert the result of the converted problem back to that of the original problem

        Args:
            x: The result of the converted problem or the given result in case of FAILURE.

        Returns:
            The result of the original problem.

        Raises:
            QiskitOptimizationError: if the number of variables in the result differs from
                                     that of the original problem.
        """
        if len(x) != self._src_num_vars:
            raise QiskitOptimizationError(
                f"The number of variables in the passed result ({len(x)}) differs from "
                f"that of the original problem ({self._src_num_vars})."
            )
        return np.asarray(x)

    @property
    def penalty(self) -> Optional[float]:
        """Returns the penalty factor used in conversion.

        Returns:
            The penalty factor used in conversion.
        """
        return self._penalty

    @penalty.setter
    def penalty(self, penalty: Optional[float]) -> None:
        """Set a new penalty factor.

        Args:
            penalty: The new penalty factor.
                     If None is passed, a penalty factor will be automatically calculated
                     on every conversion.
        """
        self._penalty = penalty
        self._should_define_penalty = penalty is None
from django.urls import path

from . import views

# URL routes for the Draugiem social-auth provider: the login redirect
# endpoint and the OAuth-style callback endpoint.
urlpatterns = [
    path("draugiem/login/", views.login, name="draugiem_login"),
    path("draugiem/callback/", views.callback, name="draugiem_callback"),
]
"""Input transformer machinery to support IPython special syntax. This includes the machinery to recognise and transform ``%magic`` commands, ``!system`` commands, ``help?`` querying, prompt stripping, and so forth. Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were deprecated in 7.0. """<line_sep># Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. <import_from_stmt>codeop compile_command<import_stmt>re<import_stmt>tokenize<import_from_stmt>typing List Tuple Union<import_stmt>warnings<line_sep>_indent_re=re.compile(r'^[ \t]+')<def_stmt>leading_indent lines<block_start>"""Remove leading indentation. If the first line starts with a spaces or tabs, the same whitespace will be removed from each following line in the cell. """<if_stmt><not>lines<block_start><return>lines<block_end>m=_indent_re.match(lines[0])<if_stmt><not>m<block_start><return>lines<block_end>space=m.group(0)<line_sep>n=len(space)<line_sep><return>[l[n:]<if>l.startswith(space)<else>l<for>l lines]<block_end><class_stmt>PromptStripper<block_start>"""Remove matching input prompts from a block of input. Parameters ---------- prompt_re : regular expression A regular expression matching any input prompt (including continuation, e.g. ``...``) initial_re : regular expression, optional A regular expression matching only the initial prompt, but not continuation. If no initial expression is given, prompt_re will be used everywhere. Used mainly for plain Python prompts (``>>>``), where the continuation prompt ``...`` is a valid Python expression in Python 3, so shouldn't be stripped. If initial_re and prompt_re differ, only initial_re will be tested against the first line. If any prompt is found on the first two lines, prompts will be stripped from the rest of the block. 
"""<def_stmt>__init__ self prompt_re initial_re=<none><block_start>self.prompt_re=prompt_re<line_sep>self.initial_re=initial_re<or>prompt_re<block_end><def_stmt>_strip self lines<block_start><return>[self.prompt_re.sub('' l count=1)<for>l lines]<block_end><def_stmt>__call__ self lines<block_start><if_stmt><not>lines<block_start><return>lines<block_end><if_stmt>self.initial_re.match(lines[0])<or>(len(lines)<g>1<and>self.prompt_re.match(lines[1]))<block_start><return>self._strip(lines)<block_end><return>lines<block_end><block_end>classic_prompt=PromptStripper(prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)') initial_re=re.compile(r'^>>>( |$)'))<line_sep>ipython_prompt=PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))<def_stmt>cell_magic lines<block_start><if_stmt><not>lines<or><not>lines[0].startswith('%%')<block_start><return>lines<block_end><if_stmt>re.match(r'%%\w+\?' lines[0])# This case will be handled by help_end <block_start><return>lines<block_end>magic_name,_,first_line=lines[0][2:-1].partition(' ')<line_sep>body=''.join(lines[1:])<line_sep><return>['get_ipython().run_cell_magic(%r, %r, %r)\n'%(magic_name first_line body)]<block_end><def_stmt>_find_assign_op token_line<arrow>Union[int <none>]<block_start>"""Get the index of the first assignment in the line ('=' not inside brackets) Note: We don't try to support multiple special assignment (a = b = %foo) """<line_sep>paren_level=0<for_stmt>i,ti enumerate(token_line)<block_start>s=ti.string<if_stmt>s<eq>'='<and>paren_level<eq>0<block_start><return>i<block_end><if_stmt>s<in>{'(' '[' '{'}<block_start>paren_level<augadd>1<block_end><elif_stmt>s<in>{')' ']' '}'}<block_start><if_stmt>paren_level<g>0<block_start>paren_level<augsub>1<block_end><block_end><block_end><block_end><def_stmt>find_end_of_continued_line lines start_line:int<block_start>"""Find the last line of a line explicitly extended using backslashes. Uses 0-indexed line numbers. 
"""<line_sep>end_line=start_line<while_stmt>lines[end_line].endswith('\\\n')<block_start>end_line<augadd>1<if_stmt>end_line<ge>len(lines)<block_start><break><block_end><block_end><return>end_line<block_end><def_stmt>assemble_continued_line lines start:Tuple[int int] end_line:int<block_start>r"""Assemble a single line from multiple continued line pieces Continued lines are lines ending in ``\``, and the line following the last ``\`` in the block. For example, this code continues over multiple lines:: if (assign_ix is not None) \ and (len(line) >= assign_ix + 2) \ and (line[assign_ix+1].string == '%') \ and (line[assign_ix+2].type == tokenize.NAME): This statement contains four continued line pieces. Assembling these pieces into a single line would give:: if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[... This uses 0-indexed line numbers. *start* is (lineno, colno). Used to allow ``%magic`` and ``!system`` commands to be continued over multiple lines. """<line_sep>parts=[lines[start[0]][start[1]:]]+lines[start[0]+1:end_line+1]<line_sep><return>' '.join([p[:-2]<for>p parts[:-1]]# Strip backslash+newline +[parts[-1][:-1]])<line_sep># Strip newline from last line <block_end><class_stmt>TokenTransformBase<block_start>"""Base class for transformations which examine tokens. Special syntax should not be transformed when it occurs inside strings or comments. This is hard to reliably avoid with regexes. The solution is to tokenise the code as Python, and recognise the special syntax in the tokens. IPython's special syntax is not valid Python syntax, so tokenising may go wrong after the special syntax starts. These classes therefore find and transform *one* instance of special syntax at a time into regular Python syntax. After each transformation, tokens are regenerated to find the next piece of special syntax. Subclasses need to implement one class method (find) and one regular method (transform). 
The priority attribute can select which transformation to apply if multiple transformers match in the same place. Lower numbers have higher priority. This allows "%magic?" to be turned into a help call rather than a magic call. """<line_sep># Lower numbers -> higher priority (for matches in the same location) priority=10<def_stmt>sortby self<block_start><return>self.start_line self.start_col self.priority<block_end><def_stmt>__init__ self start<block_start>self.start_line=start[0]-1# Shift from 1-index to 0-index self.start_col=start[1]<block_end>@classmethod<def_stmt>find cls tokens_by_line<block_start>"""Find one instance of special syntax in the provided tokens. Tokens are grouped into logical lines for convenience, so it is easy to e.g. look at the first token of each line. *tokens_by_line* is a list of lists of tokenize.TokenInfo objects. This should return an instance of its class, pointing to the start position it has found, or None if it found no match. """<line_sep><raise>NotImplementedError<block_end><def_stmt>transform self lines:List[str]<block_start>"""Transform one instance of special syntax found by ``find()`` Takes a list of strings representing physical lines, returns a similar list of transformed lines. """<line_sep><raise>NotImplementedError<block_end><block_end><class_stmt>MagicAssign(TokenTransformBase)<block_start>"""Transformer for assignments from magics (a = %foo)"""<line_sep>@classmethod<def_stmt>find cls tokens_by_line<block_start>"""Find the first magic assignment (a = %foo) in the cell. """<for_stmt>line tokens_by_line<block_start>assign_ix=_find_assign_op(line)<if_stmt>(assign_ix<is><not><none>)<and>(len(line)<ge>assign_ix+2)<and>(line[assign_ix+1].string<eq>'%')<and>(line[assign_ix+2].type<eq>tokenize.NAME)<block_start><return>cls(line[assign_ix+1].start)<block_end><block_end><block_end><def_stmt>transform self lines:List[str]<block_start>"""Transform a magic assignment found by the ``find()`` classmethod. 
"""<line_sep>start_line,start_col=self.start_line self.start_col<line_sep>lhs=lines[start_line][:start_col]<line_sep>end_line=find_end_of_continued_line(lines start_line)<line_sep>rhs=assemble_continued_line(lines (start_line start_col) end_line)<assert_stmt>rhs.startswith('%') rhs<line_sep>magic_name,_,args=rhs[1:].partition(' ')<line_sep>lines_before=lines[:start_line]<line_sep>call="get_ipython().run_line_magic({!r}, {!r})".format(magic_name args)<line_sep>new_line=lhs+call+'\n'<line_sep>lines_after=lines[end_line+1:]<line_sep><return>lines_before+[new_line]+lines_after<block_end><block_end><class_stmt>SystemAssign(TokenTransformBase)<block_start>"""Transformer for assignments from system commands (a = !foo)"""<line_sep>@classmethod<def_stmt>find cls tokens_by_line<block_start>"""Find the first system assignment (a = !foo) in the cell. """<for_stmt>line tokens_by_line<block_start>assign_ix=_find_assign_op(line)<if_stmt>(assign_ix<is><not><none>)<and><not>line[assign_ix].line.strip().startswith('=')<and>(len(line)<ge>assign_ix+2)<and>(line[assign_ix+1].type<eq>tokenize.ERRORTOKEN)<block_start>ix=assign_ix+1<while_stmt>ix<l>len(line)<and>line[ix].type<eq>tokenize.ERRORTOKEN<block_start><if_stmt>line[ix].string<eq>'!'<block_start><return>cls(line[ix].start)<block_end><elif_stmt><not>line[ix].string.isspace()<block_start><break><block_end>ix<augadd>1<block_end><block_end><block_end><block_end><def_stmt>transform self lines:List[str]<block_start>"""Transform a system assignment found by the ``find()`` classmethod. 
"""<line_sep>start_line,start_col=self.start_line self.start_col<line_sep>lhs=lines[start_line][:start_col]<line_sep>end_line=find_end_of_continued_line(lines start_line)<line_sep>rhs=assemble_continued_line(lines (start_line start_col) end_line)<assert_stmt>rhs.startswith('!') rhs<line_sep>cmd=rhs[1:]<line_sep>lines_before=lines[:start_line]<line_sep>call="get_ipython().getoutput({!r})".format(cmd)<line_sep>new_line=lhs+call+'\n'<line_sep>lines_after=lines[end_line+1:]<line_sep><return>lines_before+[new_line]+lines_after<block_end><block_end># The escape sequences that define the syntax transformations IPython will # apply to user input. These can NOT be just changed here: many regular # expressions and other parts of the code may use their hardcoded values, and # for all intents and purposes they constitute the 'IPython syntax', so they # should be considered fixed. ESC_SHELL='!'# Send line to underlying system shell ESC_SH_CAP='!!'# Send line to system shell and capture output ESC_HELP='?'# Find information about object ESC_HELP2='??'# Find extra-detailed information about object ESC_MAGIC='%'# Call magic function ESC_MAGIC2='%%'# Call cell-magic function ESC_QUOTE=','# Split args on whitespace, quote each as string and call ESC_QUOTE2=';'# Quote all args as a single string, call ESC_PAREN='/'# Call first argument with rest of line as arguments ESCAPE_SINGLES={'!' '?' '%' ',' ';' '/'}<line_sep>ESCAPE_DOUBLES={'!!' '??'}# %% (cell magic) is handled separately <def_stmt>_make_help_call target esc next_input=<none><block_start>"""Prepares a pinfo(2)/psearch call from a target name and the escape (i.e. ? 
or ??)"""<line_sep>method='pinfo2'<if>esc<eq>'??'<else>'psearch'<if>'*'<in>target<else>'pinfo'<line_sep>arg=" ".join([method target])<line_sep>#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args) t_magic_name,_,t_magic_arg_s=arg.partition(' ')<line_sep>t_magic_name=t_magic_name.lstrip(ESC_MAGIC)<if_stmt>next_input<is><none><block_start><return>'get_ipython().run_line_magic(%r, %r)'%(t_magic_name t_magic_arg_s)<block_end><else_stmt><block_start><return>'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)'%(next_input t_magic_name t_magic_arg_s)<block_end><block_end><def_stmt>_tr_help content<block_start>"""Translate lines escaped with: ? A naked help line should fire the intro help screen (shell.show_usage()) """<if_stmt><not>content<block_start><return>'get_ipython().show_usage()'<block_end><return>_make_help_call(content '?')<block_end><def_stmt>_tr_help2 content<block_start>"""Translate lines escaped with: ?? A naked help line should fire the intro help screen (shell.show_usage()) """<if_stmt><not>content<block_start><return>'get_ipython().show_usage()'<block_end><return>_make_help_call(content '??')<block_end><def_stmt>_tr_magic content<block_start>"Translate lines escaped with a percent sign: %"<line_sep>name,_,args=content.partition(' ')<line_sep><return>'get_ipython().run_line_magic(%r, %r)'%(name args)<block_end><def_stmt>_tr_quote content<block_start>"Translate lines escaped with a comma: ,"<line_sep>name,_,args=content.partition(' ')<line_sep><return>'%s("%s")'%(name '", "'.join(args.split()))<block_end><def_stmt>_tr_quote2 content<block_start>"Translate lines escaped with a semicolon: ;"<line_sep>name,_,args=content.partition(' ')<line_sep><return>'%s("%s")'%(name args)<block_end><def_stmt>_tr_paren content<block_start>"Translate lines escaped with a slash: /"<line_sep>name,_,args=content.partition(' ')<line_sep><return>'%s(%s)'%(name ", 
".join(args.split()))<block_end>tr={ESC_SHELL:'get_ipython().system({!r})'.format ESC_SH_CAP:'get_ipython().getoutput({!r})'.format ESC_HELP:_tr_help ESC_HELP2:_tr_help2 ESC_MAGIC:_tr_magic ESC_QUOTE:_tr_quote ESC_QUOTE2:_tr_quote2 ESC_PAREN:_tr_paren}<class_stmt>EscapedCommand(TokenTransformBase)<block_start>"""Transformer for escaped commands like %foo, !foo, or /foo"""<line_sep>@classmethod<def_stmt>find cls tokens_by_line<block_start>"""Find the first escaped command (%foo, !foo, etc.) in the cell. """<for_stmt>line tokens_by_line<block_start><if_stmt><not>line<block_start><continue><block_end>ix=0<line_sep>ll=len(line)<while_stmt>ll<g>ix<and>line[ix].type<in>{tokenize.INDENT tokenize.DEDENT}<block_start>ix<augadd>1<block_end><if_stmt>ix<ge>ll<block_start><continue><block_end><if_stmt>line[ix].string<in>ESCAPE_SINGLES<block_start><return>cls(line[ix].start)<block_end><block_end><block_end><def_stmt>transform self lines<block_start>"""Transform an escaped line found by the ``find()`` classmethod. """<line_sep>start_line,start_col=self.start_line self.start_col<line_sep>indent=lines[start_line][:start_col]<line_sep>end_line=find_end_of_continued_line(lines start_line)<line_sep>line=assemble_continued_line(lines (start_line start_col) end_line)<if_stmt>len(line)<g>1<and>line[:2]<in>ESCAPE_DOUBLES<block_start>escape,content=line[:2] line[2:]<block_end><else_stmt><block_start>escape,content=line[:1] line[1:]<block_end><if_stmt>escape<in>tr<block_start>call=tr[escape](content)<block_end><else_stmt><block_start>call=''<block_end>lines_before=lines[:start_line]<line_sep>new_line=indent+call+'\n'<line_sep>lines_after=lines[end_line+1:]<line_sep><return>lines_before+[new_line]+lines_after<block_end><block_end>_help_end_re=re.compile(r"""(%{0,2} [a-zA-Z_*][\w*]* # Variable name (\.[a-zA-Z_*][\w*]*)* # .etc.etc ) (\?\??)$ # ? or ?? """ re.VERBOSE)<class_stmt>HelpEnd(TokenTransformBase)<block_start>"""Transformer for help syntax: obj? 
and obj??"""<line_sep># This needs to be higher priority (lower number) than EscapedCommand so # that inspecting magics (%foo?) works. priority=5<def_stmt>__init__ self start q_locn<block_start>super().__init__(start)<line_sep>self.q_line=q_locn[0]-1# Shift from 1-indexed to 0-indexed self.q_col=q_locn[1]<block_end>@classmethod<def_stmt>find cls tokens_by_line<block_start>"""Find the first help command (foo?) in the cell. """<for_stmt>line tokens_by_line# Last token is NEWLINE; look at last but one <block_start><if_stmt>len(line)<g>2<and>line[-2].string<eq>'?'# Find the first token that's not INDENT/DEDENT <block_start>ix=0<while_stmt>line[ix].type<in>{tokenize.INDENT tokenize.DEDENT}<block_start>ix<augadd>1<block_end><return>cls(line[ix].start line[-2].start)<block_end><block_end><block_end><def_stmt>transform self lines<block_start>"""Transform a help command found by the ``find()`` classmethod. """<line_sep>piece=''.join(lines[self.start_line:self.q_line+1])<line_sep>indent,content=piece[:self.start_col] piece[self.start_col:]<line_sep>lines_before=lines[:self.start_line]<line_sep>lines_after=lines[self.q_line+1:]<line_sep>m=_help_end_re.search(content)<if_stmt><not>m<block_start><raise>SyntaxError(content)<block_end><assert_stmt>m<is><not><none> content<line_sep>target=m.group(1)<line_sep>esc=m.group(3)<line_sep># If we're mid-command, put it back on the next prompt for the user. next_input=<none><if_stmt>(<not>lines_before)<and>(<not>lines_after)<and>content.strip()<ne>m.group(0)<block_start>next_input=content.rstrip('?\n')<block_end>call=_make_help_call(target esc next_input=next_input)<line_sep>new_line=indent+call+'\n'<line_sep><return>lines_before+[new_line]+lines_after<block_end><block_end><def_stmt>make_tokens_by_line lines:List[str]<block_start>"""Tokenize a series of lines and group tokens by line. The tokens for a multiline Python string or expression are grouped as one line. 
All lines except the last lines should keep their line ending ('\\n', '\\r\\n') for this to properly work. Use `.splitlines(keeplineending=True)` for example when passing block of text to this function. """<line_sep># NL tokens are used inside multiline expressions, but also after blank # lines or comments. This is intentional - see https://bugs.python.org/issue17061 # We want to group the former case together but split the latter, so we # track parentheses level, similar to the internals of tokenize. NEWLINE,NL=tokenize.NEWLINE tokenize.NL<line_sep>tokens_by_line=[[]]<if_stmt>len(lines)<g>1<and><not>lines[0].endswith(('\n' '\r' '\r\n' '\x0b' '\x0c'))<block_start>warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")<block_end>parenlev=0<try_stmt><block_start><for_stmt>token tokenize.generate_tokens(iter(lines).__next__)<block_start>tokens_by_line[-1].append(token)<if_stmt>(token.type<eq>NEWLINE)<or>((token.type<eq>NL)<and>(parenlev<le>0))<block_start>tokens_by_line.append([])<block_end><elif_stmt>token.string<in>{'(' '[' '{'}<block_start>parenlev<augadd>1<block_end><elif_stmt>token.string<in>{')' ']' '}'}<block_start><if_stmt>parenlev<g>0<block_start>parenlev<augsub>1<block_end><block_end><block_end><block_end><except_stmt>tokenize.TokenError# Input ended in a multiline string or expression. That's OK for us. 
<block_start><pass><block_end><if_stmt><not>tokens_by_line[-1]<block_start>tokens_by_line.pop()<block_end><return>tokens_by_line<block_end><def_stmt>show_linewise_tokens s:str<block_start>"""For investigation and debugging"""<if_stmt><not>s.endswith('\n')<block_start>s<augadd>'\n'<block_end>lines=s.splitlines(keepends=<true>)<for_stmt>line make_tokens_by_line(lines)<block_start>print("Line -------")<for_stmt>tokinfo line<block_start>print(" " tokinfo)<block_end><block_end><block_end># Arbitrary limit to prevent getting stuck in infinite loops TRANSFORM_LOOP_LIMIT=500<class_stmt>TransformerManager<block_start>"""Applies various transformations to a cell or code block. The key methods for external use are ``transform_cell()`` and ``check_complete()``. """<def_stmt>__init__ self<block_start>self.cleanup_transforms=[leading_indent classic_prompt ipython_prompt ]<line_sep>self.line_transforms=[cell_magic ]<line_sep>self.token_transformers=[MagicAssign SystemAssign EscapedCommand HelpEnd ]<block_end><def_stmt>do_one_token_transform self lines<block_start>"""Find and run the transform earliest in the code. Returns (changed, lines). This method is called repeatedly until changed is False, indicating that all available transformations are complete. The tokens following IPython special syntax might not be valid, so the transformed code is retokenised every time to identify the next piece of special syntax. Hopefully long code cells are mostly valid Python, not using lots of IPython special syntax, so this shouldn't be a performance issue. 
"""<line_sep>tokens_by_line=make_tokens_by_line(lines)<line_sep>candidates=[]<for_stmt>transformer_cls self.token_transformers<block_start>transformer=transformer_cls.find(tokens_by_line)<if_stmt>transformer<block_start>candidates.append(transformer)<block_end><block_end><if_stmt><not>candidates# Nothing to transform <block_start><return><false> lines<block_end>ordered_transformers=sorted(candidates key=TokenTransformBase.sortby)<for_stmt>transformer ordered_transformers<block_start><try_stmt><block_start><return><true> transformer.transform(lines)<block_end><except_stmt>SyntaxError<block_start><pass><block_end><block_end><return><false> lines<block_end><def_stmt>do_token_transforms self lines<block_start><for_stmt>_ range(TRANSFORM_LOOP_LIMIT)<block_start>changed,lines=self.do_one_token_transform(lines)<if_stmt><not>changed<block_start><return>lines<block_end><block_end><raise>RuntimeError("Input transformation still changing after "<concat>"%d iterations. Aborting."%TRANSFORM_LOOP_LIMIT)<block_end><def_stmt>transform_cell self cell:str<arrow>str<block_start>"""Transforms a cell of input code"""<if_stmt><not>cell.endswith('\n')<block_start>cell<augadd>'\n'# Ensure the cell has a trailing newline <block_end>lines=cell.splitlines(keepends=<true>)<for_stmt>transform self.cleanup_transforms+self.line_transforms<block_start>lines=transform(lines)<block_end>lines=self.do_token_transforms(lines)<line_sep><return>''.join(lines)<block_end><def_stmt>check_complete self cell:str<block_start>"""Return whether a block of code is ready to execute, or should be continued Parameters ---------- source : string Python input code, which can be multiline. Returns ------- status : str One of 'complete', 'incomplete', or 'invalid' if source is not a prefix of valid code. indent_spaces : int or None The number of spaces by which to indent the next line of code. If status is not 'incomplete', this is None. """<line_sep># Remember if the lines ends in a new line. 
ends_with_newline=<false><for_stmt>character reversed(cell)<block_start><if_stmt>character<eq>'\n'<block_start>ends_with_newline=<true><line_sep><break><block_end><elif_stmt>character.strip()<block_start><break><block_end><else_stmt><block_start><continue><block_end><block_end><if_stmt><not>ends_with_newline# Append an newline for consistent tokenization # See https://bugs.python.org/issue33899 <block_start>cell<augadd>'\n'<block_end>lines=cell.splitlines(keepends=<true>)<if_stmt><not>lines<block_start><return>'complete' <none><block_end><if_stmt>lines[-1].endswith('\\')# Explicit backslash continuation <block_start><return>'incomplete' find_last_indent(lines)<block_end><try_stmt><block_start><for_stmt>transform self.cleanup_transforms<block_start>lines=transform(lines)<block_end><block_end><except_stmt>SyntaxError<block_start><return>'invalid' <none><block_end><if_stmt>lines[0].startswith('%%')# Special case for cell magics - completion marked by blank line <block_start><if_stmt>lines[-1].strip()<block_start><return>'incomplete' find_last_indent(lines)<block_end><else_stmt><block_start><return>'complete' <none><block_end><block_end><try_stmt><block_start><for_stmt>transform self.line_transforms<block_start>lines=transform(lines)<block_end>lines=self.do_token_transforms(lines)<block_end><except_stmt>SyntaxError<block_start><return>'invalid' <none><block_end>tokens_by_line=make_tokens_by_line(lines)<if_stmt><not>tokens_by_line<block_start><return>'incomplete' find_last_indent(lines)<block_end><if_stmt>tokens_by_line[-1][-1].type<ne>tokenize.ENDMARKER# We're in a multiline string or expression <block_start><return>'incomplete' find_last_indent(lines)<block_end>newline_types={tokenize.NEWLINE tokenize.COMMENT tokenize.ENDMARKER}<line_sep># Pop the last line which only contains DEDENTs and ENDMARKER last_token_line=<none><if_stmt>{t.type<for>t tokens_by_line[-1]}<in>[{tokenize.DEDENT tokenize.ENDMARKER} 
{tokenize.ENDMARKER}]<and>len(tokens_by_line)<g>1<block_start>last_token_line=tokens_by_line.pop()<block_end><while_stmt>tokens_by_line[-1]<and>tokens_by_line[-1][-1].type<in>newline_types<block_start>tokens_by_line[-1].pop()<block_end><if_stmt>len(tokens_by_line)<eq>1<and><not>tokens_by_line[-1]<block_start><return>'incomplete' 0<block_end><if_stmt>tokens_by_line[-1][-1].string<eq>':'# The last line starts a block (e.g. 'if foo:') <block_start>ix=0<while_stmt>tokens_by_line[-1][ix].type<in>{tokenize.INDENT tokenize.DEDENT}<block_start>ix<augadd>1<block_end>indent=tokens_by_line[-1][ix].start[1]<line_sep><return>'incomplete' indent+4<block_end><if_stmt>tokens_by_line[-1][0].line.endswith('\\')<block_start><return>'incomplete' <none><block_end># At this point, our checks think the code is complete (or invalid). # We'll use codeop.compile_command to check this with the real parser <try_stmt><block_start><with_stmt>warnings.catch_warnings()<block_start>warnings.simplefilter('error' SyntaxWarning)<line_sep>res=compile_command(''.join(lines) symbol='exec')<block_end><block_end><except_stmt>(SyntaxError OverflowError ValueError TypeError MemoryError SyntaxWarning)<block_start><return>'invalid' <none><block_end><else_stmt><block_start><if_stmt>res<is><none><block_start><return>'incomplete' find_last_indent(lines)<block_end><block_end><if_stmt>last_token_line<and>last_token_line[0].type<eq>tokenize.DEDENT<block_start><if_stmt>ends_with_newline<block_start><return>'complete' <none><block_end><return>'incomplete' find_last_indent(lines)<block_end># If there's a blank line at the end, assume we're ready to execute <if_stmt><not>lines[-1].strip()<block_start><return>'complete' <none><block_end><return>'complete' <none><block_end><block_end><def_stmt>find_last_indent lines<block_start>m=_indent_re.match(lines[-1])<if_stmt><not>m<block_start><return>0<block_end><return>len(m.group(0).replace('\t' ' '<times>4))<block_end>
ANGULAR_PACKAGES_CONFIG=[("@angular/animations" struct(entry_points=["browser"])) ("@angular/common" struct(entry_points=["http/testing" "http" "testing"])) ("@angular/compiler" struct(entry_points=["testing"])) ("@angular/core" struct(entry_points=["testing"])) ("@angular/forms" struct(entry_points=[])) ("@angular/platform-browser" struct(entry_points=["testing" "animations"])) ("@angular/platform-browser-dynamic" struct(entry_points=["testing"])) ("@angular/router" struct(entry_points=[])) ]<line_sep>ANGULAR_PACKAGES=[struct(name=name[len("@angular/"):] entry_points=config.entry_points platform=config.platform<if>hasattr(config "platform")<else>"browser" module_name=name )<for>name,config ANGULAR_PACKAGES_CONFIG]<line_sep>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>cogdl.utils spmm<class_stmt>GINLayer(nn.Module)<block_start>r"""Graph Isomorphism Network layer from paper `"How Powerful are Graph Neural Networks?" <https://arxiv.org/pdf/1810.00826.pdf>`__. .. math:: h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} + \mathrm{sum}\left(\left\{h_j^{l}, j\in\mathcal{N}(i) \right\}\right)\right) Parameters ---------- apply_func : callable layer function) layer or function applied to update node feature eps : float32, optional Initial `\epsilon` value. train_eps : bool, optional If True, `\epsilon` will be a learnable parameter. """<def_stmt>__init__ self apply_func=<none> eps=0 train_eps=<true><block_start>super(GINLayer self).__init__()<if_stmt>train_eps<block_start>self.eps=torch.nn.Parameter(torch.FloatTensor([eps]))<block_end><else_stmt><block_start>self.register_buffer("eps" torch.FloatTensor([eps]))<block_end>self.apply_func=apply_func<block_end><def_stmt>forward self graph x<block_start>out=(1+self.eps)<times>x+spmm(graph x)<if_stmt>self.apply_func<is><not><none><block_start>out=self.apply_func(out)<block_end><return>out<block_end><block_end>
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>typing Sequence<import_stmt>torch<import_from_stmt>torchvision.transforms Normalize<import_from_stmt>..builder ALGORITHMS build_backbone build_head build_neck<import_from_stmt>.base BaseModel<line_sep>@ALGORITHMS.register_module()<class_stmt>CAE(BaseModel)<block_start>"""CAE. Implementation of `Context Autoencoder for Self-Supervised Representation Learning <https://arxiv.org/abs/2202.03026>`_. Args: backbone (dict, optional): Config dict for module of backbone. neck (dict, optional): Config dict for module of deep features to compact feature vectors. Defaults to None. head (dict, optional): Config dict for module of loss functions. Defaults to None. base_momentum (float): The base momentum coefficient for the target network. Defaults to 0.0. init_cfg (dict, optional): the config to control the initialization. """<def_stmt>__init__ self backbone:dict=<none> neck:dict=<none> head:dict=<none> base_momentum:float=0.0 init_cfg:dict=<none> **kwargs<arrow><none><block_start>super(CAE self).__init__(init_cfg)<assert_stmt>backbone<is><not><none><line_sep>self.backbone=build_backbone(backbone)<line_sep>self.teacher=build_backbone(backbone)<assert_stmt>neck<is><not><none><line_sep>self.neck=build_neck(neck)<assert_stmt>head<is><not><none><line_sep>self.head=build_head(head)<line_sep>self.momentum=base_momentum<line_sep>self.img_norm=Normalize(mean=torch.tensor((0.485 0.456 0.406)) std=torch.tensor((0.229 0.224 0.225)))<block_end><def_stmt>init_weights self<arrow><none><block_start>super().init_weights()<line_sep>self._init_teacher()<block_end><def_stmt>_init_teacher self<arrow><none># init the weights of teacher with those of backbone <block_start><for_stmt>param_backbone,param_teacher zip(self.backbone.parameters() 
self.teacher.parameters())<block_start>param_teacher.detach()<line_sep>param_teacher.data.copy_(param_backbone.data)<line_sep>param_teacher.requires_grad=<false><block_end><block_end><def_stmt>momentum_update self<arrow><none><block_start>"""Momentum update of the teacher network."""<for_stmt>param_bacbone,param_teacher zip(self.backbone.parameters() self.teacher.parameters())<block_start>param_teacher.data=param_teacher.data<times>self.momentum+param_bacbone.data<times>(1.-self.momentum)<block_end><block_end><def_stmt>extract_feat self img:torch.Tensor mask:torch.Tensor<arrow>torch.Tensor<block_start>x=self.backbone(img mask)<line_sep><return>x<block_end><def_stmt>forward_train self samples:Sequence **kwargs<arrow>dict<block_start>img,img_target,mask=samples<line_sep># normalize images and the images to get the target img_list=[self.img_norm(x).unsqueeze(0)<for>x img]<line_sep>img=torch.cat(img_list)<line_sep>img_target=0.8<times>img_target+0.1<line_sep>mask=mask.flatten(1).to(torch.bool)<line_sep>unmasked=self.backbone(img mask)<line_sep># get the latent prediction for the masked patches <with_stmt>torch.no_grad()<block_start>latent_target=self.teacher(img ~mask)<line_sep>latent_target=latent_target[: 1: :]<line_sep>self.momentum_update()<block_end>pos_embed=self.backbone.pos_embed.expand(img.shape[0] -1 -1)<line_sep>pos_embed_masked=pos_embed[: 1:][mask].reshape(img.shape[0] -1 pos_embed.shape[-1])<line_sep>pos_embed_unmasked=pos_embed[: 1:][~mask].reshape(img.shape[0] -1 pos_embed.shape[-1])<line_sep># input the unmasked tokens and masked tokens to the decoder logits,latent_pred=self.neck(unmasked[: 1:] pos_embed_masked pos_embed_unmasked)<line_sep>logits=logits.view(-1 logits.shape[-1])<line_sep>losses=self.head(img_target logits latent_pred latent_target mask)<line_sep><return>losses<block_end><block_end>
<import_from_stmt>collections namedtuple<import_stmt>hashlib<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_stmt>pyperformance<import_from_stmt>. _utils _python _pythoninfo<import_from_stmt>.venv VenvForBenchmarks REQUIREMENTS_FILE<import_from_stmt>. _venv<class_stmt>BenchmarkException(Exception)<block_start><pass><block_end><class_stmt>RunID(namedtuple('RunID' 'python compat bench timestamp'))<block_start><def_stmt>__new__ cls python compat bench timestamp<block_start>self=super().__new__(cls python compat bench<or><none> int(timestamp)<if>timestamp<else><none> )<line_sep><return>self<block_end><def_stmt>__str__ self<block_start><if_stmt><not>self.timestamp<block_start><return>self.name<block_end><return>f'{self.name}-{self.timestamp}'<block_end>@property<def_stmt>name self<block_start><try_stmt><block_start><return>self._name<block_end><except_stmt>AttributeError<block_start>name=f'{self.python}-compat-{self.compat}'<if_stmt>self.bench<block_start>name=f'{name}-bm-{self.bench.name}'<block_end>self._name=name<line_sep><return>self._name<block_end><block_end><block_end><def_stmt>get_run_id python bench=<none><block_start>py_id=_python.get_id(python prefix=<true>)<line_sep>compat_id=get_compatibility_id(bench)<line_sep>ts=time.time()<line_sep><return>RunID(py_id compat_id bench ts)<block_end><def_stmt>run_benchmarks should_run python options<block_start>to_run=sorted(should_run)<line_sep>info=_pythoninfo.get_info(python)<line_sep>runid=get_run_id(info)<line_sep>unique=getattr(options 'unique_venvs' <false>)<if_stmt><not>unique<block_start>common=VenvForBenchmarks.ensure(_venv.get_venv_root(runid.name python=info) info upgrade='oncreate' inherit_environ=options.inherit_environ )<block_end>benchmarks={}<line_sep>venvs=set()<for_stmt>i,bench enumerate(to_run)<block_start>bench_runid=runid._replace(bench=bench)<assert_stmt>bench_runid.name (bench bench_runid)<line_sep>name=bench_runid.name<line_sep>venv_root=_venv.get_venv_root(name 
python=info)<line_sep>print()<line_sep>print('='<times>50)<line_sep>print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})')<line_sep>print()<if_stmt><not>unique<block_start>print('(trying common venv first)')<line_sep># Try the common venv first. <try_stmt><block_start>common.ensure_reqs(bench)<block_end><except_stmt>_venv.RequirementsInstallationFailedError<block_start>print('(falling back to unique venv)')<block_end><else_stmt><block_start>benchmarks[bench]=(common bench_runid)<line_sep><continue><block_end><block_end>venv=VenvForBenchmarks.ensure(venv_root info upgrade='oncreate' inherit_environ=options.inherit_environ )<try_stmt># XXX Do not override when there is a requirements collision. <block_start>venv.ensure_reqs(bench)<block_end><except_stmt>_venv.RequirementsInstallationFailedError<block_start>print('(benchmark will be skipped)')<line_sep>print()<line_sep>venv=<none><block_end>venvs.add(venv_root)<line_sep>benchmarks[bench]=(venv bench_runid)<block_end>print()<line_sep>suite=<none><line_sep>run_count=str(len(to_run))<line_sep>errors=[]<line_sep>pyperf_opts=get_pyperf_opts(options)<import_stmt>pyperf<for_stmt>index,bench enumerate(to_run)<block_start>name=bench.name<line_sep>print("[%s/%s] %s..."%(str(index+1).rjust(len(run_count)) run_count name))<line_sep>sys.stdout.flush()<def_stmt>add_bench dest_suite obj<block_start><if_stmt>isinstance(obj pyperf.BenchmarkSuite)<block_start>results=obj<block_end><else_stmt><block_start>results=(obj )<block_end>version=pyperformance.__version__<for_stmt>res results<block_start>res.update_metadata({'performance_version':version})<if_stmt>dest_suite<is><not><none><block_start>dest_suite.add_benchmark(res)<block_end><else_stmt><block_start>dest_suite=pyperf.BenchmarkSuite([res])<block_end><block_end><return>dest_suite<block_end>bench_venv,bench_runid=benchmarks.get(bench)<if_stmt>bench_venv<is><none><block_start>print("ERROR: Benchmark %s failed: could not install 
requirements"%name)<line_sep>errors.append(name)<line_sep><continue><block_end><try_stmt><block_start>result=bench.run(bench_venv.python bench_runid pyperf_opts venv=bench_venv verbose=options.verbose )<block_end><except_stmt>Exception<as>exc<block_start>print("ERROR: Benchmark %s failed: %s"%(name exc))<line_sep>traceback.print_exc()<line_sep>errors.append(name)<block_end><else_stmt><block_start>suite=add_bench(suite result)<block_end><block_end>print()<line_sep><return>(suite errors)<block_end># Utility functions <def_stmt>get_compatibility_id bench=<none># XXX Do not include the pyperformance reqs if a benchmark was provided? <block_start>reqs=sorted(_utils.iter_clean_lines(REQUIREMENTS_FILE))<if_stmt>bench<block_start>lockfile=bench.requirements_lockfile<if_stmt>lockfile<and>os.path.exists(lockfile)<block_start>reqs<augadd>sorted(_utils.iter_clean_lines(lockfile))<block_end><block_end>data=[# XXX Favor pyperf.__version__ instead? pyperformance.__version__ '\n'.join(reqs) ]<line_sep>h=hashlib.sha256()<for_stmt>value data<block_start>h.update(value.encode('utf-8'))<block_end>compat_id=h.hexdigest()<line_sep># XXX Return the whole string? compat_id=compat_id[:12]<line_sep><return>compat_id<block_end><def_stmt>get_pyperf_opts options<block_start>opts=[]<if_stmt>options.debug_single_value<block_start>opts.append('--debug-single-value')<block_end><elif_stmt>options.rigorous<block_start>opts.append('--rigorous')<block_end><elif_stmt>options.fast<block_start>opts.append('--fast')<block_end><if_stmt>options.verbose<block_start>opts.append('--verbose')<block_end><if_stmt>options.affinity<block_start>opts.append('--affinity=%s'%options.affinity)<block_end><if_stmt>options.track_memory<block_start>opts.append('--track-memory')<block_end><if_stmt>options.inherit_environ<block_start>opts.append('--inherit-environ=%s'%','.join(options.inherit_environ))<block_end><return>opts<block_end>
# -*- coding: utf-8 -*- """ Created on Fri Nov 27 08:09:11 2020 @author: Tin """<line_sep># Plot Candlestick in bokeh <import_stmt>pandas<as>pd# Dataframe Library <import_from_stmt>math pi<import_from_stmt>bokeh.plotting figure show output_file<line_sep>pd.set_option('max_columns' <none>)# To show all columns <import_stmt>yfinance<as>yf<line_sep>yf.pdr_override()<line_sep># input symbol='AAPL'<line_sep>start='2019-12-01'<line_sep>end='2020-01-01'<line_sep># dataframe df=yf.download(symbol start end)<line_sep>df["Date"]=pd.to_datetime(df.index)<line_sep>mids=(df['Open']+df['Adj Close'])/2<line_sep>spans=abs(df['Adj Close']-df['Open'])<line_sep>inc=df['Adj Close']<g>df['Open']<line_sep>dec=df['Open']<g>df['Adj Close']<line_sep>w=12<times>60<times>60<times>1000# half day in ms TOOLS="pan,wheel_zoom,box_zoom,reset,save"<line_sep>p=figure(x_axis_type="datetime" tools=TOOLS plot_width=1000 title=symbol+" Candlestick")<line_sep>p.xaxis.major_label_orientation=pi/4<line_sep>p.grid.grid_line_alpha=0.3<line_sep>p.segment(df.Date df.High df.Date df.Low color="black")<line_sep>p.vbar(df.Date[inc] w df.Open[inc] df['Adj Close'][inc] fill_color="#D5E1DD" line_color="black")<line_sep>p.vbar(df.Date[dec] w df.Open[dec] df['Adj Close'][dec] fill_color="#F2583E" line_color="black")<line_sep>output_file("candlestick.html" title=symbol+" candlestick")<line_sep>show(p)# open a browser
<import_stmt>unittest<import_from_stmt>mock Mock<import_from_stmt>foundations_spec.helpers.spec Spec<import_from_stmt>foundations_spec.helpers let let_mock set_up<class_stmt>TestLazyBucket(Spec)<block_start>@let<def_stmt>lazy_bucket self<block_start><import_from_stmt>foundations_contrib.lazy_bucket LazyBucket<line_sep><return>LazyBucket(self.bucket_constructor)<block_end>@set_up<def_stmt>set_up self<block_start>self.bucket_constructor.return_value=self.bucket<block_end>bucket_constructor=let_mock()<line_sep>bucket=let_mock()<line_sep>name=let_mock()<line_sep>data=let_mock()<line_sep>input_file=let_mock()<line_sep>output_file=let_mock()<line_sep>dummy=let_mock()<line_sep>pathname=let_mock()<line_sep>source=let_mock()<line_sep>destination=let_mock()<def_stmt>test_ensure_bucket_is_not_constructed self<block_start>self.lazy_bucket<line_sep>self.bucket_constructor.assert_not_called()<block_end><def_stmt>test_upload_from_string_calls_bucket self<block_start>self.bucket.upload_from_string.return_value=self.dummy<line_sep>result=self.lazy_bucket.upload_from_string(self.name self.data)<line_sep>self.bucket.upload_from_string.assert_called_with(self.name self.data)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_upload_from_file_calls_bucket self<block_start>self.bucket.upload_from_file.return_value=self.dummy<line_sep>result=self.lazy_bucket.upload_from_file(self.name self.input_file)<line_sep>self.bucket.upload_from_file.assert_called_with(self.name self.input_file)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_exists_calls_bucket self<block_start>self.bucket.exists.return_value=self.dummy<line_sep>result=self.lazy_bucket.exists(self.name)<line_sep>self.bucket.exists.assert_called_with(self.name)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_download_as_string_calls_bucket 
self<block_start>self.bucket.download_as_string.return_value=self.dummy<line_sep>result=self.lazy_bucket.download_as_string(self.name)<line_sep>self.bucket.download_as_string.assert_called_with(self.name)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_download_to_file_calls_bucket self<block_start>self.bucket.download_to_file.return_value=self.dummy<line_sep>result=self.lazy_bucket.download_to_file(self.name self.output_file)<line_sep>self.bucket.download_to_file.assert_called_with(self.name self.output_file)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_list_files_calls_bucket self<block_start>self.bucket.list_files.return_value=self.dummy<line_sep>result=self.lazy_bucket.list_files(self.pathname)<line_sep>self.bucket.list_files.assert_called_with(self.pathname)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_remove_calls_bucket self<block_start>self.bucket.remove.return_value=self.dummy<line_sep>result=self.lazy_bucket.remove(self.name)<line_sep>self.bucket.remove.assert_called_with(self.name)<line_sep>self.assertEqual(self.dummy result)<block_end><def_stmt>test_move_calls_bucket self<block_start>self.bucket.move.return_value=self.dummy<line_sep>result=self.lazy_bucket.move(self.source self.destination)<line_sep>self.bucket.move.assert_called_with(self.source self.destination)<line_sep>self.assertEqual(self.dummy result)<block_end><block_end>