content stringlengths 0 1.55M |
|---|
<import_stmt>numpy<as>np<import_stmt>unittest<import_from_stmt>openmdao.utils cs_safe<import_from_stmt>openmdao.utils.assert_utils assert_near_equal<class_stmt>TestCSSafeFuctions(unittest.TestCase)<block_start><def_stmt>test_abs self<block_start>test_data=np.array([1 -1 -2 2 5.675 -5.676] dtype='complex')<line_sep>assert_near_equal(cs_safe.abs(test_data) np.abs(test_data))<line_sep>test_data<augadd>complex(0 1e-50)<line_sep>cs_derivs=cs_safe.abs(test_data).imag/1e-50<line_sep>expected=[1 -1 -1 1 1 -1]<line_sep>assert_near_equal(cs_derivs expected)<block_end><def_stmt>test_norm self<block_start>test_data=np.array([[1 2 3 -4] [5 6 7 -8]] dtype='complex')<line_sep>assert_near_equal(cs_safe.norm(test_data axis=<none>) np.linalg.norm(test_data axis=<none>))<line_sep>assert_near_equal(cs_safe.norm(test_data axis=0) np.linalg.norm(test_data axis=0))<line_sep>assert_near_equal(cs_safe.norm(test_data axis=1) np.linalg.norm(test_data axis=1))<line_sep>deriv_test_data=test_data.copy()<line_sep>deriv_test_data[0 0]<augadd>complex(0 1e-50)<line_sep>cs_deriv=cs_safe.norm(deriv_test_data).imag/1e-50<line_sep>expected=1/np.linalg.norm(test_data)<times>test_data[0 0].real<line_sep>assert_near_equal(cs_deriv expected)<block_end><def_stmt>test_arctan2 self<block_start>x=np.array([-1 +1 +1 -1] dtype='complex')<line_sep>y=np.array([-1 -1 +1 +1] dtype='complex')<line_sep>expected=np.array([-2.35619449 -0.78539816 0.78539816 2.35619449])<line_sep>assert_near_equal(cs_safe.arctan2(y x) expected tolerance=1e-8)<line_sep>x<augadd>complex(0 1e-50)<line_sep>y<augadd>complex(0 1e-50)<line_sep>cs_derivs=cs_safe.arctan2(y x).imag/1e-50<line_sep>expected=[0. 1. 0. -1.]<line_sep>assert_near_equal(cs_derivs expected)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
"""
Driver program for training and evaluation.
"""<import_stmt>argparse<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>random<import_stmt>torch<import_stmt>torch.optim<as>O<import_from_stmt>datasets get_dataset get_dataset_configurations<import_from_stmt>models get_model<import_from_stmt>runners Runner<if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Sentence similarity models')<line_sep>parser.add_argument('--model' default='sif' choices=['sif' 'mpcnn' 'mpcnn-lite' 'bimpm'] help='Model to use')<line_sep>parser.add_argument('--dataset' default='sick' choices=['sick' 'wikiqa'] help='Dataset to use')<line_sep>parser.add_argument('--batch-size' type=int default=64 help='Batch size')<line_sep>parser.add_argument('--epochs' type=int default=15 help='Number of epochs')<line_sep>parser.add_argument('--lr' type=float default=2e-4 help='Learning rate')<line_sep>parser.add_argument('--regularization' type=float default=3e-4 help='Regularization')<line_sep>parser.add_argument('--seed' type=int default=1234 help='Seed for reproducibility')<line_sep>parser.add_argument('--device' type=int default=0 help='Device, -1 for CPU')<line_sep>parser.add_argument('--log-interval' type=int default=50 help='Device, -1 for CPU')<line_sep># Special options for SIF model
parser.add_argument('--unsupervised' action='store_true' default=<false> help='Set this flag to use unsupervised mode.')<line_sep>parser.add_argument('--alpha' type=float default=1e-3 help='Smoothing term for smooth inverse frequency baseline model')<line_sep>parser.add_argument('--no-remove-special-direction' action='store_true' default=<false> help='Set to not remove projection onto first principal component')<line_sep>parser.add_argument('--frequency-dataset' default='enwiki' choices=['train' 'enwiki'])<line_sep>args=parser.parse_args()<line_sep>random.seed(args.seed)<line_sep>np.random.seed(args.seed)<line_sep>torch.manual_seed(args.seed)<if_stmt>args.device<ne>-1<block_start>torch.cuda.manual_seed(args.seed)<block_end>logger=logging.getLogger(__name__)<line_sep>logger.setLevel(logging.INFO)<line_sep>ch=logging.StreamHandler()<line_sep>ch.setLevel(logging.DEBUG)<line_sep>formatter=logging.Formatter('%(levelname)s - %(message)s')<line_sep>ch.setFormatter(formatter)<line_sep>logger.addHandler(ch)<line_sep>dataset_cls,train_loader,dev_loader,test_loader,embedding=get_dataset(args)<line_sep>model=get_model(args dataset_cls embedding)<if_stmt>args.model<eq>'sif'<block_start>model.populate_word_frequency_estimation(train_loader)<block_end>total_params=0<for_stmt>param model.parameters()<block_start>size=[s<for>s param.size()]<line_sep>total_params<augadd>np.prod(size)<block_end>logger.info('Total number of parameters: %s' total_params)<line_sep>loss_fn,metrics,y_to_score,resolved_pred_to_score=get_dataset_configurations(args)<line_sep>optimizer=O.Adam(filter(<lambda>p:p.requires_grad model.parameters()) lr=args.lr weight_decay=args.regularization)<line_sep>runner=Runner(model loss_fn metrics optimizer y_to_score resolved_pred_to_score args.device <none>)<line_sep>runner.run(args.epochs train_loader dev_loader test_loader args.log_interval)<block_end> |
track=dict(author_username='alexisbcook' course_name='Data Cleaning' course_url='https://www.kaggle.com/learn/data-cleaning' course_forum_url='https://www.kaggle.com/learn-forum/172650')<line_sep>lessons=[{'topic':topic_name}<for>topic_name ['Handling missing values' #1
'Scaling and normalization' #2
'Parsing dates' #3
'Character encodings' #4
'Inconsistent data entry']#5
]<line_sep>notebooks=[dict(filename='tut1.ipynb' lesson_idx=0 type='tutorial' dataset_sources=['maxhorowitz/nflplaybyplay2009to2016'] ) dict(filename='ex1.ipynb' lesson_idx=0 type='exercise' dataset_sources=['aparnashastry/building-permit-applications-data'] scriptid=10824396) dict(filename='tut2.ipynb' lesson_idx=1 type='tutorial' ) dict(filename='ex2.ipynb' lesson_idx=1 type='exercise' dataset_sources=['kemical/kickstarter-projects'] scriptid=10824404) dict(filename='tut3.ipynb' lesson_idx=2 type='tutorial' dataset_sources=['nasa/landslide-events']) dict(filename='ex3.ipynb' lesson_idx=2 type='exercise' dataset_sources=['usgs/earthquake-database' 'smithsonian/volcanic-eruptions'] scriptid=10824403) dict(filename='tut4.ipynb' lesson_idx=3 type='tutorial' dataset_sources=['kemical/kickstarter-projects']) dict(filename='ex4.ipynb' lesson_idx=3 type='exercise' dataset_sources=['kwullum/fatal-police-shootings-in-the-us'] scriptid=10824401) dict(filename='tut5.ipynb' lesson_idx=4 type='tutorial' dataset_sources=['alexisbcook/pakistan-intellectual-capital']) dict(filename='ex5.ipynb' lesson_idx=4 type='exercise' dataset_sources=['alexisbcook/pakistan-intellectual-capital'] scriptid=10824407) ]<line_sep> |
# Generated by Django 3.2.12 on 2022-03-16 18:10
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("bookwyrm" "0144_alter_announcement_display_type") ]<line_sep>operations=[migrations.AddField(model_name="sitesettings" name="version" field=models.CharField(blank=<true> max_length=10 null=<true>) ) ]<block_end> |
"""
Module description:
"""<line_sep>__version__='0.3.1'<line_sep>__author__='<NAME>, <NAME>'<line_sep>__email__='<EMAIL>, <EMAIL>'<import_stmt>pandas<as>pd<import_stmt>configparser<import_stmt>pickle<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>types SimpleNamespace<def_stmt>read_csv filename<block_start>"""
Args:
filename (str): csv file path
Return:
A pandas dataframe.
"""<line_sep>df=pd.read_csv(filename index_col=<false>)<line_sep><return>df<block_end><def_stmt>read_np filename<block_start>"""
Args:
filename (str): filename of numpy to load
Return:
The loaded numpy.
"""<line_sep><return>np.load(filename)<block_end><def_stmt>read_imagenet_classes_txt filename<block_start>"""
Args:
filename (str): txt file path
Return:
A list with 1000 imagenet classes as strings.
"""<with_stmt>open(filename)<as>f<block_start>idx2label=eval(f.read())<block_end><return>idx2label<block_end><def_stmt>read_config sections_fields<block_start>"""
Args:
sections_fields (list): list of fields to retrieve from configuration file
Return:
A list of configuration values.
"""<line_sep>config=configparser.ConfigParser()<line_sep>config.read('./config/configs.ini')<line_sep>configs=[]<for_stmt>s,f sections_fields<block_start>configs.append(config[s][f])<block_end><return>configs<block_end><def_stmt>read_multi_config <block_start>"""
It reads a config file that contains the configuration parameters for the recommendation systems.
Return:
A list of configuration settings.
"""<line_sep>config=configparser.ConfigParser()<line_sep>config.read('./config/multi.ini')<line_sep>configs=[]<for_stmt>section config.sections()<block_start>single_config=SimpleNamespace()<line_sep>single_config.name=section<for_stmt>field,value config.items(section)<block_start>single_config.field=value<block_end>configs.append(single_config)<block_end><return>configs<block_end><def_stmt>load_obj name<block_start>"""
Load the pkl object by name
:param name: name of file
:return:
"""<with_stmt>open(name 'rb')<as>f<block_start><return>pickle.load(f)<block_end><block_end><def_stmt>find_checkpoint dir restore_epochs epochs rec best=0<block_start>"""
:param dir: directory of the model where we start from the reading.
:param restore_epochs: epoch from which we start from.
:param epochs: epochs from which we restore (0 means that we have best)
:param rec: recommender model
:param best: 0 No Best - 1 Search for the Best
:return:
"""<if_stmt>best<block_start><for_stmt>r,d,f os.walk(dir)<block_start><for_stmt>file f<block_start><if_stmt>'best-weights-'.format(restore_epochs)<in>file<block_start><return>dir+file.split('.')[0]<block_end><block_end><block_end><return>''<block_end><if_stmt>rec<eq>"apr"<and>restore_epochs<l>epochs# We have to restore from an execution of bprmf
<block_start>dir_stored_models=os.walk('/'.join(dir.split('/')[:-2]))<for_stmt>dir_stored_model dir_stored_models<block_start><if_stmt>'bprmf'<in>dir_stored_model[0]<block_start>dir=dir_stored_model[0]+'/'<line_sep><break><block_end><block_end><block_end><for_stmt>r,d,f os.walk(dir)<block_start><for_stmt>file f<block_start><if_stmt>'weights-{0}-'.format(restore_epochs)<in>file<block_start><return>dir+file.split('.')[0]<block_end><block_end><block_end><return>''<block_end> |
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake gcloud utils for testing without cloud access."""<import_from_stmt>makani.lib.python.batch_sim gcloud_util<class_stmt>FakeFilesystem(object)<block_start>"""A fake filesystem.
A FakeFilesystem instance is simply a dictionary of file names to file
contents, with Save() and Load() methods to make access look a bit more
file-like.
The class itself also contains LOCAL and CLOUD variables intended to store
references to particular FakeFilesystem instances. These are initialized to
None and intended to be defined as needed via mock.patch. For example:
with mock.patch('makani.batch_sim.gcloud_fakes.FakeFilesystem.LOCAL',
FakeFilesystem()) as local_fs:
<Do something with local files>
with mock.patch('makani.batch_sim.gcloud_fakes.FakeFilesystem.CLOUD',
FakeFilesystem()) as remote_fs:
<Do something with remote files>
In particular, many of the fakes in this module use FakeFilesystem.LOCAL and
FakeFilesystem.CLOUD to simulate actual storage patterns.
"""<line_sep>LOCAL=<none><line_sep>CLOUD=<none><def_stmt>__init__ self<block_start>self.files={}<block_end><def_stmt>Save self filename descriptor<block_start>self.files[filename]=descriptor<block_end><def_stmt>Load self filename<block_start><return>self.files[filename]<block_end><block_end><class_stmt>FakeCloudStorageApi(object)<block_start>"""A fake of gcloud_util.CloudStorageApi.
This performs simple transfers between FakeFilesystem.LOCAL and
FakeFilesystem.CLOUD.
To simulate working with different local filesystems, FakeFilesystem.LOCAL
may be patched before instantiating the FakeCloudStorageApi.
"""<def_stmt>__init__ self bucket=<none><block_start>self._local_fs=FakeFilesystem.LOCAL<line_sep>self._cloud_fs=FakeFilesystem.CLOUD<line_sep>self._bucket=bucket<block_end><def_stmt>_RemoveBucketFromCloudName self cloud_name<block_start>cloud_name=cloud_name.strip()<if_stmt>cloud_name.startswith('gs://')<block_start>_,cloud_name=gcloud_util.ParseBucketAndPath(cloud_name <none>)<block_end><return>cloud_name<block_end><def_stmt>DownloadFile self cloud_name stream<block_start>cloud_name=self._RemoveBucketFromCloudName(cloud_name)<line_sep>stream.write(self._cloud_fs.Load(cloud_name))<block_end><def_stmt>UploadFile self local_name cloud_name<block_start>cloud_name=self._RemoveBucketFromCloudName(cloud_name)<line_sep>self._cloud_fs.Save(cloud_name self._local_fs.Load(local_name))<block_end><def_stmt>UploadStream self stream cloud_name<block_start>cloud_name=self._RemoveBucketFromCloudName(cloud_name)<line_sep>self._cloud_fs.Save(cloud_name stream.getvalue())<block_end><def_stmt>DeletePrefix self prefix<block_start><for_stmt>filename self.List(prefix)<block_start><if_stmt>filename.startswith(prefix)<block_start>self._cloud_fs.files.pop(filename)<block_end><block_end><block_end><def_stmt>DeleteFile self cloud_name<block_start>cloud_name=self._RemoveBucketFromCloudName(cloud_name)<line_sep>self._cloud_fs.files.pop(cloud_name)<block_end><def_stmt>List self prefix<block_start>prefix=self._RemoveBucketFromCloudName(prefix)<line_sep><return>[name<for>name self._cloud_fs.files<if>name.startswith(prefix)]<block_end><block_end> |
<class_stmt>Solution<block_start><def_stmt>singleNumber self nums<block_start>"""
:type nums: List[int]
:rtype: int
"""<line_sep>k=0<for_stmt>n nums<block_start>k<augxor>n<block_end><return>k<block_end><block_end> |
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
<import_from_stmt>django.conf settings<import_from_stmt>django.template Library Node TemplateSyntaxError Variable<import_from_stmt>django.templatetags.cache CacheNode<line_sep>register=Library()<class_stmt>DummyNode(Node)<block_start><def_stmt>__init__ self nodelist *args<block_start>self.nodelist=nodelist<block_end><def_stmt>render self context<block_start>value=self.nodelist.render(context)<line_sep><return>value<block_end><block_end>@register.tag('cache_large')<def_stmt>do_cache parser token<block_start>nodelist=parser.parse(('endcache_large' ))<line_sep>parser.delete_first_token()<line_sep>tokens=token.split_contents()<if_stmt>len(tokens)<l>3<block_start><raise>TemplateSyntaxError("'%r' tag requires at least 2 arguments."%tokens[0])<block_end><if_stmt><not>settings.CACHE_LARGE_VALUES_ALLOWED<block_start><return>DummyNode(nodelist )<block_end><return>CacheNode(nodelist parser.compile_filter(tokens[1]) tokens[2] # fragment_name can't be a variable.
[parser.compile_filter(t)<for>t tokens[3:]] Variable(repr(settings.CACHE_LARGE_VALUES_ALIAS)) )<block_end> |
# Generated by Django 2.2b1 on 2019-03-11 13:32
<import_from_stmt>django.db migrations<import_stmt>imagekit.models.fields<import_stmt>openbook_posts.helpers<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('openbook_posts' '0021_auto_20190309_1532') ]<line_sep>operations=[migrations.AlterField(model_name='postimage' name='image' field=imagekit.models.fields.ProcessedImageField(height_field='height' null=<true> upload_to=openbook_posts.helpers.upload_to_post_image_directory verbose_name='image' width_field='width') ) ]<block_end> |
# Copyright 2018 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>cloudpickle<as>cp<import_stmt>pyarrow<as>pa<import_stmt>codecs<import_from_stmt>io BytesIO<import_stmt>numpy<as>np<import_from_stmt>.functions_pb2 *<import_from_stmt>. shared<line_sep>SER_FORMAT='raw_unicode_escape'<class_stmt>Serializer()<block_start><def_stmt>__init__ self<block_start><raise>NotImplementedError('Cannot instantiate abstract class.')<block_end><def_stmt>_serialize self msg<block_start><pass><block_end><def_stmt>_deserialize self msg<block_start><pass><block_end><def_stmt>dump self msg<block_start><pass><block_end><def_stmt>load self msg<block_start><pass><block_end><block_end><class_stmt>DefaultSerializer(Serializer)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>_serialize msg<block_start><return>msg<block_end><def_stmt>_deserialize self msg<block_start><return>msg<block_end><def_stmt>dump self msg<block_start><return>cp.dumps(msg)<block_end><def_stmt>load self msg<block_start><return>cp.loads(msg)<block_end><block_end><class_stmt>StringSerializer(Serializer)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>_serialize self msg<block_start><return>codecs.decode(msg SER_FORMAT)<block_end><def_stmt>_deserialize self msg<block_start><return>codecs.encode(msg SER_FORMAT)<block_end><def_stmt>dump self msg<block_start><return>self._serialize(cp.dumps(msg))<block_end><def_stmt>load self msg<block_start><return>cp.loads(self._deserialize(msg))<block_end><block_end># TODO: how can we make serializers pluggable?
<class_stmt>NumpySerializer(DefaultSerializer)<block_start><def_stmt>__init__ self<block_start><pass><block_end><def_stmt>dump self msg<block_start><return>pa.serialize(msg).to_buffer().to_pybytes()<block_end><def_stmt>load self msg<block_start><return>pa.deserialize(msg)<block_end><block_end>numpy_ser=NumpySerializer()<line_sep>default_ser=DefaultSerializer()<line_sep>string_ser=StringSerializer()<line_sep>function_ser=default_ser<def_stmt>get_serializer kind<block_start><global>numpy_ser default_ser string_ser<if_stmt>kind<eq>NUMPY<block_start><return>numpy_ser<block_end><elif_stmt>kind<eq>STRING<block_start><return>string_ser<block_end><elif_stmt>kind<eq>DEFAULT<block_start><return>default_ser<block_end><else_stmt><block_start><return>default_ser<block_end><block_end><def_stmt>serialize_val val valobj=<none> serialize=<true><block_start><if_stmt><not>valobj<block_start>valobj=Value()<block_end><if_stmt>isinstance(val shared.FluentFuture)<block_start>valobj.body=default_ser.dump(shared.FluentReference(val.obj_id <true> LWW))<block_end><elif_stmt>isinstance(val np.ndarray)<block_start>valobj.body=numpy_ser.dump(val)<line_sep>valobj.type=NUMPY<block_end><else_stmt><block_start>valobj.body=default_ser.dump(val)<block_end><if_stmt><not>serialize<block_start><return>valobj<block_end><return>valobj.SerializeToString()<block_end><def_stmt>deserialize_val val<block_start>v=Value()<line_sep>v.ParseFromString(val)<if_stmt>v.type<eq>DEFAULT<block_start><return>default_ser.load(v.body)<block_end><elif_stmt>v.type<eq>STRING<block_start><return>string_ser.load(v.body)<block_end><elif_stmt>v.type<eq>NUMPY<block_start><return>numpy_ser.load(v.body)<block_end><block_end> |
<import_stmt>os<try_stmt><block_start><import_from_stmt>xdebug.unittesting XdebugDeferrableTestCase<block_end><except_stmt><block_start><import_from_stmt>SublimeTextXdebug.xdebug.unittesting XdebugDeferrableTestCase<block_end><class_stmt>TestBreakpointStep(XdebugDeferrableTestCase)<block_start>breakpoint_step_file='breakpoint_step.php'<line_sep>breakpoint_step_file_local_path=os.path.join(XdebugDeferrableTestCase.local_path breakpoint_step_file)<def_stmt>test_step_into self<block_start>self.set_breakpoint(self.breakpoint_step_file_local_path 11)<line_sep>self.run_command('xdebug_session_start')<line_sep><yield>self.window_has_debug_layout<line_sep>breakpoint_view=self.get_view_by_title('Xdebug Breakpoint')<line_sep>context_view=self.get_view_by_title('Xdebug Context')<line_sep>stack_view=self.get_view_by_title('Xdebug Stack')<line_sep>self.assertViewContains(breakpoint_view '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))<line_sep>self.assertViewIsEmpty(context_view)<line_sep>self.assertViewIsEmpty(stack_view)<line_sep>self.send_server_request(path=self.breakpoint_step_file)<def_stmt>context_and_stack_have_content <block_start><return><not>self.view_is_empty(context_view)<and><not>self.view_is_empty(stack_view)<block_end><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greeting = <uninitialized>')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<line_sep>context_view_contents=self.get_contents_of_view(context_view)<line_sep>stack_view_contents=self.get_contents_of_view(stack_view)<def_stmt>context_and_stack_have_different_content <block_start><return>self.get_contents_of_view(context_view)<ne>context_view_contents<and>self.get_contents_of_view(stack_view)<ne>stack_view_contents<block_end>self.run_command('xdebug_execute' 
{'command':'step_into'})<line_sep><yield>context_and_stack_have_different_content<line_sep><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greet = <uninitialized>')<line_sep>self.assertViewContains(context_view '$name = (string) Stranger')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:4, greet()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<line_sep>context_view_contents=self.get_contents_of_view(context_view)<line_sep>stack_view_contents=self.get_contents_of_view(stack_view)<def_stmt>context_and_stack_have_different_content <block_start><return>self.get_contents_of_view(context_view)<ne>context_view_contents<and>self.get_contents_of_view(stack_view)<ne>stack_view_contents<block_end>self.run_command('xdebug_execute' {'command':'step_into'})<line_sep><yield>context_and_stack_have_different_content<line_sep><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greet = (string) Hi')<line_sep>self.assertViewContains(context_view '$name = (string) Stranger')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<block_end><def_stmt>test_step_out self<block_start>self.set_breakpoint(self.breakpoint_step_file_local_path 5)<line_sep>self.run_command('xdebug_session_start')<line_sep><yield>self.window_has_debug_layout<line_sep>breakpoint_view=self.get_view_by_title('Xdebug Breakpoint')<line_sep>context_view=self.get_view_by_title('Xdebug Context')<line_sep>stack_view=self.get_view_by_title('Xdebug Stack')<line_sep>self.assertViewContains(breakpoint_view '=> {file_local_path}\n\t|+| 5'.format(file_local_path=self.breakpoint_step_file_local_path))<line_sep>self.assertViewIsEmpty(context_view)<line_sep>self.assertViewIsEmpty(stack_view)<line_sep>self.send_server_request(path=self.breakpoint_step_file)<def_stmt>context_and_stack_have_content 
<block_start><return><not>self.view_is_empty(context_view)<and><not>self.view_is_empty(stack_view)<block_end><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greet = (string) Hi')<line_sep>self.assertViewContains(context_view '$name = (string) Stranger')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:5, greet()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<line_sep>context_view_contents=self.get_contents_of_view(context_view)<line_sep>stack_view_contents=self.get_contents_of_view(stack_view)<def_stmt>context_and_stack_have_different_content <block_start><return>self.get_contents_of_view(context_view)<ne>context_view_contents<and>self.get_contents_of_view(stack_view)<ne>stack_view_contents<block_end>self.run_command('xdebug_execute' {'command':'step_out'})<line_sep><yield>context_and_stack_have_different_content<line_sep><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greeting = (string) Hello Stranger!')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<block_end><def_stmt>test_step_over self<block_start>self.set_breakpoint(self.breakpoint_step_file_local_path 11)<line_sep>self.run_command('xdebug_session_start')<line_sep><yield>self.window_has_debug_layout<line_sep>breakpoint_view=self.get_view_by_title('Xdebug Breakpoint')<line_sep>context_view=self.get_view_by_title('Xdebug Context')<line_sep>stack_view=self.get_view_by_title('Xdebug Stack')<line_sep>self.assertViewContains(breakpoint_view '=> {file_local_path}\n\t|+| 11'.format(file_local_path=self.breakpoint_step_file_local_path))<line_sep>self.assertViewIsEmpty(context_view)<line_sep>self.assertViewIsEmpty(stack_view)<line_sep>self.send_server_request(path=self.breakpoint_step_file)<def_stmt>context_and_stack_have_content 
<block_start><return><not>self.view_is_empty(context_view)<and><not>self.view_is_empty(stack_view)<block_end><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greeting = <uninitialized>')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:11, {{main}}()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<line_sep>context_view_contents=self.get_contents_of_view(context_view)<line_sep>stack_view_contents=self.get_contents_of_view(stack_view)<def_stmt>context_and_stack_have_different_content <block_start><return>self.get_contents_of_view(context_view)<ne>context_view_contents<and>self.get_contents_of_view(stack_view)<ne>stack_view_contents<block_end>self.run_command('xdebug_execute' {'command':'step_over'})<line_sep><yield>context_and_stack_have_different_content<line_sep><yield>context_and_stack_have_content<line_sep>self.assertViewContains(context_view '$greeting = (string) Hello Stranger!')<line_sep>self.assertViewContains(stack_view '[0] file://{remote_path}/{file}:12, {{main}}()'.format(remote_path=self.remote_path file=self.breakpoint_step_file))<block_end><block_end> |
__all__=["learner"]<import_from_stmt>icevision.imports *<import_from_stmt>icevision.engines.fastai *<import_from_stmt>icevision.models.ultralytics.yolov5.fastai.callbacks Yolov5Callback<import_from_stmt>yolov5.utils.loss ComputeLoss<def_stmt>learner dls:List[Union[DataLoader fastai.DataLoader]] model:nn.Module cbs=<none> **learner_kwargs <block_start>"""Fastai `Learner` adapted for Yolov5.
# Arguments
dls: `Sequence` of `DataLoaders` passed to the `Learner`.
The first one will be used for training and the second for validation.
model: The model to train.
cbs: Optional `Sequence` of callbacks.
**learner_kwargs: Keyword arguments that will be internally passed to `Learner`.
# Returns
A fastai `Learner`.
"""<line_sep>cbs=[Yolov5Callback()]+L(cbs)<line_sep>compute_loss=ComputeLoss(model)<def_stmt>loss_fn preds targets<arrow>Tensor<block_start><return>compute_loss(preds targets)[0]<block_end>learn=adapted_fastai_learner(dls=dls model=model cbs=cbs loss_func=loss_fn **learner_kwargs )<line_sep># HACK: patch AvgLoss (in original, find_bs looks at learn.yb which has shape (N, 6) - with N being number_of_objects_in_image * batch_size. So impossible to retrieve BS)
<class_stmt>Yolov5AvgLoss(fastai.AvgLoss)<block_start><def_stmt>accumulate self learn<block_start>bs=len(learn.xb[0])<line_sep>self.total<augadd>learn.to_detach(learn.loss.mean())<times>bs<line_sep>self.count<augadd>bs<block_end><block_end>recorder=[cb<for>cb learn.cbs<if>isinstance(cb fastai.Recorder)][0]<line_sep>recorder.loss=Yolov5AvgLoss()<line_sep><return>learn<block_end> |
'''
MLCommons
group: TinyMLPerf (https://github.com/mlcommons/tiny)
image classification on cifar10
train.py desc: loads data, trains and saves model, plots training metrics
'''<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pickle<import_stmt>tensorflow<as>tf<import_from_stmt>keras.callbacks LearningRateScheduler<import_from_stmt>keras.utils to_categorical<import_stmt>keras_model<import_stmt>datetime<line_sep>EPOCHS=500<line_sep>BS=32<line_sep># get date ant time to save model
dt=datetime.datetime.today()<line_sep>year=dt.year<line_sep>month=dt.month<line_sep>day=dt.day<line_sep>hour=dt.hour<line_sep>minute=dt.minute<line_sep>"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""<line_sep>#learning rate schedule
<def_stmt>lr_schedule epoch<block_start>initial_learning_rate=0.001<line_sep>decay_per_epoch=0.99<line_sep>lrate=initial_learning_rate<times>(decay_per_epoch<power>epoch)<line_sep>print('Learning rate = %f'%lrate)<line_sep><return>lrate<block_end>lr_scheduler=LearningRateScheduler(lr_schedule)<line_sep>#optimizer
optimizer=tf.keras.optimizers.Adam()<line_sep>#define data generator
datagen=tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=15 width_shift_range=0.1 height_shift_range=0.1 horizontal_flip=<true> #brightness_range=(0.9, 1.2),
#contrast_range=(0.9, 1.2),
validation_split=0.2)<def_stmt>unpickle file<block_start>"""load the cifar-10 data"""<with_stmt>open(file 'rb')<as>fo<block_start>data=pickle.load(fo encoding='bytes')<block_end><return>data<block_end><def_stmt>load_cifar_10_data data_dir negatives=<false><block_start>"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""<line_sep># get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict=unpickle(data_dir+"/batches.meta")<line_sep>cifar_label_names=meta_data_dict[b'label_names']<line_sep>cifar_label_names=np.array(cifar_label_names)<line_sep># training data
cifar_train_data=<none><line_sep>cifar_train_filenames=[]<line_sep>cifar_train_labels=[]<for_stmt>i range(1 6)<block_start>cifar_train_data_dict=unpickle(data_dir+"/data_batch_{}".format(i))<if_stmt>i<eq>1<block_start>cifar_train_data=cifar_train_data_dict[b'data']<block_end><else_stmt><block_start>cifar_train_data=np.vstack((cifar_train_data cifar_train_data_dict[b'data']))<block_end>cifar_train_filenames<augadd>cifar_train_data_dict[b'filenames']<line_sep>cifar_train_labels<augadd>cifar_train_data_dict[b'labels']<block_end>cifar_train_data=cifar_train_data.reshape((len(cifar_train_data) 3 32 32))<if_stmt>negatives<block_start>cifar_train_data=cifar_train_data.transpose(0 2 3 1).astype(np.float32)<block_end><else_stmt><block_start>cifar_train_data=np.rollaxis(cifar_train_data 1 4)<block_end>cifar_train_filenames=np.array(cifar_train_filenames)<line_sep>cifar_train_labels=np.array(cifar_train_labels)<line_sep>cifar_test_data_dict=unpickle(data_dir+"/test_batch")<line_sep>cifar_test_data=cifar_test_data_dict[b'data']<line_sep>cifar_test_filenames=cifar_test_data_dict[b'filenames']<line_sep>cifar_test_labels=cifar_test_data_dict[b'labels']<line_sep>cifar_test_data=cifar_test_data.reshape((len(cifar_test_data) 3 32 32))<if_stmt>negatives<block_start>cifar_test_data=cifar_test_data.transpose(0 2 3 1).astype(np.float32)<block_end><else_stmt><block_start>cifar_test_data=np.rollaxis(cifar_test_data 1 4)<block_end>cifar_test_filenames=np.array(cifar_test_filenames)<line_sep>cifar_test_labels=np.array(cifar_test_labels)<line_sep><return>cifar_train_data cifar_train_filenames to_categorical(cifar_train_labels) cifar_test_data cifar_test_filenames to_categorical(cifar_test_labels) cifar_label_names<block_end><if_stmt>__name__<eq>"__main__"<block_start>"""load cifar10 data and trains 
model"""<line_sep>cifar_10_dir='cifar-10-batches-py'<line_sep>train_data,train_filenames,train_labels,test_data,test_filenames,test_labels,label_names=load_cifar_10_data(cifar_10_dir)<line_sep>print("Train data: " train_data.shape)<line_sep>print("Train filenames: " train_filenames.shape)<line_sep>print("Train labels: " train_labels.shape)<line_sep>print("Test data: " test_data.shape)<line_sep>print("Test filenames: " test_filenames.shape)<line_sep>print("Test labels: " test_labels.shape)<line_sep>print("Label names: " label_names.shape)<line_sep># Don't forget that the label_names and filesnames are in binary and need conversion if used.
# display some random training images in a 25x25 grid
num_plot=5<line_sep>f,ax=plt.subplots(num_plot num_plot)<for_stmt>m range(num_plot)<block_start><for_stmt>n range(num_plot)<block_start>idx=np.random.randint(0 train_data.shape[0])<line_sep>ax[m n].imshow(train_data[idx])<line_sep>ax[m n].get_xaxis().set_visible(<false>)<line_sep>ax[m n].get_yaxis().set_visible(<false>)<block_end><block_end>f.subplots_adjust(hspace=0.1)<line_sep>f.subplots_adjust(wspace=0)<line_sep>plt.show()<line_sep>new_model=keras_model.resnet_v1_eembc()<line_sep>new_model.summary()<line_sep># compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_data)<line_sep>new_model.compile(optimizer=optimizer loss='categorical_crossentropy' metrics='accuracy' loss_weights=<none> weighted_metrics=<none> run_eagerly=<none>)<line_sep># fits the model on batches with real-time data augmentation:
History=new_model.fit(datagen.flow(train_data train_labels batch_size=BS) steps_per_epoch=len(train_data)/BS epochs=EPOCHS callbacks=[lr_scheduler])<line_sep>plt.plot(np.array(range(EPOCHS)) History.history['loss'])<line_sep>plt.plot(np.array(range(EPOCHS)) History.history['accuracy'])<line_sep>plt.savefig('train_loss_acc.png')<line_sep>model_name="trainedResnet.h5"<line_sep>new_model.save("trained_models/"+model_name)<block_end> |
<import_from_stmt>unittest TestCase<import_from_stmt>parameterized parameterized<import_from_stmt>tests.test_utils mock_request_handler<import_from_stmt>web.web_auth_utils remove_webpack_suffixes is_allowed_during_login<class_stmt>WebpackSuffixesTest(TestCase)<block_start><def_stmt>test_remove_webpack_suffixes_when_css self<block_start>normalized=remove_webpack_suffixes('js/chunk-login-vendors.59040343.css')<line_sep>self.assertEqual('js/chunk-login-vendors.css' normalized)<block_end><def_stmt>test_remove_webpack_suffixes_when_js self<block_start>normalized=remove_webpack_suffixes('js/login.be16f278.js')<line_sep>self.assertEqual('js/login.js' normalized)<block_end><def_stmt>test_remove_webpack_suffixes_when_js_map self<block_start>normalized=remove_webpack_suffixes('js/login.be16f278.js.map')<line_sep>self.assertEqual('js/login.js.map' normalized)<block_end><def_stmt>test_remove_webpack_suffixes_when_favicon self<block_start>normalized=remove_webpack_suffixes('favicon.123.ico')<line_sep>self.assertEqual('favicon.123.ico' normalized)<block_end><def_stmt>test_remove_webpack_suffixes_when_no_suffixes self<block_start>normalized=remove_webpack_suffixes('css/chunk-login-vendors.css')<line_sep>self.assertEqual('css/chunk-login-vendors.css' normalized)<block_end><def_stmt>test_remove_webpack_suffixes_when_no_extension self<block_start>normalized=remove_webpack_suffixes('data/some_file')<line_sep>self.assertEqual('data/some_file' normalized)<block_end><block_end><class_stmt>LoginResourcesTest(TestCase)<block_start>@parameterized.expand([('/favicon.ico') ('login.html') ('/js/login.be16f278.js') ('/js/login.be16f278.js.map') ('/js/chunk-login-vendors.18e22e7f.js') ('/js/chunk-login-vendors.18e22e7f.js.map') ('/img/titleBackground_login.a6c36d4c.jpg') ('/css/login.8e74be0f.css') ('/fonts/roboto-latin-400.60fa3c06.woff') ('/fonts/roboto-latin-400.479970ff.woff2') ('/fonts/roboto-latin-500.020c97dc.woff2') 
('/fonts/roboto-latin-500.87284894.woff')])<def_stmt>test_is_allowed_during_login_when_allowed self resource<block_start>request_handler=mock_request_handler(method='GET')<line_sep>allowed=is_allowed_during_login(resource 'login.html' request_handler)<line_sep>self.assertTrue(allowed 'Resource '+resource+' should be allowed, but was not')<block_end><def_stmt>test_is_allowed_during_login_when_prohibited self<block_start>request_handler=mock_request_handler(method='GET')<line_sep>resource='admin.html'<line_sep>allowed=is_allowed_during_login(resource 'login.html' request_handler)<line_sep>self.assertFalse(allowed 'Resource '+resource+' should NOT be allowed, but WAS')<block_end><block_end> |
"""AmLogic s905x3 pin names"""<line_sep># pylint: disable=wildcard-import,unused-wildcard-import
<import_from_stmt>adafruit_blinka.microcontroller.amlogic.meson_g12_common.pin *<line_sep> |
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''Testing module for `python_toolbox.abc_tools.AbstractStaticMethod`.'''<import_stmt>copy<import_from_stmt>python_toolbox.cheat_hashing cheat_hash<def_stmt>test_cheat_hash <block_start>'''Test `cheat_hash` on various objects.'''<line_sep>things=[1 7 4.5 [1 2 3.4] (1 2 3.4) {1:2 3:4.5} {1 2 3.4} [1 [1 2] 3] [1 {frozenset((1 2)):'meow'} 3] sum <none> (<none> {<none>:<none>})]<line_sep>things_copy=copy.deepcopy(things)<for_stmt>thing,thing_copy zip(things things_copy)<block_start><assert_stmt>cheat_hash(thing)<eq>cheat_hash(thing)<eq>cheat_hash(thing_copy)<eq>cheat_hash(thing_copy)<block_end><block_end> |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
<import_from_stmt>pysnmp.hlapi.v1arch.auth *<import_from_stmt>pysnmp.hlapi.v1arch.asyncore *<import_from_stmt>pysnmp.hlapi.varbinds *<import_from_stmt>pysnmp.smi.rfc1902 *<import_from_stmt>pysnmp.proto.api v2c<import_from_stmt>pysnmp.proto.proxy rfc2576<import_from_stmt>pysnmp error<line_sep>__all__=['sendNotification']<line_sep>VB_PROCESSOR=NotificationOriginatorVarBinds()<def_stmt>sendNotification snmpDispatcher authData transportTarget notifyType *varBinds **options<block_start>"""Send SNMP notification.
Based on passed parameters, prepares SNMP TRAP or INFORM
notification (:RFC:`1905#section-4.2.6`) and schedules its
transmission by I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing asyncore-based asynchronous event loop and
associated state information.
authData: :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or
:py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
notifyType: str
Indicates type of notification to be sent. Recognized literal
values are *trap* or *inform*.
\*varBinds: :class:`tuple` of OID-value pairs or :py:class:`~pysnmp.smi.rfc1902.ObjectType` or :py:class:`~pysnmp.smi.rfc1902.NotificationType`
One or more objects representing MIB variables to place
into SNMP notification. It could be tuples of OID-values
or :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
of :py:class:`~pysnmp.smi.rfc1902.NotificationType` objects.
Besides user variable-bindings, SNMP Notification PDU requires at
least two variable-bindings to be present:
0. SNMPv2-MIB::sysUpTime.0 = <agent uptime>
1. SNMPv2-SMI::snmpTrapOID.0 = <notification ID>
When sending SNMPv1 TRAP, more variable-bindings could be present:
2. SNMP-COMMUNITY-MIB::snmpTrapAddress.0 = <agent-IP>
3. SNMP-COMMUNITY-MIB::snmpTrapCommunity.0 = <snmp-community-name>
4. SNMP-COMMUNITY-MIB::snmpTrapEnterprise.0 = <enterprise-OID>
If user does not supply some or any of the above variable-bindings or
if they are at the wrong positions, the system will add/reorder the
missing ones automatically.
On top of that, some notification types imply including some additional
variable-bindings providing additional details on the event being
reported. Therefore it is generally easier to use
:py:class:`~pysnmp.smi.rfc1902.NotificationType` object which will
help adding relevant variable-bindings.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `False`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Note
----
The `SnmpDispatcher` object may be expensive to create, therefore it is
advised to maintain it for the lifecycle of the application/thread for
as long as possible.
Returns
-------
sendRequestHandle: int
Unique request identifier. Can be used for matching received
responses with ongoing *INFORM* requests. Returns `None` for
*TRAP* notifications.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> snmpDispatcher = SnmpDispatcher()
>>>
>>> sendNotification(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 162)),
>>> 'trap',
>>> NotificationType(ObjectIdentity('SNMPv2-MIB', 'coldStart')),
>>> lookupMib=True
>>> )
>>> snmpDispatcher.transportDispatcher.runDispatcher()
"""<line_sep>sysUpTime=v2c.apiTrapPDU.sysUpTime<line_sep>snmpTrapOID=v2c.apiTrapPDU.snmpTrapOID<def_stmt>_ensureVarBinds varBinds# Add sysUpTime if not present already
<block_start><if_stmt><not>varBinds<or>varBinds[0][0]<ne>sysUpTime<block_start>varBinds.insert(0 (v2c.ObjectIdentifier(sysUpTime) v2c.TimeTicks(0)))<block_end># Search for and reposition sysUpTime if it's elsewhere
<for_stmt>idx,varBind enumerate(varBinds[1:])<block_start><if_stmt>varBind[0]<eq>sysUpTime<block_start>varBinds[0]=varBind<del_stmt>varBinds[idx+1]<line_sep><break><block_end><block_end><if_stmt>len(varBinds)<l>2<block_start><raise>error.PySnmpError('SNMP notification PDU requires '<concat>'SNMPv2-MIB::snmpTrapOID.0 to be present')<block_end># Search for and reposition snmpTrapOID if it's elsewhere
<for_stmt>idx,varBind enumerate(varBinds[2:])<block_start><if_stmt>varBind[0]<eq>snmpTrapOID<block_start><del_stmt>varBinds[idx+2]<if_stmt>varBinds[1][0]<eq>snmpTrapOID<block_start>varBinds[1]=varBind<block_end><else_stmt><block_start>varBinds.insert(1 varBind)<block_end><break><block_end><block_end># Fail on missing snmpTrapOID
<if_stmt>varBinds[1][0]<ne>snmpTrapOID<block_start><raise>error.PySnmpError('SNMP notification PDU requires '<concat>'SNMPv2-MIB::snmpTrapOID.0 to be present')<block_end><return>varBinds<block_end><def_stmt>_cbFun snmpDispatcher stateHandle errorIndication rspPdu _cbCtx<block_start><if_stmt><not>cbFun<block_start><return><block_end><if_stmt>errorIndication<block_start>cbFun(errorIndication v2c.Integer(0) v2c.Integer(0) <none> cbCtx=cbCtx snmpDispatcher=snmpDispatcher stateHandle=stateHandle)<line_sep><return><block_end>errorStatus=v2c.apiTrapPDU.getErrorStatus(rspPdu)<line_sep>errorIndex=v2c.apiTrapPDU.getErrorIndex(rspPdu)<line_sep>varBinds=v2c.apiTrapPDU.getVarBinds(rspPdu)<if_stmt>lookupMib<block_start>varBinds=VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache varBinds)<block_end>nextStateHandle=v2c.getNextRequestID()<line_sep>nextVarBinds=cbFun(errorIndication errorStatus errorIndex varBinds cbCtx=cbCtx snmpDispatcher=snmpDispatcher stateHandle=stateHandle nextStateHandle=nextStateHandle)<if_stmt><not>nextVarBinds<block_start><return><block_end>v2c.apiTrapPDU.setRequestID(reqPdu nextStateHandle)<line_sep>v2c.apiTrapPDU.setVarBinds(reqPdu _ensureVarBinds(nextVarBinds))<line_sep><return>snmpDispatcher.sendPdu(authData transportTarget reqPdu cbFun=_cbFun)<block_end>lookupMib,cbFun,cbCtx=[options.get(x)<for>x ('lookupMib' 'cbFun' 'cbCtx')]<if_stmt>lookupMib<block_start>varBinds=VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache varBinds)<block_end><if_stmt>notifyType<eq>'trap'<block_start>reqPdu=v2c.TrapPDU()<block_end><else_stmt><block_start>reqPdu=v2c.InformRequestPDU()<block_end>v2c.apiTrapPDU.setDefaults(reqPdu)<line_sep>v2c.apiTrapPDU.setVarBinds(reqPdu varBinds)<line_sep>varBinds=v2c.apiTrapPDU.getVarBinds(reqPdu)<line_sep>v2c.apiTrapPDU.setVarBinds(reqPdu _ensureVarBinds(varBinds))<if_stmt>authData.mpModel<eq>0<block_start>reqPdu=rfc2576.v2ToV1(reqPdu)<block_end><return>snmpDispatcher.sendPdu(authData transportTarget reqPdu cbFun=_cbFun)<block_end> |
<import_from_stmt>sympy *<line_sep># Implementation of QuaternionBase<Derived>::toRotationMatrix(void).
# The quaternion q is given as a list [qw, qx, qy, qz].
<def_stmt>QuaternionToRotationMatrix q<block_start>tx=2<times>q[1]<line_sep>ty=2<times>q[2]<line_sep>tz=2<times>q[3]<line_sep>twx=tx<times>q[0]<line_sep>twy=ty<times>q[0]<line_sep>twz=tz<times>q[0]<line_sep>txx=tx<times>q[1]<line_sep>txy=ty<times>q[1]<line_sep>txz=tz<times>q[1]<line_sep>tyy=ty<times>q[2]<line_sep>tyz=tz<times>q[2]<line_sep>tzz=tz<times>q[3]<line_sep><return>Matrix([[1-(tyy+tzz) txy-twz txz+twy] [txy+twz 1-(txx+tzz) tyz-twx] [txz-twy tyz+twx 1-(txx+tyy)]])<block_end># Implementation of SO3Group<Scalar> expAndTheta().
# Only implementing the first case (of very small rotation) since we take the Jacobian at zero.
<def_stmt>SO3exp omega<block_start>theta=omega.norm()<line_sep>theta_sq=theta<power>2<line_sep>half_theta=theta/2<line_sep>theta_po4=theta_sq<times>theta_sq<line_sep>imag_factor=Rational(1 2)-Rational(1 48)<times>theta_sq+Rational(1 3840)<times>theta_po4<line_sep>real_factor=1-Rational(1 2)<times>theta_sq+Rational(1 384)<times>theta_po4<line_sep># return SO3Group<Scalar>(Eigen::Quaternion<Scalar>(
# real_factor, imag_factor * omega.x(), imag_factor * omega.y(),
# imag_factor * omega.z()));
qw=real_factor<line_sep>qx=imag_factor<times>omega[0]<line_sep>qy=imag_factor<times>omega[1]<line_sep>qz=imag_factor<times>omega[2]<line_sep><return>QuaternionToRotationMatrix([qw qx qy qz])<block_end># Implementation of SE3Group<Scalar> exp().
# Only implementing the first case (of small rotation) since we take the Jacobian at zero.
<def_stmt>SE3exp tangent<block_start>omega=Matrix(tangent[3:6])<line_sep>V=SO3exp(omega)<line_sep>rotation=V<line_sep>translation=V<times>Matrix(tangent[0:3])<line_sep><return>rotation.row_join(translation)<block_end># Main
init_printing(use_unicode=<true>)<line_sep>print('Variant 1')<line_sep>print('')<line_sep># Define the tangent vector with symbolic elements T_0 to T_5.
# (For a matrix, use: Matrix(3, 1, lambda i,j:var('S_%d%d' % (i,j))) )
T=Matrix(6 1 <lambda>i j:var('T_%d'%(i)))<line_sep># Compute transformation matrix from tangent vector.
T_matrix=SE3exp(T)<line_sep># Define the vector current_T * src:
S=Matrix(3 1 <lambda>i j:var('S_%d'%(i)))<line_sep># Matrix-vector multiplication with homogeneous vector:
result=T_matrix<times>S.col_join(Matrix([1]))<line_sep># Compute Jacobian:
# (Note: The transpose is needed for stacking the matrix columns (instead of rows) into a vector.)
jac=result.transpose().reshape(result.rows<times>result.cols 1).jacobian(T)<line_sep># Take Jacobian at zero:
jac_subs=jac.subs([(T[0] 0) (T[1] 0) (T[2] 0) (T[3] 0) (T[4] 0) (T[5] 0)])<line_sep># Simplify and output:
jac_subs_simple=simplify(jac_subs)<line_sep>pprint(jac_subs_simple)<line_sep>print('')<line_sep>print('')<line_sep>print('Variant 2')<line_sep>print('')<line_sep># Treat the function of which we want to determine the derivative as a list of nested functions.
# This makes it easier to compute the derivative of each part, simplify it, and concatenate the results
# using the chain rule.
### Define the function of which the Jacobian shall be taken ###
# Matrix-vector multiplication with homogeneous vector:
<def_stmt>MatrixVectorMultiplyHomogeneous matrix vector<block_start><return>matrix<times>vector.col_join(Matrix([1]))<block_end># Define the vector current_T * src:
S=Matrix(3 1 <lambda>i j:var('S_%d'%(i)))<line_sep># The list of nested functions. They will be evaluated from right to left
# (this is to match the way they would be written in math: f(g(x)).)
functions=[<lambda>matrix:MatrixVectorMultiplyHomogeneous(matrix S) SE3exp]<line_sep>### Define the variables wrt. to take the Jacobian, and the position for evaluation ###
# Chain rule:
# d(f(g(x))) / dx = (df/dy)(g(x)) * dg/dx
# Define the parameter with respect to take the Jacobian, y in the formula above:
parameters=Matrix(6 1 <lambda>i j:var('T_%d'%(i)))<line_sep># Set the position at which to take the Jacobian, g(x) in the formula above:
parameter_values=zeros(6 1)<line_sep>### Automatic Jacobian calculation, no need to modify anything beyond this point ###
# Jacobian from previous step, dg/dx in the formula above:
previous_jacobian=1<line_sep># TODO: Test whether this works with non-matrix functions.
<def_stmt>ComputeValueAndJacobian function parameters parameter_values# Evaluate the function.
<block_start>values=function(parameter_values)<line_sep># Compute the Jacobian.
symbolic_values=function(parameters)<line_sep>symbolic_values_vector=symbolic_values.transpose().reshape(symbolic_values.rows<times>symbolic_values.cols 1)<line_sep>parameters_vector=parameters.transpose().reshape(parameters.rows<times>parameters.cols 1)<line_sep>jacobian=symbolic_values_vector.jacobian(parameters_vector)<line_sep># Set in the evaluation point.
<for_stmt>row range(0 parameters.rows)<block_start><for_stmt>col range(0 parameters.cols)<block_start>jacobian=jacobian.subs(parameters[row col] parameter_values[row col])<block_end><block_end># Simplify the jacobian.
jacobian=simplify(jacobian)<line_sep><return>(values jacobian)<block_end># Print info about initial state.
print('Taking the Jacobian of these functions (sorted from inner to outer):')<for_stmt>i range(len(functions)-1 -1 -1)<block_start>print(str(functions[i]))<block_end>print('with respect to:')<line_sep>pprint(parameters)<line_sep>print('at position:')<line_sep>pprint(parameter_values)<line_sep>print('')<line_sep># Loop over all functions:
<for_stmt>i range(len(functions)-1 -1 -1)# Compute value and Jacobian of this function.
<block_start>(values jacobian)=ComputeValueAndJacobian(functions[i] parameters parameter_values)<line_sep># Update parameter_values
parameter_values=values<line_sep># Update parameters (create a new symbolic vector of the same size as parameter_values)
parameters=Matrix(values.rows values.cols <lambda>i j:var('T_%d%d'%(i j)))<line_sep># Concatenate this Jacobian with the previous one according to the chain rule:
previous_jacobian=jacobian<times>previous_jacobian<line_sep># Print intermediate result
print('Intermediate step '+str(len(functions)-i)+', for '+str(functions[i]))<line_sep>print('Position after function evaluation (function value):')<line_sep>pprint(parameter_values)<line_sep>print('Jacobian of this function wrt. its input only:')<line_sep>pprint(jacobian)<line_sep>print('Cumulative Jacobian wrt. the innermost parameter:')<line_sep>pprint(previous_jacobian)<line_sep>print('')<block_end># Print final result
print('Final result:')<line_sep>pprint(previous_jacobian)<line_sep> |
<import_stmt>sys<line_sep>sys.path.insert(0 "../../python/")<import_stmt>mxnet<as>mx<import_stmt>numpy<as>np<import_from_stmt>collections namedtuple<import_stmt>time<import_stmt>math<line_sep>RNNState=namedtuple("RNNState" ["h"])<line_sep>RNNParam=namedtuple("RNNParam" ["i2h_weight" "i2h_bias" "h2h_weight" "h2h_bias"])<line_sep>RNNModel=namedtuple("RNNModel" ["rnn_exec" "symbol" "init_states" "last_states" "seq_data" "seq_labels" "seq_outputs" "param_blocks"])<def_stmt>rnn num_hidden in_data prev_state param seqidx layeridx<block_start>i2h=mx.sym.FullyConnected(data=in_data weight=param.i2h_weight bias=param.i2h_bias num_hidden=num_hidden name="t%d_l%d_i2h"%(seqidx layeridx))<if_stmt>seqidx<g>0<block_start>h2h=mx.sym.FullyConnected(data=prev_state weight=param.h2h_weight bias=param.h2h_bias num_hidden=num_hidden name="t%d_l%d_h2h"%(seqidx layeridx))<line_sep>hidden=i2h+h2h<block_end><else_stmt><block_start>hidden=i2h<block_end>hidden=mx.sym.Activation(data=hidden act_type="tanh")<line_sep><return>RNNState(h=hidden)<block_end><def_stmt>rnn_unroll num_rnn_layer seq_len input_size num_hidden num_label<block_start>cls_weight=mx.sym.Variable("cls_weight")<line_sep>cls_bias=mx.sym.Variable("cls_bias")<line_sep>param_cells=[]<for_stmt>i range(num_rnn_layer)<block_start>param_cells.append(RNNParam(i2h_weight=mx.sym.Variable("l%d_i2h_weight"%i) i2h_bias=mx.sym.Variable("l%d_i2h_bias"%i) h2h_weight=mx.sym.Variable("l%d_h2h_weight"%i) h2h_bias=mx.sym.Variable("l%d_h2h_bias"%i)))<block_end>loss_all=[]<line_sep>ori_data=mx.sym.Variable('data')<line_sep>label=mx.sym.Variable('softmax_label')<line_sep>data_timestamp=mx.sym.SliceChannel(data=ori_data num_outputs=seq_len squeeze_axis=1)<line_sep>hidden=<none><for_stmt>seqidx range(seq_len)<block_start>in_data=data_timestamp[seqidx]<line_sep>next_state=rnn(num_hidden in_data=in_data prev_state=hidden param=param_cells[i] seqidx=seqidx layeridx=i)<line_sep>hidden=next_state.h<block_end>fc=mx.sym.FullyConnected(data=hidden 
weight=cls_weight bias=cls_bias num_hidden=num_label)<line_sep>reg=mx.sym.LinearRegressionOutput(data=fc label=label)<line_sep><return>reg<block_end> |
""" Implement alike logic as is done on www.cdecl.org
Try for example:
$ cdelc.py 'char **a;'
"""<import_stmt>argparse<import_stmt>io<import_from_stmt>ppci.api get_current_arch<import_from_stmt>ppci.lang.c CLexer CParser COptions CContext CSemantics<import_from_stmt>ppci.lang.c.nodes types declarations<import_from_stmt>ppci.lang.c.preprocessor prepare_for_parsing<line_sep>parser=argparse.ArgumentParser(description=__doc__ formatter_class=argparse.RawDescriptionHelpFormatter)<line_sep>parser.add_argument('source' type=str)<line_sep>args=parser.parse_args()<line_sep># print('Source:', args.source)
# Parse into ast:
arch=get_current_arch()<line_sep>coptions=COptions()<line_sep>ccontext=CContext(coptions arch.info)<line_sep>semantics=CSemantics(ccontext)<line_sep>cparser=CParser(coptions semantics)<line_sep>clexer=CLexer(COptions())<line_sep>f=io.StringIO(args.source)<line_sep>tokens=clexer.lex(f '<snippet>')<line_sep>tokens=prepare_for_parsing(tokens cparser.keywords)<line_sep>cparser.init_lexer(tokens)<line_sep>semantics.begin()<line_sep>decl=cparser.parse_declarations()[0]<line_sep># Explain:
<def_stmt>explain x<block_start><if_stmt>isinstance(x declarations.VariableDeclaration)<block_start><return>'{} is {}'.format(x.name explain(x.typ))<block_end><elif_stmt>isinstance(x types.PointerType)<block_start><return>'a pointer to {}'.format(explain(x.element_type))<block_end><elif_stmt>isinstance(x types.ArrayType)<block_start><return>'an array of {}'.format(explain(x.element_type))<block_end><elif_stmt>isinstance(x types.BasicType)<block_start><return>'{}'.format(x.type_id)<block_end><else_stmt><block_start>print('???' x)<block_end><block_end>print(explain(decl))<line_sep> |
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""<line_sep># need to advertise different processor type, eg CPU, GPU, TPU
<import_stmt>traceback<import_stmt>logging<import_from_stmt>curt.base_service BaseService<class_stmt>VisionProcessorService(BaseService)<block_start><def_stmt>__init__ self<block_start>super().__init__("VisionProcessor")<block_end><def_stmt>execute_function self worker data<block_start>config_worker=data[-1]<try_stmt><block_start><if_stmt>config_worker<block_start><return>worker.config_worker(data[0])<block_end><else_stmt><block_start><if_stmt>isinstance(data[0] list)<block_start><return>worker.run_inference(data[0])<block_end><elif_stmt>isinstance(data[0] dict)<block_start>data_list=[]<for_stmt>param data[0]["ready_data"]<block_start>data_list.append(param)<block_end><for_stmt>guid data[0].keys()<block_start><if_stmt>guid<ne>"ready_data"<block_start>data_list.append(data[0][guid])<block_end><block_end><return>worker.run_inference(data_list)<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>logging.error(traceback.format_exc())<block_end><block_end><block_end> |
<import_stmt>time<import_stmt>argparse<import_from_stmt>datetime datetime<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>os<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>torch.multiprocessing<as>mp<import_from_stmt>models NavCnnModel NavCnnRnnModel NavCnnRnnMultModel NavPlannerControllerModel<import_from_stmt>data EqaDataLoader<import_from_stmt>metrics NavMetric<import_from_stmt>models MaskedNLLCriterion<import_from_stmt>models get_state ensure_shared_grads<import_from_stmt>data load_vocab<import_from_stmt>torch.autograd Variable<import_from_stmt>tqdm tqdm<import_stmt>time<line_sep>torch.backends.cudnn.enabled=<false><line_sep>################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
<import_stmt>torch._utils<try_stmt><block_start>torch._utils._rebuild_tensor_v2<block_end><except_stmt>AttributeError<block_start><def_stmt>_rebuild_tensor_v2 storage storage_offset size stride requires_grad backward_hooks<block_start>tensor=torch._utils._rebuild_tensor(storage storage_offset size stride)<line_sep>tensor.requires_grad=requires_grad<line_sep>tensor._backward_hooks=backward_hooks<line_sep><return>tensor<block_end>torch._utils._rebuild_tensor_v2=_rebuild_tensor_v2<block_end>################################################################################################
<def_stmt>eval rank args shared_model<block_start>torch.cuda.set_device(args.gpus.index(args.gpus[rank%len(args.gpus)]))<if_stmt>args.model_type<eq>'cnn'<block_start>model_kwargs={}<line_sep>model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'cnn+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm'<block_start>model_kwargs={}<line_sep>model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm-mult+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnRnnMultModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'pacman'<block_start>model_kwargs={'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavPlannerControllerModel(**model_kwargs)<block_end><else_stmt><block_start>exit()<block_end>eval_loader_kwargs={'questions_h5':getattr(args args.eval_split+'_h5') 'data_json':args.data_json 'vocab':args.vocab_json 'target_obj_conn_map_dir':args.target_obj_conn_map_dir 'map_resolution':args.map_resolution 'batch_size':1 'input_type':args.model_type 'num_frames':5 'split':args.eval_split 'max_threads_per_gpu':args.max_threads_per_gpu 'gpu_id':args.gpus[rank%len(args.gpus)] 'to_cache':<false> 'overfit':args.overfit 'max_controller_actions':args.max_controller_actions }<line_sep>eval_loader=EqaDataLoader(**eval_loader_kwargs)<line_sep>print('eval_loader has %d samples'%len(eval_loader.dataset))<line_sep>logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))<line_sep>args.output_log_path=os.path.join(args.log_dir 'eval_'+str(rank)+'.json')<line_sep>t,epoch,best_eval_acc=0 0 
0.0<line_sep>max_epochs=args.max_epochs<if_stmt>args.mode<eq>'eval'<block_start>max_epochs=1<block_end><while_stmt>epoch<l>int(max_epochs)<block_start>invalids=[]<line_sep>model.load_state_dict(shared_model.state_dict())<line_sep>model.eval()<line_sep># that's a lot of numbers
metrics=NavMetric(info={'split':args.eval_split 'thread':rank} metric_names=['d_0_10' 'd_0_30' 'd_0_50' 'd_T_10' 'd_T_30' 'd_T_50' 'd_D_10' 'd_D_30' 'd_D_50' 'd_min_10' 'd_min_30' 'd_min_50' 'r_T_10' 'r_T_30' 'r_T_50' 'r_e_10' 'r_e_30' 'r_e_50' 'stop_10' 'stop_30' 'stop_50' 'ep_len_10' 'ep_len_30' 'ep_len_50'] log_json=args.output_log_path)<if_stmt>'cnn'<in>args.model_type<block_start>done=<false><while_stmt>done<eq><false><block_start><for_stmt>batch tqdm(eval_loader)<block_start>model.load_state_dict(shared_model.state_dict())<line_sep>model.cuda()<line_sep>idx,questions,_,img_feats,actions_in,actions_out,action_length=batch<line_sep>metrics_slug={}<line_sep># evaluate at multiple initializations
<for_stmt>i [10 30 50]<block_start>t<augadd>1<if_stmt>action_length[0]+1-i-5<l>0<block_start>invalids.append(idx[0])<line_sep><continue><block_end>ep_inds=[x<for>x range(action_length[0]+1-i-5 action_length[0]+1-i)]<line_sep>sub_img_feats=torch.index_select(img_feats 1 torch.LongTensor(ep_inds))<line_sep>init_pos=eval_loader.dataset.episode_pos_queue[ep_inds[-1]]<line_sep>h3d=eval_loader.dataset.episode_house<line_sep>h3d.env.reset(x=init_pos[0] y=init_pos[2] yaw=init_pos[3])<line_sep>init_dist_to_target=h3d.get_dist_to_target(h3d.env.cam.pos)<if_stmt>init_dist_to_target<l>0# unreachable
<block_start>invalids.append(idx[0])<line_sep><continue><block_end>sub_img_feats_var=Variable(sub_img_feats.cuda())<if_stmt>'+q'<in>args.model_type<block_start>questions_var=Variable(questions.cuda())<block_end># sample actions till max steps or <stop>
# max no. of actions = 100
episode_length=0<line_sep>episode_done=<true><line_sep>dists_to_target,pos_queue,actions=[init_dist_to_target] [init_pos] []<for_stmt>step range(args.max_episode_length)<block_start>episode_length<augadd>1<if_stmt>'+q'<in>args.model_type<block_start>scores=model(sub_img_feats_var questions_var)<block_end><else_stmt><block_start>scores=model(sub_img_feats_var)<block_end>prob=F.softmax(scores dim=1)<line_sep>action=int(prob.max(1)[1].data.cpu().numpy()[0])<line_sep>actions.append(action)<line_sep>img,_,episode_done=h3d.step(action)<line_sep>episode_done=episode_done<or>episode_length<ge>args.max_episode_length<line_sep>img=torch.from_numpy(img.transpose(2 0 1)).float()/255.0<line_sep>img_feat_var=eval_loader.dataset.cnn(Variable(img.view(1 3 224 224).cuda())).view(1 1 3200)<line_sep>sub_img_feats_var=torch.cat([sub_img_feats_var img_feat_var] dim=1)<line_sep>sub_img_feats_var=sub_img_feats_var[: -5: :]<line_sep>dists_to_target.append(h3d.get_dist_to_target(h3d.env.cam.pos))<line_sep>pos_queue.append([h3d.env.cam.pos.x h3d.env.cam.pos.y h3d.env.cam.pos.z h3d.env.cam.yaw])<if_stmt>episode_done<eq><true><block_start><break><block_end><block_end># compute stats
metrics_slug['d_0_'+str(i)]=dists_to_target[0]<line_sep>metrics_slug['d_T_'+str(i)]=dists_to_target[-1]<line_sep>metrics_slug['d_D_'+str(i)]=dists_to_target[0]-dists_to_target[-1]<line_sep>metrics_slug['d_min_'+str(i)]=np.array(dists_to_target).min()<line_sep>metrics_slug['ep_len_'+str(i)]=episode_length<if_stmt>action<eq>3<block_start>metrics_slug['stop_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['stop_'+str(i)]=0<block_end>inside_room=[]<for_stmt>p pos_queue<block_start>inside_room.append(h3d.is_inside_room(p eval_loader.dataset.target_room))<block_end><if_stmt>inside_room[-1]<eq><true><block_start>metrics_slug['r_T_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_T_'+str(i)]=0<block_end><if_stmt>any([x<eq><true><for>x inside_room])<eq><true><block_start>metrics_slug['r_e_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_e_'+str(i)]=0<block_end><block_end># collate and update metrics
metrics_list=[]<for_stmt>i metrics.metric_names<block_start><if_stmt>i<not><in>metrics_slug<block_start>metrics_list.append(metrics.metrics[metrics.metric_names.index(i)][0])<block_end><else_stmt><block_start>metrics_list.append(metrics_slug[i])<block_end><block_end># update metrics
metrics.update(metrics_list)<block_end>print(metrics.get_stat_string(mode=0))<line_sep>print('invalids' len(invalids))<line_sep>logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))<line_sep>logging.info("EVAL: invalids: {}".format(len(invalids)))<line_sep># del h3d
eval_loader.dataset._load_envs()<if_stmt>len(eval_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><block_end><block_end><block_end><elif_stmt>'lstm'<in>args.model_type<block_start>done=<false><while_stmt>done<eq><false><block_start><if_stmt>args.overfit<block_start>metrics=NavMetric(info={'split':args.eval_split 'thread':rank} metric_names=['d_0_10' 'd_0_30' 'd_0_50' 'd_T_10' 'd_T_30' 'd_T_50' 'd_D_10' 'd_D_30' 'd_D_50' 'd_min_10' 'd_min_30' 'd_min_50' 'r_T_10' 'r_T_30' 'r_T_50' 'r_e_10' 'r_e_30' 'r_e_50' 'stop_10' 'stop_30' 'stop_50' 'ep_len_10' 'ep_len_30' 'ep_len_50'] log_json=args.output_log_path)<block_end><for_stmt>batch tqdm(eval_loader)<block_start>model.load_state_dict(shared_model.state_dict())<line_sep>model.cuda()<line_sep>idx,questions,answer,_,actions_in,actions_out,action_lengths,_=batch<line_sep>question_var=Variable(questions.cuda())<line_sep>metrics_slug={}<line_sep># evaluate at multiple initializations
<for_stmt>i [10 30 50]<block_start>t<augadd>1<if_stmt>action_lengths[0]-1-i<l>0<block_start>invalids.append([idx[0] i])<line_sep><continue><block_end>h3d=eval_loader.dataset.episode_house<line_sep># forward through lstm till spawn
<if_stmt>len(eval_loader.dataset.episode_pos_queue[:-i])<g>0<block_start>images=eval_loader.dataset.get_frames(h3d eval_loader.dataset.episode_pos_queue[:-i] preprocess=<true>)<line_sep>raw_img_feats=eval_loader.dataset.cnn(Variable(torch.FloatTensor(images).cuda()))<line_sep>actions_in_pruned=actions_in[: :action_lengths[0]-i]<line_sep>actions_in_var=Variable(actions_in_pruned.cuda())<line_sep>action_lengths_pruned=action_lengths.clone().fill_(action_lengths[0]-i)<line_sep>img_feats_var=raw_img_feats.view(1 -1 3200)<if_stmt>'+q'<in>args.model_type<block_start>scores,hidden=model(img_feats_var question_var actions_in_var action_lengths_pruned.cpu().numpy())<block_end><else_stmt><block_start>scores,hidden=model(img_feats_var <false> actions_in_var action_lengths_pruned.cpu().numpy())<block_end><try_stmt><block_start>init_pos=eval_loader.dataset.episode_pos_queue[-i]<block_end><except_stmt><block_start>invalids.append([idx[0] i])<line_sep><continue><block_end>action_in=torch.LongTensor(1 1).fill_(actions_in[0 action_lengths[0]-i]).cuda()<block_end><else_stmt><block_start>init_pos=eval_loader.dataset.episode_pos_queue[-i]<line_sep>hidden=model.nav_rnn.init_hidden(1)<line_sep>action_in=torch.LongTensor(1 1).fill_(0).cuda()<block_end>h3d.env.reset(x=init_pos[0] y=init_pos[2] yaw=init_pos[3])<line_sep>init_dist_to_target=h3d.get_dist_to_target(h3d.env.cam.pos)<if_stmt>init_dist_to_target<l>0# unreachable
<block_start>invalids.append([idx[0] i])<line_sep><continue><block_end>img=h3d.env.render()<line_sep>img=torch.from_numpy(img.transpose(2 0 1)).float()/255.0<line_sep>img_feat_var=eval_loader.dataset.cnn(Variable(img.view(1 3 224 224).cuda())).view(1 1 3200)<line_sep>episode_length=0<line_sep>episode_done=<true><line_sep>dists_to_target,pos_queue,actions=[init_dist_to_target] [init_pos] []<line_sep>actual_pos_queue=[(h3d.env.cam.pos.x h3d.env.cam.pos.z h3d.env.cam.yaw)]<for_stmt>step range(args.max_episode_length)<block_start>episode_length<augadd>1<if_stmt>'+q'<in>args.model_type<block_start>scores,hidden=model(img_feat_var question_var Variable(action_in) <false> hidden=hidden step=<true>)<block_end><else_stmt><block_start>scores,hidden=model(img_feat_var <false> Variable(action_in) <false> hidden=hidden step=<true>)<block_end>prob=F.softmax(scores dim=1)<line_sep>action=int(prob.max(1)[1].data.cpu().numpy()[0])<line_sep>actions.append(action)<line_sep>img,_,episode_done=h3d.step(action)<line_sep>episode_done=episode_done<or>episode_length<ge>args.max_episode_length<line_sep>img=torch.from_numpy(img.transpose(2 0 1)).float()/255.0<line_sep>img_feat_var=eval_loader.dataset.cnn(Variable(img.view(1 3 224 224).cuda())).view(1 1 3200)<line_sep>action_in=torch.LongTensor(1 1).fill_(action+1).cuda()<line_sep>dists_to_target.append(h3d.get_dist_to_target(h3d.env.cam.pos))<line_sep>pos_queue.append([h3d.env.cam.pos.x h3d.env.cam.pos.y h3d.env.cam.pos.z h3d.env.cam.yaw])<if_stmt>episode_done<eq><true><block_start><break><block_end>actual_pos_queue.append([h3d.env.cam.pos.x h3d.env.cam.pos.z h3d.env.cam.yaw])<block_end># compute stats
metrics_slug['d_0_'+str(i)]=dists_to_target[0]<line_sep>metrics_slug['d_T_'+str(i)]=dists_to_target[-1]<line_sep>metrics_slug['d_D_'+str(i)]=dists_to_target[0]-dists_to_target[-1]<line_sep>metrics_slug['d_min_'+str(i)]=np.array(dists_to_target).min()<line_sep>metrics_slug['ep_len_'+str(i)]=episode_length<if_stmt>action<eq>3<block_start>metrics_slug['stop_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['stop_'+str(i)]=0<block_end>inside_room=[]<for_stmt>p pos_queue<block_start>inside_room.append(h3d.is_inside_room(p eval_loader.dataset.target_room))<block_end><if_stmt>inside_room[-1]<eq><true><block_start>metrics_slug['r_T_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_T_'+str(i)]=0<block_end><if_stmt>any([x<eq><true><for>x inside_room])<eq><true><block_start>metrics_slug['r_e_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_e_'+str(i)]=0<block_end><block_end># collate and update metrics
metrics_list=[]<for_stmt>i metrics.metric_names<block_start><if_stmt>i<not><in>metrics_slug<block_start>metrics_list.append(metrics.metrics[metrics.metric_names.index(i)][0])<block_end><else_stmt><block_start>metrics_list.append(metrics_slug[i])<block_end><block_end># update metrics
metrics.update(metrics_list)<block_end>print(metrics.get_stat_string(mode=0))<line_sep>print('invalids' len(invalids))<line_sep>logging.info("EVAL: init_steps: {} metrics: {}".format(i metrics.get_stat_string(mode=0)))<line_sep>logging.info("EVAL: init_steps: {} invalids: {}".format(i len(invalids)))<line_sep># del h3d
eval_loader.dataset._load_envs()<line_sep>print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))<line_sep>logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))<assert_stmt>len(eval_loader.dataset.pruned_env_set)<g>0<if_stmt>len(eval_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><block_end><block_end><block_end><elif_stmt>'pacman'<in>args.model_type<block_start>done=<false><while_stmt>done<eq><false><block_start><if_stmt>args.overfit<block_start>metrics=NavMetric(info={'split':args.eval_split 'thread':rank} metric_names=['d_0_10' 'd_0_30' 'd_0_50' 'd_T_10' 'd_T_30' 'd_T_50' 'd_D_10' 'd_D_30' 'd_D_50' 'd_min_10' 'd_min_30' 'd_min_50' 'r_T_10' 'r_T_30' 'r_T_50' 'r_e_10' 'r_e_30' 'r_e_50' 'stop_10' 'stop_30' 'stop_50' 'ep_len_10' 'ep_len_30' 'ep_len_50'] log_json=args.output_log_path)<block_end><for_stmt>batch tqdm(eval_loader)<block_start>model.load_state_dict(shared_model.state_dict())<line_sep>model.cuda()<line_sep>idx,question,answer,actions,action_length=batch<line_sep>metrics_slug={}<line_sep>h3d=eval_loader.dataset.episode_house<line_sep># evaluate at multiple initializations
<for_stmt>i [10 30 50]<block_start>t<augadd>1<if_stmt>i<g>action_length[0]<block_start>invalids.append([idx[0] i])<line_sep><continue><block_end>question_var=Variable(question.cuda())<line_sep>controller_step=<false><line_sep>planner_hidden=model.planner_nav_rnn.init_hidden(1)<line_sep># get hierarchical action history
(planner_actions_in planner_img_feats controller_step controller_action_in controller_img_feats init_pos controller_action_counter)=eval_loader.dataset.get_hierarchical_features_till_spawn(actions[0 :action_length[0]+1].numpy() i args.max_controller_actions)<line_sep>planner_actions_in_var=Variable(planner_actions_in.cuda())<line_sep>planner_img_feats_var=Variable(planner_img_feats.cuda())<line_sep># forward planner till spawn to update hidden state
<for_stmt>step range(planner_actions_in.size(0))<block_start>planner_scores,planner_hidden=model.planner_step(question_var planner_img_feats_var[step].unsqueeze(0).unsqueeze(0) planner_actions_in_var[step].view(1 1) planner_hidden)<block_end>h3d.env.reset(x=init_pos[0] y=init_pos[2] yaw=init_pos[3])<line_sep>init_dist_to_target=h3d.get_dist_to_target(h3d.env.cam.pos)<if_stmt>init_dist_to_target<l>0# unreachable
<block_start>invalids.append([idx[0] i])<line_sep><continue><block_end>dists_to_target,pos_queue,pred_actions=[init_dist_to_target] [init_pos] []<line_sep>planner_actions,controller_actions=[] []<line_sep>episode_length=0<if_stmt>args.max_controller_actions<g>1<block_start>controller_action_counter=controller_action_counter%args.max_controller_actions<line_sep>controller_action_counter=max(controller_action_counter-1 0)<block_end><else_stmt><block_start>controller_action_counter=0<block_end>first_step=<true><line_sep>first_step_is_controller=controller_step<line_sep>planner_step=<true><line_sep>action=int(controller_action_in)<for_stmt>step range(args.max_episode_length)<block_start><if_stmt><not>first_step<block_start>img=torch.from_numpy(img.transpose(2 0 1)).float()/255.0<line_sep>img_feat_var=eval_loader.dataset.cnn(Variable(img.view(1 3 224 224).cuda())).view(1 1 3200)<block_end><else_stmt><block_start>img_feat_var=Variable(controller_img_feats.cuda()).view(1 1 3200)<block_end><if_stmt><not>first_step<or>first_step_is_controller# query controller to continue or not
<block_start>controller_action_in=Variable(torch.LongTensor(1 1).fill_(action).cuda())<line_sep>controller_scores=model.controller_step(img_feat_var controller_action_in planner_hidden[0])<line_sep>prob=F.softmax(controller_scores dim=1)<line_sep>controller_action=int(prob.max(1)[1].data.cpu().numpy()[0])<if_stmt>controller_action<eq>1<and>controller_action_counter<l>args.max_controller_actions-1<block_start>controller_action_counter<augadd>1<line_sep>planner_step=<false><block_end><else_stmt><block_start>controller_action_counter=0<line_sep>planner_step=<true><line_sep>controller_action=0<block_end>controller_actions.append(controller_action)<line_sep>first_step=<false><block_end><if_stmt>planner_step<block_start><if_stmt><not>first_step<block_start>action_in=torch.LongTensor(1 1).fill_(action+1).cuda()<line_sep>planner_scores,planner_hidden=model.planner_step(question_var img_feat_var Variable(action_in) planner_hidden)<block_end>prob=F.softmax(planner_scores dim=1)<line_sep>action=int(prob.max(1)[1].data.cpu().numpy()[0])<line_sep>planner_actions.append(action)<block_end>episode_done=action<eq>3<or>episode_length<ge>args.max_episode_length<line_sep>episode_length<augadd>1<line_sep>dists_to_target.append(h3d.get_dist_to_target(h3d.env.cam.pos))<line_sep>pos_queue.append([h3d.env.cam.pos.x h3d.env.cam.pos.y h3d.env.cam.pos.z h3d.env.cam.yaw])<if_stmt>episode_done<block_start><break><block_end>img,_,_=h3d.step(action)<line_sep>first_step=<false><block_end># compute stats
metrics_slug['d_0_'+str(i)]=dists_to_target[0]<line_sep>metrics_slug['d_T_'+str(i)]=dists_to_target[-1]<line_sep>metrics_slug['d_D_'+str(i)]=dists_to_target[0]-dists_to_target[-1]<line_sep>metrics_slug['d_min_'+str(i)]=np.array(dists_to_target).min()<line_sep>metrics_slug['ep_len_'+str(i)]=episode_length<if_stmt>action<eq>3<block_start>metrics_slug['stop_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['stop_'+str(i)]=0<block_end>inside_room=[]<for_stmt>p pos_queue<block_start>inside_room.append(h3d.is_inside_room(p eval_loader.dataset.target_room))<block_end><if_stmt>inside_room[-1]<eq><true><block_start>metrics_slug['r_T_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_T_'+str(i)]=0<block_end><if_stmt>any([x<eq><true><for>x inside_room])<eq><true><block_start>metrics_slug['r_e_'+str(i)]=1<block_end><else_stmt><block_start>metrics_slug['r_e_'+str(i)]=0<block_end><block_end># collate and update metrics
metrics_list=[]<for_stmt>i metrics.metric_names<block_start><if_stmt>i<not><in>metrics_slug<block_start>metrics_list.append(metrics.metrics[metrics.metric_names.index(i)][0])<block_end><else_stmt><block_start>metrics_list.append(metrics_slug[i])<block_end><block_end># update metrics
metrics.update(metrics_list)<block_end><try_stmt><block_start>print(metrics.get_stat_string(mode=0))<line_sep>logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))<block_end><except_stmt><block_start><pass><block_end>print('epoch' epoch)<line_sep>print('invalids' len(invalids))<line_sep>logging.info("EVAL: epoch {}".format(epoch))<line_sep>logging.info("EVAL: invalids {}".format(invalids))<line_sep># del h3d
eval_loader.dataset._load_envs()<if_stmt>len(eval_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><block_end><block_end><block_end>epoch<augadd>1<line_sep># checkpoint if best val loss
<if_stmt>metrics.metrics[8][0]<g>best_eval_acc# d_D_50
<block_start>best_eval_acc=metrics.metrics[8][0]<if_stmt>epoch%args.eval_every<eq>0<and>args.log<eq><true><block_start>metrics.dump_log()<line_sep>model_state=get_state(model)<line_sep>aad=dict(args.__dict__)<line_sep>ad={}<for_stmt>i aad<block_start><if_stmt>i[0]<ne>'_'<block_start>ad[i]=aad[i]<block_end><block_end>checkpoint={'args':ad 'state':model_state 'epoch':epoch}<line_sep>checkpoint_path='%s/epoch_%d_d_D_50_%.04f.pt'%(args.checkpoint_dir epoch best_eval_acc)<line_sep>print('Saving checkpoint to %s'%checkpoint_path)<line_sep>logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))<line_sep>torch.save(checkpoint checkpoint_path)<block_end><block_end>print('[best_eval_d_D_50:%.04f]'%best_eval_acc)<line_sep>logging.info("EVAL: [best_eval_d_D_50:{:.04f}]".format(best_eval_acc))<line_sep>eval_loader.dataset._load_envs(start_idx=0 in_order=<true>)<block_end><block_end><def_stmt>train rank args shared_model<block_start>torch.cuda.set_device(args.gpus.index(args.gpus[rank%len(args.gpus)]))<if_stmt>args.model_type<eq>'cnn'<block_start>model_kwargs={}<line_sep>model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'cnn+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm'<block_start>model_kwargs={}<line_sep>model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm-mult+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnRnnMultModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm+q'<block_start>model_kwargs={'question_input':<true> 
'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'pacman'<block_start>model_kwargs={'question_vocab':load_vocab(args.vocab_json)}<line_sep>model=NavPlannerControllerModel(**model_kwargs)<block_end><else_stmt><block_start>exit()<block_end>lossFn=torch.nn.CrossEntropyLoss().cuda()<line_sep>optim=torch.optim.Adamax(filter(<lambda>p:p.requires_grad shared_model.parameters()) lr=args.learning_rate)<line_sep>train_loader_kwargs={'questions_h5':args.train_h5 'data_json':args.data_json 'vocab':args.vocab_json 'batch_size':args.batch_size 'input_type':args.model_type 'num_frames':5 'map_resolution':args.map_resolution 'split':'train' 'max_threads_per_gpu':args.max_threads_per_gpu 'gpu_id':args.gpus[rank%len(args.gpus)] 'to_cache':args.cache 'overfit':args.overfit 'max_controller_actions':args.max_controller_actions 'max_actions':args.max_actions}<line_sep>args.output_log_path=os.path.join(args.log_dir 'train_'+str(rank)+'.json')<if_stmt>'pacman'<in>args.model_type<block_start>metrics=NavMetric(info={'split':'train' 'thread':rank} metric_names=['planner_loss' 'controller_loss'] log_json=args.output_log_path)<block_end><else_stmt><block_start>metrics=NavMetric(info={'split':'train' 'thread':rank} metric_names=['loss'] log_json=args.output_log_path)<block_end>train_loader=EqaDataLoader(**train_loader_kwargs)<line_sep>print('train_loader has %d samples'%len(train_loader.dataset))<line_sep>logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))<line_sep>t,epoch=0 0<while_stmt>epoch<l>int(args.max_epochs)<block_start><if_stmt>'cnn'<in>args.model_type<block_start>done=<false><line_sep>all_envs_loaded=train_loader.dataset._check_if_all_envs_loaded()<while_stmt>done<eq><false><block_start><for_stmt>batch 
train_loader<block_start>t<augadd>1<line_sep>model.load_state_dict(shared_model.state_dict())<line_sep>model.train()<line_sep>model.cuda()<line_sep>idx,questions,_,img_feats,_,actions_out,_=batch<line_sep>img_feats_var=Variable(img_feats.cuda())<if_stmt>'+q'<in>args.model_type<block_start>questions_var=Variable(questions.cuda())<block_end>actions_out_var=Variable(actions_out.cuda())<if_stmt>'+q'<in>args.model_type<block_start>scores=model(img_feats_var questions_var)<block_end><else_stmt><block_start>scores=model(img_feats_var)<block_end>loss=lossFn(scores actions_out_var)<line_sep># zero grad
optim.zero_grad()<line_sep># update metrics
metrics.update([loss.data[0]])<line_sep># backprop and update
loss.backward()<line_sep>ensure_shared_grads(model.cpu() shared_model)<line_sep>optim.step()<if_stmt>t%args.print_every<eq>0<block_start>print(metrics.get_stat_string())<line_sep>logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))<if_stmt>args.log<eq><true><block_start>metrics.dump_log()<block_end><block_end>print('[CHECK][Cache:%d][Total:%d]'%(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<line_sep>logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<block_end><if_stmt>all_envs_loaded<eq><false><block_start>train_loader.dataset._load_envs(in_order=<true>)<if_stmt>len(train_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><if_stmt>args.cache<eq><false><block_start>train_loader.dataset._load_envs(start_idx=0 in_order=<true>)<block_end><block_end><block_end><else_stmt><block_start>done=<true><block_end><block_end><block_end><elif_stmt>'lstm'<in>args.model_type<block_start>lossFn=MaskedNLLCriterion().cuda()<line_sep>done=<false><line_sep>all_envs_loaded=train_loader.dataset._check_if_all_envs_loaded()<line_sep>total_times=[]<while_stmt>done<eq><false><block_start>start_time=time.time()<for_stmt>batch train_loader<block_start>t<augadd>1<line_sep>model.load_state_dict(shared_model.state_dict())<line_sep>model.train()<line_sep>model.cuda()<line_sep>idx,questions,_,img_feats,actions_in,actions_out,action_lengths,masks=batch<line_sep>img_feats_var=Variable(img_feats.cuda())<if_stmt>'+q'<in>args.model_type<block_start>questions_var=Variable(questions.cuda())<block_end>actions_in_var=Variable(actions_in.cuda())<line_sep>actions_out_var=Variable(actions_out.cuda())<line_sep>action_lengths=action_lengths.cuda()<line_sep>masks_var=Variable(masks.cuda())<line_sep>action_lengths,perm_idx=action_lengths.sort(0 
descending=<true>)<line_sep>img_feats_var=img_feats_var[perm_idx]<if_stmt>'+q'<in>args.model_type<block_start>questions_var=questions_var[perm_idx]<block_end>actions_in_var=actions_in_var[perm_idx]<line_sep>actions_out_var=actions_out_var[perm_idx]<line_sep>masks_var=masks_var[perm_idx]<if_stmt>'+q'<in>args.model_type<block_start>scores,hidden=model(img_feats_var questions_var actions_in_var action_lengths.cpu().numpy())<block_end><else_stmt><block_start>scores,hidden=model(img_feats_var <false> actions_in_var action_lengths.cpu().numpy())<block_end>#block out masks
<if_stmt>args.curriculum<block_start>curriculum_length=(epoch+1)<times>5<for_stmt>i,action_length enumerate(action_lengths)<block_start><if_stmt>action_length-curriculum_length<g>0<block_start>masks_var[i :action_length-curriculum_length]=0<block_end><block_end><block_end>logprob=F.log_softmax(scores dim=1)<line_sep>loss=lossFn(logprob actions_out_var[: :action_lengths.max()].contiguous().view(-1 1) masks_var[: :action_lengths.max()].contiguous().view(-1 1))<line_sep># zero grad
optim.zero_grad()<line_sep># update metrics
metrics.update([loss.data[0]])<line_sep>logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))<line_sep># backprop and update
loss.backward()<line_sep>ensure_shared_grads(model.cpu() shared_model)<line_sep>optim.step()<if_stmt>t%args.print_every<eq>0<block_start>print(metrics.get_stat_string())<line_sep>logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))<if_stmt>args.log<eq><true><block_start>metrics.dump_log()<block_end><block_end>print('[CHECK][Cache:%d][Total:%d]'%(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<line_sep>logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<block_end><if_stmt>all_envs_loaded<eq><false><block_start>train_loader.dataset._load_envs(in_order=<true>)<if_stmt>len(train_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><if_stmt>args.cache<eq><false><block_start>train_loader.dataset._load_envs(start_idx=0 in_order=<true>)<block_end><block_end><block_end><else_stmt><block_start>done=<true><block_end><block_end><block_end><elif_stmt>'pacman'<in>args.model_type<block_start>planner_lossFn=MaskedNLLCriterion().cuda()<line_sep>controller_lossFn=MaskedNLLCriterion().cuda()<line_sep>done=<false><line_sep>all_envs_loaded=train_loader.dataset._check_if_all_envs_loaded()<while_stmt>done<eq><false><block_start><for_stmt>batch 
train_loader<block_start>t<augadd>1<line_sep>model.load_state_dict(shared_model.state_dict())<line_sep>model.train()<line_sep>model.cuda()<line_sep>idx,questions,_,planner_img_feats,planner_actions_in,planner_actions_out,planner_action_lengths,planner_masks,controller_img_feats,controller_actions_in,planner_hidden_idx,controller_outs,controller_action_lengths,controller_masks=batch<line_sep>questions_var=Variable(questions.cuda())<line_sep>planner_img_feats_var=Variable(planner_img_feats.cuda())<line_sep>planner_actions_in_var=Variable(planner_actions_in.cuda())<line_sep>planner_actions_out_var=Variable(planner_actions_out.cuda())<line_sep>planner_action_lengths=planner_action_lengths.cuda()<line_sep>planner_masks_var=Variable(planner_masks.cuda())<line_sep>controller_img_feats_var=Variable(controller_img_feats.cuda())<line_sep>controller_actions_in_var=Variable(controller_actions_in.cuda())<line_sep>planner_hidden_idx_var=Variable(planner_hidden_idx.cuda())<line_sep>controller_outs_var=Variable(controller_outs.cuda())<line_sep>controller_action_lengths=controller_action_lengths.cuda()<line_sep>controller_masks_var=Variable(controller_masks.cuda())<line_sep>planner_action_lengths,perm_idx=planner_action_lengths.sort(0 
descending=<true>)<line_sep>questions_var=questions_var[perm_idx]<line_sep>planner_img_feats_var=planner_img_feats_var[perm_idx]<line_sep>planner_actions_in_var=planner_actions_in_var[perm_idx]<line_sep>planner_actions_out_var=planner_actions_out_var[perm_idx]<line_sep>planner_masks_var=planner_masks_var[perm_idx]<line_sep>controller_img_feats_var=controller_img_feats_var[perm_idx]<line_sep>controller_actions_in_var=controller_actions_in_var[perm_idx]<line_sep>controller_outs_var=controller_outs_var[perm_idx]<line_sep>planner_hidden_idx_var=planner_hidden_idx_var[perm_idx]<line_sep>controller_action_lengths=controller_action_lengths[perm_idx]<line_sep>controller_masks_var=controller_masks_var[perm_idx]<line_sep>planner_scores,controller_scores,planner_hidden=model(questions_var planner_img_feats_var planner_actions_in_var planner_action_lengths.cpu().numpy() planner_hidden_idx_var controller_img_feats_var controller_actions_in_var controller_action_lengths)<line_sep>planner_logprob=F.log_softmax(planner_scores dim=1)<line_sep>controller_logprob=F.log_softmax(controller_scores dim=1)<line_sep>planner_loss=planner_lossFn(planner_logprob planner_actions_out_var[: :planner_action_lengths.max()].contiguous().view(-1 1) planner_masks_var[: :planner_action_lengths.max()].contiguous().view(-1 1))<line_sep>controller_loss=controller_lossFn(controller_logprob controller_outs_var[: :controller_action_lengths.max()].contiguous().view(-1 1) controller_masks_var[: :controller_action_lengths.max()].contiguous().view(-1 1))<line_sep># zero grad
optim.zero_grad()<line_sep># update metrics
metrics.update([planner_loss.data[0] controller_loss.data[0]])<line_sep>logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(planner_loss.data[0] controller_loss.data[0]))<line_sep># backprop and update
<if_stmt>args.max_controller_actions<eq>1<block_start>(planner_loss).backward()<block_end><else_stmt><block_start>(planner_loss+controller_loss).backward()<block_end>ensure_shared_grads(model.cpu() shared_model)<line_sep>optim.step()<if_stmt>t%args.print_every<eq>0<block_start>print(metrics.get_stat_string())<line_sep>logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))<if_stmt>args.log<eq><true><block_start>metrics.dump_log()<block_end><block_end>print('[CHECK][Cache:%d][Total:%d]'%(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<line_sep>logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(len(train_loader.dataset.img_data_cache) len(train_loader.dataset.env_list)))<block_end><if_stmt>all_envs_loaded<eq><false><block_start>train_loader.dataset._load_envs(in_order=<true>)<if_stmt>len(train_loader.dataset.pruned_env_set)<eq>0<block_start>done=<true><if_stmt>args.cache<eq><false><block_start>train_loader.dataset._load_envs(start_idx=0 in_order=<true>)<block_end><block_end><block_end><else_stmt><block_start>done=<true><block_end><block_end><block_end>epoch<augadd>1<if_stmt>epoch%args.save_every<eq>0<block_start>model_state=get_state(model)<line_sep>optimizer_state=optim.state_dict()<line_sep>aad=dict(args.__dict__)<line_sep>ad={}<for_stmt>i aad<block_start><if_stmt>i[0]<ne>'_'<block_start>ad[i]=aad[i]<block_end><block_end>checkpoint={'args':ad 'state':model_state 'epoch':epoch 'optimizer':optimizer_state}<line_sep>checkpoint_path='%s/epoch_%d_thread_%d.pt'%(args.checkpoint_dir epoch rank)<line_sep>print('Saving checkpoint to %s'%checkpoint_path)<line_sep>logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))<line_sep>torch.save(checkpoint checkpoint_path)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep># data params
parser.add_argument('-train_h5' default='data/train.h5')<line_sep>parser.add_argument('-val_h5' default='data/val.h5')<line_sep>parser.add_argument('-test_h5' default='data/test.h5')<line_sep>parser.add_argument('-data_json' default='data/data.json')<line_sep>parser.add_argument('-vocab_json' default='data/vocab.json')<line_sep>parser.add_argument('-target_obj_conn_map_dir' default='data/target-obj-conn-maps/500')<line_sep>parser.add_argument('-map_resolution' default=500 type=int)<line_sep>parser.add_argument('-mode' default='train+eval' type=str choices=['train' 'eval' 'train+eval'])<line_sep>parser.add_argument('-eval_split' default='val' type=str)<line_sep># model details
parser.add_argument('-model_type' default='cnn' choices=['cnn' 'cnn+q' 'lstm' 'lstm+q' 'lstm-mult+q' 'pacman'])<line_sep>parser.add_argument('-max_episode_length' default=100 type=int)<line_sep>parser.add_argument('-curriculum' default=0 type=int)<line_sep># optim params
parser.add_argument('-batch_size' default=20 type=int)<line_sep>parser.add_argument('-learning_rate' default=1e-3 type=float)<line_sep>parser.add_argument('-max_epochs' default=1000 type=int)<line_sep>parser.add_argument('-overfit' default=<false> action='store_true')<line_sep># bookkeeping
parser.add_argument('-print_every' default=5 type=int)<line_sep>parser.add_argument('-eval_every' default=1 type=int)<line_sep>parser.add_argument('-save_every' default=1000 type=int)#optional if you would like to save specific epochs as opposed to relying on the eval thread
parser.add_argument('-identifier' default='cnn')<line_sep>parser.add_argument('-num_processes' default=1 type=int)<line_sep>parser.add_argument('-max_threads_per_gpu' default=10 type=int)<line_sep># checkpointing
parser.add_argument('-checkpoint_path' default=<false>)<line_sep>parser.add_argument('-checkpoint_dir' default='checkpoints/nav/')<line_sep>parser.add_argument('-log_dir' default='logs/nav/')<line_sep>parser.add_argument('-log' default=<false> action='store_true')<line_sep>parser.add_argument('-cache' default=<false> action='store_true')<line_sep>parser.add_argument('-max_controller_actions' type=int default=5)<line_sep>parser.add_argument('-max_actions' type=int)<line_sep>args=parser.parse_args()<line_sep>args.time_id=time.strftime("%m_%d_%H:%M")<line_sep>#MAX_CONTROLLER_ACTIONS = args.max_controller_actions
<if_stmt><not>os.path.isdir(args.log_dir)<block_start>os.makedirs(args.log_dir)<block_end><if_stmt>args.curriculum<block_start><assert_stmt>'lstm'<in>args.model_type<block_end>#TODO: Finish implementing curriculum for other model types
logging.basicConfig(filename=os.path.join(args.log_dir "run_{}.log".format(str(datetime.now()).replace(' ' '_'))) level=logging.INFO format='%(asctime)-15s %(message)s')<try_stmt><block_start>args.gpus=os.environ['CUDA_VISIBLE_DEVICES'].split(',')<line_sep>args.gpus=[int(x)<for>x args.gpus]<block_end><except_stmt>KeyError<block_start>print("CPU not supported")<line_sep>logging.info("CPU not supported")<line_sep>exit()<block_end><if_stmt>args.checkpoint_path<ne><false><block_start>print('Loading checkpoint from %s'%args.checkpoint_path)<line_sep>logging.info("Loading checkpoint from {}".format(args.checkpoint_path))<line_sep>args_to_keep=['model_type']<line_sep>checkpoint=torch.load(args.checkpoint_path map_location={'cuda:0':'cpu'})<for_stmt>i args.__dict__<block_start><if_stmt>i<not><in>args_to_keep<block_start>checkpoint['args'][i]=args.__dict__[i]<block_end><block_end>args=type('new_dict' (object ) checkpoint['args'])<block_end>args.checkpoint_dir=os.path.join(args.checkpoint_dir args.time_id+'_'+args.identifier)<line_sep>args.log_dir=os.path.join(args.log_dir args.time_id+'_'+args.identifier)<line_sep># if set to overfit; set eval_split to train
<if_stmt>args.overfit<eq><true><block_start>args.eval_split='train'<block_end>print(args.__dict__)<line_sep>logging.info(args.__dict__)<if_stmt><not>os.path.exists(args.checkpoint_dir)<block_start>os.makedirs(args.checkpoint_dir)<line_sep>os.makedirs(args.log_dir)<block_end><if_stmt>args.model_type<eq>'cnn'<block_start>model_kwargs={}<line_sep>shared_model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'cnn+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>shared_model=NavCnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm'<block_start>model_kwargs={}<line_sep>shared_model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'lstm+q'<block_start>model_kwargs={'question_input':<true> 'question_vocab':load_vocab(args.vocab_json)}<line_sep>shared_model=NavCnnRnnModel(**model_kwargs)<block_end><elif_stmt>args.model_type<eq>'pacman'<block_start>model_kwargs={'question_vocab':load_vocab(args.vocab_json)}<line_sep>shared_model=NavPlannerControllerModel(**model_kwargs)<block_end><else_stmt><block_start>exit()<block_end>shared_model.share_memory()<if_stmt>args.checkpoint_path<ne><false><block_start>print('Loading params from checkpoint: %s'%args.checkpoint_path)<line_sep>logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))<line_sep>shared_model.load_state_dict(checkpoint['state'])<block_end><if_stmt>args.mode<eq>'eval'<block_start>eval(0 args shared_model)<block_end><elif_stmt>args.mode<eq>'train'<block_start><if_stmt>args.num_processes<g>1<block_start>processes=[]<for_stmt>rank range(0 args.num_processes)# for rank in range(0, args.num_processes):
<block_start>p=mp.Process(target=train args=(rank args shared_model))<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join()<block_end><block_end><else_stmt><block_start>train(0 args shared_model)<block_end><block_end><else_stmt><block_start>processes=[]<line_sep># Start the eval thread
p=mp.Process(target=eval args=(0 args shared_model))<line_sep>p.start()<line_sep>processes.append(p)<line_sep># Start the training thread(s)
<for_stmt>rank range(1 args.num_processes+1)# for rank in range(0, args.num_processes):
<block_start>p=mp.Process(target=train args=(rank args shared_model))<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join()<block_end><block_end><block_end> |
<import_stmt>tarfile<line_sep>tar_file=tarfile.open("work.tar.gz" "r:gz")<line_sep>print(tar_file.getnames())<line_sep> |
<import_stmt>os<import_stmt>re<import_stmt>gzip<import_stmt>argparse<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>collections defaultdict<def_stmt>get_args <block_start>"""
Parse command line arguments
"""<line_sep>parser=argparse.ArgumentParser(description="Method to create track for escape mutations")<line_sep>parser.add_argument("-xlsx" help="file containing all the data")<line_sep>parser.add_argument("-pid" help="pep to number" default="prot_names_pids_8.txt")<line_sep>parser.add_argument("-gb_tools" help="path to gb_tools" default="./")<line_sep>args=parser.parse_args()<line_sep><return>args<block_end><def_stmt>read_pid args<block_start>inputfilehandler=open(args.pid 'r')<line_sep>pid={}<line_sep>aaid={}<line_sep>nucid={}<for_stmt>line inputfilehandler<block_start>line=line.strip()<line_sep>fields=line.split()<line_sep>peptide=fields[0]<line_sep>pid[peptide]=fields[1]<line_sep>nucid[peptide]=fields[2]<line_sep>aaid[peptide]=fields[3]<block_end>inputfilehandler.close()<line_sep><return>(pid aaid nucid)<block_end><def_stmt>get_start_pos peptide pid aaid nucid<block_start>first_eight=''.join(list(peptide)[0:8])<if_stmt>first_eight<in>pid<block_start><return>nucid[first_eight]<block_end><return>-1<block_end><def_stmt>main args<block_start>(pid aaid nucid)=read_pid(args)<line_sep>cd8_epitopes=pd.read_excel(args.xlsx skiprows=0 header=0 index_col=<none>)<line_sep>print(cd8_epitopes.columns)<line_sep>outfiletag='escape_mutations'<line_sep>beddetailfilename=outfiletag+'.beddetail'<line_sep>bedfilename=outfiletag+'.bed'<line_sep>bbfilename=outfiletag+'.bb'<line_sep>#print (cd8_epitopes['Probable Infection Location'])
#print (cd8_epitopes['Gene'])
#print (cd8_epitopes['Position of Mutation'])
#print (cd8_epitopes['AA Change'])
#print (cd8_epitopes['Codon Change'])
#print (cd8_epitopes['Wildtype Sequence'])
#print (cd8_epitopes['Mutant Sequence 1'])
#print (cd8_epitopes['Mutant Sequence 2'])
wt_mt=defaultdict(list)<line_sep>mutations=[]<line_sep>beddetailfilehandler=open(beddetailfilename 'w')<for_stmt>i range(0 len(cd8_epitopes['Position of Mutation']))<block_start>chrom="NC_045512v2"<line_sep>reserved=0<line_sep>score=1000<line_sep>strand='+'<line_sep>pom=cd8_epitopes['Position of Mutation'][i]<line_sep>gene=cd8_epitopes['Gene'][i]<line_sep>pil=cd8_epitopes['Probable Infection Location'][i]<line_sep>aa_change=cd8_epitopes['AA Change'][i]<line_sep>c_change=cd8_epitopes['Codon Change'][i]<if_stmt>gene+'_'+c_change+'_'+aa_change<not><in>mutations<block_start>mutations.append(gene+'_'+c_change+'_'+aa_change)<block_end><if_stmt>';'<not><in>cd8_epitopes['Wildtype Sequence'][i]<block_start>chromStart=get_start_pos(cd8_epitopes['Wildtype Sequence'][i] pid aaid nucid)<if_stmt>chromStart<ne>-1<block_start>chromEnd=str(len(list(cd8_epitopes['Wildtype Sequence'][i]))<times>3+int(chromStart))<line_sep>thickStart=str(chromStart)<line_sep>thickEnd=str(chromEnd)<line_sep>wt_pep=cd8_epitopes['Wildtype Sequence'][i]<line_sep>mt_pep=cd8_epitopes['Mutant Sequence 1'][i]<if_stmt>wt_pep<not><in>wt_mt<block_start>wt_mt[wt_pep].append(mt_pep)<block_end><else_stmt><block_start><if_stmt>mt_pep<in>wt_mt[wt_pep]<block_start><continue><block_end><block_end>beddetailfilehandler.write(chrom+'\t'+str(chromStart)+'\t'+str(chromEnd)+'\t'+wt_pep+'\t'+str(score)+'\t'+strand+'\t'+thickStart+'\t'+thickEnd+'\t'+str(pom)+'\t'+str(gene)+'\t'+str(pil)+'\t'+aa_change+'\t'+c_change+'\t'+mt_pep+"\n")<block_end><block_end><else_stmt><block_start>wt_pep=cd8_epitopes['Wildtype Sequence'][i]<line_sep>wt1_pep=wt_pep.split(';')[0]<line_sep>wt2_pep=wt_pep.split(';')[1]<line_sep>mt1_pep=cd8_epitopes['Mutant Sequence 1'][i]<line_sep>mt2_pep=cd8_epitopes['Mutant Sequence 2'][i]<line_sep>chromStart=get_start_pos(wt1_pep pid aaid 
nucid)<if_stmt>chromStart<ne>-1<block_start>chromEnd=str(len(list(wt1_pep))<times>3+int(chromStart))<line_sep>thickStart=chromStart<line_sep>thickEnd=chromEnd<if_stmt>wt1_pep<not><in>wt_mt<block_start>wt_mt[wt_pep].append(mt_pep)<block_end><else_stmt><block_start><if_stmt>mt1_pep<in>wt_mt[wt1_pep]<block_start><continue><block_end><block_end>beddetailfilehandler.write(chrom+'\t'+str(chromStart)+'\t'+str(chromEnd)+'\t'+wt1_pep+'\t'+str(score)+'\t'+strand+'\t'+thickStart+'\t'+thickEnd+'\t'+str(pom)+'\t'+str(gene)+'\t'+str(pil)+'\t'+aa_change+'\t'+c_change+'\t'+mt1_pep+"\n")<block_end>chromStart=get_start_pos(wt2_pep pid aaid nucid)<if_stmt>chromStart<ne>-1<block_start>chromEnd=str(len(list(wt2_pep))<times>3+int(chromStart))<line_sep>thickStart=chromStart<line_sep>thickEnd=chromEnd<if_stmt>wt2_pep<not><in>wt_mt<block_start>wt_mt[wt_pep].append(mt_pep)<block_end><else_stmt><block_start><if_stmt>mt2_pep<in>wt_mt[wt2_pep]<block_start><continue><block_end><block_end>beddetailfilehandler.write(chrom+'\t'+str(chromStart)+'\t'+str(chromEnd)+'\t'+wt2_pep+'\t'+str(score)+'\t'+strand+'\t'+thickStart+'\t'+thickEnd+'\t'+str(pom)+'\t'+str(gene)+'\t'+str(pil)+'\t'+aa_change+'\t'+c_change+'\t'+mt2_pep+"\n")<block_end><block_end><block_end>beddetailfilehandler.close()<line_sep>print(len(mutations))<line_sep># use gbtools to convert from beddetail to bed and bigbed
os.system(f"bedSort {beddetailfilename} {bedfilename}")<line_sep>os.system(f"bedToBigBed {bedfilename} wuhCor1.sizes {bbfilename} -tab -type=bed9+ -as=escape_mutants.as")<block_end><if_stmt>__name__<eq>"__main__"<block_start>main(get_args())<block_end> |
<import_from_stmt>django.conf settings<line_sep>DRFSO2_PROPRIETARY_BACKEND_NAME=getattr(settings 'DRFSO2_PROPRIETARY_BACKEND_NAME' "Django")<line_sep>DRFSO2_URL_NAMESPACE=getattr(settings 'DRFSO2_URL_NAMESPACE' "")<line_sep> |
"""
propertylist
"""<import_from_future_stmt> absolute_import division print_function<import_from_stmt>collections namedtuple<import_stmt>logging<import_from_stmt>PySide.QtCore Qt<import_from_stmt>mceditlib nbt<import_from_stmt>PySide QtGui QtCore<import_from_stmt>mcedit2.util.load_ui registerCustomWidget<line_sep>log=logging.getLogger(__name__)<class_stmt>PropertyListItemDelegate(QtGui.QStyledItemDelegate)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(PropertyListItemDelegate self).__init__(*args **kwargs)<block_end><def_stmt>createEditor self parent option index<block_start>model=index.model()<line_sep>tagName,displayName,valueType,min,max=model.properties[index.row()]<if_stmt>valueType<is>int<block_start>valueWidget=QtGui.QSpinBox()<line_sep>valueWidget.setMinimum(min)<line_sep>valueWidget.setMaximum(max)<block_end><elif_stmt>valueType<is>float<block_start>valueWidget=QtGui.QDoubleSpinBox()<line_sep>valueWidget.setMinimum(min)<line_sep>valueWidget.setMaximum(max)<block_end><elif_stmt>valueType<is>bool<block_start>valueWidget=QtGui.QCheckBox()<block_end><elif_stmt>isinstance(valueType list)# Choice list
<block_start>valueWidget=QtGui.QComboBox()<for_stmt>value,name valueType<block_start>valueWidget.addItem(name value)<block_end><block_end><elif_stmt>valueType<is>unicode<block_start>valueWidget=QtGui.QPlainTextEdit()<block_end><else_stmt><block_start><raise>TypeError("Can't create attribute widgets for %s yet"%valueType)<block_end>valueWidget.setParent(parent)<line_sep><return>valueWidget<block_end><def_stmt>setEditorData self editor index<block_start>model=index.model()<line_sep>rootTag=model.rootTag<line_sep>tagName,displayName,valueType,min,max=model.properties[index.row()]<if_stmt>valueType<is>int<block_start>editor.setValue(rootTag[tagName].value)<block_end><elif_stmt>valueType<is>float<block_start>editor.setValue(rootTag[tagName].value)<block_end><elif_stmt>valueType<is>bool<block_start>editor.setChecked(rootTag[tagName].value)<block_end><elif_stmt>isinstance(valueType list)# Choice list
<block_start>currentValue=rootTag[tagName].value<try_stmt><block_start>currentIndex=[v<for>v,n valueType].index(currentValue)<line_sep>editor.setCurrentIndex(currentIndex)<block_end><except_stmt>ValueError<block_start>editor.addItem("Unknown value %s"%currentValue currentValue)<block_end><block_end><elif_stmt>valueType<is>unicode<block_start>editor.setPlainText(rootTag[tagName].value)<block_end><else_stmt><block_start><raise>TypeError("Unknown valueType in setEditorData (check this in addNBTProperty, dummy)")<block_end><block_end><def_stmt>setModelData self editor model index<block_start>tagName,displayName,valueType,min,max=model.properties[index.row()]<line_sep>rootTag=model.rootTag<if_stmt>valueType<is>int<block_start>value=int(editor.value())<block_end><elif_stmt>valueType<is>float<block_start>value=float(editor.value())<block_end><elif_stmt>valueType<is>bool<block_start>value=editor.isChecked()<block_end><elif_stmt>isinstance(valueType list)# Choice list
<block_start>value=valueType[editor.currentIndex()][0]<block_end><elif_stmt>valueType<is>unicode<block_start>value=editor.plainText()<block_end><else_stmt><block_start><raise>TypeError("Unknown valueType in setModelData (check this in addNBTProperty, dummy)")<block_end>model.setData(index value)<block_end><block_end><class_stmt>PropertyListEntry(namedtuple('PropertyListEntry' 'tagName displayName valueType min max'))<block_start><pass><block_end><class_stmt>PropertyListModel(QtCore.QAbstractItemModel)<block_start>propertyChanged=QtCore.Signal(unicode object)<def_stmt>__init__ self rootTag<block_start>super(PropertyListModel self).__init__()<line_sep>self.rootTag=rootTag<line_sep>self.properties=[]<block_end><def_stmt>addNBTProperty self tagName valueType=<none> min=<none> max=<none> displayName=<none><block_start><if_stmt>displayName<is><none><block_start>displayName=tagName<block_end><if_stmt>valueType<is><none><block_start>valueType=int<block_end><if_stmt>tagName<not><in>self.rootTag<block_start><return><block_end>tag=self.rootTag[tagName]<if_stmt>tag.tagID<eq>nbt.ID_BYTE<block_start>tagMin=-(1<lshift>7)<line_sep>tagMax=(1<lshift>7)-1<block_end><elif_stmt>tag.tagID<eq>nbt.ID_SHORT<block_start>tagMin=-(1<lshift>15)<line_sep>tagMax=(1<lshift>15)-1<block_end><elif_stmt>tag.tagID<eq>nbt.ID_INT<block_start>tagMin=-(1<lshift>31)<line_sep>tagMax=(1<lshift>31)-1<block_end><else_stmt># tag.tagID == nbt.ID_LONG, ID_FLOAT, ID_DOUBLE
# tagMin = -(1 << 63) # xxxx 64-bit spinbox
# tagMax = (1 << 63) - 1
<block_start>tagMin=-(1<lshift>31)<line_sep>tagMax=(1<lshift>31)-1<block_end><if_stmt>min<is><none><block_start>min=tagMin<block_end><if_stmt>max<is><none><block_start>max=tagMax<block_end>self.properties.append(PropertyListEntry(tagName displayName valueType min max))<block_end><def_stmt>columnCount self index<block_start><return>2<block_end><def_stmt>data self index role=Qt.DisplayRole<block_start><if_stmt><not>index.isValid()<block_start><return><none><block_end>entry=self.properties[index.row()]<if_stmt>role<in>(Qt.DisplayRole Qt.EditRole)<block_start><if_stmt>index.column()<eq>0<block_start><return>entry.displayName<block_end><else_stmt><block_start>value=self.rootTag[entry.tagName].value<if_stmt>isinstance(entry.valueType (list tuple))<block_start><try_stmt><block_start><return>entry.valueType[value][1]<block_end><except_stmt>IndexError<block_start><return>"Unknown value %s"%value<block_end><block_end><else_stmt><block_start><return>value<block_end><block_end><block_end># if role == Qt.CheckStateRole:
# if entry.valueType is not bool:
# return -1
# value = self.rootTag[entry.tagName].value
# return bool(value)
<block_end><def_stmt>flags self index<block_start><if_stmt><not>index.isValid()<block_start><return>0<block_end>flags=Qt.ItemIsEnabled|Qt.ItemIsSelectable<if_stmt>index.column()<eq>1<block_start>flags<augor>Qt.ItemIsEditable<line_sep>entry=self.properties[index.row()]<line_sep>#if entry.valueType is bool:
# flags |= Qt.ItemIsUserCheckable
<block_end><return>flags<block_end><def_stmt>headerData self section orientation role=Qt.DisplayRole<block_start><if_stmt>orientation<eq>Qt.Horizontal<and>role<eq>Qt.DisplayRole<block_start><return>("Name" "Value")[section]<block_end><return><none><block_end><def_stmt>index self row column parent=QtCore.QModelIndex()<block_start><if_stmt>parent.isValid()<block_start><return>QtCore.QModelIndex()<block_end><return>self.createIndex(row column <none>)<block_end><def_stmt>parent self index<block_start><return>QtCore.QModelIndex()<block_end><def_stmt>rowCount self parent=QtCore.QModelIndex()<block_start><if_stmt>parent.isValid()<block_start><return>0<block_end><return>len(self.properties)<block_end><def_stmt>setData self index value role=Qt.EditRole<block_start>row=index.row()<line_sep>entry=self.properties[row]<if_stmt>self.rootTag[entry.tagName].value<ne>value<block_start>self.rootTag[entry.tagName].value=value<line_sep>self.propertyChanged.emit(entry.tagName value)<line_sep>self.dataChanged.emit(index index)<block_end><block_end><block_end>@registerCustomWidget<class_stmt>PropertyListWidget(QtGui.QTreeView)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(PropertyListWidget self).__init__(*args **kwargs)<line_sep>delegate=PropertyListItemDelegate()<line_sep>self.setItemDelegate(delegate)<line_sep>self.setEditTriggers(self.CurrentChanged|self.editTriggers())<block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>tests.internal.common_utils find_free_port<import_stmt>unittest<import_stmt>multiprocessing<import_stmt>os<import_from_stmt>bagua.torch_api.utils flatten<import_stmt>bagua.torch_api<as>bagua<import_from_stmt>tests skip_if_cuda_not_available<line_sep>N_EPOCHS=10<class_stmt>Net1(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Net1 self).__init__()<line_sep>self.fc1=nn.Linear(2 10 bias=<false>)<line_sep>self.fc2=nn.Linear(10 50 bias=<true>)<line_sep>self.fc3=nn.Linear(50 4 bias=<false>)<line_sep>self.relu=nn.ReLU()<block_end><def_stmt>forward self x<block_start>x=self.relu(self.fc1(x))<line_sep>x=self.relu(self.fc2(x))<line_sep>x=self.fc3(x)<line_sep><return>F.softmax(x dim=1)<block_end><block_end><class_stmt>Net2(nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Net2 self).__init__()<line_sep>self.fc1=nn.Linear(2 10 bias=<false>)<line_sep>self.fc2=nn.Linear(10 30 bias=<true>)<line_sep>self.fc3=nn.Linear(30 20 bias=<true>)<line_sep>self.fc4=nn.Linear(20 4 bias=<false>)<line_sep>self.relu=nn.ReLU()<block_end><def_stmt>forward self x<block_start>x=self.relu(self.fc1(x))<line_sep>x=self.relu(self.fc2(x))<line_sep>x=self.relu(self.fc3(x))<line_sep>x=self.fc4(x)<line_sep><return>F.softmax(x dim=1)<block_end><block_end><def_stmt>_init_bagua_env rank env# set deterministic
<block_start>torch.backends.cudnn.benchmark=<false><line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.manual_seed(rank)<line_sep># initialize subprocess env
os.environ["WORLD_SIZE"]=env["WORLD_SIZE"]<line_sep>os.environ["LOCAL_WORLD_SIZE"]=env["LOCAL_WORLD_SIZE"]<line_sep>os.environ["MASTER_ADDR"]=env["MASTER_ADDR"]<line_sep>os.environ["MASTER_PORT"]=env["MASTER_PORT"]<line_sep>os.environ["BAGUA_SERVICE_PORT"]=env["BAGUA_SERVICE_PORT"]<line_sep>os.environ["RANK"]=str(rank)<line_sep>os.environ["LOCAL_RANK"]=str(rank)<line_sep># init bagua distributed process group
torch.cuda.set_device(rank)<line_sep>bagua.init_process_group()<block_end><def_stmt>_init_torch_env rank nprocs backend# set deterministic
<block_start>torch.backends.cudnn.benchmark=<false><line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.manual_seed(rank)<line_sep># init torch distributed process group
torch.cuda.set_device(rank)<line_sep>torch.distributed.init_process_group(world_size=nprocs rank=rank backend=backend init_method="file:///tmp/.bagua.test.filestore" )<block_end><def_stmt>run_model rank results env <block_start>_init_bagua_env(rank env)<line_sep># construct model and optimizer, etc.
model_1=Net1().cuda()<line_sep>optimizer_1=torch.optim.SGD(model_1.parameters() lr=0.01)<line_sep>loss_fn_1=nn.MSELoss()<line_sep>model_2=Net2().cuda()<line_sep>optimizer_2=torch.optim.SGD(model_2.parameters() lr=0.01)<line_sep>loss_fn_2=nn.MSELoss()<line_sep># wrap model
<import_from_stmt>bagua.torch_api.algorithms gradient_allreduce<line_sep>algorithm=gradient_allreduce.GradientAllReduceAlgorithm()<line_sep>model_1=model_1.with_bagua([optimizer_1] algorithm)<line_sep>model_2=model_2.with_bagua([optimizer_2] algorithm)<line_sep>ret=results[rank]<line_sep>ret.init_weight_1.copy_(flatten([param.data<for>param model_1.parameters()]))<line_sep>ret.init_weight_2.copy_(flatten([param.data<for>param model_2.parameters()]))<for_stmt>epoch range(N_EPOCHS)<block_start>data_1=torch.randn(8 2).cuda()<line_sep>target_1=torch.randn(8 4).cuda()<line_sep>optimizer_1.zero_grad()<line_sep>output_1=model_1(data_1)<line_sep>loss_1=loss_fn_1(output_1 target_1)<line_sep>loss_1.backward()<line_sep>optimizer_1.step()<line_sep>data_2=torch.randn(8 2).cuda()<line_sep>target_2=torch.randn(8 4).cuda()<line_sep>optimizer_2.zero_grad()<line_sep>output_2=model_2(data_2)<line_sep>loss_2=loss_fn_2(output_2 target_2)<line_sep>loss_2.backward()<line_sep>optimizer_2.step()<block_end>ret.end_weight_1.copy_(flatten([param.data<for>param model_1.parameters()]))<line_sep>ret.end_weight_2.copy_(flatten([param.data<for>param model_2.parameters()]))<block_end><def_stmt>run_torch_model rank nprocs results backend env <block_start>_init_torch_env(rank nprocs backend)<line_sep># construct model and optimizer, etc.
model_1=Net1().cuda()<line_sep>optimizer_1=torch.optim.SGD(model_1.parameters() lr=0.01)<line_sep>loss_fn_1=nn.MSELoss()<line_sep>model_2=Net2().cuda()<line_sep>optimizer_2=torch.optim.SGD(model_2.parameters() lr=0.01)<line_sep>loss_fn_2=nn.MSELoss()<line_sep># wrap model
model_1=torch.nn.parallel.DistributedDataParallel(model_1 device_ids=[rank])<line_sep>model_2=torch.nn.parallel.DistributedDataParallel(model_2 device_ids=[rank])<line_sep>ret=results[rank]<line_sep>ret.init_weight_1.copy_(flatten([param.data<for>param model_1.parameters()]))<line_sep>ret.init_weight_2.copy_(flatten([param.data<for>param model_2.parameters()]))<for_stmt>epoch range(N_EPOCHS)<block_start>data_1=torch.randn(8 2).cuda()<line_sep>target_1=torch.randn(8 4).cuda()<line_sep>optimizer_1.zero_grad()<line_sep>output_1=model_1(data_1)<line_sep>loss_1=loss_fn_1(output_1 target_1)<line_sep>loss_1.backward()<line_sep>optimizer_1.step()<line_sep>data_2=torch.randn(8 2).cuda()<line_sep>target_2=torch.randn(8 4).cuda()<line_sep>optimizer_2.zero_grad()<line_sep>output_2=model_2(data_2)<line_sep>loss_2=loss_fn_2(output_2 target_2)<line_sep>loss_2.backward()<line_sep>optimizer_2.step()<block_end>ret.end_weight_1.copy_(flatten([param.data<for>param model_1.parameters()]))<line_sep>ret.end_weight_2.copy_(flatten([param.data<for>param model_2.parameters()]))<block_end><class_stmt>Result(object)<block_start><def_stmt>__init__ self<block_start>model_1=Net1()<line_sep>model_2=Net2()<line_sep>self.init_weight_1=flatten([torch.zeros_like(param.data)<for>param model_1.parameters()])<line_sep>self.end_weight_1=flatten([torch.zeros_like(param.data)<for>param model_1.parameters()])<line_sep>self.init_weight_2=flatten([torch.zeros_like(param.data)<for>param model_2.parameters()])<line_sep>self.end_weight_2=flatten([torch.zeros_like(param.data)<for>param model_2.parameters()])<block_end><block_end><class_stmt>TestMultiModels(unittest.TestCase)<block_start>@skip_if_cuda_not_available()<def_stmt>test_multi_models self<block_start>nprocs=torch.cuda.device_count()<line_sep>env={}<line_sep>mp=multiprocessing.get_context("spawn")<line_sep>torch_results=[Result()<for>_ range(nprocs)]<line_sep>processes=[]<line_sep>backend="gloo"<for_stmt>i 
range(nprocs)<block_start>p=mp.Process(target=run_torch_model args=(i nprocs torch_results backend env ) )<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join(timeout=60)<line_sep>self.assertTrue(p.exitcode<eq>0)<block_end>env={"WORLD_SIZE":str(nprocs) "LOCAL_WORLD_SIZE":str(nprocs) "MASTER_ADDR":"127.0.0.1" "MASTER_PORT":str(find_free_port(8000 8100)) "BAGUA_SERVICE_PORT":str(find_free_port(9000 9100)) }<line_sep>bagua_results=[Result()<for>_ range(nprocs)]<line_sep>processes=[]<for_stmt>i range(nprocs)<block_start>p=mp.Process(target=run_model args=(i bagua_results env ) )<line_sep>p.start()<line_sep>processes.append(p)<block_end><for_stmt>p processes<block_start>p.join(timeout=60)<line_sep>self.assertTrue(p.exitcode<eq>0)<block_end><for_stmt>rank range(nprocs)<block_start>self.assertTrue(torch.all(torch.isclose(bagua_results[rank].init_weight_1 torch_results[rank].init_weight_1 )).item())<line_sep>self.assertTrue(torch.all(torch.isclose(bagua_results[rank].end_weight_1 torch_results[rank].end_weight_1 )).item())<line_sep>self.assertTrue(torch.all(torch.isclose(bagua_results[rank].init_weight_2 torch_results[rank].init_weight_2 )).item())<line_sep>self.assertTrue(torch.all(torch.isclose(bagua_results[rank].end_weight_2 torch_results[rank].end_weight_2 )).item())<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_stmt>asyncio<import_stmt>math<import_stmt>networkx<as>nx<import_stmt>ccxt.async_support<as>ccxt<import_stmt>datetime<import_stmt>logging<import_from_stmt>.logging_utils FormatForLogAdapter<line_sep>__all__=['FeesNotAvailable' 'create_exchange_graph' 'load_exchange_graph' ]<line_sep>adapter=FormatForLogAdapter(logging.getLogger('peregrinearb.utils.single_exchange'))<class_stmt>FeesNotAvailable(Exception)<block_start><pass><block_end><def_stmt>create_exchange_graph exchange:ccxt.Exchange<block_start>"""
Returns a simple graph representing exchange. Each edge represents a market.
exchange.load_markets() must have been called. Will throw a ccxt error if it has not.
"""<line_sep>graph=nx.Graph()<for_stmt>market_name exchange.symbols<block_start><try_stmt><block_start>base_currency,quote_currency=market_name.split('/')<block_end># if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
<except_stmt>ValueError<block_start><continue><block_end>graph.add_edge(base_currency quote_currency market_name=market_name)<block_end><return>graph<block_end><async_keyword><def_stmt>load_exchange_graph exchange name=<true> fees=<true> suppress=<none> depth=<false> tickers=<none><arrow>nx.DiGraph<block_start>"""
Returns a networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges). If depth, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""<if_stmt>suppress<is><none><block_start>suppress=['markets']<block_end><if_stmt>name<block_start>exchange=getattr(ccxt exchange)()<block_end><if_stmt>tickers<is><none><block_start>adapter.info('Fetching tickers')<line_sep>tickers=<await>exchange.fetch_tickers()<line_sep>adapter.info('Fetched tickers')<block_end>market_count=len(tickers)<line_sep>adapter.info('Loading exchange graph' marketCount=market_count)<line_sep>adapter.debug('Initializing empty graph with exchange_name and timestamp attributes')<line_sep>graph=nx.DiGraph()<line_sep># todo: get exchange's server time?
graph.graph['exchange_name']=exchange.id<line_sep>graph.graph['datetime']=datetime.datetime.now(tz=datetime.timezone.utc)<line_sep>adapter.debug('Initialized empty graph with exchange_name and timestamp attributes')<async_keyword><def_stmt>add_edges <block_start>tasks=[_add_weighted_edge_to_graph(exchange market_name graph log=<true> fees=fees suppress=suppress ticker=ticker depth=depth )<for>market_name,ticker tickers.items()]<line_sep><await>asyncio.wait(tasks)<block_end><if_stmt>fees<block_start><for_stmt>i range(20)<block_start><try_stmt><block_start>adapter.info('Loading fees' iteration=i)<line_sep># must load markets to get fees
<await>exchange.load_markets()<block_end><except_stmt>(ccxt.DDoSProtection ccxt.RequestTimeout)<as>e<block_start><if_stmt>i<eq>19<block_start>adapter.warning('Rate limited on final iteration, raising error' iteration=i)<line_sep><raise>e<block_end>adapter.warning('Rate limited when loading markets' iteration=i)<line_sep><await>asyncio.sleep(0.1)<block_end><except_stmt>ccxt.ExchangeNotAvailable<as>e<block_start><if_stmt>i<eq>19<block_start>adapter.warning('Cannot load markets due to ExchangeNotAvailable error, '<concat>'graph will not be loaded.' iteration=i)<line_sep><raise>e<block_end>adapter.warning('Received ExchangeNotAvailable error when loading markets' iteration=i)<block_end><else_stmt><block_start><break><block_end><block_end>adapter.info('Loaded fees' iteration=i marketCount=market_count)<line_sep>currency_count=len(exchange.currencies)<line_sep>adapter.info('Adding data to graph' marketCount=market_count currencyCount=currency_count)<line_sep><await>add_edges()<line_sep>adapter.info('Added data to graph' marketCount=market_count currencyCount=currency_count)<block_end><else_stmt><block_start>adapter.info('Adding data to graph' marketCount=market_count)<line_sep><await>add_edges()<line_sep>adapter.info('Added data to graph' marketCount=market_count)<block_end>adapter.debug('Closing connection')<line_sep><await>exchange.close()<line_sep>adapter.debug('Closed connection')<line_sep>adapter.info('Loaded exchange graph')<line_sep><return>graph<block_end><async_keyword><def_stmt>_add_weighted_edge_to_graph exchange:ccxt.Exchange market_name:str graph:nx.DiGraph log=<true> fees=<false> suppress=<none> ticker=<none> depth=<false> <block_start>"""
todo: add global variable to bid_volume/ ask_volume to see if all tickers (for a given exchange) have value == None
Returns a Networkx DiGraph populated with the current ask and bid prices for each market in graph (represented by
edges).
:param exchange: A ccxt Exchange object
:param market_name: A string representing a cryptocurrency market formatted like so:
'{base_currency}/{quote_currency}'
:param graph: A Networkx DiGraph upon
:param log: If the edge weights given to the graph should be the negative logarithm of the ask and bid prices. This
is necessary to calculate arbitrage opportunities.
:param fees: If fees should be taken into account for prices.
:param suppress: A list or set which tells which types of warnings to not throw. Accepted elements are 'markets'.
:param ticker: A dictionary representing a market as returned by ccxt's Exchange's fetch_ticker method
:param depth: If True, also adds an attribute 'depth' to each edge which represents the current volume of orders
available at the price represented by the 'weight' attribute of each edge.
"""<line_sep>adapter.debug('Adding edge to graph' market=market_name)<if_stmt>ticker<is><none><block_start><try_stmt><block_start>adapter.info('Fetching ticker' market=market_name)<line_sep>ticker=<await>exchange.fetch_ticker(market_name)<line_sep>adapter.info('Fetched ticker' market=market_name)<block_end># any error is solely because of fetch_ticker
<except_stmt><block_start><if_stmt>'markets'<not><in>suppress<block_start>adapter.warning('Market is unavailable at this time. It will not be included in the graph.' market=market_name)<block_end><return><block_end><block_end><if_stmt>fees<block_start><if_stmt>'taker'<in>exchange.markets[market_name]# we always take the taker side because arbitrage depends on filling orders
# sell_fee_dict = exchange.calculate_fee(market_name, 'limit', 'sell', 0, 0, 'taker')
# buy_fee_dict = exchange.calculate_fee(market_name, 'limit', 'buy', 0, 0, 'taker')
<block_start>fee=exchange.markets[market_name]['taker']<block_end><else_stmt><block_start><if_stmt>'fees'<not><in>suppress<block_start>adapter.warning("The fees for {} have not yet been implemented into ccxt's uniform API.".format(exchange))<line_sep><raise>FeesNotAvailable('Fees are not available for {} on {}'.format(market_name exchange.id))<block_end><else_stmt><block_start>fee=0.002<block_end><block_end><block_end><else_stmt><block_start>fee=0<block_end>fee_scalar=1-fee<try_stmt><block_start>bid_rate=ticker['bid']<line_sep>ask_rate=ticker['ask']<if_stmt>depth<block_start>bid_volume=ticker['bidVolume']<line_sep>ask_volume=ticker['askVolume']<if_stmt>bid_volume<is><none><block_start>adapter.warning('Market is unavailable because its bid volume was given as None. '<concat>'It will not be included in the graph.' market=market_name)<line_sep><return><block_end><if_stmt>ask_volume<is><none><block_start>adapter.warning('Market is unavailable because its ask volume was given as None. '<concat>'It will not be included in the graph.' market=market_name)<line_sep><return><block_end><block_end><block_end># ask and bid == None if this market is non existent.
<except_stmt>TypeError<block_start>adapter.warning('Market is unavailable at this time. It will not be included in the graph.' market=market_name)<line_sep><return><block_end># Exchanges give asks and bids as either 0 or None when they do not exist.
# todo: should we account for exchanges upon which an ask exists but a bid does not (and vice versa)? Would this
# cause bugs?
<if_stmt>ask_rate<eq>0<or>bid_rate<eq>0<or>ask_rate<is><none><or>bid_rate<is><none><block_start>adapter.warning('Market is unavailable at this time. It will not be included in the graph.' market=market_name)<line_sep><return><block_end><try_stmt><block_start>base_currency,quote_currency=market_name.split('/')<block_end># if ccxt returns a market in incorrect format (e.g FX_BTC_JPY on BitFlyer)
<except_stmt>ValueError<block_start><if_stmt>'markets'<not><in>suppress<block_start>adapter.warning('Market is unavailable at this time due to incorrect formatting. '<concat>'It will not be included in the graph.' market=market_name)<block_end><return><block_end><if_stmt>log<block_start><if_stmt>depth<block_start>graph.add_edge(base_currency quote_currency weight=-math.log(fee_scalar<times>bid_rate) depth=-math.log(bid_volume) market_name=market_name trade_type='SELL' fee=fee volume=bid_volume no_fee_rate=bid_rate)<line_sep>graph.add_edge(quote_currency base_currency weight=-math.log(fee_scalar<times>1/ask_rate) depth=-math.log(ask_volume<times>ask_rate) market_name=market_name trade_type='BUY' fee=fee volume=ask_volume no_fee_rate=ask_rate)<block_end><else_stmt><block_start>graph.add_edge(base_currency quote_currency weight=-math.log(fee_scalar<times>bid_rate) market_name=market_name trade_type='SELL' fee=fee no_fee_rate=bid_rate)<line_sep>graph.add_edge(quote_currency base_currency weight=-math.log(fee_scalar<times>1/ask_rate) market_name=market_name trade_type='BUY' fee=fee no_fee_rate=ask_rate)<block_end><block_end><else_stmt><block_start><if_stmt>depth<block_start>graph.add_edge(base_currency quote_currency weight=fee_scalar<times>bid_rate depth=bid_volume market_name=market_name trade_type='SELL' fee=fee volume=bid_volume no_fee_rate=bid_rate)<line_sep>graph.add_edge(quote_currency base_currency weight=fee_scalar<times>1/ask_rate depth=ask_volume market_name=market_name trade_type='BUY' fee=fee volume=ask_volume no_fee_rate=ask_rate)<block_end><else_stmt><block_start>graph.add_edge(base_currency quote_currency weight=fee_scalar<times>bid_rate market_name=market_name trade_type='SELL' fee=fee no_fee_rate=bid_rate)<line_sep>graph.add_edge(quote_currency base_currency weight=fee_scalar<times>1/ask_rate market_name=market_name trade_type='BUY' fee=fee no_fee_rate=ask_rate)<block_end><block_end>adapter.debug('Added edge to graph' market=market_name)<block_end> |
# Copyright 2017, <NAME>, All rights reserved.
<import_from_stmt>bottle HTTPResponse<import_from_stmt>common Status overrides<import_from_stmt>..web_app IHandler WebApp<import_from_stmt>..serialize SerializeStatusJson<class_stmt>StatusHandler(IHandler)<block_start><def_stmt>__init__ self status:Status<block_start>self.__status=status<block_end>@overrides(IHandler)<def_stmt>add_routes self web_app:WebApp<block_start>web_app.add_handler("/server/status" self.__handle_get_status)<block_end><def_stmt>__handle_get_status self<block_start>out_json=SerializeStatusJson.status(self.__status)<line_sep><return>HTTPResponse(body=out_json)<block_end><block_end> |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Electricity Transformer Temperature (ETT) dataset."""<import_from_stmt>dataclasses dataclass<import_stmt>pandas<as>pd<import_stmt>datasets<line_sep>_CITATION="""\
@inproceedings{haoyietal-informer-2021,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
booktitle = {The Thirty-Fifth {AAAI} Conference on Artificial Intelligence, {AAAI} 2021, Virtual Conference},
volume = {35},
number = {12},
pages = {11106--11115},
publisher = {{AAAI} Press},
year = {2021},
}
"""<line_sep>_DESCRIPTION="""\
The data of Electricity Transformers from two separated counties
in China collected for two years at hourly and 15-min frequencies.
Each data point consists of the target value "oil temperature" and
6 power load features. The train/val/test is 12/4/4 months.
"""<line_sep>_HOMEPAGE="https://github.com/zhouhaoyi/ETDataset"<line_sep>_LICENSE="The Creative Commons Attribution 4.0 International License. https://creativecommons.org/licenses/by/4.0/"<line_sep># The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLS={"h1":"https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh1.csv" "h2":"https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTh2.csv" "m1":"https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm1.csv" "m2":"https://raw.githubusercontent.com/zhouhaoyi/ETDataset/main/ETT-small/ETTm2.csv" }<line_sep>@dataclass<class_stmt>ETTBuilderConfig(datasets.BuilderConfig)<block_start>"""ETT builder config."""<line_sep>prediction_length:int=24<line_sep>multivariate:bool=<false><block_end><class_stmt>ETT(datasets.GeneratorBasedBuilder)<block_start>"""Electricity Transformer Temperature (ETT) dataset"""<line_sep>VERSION=datasets.Version("1.0.0")<line_sep># You will be able to load one or the other configurations in the following list with
# data = datasets.load_dataset('ett', 'h1')
# data = datasets.load_dataset('ett', 'm2')
BUILDER_CONFIGS=[ETTBuilderConfig(name="h1" version=VERSION description="Time series from first county at hourly frequency." ) ETTBuilderConfig(name="h2" version=VERSION description="Time series from second county at hourly frequency." ) ETTBuilderConfig(name="m1" version=VERSION description="Time series from first county at 15-min frequency." ) ETTBuilderConfig(name="m2" version=VERSION description="Time series from second county at 15-min frequency." ) ]<line_sep>DEFAULT_CONFIG_NAME="h1"# It's not mandatory to have a default configuration. Just use one if it make sense.
<def_stmt>_info self<block_start><if_stmt>self.config.multivariate<block_start>features=datasets.Features({"start":datasets.Value("timestamp[s]") "target":datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) "feat_static_cat":datasets.Sequence(datasets.Value("uint64")) "item_id":datasets.Value("string") })<block_end><else_stmt><block_start>features=datasets.Features({"start":datasets.Value("timestamp[s]") "target":datasets.Sequence(datasets.Value("float32")) "feat_static_cat":datasets.Sequence(datasets.Value("uint64")) "feat_dynamic_real":datasets.Sequence(datasets.Sequence(datasets.Value("float32"))) "item_id":datasets.Value("string") })<block_end><return>datasets.DatasetInfo(# This is the description that will appear on the datasets page.
description=_DESCRIPTION # This defines the different columns of the dataset and their types
features=features # Here we define them above because they are different between the two configurations
# If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
# specify them. They'll be used if as_supervised=True in builder.as_dataset.
# supervised_keys=("sentence", "label"),
# Homepage of the dataset for documentation
homepage=_HOMEPAGE # License for the dataset if available
license=_LICENSE # Citation for the dataset
citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>urls=_URLS[self.config.name]<line_sep>filepath=dl_manager.download_and_extract(urls)<line_sep><return>[datasets.SplitGenerator(name=datasets.Split.TRAIN # These kwargs will be passed to _generate_examples
gen_kwargs={"filepath":filepath "split":"train" } ) datasets.SplitGenerator(name=datasets.Split.TEST # These kwargs will be passed to _generate_examples
gen_kwargs={"filepath":filepath "split":"test" } ) datasets.SplitGenerator(name=datasets.Split.VALIDATION # These kwargs will be passed to _generate_examples
gen_kwargs={"filepath":filepath "split":"dev" } ) ]<block_end># method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
<def_stmt>_generate_examples self filepath split<block_start>data=pd.read_csv(filepath parse_dates=<true> index_col=0)<line_sep>start_date=data.index.min()<if_stmt>self.config.name<in>["m1" "m2"]<block_start>factor=4# 15-min frequency
<block_end><else_stmt><block_start>factor=1# hourly frequency
<block_end>train_end_date_index=12<times>30<times>24<times>factor# 1 year
<if_stmt>split<eq>"dev"<block_start>end_date_index=12<times>30<times>24+4<times>30<times>24<times>factor# 1 year + 4 months
<block_end><else_stmt><block_start>end_date_index=12<times>30<times>24+8<times>30<times>24<times>factor<block_end># 1 year + 8 months
<if_stmt>self.config.multivariate<block_start><if_stmt>split<in>["test" "dev"]# rolling windows of prediction_length for dev and test
<block_start><for_stmt>i,index enumerate(range(train_end_date_index end_date_index self.config.prediction_length ))<block_start><yield>i {"start":start_date "target":data[:index+self.config.prediction_length].values.astype("float32").T "feat_static_cat":[0] "item_id":"0" }<block_end><block_end><else_stmt><block_start><yield>0 {"start":start_date "target":data[:train_end_date_index].values.astype("float32").T "feat_static_cat":[0] "item_id":"0" }<block_end><block_end><else_stmt><block_start><if_stmt>split<in>["test" "dev"]# rolling windows of prediction_length for dev and test
<block_start><for_stmt>i,index enumerate(range(train_end_date_index end_date_index self.config.prediction_length ))<block_start>target=data["OT"][:index+self.config.prediction_length].values.astype("float32")<line_sep>feat_dynamic_real=data[["HUFL" "HULL" "MUFL" "MULL" "LUFL" "LULL"]][:index+self.config.prediction_length].values.T.astype("float32")<line_sep><yield>i {"start":start_date "target":target "feat_dynamic_real":feat_dynamic_real "feat_static_cat":[0] "item_id":"OT" }<block_end><block_end><else_stmt><block_start>target=data["OT"][:train_end_date_index].values.astype("float32")<line_sep>feat_dynamic_real=data[["HUFL" "HULL" "MUFL" "MULL" "LUFL" "LULL"]][:train_end_date_index].values.T.astype("float32")<line_sep><yield>0 {"start":start_date "target":target "feat_dynamic_real":feat_dynamic_real "feat_static_cat":[0] "item_id":"OT" }<block_end><block_end><block_end><block_end> |
<import_stmt>os<import_stmt>subprocess<import_stmt>sys<line_sep>inputs=['lib/assert/strict.js' 'lib/assert.js' 'lib/async_hooks.js' 'lib/buffer.js' 'lib/child_process.js' 'lib/cluster.js' 'lib/console.js' 'lib/constants.js' 'lib/crypto.js' 'lib/dgram.js' 'lib/diagnostics_channel.js' 'lib/dns/promises.js' 'lib/dns.js' 'lib/domain.js' 'lib/events.js' 'lib/fs/promises.js' 'lib/fs.js' 'lib/http.js' 'lib/http2.js' 'lib/https.js' 'lib/inspector.js' 'lib/internal/abort_controller.js' 'lib/internal/assert/assertion_error.js' 'lib/internal/assert/calltracker.js' 'lib/internal/assert.js' 'lib/internal/async_hooks.js' 'lib/internal/blob.js' 'lib/internal/blocklist.js' 'lib/internal/bootstrap/environment.js' 'lib/internal/bootstrap/loaders.js' 'lib/internal/bootstrap/node.js' 'lib/internal/bootstrap/pre_execution.js' 'lib/internal/bootstrap/switches/does_not_own_process_state.js' 'lib/internal/bootstrap/switches/does_own_process_state.js' 'lib/internal/bootstrap/switches/is_main_thread.js' 'lib/internal/bootstrap/switches/is_not_main_thread.js' 'lib/internal/buffer.js' 'lib/internal/child_process/serialization.js' 'lib/internal/child_process.js' 'lib/internal/cli_table.js' 'lib/internal/cluster/child.js' 'lib/internal/cluster/primary.js' 'lib/internal/cluster/round_robin_handle.js' 'lib/internal/cluster/shared_handle.js' 'lib/internal/cluster/utils.js' 'lib/internal/cluster/worker.js' 'lib/internal/console/constructor.js' 'lib/internal/console/global.js' 'lib/internal/constants.js' 'lib/internal/crypto/aes.js' 'lib/internal/crypto/certificate.js' 'lib/internal/crypto/cipher.js' 'lib/internal/crypto/diffiehellman.js' 'lib/internal/crypto/dsa.js' 'lib/internal/crypto/ec.js' 'lib/internal/crypto/hash.js' 'lib/internal/crypto/hashnames.js' 'lib/internal/crypto/hkdf.js' 'lib/internal/crypto/keygen.js' 'lib/internal/crypto/keys.js' 'lib/internal/crypto/mac.js' 'lib/internal/crypto/pbkdf2.js' 'lib/internal/crypto/random.js' 'lib/internal/crypto/rsa.js' 'lib/internal/crypto/scrypt.js' 
'lib/internal/crypto/sig.js' 'lib/internal/crypto/util.js' 'lib/internal/crypto/webcrypto.js' 'lib/internal/crypto/x509.js' 'lib/internal/debugger/inspect.js' 'lib/internal/debugger/inspect_client.js' 'lib/internal/debugger/inspect_repl.js' 'lib/internal/dgram.js' 'lib/internal/dns/promises.js' 'lib/internal/dns/utils.js' 'lib/internal/dtrace.js' 'lib/internal/encoding.js' 'lib/internal/errors.js' 'lib/internal/error_serdes.js' 'lib/internal/event_target.js' 'lib/internal/fixed_queue.js' 'lib/internal/freelist.js' 'lib/internal/freeze_intrinsics.js' 'lib/internal/fs/cp/cp-sync.js' 'lib/internal/fs/cp/cp.js' 'lib/internal/fs/dir.js' 'lib/internal/fs/promises.js' 'lib/internal/fs/read_file_context.js' 'lib/internal/fs/rimraf.js' 'lib/internal/fs/streams.js' 'lib/internal/fs/sync_write_stream.js' 'lib/internal/fs/utils.js' 'lib/internal/fs/watchers.js' 'lib/internal/heap_utils.js' 'lib/internal/histogram.js' 'lib/internal/http.js' 'lib/internal/http2/compat.js' 'lib/internal/http2/core.js' 'lib/internal/http2/util.js' 'lib/internal/idna.js' 'lib/internal/inspector_async_hook.js' 'lib/internal/js_stream_socket.js' 'lib/internal/legacy/processbinding.js' 'lib/internal/linkedlist.js' 'lib/internal/main/check_syntax.js' 'lib/internal/main/eval_stdin.js' 'lib/internal/main/eval_string.js' 'lib/internal/main/inspect.js' 'lib/internal/main/print_help.js' 'lib/internal/main/prof_process.js' 'lib/internal/main/repl.js' 'lib/internal/main/run_main_module.js' 'lib/internal/main/worker_thread.js' 'lib/internal/modules/cjs/helpers.js' 'lib/internal/modules/cjs/loader.js' 'lib/internal/modules/esm/create_dynamic_module.js' 'lib/internal/modules/esm/get_format.js' 'lib/internal/modules/esm/get_source.js' 'lib/internal/modules/esm/loader.js' 'lib/internal/modules/esm/module_job.js' 'lib/internal/modules/esm/module_map.js' 'lib/internal/modules/esm/resolve.js' 'lib/internal/modules/esm/transform_source.js' 'lib/internal/modules/esm/translators.js' 
'lib/internal/modules/package_json_reader.js' 'lib/internal/modules/run_main.js' 'lib/internal/net.js' 'lib/internal/options.js' 'lib/internal/perf/event_loop_delay.js' 'lib/internal/perf/event_loop_utilization.js' 'lib/internal/perf/nodetiming.js' 'lib/internal/perf/observe.js' 'lib/internal/perf/performance.js' 'lib/internal/perf/performance_entry.js' 'lib/internal/perf/timerify.js' 'lib/internal/perf/usertiming.js' 'lib/internal/perf/utils.js' 'lib/internal/per_context/domexception.js' 'lib/internal/per_context/messageport.js' 'lib/internal/per_context/primordials.js' 'lib/internal/policy/manifest.js' 'lib/internal/policy/sri.js' 'lib/internal/priority_queue.js' 'lib/internal/process/esm_loader.js' 'lib/internal/process/execution.js' 'lib/internal/process/per_thread.js' 'lib/internal/process/policy.js' 'lib/internal/process/promises.js' 'lib/internal/process/report.js' 'lib/internal/process/signal.js' 'lib/internal/process/task_queues.js' 'lib/internal/process/warning.js' 'lib/internal/process/worker_thread_only.js' 'lib/internal/querystring.js' 'lib/internal/readline/callbacks.js' 'lib/internal/readline/emitKeypressEvents.js' 'lib/internal/readline/utils.js' 'lib/internal/repl/await.js' 'lib/internal/repl/history.js' 'lib/internal/repl/utils.js' 'lib/internal/repl.js' 'lib/internal/socketaddress.js' 'lib/internal/socket_list.js' 'lib/internal/source_map/prepare_stack_trace.js' 'lib/internal/source_map/source_map.js' 'lib/internal/source_map/source_map_cache.js' 'lib/internal/streams/add-abort-signal.js' 'lib/internal/streams/buffer_list.js' 'lib/internal/streams/compose.js' 'lib/internal/streams/destroy.js' 'lib/internal/streams/duplex.js' 'lib/internal/streams/duplexify.js' 'lib/internal/streams/end-of-stream.js' 'lib/internal/streams/from.js' 'lib/internal/streams/lazy_transform.js' 'lib/internal/streams/legacy.js' 'lib/internal/streams/passthrough.js' 'lib/internal/streams/pipeline.js' 'lib/internal/streams/readable.js' 'lib/internal/streams/state.js' 
'lib/internal/streams/transform.js' 'lib/internal/streams/utils.js' 'lib/internal/streams/writable.js' 'lib/internal/stream_base_commons.js' 'lib/internal/test/binding.js' 'lib/internal/test/transfer.js' 'lib/internal/timers.js' 'lib/internal/tls/parse-cert-string.js' 'lib/internal/tls/secure-context.js' 'lib/internal/tls/secure-pair.js' 'lib/internal/trace_events_async_hooks.js' 'lib/internal/tty.js' 'lib/internal/url.js' 'lib/internal/util/comparisons.js' 'lib/internal/util/debuglog.js' 'lib/internal/util/inspect.js' 'lib/internal/util/inspector.js' 'lib/internal/util/iterable_weak_map.js' 'lib/internal/util/types.js' 'lib/internal/util.js' 'lib/internal/v8_prof_polyfill.js' 'lib/internal/v8_prof_processor.js' 'lib/internal/validators.js' 'lib/internal/vm/module.js' 'lib/internal/watchdog.js' 'lib/internal/webstreams/encoding.js' 'lib/internal/webstreams/queuingstrategies.js' 'lib/internal/webstreams/readablestream.js' 'lib/internal/webstreams/transfer.js' 'lib/internal/webstreams/transformstream.js' 'lib/internal/webstreams/util.js' 'lib/internal/webstreams/writablestream.js' 'lib/internal/worker/io.js' 'lib/internal/worker/js_transferable.js' 'lib/internal/worker.js' 'lib/module.js' 'lib/net.js' 'lib/os.js' 'lib/path/posix.js' 'lib/path/win32.js' 'lib/path.js' 'lib/perf_hooks.js' 'lib/process.js' 'lib/punycode.js' 'lib/querystring.js' 'lib/readline.js' 'lib/repl.js' 'lib/stream/consumers.js' 'lib/stream/promises.js' 'lib/stream/web.js' 'lib/stream.js' 'lib/string_decoder.js' 'lib/sys.js' 'lib/timers/promises.js' 'lib/timers.js' 'lib/tls.js' 'lib/trace_events.js' 'lib/tty.js' 'lib/url.js' 'lib/util/types.js' 'lib/util.js' 'lib/v8.js' 'lib/vm.js' 'lib/wasi.js' 'lib/worker_threads.js' 'lib/zlib.js' 'lib/_http_agent.js' 'lib/_http_client.js' 'lib/_http_common.js' 'lib/_http_incoming.js' 'lib/_http_outgoing.js' 'lib/_http_server.js' 'lib/_stream_duplex.js' 'lib/_stream_passthrough.js' 'lib/_stream_readable.js' 'lib/_stream_transform.js' 'lib/_stream_wrap.js' 
'lib/_stream_writable.js' 'lib/_tls_common.js' 'lib/_tls_wrap.js' 'deps/v8/tools/splaytree.mjs' 'deps/v8/tools/codemap.mjs' 'deps/v8/tools/consarray.mjs' 'deps/v8/tools/csvparser.mjs' 'deps/v8/tools/profile.mjs' 'deps/v8/tools/profile_view.mjs' 'deps/v8/tools/logreader.mjs' 'deps/v8/tools/arguments.mjs' 'deps/v8/tools/tickprocessor.mjs' 'deps/v8/tools/sourcemap.mjs' 'deps/v8/tools/tickprocessor-driver.mjs' 'deps/acorn/acorn/dist/acorn.js' 'deps/acorn/acorn-walk/dist/walk.js' 'deps/cjs-module-lexer/lexer.js' 'deps/cjs-module-lexer/dist/lexer.js' 'lib/_third_party_main.js' 'config.gypi' ]<line_sep>deps=['deps/v8/tools/splaytree.mjs' 'deps/v8/tools/codemap.mjs' 'deps/v8/tools/consarray.mjs' 'deps/v8/tools/csvparser.mjs' 'deps/v8/tools/profile.mjs' 'deps/v8/tools/profile_view.mjs' 'deps/v8/tools/logreader.mjs' 'deps/v8/tools/arguments.mjs' 'deps/v8/tools/tickprocessor.mjs' 'deps/v8/tools/sourcemap.mjs' 'deps/v8/tools/tickprocessor-driver.mjs' 'deps/acorn/acorn/dist/acorn.js' 'deps/acorn/acorn-walk/dist/walk.js' 'deps/cjs-module-lexer/lexer.js' 'deps/cjs-module-lexer/dist/lexer.js' ]<line_sep>noderoot=sys.argv[1]<line_sep>mtimes=[]<for_stmt>inFile deps<block_start>mtimes=mtimes+[os.path.getmtime(os.path.join(noderoot inFile))]<block_end>mtimes=mtimes+[os.path.getmtime(sys.argv[0])]<line_sep>mtimes.sort()<line_sep>mtimes.reverse()<line_sep>minputs=[]<for_stmt>inFile deps<block_start>minputs=minputs+[inFile.replace('/' os.path.sep)]<block_end>outFile=os.path.join(noderoot 'src/node_javascript.cc')<if_stmt><not>os.path.exists(outFile)<or>os.path.getmtime(outFile)<l>mtimes[0]<block_start>subprocess.check_call([sys.executable 'tools/js2c.py' '--directory' 'lib' '--target' 'src/node_javascript.cc' 'config.gypi']+deps cwd=noderoot)<block_end> |
'''
This file has basic image modification functions
'''<import_from_stmt>PIL Image<import_stmt>cv2<import_from_stmt>scipy.spatial Voronoi<import_from_stmt>itertools product<import_stmt>numpy<as>np<def_stmt>convert_black_white img_data=<none> img_file=<none> threshold=100<block_start><assert_stmt>img_data<is><not><none><or>img_file<is><not><none><if_stmt>img_data<is><none><block_start>img_data=Image.open(img_file)<block_end>img_copy=img_data.copy()<line_sep>pixels=img_copy.load()<for_stmt>j,k product(range(img_copy.size[0]) range(img_copy.size[1]))<block_start><if_stmt>(np.array(pixels[j k][0:3])<g>threshold).any()<block_start>pixels[j k]=(255 255 255 255)<block_end><else_stmt><block_start>pixels[j k]=(0 0 0 255)<block_end><block_end><return>img_copy<block_end><def_stmt>get_edges img_data=<none> img_file=<none> threshold=100 kernelsize=1<block_start><assert_stmt>img_data<is><not><none><or>img_file<is><not><none><if_stmt>img_data<is><none><block_start>img_data=Image.open(img_file)<block_end>img_copy=img_data.copy()<line_sep># Get the black and white image
img_bw=convert_black_white(img_data=img_copy img_file=img_file threshold=threshold)<line_sep>cv_bw=cv2.cvtColor(np.array(img_bw) cv2.COLOR_RGB2BGR)<line_sep># Detect edges using Laplacian
laplacian=cv2.Laplacian(cv_bw cv2.CV_8U ksize=kernelsize)<line_sep># Convert back to Pillow image
pil_lap=Image.fromarray(laplacian)<line_sep># For computing Voronoi images, we need to squeeze the RGB data to 0s and 1s
pil_squeezed=pil_lap.convert('L')<line_sep>pil_squeezed_01=pil_squeezed.point(<lambda>x:0<if>x<l>128<else>255 '1')<line_sep><return>pil_squeezed_01<block_end><def_stmt>voronoi_edge img_data=<none> img_file=<none> threshold=100 kernelsize=1<block_start><assert_stmt>img_data<is><not><none><or>img_file<is><not><none><if_stmt>img_data<is><none><block_start>img_data=Image.open(img_file)<block_end>img_copy=img_data.copy()<line_sep># Get 0s and 1s of the edges
pil_squeezed_01=get_edges(img_data=img_copy img_file=img_file threshold=threshold kernelsize=kernelsize)<line_sep># Collecting point for Voronoi edge computation
nz_elements=np.nonzero(np.asarray(pil_squeezed_01))<line_sep>points=np.fliplr(np.array(nz_elements).T)<line_sep>vor=Voronoi(points)<line_sep>vor_x=vor.vertices.T[0]<line_sep>vor_y=-vor.vertices.T[1]+img_data.size[1]<line_sep># Convert the black and white image to 0s and 1s
img_bw=convert_black_white(img_data=img_copy img_file=img_file threshold=threshold)<line_sep>img_bw_squeezed=img_bw.convert('L')<line_sep>img_bw_01=img_bw_squeezed.point(<lambda>x:0<if>x<l>128<else>255 '1')<line_sep>pixels=img_bw_01.load()<line_sep>center_x=[]<line_sep>center_y=[]<for_stmt>x,y zip(vor_x vor_y)<block_start><if_stmt>0<l>x<and>x<l>img_data.size[0]<and>0<l>y<and>y<l>img_data.size[1]<and>pixels[int(x) img_data.size[1]-1-int(y)]<eq>0<block_start>center_x.append(int(x))<line_sep>center_y.append(int(y))<block_end><block_end><return>{'edge_image':pil_squeezed_01 'vor_center_x':center_x 'vor_center_y':center_y}<block_end><def_stmt>plot_voronoi_plot img_data=<none> img_file=<none> threshold=100 kernelsize=3 plot_name=<none><block_start><import_stmt>matplotlib.pyplot<as>plt<assert_stmt>img_data<is><not><none><or>img_file<is><not><none><line_sep>vor_results=voronoi_edge(img_data=img_data img_file=img_file threshold=threshold kernelsize=kernelsize)<line_sep>xlim=vor_results['edge_image'].size[0]<line_sep>ylim=vor_results['edge_image'].size[1]<line_sep>x_data=vor_results['vor_center_x']<line_sep>y_data=vor_results['vor_center_y']<line_sep>plt.figure()<line_sep>plt.scatter(x_data y_data s=0.5)<line_sep>plt.xlim(0 xlim)<line_sep>plt.ylim(0 ylim)<if_stmt>plot_name<is><none><block_start>plt.savefig('voronoi_fig.png')<block_end><else_stmt><block_start>plt.savefig(plot_name+'.png')<block_end><block_end> |
<import_stmt>sys<import_from_stmt>queue Queue<import_from_stmt>multiprocessing.managers BaseManager<import_stmt>etl<import_stmt>json<import_stmt>extends<import_stmt>time<line_sep>authkey="etlpy".encode('utf-8')<line_sep>timeout=1<line_sep>rpc_port=8888<class_stmt>ETLJob<block_start><def_stmt>__init__ self project jobname config id<block_start>self.project=project<line_sep>self.jobname=jobname<line_sep>self.config=config<line_sep>self.id=id<line_sep><block_end><block_end><class_stmt>JobResult<block_start><def_stmt>__init__ self name count id<block_start>self.name=name<line_sep>self.count=count<line_sep>self.id=id<line_sep><block_end><block_end><class_stmt>Master<block_start><def_stmt>__init__ self project jobname# 派发出去的作业队列
<block_start>self.dispatched_job_queue=Queue()<line_sep># 完成的作业队列
self.finished_job_queue=Queue()<line_sep>self.project=project<line_sep>self.jobname=jobname<line_sep>self.maxprocess=10<line_sep><block_end><def_stmt>get_dispatched_job_queue self<block_start><return>self.dispatched_job_queue<block_end><def_stmt>get_finished_job_queue self<block_start><return>self.finished_job_queue<block_end><def_stmt>start self skip=0# 把派发作业队列和完成作业队列注册到网络上
<block_start>BaseManager.register('get_dispatched_job_queue' callable=self.get_dispatched_job_queue)<line_sep>BaseManager.register('get_finished_job_queue' callable=self.get_finished_job_queue)<line_sep># 监听端口和启动服务
manager=BaseManager(address=('0.0.0.0' rpc_port) authkey=authkey)<line_sep>manager.start()<line_sep># 使用上面注册的方法获取队列
dispatched_jobs=manager.get_dispatched_job_queue()<line_sep>finished_jobs=manager.get_finished_job_queue()<line_sep>job_id=0<line_sep>module=self.project.modules[self.jobname]<line_sep>proj=json.loads(json.dumps(etl.convert_dict(self.project self.project.__defaultdict__) ensure_ascii=<false>))<while_stmt><true><block_start><for_stmt>task etl.parallel_map(module)<block_start>job_id=job_id+1<if_stmt>job_id<l>skip<block_start><continue><block_end>job=ETLJob(proj self.jobname task job_id)<line_sep>print('Dispatch job: %s'%job.id)<line_sep>dispatched_jobs.put(job)<block_end><while_stmt><not>dispatched_jobs.empty()<block_start>job=finished_jobs.get(60)<line_sep>print('Finished Job: %s, Count: %s'%(job.id job.count))<block_end>key=input('press any key to repeat,c to cancel')<if_stmt>key<eq>'c'<block_start>manager.shutdown()<line_sep><break><block_end><block_end>#manager.shutdown()
<block_end><block_end><class_stmt>Slave<block_start><def_stmt>__init__ self# 派发出去的作业队列
<block_start>self.dispatched_job_queue=Queue()<line_sep># 完成的作业队列
self.finished_job_queue=Queue()<block_end><def_stmt>start self execute=<true> serverip='127.0.0.1' port=8888# 把派发作业队列和完成作业队列注册到网络上
<block_start>BaseManager.register('get_dispatched_job_queue')<line_sep>BaseManager.register('get_finished_job_queue')<line_sep>server=serverip<line_sep>print('Connect to server %s...'%server)<line_sep>manager=BaseManager(address=(server port) authkey=authkey)<line_sep>manager.connect()<line_sep># 使用上面注册的方法获取队列
dispatched_jobs=manager.get_dispatched_job_queue()<line_sep>finished_jobs=manager.get_finished_job_queue()<line_sep># 运行作业并返回结果,这里只是模拟作业运行,所以返回的是接收到的作业
<while_stmt><true><block_start><if_stmt>dispatched_jobs.empty()<block_start>time.sleep(1)<line_sep>print('queue is empty,wait 1 sec...')<line_sep><continue><line_sep><block_end>job=dispatched_jobs.get(timeout=timeout)<line_sep>print('Run job: %s '%job.id)<line_sep>project=job.project<line_sep>project=etl.LoadProject_dict(project)<line_sep>module=project.modules[job.jobname]<line_sep>count=0<try_stmt><block_start>generator=etl.parallel_reduce(module [job.config] execute)<for_stmt>r generator<block_start>count<augadd>1<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<block_end>print('finish job,id %s, count %s'%(job.id count))<line_sep>resultjob=JobResult(job.jobname count job.id)<line_sep>finished_jobs.put(resultjob)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>ip='127.0.0.1'<line_sep>port=8888<line_sep>argv=sys.argv<if_stmt>len(argv)<g>1<block_start>ip=argv[1]<line_sep><block_end><if_stmt>len(argv)<g>2<block_start>port=int(argv[2])<line_sep><block_end>slave=Slave()<line_sep>slave.start(<true> ip port)<line_sep><block_end> |
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.labour labour<def_stmt>test_labour <block_start>"""Test module labour.py by downloading
labour.csv and testing shape of
extracted data has 569 rows and 4 columns
"""<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=labour(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(569 4)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise>()<block_end><block_end> |
<import_stmt>torch<line_sep>TESSERACT_DIM=2<line_sep>TESSERACT_DEP=2<line_sep>BATCH_SIZE=8<line_sep>SEQ_LENGTH=8<line_sep>HIDDEN_SIZE=8<line_sep>NUM_CLASSES=8<line_sep>VOCAB_SIZE=16<line_sep>IMG_SIZE=16<def_stmt>check_equal A B<block_start><assert_stmt>torch.allclose(A B rtol=1e-5 atol=1e-2)<block_end> |
<import_from_stmt>.gae GAE<import_from_stmt>.vgae VGAE<line_sep> |
"""
--------------------------------------------------------------------------
The `self_paced_ensemble.canonical_resampling` module implement a
resampling-based classifier for imbalanced classification.
15 resampling algorithms are included:
'RUS', 'CNN', 'ENN', 'NCR', 'Tomek', 'ALLKNN', 'OSS',
'NM', 'CC', 'SMOTE', 'ADASYN', 'BorderSMOTE', 'SMOTEENN',
'SMOTETomek', 'ORG'.
Note: the implementation of these resampling algorithms is based on
imblearn python package.
See https://github.com/scikit-learn-contrib/imbalanced-learn.
--------------------------------------------------------------------------
"""<import_from_stmt>.canonical_resampling ResampleClassifier<line_sep>__all__=["ResampleClassifier" ]<line_sep> |
<import_stmt>os.path<import_stmt>os<import_stmt>numpy<import_from_stmt>. common cgen<line_sep>"""
References
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_empirical_covariance.py#L297
https://github.com/scikit-learn/scikit-learn/blob/15a949460dbf19e5e196b8ef48f9712b72a3b3c3/sklearn/covariance/_elliptic_envelope.py#L149
"""<import_from_stmt>sklearn.mixture._gaussian_mixture _compute_log_det_cholesky<import_from_stmt>sklearn.utils.extmath row_norms<line_sep>np=numpy<def_stmt>squared_mahalanobis_distance x1 x2 precision<block_start>"""
@precision is the inverted covariance matrix
computes (x1 - x2).T * VI * (x1 - x2)
where VI is the precision matrix, the inverse of the covariance matrix
Loosely based on the scikit-learn implementation,
https://github.com/scikit-learn/scikit-learn/blob/main/sklearn/neighbors/_dist_metrics.pyx
"""<line_sep>distance=0.0<line_sep>size=x1.shape[0]<line_sep>temp=numpy.zeros(shape=size)<assert_stmt>x1.shape<eq>x2.shape<assert_stmt>precision.shape[0]<eq>precision.shape[1]<assert_stmt>size<eq>precision.shape[0]<for_stmt>i range(size)<block_start>accumulate=0<for_stmt>j range(size)<block_start>accumulate<augadd>precision[i j]<times>(x1[j]-x2[j])<block_end>distance<augadd>accumulate<times>(x1[i]-x2[i])<block_end><return>distance<block_end><def_stmt>generate_code means precision offset name='my_elliptic' modifiers='static const'<block_start>n_features=means.shape[0]<line_sep>decision_boundary=offset# FIXME, check
classifier_name=f'{name}_classifier'<line_sep>means_name=f'{name}_means'<line_sep>precisions_name=f'{name}_precisions'<line_sep>predict_function_name=f'{name}_predict'<line_sep>includes='''
// This code is generated by emlearn
#include <eml_distance.h>
'''<line_sep>pre='\n\n'.join([includes cgen.array_declare(means_name n_features modifiers=modifiers values=means) cgen.array_declare(precisions_name n_features<times>n_features modifiers=modifiers values=precision.flatten(order='C') ) ])<line_sep>main=f'''
#include <stdio.h>
// Data definitions
{modifiers} EmlEllipticEnvelope {classifier_name} = {{
{n_features},
{decision_boundary},
{means_name},
{precisions_name}
}};
// Prediction function
float {predict_function_name}(const float *features, int n_features) {{
float dist = 0.0;
const int class = eml_elliptic_envelope_predict(&{classifier_name},
features, n_features, &dist);
return dist;
}}
'''<line_sep>code=pre+main<line_sep><return>code<block_end><class_stmt>Wrapper<block_start><def_stmt>__init__ self estimator classifier='inline' dtype='float'<block_start>self.dtype=dtype<line_sep>precision=estimator.get_precision()<line_sep>self._means=estimator.location_.copy()<line_sep>self._precision=precision<line_sep>self._offset=estimator.offset_<if_stmt>classifier<eq>'inline'<block_start>name='my_inline_elliptic'<line_sep>func='{}_predict(values, length)'.format(name)<line_sep>code=self.save(name=name)<line_sep>self.classifier_=common.CompiledClassifier(code name=name call=func out_dtype='float')<block_end><else_stmt><block_start><raise>ValueError("Unsupported classifier method '{}'".format(classifier))<block_end><block_end><def_stmt>mahalanobis self X<block_start><def_stmt>dist x<block_start><return>squared_mahalanobis_distance(x self._means precision=self._precision)<block_end>p=numpy.array([dist(x)<for>x X])<line_sep>predictions=self.classifier_.predict(X)<line_sep><return>predictions<block_end><def_stmt>predict self X<block_start><def_stmt>predict_one d<block_start>dist=-d<line_sep>dd=dist-self._offset<line_sep>is_inlier=1<if>dd<g>0<else>-1<line_sep><return>is_inlier<block_end>distances=self.mahalanobis(X)<line_sep><return>numpy.array([predict_one(d)<for>d distances])<block_end><def_stmt>save self name=<none> file=<none><block_start><if_stmt>name<is><none><block_start><if_stmt>file<is><none><block_start><raise>ValueError('Either name or file must be provided')<block_end><else_stmt><block_start>name=os.path.splitext(os.path.basename(file))[0]<block_end><block_end>code=generate_code(self._means self._precision self._offset name=name)<if_stmt>file<block_start><with_stmt>open(file 'w')<as>f<block_start>f.write(code)<block_end><block_end><return>code<block_end><block_end> |
"""Base Integration for ShiftLeft CORE - Cortex XSOAR Extension
"""<import_stmt>json<import_stmt>io<import_from_stmt>shiftleft list_app_findings_command ShiftLeftClient<def_stmt>util_load_json path<block_start><with_stmt>io.open(path mode="r" encoding="utf-8")<as>f<block_start><return>json.loads(f.read())<block_end><block_end><def_stmt>test_list_app_findings_command requests_mock<block_start>"""Tests list_app_findings_command function.
Checks the output of the command function with the expected output.
"""<line_sep>mock_response=util_load_json("test_data/test_list_findings.json")<line_sep>requests_mock.get("https://www.shiftleft.io/orgs/2c089ac1-3378-44d5-94da-9507e84351c3/apps/shiftleft-java-example/findings" json=mock_response )<line_sep>client=ShiftLeftClient(base_url="https://www.shiftleft.io" # disable-secrets-detection
verify=<false> )<line_sep>args={"app_name":"shiftleft-java-example" "severity":"critical" "type":["vuln"] "version":<none> }<line_sep>response=list_app_findings_command(client "2c089ac1-3378-44d5-94da-9507e84351c3" args)<assert_stmt>response.outputs<block_end> |
# Generated by Django 2.2.15 on 2020-08-27 08:11
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("courses" "0016_auto_20200417_1237") ]<line_sep>operations=[migrations.AlterField(model_name="courserun" name="resource_link" field=models.CharField(blank=<true> max_length=200 null=<true> verbose_name="Resource link") ) ]<block_end> |
# Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes the size of value search's search space."""<import_stmt>collections<import_stmt>functools<import_stmt>operator<import_stmt>os<import_stmt>sys<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>tf_coder tf_coder_utils<import_from_stmt>tf_coder tf_functions<import_from_stmt>tf_coder.benchmarks all_benchmarks<import_from_stmt>tf_coder.natural_language description_handler_factory<import_from_stmt>tf_coder.value_search value<as>value_module<import_from_stmt>tf_coder.value_search value_search<import_from_stmt>tf_coder.value_search value_search_settings<as>settings_module<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string('benchmark_name' 'google_02' 'The name of a benchmark to analyze.')<line_sep>flags.DEFINE_multi_string('settings' [] 'Settings to override the defaults.')<line_sep># Inspired by https://stackoverflow.com/a/45669280/9589593.
<class_stmt>SuppressPrint(object)<block_start>"""A context manager for suppressing print() calls temporarily."""<def_stmt>__enter__ self<block_start>self._old_stdout=sys.stdout<line_sep>sys.stdout=open(os.devnull 'w')<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start><del_stmt>exc_type exc_val exc_tb<line_sep>sys.stdout.close()<line_sep>sys.stdout=self._old_stdout<block_end><block_end><def_stmt>compute_search_space_size benchmark settings description_handler<block_start>"""Computes and prints the size of the search space.
This counts the total number of expressions with weight at most max_weight.
The weights come from the benchmark (for constants and inputs) and the
description handler (for determining the op weights). Distinct expressions
will be counted separately even if they evaluate to the same value, unlike in
TF-Coder's value_search algorithm which does value-based pruning.
Args:
benchmark: The Benchmark object defining the problem to analyze.
settings: A Settings object containing settings for value search.
description_handler: The DescriptionHandler used, which can modify weights
of operations.
Returns:
Nothing. All output is printed to stdout.
"""<line_sep>max_weight=settings.max_weight<line_sep>print('Computing search space.\n'<concat>'Benchmark name: {}\n'<concat>'Description handler: {}\n'<concat>'Max weight: {}'.format(benchmark.name description_handler max_weight))<line_sep># TODO(kshi): Update to load the tensor features model/config.
operations=value_search.get_reweighted_operations(benchmark settings description_handler tensor_model=<none> tensor_config=<none>)<line_sep># These loops are not the most efficient, but it doesn't really matter.
print('\nFound {} operations.'.format(len(operations)))<line_sep>print()<for_stmt>weight range(1 max(op.weight<for>op operations)+1)<block_start>print('# operations with weight {}: {}'.format(weight sum(1<for>op operations<if>op.weight<eq>weight)))<block_end>print()<for_stmt>arity range(1 max(op.num_args<for>op operations)+1)<block_start>print('# operations with arity {}: {}'.format(arity sum(1<for>op operations<if>op.num_args<eq>arity)))<block_end>output_value=value_module.OutputValue(benchmark.examples[0].output)<line_sep>values_by_weight=[collections.OrderedDict()<for>_ range(max_weight+1)]<line_sep>constant_operation=<none><for_stmt>operation operations<block_start><if_stmt>operation.name<eq>tf_functions.CONSTANT_OPERATION_NAME<block_start>constant_operation=operation<line_sep><break><block_end><block_end><with_stmt>SuppressPrint()<block_start>value_search._add_constants_and_inputs_and_print(# pylint: disable=protected-access
values_by_weight benchmark output_value constant_operation settings)<block_end>num_expressions_with_weight=[len(values_with_weight)<for>values_with_weight values_by_weight]<line_sep>print()<line_sep>max_weight_with_initial_value=max(w<for>w range(max_weight+1)<if>num_expressions_with_weight[w])<for_stmt>weight range(1 max_weight_with_initial_value+1)<block_start>print('# initial values with weight {}: {}'.format(weight num_expressions_with_weight[weight]))<block_end><for_stmt>total_weight range(2 max_weight+1)<block_start><for_stmt>operation operations# All operations should have strictly positive weight and num_args.
<block_start>op_weight=operation.weight<line_sep>op_arity=operation.num_args<if_stmt>total_weight-op_weight<l>op_arity<block_start><continue><block_end># Partition `total_weight - op_weight` into `op_arity` positive pieces.
# Equivalently, partition `total_weight - op_weight - op_arity` into
# `op_arity` nonnegative pieces.
<for_stmt>partition tf_coder_utils.generate_partitions(total_weight-op_weight-op_arity op_arity)<block_start>arg_weights=[part+1<for>part partition]<line_sep>num_expressions_with_weight[total_weight]<augadd>functools.reduce(operator.mul (num_expressions_with_weight[w]<for>w arg_weights))<block_end><block_end><block_end>print()<for_stmt>weight range(1 max_weight+1)<block_start>print('# expressions with weight exactly {}: {}'.format(weight num_expressions_with_weight[weight]))<block_end>print()<for_stmt>weight range(1 max_weight+1)<block_start>print('# expressions with weight up to {}: {}'.format(weight sum(num_expressions_with_weight[:weight+1])))<block_end><block_end><def_stmt>main unused_argv<block_start>settings=settings_module.from_list(FLAGS.settings)<line_sep>description_handler=description_handler_factory.create_handler(settings.description_handler_name)<line_sep>benchmark=all_benchmarks.find_benchmark_with_name(FLAGS.benchmark_name)<if_stmt><not>benchmark<block_start><raise>ValueError('Unknown benchmark: {}'.format(FLAGS.benchmark_name))<block_end>compute_search_space_size(benchmark=benchmark settings=settings description_handler=description_handler)<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end> |
"""Tests for capture.
Within Maya, setup a scene of moderate range (e.g. 10 frames)
and run the following.
Example:
>>> nose.run(argv=[sys.argv[0], "tests", "-v"])
"""<import_stmt>capture<import_from_stmt>maya cmds<def_stmt>test_capture <block_start>"""Plain capture works"""<line_sep>capture.capture()<block_end><def_stmt>test_camera_options <block_start>"""(Optional) camera options works"""<line_sep>capture.capture(camera_options={"displayGateMask":<false>})<block_end><def_stmt>test_display_options <block_start>"""(Optional) display options works"""<line_sep>capture.capture(display_options={"displayGradient":<false>})<block_end><def_stmt>test_viewport_options <block_start>"""(Optional) viewport options works"""<line_sep>capture.capture(viewport_options={"wireframeOnShaded":<true>})<block_end><def_stmt>test_viewport2_options <block_start>"""(Optional) viewport2 options works"""<line_sep>capture.capture(viewport2_options={"ssaoEnable":<true>})<block_end><def_stmt>test_parse_active_view <block_start>"""Parse active view works"""<line_sep># Set focus to modelPanel1 (assume it exists)
# Otherwise the panel with focus (temporary panel from capture)
# got deleted and there's no "active panel"
<import_stmt>maya.cmds<as>cmds<line_sep>cmds.setFocus("modelPanel1")<line_sep>options=capture.parse_active_view()<line_sep>capture.capture(**options)<block_end><def_stmt>test_parse_view <block_start>"""Parse view works"""<line_sep>options=capture.parse_view("modelPanel1")<line_sep>capture.capture(**options)<block_end><def_stmt>test_apply_view <block_start>"""Apply view works"""<line_sep>capture.apply_view("modelPanel1" camera_options={"overscan":2})<block_end><def_stmt>test_apply_parsed_view <block_start>"""Apply parsed view works"""<line_sep>options=capture.parse_view("modelPanel1")<line_sep>capture.apply_view("modelPanel1" **options)<block_end><def_stmt>test_apply_parsed_view_exact <block_start>"""Apply parsed view sanity check works"""<import_stmt>maya.cmds<as>cmds<line_sep>panel="modelPanel1"<line_sep>cmds.modelEditor(panel edit=<true> displayAppearance="wireframe")<line_sep>parsed=capture.parse_view(panel)<line_sep>display=parsed["viewport_options"]["displayAppearance"]<assert_stmt>display<eq>"wireframe"<line_sep># important to test both, just in case wireframe was already
# set when making the first query, and to make sure this
# actually does something.
cmds.modelEditor(panel edit=<true> displayAppearance="smoothShaded")<line_sep>parsed=capture.parse_view(panel)<line_sep>display=parsed["viewport_options"]["displayAppearance"]<assert_stmt>display<eq>"smoothShaded"<line_sep>capture.apply_view(panel viewport_options={"displayAppearance":"wireframe"})<assert_stmt>cmds.modelEditor(panel query=<true> displayAppearance=<true>)<eq>"wireframe"<block_end><def_stmt>test_apply_parsed_view_all <block_start>"""Apply parsed view all options works"""<line_sep># A set of options all trying to be different from the default
# settings (in `capture.py`) so we can test "changing states"
camera_options={}<line_sep>display_options={}<line_sep>viewport_options={}<line_sep>viewport2_options={}<for_stmt>key,value capture.CameraOptions.items()<block_start><if_stmt>isinstance(value bool)<block_start>value=<not>value<block_end><elif_stmt>isinstance(value (int float))<block_start>value=value+1<block_end><else_stmt><block_start><raise>Exception("Unexpected value in CameraOptions: %s=%s"%(key value))<block_end><block_end><for_stmt>key,value capture.DisplayOptions.items()<block_start><if_stmt>isinstance(value bool)<block_start>value=<not>value<block_end><elif_stmt>isinstance(value tuple)<block_start>value=(1 0 1)<block_end><else_stmt><block_start><raise>Exception("Unexpected value in DisplayOptions: %s=%s"%(key value))<block_end><block_end><for_stmt>key,value capture.ViewportOptions.items()<block_start><if_stmt>isinstance(value bool)<block_start>value=<not>value<block_end><elif_stmt>isinstance(value (int float))<block_start>value=value+1<block_end><elif_stmt>isinstance(value tuple)<block_start>value=(1 0 1)<block_end><elif_stmt>isinstance(value basestring)<block_start><pass># Don't bother, for now
<block_end><else_stmt><block_start><raise>Exception("Unexpected value in ViewportOptions: %s=%s"%(key value))<block_end><block_end><for_stmt>key,value capture.Viewport2Options.items()<block_start><if_stmt>isinstance(value bool)<block_start>value=<not>value<block_end><elif_stmt>isinstance(value (int float))<block_start>value=value+1<block_end><elif_stmt>isinstance(value tuple)<block_start>value=(1 0 1)<block_end><elif_stmt>isinstance(value basestring)<block_start><pass># Don't bother, for now
<block_end><else_stmt><block_start><raise>Exception("Unexpected value in Viewport2Options: %s=%s"%(key value))<block_end><block_end>defaults={"camera_options":capture.CameraOptions.copy() "display_options":capture.DisplayOptions.copy() "viewport_options":capture.ViewportOptions.copy() "viewport2_options":capture.Viewport2Options.copy() }<line_sep>others={"camera_options":camera_options "display_options":display_options "viewport_options":viewport_options "viewport2_options":viewport2_options }<line_sep>panel="modelPanel1"<def_stmt>compare this other<block_start>"""Compare options for only settings available in `this`
Some color values will be returned with possible floating
point precision errors as such result in a slightly
different number. We'd need to compare whilst keeping
such imprecisions in mind.
"""<line_sep>precision=1e-4<for_stmt>opt this<block_start>this_option=this[opt]<line_sep>other_option=other[opt]<for_stmt>key,value this_option.iteritems()<block_start>other_value=other_option[key]<if_stmt>isinstance(value float)<or>isinstance(other_value float)<block_start><if_stmt>abs(value-other_value)<g>precision<block_start><return><false><block_end><block_end><elif_stmt>isinstance(value (tuple list))# Assuming for now that any tuple or list contains floats
<block_start><if_stmt><not>all((abs(a-b)<l>precision)<for>a,b zip(value other_value))<block_start><return><false><block_end><block_end><else_stmt><block_start><if_stmt>value<ne>other_value<block_start><return><false><block_end><block_end><block_end><block_end><return><true><block_end># Apply defaults and check
capture.apply_view(panel **defaults)<line_sep>parsed_defaults=capture.parse_view(panel)<assert_stmt>compare(defaults parsed_defaults)<line_sep># Apply others and check
capture.apply_view(panel **others)<line_sep>parsed_others=capture.parse_view(panel)<assert_stmt>compare(others parsed_others)<block_end><def_stmt>test_preset <block_start>"""Creating and applying presets works"""<line_sep>preset={"width":320 "height":240 "camera_options":{"displayGateMask":<false>} "viewport_options":{"wireframeOnShaded":<true>} "display_options":{"displayGateMask":<false>}}<line_sep>capture.capture(**preset)<block_end><def_stmt>test_parse_active_scene <block_start>"""parse_active_scene() works"""<line_sep>parsed=capture.parse_active_scene()<line_sep>reference={"start_frame":cmds.playbackOptions(minTime=<true> query=<true>) "end_frame":cmds.playbackOptions(maxTime=<true> query=<true>) "width":cmds.getAttr("defaultResolution.width") "height":cmds.getAttr("defaultResolution.height") "compression":cmds.optionVar(query="playblastCompression") "filename":(cmds.optionVar(query="playblastFile")<if>cmds.optionVar(query="playblastSaveToFile")<else><none>) "format":cmds.optionVar(query="playblastFormat") "off_screen":(<true><if>cmds.optionVar(query="playblastOffscreen")<else><false>) "show_ornaments":(<true><if>cmds.optionVar(query="playblastShowOrnaments")<else><false>) "quality":cmds.optionVar(query="playblastQuality")}<for_stmt>key,value reference.items()<block_start><assert_stmt>parsed[key]<eq>value<block_end><block_end> |
<import_from_stmt>.eval_card EvaluationCard<import_from_stmt>.evaluator Evaluator<import_from_stmt>.lookup LookupTable<line_sep> |
<import_from_stmt>.sec_gateway SecGateway<line_sep> |
# Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
<import_stmt>fairlearn.metrics<as>metrics<def_stmt>_get_raw_MetricFrame # Gets an uninitialised MetricFrame for testing purposes
<block_start><return>metrics.MetricFrame.__new__(metrics.MetricFrame)<block_end> |
<def_stmt>test_deploying_contract client hex_accounts<block_start>pre_balance=client.get_balance(hex_accounts[1])<line_sep>client.send_transaction(_from=hex_accounts[0] to=hex_accounts[1] value=1234 )<line_sep>post_balance=client.get_balance(hex_accounts[1])<assert_stmt>post_balance-pre_balance<eq>1234<block_end> |
# Copyright 2018 The go-python Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
# Testcases for functions in math.
#
# Each line takes the form:
#
# <testid> <function> <input_value> -> <output_value> <flags>
#
# where:
#
# <testid> is a short name identifying the test,
#
# <function> is the function to be tested (exp, cos, asinh, ...),
#
# <input_value> is a string representing a floating-point value
#
# <output_value> is the expected (ideal) output value, again
# represented as a string.
#
# <flags> is a list of the floating-point flags required by C99
#
# The possible flags are:
#
# divide-by-zero : raised when a finite input gives a
# mathematically infinite result.
#
# overflow : raised when a finite input gives a finite result that
# is too large to fit in the usual range of an IEEE 754 double.
#
# invalid : raised for invalid inputs (e.g., sqrt(-1))
#
# ignore-sign : indicates that the sign of the result is
# unspecified; e.g., if the result is given as inf,
# then both -inf and inf should be accepted as correct.
#
# Flags may appear in any order.
#
# Lines beginning with '--' (like this one) start a comment, and are
# ignored. Blank lines, or lines containing only whitespace, are also
# ignored.
# Many of the values below were computed with the help of
# version 2.4 of the MPFR library for multiple-precision
# floating-point computations with correct rounding. All output
# values in this file are (modulo yet-to-be-discovered bugs)
# correctly rounded, provided that each input and output decimal
# floating-point value below is interpreted as a representation of
# the corresponding nearest IEEE 754 double-precision value. See the
# MPFR homepage at http://www.mpfr.org for more information about the
# MPFR project.
<import_stmt>math<import_from_stmt>libtest *<import_from_stmt>libulp *<line_sep>doc="testcases"<line_sep>inf=float("inf")<line_sep>nan=float("nan")<def_stmt>tolerance a b e<block_start>"""Return if a-b is within tolerance e"""<line_sep>d=a-b<if_stmt>d<l>0<block_start>d=-d<block_end><if_stmt>a<ne>0<block_start>e=e<times>a<if_stmt>e<l>0<block_start>e=-e<block_end><block_end><return>d<le>e<block_end><def_stmt>acc_check what want got rel_err=2e-15 abs_err=5e-323<block_start>"""Determine whether non-NaN floats a and b are equal to within a
(small) rounding error. The default values for rel_err and
abs_err are chosen to be suitable for platforms where a float is
represented by an IEEE 754 double. They allow an error of between
9 and 19 ulps."""<line_sep># need to special case infinities, since inf - inf gives nan
<if_stmt>math.isinf(want)<and>got<eq>want<block_start><return><block_end>error=got-want<line_sep>permitted_error=rel_err<times>abs(want)<if_stmt>abs_err<g>permitted_error<block_start>permitted_error=abs_err<block_end><if_stmt>abs(error)<l>permitted_error<block_start><return><block_end><raise>AssertionError("%s: want %g, got %g: error = %g; permitted error = %g"%(what want got error permitted_error))<block_end><def_stmt>t name fn x want exc=<none><block_start><global>doc<line_sep>doc=name<if_stmt>exc<is><none><block_start>got=fn(x)<if_stmt>math.isnan(want)<and>math.isnan(got)<block_start><return><block_end><if_stmt>want<eq>inf<and>got<eq>inf<block_start><return><block_end><if_stmt>want<eq>-inf<and>got<eq>-inf<block_start><return><block_end><if_stmt>fn<eq>math.lgamma# we use a weaker accuracy test for lgamma;
# lgamma only achieves an absolute error of
# a few multiples of the machine accuracy, in
# general.
<block_start>acc_check(doc want got rel_err=5e-15 abs_err=5e-15)<block_end><elif_stmt>fn<eq>math.erfc# erfc has less-than-ideal accuracy for large
# arguments (x ~ 25 or so), mainly due to the
# error involved in computing exp(-x*x).
#
# XXX Would be better to weaken this test only
# for large x, instead of for all x.
<block_start>ulps_check(doc want got 2000)<block_end><else_stmt><block_start>ulps_check(doc want got 20)<block_end><block_end><else_stmt><block_start><try_stmt><block_start>got=fn(x)<block_end><except_stmt>exc<as>e<block_start><pass><block_end><else_stmt><block_start><assert_stmt><false> "%s not raised"%exc<block_end><block_end><block_end>#
# erf: error function --
#
t("erf0000" math.erf 0.0 0.0)<line_sep>t("erf0001" math.erf -0.0 -0.0)<line_sep>t("erf0002" math.erf inf 1.0)<line_sep>t("erf0003" math.erf -inf -1.0)<line_sep>t("erf0004" math.erf nan nan)<line_sep># tiny values
t("erf0010" math.erf 1e-308 1.1283791670955125e-308)<line_sep>t("erf0011" math.erf 5e-324 4.9406564584124654e-324)<line_sep>t("erf0012" math.erf 1e-10 1.1283791670955126e-10)<line_sep># small integers
t("erf0020" math.erf 1 0.84270079294971489)<line_sep>t("erf0021" math.erf 2 0.99532226501895271)<line_sep>t("erf0022" math.erf 3 0.99997790950300136)<line_sep>t("erf0023" math.erf 4 0.99999998458274209)<line_sep>t("erf0024" math.erf 5 0.99999999999846256)<line_sep>t("erf0025" math.erf 6 1.0)<line_sep>t("erf0030" math.erf -1 -0.84270079294971489)<line_sep>t("erf0031" math.erf -2 -0.99532226501895271)<line_sep>t("erf0032" math.erf -3 -0.99997790950300136)<line_sep>t("erf0033" math.erf -4 -0.99999998458274209)<line_sep>t("erf0034" math.erf -5 -0.99999999999846256)<line_sep>t("erf0035" math.erf -6 -1.0)<line_sep># huge values should all go to +/-1, depending on sign
t("erf0040" math.erf -40 -1.0)<line_sep>t("erf0041" math.erf 1e16 1.0)<line_sep>t("erf0042" math.erf -1e150 -1.0)<line_sep>t("erf0043" math.erf 1.7e308 1.0)<line_sep># Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erf0100" math.erf 26.2 1.0)<line_sep>t("erf0101" math.erf 26.4 1.0)<line_sep>t("erf0102" math.erf 26.6 1.0)<line_sep>t("erf0103" math.erf 26.8 1.0)<line_sep>t("erf0104" math.erf 27.0 1.0)<line_sep>t("erf0105" math.erf 27.2 1.0)<line_sep>t("erf0106" math.erf 27.4 1.0)<line_sep>t("erf0107" math.erf 27.6 1.0)<line_sep>t("erf0110" math.erf -26.2 -1.0)<line_sep>t("erf0111" math.erf -26.4 -1.0)<line_sep>t("erf0112" math.erf -26.6 -1.0)<line_sep>t("erf0113" math.erf -26.8 -1.0)<line_sep>t("erf0114" math.erf -27.0 -1.0)<line_sep>t("erf0115" math.erf -27.2 -1.0)<line_sep>t("erf0116" math.erf -27.4 -1.0)<line_sep>t("erf0117" math.erf -27.6 -1.0)<line_sep>#
# erfc: complementary error function --
#
t("erfc0000" math.erfc 0.0 1.0)<line_sep>t("erfc0001" math.erfc -0.0 1.0)<line_sep>t("erfc0002" math.erfc inf 0.0)<line_sep>t("erfc0003" math.erfc -inf 2.0)<line_sep>t("erfc0004" math.erfc nan nan)<line_sep># tiny values
t("erfc0010" math.erfc 1e-308 1.0)<line_sep>t("erfc0011" math.erfc 5e-324 1.0)<line_sep>t("erfc0012" math.erfc 1e-10 0.99999999988716204)<line_sep># small integers
t("erfc0020" math.erfc 1 0.15729920705028513)<line_sep>t("erfc0021" math.erfc 2 0.0046777349810472662)<line_sep>t("erfc0022" math.erfc 3 2.2090496998585441e-05)<line_sep>t("erfc0023" math.erfc 4 1.541725790028002e-08)<line_sep>t("erfc0024" math.erfc 5 1.5374597944280349e-12)<line_sep>t("erfc0025" math.erfc 6 2.1519736712498913e-17)<line_sep>t("erfc0030" math.erfc -1 1.8427007929497148)<line_sep>t("erfc0031" math.erfc -2 1.9953222650189528)<line_sep>t("erfc0032" math.erfc -3 1.9999779095030015)<line_sep>t("erfc0033" math.erfc -4 1.9999999845827421)<line_sep>t("erfc0034" math.erfc -5 1.9999999999984626)<line_sep>t("erfc0035" math.erfc -6 2.0)<line_sep># as x -> infinity, erfc(x) behaves like exp(-x*x)/x/sqrt(pi)
t("erfc0040" math.erfc 20 5.3958656116079012e-176)<line_sep>t("erfc0041" math.erfc 25 8.3001725711965228e-274)<line_sep># FIXME(underflows to 0) t("erfc0042", math.erfc, 27, 5.2370464393526292e-319)
t("erfc0043" math.erfc 28 0.0)<line_sep># huge values
t("erfc0050" math.erfc -40 2.0)<line_sep>t("erfc0051" math.erfc 1e16 0.0)<line_sep>t("erfc0052" math.erfc -1e150 2.0)<line_sep>t("erfc0053" math.erfc 1.7e308 0.0)<line_sep># Issue 8986: inputs x with exp(-x*x) near the underflow threshold
# incorrectly signalled overflow on some platforms.
t("erfc0100" math.erfc 26.2 1.6432507924389461e-300)<line_sep>t("erfc0101" math.erfc 26.4 4.4017768588035426e-305)<line_sep>t("erfc0102" math.erfc 26.6 1.0885125885442269e-309)<line_sep># FIXME(underflows to 0) t("erfc0103", math.erfc, 26.8, 2.4849621571966629e-314)
# FIXME(underflows to 0) t("erfc0104", math.erfc, 27.0, 5.2370464393526292e-319)
# FIXME(underflows to 0) t("erfc0105", math.erfc, 27.2, 9.8813129168249309e-324)
t("erfc0106" math.erfc 27.4 0.0)<line_sep>t("erfc0107" math.erfc 27.6 0.0)<line_sep>t("erfc0110" math.erfc -26.2 2.0)<line_sep>t("erfc0111" math.erfc -26.4 2.0)<line_sep>t("erfc0112" math.erfc -26.6 2.0)<line_sep>t("erfc0113" math.erfc -26.8 2.0)<line_sep>t("erfc0114" math.erfc -27.0 2.0)<line_sep>t("erfc0115" math.erfc -27.2 2.0)<line_sep>t("erfc0116" math.erfc -27.4 2.0)<line_sep>t("erfc0117" math.erfc -27.6 2.0)<line_sep>#
# lgamma: log of absolute value of the gamma function --
#
# special values
t("lgam0000" math.lgamma 0.0 inf ValueError)<line_sep>t("lgam0001" math.lgamma -0.0 inf ValueError)<line_sep>t("lgam0002" math.lgamma inf inf)<line_sep># FIXME(ValueError) t("lgam0003", math.lgamma, -inf, inf)
t("lgam0004" math.lgamma nan nan)<line_sep># negative integers
t("lgam0010" math.lgamma -1 inf ValueError)<line_sep>t("lgam0011" math.lgamma -2 inf ValueError)<line_sep>t("lgam0012" math.lgamma -1e16 inf ValueError)<line_sep>t("lgam0013" math.lgamma -1e300 inf ValueError)<line_sep>t("lgam0014" math.lgamma -1.79e308 inf ValueError)<line_sep># small positive integers give factorials
t("lgam0020" math.lgamma 1 0.0)<line_sep>t("lgam0021" math.lgamma 2 0.0)<line_sep>t("lgam0022" math.lgamma 3 0.69314718055994529)<line_sep>t("lgam0023" math.lgamma 4 1.791759469228055)<line_sep>t("lgam0024" math.lgamma 5 3.1780538303479458)<line_sep>t("lgam0025" math.lgamma 6 4.7874917427820458)<line_sep># half integers
t("lgam0030" math.lgamma 0.5 0.57236494292470008)<line_sep>t("lgam0031" math.lgamma 1.5 -0.12078223763524522)<line_sep>t("lgam0032" math.lgamma 2.5 0.28468287047291918)<line_sep>t("lgam0033" math.lgamma 3.5 1.2009736023470743)<line_sep>t("lgam0034" math.lgamma -0.5 1.2655121234846454)<line_sep>t("lgam0035" math.lgamma -1.5 0.86004701537648098)<line_sep>t("lgam0036" math.lgamma -2.5 -0.056243716497674054)<line_sep>t("lgam0037" math.lgamma -3.5 -1.309006684993042)<line_sep># values near 0
t("lgam0040" math.lgamma 0.1 2.252712651734206)<line_sep>t("lgam0041" math.lgamma 0.01 4.5994798780420219)<line_sep>t("lgam0042" math.lgamma 1e-8 18.420680738180209)<line_sep>t("lgam0043" math.lgamma 1e-16 36.841361487904734)<line_sep>t("lgam0044" math.lgamma 1e-30 69.077552789821368)<line_sep>t("lgam0045" math.lgamma 1e-160 368.41361487904732)<line_sep># FIXME(inaccurate) t("lgam0046", math.lgamma, 1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0047", math.lgamma, 5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0048", math.lgamma, 5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0049", math.lgamma, 1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0050", math.lgamma, 1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0051", math.lgamma, 5e-324, 744.44007192138122)
t("lgam0060" math.lgamma -0.1 2.3689613327287886)<line_sep>t("lgam0061" math.lgamma -0.01 4.6110249927528013)<line_sep>t("lgam0062" math.lgamma -1e-8 18.420680749724522)<line_sep>t("lgam0063" math.lgamma -1e-16 36.841361487904734)<line_sep>t("lgam0064" math.lgamma -1e-30 69.077552789821368)<line_sep>t("lgam0065" math.lgamma -1e-160 368.41361487904732)<line_sep># FIXME(inaccurate) t("lgam0066", math.lgamma, -1e-308, 709.19620864216608)
# FIXME(inaccurate) t("lgam0067", math.lgamma, -5.6e-309, 709.77602713741896)
# FIXME(inaccurate) t("lgam0068", math.lgamma, -5.5e-309, 709.79404564292167)
# FIXME(inaccurate) t("lgam0069", math.lgamma, -1e-309, 711.49879373516012)
# FIXME(inaccurate) t("lgam0070", math.lgamma, -1e-323, 743.74692474082133)
# FIXME(inaccurate) t("lgam0071", math.lgamma, -5e-324, 744.44007192138122)
# values near negative integers
t("lgam0080" math.lgamma -0.99999999999999989 36.736800569677101)<line_sep>t("lgam0081" math.lgamma -1.0000000000000002 36.043653389117154)<line_sep>t("lgam0082" math.lgamma -1.9999999999999998 35.350506208557213)<line_sep>t("lgam0083" math.lgamma -2.0000000000000004 34.657359027997266)<line_sep>t("lgam0084" math.lgamma -100.00000000000001 -331.85460524980607)<line_sep>t("lgam0085" math.lgamma -99.999999999999986 -331.85460524980596)<line_sep># large inputs
t("lgam0100" math.lgamma 170 701.43726380873704)<line_sep>t("lgam0101" math.lgamma 171 706.57306224578736)<line_sep>t("lgam0102" math.lgamma 171.624 709.78077443669895)<line_sep>t("lgam0103" math.lgamma 171.625 709.78591682948365)<line_sep>t("lgam0104" math.lgamma 172 711.71472580228999)<line_sep>t("lgam0105" math.lgamma 2000 13198.923448054265)<line_sep>t("lgam0106" math.lgamma 2.55998332785163e305 1.7976931348623099e+308)<line_sep>t("lgam0107" math.lgamma 2.55998332785164e305 inf OverflowError)<line_sep>t("lgam0108" math.lgamma 1.7e308 inf OverflowError)<line_sep># inputs for which gamma(x) is tiny
t("lgam0120" math.lgamma -100.5 -364.90096830942736)<line_sep>t("lgam0121" math.lgamma -160.5 -656.88005261126432)<line_sep>t("lgam0122" math.lgamma -170.5 -707.99843314507882)<line_sep>t("lgam0123" math.lgamma -171.5 -713.14301641168481)<line_sep>t("lgam0124" math.lgamma -176.5 -738.95247590846486)<line_sep>t("lgam0125" math.lgamma -177.5 -744.13144651738037)<line_sep>t("lgam0126" math.lgamma -178.5 -749.3160351186001)<line_sep>t("lgam0130" math.lgamma -1000.5 -5914.4377011168517)<line_sep>t("lgam0131" math.lgamma -30000.5 -279278.6629959144)<line_sep># FIXME t("lgam0132", math.lgamma, -4503599627370495.5, -1.5782258434492883e+17)
# results close to 0: positive argument ...
t("lgam0150" math.lgamma 0.99999999999999989 6.4083812134800075e-17)<line_sep>t("lgam0151" math.lgamma 1.0000000000000002 -1.2816762426960008e-16)<line_sep>t("lgam0152" math.lgamma 1.9999999999999998 -9.3876980655431170e-17)<line_sep>t("lgam0153" math.lgamma 2.0000000000000004 1.8775396131086244e-16)<line_sep># ... and negative argument
# these are very inaccurate in python3
t("lgam0160" math.lgamma -2.7476826467 -5.2477408147689136e-11)<line_sep>t("lgam0161" math.lgamma -2.457024738 3.3464637541912932e-10)<line_sep>#
# gamma: Gamma function --
#
# special values
t("gam0000" math.gamma 0.0 inf ValueError)<line_sep>t("gam0001" math.gamma -0.0 -inf ValueError)<line_sep>t("gam0002" math.gamma inf inf)<line_sep>t("gam0003" math.gamma -inf nan ValueError)<line_sep>t("gam0004" math.gamma nan nan)<line_sep># negative integers inputs are invalid
t("gam0010" math.gamma -1 nan ValueError)<line_sep>t("gam0011" math.gamma -2 nan ValueError)<line_sep>t("gam0012" math.gamma -1e16 nan ValueError)<line_sep>t("gam0013" math.gamma -1e300 nan ValueError)<line_sep># small positive integers give factorials
t("gam0020" math.gamma 1 1)<line_sep>t("gam0021" math.gamma 2 1)<line_sep>t("gam0022" math.gamma 3 2)<line_sep>t("gam0023" math.gamma 4 6)<line_sep>t("gam0024" math.gamma 5 24)<line_sep>t("gam0025" math.gamma 6 120)<line_sep># half integers
t("gam0030" math.gamma 0.5 1.7724538509055161)<line_sep>t("gam0031" math.gamma 1.5 0.88622692545275805)<line_sep>t("gam0032" math.gamma 2.5 1.3293403881791370)<line_sep>t("gam0033" math.gamma 3.5 3.3233509704478426)<line_sep>t("gam0034" math.gamma -0.5 -3.5449077018110322)<line_sep>t("gam0035" math.gamma -1.5 2.3632718012073548)<line_sep>t("gam0036" math.gamma -2.5 -0.94530872048294190)<line_sep>t("gam0037" math.gamma -3.5 0.27008820585226911)<line_sep># values near 0
t("gam0040" math.gamma 0.1 9.5135076986687306)<line_sep>t("gam0041" math.gamma 0.01 99.432585119150602)<line_sep>t("gam0042" math.gamma 1e-8 99999999.422784343)<line_sep>t("gam0043" math.gamma 1e-16 10000000000000000)<line_sep>t("gam0044" math.gamma 1e-30 9.9999999999999988e+29)<line_sep>t("gam0045" math.gamma 1e-160 1.0000000000000000e+160)<line_sep>t("gam0046" math.gamma 1e-308 1.0000000000000000e+308)<line_sep>t("gam0047" math.gamma 5.6e-309 1.7857142857142848e+308)<line_sep>t("gam0048" math.gamma 5.5e-309 inf OverflowError)<line_sep>t("gam0049" math.gamma 1e-309 inf OverflowError)<line_sep>t("gam0050" math.gamma 1e-323 inf OverflowError)<line_sep>t("gam0051" math.gamma 5e-324 inf OverflowError)<line_sep>t("gam0060" math.gamma -0.1 -10.686287021193193)<line_sep>t("gam0061" math.gamma -0.01 -100.58719796441078)<line_sep>t("gam0062" math.gamma -1e-8 -100000000.57721567)<line_sep>t("gam0063" math.gamma -1e-16 -10000000000000000)<line_sep>t("gam0064" math.gamma -1e-30 -9.9999999999999988e+29)<line_sep>t("gam0065" math.gamma -1e-160 -1.0000000000000000e+160)<line_sep>t("gam0066" math.gamma -1e-308 -1.0000000000000000e+308)<line_sep>t("gam0067" math.gamma -5.6e-309 -1.7857142857142848e+308)<line_sep>t("gam0068" math.gamma -5.5e-309 -inf OverflowError)<line_sep>t("gam0069" math.gamma -1e-309 -inf OverflowError)<line_sep>t("gam0070" math.gamma -1e-323 -inf OverflowError)<line_sep>t("gam0071" math.gamma -5e-324 -inf OverflowError)<line_sep># values near negative integers
t("gam0080" math.gamma -0.99999999999999989 -9007199254740992.0)<line_sep>t("gam0081" math.gamma -1.0000000000000002 4503599627370495.5)<line_sep>t("gam0082" math.gamma -1.9999999999999998 2251799813685248.5)<line_sep>t("gam0083" math.gamma -2.0000000000000004 -1125899906842623.5)<line_sep>t("gam0084" math.gamma -100.00000000000001 -7.5400833348831090e-145)<line_sep>t("gam0085" math.gamma -99.999999999999986 7.5400833348840962e-145)<line_sep># large inputs
t("gam0100" math.gamma 170 4.2690680090047051e+304)<line_sep>t("gam0101" math.gamma 171 7.2574156153079990e+306)<line_sep># FIXME(overflows) t("gam0102", math.gamma, 171.624, 1.7942117599248104e+308)
t("gam0103" math.gamma 171.625 inf OverflowError)<line_sep>t("gam0104" math.gamma 172 inf OverflowError)<line_sep>t("gam0105" math.gamma 2000 inf OverflowError)<line_sep>t("gam0106" math.gamma 1.7e308 inf OverflowError)<line_sep># inputs for which gamma(x) is tiny
t("gam0120" math.gamma -100.5 -3.3536908198076787e-159)<line_sep>t("gam0121" math.gamma -160.5 -5.2555464470078293e-286)<line_sep>t("gam0122" math.gamma -170.5 -3.3127395215386074e-308)<line_sep># Reported as https://github.com/golang/go/issues/11441
# FIXME(overflows) t("gam0123", math.gamma, -171.5, 1.9316265431711902e-310)
# FIXME(overflows) t("gam0124", math.gamma, -176.5, -1.1956388629358166e-321)
# FIXME(overflows) t("gam0125", math.gamma, -177.5, 4.9406564584124654e-324)
# FIXME(overflows) t("gam0126", math.gamma, -178.5, -0.0)
# FIXME(overflows) t("gam0127", math.gamma, -179.5, 0.0)
# FIXME(overflows) t("gam0128", math.gamma, -201.0001, 0.0)
# FIXME(overflows) t("gam0129", math.gamma, -202.9999, -0.0)
# FIXME(overflows) t("gam0130", math.gamma, -1000.5, -0.0)
# FIXME(overflows) t("gam0131", math.gamma, -1000000000.3, -0.0)
# FIXME(overflows) t("gam0132", math.gamma, -4503599627370495.5, 0.0)
# inputs that cause problems for the standard reflection formula,
# thanks to loss of accuracy in 1-x
t("gam0140" math.gamma -63.349078729022985 4.1777971677761880e-88)<line_sep>t("gam0141" math.gamma -127.45117632943295 1.1831110896236810e-214)<line_sep>#
# log1p: log(1 + x), without precision loss for small x --
#
# special values
t("log1p0000" math.log1p 0.0 0.0)<line_sep>t("log1p0001" math.log1p -0.0 -0.0)<line_sep>t("log1p0002" math.log1p inf inf)<line_sep>t("log1p0003" math.log1p -inf nan ValueError)<line_sep>t("log1p0004" math.log1p nan nan)<line_sep># singularity at -1.0
t("log1p0010" math.log1p -1.0 -inf ValueError)<line_sep>t("log1p0011" math.log1p -0.9999999999999999 -36.736800569677101)<line_sep># finite values < 1.0 are invalid
t("log1p0020" math.log1p -1.0000000000000002 nan ValueError)<line_sep>t("log1p0021" math.log1p -1.1 nan ValueError)<line_sep>t("log1p0022" math.log1p -2.0 nan ValueError)<line_sep>t("log1p0023" math.log1p -1e300 nan ValueError)<line_sep># tiny x: log1p(x) ~ x
t("log1p0110" math.log1p 5e-324 5e-324)<line_sep>t("log1p0111" math.log1p 1e-320 1e-320)<line_sep>t("log1p0112" math.log1p 1e-300 1e-300)<line_sep>t("log1p0113" math.log1p 1e-150 1e-150)<line_sep>t("log1p0114" math.log1p 1e-20 1e-20)<line_sep>t("log1p0120" math.log1p -5e-324 -5e-324)<line_sep>t("log1p0121" math.log1p -1e-320 -1e-320)<line_sep>t("log1p0122" math.log1p -1e-300 -1e-300)<line_sep>t("log1p0123" math.log1p -1e-150 -1e-150)<line_sep>t("log1p0124" math.log1p -1e-20 -1e-20)<line_sep># some (mostly) random small and moderate-sized values
t("log1p0200" math.log1p -0.89156889782277482 -2.2216403106762863)<line_sep>t("log1p0201" math.log1p -0.23858496047770464 -0.27257668276980057)<line_sep>t("log1p0202" math.log1p -0.011641726191307515 -0.011710021654495657)<line_sep>t("log1p0203" math.log1p -0.0090126398571693817 -0.0090534993825007650)<line_sep>t("log1p0204" math.log1p -0.00023442805985712781 -0.00023445554240995693)<line_sep>t("log1p0205" math.log1p -1.5672870980936349e-5 -1.5672993801662046e-5)<line_sep>t("log1p0206" math.log1p -7.9650013274825295e-6 -7.9650330482740401e-6)<line_sep>t("log1p0207" math.log1p -2.5202948343227410e-7 -2.5202951519170971e-7)<line_sep>t("log1p0208" math.log1p -8.2446372820745855e-11 -8.2446372824144559e-11)<line_sep>t("log1p0209" math.log1p -8.1663670046490789e-12 -8.1663670046824230e-12)<line_sep>t("log1p0210" math.log1p 7.0351735084656292e-18 7.0351735084656292e-18)<line_sep>t("log1p0211" math.log1p 5.2732161907375226e-12 5.2732161907236188e-12)<line_sep>t("log1p0212" math.log1p 1.0000000000000000e-10 9.9999999995000007e-11)<line_sep>t("log1p0213" math.log1p 2.1401273266000197e-9 2.1401273243099470e-9)<line_sep>t("log1p0214" math.log1p 1.2668914653979560e-8 1.2668914573728861e-8)<line_sep>t("log1p0215" math.log1p 1.6250007816299069e-6 1.6249994613175672e-6)<line_sep>t("log1p0216" math.log1p 8.3740495645839399e-6 8.3740145024266269e-6)<line_sep>t("log1p0217" math.log1p 3.0000000000000001e-5 2.9999550008999799e-5)<line_sep>t("log1p0218" math.log1p 0.0070000000000000001 0.0069756137364252423)<line_sep>t("log1p0219" math.log1p 0.013026235315053002 0.012942123564008787)<line_sep>t("log1p0220" math.log1p 0.013497160797236184 0.013406885521915038)<line_sep>t("log1p0221" math.log1p 0.027625599078135284 0.027250897463483054)<line_sep>t("log1p0222" math.log1p 0.14179687245544870 0.13260322540908789)<line_sep># large values
t("log1p0300" math.log1p 1.7976931348623157e+308 709.78271289338397)<line_sep>t("log1p0301" math.log1p 1.0000000000000001e+300 690.77552789821368)<line_sep>t("log1p0302" math.log1p 1.0000000000000001e+70 161.18095650958321)<line_sep>t("log1p0303" math.log1p 10000000000.000000 23.025850930040455)<line_sep># other values transferred from testLog1p in test_math
t("log1p0400" math.log1p -0.63212055882855767 -1.0000000000000000)<line_sep>t("log1p0401" math.log1p 1.7182818284590451 1.0000000000000000)<line_sep>t("log1p0402" math.log1p 1.0000000000000000 0.69314718055994529)<line_sep>t("log1p0403" math.log1p 1.2379400392853803e+27 62.383246250395075)<line_sep>#
# expm1: exp(x) - 1, without precision loss for small x --
#
# special values
t("expm10000" math.expm1 0.0 0.0)<line_sep>t("expm10001" math.expm1 -0.0 -0.0)<line_sep>t("expm10002" math.expm1 inf inf)<line_sep>t("expm10003" math.expm1 -inf -1.0)<line_sep>t("expm10004" math.expm1 nan nan)<line_sep># expm1(x) ~ x for tiny x
t("expm10010" math.expm1 5e-324 5e-324)<line_sep>t("expm10011" math.expm1 1e-320 1e-320)<line_sep>t("expm10012" math.expm1 1e-300 1e-300)<line_sep>t("expm10013" math.expm1 1e-150 1e-150)<line_sep>t("expm10014" math.expm1 1e-20 1e-20)<line_sep>t("expm10020" math.expm1 -5e-324 -5e-324)<line_sep>t("expm10021" math.expm1 -1e-320 -1e-320)<line_sep>t("expm10022" math.expm1 -1e-300 -1e-300)<line_sep>t("expm10023" math.expm1 -1e-150 -1e-150)<line_sep>t("expm10024" math.expm1 -1e-20 -1e-20)<line_sep># moderate sized values, where direct evaluation runs into trouble
t("expm10100" math.expm1 1e-10 1.0000000000500000e-10)<line_sep>t("expm10101" math.expm1 -9.9999999999999995e-08 -9.9999995000000163e-8)<line_sep>t("expm10102" math.expm1 3.0000000000000001e-05 3.0000450004500034e-5)<line_sep>t("expm10103" math.expm1 -0.0070000000000000001 -0.0069755570667648951)<line_sep>t("expm10104" math.expm1 -0.071499208740094633 -0.069002985744820250)<line_sep>t("expm10105" math.expm1 -0.063296004180116799 -0.061334416373633009)<line_sep>t("expm10106" math.expm1 0.02390954035597756 0.024197665143819942)<line_sep>t("expm10107" math.expm1 0.085637352649044901 0.089411184580357767)<line_sep>t("expm10108" math.expm1 0.5966174947411006 0.81596588596501485)<line_sep>t("expm10109" math.expm1 0.30247206212075139 0.35319987035848677)<line_sep>t("expm10110" math.expm1 0.74574727375889516 1.1080161116737459)<line_sep>t("expm10111" math.expm1 0.97767512926555711 1.6582689207372185)<line_sep>t("expm10112" math.expm1 0.8450154566787712 1.3280137976535897)<line_sep>t("expm10113" math.expm1 -0.13979260323125264 -0.13046144381396060)<line_sep>t("expm10114" math.expm1 -0.52899322039643271 -0.41080213643695923)<line_sep>t("expm10115" math.expm1 -0.74083261478900631 -0.52328317124797097)<line_sep>t("expm10116" math.expm1 -0.93847766984546055 -0.60877704724085946)<line_sep>t("expm10117" math.expm1 10.0 22025.465794806718)<line_sep>t("expm10118" math.expm1 27.0 532048240600.79865)<line_sep>t("expm10119" math.expm1 123 2.6195173187490626e+53)<line_sep>t("expm10120" math.expm1 -12.0 -0.99999385578764666)<line_sep>t("expm10121" math.expm1 -35.100000000000001 -0.99999999999999944)<line_sep># extreme negative values
t("expm10201" math.expm1 -37.0 -0.99999999999999989)<line_sep>t("expm10200" math.expm1 -38.0 -1.0)<line_sep># FIXME(overflows) t("expm10210", math.expm1, -710.0, -1.0)
# the formula expm1(x) = 2 * sinh(x/2) * exp(x/2) doesn't work so
# well when exp(x/2) is subnormal or underflows to zero; check we're
# not using it!
# Reported as https://github.com/golang/go/issues/11442
# FIXME(overflows) t("expm10211", math.expm1, -1420.0, -1.0)
# FIXME(overflows) t("expm10212", math.expm1, -1450.0, -1.0)
# FIXME(overflows) t("expm10213", math.expm1, -1500.0, -1.0)
# FIXME(overflows) t("expm10214", math.expm1, -1e50, -1.0)
# FIXME(overflows) t("expm10215", math.expm1, -1.79e308, -1.0)
# extreme positive values
# FIXME(fails on 32 bit) t("expm10300", math.expm1, 300, 1.9424263952412558e+130)
# FIXME(fails on 32 bit) t("expm10301", math.expm1, 700, 1.0142320547350045e+304)
# the next test (expm10302) is disabled because it causes failure on
# OS X 10.4/Intel: apparently all values over 709.78 produce an
# overflow on that platform. See issue #7575.
# expm10302 expm1 709.78271289328393 -> 1.7976931346824240e+308
t("expm10303" math.expm1 709.78271289348402 inf OverflowError)<line_sep>t("expm10304" math.expm1 1000 inf OverflowError)<line_sep>t("expm10305" math.expm1 1e50 inf OverflowError)<line_sep>t("expm10306" math.expm1 1.79e308 inf OverflowError)<line_sep># weaker version of expm10302
# FIXME(fails on 32 bit) t("expm10307", math.expm1, 709.5, 1.3549863193146328e+308)
#
# log2: log to base 2 --
#
# special values
t("log20000" math.log2 0.0 -inf ValueError)<line_sep>t("log20001" math.log2 -0.0 -inf ValueError)<line_sep>t("log20002" math.log2 inf inf)<line_sep>t("log20003" math.log2 -inf nan ValueError)<line_sep>t("log20004" math.log2 nan nan)<line_sep># exact value at 1.0
t("log20010" math.log2 1.0 0.0)<line_sep># negatives
t("log20020" math.log2 -5e-324 nan ValueError)<line_sep>t("log20021" math.log2 -1.0 nan ValueError)<line_sep>t("log20022" math.log2 -1.7e-308 nan ValueError)<line_sep># exact values at powers of 2
t("log20100" math.log2 2.0 1.0)<line_sep>t("log20101" math.log2 4.0 2.0)<line_sep>t("log20102" math.log2 8.0 3.0)<line_sep>t("log20103" math.log2 16.0 4.0)<line_sep>t("log20104" math.log2 32.0 5.0)<line_sep>t("log20105" math.log2 64.0 6.0)<line_sep>t("log20106" math.log2 128.0 7.0)<line_sep>t("log20107" math.log2 256.0 8.0)<line_sep>t("log20108" math.log2 512.0 9.0)<line_sep>t("log20109" math.log2 1024.0 10.0)<line_sep>t("log20110" math.log2 2048.0 11.0)<line_sep>t("log20200" math.log2 0.5 -1.0)<line_sep>t("log20201" math.log2 0.25 -2.0)<line_sep>t("log20202" math.log2 0.125 -3.0)<line_sep>t("log20203" math.log2 0.0625 -4.0)<line_sep># values close to 1.0
# FIXME(inaccurate) t("log20300", math.log2, 1.0000000000000002, 3.2034265038149171e-16)
# FIXME(inaccurate) t("log20301", math.log2, 1.0000000001, 1.4426951601859516e-10)
# FIXME(inaccurate) t("log20302", math.log2, 1.00001, 1.4426878274712997e-5)
t("log20310" math.log2 0.9999999999999999 -1.6017132519074588e-16)<line_sep>t("log20311" math.log2 0.9999999999 -1.4426951603302210e-10)<line_sep>t("log20312" math.log2 0.99999 -1.4427022544056922e-5)<line_sep># tiny values
t("log20400" math.log2 5e-324 -1074.0)<line_sep>t("log20401" math.log2 1e-323 -1073.0)<line_sep>t("log20402" math.log2 1.5e-323 -1072.4150374992789)<line_sep>t("log20403" math.log2 2e-323 -1072.0)<line_sep>t("log20410" math.log2 1e-308 -1023.1538532253076)<line_sep>t("log20411" math.log2 2.2250738585072014e-308 -1022.0)<line_sep>t("log20412" math.log2 4.4501477170144028e-308 -1021.0)<line_sep>t("log20413" math.log2 1e-307 -1019.8319251304202)<line_sep># huge values
t("log20500" math.log2 1.7976931348623157e+308 1024.0)<line_sep>t("log20501" math.log2 1.7e+308 1023.9193879716706)<line_sep>t("log20502" math.log2 8.9884656743115795e+307 1023.0)<line_sep># selection of random values
t("log20600" math.log2 -7.2174324841039838e+289 nan ValueError)<line_sep>t("log20601" math.log2 -2.861319734089617e+265 nan ValueError)<line_sep>t("log20602" math.log2 -4.3507646894008962e+257 nan ValueError)<line_sep>t("log20603" math.log2 -6.6717265307520224e+234 nan ValueError)<line_sep>t("log20604" math.log2 -3.9118023786619294e+229 nan ValueError)<line_sep>t("log20605" math.log2 -1.5478221302505161e+206 nan ValueError)<line_sep>t("log20606" math.log2 -1.4380485131364602e+200 nan ValueError)<line_sep>t("log20607" math.log2 -3.7235198730382645e+185 nan ValueError)<line_sep>t("log20608" math.log2 -1.0472242235095724e+184 nan ValueError)<line_sep>t("log20609" math.log2 -5.0141781956163884e+160 nan ValueError)<line_sep>t("log20610" math.log2 -2.1157958031160324e+124 nan ValueError)<line_sep>t("log20611" math.log2 -7.9677558612567718e+90 nan ValueError)<line_sep>t("log20612" math.log2 -5.5553906194063732e+45 nan ValueError)<line_sep>t("log20613" math.log2 -16573900952607.953 nan ValueError)<line_sep>t("log20614" math.log2 -37198371019.888618 nan ValueError)<line_sep>t("log20615" math.log2 -6.0727115121422674e-32 nan ValueError)<line_sep>t("log20616" math.log2 -2.5406841656526057e-38 nan ValueError)<line_sep>t("log20617" math.log2 -4.9056766703267657e-43 nan ValueError)<line_sep>t("log20618" math.log2 -2.1646786075228305e-71 nan ValueError)<line_sep>t("log20619" math.log2 -2.470826790488573e-78 nan ValueError)<line_sep>t("log20620" math.log2 -3.8661709303489064e-165 nan ValueError)<line_sep>t("log20621" math.log2 -1.0516496976649986e-182 nan ValueError)<line_sep>t("log20622" math.log2 -1.5935458614317996e-255 nan ValueError)<line_sep>t("log20623" math.log2 -2.8750977267336654e-293 nan ValueError)<line_sep>t("log20624" math.log2 -7.6079466794732585e-296 nan ValueError)<line_sep>t("log20625" math.log2 3.2073253539988545e-307 -1018.1505544209213)<line_sep>t("log20626" math.log2 1.674937885472249e-244 -809.80634755783126)<line_sep>t("log20627" math.log2 
1.0911259044931283e-214 -710.76679472274213)<line_sep>t("log20628" math.log2 2.0275372624809709e-154 -510.55719818383272)<line_sep>t("log20629" math.log2 7.3926087369631841e-115 -379.13564735312292)<line_sep>t("log20630" math.log2 1.3480198206342423e-86 -285.25497445094436)<line_sep>t("log20631" math.log2 8.9927384655719947e-83 -272.55127136401637)<line_sep>t("log20632" math.log2 3.1452398713597487e-60 -197.66251564496875)<line_sep>t("log20633" math.log2 7.0706573215457351e-55 -179.88420087782217)<line_sep>t("log20634" math.log2 3.1258285390731669e-49 -161.13023800505653)<line_sep>t("log20635" math.log2 8.2253046627829942e-41 -133.15898277355879)<line_sep>t("log20636" math.log2 7.8691367397519897e+49 165.75068202732419)<line_sep>t("log20637" math.log2 2.9920561983925013e+64 214.18453534573757)<line_sep>t("log20638" math.log2 4.7827254553946841e+77 258.04629628445673)<line_sep>t("log20639" math.log2 3.1903566496481868e+105 350.47616767491166)<line_sep>t("log20640" math.log2 5.6195082449502419e+113 377.86831861008250)<line_sep>t("log20641" math.log2 9.9625658250651047e+125 418.55752921228753)<line_sep>t("log20642" math.log2 2.7358945220961532e+145 483.13158636923413)<line_sep>t("log20643" math.log2 2.785842387926931e+174 579.49360214860280)<line_sep>t("log20644" math.log2 2.4169172507252751e+193 642.40529039289652)<line_sep>t("log20645" math.log2 3.1689091206395632e+205 682.65924573798395)<line_sep>t("log20646" math.log2 2.535995592365391e+208 692.30359597460460)<line_sep>t("log20647" math.log2 6.2011236566089916e+233 776.64177576730913)<line_sep>t("log20648" math.log2 2.1843274820677632e+253 841.57499717289647)<line_sep>t("log20649" math.log2 8.7493931063474791e+297 989.74182713073981)<line_sep>doc="finished"<line_sep> |
<import_from_stmt>dlib vector vectors vectorss dot<try_stmt><block_start><import_stmt>cPickle<as>pickle# Use cPickle on Python 2.7
<block_end><except_stmt>ImportError<block_start><import_stmt>pickle<block_end><import_from_stmt>pytest raises<def_stmt>test_vector_empty_init <block_start>v=vector()<assert_stmt>len(v)<eq>0<assert_stmt>v.shape<eq>(0 1)<assert_stmt>str(v)<eq>""<assert_stmt>repr(v)<eq>"dlib.vector([])"<block_end><def_stmt>test_vector_init_with_number <block_start>v=vector(3)<assert_stmt>len(v)<eq>3<assert_stmt>v.shape<eq>(3 1)<assert_stmt>str(v)<eq>"0\n0\n0"<assert_stmt>repr(v)<eq>"dlib.vector([0, 0, 0])"<block_end><def_stmt>test_vector_set_size <block_start>v=vector(3)<line_sep>v.set_size(0)<assert_stmt>len(v)<eq>0<assert_stmt>v.shape<eq>(0 1)<line_sep>v.resize(10)<assert_stmt>len(v)<eq>10<assert_stmt>v.shape<eq>(10 1)<for_stmt>i range(10)<block_start><assert_stmt>v[i]<eq>0<block_end><block_end><def_stmt>test_vector_init_with_list <block_start>v=vector([1 2 3])<assert_stmt>len(v)<eq>3<assert_stmt>v.shape<eq>(3 1)<assert_stmt>str(v)<eq>"1\n2\n3"<assert_stmt>repr(v)<eq>"dlib.vector([1, 2, 3])"<block_end><def_stmt>test_vector_getitem <block_start>v=vector([1 2 3])<assert_stmt>v[0]<eq>1<assert_stmt>v[-1]<eq>3<assert_stmt>v[1]<eq>v[-2]<block_end><def_stmt>test_vector_slice <block_start>v=vector([1 2 3 4 5])<line_sep>v_slice=v[1:4]<assert_stmt>len(v_slice)<eq>3<for_stmt>idx,val enumerate([2 3 4])<block_start><assert_stmt>v_slice[idx]<eq>val<block_end>v_slice=v[-3:-1]<assert_stmt>len(v_slice)<eq>2<for_stmt>idx,val enumerate([3 4])<block_start><assert_stmt>v_slice[idx]<eq>val<block_end>v_slice=v[1:-2]<assert_stmt>len(v_slice)<eq>2<for_stmt>idx,val enumerate([2 3])<block_start><assert_stmt>v_slice[idx]<eq>val<block_end><block_end><def_stmt>test_vector_invalid_getitem <block_start>v=vector([1 2 3])<with_stmt>raises(IndexError)<block_start>v[-4]<block_end><with_stmt>raises(IndexError)<block_start>v[3]<block_end><block_end><def_stmt>test_vector_init_with_negative_number <block_start><with_stmt>raises(Exception)<block_start>vector(-3)<block_end><block_end><def_stmt>test_dot 
<block_start>v1=vector([1 0])<line_sep>v2=vector([0 1])<line_sep>v3=vector([-1 0])<assert_stmt>dot(v1 v1)<eq>1<assert_stmt>dot(v1 v2)<eq>0<assert_stmt>dot(v1 v3)<eq>-1<block_end><def_stmt>test_vector_serialization <block_start>v=vector([1 2 3])<line_sep>ser=pickle.dumps(v 2)<line_sep>deser=pickle.loads(ser)<assert_stmt>str(v)<eq>str(deser)<block_end><def_stmt>generate_test_vectors <block_start>vs=vectors()<line_sep>vs.append(vector([0 1 2]))<line_sep>vs.append(vector([3 4 5]))<line_sep>vs.append(vector([6 7 8]))<assert_stmt>len(vs)<eq>3<line_sep><return>vs<block_end><def_stmt>generate_test_vectorss <block_start>vss=vectorss()<line_sep>vss.append(generate_test_vectors())<line_sep>vss.append(generate_test_vectors())<line_sep>vss.append(generate_test_vectors())<assert_stmt>len(vss)<eq>3<line_sep><return>vss<block_end><def_stmt>test_vectors_serialization <block_start>vs=generate_test_vectors()<line_sep>ser=pickle.dumps(vs 2)<line_sep>deser=pickle.loads(ser)<assert_stmt>vs<eq>deser<block_end><def_stmt>test_vectors_clear <block_start>vs=generate_test_vectors()<line_sep>vs.clear()<assert_stmt>len(vs)<eq>0<block_end><def_stmt>test_vectors_resize <block_start>vs=vectors()<line_sep>vs.resize(100)<assert_stmt>len(vs)<eq>100<for_stmt>i range(100)<block_start><assert_stmt>len(vs[i])<eq>0<block_end><block_end><def_stmt>test_vectors_extend <block_start>vs=vectors()<line_sep>vs.extend([vector([1 2 3]) vector([4 5 6])])<assert_stmt>len(vs)<eq>2<block_end><def_stmt>test_vectorss_serialization <block_start>vss=generate_test_vectorss()<line_sep>ser=pickle.dumps(vss 2)<line_sep>deser=pickle.loads(ser)<assert_stmt>vss<eq>deser<block_end><def_stmt>test_vectorss_clear <block_start>vss=generate_test_vectorss()<line_sep>vss.clear()<assert_stmt>len(vss)<eq>0<block_end><def_stmt>test_vectorss_resize <block_start>vss=vectorss()<line_sep>vss.resize(100)<assert_stmt>len(vss)<eq>100<for_stmt>i range(100)<block_start><assert_stmt>len(vss[i])<eq>0<block_end><block_end><def_stmt>test_vectorss_extend 
<block_start>vss=vectorss()<line_sep>vss.extend([generate_test_vectors() generate_test_vectors()])<assert_stmt>len(vss)<eq>2<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>source=cms.Source("PoolSource" fileNames=cms.untracked.vstring() # skipBadFiles = cms.untracked.bool(True),
inputCommands=cms.untracked.vstring("keep *" "drop *_MEtoEDMConverter_*_*"))<line_sep> |
###
# A script to convert the Services-consumable feeSchedules.json
# into the "typed" format used by the public pricing calculator.
###
<import_stmt>json<line_sep>providers=['nodedata' 'networkdata' 'servicedata']<line_sep>typed_schedules={}<with_stmt>open('hedera-node/src/main/resources/feeSchedules.json' 'r')<as>fin<block_start>cur_and_next_schedules=json.load(fin)<line_sep>schedules=cur_and_next_schedules[0]['currentFeeSchedule']<for_stmt>tfs schedules<block_start><if_stmt>'expiryTime'<in>tfs<block_start><break><block_end>tfs=tfs['transactionFeeSchedule']<line_sep>function=tfs['hederaFunctionality']<line_sep>prices_list=tfs['fees']<line_sep>prices_by_type={}<for_stmt>typed_prices prices_list<block_start>this_type=typed_prices.get('subType' 'DEFAULT')<line_sep>this_type_prices={}<for_stmt>provider providers<block_start>this_type_prices[provider]=typed_prices[provider]<block_end>prices_by_type[this_type]=this_type_prices<block_end>typed_schedules[function]=prices_by_type<block_end><block_end><with_stmt>open('typedFeeSchedules.json' 'w')<as>fout<block_start>json.dump(typed_schedules fout indent=2)<block_end> |
<import_from_stmt>.data_process EEDataProcessor REDataProcessor ERDataProcessor CTCDataProcessor CDNDataProcessor STSDataProcessor QQRDataProcessor QICDataProcessor QTRDataProcessor<import_from_stmt>.dataset EEDataset REDataset ERDataset CTCDataset CDNDataset STSDataset QQRDataset QICDataset QTRDataset<line_sep>__all__=['EEDataProcessor' 'EEDataset' 'REDataProcessor' 'REDataset' 'ERDataProcessor' 'ERDataset' 'CDNDataProcessor' 'CDNDataset' 'CTCDataProcessor' 'CTCDataset' 'STSDataProcessor' 'STSDataset' 'QQRDataProcessor' 'QQRDataset' 'QICDataProcessor' 'QICDataset' 'QTRDataProcessor' 'QTRDataset']<line_sep> |
<import_stmt>bpy<import_from_stmt>.vrm0.property_group Vrm0PropertyGroup<class_stmt>VrmAddonArmatureExtensionPropertyGroup(bpy.types.PropertyGroup)# type: ignore[misc]
<block_start>addon_version:bpy.props.IntVectorProperty(# type: ignore[valid-type]
size=3# noqa: F722
)<line_sep>vrm0:bpy.props.PointerProperty(# type: ignore[valid-type]
name="VRM 0.x" type=Vrm0PropertyGroup# noqa: F722
)<line_sep>armature_data_name:bpy.props.StringProperty()<block_end># type: ignore[valid-type]
|
<import_from_stmt>copy copy<import_from_stmt>io BytesIO<import_from_stmt>PIL Image<import_from_stmt>django.core.files.uploadedfile InMemoryUploadedFile<import_from_stmt>django.urls reverse<import_from_stmt>service_catalog.models Service<import_from_stmt>tests.test_service_catalog.base BaseTest<class_stmt>ServiceCreateTestCase(BaseTest)<block_start><def_stmt>setUp self<block_start>super(ServiceCreateTestCase self).setUp()<line_sep>self.url=reverse('service_catalog:create_service')<block_end><def_stmt>test_create_service self<block_start>data={"name":"new_service" "description":"a new service" "job_template":self.job_template_test.id "billing":"defined" "billing_group_id":"" "billing_group_is_shown":"on"}<line_sep>response=self.client.get(self.url)<line_sep>self.assertEqual(200 response.status_code)<line_sep>number_service_before=copy(Service.objects.all().count())<line_sep>response=self.client.post(self.url data=data)<line_sep>self.assertEqual(302 response.status_code)<line_sep>self.assertEqual(number_service_before+1 Service.objects.all().count())<block_end><def_stmt>test_create_service_with_image self<block_start>im=Image.new(mode='RGB' size=(200 200))# create a new image using PIL
im_io=BytesIO()# a BytesIO object for saving image
im.save(im_io 'JPEG')# save the image to im_io
im_io.seek(0)# seek to the beginning
image=InMemoryUploadedFile(im_io <none> 'random-name.jpg' 'image/jpeg' len(im_io.getvalue()) <none>)<line_sep>data={"name":"new_service_with_image" "description":"a new service" "job_template":self.job_template_test.id "billing":"defined" "billing_group_id":"" "billing_group_is_shown":"on" "image":image}<line_sep>number_service_before=Service.objects.all().count()<line_sep>response=self.client.post(self.url data=data format="multipart")<line_sep>self.assertEqual(302 response.status_code)<line_sep>self.assertEqual(number_service_before+1 Service.objects.all().count())<line_sep>new_service_with_image=Service.objects.get(name="new_service_with_image")<try_stmt><block_start>self.assertIsNotNone(new_service_with_image.image.file)<block_end><except_stmt>ValueError<block_start>self.fail("Image not set")<block_end># cleanup image after the test
new_service_with_image.image.delete()<block_end><block_end> |
<import_stmt>conftest# Add root path to sys.path
<import_stmt>os<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>PathPlanning.SpiralSpanningTreeCPP spiral_spanning_tree_coverage_path_planner<line_sep>spiral_spanning_tree_coverage_path_planner.do_animation=<true><def_stmt>spiral_stc_cpp img start<block_start>num_free=0<for_stmt>i range(img.shape[0])<block_start><for_stmt>j range(img.shape[1])<block_start>num_free<augadd>img[i][j]<block_end><block_end>STC_planner=spiral_spanning_tree_coverage_path_planner.SpiralSpanningTreeCoveragePlanner(img)<line_sep>edge,route,path=STC_planner.plan(start)<line_sep>covered_nodes=set()<for_stmt>p,q edge<block_start>covered_nodes.add(p)<line_sep>covered_nodes.add(q)<block_end># assert complete coverage
<assert_stmt>len(covered_nodes)<eq>num_free/4<block_end><def_stmt>test_spiral_stc_cpp_1 <block_start>img_dir=os.path.dirname(os.path.abspath(__file__))+"/../PathPlanning/SpiralSpanningTreeCPP"<line_sep>img=plt.imread(os.path.join(img_dir 'map' 'test.png'))<line_sep>start=(0 0)<line_sep>spiral_stc_cpp(img start)<block_end><def_stmt>test_spiral_stc_cpp_2 <block_start>img_dir=os.path.dirname(os.path.abspath(__file__))+"/../PathPlanning/SpiralSpanningTreeCPP"<line_sep>img=plt.imread(os.path.join(img_dir 'map' 'test_2.png'))<line_sep>start=(10 0)<line_sep>spiral_stc_cpp(img start)<block_end><def_stmt>test_spiral_stc_cpp_3 <block_start>img_dir=os.path.dirname(os.path.abspath(__file__))+"/../PathPlanning/SpiralSpanningTreeCPP"<line_sep>img=plt.imread(os.path.join(img_dir 'map' 'test_3.png'))<line_sep>start=(0 0)<line_sep>spiral_stc_cpp(img start)<block_end><if_stmt>__name__<eq>'__main__'<block_start>conftest.run_this_test(__file__)<block_end> |
<import_stmt>sys<import_stmt>json<import_stmt>subprocess<import_stmt>sys<import_stmt>os<import_from_stmt>graphviz Digraph<import_from_stmt>graphviz Graph<class_stmt>ConnectionsGraph(object)<block_start><def_stmt>draw self connections_json output_folder relsToHide#print(connections_json)
<block_start>cmd="ls -ltr /root/"<line_sep>out=subprocess.Popen(cmd stdout=subprocess.PIPE stderr=subprocess.PIPE shell=<true>).communicate()[0]<line_sep>#print(out)
fp=open(output_folder+"/"+connections_json "r")<line_sep>json_data=fp.read()<line_sep>json_output=json.loads(json_data)<line_sep>#print(json_output)
nodemap={}<for_stmt>n json_output<block_start>level=n['Level']<if_stmt>level<in>nodemap.keys()<block_start>nodelist=nodemap[level]<block_end><else_stmt><block_start>nodelist=[]<block_end>nodelist.append(n)<line_sep>nodemap[level]=nodelist<block_end>#print(nodemap)
opformat='png'<line_sep>dot=Graph(comment='Connections Graph' format=opformat)<line_sep># dot.node('A', 'King Shivaji')
# dot.node('B', 'Sir Bedevere the Wise')
# dot.node('L', 'Sir Lancelot the Brave')
relsToHideList1=relsToHide.split(",")<line_sep>relsToHideList=[]<for_stmt>rel relsToHideList1<block_start>relsToHideList.append(rel.strip())<block_end>#print(relsToHideList)
# Create Nodes
<for_stmt>level,nodelist nodemap.items()<block_start><for_stmt>n nodelist<block_start>fqnodename=n['Kind']+" "+n['Name']<line_sep>fqpeername=n['PeerKind']+" "+n['PeerName']<line_sep>#print(fqnodename + " " + fqpeername)
<if_stmt>n['Kind']<eq>'Pod'<block_start>dot.node(fqnodename fqnodename shape='box' style='filled' color='lightcyan1')<block_end><else_stmt><block_start>dot.node(fqnodename fqnodename shape='box' style='filled' color='snow2')<block_end><if_stmt>level<g>0<block_start>color='gray0'<line_sep>relationshipType=n['RelationType']<line_sep>relationshipDetails=n['RelationDetails']<line_sep>relationInfo=relationshipType<if_stmt>relationshipDetails<ne>''<and>relationshipType<not><in>relsToHideList<block_start>relationInfo=relationInfo+" ("+relationshipDetails+")"<block_end><if_stmt>relationshipType<eq>'specproperty'<block_start>color='crimson'<block_end><if_stmt>relationshipType<eq>'label'<block_start>color='darkgreen'<block_end><if_stmt>relationshipType<eq>'envvariable'<block_start>color='gold4'<block_end><if_stmt>relationshipType<eq>'annotation'<block_start>color='indigo'<block_end><if_stmt>relationshipType<eq>'owner reference'<block_start>color='blue'<block_end>dot.edge(fqpeername fqnodename color=color label=relationInfo)<block_end><block_end><block_end># Create edges
#dot.edges(['AB', 'AL'])
#dot.edge('B', 'L', constraint='false')
#print(dot.source)
filename=connections_json+".gv"<line_sep>rendered_file_path=dot.render('/root/'+filename view=<false>)<line_sep>#print("FILENAME:" + filename)
#print("Rendered file path:" + rendered_file_path)
#print("Output available in " + filename + "." + opformat)
#fp1 = open(output_folder + "/abc.txt", "w")
#fp1.write(connections_json)
#fp1.close()
<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>graph=ConnectionsGraph()<line_sep>#print("Inside connections.py")
connections_json=sys.argv[1]<line_sep>output_folder=sys.argv[2]<if_stmt>len(sys.argv)<eq>4<block_start>relsToHide=sys.argv[3]<block_end><else_stmt><block_start>relsToHide=""<block_end>#print("Connections_json:"+ connections_json)
#print("Output folder:" + output_folder)
#print(relsToHide)
graph.draw(connections_json output_folder relsToHide)<block_end> |
<import_from_stmt>bs4 BeautifulSoup<import_stmt>time<import_from_stmt>kik_unofficial.datatypes.xmpp.base_elements XMPPElement XMPPResponse<class_stmt>Struct<block_start><def_stmt>__init__ self **entries<block_start>self.__dict__.update(entries)<block_end><block_end><class_stmt>OutgoingAcknowledgement(XMPPElement)<block_start>"""
Represents an outgoing acknowledgement for a message ID
"""<def_stmt>__init__ self sender_jid is_receipt ack_id group_jid<block_start>super().__init__()<line_sep>self.sender_jid=sender_jid<line_sep>self.group_jid=group_jid<line_sep>self.is_receipt=is_receipt<line_sep>self.ack_id=ack_id<block_end><def_stmt>serialize self<block_start>timestamp=str(int(round(time.time()<times>1000)))<line_sep>user_ack_data=('<sender jid="{}">'<concat>'<ack-id receipt="{}">{}</ack-id>'<concat>'</sender>').format(self.sender_jid str(self.is_receipt).lower() self.ack_id)<line_sep>group_ack_data=('<sender jid="{}" g="{}">'<concat>'<ack-id receipt="{}">{}</ack-id>'<concat>'</sender>').format(self.sender_jid self.group_jid str(self.is_receipt).lower() self.ack_id)<line_sep>data=('<iq type="set" id="{}" cts="{}">'<concat>'<query xmlns="kik:iq:QoS">'<concat>'<msg-acks>'<concat>'{}'<concat>'</msg-acks>'<concat>'<history attach="false" />'<concat>'</query>'<concat>'</iq>').format(self.message_id timestamp user_ack_data<if>self.group_jid<ne><none><else>group_ack_data)<line_sep><return>data.encode()<block_end><block_end><class_stmt>OutgoingHistoryRequest(XMPPElement)<block_start>"""
Represents an outgoing request for the account's messaging history
"""<def_stmt>__init__ self<block_start>super().__init__()<block_end><def_stmt>serialize self<block_start>timestamp=str(int(round(time.time()<times>1000)))<line_sep>data=('<iq type="set" id="{}" cts="{}">'<concat>'<query xmlns="kik:iq:QoS">'<concat>'<msg-acks />'<concat>'<history attach="true" />'<concat>'</query>'<concat>'</iq>').format(self.message_id timestamp )<line_sep><return>data.encode()<block_end><block_end><class_stmt>HistoryResponse(XMPPResponse)<block_start>"""
Represents a Kik messaging history response.
"""<def_stmt>__init__ self data:BeautifulSoup<block_start>super().__init__(data)<line_sep>self.id=data["id"]<if_stmt>data.query.history<block_start>self.more=data.query.history.has_attr("more")<line_sep>self.from_jid=data["from"]<line_sep>self.messages=[]<for_stmt>message data.query.history<block_start><if_stmt>message["type"]<eq>"receipt"<block_start>args={'type':'receipt' 'from_jid':message["from"] 'receipt_type':message.receipt["type"] 'id':message.receipt.msgid["id"]}<line_sep>self.messages.append(Struct(**args))<block_end><elif_stmt>message["type"]<eq>"chat"<block_start>args={'type':'chat' 'id':message["id"] 'from_jid':message["from"] 'body':message.body.text<if>message.body<else><none> 'preview':message.preview.text<if>message.preview<else><none> 'timestamp':message.kik["timestamp"]}<line_sep>self.messages.append(Struct(**args))<block_end><elif_stmt>message["type"]<eq>"groupchat"<block_start>args={'type':'groupchat' 'id':message["id"] 'from_jid':message["from"] 'body':message.body.text<if>message.body<else><none> 'preview':message.preview.text<if>message.preview<else><none> 'timestamp':message.kik["timestamp"] 'group_jid':message.g["jid"]}<line_sep>self.messages.append(Struct(**args))<block_end><block_end><block_end><block_end><block_end> |
# Copyright (c) OpenMMLab. All rights reserved.
<import_stmt>torch<import_from_stmt>..builder ROTATED_DETECTORS<import_from_stmt>.two_stage RotatedTwoStageDetector<line_sep>@ROTATED_DETECTORS.register_module()<class_stmt>OrientedRCNN(RotatedTwoStageDetector)<block_start>"""Implementation of `Oriented R-CNN for Object Detection.`__
__ https://openaccess.thecvf.com/content/ICCV2021/papers/Xie_Oriented_R-CNN_for_Object_Detection_ICCV_2021_paper.pdf # noqa: E501, E261.
"""<def_stmt>__init__ self backbone rpn_head roi_head train_cfg test_cfg neck=<none> pretrained=<none> init_cfg=<none><block_start>super(OrientedRCNN self).__init__(backbone=backbone neck=neck rpn_head=rpn_head roi_head=roi_head train_cfg=train_cfg test_cfg=test_cfg pretrained=pretrained init_cfg=init_cfg)<block_end><def_stmt>forward_dummy self img<block_start>"""Used for computing network flops.
See `mmrotate/tools/analysis_tools/get_flops.py`
"""<line_sep>outs=()<line_sep># backbone
x=self.extract_feat(img)<line_sep># rpn
<if_stmt>self.with_rpn<block_start>rpn_outs=self.rpn_head(x)<line_sep>outs=outs+(rpn_outs )<block_end>proposals=torch.randn(1000 6).to(img.device)<line_sep># roi_head
roi_outs=self.roi_head.forward_dummy(x proposals)<line_sep>outs=outs+(roi_outs )<line_sep><return>outs<block_end><block_end> |
"""
This example shows how to interact with the Determined PyTorch Lightning Adapter
interface to build a basic MNIST network. LightningAdapter utilizes the provided
LightningModule with Determined's PyTorch control loop.
"""<import_from_stmt>determined.pytorch PyTorchTrialContext DataLoader<import_from_stmt>determined.pytorch.lightning LightningAdapter<import_stmt>data<import_stmt>mnist<class_stmt>MNISTTrial(LightningAdapter)<block_start><def_stmt>__init__ self context:PyTorchTrialContext *args **kwargs<arrow><none><block_start>lm=mnist.LitMNIST(hidden_size=context.get_hparam('hidden_size') learning_rate=context.get_hparam('learning_rate') )<line_sep>data_dir=f"/tmp/data-rank{context.distributed.get_rank()}"<line_sep>self.dm=data.MNISTDataModule(data_url=context.get_data_config()["url"] data_dir=data_dir batch_size=context.get_per_slot_batch_size() )<line_sep>super().__init__(context lightning_module=lm *args **kwargs)<line_sep>self.dm.prepare_data()<block_end><def_stmt>build_training_data_loader self<arrow>DataLoader<block_start>self.dm.setup()<line_sep>dl=self.dm.train_dataloader()<line_sep><return>DataLoader(dl.dataset batch_size=dl.batch_size num_workers=dl.num_workers)<block_end><def_stmt>build_validation_data_loader self<arrow>DataLoader<block_start>self.dm.setup()<line_sep>dl=self.dm.val_dataloader()<line_sep><return>DataLoader(dl.dataset batch_size=dl.batch_size num_workers=dl.num_workers)<block_end><block_end> |
<import_stmt>mxnet<as>mx<def_stmt>get_vgg16_gen <block_start>relu_feature=mx.symbol.Variable(name="relu_feature")<line_sep>box_predict=mx.symbol.Variable(name="box_predict")<line_sep>ground_truth=mx.symbol.Variable(name="ground_truth")<line_sep>bbox_label=mx.symbol.Variable(name="bbox_label")<line_sep>ell_label=mx.symbol.GenEllLabel(*[box_predict bbox_label ground_truth] spatial_scale=0.5 name="ell_label")<line_sep># roi warping
roi_warping=mx.symbol.ROIWarping(*[relu_feature box_predict ground_truth] warped_shape=(28 28) spatial_scale=0.5 name="roi_warping")<line_sep>roi_warping_pool=mx.symbol.Pooling(data=roi_warping pool_type="max" kernel=(4 4) stride=(4 4) name="roi_warping_pool")<line_sep>roi_warping_flatten=mx.symbol.Flatten(data=roi_warping_pool)<line_sep>loss_all=mx.symbol.Group([roi_warping_flatten ell_label])<line_sep><return>loss_all<block_end> |
<import_from_stmt>space *<import_stmt>base<import_stmt>bon<import_stmt>evaluator<import_stmt>core<import_stmt>os<import_stmt>pathobj<import_stmt>stdlib<import_stmt>sys<class_stmt>ModuleScope(Object)<block_start><def_stmt>__init__ self local parent=<none> frozen=<false><block_start>self.cache={}# maps absolute path -> module cache entry
self.local=local<line_sep>self.parent=parent<line_sep>self.frozen=frozen# if frozen, the scope relies on cache.
self.compile_file=null<line_sep>self.base_module=<none><block_end><def_stmt>setcache self m_path module mtime<block_start>m=ModuleCache(m_path module mtime)<line_sep>self.cache[pathobj.stringify(m_path)]=m<line_sep><return>m<block_end><def_stmt>getcache self m_path<block_start>s=pathobj.stringify(m_path)<try_stmt><block_start><return>self.cache[s]<block_end><except_stmt>KeyError<as>k<block_start><return><none><block_end><block_end><def_stmt>getattr self name<block_start><if_stmt>name<eq>u"parent"<block_start><return>self.parent<if>self.parent<is><not><none><else>null<block_end><if_stmt>name<eq>u"local"<block_start><return>self.local<block_end><if_stmt>name<eq>u"frozen"<block_start><return>boolean(self.frozen)<block_end><if_stmt>name<eq>u"base_module"<block_start><if_stmt>self.base_module<is><none><block_start><return>null<block_end><return>self.base_module<block_end><if_stmt>name<eq>u"compile_file"<block_start><return>self.compile_file<block_end><return>Object.getattr(self name)<block_end><def_stmt>setattr self name value<block_start><if_stmt>name<eq>u"base_module"<block_start><if_stmt>len(self.cache)<g>0<block_start><raise>unwind(LTypeError(u"Cannot change base_module in active module scope"))<block_end>self.base_module=cast_n(value Module u"ModuleScope.base_module")<line_sep><return>null<block_end><return>Object.setattr(self name value)<block_end><def_stmt>listattr self<block_start>listing=Object.listattr(self)<line_sep>listing.extend([String(u"parent") String(u"local") String(u"frozen") String(u"base_module") String(u"compile_file") ])<line_sep><return>listing<block_end><def_stmt>getitem self item<block_start><if_stmt>isinstance(item String)<block_start><if_stmt>item.string<in>self.cache<block_start><return>self.cache[item.string]<block_end><block_end><raise>OldError(u"%s not in module scope"%item.repr())<block_end><def_stmt>iter self<block_start><return>ScopeIterator(self.cache.iterkeys())<block_end><block_end>#
@ModuleScope.instantiator2(signature(pathobj.Path ModuleScope Object optional=2))<def_stmt>_ local parent options<block_start>scope=ModuleScope(local parent)<if_stmt>options<block_start>key=String(u"compile_file")<if_stmt>options.contains(key)<block_start>scope.compile_file=options.getitem(key)<block_end><block_end><return>scope<block_end><class_stmt>ScopeIterator(Object)<block_start>_immutable_fields_=['iterator']<def_stmt>__init__ self iterator<block_start>self.iterator=iterator<block_end><def_stmt>iter self<block_start><return>self<block_end><block_end>@ScopeIterator.builtin_method@signature(ScopeIterator)<def_stmt>next self<block_start><return>String(self.iterator.next())<block_end><class_stmt>ModuleCache(Object)<block_start><def_stmt>__init__ self path module mtime<block_start>self.path=path<line_sep>self.module=module<line_sep>self.mtime=mtime<block_end><def_stmt>getattr self name<block_start><if_stmt>name<eq>u"path"<block_start><return>self.path<block_end><if_stmt>name<eq>u"module"<block_start><return>self.module<block_end><if_stmt>name<eq>u"mtime"<block_start><return>Float(self.mtime)<block_end><return>Object.getattr(self name)<block_end><def_stmt>listattr self<block_start>listing=Object.listattr(self)<line_sep>listing.extend([String(u"path") String(u"module") String(u"mtime") ])<line_sep><return>listing<block_end><block_end>@ModuleCache.builtin_method@signature(ModuleCache)<def_stmt>get_moduleinfo self<block_start><return>moduleinfo(self.path)<block_end>root_module=ModuleScope(pathobj.parse(u"builtin:/") frozen=<true>)<line_sep>root_module.base_module=base.module<for_stmt>py_module stdlib.import_all_modules()<block_start><assert_stmt>isinstance(py_module.module Module) "dependency cycle somewhere"<line_sep>p=pathobj.concat(root_module.local pathobj.parse(py_module.module.name))<line_sep>py_module.module.setattr_force(u"doc" pathobj.parse(u"doc:/"+py_module.module.name))<line_sep>root_module.setcache(p py_module.module 
0.0)<import_stmt>naming<line_sep>naming.breath_first_search(py_module.module 1.0)<block_end>base.module.setattr_force(u"doc" pathobj.parse(u"doc:/base"))<line_sep>root_module.setcache(pathobj.parse(u"builtin:/"+base.module.name) base.module 0.0)<line_sep># the importer poststage for base module will take place in
# entry generation at runtime/main.py because there are so many
# items added into the base module all around the system.
<import_stmt>main<def_stmt>start main_script<block_start><assert_stmt>isinstance(main_script String)<line_sep>lib_scope=ModuleScope(pathobj.concat(core.get_ec().lever_path pathobj.parse(u"lib")) root_module)<line_sep>lib_scope.compile_file=LazyLoader(lib_scope)<line_sep>main_path=pathobj.os_parse(resuffix(main_script.string u".lc" u""))<line_sep>mi=moduleinfo(pathobj.abspath(main_path))<line_sep>scope=ModuleScope(mi.directory lib_scope)<line_sep>this=Module(mi.name.string {} extends=base.module)# base.module
<if_stmt><not>(mi.lc_present<or>mi.cb_present)<block_start><raise>OldError(u"main module not present")<block_end>scope.setcache(main_path this max(mi.lc_mtime mi.cb_mtime))<line_sep>mi.default_config(this scope)<line_sep>mi.loadit(this scope)<line_sep><return>this<block_end><class_stmt>LazyLoader(Object)<block_start><def_stmt>__init__ self lib_scope<block_start>self.lib_scope=lib_scope<block_end><def_stmt>call self argv<block_start>lib_scope=self.lib_scope<line_sep>mi=moduleinfo(pathobj.concat(lib_scope.local pathobj.parse(u"compiler")))<line_sep>this=Module(mi.name.string {} extends=base.module)# base.module
mi.default_config(this lib_scope)<line_sep>mi.loadit(this lib_scope)<line_sep>lib_scope.compile_file=this.getattr(u"compile_file")<line_sep><return>lib_scope.compile_file.call(argv)<block_end><block_end># plans:
# allow modules derive or create new scopes and isolate themselves.
# module path
<def_stmt>moduleinfo module_path<block_start>module_path=pathobj.abspath(module_path)<line_sep>module_name=module_path.getattr(u"basename")<assert_stmt>isinstance(module_name String)<line_sep>s=pathobj.os_stringify(module_path).encode('utf-8')<line_sep>is_dir=<false><if_stmt>os.path.isdir(s)<block_start>w=os.path.join(s "init")<if_stmt>os.path.exists(w+".lc.cb")<or>os.path.exists(w+".lc")<block_start>is_dir=<true><line_sep>s=w<block_end><block_end><else_stmt><block_start>module_path=pathobj.directory(module_path)<block_end>cb_path=s+".lc.cb"<line_sep>cb_present=os.path.exists(cb_path)<line_sep>cb_mtime=0.0<line_sep>lc_path=s+".lc"<line_sep>lc_present=os.path.exists(lc_path)<line_sep>lc_mtime=0.0<if_stmt>cb_present<block_start>cb_mtime=os.path.getmtime(cb_path)<block_end><if_stmt>lc_present<block_start>lc_mtime=os.path.getmtime(lc_path)<block_end># This ignores outdated bytecode objects.
<if_stmt>cb_present<and>lc_present<block_start>cb_present=<not>cb_mtime<l>lc_mtime<block_end><return>ModuleInfo(module_name module_path pathobj.os_parse(cb_path.decode('utf-8')) cb_present cb_mtime pathobj.os_parse(lc_path.decode('utf-8')) lc_present lc_mtime )<block_end><class_stmt>ModuleInfo(Object)<block_start><def_stmt>__init__ self name directory cb_path cb_present cb_mtime lc_path lc_present lc_mtime<block_start>self.name=name<line_sep>self.directory=directory<line_sep>self.cb_path=cb_path<line_sep>self.cb_present=cb_present<line_sep>self.cb_mtime=cb_mtime<line_sep>self.lc_path=lc_path<line_sep>self.lc_present=lc_present<line_sep>self.lc_mtime=lc_mtime<block_end><def_stmt>default_config self module scope<block_start>module.setattr(u"dir" self.directory)<line_sep>module.setattr(u"name" self.name)<line_sep>module.setattr(u"import" Import(self.directory scope))<line_sep><return>module<block_end><def_stmt>loadit self module scope<block_start><if_stmt><not>self.cb_present<block_start><while_stmt>scope.compile_file<is>null<and>scope.parent<is><not><none><block_start>scope=scope.parent<block_end><if_stmt>scope.compile_file<is>null<block_start><raise>OldError(u"Lever bytecode compiler stale or missing: "+self.lc_path.repr())<block_end>scope.compile_file.call([self.cb_path self.lc_path])<line_sep>self.cb_mtime=os.path.getmtime(pathobj.os_stringify(self.cb_path).encode('utf-8'))<line_sep>self.cb_present=<true><block_end>program=evaluator.loader.from_object(bon.open_file(self.cb_path) self.cb_path)<line_sep>res=program.call([module])<line_sep><return>res<block_end><def_stmt>getattr self name<block_start><if_stmt>name<eq>u"present"<block_start><return>boolean(self.cb_present<or>self.lc_present)<block_end><if_stmt>name<eq>u"mtime"<block_start><return>Float(max(self.lc_mtime self.cb_mtime))<block_end><return>Object.getattr(self name)<block_end><block_end><class_stmt>Import(Object)<block_start><def_stmt>__init__ self local 
scope<block_start>self.local=local<line_sep>self.scope=scope<block_end><def_stmt>call self argv<block_start><if_stmt>len(argv)<ne>1<block_start><raise>OldError(u"wrong number of arguments to import")<block_end>name=argv[0]<if_stmt>isinstance(name pathobj.Path)<block_start><raise>OldError(u"no direct loading yet")<block_end><elif_stmt><not>isinstance(name String)<block_start><raise>OldError(u"expected string")<block_end># import resolution:
# local/script.lc
path=pathobj.concat(self.local pathobj.to_path(name))<line_sep>cache=self.scope.getcache(path)<if_stmt>cache<block_start><return>cache.module<block_end><if_stmt><not>self.scope.frozen<block_start>mi=moduleinfo(path)<if_stmt>mi.lc_present<or>mi.cb_present<block_start>base_module=get_base_module(self.scope)<line_sep>this=Module(name.string {} extends=base_module)# base.module
self.scope.setcache(path this max(mi.lc_mtime mi.cb_mtime))<line_sep>mi.default_config(this self.scope)<line_sep>mi.loadit(this self.scope)<line_sep><return>this<block_end><block_end># scope/
scope=self.scope<while_stmt>scope<is><not><none><block_start>path=pathobj.concat(scope.local pathobj.to_path(name))<line_sep>cache=scope.getcache(path)<if_stmt>cache<block_start><return>cache.module<block_end><if_stmt><not>scope.frozen<block_start>mi=moduleinfo(path)<if_stmt>mi.lc_present<or>mi.cb_present<block_start>base_module=get_base_module(scope)<line_sep>this=Module(name.string {} extends=base_module)# base.module
scope.setcache(path this max(mi.lc_mtime mi.cb_mtime))<line_sep>mi.default_config(this scope)<line_sep>mi.loadit(this scope)<line_sep><return>this<block_end><block_end>scope=scope.parent<block_end><raise>OldError(u"module '%s' not present"%name.string)<block_end><def_stmt>getattr self name<block_start><if_stmt>name<eq>u'scope'<block_start><return>self.scope<block_end><if_stmt>name<eq>u"local"<block_start><return>self.local<block_end><return>Object.getattr(self name)<block_end><block_end><def_stmt>get_base_module scope<block_start><while_stmt>scope.parent<and>scope.base_module<is><none><block_start>scope=scope.parent<block_end><return>scope.base_module<block_end>@Import.instantiator2(signature(pathobj.Path ModuleScope))<def_stmt>_ local scope<block_start><return>Import(local scope)<block_end>@ModuleScope.builtin_method@signature(ModuleScope String)<def_stmt>reimport scope obj<block_start><if_stmt>obj.string<not><in>scope.cache<block_start><raise>OldError(u"Cannot reimport, module not present")<block_end>mc=scope.cache[obj.string]<line_sep>mi=moduleinfo(mc.path)<line_sep>mi.default_config(mc.module scope)<line_sep>mi.loadit(mc.module scope)<line_sep>mc.mtime=max(mi.lc_mtime mi.cb_mtime)<line_sep><return>mc.module<block_end><def_stmt>resuffix string suffix new_suffix=u""<block_start><if_stmt>string.endswith(suffix)<block_start>i=max(0 len(string)-len(suffix))<line_sep><return>string[0:i]+new_suffix<block_end><return>string+new_suffix<block_end>base.module.setattr_force(u"ModuleScope" ModuleScope.interface)<line_sep>base.module.setattr_force(u"Import" Import.interface)<line_sep> |
<import_from_stmt>.lars LARS# noqa
<import_from_stmt>.lamb LAMB# noqa
|
version="0.9.4"<line_sep> |
<import_stmt>os<line_sep>MODELS_PATH=os.path.join(os.path.dirname(__file__) "models")<line_sep>YOLO_SIZE=288<line_sep>YOLO_TARGET=9<line_sep>CORRECTOR_SIZE=50<line_sep> |
<import_stmt>testutils<import_stmt>json<import_stmt>string<import_stmt>psycopg2<class_stmt>TestInsertDocument(testutils.BedquiltTestCase)<block_start><def_stmt>test_insert_into_non_existant_collection self<block_start>doc={"_id":"<EMAIL>" "name":"<NAME>" "age":20}<line_sep>self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))<line_sep>result=self.cur.fetchone()<line_sep>self.assertEqual(result ('<EMAIL>' ))<line_sep>self.cur.execute("select bq_list_collections();")<line_sep>collections=self.cur.fetchall()<line_sep>self.assertIsNotNone(collections)<line_sep>self.assertEqual(collections [("people" )])<block_end><def_stmt>test_with_non_string_id self<block_start>docs=[{"_id":42 "name":"Penguin" "age":"<EMAIL>"} {"_id":['derp'] "name":"Penguin" "age":"<EMAIL>"} {"_id":{"name":"Penguin"} "age":"<EMAIL>"} {"_id":<false> "name":"Penguin" "age":"<EMAIL>"} {"_id":<none> "name":"Penguin" "age":"<EMAIL>"}]<for_stmt>doc docs<block_start><with_stmt>self.assertRaises(psycopg2.InternalError)<block_start>self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))<block_end>self.conn.rollback()<block_end><block_end><def_stmt>test_insert_without_id self<block_start>doc={"name":"<NAME>" "age":20}<line_sep>self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))<line_sep>result=self.cur.fetchone()<line_sep>self.assertIsNotNone(result)<line_sep>self.assertEqual(type(result) tuple)<line_sep>self.assertEqual(len(result) 1)<line_sep>_id=result[0]<line_sep>self.assertIn(type(_id) {str unicode})<line_sep>self.assertEqual(len(_id) 24)<for_stmt>character _id<block_start>self.assertIn(character string.hexdigits)<block_end><block_end><def_stmt>test_with_single_quotes_in_field self<block_start>doc={"description":"Something I've eaten"}<line_sep>self.cur.execute("""
select bq_insert('things', %s);
""" (json.dumps(doc) ))<line_sep>result=self.cur.fetchone()<line_sep>self.assertIsNotNone(result)<block_end><def_stmt>test_insert_with_repeat_id self<block_start>doc={"_id":"user_one" "name":"<NAME>" "age":20}<line_sep>self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))<line_sep>result=self.cur.fetchone()<line_sep>self.assertIsNotNone(result)<line_sep>self.assertEqual(type(result) tuple)<line_sep>self.assertEqual(len(result) 1)<line_sep>_id=result[0]<line_sep>self.assertEqual(_id "user_one")<line_sep>self.conn.commit()<with_stmt>self.assertRaises(psycopg2.IntegrityError)<block_start>self.cur.execute("""
select bq_insert('people', '{}');
""".format(json.dumps(doc)))<block_end>self.conn.rollback()<line_sep>self.cur.execute("select count(*) from people;")<line_sep>result=self.cur.fetchone()<line_sep>self.assertEqual(result (1 ))<block_end><block_end> |
# Copyright (C) 2016 <NAME>.
# This file is part of CodexGigas - https://github.com/codexgigassys/
# See the file 'LICENSE' for copying permission.
<import_stmt>pathmagic<import_from_stmt>pymongo MongoClient<import_stmt>ssdeep<import_from_stmt>env envget<def_stmt>searchFuzzy fuzz limit thresh<block_start>client=MongoClient(envget('metadata.host') envget('metadata.port'))<line_sep>db=client[envget('db_metadata_name')]<line_sep>coll_meta=db["db_metadata_collection"]<line_sep>f1=coll_meta.find({} {"file_id":1 "fuzzy_hash":1}).limit(limit)<line_sep>l=[]<for_stmt>f f1<block_start>l.append(f)<block_end>ret={}<for_stmt>a l<block_start>res=-1<try_stmt><block_start>res=ssdeep.compare(a["fuzzy_hash"] fuzz)<block_end><except_stmt>InternalError<block_start>print(str(res)+"------"+str(a["fuzzy_hash"])+"-----"+str(a["file_id"]))<line_sep><continue><block_end><if_stmt>(res<ge>thresh)<block_start>ret[a["file_id"]]=res<block_end><block_end><return>ret<block_end><def_stmt>searchFull search limit# print("1")
<block_start>client=MongoClient(envget('metadata.host') envget('metadata.port'))<line_sep># print("2")
db=client[envget('db_metadata_name')]<line_sep># print("3")
coll_meta=db["db_metadata_collection"]<line_sep># print("4")
f1=coll_meta.find(search).limit(limit)<line_sep># print("5")
l=[]<for_stmt>f f1<block_start>l.append(f)<block_end># print("6")
ret=[]<for_stmt>a l<block_start>ret.append(str(a["file_id"]))<block_end># print("7")
<return>ret<block_end> |
# -*- coding:utf-8 -*-
# edit by fuzongfei
<import_from_stmt>django.urls path<import_from_stmt>sqlorders views<line_sep>urlpatterns=[# SQL工单
path('envs' views.GetDBEnvironment.as_view() name='v1.sqlorders.db-environment') path('schemas' views.GetDbSchemas.as_view() name='v1.sqlorders.db-schemas') path('incep/syntaxcheck' views.IncepSyntaxCheckView.as_view() name='v1.sqlorders.incep.syntaxcheck') path('commit' views.SqlOrdersCommit.as_view() name='v1.sqlorders.commit') path('list' views.SqlOrdersList.as_view() name='v1.sqlorders.list') path('detail/<str:order_id>' views.SqlOrdersDetail.as_view() name='v1.sqlorders.detail') path('op/approve/<int:pk>' views.OpSqlOrderView.as_view({"put":"approve"}) name='v1.sqlorders.approve') path('op/feedback/<int:pk>' views.OpSqlOrderView.as_view({"put":"feedback"}) name='v1.sqlorders.feedback') path('op/close/<int:pk>' views.OpSqlOrderView.as_view({"put":"close"}) name='v1.sqlorders.close') path('op/review/<int:pk>' views.OpSqlOrderView.as_view({"put":"review"}) name='v1.sqlorders.review') # 生成工单任务
path('tasks/generate' views.GenerateTasksView.as_view() name='v1.sqlorders.generate-tasks') path('tasks/get/<str:order_id>' views.GetTaskIdView.as_view() name='v1.sqlorders.get-task-id') path('tasks/list/<str:task_id>' views.GetTasksListView.as_view() name='v1.sqlorders.get-tasks-list') path('tasks/preview/<str:task_id>' views.GetTasksPreviewView.as_view() name='v1.sqlorders.get-tasks-preview') # 执行任务
path('tasks/execute/single' views.ExecuteSingleTaskView.as_view() name='v1.sqlorders.execute-single-task') path('tasks/execute/multi' views.ExecuteMultiTasksView.as_view() name='v1.sqlorders.execute-multi-tasks') path('tasks/throttle' views.ThrottleTaskView.as_view() name='v1.sqlorders.throttle-task') path('tasks/result/<int:id>' views.GetTasksResultView.as_view() name='v1.sqlorders.get-tasks-result') # Hook
path('hook' views.HookSqlOrdersView.as_view() name='v1.sqlorders.hook-sqlorders') # download export files
path('export/download/<str:base64_filename>' views.DownloadExportFilesView.as_view() name='v1.sqlorders.download-export-files') # 上线版本
path('versions/get' views.ReleaseVersionsGet.as_view() name='v1.sqlorders.versions.get') path('versions/list' views.ReleaseVersionsList.as_view() name='v1.sqlorders.versions.list') path('versions/create' views.ReleaseVersionsCreate.as_view() name='v1.sqlorders.versions.create') path('versions/update/<int:key>' views.ReleaseVersionsUpdate.as_view() name='v1.sqlorders.versions.update') path('versions/delete/<int:id>' views.ReleaseVersionsDelete.as_view() name='v1.sqlorders.versions.delete') path('versions/view/<str:version>' views.ReleaseVersionsView.as_view() name='v1.sqlorders.versions.view') ]<line_sep> |
"""
ORM是django的核心思想, object-related-mapping对象-关系-映射
ORM核心就是操作数据库的时候不再直接操作sql语句,而是操作对象
定义一个类,类中有uid,username等类属型,sql语句insert修改的时候直接插入这个User对象
"""<line_sep># ORM映射实现原理,通过type修改类对象信息
# 定义这个元类metaclass
<class_stmt>ModelMetaclass(type)<block_start><def_stmt>__new__ cls name bases attrs# name --> User
# bases --> object
# attrs --> {
# "uid" :('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# "email": ('email', "varchar(30)"),
# "password": ('password', "varchar(30)"),
# "__init__": xxx,
# "save": xxx2,
# }
<block_start>mappings=dict()<line_sep># 判断是否需要保存
<for_stmt>k,v attrs.items()# 判断是否是元组类型
<block_start><if_stmt>isinstance(v tuple)<block_start>print('Found mapping: %s ==> %s'%(k v))<line_sep>mappings[k]=v<block_end><block_end># 删除这些已经在字典中存储的属性
<for_stmt>k mappings.keys()<block_start>attrs.pop(k)<block_end># 等于del attrs[k]
# 将之前的uid/name/email/password以及对应的对象引用、类名字
# attrs = {
# "__init__": xxxx,
# "save": xxxx2,
# "__mappings__": {
# "uid": ('uid', "int unsigned"),
# "name": ('username', "varchar(30)"),
# ""email: ('email', "varchar(30)"),
# "password": ('password', "varchar(30)")
# },
# "__table__": "User"
# }
attrs['__mappings__']=mappings# 保存属性和列的映射关系
attrs['__table__']=name# 假设表名和类名一致
<return>type.__new__(cls name bases attrs)<block_end><block_end><class_stmt>User(metaclass=ModelMetaclass)<block_start>uid=('uid' "int unsigned")<line_sep>name=('username' "varchar(30)")<line_sep>email=('email' "varchar(30)")<line_sep>password=('password' "<PASSWORD>)")<line_sep># 当指定元类之后,以上的类属性将不在类中,而是在__mappings__属性指定的字典中存储
# 以上User类中有
# __mappings__ = {
# "uid": ('uid', "int unsigned")
# "name": ('username', "varchar(30)")
# "email": ('email', "varchar(30)")
# "password": ('password', "varchar(30)")
# }
# __table__ = "User"
# 参数名是kwargs,不是**kwargs,**只是告诉解释器将传来的参数变为字典
# for循环遍历__new__返回的attrs字典,实现实例对象的属性和方法赋值
<def_stmt>__init__ self **kwargs<block_start><for_stmt>name,value kwargs.items()<block_start>setattr(self name value)<block_end><block_end><def_stmt>save self<block_start>fields=[]# ["uid", "username"...]
args=[]#[12345, "laowang"...]
# 创建的实例对象中没有__mapping__,去类对象中找
# k --> uid, v --> 12345
<for_stmt>k,v self.__mappings__.items()<block_start>fields.append(v[0])<line_sep>args.append(getattr(self k <none>))<block_end>args_temp=list()<for_stmt>temp args<block_start><if_stmt>isinstance(temp int)# 判断如果是数字类型
<block_start>args_temp.append(str(temp))<block_end><elif_stmt>isinstance(temp str)# 判断如果是字符串类型
<block_start>args_temp.append("""'%s'"""%temp)<block_end><block_end># sql = 'insert into %s (%s) values (%s);' \
# % (self.__table__, ','.join(fields), ','.join([str(i) for i in args]))
# 使用",".join为每一个字段后都插入逗号分隔
sql='insert into %s (%s) values (%s)'%(self.__table__ ','.join(fields) ','.join(args_temp))<line_sep>print('SQL: %s'%sql)<block_end><block_end># 抽取为基类,再创建User2这个类,就直接让其继承Model类
<class_stmt>Model(object metaclass=ModelMetaclass)<block_start><def_stmt>__init__ self **kwargs<block_start><for_stmt>name,value kwargs.items()<block_start>setattr(self name value)<block_end><block_end><def_stmt>save self<block_start>fields=[]<line_sep>args=[]<for_stmt>k,v self.__mappings__.items()<block_start>fields.append(v[0])<line_sep>args.append(getattr(self k <none>))<block_end>args_temp=list()<for_stmt>temp args# 判断入如果是数字类型
<block_start><if_stmt>isinstance(temp int)<block_start>args_temp.append(str(temp))<block_end><elif_stmt>isinstance(temp str)<block_start>args_temp.append("""'%s'"""%temp)<block_end><block_end>sql='insert into %s (%s) values (%s)'%(self.__table__ ','.join(fields) ','.join(args_temp))<line_sep>print('SQL: %s'%sql)<block_end><block_end><class_stmt>User2(Model)<block_start>uid=('uid' "int unsigned")<line_sep>name=('username' "varchar(30)")<line_sep>email=('email' "varchar(30)")<line_sep>password=('password' "<PASSWORD>)")<block_end><def_stmt>test01 <block_start>u=User(uid=12345 name='Michael' email='<EMAIL>' password='<PASSWORD>')<line_sep># print(u.__dict__)
u.save()<block_end><def_stmt>test02 <block_start>list=['12356' "laowang" "email"]<line_sep>print(",".join(list))<block_end><def_stmt>main # test01()
<block_start>test02()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# Standard libraries
<import_stmt>os<import_stmt>json<import_stmt>logging<import_from_stmt>typing Text<line_sep># Azure functions
<import_stmt>azure.functions<as>func<line_sep># Inference runtime
<import_stmt>onnxruntime<as>ort<import_from_stmt>tokenizers BertWordPieceTokenizer<line_sep># Helper scripts
<import_from_stmt>.PreprocessData normalize_text truncate_text<import_from_stmt>.Predict get_ids_and_masks predict<line_sep># Initialize ONNX runtime and language model tokenizer
vocab_file_path=os.path.join(os.path.dirname(__file__) "Model/bert-base-uncased-vocab.txt")<line_sep>onnx_file_path=os.path.join(os.path.dirname(__file__) "Model/watchdog_model.onnx")<line_sep>tokenizer=BertWordPieceTokenizer(vocab_file_path)<line_sep>tokenizer.enable_padding(pad_id=0 pad_token="[PAD]" length=128)<line_sep>tokenizer.enable_truncation(max_length=128)<line_sep>ort_session=ort.InferenceSession(onnx_file_path)<def_stmt>main req:func.HttpRequest<arrow>func.HttpResponse<block_start>logging.info('Invoked TextQualityWatchdog Skill.')<try_stmt><block_start>body=json.dumps(req.get_json())<if_stmt>body<block_start>logging.info(body)<line_sep>values=json.loads(body)['values']<line_sep>results={}<line_sep>results["values"]=[]<for_stmt>value values<block_start>text=value['data']['text']<line_sep># Apply puntuation and whitespace normalization, and convert to lowercase
text=normalize_text(text)<line_sep># Truncate the text to a maximum of 128 (default) whitespace separated tokens
text=truncate_text(text)<line_sep># Compute the input tokens and attention masks for the text sequence
input_ids,attention_masks=get_ids_and_masks(tokenizer text)<line_sep># Call the ONNX model to perform inference on the input
flat_prediction=predict(ort_session input_ids attention_masks)<line_sep>payload=({"recordId":value['recordId'] "data":{"text_quality_warning":int(flat_prediction[0])}})<line_sep>results["values"].append(payload)<block_end>result=json.dumps(results ensure_ascii=<false>)<line_sep><return>func.HttpResponse(result mimetype="application/json")<block_end><else_stmt><block_start><return>func.HttpResponse("Invalid body" status_code=400)<block_end><block_end><except_stmt>ValueError<block_start><return>func.HttpResponse("Invalid body" status_code=400)<block_end><block_end> |
#
# Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
_UNIT_TO_JUNIT={"s":"SECONDS" "ms":"MILLISECONDS" "us":"MICROSECONDS" "ns":"NANOSECONDS"}<def_stmt>jsc sc<block_start>"""Returns the underlying Scala SparkContext
:param sc: SparkContext
:return: :class:`py4j.java_gateway.JavaObject` (org.apache.spark.SparkContext)
"""<line_sep><return>sc._jsc.sc()<block_end><def_stmt>jvm sc<block_start>"""Returns the Pyspark JVM handle
:param sc: SparkContext
:return: :class:`py4j.java_gateway.JavaView
` """<line_sep><return>sc._jvm<block_end><def_stmt>scala_object jpkg obj<block_start><return>jpkg.__getattr__(obj+"$").__getattr__("MODULE$")<block_end><def_stmt>scala_package_object jpkg<block_start><return>scala_object(jpkg "package")<block_end><def_stmt>pyutils sc<block_start>"""Returns a handle to ``com.twosigma.flint.rdd.PythonUtils``
:param sc: SparkContext
:return: :class:`py4j.java_gateway.JavaPackage` (com.twosigma.flint.rdd.PythonUtils)
"""<line_sep><return>jvm(sc).com.twosigma.flint.rdd.PythonUtils<block_end><def_stmt>copy_jobj sc obj<block_start>"""Returns a Java object ``obj`` with an additional reference count
:param sc: Spark Context
:param obj: :class:`py4j.java_gateway.JavaObject`
:return: ``obj`` (:class:`py4j.java_gateway.JavaObject`) with an additional reference count
"""<line_sep><return>pyutils(sc).makeCopy(obj)<block_end><def_stmt>to_list lst<block_start>"""Make sure the object is wrapped in a list
:return: a ``list`` object, either lst or lst in a list
"""<if_stmt>isinstance(lst str)<block_start>lst=[lst]<block_end><elif_stmt><not>isinstance(lst list)<block_start><try_stmt><block_start>lst=list(lst)<block_end><except_stmt>TypeError<block_start>lst=[lst]<block_end><block_end><return>lst<block_end><def_stmt>list_to_seq sc lst preserve_none=<false><block_start>"""Shorthand for accessing PythonUtils Java Package
If lst is a Python None, returns a None or empty Scala Seq (depending on preserve_none)
If lst is a Python object, such as str, returns a Scala Seq containing the object
If lst is a Python tuple/list, returns a Scala Seq containing the objects in the tuple/list
:return: A copy of ``lst`` as a ``scala.collection.Seq``
"""<if_stmt>lst<is><none><block_start><if_stmt>preserve_none<block_start><return><none><block_end><else_stmt><block_start>lst=[]<block_end><block_end><return>jvm(sc).org.apache.spark.api.python.PythonUtils.toSeq(to_list(lst))<block_end><def_stmt>py_col_to_scala_col sc py_col<block_start>converters={list:list_to_seq tuple:list_to_seq}<line_sep>convert=converters.get(type(py_col))<if_stmt>convert<block_start><return>convert(sc py_col)<block_end><else_stmt><block_start><return>py_col<block_end><block_end><def_stmt>junit sc unit<block_start>"""Converts a Pandas unit to scala.concurrent.duration object
:return: Scala equivalent of ``unit`` as ``scala.concurrent.duration object``
"""<if_stmt>unit<not><in>_UNIT_TO_JUNIT<block_start><raise>ValueError("unit must be in {}".format(_UNIT_TO_JUNIT.keys()))<block_end><return>scala_package_object(jvm(sc).scala.concurrent.duration).__getattr__(_UNIT_TO_JUNIT[unit])()<block_end><def_stmt>jschema sc schema<block_start>"""Converts a Python schema (StructType) to a Scala schema ``org.apache.spark.sql.types.StructType``
:return: :class:``org.apache.spark.sql.types.StructType``
"""<import_stmt>json<line_sep><return>jvm(sc).org.apache.spark.sql.types.StructType.fromString(json.dumps(schema.jsonValue))<block_end> |
<import_from_stmt>sympy.diffgeom Manifold Patch CoordSystem Point<import_from_stmt>sympy symbols Function<import_from_stmt>sympy.testing.pytest warns_deprecated_sympy<line_sep>m=Manifold('m' 2)<line_sep>p=Patch('p' m)<line_sep>a,b=symbols('a b')<line_sep>cs=CoordSystem('cs' p [a b])<line_sep>x,y=symbols('x y')<line_sep>f=Function('f')<line_sep>s1,s2=cs.coord_functions()<line_sep>v1,v2=cs.base_vectors()<line_sep>f1,f2=cs.base_oneforms()<def_stmt>test_point <block_start>point=Point(cs [x y])<assert_stmt>point<ne>Point(cs [2 y])<line_sep>#TODO assert point.subs(x, 2) == Point(cs, [2, y])
#TODO assert point.free_symbols == set([x, y])
<block_end><def_stmt>test_subs <block_start><assert_stmt>s1.subs(s1 s2)<eq>s2<assert_stmt>v1.subs(v1 v2)<eq>v2<assert_stmt>f1.subs(f1 f2)<eq>f2<assert_stmt>(x<times>f(s1)+y).subs(s1 s2)<eq>x<times>f(s2)+y<assert_stmt>(f(s1)<times>v1).subs(v1 v2)<eq>f(s1)<times>v2<assert_stmt>(y<times>f(s1)<times>f1).subs(f1 f2)<eq>y<times>f(s1)<times>f2<block_end><def_stmt>test_deprecated <block_start><with_stmt>warns_deprecated_sympy()<block_start>cs_wname=CoordSystem('cs' p ['a' 'b'])<assert_stmt>cs_wname<eq>cs_wname.func(*cs_wname.args)<block_end><block_end> |
<import_stmt>string<import_stmt>datrie<line_sep>trie=datrie.Trie(string.ascii_lowercase)<line_sep>trie[u'foo']=5<assert_stmt>u'foo'<in>trie<line_sep> |
<import_from_stmt>typing Any Dict List Tuple Union<import_stmt>pytest<import_stmt>torch<import_from_stmt>allennlp.common.testing AllenNlpTestCase multi_device global_distributed_metric run_distributed_test <import_from_stmt>allennlp_models.vision VqaMeasure<class_stmt>VqaMeasureTest(AllenNlpTestCase)<block_start>@multi_device<def_stmt>test_vqa self device:str<block_start>vqa=VqaMeasure()<line_sep>logits=torch.tensor([[0.35 0.25 0.1 0.1 0.2] [0.1 0.6 0.1 0.2 0.0]] device=device)<line_sep>labels=torch.tensor([[0] [3]] device=device)<line_sep>label_weights=torch.tensor([[1/3] [2/3]] device=device)<line_sep>vqa(logits labels label_weights)<line_sep>vqa_score=vqa.get_metric()["score"]<assert_stmt>vqa_score<eq>pytest.approx((1/3)/2)<block_end>@multi_device<def_stmt>test_vqa_accumulates_and_resets_correctly self device:str<block_start>vqa=VqaMeasure()<line_sep>logits=torch.tensor([[0.35 0.25 0.1 0.1 0.2] [0.1 0.6 0.1 0.2 0.0]] device=device)<line_sep>labels=torch.tensor([[0] [3]] device=device)<line_sep>labels2=torch.tensor([[4] [4]] device=device)<line_sep>label_weights=torch.tensor([[1/3] [2/3]] device=device)<line_sep>vqa(logits labels label_weights)<line_sep>vqa(logits labels label_weights)<line_sep>vqa(logits labels2 label_weights)<line_sep>vqa(logits labels2 label_weights)<line_sep>vqa_score=vqa.get_metric(reset=<true>)["score"]<assert_stmt>vqa_score<eq>pytest.approx((1/3+1/3+0+0)/8)<line_sep>vqa(logits labels label_weights)<line_sep>vqa_score=vqa.get_metric(reset=<true>)["score"]<assert_stmt>vqa_score<eq>pytest.approx((1/3)/2)<block_end>@multi_device<def_stmt>test_does_not_divide_by_zero_with_no_count self device:str<block_start>vqa=VqaMeasure()<assert_stmt>vqa.get_metric()["score"]<eq>pytest.approx(0.0)<block_end><def_stmt>test_distributed_accuracy self<block_start>logits=[torch.tensor([[0.35 0.25 0.1 0.1 0.2]]) torch.tensor([[0.1 0.6 0.1 0.2 0.0]]) ]<line_sep>labels=[torch.tensor([[0]]) torch.tensor([[3]])]<line_sep>label_weights=[torch.tensor([[1/3]]) 
torch.tensor([[2/3]])]<line_sep>metric_kwargs={"logits":logits "labels":labels "label_weights":label_weights}<line_sep>desired_accuracy={"score":(1/3)/2}<line_sep>run_distributed_test([-1 -1] global_distributed_metric VqaMeasure() metric_kwargs desired_accuracy exact=<false> )<block_end><def_stmt>test_distributed_accuracy_unequal_batches self<block_start>logits=[torch.tensor([[0.35 0.25 0.1 0.1 0.2] [0.35 0.25 0.1 0.1 0.2]]) torch.tensor([[0.1 0.6 0.1 0.2 0.0]]) ]<line_sep>labels=[torch.tensor([[0] [0]]) torch.tensor([[3]])]<line_sep>label_weights=[torch.tensor([[1] [1]]) torch.tensor([[1/3]])]<line_sep>metric_kwargs={"logits":logits "labels":labels "label_weights":label_weights}<line_sep>desired_accuracy={"score":(1+1+0)/3}<line_sep>run_distributed_test([-1 -1] global_distributed_metric VqaMeasure() metric_kwargs desired_accuracy exact=<false> )<block_end><def_stmt>test_multiple_distributed_runs self<block_start>logits=[torch.tensor([[0.35 0.25 0.1 0.1 0.2]]) torch.tensor([[0.1 0.6 0.1 0.2 0.0]]) ]<line_sep>labels=[torch.tensor([[0]]) torch.tensor([[3]])]<line_sep>label_weights=[torch.tensor([[1/3]]) torch.tensor([[2/3]])]<line_sep>metric_kwargs={"logits":logits "labels":labels "label_weights":label_weights}<line_sep>desired_accuracy={"score":(1/3)/2}<line_sep>run_distributed_test([-1 -1] global_distributed_metric VqaMeasure() metric_kwargs desired_accuracy exact=<true> number_of_runs=200 )<block_end><block_end> |
# pylint: disable=missing-function-docstring, missing-module-docstring/
<import_from_stmt>numpy.random randint<import_from_stmt>pyccel.epyccel epyccel<def_stmt>test_transpose_shape language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose<line_sep>y=transpose(x)<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start><import_from_stmt>numpy transpose<line_sep>y=transpose(x)<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2)<block_end><def_stmt>test_transpose_property language<block_start><def_stmt>f1 x:'int[:,:]'<block_start>y=x.T<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start>y=x.T<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2)<block_end><def_stmt>test_transpose_in_expression language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose<line_sep>y=transpose(x)+3<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start>y=x.T<times>3<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2)<block_end><def_stmt>test_mixed_order language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose 
ones<line_sep>n,m=x.shape<line_sep>y=ones((m n) order='F')<line_sep>z=x+transpose(y)<line_sep>n,m=z.shape<line_sep><return>n m z[-1 0] z[0 -1]<block_end><def_stmt>f2 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose ones<line_sep>n,m=x.shape<line_sep>y=ones((m n) order='F')<line_sep>z=x.transpose()+y<line_sep>n,m=z.shape<line_sep><return>n m z[-1 0] z[0 -1]<block_end><def_stmt>f3 x:'int[:,:,:]'<block_start><import_from_stmt>numpy transpose ones<line_sep>n,m,p=x.shape<line_sep>y=ones((p m n))<line_sep>z=transpose(x)+y<line_sep>n,m,p=z.shape<line_sep><return>n m p z[0 -1 0] z[0 0 -1] z[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x1)<eq>f2_epyc(x1)<line_sep>f3_epyc=epyccel(f3 language=language)<assert_stmt>f3(x2)<eq>f3_epyc(x2)<block_end><def_stmt>test_transpose_pointer language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose<line_sep>y=transpose(x)<line_sep>x[0 -1]<augadd>22<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start>y=x.T<line_sep>x[0 -1 0]<augadd>11<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x1_copy=x1.copy()<line_sep>x2=randint(50 size=(2 3 7))<line_sep>x2_copy=x2.copy()<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1_copy)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2_copy)<block_end><def_stmt>test_transpose_of_expression language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose<line_sep>y=transpose(x<times>2)+3<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start>y=(x<times>2).T<times>3<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 
0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2)<block_end><def_stmt>test_force_transpose language<block_start><def_stmt>f1 x:'int[:,:]'<block_start><import_from_stmt>numpy transpose empty<line_sep>n,m=x.shape<line_sep>y=empty((m n))<line_sep>y[: :]=transpose(x)<line_sep>n,m=y.shape<line_sep><return>n m y[-1 0] y[0 -1]<block_end><def_stmt>f2 x:'int[:,:,:]'<block_start><import_from_stmt>numpy empty<line_sep>n,m,p=x.shape<line_sep>y=empty((p m n))<line_sep>y[: : :]=x.transpose()<line_sep>n,m,p=y.shape<line_sep><return>n m p y[0 -1 0] y[0 0 -1] y[-1 -1 0]<block_end>x1=randint(50 size=(2 5))<line_sep>x2=randint(50 size=(2 3 7))<line_sep>f1_epyc=epyccel(f1 language=language)<assert_stmt>f1(x1)<eq>f1_epyc(x1)<line_sep>f2_epyc=epyccel(f2 language=language)<assert_stmt>f2(x2)<eq>f2_epyc(x2)<block_end> |
<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>scipy<as>sp<import_stmt>scipy.sparse<as>sparse<import_from_stmt>megaman.geometry Geometry<import_from_stmt>sklearn datasets<import_from_stmt>megaman.embedding Isomap LocallyLinearEmbedding LTSA SpectralEmbedding <line_sep># Generate an example data set
N=10<line_sep>X,color=datasets.samples_generator.make_s_curve(N random_state=0)<line_sep># Geometry is the main class that will Cache things like distance, affinity, and laplacian.
# you instantiate the Geometry class with the parameters & methods for the three main components:
# Adjacency: an NxN (sparse) pairwise matrix indicating neighborhood regions
# Affinity an NxN (sparse) pairwise matrix insicated similarity between points
# Laplacian an NxN (sparse) pairwsie matrix containing geometric manifold information
radius=5<line_sep>adjacency_method='cyflann'<line_sep>adjacency_kwds={'radius':radius}# ignore distances above this radius
affinity_method='gaussian'<line_sep>affinity_kwds={'radius':radius}# A = exp(-||x - y||/radius^2)
laplacian_method='geometric'<line_sep>laplacian_kwds={'scaling_epps':radius}# scaling ensures convergence to Laplace-Beltrami operator
geom=Geometry(adjacency_method=adjacency_method adjacency_kwds=adjacency_kwds affinity_method=affinity_method affinity_kwds=affinity_kwds laplacian_method=laplacian_method laplacian_kwds=laplacian_kwds)<line_sep># You can/should also use the set_data_matrix, set_adjacency_matrix, set_affinity_matrix
# to send your data set (in whichever form it takes) this way.
geom.set_data_matrix(X)<line_sep># You can get the distance, affinity etc with e.g: Geometry.get_distance_matrix()
# you can update the keyword arguments passed inially using these functions
adjacency_matrix=geom.compute_adjacency_matrix()<line_sep># by defualt this is pass-by-reference. Use copy=True to get a copied version.
# If you don't want to pre-compute a Geometry you can pass a dictionary or geometry
# arguments to one of the embedding classes.
geom={'adjacency_method':adjacency_method 'adjacency_kwds':adjacency_kwds 'affinity_method':affinity_method 'affinity_kwds':affinity_kwds 'laplacian_method':laplacian_method 'laplacian_kwds':laplacian_kwds}<line_sep># an example follows for creating each embedding into 2 dimensions.
n_components=2<line_sep># LTSA
ltsa=LTSA(n_components=n_components eigen_solver='arpack' geom=geom)<line_sep>embed_ltsa=ltsa.fit_transform(X)<line_sep># LLE
lle=LocallyLinearEmbedding(n_components=n_components eigen_solver='arpack' geom=geom)<line_sep>embed_lle=lle.fit_transform(X)<line_sep># Isomap
isomap=Isomap(n_components=n_components eigen_solver='arpack' geom=geom)<line_sep>embed_isomap=isomap.fit_transform(X)<line_sep># Spectral Embedding
spectral=SpectralEmbedding(n_components=n_components eigen_solver='arpack' geom=geom)<line_sep>embed_spectral=spectral.fit_transform(X)<line_sep> |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
<import_from_stmt>.config add_detr_config<import_from_stmt>.detr Detr<import_from_stmt>.dataset_mapper DetrDatasetMapper<line_sep> |
<import_from_stmt>pathlib Path<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pandas<as>pd<line_sep>results_dir=Path('results')<line_sep>results_dir.mkdir(exist_ok=<true>)<line_sep># Performance plot
<for_stmt>scale [3 4]<block_start><for_stmt>test_set ['Set5' 'Set14']<block_start>time=[]<line_sep>psnr=[]<line_sep>model=[]<for_stmt>save_dir sorted(Path('.').glob(f'*-sc{scale}'))<block_start><if_stmt>'bicubic'<not><in>save_dir.stem<block_start>model<augadd>[save_dir.stem.rsplit('-' 1)[0].upper()]<line_sep>metrics_file=save_dir/f'test/{test_set}/metrics.csv'<line_sep>metrics=pd.read_csv(str(metrics_file) index_col='name')<line_sep>time<augadd>[metrics.time.average]<line_sep>psnr<augadd>[metrics.psnr.average]<block_end><block_end>plt.figure()<line_sep>plt.semilogx(time psnr '.')<line_sep>plt.grid(<true> which='both')<for_stmt>x,y,s zip(time psnr model)<block_start><if_stmt>'NS'<in>s<block_start>s=s.split('-')[1]<block_end>plt.text(x y s)<block_end>plt.xlabel('Run time (sec)')<line_sep>plt.ylabel('PSNR (dB)')<line_sep>plt.title(f'Scale {scale} on {test_set}')<line_sep>plt.savefig(str(results_dir/f'performance-sc{scale}-{test_set}.png'))<line_sep>plt.close()<block_end><block_end># History plot
<for_stmt>scale [3 4]<block_start>plt.figure()<for_stmt>save_dir sorted(Path('.').glob(f'*-sc{scale}'))<block_start><if_stmt>'bicubic'<not><in>save_dir.stem<block_start>model=save_dir.stem.rsplit('-' 1)[0].upper()<line_sep>history_file=save_dir/f'train/history.csv'<line_sep>history=pd.read_csv(str(history_file))<line_sep>plt.plot(history.epoch history.val_psnr label=model alpha=0.8)<block_end><block_end>plt.legend()<line_sep>plt.xlabel('Epochs')<line_sep>plt.ylabel('Average test PSNR (dB)')<line_sep>plt.savefig(str(results_dir/f'history-sc{scale}.png'))<line_sep>plt.xlim(0 500)<if_stmt>scale<eq>3<block_start>plt.ylim(31.5 34.5)<block_end><if_stmt>scale<eq>4<block_start>plt.ylim(29 32)<block_end>plt.savefig(str(results_dir/f'history-sc{scale}-zoom.png'))<line_sep>plt.close()<block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torchvision.datasets<as>dsets<import_stmt>torchvision.transforms<as>transforms<import_from_stmt>torch.autograd Variable<line_sep># Hyper Parameters
input_size=784<line_sep>hidden_size=256<line_sep>dni_size=1024<line_sep>num_classes=10<line_sep>num_epochs=50<line_sep>batch_size=500<line_sep>learning_rate=1e-3<line_sep>use_cuda=torch.cuda.is_available()<line_sep># MNIST Dataset
train_dataset=dsets.MNIST(root='../data' train=<true> transform=transforms.ToTensor() download=<true>)<line_sep>test_dataset=dsets.MNIST(root='../data' train=<false> transform=transforms.ToTensor())<line_sep># Data Loader (Input Pipeline)
train_loader=torch.utils.data.DataLoader(dataset=train_dataset batch_size=batch_size shuffle=<true>)<line_sep>test_loader=torch.utils.data.DataLoader(dataset=test_dataset batch_size=batch_size shuffle=<false>)<class_stmt>DNI(nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size<block_start>super(DNI self).__init__()<line_sep>self.fc1=nn.Linear(input_size hidden_size)<line_sep>self.bn1=nn.BatchNorm1d(hidden_size)<line_sep>self.act1=nn.ReLU()<line_sep>self.fc2=nn.Linear(hidden_size input_size)<block_end><def_stmt>forward self x<block_start>out=self.fc1(x)<line_sep>out=self.bn1(out)<line_sep>out=self.act1(out)<line_sep>out=self.fc2(out)<line_sep><return>out<block_end><def_stmt>reset_parameters self<block_start>super(DNI self).reset_parameters()<for_stmt>param self.fc2.parameters()<block_start>param.data.zero_()<block_end><block_end><block_end>dni=DNI(hidden_size dni_size)<class_stmt>Net1(nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size<block_start>super(Net1 self).__init__()<line_sep>self.mlp=nn.Sequential(nn.Linear(input_size hidden_size) nn.BatchNorm1d(hidden_size) nn.ReLU())<block_end><def_stmt>forward self x<block_start><return>self.mlp.forward(x)<block_end><block_end>net1=Net1(input_size hidden_size)<class_stmt>Net2(nn.Module)<block_start><def_stmt>__init__ self input_size hidden_size num_classes<block_start>super(Net2 self).__init__()<line_sep>self.mlp=nn.Sequential()<line_sep>self.mlp.add_module('fc1' nn.Linear(input_size hidden_size))<line_sep>self.mlp.add_module('bn1' nn.BatchNorm1d(hidden_size))<line_sep>self.mlp.add_module('act1' nn.ReLU())<line_sep>self.mlp.add_module('fc' nn.Linear(hidden_size num_classes))<block_end><def_stmt>forward self x<block_start><return>self.mlp.forward(x)<block_end><block_end>net2=Net2(hidden_size hidden_size num_classes)<line_sep># Loss
xent=nn.CrossEntropyLoss()<line_sep>mse=nn.MSELoss()<line_sep># Optimizers
opt_net1=torch.optim.Adam(net1.parameters() lr=learning_rate)<line_sep>opt_net2=torch.optim.Adam(net2.parameters() lr=learning_rate)<line_sep>opt_dni=torch.optim.Adam(dni.parameters() lr=learning_rate)<if_stmt>use_cuda<block_start>net1.cuda()<line_sep>net2.cuda()<line_sep>dni.cuda()<block_end># Train the Model
<for_stmt>epoch range(num_epochs)<block_start><for_stmt>i,(images labels) enumerate(train_loader)# Convert torch tensor to Variable
<block_start><if_stmt>use_cuda<block_start>images=images.cuda()<line_sep>labels=labels.cuda()<block_end>images=Variable(images.view(-1 28<times>28))<line_sep>labels=Variable(labels)<line_sep># Forward + Backward + Optimize
opt_net1.zero_grad()# zero the gradient buffer
opt_net2.zero_grad()# zero the gradient buffer
opt_dni.zero_grad()# zero the gradient buffer
# Forward, Stage1
h=net1(images)<line_sep>h1=Variable(h.data requires_grad=<true>)<line_sep>h2=Variable(h.data requires_grad=<false>)<line_sep># Forward, Stage2
outputs=net2(h1)<line_sep># Backward
loss=xent(outputs labels)<line_sep>loss.backward()<line_sep># Synthetic gradient and backward
grad=dni(h2)<line_sep>h.backward(grad)<line_sep># regress
regress_loss=mse(grad Variable(h1.grad.data))<line_sep>regress_loss.backward()<line_sep># optimize
opt_net1.step()<line_sep>opt_net2.step()<line_sep>opt_dni.step()<if_stmt>(i+1)%100<eq>0<block_start>print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'%(epoch+1 num_epochs i+1 len(train_dataset)<floordiv>batch_size loss.data[0]))<block_end><block_end><block_end># Test the Model
correct=0<line_sep>total=0<for_stmt>images,labels test_loader<block_start><if_stmt>use_cuda<block_start>images=images.cuda()<line_sep>labels=labels.cuda()<block_end>images=Variable(images.view(-1 28<times>28))<line_sep>outputs=net2(net1(images))<line_sep>_,predicted=torch.max(outputs.data 1)<line_sep>total<augadd>labels.size(0)<line_sep>correct<augadd>(predicted<eq>labels).sum()<block_end>print('Accuracy of the network on the 10000 test images: %d %%'%(100<times>correct/total))<line_sep> |
<import_stmt>os<import_stmt>csv<import_stmt>shutil<import_stmt>random<import_from_stmt>PIL Image<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn optim<import_from_stmt>torch.utils.data Dataset DataLoader<import_stmt>xception_conf<as>config<import_from_stmt>model_def xception<import_from_stmt>augmentation_utils train_transform val_transform<def_stmt>save_checkpoint path state_dict epoch=0 arch="" acc1=0<block_start>new_state_dict={}<for_stmt>k,v state_dict.items()<block_start><if_stmt>k.startswith("module.")<block_start>k=k[7:]<block_end><if_stmt>torch.is_tensor(v)<block_start>v=v.cpu()<block_end>new_state_dict[k]=v<block_end>torch.save({"epoch":epoch "arch":arch "acc1":acc1 "state_dict":new_state_dict } path)<block_end><class_stmt>DFDCDataset(Dataset)<block_start><def_stmt>__init__ self data_csv required_set data_root="" ratio=(0.25 0.05) stable=<false> transform=<none><block_start>video_info=[]<line_sep>data_list=[]<with_stmt>open(data_csv)<as>fin<block_start>reader=csv.DictReader(fin)<for_stmt>row reader<block_start><if_stmt>row["set_name"]<eq>required_set<block_start>label=int(row["is_fake"])<line_sep>n_frame=int(row["n_frame"])<line_sep>select_frame=round(n_frame<times>ratio[label])<for_stmt>sample_idx range(select_frame)<block_start>data_list.append((len(video_info) sample_idx))<block_end>video_info.append({"name":row["name"] "label":label "n_frame":n_frame "select_frame":select_frame })<block_end><block_end><block_end>self.stable=stable<line_sep>self.data_root=data_root<line_sep>self.video_info=video_info<line_sep>self.data_list=data_list<line_sep>self.transform=transform<block_end><def_stmt>__getitem__ self index<block_start>video_idx,sample_idx=self.data_list[index]<line_sep>info=self.video_info[video_idx]<if_stmt>self.stable<block_start>frame_idx=info["n_frame"]<times>sample_idx<floordiv>info["select_frame"]<block_end><else_stmt><block_start>frame_idx=random.randint(0 info["n_frame"]-1)<block_end>image_path=os.path.join(self.data_root 
info["name"] "%03d.png"%frame_idx)<try_stmt><block_start>img=Image.open(image_path).convert("RGB")<block_end><except_stmt>OSError<block_start>img=np.random.randint(0 255 (320 320 3) dtype=np.uint8)<block_end><if_stmt>self.transform<is><not><none># img = self.transform(img)
<block_start>result=self.transform(image=np.array(img))<line_sep>img=result["image"]<block_end><return>img info["label"]<block_end><def_stmt>__len__ self<block_start><return>len(self.data_list)<block_end><block_end><def_stmt>main <block_start>torch.backends.cudnn.benchmark=<true><line_sep>train_dataset=DFDCDataset(config.data_list "train" config.data_root transform=train_transform)<line_sep>val_dataset=DFDCDataset(config.data_list "val" config.data_root transform=val_transform stable=<true>)<line_sep>kwargs=dict(batch_size=config.batch_size num_workers=config.num_workers shuffle=<true> pin_memory=<true>)<line_sep>train_loader=DataLoader(train_dataset **kwargs)<line_sep>val_loader=DataLoader(val_dataset **kwargs)<line_sep># Model initialization
model=xception(num_classes=2 pretrained=<none>)<if_stmt>hasattr(config "resume")<and>os.path.isfile(config.resume)<block_start>ckpt=torch.load(config.resume map_location="cpu")<line_sep>start_epoch=ckpt.get("epoch" 0)<line_sep>best_acc=ckpt.get("acc1" 0.0)<line_sep>model.load_state_dict(ckpt["state_dict"])<block_end><else_stmt><block_start>start_epoch=0<line_sep>best_acc=0.0<block_end>model=model.cuda()<line_sep>model=nn.DataParallel(model)<line_sep>criterion=nn.CrossEntropyLoss()<line_sep>optimizer=optim.SGD(model.parameters() 0.01 momentum=0.9 weight_decay=1e-4)<line_sep>scheduler=optim.lr_scheduler.StepLR(optimizer step_size=2 gamma=0.2)<line_sep>os.makedirs(config.save_dir exist_ok=<true>)<for_stmt>epoch range(config.n_epoches)<block_start><if_stmt>epoch<l>start_epoch<block_start>scheduler.step()<line_sep><continue><block_end>print("Epoch {}".format(epoch+1))<line_sep>model.train()<line_sep>loss_record=[]<line_sep>acc_record=[]<for_stmt>count,(inputs labels) enumerate(train_loader)<block_start>inputs=inputs.cuda(non_blocking=<true>)<line_sep>labels=labels.cuda(non_blocking=<true>)<line_sep>outputs=model(inputs)<line_sep>loss=criterion(outputs labels)<line_sep>optimizer.zero_grad()<line_sep>loss.backward()<line_sep>optimizer.step()<line_sep>iter_loss=loss.item()<line_sep>loss_record.append(iter_loss)<line_sep>preds=torch.argmax(outputs.data 1)<line_sep>iter_acc=torch.sum(preds<eq>labels).item()/len(preds)<line_sep>acc_record.append(iter_acc)<if_stmt>count<and>count%100<eq>0<block_start>print("T-Iter %d: loss=%.4f, acc=%.4f"%(count iter_loss iter_acc))<block_end><block_end>epoch_loss=np.mean(loss_record)<line_sep>epoch_acc=np.mean(acc_record)<line_sep>print("Training: loss=%.4f, acc=%.4f"%(epoch_loss epoch_acc))<line_sep>model.eval()<line_sep>loss_record=[]<line_sep>acc_record=[]<with_stmt>torch.no_grad()<block_start><for_stmt>count,(inputs labels) 
enumerate(val_loader)<block_start>inputs=inputs.cuda(non_blocking=<true>)<line_sep>labels=labels.cuda(non_blocking=<true>)<line_sep>outputs=model(inputs)<line_sep>preds=torch.argmax(outputs 1)<line_sep>loss=criterion(outputs labels)<line_sep>iter_loss=loss.item()<line_sep>loss_record.append(iter_loss)<line_sep>preds=torch.argmax(outputs.data 1)<line_sep>iter_acc=torch.sum(preds<eq>labels).item()/len(preds)<line_sep>acc_record.append(iter_acc)<if_stmt>count<and>count%100<eq>0<block_start>print("V-Iter %d: loss=%.4f, acc=%.4f"%(count iter_loss iter_acc))<block_end><block_end>epoch_loss=np.mean(loss_record)<line_sep>epoch_acc=np.mean(acc_record)<line_sep>print("Validation: loss=%.4f, acc=%.4f"%(epoch_loss epoch_acc))<line_sep>scheduler.step()<line_sep>ckpt_path=os.path.join(config.save_dir "ckpt-%d.pth"%epoch)<line_sep>save_checkpoint(ckpt_path model.state_dict() epoch=epoch+1 acc1=epoch_acc)<if_stmt>epoch_acc<g>best_acc<block_start>print("Best accuracy!")<line_sep>shutil.copy(ckpt_path os.path.join(config.save_dir "best.pth"))<line_sep>best_acc=epoch_acc<block_end>print()<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_stmt>os<import_stmt>re<import_stmt>subprocess<import_from_stmt>collections Counter<import_from_stmt>django.conf settings<import_from_stmt>django.core.management.base BaseCommand<import_stmt>datadog<import_from_stmt>dimagi.ext.couchdbkit Document<import_from_stmt>corehq.feature_previews all_previews<import_from_stmt>corehq.toggles all_toggles<class_stmt>DatadogLogger<block_start><def_stmt>__init__ self stdout<block_start>self.stdout=stdout<line_sep>self.datadog=os.environ.get("TRAVIS_EVENT_TYPE")<eq>'cron'<if_stmt>self.datadog<block_start>api_key=os.environ.get("DATADOG_API_KEY")<line_sep>app_key=os.environ.get("DATADOG_APP_KEY")<assert_stmt>api_key<and>app_key "DATADOG_API_KEY and DATADOG_APP_KEY must both be set"<line_sep>datadog.initialize(api_key=api_key app_key=app_key)<line_sep>self.metrics=[]<block_end><block_end><def_stmt>log self metric value tags=<none><block_start>self.stdout.write(f"{metric}: {value} {tags<or>''}")<if_stmt>self.datadog<block_start>self.metrics.append({'metric':metric 'points':value 'type':"gauge" 'host':"travis-ci.org" 'tags':["environment:travis" f"travis_build:{os.environ.get('TRAVIS_BUILD_ID')}" f"travis_number:{os.environ.get('TRAVIS_BUILD_NUMBER')}" f"travis_job_number:{os.environ.get('TRAVIS_JOB_NUMBER')}" ]+(tags<or>[]) })<block_end><block_end><def_stmt>send_all self<block_start><if_stmt>self.datadog<block_start>datadog.api.Metric.send(self.metrics)<line_sep>self.metrics=[]<block_end><block_end><block_end><class_stmt>Command(BaseCommand)<block_start>help=("Display a variety of code-quality metrics. 
This is run on every travis "<concat>"build, but only submitted to datadog during the daily cron job.")<def_stmt>handle self **options<block_start>self.stdout.write("----------> Begin Static Analysis <----------")<line_sep>self.logger=DatadogLogger(self.stdout)<line_sep>self.show_couch_model_count()<line_sep>self.show_custom_modules()<line_sep>self.show_js_dependencies()<line_sep>self.show_toggles()<line_sep>self.show_complexity()<line_sep>self.logger.send_all()<line_sep>self.stdout.write("----------> End Static Analysis <----------")<block_end><def_stmt>show_couch_model_count self<block_start><def_stmt>all_subclasses cls<block_start><return>set(cls.__subclasses__()).union([s<for>c cls.__subclasses__()<for>s all_subclasses(c)])<block_end>model_count=len(all_subclasses(Document))<line_sep>self.logger.log("commcare.static_analysis.couch_model_count" model_count)<block_end><def_stmt>show_custom_modules self<block_start>custom_module_count=len(set(settings.DOMAIN_MODULE_MAP.values()))<line_sep>custom_domain_count=len(settings.DOMAIN_MODULE_MAP)<line_sep>self.logger.log("commcare.static_analysis.custom_module_count" custom_module_count)<line_sep>self.logger.log("commcare.static_analysis.custom_domain_count" custom_domain_count)<block_end><def_stmt>show_js_dependencies self<block_start>proc=subprocess.Popen(["./scripts/codechecks/hqDefine.sh" "static-analysis"] stdout=subprocess.PIPE)<line_sep>output=proc.communicate()[0].strip().decode("utf-8")<line_sep>(step1 step2 step3)=output.split(" ")<line_sep>self.logger.log("commcare.static_analysis.hqdefine_file_count" int(step1) tags=['status:unmigrated' ])<line_sep>self.logger.log("commcare.static_analysis.hqdefine_file_count" int(step2) tags=['status:hqdefine_only' ])<line_sep>self.logger.log("commcare.static_analysis.requirejs_file_count" int(step3) tags=['status:migrated' ])<block_end><def_stmt>show_toggles self<block_start>counts=Counter(t.tag.name<for>t all_toggles()+all_previews())<for_stmt>tag,count 
counts.items()<block_start>self.logger.log("commcare.static_analysis.toggle_count" count [f"toggle_tag:{tag}"])<block_end><block_end><def_stmt>show_complexity self# We can use `--json` for more granularity, but it doesn't provide a summary
<block_start>output=subprocess.run(["radon" "cc" "." "--min=C" "--total-average" "--exclude=node_modules/*,staticfiles/*" ] stdout=subprocess.PIPE).stdout.decode('utf-8').strip()<line_sep>raw_blocks,raw_complexity=output.split('\n')[-2:]<line_sep>blocks_pattern=r'^(\d+) blocks \(classes, functions, methods\) analyzed.$'<line_sep>blocks=int(re.match(blocks_pattern raw_blocks).group(1))<line_sep>self.logger.log("commcare.static_analysis.code_blocks" blocks)<line_sep>complexity_pattern=r'^Average complexity: A \(([\d.]+)\)$'<line_sep>complexity=round(float(re.match(complexity_pattern raw_complexity).group(1)) 3)<line_sep>self.logger.log("commcare.static_analysis.avg_complexity" complexity)<for_stmt>grade ["C" "D" "E" "F"]<block_start>count=len(re.findall(f" - {grade}\n" output))<line_sep>self.logger.log("commcare.static_analysis.complex_block_count" count tags=[f"complexity_grade:{grade}"] )<block_end><block_end><block_end> |
<import_stmt>sqlite3<import_from_stmt>scripts.artifact_report ArtifactHtmlReport<import_from_stmt>scripts.ilapfuncs logfunc tsv open_sqlite_db_readonly<def_stmt>get_installedappsGass files_found report_folder seeker wrap_text<block_start><for_stmt>file_found files_found<block_start>file_found=str(file_found)<if_stmt>file_found.endswith('.db')<block_start>db=open_sqlite_db_readonly(file_found)<line_sep>cursor=db.cursor()<line_sep>cursor.execute('''
SELECT
distinct(package_name)
FROM
app_info
''')<if_stmt>'user'<in>file_found<block_start>usernum=file_found.split("/")<line_sep>usernum='_'+str(usernum[-4])<block_end><else_stmt><block_start>usernum=''<block_end>all_rows=cursor.fetchall()<line_sep>usageentries=len(all_rows)<if_stmt>usageentries<g>0<block_start>report=ArtifactHtmlReport('Installed Apps')<line_sep>report.start_artifact_report(report_folder f'Installed Apps (GMS){usernum}')<line_sep>report.add_script()<line_sep>data_headers=('Bundle ID' )# Don't remove the comma, that is required to make this a tuple as there is only 1 element
data_list=[]<for_stmt>row all_rows<block_start>data_list.append((row[0] ))<block_end>report.write_artifact_data_table(data_headers data_list file_found)<line_sep>report.end_artifact_report()<line_sep>tsvname=f'installed apps - GMS{usernum}'<line_sep>tsv(report_folder data_headers data_list tsvname)<block_end><else_stmt><block_start>logfunc('No Installed Apps data available{usernum}')<block_end>db.close()<block_end><block_end><block_end> |
<import_from_future_stmt> division<import_stmt>torch<import_from_stmt>mmcv.parallel MMDataParallel<import_from_stmt>..datasets build_dataloader<import_from_stmt>.env get_root_logger<def_stmt>test_fashion_recommender model dataset cfg distributed=<false> validate=<false> logger=<none><block_start><if_stmt>logger<is><none><block_start>logger=get_root_logger(cfg.log_level)<line_sep># start testing predictor
<block_end><if_stmt>distributed# to do
<block_start>_dist_test(model dataset cfg validate=validate)<block_end><else_stmt><block_start>_non_dist_test(model dataset cfg validate=validate)<block_end><block_end><def_stmt>_process_embeds dataset model cfg<block_start>data_loader=build_dataloader(dataset cfg.data.imgs_per_gpu cfg.data.workers_per_gpu len(cfg.gpus.test) dist=<false> shuffle=<false>)<line_sep>print('dataloader built')<line_sep>embeds=[]<with_stmt>torch.no_grad()<block_start><for_stmt>data data_loader<block_start>embed=model(data['img'] return_loss=<false>)<line_sep>embeds.append(embed.data.cpu())<block_end><block_end>embeds=torch.cat(embeds)<line_sep><return>embeds<block_end><def_stmt>_non_dist_test model dataset cfg validate=<false><block_start>model=MMDataParallel(model device_ids=cfg.gpus.test).cuda()<line_sep>model.eval()<line_sep>embeds=_process_embeds(dataset model cfg)<line_sep>metric=model.module.triplet_net.metric_branch<line_sep># compatibility auc
auc=dataset.test_compatibility(embeds metric)<line_sep># fill-in-blank accuracy
acc=dataset.test_fitb(embeds metric)<line_sep>print('Compat AUC: {:.2f} FITB: {:.1f}\n'.format(round(auc 2) round(acc<times>100 1)))<block_end><def_stmt>_dist_test model dataset cfg validate=<false><block_start><raise>NotImplementedError<block_end> |
<import_from_stmt>google.appengine.ext ndb<line_sep>CACHE_DATA={}<def_stmt>get cache_key<block_start>full_cache_key='{}:{}'.format(cache_key ndb.get_context().__hash__())<line_sep><return>CACHE_DATA.get(full_cache_key <none>)<block_end><def_stmt>set cache_key value<block_start>full_cache_key='{}:{}'.format(cache_key ndb.get_context().__hash__())<line_sep>CACHE_DATA[full_cache_key]=value<block_end> |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
<import_from_stmt>typing Tuple<import_stmt>pandas<as>pd<import_from_stmt>.configs InferConfig ModelConfig<import_from_stmt>.null_mixture_model NullMixtureMixedEffectModel<class_stmt>HME<block_start>"""The Hierarchical Mixed Effect model interface.
:param data: observed train data
:param model_config: HME model configuration parameters
"""<def_stmt>__init__ self data:pd.DataFrame model_config:ModelConfig<arrow><none><block_start>self.model=NullMixtureMixedEffectModel(data model_config)<line_sep>self.posterior_samples=<none><line_sep>self.posterior_diagnostics=<none><block_end><def_stmt>infer self infer_config:InferConfig<arrow>Tuple[pd.DataFrame]<block_start>"""Performs MCMC posterior inference on HME model parameters and
returns MCMC samples for those parameters registered in the query.
:param infer_config: configuration settings of posterior inference
:return: posterior samples and their diagnostic summary statistics
"""<line_sep>self.posterior_samples,self.posterior_diagnostics=self.model.infer(infer_config)<line_sep><return>self.posterior_samples self.posterior_diagnostics<block_end><def_stmt>predict self new_data:pd.DataFrame<arrow>pd.DataFrame<block_start>"""Computes predictive distributions on the new test data according to
MCMC posterior samples.
:param new_data: test data for prediction
:return: predictive distributions on the new test data
"""<line_sep><return>self.model.predict(new_data self.posterior_samples)<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>mlflow.utils.string_utils strip_prefix strip_suffix is_string_type<line_sep>@pytest.mark.parametrize("original,prefix,expected" [("smoketest" "smoke" "test") ("" "test" "") ("" "" "") ("test" "" "test")] )<def_stmt>test_strip_prefix original prefix expected<block_start><assert_stmt>strip_prefix(original prefix)<eq>expected<block_end>@pytest.mark.parametrize("original,suffix,expected" [("smoketest" "test" "smoke") ("" "test" "") ("" "" "") ("test" "" "test")] )<def_stmt>test_strip_suffix original suffix expected<block_start><assert_stmt>strip_suffix(original suffix)<eq>expected<block_end><def_stmt>test_is_string_type <block_start><assert_stmt>is_string_type("validstring")<assert_stmt>is_string_type("")<assert_stmt>is_string_type((b"dog").decode("utf-8"))<assert_stmt><not>is_string_type(<none>)<assert_stmt><not>is_string_type(["teststring"])<assert_stmt><not>is_string_type([])<assert_stmt><not>is_string_type({})<assert_stmt><not>is_string_type({"test":"string"})<assert_stmt><not>is_string_type(12)<assert_stmt><not>is_string_type(12.7)<block_end> |
<import_stmt>os<import_stmt>sys<import_from_stmt>gradslam.config CfgNode<as>CN<line_sep>cfg=CN()<line_sep>cfg.TRAIN=CN()<line_sep>cfg.TRAIN.HYPERPARAM_1=0.9<line_sep> |
<import_stmt>numpy<import_from_stmt>chainer functions<import_from_stmt>chainer testing<import_from_stmt>chainer utils<line_sep>@testing.parameterize(*testing.product({'shape':[(3 4) ()] 'dtype':[numpy.float16 numpy.float32 numpy.float64] }))@testing.fix_random()@testing.inject_backend_tests(<none> # CPU tests
[{} ]# GPU tests
+testing.product({'use_cuda':[<true>] 'cuda_device':[0 1] })# ChainerX tests
+testing.product({'use_chainerx':[<true>] 'chainerx_device':['native:0' 'cuda:0' 'cuda:1'] }))<class_stmt>TestLinearInterpolate(testing.FunctionTestCase)<block_start><def_stmt>setUp self<block_start><if_stmt>self.dtype<eq>numpy.float16<block_start>self.check_forward_options.update({'atol':1e-3 'rtol':1e-3})<line_sep>self.check_backward_options.update({'atol':5e-4 'rtol':5e-3})<line_sep>self.check_double_backward_options.update({'atol':5e-3 'rtol':5e-2})<block_end><block_end><def_stmt>generate_inputs self<block_start>p=numpy.random.uniform(0 1 self.shape).astype(self.dtype)<line_sep>x=numpy.random.uniform(-1 1 self.shape).astype(self.dtype)<line_sep>y=numpy.random.uniform(-1 1 self.shape).astype(self.dtype)<line_sep><return>p x y<block_end><def_stmt>forward self inputs device<block_start>p,x,y=inputs<line_sep>ret=functions.linear_interpolate(p x y)<line_sep>ret=functions.cast(ret numpy.float64)<line_sep><return>ret <block_end><def_stmt>forward_expected self inputs<block_start>p,x,y=inputs<line_sep>expected=p<times>x+(1-p)<times>y<line_sep>expected=utils.force_array(expected dtype=numpy.float64)<line_sep><return>expected <block_end><block_end>testing.run_module(__name__ __file__)<line_sep> |
# pylint: disable=wildcard-import
"""
Customized data loader for video classification related tasks.
"""<import_from_future_stmt> absolute_import<import_from_stmt>.classification *<line_sep> |
<import_from_future_stmt> annotations<import_stmt>os<import_from_stmt>datetime datetime<import_from_stmt>twisted.python log<import_stmt>cowrie.core.output<import_from_stmt>cowrie.core.config CowrieConfig<line_sep>token=CowrieConfig.get("output_csirtg" "token" fallback="<PASSWORD>")<if_stmt>token<eq>"<PASSWORD>"<block_start>log.msg("output_csirtg: token not found in configuration file")<line_sep>exit(1)<block_end>os.environ["CSIRTG_TOKEN"]=token<import_stmt>csirtgsdk# noqa: E402
<class_stmt>Output(cowrie.core.output.Output)<block_start>"""
CSIRTG output
"""<def_stmt>start self<block_start>"""
Start the output module.
Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
Cowrie sets this environment variable.
"""<line_sep>self.user=CowrieConfig.get("output_csirtg" "username")<line_sep>self.feed=CowrieConfig.get("output_csirtg" "feed")<line_sep>self.debug=CowrieConfig.getboolean("output_csirtg" "debug" fallback=<false>)<line_sep>self.description=CowrieConfig.get("output_csirtg" "description")<line_sep>self.context={}<line_sep># self.client = csirtgsdk.client.Client()
<block_end><def_stmt>stop self<block_start><pass><block_end><def_stmt>write self e<block_start>"""
Only pass on connection events
"""<if_stmt>e["eventid"]<eq>"cowrie.session.connect"<block_start>self.submitIp(e)<block_end><block_end><def_stmt>submitIp self e<block_start>peerIP=e["src_ip"]<line_sep>ts=e["timestamp"]<line_sep>system=e.get("system" <none>)<if_stmt>system<not><in>["cowrie.ssh.factory.CowrieSSHFactory" "cowrie.telnet.transport.HoneyPotTelnetFactory" ]<block_start><return><block_end>today=str(datetime.now().date())<if_stmt><not>self.context.get(today)<block_start>self.context={}<line_sep>self.context[today]=set()<block_end>key=",".join([peerIP system])<if_stmt>key<in>self.context[today]<block_start><return><block_end>self.context[today].add(key)<line_sep>tags="scanner,ssh"<line_sep>port=22<if_stmt>e["system"]<eq>"cowrie.telnet.transport.HoneyPotTelnetFactory"<block_start>tags="scanner,telnet"<line_sep>port=23<block_end>i={"user":self.user "feed":self.feed "indicator":peerIP "portlist":port "protocol":"tcp" "tags":tags "firsttime":ts "lasttime":ts "description":self.description }<if_stmt>self.debug<is><true><block_start>log.msg(f"output_csirtg: Submitting {i!r} to CSIRTG")<block_end>ind=csirtgsdk.indicator.Indicator(i).submit()<if_stmt>self.debug<is><true><block_start>log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")<block_end>log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))<block_end><block_end> |
# coding: utf-8
<import_stmt>urllib<import_from_stmt>flask_restful fields<import_from_stmt>flask_restful.fields *<class_stmt>BlobKey(fields.Raw)<block_start><def_stmt>format self value<block_start><return>urllib.quote(str(value))<block_end><block_end><class_stmt>Blob(fields.Raw)<block_start><def_stmt>format self value<block_start><return>repr(value)<block_end><block_end><class_stmt>DateTime(fields.DateTime)<block_start><def_stmt>format self value<block_start><return>value.isoformat()<block_end><block_end><class_stmt>GeoPt(fields.Raw)<block_start><def_stmt>format self value<block_start><return>'%s,%s'%(value.lat value.lon)<block_end><block_end><class_stmt>Id(fields.Raw)<block_start><def_stmt>output self key obj<block_start><try_stmt><block_start>value=getattr(obj 'key' <none>).id()<line_sep><return>super(Id self).output(key {'id':value})<block_end><except_stmt>AttributeError<block_start><return><none><block_end><block_end><block_end><class_stmt>Integer(fields.Integer)<block_start><def_stmt>format self value<block_start><if_stmt>value<g>9007199254740992<or>value<l>-9007199254740992<block_start><return>str(value)<block_end><return>value<block_end><block_end><class_stmt>Key(fields.Raw)<block_start><def_stmt>format self value<block_start><return>value.urlsafe()<block_end><block_end> |
<import_from_future_stmt> unicode_literals<import_stmt>os<import_stmt>requests<import_from_stmt>six.moves.urllib.parse parse_qs<import_from_stmt>.constants XERO_FILES_URL<import_from_stmt>.exceptions XeroBadRequest XeroExceptionUnknown XeroForbidden XeroInternalError XeroNotAvailable XeroNotFound XeroNotImplemented XeroRateLimitExceeded XeroUnauthorized XeroUnsupportedMediaType <class_stmt>FilesManager(object)<block_start>DECORATED_METHODS=("get" "all" "create" "save" "delete" "get_files" "upload_file" "get_association" "get_associations" "make_association" "delete_association" "get_content" )<def_stmt>__init__ self name credentials<block_start>self.credentials=credentials<line_sep>self.name=name<line_sep>self.base_url=credentials.base_url+XERO_FILES_URL<for_stmt>method_name self.DECORATED_METHODS<block_start>method=getattr(self "_%s"%method_name)<line_sep>setattr(self method_name self._get_data(method))<block_end><block_end><def_stmt>_get_results self data<block_start>response=data["Response"]<if_stmt>self.name<in>response<block_start>result=response[self.name]<block_end><elif_stmt>"Attachments"<in>response<block_start>result=response["Attachments"]<block_end><else_stmt><block_start><return><none><block_end><if_stmt>isinstance(result tuple)<or>isinstance(result list)<block_start><return>result<block_end><if_stmt>isinstance(result dict)<and>self.singular<in>result<block_start><return>result[self.singular]<block_end><block_end><def_stmt>_get_data self func<block_start>""" This is the decorator for our DECORATED_METHODS.
Each of the decorated methods must return:
uri, params, method, body, headers, singleobject
"""<def_stmt>wrapper *args **kwargs<block_start>uri,params,method,body,headers,singleobject,files=func(*args **kwargs)<line_sep>response=getattr(requests method)(uri data=body headers=headers auth=self.credentials.oauth params=params files=files )<if_stmt>response.status_code<eq>200<or>response.status_code<eq>201<block_start><if_stmt>response.headers["content-type"].startswith("application/json")<block_start><return>response.json()<block_end><else_stmt># return a byte string without doing any Unicode conversions
<block_start><return>response.content<block_end><block_end># Delete will return a response code of 204 - No Content
<elif_stmt>response.status_code<eq>204<block_start><return>"Deleted"<block_end><elif_stmt>response.status_code<eq>400<block_start><raise>XeroBadRequest(response)<block_end><elif_stmt>response.status_code<eq>401<block_start><raise>XeroUnauthorized(response)<block_end><elif_stmt>response.status_code<eq>403<block_start><raise>XeroForbidden(response)<block_end><elif_stmt>response.status_code<eq>404<block_start><raise>XeroNotFound(response)<block_end><elif_stmt>response.status_code<eq>415<block_start><raise>XeroUnsupportedMediaType(response)<block_end><elif_stmt>response.status_code<eq>500<block_start><raise>XeroInternalError(response)<block_end><elif_stmt>response.status_code<eq>501<block_start><raise>XeroNotImplemented(response)<block_end><elif_stmt>response.status_code<eq>503# Two 503 responses are possible. Rate limit errors
# return encoded content; offline errors don't.
# If you parse the response text and there's nothing
# encoded, it must be a not-available error.
<block_start>payload=parse_qs(response.text)<if_stmt>payload<block_start><raise>XeroRateLimitExceeded(response payload)<block_end><else_stmt><block_start><raise>XeroNotAvailable(response)<block_end><block_end><else_stmt><block_start><raise>XeroExceptionUnknown(response)<block_end><block_end><return>wrapper<block_end><def_stmt>_get self id headers=<none><block_start>uri="/".join([self.base_url self.name id])<line_sep><return>uri {} "get" <none> headers <true> <none><block_end><def_stmt>_get_files self folderId<block_start>"""Retrieve the list of files contained in a folder"""<line_sep>uri="/".join([self.base_url self.name folderId "Files"])<line_sep><return>uri {} "get" <none> <none> <false> <none><block_end><def_stmt>_get_associations self id<block_start>uri="/".join([self.base_url self.name id "Associations"])+"/"<line_sep><return>uri {} "get" <none> <none> <false> <none><block_end><def_stmt>_get_association self fileId objectId<block_start>uri="/".join([self.base_url self.name fileId "Associations" objectId])<line_sep><return>uri {} "get" <none> <none> <false> <none><block_end><def_stmt>_delete_association self fileId objectId<block_start>uri="/".join([self.base_url self.name fileId "Associations" objectId])<line_sep><return>uri {} "delete" <none> <none> <false> <none><block_end><def_stmt>create_or_save self data method="post" headers=<none> summarize_errors=<true><block_start><if_stmt>"Id"<not><in>data<block_start>uri="/".join([self.base_url self.name])<block_end><else_stmt><block_start>uri="/".join([self.base_url self.name data["Id"]])<block_end>body=data<if_stmt>summarize_errors<block_start>params={}<block_end><else_stmt><block_start>params={"summarizeErrors":"false"}<block_end><return>uri params method body headers <false> <none><block_end><def_stmt>_create self data<block_start><return>self.create_or_save(data method="post")<block_end><def_stmt>_save self data summarize_errors=<true><block_start><return>self.create_or_save(data method="put" 
summarize_errors=summarize_errors)<block_end><def_stmt>_delete self id<block_start>uri="/".join([self.base_url self.name id])<line_sep><return>uri {} "delete" <none> <none> <false> <none><block_end><def_stmt>_upload_file self path folderId=<none><block_start><if_stmt>folderId<is><not><none><block_start>uri="/".join([self.base_url self.name folderId])<block_end><else_stmt><block_start>uri="/".join([self.base_url self.name])<block_end>filename=self.filename(path)<line_sep>files=dict()<line_sep>files[filename]=open(path mode="rb")<line_sep><return>uri {} "post" <none> <none> <false> files<block_end><def_stmt>_get_content self fileId<block_start>uri="/".join([self.base_url self.name fileId "Content"])<line_sep><return>uri {} "get" <none> <none> <false> <none><block_end><def_stmt>_make_association self id data<block_start>uri="/".join([self.base_url self.name id "Associations"])<line_sep>body=data<line_sep><return>uri {} "post" body <none> <false> <none><block_end><def_stmt>_all self<block_start>uri="/".join([self.base_url self.name])<line_sep><return>uri {} "get" <none> <none> <false> <none><block_end><def_stmt>filename self path<block_start>head,tail=os.path.split(path)<line_sep><return>tail<or>os.path.basename(head)<block_end><block_end> |
<import_stmt>pandas<as>pd<import_from_stmt>ml.preprocessing.normalization Normalizer<import_from_stmt>category_encoders *<import_stmt>logging<line_sep>logging.getLogger().setLevel(logging.INFO)<class_stmt>Preprocessing<block_start>"""
Class to perform data preprocessing before training
"""<def_stmt>clean_data self df:pd.DataFrame<block_start>"""
Perform data cleansing.
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""<line_sep>logging.info("Cleaning data")<line_sep>df_copy=df.copy()<line_sep>df_copy['Pclass']=df_copy.Pclass.astype('object')<line_sep>df_copy=df_copy.dropna()<line_sep><return>df_copy<block_end><def_stmt>categ_encoding self df:pd.DataFrame<block_start>"""
Perform encoding of the categorical variables
Parameters
----------
df : pd.Dataframe
Dataframe to be processed
Returns
-------
pd.Dataframe
Cleaned Data Frame
"""<line_sep>logging.info("Category encoding")<line_sep>df_copy=df.copy()<line_sep>df_copy=pd.get_dummies(df_copy)<line_sep><return>df_copy<block_end><block_end> |
<import_from_stmt>typing Dict List<def_stmt>_wrap_text tag<block_start><return>tag.text<if>tag<else>''<block_end><def_stmt>parse_authors authors_tag<arrow>List<block_start>"""The PMC XML has a slightly different format than authors listed in front tag."""<if_stmt><not>authors_tag<block_start><return>[]<block_end>authors=[]<for_stmt>name_tag authors_tag.find_all('name' recursive=<false>)<block_start>surname=name_tag.find('surname')<line_sep>given_names=name_tag.find('given-names')<line_sep>given_names=given_names.text.split(' ')<if>given_names<else><none><line_sep>suffix=name_tag.find('suffix')<line_sep>authors.append({'first':given_names[0]<if>given_names<else>'' 'middle':given_names[1:]<if>given_names<else>[] 'last':surname.text<if>surname<else>'' 'suffix':suffix.text<if>suffix<else>''})<block_end><return>authors<block_end><def_stmt>parse_bib_entries back_tag<arrow>Dict<block_start>bib_entries={}<line_sep># TODO: PMC2778891 does not have 'ref-list' in its back_tag. do we even need this, or can directly .find_all('ref')?
ref_list_tag=back_tag.find('ref-list')<if_stmt>ref_list_tag<block_start><for_stmt>ref_tag ref_list_tag.find_all('ref')# The ref ID and label are semantically swapped between CORD-19 and PMC, lol
<block_start>ref_label=ref_tag['id']<line_sep>ref_id=ref_tag.find('label')<line_sep>authors_tag=ref_tag.find('person-group' {'person-group-type':'author'})<line_sep>year=ref_tag.find('year')<line_sep>fpage=ref_tag.find('fpage')<line_sep>lpage=ref_tag.find('lpage')<line_sep>pages=f'{fpage.text}-{lpage.text}'<if>fpage<and>lpage<else><none><line_sep>dois=[tag.text<for>tag ref_tag.find_all('pub-id' {'pub-id-type':'doi'})]<line_sep>bib_entries[ref_label]={'ref_id':_wrap_text(ref_id) 'title':_wrap_text(ref_tag.find('article-title')) 'authors':parse_authors(authors_tag) 'year':int(year.text)<if>year<and>year.text.isdigit()<else><none> 'venue':_wrap_text(ref_tag.find('source')) 'volume':_wrap_text(ref_tag.find('volume')) 'issn':_wrap_text(ref_tag.find('issue')) 'pages':pages 'other_ids':{'DOI':dois }}<block_end><block_end><return>bib_entries<block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<line_sep>range=getattr(__builtins__ 'xrange' range)<line_sep># end of py2 compatability boilerplate
<import_stmt>numpy<as>np<import_from_stmt>matrixprofile core<import_from_stmt>matrixprofile.io.protobuf.proto_messages_pb2 Location Motif MPFOutput <def_stmt>get_matrix_attributes matrix<block_start>"""
Utility function to extract the rows, cols and flattened array from a
numpy array so it can be stored in the MPFOutput protobuf message.
Parameters
----------
matrix : np.ndarray
The numpy array to extract the attributes from.
Returns
-------
tuple :
A tuple containing the rows, cols and flattened array.
"""<if_stmt><not>core.is_array_like(matrix)<or>len(matrix)<l>1<block_start><return><none> <none> <none><block_end>rows=matrix.shape[0]<line_sep>cols=0<if_stmt>len(matrix.shape)<g>1<block_start>cols=matrix.shape[1]<block_end><return>rows cols matrix.flatten()<block_end><def_stmt>get_windows profile<block_start>"""
Utility function to format the windows from a profile structure ensuring
that the windows are in an array.
Parameters
----------
profile : dict
The MatrixProfile or PMP profile.
Returns
-------
list :
The window(s) in a list.
"""<line_sep>windows=[]<if_stmt>core.is_mp_obj(profile)<block_start>windows.append(profile.get('w'))<block_end><elif_stmt>core.is_pmp_obj(profile)<block_start>windows=profile.get('windows')<block_end><return>windows<block_end><def_stmt>get_proto_motif motif<block_start>"""
Utility function to convert a motif from a MatrixProfile or PMP structure
ensuring that it is compatible with the MPFOutput message.
Note
----
A single dimensional motif location will only have a row index and
a column index of 0.
Parameters
----------
motif : dict
The motif to convert.
Returns
-------
Motif :
The motif object for MPFOutput message.
"""<line_sep>out_motif=Motif()<for_stmt>indices motif['motifs']<block_start>tmp=Location()<line_sep>tmp.row=0<line_sep>tmp.col=0<line_sep># handle single integer location
<if_stmt>core.is_array_like(indices)<block_start>tmp.row=indices[0]<line_sep>tmp.col=indices[1]<block_end><else_stmt><block_start>tmp.row=indices<block_end>out_motif.motifs.append(tmp)<block_end><for_stmt>neighbor motif['neighbors']<block_start>tmp=Location()<line_sep>tmp.row=0<line_sep>tmp.col=0<line_sep># handle single integer location
<if_stmt>core.is_array_like(neighbor)<block_start>tmp.row=neighbor[0]<line_sep>tmp.col=neighbor[1]<block_end><else_stmt><block_start>tmp.row=neighbor<block_end>out_motif.neighbors.append(tmp)<block_end><return>out_motif<block_end><def_stmt>get_proto_discord discord<block_start>"""
Utility function to convert a discord into the MPFOutput message
format.
Note
----
A single dimensional discord location will only have a row index and
a column index of 0.
Parameters
----------
discord : int or tuple
The discord with row, col index or single index.
Returns
-------
Location :
The Location message used in the MPFOutput protobuf message.
"""<line_sep>out_discord=Location()<line_sep>out_discord.row=0<line_sep>out_discord.col=0<if_stmt>core.is_array_like(discord)<block_start>out_discord.row=discord[0]<line_sep>out_discord.col=discord[1]<block_end><else_stmt><block_start>out_discord.row=discord<block_end><return>out_discord<block_end><def_stmt>profile_to_proto profile<block_start>"""
Utility function that takes a MatrixProfile or PMP profile data structure
and converts it to the MPFOutput protobuf message object.
Parameters
----------
profile : dict
The profile to convert.
Returns
-------
MPFOutput :
The MPFOutput protobuf message object.
"""<line_sep>output=MPFOutput()<line_sep># add higher level attributes that work for PMP and MP
output.klass=profile.get('class')<line_sep>output.algorithm=profile.get('algorithm')<line_sep>output.metric=profile.get('metric')<line_sep>output.sample_pct=profile.get('sample_pct')<line_sep># add time series data
ts=profile.get('data').get('ts')<line_sep>query=profile.get('data').get('query')<line_sep>rows,cols,data=get_matrix_attributes(ts)<line_sep>output.ts.rows=rows<line_sep>output.ts.cols=cols<line_sep>output.ts.data.extend(data)<line_sep># add query data
query=profile.get('data').get('query')<line_sep>rows,cols,data=get_matrix_attributes(query)<if_stmt>rows<and>cols<and>core.is_array_like(data)<block_start>output.query.rows=rows<line_sep>output.query.cols=cols<line_sep>output.query.data.extend(data)<block_end># add window(s)
output.windows.extend(get_windows(profile))<line_sep># add motifs
motifs=profile.get('motifs')<if_stmt><not>isinstance(motifs type(<none>))<block_start><for_stmt>motif motifs<block_start>output.motifs.append(get_proto_motif(motif))<block_end><block_end># add discords
discords=profile.get('discords')<if_stmt><not>isinstance(discords type(<none>))<block_start><for_stmt>discord discords<block_start>output.discords.append(get_proto_discord(discord))<block_end><block_end># add cmp
cmp=profile.get('cmp')<if_stmt><not>isinstance(cmp type(<none>))<block_start>rows,cols,data=get_matrix_attributes(cmp)<line_sep>output.cmp.rows=rows<line_sep>output.cmp.cols=cols<line_sep>output.cmp.data.extend(data)<block_end># add av
av=profile.get('av')<if_stmt><not>isinstance(av type(<none>))<block_start>rows,cols,data=get_matrix_attributes(av)<line_sep>output.av.rows=rows<line_sep>output.av.cols=cols<line_sep>output.av.data.extend(data)<block_end># add av_type
av_type=profile.get('av_type')<if_stmt><not>isinstance(av_type type(<none>))<and>len(av_type)<g>0<block_start>output.av_type=av_type<block_end># add the matrix profile specific attributes
<if_stmt>core.is_mp_obj(profile)<block_start>output.mp.ez=profile.get('ez')<line_sep>output.mp.join=profile.get('join')<line_sep># add mp
rows,cols,data=get_matrix_attributes(profile.get('mp'))<line_sep>output.mp.mp.rows=rows<line_sep>output.mp.mp.cols=cols<line_sep>output.mp.mp.data.extend(data)<line_sep># add pi
rows,cols,data=get_matrix_attributes(profile.get('pi'))<line_sep>output.mp.pi.rows=rows<line_sep>output.mp.pi.cols=cols<line_sep>output.mp.pi.data.extend(data)<line_sep># add lmp
rows,cols,data=get_matrix_attributes(profile.get('lmp'))<if_stmt>rows<and>cols<and>core.is_array_like(data)<block_start>output.mp.lmp.rows=rows<line_sep>output.mp.lmp.cols=cols<line_sep>output.mp.lmp.data.extend(data)<block_end># add lpi
rows,cols,data=get_matrix_attributes(profile.get('lpi'))<if_stmt>rows<and>cols<and>core.is_array_like(data)<block_start>output.mp.lpi.rows=rows<line_sep>output.mp.lpi.cols=cols<line_sep>output.mp.lpi.data.extend(data)<block_end># add rmp
rows,cols,data=get_matrix_attributes(profile.get('rmp'))<if_stmt>rows<and>cols<and>core.is_array_like(data)<block_start>output.mp.rmp.rows=rows<line_sep>output.mp.rmp.cols=cols<line_sep>output.mp.rmp.data.extend(data)<block_end># add rpi
rows,cols,data=get_matrix_attributes(profile.get('rpi'))<if_stmt>rows<and>cols<and>core.is_array_like(data)<block_start>output.mp.rpi.rows=rows<line_sep>output.mp.rpi.cols=cols<line_sep>output.mp.rpi.data.extend(data)<block_end><block_end># add the pan matrix profile specific attributes
<elif_stmt>core.is_pmp_obj(profile)# add pmp
<block_start>rows,cols,data=get_matrix_attributes(profile.get('pmp'))<line_sep>output.pmp.pmp.rows=rows<line_sep>output.pmp.pmp.cols=cols<line_sep>output.pmp.pmp.data.extend(data)<line_sep># add pmpi
rows,cols,data=get_matrix_attributes(profile.get('pmpi'))<line_sep>output.pmp.pmpi.rows=rows<line_sep>output.pmp.pmpi.cols=cols<line_sep>output.pmp.pmpi.data.extend(data)<block_end><else_stmt><block_start><raise>ValueError('Expecting Pan-MatrixProfile or MatrixProfile!')<block_end><return>output<block_end><def_stmt>to_mpf profile<block_start>"""
Converts a given profile object into MPF binary file format.
Parameters
----------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
Returns
-------
str :
The profile as a binary formatted string.
"""<line_sep>obj=profile_to_proto(profile)<line_sep><return>obj.SerializeToString()<block_end><def_stmt>from_proto_to_array value<block_start>"""
Utility function to convert a protobuf array back into the correct
dimensions.
Parameters
----------
value : array_like
The array to transform.
Returns
-------
np.ndarray :
The transformed array.
"""<if_stmt>isinstance(value type(<none>))<or>len(value.data)<l>1<block_start><return><none><block_end>shape=(value.rows value.cols)<line_sep>out=np.array(value.data)<if_stmt>shape[1]<g>0<block_start>out=out.reshape(shape)<block_end><return>out<block_end><def_stmt>discords_from_proto discords is_one_dimensional=<false><block_start>"""
Utility function to transform discord locations back to single dimension
or multi-dimension location.
Parameter
---------
discords : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
np.ndarray :
The transformed discord locations.
"""<line_sep>out=[]<for_stmt>discord discords<block_start><if_stmt>is_one_dimensional<block_start>out.append(discord.row)<block_end><else_stmt><block_start>out.append((discord.row discord.col))<block_end><block_end><return>np.array(out dtype=int)<block_end><def_stmt>motifs_from_proto motifs is_one_dimensional=<false><block_start>"""
Utility function to transform motif locations back to single dimension
or multi-dimension location.
Parameter
---------
motifs : array_like
The protobuf formatted array.
is_one_dimensional : boolean
A flag to indicate if the original locations should be 1D.
Returns
-------
list :
The transformed motif locations.
"""<line_sep>out=[]<for_stmt>motif motifs<block_start>tmp={'motifs':[] 'neighbors':[]}<for_stmt>location motif.motifs<block_start><if_stmt>is_one_dimensional<block_start>tmp['motifs'].append(location.row)<block_end><else_stmt><block_start>tmp['motifs'].append((location.row location.col))<block_end><block_end><for_stmt>neighbor motif.neighbors<block_start><if_stmt>is_one_dimensional<block_start>tmp['neighbors'].append(neighbor.row)<block_end><else_stmt><block_start>tmp['neighbors'].append((neighbor.row neighbor.col))<block_end><block_end>out.append(tmp)<block_end><return>out<block_end><def_stmt>from_mpf profile<block_start>"""
Converts binary formatted MPFOutput message into a profile data structure.
Parameters
----------
profile : str
The profile as a binary formatted MPFOutput message.
Returns
-------
profile : dict_like
A MatrixProfile or Pan-MatrixProfile data structure.
"""<line_sep>obj=MPFOutput()<line_sep>obj.ParseFromString(profile)<line_sep>out={}<line_sep>is_one_dimensional=<false><line_sep># load in all higher level attributes
out['class']=obj.klass<line_sep>out['algorithm']=obj.algorithm<line_sep>out['metric']=obj.metric<line_sep>out['sample_pct']=obj.sample_pct<line_sep>out['data']={'ts':from_proto_to_array(obj.ts) 'query':from_proto_to_array(obj.query)}<if_stmt>obj.klass<eq>'MatrixProfile'<block_start>out['mp']=from_proto_to_array(obj.mp.mp)<line_sep>out['pi']=from_proto_to_array(obj.mp.pi)<line_sep>out['lmp']=from_proto_to_array(obj.mp.lmp)<line_sep>out['lpi']=from_proto_to_array(obj.mp.lpi)<line_sep>out['rmp']=from_proto_to_array(obj.mp.rmp)<line_sep>out['rpi']=from_proto_to_array(obj.mp.rpi)<line_sep>out['ez']=obj.mp.ez<line_sep>out['join']=obj.mp.join<line_sep>out['w']=obj.windows[0]<line_sep>is_one_dimensional=len(out['mp'].shape)<eq>1<block_end><elif_stmt>obj.klass<eq>'PMP'<block_start>out['pmp']=from_proto_to_array(obj.pmp.pmp)<line_sep>out['pmpi']=from_proto_to_array(obj.pmp.pmpi)<line_sep>out['windows']=np.array(obj.windows)<block_end><if_stmt><not>isinstance(obj.discords type(<none>))<and>len(obj.discords)<g>0<block_start>out['discords']=discords_from_proto(obj.discords is_one_dimensional=is_one_dimensional)<block_end><if_stmt><not>isinstance(obj.motifs type(<none>))<and>len(obj.motifs)<g>0<block_start>out['motifs']=motifs_from_proto(obj.motifs is_one_dimensional=is_one_dimensional)<block_end><if_stmt><not>isinstance(obj.cmp type(<none>))<and>len(obj.cmp.data)<g>0<block_start>out['cmp']=from_proto_to_array(obj.cmp)<block_end><if_stmt><not>isinstance(obj.av type(<none>))<and>len(obj.av.data)<g>0<block_start>out['av']=from_proto_to_array(obj.av)<block_end><if_stmt><not>isinstance(obj.av_type type(<none>))<and>len(obj.av_type)<g>0<block_start>out['av_type']=obj.av_type<block_end><return>out<block_end> |
# -*- coding: utf-8 -*-
"""Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""
from builtins import object

from abc import ABCMeta, abstractmethod, abstractproperty

from future.utils import with_metaclass


class DomainInterface(with_metaclass(ABCMeta, object)):

    """Interface for a domain: in/out test, random point generation, and update limiting (for constrained optimization)."""

    @abstractproperty
    def dim(self):
        """Return the number of spatial dimensions."""
        pass

    @abstractmethod
    def check_point_inside(self, point):
        r"""Check if a point is inside the domain/on its boundary or outside.

        :param point: point to check
        :type point: array of float64 with shape (dim)
        :return: true if point is inside the domain
        :rtype: bool

        """
        pass

    @abstractmethod
    def get_bounding_box(self):
        """Return a list of ClosedIntervals representing a bounding box for this domain."""
        pass

    @abstractmethod
    def get_constraint_list(self):
        """Return a list of lambda functions expressing the domain bounds as linear constraints. Used by COBYLA.

        :return: a list of lambda functions corresponding to constraints
        :rtype: array of lambda functions with shape (dim * 2)

        """
        pass

    @abstractmethod
    def generate_random_point_in_domain(self, random_source=None):
        """Generate ``point`` uniformly at random such that ``self.check_point_inside(point)`` is True.

        .. Note:: if you need multiple points, use generate_uniform_random_points_in_domain instead;
          depending on implementation, it may yield better distributions over many points. For example,
          tensor product type domains use latin hypercube sampling instead of repeated random draws
          which guarantees that no non-uniform clusters may arise (in subspaces) versus this method
          which treats all draws independently.

        :return: point in domain
        :rtype: array of float64 with shape (dim)

        """
        pass

    @abstractmethod
    def generate_uniform_random_points_in_domain(self, num_points, random_source):
        r"""Generate AT MOST ``num_points`` uniformly distributed points from the domain.

        .. NOTE::
             The number of points returned may be LESS THAN ``num_points``!

        Implementations may use rejection sampling. In such cases, generating the requested
        number of points may be unreasonably slow, so implementers are allowed to generate
        fewer than ``num_points`` results.

        :param num_points: max number of points to generate
        :type num_points: int >= 0
        :param random_source:
        :type random_source: callable yielding uniform random numbers in [0,1]
        :return: uniform random sampling of points from the domain; may be fewer than ``num_points``!
        :rtype: array of float64 with shape (num_points_generated, dim)

        """
        pass

    @abstractmethod
    def compute_update_restricted_to_domain(self, max_relative_change, current_point, update_vector):
        r"""Compute a new update so that CheckPointInside(``current_point`` + ``new_update``) is true.

        Changes new_update_vector so that:
          ``point_new = point + new_update_vector``

        has coordinates such that ``CheckPointInside(point_new)`` returns true.

        ``new_update_vector`` is a function of ``update_vector``.
        ``new_update_vector`` is just a copy of ``update_vector`` if ``current_point`` is already inside the domain.

        .. NOTE::
            We modify update_vector (instead of returning point_new) so that further update
            limiting/testing may be performed.

        :param max_relative_change: max change allowed per update (as a relative fraction of current distance to boundary)
        :type max_relative_change: float64 in (0, 1]
        :param current_point: starting point
        :type current_point: array of float64 with shape (dim)
        :param update_vector: proposed update
        :type update_vector: array of float64 with shape (dim)
        :return: new update so that the final point remains inside the domain
        :rtype: array of float64 with shape (dim)

        """
        pass
import FWCore.ParameterSet.Config as cms

# reco hit production
from RecoPPS.Local.ctppsDiamondRecHits_cfi import ctppsDiamondRecHits

# local track fitting
from RecoPPS.Local.ctppsDiamondLocalTracks_cfi import ctppsDiamondLocalTracks

# Group the two diamond-detector local-reconstruction modules into a Task and
# expose them as a schedulable Sequence for CMSSW configurations.
ctppsDiamondLocalReconstructionTask = cms.Task(
    ctppsDiamondRecHits,
    ctppsDiamondLocalTracks
)
ctppsDiamondLocalReconstruction = cms.Sequence(ctppsDiamondLocalReconstructionTask)
from config import yoloCfg, yoloWeights, opencvFlag
from config import AngleModelPb, AngleModelPbtxt
from config import IMGSIZE
from PIL import Image  # NOTE(review): imported but unused in this module
import numpy as np
import cv2

if opencvFlag == 'keras':
    # Load the angle model as a frozen TensorFlow graph so it can run on GPU.
    import tensorflow as tf
    from tensorflow.python.platform import gfile
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    with gfile.FastGFile(AngleModelPb, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        tf.import_graph_def(graph_def, name='')
    # Input / output tensors of the frozen angle-classification graph.
    inputImg = sess.graph.get_tensor_by_name('input_1:0')
    predictions = sess.graph.get_tensor_by_name('predictions/Softmax:0')
    keep_prob = tf.placeholder(tf.float32)
else:
    # OpenCV DNN model for text-direction (angle) detection.
    angleNet = cv2.dnn.readNetFromTensorflow(AngleModelPb, AngleModelPbtxt)
# YOLO network for text localization.
textNet = cv2.dnn.readNetFromDarknet(yoloCfg, yoloWeights)
def text_detect(img):
    """Locate text regions in ``img`` with the YOLO text network.

    :param img: BGR image as a numpy array of shape (h, w, 3)
    :return: (boxes, scores) where boxes is an (N, 4) array of
        (xmin, ymin, xmax, ymax) in image coordinates and scores are the
        corresponding confidences.
    """
    thresh = 0
    h, w = img.shape[:2]
    # 0.00390625 == 1/256; assumes the network expects inputs scaled roughly
    # to [0, 1] -- TODO confirm against the YOLO cfg.
    inputBlob = cv2.dnn.blobFromImage(img, scalefactor=0.00390625, size=IMGSIZE, swapRB=True, crop=False)
    textNet.setInput(inputBlob)
    pred = textNet.forward()
    # Predictions are (cx, cy, w, h, score) normalized to [0, 1]; convert the
    # centers/extents back to pixel corner coordinates.
    cx = pred[:, 0] * w
    cy = pred[:, 1] * h
    xmin = cx - pred[:, 2] * w / 2
    xmax = cx + pred[:, 2] * w / 2
    ymin = cy - pred[:, 3] * h / 2
    ymax = cy + pred[:, 3] * h / 2
    scores = pred[:, 4]
    # Keep only detections above the confidence threshold.
    indx = np.where(scores > thresh)[0]
    scores = scores[indx]
    boxes = np.array(list(zip(xmin[indx], ymin[indx], xmax[indx], ymax[indx])))
    return boxes, scores


def angle_detect_dnn(img, adjust=True):
    """Detect text orientation (0/90/180/270 degrees) with the OpenCV DNN model.

    :param img: BGR image as a numpy array
    :param adjust: when True, crop 5% from every border before classifying
    :return: one of 0, 90, 180, 270
    """
    h, w = img.shape[:2]
    ROTATE = [0, 90, 180, 270]
    if adjust:
        thesh = 0.05
        xmin, ymin, xmax, ymax = int(thesh * w), int(thesh * h), w - int(thesh * w), h - int(thesh * h)
        # Crop the image borders to reduce edge noise.
        img = img[ymin:ymax, xmin:xmax]
    # VGG-style BGR mean subtraction; swapRB converts to the channel order the
    # model was trained with.
    inputBlob = cv2.dnn.blobFromImage(img, scalefactor=1.0, size=(224, 224), swapRB=True, mean=[103.939, 116.779, 123.68], crop=False)
    angleNet.setInput(inputBlob)
    pred = angleNet.forward()
    index = np.argmax(pred, axis=1)[0]
    return ROTATE[index]


def angle_detect_tf(img, adjust=True):
    """Detect text orientation (0/90/180/270 degrees) with the frozen TF graph.

    :param img: BGR image as a numpy array
    :param adjust: when True, crop 5% from every border before classifying
    :return: one of 0, 90, 180, 270
    """
    h, w = img.shape[:2]
    ROTATE = [0, 90, 180, 270]
    if adjust:
        thesh = 0.05
        xmin, ymin, xmax, ymax = int(thesh * w), int(thesh * h), w - int(thesh * w), h - int(thesh * h)
        img = img[ymin:ymax, xmin:xmax]  # crop the image borders
    img = cv2.resize(img, (224, 224))
    # BGR -> RGB, then subtract the per-channel VGG means.
    img = img[..., ::-1].astype(np.float32)
    img[..., 0] -= 103.939
    img[..., 1] -= 116.779
    img[..., 2] -= 123.68
    img = np.array([img])  # add batch dimension
    out = sess.run(predictions, feed_dict={inputImg: img, keep_prob: 0})
    index = np.argmax(out, axis=1)[0]
    return ROTATE[index]


def angle_detect(img, adjust=True):
    """Detect text orientation, dispatching to the backend selected by
    ``opencvFlag`` ('keras' -> TensorFlow graph, otherwise OpenCV DNN).
    """
    if opencvFlag == 'keras':
        return angle_detect_tf(img, adjust=adjust)
    else:
        return angle_detect_dnn(img, adjust=adjust)
from hubspot import HubSpot
from hubspot.discovery.cms.blogs.discovery import Discovery


def test_is_discoverable():
    # The CMS facade must expose the blogs API group via its Discovery wrapper.
    apis = HubSpot().cms
    assert isinstance(apis.blogs, Discovery)
import math

import numpy as np
from keras import backend as K
from keras.layers import Conv2D, Concatenate, Activation, Add
from keras.engine import InputSpec


def logsoftmax(x):
    ''' Numerically stable log(softmax(x)) '''
    # Subtract the max before exponentiating to avoid overflow.
    m = K.max(x, axis=-1, keepdims=True)
    return x - m - K.log(K.sum(K.exp(x - m), axis=-1, keepdims=True))


def pixelcnn_loss(target, output, img_rows, img_cols, img_chns, n_components):
    ''' Keras PixelCNN loss function. Use a lambda to fill in the last few
        parameters

        Args:
            img_rows, img_cols, img_chns: image dimensions
            n_components: number of mixture components

        Returns:
            log-loss
    '''
    assert img_chns == 3

    # Extract out each of the mixture parameters (multiple of 3 b/c of image channels)
    output_m = output[:, :, :, :3*n_components]
    output_invs = output[:, :, :, 3*n_components:6*n_components]
    output_logit_weights = output[:, :, :, 6*(n_components):]

    # Repeat the target to match the number of mixture component shapes
    x = K.reshape(target, (-1, img_rows, img_cols, img_chns))
    slices = []
    for c in range(img_chns):
        slices += [x[:, :, :, c:c+1]] * n_components
    x = K.concatenate(slices, axis=-1)

    x_decoded_m = output_m
    x_decoded_invs = output_invs
    x_logit_weights = output_logit_weights

    # Pixels rescaled to be in [-1, 1] interval
    offset = 1. / 127.5 / 2.
    centered_mean = x - x_decoded_m

    cdfminus_arg = (centered_mean - offset) * K.exp(x_decoded_invs)
    cdfplus_arg = (centered_mean + offset) * K.exp(x_decoded_invs)

    cdfminus_safe = K.sigmoid(cdfminus_arg)
    cdfplus_safe = K.sigmoid(cdfplus_arg)

    # Generate the PDF (logistic) in case the `m` is way off (cdf is too small)
    # pdf = e^(-(x-m)/s) / {s(1 + e^{-(x-m)/s})^2}
    # logpdf = -(x-m)/s - log s - 2 * log(1 + e^(-(x-m)/s))
    #        = -mid_in - invs - 2 * softplus(-mid_in)
    mid_in = centered_mean * K.exp(x_decoded_invs)
    log_pdf_mid = -mid_in - x_decoded_invs - 2. * K.tf.nn.softplus(-mid_in)

    # Use trick from PixelCNN++ implementation to protect against edge/overflow cases
    # In extreme cases (cdfplus_safe - cdf_minus_safe < 1e-5), use the
    # log_pdf_mid and assume that density is 1 pixel width wide (1/127.5) as
    # the density: log(pdf * 1/127.5) = log(pdf) - log(127.5)
    # Add on line of best fit (see notebooks/blog post) to the difference between
    # edge case and the standard case
    edge_case = log_pdf_mid - np.log(127.5) + 2.04 * x_decoded_invs - 0.107

    # ln (sigmoid(x)) = x - ln(e^x + 1) = x - softplus(x)
    # ln (1 - sigmoid(x)) = ln(1 / (1 + e^x)) = -softplus(x)
    log_cdfplus = cdfplus_arg - K.tf.nn.softplus(cdfplus_arg)
    log_1minus_cdf = -K.tf.nn.softplus(cdfminus_arg)
    log_ll = K.tf.where(x <= -0.999, log_cdfplus,
                        K.tf.where(x >= 0.999, log_1minus_cdf,
                                   K.tf.where(cdfplus_safe - cdfminus_safe > 1e-5,
                                              K.log(K.maximum(cdfplus_safe - cdfminus_safe, 1e-12)),
                                              edge_case)))

    # x_weights * [sigma(x+0.5...) - sigma(x-0.5 ...) ]
    # = log x_weights + log (...)
    # Compute log(softmax(.)) directly here, instead of doing 2-step to avoid overflow
    pre_result = logsoftmax(x_logit_weights) + log_ll

    result = []
    for chn in range(img_chns):
        chn_result = pre_result[:, :, :, chn*n_components:(chn+1)*n_components]
        v = K.logsumexp(chn_result, axis=-1)
        result.append(v)
    result = K.batch_flatten(K.stack(result, axis=-1))

    return -K.sum(result, axis=-1)


def sigmoid(x):
    """Scalar logistic sigmoid with overflow protection."""
    # Protect overflow
    if x < -20:
        return 0.0
    elif x > 20:
        return 1.0

    return 1 / (1 + math.exp(-x))


def logistic_cdf(x, loc, scale):
    """CDF of the logistic distribution with location `loc` and scale `scale`."""
    return sigmoid((x - loc) / scale)


def compute_pvals(m, invs):
    """Return the 256-entry discretized logistic distribution over pixel
    intensities for mean `m` and inverse-scale `invs` (pixels rescaled to
    [-1, 1]); the first and last bins absorb the tails.
    """
    pvals = []
    for i in range(256):
        if i == 0:
            pval = logistic_cdf((0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
        elif i == 255:
            pval = 1. - logistic_cdf((254.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
        else:
            pval = (logistic_cdf((i + 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs))
                    - logistic_cdf((i - 0.5 - 127.5) / 127.5, loc=m, scale=1. / np.exp(invs)))
        pvals.append(pval)

    return pvals


def compute_mixture(ms, invs, weights, n_comps):
    """Return the weighted sum over `n_comps` discretized logistic components
    (see compute_pvals) as a 256-entry array.
    """
    components = []
    for i in range(n_comps):
        pvals = compute_pvals(ms[i], invs[i])
        arr = np.array(pvals)
        components.append(weights[i] * arr)

    return np.sum(components, axis=0)


class PixelConv2D(Conv2D):
    """Conv2D with an autoregressive PixelCNN mask applied to the kernel.

    The mask restricts each output channel group (r/g/b) to depend only on
    already-generated pixels (above / to the left) and, per mask type 'a'/'b',
    on earlier (or also the current) color channels at the center tap.
    """

    def __init__(self, ptype, *args, **kwargs):
        # ptype corresponds to pixel type and mask type, e.g. ra, ga, ba, rb, gb, bb
        assert ptype[0] in ['r', 'g', 'b'], ptype
        assert ptype[1] in ['a', 'b'], ptype
        self.ptype = ptype
        super(PixelConv2D, self).__init__(*args, **kwargs)

    def build_mask(self, kernel_shape):
        """Build the constant 0/1 mask matching `kernel_shape`."""
        # kernel_shape = kern_dim x kern_dim x total_filters
        #              = kern_dim x kern_dim x r_g_b_filters x filters_per_channel
        assert kernel_shape[0] == kernel_shape[1], \
            "{} must be equal in first two dims".format(kernel_shape)
        assert kernel_shape[0] % 2 == 1, \
            "{} must be odd size in first two dims".format(kernel_shape)
        assert kernel_shape[2] % 3 == 0, \
            "{} must be divisible by 3".format(kernel_shape)
        data = np.ones(kernel_shape)
        data.shape  # NOTE(review): no-op expression statement, kept as-is
        mid = data.shape[0] // 2

        # Input-channel span [filt_prev, filt_thres) that this color is allowed
        # to see at the center tap.
        if self.ptype[0] == 'r':
            filt_prev = 0
            filt_thres = int(data.shape[2] / 3)
        elif self.ptype[0] == 'g':
            filt_prev = int(data.shape[2] / 3)
            filt_thres = int(2 * data.shape[2] / 3)
        else:
            assert self.ptype[0] == 'b', self.ptype
            filt_prev = int(2 * data.shape[2] / 3)
            filt_thres = data.shape[2]

        for k1 in range(data.shape[0]):
            for k2 in range(data.shape[1]):
                for chan in range(data.shape[2]):
                    if (self.ptype[1] == 'a' and filt_prev <= chan < filt_thres
                            and k1 == mid and k2 == mid):
                        # Handle the only difference between 'a' and 'b' ptypes
                        data[k1, k2, chan, :] = 0
                    elif k1 > mid or (k1 >= mid and k2 > mid) or chan >= filt_thres:
                        # Turn off anything:
                        #   a) Below currrent pixel
                        #   b) Past the current pixel (scanning left from right, up to down)
                        #   c) In a later filter
                        data[k1, k2, chan, :] = 0

        return K.constant(np.ravel(data), dtype='float32', shape=kernel_shape)

    def build(self, input_shape):
        """Create the masked kernel and optional bias (standard Conv2D build
        plus the PixelCNN mask)."""
        if self.data_format == 'channels_first':
            channel_axis = 1
        else:
            channel_axis = -1
        if input_shape[channel_axis] is None:
            raise ValueError('The channel dimension of the inputs '
                             'should be defined. Found `None`.')
        input_dim = input_shape[channel_axis]
        kernel_shape = self.kernel_size + (input_dim, self.filters)

        self.kernel_mask = self.build_mask(kernel_shape)
        self.kernel = self.add_weight(shape=kernel_shape,
                                      initializer=self.kernel_initializer,
                                      name='kernel',
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.filters,),
                                        initializer=self.bias_initializer,
                                        name='bias',
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)
        else:
            self.bias = None
        # Set input spec.
        self.input_spec = InputSpec(ndim=self.rank + 2,
                                    axes={channel_axis: input_dim})
        self.built = True

    def call(self, inputs):
        """Apply the convolution with the mask multiplied into the kernel."""
        masked_kernel = self.kernel * self.kernel_mask
        outputs = K.conv2d(
            inputs,
            masked_kernel,
            strides=self.strides,
            padding=self.padding,
            data_format=self.data_format,
            dilation_rate=self.dilation_rate)

        if self.use_bias:
            outputs = K.bias_add(
                outputs,
                self.bias,
                data_format=self.data_format)

        if self.activation is not None:
            return self.activation(outputs)
        return outputs


def conv_block(input_tensor, filters, kernel_size, name, is_first=False):
    """One masked conv layer per color group (r/g/b), ReLU-activated and
    concatenated; `is_first` switches to the stricter 'a' mask."""
    outs = []
    for t in ['rb', 'gb', 'bb']:
        if is_first:
            t = t[0] + 'a'
        x = PixelConv2D(t, filters, kernel_size,
                        name='res' + name + t, padding='same')(input_tensor)
        x = Activation('relu')(x)
        outs.append(x)
    return Concatenate()(outs)


def resnet_block(input_tensor, filters, stage, block, kernel=3):
    """Bottleneck residual block (1x1 -> kxk -> 1x1 masked convs + skip)."""
    name_base = str(stage) + block + '_branch'
    filters1, filters2, filters3 = filters
    x = input_tensor
    x = conv_block(x, filters1, (1, 1), name=name_base + '_a-1x1')
    x = conv_block(x, filters2, (kernel, kernel), name=name_base + '_b-{}x{}'.format(kernel, kernel))
    x = conv_block(x, filters3, (1, 1), name=name_base + '_c-1x1')
    x = Add()([x, input_tensor])
    return x


def final_block(input_tensor, filters, in_filters, name, kernel_size=(1, 1)):
    """Final per-color masked conv layers.

    NOTE(review): `in_filters` is accepted but unused — presumably kept for
    signature compatibility; verify against callers.
    """
    outs = []
    for t in ['rb', 'gb', 'bb']:
        x = PixelConv2D(t, filters, kernel_size,
                        name='final' + name + '_' + t, padding='same')(input_tensor)
        x = Activation('relu')(x)
        outs.append(x)
    return Concatenate()(outs)
# terrascript/data/logicmonitor.py
# Terrascript data-source stubs for the LogicMonitor provider; class names
# mirror the Terraform data-source names (presumably auto-generated).
import terrascript


class logicmonitor_collectors(terrascript.Data):
    # data "logicmonitor_collectors"
    pass


class logicmonitor_dashboard(terrascript.Data):
    # data "logicmonitor_dashboard"
    pass


class logicmonitor_dashboard_group(terrascript.Data):
    # data "logicmonitor_dashboard_group"
    pass


class logicmonitor_device_group(terrascript.Data):
    # data "logicmonitor_device_group"
    pass


__all__ = [
    "logicmonitor_collectors",
    "logicmonitor_dashboard",
    "logicmonitor_dashboard_group",
    "logicmonitor_device_group",
]
from rest_framework import exceptions, serializers

from api.v2.serializers.summaries import IdentitySummarySerializer
from core.models import Identity


class IdentityRelatedField(serializers.RelatedField):
    """DRF related field that renders an Identity via its summary serializer
    and resolves incoming ids (or dicts carrying an ``id`` key) back to
    Identity instances.
    """

    def get_queryset(self):
        """Return the queryset used to resolve incoming identity ids."""
        return Identity.objects.all()

    def to_representation(self, identity):
        """Serialize ``identity`` with the summary serializer, propagating
        the serializer context."""
        serializer = IdentitySummarySerializer(identity, context=self.context)
        return serializer.data

    def to_internal_value(self, data):
        """Resolve ``data`` (a raw id, or a dict with an ``id`` key) to an
        Identity instance.

        :raises rest_framework.exceptions.ValidationError: when no Identity
            with the given id exists (or the id is malformed).
        """
        queryset = self.get_queryset()
        if isinstance(data, dict):
            identity = data.get("id", None)
        else:
            identity = data
        try:
            return queryset.get(id=identity)
        except Exception as exc:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Catch Exception instead and chain the cause
            # so lookup failures (DoesNotExist, malformed ids) stay debuggable
            # while callers still receive the same ValidationError.
            raise exceptions.ValidationError(
                "Identity with id '%s' does not exist."
                % identity
            ) from exc
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.