content stringlengths 0 1.55M |
|---|
# -*- coding: utf-8 -*-
from naomi import testutils
from . import snr_vad


class TestSNR_VADPlugin(testutils.Test_VADPlugin):
    """Run the shared Test_VADPlugin suite against the SNR-based VAD plugin.

    All actual test cases live in the ``testutils.Test_VADPlugin`` base class;
    this subclass only wires up the SNR plugin instance under test.
    """

    def setUp(self):
        super(TestSNR_VADPlugin, self).setUp()
        # _test_input is provided by the base class setUp().
        self.plugin = testutils.get_plugin_instance(
            snr_vad.SNRPlugin, self._test_input)
        # prime by running through one wav file
        self.map_file()
import curses


class Key:
    """A logical key: a group of raw key codes that trigger the same action."""

    def __init__(self, *values):
        # All raw key codes (ints) mapped to this logical key.
        self.values = values
        # Precomputed hash so Key instances are cheap to use in sets/dicts.
        self._hash = hash(values)
        self._keyset = set(values)

    def __eq__(self, other):
        # Fix: compare the actual code tuples instead of the hashes.  Equal
        # hashes do not imply equal values, and comparing against a non-Key
        # operand must not raise AttributeError.
        if not isinstance(other, Key):
            return NotImplemented
        return self.values == other.values

    def __hash__(self):
        return self._hash


class Keys:
    """Registry of all logical keys plus a raw-code -> Key lookup table."""

    ESC = Key(27)
    TAB = Key(ord("\t"), ord("n"))
    SHIFT_TAB = Key(353, ord("N"))
    VISUAL = Key(ord("v"), ord("V"))
    COPY = Key(ord("c"), ord("y"))
    QUIT = Key(ord("q"))
    UP = Key(curses.KEY_UP, ord("k"))
    DOWN = Key(curses.KEY_DOWN, ord("j"))
    LEFT = Key(curses.KEY_LEFT, ord("h"))
    RIGHT = Key(curses.KEY_RIGHT, ord("l"))
    HELP = Key(ord("?"))
    ALL = [ESC, TAB, SHIFT_TAB, VISUAL, COPY, QUIT, UP, DOWN, LEFT, RIGHT, HELP]
    # 'code' rather than 'id' so we don't shadow the builtin id().
    _id_to_key = {code: key for key in ALL for code in key.values}

    @staticmethod
    def to_key(key: int) -> "Key | None":
        """Map a raw key code to its logical Key, or None if unmapped."""
        return Keys._id_to_key.get(key)
import json

import requests
from typing import List
from konlpy.tag import Okt
from requests.models import Response


class OktTokenizer:
    """
    A POS-tagger based tokenizer functor. Note that these are just examples.
    The `phrases` function usually gives a better result than an ordinary POS
    tokenizer.

    Example:
        tokenizer: OktTokenizer = OktTokenizer()
        tokens: List[str] = tokenizer(your_text_here)
    """
    # NOTE: the Okt instance is a class attribute, so it is created once at
    # import time and shared by every OktTokenizer instance.
    okt: Okt = Okt()

    def __call__(self, text: str) -> List[str]:
        # Extract noun phrases (usually better than raw POS tokens).
        tokens: List[str] = self.okt.phrases(text)
        return tokens


class ApiTokenizer:
    """
    An API based tokenizer functor, assuming that the response body is a
    jsonifyable string with content of list of `str` tokens.

    Example:
        tokenizer: ApiTokenizer = ApiTokenizer()
        tokens: List[str] = tokenizer(your_text_here)
    """

    def __init__(self, endpoint: str) -> None:
        # Full URL of the remote tokenization service.
        self.endpoint: str = endpoint

    def __call__(self, text: str) -> List[str]:
        # POST the raw UTF-8 text; the service is expected to return a JSON
        # array of string tokens.  NOTE(review): no timeout or HTTP-status
        # check here — a failing endpoint surfaces as a JSON decode error.
        body: bytes = text.encode('utf-8')
        res: Response = requests.post(self.endpoint, data=body)
        tokens: List[str] = json.loads(res.text)
        return tokens
# The script is intended to get a list of all devices available via Tuya Home Assistant API endpoint.
import requests
import pprint

# CHANGE THIS - BEGINNING
USERNAME = ""
PASSWORD = ""
REGION = "eu"  # cn, eu, us
COUNTRY_CODE = "1"  # Your account country code, e.g., 1 for USA or 86 for China
BIZ_TYPE = "smart_life"  # tuya, smart_life, jinvoo_smart
FROM = "tuya"  # you likely don't need to touch this
# CHANGE THIS - END

# NO NEED TO CHANGE ANYTHING BELOW
# Region is substituted into the {} placeholder, e.g. https://px1.tuyaeu.com
TUYACLOUDURL = "https://px1.tuya{}.com"

pp = pprint.PrettyPrinter(indent=4)

# Step 1: authenticate and obtain an access token.
print("Getting credentials")
auth_response = requests.post(
    (TUYACLOUDURL + "/homeassistant/auth.do").format(REGION),
    data={
        "userName": USERNAME,
        "password": PASSWORD,
        "countryCode": COUNTRY_CODE,
        "bizType": BIZ_TYPE,
        "from": FROM,
    },
)
print("Got credentials")
auth_response = auth_response.json()
pp.pprint(auth_response)

# Step 2: call the "Discovery" skill with the token to list all devices.
header = {"name": "Discovery", "namespace": "discovery", "payloadVersion": 1}
payload = {"accessToken": auth_response["access_token"]}
data = {"header": header, "payload": payload}
print("Getting devices")
discovery_response = requests.post(
    (TUYACLOUDURL + "/homeassistant/skill").format(REGION), json=data)
print("Got devices")
discovery_response = discovery_response.json()
pp.pprint(discovery_response)
print("!!! NOW REMOVE THIS FILE, SO YOUR CREDENTIALS (username, password) WON'T LEAK !!!")
# NOTE: Following example requires boto3 package.
import boto3
from InquirerPy import prompt
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.validator import PathValidator

client = boto3.client("s3")


def get_bucket(_):
    """Return the names of all S3 buckets (prompt `choices` callback)."""
    return [bucket["Name"] for bucket in client.list_buckets()["Buckets"]]


def walk_s3_bucket(result):
    """Return every object key in the bucket chosen in the previous answer.

    ``result`` is the accumulated answers dict from InquirerPy; the bucket
    name lives under the "bucket" key.
    """
    response = []
    paginator = client.get_paginator("list_objects")
    # NOTE(review): the loop variable deliberately reuses the name `result`,
    # shadowing the answers dict after the first iteration — works because
    # `result["bucket"]` is evaluated once, but fragile to edit.
    for result in paginator.paginate(Bucket=result["bucket"]):
        for file in result["Contents"]:
            response.append(file["Key"])
    return response


def is_upload(result):
    """True when the first answer (the selected S3 action) is "Upload"."""
    return result[0] == "Upload"


# Prompt definitions; `when` callbacks toggle questions between the
# upload and download flows.
questions = [
    {
        "message": "Select an S3 action:",
        "type": "list",
        "choices": ["Upload", "Download"],
    },
    {
        "message": "Enter the filepath to upload:",
        "type": "filepath",
        "when": is_upload,
        "validate": PathValidator(),
        "only_files": True,
    },
    {
        "message": "Select a bucket:",
        "type": "fuzzy",
        "choices": get_bucket,
        "name": "bucket",
        "spinner_enable": True,
    },
    {
        "message": "Select files to download:",
        "type": "fuzzy",
        "when": lambda _: not is_upload(_),
        "choices": walk_s3_bucket,
        "multiselect": True,
        "spinner_enable": True,
    },
    {
        "message": "Enter destination folder:",
        "type": "filepath",
        "when": lambda _: not is_upload(_),
        "only_directories": True,
        "validate": PathValidator(),
    },
    {"message": "Confirm?", "type": "confirm", "default": False},
]

try:
    result = prompt(questions, vi_mode=True)
except InvalidArgument:
    print("No available choices")

# Download or Upload the file based on result ...
"""
This python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Image Processing, vol. 19 (12), pp. 3243-3254, 2010.
Author: <NAME>
E-mail: <EMAIL>
Released Under MIT License
"""
import numpy as np
from skimage.io import imread

from lv_set.find_lsf import find_lsf
from lv_set.potential_func import *
from lv_set.show_fig import draw_all


def gourd_params():
    """Build the DRLSE parameter dict for the 'gourd.bmp' demo image."""
    # Load as grayscale and rescale intensities to [0, 255].
    img = imread('gourd.bmp', True)
    img = np.interp(img, [np.min(img), np.max(img)], [0, 255])

    # initialize LSF as binary step function
    c0 = 2
    initial_lsf = c0 * np.ones(img.shape)
    # generate the initial region R0 as two rectangles (negative inside)
    initial_lsf[24:35, 19:25] = -c0
    initial_lsf[24:35, 39:50] = -c0

    # parameters
    return {
        'img': img,
        'initial_lsf': initial_lsf,
        'timestep': 1,  # time step
        'iter_inner': 10,
        'iter_outer': 30,
        'lmda': 5,  # coefficient of the weighted length term L(phi)
        'alfa': -3,  # coefficient of the weighted area term A(phi)
        'epsilon': 1.5,  # parameter that specifies the width of the DiracDelta function
        'sigma': 0.8,  # scale parameter in Gaussian kernel
        'potential_function': DOUBLE_WELL,
    }


def two_cells_params():
    """Build the DRLSE parameter dict for the 'twocells.bmp' demo image."""
    # Load as grayscale and rescale intensities to [0, 255].
    img = imread('twocells.bmp', True)
    img = np.interp(img, [np.min(img), np.max(img)], [0, 255])

    # initialize LSF as binary step function
    c0 = 2
    initial_lsf = c0 * np.ones(img.shape)
    # generate the initial region R0 as a single rectangle (negative inside)
    initial_lsf[9:55, 9:75] = -c0

    # parameters
    return {
        'img': img,
        'initial_lsf': initial_lsf,
        'timestep': 5,  # time step
        'iter_inner': 5,
        'iter_outer': 40,
        'lmda': 5,  # coefficient of the weighted length term L(phi)
        'alfa': 1.5,  # coefficient of the weighted area term A(phi)
        'epsilon': 1.5,  # parameter that specifies the width of the DiracDelta function
        'sigma': 1.5,  # scale parameter in Gaussian kernel
        'potential_function': DOUBLE_WELL,
    }


params = gourd_params()
# params = two_cells_params()
phi = find_lsf(**params)
print('Show final output')
draw_all(phi, params['img'], 10)
# -*- coding: utf-8 -*-
"""
Inspired by:
* https://gist.github.com/shirriff/c9fb5d98e6da79d9a772#file-merkle-py
* https://github.com/richardkiss/pycoin
"""<import_from_future_stmt> absolute_import division unicode_literals<import_from_stmt>builtins range<import_stmt>binascii<import_stmt>hashlib<def_stmt>merkleroot hashes<block_start>"""
Args:
hashes: reversed binary form of transactions hashes, e.g.:
``binascii.unhexlify(h)[::-1] for h in block['tx']]``
Returns:
merkle root in hexadecimal form
"""<if_stmt>len(hashes)<eq>1<block_start><return>binascii.hexlify(bytearray(reversed(hashes[0])))<block_end><if_stmt>len(hashes)%2<eq>1<block_start>hashes.append(hashes[-1])<block_end>parent_hashes=[]<for_stmt>i range(0 len(hashes)-1 2)<block_start>first_round_hash=hashlib.sha256(hashes[i]+hashes[i+1]).digest()<line_sep>second_round_hash=hashlib.sha256(first_round_hash).digest()<line_sep>parent_hashes.append(second_round_hash)<block_end><return>merkleroot(parent_hashes)<block_end> |
# Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common import api_version_utils
from tempest.lib.common import rest_client

# Microversion requested for all Placement API calls; None disables the
# microversion header entirely.
PLACEMENT_MICROVERSION = None


class BasePlacementClient(rest_client.RestClient):
    """Base REST client for the OpenStack Placement API with microversion support."""

    # Header used by Placement to select an API microversion.
    api_microversion_header_name = 'OpenStack-API-Version'
    # Header value template; %s is replaced with the microversion string.
    version_header_value = 'placement %s'

    def get_headers(self):
        """Return request headers, adding the microversion header if configured."""
        headers = super(BasePlacementClient, self).get_headers()
        if PLACEMENT_MICROVERSION:
            headers[self.api_microversion_header_name] = \
                self.version_header_value % PLACEMENT_MICROVERSION
        return headers

    def request(self, method, url, extra_headers=False, headers=None,
                body=None, chunked=False):
        """Send the request, then verify the service echoed the requested microversion.

        The check is skipped for 'latest', since the service replies with the
        concrete version it resolved 'latest' to.
        """
        resp, resp_body = super(BasePlacementClient, self).request(
            method, url, extra_headers, headers, body, chunked)
        if (PLACEMENT_MICROVERSION and
                PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
            api_version_utils.assert_version_header_matches_request(
                self.api_microversion_header_name,
                self.version_header_value % PLACEMENT_MICROVERSION,
                resp)
        return resp, resp_body
def get_max_coins_helper(matrix, crow, ccol, rows, cols):
    """Return the maximum coin sum on any down/right path from (crow, ccol)
    to the bottom-right corner of ``matrix``.

    Memoized per cell, so the run time is O(rows * cols) instead of the
    exponential cost of the plain double recursion.
    """
    memo = {}

    def best(r, c):
        # Max sum from (r, c) to (rows-1, cols-1), cached per cell.
        if (r, c) in memo:
            return memo[(r, c)]
        value = matrix[r][c]
        if r == rows - 1 and c == cols - 1:
            result = value
        else:
            # -inf marks a blocked direction so the path is forced to
            # actually reach the bottom-right corner.
            down = best(r + 1, c) if r < rows - 1 else float("-inf")
            right = best(r, c + 1) if c < cols - 1 else float("-inf")
            result = value + max(down, right)
        memo[(r, c)] = result
        return result

    return best(crow, ccol)


def get_max_coins(matrix):
    """Return the max coins collectible moving only down/right from the
    top-left to the bottom-right cell; None for an empty matrix.
    """
    # Guard matrix[0] too, so [[]] returns None instead of raising.
    if matrix and matrix[0]:
        return get_max_coins_helper(matrix, 0, 0, len(matrix), len(matrix[0]))


coins = [[0, 3, 1, 1], [2, 0, 0, 4], [1, 5, 3, 1]]
assert get_max_coins(coins) == 12
coins = [[0, 3, 1, 1], [2, 8, 9, 4], [1, 5, 3, 1]]
assert get_max_coins(coins) == 25
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains information about each output stream."""

from streamer.bitrate_configuration import AudioCodec, AudioChannelLayout, VideoCodec, VideoResolution
from streamer.input_configuration import Input, MediaType
from streamer.pipe import Pipe
from typing import Dict, Union


class OutputStream(object):
  """Base class for output streams."""

  def __init__(self,
               type: MediaType,
               input: Input,
               codec: Union[AudioCodec, VideoCodec, None],
               pipe_dir: str,
               skip_transcoding: bool = False,
               pipe_suffix: str = '') -> None:
    self.type: MediaType = type
    self.skip_transcoding = skip_transcoding
    self.input: Input = input
    # Placeholder values for the output filename templates; subclasses fill
    # this in with language/bitrate/codec/etc.
    self.features: Dict[str, str] = {}
    self.codec: Union[AudioCodec, VideoCodec, None] = codec
    if self.skip_transcoding:
      # If skip_transcoding is specified, let the Packager read from a plain
      # file instead of an IPC pipe.
      self.ipc_pipe = Pipe.create_file_pipe(self.input.name, mode='r')
    else:
      self.ipc_pipe = Pipe.create_ipc_pipe(pipe_dir, pipe_suffix)

  def is_hardware_accelerated(self) -> bool:
    """Returns True if this output stream uses hardware acceleration."""
    if self.codec:
      return self.codec.is_hardware_accelerated()
    return False

  def get_ffmpeg_codec_string(self, hwaccel_api: str) -> str:
    """Returns a codec string accepted by FFmpeg for this stream's codec."""
    assert self.codec is not None
    return self.codec.get_ffmpeg_codec_string(hwaccel_api)

  def is_dash_only(self) -> bool:
    """Returns True if the output format is restricted to DASH protocol"""
    if self.codec is not None:
      return self.codec.get_output_format() == 'webm'
    return False

  def get_init_seg_file(self) -> Pipe:
    """Returns a file pipe for this stream's init segment, named from its features."""
    INIT_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_init.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_init.{format}',
        MediaType.TEXT: 'text_{language}_init.{format}',
    }
    path_templ = INIT_SEGMENT[self.type].format(**self.features)
    return Pipe.create_file_pipe(path_templ, mode='w')

  def get_media_seg_file(self) -> Pipe:
    """Returns a file pipe for numbered media segments ($Number$ is expanded by Packager)."""
    MEDIA_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_$Number$.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_$Number$.{format}',
        MediaType.TEXT: 'text_{language}_$Number$.{format}',
    }
    path_templ = MEDIA_SEGMENT[self.type].format(**self.features)
    return Pipe.create_file_pipe(path_templ, mode='w')

  def get_single_seg_file(self) -> Pipe:
    """Returns a file pipe for single-file (non-segmented) output."""
    SINGLE_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}.{format}',
        MediaType.TEXT: 'text_{language}.{format}',
    }
    path_templ = SINGLE_SEGMENT[self.type].format(**self.features)
    return Pipe.create_file_pipe(path_templ, mode='w')


class AudioOutputStream(OutputStream):
  """An audio output stream with a fixed codec and channel layout."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               codec: AudioCodec,
               channel_layout: AudioChannelLayout) -> None:

    super().__init__(MediaType.AUDIO, input, codec, pipe_dir)
    # Override the codec type and specify that it's an audio codec
    self.codec: AudioCodec = codec
    self.layout = channel_layout

    # The features that will be used to generate the output filename.
    self.features = {
        'language': input.language,
        'channels': str(self.layout.max_channels),
        'bitrate': self.get_bitrate(),
        'format': self.codec.get_output_format(),
        'codec': self.codec.value,
    }

  def get_bitrate(self) -> str:
    """Returns the bitrate for this stream."""
    return self.layout.bitrates[self.codec]


class VideoOutputStream(OutputStream):
  """A video output stream with a fixed codec and resolution."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               codec: VideoCodec,
               resolution: VideoResolution) -> None:
    super().__init__(MediaType.VIDEO, input, codec, pipe_dir)
    # Override the codec type and specify that it's a video codec
    self.codec: VideoCodec = codec
    self.resolution = resolution

    # The features that will be used to generate the output filename.
    self.features = {
        'resolution_name': self.resolution.get_key(),
        'bitrate': self.get_bitrate(),
        'format': self.codec.get_output_format(),
        'codec': self.codec.value,
    }

  def get_bitrate(self) -> str:
    """Returns the bitrate for this stream."""
    return self.resolution.bitrates[self.codec]


class TextOutputStream(OutputStream):
  """A text (subtitle/caption) output stream; has no codec."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               skip_transcoding: bool):
    # We don't have a codec per se for text, but we'd like to generically
    # process OutputStream objects in ways that are easier with this attribute
    # set, so set it to None.
    codec = None
    super().__init__(MediaType.TEXT, input, codec, pipe_dir,
                     skip_transcoding, pipe_suffix='.vtt')

    # The features that will be used to generate the output filename.
    self.features = {
        'language': input.language,
        'format': 'mp4',
    }
import deepchem as dc
import numpy as np
import os


def test_numpy_dataset_get_shape():
  """Test that get_shape works for numpy datasets."""
  num_datapoints = 100
  num_features = 10
  num_tasks = 10
  # Generate data
  X = np.random.rand(num_datapoints, num_features)
  y = np.random.randint(2, size=(num_datapoints, num_tasks))
  w = np.random.randint(2, size=(num_datapoints, num_tasks))
  ids = np.array(["id"] * num_datapoints)
  dataset = dc.data.NumpyDataset(X, y, w, ids)
  # get_shape() must mirror the shapes of the arrays it was built from.
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == X.shape
  assert y_shape == y.shape
  assert w_shape == w.shape
  assert ids_shape == ids.shape


def test_disk_dataset_get_shape_single_shard():
  """Test that get_shape works for disk dataset."""
  num_datapoints = 100
  num_features = 10
  num_tasks = 10
  # Generate data
  X = np.random.rand(num_datapoints, num_features)
  y = np.random.randint(2, size=(num_datapoints, num_tasks))
  w = np.random.randint(2, size=(num_datapoints, num_tasks))
  ids = np.array(["id"] * num_datapoints)
  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == X.shape
  assert y_shape == y.shape
  assert w_shape == w.shape
  assert ids_shape == ids.shape


def test_disk_dataset_get_shape_multishard():
  """Test that get_shape works for multisharded disk dataset."""
  num_datapoints = 100
  num_features = 10
  num_tasks = 10
  # Generate data
  X = np.random.rand(num_datapoints, num_features)
  y = np.random.randint(2, size=(num_datapoints, num_tasks))
  w = np.random.randint(2, size=(num_datapoints, num_tasks))
  ids = np.array(["id"] * num_datapoints)
  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  # Should now have 10 shards
  dataset.reshard(shard_size=10)
  # Resharding must not change the reported overall shapes.
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == X.shape
  assert y_shape == y.shape
  assert w_shape == w.shape
  assert ids_shape == ids.shape


def test_disk_dataset_get_legacy_shape_single_shard():
  """Test that get_shape works for legacy disk dataset."""
  # This is the shape of legacy_data
  num_datapoints = 100
  num_features = 10
  num_tasks = 10
  current_dir = os.path.dirname(os.path.abspath(__file__))
  # legacy_dataset is a dataset in the legacy format kept around for testing
  # purposes.
  data_dir = os.path.join(current_dir, "legacy_dataset")
  dataset = dc.data.DiskDataset(data_dir)
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (num_datapoints, num_features)
  assert y_shape == (num_datapoints, num_tasks)
  assert w_shape == (num_datapoints, num_tasks)
  assert ids_shape == (num_datapoints,)


def test_disk_dataset_get_legacy_shape_multishard():
  """Test that get_shape works for multisharded legacy disk dataset."""
  # This is the shape of legacy_data_reshard
  num_datapoints = 100
  num_features = 10
  num_tasks = 10
  # legacy_dataset_reshard is a sharded dataset in the legacy format kept
  # around for testing
  current_dir = os.path.dirname(os.path.abspath(__file__))
  data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
  dataset = dc.data.DiskDataset(data_dir)
  # Should now have 10 shards
  assert dataset.get_number_shards() == 10
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (num_datapoints, num_features)
  assert y_shape == (num_datapoints, num_tasks)
  assert w_shape == (num_datapoints, num_tasks)
  assert ids_shape == (num_datapoints,)


def test_get_shard_size():
  """
  Test that using ids for getting the shard size does not break the method.
  The issue arises when attempting to load a dataset that does not have a labels
  column. The create_dataset method of the DataLoader class sets the y to None
  in this case, which causes the existing implementation of the get_shard_size()
  method to fail, as it relies on the dataset having a not None y column. This
  consequently breaks all methods depending on this, like the splitters for
  example.

  Note
  ----
  DiskDatasets without labels cannot be resharded!
  """
  current_dir = os.path.dirname(os.path.abspath(__file__))
  # reaction_smiles.csv has a feature column but no label column.
  file_path = os.path.join(current_dir, "reaction_smiles.csv")

  featurizer = dc.feat.DummyFeaturizer()
  loader = dc.data.CSVLoader(
      tasks=[], feature_field="reactions", featurizer=featurizer)
  dataset = loader.create_dataset(file_path)
  assert dataset.get_shard_size() == 4
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel  # noqa: F401
from oci.decorators import init_model_state_from_kwargs


@init_model_state_from_kwargs
class ExecutionLogPolicy(object):
    """
    Configures the logging policies for the execution logs of an API Deployment.
    """

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "INFO"
    LOG_LEVEL_INFO = "INFO"

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "WARN"
    LOG_LEVEL_WARN = "WARN"

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "ERROR"
    LOG_LEVEL_ERROR = "ERROR"

    def __init__(self, **kwargs):
        """
        Initializes a new ExecutionLogPolicy object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param is_enabled:
            The value to assign to the is_enabled property of this ExecutionLogPolicy.
        :type is_enabled: bool

        :param log_level:
            The value to assign to the log_level property of this ExecutionLogPolicy.
            Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type log_level: str

        """
        # Maps attribute names to their Python types (used by the SDK for
        # (de)serialization).
        self.swagger_types = {
            'is_enabled': 'bool',
            'log_level': 'str'
        }

        # Maps attribute names to their JSON field names on the wire.
        self.attribute_map = {
            'is_enabled': 'isEnabled',
            'log_level': 'logLevel'
        }

        self._is_enabled = None
        self._log_level = None

    @property
    def is_enabled(self):
        """
        Gets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
        Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
        for an API Deployment. If there is an active log object for the API Deployment and its
        category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
        OCI Object Storage log archival bucket.
        Please note that the functionality to push to the legacy OCI Object Storage log
        archival bucket has been deprecated and will be removed in the future.

        :return: The is_enabled of this ExecutionLogPolicy.
        :rtype: bool
        """
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
        Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
        for an API Deployment. If there is an active log object for the API Deployment and its
        category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
        OCI Object Storage log archival bucket.
        Please note that the functionality to push to the legacy OCI Object Storage log
        archival bucket has been deprecated and will be removed in the future.

        :param is_enabled: The is_enabled of this ExecutionLogPolicy.
        :type: bool
        """
        self._is_enabled = is_enabled

    @property
    def log_level(self):
        """
        Gets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution logs.
        Enabling logging at a given level also enables logging at all higher levels.

        Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The log_level of this ExecutionLogPolicy.
        :rtype: str
        """
        return self._log_level

    @log_level.setter
    def log_level(self, log_level):
        """
        Sets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution logs.
        Enabling logging at a given level also enables logging at all higher levels.

        :param log_level: The log_level of this ExecutionLogPolicy.
        :type: str
        """
        allowed_values = ["INFO", "WARN", "ERROR"]
        # Unknown values from the service are coerced rather than rejected, so
        # newer service enum values never break older SDK clients.
        if not value_allowed_none_or_none_sentinel(log_level, allowed_values):
            log_level = 'UNKNOWN_ENUM_VALUE'
        self._log_level = log_level

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
from EvernoteController import EvernoteController
from Memo import Memo

MEMO_NAME = 'Memo'
MEMO_DIR = 'Memo'
MEMO_STORAGE_DIR = 'S-Memo'


def f(fn, *args, **kwargs):
    """Call fn(*args, **kwargs), swallowing any exception (best-effort setup).

    NOTE(review): deliberately broad — each step below may legitimately fail
    when the notebook/note already exists or is missing.
    """
    try:
        fn(*args, **kwargs)
    except:
        pass


m = Memo()
e = EvernoteController()
# Ensure both notebooks exist (no-ops if they already do).
f(e.create_notebook, MEMO_DIR)
f(e.create_notebook, MEMO_STORAGE_DIR)
# Archive the previous memo note, then publish the current one.
f(e.move_note, MEMO_DIR + '/' + MEMO_NAME, MEMO_STORAGE_DIR)
e.create_note('Memo', m.raw_memo(), MEMO_DIR)
import gc
from unittest import TestCase

from testing import await_true

gc.collect()


class TestGPSInteractive(TestCase):
    """Interactive hardware test: reads a real GPS fix over UART."""

    def test_read_value(self):
        """Wait for a GPS fix and sanity-check the reported position."""
        import adafruit_blinka
        adafruit_blinka.patch_system()  # needed before adafruit_gps imports time
        import microcontroller.pin
        gc.collect()
        import busio
        gc.collect()
        import adafruit_gps
        gc.collect()

        # configure the last available UART (first uart often for REPL)
        uartId, uartTx, uartRx = microcontroller.pin.uartPorts[0]
        uart = busio.UART(uartTx, uartRx, baudrate=9600, timeout=3000)
        gps = adafruit_gps.GPS(uart)
        # PMTK314: enable only RMC and GGA sentences; PMTK220: 1 Hz update rate.
        gps.send_command("PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
        gps.send_command("PMTK220,1000")

        def try_fix():
            # Pump the parser; True once the receiver reports a position fix.
            gps.update()
            return gps.has_fix

        await_true("GPS fix", try_fix)

        self.assertTrue(gps.satellites is not None)
        self.assertTrue(-90 <= gps.latitude < 90)
        self.assertTrue(-180 <= gps.longitude < 180)
"""React Preset"""
import shutil
import os

from .Preset import Preset
from ..utils.filesystem import make_directory
from ..utils.location import resources_path, views_path


class React(Preset):
    """
    Configure the front-end scaffolding for the application to use ReactJS

    Will also remove Vue as Vue and React are a bit mutally exclusive
    """

    key = "react"

    # NPM dev-dependencies added to package.json.
    packages = {
        "react": "^17.0.2",
        "react-dom": "^17.0.2",
        "@babel/preset-react": "^7.16.5",
    }

    # NPM packages stripped from package.json (React replaces Vue).
    removed_packages = ["vue", "vue-loader"]

    def install(self):
        """Install the preset"""
        self.update_packages(dev=True)
        self.update_webpack_mix()
        self.update_js()
        self.add_components()
        self.update_css()
        self.create_view()
        self.remove_node_modules()

    def add_components(self):
        """Copy example React component into application (delete example Vue component
        if it exists)"""
        # make components directory if does not exists
        make_directory(resources_path("js/components/Example.js"))

        # delete Vue components if exists
        vue_files = [
            resources_path("js/components/HelloWorld.vue"),
            resources_path("js/App.vue"),
        ]
        for vue_file in vue_files:
            if os.path.exists(vue_file):
                os.remove(vue_file)

        # add React example component
        shutil.copyfile(
            self.get_template_path("Example.js"),
            resources_path("js/components/Example.js"),
        )

    def create_view(self):
        """Copy an example app view with assets included."""
        shutil.copyfile(self.get_template_path("app.html"), views_path("app_react.html"))
# -*- coding: utf8 -*-
# NOTE: Python 2 module ('Queue' was renamed 'queue' in Python 3).
__author__ = '<NAME>'

from Queue import Queue
from Worker import Worker


class Pool:
    """A simple one-shot thread pool built on Worker threads and a task queue.

    Usage: map_async() once to fan work out, then join() to wait for it.
    Adding jobs while workers are still alive raises.
    """

    def __init__(self, size):
        # size: number of Worker threads spawned per map_async() call.
        self.size = size
        self.workers = []
        self.tasks = Queue()

    def _removeDeadWorkers(self):
        # Drop workers whose threads have finished.
        self.workers = [w for w in self.workers if w.isAlive()]

    def map_async(self, func, objects, callback):
        """Queue func(object) for every object and start the workers.

        callback is passed along with each task; presumably the Worker
        invokes it with the result — TODO confirm against Worker.
        Raises if a previous batch is still running.
        """
        self._removeDeadWorkers()
        if not len(self.workers) == 0:
            raise Exception('ThreadPool is still working! Adding new jobs is not allowed!')
        for object in objects:
            self.tasks.put((func, object, callback))
        for id in range(self.size):
            self.workers.append(Worker(id, self.tasks))
        for worker in self.workers:
            worker.start()

    def join(self):
        """Block until every worker thread has finished."""
        for worker in self.workers:
            worker.join()
# -*- coding: utf-8 -*-
# @Time: 2020/7/3 10:21
# @Author: GraceKoo
# @File: interview_33.py
# @Desc: https://leetcode-cn.com/problems/chou-shu-lcof/


class Solution:
    """Interview 33: n-th ugly number (numbers whose prime factors are only 2, 3, 5)."""

    def nthUglyNumber(self, n: int) -> int:
        """Return the n-th ugly number (1-indexed, 1 is the first); 0 if n <= 0.

        Classic three-pointer DP: each pointer tracks the next ugly number
        to be multiplied by its factor.
        """
        if n <= 0:
            return 0
        uglies = [1] * n
        factors = (2, 3, 5)
        heads = [0, 0, 0]  # per-factor index of the next multiplicand
        for pos in range(1, n):
            candidates = [uglies[h] * f for h, f in zip(heads, factors)]
            smallest = min(candidates)
            uglies[pos] = smallest
            # Advance every pointer that produced the minimum, so
            # duplicates (e.g. 2*3 and 3*2) are emitted only once.
            for k in range(len(factors)):
                if candidates[k] == smallest:
                    heads[k] += 1
        return uglies[-1]


so = Solution()
print(so.nthUglyNumber(10))
# test __getattr__ on module

# ensure that does_not_exist doesn't exist to start with
this = __import__(__name__)
try:
    this.does_not_exist
    assert False
except AttributeError:
    pass


# define __getattr__ (PEP 562 module-level attribute hook)
def __getattr__(attr):
    if attr != 'does_not_exist':
        raise AttributeError
    return False


# do feature test (will also test functionality if the feature exists)
if not hasattr(this, 'does_not_exist'):
    print('SKIP')
    raise SystemExit

# check that __getattr__ works as expected
print(this.does_not_exist)
# Copyright (c) 2017, Carnegie Mellon University. All rights reserved.
#
# Use of the K-NRM package is subject to the terms of the software license set
# forth in the LICENSE file included with this software, and also available at
# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE

from setuptools import find_packages, setup

# Packaging metadata for the K-NRM ranking model.
setup(
    name='knrm',
    version='0',
    description='knrm',
    author='<NAME> and <NAME>',
    install_requires=['numpy', 'traitlets', 'tensorflow'],
    packages=find_packages(),
)
import re
import string
from typing import List

import numpy as np
from tqdm import tqdm

from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split

"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""


def _find_spans(words, answers, match_first, match_rest, skip_word):
    """Shared scanning loop behind every detector's `any_found`.

    words: the paragraph's (possibly normalized) tokens.
    answers: list of answers, each a list of tokens/patterns.
    match_first(tok, word): does `word` match an answer's first token?
    match_rest(tok, word): does `word` match a later answer token?
    skip_word(word): may `word` be skipped between answer tokens?

    Returns deduplicated (start, end) half-open token spans.
    """
    occurances = []
    n_words = len(words)
    for answer in answers:
        n_tokens = len(answer)
        for start in (i for i, w in enumerate(words) if match_first(answer[0], w)):
            end = start + 1
            ans_token = 1
            # Advance forward until we match all answer tokens or hit a mismatch.
            while ans_token < n_tokens and end < n_words:
                word = words[end]
                if match_rest(answer[ans_token], word):
                    ans_token += 1
                    end += 1
                elif skip_word(word):
                    end += 1
                else:
                    break
            if ans_token == n_tokens:
                occurances.append((start, end))
    return list(set(occurances))


class ExactMatchDetector(object):
    """Matches aliases against lower-cased paragraph tokens, no skipping."""

    def __init__(self):
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        words = [x.lower() for x in flatten_iterable(para)]
        eq = lambda tok, w: tok == w
        return _find_spans(words, self.answer_tokens, eq, eq, lambda w: False)


class NormalizedAnswerDetector(object):
    """ Try to labels tokens sequences, such that the extracted sequence would be evaluated as 100% correct
    by the official trivia-qa evaluation script """

    def __init__(self):
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        words = [normalize_answer(w) for w in flatten_iterable(para)]
        eq = lambda tok, w: tok == w
        # Tokens normalized to the empty string may be skipped between matches.
        return _find_spans(words, self.answer_tokens, eq, eq, lambda w: w == "")


class FastNormalizedAnswerDetector(object):
    """ almost twice as fast and very,very close to NormalizedAnswerDetector's output """

    def __init__(self):
        # These come from the TrivaQA official evaluation script
        self.skip = {"a", "an", "the", ""}
        self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
        self.answer_tokens = None

    def set_question(self, normalized_aliases):
        self.answer_tokens = normalized_aliases

    def any_found(self, para):
        # Normalize the paragraph: lower-case and strip punctuation per token.
        words = [w.lower().strip(self.strip) for w in flatten_iterable(para)]
        eq = lambda tok, w: tok == w
        return _find_spans(words, self.answer_tokens, eq, eq, lambda w: w in self.skip)


class CarefulAnswerDetector(object):
    """
    There are some common false negatives in the above answer detection, in particular plurals of answers are
    often not found (nor are counted correct by the official script). This detector makes a stronger effort to
    find them, although its unclear if training with these additional answers would hurt/help our overall score
    since I never got around to trying it.
    """

    def __init__(self):
        self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"",
                     ";", "'", "(", ")", "'s'", "s", ":", ",", "."}
        self.answer_regex = None
        self.aliases = None

    def set_question(self, normalized_aliases):
        # Compile per-token regexes that also accept optional plural "s".
        answer_regex = []
        self.aliases = normalized_aliases
        for answer in normalized_aliases:
            tokens = []
            for token in answer:
                if len(token) > 1:
                    tokens.append(token + "s?")
                else:
                    tokens.append(token)
            if tokens[-1] == "s":
                tokens[-1] = "s?"
            answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
        self.answer_regex = answer_regex

    def any_found(self, para):
        words = flatten_iterable(para)
        return _find_spans(words, self.answer_regex,
                           lambda pat, w: bool(pat.fullmatch(w)),
                           lambda pat, w: bool(pat.match(w)),
                           lambda w: w in self.skip)


def evaluate_question_detector(questions, corpus, word_tokenize, detector,
                               reference_detector=None, compute_f1s=False):
    """ Just for debugging: report how many aliases `detector` finds, and
    optionally diff against `reference_detector` / compute official F1s. """
    n_no_docs = 0
    answer_per_doc = []
    answer_f1s = []
    for question_ix, q in enumerate(tqdm(questions)):
        tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
        detector.set_question(tokenized_aliases)

        for doc in q.all_docs:
            doc = corpus.get_document(doc.doc_id)
            if doc is None:
                n_no_docs += 1
                continue

            output = []
            for i, para in enumerate(doc):
                for s, e in detector.any_found(para):
                    output.append((i, s, e))

            if len(output) == 0 and reference_detector is not None:
                # Show spans the reference detector found but we missed.
                reference_detector.set_question(tokenized_aliases)
                detected = []
                for i, para in enumerate(doc):
                    for s, e in reference_detector.any_found(para):
                        detected.append((i, s, e))
                if len(detected) > 0:
                    print("Found a difference")
                    print(q.answer.normalized_aliases)
                    print(tokenized_aliases)
                    for p, s, e in detected:
                        token = flatten_iterable(doc[p])[s:e]
                        print(token)

            answer_per_doc.append(output)

            if compute_f1s:
                f1s = []
                for p, s, e in output:
                    token = flatten_iterable(doc[p])[s:e]
                    answer = normalize_answer(" ".join(token))
                    f1 = 0
                    for gt in q.answer.normalized_aliases:
                        f1 = max(f1, f1_score(answer, gt))
                    f1s.append(f1)
                answer_f1s.append(f1s)

    n_answers = sum(len(x) for x in answer_per_doc)
    print("Found %d answers (av %.4f)" % (n_answers, n_answers / len(answer_per_doc)))
    print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
    if len(answer_f1s) > 0:
        print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))


def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize, detector):
    """Tokenize each question and store inclusive answer spans on every doc."""
    for i, q in enumerate(questions):
        if i % 500 == 0:
            print("Completed question %d of %d (%.3f)" % (i, len(questions),
                                                          i / len(questions)))
        q.question = word_tokenize(q.question)
        if q.answer is None:
            continue
        tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
        if len(tokenized_aliases) == 0:
            raise ValueError()
        detector.set_question(tokenized_aliases)
        for doc in q.all_docs:
            text = corpus.get_document(doc.doc_id)
            if text is None:
                raise ValueError()
            spans = []
            offset = 0
            for para_ix, para in enumerate(text):
                for s, e in detector.any_found(para):
                    spans.append((s + offset, e + offset - 1))  # turn into inclusive span
                offset += sum(len(s) for s in para)
            if len(spans) == 0:
                spans = np.zeros((0, 2), dtype=np.int32)
            else:
                spans = np.array(spans, dtype=np.int32)
            doc.answer_spans = spans


def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
    # We use tokenize_paragraph since some questions can have multiple sentences,
    # but we still store the results as a flat list of tokens
    word_tokenize = tokenizer.tokenize_paragraph_flat
    compute_answer_spans(questions, corpus, word_tokenize, detector)
    return questions


def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
                             tokenizer, detector, n_processes: int):
    """Parallel wrapper over `compute_answer_spans` (in-process if n_processes == 1)."""
    if n_processes == 1:
        word_tokenize = tokenizer.tokenize_paragraph_flat
        compute_answer_spans(questions, corpus, word_tokenize, detector)
        return questions
    from multiprocessing import Pool
    with Pool(n_processes) as p:
        chunks = split(questions, n_processes)
        questions = flatten_iterable(p.starmap(
            _compute_answer_spans_chunk,
            [[c, corpus, tokenizer, detector] for c in chunks]))
        return questions


def main():
    from trivia_qa.build_span_corpus import TriviaQaWebDataset
    from data_processing.text_utils import NltkAndPunctTokenizer

    dataset = TriviaQaWebDataset()
    qs = dataset.get_train()
    qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
    evaluate_question_detector(qs, dataset.evidence,
                               NltkAndPunctTokenizer().tokenize_paragraph_flat,
                               FastNormalizedAnswerDetector())


if __name__ == "__main__":
    main()
# Home Assistant python_script: forward a raw command to a vacuum entity.
# `data` and `hass` are injected by the python_script integration.
entity_id = data.get('entity_id')
command = data.get('command')
params = str(data.get('params'))

# Turn "[[a,b],[c,d]]"-style text into a list of integer lists.
cleaned = params.replace(' ', '').replace('],[', '|').replace('[', '').replace(']', '')
parsedParams = [[int(c) for c in chunk.split(',')] for chunk in cleaned.split('|')]

# These commands take a single rectangle/point rather than a list of them.
if command in ["app_goto_target", "app_segment_clean"]:
    parsedParams = parsedParams[0]

hass.services.call('vacuum', 'send_command',
                   {'entity_id': entity_id, 'command': command, 'params': parsedParams},
                   True)
from tests.common.devices.base import AnsibleHostBase


class VMHost(AnsibleHostBase):
    """
    @summary: Class for VM server

    For running ansible module on VM server
    """

    def __init__(self, ansible_adhoc, hostname):
        AnsibleHostBase.__init__(self, ansible_adhoc, hostname)

    @property
    def external_port(self):
        # Lazily resolve the port from the ansible inventory on first access,
        # then reuse the cached value.
        if not hasattr(self, "_external_port"):
            variable_mgr = self.host.options["variable_manager"]
            inventory_mgr = self.host.options["inventory_manager"]
            host_vars = variable_mgr.get_vars(
                host=inventory_mgr.get_host(self.hostname),
                include_delegate_to=False,
            )
            self._external_port = host_vars["external_port"]
        return self._external_port
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import unittest

import paddle
import paddle.nn as nn


class SimpleReturnLayer(nn.Layer):
    """Identity layer: forwards its input unchanged."""

    def forward(self, x):
        return x


class AddAttrLayer(nn.Layer):
    """Adds the externally-assigned `attr` tensor to the input."""

    def __init__(self):
        super(AddAttrLayer, self).__init__()
        self.attr = None

    def forward(self, x):
        return x + self.attr


class IsInstanceLayer(nn.Layer):
    """Wraps a sub-layer; exercises `isinstance` inside a to_static graph."""

    def __init__(self, layer):
        super(IsInstanceLayer, self).__init__()
        self.layer = layer

    @paddle.jit.to_static
    def forward(self, x):
        # AddAttrLayer needs its operand injected before it can run.
        if isinstance(self.layer, (AddAttrLayer,)):
            self.layer.attr = x
        return self.layer(x)


class SequentialLayer(nn.Layer):
    """Runs a LayerList in order, injecting `attr` into AddAttrLayer members."""

    def __init__(self, layers):
        super(SequentialLayer, self).__init__()
        self.layers = nn.LayerList(layers)

    @paddle.jit.to_static
    def forward(self, x):
        res = x
        for layer in self.layers:
            if isinstance(layer, AddAttrLayer):
                layer.attr = x
            res = layer(res)
        return res


def train(model, to_static):
    """Run one forward pass in static or dynamic mode; return numpy output."""
    prog_trans = paddle.jit.ProgramTranslator.get_instance()
    prog_trans.enable(to_static)
    x = paddle.ones(shape=[2, 3], dtype='int32')
    out = model(x)
    return out.numpy()


class TestIsinstance(unittest.TestCase):
    def test_isinstance_simple_return_layer(self):
        self._test_model(IsInstanceLayer(SimpleReturnLayer()))

    def test_isinstance_add_attr_layer(self):
        self._test_model(IsInstanceLayer(AddAttrLayer()))

    def test_sequential_layer(self):
        layers = []
        for _ in range(5):
            layers.extend((SimpleReturnLayer(), AddAttrLayer()))
        self._test_model(SequentialLayer(layers))

    def _test_model(self, model):
        # Static-graph and dynamic-graph execution must agree.
        st_out = train(model, to_static=True)
        dy_out = train(model, to_static=False)
        self.assertTrue(np.allclose(dy_out, st_out),
                        msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))


if __name__ == "__main__":
    unittest.main()
<import_from_future_stmt> with_statement<import_stmt>inspect<import_from_stmt>random choice randint<import_stmt>sys<import_from_stmt>whoosh fields query scoring<import_from_stmt>whoosh.compat u xrange permutations<import_from_stmt>whoosh.filedb.filestore RamStorage<def_stmt>_weighting_classes ignore# Get all the subclasses of Weighting in whoosh.scoring
<block_start><return>[c<for>_,c inspect.getmembers(scoring inspect.isclass)<if>scoring.Weighting<in>c.__bases__<and>c<not><in>ignore]<block_end><def_stmt>test_all <block_start>domain=[u("alfa") u("bravo") u("charlie") u("delta") u("echo") u("foxtrot")]<line_sep>schema=fields.Schema(text=fields.TEXT)<line_sep>storage=RamStorage()<line_sep>ix=storage.create_index(schema)<line_sep>w=ix.writer()<for_stmt>_ xrange(100)<block_start>w.add_document(text=u(" ").join(choice(domain)<for>_ xrange(randint(10 20))))<block_end>w.commit()<line_sep># List ABCs that should not be tested
abcs=()<line_sep># provide initializer arguments for any weighting classes that require them
init_args={"MultiWeighting":([scoring.BM25F()] {"text":scoring.Frequency()}) "ReverseWeighting":([scoring.BM25F()] {})}<for_stmt>wclass _weighting_classes(abcs)<block_start><try_stmt><block_start><if_stmt>wclass.__name__<in>init_args<block_start>args,kwargs=init_args[wclass.__name__]<line_sep>weighting=wclass(*args **kwargs)<block_end><else_stmt><block_start>weighting=wclass()<block_end><block_end><except_stmt>TypeError<block_start>e=sys.exc_info()[1]<line_sep><raise>TypeError("Error instantiating %r: %s"%(wclass e))<block_end><with_stmt>ix.searcher(weighting=weighting)<as>s<block_start><try_stmt><block_start><for_stmt>word domain<block_start>s.search(query.Term("text" word))<block_end><block_end><except_stmt>Exception<block_start>e=sys.exc_info()[1]<line_sep>e.msg="Error searching with %r: %s"%(wclass e)<line_sep><raise><block_end><block_end><block_end><block_end><def_stmt>test_compatibility <block_start><import_from_stmt>whoosh.scoring Weighting<line_sep># This is the old way of doing a custom weighting model, check that
# it's still supported...
<class_stmt>LegacyWeighting(Weighting)<block_start>use_final=<true><def_stmt>score self searcher fieldname text docnum weight<block_start><return>weight+0.5<block_end><def_stmt>final self searcher docnum score<block_start><return>score<times>1.5<block_end><block_end>schema=fields.Schema(text=fields.TEXT)<line_sep>ix=RamStorage().create_index(schema)<line_sep>w=ix.writer()<line_sep>domain="alfa bravo charlie delta".split()<for_stmt>ls permutations(domain 3)<block_start>w.add_document(text=u(" ").join(ls))<block_end>w.commit()<line_sep>s=ix.searcher(weighting=LegacyWeighting())<line_sep>r=s.search(query.Term("text" u("bravo")))<assert_stmt>r.score(0)<eq>2.25<block_end> |
# Just to keep things like ./manage.py test happy
<import_from_stmt>django.contrib.auth.models AbstractUser<line_sep># class Group(models.Model):
# """
# Groups are a generic way of categorizing users to apply permissions, or
# some other label, to those users. A user can belong to any number of
# groups.
# A user in a group automatically has all the permissions granted to that
# group. For example, if the group Site editors has the permission
# can_edit_home_page, any user in that group will have that permission.
# Beyond permissions, groups are a convenient way to categorize users to
# apply some label, or extended functionality, to them. For example, you
# could create a group 'Special users', and you could write code that would
# do special things to those users -- such as giving them access to a
# members-only portion of your site, or sending them members-only email
# messages.
# """
# name = models.CharField(_('name'), max_length=80, unique=True)
# permissions = models.ManyToManyField(
# Permission,
# verbose_name=_('permissions'),
# blank=True,
# )
#
# objects = GroupManager()
#
# class Meta:
# verbose_name = _('group')
# verbose_name_plural = _('groups')
#
# def __str__(self):
# return self.name
#
# def natural_key(self):
# return (self.name,)
# class User(AbstractUser):
# """
# Users within the Django authentication system are represented by this
# model.
# Username, password and email are required. Other fields are optional.
# """
# class Meta(AbstractUser.Meta):
# swappable = 'AUTH_USER_MODEL'
|
class MaxPQ:
    """Array-backed binary max-heap priority queue.

    Children of index k live at 2k+1 and 2k+2; the parent of k is (k-1)//2.
    """

    def __init__(self):
        self.pq = []

    def insert(self, v):
        """Add `v` and restore the heap invariant by swimming it up."""
        self.pq.append(v)
        self.swim(len(self.pq) - 1)

    def max(self):
        """Return (without removing) the largest element; IndexError if empty."""
        return self.pq[0]

    def del_max(self):
        """Remove and return the largest element; IndexError if empty."""
        m = self.pq[0]
        self.pq[0], self.pq[-1] = self.pq[-1], self.pq[0]
        self.pq.pop()  # O(1) instead of rebuilding the list with a slice
        self.sink(0)
        return m

    def is_empty(self):
        return not self.pq

    def size(self):
        return len(self.pq)

    def swim(self, k):
        """Bubble pq[k] up while it is larger than its parent."""
        while k > 0 and self.pq[(k - 1) // 2] < self.pq[k]:
            self.pq[k], self.pq[(k - 1) // 2] = self.pq[(k - 1) // 2], self.pq[k]
            # BUG FIX: continue from the parent's index. The original used
            # `k = k // 2`, which is wrong for right children (e.g. k=2 -> 1
            # instead of 0), so large elements could stop swimming early and
            # break the heap invariant.
            k = (k - 1) // 2

    def sink(self, k):
        """Push pq[k] down while it is smaller than its larger child."""
        N = len(self.pq)
        while 2 * k + 1 <= N - 1:
            j = 2 * k + 1
            if j < N - 1 and self.pq[j] < self.pq[j + 1]:
                j += 1  # pick the larger child
            if self.pq[k] > self.pq[j]:
                break
            self.pq[k], self.pq[j] = self.pq[j], self.pq[k]
            k = j
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
from nltk.collocations import BigramCollocationFinder
from nltk.probability import FreqDist
from .bleu import Bleu
import argparse
import codecs
import math
import os
import re

import ipdb
import numpy as np
from bert_score import score
from rouge import Rouge


# ---------------- BLEU ---------------- #

# BLEU of NLTK
def cal_BLEU_nltk(refer, candidate, ngram=1):
    '''
    Sentence-level BLEU-n with smoothing.

    SmoothingFunction refer to https://github.com/PaddlePaddle/models/blob/a72760dff8574fe2cb8b803e01b44624db3f3eff/PaddleNLP/Research/IJCAI2019-MMPMS/mmpms/utils/metrics.py
    '''
    smoothie = SmoothingFunction().method7
    if ngram == 1:
        weight = (1, 0, 0, 0)
    elif ngram == 2:
        weight = (0.5, 0.5, 0, 0)
    elif ngram == 3:
        weight = (0.33, 0.33, 0.33, 0)
    elif ngram == 4:
        weight = (0.25, 0.25, 0.25, 0.25)
    else:
        # Previously an out-of-range ngram crashed with NameError; fail clearly.
        raise ValueError("ngram must be in 1..4")
    return sentence_bleu(refer, candidate, weights=weight, smoothing_function=smoothie)


# BLEU of nlg-eval
def cal_BLEU(refs, tgts):
    """Corpus BLEU-1..4 via the bundled nlg-eval Bleu scorer."""
    scorer = Bleu(4)
    refs = {idx: [line] for idx, line in enumerate(refs)}
    tgts = {idx: [line] for idx, line in enumerate(tgts)}
    s = scorer.compute_score(refs, tgts)
    return s[0]


# BLEU of multibleu.perl
def cal_BLEU_perl(dataset, model):
    """Shell out to the multi-bleu perl wrapper and parse its b1/b2/b3/b4 line."""
    p = os.popen(f'python ./metric/perl-bleu.py {dataset} {model}').read()
    print(f'[!] multi-perl: {p}')
    pattern = re.compile(r'(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)')
    bleu1, bleu2, bleu3, bleu4 = pattern.findall(p)[0]
    return float(bleu1), float(bleu2), float(bleu3), float(bleu4)


def cal_Distinct(corpus):
    """
    Calculates unigram and bigram diversity
    Args:
        corpus: tokenized list of sentences sampled
    Returns:
        uni_diversity: distinct-1 score
        bi_diversity: distinct-2 score
    """
    bigram_finder = BigramCollocationFinder.from_words(corpus)
    bi_diversity = len(bigram_finder.ngram_fd) / bigram_finder.N
    dist = FreqDist(corpus)
    uni_diversity = len(dist) / len(corpus)
    return uni_diversity, bi_diversity


def cal_ROUGE(refer, candidate):
    """ROUGE-2 F between two token lists; pads with '<unk>' so rouge never
    sees an empty/one-token input (which it rejects)."""
    if len(candidate) == 0:
        candidate = ['<unk>']
    elif len(candidate) == 1:
        candidate.append('<unk>')
    if len(refer) == 0:
        refer = ['<unk>']
    elif len(refer) == 1:
        refer.append('<unk>')
    rouge = Rouge()
    scores = rouge.get_scores(' '.join(candidate), ' '.join(refer))
    return scores[0]['rouge-2']['f']


def cal_BERTScore(refer, candidate):
    """Mean rescaled BERTScore F1; NaNs are clamped to 0.5.

    NOTE: very slow — kept out of the default metric path.
    """
    _, _, bert_scores = score(candidate, refer, lang='en', rescale_with_baseline=True)
    bert_scores = bert_scores.tolist()
    bert_scores = [0.5 if math.isnan(s) else s for s in bert_scores]
    return np.mean(bert_scores)


# ========== Our own embedding-based metric ========== #

def _vectorize(tokens, dic):
    """Look up word vectors for `tokens` in `dic` (a gensim word2vec model,
    presumably 300-d); falls back to one random vector if nothing matched.

    NOTE(review): membership is tested on `w` but the lookup uses `w.lower()`
    (kept from the original) — could KeyError for case-sensitive models; verify.
    """
    vectors = []
    for w in tokens:
        if w in dic:
            vectors.append(dic[w.lower()])
    if not vectors:
        vectors.append(np.random.randn(300))
    return np.stack(vectors)


def cal_vector_extrema(x, y, dic):
    # x and y are lists of words; dic maps word -> embedding vector.
    x = _vectorize(x, dic)
    y = _vectorize(y, dic)
    vec_x = np.max(x, axis=0)
    vec_y = np.max(y, axis=0)
    assert len(vec_x) == len(vec_y), "len(vec_x) != len(vec_y)"
    zero_list = np.zeros(len(vec_x))
    if vec_x.all() == zero_list.all() or vec_y.all() == zero_list.all():
        return float(1) if vec_x.all() == vec_y.all() else float(0)
    res = np.array([[vec_x[i] * vec_y[i], vec_x[i] * vec_x[i], vec_y[i] * vec_y[i]]
                    for i in range(len(vec_x))])
    cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
    return cos


def cal_embedding_average(x, y, dic):
    # Cosine similarity between the L2-normalised mean embeddings of x and y.
    x = _vectorize(x, dic)
    y = _vectorize(y, dic)
    vec_x = np.array([0 for _ in range(len(x[0]))])
    for x_v in x:
        vec_x = np.add(np.array(x_v), vec_x)
    vec_x = vec_x / math.sqrt(sum(np.square(vec_x)))
    vec_y = np.array([0 for _ in range(len(y[0]))])
    for y_v in y:
        vec_y = np.add(np.array(y_v), vec_y)
    vec_y = vec_y / math.sqrt(sum(np.square(vec_y)))
    assert len(vec_x) == len(vec_y), "len(vec_x) != len(vec_y)"
    zero_list = np.array([0 for _ in range(len(vec_x))])
    if vec_x.all() == zero_list.all() or vec_y.all() == zero_list.all():
        return float(1) if vec_x.all() == vec_y.all() else float(0)
    vec_x = np.mat(vec_x)
    vec_y = np.mat(vec_y)
    num = float(vec_x * vec_y.T)
    denom = np.linalg.norm(vec_x) * np.linalg.norm(vec_y)
    return num / denom


def cal_greedy_matching(x, y, dic):
    # Symmetric greedy matching: average of best-cosine matches x->y and y->x.
    x = _vectorize(x, dic)
    y = _vectorize(y, dic)
    len_x = len(x)
    len_y = len(y)

    cosine = []
    sum_x = 0
    for x_v in x:
        for y_v in y:
            assert len(x_v) == len(y_v), "len(x_v) != len(y_v)"
            zero_list = np.zeros(len(x_v))
            if x_v.all() == zero_list.all() or y_v.all() == zero_list.all():
                cos = float(1) if x_v.all() == y_v.all() else float(0)
            else:
                res = np.array([[x_v[i] * y_v[i], x_v[i] * x_v[i], y_v[i] * y_v[i]]
                                for i in range(len(x_v))])
                cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
            cosine.append(cos)
        if cosine:
            sum_x += max(cosine)
            cosine = []
    sum_x = sum_x / len_x

    cosine = []
    sum_y = 0
    for y_v in y:
        for x_v in x:
            assert len(x_v) == len(y_v), "len(x_v) != len(y_v)"
            zero_list = np.zeros(len(y_v))
            if x_v.all() == zero_list.all() or y_v.all() == zero_list.all():
                cos = float(1) if (x_v == y_v).all() else float(0)
            else:
                res = np.array([[x_v[i] * y_v[i], x_v[i] * x_v[i], y_v[i] * y_v[i]]
                                for i in range(len(x_v))])
                cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
            cosine.append(cos)
        if cosine:
            sum_y += max(cosine)
            cosine = []
    sum_y = sum_y / len_y

    return (sum_x + sum_y) / 2


def cal_greedy_matching_matrix(x, y, dic):
    # Vectorised greedy matching via one cosine-similarity matrix.
    x = _vectorize(x, dic)    # [x, 300]
    y = _vectorize(y, dic)    # [y, 300]
    matrix = np.dot(x, y.T)   # [x, y]
    matrix = matrix / np.linalg.norm(x, axis=1, keepdims=True)       # [x, 1]
    matrix = matrix / np.linalg.norm(y, axis=1).reshape(1, -1)       # [1, y]
    x_matrix_max = np.mean(np.max(matrix, axis=1))  # best match per x token
    y_matrix_max = np.mean(np.max(matrix, axis=0))  # best match per y token
    return (x_matrix_max + y_matrix_max) / 2


# ========== End of our own embedding-based metric ========== #

if __name__ == "__main__":
    path = './processed/dailydialog/GatedGCN-no-correlation/pred.txt'
    with open(path) as f:
        ref, tgt = [], []
        for idx, line in enumerate(f.readlines()):
            if idx % 4 == 1:
                line = line.replace("user1", "").replace("user0", "") \
                    .replace("- ref: ", "").replace('<sos>', '').replace('<eos>', '').strip()
                ref.append(line.split())
            elif idx % 4 == 2:
                line = line.replace("user1", "").replace("user0", "") \
                    .replace("- tgt: ", "").replace('<sos>', '').replace('<eos>', '').strip()
                tgt.append(line.split())

    # Distinct-1, Distinct-2
    candidates, references = [], []
    for line1, line2 in zip(tgt, ref):
        candidates.extend(line1)
        references.extend(line2)
    distinct_1, distinct_2 = cal_Distinct(candidates)
    rdistinct_1, rdistinct_2 = cal_Distinct(references)
    print(distinct_1, distinct_2)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_stmt>os.path<import_stmt>time<import_from_stmt>neutron_lib constants<import_stmt>webob<import_stmt>webob.dec<import_stmt>webob.exc<import_from_stmt>neutron.agent.linux utils<import_from_stmt>neutron.tests.common machine_fixtures<import_from_stmt>neutron.tests.common net_helpers<import_from_stmt>neutron.tests.functional.agent.l3 framework<import_from_stmt>neutron.tests.functional.agent.linux helpers<line_sep>METADATA_REQUEST_TIMEOUT=60<line_sep>METADATA_REQUEST_SLEEP=5<class_stmt>MetadataFakeProxyHandler(object)<block_start><def_stmt>__init__ self status<block_start>self.status=status<block_end>@webob.dec.wsgify()<def_stmt>__call__ self req<block_start><return>webob.Response(status=self.status)<block_end><block_end><class_stmt>MetadataL3AgentTestCase(framework.L3AgentTestFramework)<block_start>SOCKET_MODE=0o644<def_stmt>_create_metadata_fake_server self status<block_start>server=utils.UnixDomainWSGIServer('metadata-fake-server')<line_sep>self.addCleanup(server.stop)<line_sep># NOTE(cbrandily): TempDir fixture creates a folder with 0o700
# permissions but metadata_proxy_socket folder must be readable by all
# users
self.useFixture(helpers.RecursivePermDirFixture(os.path.dirname(self.agent.conf.metadata_proxy_socket) 0o555))<line_sep>server.start(MetadataFakeProxyHandler(status) self.agent.conf.metadata_proxy_socket workers=0 backlog=4096 mode=self.SOCKET_MODE)<block_end><def_stmt>_query_metadata_proxy self machine<block_start>url='http://%(host)s:%(port)s'%{'host':constants.METADATA_V4_IP 'port':constants.METADATA_PORT}<line_sep>cmd='curl' '--max-time' METADATA_REQUEST_TIMEOUT '-D-' url<line_sep>i=0<line_sep>CONNECTION_REFUSED_TIMEOUT=METADATA_REQUEST_TIMEOUT<floordiv>2<while_stmt>i<le>CONNECTION_REFUSED_TIMEOUT<block_start><try_stmt><block_start>raw_headers=machine.execute(cmd)<line_sep><break><block_end><except_stmt>RuntimeError<as>e<block_start><if_stmt>'Connection refused'<in>str(e)<block_start>time.sleep(METADATA_REQUEST_SLEEP)<line_sep>i<augadd>METADATA_REQUEST_SLEEP<block_end><else_stmt><block_start>self.fail('metadata proxy unreachable '<concat>'on %s before timeout'%url)<block_end><block_end><block_end><if_stmt>i<g>CONNECTION_REFUSED_TIMEOUT<block_start>self.fail('Timed out waiting metadata proxy to become available')<block_end><return>raw_headers.splitlines()[0]<block_end><def_stmt>test_access_to_metadata_proxy self<block_start>"""Test access to the l3-agent metadata proxy.
The test creates:
* A l3-agent metadata service:
* A router (which creates a metadata proxy in the router namespace),
* A fake metadata server
* A "client" namespace (simulating a vm) with a port on router
internal subnet.
The test queries from the "client" namespace the metadata proxy on
http://169.254.169.254 and asserts that the metadata proxy added
the X-Forwarded-For and X-Neutron-Router-Id headers to the request
and forwarded the http request to the fake metadata server and the
response to the "client" namespace.
"""<line_sep>router_info=self.generate_router_info(enable_ha=<false>)<line_sep>router=self.manage_router(self.agent router_info)<line_sep>self._create_metadata_fake_server(webob.exc.HTTPOk.code)<line_sep># Create and configure client namespace
router_ip_cidr=self._port_first_ip_cidr(router.internal_ports[0])<line_sep>br_int=framework.get_ovs_bridge(self.agent.conf.OVS.integration_bridge)<line_sep>machine=self.useFixture(machine_fixtures.FakeMachine(br_int net_helpers.increment_ip_cidr(router_ip_cidr) router_ip_cidr.partition('/')[0]))<line_sep># Query metadata proxy
firstline=self._query_metadata_proxy(machine)<line_sep># Check status code
self.assertIn(str(webob.exc.HTTPOk.code) firstline.split())<block_end><block_end><class_stmt>UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase)<block_start>"""Test metadata proxy with least privileged user.
The least privileged user has uid=65534 and is commonly named 'nobody' but
not always, that's why we use its uid.
"""<line_sep>SOCKET_MODE=0o664<def_stmt>setUp self<block_start>super(UnprivilegedUserMetadataL3AgentTestCase self).setUp()<line_sep>self.agent.conf.set_override('metadata_proxy_user' '65534')<block_end><block_end><class_stmt>UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase)<block_start>"""Test metadata proxy with least privileged user/group.
The least privileged user has uid=65534 and is commonly named 'nobody' but
not always, that's why we use its uid.
Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', that's
why we use its gid.
"""<line_sep>SOCKET_MODE=0o666<def_stmt>setUp self<block_start>super(UnprivilegedUserGroupMetadataL3AgentTestCase self).setUp()<line_sep>self.agent.conf.set_override('metadata_proxy_user' '65534')<line_sep>self.agent.conf.set_override('metadata_proxy_group' '65534')<block_end><block_end> |
<class_stmt>IncompatibleAttribute(Exception)<block_start><pass><block_end><class_stmt>IncompatibleDataException(Exception)<block_start><pass><block_end><class_stmt>UndefinedROI(Exception)<block_start><pass><block_end><class_stmt>InvalidSubscriber(Exception)<block_start><pass><block_end><class_stmt>InvalidMessage(Exception)<block_start><pass><block_end> |
<import_from_stmt>distutils.version StrictVersion<as>SV<import_stmt>unittest<import_stmt>minecraft<class_stmt>VersionTest(unittest.TestCase)<block_start><def_stmt>test_module_version_is_a_valid_pep_386_strict_version self<block_start>SV(minecraft.__version__)<block_end><def_stmt>test_protocol_version_is_an_int self<block_start><for_stmt>version minecraft.SUPPORTED_PROTOCOL_VERSIONS<block_start>self.assertTrue(type(version)<is>int)<block_end><block_end><block_end> |
<import_from_future_stmt> print_function<import_stmt>os<import_stmt>time<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>sys<import_from_stmt>zoneout_wrapper ZoneoutWrapper<class_stmt>SequencePredictor()<block_start><def_stmt>add_placeholders self<block_start>"""Generates placeholder variables to represent the input tensors
"""<line_sep>self.inputs_placeholder=tf.placeholder(tf.int32 shape=(<none> self.config.max_length) name="x")<line_sep>self.labels_placeholder=tf.placeholder(tf.int32 shape=(<none> self.config.max_length) name="y")<line_sep>self.dropout_placeholder=tf.placeholder(tf.float32)<block_end><def_stmt>create_feed_dict self inputs_batch labels_batch=<none> initial_state=<none> keep_prob=1.0<block_start>"""Creates the feed_dict for the model.
NOTE: You do not have to do anything here.
"""<line_sep>feed_dict={self.inputs_placeholder:inputs_batch self.dropout_placeholder:keep_prob }<if_stmt>labels_batch<is><not><none><block_start>feed_dict[self.labels_placeholder]=labels_batch<block_end><if_stmt>initial_state<is><not><none><block_start>feed_dict[self.in_state]=initial_state<block_end><return>feed_dict<block_end><def_stmt>add_embedding self<block_start>""" Creates one-hot encoding for the input. No embedding is used as of now
"""<line_sep>embedding=tf.one_hot(self.inputs_placeholder self.config.num_classes)<line_sep><return>embedding<block_end><def_stmt>add_prediction_op self<block_start>""" Get the input from the embedding layer
"""<line_sep>x=self.add_embedding()<line_sep>""" Create a RNN first & define a placeholder for the initial state
"""<if_stmt>self.config.model_type<eq>"gru"<block_start>cell=tf.nn.rnn_cell.GRUCell(self.config.hidden_size)<block_end><elif_stmt>self.config.model_type<eq>"rnn"<block_start>cell=tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)<block_end><else_stmt><block_start><raise>Exception("Unsuppoprted model type...")<block_end><if_stmt>self.config.regularization<eq>"dropout"<block_start>cell=tf.nn.rnn_cell.DropoutWrapper(cell output_keep_prob=self.dropout_placeholder)<block_end><elif_stmt>self.config.regularization<eq>"zoneout"<block_start>cell=ZoneoutWrapper(cell zoneout_prob=self.dropout_placeholder)<block_end>cell=tf.nn.rnn_cell.MultiRNNCell([cell]<times>self.config.num_layers state_is_tuple=<false>)<line_sep>batch_size=tf.shape(x)[0]<line_sep>dynamic_max_length=tf.shape(x)[1]<line_sep>zero_state=cell.zero_state(batch_size tf.float32)<line_sep>self.in_state=tf.placeholder_with_default(zero_state [<none> cell.state_size])<line_sep>""" First find the sequence length and then use it to run the model
"""<line_sep>#length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
output,self.out_state=tf.nn.dynamic_rnn(cell x initial_state=self.in_state)<line_sep>output=tf.reshape(output shape=[-1 self.config.hidden_size])<line_sep>""" Pass it through a linear + Softmax layer to get the predictions
"""<line_sep>xavier_init=tf.contrib.layers.xavier_initializer()<line_sep>W=tf.get_variable("W" shape=[self.config.hidden_size self.config.num_classes] initializer=xavier_init)<line_sep>b1=tf.get_variable("b1" shape=[self.config.num_classes] initializer=xavier_init)<line_sep>preds=tf.add(tf.matmul(output W) b1)<line_sep>preds=tf.reshape(preds shape=[batch_size dynamic_max_length self.config.num_classes])<line_sep><return>preds<block_end><def_stmt>add_loss_op self preds<block_start>loss=tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder logits=preds))<line_sep>scaled_loss=loss/np.log(2)<line_sep>tf.summary.scalar('loss' scaled_loss)<line_sep><return>scaled_loss<block_end><def_stmt>add_training_op self loss<block_start>"""Sets up the training Ops.
"""<line_sep>global_step=tf.Variable(0 dtype=tf.int32 trainable=<false> name='global_step')<line_sep>optimizer=tf.train.AdamOptimizer(self.config.lr)<line_sep>train_op=optimizer.minimize(loss global_step=global_step)<line_sep><return>global_step train_op<block_end><def_stmt>loss_on_batch self sess inputs_batch labels_batch initial_state=<none><block_start>feed=self.create_feed_dict(inputs_batch=inputs_batch labels_batch=labels_batch initial_state=initial_state keep_prob=1.0)<line_sep>loss,out_state=sess.run([self.loss self.out_state] feed_dict=feed)<line_sep><return>loss out_state<block_end><def_stmt>train_on_batch self sess inputs_batch labels_batch initial_state=<none> dropout=1.0<block_start>feed=self.create_feed_dict(inputs_batch=inputs_batch labels_batch=labels_batch initial_state=initial_state keep_prob=dropout)<line_sep>_,loss,out_state,_step,summary=sess.run([self.train_op self.loss self.out_state self.global_step self.merged_summaries] feed_dict=feed)<line_sep><return>loss out_state _step summary<block_end><def_stmt>build self<block_start>self.add_placeholders()<line_sep>self.pred=self.add_prediction_op()<line_sep>self.loss=self.add_loss_op(self.pred)<line_sep>self.global_step,self.train_op=self.add_training_op(self.loss)<line_sep>self.merged_summaries=tf.summary.merge_all()<block_end><def_stmt>__init__ self config<block_start>self.config=config<line_sep>self.build()<block_end><block_end> |
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""<import_stmt>unittest<import_stmt>os<import_from_stmt>programy.storage.stores.sql.engine SQLStorageEngine<import_from_stmt>programy.storage.stores.sql.config SQLStorageConfiguration<import_from_stmt>programy.security.linking.accountlinker BasicAccountLinkerService<import_from_stmt>programytest.client TestClient<class_stmt>AccountLinkerTestClient(TestClient)<block_start><def_stmt>__init__ self<block_start>TestClient.__init__(self)<block_end><def_stmt>load_storage self<block_start>super(AccountLinkerTestClient self).load_storage()<line_sep>self.add_default_stores()<line_sep>self.add_categories_store([os.path.dirname(__file__)])<block_end><block_end><class_stmt>AccountLinkerAIMLTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>config=SQLStorageConfiguration()<line_sep>storage_engine=SQLStorageEngine(config)<line_sep>storage_engine.initialise()<line_sep>client=AccountLinkerTestClient()<line_sep>self.context=client.create_client_context("TESTUSER")<line_sep>self.context.brain._security._account_linker=BasicAccountLinkerService(storage_engine)<block_end><def_stmt>test_account_link_happy_path self<block_start>response=self.context.bot.ask_question(self.context "LINK PRIMARY ACCOUNT USER1 CONSOLE PASSWORD123")<line_sep>self.assertIsNotNone(response)<line_sep>self.assertTrue(response.startswith('Your generated key is'))<line_sep>words=response.split(" ")<line_sep>self.assertTrue(5 len(words))<line_sep>generated_key=words[4][:-1]<line_sep>command="LINK SECONDARY ACCOUNT USER1 USER2 FACEBOOK PASSWORD<PASSWORD> %s"%generated_key<line_sep>response=self.context.bot.ask_question(self.context command)<line_sep>self.assertIsNotNone(response)<line_sep>self.assertEqual('Your accounts are now linked.' response)<block_end><block_end> |
"""
Profile the time needed for retrieval.
We consider retrieval in a corpus of 1M videos, 1K videos are added, 10K queries are retrieved.
Calculate the time needed for adding 1K videos, and performing retrieval for 10K queries.
1, Data Loading time is ignored, consider it is hidden by computation time.
2, Sort time is ignored, since it is the similar among the methods.
"""<import_stmt>os<import_stmt>time<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>pprint<import_from_stmt>tqdm tqdm trange<import_from_stmt>baselines.crossmodal_moment_localization.model_xml XML xml_base_config<import_from_stmt>baselines.mixture_embedding_experts.model MEE mee_base_cfg<import_from_stmt>baselines.clip_alignment_with_language.model CALWithSub cal_base_cfg<import_from_stmt>baselines.excl.model EXCL excl_base_cfg<import_from_stmt>utils.basic_utils save_json<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<line_sep>logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s" datefmt="%Y-%m-%d %H:%M:%S" level=logging.INFO)<def_stmt>mask_logits target mask<block_start><return>target<times>mask+(1-mask)<times>(-1e10)<block_end><class_stmt>ProfileBase(object)<block_start>N_NewQuery=1e4<line_sep>N_NewVideo=1e3<line_sep>N_Videos=1e6<line_sep>AvgVideoLength=100<line_sep>ClipLength=5<line_sep>AvgClipPerVideo=int(AvgVideoLength/ClipLength)# max_ctx_l
AvgWordInQuery=15<line_sep># estimated by
# scales=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], => max_proposal = 14
AvgProposalPerVideo=170<line_sep>MaxClipPerProposal=14# pad to this length
AvgClipPerProposal=7# 6.88
VideoFeatureDim=3074# 1024 + 2048 + 2 (TEF)
SubFeatureDim=770<line_sep>QueryFeatureDim=768<line_sep>HiddenSize=256<line_sep>N_Runs=5# Get the average time
<def_stmt>__init__ self device=torch.device("cuda:0") ctx_batch_size=400 query_batch_size=100<block_start>self.device=device<line_sep>self.ctx_batch_size=ctx_batch_size<line_sep>self.query_batch_size=query_batch_size<line_sep>self.model_config=self.get_model_config()<line_sep>print(self.model_config)<line_sep>self.model=self.get_model()<block_end><def_stmt>get_model self<block_start><return><none><block_end><def_stmt>get_model_config self<block_start><return><none><block_end><def_stmt>set_ctx_batch_size self batch_size<block_start>self.ctx_batch_size=batch_size<block_end><def_stmt>set_query_batch_size self batch_size<block_start>self.query_batch_size=batch_size<block_end><def_stmt>cast_dict_inputs_to_device self dict_inputs device<block_start><return>{k:v.to(device)<for>k,v dict_inputs.items()}<block_end><def_stmt>get_fake_ctx_raw_input_st_ed self no_tef=<false><block_start><return>dict(video_feat=torch.FloatTensor(self.ctx_batch_size self.model_config.max_ctx_l self.VideoFeatureDim-2<times>no_tef) sub_feat=torch.FloatTensor(self.ctx_batch_size self.model_config.max_ctx_l self.SubFeatureDim-2<times>no_tef) ctx_mask=torch.FloatTensor(self.ctx_batch_size self.model_config.max_ctx_l) )<block_end><def_stmt>get_fake_raw_query self<block_start><return>dict(query_feat=torch.FloatTensor(self.query_batch_size self.AvgWordInQuery self.QueryFeatureDim) query_mask=torch.ones(self.query_batch_size self.AvgWordInQuery))<block_end><block_end>"""
from baselines.profiling.profile_main import ProfileXML
profile_xml = ProfileXML(ctx_batch_size=400, query_batch_size=100)
profile_xml.get_ctx_encoding_time()
"""<class_stmt>ProfileXML(ProfileBase)<block_start><def_stmt>get_model_config self<block_start>xml_base_config["ctx_mode"]="video_sub_tef"<line_sep>xml_base_config["merge_two_stream"]=<true><line_sep>xml_base_config["cross_att"]=<true><line_sep>xml_base_config["max_ctx_l"]=self.AvgClipPerVideo<line_sep>xml_base_config["visual_input_size"]=self.VideoFeatureDim<line_sep>xml_base_config["query_input_size"]=self.QueryFeatureDim<line_sep>xml_base_config["sub_input_size"]=self.SubFeatureDim<line_sep>xml_base_config["hidden_size"]=self.HiddenSize<line_sep><return>xml_base_config<block_end><def_stmt>get_model self<block_start>model=XML(self.model_config)<line_sep>model.to(self.device)<line_sep>model.eval()<line_sep><return>model<block_end><def_stmt>get_fake_encoded_ctx self<block_start><return>dict(ctx_feat=torch.FloatTensor(self.ctx_batch_size self.model_config.max_ctx_l self.HiddenSize) ctx_mask=torch.FloatTensor(self.ctx_batch_size self.model_config.max_ctx_l) )<block_end><def_stmt>get_fake_encoded_query self<block_start><return>dict(query_feat=torch.FloatTensor(self.ctx_batch_size self.HiddenSize))<block_end><def_stmt>_get_ctx_encoding_time self video_feat sub_feat ctx_mask<block_start>"""Considered two modalities"""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model.cross_encode_context(video_feat ctx_mask sub_feat ctx_mask)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_ctx_encoding_time self<block_start><with_stmt>torch.no_grad()<block_start>fake_ctx_inputs=self.cast_dict_inputs_to_device(self.get_fake_ctx_raw_input_st_ed() self.device)<line_sep>raw_video=fake_ctx_inputs["video_feat"]<line_sep>raw_sub=fake_ctx_inputs["sub_feat"]<line_sep>ctx_mask=fake_ctx_inputs["ctx_mask"]<line_sep>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_ctx_encoding_time(raw_video raw_sub 
ctx_mask)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_query_encoding_time self raw_query query_mask<block_start>"""Considered two modalities"""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>encoded_query=self.model.encode_input(raw_query query_mask self.model.query_input_proj self.model.query_encoder self.model.query_pos_embed)<line_sep># (N, Lq, D)
# video level
video_query,sub_query=self.model.get_modularized_queries(encoded_query query_mask return_modular_att=<false>)<line_sep># st ed
video_query=self.model.video_query_linear(video_query)<line_sep>sub_query=self.model.sub_query_linear(sub_query)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_query_encoding_time self<block_start><with_stmt>torch.no_grad()<block_start>query_inputs=self.cast_dict_inputs_to_device(self.get_fake_raw_query() self.device)<line_sep>raw_query=query_inputs["query_feat"]<line_sep>query_mask=query_inputs["query_mask"]<line_sep>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_query_encoding_time(raw_query query_mask)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_retrieval_time self encoded_video_query encoded_video ctx_mask<block_start>"""Consider the queries are encoded, Calculate in a single modality then multiply by 2."""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model.get_video_level_scores(encoded_video_query encoded_video ctx_mask)<line_sep>torch.cuda.synchronize()<line_sep><return>(time.time()-st_time)<times>2<block_end><def_stmt>get_retrieval_time self<block_start><with_stmt>torch.no_grad()<block_start>encoded_query=self.cast_dict_inputs_to_device(self.get_fake_encoded_query() self.device)["query_feat"]<line_sep>fake_ctx_inputs=self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx() self.device)<line_sep>encoded_ctx=fake_ctx_inputs["ctx_feat"]<line_sep>ctx_mask=fake_ctx_inputs["ctx_mask"]<line_sep>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_retrieval_time(encoded_query encoded_ctx ctx_mask)]<block_end>times=torch.FloatTensor(times)# since we have two modalities
<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_span_prediction_time self query_feat ctx_feat ctx_mask<block_start>"""Considered two modalities"""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>similarity=torch.einsum("md,nld->mnl" query_feat ctx_feat)<line_sep>similarity=(similarity+similarity)/2# (Nq, Nv, L) from query to all videos.
n_q,n_c,l=similarity.shape<line_sep>similarity=similarity.view(n_q<times>n_c 1 l)<line_sep>st_prob=self.model.merged_st_predictor(similarity).view(n_q n_c l)# (Nq, Nv, L)
ed_prob=self.model.merged_ed_predictor(similarity).view(n_q n_c l)# (Nq, Nv, L)
st_prob=mask_logits(st_prob ctx_mask)# (N, L)
ed_prob=mask_logits(ed_prob ctx_mask)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_span_prediction_time self<block_start><with_stmt>torch.no_grad()<block_start>encoded_query=self.cast_dict_inputs_to_device(self.get_fake_encoded_query() self.device)["query_feat"]<line_sep>fake_ctx_inputs=self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx() self.device)<line_sep>encoded_ctx=fake_ctx_inputs["ctx_feat"]<line_sep>ctx_mask=fake_ctx_inputs["ctx_mask"]<line_sep>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_span_prediction_time(encoded_query encoded_ctx ctx_mask)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><block_end>"""
from baselines.profiling.profile_main import ProfileMEE
profile_mee = ProfileMEE(ctx_batch_size=400, query_batch_size=100)
profile_mee.get_ctx_encoding_time()
"""<class_stmt>ProfileMEE(ProfileBase)<block_start><def_stmt>get_model_config self<block_start>mee_base_cfg["ctx_mode"]="video_sub"<line_sep>mee_base_cfg["text_input_size"]=self.QueryFeatureDim<line_sep>mee_base_cfg["vid_input_size"]=self.VideoFeatureDim<line_sep>mee_base_cfg["output_size"]=self.HiddenSize<line_sep><return>mee_base_cfg<block_end><def_stmt>get_model self<block_start>model=MEE(self.model_config)<line_sep>model.to(self.device)<line_sep>model.eval()<line_sep><return>model<block_end><def_stmt>get_fake_raw_ctx self<block_start><return>dict(vid_feat=torch.FloatTensor(self.ctx_batch_size self.VideoFeatureDim) sub_feat=torch.FloatTensor(self.ctx_batch_size self.QueryFeatureDim))<block_end><def_stmt>get_fake_encoded_ctx_query self<block_start><return>dict(ctx_feat=torch.FloatTensor(self.ctx_batch_size self.HiddenSize) query_feat=torch.FloatTensor(self.ctx_batch_size self.HiddenSize))<block_end><def_stmt>_get_ctx_encoding_time self vid_feat sub_feat<block_start>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model.video_gu(vid_feat)<line_sep>self.model.sub_gu(sub_feat)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_ctx_encoding_time self<block_start>feat_dict=self.cast_dict_inputs_to_device(self.get_fake_raw_ctx() self.device)<with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_ctx_encoding_time(**feat_dict)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_query_encoding_time self query_feat<block_start>"""Considered 2 modalities"""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>pooled_query=self.model.query_pooling(query_feat)# (N, Dt)
video_query=self.model.video_query_gu(pooled_query)<line_sep>sub_query=self.model.sub_query_gu(pooled_query)<line_sep>stream_weights=self.model.moe_fc(pooled_query)# (N, 2)
torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_query_encoding_time self<block_start>raw_query=self.cast_dict_inputs_to_device(self.get_fake_raw_query() self.device)["query_feat"]<with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_query_encoding_time(raw_query)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_retrieval_time self encoded_query encoded_ctx<block_start>"""Considered 2 modalities"""<line_sep>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>torch.einsum("md,nd->mn" encoded_query encoded_ctx)# (N, N)
torch.cuda.synchronize()<line_sep><return>(time.time()-st_time)<times>2<block_end><def_stmt>get_retrieval_time self<block_start>model_inputs=self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx_query() self.device)<line_sep>encoded_query=model_inputs["ctx_feat"]<line_sep>encoded_ctx=model_inputs["query_feat"]<with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_retrieval_time(encoded_query encoded_ctx)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><block_end><class_stmt>ProfileCAL(ProfileBase)<block_start><def_stmt>get_model_config self<block_start>cal_base_cfg["ctx_mode"]="video_sub"<line_sep>cal_base_cfg["embedding_size"]=self.QueryFeatureDim<line_sep>cal_base_cfg["visual_input_size"]=self.VideoFeatureDim<times>2<line_sep>cal_base_cfg["textual_input_size"]=self.SubFeatureDim<times>2<line_sep>cal_base_cfg["output_size"]=self.HiddenSize<line_sep><return>cal_base_cfg<block_end><def_stmt>get_model self<block_start>model=CALWithSub(self.model_config)<line_sep>model.to(self.device)<line_sep>model.eval()<line_sep><return>model<block_end><def_stmt>get_fake_raw_ctx self model_name="cal"<block_start>"""The features are `*2` since they use both global and local features"""<line_sep><return>dict(sub_feat=torch.FloatTensor(self.ctx_batch_size self.AvgProposalPerVideo self.AvgClipPerProposal self.SubFeatureDim<times>2) vid_feat=torch.FloatTensor(self.ctx_batch_size self.AvgProposalPerVideo self.AvgClipPerProposal self.VideoFeatureDim<times>2))<block_end><def_stmt>_get_ctx_encoding_time self sub_feat vid_feat model_name="cal"<block_start><if_stmt>model_name<eq>"mcn"<block_start>sub_feat=sub_feat.sum(2)<line_sep>vid_feat=vid_feat.sum(2)<block_end>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model.moment_encoder(vid_feat module_name="video")<line_sep>self.model.moment_encoder(sub_feat 
module_name="sub")<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_ctx_encoding_time self model_name="cal"<block_start>"""model_name: str, `cal` or `mcn`"""<line_sep>feat_dict=self.cast_dict_inputs_to_device(self.get_fake_raw_ctx(model_name=model_name) self.device)<line_sep>feat_dict["model_name"]=model_name<with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_ctx_encoding_time(**feat_dict)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><def_stmt>_get_query_encoding_time self query_feat query_mask<block_start>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model.query_encoder(query_feat query_mask)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_query_encoding_time self<block_start>feat_dict=self.cast_dict_inputs_to_device(self.get_fake_raw_query() self.device)<with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_query_encoding_time(**feat_dict)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><block_end><class_stmt>ProfileExCL(ProfileBase)<block_start><def_stmt>get_model_config self<block_start>excl_base_cfg["ctx_mode"]="video_sub"<line_sep>excl_base_cfg["query_input_size"]=self.QueryFeatureDim<line_sep>excl_base_cfg["visual_input_size"]=self.VideoFeatureDim<line_sep>excl_base_cfg["sub_input_size"]=self.SubFeatureDim<line_sep>excl_base_cfg["output_size"]=self.HiddenSize<line_sep><return>excl_base_cfg<block_end><def_stmt>get_model self<block_start>model=EXCL(self.model_config)<line_sep>model.to(self.device)<line_sep>model.eval()<line_sep><return>model<block_end><def_stmt>get_fake_raw_input self<block_start>"""The features are `*2` since they use both global and local 
features"""<line_sep><return>dict(query_feat=torch.FloatTensor(self.ctx_batch_size self.AvgWordInQuery self.QueryFeatureDim) query_mask=torch.ones((self.ctx_batch_size self.AvgWordInQuery)) sub_feat=torch.FloatTensor(self.ctx_batch_size self.AvgClipPerVideo self.SubFeatureDim) sub_mask=torch.ones(self.ctx_batch_size self.AvgClipPerVideo) video_feat=torch.FloatTensor(self.ctx_batch_size self.AvgClipPerVideo self.VideoFeatureDim) video_mask=torch.ones(self.ctx_batch_size self.AvgClipPerVideo) tef_feat=torch.FloatTensor(self.ctx_batch_size self.AvgClipPerVideo 2) tef_mask=torch.ones(self.ctx_batch_size self.AvgClipPerVideo) st_ed_indices=torch.ones(2 2) # not used.
)<block_end><def_stmt>_get_prediction_time self input_dict<block_start>torch.cuda.synchronize()<line_sep>st_time=time.time()<line_sep>self.model(**input_dict)<line_sep>torch.cuda.synchronize()<line_sep><return>time.time()-st_time<block_end><def_stmt>get_prediction_time self<block_start>"""model_name: str, `cal` or `mcn`"""<line_sep>feat_dict=self.cast_dict_inputs_to_device(self.get_fake_raw_input() self.device)<line_sep>feat_dict["is_training"]=<false><with_stmt>torch.no_grad()<block_start>times=[]<for_stmt>_ trange(self.N_Runs)<block_start>times<augadd>[self._get_prediction_time(feat_dict)]<block_end>times=torch.FloatTensor(times)<block_end><return>dict(avg=float(times.mean()) std=float(times.std()))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--model" type=str help="")<line_sep>parser.add_argument("--ctx_batch_size" type=int default=400)<line_sep>parser.add_argument("--query_batch_size" type=int default=100)<line_sep>parser.add_argument("--save_dir" type=str default="baselines/profiling/cache")<line_sep>args=parser.parse_args()<line_sep>model=args.model<line_sep>query_batch_size=args.query_batch_size<line_sep>ctx_batch_size=args.ctx_batch_size<if_stmt>model<eq>"mee"<block_start>profile_mee=ProfileMEE(ctx_batch_size=ctx_batch_size query_batch_size=query_batch_size)<line_sep># use the 2nd one to report time
profile_mee.get_ctx_encoding_time()<line_sep>ctx_enc_time=profile_mee.get_ctx_encoding_time()<line_sep>query_enc_time=profile_mee.get_query_encoding_time()<block_end><elif_stmt>model<eq>"cal"<block_start>profile_cal=ProfileCAL(ctx_batch_size=ctx_batch_size query_batch_size=query_batch_size)<line_sep># use the 2nd one to report time
profile_cal.get_ctx_encoding_time()<line_sep>ctx_enc_time=profile_cal.get_ctx_encoding_time(model_name="cal")<line_sep>query_enc_time=profile_cal.get_query_encoding_time()<block_end><elif_stmt>model<eq>"mcn"<block_start>profile_cal=ProfileCAL(ctx_batch_size=ctx_batch_size query_batch_size=query_batch_size)<line_sep># use the 2nd one to report time
profile_cal.get_ctx_encoding_time()<line_sep>ctx_enc_time=profile_cal.get_ctx_encoding_time(model_name="mcn")<line_sep>query_enc_time=profile_cal.get_query_encoding_time()<block_end><elif_stmt>model<eq>"xml"<block_start>profile_xml=ProfileXML(ctx_batch_size=ctx_batch_size query_batch_size=query_batch_size)<line_sep># use the 2nd one to report time
profile_xml.get_ctx_encoding_time()<line_sep>ctx_enc_time=profile_xml.get_ctx_encoding_time()<line_sep>query_enc_time=profile_xml.get_query_encoding_time()<block_end><elif_stmt>model<eq>"excl"<block_start>profile_excl=ProfileExCL(ctx_batch_size=ctx_batch_size query_batch_size=ctx_batch_size)<line_sep># use the 2nd one to report time
profile_excl.get_prediction_time()<line_sep>ctx_enc_time=profile_excl.get_prediction_time()<line_sep>query_enc_time=0<line_sep># Calculate the total time as ctx_enc_time * (100 * 1M / ctx_batch_size)
<block_end><else_stmt><block_start><raise>NotImplementedError<block_end># ctx_enc_time = ctx_enc_time
save_path=os.path.join(args.save_dir "{}_profile_main.json".format(model))<line_sep>n_videos=ProfileBase.N_Videos<line_sep>res=dict(ctx_enc_time=ctx_enc_time ctx_enc_avg_time_all_videos=ctx_enc_time["avg"]<times>n_videos/ctx_batch_size query_enc_time=query_enc_time n_videos=n_videos ctx_batch_size=ctx_batch_size query_batch_size=query_batch_size model=model)<line_sep>save_json(res save_path save_pretty=<true>)<line_sep>pprint.pprint(res)<block_end> |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_stmt>sys<import_from_stmt>absl logging<def_stmt>generate_stand_vocab old_vocab new_vocab<block_start>vocab_file=open(new_vocab 'w')<line_sep>vocab_file.write('<pad>'+'\t'+'0'+'\n')<line_sep>vocab_file.write('<s>'+'\t'+'1'+'\n')<line_sep>vocab_file.write('</s>'+'\t'+'2'+'\n')<line_sep>vocab_file.write('<unk>'+'\t'+'3'+'\n')<line_sep>vocab_file.write('<sos>'+'\t'+'4'+'\n')<line_sep>vocab_file.write('<eos>'+'\t'+'5'+'\n')<line_sep>idx=6<with_stmt>open(old_vocab 'r')<as>f<block_start><for_stmt>i,line enumerate(f.readlines())<block_start><if_stmt>i<g>2<block_start>vocab_file.write(line.strip()+'\t'+str(idx)+'\n')<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.set_verbosity(logging.INFO)<if_stmt>len(sys.argv)<ne>3<block_start>logging.error("Usage python {} old_vocab new_vocab".format(sys.argv[0]))<line_sep>sys.exit(-1)<block_end>old_vocab=sys.argv[1]<line_sep>new_vocab=sys.argv[2]<line_sep>generate_stand_vocab(old_vocab new_vocab)<block_end> |
"""Unit test for KNX/IP HPAI objects."""<import_stmt>pytest<import_from_stmt>xknx.exceptions ConversionError CouldNotParseKNXIP<import_from_stmt>xknx.knxip HPAI<class_stmt>TestKNXIPHPAI<block_start>"""Test class for KNX/IP HPAI objects."""<def_stmt>test_hpai self<block_start>"""Test parsing and streaming HPAI KNX/IP fragment."""<line_sep>raw=(0x08 0x01 0xC0 0xA8 0x2A 0x01 0x84 0x95)<line_sep>hpai=HPAI()<assert_stmt>hpai.from_knx(raw)<eq>8<assert_stmt>hpai.ip_addr<eq>"192.168.42.1"<assert_stmt>hpai.port<eq>33941<line_sep>hpai2=HPAI(ip_addr="192.168.42.1" port=33941)<assert_stmt>hpai2.to_knx()<eq>list(raw)<block_end><def_stmt>test_from_knx_wrong_input1 self<block_start>"""Test parsing of wrong HPAI KNX/IP packet (wrong length)."""<line_sep>raw=(0x08 0x01 0xC0 0xA8 0x2A)<with_stmt>pytest.raises(CouldNotParseKNXIP)<block_start>HPAI().from_knx(raw)<block_end><block_end><def_stmt>test_from_knx_wrong_input2 self<block_start>"""Test parsing of wrong HPAI KNX/IP packet (wrong length byte)."""<line_sep>raw=(0x09 0x01 0xC0 0xA8 0x2A 0x01 0x84 0x95)<with_stmt>pytest.raises(CouldNotParseKNXIP)<block_start>HPAI().from_knx(raw)<block_end><block_end><def_stmt>test_from_knx_wrong_input3 self<block_start>"""Test parsing of wrong HPAI KNX/IP packet (wrong HPAI type)."""<line_sep>raw=(0x08 0x02 0xC0 0xA8 0x2A 0x01 0x84 0x95)<with_stmt>pytest.raises(CouldNotParseKNXIP)<block_start>HPAI().from_knx(raw)<block_end><block_end><def_stmt>test_to_knx_wrong_ip self<block_start>"""Test serializing HPAI to KNV/IP with wrong ip type."""<line_sep>hpai=HPAI(ip_addr=127001)<with_stmt>pytest.raises(ConversionError)<block_start>hpai.to_knx()<block_end><block_end><block_end> |
<import_from_stmt>timemachines.skaters.nproph.nprophetinclusion using_neuralprophet NeuralProphet<if_stmt>using_neuralprophet<block_start><import_stmt>pandas<as>pd<import_from_stmt>typing List Tuple Any<import_from_stmt>timemachines.skatertools.utilities.conventions wrap<import_from_stmt>timemachines.skaters.nproph.nprophparams NPROPHET_MODEL NPROPHET_META<def_stmt>nprophet_iskater_factory y:[[float]] k:int a:List=<none> t:List=<none> e=<none> freq:str=<none> n_max=1000 recursive:bool=<false> model_params:dict=<none> return_forecast=<true># For now we keep it simple. Will add to this over time
<block_start>y0s=[wrap(yi)[0]<for>yi y]<line_sep>x,x_std,forecast,m=nprophet_fit_and_predict_simple(y=y0s k=k freq=freq model_params=model_params)<line_sep><return>(x x_std forecast m)<if>return_forecast<else>(x x_std)<block_end><def_stmt>nprophet_fit_and_predict_simple y:[float] k:int freq:str=<none> model_params:dict=<none><arrow>Tuple[List List Any Any]<block_start>""" Simpler wrapper for offlinetesting - univariate only """<assert_stmt>isinstance(y[0] float)<line_sep>freq=freq<or>NPROPHET_META['freq']<line_sep>used_params=NPROPHET_MODEL<line_sep>used_params.update({'n_forecasts':k})<if_stmt>model_params<block_start>used_params.update(model_params)<block_end><if_stmt>len(y)<l>used_params['n_lags']<block_start>x=[wrap(y)[0]]<times>k<line_sep>x_std=[1.0]<times>k<line_sep><return>x x_std <none> <none><block_end><else_stmt><block_start>model=NeuralProphet(**used_params)<line_sep>df=pd.DataFrame(columns=['y'] data=y)<line_sep>df['ds']=pd.date_range(start='2021-01-01' periods=len(y) freq=freq)<line_sep>metrics=model.fit(df freq=freq epochs=40 progress_bar=<false>)<line_sep>future=model.make_future_dataframe(df)<line_sep>forecast=model.predict(future)<line_sep>x=[forecast['yhat'+str(j+1)].values[-k+j]<for>j range(k)]<line_sep>x_std=[1.0]<times>k<line_sep><return>x x_std forecast model<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><assert_stmt>using_neuralprophet 'pip install neuralprophet'<import_from_stmt>timemachines.skatertools.data.real hospital<line_sep>k=3<line_sep>n=500<line_sep>y=hospital(n=n)[-200:]<line_sep>x,x_std,forecast,m=nprophet_iskater_factory(y=y k=k)<line_sep>print(x)<assert_stmt>len(x)<eq>k<line_sep>x1,x_std1,forecast1,m1=nprophet_fit_and_predict_simple(y=y k=k)<if_stmt><true><block_start>m.plot(forecast)<line_sep>m1.plot(forecast1)<import_stmt>matplotlib.pyplot<as>plt<line_sep>plt.show()<block_end><block_end> |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
<import_from_stmt>.common Common<import_from_stmt>.vocabulary ThreatDescriptor<as>td<import_from_stmt>.vocabulary ThreatExchange<as>t<class_stmt>ThreatDescriptor(Common)<block_start>_URL=t.URL+t.VERSION+t.THREAT_DESCRIPTORS<line_sep>_DETAILS=t.URL+t.VERSION<line_sep>_RELATED=t.URL+t.VERSION<line_sep>_fields=[td.ADDED_ON td.CONFIDENCE td.DESCRIPTION td.EXPIRED_ON td.FIRST_ACTIVE td.ID td.INDICATOR td.LAST_ACTIVE td.LAST_UPDATED td.METADATA td.MY_REACTIONS td.OWNER td.PRECISION td.PRIVACY_MEMBERS td.PRIVACY_TYPE td.RAW_INDICATOR td.REVIEW_STATUS td.SEVERITY td.SHARE_LEVEL td.SOURCE_URI td.STATUS td.TAGS td.TYPE ]<line_sep>_default_fields=[td.ADDED_ON td.CONFIDENCE td.DESCRIPTION td.EXPIRED_ON td.FIRST_ACTIVE td.ID td.INDICATOR td.LAST_ACTIVE td.LAST_UPDATED td.METADATA td.MY_REACTIONS td.OWNER td.PRECISION td.RAW_INDICATOR td.REVIEW_STATUS td.SEVERITY td.SHARE_LEVEL td.SOURCE_URI td.STATUS td.TAGS td.TYPE ]<line_sep>_connections=[]<line_sep>_unique=[]<block_end> |
<import_stmt>sys<line_sep>sys.path.append('..')<import_stmt>external_test<line_sep>external_test.runExternalTest()<line_sep> |
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
<import_from_stmt>typing Text<import_from_stmt>zenml.core.base_component BaseComponent<class_stmt>MockComponent(BaseComponent)<block_start>"""Mocking the base component for testing."""<line_sep>tmp_path:str<def_stmt>get_serialization_dir self<arrow>Text<block_start>"""Mock serialization dir"""<line_sep><return>self.tmp_path<block_end><block_end><def_stmt>test_base_component_serialization_logic tmp_path<block_start>"""Tests the UUID serialization logic of BaseComponent"""<line_sep># Application of the monkeypatch to replace Path.home
# with the behavior of mockreturn defined above.
# mc = MockComponent(tmp_path=str(tmp_path))
# Calling getssh() will use mockreturn in place of Path.home
# for this test with the monkeypatch.
# print(mc.get_serialization_dir())
<block_end> |
<import_from_stmt>unittest TestCase<import_from_stmt>unittest.mock Mock<import_from_stmt>parameterized parameterized<import_from_stmt>samcli.lib.utils.packagetype ZIP<import_from_stmt>samcli.local.lambdafn.config FunctionConfig<import_from_stmt>samcli.commands.local.cli_common.user_exceptions InvalidSamTemplateException<class_stmt>TestFunctionConfig(TestCase)<block_start>DEFAULT_MEMORY=128<line_sep>DEFAULT_TIMEOUT=3<def_stmt>setUp self<block_start>self.name="name"<line_sep>self.runtime="runtime"<line_sep>self.handler="handler"<line_sep>self.imageuri=<none><line_sep>self.imageconfig=<none><line_sep>self.packagetype=ZIP<line_sep>self.code_path="codepath"<line_sep>self.memory=1234<line_sep>self.timeout=34<line_sep>self.env_vars_mock=Mock()<line_sep>self.layers=["layer1"]<line_sep>self.architecture="arm64"<block_end><def_stmt>test_init_with_env_vars self<block_start>config=FunctionConfig(self.name self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout=self.timeout env_vars=self.env_vars_mock )<line_sep>self.assertEqual(config.name self.name)<line_sep>self.assertEqual(config.runtime self.runtime)<line_sep>self.assertEqual(config.handler self.handler)<line_sep>self.assertEqual(config.imageuri self.imageuri)<line_sep>self.assertEqual(config.imageconfig self.imageconfig)<line_sep>self.assertEqual(config.packagetype self.packagetype)<line_sep>self.assertEqual(config.code_abs_path self.code_path)<line_sep>self.assertEqual(config.layers self.layers)<line_sep>self.assertEqual(config.memory self.memory)<line_sep>self.assertEqual(config.timeout self.timeout)<line_sep>self.assertEqual(config.env_vars self.env_vars_mock)<line_sep>self.assertEqual(self.env_vars_mock.handler self.handler)<line_sep>self.assertEqual(self.env_vars_mock.memory self.memory)<line_sep>self.assertEqual(self.env_vars_mock.timeout self.timeout)<block_end><def_stmt>test_init_without_optional_values 
self<block_start>config=FunctionConfig(self.name self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture )<line_sep>self.assertEqual(config.name self.name)<line_sep>self.assertEqual(config.runtime self.runtime)<line_sep>self.assertEqual(config.handler self.handler)<line_sep>self.assertEqual(config.packagetype self.packagetype)<line_sep>self.assertEqual(config.imageuri self.imageuri)<line_sep>self.assertEqual(config.imageconfig self.imageconfig)<line_sep>self.assertEqual(config.code_abs_path self.code_path)<line_sep>self.assertEqual(config.layers self.layers)<line_sep>self.assertEqual(config.memory self.DEFAULT_MEMORY)<line_sep>self.assertEqual(config.timeout self.DEFAULT_TIMEOUT)<line_sep>self.assertIsNotNone(config.env_vars)<line_sep>self.assertEqual(config.env_vars.handler self.handler)<line_sep>self.assertEqual(config.env_vars.memory self.DEFAULT_MEMORY)<line_sep>self.assertEqual(config.env_vars.timeout self.DEFAULT_TIMEOUT)<block_end><def_stmt>test_init_with_timeout_of_int_string self<block_start>config=FunctionConfig(self.name self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout="34" env_vars=self.env_vars_mock )<line_sep>self.assertEqual(config.name self.name)<line_sep>self.assertEqual(config.runtime self.runtime)<line_sep>self.assertEqual(config.handler self.handler)<line_sep>self.assertEqual(config.packagetype self.packagetype)<line_sep>self.assertEqual(config.imageuri self.imageuri)<line_sep>self.assertEqual(config.imageconfig self.imageconfig)<line_sep>self.assertEqual(config.code_abs_path self.code_path)<line_sep>self.assertEqual(config.layers self.layers)<line_sep>self.assertEqual(config.memory self.memory)<line_sep>self.assertEqual(config.timeout 34)<line_sep>self.assertEqual(config.env_vars self.env_vars_mock)<line_sep>self.assertEqual(self.env_vars_mock.handler 
self.handler)<line_sep>self.assertEqual(self.env_vars_mock.memory self.memory)<line_sep>self.assertEqual(self.env_vars_mock.timeout 34)<block_end><block_end><class_stmt>TestFunctionConfigInvalidTimeouts(TestCase)<block_start><def_stmt>setUp self<block_start>self.name="name"<line_sep>self.runtime="runtime"<line_sep>self.handler="handler"<line_sep>self.imageuri=<none><line_sep>self.imageconfig=<none><line_sep>self.packagetype=ZIP<line_sep>self.code_path="codepath"<line_sep>self.memory=1234<line_sep>self.env_vars_mock=Mock()<line_sep>self.layers=["layer1"]<line_sep>self.architecture="x86_64"<block_end>@parameterized.expand([("none int string" ) ({"dictionary":"is not a string either"} ) ("/local/lambda/timeout" ) ("3.2" ) ("4.2" ) ("0.123" ) ])<def_stmt>test_init_with_invalid_timeout_values self timeout<block_start><with_stmt>self.assertRaises(InvalidSamTemplateException)<block_start>FunctionConfig(self.name self.runtime self.imageuri self.handler self.packagetype self.imageconfig self.code_path self.layers self.architecture memory=self.memory timeout=timeout env_vars=self.env_vars_mock )<block_end><block_end><block_end><class_stmt>TestFunctionConfig_equals(TestCase)<block_start>DEFAULT_MEMORY=128<line_sep>DEFAULT_TIMEOUT=3<def_stmt>setUp self<block_start>self.name="name"<line_sep>self.name2="name2"<line_sep>self.runtime="runtime"<line_sep>self.handler="handler"<line_sep>self.imageuri=<none><line_sep>self.imageconfig=<none><line_sep>self.packagetype=ZIP<line_sep>self.code_path="codepath"<line_sep>self.memory=1234<line_sep>self.timeout=34<line_sep>self.env_vars_mock=Mock()<line_sep>self.layers=["layer1"]<line_sep>self.architecture="arm64"<block_end><def_stmt>test_equals_function_config self<block_start>config1=FunctionConfig(self.name self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout=self.timeout env_vars=self.env_vars_mock )<line_sep>config2=FunctionConfig(self.name 
self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout=self.timeout env_vars=self.env_vars_mock )<line_sep>self.assertTrue(config1<eq>config2)<block_end><def_stmt>test_not_equals_function_config self<block_start>config1=FunctionConfig(self.name self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout=self.timeout env_vars=self.env_vars_mock )<line_sep>config2=FunctionConfig(self.name2 self.runtime self.handler self.imageuri self.imageconfig self.packagetype self.code_path self.layers self.architecture memory=self.memory timeout=self.timeout env_vars=self.env_vars_mock )<line_sep>self.assertTrue(config1<ne>config2)<block_end><block_end> |
<import_from_stmt>rockstar RockStar<line_sep>lolcode_code="""HAI
CAN HAS STDIO?
VISIBLE "HAI WORLD!"
KTHXBYE"""<line_sep>rock_it_bro=RockStar(days=400 file_name='helloworld.lol' code=lolcode_code)<line_sep>rock_it_bro.make_me_a_rockstar()<line_sep> |
# -*- coding: utf-8 -*-
<import_from_stmt>amplify.agent.common.util.math median<import_from_stmt>unittest TestCase<import_from_stmt>hamcrest *<line_sep>__author__="<NAME>"<line_sep>__copyright__="Copyright (C) Nginx, Inc. All rights reserved."<line_sep>__license__=""<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<class_stmt>MathTestCase(TestCase)<block_start><def_stmt>test_median self# even length
<block_start>assert_that(median([1 3 5 7]) equal_to(4.0))<line_sep># unsorted
assert_that(median([1 5 7 3]) equal_to(4.0))<line_sep># odd length
assert_that(median([1 2 3 4 5 6 7]) equal_to(4.0))<line_sep>assert_that(median([]) equal_to(<none>))<block_end><block_end> |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with formatting utils of the aea cli."""<import_from_stmt>typing Dict List<import_from_stmt>aea.configurations.base AgentConfig<import_from_stmt>aea.configurations.loader ConfigLoader<import_from_stmt>aea.exceptions enforce<import_from_stmt>aea.helpers.io open_file<def_stmt>format_items items:List[Dict]<arrow>str<block_start>"""Format list of items (protocols/connections) to a string for CLI output."""<line_sep>list_str=""<for_stmt>item items<block_start>list_str<augadd>("{line}\n"<concat>"Public ID: {public_id}\n"<concat>"Name: {name}\n"<concat>"Description: {description}\n"<concat>"Author: {author}\n"<concat>"Version: {version}\n"<concat>"{line}\n".format(name=item["name"] public_id=item["public_id"] description=item["description"] author=item["author"] version=item["version"] line="-"<times>30 ))<block_end><return>list_str<block_end><def_stmt>retrieve_details name:str loader:ConfigLoader config_filepath:str<arrow>Dict<block_start>"""Return description of a protocol, skill, connection."""<with_stmt>open_file(str(config_filepath))<as>fp<block_start>config=loader.load(fp)<block_end>item_name=config.agent_name<if>isinstance(config AgentConfig)<else>config.name<line_sep>enforce(item_name<eq>name "Item names do not match!")<line_sep><return>{"public_id":str(config.public_id) "name":item_name "author":config.author "description":config.description "version":config.version }<block_end><def_stmt>sort_items items:List[Dict]<arrow>List[Dict]<block_start>"""
Sort a list of dict items associated with packages.
:param items: list of dicts that represent items.
:return: sorted list.
"""<line_sep><return>sorted(items key=<lambda>k:k["name"])<block_end> |
<import_from_stmt>arm.logicnode.arm_nodes *<class_stmt>SeparateVectorNode(ArmLogicTreeNode)<block_start>"""Splits the given vector into X, Y and Z."""<line_sep>bl_idname='LNSeparateVectorNode'<line_sep>bl_label='Separate XYZ'<line_sep>arm_section='vector'<line_sep>arm_version=1<def_stmt>arm_init self context<block_start>self.add_input('ArmVectorSocket' 'Vector')<line_sep>self.add_output('ArmFloatSocket' 'X')<line_sep>self.add_output('ArmFloatSocket' 'Y')<line_sep>self.add_output('ArmFloatSocket' 'Z')<block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<class_stmt>conv2d_transpose(nn.Module)<block_start><def_stmt>__init__ self batchNorm in_planes out_planes kernel_size=1 stride=1 activation=<true><block_start>super(conv2d_transpose self).__init__()<line_sep>self.conv=nn.ConvTranspose2d(in_planes out_planes kernel_size=kernel_size stride=stride padding=(kernel_size-1)<floordiv>2)<line_sep>self.biases=self.conv.bias<line_sep>self.weights=self.conv.weight<line_sep>self.batchNorm=batchNorm<line_sep>self.batch_normalization=nn.BatchNorm2d(out_planes momentum=0.01 eps=1e-6)<if_stmt>activation<block_start>self.activation_fn=nn.LeakyReLU(0.2)<block_end><else_stmt><block_start>self.activation_fn=nn.Identity()<block_end><block_end><def_stmt>forward self x<block_start>x=self.conv(x)<if_stmt>self.batchNorm<block_start>x=self.batch_normalization(x)<block_end>x=self.activation_fn(x)<line_sep><return>x<block_end><block_end><class_stmt>conv2d(nn.Module)<block_start><def_stmt>__init__ self batchNorm in_planes out_planes kernel_size=1 stride=1 activation=<true><block_start>super(conv2d self).__init__()<line_sep>self.conv=nn.Conv2d(in_planes out_planes kernel_size=kernel_size stride=stride padding=(kernel_size-1)<floordiv>2)<line_sep>self.biases=self.conv.bias<line_sep>self.weights=self.conv.weight<line_sep>self.batchNorm=batchNorm<if_stmt>self.batchNorm<block_start>self.batch_normalization=nn.BatchNorm2d(out_planes momentum=0.01 eps=1e-6)<block_end><if_stmt>activation<block_start>self.activation_fn=nn.LeakyReLU(0.2 inplace=<true>)<block_end><else_stmt><block_start>self.activation_fn=nn.Identity()<block_end><block_end><def_stmt>forward self x<block_start>x=self.conv(x)<if_stmt>self.batchNorm<block_start>x=self.batch_normalization(x)<block_end>x=self.activation_fn(x)<line_sep><return>x<block_end><block_end> |
<import_stmt>time<import_stmt>json<import_from_stmt>wptserve.utils isomorphic_decode isomorphic_encode<def_stmt>main request response<block_start>headers=[(b'Content-Type' b'application/javascript') (b'Cache-Control' b'max-age=86400') (b'Last-Modified' isomorphic_encode(time.strftime(u"%a, %d %b %Y %H:%M:%S GMT" time.gmtime())))]<line_sep>test=request.GET[b'test']<line_sep>body=u'''
const mainTime = {time:8f};
const testName = {test};
importScripts('update-max-aged-worker-imported-script.py');
addEventListener('message', event => {{
event.source.postMessage({{
mainTime,
importTime,
test: {test}
}});
}});
'''.format(time=time.time() test=json.dumps(isomorphic_decode(test)))<line_sep><return>headers body<block_end> |
"""
This is duplicated from Django 3.0 to avoid
starting an import chain that ends up with
ContentTypes which may not be installed in a
Djangae project.
"""<class_stmt>BaseBackend<block_start><def_stmt>authenticate self request **kwargs<block_start><return><none><block_end>@classmethod<def_stmt>can_authenticate cls request<block_start>"""
This is a pre-check to see if the credentials are
available to try to authenticate.
"""<line_sep><return><true><block_end><def_stmt>get_user self user_id<block_start><return><none><block_end><def_stmt>get_user_permissions self user_obj obj=<none><block_start><return>set()<block_end><def_stmt>get_group_permissions self user_obj obj=<none><block_start><return>set()<block_end><def_stmt>get_all_permissions self user_obj obj=<none><block_start><return>{*self.get_user_permissions(user_obj obj=obj) *self.get_group_permissions(user_obj obj=obj) }<block_end><def_stmt>has_perm self user_obj perm obj=<none><block_start><return>perm<in>self.get_all_permissions(user_obj obj=obj)<block_end><block_end> |
<import_from_stmt>align.cell_fabric.transformation Rect<def_stmt>test_toList <block_start>r=Rect(0 0 1 1)<assert_stmt>r.toList()<eq>[0 0 1 1]<block_end><def_stmt>test_canonical <block_start>r=Rect(1 1 0 0)<assert_stmt>r.canonical().toList()<eq>[0 0 1 1]<block_end><def_stmt>test_repr <block_start>r=Rect(0 0 1 1)<assert_stmt>r.__repr__()<eq>"[0, 0, 1, 1]"<assert_stmt>repr(r)<eq>"[0, 0, 1, 1]"<assert_stmt>str(r)<eq>"[0, 0, 1, 1]"<block_end> |
"""
For an array, we can build a SegmentTree for it, each node stores an extra attribute count to denote the number of
elements in the the array which value is between interval start and end. (The array may not fully filled by elements)
Design a query method with three parameters root, start and end, find the number of elements in the in array's interval
[start, end] by the given root of value SegmentTree.
Have you met this question in a real interview? Yes
Example
For array [0, empty, 2, 3], the corresponding value Segment Tree is:
[0, 3, count=3]
/ \
[0,1,count=1] [2,3,count=2]
/ \ / \
[0,0,count=1] [1,1,count=0] [2,2,count=1], [3,3,count=1]
query(1, 1), return 0
query(1, 2), return 1
query(2, 3), return 2
query(0, 2), return 2
"""<line_sep>__author__='Daniel'<line_sep>DEFAULT=0<line_sep>f=<lambda>x y:x+y<class_stmt>Solution<block_start><def_stmt>query self root s e<block_start>"""
Segment: [s, e]
:param root: The root of segment tree
:param start: start of segment/interval
:param end: end of segment/interval
:return: The count number in the interval [start, end]
"""<if_stmt><not>root<block_start><return>DEFAULT<block_end><if_stmt>s<le>root.start<and>e<ge>root.end<block_start><return>root.count<block_end><if_stmt>s<g>root.end<or>e<l>root.start<block_start><return>DEFAULT<block_end>l=self.query(root.left s e)<line_sep>r=self.query(root.right s e)<line_sep><return>f(l r)<block_end><block_end> |
<import_from_stmt>hearthbreaker.cards.base SpellCard<import_from_stmt>hearthbreaker.constants CHARACTER_CLASS CARD_RARITY<import_from_stmt>hearthbreaker.tags.base BuffUntil Buff<import_from_stmt>hearthbreaker.tags.event TurnStarted<import_from_stmt>hearthbreaker.tags.status Stealth Taunt Frozen<import_stmt>hearthbreaker.targeting<class_stmt>TheCoin(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("The Coin" 0 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false>)<block_end><def_stmt>use self player game<block_start>super().use(player game)<if_stmt>player.mana<l>10<block_start>player.mana<augadd>1<block_end><block_end><block_end><class_stmt>ArmorPlating(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Armor Plating" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.increase_health(1)<block_end><block_end><class_stmt>EmergencyCoolant(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Emergency Coolant" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.add_buff(Buff(Frozen()))<block_end><block_end><class_stmt>FinickyCloakfield(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Finicky Cloakfield" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.add_buff(BuffUntil(Stealth() TurnStarted()))<block_end><block_end><class_stmt>ReversingSwitch(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Reversing Switch" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> 
target_func=hearthbreaker.targeting.find_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>temp_attack=self.target.calculate_attack()<line_sep>temp_health=self.target.health<if_stmt>temp_attack<eq>0<block_start>self.target.die(<none>)<block_end><else_stmt><block_start>self.target.set_attack_to(temp_health)<line_sep>self.target.set_health_to(temp_attack)<block_end><block_end><block_end><class_stmt>RustyHorn(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Rusty Horn" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.add_buff(Buff(Taunt()))<block_end><block_end><class_stmt>TimeRewinder(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Time Rewinder" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.bounce()<block_end><block_end><class_stmt>WhirlingBlades(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Whirling Blades" 1 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false> target_func=hearthbreaker.targeting.find_minion_spell_target)<block_end><def_stmt>use self player game<block_start>super().use(player game)<line_sep>self.target.change_attack(1)<block_end><block_end>spare_part_list=[ArmorPlating() EmergencyCoolant() FinickyCloakfield() TimeRewinder() ReversingSwitch() RustyHorn() WhirlingBlades()]<class_stmt>GallywixsCoin(SpellCard)<block_start><def_stmt>__init__ self<block_start>super().__init__("Gallywix's Coin" 0 CHARACTER_CLASS.ALL CARD_RARITY.COMMON <false>)<block_end><def_stmt>use self player game<block_start>super().use(player 
game)<if_stmt>player.mana<l>10<block_start>player.mana<augadd>1<block_end><block_end><block_end> |
<import_stmt>subprocess<import_stmt>time<import_stmt>sys<import_stmt>signal<import_from_stmt>testutils assert_raises<line_sep>is_unix=<not>sys.platform.startswith("win")<if_stmt>is_unix<block_start><def_stmt>echo text<block_start><return>["echo" text]<block_end><def_stmt>sleep secs<block_start><return>["sleep" str(secs)]<block_end><block_end><else_stmt><block_start><def_stmt>echo text<block_start><return>["cmd" "/C" f"echo {text}"]<block_end><def_stmt>sleep secs# TODO: make work in a non-unixy environment (something with timeout.exe?)
<block_start><return>["sleep" str(secs)]<block_end><block_end>p=subprocess.Popen(echo("test"))<line_sep>time.sleep(0.1)<assert_stmt>p.returncode<is><none><assert_stmt>p.poll()<eq>0<assert_stmt>p.returncode<eq>0<line_sep>p=subprocess.Popen(sleep(2))<assert_stmt>p.poll()<is><none><with_stmt>assert_raises(subprocess.TimeoutExpired)<block_start><assert_stmt>p.wait(1)<block_end>p.wait()<assert_stmt>p.returncode<eq>0<line_sep>p=subprocess.Popen(echo("test") stdout=subprocess.PIPE)<line_sep>p.wait()<assert_stmt>p.stdout.read().strip()<eq>b"test"<line_sep>p=subprocess.Popen(sleep(2))<line_sep>p.terminate()<line_sep>p.wait()<if_stmt>is_unix<block_start><assert_stmt>p.returncode<eq>-signal.SIGTERM<block_end><else_stmt><block_start><assert_stmt>p.returncode<eq>1<block_end>p=subprocess.Popen(sleep(2))<line_sep>p.kill()<line_sep>p.wait()<if_stmt>is_unix<block_start><assert_stmt>p.returncode<eq>-signal.SIGKILL<block_end><else_stmt><block_start><assert_stmt>p.returncode<eq>1<block_end>p=subprocess.Popen(echo("test") stdout=subprocess.PIPE)<line_sep>(stdout stderr)=p.communicate()<assert_stmt>stdout.strip()<eq>b"test"<line_sep>p=subprocess.Popen(sleep(5) stdout=subprocess.PIPE)<with_stmt>assert_raises(subprocess.TimeoutExpired)<block_start>p.communicate(timeout=1)<block_end> |
<import_from_stmt>. get_main_movies_base_data<import_from_stmt>. get_main_movies_full_data<import_from_stmt>. get_celebrities_full_data<import_from_stmt>. down_video_images<import_from_stmt>. down_celebrity_images<line_sep> |
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>gpflow.params DataHolder Minibatch<import_from_stmt>gpflow autoflow params_as_tensors ParamList<import_from_stmt>gpflow.models.model Model<import_from_stmt>gpflow.mean_functions Identity Linear<import_from_stmt>gpflow.mean_functions Zero<import_from_stmt>gpflow.quadrature mvhermgauss<import_from_stmt>gpflow settings<line_sep>float_type=settings.float_type<import_from_stmt>doubly_stochastic_dgp.layers SVGP_Layer<def_stmt>init_layers_linear X Y Z kernels num_outputs=<none> mean_function=Zero() Layer=SVGP_Layer white=<false><block_start>num_outputs=num_outputs<or>Y.shape[1]<line_sep>layers=[]<line_sep>X_running,Z_running=X.copy() Z.copy()<for_stmt>kern_in,kern_out zip(kernels[:-1] kernels[1:])<block_start>dim_in=kern_in.input_dim<line_sep>dim_out=kern_out.input_dim<line_sep>print(dim_in dim_out)<if_stmt>dim_in<eq>dim_out<block_start>mf=Identity()<block_end><else_stmt><block_start><if_stmt>dim_in<g>dim_out# stepping down, use the pca projection
<block_start>_,_,V=np.linalg.svd(X_running full_matrices=<false>)<line_sep>W=V[:dim_out :].T<block_end><else_stmt># stepping up, use identity + padding
<block_start>W=np.concatenate([np.eye(dim_in) np.zeros((dim_in dim_out-dim_in))] 1)<block_end>mf=Linear(W)<line_sep>mf.set_trainable(<false>)<block_end>layers.append(Layer(kern_in Z_running dim_out mf white=white))<if_stmt>dim_in<ne>dim_out<block_start>Z_running=Z_running.dot(W)<line_sep>X_running=X_running.dot(W)<block_end><block_end># final layer
layers.append(Layer(kernels[-1] Z_running num_outputs mean_function white=white))<line_sep><return>layers<block_end><def_stmt>init_layers_input_prop X Y Z kernels num_outputs=<none> mean_function=Zero() Layer=SVGP_Layer white=<false><block_start>num_outputs=num_outputs<or>Y.shape[1]<line_sep>D=X.shape[1]<line_sep>M=Z.shape[0]<line_sep>layers=[]<for_stmt>kern_in,kern_out zip(kernels[:-1] kernels[1:])<block_start>dim_in=kern_in.input_dim<line_sep>dim_out=kern_out.input_dim-D<line_sep>std_in=kern_in.variance.read_value()<power>0.5<line_sep>pad=np.random.randn(M dim_in-D)<times>2.<times>std_in<line_sep>Z_padded=np.concatenate([Z pad] 1)<line_sep>layers.append(Layer(kern_in Z_padded dim_out Zero() white=white input_prop_dim=D))<block_end>dim_in=kernels[-1].input_dim<line_sep>std_in=kernels[-2].variance.read_value()<power>0.5<if>dim_in<g>D<else>1.<line_sep>pad=np.random.randn(M dim_in-D)<times>2.<times>std_in<line_sep>Z_padded=np.concatenate([Z pad] 1)<line_sep>layers.append(Layer(kernels[-1] Z_padded num_outputs mean_function white=white))<line_sep><return>layers<block_end> |
<import_from_stmt>flask Flask<import_from_stmt>flask_sqlalchemy SQLAlchemy<import_from_stmt>flask_script Manager<import_from_stmt>flask_migrate Migrate MigrateCommand<line_sep>app=Flask(__name__)<line_sep>app.config['SQLALCHEMY_DATABASE_URI']='postgres://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'<line_sep>db=SQLAlchemy(app)<line_sep>migrate=Migrate(app db)<line_sep>manager=Manager(app)<line_sep>manager.add_command('db' MigrateCommand)<class_stmt>UserData(db.Model)<block_start>__tablename__='UserData'<line_sep>Id=db.Column(db.Integer primary_key=<true>)<line_sep>Name=db.Column(db.String(64))<line_sep>Description=db.Column(db.String(256))<line_sep>CreateDate=db.Column(db.DateTime)<def_stmt>__init__ self Name Description CreateDate<block_start>self.Name=Name<line_sep>self.Description=Description<line_sep>self.CreateDate=CreateDate<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>manager.run()<block_end> |
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
<import_stmt>sys<import_from_stmt>awsglue.utils getResolvedOptions<import_from_stmt>awsglue.transforms *<import_from_stmt>pyspark.context SparkContext<import_from_stmt>awsglue.context GlueContext<import_from_stmt>awsglue.job Job<import_from_stmt>pyspark SparkConf<import_from_stmt>awsglue.dynamicframe DynamicFrame<import_from_stmt>awsglue.gluetypes Field IntegerType TimestampType StructType<line_sep>## @params: [JOB_NAME]
args=getResolvedOptions(sys.argv ['JOB_NAME'])<line_sep>sc=SparkContext()<line_sep>glueContext=GlueContext(sc)<line_sep>spark=glueContext.spark_session<line_sep>job=Job(glueContext)<line_sep>job.init(args['JOB_NAME'] args)<line_sep>######################################## test connection options ########################################
## please pick up and customize the right connection options for testing
## If you are using a large testing data set, please consider using column partitioning to parallel the data reading for performance purpose.
# DataSourceTest - - please configure according to your connector type and options
options_dataSourceTest_jdbc={"query":"select NumberOfEmployees, CreatedDate from Account" "className":"partner.jdbc.some.Driver" # test parameters
"url":"jdbc:some:url:SecurityToken=abc;" "user":"user" "password":"password" }<line_sep># ColumnPartitioningTest
# for JDBC connector only
options_columnPartitioningTest={"query":"select NumberOfEmployees, CreatedDate from Account where " "url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};" "secretId":"test-partner-driver" "className":"partner.jdbc.some.Driver" # test parameters
"partitionColumn":"RecordId__c" "lowerBound":"0" "upperBound":"13" "numPartitions":"2" }<line_sep># DataTypeMappingTest
# for JDBC connector only
options_dataTypeMappingTest={"query":"select NumberOfEmployees, CreatedDate from Account where " "url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};" "secretId":"test-partner-driver" "className":"partner.jdbc.some.Driver" # test parameter
"dataTypeMapping":{"INTEGER":"STRING"}}<line_sep># DbtableQueryTest
# for JDBC connector only
options_dbtableQueryTest={"url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};" "secretId":"test-partner-driver" "className":"partner.jdbc.some.Driver" # test parameter
"query":"select NumberOfEmployees, CreatedDate from Account"# "dbTable" : "Account"
}<line_sep># JDBCUrlTest - extra jdbc connections UseBulkAPI appended
# for JDBC connector only
options_JDBCUrlTest={"query":"select NumberOfEmployees, CreatedDate from Account" "secretId":"test-partner-driver" "className":"partner.jdbc.some.Driver" # test parameter
"url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};UseBulkAPI=true" }<line_sep># SecretsManagerTest - - please configure according to your connector type and options
options_secretsManagerTest={"query":"select NumberOfEmployees, CreatedDate from Account" "url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};" "className":"partner.jdbc.some.Driver" # test parameter
"secretId":"test-partner-driver"}<line_sep># FilterPredicateTest
# for JDBC connector only
options_filterPredicateTest={"query":"select NumberOfEmployees, CreatedDate from Account where" "url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};" "secretId":"test-partner-driver" "className":"partner.jdbc.some.Driver" # test parameter
"filterPredicate":"BillingState='CA'"}<line_sep>##################################### read data from data source ######################################
datasource0=glueContext.create_dynamic_frame_from_options(connection_type="marketplace.jdbc" connection_options=options_secretsManagerTest)<line_sep># pick up the right test conection options
######################################## validate data reading ########################################
## validate data schema and count
# more data type: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-types.html
expected_schema=StructType([Field("NumberOfEmployees" IntegerType()) Field("CreatedDate" TimestampType())])<line_sep>expected_count=2<assert_stmt>datasource0.schema()<eq>expected_schema<line_sep>print("expected schema: "+str(expected_schema.jsonValue()))<line_sep>print("result schema: "+str(datasource0.schema().jsonValue()))<line_sep>print("result schema in tree structure: ")<line_sep>datasource0.printSchema()<line_sep>## validate data count is euqal to expected count
<assert_stmt>datasource0.count()<eq>expected_count<line_sep>print("expected record count: "+str(expected_count))<line_sep>print("result record count: "+str(datasource0.count()))<line_sep>######################################## write data to s3 ########################################
datasource0.write(connection_type="s3" connection_options={"path":"s3://your/output/path/"} format="json")<line_sep>######################################## DataSinkTest ########################################
## Create a DynamicFrame on the fly
jsonStrings=['{"Name":"Andrew"}']<line_sep>rdd=sc.parallelize(jsonStrings)<line_sep>sql_df=spark.read.json(rdd)<line_sep>df=DynamicFrame.fromDF(sql_df glueContext "new_dynamic_frame")<line_sep>## DataSinkTest options
options_dataSinkTest={"secretId":"test-partner-driver" "dbtable":"Account" "className":"partner.jdbc.some.Driver" "url":"jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};"}<line_sep>## Write to data target
glueContext.write_dynamic_frame.from_options(frame=df connection_type="marketplace.jdbc" connection_options=options_dataSinkTest)<line_sep>## write validation
# You may check data in the database side.
# You may also refer to 'read data from data source' and 'validate data reading' part to compose your own validation logics.
job.commit()<line_sep> |
# import torch
# from torch.nn import functional as F
<import_stmt>numpy<as>np<import_from_stmt>scipy.ndimage distance_transform_edt<as>distance<import_from_stmt>skimage segmentation<as>skimage_seg<def_stmt>compute_dtm img_gt out_shape normalize=<false> fg=<false><block_start>"""
compute the distance transform map of foreground in binary mask
input: segmentation, shape = (batch_size, x, y, z)
output: the foreground Distance Map (SDM)
dtm(x) = 0; x in segmentation boundary
inf|x-y|; x in segmentation
"""<line_sep>fg_dtm=np.zeros(out_shape)<for_stmt>b range(out_shape[0])# batch size
<block_start>posmask=img_gt[b].astype(np.bool)<if_stmt><not>fg<block_start><if_stmt>posmask.any()<block_start>negmask=1-posmask<line_sep>posdis=distance(posmask)<line_sep>negdis=distance(negmask)<line_sep>boundary=skimage_seg.find_boundaries(posmask mode='inner').astype(np.uint8)<if_stmt>normalize<block_start>fg_dtm[b]=(negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis))+(posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))<block_end><else_stmt><block_start>fg_dtm[b]=posdis+negdis<block_end>fg_dtm[b][boundary<eq>1]=0<block_end><block_end><else_stmt><block_start><if_stmt>posmask.any()<block_start>posdis=distance(posmask)<line_sep>boundary=skimage_seg.find_boundaries(posmask mode='inner').astype(np.uint8)<if_stmt>normalize<block_start>fg_dtm[b]=(posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))<block_end><else_stmt><block_start>fg_dtm[b]=posdis<block_end>fg_dtm[b][boundary<eq>1]=0<block_end><block_end><block_end><return>fg_dtm<block_end><def_stmt>hd_loss seg_soft gt gt_dtm=<none> one_side=<true> seg_dtm=<none><block_start>"""
compute huasdorff distance loss for binary segmentation
input: seg_soft: softmax results, shape=(b,x,y,z)
gt: ground truth, shape=(b,x,y,z)
seg_dtm: segmentation distance transform map; shape=(b,x,y,z)
gt_dtm: ground truth distance transform map; shape=(b,x,y,z)
output: boundary_loss; sclar
"""<line_sep>delta_s=(seg_soft-gt.float())<power>2<line_sep>g_dtm=gt_dtm<power>2<line_sep>dtm=g_dtm<if>one_side<else>g_dtm+seg_dtm<power>2<line_sep>multipled=torch.einsum('bxyz, bxyz->bxyz' delta_s dtm)<line_sep># hd_loss = multipled.sum()*1.0/(gt_dtm > 0).sum()
hd_loss=multipled.mean()<line_sep><return>hd_loss<block_end><def_stmt>save_sdf gt_path=<none><block_start>'''
generate SDM for gt segmentation
'''<import_stmt>nibabel<as>nib<line_sep>dir_path='C:/Seolen/PycharmProjects/semi_seg/semantic-semi-supervised-master/model/gan_sdfloss3D_0229_04/test'<line_sep>gt_path=dir_path+'/00_gt.nii.gz'<line_sep>gt_img=nib.load(gt_path)<line_sep>gt=gt_img.get_data().astype(np.uint8)<line_sep>posmask=gt.astype(np.bool)<line_sep>negmask=~posmask<line_sep>posdis=distance(posmask)<line_sep>negdis=distance(negmask)<line_sep>boundary=skimage_seg.find_boundaries(posmask mode='inner').astype(np.uint8)<line_sep># sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (posdis - np.min(posdis)) / ( np.max(posdis) - np.min(posdis))
sdf=(posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))<line_sep>sdf[boundary<eq>1]=0<line_sep>sdf=sdf.astype(np.float32)<line_sep>sdf=nib.Nifti1Image(sdf gt_img.affine)<line_sep>save_path=dir_path+'/00_sdm_pos.nii.gz'<line_sep>nib.save(sdf save_path)<block_end><def_stmt>compute_sdf img_gt out_shape<block_start>"""
compute the signed distance map of binary mask
input: segmentation, shape = (batch_size, x, y, z)
output: the Signed Distance Map (SDM)
sdf(x) = 0; x in segmentation boundary
-inf|x-y|; x in segmentation
+inf|x-y|; x out of segmentation
normalize sdf to [-1,1]
"""<line_sep>img_gt=img_gt.astype(np.uint8)<line_sep>normalized_sdf=np.zeros(out_shape)<for_stmt>b range(out_shape[0])# batch size
<block_start>posmask=img_gt[b].astype(np.bool)<if_stmt>posmask.any()<block_start>negmask=~posmask<line_sep>posdis=distance(posmask)<line_sep>negdis=distance(negmask)<line_sep>boundary=skimage_seg.find_boundaries(posmask mode='inner').astype(np.uint8)<line_sep>sdf=(negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis))-(posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))<line_sep>sdf[boundary<eq>1]=0<line_sep>normalized_sdf[b]=sdf<line_sep># assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
# assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))
<block_end><block_end><return>normalized_sdf<block_end><def_stmt>sdf_loss net_output gt_sdm# print('net_output.shape, gt_sdm.shape', net_output.shape, gt_sdm.shape)
# ([4, 1, 112, 112, 80])
<block_start>smooth=1e-5<line_sep># compute eq (4)
intersect=torch.sum(net_output<times>gt_sdm)<line_sep>pd_sum=torch.sum(net_output<power>2)<line_sep>gt_sum=torch.sum(gt_sdm<power>2)<line_sep>L_product=(intersect+smooth)/(intersect+pd_sum+gt_sum+smooth)<line_sep># print('L_product.shape', L_product.shape) (4,2)
L_SDF=1/3-L_product+torch.norm(net_output-gt_sdm 1)/torch.numel(net_output)<line_sep><return>L_SDF<block_end><def_stmt>boundary_loss outputs_soft gt_sdf<block_start>"""
compute boundary loss for binary segmentation
input: outputs_soft: sigmoid results, shape=(b,2,x,y,z)
gt_sdf: sdf of ground truth (can be original or normalized sdf); shape=(b,2,x,y,z)
output: boundary_loss; sclar
"""<line_sep>pc=outputs_soft[: 1 <ellipsis>]<line_sep>dc=gt_sdf[: 1 <ellipsis>]<line_sep>multipled=torch.einsum('bxyz, bxyz->bxyz' pc dc)<line_sep>bd_loss=multipled.mean()<line_sep><return>bd_loss<block_end><if_stmt>__name__<eq>'__main__'<block_start>save_sdf()<block_end> |
#! /usr/bin/env python
# $Id: test_parser.py 7463 2012-06-22 19:49:51Z milde $
# Author: <NAME> <strank(AT)strank(DOT)info>
# Copyright: This module has been placed in the public domain.
"""
Tests for basic functionality of parser classes.
"""<import_stmt>sys<import_stmt>unittest<import_stmt>DocutilsTestSupport# must be imported before docutils
<import_stmt>docutils<import_from_stmt>docutils parsers utils frontend<import_from_stmt>docutils._compat b<class_stmt>RstParserTests(unittest.TestCase)<block_start><def_stmt>test_inputrestrictions self<block_start>parser_class=parsers.get_parser_class('rst')<line_sep>parser=parser_class()<line_sep>document=utils.new_document('test data' frontend.OptionParser(components=(parser )).get_default_values())<if_stmt>sys.version_info<l>(3 )# supplying string input is supported, but only if ascii-decodable
<block_start>self.assertRaises(UnicodeDecodeError parser.parse b('hol%s'%chr(224)) document)<block_end><else_stmt># input must be unicode at all times
<block_start>self.assertRaises(TypeError parser.parse b('hol') document)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
<import_from_stmt>._TestFillEmbedTime *<import_from_stmt>._TestFillSimple *<import_from_stmt>._TestManyFields *<import_from_stmt>._TestMsgArray *<import_from_stmt>._TestPrimitiveArray *<import_from_stmt>._TestString *<line_sep> |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>time<import_stmt>numpy<as>np<import_stmt>sys<import_stmt>os<import_stmt>argparse<import_stmt>json<import_stmt>random<import_stmt>multiprocessing<as>mp<def_stmt>mp_run data process_num func *args<block_start>""" run func with multi process
"""<line_sep>level_start=time.time()<line_sep>partn=max(len(data)/process_num 1)<line_sep>start=0<line_sep>p_idx=0<line_sep>ps=[]<while_stmt>start<l>len(data)<block_start>local_data=data[start:start+partn]<line_sep>start<augadd>partn<line_sep>p=mp.Process(target=func args=(local_data p_idx)+args)<line_sep>ps.append(p)<line_sep>p.start()<line_sep>p_idx<augadd>1<block_end><for_stmt>p ps<block_start>p.join()<block_end><for_stmt>p ps<block_start>p.terminate()<block_end><return>p_idx<block_end><def_stmt>read train_data_file test_data_file<block_start>behavior_dict=dict()<line_sep>train_sample=dict()<line_sep>test_sample=dict()<line_sep>user_id=list()<line_sep>item_id=list()<line_sep>cat_id=list()<line_sep>behav_id=list()<line_sep>timestamp=list()<line_sep>start=time.time()<line_sep>itobj=zip([train_data_file test_data_file] [train_sample test_sample])<for_stmt>filename,sample itobj<block_start><with_stmt>open(filename 'rb')<as>f<block_start><for_stmt>line f<block_start>arr=line.strip().split(',')<if_stmt>len(arr)<ne>5<block_start><break><block_end>user_id.append(int(arr[0]))<line_sep>item_id.append(int(arr[1]))<line_sep>cat_id.append(int(arr[2]))<if_stmt>arr[3]<not><in>behavior_dict<block_start>i=len(behavior_dict)<line_sep>behavior_dict[arr[3]]=i<block_end>behav_id.append(behavior_dict[arr[3]])<line_sep>timestamp.append(int(arr[4]))<block_end>sample["USERID"]=np.array(user_id)<line_sep>sample["ITEMID"]=np.array(item_id)<line_sep>sample["CATID"]=np.array(cat_id)<line_sep>sample["BEHAV"]=np.array(behav_id)<line_sep>sample["TS"]=np.array(timestamp)<line_sep>user_id=[]<line_sep>item_id=[]<line_sep>cat_id=[]<line_sep>behav_id=[]<line_sep>timestamp=[]<block_end><block_end>print("Read data done, {} train records, {} test records"<concat>", elapsed: {}".format(len(train_sample["USERID"]) len(test_sample["USERID"]) time.time()-start))<line_sep><return>behavior_dict train_sample test_sample<block_end><def_stmt>gen_user_his_behave 
train_sample<block_start>user_his_behav=dict()<line_sep>iterobj=zip(train_sample["USERID"] train_sample["ITEMID"] train_sample["TS"])<for_stmt>user_id,item_id,ts iterobj<block_start><if_stmt>user_id<not><in>user_his_behav<block_start>user_his_behav[user_id]=list()<block_end>user_his_behav[user_id].append((item_id ts))<block_end><for_stmt>_,value user_his_behav.items()<block_start>value.sort(key=<lambda>x:x[1])<block_end><return>user_his_behav<block_end><def_stmt>split_train_sample train_dir train_sample_seg_cnt<block_start>segment_filenames=[]<line_sep>segment_files=[]<for_stmt>i range(train_sample_seg_cnt)<block_start>filename="{}/part_{}".format(train_dir i)<line_sep>segment_filenames.append(filename)<line_sep>segment_files.append(open(filename 'wb'))<block_end><with_stmt>open("train_tmp" 'rb')<as>fi<block_start><for_stmt>line fi<block_start>i=random.randint(0 train_sample_seg_cnt-1)<line_sep>segment_files[i].write(line)<block_end><block_end><for_stmt>f segment_files<block_start>f.close()<block_end>os.remove("train_tmp")<line_sep># Shuffle
<for_stmt>fn segment_filenames<block_start>lines=[]<with_stmt>open(fn 'rb')<as>f<block_start><for_stmt>line f<block_start>lines.append(line)<block_end><block_end>random.shuffle(lines)<with_stmt>open(fn 'wb')<as>f<block_start><for_stmt>line lines<block_start>f.write(line)<block_end><block_end><block_end><block_end><def_stmt>partial_gen_train_sample users user_his_behav filename pipe seq_len min_len<block_start>stat=dict()<with_stmt>open(filename 'wb')<as>f<block_start><for_stmt>user users<block_start>value=user_his_behav[user]<line_sep>count=len(value)<if_stmt>count<l>min_len<block_start><continue><block_end>arr=[0<for>i range(seq_len-min_len)]+[v[0]<for>v value]<for_stmt>i range(len(arr)-seq_len+1)<block_start>sample=arr[i:i+seq_len]<line_sep>f.write('{}_{}'.format(user i))# sample id
f.write('\t{}'.format(sample[-1]))# label feature
<for_stmt>j range(seq_len-1)<block_start><if_stmt>sample[j]<ne>0<block_start>f.write("\tslot_{}:{}".format(j+1 sample[j]))<block_end><block_end>f.write('\n')<if_stmt>sample[-1]<not><in>stat<block_start>stat[sample[-1]]=0<block_end>stat[sample[-1]]<augadd>1<block_end><block_end><block_end>pipe.send(stat)<block_end><def_stmt>gen_train_sample train_sample args<block_start>user_his_behav=gen_user_his_behave(train_sample)<line_sep>print("user_his_behav len: {}".format(len(user_his_behav)))<line_sep>users=user_his_behav.keys()<line_sep>process=[]<line_sep>pipes=[]<line_sep>parall=args.parall<line_sep>job_size=int(len(user_his_behav)/parall)<if_stmt>len(user_his_behav)%parall<ne>0<block_start>parall<augadd>1<block_end><for_stmt>i range(parall)<block_start>a,b=mp.Pipe()<line_sep>pipes.append(a)<line_sep>p=mp.Process(target=partial_gen_train_sample args=(users[i<times>job_size:(i+1)<times>job_size] user_his_behav 'train_tmp.part_{}'.format(i) b args.seq_len args.min_seq_len))<line_sep>process.append(p)<line_sep>p.start()<block_end>stat=dict()<for_stmt>pipe pipes<block_start>st=pipe.recv()<for_stmt>k,v st.items()<block_start><if_stmt>k<not><in>stat<block_start>stat[k]=0<block_end>stat[k]<augadd>v<block_end><block_end><for_stmt>p process<block_start>p.join()<block_end># Merge partial files
<with_stmt>open("train_tmp" 'wb')<as>f<block_start><for_stmt>i range(parall)<block_start>filename='train_tmp.part_{}'.format(i)<with_stmt>open(filename 'rb')<as>f1<block_start>f.write(f1.read())<block_end>os.remove(filename)<block_end><block_end># Split train sample to segments
split_train_sample(args.train_dir args.train_sample_seg_cnt)<line_sep><return>stat<block_end><def_stmt>gen_test_sample test_dir test_sample seq_len min_seq_len<block_start>user_his_behav=gen_user_his_behave(test_sample)<with_stmt>open("{}/part-0".format(test_dir) 'wb')<as>f<block_start><for_stmt>user,value user_his_behav.items()<block_start><if_stmt>len(value)/2+1<l>min_seq_len<block_start><continue><block_end>mid=int(len(value)/2)<line_sep>left=value[:mid][-seq_len+1:]<line_sep>right=value[mid:]<line_sep>left=[0<for>i range(seq_len-len(left)-1)]+[l[0]<for>l left]<line_sep>f.write('{}_{}'.format(user 'T'))# sample id
labels=','.join(map(str [item[0]<for>item right]))<line_sep>f.write('\t{}'.format(labels))<line_sep># kvs
<for_stmt>j range(seq_len-1)<block_start><if_stmt>left[j]<ne>0<block_start>f.write("\tslot_{}:{}".format(j+1 left[j]))<block_end><block_end>f.write('\n')<block_end><block_end><block_end><def_stmt>prepare_sample_set train_dir sample_dir process_num=12 feature_num=69<block_start><def_stmt>parse_data files idx feature_num=69<block_start>history_ids=[0]<times>feature_num<line_sep>samples=dict()<line_sep>process=0<for_stmt>filename files<block_start>process<augadd>1<line_sep>print("process {} / {}.".format(process len(files)))<with_stmt>open(filename)<as>f<block_start>print("Begin to handle {}.".format(filename))<for_stmt>line f<block_start>features=line.strip().split("\t")<line_sep>item_id=int(features[1])<for_stmt>item features[2:]<block_start>slot,feasign=item.split(":")<line_sep>slot_id=int(slot.split("_")[1])<line_sep>history_ids[slot_id-1]=int(feasign)<block_end><if_stmt>item_id<not><in>samples<block_start>samples[item_id]=list()<block_end>samples[item_id].append(history_ids)<block_end><block_end><block_end><with_stmt>open("parse_data_{}.json".format(idx) 'w')<as>json_file<block_start>json.dump(samples json_file)<block_end><block_end>files=["{}/{}".format(train_dir f)<for>f os.listdir(train_dir)]<line_sep>real_process_num=mp_run(files process_num parse_data feature_num)<line_sep>num=0<line_sep>all_samples=dict()<for_stmt>i range(real_process_num)<block_start>filename="parse_data_{}.json".format(i)<with_stmt>open(filename 'r')<as>json_file<block_start>each_samples=json.load(json_file)<for_stmt>key each_samples<block_start><if_stmt>key<not><in>all_samples<block_start>all_samples[key]=[]<block_end>all_samples[key]<augadd>each_samples[key]<line_sep>num<augadd>len(each_samples[key])<block_end><block_end>os.remove(filename)<block_end><for_stmt>ck all_samples<block_start><with_stmt>open("{}/samples_{}.json".format(sample_dir ck) 'w')<as>f<block_start>json.dump(all_samples[ck] 
f)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>_PARSER=argparse.ArgumentParser(description="DataProcess")<line_sep>_PARSER.add_argument("--train_file" required=<true> help="Train filename")<line_sep>_PARSER.add_argument("--test_file" required=<true> help="Test filename")<line_sep>_PARSER.add_argument("--item_cate_filename" default="./Item_Cate.txt" help="item cate filename, used to init the first tree.")<line_sep>_PARSER.add_argument("--stat_file" default="./Stat.txt" help="Stat filename")<line_sep>_PARSER.add_argument("--train_dir" default="./train_data" help="Train directory")<line_sep>_PARSER.add_argument("--sample_dir" default="./samples" help="Sample directory")<line_sep>_PARSER.add_argument("--test_dir" default="./test_data" help="Test directory")<line_sep>_PARSER.add_argument('--parall' type=int help="parall process used" default=16)<line_sep>_PARSER.add_argument("--train_sample_seg_cnt" type=int default=20 help="count of train sample segments file")<line_sep>_PARSER.add_argument("--seq_len" type=int help="sequence length of the sample record" default=70)<line_sep>_PARSER.add_argument("--min_seq_len" type=int help="Min length of the sample sequence record" default=8)<line_sep>args=_PARSER.parse_args()<line_sep>os.system("rm -rf ./{} && mkdir -p {}".format(args.train_dir args.train_dir))<line_sep>os.system("rm -rf ./{} && mkdir -p {}".format(args.test_dir args.test_dir))<line_sep>os.system("rm -rf ./{} && mkdir -p {}".format(args.sample_dir args.sample_dir))<line_sep>behavior_dict,train_sample,test_sample=read(args.train_file args.test_file)<line_sep>print(repr(behavior_dict))<line_sep>stat=gen_train_sample(train_sample args)<with_stmt>open(args.stat_file 'w')<as>f<block_start>json.dump(stat f)<block_end>gen_test_sample(args.test_dir test_sample args.seq_len args.min_seq_len)<line_sep>item_cate=dict()<for_stmt>sample [train_sample test_sample]<block_start>iterobj=zip(sample["ITEMID"] sample["CATID"])<for_stmt>item_id,cat_id 
iterobj<block_start><if_stmt>item_id<not><in>item_cate<block_start>item_cate[item_id]=cat_id<block_end><block_end><block_end><with_stmt>open(args.item_cate_filename 'w')<as>f<block_start><for_stmt>key item_cate<block_start>f.write("{}\t{}\n".format(key item_cate[key]))<block_end><block_end>prepare_sample_set(args.train_dir args.sample_dir args.parall feature_num=args.seq_len-1)<block_end> |
from django.db import migrations
import utilities.fields


class Migration(migrations.Migration):
    """Add an optional ``color`` field to the four pass-through port models."""

    dependencies = [
        ('dcim', '0132_cable_length'),
    ]

    # Identical AddField for each model, generated from one template.
    operations = [
        migrations.AddField(
            model_name=model_name,
            name='color',
            field=utilities.fields.ColorField(blank=True, max_length=6),
        )
        for model_name in (
            'frontport',
            'frontporttemplate',
            'rearport',
            'rearporttemplate',
        )
    ]
'''Utility functions.'''

# BUGFIX: `import email` alone does not bind the `email.utils` submodule on
# Python 3; importing it explicitly guarantees email.utils is available.
import email.utils


def parse_date(string):
    '''Return a timestamp for the provided datestring, described by RFC 7231.

    Raises:
        ValueError: if the string is not a parsable RFC 7231 date.
    '''
    parsed = email.utils.parsedate_tz(string)
    if parsed is None:
        raise ValueError("Invalid time.")
    parsed = list(parsed)
    # Default time zone is GMT/UTC
    if parsed[9] is None:
        parsed[9] = 0
    return email.utils.mktime_tz(parsed)
"""Expose the package's differentiation entry points at the top level."""
from .differential_operators import grad, make_vjp
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helper for argparse_flags_test."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>random<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl.flags argparse_flags<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string('absl_echo' <none> 'The echo message from absl.flags.')<def_stmt>parse_flags_simple argv<block_start>"""Simple example for absl.flags + argparse."""<line_sep>parser=argparse_flags.ArgumentParser(description='A simple example of argparse_flags.')<line_sep>parser.add_argument('--argparse_echo' help='The echo message from argparse_flags')<line_sep><return>parser.parse_args(argv[1:])<block_end><def_stmt>main_simple args<block_start>print('--absl_echo is' FLAGS.absl_echo)<line_sep>print('--argparse_echo is' args.argparse_echo)<block_end><def_stmt>roll_dice args<block_start>print('Rolled a dice:' random.randint(1 args.num_faces))<block_end><def_stmt>shuffle args<block_start>inputs=list(args.inputs)<line_sep>random.shuffle(inputs)<line_sep>print('Shuffled:' ' '.join(inputs))<block_end><def_stmt>parse_flags_subcommands argv<block_start>"""Subcommands example for absl.flags + argparse."""<line_sep>parser=argparse_flags.ArgumentParser(description='A subcommands example of argparse_flags.')<line_sep>parser.add_argument('--argparse_echo' help='The echo message from argparse_flags')<line_sep>subparsers=parser.add_subparsers(help='The command to execute.')<line_sep>roll_dice_parser=subparsers.add_parser('roll_dice' help='Roll a dice.')<line_sep>roll_dice_parser.add_argument('--num_faces' type=int default=6)<line_sep>roll_dice_parser.set_defaults(command=roll_dice)<line_sep>shuffle_parser=subparsers.add_parser('shuffle' help='Shuffle inputs.')<line_sep>shuffle_parser.add_argument('inputs' metavar='I' nargs='+' help='Inputs to 
shuffle.')<line_sep>shuffle_parser.set_defaults(command=shuffle)<line_sep><return>parser.parse_args(argv[1:])<block_end><def_stmt>main_subcommands args<block_start>main_simple(args)<line_sep>args.command(args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main_func_name=os.environ['MAIN_FUNC']<line_sep>flags_parser_func_name=os.environ['FLAGS_PARSER_FUNC']<line_sep>app.run(main=globals()[main_func_name] flags_parser=globals()[flags_parser_func_name])<block_end> |
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# GYP test configuration: builds the same source once per MSVC calling
# convention (cdecl=0, fastcall=1, stdcall=2; vectorcall=3 on VS2013+).
{
  'targets': [
    {
      'target_name': 'test_cdecl',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 0,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-cdecl.def',
      ],
    },
    {
      'target_name': 'test_fastcall',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 1,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-fastcall.def',
      ],
    },
    {
      'target_name': 'test_stdcall',
      'type': 'loadable_module',
      'msvs_settings': {
        'VCCLCompilerTool': {
          'CallingConvention': 2,
        },
      },
      'sources': [
        'calling-convention.cc',
        'calling-convention-stdcall.def',
      ],
    },
  ],
  'conditions': [
    ['MSVS_VERSION[0:4]>="2013"', {
      'targets': [
        {
          'target_name': 'test_vectorcall',
          'type': 'loadable_module',
          'msvs_settings': {
            'VCCLCompilerTool': {
              'CallingConvention': 3,
            },
          },
          'sources': [
            'calling-convention.cc',
            'calling-convention-vectorcall.def',
          ],
        },
      ],
    }],
  ],
}
import FWCore.ParameterSet.Config as cms

import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi

# PropagatorWithMaterialESProducer
# Clone of the opposite-direction material propagator, specialized for
# electrons (mass set to the electron mass in GeV).
oppositeToMomElePropagator = (
    TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi
    .OppositeMaterialPropagator.clone(
        Mass=0.000511,
        ComponentName='oppositeToMomElePropagator',
    )
)
from rdflib import plugin
from rdflib import store

# Register the SQLAlchemy-backed Store so rdflib can discover it by name.
plugin.register(
    "SQLAlchemy",
    store.Store,
    "rdflib_sqlalchemy.store",
    "SQLAlchemy",
)
import datetime
from unittest import TestCase

from isc_dhcp_leases.iscdhcpleases import Lease6, utc
from freezegun import freeze_time

__author__ = '<NAME> <<EMAIL>>'

# Raw host-identifier string shared by most fixtures (octal escapes).
_HOST_ID = "4dv\352\000\001\000\001\035f\037\342\012\000'\000\000\000"
_IP = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"


class TestLease6(TestCase):
    """Unit tests for the IPv6 lease record parser."""

    def setUp(self):
        self.lease_time = datetime.datetime(2015, 8, 18, 16, 55, 37, tzinfo=utc)
        self.lease_data = {
            'binding': 'state active',
            'ends': 'never',
            'preferred-life': '375',
            'max-life': '600',
        }

    def test_init(self):
        lease = Lease6(_IP, self.lease_data, self.lease_time, _HOST_ID, "na")

        self.assertEqual(lease.ip, _IP)
        self.assertEqual(
            lease.host_identifier,
            b"4dv\xea\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.valid, True)
        self.assertEqual(lease.iaid, 3933627444)
        self.assertEqual(lease.duid,
                         b"\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
        self.assertEqual(lease.active, True)
        self.assertEqual(lease.binding_state, 'active')
        self.assertEqual(lease.preferred_life, 375)
        self.assertEqual(lease.max_life, 600)
        self.assertEqual(lease.last_communication, self.lease_time)
        self.assertEqual(lease.type, Lease6.NON_TEMPORARY)

    def test_repr(self):
        lease = Lease6(_IP, self.lease_data, self.lease_time, _HOST_ID, "na")
        self.assertEqual(repr(lease),
                         '<Lease6 fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b>')

    def _test_valid(self, now=None):
        lease = Lease6(_IP, self.lease_data, self.lease_time, _HOST_ID, "na",
                       now=now)
        self.assertTrue(lease.valid)  # Lease is forever
        lease.end = datetime.datetime(2015, 7, 6, 13, 57, 4, tzinfo=utc)
        self.assertTrue(lease.valid)  # Lease is before end
        lease.end = lease.end - datetime.timedelta(hours=7)
        self.assertFalse(lease.valid)  # Lease is ended

    @freeze_time("2015-07-6 8:15:0")
    def test_valid_frozen(self):
        self._test_valid()

    def test_valid_historical(self):
        self._test_valid(
            now=datetime.datetime(2015, 7, 6, 8, 15, 0, tzinfo=utc))

    def test_eq(self):
        lease_a = Lease6("2fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b",
                         self.lease_data, self.lease_time, _HOST_ID, "na")
        lease_b = Lease6(_IP, self.lease_data, self.lease_time, _HOST_ID, "na")
        self.assertEqual(lease_a, lease_b)

        lease_b.ip = "fc00:e968:6179::de52:7100"
        self.assertNotEqual(lease_a, lease_b)

        lease_b.ip = _IP
        lease_b.host_identifier = \
            "gd4\352\000\001\000\001\035b\037\322\012\000'\000\000\000"
        self.assertNotEqual(lease_a, lease_b)

    def test_naive_time(self):
        # Naive (tz-less) `now` values must be rejected.
        with self.assertRaises(ValueError):
            Lease6(_IP, self.lease_data, self.lease_time, _HOST_ID, "na",
                   now=datetime.datetime.now())
from typing import List, Optional
from abc import ABC, abstractmethod

from spectacles.client import LookerClient
from spectacles.lookml import Project, Model, Dimension
from spectacles.select import is_selected
from spectacles.exceptions import LookMlNotFound


class Validator(ABC):  # pragma: no cover
    """Defines abstract base interface for validators.

    Not intended to be used directly, only inherited.

    Attributes:
        client: Looker API client.
    """

    def __init__(self, client: LookerClient, project: str):
        self.client = client
        self.project = Project(project, models=[])

    @abstractmethod
    def validate(self):
        raise NotImplementedError

    def build_project(
        self,
        selectors: Optional[List[str]] = None,
        exclusions: Optional[List[str]] = None,
        build_dimensions: bool = False,
    ) -> None:
        """Creates an object representation of the project's LookML.

        Args:
            selectors: List of selector strings in 'model_name/explore_name' format.
                The '*' wildcard selects all models or explores. For instance,
                'model_name/*' would select all explores in the 'model_name' model.
        """
        # Assign default values for selectors and exclusions
        selectors = selectors if selectors is not None else ["*/*"]
        exclusions = exclusions if exclusions is not None else []

        all_models = [
            Model.from_json(raw)
            for raw in self.client.get_lookml_models(
                fields=["name", "project_name", "explores"]
            )
        ]
        project_models = [
            model for model in all_models
            if model.project_name == self.project.name
        ]

        if not project_models:
            raise LookMlNotFound(
                name="project-models-not-found",
                title="No configured models found for the specified project.",
                detail=(
                    f"Go to {self.client.base_url}/projects and confirm "
                    "a) at least one model exists for the project and "
                    "b) it has an active configuration."
                ),
            )

        for model in project_models:
            # Drop explores the user did not select (or explicitly excluded).
            model.explores = [
                explore for explore in model.explores
                if is_selected(model.name, explore.name, selectors, exclusions)
            ]
            if build_dimensions:
                for explore in model.explores:
                    for raw_dim in self.client.get_lookml_dimensions(
                        model.name, explore.name
                    ):
                        dimension = Dimension.from_json(
                            raw_dim, model.name, explore.name
                        )
                        dimension.url = self.client.base_url + dimension.url
                        if not dimension.ignore:
                            explore.add_dimension(dimension)

        # Keep only models that still have at least one selected explore.
        self.project.models = [
            model for model in project_models if len(model.explores) > 0
        ]
import pandas as pd
import json
import time

from bentoml import env, artifacts, api, BentoService
from bentoml.adapters import DataframeInput, JsonInput, StringInput
from bentoml.frameworks.sklearn import SklearnModelArtifact


@env(infer_pip_packages=True)
@artifacts([SklearnModelArtifact('model')])
class AnomalyDetection(BentoService):
    """
    A minimum prediction service exposing a Scikit-learn model
    """

    @api(input=JsonInput())
    def analyse(self, param: json):
        """
        An inference API named `analyse` with JSON input adapter; returns a
        stub prediction series or an error payload.
        """
        dic = {}
        # Simulate a slow asynchronous job.
        if param['taskType'] == 'async':
            time.sleep(30)
        try:
            if len(param['seriesList']) < 2:
                raise Exception()
            # Stub response: one fixed (timestamp, value) prediction point.
            dic['predictSeriesList'] = [[1635216096000, 23.541]]
        except Exception:
            dic['code'] = 'detectorError'
            dic['message'] = 'some error in detector internal!'
        return dic

    @api(input=DataframeInput(), batch=True)
    def predict(self, df: pd.DataFrame):
        """
        An inference API named `predict` with Dataframe input adapter, which
        codifies how HTTP requests or CSV files are converted to a pandas
        Dataframe object as the inference API function input
        """
        return self.artifacts.model.predict(df)

    @api(input=JsonInput())
    def analyze(self, param: json):
        """
        A stub inference API that always reports a healthy status.
        """
        return "good"

    @api(input=StringInput())
    def doc(self, message: str):
        """
        get README.md
        """
        with open("README.md") as f:
            return f.read()
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Function used in Worker tests of legacy GCF Python 3.7 logging."""<import_stmt>logging<line_sep>X_GOOGLE_FUNCTION_NAME="gcf-function"<line_sep>X_GOOGLE_ENTRY_POINT="function"<line_sep>HOME="/tmp"<def_stmt>function request<block_start>"""Test function which logs exceptions.
Args:
request: The HTTP request which triggered this function.
"""<try_stmt><block_start><raise>Exception<block_end><except_stmt><block_start>logging.exception("log")<block_end><return><none><block_end> |
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of the `swift_c_module` rule."""<line_sep>load(":swift_common.bzl" "swift_common")<line_sep>load(":utils.bzl" "merge_runfiles")<def_stmt>_swift_c_module_impl ctx<block_start>module_map=ctx.file.module_map<line_sep>deps=ctx.attr.deps<line_sep>cc_infos=[dep[CcInfo]<for>dep deps]<line_sep>data_runfiles=[dep[DefaultInfo].data_runfiles<for>dep deps]<line_sep>default_runfiles=[dep[DefaultInfo].default_runfiles<for>dep deps]<if_stmt>cc_infos<block_start>cc_info=cc_common.merge_cc_infos(cc_infos=cc_infos)<line_sep>compilation_context=cc_info.compilation_context<block_end><else_stmt><block_start>cc_info=<none><line_sep>compilation_context=cc_common.create_compilation_context()<block_end>providers=[# We must repropagate the dependencies' DefaultInfos, otherwise we
# will lose runtime dependencies that the library expects to be
# there during a test (or a regular `bazel run`).
DefaultInfo(data_runfiles=merge_runfiles(data_runfiles) default_runfiles=merge_runfiles(default_runfiles) files=depset([module_map]) ) swift_common.create_swift_info(modules=[swift_common.create_module(name=ctx.attr.module_name clang=swift_common.create_clang_module(compilation_context=compilation_context module_map=module_map # TODO(b/142867898): Precompile the module and place it
# here.
precompiled_module=<none> ) ) ] ) ]<if_stmt>cc_info<block_start>providers.append(cc_info)<block_end><return>providers<block_end>swift_c_module=rule(attrs={"module_map":attr.label(allow_single_file=<true> doc="""\
The module map file that should be loaded to import the C library dependency
into Swift.
""" mandatory=<true> ) "module_name":attr.string(doc="""\
The name of the top-level module in the module map that this target represents.
A single `module.modulemap` file can define multiple top-level modules. When
building with implicit modules, the presence of that module map allows any of
the modules defined in it to be imported. When building explicit modules,
however, there is a one-to-one correspondence between top-level modules and
BUILD targets and the module name must be known without reading the module map
file, so it must be provided directly. Therefore, one may have multiple
`swift_c_module` targets that reference the same `module.modulemap` file but
with different module names and headers.
""" mandatory=<true> ) "deps":attr.label_list(allow_empty=<false> doc="""\
A list of C targets (or anything propagating `CcInfo`) that are dependencies of
this target and whose headers may be referenced by the module map.
""" mandatory=<true> providers=[[CcInfo]] ) } doc="""\
Wraps one or more C targets in a new module map that allows it to be imported
into Swift to access its C interfaces.
The `cc_library` rule in Bazel does not produce module maps that are compatible
with Swift. In order to make interop between Swift and C possible, users have
one of two options:
1. **Use an auto-generated module map.** In this case, the `swift_c_module`
rule is not needed. If a `cc_library` is a direct dependency of a
`swift_{binary,library,test}` target, a module map will be automatically
generated for it and the module's name will be derived from the Bazel target
label (in the same fashion that module names for Swift targets are derived).
The module name can be overridden by setting the `swift_module` tag on the
`cc_library`; e.g., `tags = ["swift_module=MyModule"]`.
2. **Use a custom module map.** For finer control over the headers that are
exported by the module, use the `swift_c_module` rule to provide a custom
module map that specifies the name of the module, its headers, and any other
module information. The `cc_library` targets that contain the headers that
you wish to expose to Swift should be listed in the `deps` of your
`swift_c_module` (and by listing multiple targets, you can export multiple
libraries under a single module if desired). Then, your
`swift_{binary,library,test}` targets should depend on the `swift_c_module`
target, not on the underlying `cc_library` target(s).
NOTE: Swift at this time does not support interop directly with C++. Any headers
referenced by a module map that is imported into Swift must have only C features
visible, often by using preprocessor conditions like `#if __cplusplus` to hide
any C++ declarations.
""" implementation=_swift_c_module_impl )<line_sep> |
# Public probe modules re-exported from this package.
__all__ = [
    'proc_stat',
    'disk_stats',
    'disk_sizes',
    'memory',
    'uptime',
    'open_files',
    'net',
    'la',
    'pg_probackup',
]

from . import *
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Make Network.gateway an optional FK that is nulled on address deletion."""

    dependencies = [
        ('networks', '0009_auto_20160823_0921'),
    ]

    operations = [
        migrations.AlterField(
            model_name='network',
            name='gateway',
            field=models.ForeignKey(
                to='networks.IPAddress',
                related_name='gateway_network',
                verbose_name='Gateway address',
                on_delete=django.db.models.deletion.SET_NULL,
                blank=True,
                null=True,
            ),
        ),
    ]
# Copyright (C) 2020 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.api.common import attribute_filter
from tacker.common import exceptions as exception


class BaseViewBuilder(object):
    """Base class for API view builders with filter/field validation helpers.

    Subclasses are expected to define FLATTEN_ATTRIBUTES,
    COMPLEX_ATTRIBUTES and FLATTEN_COMPLEX_ATTRIBUTES.
    """

    @classmethod
    def validate_filter(cls, filters=None):
        """Parse a filter expression against the subclass's flat attributes."""
        if not filters:
            return
        return attribute_filter.parse_filter_rule(
            filters, target=cls.FLATTEN_ATTRIBUTES)

    @classmethod
    def validate_attribute_fields(cls, all_fields=None, fields=None,
                                  exclude_fields=None, exclude_default=None):
        """Validate field-selection query parameters and their combinations."""
        # The four selection parameters are mutually constrained; reject
        # every disallowed pairing up front.
        if all_fields and (fields or exclude_fields or exclude_default):
            msg = ("Invalid query parameter combination: 'all_fields' "
                   "cannot be combined with 'fields' or 'exclude_fields' "
                   "or 'exclude_default'")
            raise exception.ValidationError(msg)

        if fields and (all_fields or exclude_fields):
            msg = ("Invalid query parameter combination: 'fields' "
                   "cannot be combined with 'all_fields' or 'exclude_fields' ")
            raise exception.ValidationError(msg)

        if exclude_fields and (all_fields or fields or exclude_default):
            msg = ("Invalid query parameter combination: 'exclude_fields' "
                   "cannot be combined with 'all_fields' or 'fields' "
                   "or 'exclude_default'")
            raise exception.ValidationError(msg)

        if exclude_default and (all_fields or exclude_fields):
            msg = ("Invalid query parameter combination: 'exclude_default' "
                   "cannot be combined with 'all_fields' or 'exclude_fields' ")
            raise exception.ValidationError(msg)

        def _validate_complex_attributes(query_parameter, requested):
            msg = ("Invalid query parameter '%(query_parameter)s'. "
                   "Value: %(field)s")
            for field in requested:
                if field in cls.COMPLEX_ATTRIBUTES:
                    continue
                if '*' in field:
                    # Field should never contain '*' as it's reserved for
                    # special purpose for handling key-value pairs.
                    raise exception.ValidationError(
                        msg % {"query_parameter": query_parameter,
                               "field": field})
                if field not in cls.FLATTEN_COMPLEX_ATTRIBUTES:
                    # Special case for field with key-value pairs.
                    # In this particular case, key will act as an attribute
                    # in structure so you need to treat it differently than
                    # other fields. All key-value pair field will be post-fix
                    # with '*' in FLATTEN_COMPLEX_ATTRIBUTES. Request
                    # with field which contains '*' will be treated as an
                    # error.
                    is_special = any(
                        '*' in attribute and
                        field.startswith(attribute.split('*')[0])
                        for attribute in cls.FLATTEN_COMPLEX_ATTRIBUTES)
                    if not is_special:
                        raise exception.ValidationError(
                            msg % {"query_parameter": query_parameter,
                                   "field": field})

        if fields:
            _validate_complex_attributes("fields", fields.split(','))
        elif exclude_fields:
            _validate_complex_attributes("exclude_fields",
                                         exclude_fields.split(","))
# Importing the handler modules registers their converters as a side effect.
import vkwave.vkscript.handlers.assignments
import vkwave.vkscript.handlers.blocks
import vkwave.vkscript.handlers.calls
import vkwave.vkscript.handlers.expressions
import vkwave.vkscript.handlers.statements
import vkwave.vkscript.handlers.types

from .converter import VKScriptConverter
from .execute import Execute
from .execute import execute

__all__ = ("execute", "Execute", "VKScriptConverter")
##
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
from supybot.test import *

import supybot.conf as conf
import supybot.registry as registry
import supybot.ircutils as ircutils


class SupyConfTestCase(SupyTestCase):
    """Tests for the network channel-join configuration value."""

    def testJoinToOneChannel(self):
        orig = conf.supybot.networks.test.channels()
        channels = ircutils.IrcSet()
        channels.add("#bar")
        conf.supybot.networks.test.channels.setValue(channels)
        msgs = conf.supybot.networks.test.channels.joins()
        self.assertEqual(msgs[0].args, ("#bar",))
        conf.supybot.networks.test.channels.setValue(orig)

    def testJoinToManyChannels(self):
        orig = conf.supybot.networks.test.channels()
        channels = ircutils.IrcSet()
        input_list = []
        for x in range(1, 30):
            name = "#verylongchannelname" + str(x)
            channels.add(name)
            input_list.append(name)
        conf.supybot.networks.test.channels.setValue(channels)
        msgs = conf.supybot.networks.test.channels.joins()
        # Double check we split the messages
        self.assertEqual(len(msgs), 2)
        # Ensure all channel names are present
        chan_list = (msgs[0].args[0] + ',' + msgs[1].args[0]).split(',')
        self.assertCountEqual(input_list, chan_list)
        conf.supybot.networks.test.channels.setValue(orig)
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.common.enums.grid_proportion import GridProportion
from qf_lib.documents_utils.document_exporting.document import Document
from qf_lib.documents_utils.document_exporting.element import Element


class CustomElement(Element):
    """A document element that renders a verbatim, caller-supplied HTML snippet."""

    def __init__(self, html: str, grid_proportion=GridProportion.Eight):
        """
        An element containing custom HTML.
        """
        super().__init__(grid_proportion)
        # The snippet is stored as-is and emitted without any processing.
        self.html = html

    def generate_html(self, document: Document) -> str:
        """
        Generates the HTML that represents the underlying element.
        """
        return self.html
from sigtools.wrappers import decorator
from clize import run


@decorator
def with_uppercase(wrapped, *args, uppercase=False, **kwargs):
    """
    Formatting options:

    :param uppercase: Print output in capitals
    """
    result = wrapped(*args, **kwargs)
    # Only transform the output when explicitly requested on the CLI.
    return str(result).upper() if uppercase else result


@with_uppercase
def hello_world(name=None):
    """Says hello world

    :param name: Who to say hello to
    """
    if name is None:
        return 'Hello world!'
    return 'Hello ' + name


if __name__ == '__main__':
    run(hello_world)
# -*- coding: utf-8 -*-
"""
wordpress
~~~~

tamper for wordpress

:author:    LoRexxar <<EMAIL>>
:homepage:  https://github.com/LoRexxar/Kunlun-M
:license:   MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 LoRexxar. All rights reserved
"""

# WordPress sanitizer functions mapped to the vulnerability ids they suppress:
# XSS-style escapes silence 1000/10001/10002, SQL escapes silence 1004/1005/1006.
_XSS_SANITIZERS = (
    "esc_url",
    "esc_js",
    "esc_html",
    "esc_attr",
    "esc_textarea",
    "tag_escape",
)
_SQL_SANITIZERS = ("esc_sql", "_real_escape")

wordpress = {name: [1000, 10001, 10002] for name in _XSS_SANITIZERS}
wordpress.update({name: [1004, 1005, 1006] for name in _SQL_SANITIZERS})

wordpress_controlled = []
import numpy as np
import unittest

import chainer
from chainer import testing
from chainer.testing import attr

from chainercv.links import YOLOv3


@testing.parameterize(*testing.product({
    'n_fg_class': [1, 5, 20],
}))
class TestYOLOv3(unittest.TestCase):

    def setUp(self):
        self.link = YOLOv3(n_fg_class=self.n_fg_class)
        self.insize = 416
        # Three anchors per cell over the 13x13, 26x26 and 52x52 feature maps.
        self.n_bbox = (13 * 13 + 26 * 26 + 52 * 52) * 3

    def _check_call(self):
        dummy_input = self.link.xp.array(
            np.random.uniform(-1, 1, size=(1, 3, self.insize, self.insize)),
            dtype=np.float32)

        locs, objs, confs = self.link(dummy_input)

        # Each output is a chainer Variable whose leading dims are
        # (batch, n_bbox); only the trailing shape differs per output.
        for variable, tail_shape in (
                (locs, (4,)),
                (objs, ()),
                (confs, (self.n_fg_class,))):
            self.assertIsInstance(variable, chainer.Variable)
            self.assertIsInstance(variable.array, self.link.xp.ndarray)
            self.assertEqual(variable.shape, (1, self.n_bbox) + tail_shape)

    @attr.slow
    def test_call_cpu(self):
        self._check_call()

    @attr.gpu
    @attr.slow
    def test_call_gpu(self):
        self.link.to_gpu()
        self._check_call()


@testing.parameterize(*testing.product({
    'n_fg_class': [None, 10, 20],
    'pretrained_model': ['voc0712'],
}))
class TestYOLOv3Pretrained(unittest.TestCase):

    @attr.slow
    def test_pretrained(self):
        kwargs = {
            'n_fg_class': self.n_fg_class,
            'pretrained_model': self.pretrained_model,
        }

        if self.pretrained_model == 'voc0712':
            # The VOC07+12 weights were trained with 20 foreground classes.
            valid = self.n_fg_class in {None, 20}

        if valid:
            YOLOv3(**kwargs)
        else:
            with self.assertRaises(ValueError):
                YOLOv3(**kwargs)


testing.run_module(__name__, __file__)
import torch
import torch.nn as nn


def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding."""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class ChannelAttention(nn.Module):
    """CBAM channel attention: gates each channel using pooled statistics.

    Produces a (N, C, 1, 1) sigmoid mask that callers broadcast-multiply
    with the input feature map.
    """

    def __init__(self, in_planes, ratio=4):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        # Bottleneck MLP (1x1 convs) shared by the avg- and max-pooled paths.
        self.sharedMLP = nn.Sequential(
            nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = self.sharedMLP(self.avg_pool(x))
        maxout = self.sharedMLP(self.max_pool(x))
        return self.sigmoid(avgout + maxout)


class SpatialAttention(nn.Module):
    """CBAM spatial attention: gates each spatial location.

    Produces a (N, 1, H, W) sigmoid mask from the channel-wise mean and max maps.
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), "kernel size must be 3 or 7"
        padding = 3 if kernel_size == 7 else 1
        # 2 input channels: concatenated mean map and max map.
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avgout = torch.mean(x, dim=1, keepdim=True)
        maxout, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avgout, maxout], dim=1)
        x = self.conv(x)
        return self.sigmoid(x)


class BasicBlock(nn.Module):
    """ResNet BasicBlock with CBAM attention applied before the residual add.

    NOTE(review): the original forward() contained unconditional debug
    ``print`` calls ("downsampling", tensor shapes) that ran on every forward
    pass; they have been removed.
    """

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.ca = ChannelAttention(planes)
        self.sa = SpatialAttention()
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.ca(out) * out  # broadcast (N, C, 1, 1) mask over H, W
        out = self.sa(out) * out  # broadcast (N, 1, H, W) mask over channels
        if self.downsample is not None:
            # Project the identity path so shapes match for the residual add.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


if __name__ == "__main__":
    downsample = nn.Sequential(
        nn.Conv2d(16, 32, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm2d(32))
    x = torch.ones(3, 16, 32, 32)
    model = BasicBlock(16, 32, stride=1, downsample=downsample)
    print(model(x).shape)
import numpy as np

from ..local_interpolation import ThirdOrderHermitePolynomialInterpolation
from .runge_kutta import AbstractESDIRK, ButcherTableau

# Diagonal entry shared by every implicit stage of this ESDIRK scheme.
γ = 0.26

# Lower-triangular Butcher coefficients, row by row.
a21 = γ
a31 = 0.13
a32 = 0.84033320996790809
a41 = 0.22371961478320505
a42 = 0.47675532319799699
a43 = -0.06470895363112615
a51 = 0.16648564323248321
a52 = 0.10450018841591720
a53 = 0.03631482272098715
a54 = -0.13090704451073998
a61 = 0.13855640231268224
a62 = 0
a63 = -0.04245337201752043
a64 = 0.02446657898003141
a65 = 0.61943039072480676
a71 = 0.13659751177640291
a72 = 0
a73 = -0.05496908796538376
a74 = -0.04118626728321046
a75 = 0.62993304899016403
a76 = 0.06962479448202728

# Predictors taken from
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/tableaus/sdirk_tableaus.jl#L1444  # noqa: E501
# https://github.com/SciML/OrdinaryDiffEq.jl/blob/54fb35870fa402fc95d665cd5f9502e2759ea436/src/perform_step/kencarp_kvaerno_perform_step.jl#L1123  # noqa: E501
# This is with the exception of α21, which is mistakenly set to zero.
#
# See also /devdocs/predictor_dirk.md
α21 = 1.0
α31 = -1.366025403784441
α32 = 2.3660254037844357
α41 = -0.19650552613122207
α42 = 0.8113579546496623
α43 = 0.38514757148155954
α51 = 0.10375304369958693
α52 = 0.937994698066431
α53 = -0.04174774176601781
α61 = -0.17281112873898072
α62 = 0.6235784481025847
α63 = 0.5492326806363959
# The last predictor row reuses the sixth stage's coefficients.
α71 = a61
α72 = a62
α73 = a63
α74 = a64
α75 = a65
α76 = γ

_kvaerno5_tableau = ButcherTableau(
    a_lower=(
        np.array([a21]),
        np.array([a31, a32]),
        np.array([a41, a42, a43]),
        np.array([a51, a52, a53, a54]),
        np.array([a61, a62, a63, a64, a65]),
        np.array([a71, a72, a73, a74, a75, a76]),
    ),
    a_diagonal=np.array([0, γ, γ, γ, γ, γ, γ]),
    a_predictor=(
        np.array([α21]),
        np.array([α31, α32]),
        np.array([α41, α42, α43]),
        np.array([α51, α52, α53, 0]),
        np.array([α61, α62, α63, 0, 0]),
        np.array([α71, α72, α73, α74, α75, α76]),
    ),
    b_sol=np.array([a71, a72, a73, a74, a75, a76, γ]),
    b_error=np.array(
        [a71 - a61, a72 - a62, a73 - a63, a74 - a64, a75 - a65, a76 - γ, γ]
    ),
    c=np.array(
        [0.52, 1.230333209967908, 0.8957659843500759, 0.43639360985864756, 1.0, 1.0]
    ),
)


class Kvaerno5(AbstractESDIRK):
    r"""Kvaerno's 5/4 method.

    A-L stable stiffly accurate 5th order ESDIRK method. Has an embedded 4th order
    method for adaptive step sizing. Uses 7 stages.

    When solving an ODE over the interval $[t_0, t_1]$, note that this method will make
    some evaluations slightly past $t_1$.

    ??? cite "Reference"

        ```bibtex
        @article{kvaerno2004singly,
          title={Singly diagonally implicit Runge--Kutta methods with an explicit first
                 stage},
          author={Kv{\ae}rn{\o}, Anne},
          journal={BIT Numerical Mathematics},
          volume={44},
          number={3},
          pages={489--502},
          year={2004},
          publisher={Springer}
        }
        ```
    """

    tableau = _kvaerno5_tableau
    interpolation_cls = ThirdOrderHermitePolynomialInterpolation.from_k

    def order(self, terms):
        return 5
#MenuTitle: Remove Zero Deltas in Selected Glyphs
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
__doc__="""
Goes through all layers of each selected glyph, and deletes all TT Delta Hints with an offset of zero. Detailed Report in Macro Window.
"""

def process(Layer):
	"""Delete every zero-offset TT delta hint on one layer. Returns the number removed."""
	try:
		count = 0
		for i in reversed(range(len(Layer.hints))):
			hint = Layer.hints[i]
			if hint.type != TTDELTA:
				continue
			elementDict = hint.elementDict()
			if "settings" not in elementDict:
				continue
			settings = elementDict["settings"]
			if settings:
				for deltaType in ("deltaH", "deltaV"):
					if deltaType not in settings:
						continue
					# BUGFIX: iterate over snapshots of the keys; the original
					# deleted entries from the very dicts it was iterating,
					# which raises RuntimeError on Python 3.
					for transformType in list(settings[deltaType]):
						deltas = settings[deltaType][transformType]
						for ppmSize in list(deltas):
							if deltas[ppmSize] == 0:
								del deltas[ppmSize]
								count += 1
						# clean up delta PPMs:
						if len(settings[deltaType][transformType]) == 0:
							del settings[deltaType][transformType]
					# clean up delta directions:
					if len(settings[deltaType]) == 0:
						del settings[deltaType]
			# clean up hints:
			if not elementDict["settings"]:
				del Layer.hints[i]
		print(" Deleted %i zero delta%s on layer '%s'." % (
			count,
			"" if count == 1 else "s",
			Layer.name,
		))
		return count
	except Exception as e:
		Glyphs.showMacroWindow()
		import traceback
		print(traceback.format_exc())
		print()
		print(e)
		# BUGFIX: the original fell through and returned None here, which made
		# the "totalCount += process(...)" below raise a TypeError.
		return 0

thisFont = Glyphs.font  # frontmost font
selectedLayers = thisFont.selectedLayers  # active layers of selected glyphs
Glyphs.clearLog()  # clears log in Macro window

totalCount = 0
for selectedLayer in selectedLayers:
	thisGlyph = selectedLayer.parent
	print("%s:" % thisGlyph.name)
	thisGlyph.beginUndo()  # begin undo grouping
	for thisLayer in thisGlyph.layers:
		totalCount += process(thisLayer)
	thisGlyph.endUndo()  # end undo grouping

if totalCount:
	Message(
		title="%i Zero Delta%s Deleted" % (
			totalCount,
			"" if totalCount == 1 else "s",
		),
		message="Deleted %i TT delta hint%s with zero offset in %i selected glyph%s (%s%s). Detailed report in Macro Window." % (
			totalCount,
			"" if totalCount == 1 else "s",
			len(selectedLayers),
			"" if len(selectedLayers) == 1 else "s",
			", ".join([l.parent.name for l in selectedLayers[:min(20, len(selectedLayers))]]),
			",..." if len(selectedLayers) > 20 else "",
		),
		OKButton=u"👍🏻 OK",
	)
else:
	Message(
		title="No Zero Deltas",
		message="No TT delta hints with zero offset were found in selected glyph%s (%s%s)." % (
			"" if len(selectedLayers) == 1 else "s",
			", ".join([l.parent.name for l in selectedLayers[:min(20, len(selectedLayers))]]),
			",..." if len(selectedLayers) > 20 else "",
		),
		OKButton=u"🍸 Cheers",
	)
<import_from_stmt>. _internal<line_sep> |
import os
import shutil
import json


class Document():
    """A source document with its text and company metadata."""

    def __init__(self, doc_id, doc_text, edi_id, company_name, body, topic):
        self.doc_id = doc_id
        self.doc_text = doc_text
        self.edi_id = edi_id
        self.company_name = company_name
        self.body = body
        self.topic = topic

    def get_header(self):
        """Return the identifying fields of this document."""
        return {
            "document_id": self.document_id,
            "document_name": self.document_name,
            "doc_text": self.doc_text,
            "edi_id": self.edi_id,
        }

    @property
    def document_id(self):
        # The edi_id doubles as the document's unique identifier.
        return self.edi_id

    @property
    def document_name(self):
        return self.company_name

    @classmethod
    def load(cls, file_path):
        """Read a document from a JSON file; raises if the file is missing."""
        if not os.path.isfile(file_path):
            raise Exception("File {} does not found.".format(file_path))
        with open(file_path, encoding="utf-8") as f:
            doc = json.load(f)
        return cls(
            doc["doc_id"],
            doc["doc_text"],
            doc["edi_id"],
            doc["company_name"],
            doc["body"],
            doc["topic"],
        )


class Label():
    """An assignable label with optional grouping and display metadata."""

    def __init__(self, label, label_group="", display_name="", display_style=""):
        self.label = label
        self.label_group = label_group
        self.display_name = display_name
        self.display_style = display_style

    def dumps(self):
        """Serialize this label to a plain dict."""
        return {
            "label": self.label,
            "label_group": self.label_group,
            "display_name": self.display_name,
            "display_style": self.display_style,
        }


class Annotation():
    """One annotator's label attached to a target within a document."""

    def __init__(self, target_id, target, label, label_target="", position=(),
                 annotator="anonymous"):
        self.target_id = int(target_id)
        self.target = target
        self.label = label
        self.label_target = label_target
        self.position = position
        if len(self.position) > 0:
            # Normalize character offsets to plain ints (they may arrive as strings).
            self.position = [int(i) for i in self.position]
        self.annotator = annotator

    def dumps(self):
        """Serialize this annotation to a plain dict."""
        return {
            "target_id": self.target_id,
            "target": self.target,
            "label": self.label,
            "label_target": self.label_target,
            "position": self.position,
            "annotator": self.annotator,
        }

    @classmethod
    def loads(cls, obj):
        """Deserialize an annotation from a dict produced by dumps()."""
        annotation = Annotation(
            obj["target_id"],
            obj["target"],
            obj["label"],
            obj["label_target"],
            obj["position"] if "position" in obj else (),
        )
        if "annotator" in obj:
            annotation.annotator = obj["annotator"]
        return annotation


class AnnotationTask():
    """Base class pairing a document with its per-target annotations.

    Subclasses must implement get_targets() and get_labels().
    """

    ANNOTATION_CLASS = Annotation

    def __init__(self, document, annotations=()):
        self.document = document
        self.annotations = {} if len(annotations) == 0 else annotations

    def get_targets(self):
        raise Exception("Sub class have to specify texts for annotation")

    def get_labels(self):
        raise Exception("Sub class have to define label candidates")

    def get_dataset(self):
        """Return {target_id: {"target": ..., "annotations": [...]}} for all targets."""
        dataset = {}
        for target_id, target in self.get_targets():
            serialized = []
            if target_id in self.annotations:
                serialized = [a.dumps() for a in self.annotations[target_id]]
            dataset[target_id] = {"target": target, "annotations": serialized}
        return dataset

    def save_annotations(self, target_dir, annotation_objs, annotator):
        """Persist annotations under target_dir/<document_id>, one file per (target, annotator)."""
        doc_dir = os.path.join(target_dir, self.document.document_id)
        annotations = [self.ANNOTATION_CLASS.loads(obj) for obj in annotation_objs]
        if annotator:
            for a in annotations:
                a.annotator = annotator
            if os.path.exists(doc_dir):
                # Drop this annotator's previous files before re-saving.
                suffix = "__{}.json".format(annotator)
                for name in os.listdir(doc_dir):
                    if name.startswith("ann__") and name.endswith(suffix):
                        os.remove(os.path.join(doc_dir, name))

        buckets = {}
        for a in annotations:
            buckets.setdefault((a.target_id, a.annotator), []).append(a)

        if len(buckets) > 0 and not os.path.exists(doc_dir):
            os.mkdir(doc_dir)

        for key, grouped in buckets.items():
            file_name = self._make_annotation_file_name(*key)
            body = {"annotations": [a.dumps() for a in grouped]}
            file_path = os.path.join(doc_dir, file_name)
            with open(file_path, mode="w", encoding="utf-8") as f:
                json.dump(body, f, ensure_ascii=False, indent=2)

    def _make_annotation_file_name(self, target_id, annotator):
        return "ann__{}__{}__{}.json".format(
            self.document.document_id, target_id, annotator)

    @classmethod
    def load(cls, target_dir, document, annotator=""):
        """Load saved annotations for document, optionally filtered to one annotator."""
        annotations = {}
        doc_dir = os.path.join(target_dir, document.document_id)
        if os.path.exists(doc_dir):
            only_suffix = "__{}.json".format(annotator)
            for name in sorted(os.listdir(doc_dir)):
                if not name.startswith("ann__"):
                    continue
                if annotator and not name.endswith(only_suffix):
                    continue
                path = os.path.join(doc_dir, name)
                with open(path, encoding="utf-8") as af:
                    annotation_objs = json.load(af)["annotations"]
                loaded = [cls.ANNOTATION_CLASS.loads(obj) for obj in annotation_objs]
                if len(loaded) > 0:
                    target_id = loaded[0].target_id
                    if target_id not in annotations:
                        annotations[target_id] = loaded
                    else:
                        annotations[target_id] += loaded
        return cls(document, annotations)
# date: 2019.05.05
# author: Bartłomiej 'furas' Burek

import robobrowser

browser = robobrowser.RoboBrowser(
    user_agent='Mozilla/5.0 (X11; Linux i586; rv:31.0) Gecko/20100101 Firefox/31.0')
browser.parser = 'lxml'

# First visit: list the forms on the top-level page.
browser.open("https://www.just-eat.fr")
print(browser.get_forms())

# The page embeds an iframe; open its document directly to inspect it.
iframe_src = browser.select('iframe')[0]['src']
print(iframe_src)
browser.open("https://www.just-eat.fr" + iframe_src)
print(browser.parsed)

# Back to the top-level page and list the forms again.
browser.open("https://www.just-eat.fr")
print(browser.get_forms())
# Copyright (C) 2017 Mandiant, Inc. All Rights Reserved.

import logging
from typing import List, Tuple
from dataclasses import dataclass

import viv_utils
import envi.memory
import viv_utils.emulator_drivers
from envi import Emulator

from . import api_hooks

logger = logging.getLogger("floss")

MAX_MAPS_SIZE = 1024 * 1024 * 100  # 100MB max memory allocated in an emulator instance


def is_import(emu, va):
    """
    Return True if the given VA is that of an imported function.
    """
    # TODO: also check location type
    t = emu.getVivTaint(va)
    if t is None:
        return False
    return t[1] == "import"


# type aliases for envi.memory map
MemoryMapDescriptor = Tuple[
    # va
    int,
    # size
    int,
    # perms
    int,
    # name
    str,
]

# type aliases for envi.memory map
MemoryMap = Tuple[
    # start
    int,
    # end
    int,
    # descriptor
    MemoryMapDescriptor,
    # content
    bytes,
]

# type aliases for envi.memory map
Memory = List[MemoryMap]


@dataclass
class Snapshot:
    """
    A snapshot of the state of the CPU and memory.

    Attributes:
        memory: a snapshot of the memory contents
        sp: the stack counter
        pc: the instruction pointer
    """

    memory: Memory
    sp: int
    pc: int


def get_map_size(emu):
    """
    Return the total number of bytes currently mapped in the emulator.
    """
    size = 0
    for mapva, mapsize, mperm, mfname in emu.getMemoryMaps():
        # BUGFIX: the original accumulated into the loop variable
        # (`mapsize += size`), so this function always returned 0 and the
        # MAX_MAPS_SIZE cap was never enforced.
        size += mapsize
    return size


class MapsTooLargeError(Exception):
    """Raised when the emulator has mapped more than MAX_MAPS_SIZE bytes."""

    pass


def make_snapshot(emu: Emulator) -> Snapshot:
    """
    Create a snapshot of the current CPU and memory.

    Raises MapsTooLargeError when the emulator has mapped too much memory.
    """
    if get_map_size(emu) > MAX_MAPS_SIZE:
        logger.debug("emulator mapped too much memory: 0x%x", get_map_size(emu))
        raise MapsTooLargeError()
    return Snapshot(emu.getMemorySnap(), emu.getStackCounter(), emu.getProgramCounter())


@dataclass
class Delta:
    """
    a pair of snapshots from before and after an operation.

    facilitates diffing the state of an emulator.
    """

    pre: Snapshot
    post: Snapshot


class DeltaCollectorHook(viv_utils.emulator_drivers.Hook):
    """
    hook that collects Deltas at each imported API call.
    """

    def __init__(self, pre_snap: Snapshot):
        super(DeltaCollectorHook, self).__init__()
        self._pre_snap = pre_snap
        self.deltas: List[Delta] = []

    def hook(self, callname, driver, callconv, api, argv):
        if is_import(driver._emu, driver._emu.getProgramCounter()):
            try:
                self.deltas.append(Delta(self._pre_snap, make_snapshot(driver._emu)))
            except MapsTooLargeError:
                logger.debug(
                    "despite call to import %s, maps too large, not extracting strings",
                    callname,
                )


def emulate_function(
    emu: Emulator, function_index, fva: int, return_address: int, max_instruction_count: int
) -> List[Delta]:
    """
    Emulate a function and collect snapshots at each interesting place.
    These interesting places include calls to imported API functions
    and the final state of the emulator.

    Emulation continues until the return address is hit, or
    the given max_instruction_count is hit.

    Some library functions are shimmed, such as memory allocation routines.
    This helps "normal" routines emulate correct using standard library function.
    These include:
      - GetProcessHeap
      - RtlAllocateHeap
      - AllocateHeap
      - malloc

    :type function_index: viv_utils.FunctionIndex
    :param fva: The start address of the function to emulate.
    :param return_address: The expected return address of the function.
     Emulation stops here.
    :param max_instruction_count: The max number of instructions to emulate.
     This helps avoid unexpected infinite loops.
    """
    try:
        pre_snap = make_snapshot(emu)
    except MapsTooLargeError:
        # logger.warn is a deprecated alias; use warning().
        logger.warning("initial snapshot mapped too much memory, can't extract strings")
        return []

    delta_collector = DeltaCollectorHook(pre_snap)

    try:
        logger.debug("Emulating function at 0x%08X", fva)
        driver = viv_utils.emulator_drivers.DebuggerEmulatorDriver(emu)
        monitor = api_hooks.ApiMonitor(emu.vw, function_index)
        driver.add_monitor(monitor)
        driver.add_hook(delta_collector)

        with api_hooks.defaultHooks(driver):
            driver.runToVa(return_address, max_instruction_count)
    except viv_utils.emulator_drivers.InstructionRangeExceededError:
        logger.debug("Halting as emulation has escaped!")
    except envi.InvalidInstruction:
        logger.debug(
            "vivisect encountered an invalid instruction. will continue processing.",
            exc_info=True,
        )
    except envi.UnsupportedInstruction:
        logger.debug(
            "vivisect encountered an unsupported instruction. will continue processing.",
            exc_info=True,
        )
    except envi.BreakpointHit:
        logger.debug(
            "vivisect encountered an unexpected emulation breakpoint. will continue processing.",
            exc_info=True,
        )
    except viv_utils.emulator_drivers.StopEmulation:
        pass
    except Exception:
        logger.debug(
            "vivisect encountered an unexpected exception. will continue processing.",
            exc_info=True,
        )
    logger.debug("Ended emulation at 0x%08X", emu.getProgramCounter())

    deltas = delta_collector.deltas
    try:
        # Always include a final delta covering the end state of emulation.
        deltas.append(Delta(pre_snap, make_snapshot(emu)))
    except MapsTooLargeError:
        logger.debug("failed to create final snapshot, emulator mapped too much memory, skipping")

    return deltas
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest

from datadog_checks.base import OpenMetricsBaseCheckV2
from datadog_checks.base.constants import ServiceCheck
from datadog_checks.dev.testing import requires_py3

from .utils import get_check

pytestmark = [requires_py3, pytest.mark.openmetrics, pytest.mark.openmetrics_interface]


def test_default_config(aggregator, dd_run_check, mock_http_response):
    # Values returned by get_default_config must be honored when the
    # instance configuration omits them.
    class Check(OpenMetricsBaseCheckV2):
        __NAMESPACE__ = 'test'

        def get_default_config(self):
            return {'metrics': ['.+'], 'rename_labels': {'foo': 'bar'}}

    mock_http_response(
        """
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
"""
    )
    check = Check('test', {}, [{'openmetrics_endpoint': 'test'}])
    dd_run_check(check)

    aggregator.assert_metric(
        'test.go_memstats_alloc_bytes',
        6396288,
        metric_type=aggregator.GAUGE,
        tags=['endpoint:test', 'bar:baz'],
    )
    aggregator.assert_all_metrics_covered()


def test_service_check_dynamic_tags(aggregator, dd_run_check, mock_http_response):
    # Tags added through set_dynamic_tags must show up on metrics but not on
    # service checks.
    mock_http_response(
        """
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes{foo="baz"} 6.396288e+06
# HELP state Node state
# TYPE state gauge
state{bar="baz"} 3
"""
    )
    check = get_check(
        {
            'metrics': ['.+', {'state': {'type': 'service_check', 'status_map': {'3': 'ok'}}}],
            'tags': ['foo:bar'],
        }
    )

    def run_and_verify(metric_tags):
        dd_run_check(check)
        aggregator.assert_metric(
            'test.go_memstats_alloc_bytes',
            6396288,
            metric_type=aggregator.GAUGE,
            tags=metric_tags,
        )
        aggregator.assert_service_check('test.state', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar'])
        aggregator.assert_service_check(
            'test.openmetrics.health', ServiceCheck.OK, tags=['endpoint:test', 'foo:bar']
        )
        aggregator.assert_all_metrics_covered()
        assert len(aggregator.service_check_names) == 2

    run_and_verify(['endpoint:test', 'foo:bar', 'foo:baz'])

    aggregator.reset()
    check.set_dynamic_tags('baz:foo')
    run_and_verify(['endpoint:test', 'foo:bar', 'foo:baz', 'baz:foo'])
"""Script containing the DeepLoco environments."""<import_stmt>gym<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_stmt>cv2<try_stmt><block_start>sys.path.append(os.path.join(os.environ["TERRAINRL_PATH"] "simAdapter"))<import_stmt>terrainRLSim# noqa: F401
<block_end><except_stmt>(KeyError ImportError ModuleNotFoundError)<block_start><pass><block_end><class_stmt>BipedalSoccer(gym.Env)<block_start>"""Bipedal Soccer environment.
In this environment, a bipedal agent is placed in an open field with a
soccer ball. The agent is rewarded for moving to the ball, and additionally
dribbling the ball to the target. The reward function is a weighted sum of
the agent's distance from the ball and the distance of the ball from a
desired goal position. This reward is positive to discourage the agent from
falling prematurely.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""<def_stmt>__init__ self<block_start>"""Instantiate the environment."""<line_sep>self.wrapped_env=terrainRLSim.getEnv("PD-Biped3D-HLC-Soccer-v1" render=<false>)<line_sep># Add the time horizon.
self.horizon=512<block_end>@property<def_stmt>observation_space self<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.observation_space<block_end>@property<def_stmt>action_space self<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.action_space<block_end><def_stmt>step self action<block_start>"""See parent class."""<line_sep>obs,rew,done,info=self.wrapped_env.step(np.array([action]))<line_sep><return>obs[0] rew[0][0] done info<block_end><def_stmt>reset self<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.reset()[0]<block_end><def_stmt>render self mode='human'<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.render(mode=mode)<block_end><block_end><class_stmt>BipedalObstacles(gym.Env)<block_start>"""Bipedal Obstacles environment.
In this environment, a bipedal agent is placed in an open field with
obstacles scattered throughout the world. The goal of the agent is to
walk around the world and reach a goal position.
Attributes
----------
wrapped_env : gym.Env
the original environment, which add more dimensions than wanted here
"""<def_stmt>__init__ self render<block_start>"""Instantiate the environment.
Parameters
----------
render : bool
whether to render the environment
"""<line_sep>self.t=0<if_stmt>render<block_start>self.wrapped_env=gym.make("PD-Biped3D-HLC-Obstacles-render-v2")<block_end><else_stmt><block_start>self.wrapped_env=gym.make("PD-Biped3D-HLC-Obstacles-v2")<block_end># Add the time horizon.
self.horizon=2000<block_end>@property<def_stmt>observation_space self<block_start>"""See parent class."""<line_sep><return>gym.spaces.Box(low=20<times>self.wrapped_env.observation_space.low[:-2] high=20<times>self.wrapped_env.observation_space.high[:-2] dtype=np.float32)<block_end>@property<def_stmt>context_space self<block_start>"""See parent class."""<line_sep><return>gym.spaces.Box(low=20<times>self.wrapped_env.observation_space.low[-2:] high=20<times>self.wrapped_env.observation_space.high[-2:] dtype=np.float32)<block_end>@property<def_stmt>action_space self<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.action_space<block_end>@property<def_stmt>current_context self<block_start>"""See parent class."""<line_sep><return>self.wrapped_env.env.getObservation()[-2:]<block_end><def_stmt>step self action<block_start>"""See parent class."""<line_sep>self.t<augadd>1<line_sep>obs,rew,done,info=self.wrapped_env.step(action)<line_sep>done=done<or>self.t<ge>self.horizon<line_sep><return>obs[:-2] rew done info<block_end><def_stmt>reset self<block_start>"""See parent class."""<line_sep>self.t=0<line_sep><return>self.wrapped_env.reset()[:-2]<block_end><def_stmt>render self mode='human'<block_start>"""See parent class."""<line_sep>image=self.wrapped_env.env.render(headless_step=<true>)<if_stmt>mode<eq>'human'<block_start>f=np.flip(image.astype(np.float32)/255.0 axis=0)<line_sep>f=np.flip(f axis=2)<line_sep>cv2.imshow("PD-Biped3D-HLC-Obstacles-v2" f)<line_sep>cv2.waitKey(1)<block_end><elif_stmt>mode<eq>'rgb_array'<block_start><return>image<block_end><block_end><block_end> |
<import_stmt>collections<import_stmt>functools<import_stmt>io<import_stmt>math<import_stmt>typing<import_from_stmt>abc ABC abstractmethod<import_from_stmt>river base<import_from_stmt>river.utils.skmultiflow_utils calculate_object_size normalize_values_in_dict <import_from_stmt>.nodes.branch DTBranch NominalBinaryBranch NominalMultiwayBranch NumericBinaryBranch NumericMultiwayBranch <import_from_stmt>.nodes.leaf HTLeaf<try_stmt><block_start><import_stmt>graphviz<line_sep>GRAPHVIZ_INSTALLED=<true><block_end><except_stmt>ImportError<block_start>GRAPHVIZ_INSTALLED=<false><block_end><class_stmt>HoeffdingTree(ABC)<block_start>"""Base class for Hoeffding Decision Trees.
This is an **abstract class**, so it cannot be used directly. It defines base operations
and properties that all the Hoeffding decision trees must inherit or implement according to
their own design.
Parameters
----------
max_depth
The maximum depth a tree can reach. If `None`, the tree will grow indefinitely.
binary_split
If True, only allow binary splits.
max_size
The max size of the tree, in Megabytes (MB).
memory_estimate_period
Interval (number of processed instances) between memory consumption checks.
stop_mem_management
If True, stop growing as soon as memory limit is hit.
remove_poor_attrs
If True, disable poor attributes to reduce memory usage.
merit_preprune
If True, enable merit-based tree pre-pruning.
"""<def_stmt>__init__ self max_depth:int=<none> binary_split:bool=<false> max_size:float=100.0 memory_estimate_period:int=1000000 stop_mem_management:bool=<false> remove_poor_attrs:bool=<false> merit_preprune:bool=<true> # Properties common to all the Hoeffding trees
<block_start>self._split_criterion:str=""<line_sep>self._leaf_prediction:str=""<line_sep>self.max_depth:float=max_depth<if>max_depth<is><not><none><else>math.inf<line_sep>self.binary_split:bool=binary_split<line_sep>self._max_size:float=max_size<line_sep>self._max_byte_size:float=self._max_size<times>(2<power>20)# convert to byte
self.memory_estimate_period:int=memory_estimate_period<line_sep>self.stop_mem_management:bool=stop_mem_management<line_sep>self.remove_poor_attrs:bool=remove_poor_attrs<line_sep>self.merit_preprune:bool=merit_preprune<line_sep>self._root:typing.Union[DTBranch HTLeaf <none>]=<none><line_sep>self._n_active_leaves:int=0<line_sep>self._n_inactive_leaves:int=0<line_sep>self._inactive_leaf_size_estimate:float=0.0<line_sep>self._active_leaf_size_estimate:float=0.0<line_sep>self._size_estimate_overhead_fraction:float=1.0<line_sep>self._growth_allowed=<true><line_sep>self._train_weight_seen_by_model:float=0.0<block_end>@staticmethod<def_stmt>_hoeffding_bound range_val confidence n<block_start>r"""Compute the Hoeffding bound, used to decide how many samples are necessary at each
node.
Notes
-----
The Hoeffding bound is defined as:
$\\epsilon = \\sqrt{\\frac{R^2\\ln(1/\\delta))}{2n}}$
where:
$\\epsilon$: Hoeffding bound.
$R$: Range of a random variable. For a probability the range is 1, and for an
information gain the range is log *c*, where *c* is the number of classes.
$\\delta$: Confidence. 1 minus the desired probability of choosing the correct
attribute at any given node.
$n$: Number of samples.
Parameters
----------
range_val
Range value.
confidence
Confidence of choosing the correct attribute.
n
Number of processed samples.
"""<line_sep><return>math.sqrt((range_val<times>range_val<times>math.log(1.0/confidence))/(2.0<times>n))<block_end>@property<def_stmt>max_size self<block_start>"""Max allowed size tree can reach (in MB)."""<line_sep><return>self._max_size<block_end>@max_size.setter<def_stmt>max_size self size<block_start>self._max_size=size<line_sep>self._max_byte_size=self._max_size<times>(2<power>20)<block_end>@property<def_stmt>height self<arrow>int<block_start><if_stmt>self._root<block_start><return>self._root.height<block_end><block_end>@property<def_stmt>n_nodes self<block_start><if_stmt>self._root<block_start><return>self._root.n_nodes<block_end><block_end>@property<def_stmt>n_branches self<block_start><if_stmt>self._root<block_start><return>self._root.n_branches<block_end><block_end>@property<def_stmt>n_leaves self<block_start><if_stmt>self._root<block_start><return>self._root.n_leaves<block_end><block_end>@property<def_stmt>n_active_leaves self<block_start><return>self._n_active_leaves<block_end>@property<def_stmt>n_inactive_leaves self<block_start><return>self._n_inactive_leaves<block_end>@property<def_stmt>summary self<block_start>"""Collect metrics corresponding to the current status of the tree
in a string buffer.
"""<line_sep>summary={"n_nodes":self.n_nodes "n_branches":self.n_branches "n_leaves":self.n_leaves "n_active_leaves":self.n_active_leaves "n_inactive_leaves":self.n_inactive_leaves "height":self.height "total_observed_weight":self._train_weight_seen_by_model }<line_sep><return>summary<block_end><def_stmt>to_dataframe self<block_start>"""Return a representation of the current tree structure organized in a
`pandas.DataFrame` object.
In case the tree is empty or it only contains a single node (a leaf), `None` is returned.
Returns
-------
df
A `pandas.DataFrame` depicting the tree structure.
"""<if_stmt>self._root<is><not><none><and>isinstance(self._root DTBranch)<block_start><return>self._root.to_dataframe()<block_end><block_end><def_stmt>_branch_selector self numerical_feature=<true> multiway_split=<false><arrow>typing.Type[DTBranch]<block_start>"""Create a new split node."""<if_stmt>numerical_feature<block_start><if_stmt><not>multiway_split<block_start><return>NumericBinaryBranch<block_end><else_stmt><block_start><return>NumericMultiwayBranch<block_end><block_end><else_stmt><block_start><if_stmt><not>multiway_split<block_start><return>NominalBinaryBranch<block_end><else_stmt><block_start><return>NominalMultiwayBranch<block_end><block_end><block_end>@abstractmethod<def_stmt>_new_leaf self initial_stats:dict=<none> parent:typing.Union[HTLeaf DTBranch]=<none><arrow>HTLeaf<block_start>"""Create a new learning node.
The characteristics of the learning node depends on the tree algorithm.
Parameters
----------
initial_stats
Target statistics set from the parent node.
parent
Parent node to inherit from.
Returns
-------
A new learning node.
"""<block_end>@property<def_stmt>split_criterion self<arrow>str<block_start>"""Return a string with the name of the split criterion being used by the tree."""<line_sep><return>self._split_criterion<block_end>@split_criterion.setter@abstractmethod<def_stmt>split_criterion self split_criterion<block_start>"""Define the split criterion to be used by the tree."""<block_end>@property<def_stmt>leaf_prediction self<arrow>str<block_start>"""Return the prediction strategy used by the tree at its leaves."""<line_sep><return>self._leaf_prediction<block_end>@leaf_prediction.setter@abstractmethod<def_stmt>leaf_prediction self leaf_prediction<block_start>"""Define the prediction strategy used by the tree in its leaves."""<block_end><def_stmt>_enforce_size_limit self<block_start>"""Track the size of the tree and disable/enable nodes if required.
This memory-management routine shared by all the Hoeffding Trees is based on [^1].
References
----------
[^1]: <NAME>., 2007. Improving hoeffding trees (Doctoral dissertation,
The University of Waikato).
"""<line_sep>tree_size=self._size_estimate_overhead_fraction<times>(self._active_leaf_size_estimate+self._n_inactive_leaves<times>self._inactive_leaf_size_estimate)<if_stmt>self._n_inactive_leaves<g>0<or>tree_size<g>self._max_byte_size<block_start><if_stmt>self.stop_mem_management<block_start>self._growth_allowed=<false><line_sep><return><block_end><block_end>leaves=self._find_leaves()<line_sep>leaves.sort(key=<lambda>leaf:leaf.calculate_promise())<line_sep>max_active=0<while_stmt>max_active<l>len(leaves)<block_start>max_active<augadd>1<if_stmt>((max_active<times>self._active_leaf_size_estimate+(len(leaves)-max_active)<times>self._inactive_leaf_size_estimate)<times>self._size_estimate_overhead_fraction)<g>self._max_byte_size<block_start>max_active<augsub>1<line_sep><break><block_end><block_end>cutoff=len(leaves)-max_active<for_stmt>i range(cutoff)<block_start><if_stmt>leaves[i].is_active()<block_start>leaves[i].deactivate()<line_sep>self._n_inactive_leaves<augadd>1<line_sep>self._n_active_leaves<augsub>1<block_end><block_end><for_stmt>i range(cutoff len(leaves))<block_start><if_stmt><not>leaves[i].is_active()<and>leaves[i].depth<l>self.max_depth<block_start>leaves[i].activate()<line_sep>self._n_active_leaves<augadd>1<line_sep>self._n_inactive_leaves<augsub>1<block_end><block_end><block_end><def_stmt>_estimate_model_size self<block_start>"""Calculate the size of the model and trigger tracker function
if the actual model size exceeds the max size in the configuration.
This memory-management routine shared by all the Hoeffding Trees is based on [^1].
References
----------
[^1]: <NAME>., 2007. Improving hoeffding trees (Doctoral dissertation,
The University of Waikato).
"""<line_sep>leaves=self._find_leaves()<line_sep>total_active_size=0<line_sep>total_inactive_size=0<for_stmt>leaf leaves<block_start><if_stmt>leaf.is_active()<block_start>total_active_size<augadd>calculate_object_size(leaf)<block_end><else_stmt><block_start>total_inactive_size<augadd>calculate_object_size(leaf)<block_end><block_end><if_stmt>total_active_size<g>0<block_start>self._active_leaf_size_estimate=total_active_size/self._n_active_leaves<block_end><if_stmt>total_inactive_size<g>0<block_start>self._inactive_leaf_size_estimate=(total_inactive_size/self._n_inactive_leaves)<block_end>actual_model_size=calculate_object_size(self)<line_sep>estimated_model_size=(self._n_active_leaves<times>self._active_leaf_size_estimate+self._n_inactive_leaves<times>self._inactive_leaf_size_estimate)<line_sep>self._size_estimate_overhead_fraction=actual_model_size/estimated_model_size<if_stmt>actual_model_size<g>self._max_byte_size<block_start>self._enforce_size_limit()<block_end><block_end><def_stmt>_deactivate_all_leaves self<block_start>"""Deactivate all leaves."""<line_sep>leaves=self._find_leaves()<for_stmt>leaf leaves<block_start>leaf.deactivate()<line_sep>self._n_inactive_leaves<augadd>1<line_sep>self._n_active_leaves<augsub>1<block_end><block_end><def_stmt>_find_leaves self<arrow>typing.List[HTLeaf]<block_start>"""Find learning nodes in the tree.
Returns
-------
List of learning nodes in the tree.
"""<line_sep><return>[leaf<for>leaf self._root.iter_leaves()]<block_end># Adapted from creme's original implementation
<def_stmt>debug_one self x:dict<arrow>typing.Union[str <none>]<block_start>"""Print an explanation of how `x` is predicted.
Parameters
----------
x
A dictionary of features.
Returns
-------
A representation of the path followed by the tree to predict `x`; `None` if
the tree is empty.
Notes
-----
Currently, Label Combination Hoeffding Tree Classifier (for multi-label
classification) is not supported.
"""<if_stmt>self._root<is><none><block_start><return><block_end># We'll redirect all the print statement to a buffer, we'll return the content of the
# buffer at the end
buffer=io.StringIO()<line_sep>_print=functools.partial(print file=buffer)<for_stmt>node self._root.walk(x until_leaf=<true>)<block_start><if_stmt>isinstance(node HTLeaf)<block_start>_print(repr(node))<block_end><else_stmt><block_start><try_stmt><block_start>child_index=node.branch_no(x)# noqa
<block_end><except_stmt>KeyError<block_start>child_index,_=node.most_common_path()<block_end>_print(node.repr_branch(child_index))<block_end><block_end># noqa
<return>buffer.getvalue()<block_end><def_stmt>draw self max_depth:int=<none><block_start>"""Draw the tree using the `graphviz` library.
Since the tree is drawn without passing incoming samples, classification trees
will show the majority class in their leaves, whereas regression trees will
use the target mean.
Parameters
----------
max_depth
Only the root will be drawn when set to `0`. Every node will be drawn when
set to `None`.
Notes
-----
Currently, Label Combination Hoeffding Tree Classifier (for multi-label
classification) is not supported.
Examples
--------
>>> from river import datasets
>>> from river import tree
>>> model = tree.HoeffdingTreeClassifier(
... grace_period=5,
... split_confidence=1e-5,
... split_criterion='gini',
... max_depth=10,
... tie_threshold=0.05,
... )
>>> for x, y in datasets.Phishing():
... model = model.learn_one(x, y)
>>> dot = model.draw()
.. image:: ../../docs/img/dtree_draw.svg
:align: center
"""<line_sep>counter=0<def_stmt>iterate node=<none><block_start><if_stmt>node<is><none><block_start><yield><none> <none> self._root 0 <none><line_sep><yield><from>iterate(self._root)<block_end><nonlocal>counter<line_sep>parent_no=counter<if_stmt>isinstance(node DTBranch)<block_start><for_stmt>branch_index,child enumerate(node.children)<block_start>counter<augadd>1<line_sep><yield>parent_no node child counter branch_index<if_stmt>isinstance(child DTBranch)<block_start><yield><from>iterate(child)<block_end><block_end><block_end><block_end><if_stmt>max_depth<is><none><block_start>max_depth=math.inf<block_end>dot=graphviz.Digraph(graph_attr={"splines":"ortho" "forcelabels":"true" "overlap":"false"} node_attr={"shape":"box" "penwidth":"1.2" "fontname":"trebuchet" "fontsize":"11" "margin":"0.1,0.0" } edge_attr={"penwidth":"0.6" "center":"true" "fontsize":"7 "} )<if_stmt>isinstance(self base.Classifier)<block_start>n_colors=len(self.classes)# noqa
<block_end><else_stmt><block_start>n_colors=1<block_end># Pick a color palette which maps classes to colors
new_color=functools.partial(next iter(_color_brew(n_colors)))<line_sep>palette=collections.defaultdict(new_color)<for_stmt>parent_no,parent,child,child_no,branch_index iterate()<block_start><if_stmt>child.depth<g>max_depth<block_start><continue><block_end><if_stmt>isinstance(child DTBranch)<block_start>text=f"{child.feature}"# noqa
<block_end><else_stmt><block_start>text=f"{repr(child)}\nsamples: {int(child.total_weight)}"<block_end># Pick a color, the hue depends on the class and the transparency on the distribution
<if_stmt>isinstance(self base.Classifier)<block_start>class_proba=normalize_values_in_dict(child.stats inplace=<false>)<line_sep>mode=max(class_proba key=class_proba.get)<line_sep>p_mode=class_proba[mode]<try_stmt><block_start>alpha=(p_mode-1/n_colors)/(1-1/n_colors)<line_sep>fillcolor=str(transparency_hex(color=palette[mode] alpha=alpha))<block_end><except_stmt>ZeroDivisionError<block_start>fillcolor="#FFFFFF"<block_end><block_end><else_stmt><block_start>fillcolor="#FFFFFF"<block_end>dot.node(f"{child_no}" text fillcolor=fillcolor style="filled")<if_stmt>parent_no<is><not><none><block_start>dot.edge(f"{parent_no}" f"{child_no}" xlabel=parent.repr_branch(branch_index shorten=<true>) )<block_end><block_end><return>dot<block_end><block_end># Utility adapted from the original creme's implementation
<def_stmt>_color_brew n:int<arrow>typing.List[typing.Tuple[int int int]]<block_start>"""Generate n colors with equally spaced hues.
Parameters
----------
n
The number of required colors.
Returns
-------
List of n tuples of form (R, G, B) being the components of each color.
References
----------
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/tree/_export.py
"""<line_sep>colors=[]<line_sep># Initialize saturation & value; calculate chroma & value shift
s,v=0.75 0.9<line_sep>c=s<times>v<line_sep>m=v-c<for_stmt>h [i<for>i range(25 385 int(360/n))]# Calculate some intermediate values
<block_start>h_bar=h/60.0<line_sep>x=c<times>(1-abs((h_bar%2)-1))<line_sep># Initialize RGB with same hue & chroma as our color
rgb=[(c x 0) (x c 0) (0 c x) (0 x c) (x 0 c) (c 0 x) (c x 0) ]<line_sep>r,g,b=rgb[int(h_bar)]<line_sep># Shift the initial RGB values to match value and store
colors.append(((int(255<times>(r+m))) (int(255<times>(g+m))) (int(255<times>(b+m)))))<block_end><return>colors<block_end># Utility adapted from the original creme's implementation
<def_stmt>transparency_hex color:typing.Tuple[int int int] alpha:float<arrow>str<block_start>"""Apply alpha coefficient on hexadecimal color."""<line_sep><return>"#%02x%02x%02x"%tuple([int(round(alpha<times>c+(1-alpha)<times>255 0))<for>c color])<block_end> |
<import_stmt>hashlib<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_stmt>traceback<import_from_stmt>contextlib closing<import_from_stmt>pywb.utils.loaders BlockLoader<import_from_stmt>webrecorder.rec.storage.base BaseStorage<import_from_stmt>webrecorder.rec.storage.storagepaths add_local_store_prefix strip_prefix<line_sep>logger=logging.getLogger('wr.io')<line_sep># ============================================================================
<class_stmt>DirectLocalFileStorage(BaseStorage)<block_start>"""Webrecorder storage (local files)."""<def_stmt>__init__ self<block_start>"""Initialize Webrecorder storage."""<line_sep>super(DirectLocalFileStorage self).__init__(os.environ['STORAGE_ROOT'])<block_end><def_stmt>delete_collection_dir self dir_path<block_start>"""Delete collection directory.
:param str dir_path: directory path
:returns: whether successful or not
:rtype: bool
"""<line_sep>local_dir=os.path.join(self.storage_root dir_path)<try_stmt><block_start>logger.debug('Local Store: Deleting Directory: '+local_dir)<line_sep>parent_dir=os.path.dirname(local_dir)<line_sep>shutil.rmtree(local_dir)<line_sep>os.removedirs(parent_dir)<line_sep><return><true><block_end><except_stmt>Exception<as>e<block_start><if_stmt>e.errno<ne>2<block_start>logger.error(str(e))<block_end><return><false><block_end><block_end><def_stmt>do_upload self target_url full_filename<block_start>"""Upload file into local file storage.
:param str target_url: target URL
:param str full_filename: path
:returns: whether successful or not
:rtype: bool
"""<line_sep>os.makedirs(os.path.dirname(target_url) exist_ok=<true>)<try_stmt><block_start><if_stmt>full_filename<ne>target_url<block_start>shutil.copyfile(full_filename target_url)<block_end><else_stmt><block_start>logger.debug('Local Store: Same File, No Upload')<block_end><return><true><block_end><except_stmt>Exception<as>e<block_start>logger.error(str(e))<line_sep><return><false><block_end><block_end><def_stmt>is_valid_url self target_url<block_start>"""Return whether given target URL is an existing file.
:param str target_url: target URL
:returns: whether given target URL is an existing file
:rtype: bool
"""<line_sep><return>os.path.isfile(target_url)<block_end><def_stmt>get_client_url self target_url<block_start>"""Get client URL.
:param str target_url: target URL
:returns: client URL
:rtype: str
"""<line_sep><return>add_local_store_prefix(target_url.replace(os.path.sep '/'))<block_end><def_stmt>client_url_to_target_url self client_url<block_start>"""Get target URL (from client URL).
:param str client URL: client URL
:returns: target URL
:rtype: str
"""<line_sep><return>strip_prefix(client_url)<block_end><def_stmt>do_delete self target_url client_url<block_start>"""Delete file from storage.
:param str target_url: target URL
:returns: whether successful or not
:rtype: bool
"""<try_stmt><block_start>logger.debug('Local Store: Deleting: '+target_url)<line_sep>os.remove(target_url)<line_sep># if target_url.startswith(self.storage_root):
# os.removedirs(os.path.dirname(target_url))
<return><true><block_end><except_stmt>Exception<as>e<block_start><if_stmt>e.errno<ne>2<block_start>logger.error(str(e))<block_end><return><false><block_end><block_end><block_end># ============================================================================
<class_stmt>LocalFileStorage(DirectLocalFileStorage)<block_start>"""Webrecorder storage w/ Redis interface (local files).
:ivar StrictRedis redis: Redis interface
"""<def_stmt>__init__ self redis<block_start>"""Initialize Webrecorder storage w/ Redis interface.
:param StrictRedis redis: Redis interface
"""<line_sep>self.redis=redis<line_sep>super(LocalFileStorage self).__init__()<block_end>### BEGIN PERMA CUSTOMIZATIONS
### First pass at https://github.com/harvard-lil/perma/issues/2614
<def_stmt>delete_collection self collection<block_start>"""Delete collection.
:param collection: collection
:type: n.s.
:returns: whether successful or not
:rtype: bool
"""<line_sep>path=collection.get_dir_path()<if_stmt>path<block_start><try_stmt><block_start>dirpath=os.path.join(self.storage_root path)<line_sep><return>(self.redis.publish('handle_delete_dir' dirpath)<g>0)<block_end><except_stmt>Exception<block_start>logger.error("Failed attempt to delete collection {}".format(collection) exc_info=<true>)<line_sep><return><false><block_end><block_end><return><false><block_end>### END PERMA CUSTOMIZATIONS
<def_stmt>do_delete self target_url client_url<block_start>"""Delete file.
:param str target_url: target URL
:param str client_url: client URL (unused argument)
:returns: whether successful or not
:rtype: bool
"""<line_sep><return>self.redis.publish('handle_delete_file' target_url)<g>0<block_end><def_stmt>get_checksum_and_size self filepath_or_url<block_start>"""Returns the checksum of the supplied URL or filepath and the size of the resource
:param str filepath_or_url: The URL or filepath to the resource that the checksum and size is desired for
:return: A three tuple containing the kind of checksum, the checksum itself, and size
:rtype: tuple[str|None, str|None, int|None]
"""<line_sep>m=hashlib.md5()<line_sep>amount=1024<times>1024<line_sep>total_size=0<with_stmt>closing(BlockLoader().load(filepath_or_url))<as>f<block_start><while_stmt><true><block_start>chunk=f.read(amount)<line_sep>chunk_size=len(chunk)<if_stmt>chunk_size<eq>0<block_start><break><block_end>total_size<augadd>chunk_size<line_sep>m.update(chunk)<block_end><block_end><return>'md5' m.hexdigest() total_size<block_end><block_end> |
<import_stmt>pandas<as>pd<class_stmt>PlusN<block_start>"""A sample transform that adds n to a specific field.
Attributes:
field: The field that this transform will be applied to.
n: The value to add to the field.
"""<line_sep>identifier="plusN"<line_sep>type_signature="col->col"<def_stmt>__init__ self n:int=1<arrow><none><block_start>self.n=n<block_end><def_stmt>__call__ self column:pd.Series<arrow>pd.Series<block_start><return>column+self.n<block_end><block_end> |
# Copyright (c) 2017 <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
<import_stmt>helper<import_stmt>runner<as>runner_mod<class_stmt>TestSuite(object)<block_start>'''
An object grouping a collection of tests. It provides tags which enable
filtering during list and run selection. All tests held in the suite must
have a unique name.
..note::
The :func:`__new__` method enables collection of test cases, it must
be called in order for test cases to be collected.
..note::
To reduce test definition boilerplate, the :func:`init` method is
forwarded all `*args` and `**kwargs`. This means derived classes can
define init without boilerplate super().__init__(*args, **kwargs).
'''<line_sep>runner=runner_mod.SuiteRunner<line_sep>collector=helper.InstanceCollector()<line_sep>fixtures=[]<line_sep>tests=[]<line_sep>tags=set()<def_stmt>__new__ klass *args **kwargs<block_start>obj=super(TestSuite klass).__new__(klass *args **kwargs)<line_sep>TestSuite.collector.collect(obj)<line_sep><return>obj<block_end><def_stmt>__init__ self name=<none> fixtures=tuple() tests=tuple() tags=tuple() **kwargs<block_start>self.fixtures=self.fixtures+list(fixtures)<line_sep>self.tags=self.tags|set(tags)<line_sep>self.tests=self.tests+list(tests)<if_stmt>name<is><none><block_start>name=self.__class__.__name__<block_end>self.name=name<block_end><def_stmt>__iter__ self<block_start><return>iter(self.tests)<block_end><block_end> |
<import_stmt>json<import_stmt>logging<import_from_stmt>csv DictWriter<import_from_stmt>pathlib Path<import_from_stmt>tempfile mkdtemp<import_from_stmt>typing TYPE_CHECKING Any Dict List Literal Tuple<import_from_stmt>zipfile ZIP_DEFLATED ZipFile<import_from_stmt>rotkehlchen.accounting.pnl PnlTotals<import_from_stmt>rotkehlchen.constants.misc ZERO<import_from_stmt>rotkehlchen.fval FVal<import_from_stmt>rotkehlchen.logging RotkehlchenLogsAdapter<import_from_stmt>rotkehlchen.types Timestamp<import_from_stmt>rotkehlchen.utils.mixins.customizable_date CustomizableDateMixin<import_from_stmt>rotkehlchen.utils.version_check get_current_version<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>rotkehlchen.accounting.structures.processed_event ProcessedAccountingEvent<import_from_stmt>rotkehlchen.db.dbhandler DBHandler<block_end>logger=logging.getLogger(__name__)<line_sep>log=RotkehlchenLogsAdapter(logger)<line_sep>FILENAME_ALL_CSV='all_events.csv'<line_sep>ETH_EXPLORER='https://etherscan.io/tx/'<line_sep>ACCOUNTING_SETTINGS=('include_crypto2crypto' 'taxfree_after_period' 'include_gas_costs' 'account_for_assets_movements' 'calculate_past_cost_basis' )<line_sep>CSV_INDEX_OFFSET=2# skip title row and since counting starts from 1
<class_stmt>CSVWriteError(Exception)<block_start><pass><block_end><def_stmt>_dict_to_csv_file path:Path dictionary_list:List<arrow><none><block_start>"""Takes a filepath and a list of dictionaries representing the rows and writes them
into the file as a CSV
May raise:
- CSVWriteError if DictWriter.writerow() tried to write a dict contains
fields not in fieldnames
"""<if_stmt>len(dictionary_list)<eq>0<block_start>log.debug('Skipping writting empty CSV for {}'.format(path))<line_sep><return><block_end><with_stmt>open(path 'w' newline='')<as>f<block_start>w=DictWriter(f fieldnames=dictionary_list[0].keys())<line_sep>w.writeheader()<try_stmt><block_start><for_stmt>dic dictionary_list<block_start>w.writerow(dic)<block_end><block_end><except_stmt>ValueError<as>e<block_start><raise>CSVWriteError(f'Failed to write {path} CSV due to {str(e)}')<from>e<block_end><block_end><block_end><class_stmt>CSVExporter(CustomizableDateMixin)<block_start><def_stmt>__init__ self database:'DBHandler' <block_start>super().__init__(database=database)<line_sep>self.reset(start_ts=Timestamp(0) end_ts=Timestamp(0))<block_end><def_stmt>reset self start_ts:Timestamp end_ts:Timestamp<arrow><none><block_start>self.start_ts=start_ts<line_sep>self.end_ts=end_ts<line_sep>self.reload_settings()<try_stmt><block_start>frontend_settings=json.loads(self.settings.frontend_settings)<if_stmt>('explorers'<in>frontend_settings<and>'ETH'<in>frontend_settings['explorers']<and>'transaction'<in>frontend_settings['explorers']['ETH'])<block_start>self.eth_explorer=frontend_settings['explorers']['ETH']['transaction']<block_end><else_stmt><block_start>self.eth_explorer=ETH_EXPLORER<block_end><block_end><except_stmt>(json.decoder.JSONDecodeError KeyError)<block_start>self.eth_explorer=ETH_EXPLORER<block_end><block_end><def_stmt>_add_sumif_formula self check_range:str condition:str sum_range:str actual_value:FVal <arrow>str<block_start><if_stmt>self.settings.pnl_csv_with_formulas<is><false><block_start><return>str(actual_value)<block_end><return>f'=SUMIF({check_range};{condition};{sum_range})'<block_end><def_stmt>_add_pnl_type self event:'ProcessedAccountingEvent' dict_event:Dict[str Any] amount_column:str name:Literal['free' 'taxable'] <arrow><none><block_start>"""Adds the pnl type value and cost basis to the passed dict event"""<if_stmt>getattr(event.pnl name 
ZERO)<eq>ZERO<block_start><return><block_end>index=event.index+CSV_INDEX_OFFSET<line_sep>value_formula=f'{amount_column}{index}*H{index}'<line_sep>total_value_formula=f'(F{index}*H{index}+G{index}*H{index})'# noqa: E501 # formula of both free and taxable
cost_basis_column='K'<if>name<eq>'taxable'<else>'L'<line_sep>cost_basis=f'{cost_basis_column}{index}'<line_sep>should_count_entire_spend_formula=(name<eq>'taxable'<and>event.timestamp<ge>self.start_ts<or>name<eq>'free'<and>event.timestamp<l>self.start_ts)<if_stmt>event.count_entire_amount_spend<and>should_count_entire_spend_formula<block_start>equation=(f'=IF({cost_basis}="",'<concat>f'-{total_value_formula},'<concat>f'-{total_value_formula}+{value_formula}-{cost_basis})')<block_end><else_stmt><block_start>equation=(f'=IF({cost_basis}="",'<concat>f'{value_formula},'<concat>f'{value_formula}-{cost_basis})')<block_end>dict_event[f'pnl_{name}']=equation<line_sep>cost_basis=''<if_stmt>event.cost_basis<is><not><none><block_start><for_stmt>acquisition event.cost_basis.matched_acquisitions<block_start><if_stmt>name<eq>'taxable'<and>acquisition.taxable<is><false><block_start><continue><block_end><if_stmt>name<eq>'free'<and>acquisition.taxable<is><true><block_start><continue><block_end>index=acquisition.event.index+CSV_INDEX_OFFSET<if_stmt>cost_basis<eq>''<block_start>cost_basis='='<block_end><else_stmt><block_start>cost_basis<augadd>'+'<block_end>cost_basis<augadd>f'{str(acquisition.amount)}*H{index}'<block_end><block_end>dict_event[f'cost_basis_{name}']=cost_basis<block_end><def_stmt>_maybe_add_summary self events:List[Dict[str Any]] pnls:PnlTotals<arrow><none><block_start>"""Depending on given settings, adds a few summary lines at the end of
the all events PnL report"""<if_stmt>self.settings.pnl_csv_have_summary<is><false><block_start><return><block_end>length=len(events)+1<line_sep>template:Dict[str Any]={'type':'' 'notes':'' 'location':'' 'timestamp':'' 'asset':'' 'free_amount':'' 'taxable_amount':'' 'price':'' 'pnl_taxable':'' 'cost_basis_taxable':'' 'pnl_free':'' 'cost_basis_free':'' }<line_sep>events.append(template)# separate with 2 new lines
events.append(template)<line_sep>entry=template.copy()<line_sep>entry['taxable_amount']='TAXABLE'<line_sep>entry['price']='FREE'<line_sep>events.append(entry)<line_sep>start_sums_index=length+4<line_sep>sums=0<for_stmt>name,value pnls.items()<block_start><if_stmt>value.taxable<eq>ZERO<and>value.free<eq>ZERO<block_start><continue><block_end>sums<augadd>1<line_sep>entry=template.copy()<line_sep>entry['free_amount']=f'{str(name)} total'<line_sep>entry['taxable_amount']=self._add_sumif_formula(check_range=f'A2:A{length}' condition=f'"{str(name)}"' sum_range=f'I2:I{length}' actual_value=value.taxable )<line_sep>entry['price']=self._add_sumif_formula(check_range=f'A2:A{length}' condition=f'"{str(name)}"' sum_range=f'J2:J{length}' actual_value=value.free )<line_sep>events.append(entry)<block_end>entry=template.copy()<line_sep>entry['free_amount']='TOTAL'<if_stmt>sums<ne>0<block_start>entry['taxable_amount']=f'=SUM(G{start_sums_index}:G{start_sums_index+sums-1})'<line_sep>entry['price']=f'=SUM(H{start_sums_index}:H{start_sums_index+sums-1})'<block_end><else_stmt><block_start>entry['taxable_amount']=entry['price']=0<block_end>events.append(entry)<line_sep>events.append(template)# separate with 2 new lines
events.append(template)<line_sep>version_result=get_current_version(check_for_updates=<false>)<line_sep>entry=template.copy()<line_sep>entry['free_amount']='rotki version'<line_sep>entry['taxable_amount']=version_result.our_version<line_sep>events.append(entry)<for_stmt>setting ACCOUNTING_SETTINGS<block_start>entry=template.copy()<line_sep>entry['free_amount']=setting<line_sep>entry['taxable_amount']=str(getattr(self.settings setting))<line_sep>events.append(entry)<block_end><block_end><def_stmt>create_zip self events:List['ProcessedAccountingEvent'] pnls:PnlTotals <arrow>Tuple[bool str]# TODO: Find a way to properly delete the directory after send is complete
<block_start>dirpath=Path(mkdtemp())<line_sep>success,msg=self.export(events=events pnls=pnls directory=dirpath)<if_stmt><not>success<block_start><return><false> msg<block_end>files:List[Tuple[Path str]]=[(dirpath/FILENAME_ALL_CSV FILENAME_ALL_CSV) ]<with_stmt>ZipFile(file=dirpath/'csv.zip' mode='w' compression=ZIP_DEFLATED)<as>csv_zip<block_start><for_stmt>path,filename files<block_start><if_stmt><not>path.exists()<block_start><continue><block_end>csv_zip.write(path filename)<line_sep>path.unlink()<block_end><block_end>success=<false><line_sep>filename=''<if_stmt>csv_zip.filename<is><not><none><block_start>success=<true><line_sep>filename=csv_zip.filename<block_end><return>success filename<block_end><def_stmt>to_csv_entry self event:'ProcessedAccountingEvent'<arrow>Dict[str Any]<block_start>dict_event=event.to_exported_dict(ts_converter=self.timestamp_to_date eth_explorer=self.eth_explorer for_api=<false> )<line_sep># For CSV also convert timestamp to date
dict_event['timestamp']=self.timestamp_to_date(event.timestamp)<if_stmt>self.settings.pnl_csv_with_formulas<is><false><block_start><return>dict_event<block_end># else add formulas
self._add_pnl_type(event=event dict_event=dict_event amount_column='F' name='free')<line_sep>self._add_pnl_type(event=event dict_event=dict_event amount_column='G' name='taxable')<line_sep><return>dict_event<block_end><def_stmt>export self events:List['ProcessedAccountingEvent'] pnls:PnlTotals directory:Path <arrow>Tuple[bool str]<block_start>serialized_events=[self.to_csv_entry(x)<for>idx,x enumerate(events)]<line_sep>self._maybe_add_summary(events=serialized_events pnls=pnls)<try_stmt><block_start>directory.mkdir(parents=<true> exist_ok=<true>)<line_sep>_dict_to_csv_file(directory/FILENAME_ALL_CSV serialized_events )<block_end><except_stmt>(CSVWriteError PermissionError)<as>e<block_start><return><false> str(e)<block_end><return><true> ''<block_end><block_end> |
# Command-line bin abundance estimator: prints the median RPKM abundance of
# each bin in each sample to STDOUT. The whole RPKM matrix is held in memory.

import sys
import os
import argparse
import numpy as np

parser = argparse.ArgumentParser(
    description="""Command-line bin abundance estimator.
Print the median RPKM abundance for each bin in each sample to STDOUT.
Will read the RPKM file into memory - beware.""",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    add_help=False,
)

parser.add_argument('rpkmpath', help='Path to RPKM file')
parser.add_argument('clusterspath', help='Path to clusters.tsv')
parser.add_argument('headerpath', help='Path to list of headers')

# With no arguments at all, show the usage text instead of an argparse error.
if len(sys.argv) == 1:
    parser.print_help()
    sys.exit()

args = parser.parse_args()

# All three input files must exist before any work is done.
for path in (args.rpkmpath, args.clusterspath, args.headerpath):
    if not os.path.isfile(path):
        raise FileNotFoundError(path)

# Load Vamb from the sibling checkout.
sys.path.append('../vamb')
import vamb

# Map each contig header to its row number in the RPKM matrix.
with open(args.headerpath) as file:
    indexof = {line.strip(): i for i, line in enumerate(file)}

with open(args.clusterspath) as file:
    clusters = vamb.vambtools.read_clusters(file)

# Every header referenced by a cluster must appear in the header list.
for cluster in clusters.values():
    for header in cluster:
        if header not in indexof:
            raise KeyError("Header not found in headerlist: {}".format(header))

# Load RPKM and sanity-check its row count against the header list.
rpkm = vamb.vambtools.read_npz(args.rpkmpath)
nsamples = rpkm.shape[1]

if len(indexof) != len(rpkm):
    raise ValueError("Not the same number of headers as rows in RPKM file")

# Median depth per sample for each bin, one tab-separated line per bin.
for clustername, cluster in clusters.items():
    depths = np.empty((len(cluster), nsamples), dtype=np.float32)
    for row, header in enumerate(cluster):
        depths[row] = rpkm[indexof[header]]
    median_depths = np.median(depths, axis=0)
    print(clustername, end='\t')
    print('\t'.join(str(d) for d in median_depths))
import sys

# f-strings require Python 3.6+; only re-export those tests on supporting
# interpreters so older Pythons do not hit a SyntaxError at import time.
if sys.version_info[:2] >= (3, 6):
    from pony.orm.tests.py36_test_f_strings import *
# Import Python libraries
import os

import numpy as np

# Import distributed framework
from exaqute import *

try:
    init()
except Exception:
    # The distributed runtime may be unavailable (serial run) or already
    # initialised; in either case fall through silently.
    pass

# Number of computing units each auxiliary task requests. Defaults to 1 when
# the environment variable is absent or not an integer.
# NOTE: the original code never imported `os`, so the lookup always raised and
# the default 1 was used unconditionally; with `os` imported the environment
# variable now actually takes effect.
try:
    computing_units_auxiliar_utilities = int(os.environ["computing_units_auxiliar_utilities"])
except (KeyError, ValueError):
    computing_units_auxiliar_utilities = 1


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=7, priority=True)
def UpdateOnePassCentralMomentsAux_Task(sample, old_mean, old_central_moment_1, compute_M1,
                                        old_central_moment_2, compute_M2,
                                        old_central_moment_3, compute_M3,
                                        old_central_moment_4, compute_M4, nsamples):
    """Auxiliary task of StatisticalVariable.UpdateOnePassCentralMoments.

    One-pass (online) update of mean, sample variance and the first four
    central moments with a single new sample.

    input:  sample               : new value updating the statistics
            old_mean             : current mean
            old_central_moment_p : current p-th central moment (p = 1..4)
            compute_Mp           : whether central moment p must be updated
            nsamples             : number of samples seen so far
    output: new_mean, new_sample_variance, new_central_moment_1..4,
            updated nsamples
    """
    # Recover the raw accumulators M_p = n * mu_p from the central moments.
    old_M1 = old_central_moment_1 * nsamples
    old_M2 = old_central_moment_2 * nsamples
    old_M3 = old_central_moment_3 * nsamples
    old_M4 = old_central_moment_4 * nsamples
    nsamples = nsamples + 1
    if nsamples == 1:
        # First sample: the mean is the sample itself, all moments are zero.
        new_mean = sample
        new_M1 = 0.0
        new_M2 = 0.0
        new_sample_variance = 0.0
        new_M3 = 0.0
        new_M4 = 0.0
    else:
        delta = np.subtract(sample, old_mean)
        new_mean = old_mean + np.divide(delta, nsamples)
        # The first central moment is identically zero, so M1 is never updated
        # regardless of compute_M1 (the original had two identical branches).
        new_M1 = old_M1
        if compute_M2:
            new_M2 = old_M2 + delta * np.subtract(sample, new_mean)
        else:
            # M2 is mandatory: the sample variance below depends on it.
            raise Exception("Not computing StatisticalVariable.central_moment_2, set StatisticalVariable.central_moment_2_to_compute to True")
        new_sample_variance = np.divide(new_M2, np.subtract(nsamples, 1))
        if compute_M3:
            new_M3 = (old_M3 - 3.0 * old_M2 * np.divide(delta, nsamples)
                      + np.divide(np.multiply((nsamples - 1) * (nsamples - 2), delta**3),
                                  nsamples**2))
        else:
            new_M3 = old_M3  # not updated
        if compute_M4:
            new_M4 = (old_M4 - 4.0 * old_M3 * np.divide(delta, nsamples)
                      + 6.0 * old_M2 * np.divide(delta, nsamples)**2
                      + np.multiply((nsamples - 1) * (nsamples**2 - 3 * nsamples + 3),
                                    np.divide(delta**4, nsamples**3)))
        else:
            new_M4 = old_M4  # not updated
    new_central_moment_1 = new_M1 / nsamples
    new_central_moment_2 = new_M2 / nsamples
    new_central_moment_3 = new_M3 / nsamples
    new_central_moment_4 = new_M4 / nsamples
    return (new_mean, new_sample_variance, new_central_moment_1, new_central_moment_2,
            new_central_moment_3, new_central_moment_4, nsamples)


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=5, priority=True)
def UpdateOnePassPowerSumsAux_Task(sample, old_S1, old_S2, old_S3, old_S4, nsamples):
    """Auxiliary task of StatisticalVariable.UpdateOnePassPowerSums.

    Adds one sample to the power sums S_p = sum_i Q(sample_i)**p, p = 1..4.

    input:  sample   : new value updating the statistics
            old_S1..old_S4 : current power sums
            nsamples : number of samples seen so far
    output: updated S1..S4 and updated nsamples
    """
    nsamples = nsamples + 1
    if nsamples == 1:
        new_S1 = sample
        new_S2 = sample**2
        new_S3 = sample**3
        new_S4 = sample**4
    else:
        new_S1 = old_S1 + sample
        new_S2 = old_S2 + sample**2
        new_S3 = old_S3 + sample**3
        new_S4 = old_S4 + sample**4
    return new_S1, new_S2, new_S3, new_S4, nsamples


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=5, priority=True)
def UpdateGlobalPowerSumsAux_Task(old_S1, old_S2, old_S3, old_S4, number_samples_level,
                                  add_S1, add_S2, add_S3, add_S4, add_number_samples_level):
    """Auxiliary task of StatisticalVariable.UpdateGlobalPowerSums.

    Merges one batch's power sums (add_S1..add_S4) and its sample count into
    the global power sums of the current level.
    """
    new_S1 = old_S1 + add_S1
    new_S2 = old_S2 + add_S2
    new_S3 = old_S3 + add_S3
    new_S4 = old_S4 + add_S4
    number_samples_level = number_samples_level + add_number_samples_level
    return new_S1, new_S2, new_S3, new_S4, number_samples_level


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=4, priority=True)
def UnfoldValuesAux_Task(sample):
    """Unfold the four components of a list; needed by PyCOMPSs for lists of lists."""
    return sample[0], sample[1], sample[2], sample[3]


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=1, priority=True)
def UpdateBatchesPassPowerSumsAux_Task(*samples):
    """Auxiliary task of StatisticalVariable.UpdateBatchesPassPowerSum.

    Element-wise sum of the given samples (each presumably holding the four
    power-sum components — TODO confirm the per-sample layout with callers).
    A large block of dead commented-out code (an unreachable string literal
    after the return) was removed from the original body.
    """
    samples_list = np.array(list(samples))
    return np.sum(samples_list, axis=0)


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=4, priority=True)
def ComputeHStatisticsAux_Task(S1_level, S2_level, S3_level, S4_level, number_samples_level):
    """Auxiliary task of StatisticalVariable.ComputeHStatistics.

    Computes the h-statistics h_1..h_4 (unbiased central moment estimators
    with minimal variance) from the power sums of one level.
    Requires number_samples_level >= 4, otherwise a denominator is zero.
    """
    n = number_samples_level
    h1_level = S1_level / n
    h2_level = (n * S2_level - S1_level**2) / ((n - 1) * n)
    h3_level = (n**2 * S3_level - 3 * n * S2_level * S1_level + 2 * S1_level**3) / \
        ((n - 2) * (n - 1) * n)
    h4_level = ((-4 * n**2 + 8 * n - 12) * S3_level * S1_level
                + (n**3 - 2 * n**2 + 3 * n) * S4_level
                + 6 * n * S2_level * S1_level**2
                + (9 - 6 * n) * S2_level**2
                - 3 * S1_level**4) / ((n - 3) * (n - 2) * (n - 1) * n)
    return h1_level, h2_level, h3_level, h4_level


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=2, priority=True)
def ComputeSkewnessKurtosisAux_Task(h2_level, h3_level, h4_level):
    """Skewness = h3 / sqrt(h2**3) and kurtosis = h4 / h2**2 for one level."""
    skewness_level = h3_level / (np.sqrt(h2_level**3))
    kurtosis_level = h4_level / (h2_level**2)
    return skewness_level, kurtosis_level


@constraint(computing_units=computing_units_auxiliar_utilities)
@task(keep=True, returns=5, priority=True)
def ComputeSampleCentralMomentsFromScratchAux_Task(
        number_samples_level,
        central_moment_from_scratch_1_to_compute,
        central_moment_from_scratch_2_to_compute,
        central_moment_from_scratch_3_to_compute,
        central_moment_from_scratch_3_absolute_to_compute,
        central_moment_from_scratch_4_to_compute,
        central_moment_from_scratch_1,
        central_moment_from_scratch_2,
        central_moment_from_scratch_3,
        central_moment_from_scratch_3_absolute,
        central_moment_from_scratch_4,
        samples):
    """Auxiliary task of StatisticalVariable.ComputeSampleCentralMomentsFromScratch.

    Recomputes the requested central moments (and the absolute third central
    moment) from the full list of samples in two passes.

    input:  number_samples_level : number of samples of the level
            *_to_compute         : flags selecting which moments to compute
            central_moment_*     : accumulators, passed in initialised to 0.0
            samples              : list of lists of sample values
    output: the five (possibly unchanged) central moments
    """
    # Flatten the list of lists into a single list of samples.
    samples = [item for sublist in samples for item in sublist]
    # First pass: the mean.
    auxiliary_mean = 0.0
    for sample in samples:
        auxiliary_mean = auxiliary_mean + sample
    curr_mean = auxiliary_mean / number_samples_level
    # Second pass: accumulate only the requested central moments.
    for sample in samples:
        if central_moment_from_scratch_1_to_compute:
            central_moment_from_scratch_1 = central_moment_from_scratch_1 + \
                ((sample - curr_mean)**1) / number_samples_level
        if central_moment_from_scratch_2_to_compute:
            central_moment_from_scratch_2 = central_moment_from_scratch_2 + \
                ((sample - curr_mean)**2) / number_samples_level
        if central_moment_from_scratch_3_to_compute:
            central_moment_from_scratch_3 = central_moment_from_scratch_3 + \
                ((sample - curr_mean)**3) / number_samples_level
        if central_moment_from_scratch_3_absolute_to_compute:
            central_moment_from_scratch_3_absolute = central_moment_from_scratch_3_absolute + \
                (np.abs(sample - curr_mean)**3) / number_samples_level
        if central_moment_from_scratch_4_to_compute:
            central_moment_from_scratch_4 = central_moment_from_scratch_4 + \
                ((sample - curr_mean)**4) / number_samples_level
    return (central_moment_from_scratch_1, central_moment_from_scratch_2,
            central_moment_from_scratch_3, central_moment_from_scratch_3_absolute,
            central_moment_from_scratch_4)


class StatisticalVariable(object):
    """The base class for statistical variables.

    Holds per-level sample values, one-pass moment accumulators, power sums
    (global and per batch), h-statistics and the derived skewness/kurtosis.
    """

    def __init__(self):
        """Constructor: declare all containers empty / unset."""
        # values of the variable, organized per level (and per batch after
        # InitializeLists — see that method)
        self.values = []
        # mean of the variable per each level
        self.raw_moment_1 = []
        # sample variance of the variable per each level
        self.unbiased_central_moment_2 = []
        # central moments mu_p per level (internally M_p = n * mu_p)
        self.central_moment_1 = []
        self.central_moment_2 = []
        self.central_moment_3 = []
        self.central_moment_4 = []
        # which central moments are updated (moment 2 is mandatory because it
        # is exploited in the mean evaluation)
        self.central_moment_1_to_compute = True
        self.central_moment_2_to_compute = True
        self.central_moment_3_to_compute = True
        self.central_moment_4_to_compute = True
        # bias error of the variable
        self.bias_error = None
        # statistical error of the variable
        self.statistical_error = None
        # type of variable: scalar or field
        self.type = None
        # number of samples of the variable, per level
        self.number_samples = None
        self.batches_number_samples = []
        # global power sums S_p = sum_{i=1}^{n} Q(sample_i)**p, per level
        self.power_sum_1 = []
        self.power_sum_2 = []
        self.power_sum_3 = []
        self.power_sum_4 = []
        # per-batch power sums
        self.power_sum_batches_1 = []
        self.power_sum_batches_2 = []
        self.power_sum_batches_3 = []
        self.power_sum_batches_4 = []
        # sample central moments mu_p recomputed from scratch, per level
        self.central_moment_from_scratch_1 = []
        self.central_moment_from_scratch_2 = []
        self.central_moment_from_scratch_3 = []
        # mu_3_abs = sum_i |Q(sample_i) - mean|**3 / n
        self.central_moment_from_scratch_3_absolute = []
        self.central_moment_from_scratch_4 = []
        self.central_moment_from_scratch_1_to_compute = False
        self.central_moment_from_scratch_2_to_compute = False
        self.central_moment_from_scratch_3_to_compute = False
        self.central_moment_from_scratch_3_absolute_to_compute = False
        self.central_moment_from_scratch_4_to_compute = False
        # h-statistics h_p, the minimal-variance unbiased central moment
        # estimators, per level
        self.h_statistics_1 = []
        self.h_statistics_2 = []
        self.h_statistics_3 = []
        self.h_statistics_4 = []
        self.h_statistics_computed = False
        # skewness of the variable per each level
        self.skewness = []
        # kurtosis of the variable per each level
        self.kurtosis = []
        # convergence criteria of the algorithm
        self.convergence_criteria = None

    def InitializeLists(self, number_levels, number_initial_batches):
        """Size all per-level / per-batch containers.

        input: number_levels          : number of levels considered
               number_initial_batches : number of batches of iteration zero
        """
        levels = range(number_levels)
        batches = range(number_initial_batches)
        self.number_samples = [0 for _ in levels]
        self.values = [[[] for _ in levels] for _ in batches]
        self.raw_moment_1 = [[] for _ in levels]
        self.central_moment_1 = [[] for _ in levels]
        self.central_moment_2 = [[] for _ in levels]
        self.central_moment_3 = [[] for _ in levels]
        self.central_moment_4 = [[] for _ in levels]
        self.unbiased_central_moment_2 = [[] for _ in levels]
        self.power_sum_1 = [0 for _ in levels]
        self.power_sum_2 = [0 for _ in levels]
        self.power_sum_3 = [0 for _ in levels]
        self.power_sum_4 = [0 for _ in levels]
        self.power_sum_batches_1 = [[[] for _ in levels] for _ in batches]
        self.power_sum_batches_2 = [[[] for _ in levels] for _ in batches]
        self.power_sum_batches_3 = [[[] for _ in levels] for _ in batches]
        self.power_sum_batches_4 = [[[] for _ in levels] for _ in batches]
        self.h_statistics_1 = [[] for _ in levels]
        self.h_statistics_2 = [[] for _ in levels]
        self.h_statistics_3 = [[] for _ in levels]
        self.h_statistics_4 = [[] for _ in levels]
        self.skewness = [[] for _ in levels]
        self.kurtosis = [[] for _ in levels]
        self.central_moment_from_scratch_1 = [[] for _ in levels]
        self.central_moment_from_scratch_2 = [[] for _ in levels]
        self.central_moment_from_scratch_3 = [[] for _ in levels]
        self.central_moment_from_scratch_3_absolute = [[] for _ in levels]
        self.central_moment_from_scratch_4 = [[] for _ in levels]
        self.batches_number_samples = [[0 for _ in levels] for _ in batches]

    def UpdateOnePassCentralMoments(self, level, i_sample):
        """One-pass update of mean/variance/central moments of *level* with
        sample *i_sample*.

        NOTE(review): this indexes self.values[level][i_sample] while
        UpdateBatchesPassPowerSum indexes self.values[batch][level]; the two
        layouts differ — confirm which callers use which path.
        """
        number_samples_level = self.number_samples[level]
        sample = self.values[level][i_sample]
        old_mean = self.raw_moment_1[level]
        old_central_moment_1 = self.central_moment_1[level]
        compute_M1 = self.central_moment_1_to_compute
        old_central_moment_2 = self.central_moment_2[level]
        compute_M2 = self.central_moment_2_to_compute
        old_central_moment_3 = self.central_moment_3[level]
        compute_M3 = self.central_moment_3_to_compute
        old_central_moment_4 = self.central_moment_4[level]
        compute_M4 = self.central_moment_4_to_compute
        (new_mean, new_sample_variance, new_central_moment_1, new_central_moment_2,
         new_central_moment_3, new_central_moment_4, number_samples_level) = \
            UpdateOnePassCentralMomentsAux_Task(
                sample, old_mean, old_central_moment_1, compute_M1,
                old_central_moment_2, compute_M2, old_central_moment_3, compute_M3,
                old_central_moment_4, compute_M4, number_samples_level)
        self.raw_moment_1[level] = new_mean
        self.unbiased_central_moment_2[level] = new_sample_variance
        self.central_moment_1[level] = new_central_moment_1
        self.central_moment_2[level] = new_central_moment_2
        self.central_moment_3[level] = new_central_moment_3
        self.central_moment_4[level] = new_central_moment_4
        self.number_samples[level] = number_samples_level

    def UpdateOnePassPowerSums(self, level, i_sample):
        """One-pass update of the power sums S_1..S_4 of *level* with sample
        *i_sample*."""
        sample = self.values[level][i_sample]
        old_S1 = self.power_sum_1[level]
        old_S2 = self.power_sum_2[level]
        old_S3 = self.power_sum_3[level]
        old_S4 = self.power_sum_4[level]
        number_samples_level = self.number_samples[level]
        new_S1, new_S2, new_S3, new_S4, number_samples_level = \
            UpdateOnePassPowerSumsAux_Task(
                sample, old_S1, old_S2, old_S3, old_S4, number_samples_level)
        self.power_sum_1[level] = new_S1
        self.power_sum_2[level] = new_S2
        self.power_sum_3[level] = new_S3
        self.power_sum_4[level] = new_S4
        self.number_samples[level] = number_samples_level

    def UpdateGlobalPowerSums(self, level, batch_counter):
        """Merge the power sums of batch *batch_counter* into the global power
        sums of *level*."""
        old_S1 = self.power_sum_1[level]
        old_S2 = self.power_sum_2[level]
        old_S3 = self.power_sum_3[level]
        old_S4 = self.power_sum_4[level]
        number_samples_level = self.number_samples[level]
        add_S1 = self.power_sum_batches_1[batch_counter][level]
        add_S2 = self.power_sum_batches_2[batch_counter][level]
        add_S3 = self.power_sum_batches_3[batch_counter][level]
        add_S4 = self.power_sum_batches_4[batch_counter][level]
        add_number_samples_level = self.batches_number_samples[batch_counter][level]
        new_S1, new_S2, new_S3, new_S4, number_samples_level = \
            UpdateGlobalPowerSumsAux_Task(
                old_S1, old_S2, old_S3, old_S4, number_samples_level,
                add_S1, add_S2, add_S3, add_S4, add_number_samples_level)
        self.power_sum_1[level] = new_S1
        self.power_sum_2[level] = new_S2
        self.power_sum_3[level] = new_S3
        self.power_sum_4[level] = new_S4
        self.number_samples[level] = number_samples_level

    def UpdateBatchesPassPowerSum(self, level, batch_counter, mini_batch=50):
        """Reduce the samples of one batch/level into its four power sums,
        combining at most *mini_batch* entries per auxiliary task.

        Slicing copies the sample list, so self.values itself is not mutated.
        """
        samples = self.values[batch_counter][level]
        # Tree-reduce: fold mini_batch samples at a time until one remains.
        while len(samples) > 1:
            mini_batches_samples = samples[:mini_batch]
            samples = samples[mini_batch:]
            new_power_sums = UpdateBatchesPassPowerSumsAux_Task(*mini_batches_samples)
            samples.append(new_power_sums)
        new_S1, new_S2, new_S3, new_S4 = UnfoldValuesAux_Task(samples[0])
        self.power_sum_batches_1[batch_counter][level] = new_S1
        self.power_sum_batches_2[batch_counter][level] = new_S2
        self.power_sum_batches_3[batch_counter][level] = new_S3
        self.power_sum_batches_4[batch_counter][level] = new_S4

    def ComputeHStatistics(self, level):
        """Compute the h-statistics h_1..h_4 of *level* from its global power
        sums and flag them as available."""
        number_samples_level = self.number_samples[level]
        S1_level = self.power_sum_1[level]
        S2_level = self.power_sum_2[level]
        S3_level = self.power_sum_3[level]
        S4_level = self.power_sum_4[level]
        self.h_statistics_computed = True
        h1_level, h2_level, h3_level, h4_level = ComputeHStatisticsAux_Task(
            S1_level, S2_level, S3_level, S4_level, number_samples_level)
        self.h_statistics_1[level] = h1_level
        self.h_statistics_2[level] = h2_level
        self.h_statistics_3[level] = h3_level
        self.h_statistics_4[level] = h4_level

    def ComputeSampleCentralMomentsFromScratch(self, level, number_samples_level):
        """Recompute the central moments (and absolute third central moment)
        of *level* from all stored samples across batches."""
        # Initialise the accumulators handed to the auxiliary task.
        central_moment_from_scratch_1 = 0.0
        central_moment_from_scratch_2 = 0.0
        central_moment_from_scratch_3 = 0.0
        central_moment_from_scratch_3_absolute = 0.0
        central_moment_from_scratch_4 = 0.0
        # Collect the samples of this level from every batch.
        samples = []
        for batch in range(len(self.values)):
            for mini_batch_samples in self.values[batch][level]:
                samples.append(mini_batch_samples)
        (central_moment_from_scratch_1, central_moment_from_scratch_2,
         central_moment_from_scratch_3, central_moment_from_scratch_3_absolute,
         central_moment_from_scratch_4) = ComputeSampleCentralMomentsFromScratchAux_Task(
            number_samples_level,
            self.central_moment_from_scratch_1_to_compute,
            self.central_moment_from_scratch_2_to_compute,
            self.central_moment_from_scratch_3_to_compute,
            self.central_moment_from_scratch_3_absolute_to_compute,
            self.central_moment_from_scratch_4_to_compute,
            central_moment_from_scratch_1, central_moment_from_scratch_2,
            central_moment_from_scratch_3, central_moment_from_scratch_3_absolute,
            central_moment_from_scratch_4, samples)
        self.central_moment_from_scratch_1[level] = central_moment_from_scratch_1
        self.central_moment_from_scratch_2[level] = central_moment_from_scratch_2
        self.central_moment_from_scratch_3[level] = central_moment_from_scratch_3
        self.central_moment_from_scratch_3_absolute[level] = central_moment_from_scratch_3_absolute
        self.central_moment_from_scratch_4[level] = central_moment_from_scratch_4

    def ComputeSkewnessKurtosis(self, level):
        """Derive skewness = mu_3 / sqrt(mu_2**3) and kurtosis = mu_4 / mu_2**2
        of *level* from previously computed h-statistics (no-op otherwise)."""
        if self.h_statistics_computed:
            h2_level = self.h_statistics_2[level]
            h3_level = self.h_statistics_3[level]
            h4_level = self.h_statistics_4[level]
            skewness_level, kurtosis_level = ComputeSkewnessKurtosisAux_Task(
                h2_level, h3_level, h4_level)
            self.skewness[level] = skewness_level
            self.kurtosis[level] = kurtosis_level
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# ------------------------------------------------------------------------------
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>yacs.config CfgNode<as>CN<line_sep>_C=CN()<line_sep>_C.OUTPUT_DIR="output"# This will be the base directory for all output, such as logs and saved models
_C.LOG_DIR=""# This will be a subdirectory inside OUTPUT_DIR
_C.GPUS=(0 )<line_sep>_C.WORKERS=4<line_sep>_C.PRINT_FREQ=20<line_sep>_C.AUTO_RESUME=<false><line_sep>_C.PIN_MEMORY=<true><line_sep>_C.LOG_CONFIG="logging.conf"<line_sep>_C.SEED=42<line_sep>_C.OPENCV_BORDER_CONSTANT=0<line_sep># size of voxel cube: WINDOW_SIZE x WINDOW_SIZE x WINDOW_SIZE; used for 3D models only
_C.WINDOW_SIZE=65<line_sep># Cudnn related params
_C.CUDNN=CN()<line_sep>_C.CUDNN.BENCHMARK=<true><line_sep>_C.CUDNN.DETERMINISTIC=<false><line_sep>_C.CUDNN.ENABLED=<true><line_sep># DATASET related params
_C.DATASET=CN()<line_sep>_C.DATASET.ROOT=""<line_sep>_C.DATASET.NUM_CLASSES=7<line_sep>_C.DATASET.CLASS_WEIGHTS=[0.02630481 0.05448931 0.0811898 0.01866496 0.15868563 0.0875993 0.5730662 ]<line_sep>_C.DATASET.INLINE_HEIGHT=1501<line_sep>_C.DATASET.INLINE_WIDTH=481<line_sep># common params for NETWORK
_C.MODEL=CN()<line_sep>_C.MODEL.NAME="resnet_unet"<line_sep>_C.MODEL.IN_CHANNELS=1<line_sep>_C.MODEL.PRETRAINED=""<line_sep>_C.MODEL.EXTRA=CN(new_allowed=<true>)<line_sep># training
_C.TRAIN=CN()<line_sep>_C.TRAIN.COMPLETE_PATCHES_ONLY=<true><line_sep>_C.TRAIN.MIN_LR=0.001<line_sep>_C.TRAIN.MAX_LR=0.01<line_sep>_C.TRAIN.MOMENTUM=0.9<line_sep>_C.TRAIN.BEGIN_EPOCH=0<line_sep>_C.TRAIN.END_EPOCH=300<line_sep>_C.TRAIN.BATCH_SIZE_PER_GPU=32<line_sep>_C.TRAIN.WEIGHT_DECAY=0.0001<line_sep>_C.TRAIN.SNAPSHOTS=5<line_sep>_C.TRAIN.MODEL_DIR="models"# This will be a subdirectory inside OUTPUT_DIR
_C.TRAIN.AUGMENTATION=<true><line_sep>_C.TRAIN.STRIDE=64<line_sep>_C.TRAIN.PATCH_SIZE=128<line_sep>_C.TRAIN.MEAN=[-0.0001777 0.49 -0.0000688]# 0.0009996710808862074
_C.TRAIN.STD=[0.14076 0.2717 0.06286]# 0.20976548783479299
_C.TRAIN.MAX=1<line_sep>_C.TRAIN.DEPTH="patch"# Options are none, patch, and section
# None adds no depth information and the num of channels remains at 1
# Patch adds depth per patch so is simply the height of that patch from 0 to 1, channels=3
# Section adds depth per section so contains depth information for the whole section, channels=3
_C.TRAIN.AUGMENTATIONS=CN()<line_sep>_C.TRAIN.AUGMENTATIONS.RESIZE=CN()<line_sep>_C.TRAIN.AUGMENTATIONS.RESIZE.HEIGHT=256<line_sep>_C.TRAIN.AUGMENTATIONS.RESIZE.WIDTH=256<line_sep>_C.TRAIN.AUGMENTATIONS.PAD=CN()<line_sep>_C.TRAIN.AUGMENTATIONS.PAD.HEIGHT=256<line_sep>_C.TRAIN.AUGMENTATIONS.PAD.WIDTH=256<line_sep># validation
_C.VALIDATION=CN()<line_sep>_C.VALIDATION.BATCH_SIZE_PER_GPU=32<line_sep>_C.VALIDATION.COMPLETE_PATCHES_ONLY=<true><line_sep># TEST
_C.TEST=CN()<line_sep>_C.TEST.MODEL_PATH=""<line_sep>_C.TEST.COMPLETE_PATCHES_ONLY=<true><line_sep>_C.TEST.AUGMENTATIONS=CN()<line_sep>_C.TEST.AUGMENTATIONS.RESIZE=CN()<line_sep>_C.TEST.AUGMENTATIONS.RESIZE.HEIGHT=256<line_sep>_C.TEST.AUGMENTATIONS.RESIZE.WIDTH=256<line_sep>_C.TEST.AUGMENTATIONS.PAD=CN()<line_sep>_C.TEST.AUGMENTATIONS.PAD.HEIGHT=256<line_sep>_C.TEST.AUGMENTATIONS.PAD.WIDTH=256<def_stmt>update_config cfg options=<none> config_file=<none><block_start>cfg.defrost()<if_stmt>config_file<block_start>cfg.merge_from_file(config_file)<block_end><if_stmt>options<block_start>cfg.merge_from_list(options)<block_end>cfg.freeze()<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>sys<with_stmt>open(sys.argv[1] "w")<as>f<block_start>print(_C file=f)<block_end><block_end> |
<import_from_stmt>typing TypedDict<import_from_stmt>backend.common.sitevars.sitevar Sitevar<class_stmt>ContentType(TypedDict)<block_start>secret_key:str<block_end><class_stmt>FlaskSecrets(Sitevar[ContentType])<block_start>DEFAULT_SECRET_KEY:str="thebluealliance"<line_sep>@staticmethod<def_stmt>key <arrow>str<block_start><return>"flask.secrets"<block_end>@staticmethod<def_stmt>description <arrow>str<block_start><return>"Secret key for Flask session"<block_end>@staticmethod<def_stmt>default_value <arrow>ContentType<block_start><return>ContentType(secret_key=FlaskSecrets.DEFAULT_SECRET_KEY)<block_end>@classmethod<def_stmt>secret_key cls<arrow>str<block_start>secret_key=cls.get().get("secret_key")<line_sep><return>secret_key<if>secret_key<else>FlaskSecrets.DEFAULT_SECRET_KEY<block_end><block_end> |
<import_stmt>BboxToolkit<as>bt<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>. nms_rotated_ext<def_stmt>obb2hbb obboxes<block_start>center,w,h,theta=torch.split(obboxes [2 1 1 1] dim=1)<line_sep>Cos,Sin=torch.cos(theta) torch.sin(theta)<line_sep>x_bias=torch.abs(w/2<times>Cos)+torch.abs(h/2<times>Sin)<line_sep>y_bias=torch.abs(w/2<times>Sin)+torch.abs(h/2<times>Cos)<line_sep>bias=torch.cat([x_bias y_bias] dim=1)<line_sep><return>torch.cat([center-bias center+bias] dim=1)<block_end><def_stmt>obb_nms dets iou_thr device_id=<none><block_start><if_stmt>isinstance(dets torch.Tensor)<block_start>is_numpy=<false><line_sep>dets_th=dets<block_end><elif_stmt>isinstance(dets np.ndarray)<block_start>is_numpy=<true><line_sep>device='cpu'<if>device_id<is><none><else>f'cuda:{device_id}'<line_sep>dets_th=torch.from_numpy(dets).to(device)<block_end><else_stmt><block_start><raise>TypeError('dets must be eithr a Tensor or numpy array, '<concat>f'but got {type(dets)}')<block_end><if_stmt>dets_th.numel()<eq>0<block_start>inds=dets_th.new_zeros(0 dtype=torch.int64)<block_end><else_stmt># same bug will happen when bboxes is too small
<block_start>too_small=dets_th[: [2 3]].min(1)[0]<l>0.001<if_stmt>too_small.all()<block_start>inds=dets_th.new_zeros(0 dtype=torch.int64)<block_end><else_stmt><block_start>ori_inds=torch.arange(dets_th.size(0))<line_sep>ori_inds=ori_inds[~too_small]<line_sep>dets_th=dets_th[~too_small]<line_sep>bboxes,scores=dets_th[: :5] dets_th[: 5]<line_sep>inds=nms_rotated_ext.nms_rotated(bboxes scores iou_thr)<line_sep>inds=ori_inds[inds]<block_end><block_end><if_stmt>is_numpy<block_start>inds=inds.cpu().numpy()<block_end><return>dets[inds :] inds<block_end><def_stmt>poly_nms dets iou_thr device_id=<none><block_start><if_stmt>isinstance(dets torch.Tensor)<block_start>is_numpy=<false><line_sep>dets_th=dets<block_end><elif_stmt>isinstance(dets np.ndarray)<block_start>is_numpy=<true><line_sep>device='cpu'<if>device_id<is><none><else>f'cuda:{device_id}'<line_sep>dets_th=torch.from_numpy(dets).to(device)<block_end><else_stmt><block_start><raise>TypeError('dets must be eithr a Tensor or numpy array, '<concat>f'but got {type(dets)}')<block_end><if_stmt>dets_th.device<eq>torch.device('cpu')<block_start><raise>NotImplementedError<block_end>inds=nms_rotated_ext.nms_poly(dets_th.float() iou_thr)<if_stmt>is_numpy<block_start>inds=inds.cpu().numpy()<block_end><return>dets[inds :] inds<block_end><def_stmt>BT_nms dets iou_thr device_id=<none><block_start><if_stmt>isinstance(dets torch.Tensor)<block_start>is_tensor=<true><line_sep>device=dets.device<line_sep>dets_np=dets.cpu().numpy()<block_end><elif_stmt>isinstance(dets np.ndarray)<block_start>is_tensor=<false><line_sep>dets_np=dets<block_end><else_stmt><block_start><raise>TypeError('dets must be eithr a Tensor or numpy array, '<concat>f'but got {type(dets)}')<block_end>bboxes,scores=dets_np[: :-1] dets_np[: -1]<line_sep>inds=bt.bbox_nms(bboxes scores iou_thr=iou_thr score_thr=0)<if_stmt>is_tensor<block_start>inds=torch.from_numpy(inds).to(device)<block_end><return>dets[inds :] inds<block_end><def_stmt>arb_batched_nms bboxes scores inds 
nms_cfg class_agnostic=<false><block_start>nms_cfg_=nms_cfg.copy()<line_sep>class_agnostic=nms_cfg_.pop('class_agnostic' class_agnostic)<if_stmt>class_agnostic<block_start>bboxes_for_nms=bboxes<block_end><else_stmt><block_start>hbboxes=obb2hbb(bboxes)<if>bboxes.size(-1)<eq>5<else>bboxes<line_sep>max_coordinate=hbboxes.max()-hbboxes.min()<line_sep>offsets=inds.to(bboxes)<times>(max_coordinate+1)<if_stmt>bboxes.size(-1)<eq>5<block_start>bboxes_for_nms=bboxes.clone()<line_sep>bboxes_for_nms[: :2]=bboxes_for_nms[: :2]+offsets[: <none>]<block_end><else_stmt><block_start>bboxes_for_nms=bboxes+offsets[: <none>]<block_end><block_end>nms_type=nms_cfg_.pop('type' 'BT_nms')<try_stmt><block_start>nms_op=eval(nms_type)<block_end><except_stmt>NameError<block_start><import_from_stmt>..nms nms_wrapper<line_sep>nms_op=getattr(nms_wrapper nms_type)<block_end>dets,keep=nms_op(torch.cat([bboxes_for_nms scores[: <none>]] -1) **nms_cfg_)<line_sep>bboxes=bboxes[keep]<line_sep>scores=dets[: -1]<line_sep><return>torch.cat([bboxes scores[: <none>]] -1) keep<block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.