content stringlengths 0 1.55M |
|---|
<def_stmt>test_evens <block_start><yield>check_even_cls<block_end><class_stmt>Test(object)<block_start><def_stmt>test_evens self<block_start><yield>check_even_cls<block_end><block_end><class_stmt>Check(object)<block_start><def_stmt>__call__ self<block_start><pass><block_end><block_end>check_even_cls=Check()<line_sep> |
<import_stmt>time<import_from_stmt>..base order<as>od<import_from_stmt>.api BybitApi<class_stmt>BybitOrderManager(od.OrderManagerBase)<block_start><def_stmt>__init__ self api ws=<none> retention=60<block_start>super().__init__(api ws retention)<line_sep>self.ws.subscribe('execution' self.__on_events <true>)<line_sep>self.ws.subscribe('position' self.__on_events <true>)<line_sep>self.ws.subscribe('order' self.__on_events <true>)<block_end><def_stmt>_generate_order_object self e<block_start>info=e.info<if_stmt>e.type<ne>od.EVENT_OPEN<block_start>self.log.warning(f'event for unknown order: {e}')<line_sep><return><none><block_end>api=BybitApi.ccxt_instance()<line_sep>symbol=api.markets_by_id[info['symbol']]['symbol']<line_sep><return>od.Order(symbol info['order_type'].lower() info['side'].lower() info['qty'] float(info['price']))<block_end><def_stmt>__on_events self msg<block_start>topic=msg['topic']<for_stmt>e msg['data']<block_start>oe=od.OrderEvent()<line_sep>oe.info=e<line_sep>oe.ts=time.time()<if_stmt>topic<eq>'order'<block_start>oe.id=e['order_id']<line_sep>st=e['order_status']<if_stmt>st<eq>'New'<block_start>oe.type=od.EVENT_OPEN<block_end><elif_stmt>st<eq>'Filled'<block_start>oe.type=od.EVENT_CLOSE<block_end><elif_stmt>st<in>['Cancelled' 'Rejected']<block_start>oe.type=od.EVENT_CANCEL<block_end><else_stmt># ignore(PartiallyFilled, Created, PendingCancel)
<block_start><continue><block_end><block_end><elif_stmt>topic<eq>'execution'<block_start>oe.type=od.EVENT_EXECUTION<line_sep>oe.id=e['order_id']<line_sep>oe.price=float(e['price'])<line_sep>size=e['exec_qty']<line_sep>oe.size=-size<if>e['side']<eq>'Sell'<else>size<line_sep>oe.fee=float(e['exec_fee'])<times>size<block_end><elif_stmt>topic<eq>'position'<block_start><break><block_end><else_stmt><block_start><assert_stmt><false><block_end>self._handle_order_event(oe)<block_end><block_end><block_end><class_stmt>BybitPositionGroup(od.PositionGroupBase)<block_start>INVERSE=<true><block_end><class_stmt>BybitOrderGroup(od.OrderGroupBase)<block_start>PositionGroup=BybitPositionGroup<block_end><class_stmt>BybitOrderGroupManager(od.OrderGroupManagerBase)<block_start>OrderGroup=BybitOrderGroup<block_end># Future
<class_stmt>BybitUsdtOrderManager(BybitOrderManager)<block_start><pass><block_end><class_stmt>BybitUsdtPositionGroup(BybitPositionGroup)<block_start>INVERSE=<false><block_end><class_stmt>BybitUsdtOrderGroup(BybitOrderGroup)<block_start>PositionGroup=BybitUsdtPositionGroup<block_end><class_stmt>BybitUsdtOrderGroupManager(BybitOrderGroupManager)<block_start>OrderGroup=BybitUsdtOrderGroup<block_end> |
<import_stmt>abc<import_stmt>numbers<import_from_stmt>typing Union<import_stmt>numpy<as>np<import_from_stmt>river base optim utils<line_sep>VectorLike=Union[utils.VectorDict np.ndarray]<line_sep>__all__=["Initializer" "Scheduler" "Optimizer" "Loss"]<class_stmt>Initializer(base.Base abc.ABC)<block_start>"""An initializer is used to set initial weights in a model."""<line_sep>@abc.abstractmethod<def_stmt>__call__ self shape=1<block_start>"""Returns a fresh set of weights.
Parameters
----------
shape
Indicates how many weights to return. If `1`, then a single scalar value will be
returned.
"""<block_end><block_end><class_stmt>Scheduler(base.Base abc.ABC)<block_start>"""Can be used to program the learning rate schedule of an `optim.base.Optimizer`."""<line_sep>@abc.abstractmethod<def_stmt>get self t:int<arrow>float<block_start>"""Returns the learning rate at a given iteration.
Parameters
----------
t
The iteration number.
"""<block_end><def_stmt>__repr__ self<block_start><return>f"{self.__class__.__name__}({vars(self)})"<block_end><block_end><class_stmt>Optimizer(base.Base)<block_start>"""Optimizer interface.
Every optimizer inherits from this base interface.
Parameters
----------
lr
Attributes
----------
learning_rate : float
Returns the current learning rate value.
"""<def_stmt>__init__ self lr:Union[Scheduler float]<block_start><if_stmt>isinstance(lr numbers.Number)<block_start>lr=optim.schedulers.Constant(lr)<block_end>self.lr=lr<line_sep>self.n_iterations=0<block_end>@property<def_stmt>learning_rate self<arrow>float<block_start><return>self.lr.get(self.n_iterations)<block_end><def_stmt>look_ahead self w:dict<arrow>dict<block_start>"""Updates a weight vector before a prediction is made.
Parameters:
w (dict): A dictionary of weight parameters. The weights are modified in-place.
Returns:
The updated weights.
"""<line_sep><return>w<block_end><def_stmt>_step_with_dict self w:dict g:dict<arrow>dict<block_start><raise>NotImplementedError<block_end><def_stmt>_step_with_vector self w:VectorLike g:VectorLike<arrow>VectorLike<block_start><raise>NotImplementedError<block_end><def_stmt>step self w:Union[dict VectorLike] g:Union[dict VectorLike]<arrow>Union[dict VectorLike]<block_start>"""Updates a weight vector given a gradient.
Parameters
----------
w
A vector-like object containing weights. The weights are modified in-place.
g
A vector-like object of gradients.
Returns
-------
The updated weights.
"""<if_stmt>isinstance(w VectorLike.__args__)<and>isinstance(g VectorLike.__args__)<block_start><try_stmt><block_start>w=self._step_with_vector(w g)<line_sep>self.n_iterations<augadd>1<line_sep><return>w<block_end><except_stmt>NotImplementedError<block_start><pass><block_end><block_end>w=self._step_with_dict(w g)<line_sep>self.n_iterations<augadd>1<line_sep><return>w<block_end><def_stmt>__repr__ self<block_start><return>f"{self.__class__.__name__}({vars(self)})"<block_end><block_end><class_stmt>Loss(base.Base abc.ABC)<block_start>"""Base class for all loss functions."""<def_stmt>__repr__ self<block_start><return>f"{self.__class__.__name__}({vars(self)})"<block_end>@abc.abstractmethod<def_stmt>__call__ self y_true y_pred<block_start>"""Returns the loss.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The loss(es).
"""<block_end>@abc.abstractmethod<def_stmt>gradient self y_true y_pred<block_start>"""Return the gradient with respect to y_pred.
Parameters
----------
y_true
Ground truth(s).
y_pred
Prediction(s).
Returns
-------
The gradient(s).
"""<block_end>@abc.abstractmethod<def_stmt>mean_func self y_pred<block_start>"""Mean function.
This is the inverse of the link function. Typically, a loss function takes as input the raw
output of a model. In the case of classification, the raw output would be logits. The mean
function can be used to convert the raw output into a value that makes sense to the user,
such as a probability.
Parameters
----------
y_pred
Raw prediction(s).
Returns
-------
The adjusted prediction(s).
References
----------
[^1]: [Wikipedia section on link and mean function](https://www.wikiwand.com/en/Generalized_linear_model#/Link_function)
"""<block_end><block_end> |
<import_stmt>os<import_stmt>numpy<import_from_stmt>pydub AudioSegment<import_from_stmt>scipy.fftpack fft<class_stmt>AudioSignal(object)<block_start><def_stmt>__init__ self sample_rate signal=<none> filename=<none># Set sample rate
<block_start>self._sample_rate=sample_rate<if_stmt>signal<is><none># Get file name and file extension
<block_start>file,file_extension=os.path.splitext(filename)<line_sep># Check if file extension if audio format
<if_stmt>file_extension<in>['.mp3' '.wav']# Read audio file
<block_start>self._signal=self.read_audio_file(filename)<block_end># Check if file extension if video format
<elif_stmt>file_extension<in>['.mp4' '.mkv' 'avi']# Extract audio from video
<block_start>new_filename=self.extract_audio_from_video(filename)<line_sep># read audio file from extracted audio file
self._signal=self.read_audio_file(new_filename)<block_end># Case file extension is not supported
<else_stmt><block_start>print("Error: file not found or file extension not supported.")<block_end><block_end><elif_stmt>filename<is><none># Cast signal to array
<block_start>self._signal=signal<block_end><else_stmt><block_start>print("Error : argument missing in AudioSignal() constructor.")<block_end><block_end>'''
Function to extract audio from a video
'''<def_stmt>extract_audio_from_video self filename# Get video file name and extension
<block_start>file,file_extension=os.path.splitext(filename)<line_sep># Extract audio (.wav) from video
os.system('ffmpeg -i '+file+file_extension+' '+'-ar '+str(self._sample_rate)+' '+file+'.wav')<line_sep>print("Sucessfully converted {} into audio!".format(filename))<line_sep># Return audio file name created
<return>file+'.wav'<block_end>'''
Function to read audio file and to return audio samples of a specified WAV file
'''<def_stmt>read_audio_file self filename# Get audio signal
<block_start>audio_file=AudioSegment.from_file(filename)<line_sep># Resample audio signal
audio_file=audio_file.set_frame_rate(self._sample_rate)<line_sep># Cast to integer
<if_stmt>audio_file.sample_width<eq>2<block_start>data=numpy.fromstring(audio_file._data numpy.int16)<block_end><elif_stmt>audio_file.sample_width<eq>4<block_start>data=numpy.fromstring(audio_file._data numpy.int32)<block_end># Merge audio channels
audio_signal=[]<for_stmt>chn list(range(audio_file.channels))<block_start>audio_signal.append(data[chn::audio_file.channels])<block_end>audio_signal=numpy.array(audio_signal).T<line_sep># Flat signals
<if_stmt>audio_signal.ndim<eq>2<block_start><if_stmt>audio_signal.shape[1]<eq>1<block_start>audio_signal=audio_signal.flatten()<block_end><block_end># Convert stereo to mono
audio_signal=self.stereo_to_mono(audio_signal)<line_sep># Return sample rate and audio signal
<return>audio_signal<block_end>'''
Function to convert an input signal from stereo to mono
'''<line_sep>@staticmethod<def_stmt>stereo_to_mono audio_signal# Check if signal is stereo and convert to mono
<block_start><if_stmt>isinstance(audio_signal int)<block_start><return>-1<block_end><if_stmt>audio_signal.ndim<eq>1<block_start><return>audio_signal<block_end><elif_stmt>audio_signal.ndim<eq>2<block_start><if_stmt>audio_signal.shape[1]<eq>1<block_start><return>audio_signal.flatten()<block_end><else_stmt><block_start><if_stmt>audio_signal.shape[1]<eq>2<block_start><return>(audio_signal[: 1]/2)+(audio_signal[: 0]/2)<block_end><else_stmt><block_start><return>-1<block_end><block_end><block_end><block_end>'''
Function to split the input signal into windows of same size
'''<def_stmt>framing self size step hamming=<false># Rescale windows step and size
<block_start>win_size=int(size<times>self._sample_rate)<line_sep>win_step=int(step<times>self._sample_rate)<line_sep># Number of frames
nb_frames=1+int((len(self._signal)-win_size)/win_step)<line_sep># Build Hamming function
<if_stmt>hamming<is><true><block_start>ham=numpy.hamming(win_size)<block_end><else_stmt><block_start>ham=numpy.ones(win_size)<block_end># Split signals (and multiply each windows signals by Hamming functions)
frames=[]<for_stmt>t range(nb_frames)<block_start>sub_signal=AudioSignal(self._sample_rate signal=self._signal[(t<times>win_step):(t<times>win_step+win_size)]<times>ham)<line_sep>frames.append(sub_signal)<block_end><return>frames<block_end>'''
Function to compute the magnitude of the Discrete Fourier Transform coefficient
'''<def_stmt>dft self norm=<false># Commpute the magnitude of the spectrum (and normalize by the number of sample)
<block_start><if_stmt>norm<is><true><block_start>dft=abs(fft(self._signal))/len(self._signal)<block_end><else_stmt><block_start>dft=abs(fft(self._signal))<block_end><return>dft<block_end>'''
Function to apply pre-emphasis filter on signal
'''<def_stmt>pre_emphasis self alpha=0.97# Emphasized signal
<block_start>emphasized_signal=numpy.append(self._signal[0] self._signal[1:]-alpha<times>self._signal[:-1])<line_sep><return>emphasized_signal<block_end><block_end> |
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
# Expose method and class written in C
<import_from_stmt>_pybricks.tools wait StopWatch<line_sep># Imports for DataLog implementation
<import_from_stmt>utime localtime ticks_us<class_stmt>DataLog<block_start><def_stmt>__init__ self *headers name="log" timestamp=<true> extension="csv" append=<false># Make timestamp of the form yyyy_mm_dd_hh_mm_ss_uuuuuu
<block_start><if_stmt>timestamp<block_start>y,mo,d,h,mi,s=localtime()[0:6]<line_sep>u=ticks_us()%1000000<line_sep>stamp="_{0}_{1:02d}_{2:02d}_{3:02d}_{4:02d}_{5:02d}_{6:06d}".format(y mo d h mi s u)<block_end><else_stmt><block_start>stamp=""<block_end># File write mode
mode="a+"<if>append<else>"w+"<line_sep># Append extension and open
self.file=open("{0}{1}.{2}".format(name stamp extension) mode)<line_sep># Get length of existing contents
self.file.seek(0 2)<line_sep>length=self.file.tell()<line_sep># If column headers were given and we are at the start of the file, print headers as first line
<if_stmt>len(headers)<g>0<and>length<eq>0<block_start>print(*headers sep=", " file=self.file)<block_end><block_end><def_stmt>log self *values<block_start>print(*values sep=", " file=self.file)<block_end><def_stmt>__repr__ self<block_start>self.file.seek(0 0)<line_sep><return>self.file.read()<block_end><block_end> |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for tree_math."""<import_stmt>functools<import_from_stmt>absl.testing absltest<import_stmt>jax<import_from_stmt>jax lax<import_stmt>tree_math<as>tm<import_from_stmt>tree_math._src test_util<import_stmt>tree_math.numpy<as>tnp<line_sep># pylint: disable=g-complex-comprehension
<class_stmt>TreeMathTest(test_util.TestCase)<block_start><def_stmt>test_norm self<block_start>@tm.wrap<def_stmt>norm1 x y<block_start><return>((x-y)<power>2).sum()<power>0.5<block_end>@tm.wrap<def_stmt>norm2 x y<block_start>d=x-y<line_sep><return>(d@d)<power>0.5<block_end>x={"a":1 "b":1}<line_sep>y={"a":1+3 "b":1+4}<line_sep>expected=5.0<line_sep>actual=norm1(x y)<line_sep>self.assertAllClose(actual expected)<line_sep>actual=norm2(x y)<line_sep>self.assertAllClose(actual expected)<block_end><def_stmt>test_cg self# an integration test to verify non-trivial examples work
# pylint: disable=invalid-name
<block_start>@functools.partial(tm.wrap vector_argnames=["b" "x0"])<def_stmt>cg A b x0 M=<lambda>x:x maxiter=5 tol=1e-5 atol=0.0<block_start>"""jax.scipy.sparse.linalg.cg, written with tree_math."""<line_sep>A=tm.unwrap(A)<line_sep>M=tm.unwrap(M)<line_sep>atol2=tnp.maximum(tol<power>2<times>(b@b) atol<power>2)<def_stmt>cond_fun value<block_start>x,r,gamma,p,k=value# pylint: disable=unused-variable
<return>(r@r<g>atol2)&(k<l>maxiter)<block_end><def_stmt>body_fun value<block_start>x,r,gamma,p,k=value<line_sep>Ap=A(p)<line_sep>alpha=gamma/(p.conj()@Ap)<line_sep>x_=x+alpha<times>p<line_sep>r_=r-alpha<times>Ap<line_sep>z_=M(r_)<line_sep>gamma_=r_.conj()@z_<line_sep>beta_=gamma_/gamma<line_sep>p_=z_+beta_<times>p<line_sep><return>x_ r_ gamma_ p_ k+1<block_end>r0=b-A(x0)<line_sep>p0=z0=M(r0)<line_sep>gamma0=r0@z0<line_sep>initial_value=(x0 r0 gamma0 p0 0)<line_sep>x_final,*_=lax.while_loop(cond_fun body_fun initial_value)<line_sep><return>x_final<block_end>A=<lambda>x:{"a":x["a"]+0.5<times>x["b"] "b":0.5<times>x["a"]+x["b"]}<line_sep>b={"a":1.0 "b":-1.0}<line_sep>x0={"a":0.0 "b":0.0}<line_sep>actual=cg(A b x0)<line_sep>expected=jax.device_put({"a":2.0 "b":-2.0})<line_sep>self.assertTreeAllClose(actual expected check_dtypes=<true>)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>absltest.main()<block_end> |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
<import_stmt>pytest<line_sep>pytest.importorskip("ethosu.vela")<import_stmt>tvm<import_stmt>tvm.script<import_from_stmt>tvm.script tir<import_from_stmt>tvm relay<import_from_stmt>tvm.relay.testing run_opt_pass<import_from_stmt>tvm.relay.backend.contrib.ethosu.tir.compiler lower_to_tir<import_from_stmt>.infra make_ethosu_conv2d<line_sep># fmt: off
@tvm.script.ir_module<class_stmt>ReferenceModule<block_start>@tir.prim_func<def_stmt>main placeholder:tir.handle placeholder_1:tir.handle placeholder_2:tir.handle placeholder_3:tir.handle placeholder_4:tir.handle placeholder_5:tir.handle placeholder_6:tir.handle placeholder_7:tir.handle placeholder_8:tir.handle placeholder_9:tir.handle T_concat:tir.handle<arrow><none># function attr dict
<block_start>tir.func_attr({"from_legacy_te_schedule":<true> "global_symbol":"main" "tir.noalias":<true>})<line_sep>buffer=tir.match_buffer(placeholder_2 [2992] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_1=tir.match_buffer(placeholder_4 [2992] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>placeholder_10=tir.match_buffer(placeholder_1 [1 8 10 16] dtype="int8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_2=tir.match_buffer(placeholder_9 [160] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_3=tir.match_buffer(placeholder_8 [2992] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_4=tir.match_buffer(placeholder_5 [160] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_5=tir.match_buffer(placeholder_6 [2992] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>T_concat_1=tir.match_buffer(T_concat [1 8 32 16] dtype="int8" elem_offset=0 align=128 offset_factor=1)<line_sep>placeholder_11=tir.match_buffer(placeholder [1 8 12 16] dtype="int8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_6=tir.match_buffer(placeholder_7 [160] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep>buffer_7=tir.match_buffer(placeholder_3 [160] dtype="uint8" elem_offset=0 align=128 offset_factor=1)<line_sep># body
T_concat_2=tir.allocate([2816] "int8" "global" annotations={"disable_lower_builtin":<true>})<line_sep>tir.evaluate(tir.call_extern("ethosu_conv2d" "int8" 8 10 16 8 0 10 tir.load("int8" placeholder_10.data 0) 0 0 0 tir.float32(0.5) 10 "NHWC" 160 16 1 "int8" 8 10 16 8 0 10 tir.load("int8" T_concat_2 192) 0 0 0 tir.float32(0.25) 14 "NHWC" 352 16 1 3 3 1 1 1 1 tir.load("uint8" buffer_1.data 0) 2992 12 tir.load("uint8" buffer_4.data 0) 160 1 1 1 1 "NONE" 0 0 "TFL" "NONE" dtype="handle"))<line_sep>tir.evaluate(tir.call_extern("ethosu_conv2d" "int8" 8 10 16 8 0 10 tir.load("int8" T_concat_2 192) 0 0 0 tir.float32(0.5) 10 "NHWC" 352 16 1 "int8" 8 10 16 8 0 10 tir.load("int8" T_concat_1.data 352) 0 0 0 tir.float32(0.25) 14 "NHWC" 512 16 1 3 3 1 1 1 1 tir.load("uint8" buffer_3.data 0) 2992 12 tir.load("uint8" buffer_2.data 0) 160 1 1 1 1 "NONE" 0 0 "TFL" "NONE" dtype="handle"))<line_sep>tir.evaluate(tir.call_extern("ethosu_conv2d" "int8" 8 12 16 8 0 12 tir.load("int8" placeholder_11.data 0) 0 0 0 tir.float32(0.5) 10 "NHWC" 192 16 1 "int8" 8 12 16 8 0 12 tir.load("int8" T_concat_2 0) 0 0 0 tir.float32(0.25) 14 "NHWC" 352 16 1 3 3 1 1 1 1 tir.load("uint8" buffer.data 0) 2992 12 tir.load("uint8" buffer_7.data 0) 160 1 1 1 1 "NONE" 0 0 "TFL" "NONE" dtype="handle"))<line_sep>tir.evaluate(tir.call_extern("ethosu_conv2d" "int8" 8 22 16 8 0 22 tir.load("int8" T_concat_2 0) 0 0 0 tir.float32(0.5) 10 "NHWC" 352 16 1 "int8" 8 22 16 8 0 22 tir.load("int8" T_concat_1.data 0) 0 0 0 tir.float32(0.25) 14 "NHWC" 512 16 1 3 3 1 1 1 1 tir.load("uint8" buffer_5.data 0) 2992 12 tir.load("uint8" buffer_6.data 0) 160 1 1 1 1 "NONE" 0 0 "TFL" "NONE" dtype="handle"))<block_end>__tvm_meta__=<none><block_end># fmt: on
<def_stmt>test_concat <block_start><def_stmt>_get_func <block_start>ifm1=relay.var("ifm1" shape=(1 8 12 16) dtype="int8")<line_sep>ifm2=relay.var("ifm2" shape=(1 8 10 16) dtype="int8")<line_sep>conv1=make_ethosu_conv2d(ifm1 16 16 (3 3) (1 1) (1 1) (1 1))<line_sep>conv2=make_ethosu_conv2d(ifm2 16 16 (3 3) (1 1) (1 1) (1 1))<line_sep>conc1=relay.concatenate((conv1 conv2) axis=2)<line_sep>conv3=make_ethosu_conv2d(conc1 16 16 (3 3) (1 1) (1 1) (1 1))<line_sep>conv4=make_ethosu_conv2d(conv2 16 16 (3 3) (1 1) (1 1) (1 1))<line_sep>conc2=relay.concatenate((conv3 conv4) axis=2)<line_sep>func=relay.Function(relay.analysis.free_vars(conc2) conc2)<line_sep>func=run_opt_pass(func relay.transform.InferType())<line_sep><return>func<block_end>func=_get_func()<line_sep>mod,_=lower_to_tir(func)<line_sep>script=mod.script(show_meta=<true>)<line_sep>test_mod=tvm.script.from_source(script)<line_sep>reference_mod=ReferenceModule<line_sep>tvm.ir.assert_structural_equal(test_mod["main"] reference_mod["main"] <true>)<block_end><if_stmt>__name__<eq>"__main__"<block_start>pytest.main([__file__])<block_end> |
<import_from_stmt>typing Optional<import_stmt>torch<import_from_stmt>torch Tensor<import_from_stmt>examples.simultaneous_translation.utils.functions exclusive_cumprod prob_check moving_sum <def_stmt>expected_alignment_from_p_choose p_choose:Tensor padding_mask:Optional[Tensor]=<none> eps:float=1e-6<block_start>"""
Calculating expected alignment for from stepwise probability
Reference:
Online and Linear-Time Attention by Enforcing Monotonic Alignments
https://arxiv.org/pdf/1704.00784.pdf
q_ij = (1 − p_{ij−1})q_{ij−1} + a+{i−1j}
a_ij = p_ij q_ij
Parallel solution:
ai = p_i * cumprod(1 − pi) * cumsum(a_i / cumprod(1 − pi))
============================================================
Expected input size
p_choose: bsz, tgt_len, src_len
"""<line_sep>prob_check(p_choose)<line_sep># p_choose: bsz, tgt_len, src_len
bsz,tgt_len,src_len=p_choose.size()<line_sep>dtype=p_choose.dtype<line_sep>p_choose=p_choose.float()<if_stmt>padding_mask<is><not><none><block_start>p_choose=p_choose.masked_fill(padding_mask.unsqueeze(1) 0.0)<block_end><if_stmt>p_choose.is_cuda<block_start>p_choose=p_choose.contiguous()<import_from_stmt>alignment_train_cuda_binding alignment_train_cuda<as>alignment_train<block_end><else_stmt><block_start><import_from_stmt>alignment_train_cpu_binding alignment_train_cpu<as>alignment_train<block_end>alpha=p_choose.new_zeros([bsz tgt_len src_len])<line_sep>alignment_train(p_choose alpha eps)<line_sep># Mix precision to prevent overflow for fp16
alpha=alpha.type(dtype)<line_sep>prob_check(alpha)<line_sep><return>alpha<block_end><def_stmt>expected_soft_attention alpha:Tensor soft_energy:Tensor padding_mask:Optional[Tensor]=<none> chunk_size:Optional[int]=<none> eps:float=1e-10<block_start>"""
Function to compute expected soft attention for
monotonic infinite lookback attention from
expected alignment and soft energy.
Reference:
Monotonic Chunkwise Attention
https://arxiv.org/abs/1712.05382
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
soft_energy: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""<if_stmt>padding_mask<is><not><none><block_start>alpha=alpha.masked_fill(padding_mask.unsqueeze(1) 0.0)<line_sep>soft_energy=soft_energy.masked_fill(padding_mask.unsqueeze(1) -float("inf"))<block_end>prob_check(alpha)<line_sep>dtype=alpha.dtype<line_sep>alpha=alpha.float()<line_sep>soft_energy=soft_energy.float()<line_sep>soft_energy=soft_energy-soft_energy.max(dim=2 keepdim=<true>)[0]<line_sep>exp_soft_energy=torch.exp(soft_energy)+eps<if_stmt>chunk_size<is><not><none># Chunkwise
<block_start>beta=(exp_soft_energy<times>moving_sum(alpha/(eps+moving_sum(exp_soft_energy chunk_size 1)) 1 chunk_size))<block_end><else_stmt># Infinite lookback
# Notice that infinite lookback is a special case of chunkwise
# where chunksize = inf
<block_start>inner_items=alpha/(eps+torch.cumsum(exp_soft_energy dim=2))<line_sep>beta=(exp_soft_energy<times>torch.cumsum(inner_items.flip(dims=[2]) dim=2).flip(dims=[2]))<block_end><if_stmt>padding_mask<is><not><none><block_start>beta=beta.masked_fill(padding_mask.unsqueeze(1).to(torch.bool) 0.0)<block_end># Mix precision to prevent overflow for fp16
beta=beta.type(dtype)<line_sep>beta=beta.clamp(0 1)<line_sep>prob_check(beta)<line_sep><return>beta<block_end><def_stmt>mass_preservation alpha:Tensor padding_mask:Optional[Tensor]=<none> left_padding:bool=<false><block_start>"""
Function to compute the mass perservation for alpha.
This means that the residual weights of alpha will be assigned
to the last token.
Reference:
Monotonic Infinite Lookback Attention for Simultaneous Machine Translation
https://arxiv.org/abs/1906.05218
alpha: bsz, tgt_len, src_len
padding_mask: bsz, src_len
left_padding: bool
"""<line_sep>prob_check(alpha)<if_stmt>padding_mask<is><not><none><block_start><if_stmt><not>left_padding<block_start><assert_stmt><not>padding_mask[: 0].any() ("Find padding on the beginning of the sequence.")<block_end>alpha=alpha.masked_fill(padding_mask.unsqueeze(1) 0.0)<block_end><if_stmt>left_padding<or>padding_mask<is><none><block_start>residuals=1-alpha[: : :-1].sum(dim=-1).clamp(0 1)<line_sep>alpha[: : -1]=residuals<block_end><else_stmt># right padding
<block_start>_,tgt_len,src_len=alpha.size()<line_sep>residuals=1-alpha.sum(dim=-1 keepdim=<true>).clamp(0 1)<line_sep>src_lens=src_len-padding_mask.sum(dim=1 keepdim=<true>)<line_sep>src_lens=src_lens.expand(-1 tgt_len).contiguous()<line_sep># add back the last value
residuals<augadd>alpha.gather(2 src_lens.unsqueeze(2)-1)<line_sep>alpha=alpha.scatter(2 src_lens.unsqueeze(2)-1 residuals)<line_sep>prob_check(alpha)<block_end><return>alpha<block_end> |
#-*- coding:utf-8 -*-
<import_from_future_stmt> print_function<import_stmt>os sys sip time<import_from_stmt>datetime datetime timedelta<import_from_stmt>qtpy.QtWidgets QTreeWidgetItem QMenu QApplication QAction QMainWindow<import_from_stmt>qtpy QtGui QtWidgets<import_from_stmt>qtpy.QtCore Qt QUrl QDate<import_from_stmt>Graph graphpage<import_from_stmt>layout Ui_MainWindow<import_from_stmt>pandas DataFrame<as>df<import_stmt>pandas<as>pd<import_stmt>tushare<as>ts<import_stmt>pickle<import_stmt>numpy<as>np<line_sep>list1=[]<class_stmt>MyUi(QMainWindow)<block_start><def_stmt>__init__ self<block_start>super(MyUi self).__init__()<line_sep>self.ui=Ui_MainWindow()<line_sep>self.ui.setupUi(self)<line_sep>cwd=os.getcwd()<line_sep>cwd=str(cwd)<if_stmt>os.path.isfile(cwd+"/time")<block_start><with_stmt>open("time" "rb")<as>outfile#reads current time
<block_start>history=pickle.load(outfile)<block_end><if_stmt>(datetime.now()-history).total_seconds()<l>43200#measures if time elapse>12 hours
<block_start>print("Less than 12 hours. Loading previously saved Pickle...")<block_end><else_stmt><block_start>print("More than 12 hours. Updating Pickle...")<line_sep>data=ts.get_industry_classified()<with_stmt>open("class" "wb+")<as>outfile<block_start>pickle.dump(data outfile)<block_end>now=datetime.now()<with_stmt>open("time" "wb+")<as>outfile#update time
<block_start>pickle.dump(now outfile)<block_end><block_end><block_end><else_stmt><block_start>print("No Pickle found!")#If this is first time using tuchart in this directory
data=df()<line_sep>data=ts.get_industry_classified()<with_stmt>open('class' 'wb+')<as>outfile#records pickle
<block_start>pickle.dump(data outfile)<block_end>now=datetime.now()<with_stmt>open("time" "wb+")<as>outfile<block_start>pickle.dump(now outfile)<block_end><block_end><with_stmt>open("class" "rb")<as>infile# reads current time
<block_start>series=pickle.load(infile)<block_end>#series = pd.read_json(cwd + "\\class.json")
#series = ts.get_industry_classified()
series=pd.DataFrame(series)<line_sep>curdate=time.strftime("%Y/%m/%d")# gets current time to put into dateedit
curdateQ=QDate.fromString(curdate "yyyy/MM/dd")<line_sep>dateobj=datetime.strptime(curdate "%Y/%m/%d")#converts to datetime object
past=dateobj-timedelta(days=7)#minus a week to start date
pasttime=datetime.strftime(past "%Y/%m/%d")<line_sep>pastQ=QDate.fromString(pasttime "yyyy/MM/dd")#convert to qtime so that widget accepts the values
pastL=dateobj-timedelta(days=30)# minus a month to start date
pasttimeL=datetime.strftime(pastL "%Y/%m/%d")<line_sep>pastQL=QDate.fromString(pasttimeL "yyyy/MM/dd")<line_sep>np_indexes=np.array([['sh' '上证指数' '大盘指数'] ['sz' '深证成指' '大盘指数'] ['hs300' '沪深300指数' '大盘指数'] ['sz50' '上证50' '大盘指数'] ['zxb' '中小板' '大盘指数'] ['cyb' '创业板' '大盘指数']])<line_sep>indexes=df(data=np_indexes index=range(5000 5006) columns=["code" "name" "c_name"])<line_sep>series=indexes.append(series)<line_sep>list1_bfr=series["c_name"].tolist()#Get industry categories. Filters out redundant ones
list1=list(set(list1_bfr))<line_sep>list1.sort(key=list1_bfr.index)<line_sep>#w = database()
#zsparent = QTreeWidgetItem(self.ui.treeWidget)
#zsparent.setText(0,"股票指数")
#zsnames =["上证指数-sh","深圳成指-sz","沪深300指数-hs300","上证50-"]
self.init_treeWidget(list1 series)<line_sep>self.ui.treeWidget.setContextMenuPolicy(Qt.CustomContextMenu)<line_sep>self.ui.treeWidget.customContextMenuRequested.connect(self.openMenu)<line_sep>#self.ui.webView.setGeometry(QtCore.QRect(0, 30,1550, 861))
file_path=os.path.abspath(os.path.join(os.path.dirname(__file__) "render.html"))#path to read html file
local_url=QUrl.fromLocalFile(file_path)<line_sep>self.ui.webView.load(local_url)<line_sep>#self.ui.commandLinkButton.setFixedSize(50, 50)
self.ui.search_btn.clicked.connect(<lambda>:self.search_comp(series))<line_sep>self.ui.init_code_btn.clicked.connect(<lambda>:self.code_sort_tree(series))<line_sep>self.ui.init_category_btn.clicked.connect(<lambda>:self.init_treeWidget(list1 series))<line_sep>self.ui.commandLinkButton.clicked.connect(self.classify)#when the arrow button is clicked, trigger events
#self.ui.commandLinkButton.clicked.connect(lambda action: self.classify(action, self.ui.treewidget))
# QSizePolicy
<try_stmt><block_start>retain_size=self.ui.dateEdit_2.sizePolicy()<line_sep>retain_size.setRetainSizeWhenHidden(<true>)<line_sep>self.ui.dateEdit_2.setSizePolicy(retain_size)<line_sep>retain_size=self.ui.comboBox.sizePolicy()<line_sep>retain_size.setRetainSizeWhenHidden(<true>)<line_sep>self.ui.comboBox.setSizePolicy(retain_size)<line_sep>retain_size=self.ui.label_2.sizePolicy()<line_sep>retain_size.setRetainSizeWhenHidden(<true>)<line_sep>self.ui.label_2.setSizePolicy(retain_size)<block_end><except_stmt>AttributeError<block_start>print("No PYQT5 Binding! Widgets might be deformed")<block_end>self.ui.dateEdit.setDate(pastQL)<line_sep>self.ui.dateEdit_2.setDate(curdateQ)#populate widgets
self.ui.dateEdit.setCalendarPopup(<true>)<line_sep>self.ui.dateEdit_2.setCalendarPopup(<true>)<line_sep>self.ui.comboBox.addItems(["D" "W" "M" "5" "15" "30" "60"])<line_sep>self.ui.treeWidget_2.setDragDropMode(self.ui.treeWidget_2.InternalMove)<line_sep>self.ui.treeWidget_2.setContextMenuPolicy(Qt.CustomContextMenu)<line_sep>self.ui.treeWidget_2.customContextMenuRequested.connect(self.openWidgetMenu)<line_sep>#self.ui.toolbutton.clicked.connect(lambda action: self.graphmerge(action, CombineKeyword))
self.ui.combobox.currentIndexChanged.connect(<lambda>:self.modifycombo(pastQL pastQ))<block_end><def_stmt>init_treeWidget self list1 series<block_start>self.ui.treeWidget.clear()<for_stmt>j list1<block_start>parent=QTreeWidgetItem(self.ui.treeWidget)#populate treewidget with names
parent.setText(0 j)<line_sep>var=series.loc[series["c_name"]<eq>j]<line_sep>list2=var["code"].tolist()<line_sep>name=var["name"].tolist()<line_sep>#var = showcollection(i) #Display database items
<for_stmt>idx,val enumerate(list2)<block_start>child=QTreeWidgetItem(parent)<line_sep>child.setText(0 name[idx]+"-"+val)<line_sep>#for i in Drag:
#grandson = QTreeWidgetItem(child) #Commented out because increases program response time
#grandson.setText(0, i)
<block_end><block_end>#self.ui.treeWidget.itemDoubleClicked.connect(self.onClickItem) #Display Collection items
<block_end><def_stmt>code_sort_tree self companies<block_start>self.ui.treeWidget.clear()<line_sep>sorted_comps=companies.sort_values(["code"])<line_sep>code_list=sorted_comps["code"].tolist()<line_sep>name_list=sorted_comps["name"].tolist()<line_sep>shares_parent=QTreeWidgetItem(self.ui.treeWidget)<line_sep>shares_parent.setText(0 "个股行情")<for_stmt>idx,val enumerate(code_list)<block_start>child=QTreeWidgetItem(shares_parent)<line_sep>child.setText(0 name_list[idx]+"-"+str(val))<block_end>self.ui.treeWidget.expandToDepth(0)<block_end><def_stmt>search_comp self companies<block_start>self.ui.treeWidget.clear()<line_sep>text=self.ui.search_lineEdit.text()<line_sep>filtered_codes=companies[companies['code'].str.contains(text)]<line_sep>filtered_names=companies[companies['name'].str.contains(text)]<line_sep>filtered_comps=filtered_codes.append(filtered_names)<line_sep>code_list=filtered_comps["code"].tolist()<line_sep>name_list=filtered_comps["name"].tolist()<line_sep>parent=QTreeWidgetItem(self.ui.treeWidget)<line_sep>parent.setText(0 "搜索结果")<for_stmt>idx,val enumerate(code_list)<block_start>child=QTreeWidgetItem(parent)<line_sep>child.setText(0 name_list[idx]+"-"+str(val))<block_end>self.ui.treeWidget.expandToDepth(0)<block_end><def_stmt>modifycombo self pastQL pastQ<block_start><if_stmt>self.ui.combobox.currentText()<eq>"复权"#if 复权 is selected, clear all existing queries to avoid value conflict
<block_start>self.ui.label_2.show()<line_sep>self.ui.dateEdit_2.show()<line_sep>self.ui.dateEdit.setDate(pastQL)<line_sep>self.ui.interval_label.show()<line_sep>self.ui.comboBox.show()<line_sep>self.ui.comboBox.clear()<line_sep>self.ui.comboBox.addItems(["hfq" "qfq"])<line_sep>self.ui.treeWidget_2.clear()<block_end><if_stmt>self.ui.combobox.currentText()<eq>"K线"<block_start>self.ui.label_2.show()<line_sep>self.ui.dateEdit_2.show()<line_sep>self.ui.dateEdit.setDate(pastQL)<line_sep>self.ui.interval_label.show()<line_sep>self.ui.comboBox.show()<line_sep>self.ui.comboBox.clear()<line_sep>self.ui.comboBox.addItems(["D" "W" "M" "5" "15" "30" "60"])#same as above
self.ui.treeWidget_2.clear()<block_end><if_stmt>self.ui.combobox.currentText()<eq>"分笔数据"<block_start>self.ui.interval_label.hide()<line_sep>self.ui.comboBox.hide()<line_sep>self.ui.label_2.hide()<line_sep>self.ui.dateEdit_2.hide()<line_sep>self.ui.dateEdit.setDate(pastQ)<line_sep>self.ui.treeWidget_2.clear()<block_end><if_stmt>self.ui.combobox.currentText()<eq>"历史分钟"<block_start>self.ui.interval_label.hide()<line_sep>self.ui.comboBox.show()<line_sep>self.ui.comboBox.clear()<line_sep>self.ui.comboBox.addItems(["1min" "5min" "15min" "30min" "60min"])<line_sep>self.ui.label_2.hide()<line_sep>self.ui.dateEdit_2.hide()<line_sep>self.ui.dateEdit.setDate(pastQ)<line_sep>self.ui.treeWidget_2.clear()<block_end><if_stmt>self.ui.combobox.currentText()<eq>u"十大股东"<block_start>self.ui.interval_label.hide()<line_sep>self.ui.comboBox.hide()<line_sep>self.ui.label_2.hide()<line_sep>self.ui.dateEdit_2.hide()<line_sep>self.ui.treeWidget_2.clear()<block_end><block_end><def_stmt>openMenu self position<block_start>indexes=self.ui.treeWidget.selectedIndexes()<line_sep>item=self.ui.treeWidget.itemAt(position)<line_sep>db_origin=""<line_sep>#if item.parent():
# db_origin = item.parent().text(0)
collec=item.text(0)<if_stmt>len(indexes)<g>0<block_start>level=0<line_sep>index=indexes[0]<while_stmt>index.parent().isValid()<block_start>index=index.parent()<line_sep>level=level+1<block_end>menu=QMenu()<line_sep>#print((collec, db_origin))
<if_stmt>level<eq>0<block_start><pass><block_end><else_stmt>#keyarray = GetKeys(collec, db_origin)
#if "Open" in keyarray:
<block_start><if_stmt>self.ui.combobox.currentText()<eq>u"K线"<block_start>menu.addAction(QAction("Kline" menu checkable=<true>))<line_sep>menu.addAction(QAction("Open" menu checkable=<true>))<line_sep>menu.addAction(QAction("Close" menu checkable=<true>))#open up different menu with different kind of graphs
menu.addAction(QAction("High" menu checkable=<true>))<line_sep>menu.addAction(QAction("Low" menu checkable=<true>))<line_sep>menu.addAction(QAction("Volume" menu checkable=<true>))<line_sep>#menu.addAction(QAction("P_change", menu, checkable=True))
#menu.addAction(QAction("Turnover",menu,checkable=True))
<block_end><if_stmt>self.ui.combobox.currentText()<eq>u"复权"<block_start>menu.addAction(QAction("Kline" menu checkable=<true>))<line_sep>menu.addAction(QAction("Open" menu checkable=<true>))<line_sep>menu.addAction(QAction("Close" menu checkable=<true>))<line_sep>menu.addAction(QAction("High" menu checkable=<true>))<line_sep>menu.addAction(QAction("Low" menu checkable=<true>))<line_sep>menu.addAction(QAction("Volume" menu checkable=<true>))<line_sep>menu.addAction(QAction("Amount" menu checkable=<true>))<block_end><if_stmt>self.ui.combobox.currentText()<eq>u"分笔数据"<block_start>menu.addAction(QAction("分笔" menu checkable=<true>))<block_end><if_stmt>self.ui.combobox.currentText()<eq>u"历史分钟"<block_start>menu.addAction(QAction("Kline" menu checkable=<true>))<line_sep>menu.addAction(QAction("Open" menu checkable=<true>))<line_sep>menu.addAction(QAction("Close" menu checkable=<true>))<line_sep>menu.addAction(QAction("High" menu checkable=<true>))<line_sep>menu.addAction(QAction("Low" menu checkable=<true>))<line_sep>menu.addAction(QAction("Volume" menu checkable=<true>))<line_sep>menu.addAction(QAction("Amount" menu checkable=<true>))<block_end><if_stmt>self.ui.combobox.currentText()<eq>u"十大股东"<block_start>menu.addAction(QAction("季度饼图" menu checkable=<true>))<line_sep>#menu.addAction(QAction("持股比例", menu, checkable=True))
<block_end>#for g in keyarray:
#menu.addAction(QAction(g, menu, checkable=True))
<block_end><block_end>menu.triggered.connect(<lambda>action:self.methodSelected(action collec))<line_sep>menu.exec_(self.ui.treeWidget.viewport().mapToGlobal(position))<block_end><def_stmt>methodSelected self action collec# print(action.text()) #Choice
# if (self.ui.treewidget.count() == 5):
# self.ui.label.setText("Maximum number of queries")
# return
# self.ui.label.setText("")
<block_start>Choice=action.text()<line_sep>Stock=collec<line_sep># print(collec) #Stock Name
# print(db_origin) #DataBase name
# list1 = [self.tr(Stock+"-"+Choice+"-"+db_origin)]
# self.ui.treewidget.addItems(list1)
parent=QTreeWidgetItem(self.ui.treeWidget_2)<line_sep>parent.setText(0 Stock+"-"+Choice)<block_end><def_stmt>openWidgetMenu self position<block_start>indexes=self.ui.treeWidget_2.selectedIndexes()<line_sep>item=self.ui.treeWidget_2.itemAt(position)<if_stmt>item<eq><none><block_start><return><block_end>#item = self.ui.listWidget.itemAt(position)
<if_stmt>len(indexes)<g>0<block_start>menu=QMenu()<line_sep>menu.addAction(QAction("Delete" menu checkable=<true>))#This function is perhaps useless
#menu.triggered.connect(self.eraseItem)
item=self.ui.treeWidget_2.itemAt(position)<line_sep>#collec = str(item.text())
menu.triggered.connect(<lambda>action:self.ListMethodSelected(action item))<block_end>menu.exec_(self.ui.treeWidget_2.viewport().mapToGlobal(position))<block_end><def_stmt>ListMethodSelected self action item<block_start><if_stmt>action.text()<eq>"Delete"<block_start>self.eraseItem()<block_end><if_stmt>action.text()<eq>"Combine"<block_start><global>CombineKeyword<line_sep>collec=str(item.text())<line_sep>CombineKeyword.append(collec)#Useless function(maybe?)
list1=[self.tr(collec)]<line_sep>self.ui.listwidget.addItems(list1)<line_sep>self.eraseItem()<block_end><block_end><def_stmt>eraseItem self<block_start><for_stmt>x self.ui.treeWidget_2.selectedItems()#delete with write click menu
#item = self.ui.treewidget.takeItem(self.ui.treewidget.currentRow())
<block_start>sip.delete(x)<line_sep>#item.delete
<block_end><block_end><def_stmt>classify self folder<block_start>startdate=self.ui.dateEdit.date()<line_sep>startdate=startdate.toPyDate()<line_sep>startdate=startdate.strftime("%Y/%m/%d")#converts date from dateedit to tushare readable date
enddate=self.ui.dateEdit_2.date()<line_sep>enddate=enddate.toPyDate()<line_sep>enddate=enddate.strftime("%Y/%m/%d")<line_sep>option=self.ui.comboBox.currentText()<line_sep>option=str(option)<line_sep>#if (self.ui.treewidget) == 0:
#self.ui.label.setText("Need to select at least one query")
#return
root=self.ui.treeWidget_2.invisibleRootItem()# This is for iterating child items
child_count=root.childCount()<line_sep>texts=[]<if_stmt>child_count<eq>0<block_start><return><block_end><for_stmt>i range(child_count)<block_start>item=root.child(i)<line_sep>text=item.text(0)#with 3 part'stock_name'+'-'+'code'+'-'+action
texts.append(text)<block_end>labels=[k<for>k texts]<line_sep>#items = ([x.encode("utf-8") for x in labels])
width=self.ui.webView.width()#give width and height of user's screen so that graphs can be generated with dynamic size
height=self.ui.webView.height()<line_sep>mode_combo=self.ui.combobox.currentText()<line_sep>graphpage(labels mode_combo startdate enddate option width height)#labels:复权ork线or分笔 option:hfq, qfq or 15, 30, D, etc
self.ui.webView.reload()#refreshes webengine
self.ui.webView.repaint()<line_sep>self.ui.webView.update()<block_end><def_stmt>graphmerge self combineKeyword<block_start>sth=""<for_stmt>i combineKeyword<block_start><if_stmt>sth<eq>""<block_start>sth=sth+i<block_end><else_stmt><block_start>sth=sth+"\n"+"&"+"-"+i<block_end><block_end>list1=sth<line_sep><return>sth<line_sep><global>CombineKeyword<line_sep>CombineKeyword=[]<line_sep>self.ui.listwidget.clear()<block_end><block_end>#combine stuff so that different graphs can be drawn together
app=QApplication(sys.argv)<line_sep>w=MyUi()<line_sep>w.show()<line_sep>sys.exit(app.exec_())<line_sep> |
<import_from_stmt>.gap_neck GlobalAveragePooling<line_sep>__all__=['GlobalAveragePooling']<line_sep> |
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recall metric."""<import_from_stmt>sklearn.metrics recall_score<import_stmt>datasets<line_sep>_DESCRIPTION="""
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""<line_sep>_KWARGS_DESCRIPTION="""
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""<line_sep>_CITATION="""
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={<NAME> <NAME> <NAME> <NAME> <NAME>.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""<line_sep>@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION _KWARGS_DESCRIPTION)<class_stmt>Recall(datasets.Metric)<block_start><def_stmt>_info self<block_start><return>datasets.MetricInfo(description=_DESCRIPTION citation=_CITATION inputs_description=_KWARGS_DESCRIPTION features=datasets.Features({"predictions":datasets.Sequence(datasets.Value("int32")) "references":datasets.Sequence(datasets.Value("int32")) }<if>self.config_name<eq>"multilabel"<else>{"predictions":datasets.Value("int32") "references":datasets.Value("int32") }) reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] )<block_end><def_stmt>_compute self predictions references labels=<none> pos_label=1 average="binary" sample_weight=<none> zero_division="warn" <block_start>score=recall_score(references predictions labels=labels pos_label=pos_label average=average sample_weight=sample_weight zero_division=zero_division )<line_sep><return>{"recall":float(score)<if>score.size<eq>1<else>score}<block_end><block_end> |
_base_=['../_base_/models/simmim_swin-base.py' '../_base_/datasets/imagenet_simmim.py' '../_base_/schedules/adamw_coslr-200e_in1k.py' '../_base_/default_runtime.py' ]<line_sep># data
data=dict(samples_per_gpu=128)<line_sep># optimizer
optimizer=dict(lr=2e-4<times>2048/512 betas=(0.9 0.999) eps=1e-8 paramwise_options={'norm':dict(weight_decay=0.) 'bias':dict(weight_decay=0.) 'absolute_pos_embed':dict(weight_decay=0.) 'relative_position_bias_table':dict(weight_decay=0.0)})<line_sep># clip gradient
optimizer_config=dict(grad_clip=dict(max_norm=5.0))<line_sep># learning policy
lr_config=dict(policy='CosineAnnealing' min_lr=1e-5<times>2048/512 warmup='linear' warmup_iters=10 warmup_ratio=1e-6/2e-4 warmup_by_epoch=<true> by_epoch=<false>)<line_sep># mixed precision
fp16=dict(loss_scale='dynamic')<line_sep># schedule
runner=dict(max_epochs=100)<line_sep># runtime
checkpoint_config=dict(interval=1 max_keep_ckpts=3 out_dir='')<line_sep>persistent_workers=<true><line_sep>log_config=dict(interval=100 hooks=[dict(type='TextLoggerHook') ])<line_sep> |
# Authors: <NAME> <<EMAIL>>
# + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
<import_stmt>pytest<import_from_stmt>pytest_cases parametrize_with_cases<def_stmt>case_sum_one_plus_two <block_start>a=1<line_sep>b=2<line_sep>c=3<line_sep><return>a b c<block_end>@parametrize_with_cases(argnames=["a" "b" "c"] cases=".")<def_stmt>test_argnames_as_list a b c<block_start><assert_stmt>a+b<eq>c<block_end>@parametrize_with_cases(argnames=("a" "b" "c") cases=".")<def_stmt>test_argnames_as_tuple a b c<block_start><assert_stmt>a+b<eq>c<block_end><def_stmt>test_argnames_from_invalid_type <block_start><with_stmt>pytest.raises(TypeError match="^argnames should be a string, list or a tuple$")<block_start>parametrize_with_cases(argnames=42 cases=".")(<lambda>_:<none>)<block_end><block_end><def_stmt>test_argnames_element_from_invalid_type <block_start><with_stmt>pytest.raises(TypeError match="^all argnames should be strings$")<block_start>parametrize_with_cases(argnames=["a" 2 "c"] cases=".")(<lambda>_:<none>)<block_end><block_end> |
<import_stmt>pytest<line_sep>@pytest.mark.parametrize(('x' 'y' ) [(0 [1]) (0 [1]) (str(0) str([1]))])<def_stmt>test_foo x y<block_start><assert_stmt>str([int(x)+1])<eq>y<block_end> |
<import_stmt>ctypes<class_stmt>CDLL_errno(ctypes.CDLL)<block_start><class_stmt>_FuncPtr(ctypes._CFuncPtr)<block_start>_flags_=ctypes._FUNCFLAG_CDECL|ctypes._FUNCFLAG_USE_ERRNO<line_sep>_restype_=ctypes.c_int<def_stmt>__call__ self *args<block_start>ctypes.set_errno(0)<try_stmt><block_start><return>ctypes._CFuncPtr.__call__(self *args)<block_end><finally_stmt><block_start>errno=ctypes.get_errno()<if_stmt>errno<block_start><import_stmt>os<line_sep><raise>IOError(errno os.strerror(errno))<block_end><block_end><block_end><block_end><def_stmt>__init__ self *args **kw<block_start>ctypes.CDLL.__init__(self *args **kw)<del_stmt>self._FuncPtr<block_end><block_end> |
<import_stmt>aip<line_sep>client=aip.AipNlp("Enter Your APP_ID" "Enter Your API_KEY" "Enter Your SECRET_KEY")<line_sep>word={"r":"代词" "v":"动词" "nr":"名词"}<line_sep>s=""<for_stmt>i client.lexer("我爱米思齐" options={})["items"]<block_start>s=s+i["item"]<line_sep>s=s+"【"<line_sep>s=s+word[i["pos"]]<line_sep>s=s+"】"<block_end>print(s)<line_sep> |
<import_stmt>pytest<import_from_stmt>h models<import_from_stmt>h.services flag<class_stmt>TestFlagServiceFlagged<block_start><def_stmt>test_it_returns_true_when_flag_exists self svc flag<block_start><assert_stmt>svc.flagged(flag.user flag.annotation)<is><true><block_end><def_stmt>test_it_returns_false_when_flag_does_not_exist self svc user annotation<block_start><assert_stmt><not>svc.flagged(user annotation)<block_end><def_stmt>test_it_handles_missing_values self svc user annotation<block_start><assert_stmt><not>svc.flagged(<none> annotation)<assert_stmt><not>svc.flagged(user <none>)<block_end><def_stmt>test_it_uses_the_cache_if_possible self svc user annotation<block_start><assert_stmt><not>svc.flagged(user annotation)<line_sep>svc._flagged_cache[# pylint:disable=protected-access
(user.id annotation.id)]=<true><assert_stmt>svc.flagged(user annotation)<block_end><def_stmt>test_it_lists_flagged_ids self svc user flag noise<block_start>annotation_ids=[flag.annotation_id<for>flag noise]<line_sep>annotation_ids.append(flag.annotation_id)<line_sep>all_flagged=svc.all_flagged(user annotation_ids)<assert_stmt>all_flagged<eq>{flag.annotation_id}<assert_stmt>svc._flagged_cache<eq>{# pylint:disable=protected-access
(user.id noise[0].annotation_id):<false> (user.id noise[1].annotation_id):<false> (user.id flag.annotation_id):<true> }<block_end><def_stmt>test_it_handles_all_flagged_with_no_ids self svc user<block_start><assert_stmt>svc.all_flagged(user [])<eq>set()<block_end><def_stmt>test_it_handles_all_flagged_with_no_user self svc annotation<block_start><assert_stmt>svc.all_flagged(<none> [annotation.id])<eq>set()<block_end>@pytest.fixture<def_stmt>flag self factories user annotation<block_start><return>factories.Flag(user=user annotation=annotation)<block_end>@pytest.fixture<def_stmt>user self factories<block_start><return>factories.User()<block_end>@pytest.fixture<def_stmt>annotation self factories<block_start><return>factories.Annotation()<block_end>@pytest.fixture(autouse=<true>)<def_stmt>noise self factories<block_start><return>factories.Flag.create_batch(2)<block_end><block_end><class_stmt>TestFlagServiceCreate<block_start><def_stmt>test_it_creates_flag self svc db_session factories<block_start>user=factories.User()<line_sep>annotation=factories.Annotation(userid=user.userid)<line_sep>svc.create(user annotation)<line_sep>flag=(db_session.query(models.Flag).filter_by(user_id=user.id annotation_id=annotation.id).first())<assert_stmt>flag<is><not><none><block_end><def_stmt>test_it_skips_creating_flag_when_already_exists self svc db_session factories<block_start>existing=factories.Flag()<line_sep>svc.create(existing.user existing.annotation)<assert_stmt>(db_session.query(models.Flag).filter_by(user_id=existing.user.id annotation_id=existing.annotation.id).count()<eq>1)<block_end><block_end><class_stmt>TestFlagServiceCount<block_start><def_stmt>test_flag_count_returns_zero_for_unflagged_annotation self svc unflagged<block_start><assert_stmt><not>svc.flag_count(unflagged)<block_end><def_stmt>test_flag_count_returns_zero_for_None self svc<block_start><assert_stmt><not>svc.flag_count(<none>)<block_end><def_stmt>test_flag_count_returns_flag_count_for_flagged_annotation self svc 
flagged<block_start><assert_stmt>svc.flag_count(flagged)<eq>2<block_end><def_stmt>test_flag_count_uses_the_cache self svc flagged<block_start>svc._flag_count_cache[flagged.id]=99999# pylint:disable=protected-access
<assert_stmt>svc.flag_count(flagged)<eq>99999<block_end><def_stmt>test_flag_counts self svc flagged unflagged<block_start>ann_ids=[flagged.id unflagged.id]<line_sep>flag_counts=svc.flag_counts(ann_ids)<assert_stmt>(# pylint:disable=protected-access
flag_counts<eq>svc._flag_count_cache<eq>{flagged.id:2 unflagged.id:0})<block_end><def_stmt>test_flag_counts_returns_empty_dict_for_no_ids self svc<block_start><assert_stmt>svc.flag_counts([])<eq>{}<block_end><def_stmt>test_flag_counts_returns_zero_for_unflagged_annotation self svc unflagged<block_start>flag_counts=svc.flag_counts([unflagged.id])<assert_stmt><not>flag_counts[unflagged.id]<block_end>@pytest.fixture<def_stmt>unflagged self factories<block_start><return>factories.Annotation()<block_end>@pytest.fixture<def_stmt>flagged self factories<block_start>annotation=factories.Annotation()<line_sep>factories.Flag.create_batch(2 annotation=annotation)<line_sep><return>annotation<block_end><block_end><class_stmt>TestFlagServiceFactory<block_start><def_stmt>test_it_returns_flag_service self pyramid_request<block_start>svc=flag.flag_service_factory(<none> pyramid_request)<assert_stmt>isinstance(svc flag.FlagService)<block_end><block_end>@pytest.fixture<def_stmt>svc db_session<block_start><return>flag.FlagService(db_session)<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>allSuperClusterCandidates=cms.EDProducer("ConcreteEcalCandidateProducer" src=cms.InputTag("hybridSuperClusters") particleType=cms.string('gamma'))<line_sep> |
######################################################################
# INSTALL #
######################################################################
<import_from_stmt>Classes.Metadata Metadata<class_stmt>Install<block_start>"""
Stores data about an installation for usage
"""<def_stmt>__init__ self json_name:str display_name:str path:str install_switches download_type:str directory:str custom_install_switch install_exit_codes uninstall_exit_codes metadata:Metadata version<block_start>self.display_name=display_name<line_sep>self.json_name=json_name<line_sep>self.path=path<line_sep>self.install_switches=install_switches<line_sep>self.download_type=download_type<line_sep>self.directory=directory<line_sep>self.custom_install_switch=custom_install_switch<line_sep>self.metadata=metadata<line_sep>self.install_exit_codes=install_exit_codes<line_sep>self.uninstall_exit_codes=uninstall_exit_codes<line_sep>self.version=version<block_end><block_end> |
<import_from_future_stmt> absolute_import<import_from_stmt>pyti catch_errors<import_from_stmt>pyti.exponential_moving_average exponential_moving_average<as>ema <def_stmt>double_exponential_moving_average data period<block_start>"""
Double Exponential Moving Average.
Formula:
DEMA = 2*EMA - EMA(EMA)
"""<line_sep>catch_errors.check_for_period_error(data period)<line_sep>dema=(2<times>ema(data period))-ema(ema(data period) period)<line_sep><return>dema<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>prml.nn.function Function<class_stmt>Product(Function)<block_start><def_stmt>__init__ self axis=<none> keepdims=<false><block_start><if_stmt>isinstance(axis int)<block_start>axis=(axis )<block_end><elif_stmt>isinstance(axis tuple)<block_start>axis=tuple(sorted(axis))<block_end>self.axis=axis<line_sep>self.keepdims=keepdims<block_end><def_stmt>_forward self x<block_start>self.output=np.prod(x axis=self.axis keepdims=<true>)<if_stmt><not>self.keepdims<block_start><return>np.squeeze(self.output)<block_end><else_stmt><block_start><return>self.output<block_end><block_end><def_stmt>backward self delta x<block_start><if_stmt><not>self.keepdims<and>self.axis<is><not><none><block_start><for_stmt>ax self.axis<block_start>delta=np.expand_dims(delta ax)<block_end><block_end>dx=delta<times>self.output/x<line_sep><return>dx<block_end><block_end><def_stmt>prod x axis=<none> keepdims=<false><block_start>"""
product of all element in the array
Parameters
----------
x : tensor_like
input array
axis : int, tuple of ints
axis or axes along which a product is performed
keepdims : bool
keep dimensionality or not
Returns
-------
product : tensor_like
product of all element
"""<line_sep><return>Product(axis=axis keepdims=keepdims).forward(x)<block_end> |
# -*- mode: python; encoding: utf-8 -*-
#
# Copyright 2017 the Critic contributors, Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
<import_stmt>api<class_stmt>CommentError(api.APIError)<block_start><pass><block_end><class_stmt>InvalidCommentId(CommentError)<block_start>"""Raised when an invalid comment id is used."""<def_stmt>__init__ self comment_id<block_start>"""Constructor"""<line_sep>super(InvalidCommentId self).__init__("Invalid comment id: %d"%comment_id)<line_sep>self.comment_id=comment_id<block_end><block_end><class_stmt>InvalidCommentIds(CommentError)<block_start>"""Raised by fetchMany() when invalid comment ids are used."""<def_stmt>__init__ self comment_ids<block_start>"""Constructor"""<line_sep>super(InvalidCommentIds self).__init__("Invalid comment ids: %s"%", ".join(map(str comment_ids)))<line_sep>self.comment_ids=comment_ids<block_end><block_end><class_stmt>InvalidLocation(CommentError)<block_start>"""Raised when attempting to specify an invalid comment location"""<line_sep><pass><block_end><class_stmt>Comment(api.APIObject)<block_start>TYPE_VALUES=frozenset(["issue" "note"])<line_sep>@property<def_stmt>id self<block_start>"""The comment's unique id"""<line_sep><return>self._impl.id<block_end>@property<def_stmt>type self<block_start>"""The comment's type
The type is one of "issue" and "note"."""<line_sep><pass><block_end>@property<def_stmt>is_draft self<block_start>"""True if the comment is not yet published
Unpublished comments are not displayed to other users."""<line_sep><return>self._impl.is_draft<block_end>@property<def_stmt>review self<block_start>"""The review to which the comment belongs
The review is returned as an api.review.Review object."""<line_sep><return>self._impl.getReview(self.critic)<block_end>@property<def_stmt>author self<block_start>"""The comment's author
The author is returned as an api.user.User object."""<line_sep><return>self._impl.getAuthor(self.critic)<block_end>@property<def_stmt>timestamp self<block_start>"""The comment's timestamp
The return value is a datetime.datetime object."""<line_sep><return>self._impl.timestamp<block_end>@property<def_stmt>location self<block_start>"""The location of the comment, or None
If the comment was made against lines in a commit message, the return
value is a api.comment.CommitMessageLocation object. If the comment
was made against lines in a file version, the return value is
api.comment.FileVersionLocation object. Otherwise, the return value
is None."""<line_sep><return>self._impl.getLocation(self.critic)<block_end>@property<def_stmt>text self<block_start>"""The comment's text"""<line_sep><return>self._impl.text<block_end>@property<def_stmt>replies self<block_start>"""The replies to the comment
The replies are returned as a list of api.reply.Reply objects."""<line_sep><return>self._impl.getReplies(self.critic)<block_end><class_stmt>DraftChanges(object)<block_start>"""Draft changes to the comment"""<def_stmt>__init__ self author is_draft reply new_type<block_start>self.__author=author<line_sep>self.__is_draft=is_draft<line_sep>self.__reply=reply<line_sep>self.__new_type=new_type<block_end>@property<def_stmt>author self<block_start>"""The author of these draft changes
The author is returned as an api.user.User object."""<line_sep><return>self.__author<block_end>@property<def_stmt>is_draft self<block_start>"""True if the comment itself is a draft (not published)"""<line_sep><return>self.__is_draft<block_end>@property<def_stmt>reply self<block_start>"""The current unpublished reply
The reply is returned as an api.reply.Reply object, or None if
there is no current unpublished reply."""<line_sep><return>self.__reply<block_end>@property<def_stmt>new_type self<block_start>"""The new type of an unpublished type change
The type is returned as a string. Comment.TYPE_VALUES defines the
set of possible return values."""<line_sep><return>self.__new_type<block_end><block_end>@property<def_stmt>draft_changes self<block_start>"""The comment's current draft changes
The draft changes are returned as a Comment.DraftChanges object, or
None if the current user has no unpublished changes to this comment.
If the comment is currently an issue, or the current user has an
unpublished change of the comment's type to issue, the returned
object will be an Issue.DraftChanges instead."""<line_sep><return>self._impl.getDraftChanges(self.critic)<block_end><block_end><class_stmt>Issue(Comment)<block_start>STATE_VALUES=frozenset(["open" "addressed" "resolved"])<line_sep>@property<def_stmt>type self<block_start><return>"issue"<block_end>@property<def_stmt>state self<block_start>"""The issue's state
The state is one of the strings "open", "addressed" or "resolved"."""<line_sep><return>self._impl.state<block_end>@property<def_stmt>addressed_by self<block_start>"""The commit that addressed the issue, or None
The value is an api.commit.Commit object, or None if the issue's
state is not "addressed"."""<line_sep><return>self._impl.getAddressedBy(self.critic)<block_end>@property<def_stmt>resolved_by self<block_start>"""The user that resolved the issue, or None
The value is an api.user.User object, or None if the issue's state is
not "resolved"."""<line_sep><return>self._impl.getResolvedBy(self.critic)<block_end><class_stmt>DraftChanges(Comment.DraftChanges)<block_start>"""Draft changes to the issue"""<def_stmt>__init__ self author is_draft reply new_type new_state new_location<block_start>super(Issue.DraftChanges self).__init__(author is_draft reply new_type)<line_sep>self.__new_state=new_state<line_sep>self.__new_location=new_location<block_end>@property<def_stmt>new_state self<block_start>"""The issue's new state
The new state is returned as a string, or None if the current
user has not resolved or reopened the issue. Issue.STATE_VALUES
defines the set of possible return values."""<line_sep><return>self.__new_state<block_end>@property<def_stmt>new_location self<block_start>"""The issue's new location
The new location is returned as a FileVersionLocation objects, or
None if the issue has not been reopened, or if it was manually
resolved rather than addressed and did not need to be relocated
when being reopened.
Since only issues in file version locations can be addressed,
that is the only possible type of new location."""<line_sep><return>self.__new_location<block_end><block_end><block_end><class_stmt>Note(Comment)<block_start>@property<def_stmt>type self<block_start><return>"note"<block_end><block_end><class_stmt>Location(api.APIObject)<block_start>TYPE_VALUES=frozenset(["general" "commit-message" "file-version"])<def_stmt>__len__ self<block_start>"""Return the the length of the location, in lines"""<line_sep><return>(self.last_line-self.first_line)+1<block_end>@property<def_stmt>type self<block_start>"""The location's type
The type is one of "commit-message" and "file-version"."""<line_sep><pass><block_end>@property<def_stmt>first_line self<block_start>"""The line number of the first commented line
Note that line numbers are one-based."""<line_sep><return>self._impl.first_line<block_end>@property<def_stmt>last_line self<block_start>"""The line number of the last commented line
Note that line numbers are one-based."""<line_sep><return>self._impl.last_line<block_end><block_end><class_stmt>CommitMessageLocation(Location)<block_start>@property<def_stmt>type self<block_start><return>"commit-message"<block_end>@property<def_stmt>commit self<block_start>"""The commit whose message was commented"""<line_sep><return>self._impl.getCommit(self.critic)<block_end>@staticmethod<def_stmt>make critic first_line last_line commit<block_start><return>api.impl.comment.makeCommitMessageLocation(critic first_line last_line commit)<block_end><block_end><class_stmt>FileVersionLocation(Location)<block_start>@property<def_stmt>type self<block_start><return>"file-version"<block_end>@property<def_stmt>changeset self<block_start>"""The changeset containing the comment
The changeset is returned as an api.changeset.Changeset object.
If the comment was created while looking at a diff, this will
initially be that changeset. As additional commits are added to the
review, this changeset may be "extended" to contain those added
commits.
This is the ideal changeset to use to display the comment, unless it
is an issue that has been addressed, in which case a better changeset
would be the diff of the commit returned by Issue.addressed_by.
If the user did not make the comment while looking at a diff but
rather while looking at a single version of the file, then this
attribute returns None.
If this is an object returned by translateTo() called with a
changeset argument, then this will be that changeset."""<line_sep><return>self._impl.getChangeset(self.critic)<block_end>@property<def_stmt>side self<block_start>"""The commented side ("old" or "new") of the changeset
If the user did not make the comment while looking at a changeset
(i.e. a diff) but rather while looking at a single version of the
file, then this attribute returns None."""<line_sep><return>self._impl.side<block_end>@property<def_stmt>commit self<block_start>"""The commit whose version of the file this location references
The commit is returned as an api.commit.Commit object.
If this is an object returned by translateTo() called with a commit
argument, then this is the commit that was given as an argument to
it. If this is the primary location of the comment (returned from
Comment.location) then this is the commit whose version of the file
the comment was originally made against, or None if the comment was
made while looking at a diff."""<line_sep><return>self._impl.getCommit(self.critic)<block_end>@property<def_stmt>file self<block_start>"""The commented file"""<line_sep><return>self._impl.getFile(self.critic)<block_end>@property<def_stmt>is_translated self<block_start>"""True if this is a location returned by |translateTo()|"""<line_sep><return>self._impl.is_translated<block_end><def_stmt>translateTo self changeset=<none> commit=<none><block_start>"""Return a translated file version location, or None
The location is translated to the version of the file in a certain
commit. If |changeset| is not None, that commit is the changeset's
|to_commit|, unless the comment is not present there, and otherwise
the changeset's |from_commit|. If |commit| is not None, that's the
commit.
If the comment is not present in the commit, None is returned.
The returned object's |is_translated| will be True.
If the |changeset| argument is not None, then the returned object's
|changeset| will be that changeset, and its |side| will reflect which
of its |from_commit| and |to_commit| ended up being used. The
returned object's |commit| will be None.
If the |commit| argument is not None, the returned object's |commit|
will be that commit, and its |changeset| and |side| will be None."""<assert_stmt>changeset<is><none><or>isinstance(changeset api.changeset.Changeset)<assert_stmt>commit<is><none><or>isinstance(commit api.commit.Commit)<assert_stmt>(changeset<is><none>)<ne>(commit<is><none>)<line_sep><return>self._impl.translateTo(self.critic changeset commit)<block_end>@staticmethod<def_stmt>make critic first_line last_line file changeset=<none> side=<none> commit=<none># File is required.
<block_start><assert_stmt>isinstance(file api.file.File)<line_sep># Changeset and side go together.
<assert_stmt>(changeset<is><none>)<eq>(side<is><none>)<assert_stmt>(changeset<is><none>)<or>isinstance(changeset api.changeset.Changeset)<line_sep># Commit conflicts with changeset, but one is required.
<assert_stmt>(commit<is><none>)<ne>(changeset<is><none>)<assert_stmt>(commit<is><none>)<or>isinstance(commit api.commit.Commit)<line_sep><return>api.impl.comment.makeFileVersionLocation(critic first_line last_line file changeset side commit)<block_end><block_end><def_stmt>fetch critic comment_id<block_start>"""Fetch the Comment object with the given id"""<import_stmt>api.impl<assert_stmt>isinstance(critic api.critic.Critic)<assert_stmt>isinstance(comment_id int)<line_sep><return>api.impl.comment.fetch(critic comment_id)<block_end><def_stmt>fetchMany critic comment_ids<block_start>"""Fetch multiple Comment objects with the given ids"""<import_stmt>api.impl<assert_stmt>isinstance(critic api.critic.Critic)<line_sep>comment_ids=list(comment_ids)<assert_stmt>all(isinstance(comment_id int)<for>comment_id comment_ids)<line_sep><return>api.impl.comment.fetchMany(critic comment_ids)<block_end><def_stmt>fetchAll critic review=<none> author=<none> comment_type=<none> state=<none> location_type=<none> changeset=<none> commit=<none><block_start>"""Fetch all Comment objects
If |review| is not None, only comments in the specified review are
returned.
If |author| is not None, only comments created by the specified user are
returned.
If |comment_type| is not None, only comments of the specified type are
returned.
If |state| is not None, only issues in the specified state are returned.
This implies type="issue".
If |location_type| is not None, only issues in the specified type of
location are returned.
If |changeset| is not None, only comments against file versions that are
referenced by the specified changeset are returned. Must be combined with
|review|, and can not be combined with |commit|.
If |commit| is not None, only comments against the commit's message or
file versions referenced by the commit are returned. Must be combined
with |review|, and can not be combined with |changeset|."""<import_stmt>api.impl<assert_stmt>isinstance(critic api.critic.Critic)<assert_stmt>review<is><none><or>isinstance(review api.review.Review)<assert_stmt>author<is><none><or>isinstance(author api.user.User)<assert_stmt>comment_type<is><none><or>comment_type<in>Comment.TYPE_VALUES<assert_stmt>state<is><none><or>state<in>Issue.STATE_VALUES<assert_stmt>state<is><none><or>comment_type<in>(<none> "issue")<assert_stmt>location_type<is><none><or>location_type<in>Location.TYPE_VALUES<assert_stmt>changeset<is><none><or>isinstance(changeset api.changeset.Changeset)<assert_stmt>changeset<is><none><or>review<is><not><none><assert_stmt>commit<is><none><or>isinstance(commit api.commit.Commit)<assert_stmt>commit<is><none><or>review<is><not><none><assert_stmt>changeset<is><none><or>commit<is><none><line_sep><return>api.impl.comment.fetchAll(critic review author comment_type state location_type changeset commit)<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>json<import_stmt>urllib.parse<import_stmt>tornado.gen<import_from_stmt>app.const *<import_from_stmt>app.base.configs tp_cfg<import_from_stmt>app.base.session tp_session<import_from_stmt>app.base.core_server core_service_async_post_http<import_from_stmt>app.model record<import_from_stmt>app.base.stats tp_stats<import_from_stmt>app.base.logger *<import_from_stmt>app.base.controller TPBaseJsonHandler<class_stmt>RpcHandler(TPBaseJsonHandler)<block_start>@tornado.gen.coroutine<def_stmt>get self<block_start>_uri=self.request.uri.split('?' 1)<if_stmt>len(_uri)<ne>2<block_start><return>self.write_json(TPE_PARAM)<block_end><yield>self._dispatch(urllib.parse.unquote(_uri[1]))<block_end>@tornado.gen.coroutine<def_stmt>post self<block_start>req=self.request.body.decode('utf-8')<if_stmt>req<eq>''<block_start><return>self.write_json(TPE_PARAM)<block_end><yield>self._dispatch(req)<block_end>@tornado.gen.coroutine<def_stmt>_dispatch self req<block_start><try_stmt><block_start>_req=json.loads(req)<if_stmt>'method'<not><in>_req<or>'param'<not><in>_req<block_start><return>self.write_json(TPE_PARAM)<block_end><block_end><except_stmt><block_start><return>self.write_json(TPE_JSON_FORMAT)<block_end># log.d('WEB-JSON-RPC, method=`{}`\n'.format(_req['method']))
<if_stmt>'get_conn_info'<eq>_req['method']<block_start><return>self._get_conn_info(_req['param'])<block_end><elif_stmt>'session_begin'<eq>_req['method']<block_start><return>self._session_begin(_req['param'])<block_end><elif_stmt>'session_update'<eq>_req['method']<block_start><return>self._session_update(_req['param'])<block_end><elif_stmt>'session_end'<eq>_req['method']<block_start><return>self._session_end(_req['param'])<block_end><elif_stmt>'register_core'<eq>_req['method']<block_start><return>self._register_core(_req['param'])<block_end><elif_stmt>'exit'<eq>_req['method']<block_start><return>self._exit()<block_end><else_stmt><block_start>log.e('WEB-JSON-RPC got unknown method: `{}`.\n'.format(_req['method']))<block_end><return>self.write_json(TPE_UNKNOWN_CMD)<block_end><def_stmt>_get_conn_info self param<block_start><if_stmt>'conn_id'<not><in>param<block_start><return>self.write_json(TPE_PARAM)<block_end>conn_id=param['conn_id']<line_sep>x=tp_session().taken('tmp-conn-info-{}'.format(conn_id) <none>)<if_stmt>x<is><none><block_start><return>self.write_json(TPE_NOT_EXISTS)<block_end><else_stmt><block_start><return>self.write_json(TPE_OK data=x)<block_end><block_end><def_stmt>_session_begin self param<block_start><try_stmt><block_start>_sid=param['sid']<line_sep>_user_id=param['user_id']<line_sep>_host_id=param['host_id']<line_sep>_account_id=param['acc_id']<line_sep>_user_name=param['user_username']<line_sep>_acc_name=param['acc_username']<line_sep>_host_ip=param['host_ip']<line_sep>_conn_ip=param['conn_ip']<line_sep>_conn_port=param['conn_port']<line_sep>_client_ip=param['client_ip']<line_sep>_auth_type=param['auth_type']<line_sep>_protocol_type=param['protocol_type']<line_sep>_protocol_sub_type=param['protocol_sub_type']<block_end><except_stmt>IndexError<block_start><return>self.write_json(TPE_PARAM)<block_end>err,record_id=record.session_begin(_sid _user_id _host_id _account_id _user_name _acc_name _host_ip _conn_ip _conn_port _client_ip _auth_type 
_protocol_type _protocol_sub_type)<if_stmt>err<ne>TPE_OK<block_start><return>self.write_json(err message='can not write database.')<block_end><else_stmt><block_start>tp_stats().conn_counter_change(1)<line_sep><return>self.write_json(TPE_OK data={'rid':record_id})<block_end><block_end><def_stmt>_session_update self param<block_start><try_stmt><block_start>rid=param['rid']<line_sep>protocol_sub_type=param['protocol_sub_type']<line_sep>code=param['code']<block_end><except_stmt><block_start><return>self.write_json(TPE_PARAM)<block_end><if_stmt>'rid'<not><in>param<or>'code'<not><in>param<block_start><return>self.write_json(TPE_PARAM)<block_end><if_stmt><not>record.session_update(rid protocol_sub_type code)<block_start><return>self.write_json(TPE_DATABASE 'can not write database.')<block_end><else_stmt><block_start><return>self.write_json(TPE_OK)<block_end><block_end><def_stmt>_session_end self param<block_start><if_stmt>'rid'<not><in>param<or>'code'<not><in>param<block_start><return>self.write_json(TPE_PARAM message='invalid request.')<block_end><if_stmt><not>record.session_end(param['rid'] param['code'])<block_start><return>self.write_json(TPE_DATABASE 'can not write database.')<block_end><else_stmt><block_start>tp_stats().conn_counter_change(-1)<line_sep><return>self.write_json(TPE_OK)<block_end><block_end><def_stmt>_register_core self param# 因为core服务启动了(之前可能非正常终止了),做一下数据库中会话状态的修复操作
<block_start>record.session_fix()<if_stmt>'rpc'<not><in>param<block_start><return>self.write_json(TPE_PARAM 'invalid param.')<block_end>tp_cfg().common.core_server_rpc=param['rpc']<line_sep># 获取core服务的配置信息
req={'method':'get_config' 'param':[]}<line_sep>_yr=core_service_async_post_http(req)<line_sep>code,ret_data=<yield>_yr<if_stmt>code<ne>TPE_OK<block_start><return>self.write_json(code 'get config from core-service failed.')<block_end>log.d('update base server config info.\n')<line_sep>tp_cfg().update_core(ret_data)<line_sep># 将运行时配置发送给核心服务
req={'method':'set_config' 'param':{'noop_timeout':tp_cfg().sys.session.noop_timeout}}<line_sep>_yr=core_service_async_post_http(req)<line_sep>code,ret_data=<yield>_yr<if_stmt>code<ne>TPE_OK<block_start><return>self.write_json(code 'set runtime-config to core-service failed.')<block_end><return>self.write_json(TPE_OK)<block_end><def_stmt>_exit self# set exit flag.
<block_start><return>self.write_json(TPE_OK)<block_end><block_end> |
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
# This file instantiates some of the MIB managed objects for SNMP engine use
#
<if_stmt>'mibBuilder'<not><in>globals()<block_start><import_stmt>sys<line_sep>sys.stderr.write(__doc__)<line_sep>sys.exit(1)<block_end>MibScalarInstance,=mibBuilder.importSymbols('SNMPv2-SMI' 'MibScalarInstance')<line_sep>(snmpTargetSpinLock snmpUnavailableContexts snmpUnknownContexts)=mibBuilder.importSymbols('SNMP-TARGET-MIB' 'snmpTargetSpinLock' 'snmpUnavailableContexts' 'snmpUnknownContexts')<line_sep>_snmpTargetSpinLock=MibScalarInstance(snmpTargetSpinLock.name (0 ) snmpTargetSpinLock.syntax.clone(0))<line_sep>_snmpUnavailableContexts=MibScalarInstance(snmpUnavailableContexts.name (0 ) snmpUnavailableContexts.syntax.clone(0))<line_sep>_snmpUnknownContexts=MibScalarInstance(snmpUnknownContexts.name (0 ) snmpUnknownContexts.syntax.clone(0))<line_sep>mibBuilder.exportSymbols('__SNMP-TARGET-MIB' snmpTargetSpinLock=_snmpTargetSpinLock snmpUnavailableContexts=_snmpUnavailableContexts snmpUnknownContexts=_snmpUnknownContexts)<line_sep> |
<import_stmt>os<import_stmt>unittest<line_sep>this_dir=os.path.dirname(os.path.realpath(__file__))<class_stmt>TestPageXML(unittest.TestCase)<block_start><def_stmt>run_dataset_viewer self add_args<block_start><import_from_stmt>calamari_ocr.scripts.dataset_viewer main<line_sep>main(add_args+["--no_plot"])<block_end><def_stmt>test_cut_modes self<block_start>images=os.path.join(this_dir "data" "avicanon_pagexml" "*.nrm.png")<line_sep>self.run_dataset_viewer(["--gen" "PageXML" "--gen.images" images "--gen.cut_mode" "BOX"])<line_sep>self.run_dataset_viewer(["--gen" "PageXML" "--gen.images" images "--gen.cut_mode" "MBR"])<block_end><block_end> |
<import_from_stmt>.ranking CreditRanking<import_from_stmt>.interleaving_method InterleavingMethod<import_stmt>numpy<as>np<import_from_stmt>scipy.optimize linprog<class_stmt>Optimized(InterleavingMethod)<block_start>'''
Optimized Interleaving
Args:
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''<def_stmt>__init__ self lists max_length=<none> sample_num=<none> credit_func='inverse' secure_sampling=<false><block_start>'''
lists: lists of document IDs
max_length: the maximum length of resultant interleaving.
If this is None (default), it is set to the minimum length
of the given lists.
sample_num: If this is None (default), an interleaved ranking is
generated every time when `interleave` is called.
Otherwise, `sample_num` rankings are sampled in the
initialization, one of which is returned when `interleave`
is called.
credit_func: either 'inverse' (1/rank) or 'negative' (-rank)
'''<if_stmt>sample_num<is><none><block_start><raise>ValueError('sample_num cannot be None, '+'i.e. the initial sampling is necessary')<block_end><if_stmt>credit_func<eq>'inverse'<block_start>self._credit_func=<lambda>x:1.0/x<block_end><elif_stmt>credit_func<eq>'negative'<block_start>self._credit_func=<lambda>x:-x<block_end><else_stmt><block_start><raise>ValueError('credit_func should be either inverse or negative')<block_end>self._secure_sampling=secure_sampling<line_sep>super(Optimized self).__init__(lists max_length=max_length sample_num=sample_num)<line_sep># self._rankings (sampled rankings) is obtained here
res=self._compute_probabilities(lists self._rankings)<line_sep>is_success,self._probabilities,_=res<line_sep>self._probabilities<augdiv>np.sum(self._probabilities)<if_stmt><not>is_success<block_start><raise>ValueError('Optimization failed')<block_end><block_end><def_stmt>_sample_rankings self<block_start>'''
Sample `sample_num` rankings
'''<line_sep>distribution={}<if_stmt>self._secure_sampling<block_start>rankings=set()<for_stmt>_ range(self.sample_num)<block_start>rankings.add(self._sample(self.max_length self.lists))<block_end><for_stmt>ranking rankings<block_start>distribution[ranking]=1.0/len(rankings)<block_end><block_end><else_stmt><block_start><while_stmt>len(distribution)<l>self.sample_num<block_start>ranking=self._sample(self.max_length self.lists)<line_sep>distribution[ranking]=1.0/self.sample_num<block_end><block_end>self._rankings,self._probabilities=zip(*distribution.items())<block_end><def_stmt>_sample self max_length lists<block_start>'''
Prefix constraint sampling
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
max_length: the maximum length of resultant interleaving
lists: lists of document IDs
Return an instance of Ranking
'''<line_sep>num_rankers=len(lists)<line_sep>result=CreditRanking(num_rankers)<line_sep>teams=set(range(num_rankers))<while_stmt>len(result)<l>max_length<block_start><if_stmt>len(teams)<eq>0<block_start><break><block_end>selected_team=np.random.choice(list(teams))<line_sep>docs=[x<for>x lists[selected_team]<if><not>x<in>result]<if_stmt>len(docs)<g>0<block_start>selected_doc=docs[0]<line_sep>result.append(selected_doc)<block_end><else_stmt><block_start>teams.remove(selected_team)<block_end><block_end># assign credits
<for_stmt>docid result<block_start><for_stmt>team result.credits<block_start><if_stmt>docid<in>lists[team]<block_start>rank=lists[team].index(docid)+1<block_end><else_stmt><block_start>rank=len(lists[team])+1<block_end>result.credits[team][docid]=self._credit_func(rank)<block_end><block_end><return>result<block_end><def_stmt>_compute_probabilities self lists rankings<block_start>'''
Solve the optimization problem in
(Multileaved Comparisons for Fast Online Evaluation, CIKM'14)
lists: lists of document IDs
rankings: a list of Ranking instances
Return a list of probabilities for input rankings
'''<line_sep># probability constraints
A_p_sum=np.array([1]<times>len(rankings))<line_sep># unbiasedness constraints
ub_cons=self._unbiasedness_constraints(lists rankings)<line_sep># sensitivity
sensitivity=self._sensitivity(lists rankings)<line_sep># constraints
A_eq=np.vstack((A_p_sum ub_cons))<line_sep>b_eq=np.array([1.0]+[0.0]<times>ub_cons.shape[0])<line_sep># solving the optimization problem
res=linprog(sensitivity # objective function
A_eq=A_eq b_eq=b_eq # constraints
bounds=[(0 1)]<times>len(rankings)# 0 <= p <= 1
)<line_sep><return>res.success res.x res.fun<block_end><def_stmt>_unbiasedness_constraints self lists rankings<block_start>'''
for each k and team x, for a certain c_k:
sum_{L_i} {p_i} * sum^k_{j=1} ranking.credits[x][d_j] = c_k
In other words,
sum_{L_i} {p_i} * sum^k_{j=1}
(ranking.credits[x][d_j] - ranking.credits[x+1][d_j]) = 0
'''<line_sep>result=[]<line_sep>credits=np.zeros((self.max_length len(rankings) len(lists)))<for_stmt>rid,ranking enumerate(rankings)<block_start><for_stmt>idx,docid enumerate(ranking)<block_start><for_stmt>team ranking.credits<block_start>credits[idx rid team]=ranking.credits[team][docid]<if_stmt>idx<g>0<block_start>credits[idx rid team]<augadd>credits[idx-1 rid team]<block_end><block_end><block_end><block_end><for_stmt>i range(len(lists)-1)<block_start>result.append(credits[: : i]-credits[: : i+1])<block_end>result=np.vstack(result)<line_sep><return>result<block_end><def_stmt>_sensitivity self lists rankings<block_start>'''
Expected variance
'''<line_sep># compute the mean of each ranking
mu=np.zeros(len(rankings))<for_stmt>rid,ranking enumerate(rankings)<block_start><for_stmt>idx,docid enumerate(ranking)<block_start>click_prob=1.0/(idx+1)<line_sep>credit=np.sum([ranking.credits[x][docid]<for>x ranking.credits])<line_sep>mu[rid]<augadd>click_prob<times>credit<block_end><block_end>mu<augdiv>len(lists)<line_sep># compute the variance
var=np.zeros(len(rankings))<for_stmt>rid,ranking enumerate(rankings)<block_start><for_stmt>x ranking.credits<block_start>v=0.0<for_stmt>idx,docid enumerate(ranking)<block_start>click_prob=1.0/(idx+1)<if_stmt>docid<in>ranking.credits[x]<block_start>v<augadd>click_prob<times>ranking.credits[x][docid]<block_end><block_end>v<augsub>mu[rid]<line_sep>var[rid]<augadd>v<power>2<block_end><block_end><return>var<block_end>@classmethod<def_stmt>compute_scores cls ranking clicks<block_start>'''
ranking: an instance of Ranking
clicks: a list of indices clicked by a user
Return a list of scores of each ranker.
'''<line_sep><return>{i:sum([ranking.credits[i][ranking[c]]<for>c clicks])<for>i ranking.credits}<block_end><block_end> |
"""
Dynamic Connection Creation from a Variable
This file contains one ongoing DAG that executes every 15 minutes.
This DAG makes use of one custom operator:
- CreateConnectionsFromVariable
https://github.com/airflow-plugins/variable_connection_plugin/blob/master/operator/variable_connection_operator.py#L36
If using encrypted tokens in the Variable (recommended), it is necessary
to create a separate "Fernet Key Connection" with the relevant Fernet Key
kept in the password field. This Conn ID can then be specified in the
operator below.
"""<import_from_stmt>datetime datetime<import_from_stmt>airflow DAG<import_from_stmt>airflow.operators CreateConnectionsFromVariable<line_sep>FERNET_KEY_CONN_ID=<none><line_sep>CONFIG_VARIABLE_KEY=''<line_sep>args={'owner':'airflow' 'start_date':datetime(2018 2 22 0 0) 'provide_context':<true> 'email':[] 'email_on_failure':<true>}<line_sep>dag=DAG('__VARIABLE_CONNECTION_CREATION__' schedule_interval="*/15 * * * *" default_args=args catchup=<false>)<line_sep>create_airflow_connections=CreateConnectionsFromVariable(task_id='create_airflow_connections' fernet_key_conn_id=FERNET_KEY_CONN_ID config_variable_key=CONFIG_VARIABLE_KEY dag=dag)<line_sep>create_airflow_connections<line_sep> |
# Copyright (C) 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>pyjamas Factory<import_from_stmt>pyjamas DOM<import_from_stmt>pyjamas.dnd.DNDHelper dndHelper<import_from_stmt>pyjamas.ui.Widget Widget<import_from_stmt>pyjamas.ui.DropHandler DropHandler<import_stmt>pyjd<class_stmt>DropWidget(object)<block_start>"""
Mix-in class for a drop-target widget
"""<line_sep><pass><block_end><class_stmt>Html5DropWidget(Widget DropHandler)<block_start><def_stmt>__init__ self **kw<block_start><if_stmt>(<not>hasattr(self 'attached'))<or>kw<block_start>Widget.__init__(self **kw)<block_end>DropHandler.__init__(self)<line_sep>self.addDropListener(self)<block_end><block_end><class_stmt>EmulatedDropWidget(Html5DropWidget)<block_start><def_stmt>__init__ self **kw<block_start>Html5DropWidget.__init__(self **kw)<line_sep>dndHelper.registerTarget(self)<block_end><block_end><def_stmt>init is_native=<none><block_start><global>DropWidget<if_stmt>is_native<is><none><block_start>html5_dnd=hasattr(DOM.createElement('span') 'draggable')<block_end><else_stmt><block_start>html5_dnd=is_native<block_end><if_stmt>html5_dnd<block_start>DropWidget=Html5DropWidget<block_end><else_stmt><block_start>DropWidget=EmulatedDropWidget<block_end><block_end><if_stmt>pyjd.is_desktop<block_start>init(pyjd.native_dnd)<block_end><else_stmt><block_start>init(<none>)<block_end>Factory.registerClass('pyjamas.ui.DropWidget' 'DropWidget' DropWidget)<line_sep> |
<import_stmt>graphene<import_from_stmt>.....payment.models Payment<import_from_stmt>....tests.utils get_graphql_content<line_sep>PAYMENT_QUERY=""" query Payments($filter: PaymentFilterInput){
payments(first: 20, filter: $filter) {
edges {
node {
id
gateway
capturedAmount {
amount
currency
}
total {
amount
currency
}
actions
chargeStatus
transactions {
error
gatewayResponse
amount {
currency
amount
}
}
}
}
}
}
"""<def_stmt>test_query_payments_filter_by_checkout payment_dummy checkouts_list permission_manage_orders staff_api_client# given
<block_start>payment1=payment_dummy<line_sep>payment1.checkout=checkouts_list[0]<line_sep>payment1.save()<line_sep>payment2=Payment.objects.get(id=payment1.id)<line_sep>payment2.id=<none><line_sep>payment2.checkout=checkouts_list[1]<line_sep>payment2.save()<line_sep>payment3=Payment.objects.get(id=payment1.id)<line_sep>payment3.id=<none><line_sep>payment3.checkout=checkouts_list[2]<line_sep>payment3.save()<line_sep>variables={"filter":{"checkouts":[graphene.Node.to_global_id("Checkout" checkout.pk)<for>checkout checkouts_list[1:4]]}}<line_sep># when
response=staff_api_client.post_graphql(PAYMENT_QUERY variables permissions=[permission_manage_orders])<line_sep># then
content=get_graphql_content(response)<line_sep>edges=content["data"]["payments"]["edges"]<line_sep>payment_ids={edge["node"]["id"]<for>edge edges}<assert_stmt>payment_ids<eq>{graphene.Node.to_global_id("Payment" payment.pk)<for>payment [payment2 payment3]}<block_end> |
# flake8: noqa
<import_from_stmt>typing Dict List Union<import_stmt>pytest<import_stmt>torch<import_from_stmt>catalyst.metrics DiceMetric IOUMetric TrevskyMetric<line_sep>base_outputs=torch.tensor([[0.8 0.1 0] [0 0.4 0.3] [0 0 1]])<line_sep>base_targets=torch.tensor([[1.0 0 0] [0 1 0] [1 1 0]])<line_sep>base_outputs=torch.stack([base_outputs base_targets])[<none> : : :]<line_sep>base_targets=torch.stack([base_targets base_targets])[<none> : : :]<line_sep>base_outputs_2=torch.tensor([[0.8 0.1 0.4] [0.1 0.4 0.3] [0 1 1]])<line_sep>base_targets_2=torch.tensor([[1.0 0.1 0] [0 0.5 0] [0 1 1]])<line_sep>base_outputs_2=torch.stack([base_outputs_2 base_targets_2])[<none> : : :]<line_sep>base_targets_2=torch.stack([base_targets_2 base_targets_2])[<none> : : :]<line_sep>EPS=1e-5<line_sep>@pytest.mark.parametrize("outputs, targets, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] [0.2 0.8] ["class_name_00" "class_name_01"] [{"dice/class_name_00":0.3636363446712494 "dice/class_name_01":1.0 "dice":0.6818182 "dice/_weighted":0.8727272748947144 } {"dice/class_name_00":0.781818151473999 "dice/class_name_01":0.9055555462837219 "dice":0.8436868190765381 "dice/_weighted":0.8808081150054932 } ] [{"dice/class_name_00":0.3636363446712494 "dice/class_name_01":1.0 "dice":0.6818181872367859 "dice/_micro":0.7123287916183472 "dice/_weighted":0.8727272748947144 } {"dice/class_name_00":0.5888112187385559 "dice/class_name_01":0.9552631378173828 "dice/_micro":0.7776271104812622 "dice":0.772037148475647 "dice/_macro":0.772037148475647 "dice/_weighted":0.8819727897644043 } ] ) ) )<def_stmt>test_dice_metric outputs:List[torch.Tensor] targets:List[torch.Tensor] weights:List[float] class_names:List[str] batch_answers:List[Dict[str float]] total_answers:List[Dict[str float]] <block_start>"""Docs."""<line_sep>metric=DiceMetric(weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers 
total_answers)<block_start>batch_score=metric.update_key_value(output target)<line_sep>total_score=metric.compute_key_value()<for_stmt>key,value batch_answer.items()<block_start><assert_stmt>key<in>batch_score<assert_stmt>abs(batch_score[key]-batch_answer[key])<l>EPS<block_end><for_stmt>key,value total_answer.items()<block_start><assert_stmt>key<in>total_score<assert_stmt>abs(total_score[key]-total_answer[key])<l>EPS<block_end><block_end><block_end>@pytest.mark.parametrize("outputs, targets, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] [0.2 0.8] ["class_name_00" "class_name_01"] [[0.3636363446712494 1.0] [0.781818151473999 0.9055555462837219]] [[[0.3636363446712494 1.0] 0.7123287916183472 0.6818181872367859 0.8727272748947144 ] [[0.5888112187385559 0.9552631378173828] 0.7776271104812622 0.772037148475647 0.8819727897644043 ] ] ) ) )<def_stmt>test_dice_metric_compute outputs:List[torch.Tensor] targets:List[torch.Tensor] weights:List[float] class_names:List[str] batch_answers:List[List[float]] total_answers:List[List[Union[List[float] float]]] <block_start>"""Docs."""<line_sep>metric=DiceMetric(weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers total_answers)<block_start>batch_score=metric.update(output target)<line_sep>total_score=metric.compute()<assert_stmt>len(batch_answer)<eq>len(batch_score)<for_stmt>pred,answer zip(batch_score batch_answer)<block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><assert_stmt>len(total_score)<eq>len(total_score)<for_stmt>pred,answer zip(total_score total_score)<block_start><if_stmt>isinstance(pred list)<block_start><for_stmt>pred_sample,answer_sample zip(pred answer)<block_start><assert_stmt>abs(pred_sample-answer_sample)<l>EPS<block_end><block_end><else_stmt><block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><block_end><block_end><block_end>@pytest.mark.parametrize("outputs, 
targets, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] [0.2 0.8] ["class_name_00" "class_name_01"] [{"iou/class_name_00":0.2222222536802292 "iou/class_name_01":1.0 "iou":0.6111111 "iou/_weighted":0.8444444537162781 } {"iou/class_name_00":0.641791045665741 "iou/class_name_01":0.8274111747741699 "iou":0.7346011400222778 "iou/_weighted":0.7902871370315552 } ] [{"iou/class_name_00":0.2222222536802292 "iou/class_name_01":1.0 "iou":0.6111111044883728 "iou/_micro":0.5531914830207825 "iou/_weighted":0.8444444537162781 } {"iou/class_name_00":0.4172447919845581 "iou/class_name_01":0.9143576622009277 "iou/_micro":0.6361619234085083 "iou":0.6658012270927429 "iou/_macro":0.6658012270927429 "iou/_weighted":0.8149350881576538 } ] ) ) )<def_stmt>test_iou_metric outputs:List[torch.Tensor] targets:List[torch.Tensor] weights:List[float] class_names:List[str] batch_answers:List[Dict[str float]] total_answers:List[Dict[str float]] <block_start>"""Docs."""<line_sep>metric=IOUMetric(weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers total_answers)<block_start>batch_score=metric.update_key_value(output target)<line_sep>total_score=metric.compute_key_value()<for_stmt>key,value batch_answer.items()<block_start><assert_stmt>key<in>batch_score<assert_stmt>abs(batch_score[key]-batch_answer[key])<l>EPS<block_end><for_stmt>key,value total_answer.items()<block_start><assert_stmt>key<in>total_score<assert_stmt>abs(total_score[key]-total_answer[key])<l>EPS<block_end><block_end><block_end>@pytest.mark.parametrize("outputs, targets, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] [0.2 0.8] ["class_name_00" "class_name_01"] [[0.2222222536802292 1.0] [0.641791045665741 0.8274111747741699]] [[[0.2222222536802292 1.0] 0.5531914830207825 0.6111111044883728 0.8444444537162781 ] [[0.4172447919845581 
0.9143576622009277] 0.6361619234085083 0.6658012270927429 0.8149350881576538 ] ] ) ) )<def_stmt>test_iou_metric_compute outputs:List[torch.Tensor] targets:List[torch.Tensor] weights:List[float] class_names:List[str] batch_answers:List[List[float]] total_answers:List[List[Union[List[float] float]]] <block_start>"""IOU update, compute test"""<line_sep>metric=IOUMetric(weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers total_answers)<block_start>batch_score=metric.update(output target)<line_sep>total_score=metric.compute()<assert_stmt>len(batch_answer)<eq>len(batch_score)<for_stmt>pred,answer zip(batch_score batch_answer)<block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><assert_stmt>len(total_score)<eq>len(total_score)<for_stmt>pred,answer zip(total_score total_score)<block_start><if_stmt>isinstance(pred list)<block_start><for_stmt>pred_sample,answer_sample zip(pred answer)<block_start><assert_stmt>abs(pred_sample-answer_sample)<l>EPS<block_end><block_end><else_stmt><block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><block_end><block_end><block_end>@pytest.mark.parametrize("outputs, targets, alpha, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] 0.2 [0.2 0.8] ["class_name_00" "class_name_01"] [{"trevsky/class_name_00":0.4166666567325592 "trevsky/class_name_01":1.0 "trevsky":0.7083333134651184 "trevsky/_weighted":0.8833333253860474 } {"trevsky/class_name_00":0.7524999976158142 "trevsky/class_name_01":0.9055555462837219 "trevsky":0.8290277719497681 "trevsky/_weighted":0.8749444484710693 } ] [{"trevsky/class_name_00":0.4166666567325592 "trevsky/class_name_01":1.0 "trevsky":0.7083333134651184 "trevsky/_micro":0.7558139562606812 "trevsky/_weighted":0.8833333253860474 } {"trevsky/class_name_00":0.6119186282157898 "trevsky/class_name_01":0.9552631974220276 "trevsky/_micro":0.7921270728111267 "trevsky":0.7835909128189087 
"trevsky/_macro":0.7835909128189087 "trevsky/_weighted":0.886594295501709 } ] ) ) )<def_stmt>test_trevsky_metric outputs:List[torch.Tensor] targets:List[torch.Tensor] alpha:float weights:List[float] class_names:List[str] batch_answers:List[Dict[str float]] total_answers:List[Dict[str float]] <block_start>metric=TrevskyMetric(alpha=alpha weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers total_answers)<block_start>batch_score=metric.update_key_value(output target)<line_sep>total_score=metric.compute_key_value()<for_stmt>key,value batch_answer.items()<block_start><assert_stmt>key<in>batch_score<assert_stmt>abs(batch_score[key]-batch_answer[key])<l>EPS<block_end><for_stmt>key,value total_answer.items()<block_start><assert_stmt>key<in>total_score<assert_stmt>abs(total_score[key]-total_answer[key])<l>EPS<block_end><block_end><block_end>@pytest.mark.parametrize("outputs, targets, alpha, weights, class_names, batch_answers, total_answers" (([base_outputs base_outputs_2] [base_targets base_targets_2] 0.2 [0.2 0.8] ["class_name_00" "class_name_01"] [[0.4166666567325592 1.0] [0.7524999976158142 0.9055555462837219]] [[[0.4166666567325592 1.0] 0.7558139562606812 0.7083333134651184 0.8833333253860474 ] [[0.6119186282157898 0.9552631974220276] 0.7921270728111267 0.7835909128189087 0.886594295501709 ] ] ) ) )<def_stmt>test_trevsky_metric_compute outputs:List[torch.Tensor] targets:List[torch.Tensor] alpha:float weights:List[float] class_names:List[str] batch_answers:List[List[float]] total_answers:List[List[Union[List[float] float]]] <block_start>"""Trevsky update, compute test"""<line_sep>metric=TrevskyMetric(alpha=alpha weights=weights class_names=class_names)<for_stmt>output,target,batch_answer,total_answer zip(outputs targets batch_answers total_answers)<block_start>batch_score=metric.update(output 
target)<line_sep>total_score=metric.compute()<assert_stmt>len(batch_answer)<eq>len(batch_score)<for_stmt>pred,answer zip(batch_score batch_answer)<block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><assert_stmt>len(total_score)<eq>len(total_score)<for_stmt>pred,answer zip(total_score total_score)<block_start><if_stmt>isinstance(pred list)<block_start><for_stmt>pred_sample,answer_sample zip(pred answer)<block_start><assert_stmt>abs(pred_sample-answer_sample)<l>EPS<block_end><block_end><else_stmt><block_start><assert_stmt>abs(pred-answer)<l>EPS<block_end><block_end><block_end><block_end> |
<import_stmt>base64<import_stmt>hashlib<import_from_stmt>utils.apr1 hash_apr1<def_stmt>md5_apr1 salt text<block_start><return>hash_apr1(salt text)<block_end><def_stmt>sha1 text<block_start>result=hashlib.sha1(text.encode('utf8'))<line_sep><return>base64.b64encode(result.digest()).decode('utf8')<block_end> |
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bash runfiles library init code for test_rules.bzl."""<line_sep># Init code to load the runfiles.bash file.
# The runfiles library itself defines rlocation which you would need to look
# up the library's runtime location, thus we have a chicken-and-egg problem.
INIT_BASH_RUNFILES=["# --- begin runfiles.bash initialization ---" "# Copy-pasted from Bazel Bash runfiles library (tools/bash/runfiles/runfiles.bash)." "set -euo pipefail" 'if [[ ! -d "${RUNFILES_DIR:-/dev/null}" && ! -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then' ' if [[ -f "$0.runfiles_manifest" ]]; then' ' export RUNFILES_MANIFEST_FILE="$0.runfiles_manifest"' ' elif [[ -f "$0.runfiles/MANIFEST" ]]; then' ' export RUNFILES_MANIFEST_FILE="$0.runfiles/MANIFEST"' ' elif [[ -f "$0.runfiles/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then' ' export RUNFILES_DIR="$0.runfiles"' " fi" "fi" 'if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then' ' source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"' 'elif [[ -f "${RUNFILES_MANIFEST_FILE:-/dev/null}" ]]; then' ' source "$(grep -m1 "^bazel_tools/tools/bash/runfiles/runfiles.bash " \\' ' "$RUNFILES_MANIFEST_FILE" | cut -d " " -f 2-)"' "else" ' echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"' " exit 1" "fi" "# --- end runfiles.bash initialization ---" ]<line_sep># Label of the runfiles library.
BASH_RUNFILES_DEP="@bazel_tools//tools/bash/runfiles"<line_sep> |
<import_from_stmt>guillotina configure<import_from_stmt>guillotina glogging<import_from_stmt>guillotina.db.interfaces IDBTransactionStrategy<import_from_stmt>guillotina.db.interfaces ITransaction<import_from_stmt>guillotina.db.strategies.base BaseStrategy<line_sep>logger=glogging.getLogger("guillotina")<line_sep>@configure.adapter(for_=ITransaction provides=IDBTransactionStrategy name="simple")<class_stmt>SimpleStrategy(BaseStrategy)<block_start><async_keyword><def_stmt>tpc_begin self<block_start><await>self.retrieve_tid()<if_stmt>self._transaction._db_txn<is><none><block_start><await>self._storage.start_transaction(self._transaction)<block_end><block_end><async_keyword><def_stmt>tpc_finish self# do actual db commit
<block_start><if_stmt>self.writable_transaction<block_start><await>self._storage.commit(self._transaction)<block_end><block_end><block_end> |
# Generated by Django 3.1.11 on 2021-07-01 20:18
<import_from_stmt>django.db migrations models<import_stmt>grandchallenge.core.storage<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("products" "0006_product_ce_under") ]<line_sep>operations=[migrations.CreateModel(name="ProjectAirFiles" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("title" models.CharField(max_length=150)) ("study_file" models.FileField(upload_to=grandchallenge.core.storage.get_pdf_path) ) ] ) ]<block_end> |
"""The fixer component."""<line_sep> |
# Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>numpy<as>np<import_from_stmt>rlgraph get_backend<import_from_stmt>rlgraph.components.layers.preprocessing.preprocess_layer PreprocessLayer<import_from_stmt>rlgraph.utils.decorators rlgraph_api<import_from_stmt>rlgraph.utils.ops flatten_op unflatten_op<if_stmt>get_backend()<eq>"tf"<block_start><import_stmt>tensorflow<as>tf<block_end><elif_stmt>get_backend()<eq>"pytorch"<block_start><import_stmt>torch<block_end><class_stmt>ImageCrop(PreprocessLayer)<block_start>"""
Crops one or more images to a new size without touching the color channel.
"""<def_stmt>__init__ self x=0 y=0 width=0 height=0 scope="image-crop" **kwargs<block_start>"""
Args:
x (int): Start x coordinate.
y (int): Start y coordinate.
width (int): Width of resulting image.
height (int): Height of resulting image.
"""<line_sep>super(ImageCrop self).__init__(scope=scope **kwargs)<line_sep>self.x=x<line_sep>self.y=y<line_sep>self.width=width<line_sep>self.height=height<assert_stmt>self.x<ge>0<assert_stmt>self.y<ge>0<assert_stmt>self.width<g>0<assert_stmt>self.height<g>0<line_sep># The output spaces after preprocessing (per flat-key).
self.output_spaces=dict()<block_end><def_stmt>get_preprocessed_space self space<block_start>ret=dict()<for_stmt>key,value space.flatten().items()# Do some sanity checking.
<block_start>rank=value.rank<assert_stmt>rank<eq>2<or>rank<eq>3 "ERROR: Given image's rank (which is {}{}, not counting batch rank) must be either 2 or 3!".format(rank (""<if>key<eq>""<else>" for key '{}'".format(key)))<line_sep># Determine the output shape.
shape=list(value.shape)<line_sep>shape[0]=self.width<line_sep>shape[1]=self.height<line_sep>ret[key]=value.__class__(shape=tuple(shape) add_batch_rank=value.has_batch_rank)<block_end><return>unflatten_op(ret)<block_end><def_stmt>create_variables self input_spaces action_space=<none><block_start>in_space=input_spaces["inputs"]<line_sep>self.output_spaces=flatten_op(self.get_preprocessed_space(in_space))<block_end>@rlgraph_api(flatten_ops=<true> split_ops=<true> add_auto_key_as_first_param=<true>)<def_stmt>_graph_fn_call self key inputs<block_start>"""
Images come in with either a batch dimension or not.
"""<if_stmt>self.backend<eq>"python"<or>get_backend()<eq>"python"<block_start><if_stmt>isinstance(inputs list)<block_start>inputs=np.asarray(inputs)<block_end># Preserve batch dimension.
<if_stmt>self.output_spaces[key].has_batch_rank<is><true><block_start><return>inputs[: self.y:self.y+self.height self.x:self.x+self.width]<block_end><else_stmt><block_start><return>inputs[self.y:self.y+self.height self.x:self.x+self.width]<block_end><block_end><elif_stmt>get_backend()<eq>"pytorch"<block_start><if_stmt>isinstance(inputs list)<block_start>inputs=torch.tensor(inputs)<block_end># TODO: the reason this key check is there is due to call during meta graph build - > out spaces
# do not exist yet -> need better solution.
# Preserve batch dimension.
<if_stmt>key<in>self.output_spaces<and>self.output_spaces[key].has_batch_rank<is><true><block_start><return>inputs[: self.y:self.y+self.height self.x:self.x+self.width]<block_end><else_stmt><block_start><return>inputs[self.y:self.y+self.height self.x:self.x+self.width]<block_end><block_end><elif_stmt>get_backend()<eq>"tf"<block_start><return>tf.image.crop_to_bounding_box(image=inputs offset_height=self.y offset_width=self.x target_height=self.height target_width=self.width)<block_end><block_end><block_end> |
<import_from_stmt>pythonforandroid.recipe PythonRecipe<class_stmt>PydanticRecipe(PythonRecipe)<block_start>version='1.8.2'<line_sep>url='https://github.com/samuelcolvin/pydantic/archive/refs/tags/v{version}.zip'<line_sep>depends=['setuptools']<line_sep>python_depends=['Cython' 'devtools' 'email-validator' 'dataclasses' 'typing-extensions' 'python-dotenv']<line_sep>call_hostpython_via_targetpython=<false><block_end>recipe=PydanticRecipe()<line_sep> |
<import_from_stmt>collections namedtuple<line_sep>Genotype=namedtuple('Genotype' 'backbone rpn')<line_sep>OP_NAMES=['sep_conv_3x3' 'sep_conv_3x3_dil3' 'sep_conv_5x5_dil6' 'skip_connect' 'def_conv_3x3' ]<line_sep>AGG_NAMES=['psum' 'cat']<line_sep>HEAD_OP_NAMES=['conv1x1' 'conv3x3' 'sep_conv_3x3' 'sep_conv_3x3_dil3' 'skip_connect' 'def_conv_3x3' ]<line_sep>HEAD_AGG_NAMES=['psum' 'cat']<line_sep> |
<import_from_stmt>opta.json_schema check_schemas<def_stmt>test_returns_without_error <arrow><none><block_start>check_schemas()<block_end> |
<import_stmt>vcr<import_from_stmt>fast_arrow Client<def_stmt>gen_vcr <block_start><return>vcr.VCR(cassette_library_dir='tests/fixtures_vcr' record_mode='none' match_on=['method' 'scheme' 'host' 'port' 'path' 'query'] )<block_end><def_stmt>gen_client <block_start>auth_data=gen_auth_data()<line_sep>client=Client(auth_data)<line_sep><return>client<block_end><def_stmt>gen_auth_data <block_start>auth_data={"account_id":123 "access_token":"<PASSWORD>" "refresh_token":"<PASSWORD>" "device_token":"<PASSWORD>" }<line_sep><return>auth_data<block_end> |
# encoding: utf-8
<import_from_stmt>opendatatools aqi<import_from_stmt>pyecharts Line<import_stmt>pandas<as>pd<if_stmt>__name__<eq>'__main__'<block_start>df_aqi=aqi.get_daily_aqi_onecity('北京市')<line_sep>df_aqi.set_index('date' inplace=<true>)<line_sep>df_aqi.sort_index(ascending=<true> inplace=<true>)<line_sep>df_aqi=df_aqi[df_aqi.index<ge>"2018-01-01"]<line_sep>axis_x=df_aqi.index<line_sep>axis_y=df_aqi['aqi']<line_sep>line=Line("北京AQI趋势图")<line_sep>line.add("aqi curve for beijing" axis_x axis_y mark_point=["average"])<line_sep>line.render("aqi_bj_curve.html")<block_end> |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>typing List Optional Union<import_from_stmt>azure.core.exceptions HttpResponseError<import_stmt>msrest.serialization<import_from_stmt>._guest_configuration_client_enums *<class_stmt>AssignmentInfo(msrest.serialization.Model)<block_start>"""Information about the guest configuration assignment.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the guest configuration assignment.
:vartype name: str
:param configuration: Information about the configuration.
:type configuration: ~guest_configuration_client.models.ConfigurationInfo
"""<line_sep>_validation={'name':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'configuration':{'key':'configuration' 'type':'ConfigurationInfo'} }<def_stmt>__init__ self * configuration:Optional["ConfigurationInfo"]=<none> **kwargs<block_start>super(AssignmentInfo self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.configuration=configuration<block_end><block_end><class_stmt>AssignmentReportDetails(msrest.serialization.Model)<block_start>"""Details of the guest configuration assignment report.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar job_id: GUID of the report.
:vartype job_id: str
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~guest_configuration_client.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~guest_configuration_client.models.AssignmentReportResource]
"""<line_sep>_validation={'compliance_status':{'readonly':<true>} 'start_time':{'readonly':<true>} 'end_time':{'readonly':<true>} 'job_id':{'readonly':<true>} 'operation_type':{'readonly':<true>} }<line_sep>_attribute_map={'compliance_status':{'key':'complianceStatus' 'type':'str'} 'start_time':{'key':'startTime' 'type':'iso-8601'} 'end_time':{'key':'endTime' 'type':'iso-8601'} 'job_id':{'key':'jobId' 'type':'str'} 'operation_type':{'key':'operationType' 'type':'str'} 'resources':{'key':'resources' 'type':'[AssignmentReportResource]'} }<def_stmt>__init__ self * resources:Optional[List["AssignmentReportResource"]]=<none> **kwargs<block_start>super(AssignmentReportDetails self).__init__(**kwargs)<line_sep>self.compliance_status=<none><line_sep>self.start_time=<none><line_sep>self.end_time=<none><line_sep>self.job_id=<none><line_sep>self.operation_type=<none><line_sep>self.resources=resources<block_end><block_end><class_stmt>AssignmentReportResource(msrest.serialization.Model)<block_start>"""The guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar compliance_status: A value indicating compliance status of the machine for the assigned
guest configuration. Possible values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
:param reasons: Compliance reason and reason code for a resource.
:type reasons:
list[~guest_configuration_client.models.AssignmentReportResourceComplianceReason]
:ivar properties: Properties of a guest configuration assignment resource.
:vartype properties: object
"""<line_sep>_validation={'compliance_status':{'readonly':<true>} 'properties':{'readonly':<true>} }<line_sep>_attribute_map={'compliance_status':{'key':'complianceStatus' 'type':'str'} 'reasons':{'key':'reasons' 'type':'[AssignmentReportResourceComplianceReason]'} 'properties':{'key':'properties' 'type':'object'} }<def_stmt>__init__ self * reasons:Optional[List["AssignmentReportResourceComplianceReason"]]=<none> **kwargs<block_start>super(AssignmentReportResource self).__init__(**kwargs)<line_sep>self.compliance_status=<none><line_sep>self.reasons=reasons<line_sep>self.properties=<none><block_end><block_end><class_stmt>AssignmentReportResourceComplianceReason(msrest.serialization.Model)<block_start>"""Reason and code for the compliance of the guest configuration assignment resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar phrase: Reason for the compliance of the guest configuration assignment resource.
:vartype phrase: str
:ivar code: Code for the compliance of the guest configuration assignment resource.
:vartype code: str
"""<line_sep>_validation={'phrase':{'readonly':<true>} 'code':{'readonly':<true>} }<line_sep>_attribute_map={'phrase':{'key':'phrase' 'type':'str'} 'code':{'key':'code' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(AssignmentReportResourceComplianceReason self).__init__(**kwargs)<line_sep>self.phrase=<none><line_sep>self.code=<none><block_end><block_end><class_stmt>ConfigurationInfo(msrest.serialization.Model)<block_start>"""Information about the configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: Name of the configuration.
:vartype name: str
:ivar version: Version of the configuration.
:vartype version: str
"""<line_sep>_validation={'name':{'readonly':<true>} 'version':{'readonly':<true>} }<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'version':{'key':'version' 'type':'str'} }<def_stmt>__init__ self **kwargs<block_start>super(ConfigurationInfo self).__init__(**kwargs)<line_sep>self.name=<none><line_sep>self.version=<none><block_end><block_end><class_stmt>ConfigurationParameter(msrest.serialization.Model)<block_start>"""Represents a configuration parameter.
:param name: Name of the configuration parameter.
:type name: str
:param value: Value of the configuration parameter.
:type value: str
"""<line_sep>_attribute_map={'name':{'key':'name' 'type':'str'} 'value':{'key':'value' 'type':'str'} }<def_stmt>__init__ self * name:Optional[str]=<none> value:Optional[str]=<none> **kwargs<block_start>super(ConfigurationParameter self).__init__(**kwargs)<line_sep>self.name=name<line_sep>self.value=value<block_end><block_end><class_stmt>ConfigurationSetting(msrest.serialization.Model)<block_start>"""Configuration setting of LCM (Local Configuration Manager).
:param configuration_mode: Specifies how the LCM(Local Configuration Manager) actually applies
the configuration to the target nodes. Possible values are ApplyOnly, ApplyAndMonitor, and
ApplyAndAutoCorrect. Possible values include: "ApplyOnly", "ApplyAndMonitor",
"ApplyAndAutoCorrect".
:type configuration_mode: str or ~guest_configuration_client.models.ConfigurationMode
:param allow_module_overwrite: If true - new configurations downloaded from the pull service
are allowed to overwrite the old ones on the target node. Otherwise, false. Possible values
include: "True", "False".
:type allow_module_overwrite: str or ~guest_configuration_client.models.AllowModuleOverwrite
:param action_after_reboot: Specifies what happens after a reboot during the application of a
configuration. The possible values are ContinueConfiguration and StopConfiguration. Possible
values include: "ContinueConfiguration", "StopConfiguration".
:type action_after_reboot: str or ~guest_configuration_client.models.ActionAfterReboot
:param refresh_frequency_mins: The time interval, in minutes, at which the LCM checks a pull
service to get updated configurations. This value is ignored if the LCM is not configured in
pull mode. The default value is 30.
:type refresh_frequency_mins: float
:param reboot_if_needed: Set this to true to automatically reboot the node after a
configuration that requires reboot is applied. Otherwise, you will have to manually reboot the
node for any configuration that requires it. The default value is false. To use this setting
when a reboot condition is enacted by something other than DSC (such as Windows Installer),
combine this setting with the xPendingReboot module. Possible values include: "True", "False".
Default value: "False".
:type reboot_if_needed: str or ~guest_configuration_client.models.RebootIfNeeded
:param configuration_mode_frequency_mins: How often, in minutes, the current configuration is
checked and applied. This property is ignored if the ConfigurationMode property is set to
ApplyOnly. The default value is 15.
:type configuration_mode_frequency_mins: float
"""<line_sep>_attribute_map={'configuration_mode':{'key':'configurationMode' 'type':'str'} 'allow_module_overwrite':{'key':'allowModuleOverwrite' 'type':'str'} 'action_after_reboot':{'key':'actionAfterReboot' 'type':'str'} 'refresh_frequency_mins':{'key':'refreshFrequencyMins' 'type':'float'} 'reboot_if_needed':{'key':'rebootIfNeeded' 'type':'str'} 'configuration_mode_frequency_mins':{'key':'configurationModeFrequencyMins' 'type':'float'} }<def_stmt>__init__ self * configuration_mode:Optional[Union[str "ConfigurationMode"]]=<none> allow_module_overwrite:Optional[Union[str "AllowModuleOverwrite"]]=<none> action_after_reboot:Optional[Union[str "ActionAfterReboot"]]=<none> refresh_frequency_mins:Optional[float]=30 reboot_if_needed:Optional[Union[str "RebootIfNeeded"]]="False" configuration_mode_frequency_mins:Optional[float]=15 **kwargs<block_start>super(ConfigurationSetting self).__init__(**kwargs)<line_sep>self.configuration_mode=configuration_mode<line_sep>self.allow_module_overwrite=allow_module_overwrite<line_sep>self.action_after_reboot=action_after_reboot<line_sep>self.refresh_frequency_mins=refresh_frequency_mins<line_sep>self.reboot_if_needed=reboot_if_needed<line_sep>self.configuration_mode_frequency_mins=configuration_mode_frequency_mins<block_end><block_end><class_stmt>ErrorResponse(msrest.serialization.Model)<block_start>"""Error response of an operation failure.
:param error:
:type error: ~guest_configuration_client.models.ErrorResponseError
"""<line_sep>_attribute_map={'error':{'key':'error' 'type':'ErrorResponseError'} }<def_stmt>__init__ self * error:Optional["ErrorResponseError"]=<none> **kwargs<block_start>super(ErrorResponse self).__init__(**kwargs)<line_sep>self.error=error<block_end><block_end><class_stmt>ErrorResponseError(msrest.serialization.Model)<block_start>"""ErrorResponseError.
:param code: Error code.
:type code: str
:param message: Detail error message indicating why the operation failed.
:type message: str
"""<line_sep>_attribute_map={'code':{'key':'code' 'type':'str'} 'message':{'key':'message' 'type':'str'} }<def_stmt>__init__ self * code:Optional[str]=<none> message:Optional[str]=<none> **kwargs<block_start>super(ErrorResponseError self).__init__(**kwargs)<line_sep>self.code=code<line_sep>self.message=message<block_end><block_end><class_stmt>Resource(msrest.serialization.Model)<block_start>"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the guest configuration assignment.
:vartype id: str
:param name: Name of the guest configuration assignment.
:type name: str
:param location: Region where the VM is located.
:type location: str
:ivar type: The type of the resource.
:vartype type: str
"""<line_sep>_validation={'id':{'readonly':<true>} 'type':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'type':{'key':'type' 'type':'str'} }<def_stmt>__init__ self * name:Optional[str]=<none> location:Optional[str]=<none> **kwargs<block_start>super(Resource self).__init__(**kwargs)<line_sep>self.id=<none><line_sep>self.name=name<line_sep>self.location=location<line_sep>self.type=<none><block_end><block_end><class_stmt>GuestConfigurationAssignment(Resource)<block_start>"""Guest configuration assignment is an association between a machine and guest configuration.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: ARM resource id of the guest configuration assignment.
:vartype id: str
:param name: Name of the guest configuration assignment.
:type name: str
:param location: Region where the VM is located.
:type location: str
:ivar type: The type of the resource.
:vartype type: str
:ivar target_resource_id: VM resource Id.
:vartype target_resource_id: str
:ivar compliance_status_properties_compliance_status: A value indicating compliance status of
the machine for the assigned guest configuration. Possible values include: "Compliant",
"NonCompliant", "Pending".
:vartype compliance_status_properties_compliance_status: str or
~guest_configuration_client.models.ComplianceStatus
:ivar last_compliance_status_checked: Date and time when last compliance status was checked.
:vartype last_compliance_status_checked: ~datetime.datetime
:ivar latest_report_id: Id of the latest report for the guest configuration assignment.
:vartype latest_report_id: str
:param context: The source which initiated the guest configuration assignment. Ex: Azure
Policy.
:type context: str
:ivar assignment_hash: Combined hash of the configuration package and parameters.
:vartype assignment_hash: str
:ivar provisioning_state: The provisioning state, which only appears in the response. Possible
values include: "Succeeded", "Failed", "Canceled", "Created".
:vartype provisioning_state: str or ~guest_configuration_client.models.ProvisioningState
:ivar id_properties_latest_assignment_report_id: ARM resource id of the report for the guest
configuration assignment.
:vartype id_properties_latest_assignment_report_id: str
:ivar report_id: GUID that identifies the guest configuration assignment report under a
subscription, resource group.
:vartype report_id: str
:param assignment: Configuration details of the guest configuration assignment.
:type assignment: ~guest_configuration_client.models.AssignmentInfo
:param vm: Information about the VM.
:type vm: ~guest_configuration_client.models.VmInfo
:ivar start_time: Start date and time of the guest configuration assignment compliance status
check.
:vartype start_time: ~datetime.datetime
:ivar end_time: End date and time of the guest configuration assignment compliance status
check.
:vartype end_time: ~datetime.datetime
:ivar compliance_status_properties_latest_assignment_report_compliance_status: A value
indicating compliance status of the machine for the assigned guest configuration. Possible
values include: "Compliant", "NonCompliant", "Pending".
:vartype compliance_status_properties_latest_assignment_report_compliance_status: str or
~guest_configuration_client.models.ComplianceStatus
:ivar operation_type: Type of report, Consistency or Initial. Possible values include:
"Consistency", "Initial".
:vartype operation_type: str or ~guest_configuration_client.models.Type
:param resources: The list of resources for which guest configuration assignment compliance is
checked.
:type resources: list[~guest_configuration_client.models.AssignmentReportResource]
:ivar kind: Kind of the guest configuration. For example:DSC. Default value: "DSC".
:vartype kind: str
:param name_properties_guest_configuration_name: Name of the guest configuration.
:type name_properties_guest_configuration_name: str
:param version: Version of the guest configuration.
:type version: str
:ivar content_uri: Uri of the storage where guest configuration package is uploaded.
:vartype content_uri: str
:ivar content_hash: Combined hash of the guest configuration package and configuration
parameters.
:vartype content_hash: str
:param configuration_parameter: The configuration parameters for the guest configuration.
:type configuration_parameter: list[~guest_configuration_client.models.ConfigurationParameter]
:param configuration_setting: The configuration setting for the guest configuration.
:type configuration_setting: ~guest_configuration_client.models.ConfigurationSetting
"""<line_sep>_validation={'id':{'readonly':<true>} 'type':{'readonly':<true>} 'target_resource_id':{'readonly':<true>} 'compliance_status_properties_compliance_status':{'readonly':<true>} 'last_compliance_status_checked':{'readonly':<true>} 'latest_report_id':{'readonly':<true>} 'assignment_hash':{'readonly':<true>} 'provisioning_state':{'readonly':<true>} 'id_properties_latest_assignment_report_id':{'readonly':<true>} 'report_id':{'readonly':<true>} 'start_time':{'readonly':<true>} 'end_time':{'readonly':<true>} 'compliance_status_properties_latest_assignment_report_compliance_status':{'readonly':<true>} 'operation_type':{'readonly':<true>} 'kind':{'constant':<true>} 'content_uri':{'readonly':<true>} 'content_hash':{'readonly':<true>} }<line_sep>_attribute_map={'id':{'key':'id' 'type':'str'} 'name':{'key':'name' 'type':'str'} 'location':{'key':'location' 'type':'str'} 'type':{'key':'type' 'type':'str'} 'target_resource_id':{'key':'properties.targetResourceId' 'type':'str'} 'compliance_status_properties_compliance_status':{'key':'properties.complianceStatus' 'type':'str'} 'last_compliance_status_checked':{'key':'properties.lastComplianceStatusChecked' 'type':'iso-8601'} 'latest_report_id':{'key':'properties.latestReportId' 'type':'str'} 'context':{'key':'properties.context' 'type':'str'} 'assignment_hash':{'key':'properties.assignmentHash' 'type':'str'} 'provisioning_state':{'key':'properties.provisioningState' 'type':'str'} 'id_properties_latest_assignment_report_id':{'key':'properties.latestAssignmentReport.id' 'type':'str'} 'report_id':{'key':'properties.latestAssignmentReport.reportId' 'type':'str'} 'assignment':{'key':'properties.latestAssignmentReport.assignment' 'type':'AssignmentInfo'} 'vm':{'key':'properties.latestAssignmentReport.vm' 'type':'VmInfo'} 'start_time':{'key':'properties.latestAssignmentReport.startTime' 'type':'iso-8601'} 'end_time':{'key':'properties.latestAssignmentReport.endTime' 'type':'iso-8601'} 
'compliance_status_properties_latest_assignment_report_compliance_status':{'key':'properties.latestAssignmentReport.complianceStatus' 'type':'str'} 'operation_type':{'key':'properties.latestAssignmentReport.operationType' 'type':'str'} 'resources':{'key':'properties.latestAssignmentReport.resources' 'type':'[AssignmentReportResource]'} 'kind':{'key':'properties.guestConfiguration.kind' 'type':'str'} 'name_properties_guest_configuration_name':{'key':'properties.guestConfiguration.name' 'type':'str'} 'version':{'key':'properties.guestConfiguration.version' 'type':'str'} 'content_uri':{'key':'properties.guestConfiguration.contentUri' 'type':'str'} 'content_hash':{'key':'properties.guestConfiguration.contentHash' 'type':'str'} 'configuration_parameter':{'key':'properties.guestConfiguration.configurationParameter' 'type':'[ConfigurationParameter]'} 'configuration_setting':{'key':'properties.guestConfiguration.configurationSetting' 'type':'ConfigurationSetting'} }<line_sep>kind="DSC"<def_stmt>__init__ self * name:Optional[str]=<none> location:Optional[str]=<none> context:Optional[str]=<none> assignment:Optional["AssignmentInfo"]=<none> vm:Optional["VmInfo"]=<none> resources:Optional[List["AssignmentReportResource"]]=<none> name_properties_guest_configuration_name:Optional[str]=<none> version:Optional[str]=<none> configuration_parameter:Optional[List["ConfigurationParameter"]]=<none> configuration_setting:Optional["ConfigurationSetting"]=<none> **kwargs<block_start>super(GuestConfigurationAssignment self).__init__(name=name location=location 
class GuestConfigurationAssignmentList(msrest.serialization.Model):
    """Response of the list guest configuration assignment operation.

    :param value: Result of the list guest configuration assignment operation.
    :type value: list[~guest_configuration_client.models.GuestConfigurationAssignment]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GuestConfigurationAssignment]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["GuestConfigurationAssignment"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value


class GuestConfigurationAssignmentReport(msrest.serialization.Model):
    """Report for the guest configuration assignment.

    The report contains information such as compliance status, reason, and more.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: ARM resource id of the report for the guest configuration assignment.
    :vartype id: str
    :ivar name: GUID that identifies the guest configuration assignment report
     under a subscription, resource group.
    :vartype name: str
    :param properties: Properties of the guest configuration report.
    :type properties:
     ~guest_configuration_client.models.GuestConfigurationAssignmentReportProperties
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'GuestConfigurationAssignmentReportProperties'},
    }

    def __init__(
        self,
        *,
        properties: Optional["GuestConfigurationAssignmentReportProperties"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # id and name are read-only and populated by the server.
        self.id = None
        self.name = None
        self.properties = properties


class GuestConfigurationAssignmentReportList(msrest.serialization.Model):
    """List of guest configuration assignment reports.

    :param value: List of reports for the guest configuration. A report
     contains information such as compliance status, reason and more.
    :type value: list[~guest_configuration_client.models.GuestConfigurationAssignmentReport]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[GuestConfigurationAssignmentReport]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["GuestConfigurationAssignmentReport"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value


class GuestConfigurationAssignmentReportProperties(msrest.serialization.Model):
    """Properties of a guest configuration assignment report.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar compliance_status: A value indicating compliance status of the
     machine for the assigned guest configuration. Possible values include:
     "Compliant", "NonCompliant", "Pending".
    :vartype compliance_status: str or ~guest_configuration_client.models.ComplianceStatus
    :ivar report_id: GUID that identifies the guest configuration assignment
     report under a subscription, resource group.
    :vartype report_id: str
    :param assignment: Configuration details of the guest configuration assignment.
    :type assignment: ~guest_configuration_client.models.AssignmentInfo
    :param vm: Information about the VM.
    :type vm: ~guest_configuration_client.models.VmInfo
    :ivar start_time: Start date and time of the compliance status check.
    :vartype start_time: ~datetime.datetime
    :ivar end_time: End date and time of the compliance status check.
    :vartype end_time: ~datetime.datetime
    :param details: Details of the assignment report.
    :type details: ~guest_configuration_client.models.AssignmentReportDetails
    """

    _validation = {
        'compliance_status': {'readonly': True},
        'report_id': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
    }

    _attribute_map = {
        'compliance_status': {'key': 'complianceStatus', 'type': 'str'},
        'report_id': {'key': 'reportId', 'type': 'str'},
        'assignment': {'key': 'assignment', 'type': 'AssignmentInfo'},
        'vm': {'key': 'vm', 'type': 'VmInfo'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'details': {'key': 'details', 'type': 'AssignmentReportDetails'},
    }

    def __init__(
        self,
        *,
        assignment: Optional["AssignmentInfo"] = None,
        vm: Optional["VmInfo"] = None,
        details: Optional["AssignmentReportDetails"] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Server-populated, read-only fields.
        self.compliance_status = None
        self.report_id = None
        self.assignment = assignment
        self.vm = vm
        self.start_time = None
        self.end_time = None
        self.details = details


class Operation(msrest.serialization.Model):
    """GuestConfiguration REST API operation.

    :param name: Operation name: For ex.
     providers/Microsoft.GuestConfiguration/guestConfigurationAssignments/write or read.
    :type name: str
    :param display: Provider, Resource, Operation and description values.
    :type display: ~guest_configuration_client.models.OperationDisplay
    :param status_code: Service provider: Microsoft.GuestConfiguration.
    :type status_code: str
    """

    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'display': {'key': 'display', 'type': 'OperationDisplay'},
        'status_code': {'key': 'properties.statusCode', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        display: Optional["OperationDisplay"] = None,
        status_code: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.name = name
        self.display = display
        self.status_code = status_code


class OperationDisplay(msrest.serialization.Model):
    """Provider, Resource, Operation and description values.

    :param provider: Service provider: Microsoft.GuestConfiguration.
    :type provider: str
    :param resource: Resource on which the operation is performed.
    :type resource: str
    :param operation: Operation type: Read, write, delete, etc.
    :type operation: str
    :param description: Description about operation.
    :type description: str
    """

    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        provider: Optional[str] = None,
        resource: Optional[str] = None,
        operation: Optional[str] = None,
        description: Optional[str] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.provider = provider
        self.resource = resource
        self.operation = operation
        self.description = description


class OperationList(msrest.serialization.Model):
    """The response model for the list of Automation operations.

    :param value: List of Automation operations supported by the Automation
     resource provider.
    :type value: list[~guest_configuration_client.models.Operation]
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
    }

    def __init__(
        self,
        *,
        value: Optional[List["Operation"]] = None,
        **kwargs
    ):
        super().__init__(**kwargs)
        self.value = value


class ProxyResource(Resource):
    """ARM proxy resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: ARM resource id of the guest configuration assignment.
    :vartype id: str
    :param name: Name of the guest configuration assignment.
    :type name: str
    :param location: Region where the VM is located.
    :type location: str
    :ivar type: The type of the resource.
    :vartype type: str
    """

    _validation = {
        'id': {'readonly': True},
        'type': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(
        self,
        *,
        name: Optional[str] = None,
        location: Optional[str] = None,
        **kwargs
    ):
        super().__init__(name=name, location=location, **kwargs)


class VmInfo(msrest.serialization.Model):
    """Information about the VM.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Azure resource Id of the VM.
    :vartype id: str
    :ivar uuid: UUID (Universally Unique Identifier) of the VM.
    :vartype uuid: str
    """

    _validation = {
        'id': {'readonly': True},
        'uuid': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'uuid': {'key': 'uuid', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Both fields are server-populated.
        self.id = None
        self.uuid = None
import math

import brownie
from brownie import chain


def test_only_distributor_allowed(alice, stream):
    """Only the designated distributor may notify new rewards."""
    with brownie.reverts("dev: only distributor"):
        stream.notify_reward_amount(10 ** 18, {"from": alice})


def test_retrieves_reward_token(bob, stream, reward_token):
    """Notifying a reward pulls the tokens into the stream contract."""
    stream.notify_reward_amount(10 ** 18, {"from": bob})
    assert reward_token.balanceOf(stream) == 10 ** 18


def test_reward_rate_updates(bob, stream):
    """A notify spreads the full amount over the 10-day duration."""
    stream.notify_reward_amount(10 ** 18, {"from": bob})
    rate = stream.reward_rate()
    assert rate > 0
    assert rate == 10 ** 18 / (86400 * 10)


def test_reward_rate_updates_mid_duration(bob, stream):
    """Topping up mid-stream keeps the overall rate roughly constant."""
    stream.notify_reward_amount(10 ** 18, {"from": bob})
    chain.sleep(86400 * 5)  # half of the duration
    # top up the balance to be 10 ** 18 again
    stream.notify_reward_amount(10 ** 18 / 2, {"from": bob})
    rate = stream.reward_rate()
    # should be relatively close; .00001 seems about good of a heuristic
    assert math.isclose(rate, 10 ** 18 / (86400 * 10), rel_tol=0.00001)


def test_period_finish_updates(bob, stream):
    """period_finish is set 10 days past the notify timestamp."""
    receipt = stream.notify_reward_amount(10 ** 18, {"from": bob})
    assert stream.period_finish() == receipt.timestamp + 86400 * 10


def test_update_last_update_time(bob, stream):
    """last_update_time is stamped with the notify transaction time."""
    receipt = stream.notify_reward_amount(10 ** 18, {"from": bob})
    assert stream.last_update_time() == receipt.timestamp
"""Tests for ConnectedDriveVehicle."""
import unittest
from unittest import mock

from test import (
    load_response_json, BackendMock, TEST_USERNAME, TEST_PASSWORD,
    TEST_REGION, G31_VIN, F48_VIN, I01_VIN, I01_NOREX_VIN, F15_VIN, F45_VIN,
    F31_VIN, TEST_VEHICLE_DATA, ATTRIBUTE_MAPPING, MISSING_ATTRIBUTES,
    ADDITIONAL_ATTRIBUTES, G30_PHEV_OS7_VIN, AVAILABLE_STATES_MAPPING)

from bimmer_connected.vehicle import ConnectedDriveVehicle, DriveTrainType
from bimmer_connected.account import ConnectedDriveAccount

_VEHICLES = load_response_json('vehicles.json')['vehicles']

G31_VEHICLE = _VEHICLES[0]


class TestVehicle(unittest.TestCase):
    """Tests for ConnectedDriveVehicle."""

    @staticmethod
    def _mocked_account():
        """Create an account against the mocked backend."""
        backend_mock = BackendMock()
        with mock.patch('bimmer_connected.account.requests', new=backend_mock):
            return ConnectedDriveAccount(
                TEST_USERNAME, TEST_PASSWORD, TEST_REGION)

    def test_drive_train(self):
        """Tests around drive_train attribute."""
        vehicle = ConnectedDriveVehicle(None, G31_VEHICLE)
        self.assertEqual(DriveTrainType.CONVENTIONAL, vehicle.drive_train)

    def test_parsing_attributes(self):
        """Test parsing different attributes of the vehicle."""
        account = self._mocked_account()
        checked_attributes = (
            'drive_train', 'name', 'has_internal_combustion_engine',
            'has_hv_battery', 'drive_train_attributes',
            'has_statistics_service', 'has_weekly_planner_service',
            'has_destination_service', 'has_rangemap_service')
        for vehicle in account.vehicles:
            print(vehicle.name)
            for attribute in checked_attributes:
                self.assertIsNotNone(getattr(vehicle, attribute))

    def test_drive_train_attributes(self):
        """Test parsing drive-train related attributes of the vehicle."""
        account = self._mocked_account()
        combustion_vins = [
            G31_VIN, F48_VIN, F15_VIN, F45_VIN, F31_VIN, G30_PHEV_OS7_VIN]
        hv_battery_vins = [I01_VIN, I01_NOREX_VIN, G30_PHEV_OS7_VIN]
        for vehicle in account.vehicles:
            self.assertEqual(vehicle.vin in combustion_vins,
                             vehicle.has_internal_combustion_engine)
            self.assertEqual(vehicle.vin in hv_battery_vins,
                             vehicle.has_hv_battery)
            self.assertEqual(vehicle.vin in [I01_VIN],
                             vehicle.has_range_extender)

    def test_parsing_of_lsc_type(self):
        """Test parsing the lsc type field."""
        account = self._mocked_account()
        for vehicle in account.vehicles:
            self.assertIsNotNone(vehicle.lsc_type)

    def test_available_attributes(self):
        """Check that available_attributes matches the test data exactly."""
        account = self._mocked_account()
        for vin, dirname in TEST_VEHICLE_DATA.items():
            vehicle = account.get_vehicle(vin)
            print(vehicle.name)
            status_data = load_response_json('{}/status.json'.format(dirname))
            existing_attributes = sorted(
                ATTRIBUTE_MAPPING.get(attr, attr)
                for attr in status_data['vehicleStatus']
                if attr not in MISSING_ATTRIBUTES)
            expected_attributes = sorted(
                attr for attr in vehicle.available_attributes
                if attr not in ADDITIONAL_ATTRIBUTES)
            self.assertListEqual(existing_attributes, expected_attributes)

    def test_available_state_services(self):
        """Check that available_state_services matches the test data exactly."""
        account = self._mocked_account()
        vehicles = load_response_json('vehicles.json')
        for test_vehicle in vehicles['vehicles']:
            vehicle = account.get_vehicle(test_vehicle['vin'])
            print(vehicle.name)
            # STATUS is always available; the rest depend on the capability
            # flags present in the vehicle record.
            expected_services = ['STATUS']
            for key, value in test_vehicle.items():
                if key not in AVAILABLE_STATES_MAPPING:
                    continue
                mapped = AVAILABLE_STATES_MAPPING[key].get(value)
                if mapped:
                    expected_services += mapped
            if vehicle.drive_train != DriveTrainType.CONVENTIONAL:
                expected_services += ['EFFICIENCY', 'NAVIGATION']
            self.assertListEqual(sorted(vehicle.available_state_services),
                                 sorted(expected_services))
# coding=utf-8
"""Batch convert the world traj in actev to carla traj."""
import argparse
import os
import sys
from glob import glob

from tqdm import tqdm

# Python 3 folded the old commands API into subprocess; keep a py2 fallback.
if sys.version_info > (3, 0):
    import subprocess as commands
else:
    import commands

parser = argparse.ArgumentParser()
parser.add_argument("traj_world_path")
parser.add_argument("--traj_vehicle_world_path", default=None)
parser.add_argument("save_carla_traj_path")
parser.add_argument("--save_carla_vehicle_path", default=None)

# Per-scene calibration (world rotation, carla rotation, scale, origin)
# for the multi-scene ActEV data.
calibrations = {
    "0000": {"world_rotate": 320, "carla_rotate": 130, "scale": 1.0,
             "origin": [3.5, -48.0, 0.3]},
    "0400": {"world_rotate": 100, "carla_rotate": 153, "scale": 1.0,
             "origin": [-10.0, 58.0, 0.5]},
    "0401": {"world_rotate": 120, "carla_rotate": 135, "scale": 1.0,
             "origin": [-48.0, 24.0, 0.5]},
    "0500": {"world_rotate": 90, "carla_rotate": 179, "scale": 1.0,
             "origin": [-65.5, -75.5, 0.1]},
}
# Zara single-scene calibration.
calibration = {
    "world_rotate": 270,
    "carla_rotate": -3.04,
    "scale": 1.2,
    "origin": [-44.0511921243, -79.6225002047, 0.],
}


def get_scene(videoname_):
    """Extract the 4-digit ActEV scene id from a video name.

    E.g. "VIRAT_S_040103_02_..." -> "0401".
    """
    s = videoname_.split("_S_")[-1]
    s = s.split("_")[0]
    return s[:4]


if __name__ == "__main__":
    args = parser.parse_args()

    # all pedestrian world-trajectory files
    ped_traj_files = glob(os.path.join(args.traj_world_path, "*.txt"))

    if args.traj_vehicle_world_path is not None:
        # Vehicle input requires a vehicle output path as well.
        assert args.save_carla_vehicle_path is not None
        if not os.path.exists(args.save_carla_vehicle_path):
            os.makedirs(args.save_carla_vehicle_path)

    if not os.path.exists(args.save_carla_traj_path):
        os.makedirs(args.save_carla_traj_path)

    # The conversion itself is delegated to plot_traj_carla.py next to
    # this script.
    script_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "plot_traj_carla.py")
    assert os.path.exists(script_path), script_path

    for ped_traj_file in tqdm(ped_traj_files):
        filename = os.path.splitext(os.path.basename(ped_traj_file))[0]
        target_ped_file = os.path.join(
            args.save_carla_traj_path, "%s.txt" % filename)
        if args.traj_vehicle_world_path is None:
            # Single-scene (Zara) conversion: pedestrians only.
            commands.getoutput(
                "python3 %s %s 0 %f %f %f %f --world_rotate"
                " %f --scale %f --save_carla_traj_file %s" % (
                    script_path, ped_traj_file,
                    calibration["origin"][0], calibration["origin"][1],
                    calibration["origin"][2], calibration["carla_rotate"],
                    calibration["world_rotate"], calibration["scale"],
                    target_ped_file))
        else:
            # Multi-scene ActEV conversion: pedestrians plus vehicles,
            # using the per-scene calibration.
            scene = get_scene(filename)
            if scene == "0002":
                # No calibration available for scene 0002; skip it.
                continue
            vehicle_traj_file = os.path.join(
                args.traj_vehicle_world_path, "%s.txt" % filename)
            target_vehicle_file = os.path.join(
                args.save_carla_vehicle_path, "%s.txt" % filename)
            # FIX: the original built a near-duplicate command string
            # (without --is_actev) into an unused `cmd` variable; that dead
            # code has been removed — only the --is_actev command runs.
            commands.getoutput(
                "python3 %s %s 0 %f %f %f %f --world_rotate"
                " %f --scale %f --save_carla_traj_file %s"
                " --vehicle_world_traj_file %s --is_actev"
                " --save_vehicle_carla_traj_file %s" % (
                    script_path, ped_traj_file,
                    calibrations[scene]["origin"][0],
                    calibrations[scene]["origin"][1],
                    calibrations[scene]["origin"][2],
                    calibrations[scene]["carla_rotate"],
                    calibrations[scene]["world_rotate"],
                    calibrations[scene]["scale"],
                    target_ped_file, vehicle_traj_file,
                    target_vehicle_file))
# Generated by Django 2.1.2 on 2018-10-12 14:18
from __future__ import absolute_import, unicode_literals

from django.db import migrations, models
import django_celery_beat.validators
import timezone_field.fields


class Migration(migrations.Migration):
    """Squashed migration for django_celery_beat.

    Auto-generated; replaces migrations 0005 through 0008 (see ``replaces``).
    Do not hand-edit the operations: their order and content must match the
    replaced migrations exactly.
    """

    replaces = [
        ('django_celery_beat', '0005_add_solarschedule_events_choices'),
        ('django_celery_beat', '0006_auto_20180210_1226'),
        ('django_celery_beat', '0006_auto_20180322_0932'),
        ('django_celery_beat', '0007_auto_20180521_0826'),
        ('django_celery_beat', '0008_auto_20180914_1922'),
    ]

    dependencies = [
        ('django_celery_beat', '0004_auto_20170221_0000'),
    ]

    operations = [
        # Restrict solar schedule events to the known solar event names.
        migrations.AlterField(
            model_name='solarschedule',
            name='event',
            field=models.CharField(
                choices=[
                    ('dawn_astronomical', 'dawn_astronomical'),
                    ('dawn_civil', 'dawn_civil'),
                    ('dawn_nautical', 'dawn_nautical'),
                    ('dusk_astronomical', 'dusk_astronomical'),
                    ('dusk_civil', 'dusk_civil'),
                    ('dusk_nautical', 'dusk_nautical'),
                    ('solar_noon', 'solar_noon'),
                    ('sunrise', 'sunrise'),
                    ('sunset', 'sunset'),
                ],
                max_length=24,
                verbose_name='event'),
        ),
        # NOTE: this AlterModelOptions appears twice because the squashed
        # history (0006_auto_20180210_1226 and 0006_auto_20180322_0932)
        # applied it twice; both are kept so the squash replays faithfully.
        migrations.AlterModelOptions(
            name='crontabschedule',
            options={
                'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
                             'hour', 'minute', 'timezone'],
                'verbose_name': 'crontab',
                'verbose_name_plural': 'crontabs'},
        ),
        migrations.AlterModelOptions(
            name='crontabschedule',
            options={
                'ordering': ['month_of_year', 'day_of_month', 'day_of_week',
                             'hour', 'minute', 'timezone'],
                'verbose_name': 'crontab',
                'verbose_name_plural': 'crontabs'},
        ),
        # New per-schedule timezone support for crontab schedules.
        migrations.AddField(
            model_name='crontabschedule',
            name='timezone',
            field=timezone_field.fields.TimeZoneField(default='UTC'),
        ),
        # One-off tasks and an optional start time for periodic tasks.
        migrations.AddField(
            model_name='periodictask',
            name='one_off',
            field=models.BooleanField(default=False,
                                      verbose_name='one-off task'),
        ),
        migrations.AddField(
            model_name='periodictask',
            name='start_time',
            field=models.DateTimeField(blank=True, null=True,
                                       verbose_name='start_time'),
        ),
        # Widen the crontab component fields and attach field validators.
        migrations.AlterField(
            model_name='crontabschedule',
            name='day_of_month',
            field=models.CharField(
                default='*', max_length=124,
                validators=[django_celery_beat.validators.day_of_month_validator],
                verbose_name='day of month'),
        ),
        migrations.AlterField(
            model_name='crontabschedule',
            name='day_of_week',
            field=models.CharField(
                default='*', max_length=64,
                validators=[django_celery_beat.validators.day_of_week_validator],
                verbose_name='day of week'),
        ),
        migrations.AlterField(
            model_name='crontabschedule',
            name='hour',
            field=models.CharField(
                default='*', max_length=96,
                validators=[django_celery_beat.validators.hour_validator],
                verbose_name='hour'),
        ),
        migrations.AlterField(
            model_name='crontabschedule',
            name='minute',
            field=models.CharField(
                default='*', max_length=240,
                validators=[django_celery_beat.validators.minute_validator],
                verbose_name='minute'),
        ),
        migrations.AlterField(
            model_name='crontabschedule',
            name='month_of_year',
            field=models.CharField(
                default='*', max_length=64,
                validators=[django_celery_beat.validators.month_of_year_validator],
                verbose_name='month of year'),
        ),
    ]
"""
The Stock module is responsible for stock management.
It includes models for:
- StockLocation
- StockItem
- StockItemTracking
"""<line_sep> |
########################################################################
#
# Developed for AT&T by <NAME>, August 2017
#
# Action plugin for hashivault_write_from_file module.
#
# Reads file from remote host using slurp module. (base64 encoded)
# Stores file/secret to Vault using hashivault_write module on localhost.
#
########################################################################
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash


class ActionModule(ActionBase):
    """Action plugin: copy a file from the remote host into HashiCorp Vault.

    Runs the ``slurp`` module on the remote host to fetch the file
    (base64-encoded), then switches to a local connection and runs the
    ``hashivault_write`` module on the controller to store the content
    under the task's ``key``.
    """

    def run(self, tmp=None, task_vars=None):
        # Ansible may pass task_vars=None; normalize to an empty dict.
        if task_vars is None:
            task_vars = dict()
        results = super(ActionModule, self).run(tmp, task_vars)
        args = self._task.args.copy()
        # 'key' and 'path' are consumed by this plugin; everything left in
        # `args` is forwarded to hashivault_write unchanged.
        key = args.pop('key', None)
        path = args.pop('path', None)
        new_module_args = {'src': path}
        self._update_module_args('slurp', new_module_args, task_vars)
        results = merge_hash(
            results,
            # executes slurp module on remote host
            self._execute_module(module_name='slurp', tmp=tmp,
                                 task_vars=task_vars,
                                 module_args=new_module_args))
        # Abort early if the remote read failed.
        if 'failed' in results and results['failed'] is True:
            return results
        # already base64 encoded from slurp; pop so it is not echoed in results
        content = results.pop('content', None)
        # Drop privilege escalation and swap to a local connection so the
        # Vault write runs on the controller, not the remote host.
        self._play_context.become = False
        self._play_context.become_method = None
        self._connection = self._shared_loader_obj.connection_loader.get(
            'local', self._play_context, self._connection._new_stdin)
        args['data'] = {key: content}
        if 'update' not in args:
            args['update'] = True
        results = merge_hash(
            results,
            # executes hashivault_write module on localhost
            self._execute_module(module_name='hashivault_write', tmp=tmp,
                                 task_vars=task_vars, module_args=args))
        # Mask the secret payload in the recorded invocation (no_log hygiene).
        results['invocation']['module_args']['data'] = \
            'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        return results
from functools import partial

from graphql.utilities import build_schema
from graphql.validation import SingleFieldSubscriptionsRule

from .harness import assert_validation_errors

# Test schema: a subscription root with several top-level fields, used to
# exercise the single-top-level-field-per-subscription validation rule.
schema = build_schema(
    """
    type Message {
      body: String
      sender: String
    }
    type SubscriptionRoot {
      importantEmails: [String]
      notImportantEmails: [String]
      moreImportantEmails: [String]
      spamEmails: [String]
      deletedEmails: [String]
      newMessage: Message
    }
    type QueryRoot {
      dummy: String
    }
    schema {
      query: QueryRoot
      subscription: SubscriptionRoot
    }
    """
)

# Helper bound to the rule and schema under test: asserts that validating a
# query yields exactly the given errors.
assert_errors = partial(
    assert_validation_errors, SingleFieldSubscriptionsRule, schema=schema)

# Convenience helper: asserts that validation yields no errors.
assert_valid = partial(assert_errors, errors=[])
subscription ImportantEmails {
importantEmails
}
""")<block_end><def_stmt>valid_subscription_with_fragment <block_start>assert_valid("""
subscription sub {
...newMessageFields
}
fragment newMessageFields on SubscriptionRoot {
newMessage {
body
sender
}
}
""")<block_end><def_stmt>valid_subscription_with_fragment_and_field <block_start>assert_valid("""
subscription sub {
newMessage {
body
}
...newMessageFields
}
fragment newMessageFields on SubscriptionRoot {
newMessage {
body
sender
}
}
""")<block_end><def_stmt>fails_with_more_than_one_root_field <block_start>assert_errors("""
subscription ImportantEmails {
importantEmails
notImportantEmails
}
""" [{"message":"Subscription 'ImportantEmails'"<concat>" must select only one top level field." "locations":[(4 15)] }] )<block_end><def_stmt>fails_with_more_than_one_root_field_including_introspection <block_start>assert_errors("""
subscription ImportantEmails {
importantEmails
__typename
}
""" [{"message":"Subscription 'ImportantEmails'"<concat>" must select only one top level field." "locations":[(4 15)] } {"message":"Subscription 'ImportantEmails'"<concat>" must not select an introspection top level field." "locations":[(4 15)] } ] )<block_end><def_stmt>fails_with_more_than_one_root_field_including_aliased_introspection <block_start>assert_errors("""
subscription ImportantEmails {
importantEmails
...Introspection
}
fragment Introspection on SubscriptionRoot {
typename: __typename
}
""" [{"message":"Subscription 'ImportantEmails'"<concat>" must select only one top level field." "locations":[(7 15)] } {"message":"Subscription 'ImportantEmails'"<concat>" must not select an introspection top level field." "locations":[(7 15)] } ] )<block_end><def_stmt>fails_with_many_more_than_one_root_field <block_start>assert_errors("""
subscription ImportantEmails {
importantEmails
notImportantEmails
spamEmails
}
""" [{"message":"Subscription 'ImportantEmails'"<concat>" must select only one top level field." "locations":[(4 15) (5 15)] }] )<block_end><def_stmt>fails_with_more_than_one_root_field_via_fragments <block_start>assert_errors("""
subscription ImportantEmails {
importantEmails
... {
more: moreImportantEmails
}
...NotImportantEmails
}
fragment NotImportantEmails on SubscriptionRoot {
notImportantEmails
deleted: deletedEmails
...SpamEmails
}
fragment SpamEmails on SubscriptionRoot {
spamEmails
}
""" [{"message":"Subscription 'ImportantEmails'"<concat>" must select only one top level field." "locations":[(5 17) (10 15) (11 15) (15 15)] } ] )<block_end><def_stmt>does_not_infinite_loop_on_recursive_fragments <block_start>assert_errors("""
subscription NoInfiniteLoop {
...A
}
fragment A on SubscriptionRoot {
...A
}
""" [] )<block_end><def_stmt>fails_with_more_than_one_root_field_via_fragments_anonymous <block_start>assert_errors("""
subscription {
importantEmails
... {
more: moreImportantEmails
...NotImportantEmails
}
...NotImportantEmails
}
fragment NotImportantEmails on SubscriptionRoot {
notImportantEmails
deleted: deletedEmails
... {
... {
archivedEmails
}
}
...SpamEmails
}
fragment SpamEmails on SubscriptionRoot {
spamEmails
...NonExistentFragment
}
""" [{"message":"Anonymous Subscription"<concat>" must select only one top level field." "locations":[(5 17) (11 15) (12 15) (15 19) (21 15)] } ] )<block_end><def_stmt>fails_with_more_than_one_root_field_in_anonymous_subscriptions <block_start>assert_errors("""
subscription {
importantEmails
notImportantEmails
}
""" [{"message":"Anonymous Subscription"<concat>" must select only one top level field." "locations":[(4 15)] }] )<block_end><def_stmt>fails_with_introspection_field <block_start>assert_errors("""
subscription ImportantEmails {
__typename
}
""" [{"message":"Subscription 'ImportantEmails' must not"<concat>" select an introspection top level field." "locations":[(3 15)] }] )<block_end><def_stmt>fails_with_introspection_field_in_anonymous_subscription <block_start>assert_errors("""
subscription {
__typename
}
""" [{"message":"Anonymous Subscription must not"<concat>" select an introspection top level field." "locations":[(3 15)] }] )<block_end><def_stmt>skips_if_not_subscription_type <block_start>empty_schema=build_schema("""
type Query {
dummy: String
}
""")<line_sep>assert_errors("""
subscription {
__typename
}
""" [] schema=empty_schema )<block_end><block_end> |
# Generated by Django 3.2.12 on 2022-04-08 12:37
<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("webhook" "0008_webhook_subscription_query") ("core" "0004_delete_delivery_without_webhook") ]<line_sep>operations=[migrations.AlterField(model_name="eventdelivery" name="webhook" field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to="webhook.webhook") ) ]<block_end> |
<import_stmt>json<import_stmt>time<import_stmt>requests<import_from_stmt>PIL Image<import_from_stmt>pytesseract pytesseract<import_from_stmt>selenium webdriver<line_sep>"""
selenium和xpath的使用区别
selenium使用不需要自己写headers,只需要导入webdriver.Chrome().get(url)就会打开浏览器,使用find_xxx_by_xpath
写入自己的xpath语句即可
传统的xpath使用,需要导入etree.Html(url),然后写入自己的html.xpath(‘xxx’)
"""<line_sep>headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.75 Safari/537.36"}<def_stmt>test01 <block_start>"""斗鱼平台获取直播房间和的信息
1、爬取房间名称、类型、房主、关注人数、封面信息等
2、使用selenium进行爬取
"""<class_stmt>Douyu(object)<block_start><def_stmt>__init__ self<block_start>self.url='https://www.douyu.com/directory/all'<line_sep># 实例化浏览器对象
self.driver=webdriver.Chrome()<line_sep># 创建文件
self.file=open('douyu.json' 'w')<block_end># 解析数据
<def_stmt>parse_data self# 提取房间列表,必须使用elements
<block_start>node_list=self.driver.find_elements_by_xpath('//*[@id="listAll"]/section[2]/div[2]/ul/li')<line_sep># 测试节点列表
# print(len(node_list))
# 定义存储数据的容器
data_list=[]<line_sep># 遍历节点列表
<for_stmt>node node_list<block_start>temp={}<line_sep># 提取房间的标题/房间类型/房间主人/关注人数/封面
temp['title']=node.find_element_by_xpath('./div/div/h3').text<line_sep>temp['category']=node.find_element_by_xpath('./div/div/span').text<line_sep>temp['owner']=node.find_element_by_xpath('./div/p/span[1]').text<line_sep>temp['num']=node.find_element_by_xpath('./div/p/span[2]').text<line_sep>temp['cover']=node.find_element_by_xpath('./span/img').get_attribute('data-original')<line_sep># temp['link'] = node.get_attribute('href')
data_list.append(temp)<line_sep># print(temp)
<block_end># 返回数据
<return>data_list<block_end># 保存数据
<def_stmt>save_data self data_list# 遍历列表数据,因为里面存储的是字典类型
<block_start><for_stmt>data data_list<block_start>str_data=json.dumps(data ensure_ascii=<false>)+',\n'<line_sep>self.file.write(str_data)<block_end><block_end><def_stmt>__del__ self# 关闭浏览器对象
<block_start>self.driver.close()<line_sep>self.file.close()<block_end><def_stmt>run self# 构造url
# 构造webdriver浏览器对象
# 发送请求
<block_start>self.driver.get(self.url)<while_stmt><true># 解析数据,返回数据列表
<block_start>data_list=self.parse_data()<line_sep>self.save_data(data_list)<line_sep># 提取下一页链接,模拟点击
<try_stmt><block_start>ele_next_url=self.driver.find_element_by_xpath('//*[@class="shark-pager-next"]')<line_sep>ele_next_url.click()<line_sep>time.sleep(3)<block_end><except_stmt><block_start><break><block_end><block_end># 保存数据
<block_end><block_end>Douyu().run()<block_end><def_stmt>test02 <block_start><import_from_stmt>PIL Image<import_stmt>pytesseract<line_sep>"""谷歌图片识别的包:tesseract"""<line_sep># 使用pil加载一张图片到内存中,返回图片对象
img=Image.open('test.jpg')<line_sep># 调用tesseract进行识别,返回一个data
data=pytesseract.image_to_string(img)<line_sep># 输出结果
print(data)<block_end><def_stmt>test03 <block_start>"""图片识别验证码进行豆瓣登陆"""<line_sep># 创建浏览器对象
driver=webdriver.Chrome()<line_sep># 发送请求
driver.get('https://accounts.douban.com/login')<line_sep># 定位元素位置,账号
ele_email=driver.find_element_by_id('email')<line_sep># 把账号发送给表单
ele_email.send_keys('<KEY>')<line_sep># 定位元素,密码
ele_pswd=driver.find_element_by_id('password')<line_sep># 把密码发送给表单
ele_pswd.send_keys('123456shengjun')<line_sep># # 1、手动输入获取图片验证码
# 定位图片验证码所在的元素位置
# ele_captcha = driver.find_element_by_id('captcha_field')
# data = input('请输入图片验证码:')
# ele_captcha.send_keys(data)
# 2 使用ocr系统识别图片验证码
ele_image_captcha=driver.find_element_by_id('captcha_image')<line_sep>image_url=ele_image_captcha.get_attribute('src')<line_sep>print(image_url)<line_sep># 获取图片文件
headers={'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'}<line_sep># 发送get请求,获取图片文件bytes类型
# data = requests.get(image_url,headers=headers).content
# 保存图片文件
# with open('douban.jpg','wb') as f:
# f.write(data)
# time.sleep(3)
# 使用ocr系统
img=Image.open('douban.jpg')<line_sep>image_str=pytesseract.image_to_string(img)<line_sep>print('-------' image_str '-------')<line_sep># 定位登录按钮
ele_submit=driver.find_element_by_name('login')<line_sep># 模拟点击
ele_submit.click()<block_end><def_stmt>main # test01()
# test02()
<block_start>test03()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_from_stmt>office365.runtime.client_value ClientValue<class_stmt>SpaApplication(ClientValue)<block_start><pass><block_end> |
<import_from_stmt>spacenetutilities.labeltools coreLabelTools<import_stmt>json<import_stmt>glob<import_stmt>argparse<import_from_stmt>datetime datetime<import_stmt>os<def_stmt>modifyTimeField geoJson geoJsonNew featureItemsToAdd=['ingest_tim' 'ingest_time' 'edit_date'] featureKeyListToRemove=[]<block_start>now=datetime.today()<with_stmt>open(geoJson)<as>json_data<block_start>d=json.load(json_data)<block_end>featureList=d['features']<line_sep>newFeatureList=[]<for_stmt>feature featureList<block_start>tmpFeature=dict(feature)<for_stmt>featureKey featureKeyListToRemove<block_start><if_stmt>featureKey<in>tmpFeature['properties']<block_start><del_stmt>tmpFeature['properties'][featureKey]<block_end><block_end><for_stmt>featureKey featureItemsToAdd<block_start><if_stmt><not>(featureKey<in>tmpFeature['properties'])<block_start>print('inserting missing field')<line_sep>print(now.isoformat())<line_sep>tmpFeature['properties'][featureKey]=now.isoformat()<block_end><else_stmt><block_start><if_stmt><not>tmpFeature['properties'][featureKey]<block_start>print('filling empty field')<line_sep>tmpFeature['properties'][featureKey]=now.isoformat()<block_end><block_end><block_end>newFeatureList.append(tmpFeature)<block_end>d['features']=newFeatureList<if_stmt>os.path.exists(geoJsonNew)<block_start>os.remove(geoJsonNew)<block_end><with_stmt>open(geoJsonNew 'w')<as>json_data<block_start>json.dump(d json_data)<block_end><block_end><def_stmt>removeIdFieldFromJsonEntries geoJson geoJsonNew featureKeyListToRemove=['Id' 'id'] featureItemsToAdd={}<block_start><with_stmt>open(geoJson)<as>json_data<block_start>d=json.load(json_data)<block_end>featureList=d['features']<line_sep>newFeatureList=[]<for_stmt>feature featureList<block_start>tmpFeature=dict(feature)<for_stmt>featureKey 
featureKeyListToRemove<block_start><if_stmt>featureKey<in>tmpFeature['properties']<block_start><del_stmt>tmpFeature['properties'][featureKey]<block_end><block_end>tmpFeature.update(featureItemsToAdd)<line_sep>newFeatureList.append(tmpFeature)<block_end>d['features']=newFeatureList<if_stmt>os.path.exists(geoJsonNew)<block_start>os.remove(geoJsonNew)<block_end><with_stmt>open(geoJsonNew 'w')<as>json_data<block_start>json.dump(d json_data)<block_end><block_end><def_stmt>removeIdinGeoJSONFolder folder modifier='noid'<block_start>geoJsonList=glob.glob(os.path.join(folder '*.geojson'))<for_stmt>geojsonName geoJsonList<block_start>removeIdFieldFromJsonEntries(geojsonName geojsonName.replace('.geojson' '{}.geojson'.format(modifier)))<block_end><block_end> |
<import_from_stmt>hlo ShardingSpec ShardingSpecType<import_from_stmt>cluster_env ClusterEnvironment<import_from_stmt>common compute_bytes<def_stmt>test_tile <block_start>cluster_env=ClusterEnvironment([[0 1 2] [3 4 5]] [1 1] [1 1] <none>)<line_sep>sharding=ShardingSpec.tile((12 12) [0 1] [0 1] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(2 3)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12) [1 0] [1 0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(2 3)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12) [0 1] [1 0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(3 2)<assert_stmt>sharding.tile_assignment_devices<eq>(0 3 1 4 2 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12) [0] [0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(2 1 3)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><true><line_sep>sharding=ShardingSpec.tile((12 12) [0] [1] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(3 1 2)<assert_stmt>sharding.tile_assignment_devices<eq>(0 3 1 4 2 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><true><line_sep>sharding=ShardingSpec.tile((12 12) [1] [1] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(1 3 2)<assert_stmt>sharding.tile_assignment_devices<eq>(0 3 1 4 2 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><true><line_sep>sharding=ShardingSpec.tile((12 12) [1] [0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(1 2 3)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><true><line_sep>sharding=ShardingSpec.tile((12 12 
12) [0 1] [0 1] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(2 3 1)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12 12) [0 1] [1 0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(3 2 1)<assert_stmt>sharding.tile_assignment_devices<eq>(0 3 1 4 2 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12 12) [1] [0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(1 2 1 3)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3 4 5)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><true><block_end><def_stmt>test_tile2 <block_start>cluster_env=ClusterEnvironment([[0 1 2 3]] [1 1] [1 1] <none>)<line_sep>sharding=ShardingSpec.tile((12 12) [1] [1] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(1 4)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12) [1] [0] cluster_env)<assert_stmt>sharding.type<eq>ShardingSpecType.REPLICATED<line_sep>cluster_env=ClusterEnvironment([[0] [1] [2] [3]] [1 1] [1 1] <none>)<line_sep>sharding=ShardingSpec.tile((12 12) [1] [0] cluster_env)<assert_stmt>sharding.tile_assignment_dimensions<eq>(1 4)<assert_stmt>sharding.tile_assignment_devices<eq>(0 1 2 3)<assert_stmt>sharding.replicate_on_last_tile_dim<eq><false><line_sep>sharding=ShardingSpec.tile((12 12) [1] [1] cluster_env)<assert_stmt>sharding.type<eq>ShardingSpecType.REPLICATED<block_end><def_stmt>test_tile3 <block_start>cluster_env=ClusterEnvironment([[0 1] [2 3]] [1 1] [1 1] <none>)<line_sep>shape=(12 12)<line_sep>src=ShardingSpec.split(shape 1 cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>print(src)<line_sep>print(dst)<line_sep>cost=cluster_env.resharding_cost(shape src 
dst)<line_sep>print(cost)<block_end><def_stmt>assert_allclose x y<block_start><assert_stmt>abs((x-y)/(y+1e-8))<l>0.01<block_end><def_stmt>test_resharding_cost <block_start>cluster_env=ClusterEnvironment([[0 1 2] [3 4 5]] [1 1] [1 1] <none>)<line_sep>shape=(128 128)<line_sep>src=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost 0)<line_sep>src=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [1 0] [1 0] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost 0)<line_sep>src=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost 0)<line_sep>src=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost 0)<line_sep>src=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost cluster_env.all_gather_cost(compute_bytes(shape) 1))<line_sep>src=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>dst=ShardingSpec.replicated(cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src dst)<line_sep>assert_allclose(cost cluster_env.all_gather_cost(compute_bytes(shape) 0)+cluster_env.all_gather_cost(compute_bytes(shape) 1))<block_end><def_stmt>test_resharding_cost2 <block_start>cluster_env=ClusterEnvironment([[0] [1] [2] [3]] [1 1] [1 1] <none>)<line_sep>shape=(128 128)<line_sep>src=ShardingSpec.tile(shape [0 1] [0 1] cluster_env)<line_sep>dst=ShardingSpec.tile(shape [0] [0] cluster_env)<line_sep>cost=cluster_env.resharding_cost(shape src 
dst)<line_sep>assert_allclose(cost 0)<block_end><if_stmt>__name__<eq>"__main__"<block_start>test_tile()<line_sep>test_tile2()<line_sep>#test_tile3()
test_resharding_cost()<line_sep>test_resharding_cost2()<block_end> |
"""
Run multiple experiments on a single machine.
"""<import_stmt>subprocess<import_stmt>numpy<as>np<line_sep>ALGOS=["sac"]<line_sep>ENVS=["MountainCarContinuous-v0"]<line_sep>N_SEEDS=10<line_sep>EVAL_FREQ=5000<line_sep>N_EVAL_EPISODES=10<line_sep>LOG_STD_INIT=[-6 -5 -4 -3 -2 -1 0 1]<for_stmt>algo ALGOS<block_start><for_stmt>env_id ENVS<block_start><for_stmt>log_std_init LOG_STD_INIT<block_start>log_folder=f"logs_std_{np.exp(log_std_init):.4f}"<for_stmt>_ range(N_SEEDS)<block_start>args=["--algo" algo "--env" env_id "--hyperparams" f"policy_kwargs:dict(log_std_init={log_std_init}, net_arch=[64, 64])" "--eval-episodes" N_EVAL_EPISODES "--eval-freq" EVAL_FREQ "-f" log_folder ]<line_sep>args=list(map(str args))<line_sep>ok=subprocess.call(["python" "train.py"]+args)<block_end><block_end><block_end><block_end> |
"""Blueprints package."""<line_sep> |
<import_stmt>os<import_from_stmt>os.path exists<import_from_stmt>os.path join<import_from_stmt>os.path splitext<import_from_stmt>subprocess check_call<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>typing Mapping<import_from_stmt>typing Optional<import_from_stmt>.compat is_posix<import_from_stmt>.exc CommandError<def_stmt>open_in_editor filename:str environ:Optional[Dict[str str]]=<none><arrow><none><block_start>"""
Opens the given file in a text editor. If the environment variable
``EDITOR`` is set, this is taken as preference.
Otherwise, a list of commonly installed editors is tried.
If no editor matches, an :py:exc:`OSError` is raised.
:param filename: The filename to open. Will be passed verbatim to the
editor command.
:param environ: An optional drop-in replacement for ``os.environ``. Used
mainly for testing.
"""<line_sep>env=os.environ<if>environ<is><none><else>environ<try_stmt><block_start>editor=_find_editor(env)<line_sep>check_call([editor filename])<block_end><except_stmt>Exception<as>exc<block_start><raise>CommandError("Error executing editor (%s)"%(exc ))<from>exc<block_end><block_end><def_stmt>_find_editor environ:Mapping[str str]<arrow>str<block_start>candidates=_default_editors()<for_stmt>i,var enumerate(("EDITOR" "VISUAL"))<block_start><if_stmt>var<in>environ<block_start>user_choice=environ[var]<if_stmt>exists(user_choice)<block_start><return>user_choice<block_end><if_stmt>os.sep<not><in>user_choice<block_start>candidates.insert(i user_choice)<block_end><block_end><block_end><for_stmt>candidate candidates<block_start>path=_find_executable(candidate environ)<if_stmt>path<is><not><none><block_start><return>path<block_end><block_end><raise>OSError("No suitable editor found. Please set the "<concat>'"EDITOR" or "VISUAL" environment variables')<block_end><def_stmt>_find_executable candidate:str environ:Mapping[str str]<arrow>Optional[str]# Assuming this is on the PATH, we need to determine it's absolute
# location. Otherwise, ``check_call`` will fail
<block_start><if_stmt><not>is_posix<and>splitext(candidate)[1]<ne>".exe"<block_start>candidate<augadd>".exe"<block_end><for_stmt>path environ.get("PATH" "").split(os.pathsep)<block_start>value=join(path candidate)<if_stmt>exists(value)<block_start><return>value<block_end><block_end><return><none><block_end><def_stmt>_default_editors <arrow>List[str]# Look for an editor. Prefer the user's choice by env-var, fall back to
# most commonly installed editor (nano/vim)
<block_start><if_stmt>is_posix<block_start><return>["sensible-editor" "editor" "nano" "vim" "code"]<block_end><else_stmt><block_start><return>["code.exe" "notepad++.exe" "notepad.exe"]<block_end><block_end> |
<import_stmt>importlib<import_stmt>pathlib<import_stmt>tempfile<import_stmt>_pytest.monkeypatch<import_stmt>pytest<import_stmt>chime<line_sep>@pytest.fixture(scope='function' autouse=<true>)<def_stmt>reload_chime <block_start>importlib.reload(chime)<block_end>@pytest.fixture(scope='function' autouse=<true>)<def_stmt>mock_pathlib_home monkeypatch:_pytest.monkeypatch.MonkeyPatch<block_start><with_stmt>tempfile.TemporaryDirectory()<as>home_dir<block_start>home_dir_path=pathlib.Path(home_dir)<line_sep>monkeypatch.setattr(pathlib.Path name='home' value=<lambda>:home_dir_path)<line_sep>monkeypatch.setenv('APPDATA' value=str(home_dir_path))<block_end><block_end> |
"""
Variable Scope.
Variables have a global or local "scope".
For example, variables declared within either the
setup() or draw() functions may be only used in these
functions. Global variables, variables declared outside
of setup() and draw(), may be used anywhere within the program.
If a local variable is declared with the same name as a
global variable, the program will use the local variable to make
its calculations within the current scope. Variables are localized
within each block.
"""<line_sep>a=80# Create a global variable "a"
<def_stmt>setup <block_start>size(640 360)<line_sep>background(0)<line_sep>stroke(255)<line_sep>noLoop()<block_end><def_stmt>draw # Draw a line using the global variable "a".
<block_start>line(a 0 a height)<line_sep># Create a variable "b" local to the draw() function.
b=100<line_sep># Create a global variable "c".
<global>c<line_sep>c=320# Since "c" is global, it is avalaible to other functions.
# Make a call to the custom function drawGreenLine()
drawGreenLine()<line_sep># Draw a line using the local variable "b".
line(b 0 b height)<block_end># Note that "b" remains set to 100.
<def_stmt>drawGreenLine # Since "b" was defined as a variable local to the draw() function,
# this code inside this if statement will not run.
<block_start><if_stmt>('b'<in>locals()<or>'b'<in>globals())<block_start>background(255)# This won't run
<block_end><else_stmt><block_start><with_stmt>pushStyle()<block_start>stroke(0 255 0)<line_sep>b=320# Create a variable "b" local to drawGreenLine().
# Use the local variable "b" and the global variable "c" to draw a line.
line(b 0 c height)<block_end><block_end><block_end> |
<import_stmt>json<import_stmt>os<import_from_stmt>datetime datetime<import_stmt>boto3<import_from_stmt>aws_lambda_powertools.logging Logger<line_sep>logger=Logger()<line_sep>@logger.inject_lambda_context<def_stmt>main event context<block_start>records=event.get("Records" [])<line_sep>entries=[]<line_sep>stream_label=os.environ["STREAM_LABEL"]<line_sep>logger.info({"record_count":len(records) "stream":stream_label })<for_stmt>record records<block_start>keys=record.get("dynamodb").get("Keys")<line_sep>pk=keys["pk"]["S"]<line_sep>sk=keys["sk"]["S"]<line_sep># pk and sk are prefixed with <type>#, every char before the '#' describes the attribute type
pk_type=pk[:pk.find("#")]<line_sep>sk_type=sk[:sk.find("#")]<line_sep>event_name=record["eventName"]<line_sep>logger.info({"pk":pk "pk_type":pk_type "sk":sk "sk_type":sk_type "event_name":event_name })<line_sep>entry={"Source":f"{stream_label}" "Resources":[] "DetailType":event_name "Detail":json.dumps({"pk_type":pk_type "sk_type":sk_type "record":record}) "EventBusName":"default" }<line_sep>entries.append(entry)<block_end>client=boto3.client("events")<line_sep>response=client.put_events(Entries=entries)<line_sep>logger.debug(entries)<line_sep>logger.info({"num_entries":len(records) "failed_entries":response["FailedEntryCount"] })<line_sep><return><block_end> |
__all__=["ffn" "rbfn" "ffn_bn" "ffn_ace" "ffn_lae" "ffn_bn_vat" "ffn_vat" "cnn" "vae1" "cvae" "draw_at_lstm1" "draw_at_lstm2" "draw_lstm1" "draw_sgru1" "lm_lstm" "lm_lstm_bn" "lm_gru" "lm_draw"]<line_sep> |
<def_stmt>main <block_start><return>'example'<block_end> |
"""Tests for ht.ui.menus.opmenu module."""<line_sep># =============================================================================
# IMPORTS
# =============================================================================
# Houdini Toolbox
<import_stmt>ht.ui.menus.opmenu<line_sep># Houdini
<import_stmt>hou<line_sep># =============================================================================
# TESTS
# =============================================================================
<def_stmt>test_create_absolute_reference_copy mocker<block_start>"""Test ht.ui.menus.opmenu.create_absolute_reference_copy."""<line_sep>mock_node=mocker.MagicMock(spec=hou.Node)<line_sep>scriptargs={"node":mock_node}<line_sep>ht.ui.menus.opmenu.create_absolute_reference_copy(scriptargs)<line_sep>mock_node.parent.return_value.copyItems.assert_called_with([mock_node] channel_reference_originals=<true> relative_references=<false>)<block_end><def_stmt>test_save_item_to_file mocker<block_start>"""Test ht.ui.menus.opmenu.save_item_to_file."""<line_sep>mock_copy=mocker.patch("ht.ui.menus.opmenu.copy_item")<line_sep>mock_node=mocker.MagicMock(spec=hou.Node)<line_sep>scriptargs={"node":mock_node}<line_sep>ht.ui.menus.opmenu.save_item_to_file(scriptargs)<line_sep>mock_copy.assert_called_with(mock_node)<block_end> |
<import_from_stmt>django.apps AppConfig<class_stmt>BgpConfig(AppConfig)<block_start>name="bgp"<line_sep>verbose_name="BGP"<block_end> |
"""Subprocess vectorized environments.
See also:
* https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
* https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/subproc_vec_env.py
"""<import_stmt>copy<import_stmt>numpy<as>np<import_stmt>multiprocessing<as>mp<import_from_stmt>safe_control_gym.utils.utils get_random_state set_random_state<import_from_stmt>safe_control_gym.envs.env_wrappers.vectorized_env.vec_env VecEnv<import_from_stmt>safe_control_gym.envs.env_wrappers.vectorized_env.vec_env_utils _flatten_list _flatten_obs CloudpickleWrapper clear_mpi_env_vars<class_stmt>SubprocVecEnv(VecEnv)<block_start>"""Multiprocess envs.
"""<def_stmt>__init__ self env_fns spaces=<none> context="spawn" n_workers=1<block_start>self.waiting=<false><line_sep>self.closed=<false><line_sep>nenvs=len(env_fns)<line_sep>self.n_workers=n_workers<assert_stmt>nenvs%n_workers<eq>0 "Number of envs must be divisible by number of workers to run in series"<line_sep>env_fns=np.array_split(env_fns self.n_workers)<line_sep># Context is necessary for multiprocessing with CUDA, see pytorch.org/docs/stable/notes/multiprocessing.html
ctx=mp.get_context(context)<line_sep>self.remotes,self.work_remotes=zip(*[ctx.Pipe()<for>_ range(self.n_workers)])<line_sep>self.ps=[ctx.Process(target=worker args=(work_remote remote CloudpickleWrapper(env_fn)))<for>(work_remote remote env_fn) zip(self.work_remotes self.remotes env_fns)]<for_stmt>p self.ps<block_start>p.daemon=<true># If the main process crashes, we should not cause things to hang.
<with_stmt>clear_mpi_env_vars()<block_start>p.start()<block_end><block_end><for_stmt>remote self.work_remotes<block_start>remote.close()<block_end>self.remotes[0].send(('get_spaces_spec' <none>))<line_sep>observation_space,action_space=self.remotes[0].recv().x<line_sep>self.viewer=<none><line_sep>VecEnv.__init__(self nenvs observation_space action_space)<block_end><def_stmt>step_async self actions<block_start>self._assert_not_closed()<line_sep>actions=np.array_split(actions self.n_workers)<for_stmt>remote,action zip(self.remotes actions)<block_start>remote.send(('step' action))<block_end>self.waiting=<true><block_end><def_stmt>step_wait self<block_start>self._assert_not_closed()<line_sep>results=[remote.recv()<for>remote self.remotes]<line_sep>results=_flatten_list(results)<line_sep>self.waiting=<false><line_sep>obs,rews,dones,infos=zip(*results)<line_sep><return>_flatten_obs(obs) np.stack(rews) np.stack(dones) {"n":infos}<block_end><def_stmt>reset self<block_start>self._assert_not_closed()<for_stmt>remote self.remotes<block_start>remote.send(('reset' <none>))<block_end>results=[remote.recv()<for>remote self.remotes]<line_sep>results=_flatten_list(results)<line_sep>obs,infos=zip(*results)<line_sep><return>_flatten_obs(obs) {"n":infos}<block_end><def_stmt>get_images self<block_start>"""Called by parent `render` to support tiling images.
"""<line_sep>self._assert_not_closed()<for_stmt>pipe self.remotes<block_start>pipe.send(('render' <none>))<block_end>imgs=[pipe.recv()<for>pipe self.remotes]<line_sep>imgs=_flatten_list(imgs)<line_sep><return>imgs<block_end><def_stmt>close self<block_start><if_stmt>self.closed<block_start><return><block_end><if_stmt>self.viewer<is><not><none><block_start>self.viewer.close()<block_end><if_stmt>self.waiting<block_start><for_stmt>remote self.remotes<block_start>remote.recv()<block_end><block_end><for_stmt>remote self.remotes<block_start>remote.send(('close' <none>))<block_end><for_stmt>p self.ps<block_start>p.join()<block_end>self.closed=<true><block_end><def_stmt>_assert_not_closed self<block_start><assert_stmt><not>self.closed "Trying to operate on a SubprocVecEnv after calling close()"<block_end><def_stmt>get_env_random_state self<block_start><for_stmt>remote self.remotes<block_start>remote.send(('get_random_state' <none>))<block_end>worker_random_states=[remote.recv().x<for>remote self.remotes]<line_sep><return>worker_random_states<block_end><def_stmt>set_env_random_state self worker_random_states<block_start><for_stmt>remote,random_state zip(self.remotes worker_random_states)<block_start>remote.send(('set_random_state' random_state))<block_end>res=[remote.recv()<for>remote self.remotes]<block_end><def_stmt>get_attr self attr_name indices=<none><block_start>"""Return attribute from vectorized environment (see base class).
"""<line_sep>target_remotes,remote_env_indices=self._get_target_envs(indices)<for_stmt>remote,env_indices zip(target_remotes remote_env_indices)<block_start>remote.send(("get_attr" (env_indices attr_name)))<block_end><return>_flatten_list([remote.recv()<for>remote target_remotes])<block_end><def_stmt>set_attr self attr_name values indices=<none><block_start>"""Set attribute inside vectorized environments (see base class).
"""<line_sep>target_remotes,remote_env_indices,splits=self._get_target_envs(indices)<line_sep>value_splits=[]<for_stmt>i range(len(splits)-1)<block_start>start,end=splits[i] splits[i+1]<line_sep>value_splits.append(values[start:end])<block_end><for_stmt>remote,env_indices,value_split zip(target_remotes remote_env_indices value_splits)<block_start>remote.send(("set_attr" (env_indices attr_name value_split)))<block_end><for_stmt>remote target_remotes<block_start>remote.recv()<block_end><block_end><def_stmt>env_method self method_name method_args=<none> method_kwargs=<none> indices=<none><block_start>"""Call instance methods of vectorized environments.
"""<line_sep>target_remotes,remote_env_indices,splits=self._get_target_envs(indices)<line_sep>method_arg_splits,method_kwarg_splits=[] []<for_stmt>i range(len(splits)-1)<block_start>start,end=splits[i] splits[i+1]<if_stmt>method_args<is><none><block_start>method_arg_splits.append([[]]<times>len(end-start))<block_end><else_stmt><block_start>method_arg_splits.append(method_args[start:end])<block_end><if_stmt>method_kwargs<is><none><block_start>method_kwarg_splits.append([{}]<times>len(end-start))<block_end><else_stmt><block_start>method_kwarg_splits.append(method_kwargs[start:end])<block_end><block_end><for_stmt>remote,env_indices,method_arg_split,method_kwarg_split zip(target_remotes remote_env_indices method_arg_splits method_kwarg_splits)<block_start>remote.send(("env_method" (env_indices method_name method_arg_split method_kwarg_split)))<block_end><return>_flatten_list([remote.recv()<for>remote target_remotes])<block_end><def_stmt>_get_target_envs self indices<block_start>"""
Example:
n_workers: 3
current envs: [0,1,2,3,4,5]
remote_envs: [0,1], [2,3], [4,5]
target_envs: [1,1,3,4]
remote_indices: [0,0,1,1] -> [0,1]
splits: [0,2] -> [0,2,4]
remote_env_indices: [1,1,0,1] -> [1,1], [0,1]
"""<assert_stmt>indices<is><none><or>sorted(indices)<eq>indices "Indices must be ordered"<line_sep>indices=self._get_indices(indices)<line_sep>remote_indices=[idx<floordiv>self.n_workers<for>idx indices]<line_sep>remote_env_indices=[idx%self.n_workers<for>idx indices]<line_sep>remote_indices,splits=np.unique(np.array(remote_indices) return_index=<true>)<line_sep>target_remotes=[self.remotes[idx]<for>idx remote_indices]<line_sep>remote_env_indices=np.split(np.array(remote_env_indices) splits[1:])<line_sep>remote_env_indices=remote_env_indices.tolist()<line_sep>splits=np.append(splits [len(indices)])<line_sep><return>target_remotes remote_env_indices splits<block_end><block_end><def_stmt>worker remote parent_remote env_fn_wrappers<block_start>"""Worker func to execute vec_env commands.
"""<def_stmt>step_env env action<block_start>ob,reward,done,info=env.step(action)<if_stmt>done<block_start>end_obs=copy.deepcopy(ob)<line_sep>end_info=copy.deepcopy(info)<line_sep>ob,info=env.reset()<line_sep>info["terminal_observation"]=end_obs<line_sep>info["terminal_info"]=end_info<block_end><return>ob reward done info<block_end>parent_remote.close()<line_sep>envs=[env_fn_wrapper()<for>env_fn_wrapper env_fn_wrappers.x]<try_stmt><block_start><while_stmt><true><block_start>cmd,data=remote.recv()<line_sep># Branch out for requests.
<if_stmt>cmd<eq>'step'<block_start>remote.send([step_env(env action)<for>env,action zip(envs data)])<block_end><elif_stmt>cmd<eq>'reset'<block_start>remote.send([env.reset()<for>env envs])<block_end><elif_stmt>cmd<eq>'render'<block_start>remote.send([env.render(mode='rgb_array')<for>env envs])<block_end><elif_stmt>cmd<eq>'close'<block_start>remote.close()<line_sep><break><block_end><elif_stmt>cmd<eq>'get_spaces_spec'<block_start>remote.send(CloudpickleWrapper((envs[0].observation_space envs[0].action_space)))<block_end><elif_stmt>cmd<eq>"get_random_state"<block_start>remote.send(CloudpickleWrapper(get_random_state()))<block_end><elif_stmt>cmd<eq>"set_random_state"<block_start>set_random_state(data)<line_sep># Placeholder for the return.
remote.send(<true>)<block_end><elif_stmt>cmd<eq>"get_attr"<block_start>env_indices,attr_name=data<line_sep>target_envs=[envs[idx]<for>idx env_indices]<line_sep>remote.send([getattr(env attr_name)<for>env target_envs])<block_end><elif_stmt>cmd<eq>"set_attr"<block_start>env_indices,attr_name,values=data<line_sep>target_envs=[envs[idx]<for>idx env_indices]<line_sep>remote.send([setattr(env attr_name value)<for>env,value zip(target_envs values)])<block_end><elif_stmt>cmd<eq>"env_method"<block_start>env_indices,name,args_list,kwargs_list=data<line_sep>target_envs=[envs[idx]<for>idx env_indices]<line_sep>methods=[getattr(env name)<for>env target_envs]<line_sep>remote.send([method(*args **kwargs)<for>method,args,kwargs zip(methods args_list kwargs_list)])<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print('SubprocVecEnv worker: got KeyboardInterrupt')<block_end><except_stmt>Exception<as>e<block_start>print('Environment runner process failed...')<line_sep>print(str(e))<block_end><finally_stmt><block_start><for_stmt>env envs<block_start>env.close()<block_end><block_end><block_end> |
""" Tries to stop all services until they are stopped. """<import_stmt>argparse<import_stmt>logging<import_stmt>time<import_from_stmt>appscale.common service_helper<import_from_stmt>appscale.common.constants LOG_FORMAT<import_from_stmt>appscale.common.retrying retry<line_sep>logger=logging.getLogger(__name__)<def_stmt>start_service <block_start>""" Starts a service. """<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('service' help='The service to start')<line_sep>args=parser.parse_args()<line_sep>service_helper.start(args.service)<block_end><def_stmt>stop_service <block_start>""" Stops a service. """<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('service' help='The service to stop')<line_sep>args=parser.parse_args()<line_sep>service_helper.stop(args.service)<block_end><def_stmt>stop_services <block_start>""" Tries to stop all appscale services until they are stopped. """<line_sep>@retry(max_retries=3)<def_stmt>stop_with_retries <block_start>logger.debug('Stopping AppScale services')<line_sep>service_helper.start('appscale-down.target' enable=<false>)<block_end>logger.info('Waiting for services to stop')<line_sep>stop_requested=<false><line_sep>original_services_count=<none><line_sep>stopped_count=0<while_stmt><true><block_start>services=service_helper.list()<if_stmt>original_services_count<is><none><block_start>original_services_count=len(services)<block_end>running={service:state<for>service,state services.items()<if>state<not><in>('stopped')}<if_stmt><not>running<block_start>logger.info('Finished stopping services')<line_sep><break><block_end><if_stmt>original_services_count-len(running)<ne>stopped_count<block_start>stopped_count=original_services_count-len(running)<line_sep>logger.info('Stopped {}/{} services'.format(stopped_count original_services_count))<block_end><if_stmt><not>stop_requested<block_start>stop_with_retries()<line_sep>stop_requested=<true><block_end>time.sleep(min(0.3<times>len(running) 
5))<block_end><block_end><def_stmt>main <block_start>""" Main function which terminates all appscale processes. """<line_sep>logging.basicConfig(format=LOG_FORMAT level=logging.INFO)<line_sep># Parse command line arguments
parser=argparse.ArgumentParser(description='A stop services command')<line_sep>parser.add_argument('--verbose' action='store_true' help='Output debug-level logging')<line_sep>args=parser.parse_args()<if_stmt>args.verbose<block_start>logging.getLogger('appscale').setLevel(logging.DEBUG)<block_end>stop_services()<block_end> |
# Copyright 2017, <NAME>
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>pecan expose<import_from_stmt>repoxplorer.controllers utils<import_from_stmt>repoxplorer index<import_from_stmt>repoxplorer.index.projects Projects<import_from_stmt>repoxplorer.index.tags Tags<import_from_stmt>repoxplorer.index.contributors Contributors<class_stmt>TagsController(object)<block_start>@expose('json')<def_stmt>tags self pid=<none> tid=<none> dfrom=<none> dto=<none> inc_repos=<none><block_start>t=Tags(index.Connector(index_suffix='tags'))<line_sep>projects_index=Projects()<line_sep>idents=Contributors()<line_sep>query_kwargs=utils.resolv_filters(projects_index idents pid tid <none> <none> dfrom dto inc_repos <none> <none> <none> <none>)<line_sep>p_filter=[":".join(r.split(':')[:-1])<for>r query_kwargs['repos']]<line_sep>dfrom=query_kwargs['fromdate']<line_sep>dto=query_kwargs['todate']<line_sep>ret=[r['_source']<for>r t.get_tags(p_filter dfrom dto)]<line_sep># TODO: if tid is given we can include user defined releases
# for repo tagged with tid.
<if_stmt><not>pid<block_start><return>ret<block_end># now append user defined releases
ur={}<line_sep>project=projects_index.get(pid source=['refs' 'releases'])<for_stmt>release project.get('releases' [])<block_start>ur[release['name']]=release<block_end><for_stmt>ref project['refs']<block_start><for_stmt>release ref.get('releases' [])<block_start>ur[release['name']]=release<block_end><block_end><for_stmt>rel ur.values()<block_start>ret.append(rel)<block_end><return>ret<block_end><block_end> |
BOT_NAME='p5_downloader_middleware_handson'<line_sep>SPIDER_MODULES=['p5_downloader_middleware_handson.spiders']<line_sep>NEWSPIDER_MODULE='p5_downloader_middleware_handson.spiders'<line_sep>ROBOTSTXT_OBEY=<true><line_sep>DOWNLOADER_MIDDLEWARES={'p5_downloader_middleware_handson.middlewares.SeleniumDownloaderMiddleware':543 }<line_sep>SELENIUM_ENABLED=<true><line_sep> |
"""Provide compatibility over different versions of asyncio."""<import_stmt>asyncio<if_stmt>hasattr(asyncio "async")# Compatibility for Python 3.3 and older
<block_start>ensure_future=getattr(asyncio "async")<block_end><else_stmt><block_start>ensure_future=asyncio.ensure_future<block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>random<import_stmt>string<import_stmt>argparse<import_stmt>torch<import_stmt>torch.backends.cudnn<as>cudnn<import_stmt>torch.nn.init<as>init<import_stmt>torch.optim<as>optim<import_stmt>torch.utils.data<import_stmt>numpy<as>np<import_from_stmt>tqdm tqdm<import_from_stmt>utils Averager adjust_learning_rate AttnLabelConverter<import_from_stmt>dataset hierarchical_dataset AlignCollate_SelfSL Batch_Balanced_Dataset<import_from_stmt>model Model<import_from_stmt>modules.self_supervised MoCoLoss<import_from_stmt>test validation<line_sep>device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<def_stmt>train opt log<block_start><if_stmt>opt.self<eq>"MoCo"<block_start>opt.batch_size=256<block_end>""" dataset preparation """<if_stmt>opt.select_data<eq>"unlabel"<block_start>select_data=["U1.Book32" "U2.TextVQA" "U3.STVQA"]<line_sep>batch_ratio=[round(1/len(select_data) 3)]<times>len(select_data)<block_end><else_stmt><block_start>select_data=opt.select_data.split("-")<line_sep>batch_ratio=opt.batch_ratio.split("-")<block_end>train_loader=Batch_Balanced_Dataset(opt opt.train_data select_data batch_ratio log learn_type="self")<line_sep>AlignCollate_valid=AlignCollate_SelfSL(opt)<line_sep>valid_dataset,valid_dataset_log=hierarchical_dataset(root=opt.valid_data opt=opt data_type="unlabel")<line_sep>valid_loader=torch.utils.data.DataLoader(valid_dataset batch_size=opt.batch_size shuffle=<true> # 'True' to check training progress with validation function.
num_workers=int(opt.workers) collate_fn=AlignCollate_valid pin_memory=<false> )<line_sep>log.write(valid_dataset_log)<line_sep>print("-"<times>80)<line_sep>log.write("-"<times>80+"\n")<line_sep>""" model configuration """<if_stmt>opt.self<eq>"RotNet"<block_start>model=Model(opt SelfSL_layer=opt.SelfSL_layer)<line_sep># weight initialization
<for_stmt>name,param model.named_parameters()<block_start><if_stmt>"localization_fc2"<in>name<block_start>print(f"Skip {name} as it is already initialized")<line_sep><continue><block_end><try_stmt><block_start><if_stmt>"bias"<in>name<block_start>init.constant_(param 0.0)<block_end><elif_stmt>"weight"<in>name<block_start>init.kaiming_normal_(param)<block_end><block_end><except_stmt>Exception<as>e# for batchnorm.
<block_start><if_stmt>"weight"<in>name<block_start>param.data.fill_(1)<block_end><continue><block_end><block_end><block_end><elif_stmt>opt.self<eq>"MoCo"<block_start>model=MoCoLoss(opt dim=opt.moco_dim K=opt.moco_k m=opt.moco_m T=opt.moco_t)<block_end># data parallel for multi-GPU
model=torch.nn.DataParallel(model).to(device)<line_sep>model.train()<if_stmt>opt.saved_model<ne>""<block_start>print(f"loading pretrained model from {opt.saved_model}")<if_stmt>opt.FT<block_start>model.load_state_dict(torch.load(opt.saved_model) strict=<false>)<block_end><else_stmt><block_start>model.load_state_dict(torch.load(opt.saved_model))<block_end><block_end>print("Model:")<line_sep>print(model)<line_sep>log.write(repr(model)+"\n")<line_sep>""" setup loss """<line_sep>criterion=torch.nn.CrossEntropyLoss(ignore_index=-1).to(device)<line_sep># loss averager
train_loss_avg=Averager()<line_sep>valid_loss_avg=Averager()<line_sep># filter that only require gradient descent
filtered_parameters=[]<line_sep>params_num=[]<for_stmt>p filter(<lambda>p:p.requires_grad model.parameters())<block_start>filtered_parameters.append(p)<line_sep>params_num.append(np.prod(p.size()))<block_end>print(f"Trainable params num: {sum(params_num)}")<line_sep>log.write(f"Trainable params num: {sum(params_num)}\n")<line_sep># [print(name, p.numel()) for name, p in filter(lambda p: p[1].requires_grad, model.named_parameters())]
# setup optimizer
<if_stmt>opt.optimizer<eq>"adam"<block_start>optimizer=torch.optim.Adam(filtered_parameters lr=opt.lr)<block_end><elif_stmt>opt.self<eq>"MoCo"<block_start>optimizer=torch.optim.SGD(filtered_parameters lr=opt.moco_lr momentum=opt.moco_SGD_m weight_decay=opt.moco_wd )<line_sep>opt.schedule=opt.moco_schedule<line_sep>opt.lr=opt.moco_lr<line_sep>opt.lr_drop_rate=opt.moco_lr_drop_rate<block_end><else_stmt><block_start>optimizer=torch.optim.SGD(filtered_parameters lr=opt.lr momentum=opt.momentum weight_decay=opt.weight_decay )<block_end>print("Optimizer:")<line_sep>print(optimizer)<line_sep>log.write(repr(optimizer)+"\n")<if_stmt>"super"<in>opt.schedule<block_start><if_stmt>opt.optimizer<eq>"sgd"<block_start>cycle_momentum=<true><block_end><else_stmt><block_start>cycle_momentum=<false><block_end>scheduler=torch.optim.lr_scheduler.OneCycleLR(optimizer max_lr=opt.lr cycle_momentum=cycle_momentum div_factor=20 final_div_factor=1000 total_steps=opt.num_iter )<line_sep>print("Scheduler:")<line_sep>print(scheduler)<line_sep>log.write(repr(scheduler)+"\n")<block_end>""" final options """<line_sep># print(opt)
opt_log="------------ Options -------------\n"<line_sep>args=vars(opt)<for_stmt>k,v args.items()<block_start>opt_log<augadd>f"{str(k)}: {str(v)}\n"<block_end>opt_log<augadd>"---------------------------------------\n"<line_sep>print(opt_log)<line_sep>log.write(opt_log)<line_sep>log.close()<line_sep>""" start training """<line_sep>start_iter=0<if_stmt>opt.saved_model<ne>""<block_start><try_stmt><block_start>start_iter=int(opt.saved_model.split("_")[-1].split(".")[0])<line_sep>print(f"continue to train, start_iter: {start_iter}")<block_end><except_stmt><block_start><pass><block_end><block_end>start_time=time.time()<line_sep>iteration=start_iter<line_sep>best_score=-1<line_sep># training loop
<for_stmt>iteration tqdm(range(start_iter+1 opt.num_iter+1) total=opt.num_iter position=0 leave=<true> )# train part
<block_start><if_stmt>opt.self<eq>"RotNet"<block_start>image,Self_label=train_loader.get_batch()<line_sep>image=image.to(device)<line_sep>preds=model(image SelfSL_layer=opt.SelfSL_layer)<line_sep>target=torch.LongTensor(Self_label).to(device)<block_end><elif_stmt>opt.self<eq>"MoCo"<block_start>q,k=train_loader.get_batch_two_images()<line_sep>q=q.to(device)<line_sep>k=k.to(device)<line_sep>preds,target=model(im_q=q im_k=k)<block_end>loss=criterion(preds target)<line_sep>train_loss_avg.add(loss)<line_sep>model.zero_grad()<line_sep>loss.backward()<line_sep>torch.nn.utils.clip_grad_norm_(model.parameters() opt.grad_clip)<line_sep># gradient clipping with 5 (Default)
optimizer.step()<if_stmt>"super"<in>opt.schedule<block_start>scheduler.step()<block_end><else_stmt><block_start>adjust_learning_rate(optimizer iteration opt)<block_end># validation part.
# To see training progress, we also conduct validation when 'iteration == 1'
<if_stmt>iteration%opt.val_interval<eq>0<or>iteration<eq>1# for validation log
<block_start><with_stmt>open(f"./saved_models/{opt.exp_name}/log_train.txt" "a")<as>log<block_start>model.eval()<with_stmt>torch.no_grad()<block_start>length_of_data=0<line_sep>infer_time=0<line_sep>n_correct=0<for_stmt>i,(image_valid Self_label_valid) tqdm(enumerate(valid_loader) total=len(valid_loader) position=1 leave=<false> )<block_start><if_stmt>opt.self<eq>"RotNet"<block_start>batch_size=image_valid.size(0)<line_sep>start_infer_time=time.time()<line_sep>preds=model(image_valid.to(device) SelfSL_layer=opt.SelfSL_layer)<line_sep>forward_time=time.time()-start_infer_time<line_sep>target=torch.LongTensor(Self_label_valid).to(device)<block_end><elif_stmt>opt.self<eq>"MoCo"<block_start>batch_size=image_valid.size(0)<line_sep>q_valid=image_valid.to(device)<line_sep>k_valid=Self_label_valid.to(device)<line_sep>start_infer_time=time.time()<line_sep>preds,target=model(im_q=q_valid im_k=k_valid)<line_sep>forward_time=time.time()-start_infer_time<block_end>loss=criterion(preds target)<line_sep>valid_loss_avg.add(loss)<line_sep>infer_time<augadd>forward_time<line_sep>_,preds_index=preds.max(1)<line_sep>n_correct<augadd>(preds_index<eq>target).sum().item()<line_sep>length_of_data=length_of_data+batch_size<block_end>current_score=n_correct/length_of_data<times>100<block_end>model.train()<line_sep># keep best score (accuracy) model on valid dataset
<if_stmt>current_score<g>best_score<block_start>best_score=current_score<line_sep>torch.save(model.state_dict() f"./saved_models/{opt.exp_name}/best_score.pth" )<block_end># validation log: loss, lr, score, time.
lr=optimizer.param_groups[0]["lr"]<line_sep>elapsed_time=time.time()-start_time<line_sep>valid_log=f"\n[{iteration}/{opt.num_iter}] Train loss: {train_loss_avg.val():0.5f}, Valid loss: {valid_loss_avg.val():0.5f}, lr: {lr:0.7f}\n"<line_sep>valid_log<augadd>f"Best_score: {best_score:0.2f}, Current_score: {current_score:0.2f}, "<line_sep>valid_log<augadd>(f"Infer_time: {infer_time:0.1f}, Elapsed_time: {elapsed_time:0.1f}")<line_sep>train_loss_avg.reset()<line_sep>valid_loss_avg.reset()<line_sep># show some predicted results
dashed_line="-"<times>80<if_stmt>opt.self<eq>"RotNet"<block_start>head=f"GT:0 vs Pred | GT:90 vs Pred | GT:180 vs Pred | GT:270 vs Pred"<line_sep>preds_index=preds_index[:20]<line_sep>gts=Self_label_valid[:20]<block_end><elif_stmt>opt.self<eq>"MoCo"<block_start>head=f"GT:0 vs Pred | GT:0 vs Pred | GT:0 vs Pred | GT:0 vs Pred"<line_sep>preds_index=preds_index[:8]<line_sep>gts=torch.zeros(preds_index.shape[0] dtype=torch.long)<block_end>predicted_result_log=f"{dashed_line}\n{head}\n{dashed_line}\n"<for_stmt>i,(gt pred) enumerate(zip(gts preds_index))<block_start><if_stmt>opt.self<eq>"RotNet"<block_start>gt,pred=gt<times>90 pred<times>90<block_end><if_stmt>i%4<ne>3<block_start>predicted_result_log<augadd>f"{gt} vs {pred} | "<block_end><else_stmt><block_start>predicted_result_log<augadd>f"{gt} vs {pred} \n"<block_end><block_end>predicted_result_log<augadd>f"{dashed_line}"<line_sep>valid_log=f"{valid_log}\n{predicted_result_log}"<line_sep>print(valid_log)<line_sep>log.write(valid_log+"\n")<block_end><block_end><block_end>print(f'finished the experiment: {opt.exp_name}, "CUDA_VISIBLE_DEVICES" was {opt.CUDA_VISIBLE_DEVICES}')<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--train_data" default="data_CVPR2021/training/unlabel/" help="path to training dataset" )<line_sep>parser.add_argument("--valid_data" default="data_CVPR2021/validation/" help="path to validation dataset" )<line_sep>parser.add_argument("--workers" type=int default=4 help="number of data loading workers")<line_sep>parser.add_argument("--batch_size" type=int default=128 help="input batch size")<line_sep>parser.add_argument("--num_iter" type=int default=200000 help="number of iterations to train for")<line_sep>parser.add_argument("--val_interval" type=int default=2000 help="Interval between each validation" )<line_sep>parser.add_argument("--FT" type=str default="init" help="whether to do fine-tuning 
|init|freeze|")<line_sep>parser.add_argument("--optimizer" type=str default="sgd" help="optimizer |sgd|adadelta|adam|")<line_sep>parser.add_argument("--lr" type=float default=0.1 help="learning rate. default for RotNet")<line_sep>parser.add_argument("--momentum" type=float default=0.9 help="momentum for SGD. default for RotNet" )<line_sep>parser.add_argument("--weight_decay" type=float default=5e-4 help="weight_decay for SGD. default for RotNet" )<line_sep>parser.add_argument("--schedule" default=[0.3 0.6 0.8] nargs="*" help="learning rate schedule (when to drop lr by lr_drop_rate) default for RotNet" )<line_sep>parser.add_argument("--lr_drop_rate" type=float default=0.2 help="lr_drop_rate. default for RotNet" )<line_sep>parser.add_argument("--grad_clip" type=float default=5 help="gradient clipping value. default=5")<line_sep>""" Data processing """<line_sep>parser.add_argument("--select_data" type=str default="unlabel" help="select training data default is `unlabel` which means 11 real labeled datasets" )<line_sep>parser.add_argument("--batch_ratio" type=str default="1" help="assign ratio for each selected data in the batch" )<line_sep>parser.add_argument("--total_data_usage_ratio" type=str default="1.0" help="total data usage ratio, this ratio is multiplied to total number of data." 
)<line_sep>parser.add_argument("--batch_max_length" type=int default=25 help="maximum-label-length")<line_sep>parser.add_argument("--imgH" type=int default=32 help="the height of the input image")<line_sep>parser.add_argument("--imgW" type=int default=100 help="the width of the input image")<line_sep>parser.add_argument("--Aug" type=str default="None" help="whether to use augmentation |None|mixup|manifold|cutmix|" )<line_sep>""" Model Architecture """<line_sep>parser.add_argument("--model_name" type=str required=<true> help="CRNN|TRBA")<line_sep>parser.add_argument("--num_fiducial" type=int default=20 help="number of fiducial points of TPS-STN" )<line_sep>parser.add_argument("--input_channel" type=int default=3 help="the number of input channel of Feature extractor" )<line_sep>parser.add_argument("--output_channel" type=int default=512 help="the number of output channel of Feature extractor" )<line_sep>parser.add_argument("--hidden_size" type=int default=256 help="the size of the LSTM hidden state")<line_sep>""" Self supervised learning """<line_sep>parser.add_argument("--self" type=str default="RotNet" help="whether to use self-supervised learning |RotNet|MoCo|" )<line_sep>parser.add_argument("--SelfSL_layer" type=str default="CNN" help="for SelfSL_layer")<line_sep># moco specific configs:
parser.add_argument("--moco_dim" default=128 type=int help="feature dimension (default: 128)")<line_sep>parser.add_argument("--moco_k" default=65536 type=int help="queue size; number of negative keys (default: 65536)" )<line_sep>parser.add_argument("--moco_m" default=0.999 type=float help="moco momentum of updating key encoder (default: 0.999)" )<line_sep>parser.add_argument("--moco_t" default=0.07 type=float help="softmax temperature (default: 0.07)")<line_sep>parser.add_argument("--moco_lr" default=0.03 type=float help="SGD lr for moco")<line_sep>parser.add_argument("--moco_wd" default=0.0001 type=float help="SGD weight_decay for moco")<line_sep>parser.add_argument("--moco_SGD_m" default=0.9 type=float help="SGD momentum for moco")<line_sep>parser.add_argument("--moco_schedule" default=[0.6 0.8] type=float help="SGD momentum for moco")<line_sep>parser.add_argument("--moco_lr_drop_rate" type=float default=0.1 help="moco lr_drop_rate")<line_sep>""" exp_name and etc """<line_sep>parser.add_argument("--exp_name" help="Where to store logs and models")<line_sep>parser.add_argument("--manual_seed" type=int default=111 help="for random seed setting")<line_sep>parser.add_argument("--saved_model" default="" help="path to model to continue training")<line_sep>opt=parser.parse_args()<line_sep>opt.gpu_name="_".join(torch.cuda.get_device_name().split())<line_sep># Use 'NV' for CRNN, 'NR' or 'TR' for TRBA.
<if_stmt>opt.model_name[0]<eq>"N"<block_start>opt.Transformation="None"<block_end><elif_stmt>opt.model_name[0]<eq>"T"<block_start>opt.Transformation="TPS"<block_end><else_stmt><block_start><raise><block_end><if_stmt>opt.model_name[1]<eq>"V"<block_start>opt.FeatureExtraction="VGG"<block_end><elif_stmt>opt.model_name[1]<eq>"R"<block_start>opt.FeatureExtraction="ResNet"<block_end><else_stmt><block_start><raise><block_end>opt.SequenceModeling="None"<line_sep>opt.Prediction="None"<if_stmt><not>opt.exp_name<block_start>opt.exp_name=(f"pretrain-{opt.model_name}-{opt.self}-{opt.SelfSL_layer}-{opt.gpu_name}")<line_sep>opt.exp_name<augadd>f"-Seed{opt.manual_seed}"<block_end>os.makedirs(f"./saved_models/{opt.exp_name}" exist_ok=<true>)<line_sep>log=open(f"./saved_models/{opt.exp_name}/log_train.txt" "a")<line_sep>command_line_input=" ".join(sys.argv)<line_sep>log.write(f"Command line input: {command_line_input}\n")<line_sep>""" Seed and GPU setting """<line_sep>random.seed(opt.manual_seed)<line_sep>np.random.seed(opt.manual_seed)<line_sep>torch.manual_seed(opt.manual_seed)<line_sep>torch.cuda.manual_seed_all(opt.manual_seed)# if you are using multi-GPU.
torch.cuda.manual_seed(opt.manual_seed)<line_sep>cudnn.benchmark=<true># It fasten training.
cudnn.deterministic=<true><line_sep>opt.gpu_name="_".join(torch.cuda.get_device_name().split())<if_stmt>sys.platform<eq>"linux"<block_start>opt.CUDA_VISIBLE_DEVICES=os.environ["CUDA_VISIBLE_DEVICES"]<block_end><else_stmt><block_start>opt.CUDA_VISIBLE_DEVICES=0# for convenience
<block_end>opt.num_gpu=torch.cuda.device_count()<if_stmt>opt.num_gpu<g>1<block_start>print("We recommend to use 1 GPU, check your GPU number, you would miss CUDA_VISIBLE_DEVICES=0 or typo")<line_sep>print("To use multi-gpu setting, remove or comment out these lines")<line_sep>sys.exit()<block_end><if_stmt>sys.platform<eq>"win32"<block_start>opt.workers=0<block_end>train(opt log)<block_end> |
<import_from_stmt>..core np<import_from_stmt>..exceptions *<import_from_stmt>.logps *<import_stmt>sampyl<as>smp<import_stmt>pytest<line_sep>#TODO: Make tests to check correctness of samplers
np_source=np.__package__<line_sep>n_samples=100<def_stmt>test_logp_with_grad <block_start>logp=poisson_with_grad<line_sep>start={'lam1':1. 'lam2':1.}<line_sep>nuts=smp.NUTS(logp start grad_logp=<true>)<line_sep>chain=nuts.sample(n_samples)<assert_stmt>(len(chain)<eq>n_samples)<block_end><def_stmt>test_parallel_lin_model <block_start>logp=linear_model_logp<line_sep>start={'b':np.zeros(5) 'sig':1.}<line_sep>metro=smp.Metropolis(logp start)<line_sep>nuts=smp.NUTS(logp start)<line_sep>metro_chains=metro.sample(n_samples n_chains=2)<line_sep>nuts_chains=nuts.sample(n_samples n_chains=2)<assert_stmt>(len(metro_chains)<eq>2)<assert_stmt>(len(nuts_chains)<eq>2)<block_end><def_stmt>test_parallel_2D <block_start>start={'lam1':1. 'lam2':1.}<line_sep>metro=smp.Metropolis(poisson_logp start)<line_sep>nuts=smp.NUTS(poisson_logp start)<line_sep>metro_chains=metro.sample(n_samples n_chains=2)<line_sep>nuts_chains=nuts.sample(n_samples n_chains=2)<assert_stmt>(len(metro_chains)<eq>2)<assert_stmt>(len(nuts_chains)<eq>2)<block_end><def_stmt>test_sample_chain <block_start>start={'lam1':1. 'lam2':1.}<line_sep>step1=smp.Metropolis(poisson_logp start condition=['lam2'])<line_sep>step2=smp.NUTS(poisson_logp start condition=['lam1'])<line_sep>chain=smp.Chain([step1 step2] start)<line_sep>trace=chain.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_conditional_chain <block_start>logp=poisson_logp<line_sep>start={'lam1':1. 'lam2':2.}<line_sep>metro=smp.Metropolis(logp start condition=['lam2'])<line_sep>nuts=smp.NUTS(logp start condition=['lam1'])<line_sep>state=metro._conditional_step()<assert_stmt>(state['lam2']<eq>2.)<line_sep>nuts.state.update(state)<line_sep>state=nuts._conditional_step()<assert_stmt>(len(state)<eq>2)<block_end><def_stmt>test_conditional <block_start>logp=poisson_logp<line_sep>start={'lam1':1. 
'lam2':2.}<line_sep>metro=smp.Metropolis(logp start condition=['lam2'])<line_sep>state=metro._conditional_step()<assert_stmt>(len(state)<eq>2)<assert_stmt>(state['lam2']<eq>2.)<block_end><def_stmt>test_metropolis_linear_model <block_start>logp=linear_model_logp<line_sep>start={'b':np.zeros(5) 'sig':1.}<line_sep>metro=smp.Metropolis(logp start)<line_sep>trace=metro.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_hamiltonian_linear_model <block_start>logp=linear_model_logp<line_sep>start={'b':np.zeros(5) 'sig':1.}<line_sep>hmc=smp.Hamiltonian(logp start)<line_sep>trace=hmc.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_nuts_linear_model <block_start>logp=linear_model_logp<line_sep>start={'b':np.zeros(5) 'sig':1.}<line_sep>nuts=smp.NUTS(logp start)<line_sep>trace=nuts.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_metropolis <block_start>logp=normal_1D_logp<line_sep>start={'x':1.}<line_sep>metro=smp.Metropolis(logp start)<line_sep>trace=metro.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_hmc_autograd <block_start>logp=normal_1D_logp<line_sep>start={'x':1.}<if_stmt>np_source<eq>'autograd.numpy'<block_start>hmc=smp.Hamiltonian(logp start)<line_sep>trace=hmc.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><elif_stmt>np_source<eq>'numpy'<block_start><with_stmt>pytest.raises(AutogradError)<block_start>hmc=smp.Hamiltonian(logp start)<block_end><block_end><block_end><def_stmt>test_hmc_pass_grad_logp <block_start>logp,grad_logp=normal_1D_logp normal_1D_grad_logp<line_sep>start={'x':1.}<line_sep>hmc=smp.Hamiltonian(logp start grad_logp=grad_logp)<line_sep>trace=hmc.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_NUTS_autograd <block_start>logp=normal_1D_logp<line_sep>start={'x':1.}<if_stmt>np_source<eq>'autograd.numpy'<block_start>nuts=smp.NUTS(logp 
start)<line_sep>trace=nuts.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><elif_stmt>np_source<eq>'numpy'<block_start><with_stmt>pytest.raises(AutogradError)<block_start>nuts=smp.NUTS(logp start)<block_end><block_end><block_end><def_stmt>test_NUTS_pass_grad_logp <block_start>logp,grad_logp=normal_1D_logp normal_1D_grad_logp<line_sep>start={'x':1.}<line_sep>nuts=smp.NUTS(logp start grad_logp=grad_logp)<line_sep>trace=nuts.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_sampler_num_logp <block_start>logp=1.<line_sep>start={'x':<none>}<with_stmt>pytest.raises(TypeError)<block_start>metro=smp.Metropolis(logp start)<block_end><block_end><def_stmt>test_sampler_no_args_logp <block_start><def_stmt>logp <block_start><return>x<block_end>start={'x':<none>}<with_stmt>pytest.raises(ValueError)<block_start>metro=smp.Metropolis(logp start)<block_end><block_end><def_stmt>test_metropolis_two_vars <block_start>logp=poisson_logp<line_sep>start={'lam1':1. 'lam2':1.}<line_sep>metro=smp.Metropolis(logp start)<line_sep>trace=metro.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_metropolis_two_vars_start <block_start>logp=poisson_logp<line_sep>start={'lam1':1. 'lam2':1.}<line_sep>metro=smp.Metropolis(logp start)<line_sep>trace=metro.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_slice <block_start>logp=normal_1D_logp<line_sep>start={'x':1.}<line_sep>slice=smp.Slice(logp start)<line_sep>trace=slice.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end><def_stmt>test_slice_two_vars <block_start>logp=poisson_logp<line_sep>start={'lam1':1. 'lam2':1.}<line_sep>slice=smp.Slice(logp start)<line_sep>trace=slice.sample(n_samples)<assert_stmt>(trace.shape<eq>(n_samples ))<block_end> |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest

import mindspore.dataset as ds
import mindspore.dataset.audio.transforms as audio
from mindspore import log as logger
from mindspore.dataset.audio.utils import Modulation, Interpolation


def count_unequal_element(data_expected, data_me, rtol, atol):
    """Assert that the fraction of elements whose absolute error exceeds
    ``atol + |expected| * rtol`` stays below ``rtol``."""
    assert data_expected.shape == data_me.shape
    total_count = len(data_expected.flatten())
    error = np.abs(data_expected - data_me)
    greater = np.greater(error, atol + np.abs(data_expected) * rtol)
    loss_count = np.count_nonzero(greater)
    assert (loss_count / total_count) < rtol, \
        "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
            data_expected[greater], data_me[greater], error[greater])


def test_flanger_eager_sinusoidal_linear_float64():
    """ mindspore eager mode normal testcase:flanger op"""
    # Original waveform
    waveform = np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], dtype=np.float64)
    # Expect waveform
    expect_waveform = np.array([[0.10000000000, 0.19999999536, 0.29999998145],
                                [0.23391812865, 0.29239766081, 0.35087719298]], dtype=np.float64)
    flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                               Modulation.SINUSOIDAL, Interpolation.LINEAR)
    # Filtered waveform by flanger
    output = flanger_op(waveform)
    count_unequal_element(expect_waveform, output, 0.0001, 0.0001)


def test_flanger_eager_triangular_linear_float32():
    """ mindspore eager mode normal testcase:flanger op"""
    # Original waveform
    waveform = np.array([[-1.2, 2, -3.6], [1, 2.4, 3.7]], dtype=np.float32)
    # Expect waveform
    expect_waveform = np.array([[-1.0000000000, 1.0000000000, -1.0000000000],
                                [0.58479529619, 1.0000000000, 1.0000000000]], dtype=np.float32)
    flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                               Modulation.TRIANGULAR, Interpolation.LINEAR)
    # Filtered waveform by flanger
    output = flanger_op(waveform)
    count_unequal_element(expect_waveform, output, 0.0001, 0.0001)


def test_flanger_eager_triangular_linear_int():
    """ mindspore eager mode normal testcase:flanger op"""
    # Original waveform
    # NOTE: `np.int` was removed in NumPy 1.24; the builtin `int` is equivalent.
    waveform = np.array([[-2, -3, 0], [2, 2, 3]], dtype=int)
    # Expect waveform
    expect_waveform = np.array([[-1, -1, 0], [1, 1, 1]], dtype=int)
    flanger_op = audio.Flanger(44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                               Modulation.TRIANGULAR, Interpolation.LINEAR)
    # Filtered waveform by flanger
    output = flanger_op(waveform)
    count_unequal_element(expect_waveform, output, 0.0001, 0.0001)


def test_flanger_shape_221():
    """ mindspore eager mode normal testcase:flanger op"""
    # Original waveform
    waveform = np.array([[[1], [1.1]], [[0.9], [0.6]]], dtype=np.float64)
    # Expect waveform
    expect_waveform = np.array([[[1.00000000], [0.64327485]],
                                [[0.90000000], [0.35087719]]], dtype=np.float64)
    flanger_op = audio.Flanger(44100)
    # Filtered waveform by flanger
    output = flanger_op(waveform)
    count_unequal_element(expect_waveform, output, 0.0001, 0.0001)


def test_flanger_shape_11211():
    """ mindspore eager mode normal testcase:flanger op"""
    # Original waveform
    waveform = np.array([[[[[0.44]], [[0.55]]]]], dtype=np.float64)
    # Expect waveform
    expect_waveform = np.array([[[[[0.44000000]], [[0.55000000]]]]], dtype=np.float64)
    flanger_op = audio.Flanger(44100)
    # Filtered waveform by flanger
    output = flanger_op(waveform)
    count_unequal_element(expect_waveform, output, 0.0001, 0.0001)


def test_flanger_pipeline():
    """ mindspore pipeline mode normal testcase:flanger op"""
    # Original waveform
    waveform = np.array([[[1.1, 1.2, 1.3], [1.4, 1.5, 1.6]]], dtype=np.float64)
    # Expect waveform
    expect_waveform = np.array([[[1.00000000000, 1.00000000000, 1.00000000000],
                                 [0.81871345029, 0.87719298245, 0.93567251461]]], dtype=np.float64)
    data = (waveform, np.random.sample((1, 2, 1)))
    dataset = ds.NumpySlicesDataset(data, ["channel", "sample"], shuffle=False)
    flanger_op = audio.Flanger(44100)
    # Filtered waveform by flanger
    dataset = dataset.map(input_columns=["channel"], operations=flanger_op, num_parallel_workers=1)
    i = 0
    for item in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
        count_unequal_element(expect_waveform[i, :], item['channel'], 0.0001, 0.0001)
        i += 1


def test_invalid_flanger_input():
    """Check that every bad constructor argument raises the documented error."""

    def test_invalid_input(test_name, sample_rate, delay, depth, regen, width, speed, phase,
                           modulation, interpolation, error, error_msg):
        logger.info("Test Flanger with bad input: {0}".format(test_name))
        with pytest.raises(error) as error_info:
            audio.Flanger(sample_rate, delay, depth, regen, width, speed, phase,
                          modulation, interpolation)
        assert error_msg in str(error_info.value)

    test_invalid_input("invalid sample_rate parameter value", 0, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input sample_rate is not within the required interval of [-2147483648, 0) and (0, 2147483647].")
    test_invalid_input("invalid sample_rate parameter type as a float", 44100.5, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument sample_rate with value 44100.5 is not of "
                       "type [<class 'int'>], but got <class 'float'>.")
    test_invalid_input("invalid sample_rate parameter type as a String", "44100", 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument sample_rate with value 44100 is not of "
                       "type [<class 'int'>], but got <class 'str'>.")
    test_invalid_input("invalid delay parameter type as a String", 44100, "0.0", 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument delay with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    # BUG FIX: the expected message was split by a raw newline, so the
    # substring check could never match the exception text.
    test_invalid_input("invalid delay parameter value", 44100, 50, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input delay is not within the required interval of [0, 30].")
    test_invalid_input("invalid depth parameter type as a String", 44100, 0.0, "2.0", 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument depth with value 2.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid depth parameter value", 44100, 0.0, 50.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input depth is not within the required interval of [0, 10].")
    test_invalid_input("invalid regen parameter type as a String", 44100, 0.0, 2.0, "0.0", 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument regen with value 0.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid regen parameter value", 44100, 0.0, 2.0, 100.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input regen is not within the required interval of [-95, 95].")
    test_invalid_input("invalid width parameter type as a String", 44100, 0.0, 2.0, 0.0, "71.0", 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument width with value 71.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid width parameter value", 44100, 0.0, 2.0, 0.0, 150.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input width is not within the required interval of [0, 100].")
    test_invalid_input("invalid speed parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, "0.5", 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument speed with value 0.5 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    # BUG FIX: same raw-newline corruption as the delay message above.
    test_invalid_input("invalid speed parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 50, 25.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input speed is not within the required interval of [0.1, 10].")
    test_invalid_input("invalid phase parameter type as a String", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, "25.0",
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, TypeError,
                       "Argument phase with value 25.0 is not of type [<class 'float'>, <class 'int'>],"
                       " but got <class 'str'>.")
    test_invalid_input("invalid phase parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 150.0,
                       Modulation.SINUSOIDAL, Interpolation.LINEAR, ValueError,
                       "Input phase is not within the required interval of [0, 100].")
    test_invalid_input("invalid modulation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       "test", Interpolation.LINEAR, TypeError,
                       "Argument modulation with value test is not of type [<enum 'Modulation'>], "
                       "but got <class 'str'>.")
    # BUG FIX: this case exercises `interpolation`, but the copied label said "modulation".
    test_invalid_input("invalid interpolation parameter value", 44100, 0.0, 2.0, 0.0, 71.0, 0.5, 25.0,
                       Modulation.SINUSOIDAL, "test", TypeError,
                       "Argument interpolation with value test is not of type [<enum 'Interpolation'>], "
                       "but got <class 'str'>.")


if __name__ == '__main__':
    test_flanger_eager_sinusoidal_linear_float64()
    test_flanger_eager_triangular_linear_float32()
    test_flanger_eager_triangular_linear_int()
    test_flanger_shape_221()
    test_flanger_shape_11211()
    test_flanger_pipeline()
    test_invalid_flanger_input()
# -*- coding:utf-8 -*-
"""
State information.

Project: alphahunter
Author: HJQuant
Description: Asynchronous driven quantitative trading framework
"""


class State:
    """Immutable status notification emitted by a gateway or database connector."""

    # Status codes reported through the ``code`` property.
    STATE_CODE_PARAM_MISS = 1       # trading interface init is missing parameters
    STATE_CODE_CONNECT_SUCCESS = 2  # trading interface connected successfully
    STATE_CODE_CONNECT_FAILED = 3   # trading interface connection failed
    STATE_CODE_DISCONNECT = 4       # trading interface connection dropped
    STATE_CODE_RECONNECTING = 5     # trading interface reconnecting
    STATE_CODE_READY = 6            # trading interface ready for use
    STATE_CODE_GENERAL_ERROR = 7    # trading interface generic error
    STATE_CODE_DB_SUCCESS = 8       # database connected successfully
    STATE_CODE_DB_ERROR = 9         # database connection failed

    def __init__(self, platform, account, msg, code=STATE_CODE_PARAM_MISS):
        self._platform = platform
        self._account = account
        self._msg = msg
        self._code = code

    @property
    def platform(self):
        return self._platform

    @property
    def account(self):
        return self._account

    @property
    def msg(self):
        return self._msg

    @property
    def code(self):
        return self._code

    def __str__(self):
        return f"platform:{self._platform} account:{self._account} msg:{self._msg}"

    def __repr__(self):
        return str(self)
from cython.cimports import cqueue
from cython import cast


@cython.cclass
class Queue:
    """A queue class for C integer values.

    >>> q = Queue()
    >>> q.append(5)
    >>> q.peek()
    5
    >>> q.pop()
    5
    """
    _c_queue = cython.declare(cython.pointer(cqueue.Queue))

    def __cinit__(self):
        # Allocate the underlying C queue; NULL means the allocation failed.
        self._c_queue = cqueue.queue_new()
        if self._c_queue is cython.NULL:
            raise MemoryError()

    def __dealloc__(self):
        if self._c_queue is not cython.NULL:
            cqueue.queue_free(self._c_queue)

    @cython.ccall
    def append(self, value: cython.int):
        # The C queue stores void pointers, so the int travels through a
        # Py_ssize_t -> void* cast instead of a heap allocation.
        if not cqueue.queue_push_tail(self._c_queue,
                                      cast(cython.p_void, cast(cython.Py_ssize_t, value))):
            raise MemoryError()

    # The `cpdef` feature is obviously not available for the original "extend()"
    # method, as the method signature is incompatible with Python argument
    # types (Python does not have pointers). However, we can rename
    # the C-ish "extend()" method to e.g. "extend_ints()", and write
    # a new "extend()" method that provides a suitable Python interface by
    # accepting an arbitrary Python iterable.
    @cython.ccall
    def extend(self, values):
        for v in values:
            self.append(v)

    @cython.cfunc
    def extend_ints(self, values: cython.p_int, count: cython.size_t):
        v: cython.int
        for v in values[:count]:  # slicing the pointer limits the iteration boundaries
            self.append(v)

    @cython.ccall
    @cython.exceptval(-1, check=True)
    def peek(self) -> cython.int:
        value: cython.int = cast(cython.Py_ssize_t, cqueue.queue_peek_head(self._c_queue))
        if value == 0:
            # 0 is ambiguous: it may be a stored value or signal an empty queue.
            if cqueue.queue_is_empty(self._c_queue):
                raise IndexError("Queue is empty")
        return value

    @cython.ccall
    @cython.exceptval(-1, check=True)
    def pop(self) -> cython.int:
        if cqueue.queue_is_empty(self._c_queue):
            raise IndexError("Queue is empty")
        return cast(cython.Py_ssize_t, cqueue.queue_pop_head(self._c_queue))

    def __bool__(self):
        return not cqueue.queue_is_empty(self._c_queue)
import warnings
from json import loads as json_loads
from os import fsync
from sys import exc_info

from json_tricks.utils import is_py3, dict_default, gzip_compress, gzip_decompress, JsonTricksDeprecation
from .utils import str_type, NoNumpyException  # keep 'unused' imports
from .comment import strip_comments  # keep 'unused' imports
#TODO @mark: imports removed?
from .encoders import TricksEncoder, json_date_time_encode, \
    class_instance_encode, json_complex_encode, json_set_encode, numeric_types_encode, \
    numpy_encode, nonumpy_encode, nopandas_encode, pandas_encode, noenum_instance_encode, \
    enum_instance_encode, pathlib_encode  # keep 'unused' imports
from .decoders import TricksPairHook, json_date_time_hook, ClassInstanceHook, \
    json_complex_hook, json_set_hook, numeric_types_hook, json_numpy_obj_hook, \
    json_nonumpy_obj_hook, nopandas_hook, pandas_hook, EnumInstanceHook, \
    noenum_hook, pathlib_hook, nopathlib_hook  # keep 'unused' imports


ENCODING = 'UTF-8'

_cih_instance = ClassInstanceHook()
_eih_instance = EnumInstanceHook()

# Baseline encoder/hook chains; optional-dependency variants are prepended below
# so that the more specific encoders run first.
DEFAULT_ENCODERS = [json_date_time_encode, json_complex_encode, json_set_encode, numeric_types_encode, class_instance_encode,]
DEFAULT_HOOKS = [json_date_time_hook, json_complex_hook, json_set_hook, numeric_types_hook, _cih_instance,]

#TODO @mark: add properties to all built-in encoders (for speed - but it should keep working without)
try:
    import enum
except ImportError:
    DEFAULT_ENCODERS = [noenum_instance_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [noenum_hook,] + DEFAULT_HOOKS
else:
    DEFAULT_ENCODERS = [enum_instance_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [_eih_instance,] + DEFAULT_HOOKS

try:
    import numpy
except ImportError:
    DEFAULT_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS
else:
    # numpy encode needs to be before complex
    DEFAULT_ENCODERS = [numpy_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [json_numpy_obj_hook,] + DEFAULT_HOOKS

try:
    import pandas
except ImportError:
    DEFAULT_ENCODERS = [nopandas_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [nopandas_hook,] + DEFAULT_HOOKS
else:
    DEFAULT_ENCODERS = [pandas_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [pandas_hook,] + DEFAULT_HOOKS

try:
    import pathlib
# BUG FIX: this guard was a bare `except:`; narrowed to ImportError to match
# the enum/numpy/pandas guards and to avoid swallowing unrelated errors.
except ImportError:
    # No need to include a "nopathlib_encode" hook since we would not encounter
    # the Path object if pathlib isn't available. However, we *could* encounter
    # a serialized Path object (produced by a version of Python with pathlib).
    DEFAULT_HOOKS = [nopathlib_hook,] + DEFAULT_HOOKS
else:
    DEFAULT_ENCODERS = [pathlib_encode,] + DEFAULT_ENCODERS
    DEFAULT_HOOKS = [pathlib_hook,] + DEFAULT_HOOKS


DEFAULT_NONP_ENCODERS = [nonumpy_encode,] + DEFAULT_ENCODERS  # DEPRECATED
DEFAULT_NONP_HOOKS = [json_nonumpy_obj_hook,] + DEFAULT_HOOKS  # DEPRECATED
def dumps(obj, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
        primitives=False, compression=None, allow_nan=False, conv_str_byte=False,
        fallback_encoders=(), properties=None, **jsonkwargs):
    """
    Convert a nested data structure to a json string.

    :param obj: The Python object to convert.
    :param sort_keys: Keep this False if you want order to be preserved.
    :param cls: The json encoder class to use, defaults to NoNumpyEncoder which gives a warning for numpy arrays.
    :param obj_encoders: Iterable of encoders to use to convert arbitrary objects into json-able promitives.
    :param extra_obj_encoders: Like `obj_encoders` but on top of them: use this to add encoders without replacing defaults. Since v3.5 these happen before default encoders.
    :param fallback_encoders: These are extra `obj_encoders` that 1) are ran after all others and 2) only run if the object hasn't yet been changed.
    :param allow_nan: Allow NaN and Infinity values, which is a (useful) violation of the JSON standard (default False).
    :param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
    :param properties: A dictionary of properties that is passed to each encoder that will accept it.
    :return: The string containing the json-encoded version of obj.

    Other arguments are passed on to `cls`. Note that `sort_keys` should be false if you want to preserve order.
    """
    if not hasattr(extra_obj_encoders, '__iter__'):
        raise TypeError('`extra_obj_encoders` should be a tuple in `json_tricks.dump(s)`')
    # Extra encoders run before the defaults.
    combined_encoders = tuple(extra_obj_encoders) + tuple(obj_encoders)
    properties = properties or {}
    dict_default(properties, 'primitives', primitives)
    dict_default(properties, 'compression', compression)
    dict_default(properties, 'allow_nan', allow_nan)
    encoder_cls = TricksEncoder if cls is None else cls
    txt = encoder_cls(sort_keys=sort_keys, obj_encoders=combined_encoders, allow_nan=allow_nan,
        primitives=primitives, fallback_encoders=fallback_encoders,
        properties=properties, **jsonkwargs).encode(obj)
    if not is_py3 and isinstance(txt, str):
        txt = unicode(txt, ENCODING)
    if not compression:
        return txt
    if compression is True:
        compression = 5  # default gzip level
    return gzip_compress(txt.encode(ENCODING), compresslevel=compression)


def dump(obj, fp, sort_keys=None, cls=None, obj_encoders=DEFAULT_ENCODERS, extra_obj_encoders=(),
        primitives=False, compression=None, force_flush=False, allow_nan=False, conv_str_byte=False,
        fallback_encoders=(), properties=None, **jsonkwargs):
    """
    Convert a nested data structure to a json string and write it out.

    :param fp: File handle or path to write to.
    :param compression: The gzip compression level, or None for no compression.
    :param force_flush: If True, flush the file handle used, when possibly also in the operating system (default False).

    The other arguments are identical to `dumps`.
    """
    if (isinstance(obj, str_type) or hasattr(obj, 'write')) and isinstance(fp, (list, dict)):
        raise ValueError('json-tricks dump arguments are in the wrong order: provide the data to be serialized before file handle')
    txt = dumps(obj, sort_keys=sort_keys, cls=cls, obj_encoders=obj_encoders,
        extra_obj_encoders=extra_obj_encoders, primitives=primitives, compression=compression,
        allow_nan=allow_nan, conv_str_byte=conv_str_byte, fallback_encoders=fallback_encoders,
        properties=properties, **jsonkwargs)
    if isinstance(fp, str_type):
        # A path was given: open it ourselves, binary when compressing.
        fh = open(fp, 'wb+') if compression else open(fp, 'w+')
    else:
        fh = fp
        if conv_str_byte:
            # Probe whether the handle accepts bytes, and convert `txt` to
            # whichever of str/bytes the handle actually takes.
            try:
                fh.write(b'')
            except TypeError:
                pass
                # if not isinstance(txt, str_type):
                # 	# Cannot write bytes, so must be in text mode, but we didn't get a text
                # 	if not compression:
                # 		txt = txt.decode(ENCODING)
            else:
                try:
                    fh.write(u'')
                except TypeError:
                    if isinstance(txt, str_type):
                        txt = txt.encode(ENCODING)
    try:
        if compression and 'b' not in getattr(fh, 'mode', 'b?') and not isinstance(txt, str_type):
            raise IOError('If compression is enabled, the file must be opened in binary mode.')
        try:
            fh.write(txt)
        except TypeError as err:
            err.args = (err.args[0] + '. A possible reason is that the file is not opened in binary mode; '
                'be sure to set file mode to something like "wb".',)
            raise
    finally:
        if force_flush:
            fh.flush()
            try:
                if fh.fileno() is not None:
                    fsync(fh.fileno())
            except (ValueError,):
                pass
    if isinstance(fp, str_type):
        fh.close()
    return txt


def loads(string, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
        extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
        properties=None, **jsonkwargs):
    """
    Convert a json string back into a nested data structure.

    :param string: The string containing a json encoded data structure.
    :param preserve_order: Whether to preserve order by using OrderedDicts or not.
    :param ignore_comments: Remove comments (starting with # or //).
    :param decompression: True to use gzip decompression, False to use raw data, None to automatically determine (default). Assumes utf-8 encoding!
    :param obj_pairs_hooks: A list of dictionary hooks to apply.
    :param extra_obj_pairs_hooks: Like `obj_pairs_hooks` but on top of them: use this to add hooks without replacing defaults. Since v3.5 these happen before default hooks.
    :param cls_lookup_map: If set to a dict, for example ``globals()``, then classes encoded from __main__ are looked up this dict.
    :param allow_duplicates: If set to False, an error will be raised when loading a json-map that contains duplicate keys.
    :param parse_float: A function to parse numeric strings to another type (e.g. Decimal). There is also `parse_int`.
    :param conv_str_byte: Try to automatically convert between strings and bytes (assuming utf-8) (default False).
    :return: The decoded Python data structure.

    Other arguments are passed on to json_func.
    """
    if not hasattr(extra_obj_pairs_hooks, '__iter__'):
        raise TypeError('`extra_obj_pairs_hooks` should be a tuple in `json_tricks.load(s)`')
    if decompression is None:
        # Auto-detect gzip via its two-byte magic header.
        decompression = isinstance(string, bytes) and string[:2] == b'\x1f\x8b'
    if decompression:
        string = gzip_decompress(string).decode(ENCODING)
    if not isinstance(string, str_type):
        if conv_str_byte:
            string = string.decode(ENCODING)
        else:
            raise TypeError(('The input was of non-string type "{0:}" in `json_tricks.load(s)`. '
                'Bytes cannot be automatically decoding since the encoding is not known. Recommended '
                'way is to instead encode the bytes to a string and pass that string to `load(s)`, '
                'for example bytevar.encode("utf-8") if utf-8 is the encoding. Alternatively you can '
                'force an attempt by passing conv_str_byte=True, but this may cause decoding issues.')
                    .format(type(string)))
    if ignore_comments or ignore_comments is None:
        new_string = strip_comments(string)
        # Warn (once per process) when comments were stripped implicitly.
        if ignore_comments is None and not getattr(loads, '_ignore_comments_warned', False) \
                and string != new_string:
            warnings.warn('`json_tricks.load(s)` stripped some comments, but `ignore_comments` was '
                'not passed; in the next major release, the behaviour when `ignore_comments` is not '
                'passed will change; it is recommended to explicitly pass `ignore_comments=True` if '
                'you want to strip comments; see https://github.com/mverleg/pyjson_tricks/issues/74',
                JsonTricksDeprecation)
            loads._ignore_comments_warned = True
        string = new_string
    properties = properties or {}
    dict_default(properties, 'preserve_order', preserve_order)
    dict_default(properties, 'ignore_comments', ignore_comments)
    dict_default(properties, 'decompression', decompression)
    dict_default(properties, 'cls_lookup_map', cls_lookup_map)
    dict_default(properties, 'allow_duplicates', allow_duplicates)
    # Extra hooks run before the defaults.
    combined_hooks = tuple(extra_obj_pairs_hooks) + tuple(obj_pairs_hooks)
    pair_hook = TricksPairHook(ordered=preserve_order, obj_pairs_hooks=combined_hooks,
        allow_duplicates=allow_duplicates, properties=properties)
    return json_loads(string, object_pairs_hook=pair_hook, **jsonkwargs)


def load(fp, preserve_order=True, ignore_comments=None, decompression=None, obj_pairs_hooks=DEFAULT_HOOKS,
        extra_obj_pairs_hooks=(), cls_lookup_map=None, allow_duplicates=True, conv_str_byte=False,
        properties=None, **jsonkwargs):
    """
    Read json from a file handle or path and decode it.

    :param fp: File handle or path to load from.

    The other arguments are identical to loads.
    """
    try:
        if isinstance(fp, str_type):
            if decompression is not None:
                open_binary = bool(decompression)
            else:
                # Sniff the first two bytes: gzip always starts with this
                # header, and text json can't contain it.
                with open(fp, 'rb') as fh:
                    open_binary = (fh.read(2) == b'\x1f\x8b')
            with open(fp, 'rb' if open_binary else 'r') as fh:
                string = fh.read()
        else:
            string = fp.read()
    except UnicodeDecodeError as err:
        # todo: not covered in tests, is it relevant?
        raise Exception('There was a problem decoding the file content. A possible reason is that the file is not ' +
            'opened in binary mode; be sure to set file mode to something like "rb".').with_traceback(exc_info()[2])
    return loads(string, preserve_order=preserve_order, ignore_comments=ignore_comments,
        decompression=decompression, obj_pairs_hooks=obj_pairs_hooks,
        extra_obj_pairs_hooks=extra_obj_pairs_hooks, cls_lookup_map=cls_lookup_map,
        allow_duplicates=allow_duplicates, conv_str_byte=conv_str_byte,
        properties=properties, **jsonkwargs)
"""Miscellaneous inspection tools
"""
import os
from tempfile import NamedTemporaryFile


def disassemble_elf_to_cfg(elf):
    """
    Gets the CFG of the disassembly of an ELF object, elf, and renders it
    appropriately depending on the execution environment (terminal/notebook).

    :param elf: bytes of the ELF object to disassemble.
    :return: an object whose ``repr`` is the terminal rendering and whose
        ``_repr_svg_`` is the notebook (SVG) rendering of the CFG.
    :raises RuntimeError: if r2pipe/radare2 (or graphviz, for SVG) is missing.
    """
    try:
        import r2pipe
    except ImportError:
        raise RuntimeError("r2pipe package needed for disasm CFG")

    def get_rendering(cmd=None):
        """Run the radare2 command `cmd` against `elf` and return its output."""
        if cmd is None:
            raise ValueError("No command given")
        f = NamedTemporaryFile(delete=False)
        try:
            with f:
                f.write(elf)
                f.flush()  # force write, radare2 needs a binary blob on disk
                # catch if r2pipe can actually talk to radare2
                try:
                    flags = ['-e io.cache=true',  # fix relocations in disassembly
                             '-e scr.color=1',  # 16 bit ANSI colour terminal
                             ]
                    r = r2pipe.open(f.name, flags=flags)
                    data = r.cmd('af;%s' % cmd)
                    r.quit()
                except Exception as e:
                    if "radare2 in PATH" in str(e):
                        msg = ("This feature requires 'radare2' to be "
                               "installed and available on the system see: "
                               "https://github.com/radareorg/radare2. "
                               "Cannot find 'radare2' in $PATH.")
                        raise RuntimeError(msg)
                    else:
                        raise e
        finally:
            # BUG FIX: `delete=False` leaked the temporary file; always remove it.
            try:
                os.remove(f.name)
            except OSError:
                pass
        return data

    class DisasmCFG(object):

        def _repr_svg_(self):
            try:
                import graphviz
            except ImportError:
                raise RuntimeError("graphviz package needed for disasm CFG")
            jupyter_rendering = get_rendering(cmd='agfd')
            # this just makes it read slightly better in jupyter notebooks
            # BUG FIX: str.replace returns a new string; the original discarded
            # the result, so the font-size tweak was never applied.
            jupyter_rendering = jupyter_rendering.replace(
                'fontname="Courier",', 'fontname="Courier",fontsize=6,')
            src = graphviz.Source(jupyter_rendering)
            return src.pipe('svg').decode('UTF-8')

        def __repr__(self):
            return get_rendering(cmd='agf')

    return DisasmCFG()
from django.conf.urls import url

from . import views

# URL routes for the exploits app; each route maps to a view in `views`.
urlpatterns = [
    url(r'^add/$', views.add_exploit,
        name='crits-exploits-views-add_exploit'),
    url(r'^edit/cve/$', views.edit_exploit_cve,
        name='crits-exploits-views-edit_exploit_cve'),
    url(r'^edit/name/(?P<id_>\S+)/$', views.edit_exploit_name,
        name='crits-exploits-views-edit_exploit_name'),
    url(r'^details/(?P<id_>\S+)/$', views.exploit_detail,
        name='crits-exploits-views-exploit_detail'),
    url(r'^remove/(?P<id_>\S+)/$', views.remove_exploit,
        name='crits-exploits-views-remove_exploit'),
    url(r'^list/$', views.exploits_listing,
        name='crits-exploits-views-exploits_listing'),
    url(r'^list/(?P<option>\S+)/$', views.exploits_listing,
        name='crits-exploits-views-exploits_listing'),
]
"""
@author: <NAME>
@contact: <EMAIL>
"""
import random
import copy

import numpy as np
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.sampler import Sampler


class DefaultSampler(Sampler):
    r"""Traverse all :math:`N` domains, randomly select :math:`K` samples in each domain to form a mini-batch of size
    :math:`N\times K`.

    Args:
        data_source (ConcatDataset): dataset that contains data from multiple domains
        batch_size (int): mini-batch size (:math:`N\times K` here)
    """

    def __init__(self, data_source: ConcatDataset, batch_size: int):
        super(Sampler, self).__init__()
        bounds = data_source.cumulative_sizes
        self.num_all_domains = len(bounds)
        # Per-domain lists of global sample indices, derived from the
        # cumulative sizes of the concatenated datasets.
        self.sample_idxes_per_domain = []
        lo = 0
        for hi in bounds:
            self.sample_idxes_per_domain.append(list(range(lo, hi)))
            lo = hi
        assert batch_size % self.num_all_domains == 0
        self.batch_size_per_domain = batch_size // self.num_all_domains
        # Materialize one epoch once so __len__ is cheap afterwards.
        self.length = len(list(self.__iter__()))

    def __iter__(self):
        pools = copy.deepcopy(self.sample_idxes_per_domain)
        order = []
        exhausted = False
        while not exhausted:
            # Visit every domain in turn, drawing K indices without replacement.
            for domain in range(self.num_all_domains):
                chosen = random.sample(pools[domain], self.batch_size_per_domain)
                order.extend(chosen)
                for idx in chosen:
                    pools[domain].remove(idx)
                if len(pools[domain]) < self.batch_size_per_domain:
                    exhausted = True
        return iter(order)

    def __len__(self):
        return self.length


class RandomDomainSampler(Sampler):
    r"""Randomly sample :math:`N` domains, then randomly select :math:`K` samples in each domain to form a mini-batch of
    size :math:`N\times K`.

    Args:
        data_source (ConcatDataset): dataset that contains data from multiple domains
        batch_size (int): mini-batch size (:math:`N\times K` here)
        n_domains_per_batch (int): number of domains to select in a single mini-batch (:math:`N` here)
    """

    def __init__(self, data_source: ConcatDataset, batch_size: int, n_domains_per_batch: int):
        super(Sampler, self).__init__()
        self.n_domains_in_dataset = len(data_source.cumulative_sizes)
        self.n_domains_per_batch = n_domains_per_batch
        assert self.n_domains_in_dataset >= self.n_domains_per_batch
        # Per-domain lists of global sample indices (see DefaultSampler).
        self.sample_idxes_per_domain = []
        lo = 0
        for hi in data_source.cumulative_sizes:
            self.sample_idxes_per_domain.append(list(range(lo, hi)))
            lo = hi
        assert batch_size % n_domains_per_batch == 0
        self.batch_size_per_domain = batch_size // n_domains_per_batch
        self.length = len(list(self.__iter__()))

    def __iter__(self):
        pools = copy.deepcopy(self.sample_idxes_per_domain)
        all_domains = list(range(self.n_domains_in_dataset))
        order = []
        exhausted = False
        while not exhausted:
            # Pick N domains at random for this round of batches.
            for domain in random.sample(all_domains, self.n_domains_per_batch):
                pool = pools[domain]
                if len(pool) < self.batch_size_per_domain:
                    # Not enough left: sample with replacement to fill the batch.
                    chosen = np.random.choice(pool, self.batch_size_per_domain, replace=True)
                else:
                    chosen = random.sample(pool, self.batch_size_per_domain)
                order.extend(chosen)
                for idx in chosen:
                    if idx in pools[domain]:
                        pools[domain].remove(idx)
                if len(pools[domain]) < self.batch_size_per_domain:
                    exhausted = True
        return iter(order)

    def __len__(self):
        return self.length
<import_from_stmt>PyObjCTools.TestSupport *<import_stmt>AppKit<import_from_stmt>AppKit *<try_stmt><block_start>unicode<block_end><except_stmt>NameError<block_start>unicode=str<block_end><class_stmt>TestNSImageHelper(NSObject)<block_start><def_stmt>image_didLoadRepresentation_withStatus_ self i r s<block_start><pass><block_end><def_stmt>image_didLoadPartOfRepresentation_withValidRows_ self i r c<block_start><pass><block_end><block_end><class_stmt>TestNSImage(TestCase)<block_start><def_stmt>test_compositePoint self# comes straight from ReSTedit. Works on PPC, not on Intel (as of r1791)
<block_start>ws=AppKit.NSWorkspace.sharedWorkspace()<line_sep>txtIcon=ws.iconForFileType_("txt")<line_sep>txtIcon.setSize_((16 16))<line_sep>htmlIcon=ws.iconForFileType_("html")<line_sep>htmlIcon.setSize_((16 16))<line_sep>comboIcon=AppKit.NSImage.alloc().initWithSize_((100 100))<line_sep>comboIcon.lockFocus()<line_sep>txtIcon.compositeToPoint_fromRect_operation_((0 0) ((0 0) (16 16)) AppKit.NSCompositeCopy)<line_sep>htmlIcon.compositeToPoint_fromRect_operation_((8 0) ((8 0) (8 16)) AppKit.NSCompositeCopy)<line_sep>comboIcon.unlockFocus()<block_end><def_stmt>testConstants self<block_start>self.assertEqual(NSImageLoadStatusCompleted 0)<line_sep>self.assertEqual(NSImageLoadStatusCancelled 1)<line_sep>self.assertEqual(NSImageLoadStatusInvalidData 2)<line_sep>self.assertEqual(NSImageLoadStatusUnexpectedEOF 3)<line_sep>self.assertEqual(NSImageLoadStatusReadError 4)<line_sep>self.assertEqual(NSImageCacheDefault 0)<line_sep>self.assertEqual(NSImageCacheAlways 1)<line_sep>self.assertEqual(NSImageCacheBySize 2)<line_sep>self.assertEqual(NSImageCacheNever 3)<block_end>@min_os_level("10.5")<def_stmt>testConstants10_5 self<block_start>self.assertIsInstance(NSImageNameQuickLookTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameBluetoothTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameIChatTheaterTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameSlideshowTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameActionTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameSmartBadgeTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameIconViewTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameListViewTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameColumnViewTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameFlowViewTemplate unicode)<line_sep>self.assertIsInstance(NSImageNamePathTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameInvalidDataFreestandingTemplate 
unicode)<line_sep>self.assertIsInstance(NSImageNameLockLockedTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameLockUnlockedTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameGoRightTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameGoLeftTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameRightFacingTriangleTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameLeftFacingTriangleTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameAddTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameRemoveTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameRevealFreestandingTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameFollowLinkFreestandingTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameEnterFullScreenTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameExitFullScreenTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameStopProgressTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameStopProgressFreestandingTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameRefreshTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameRefreshFreestandingTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameBonjour unicode)<line_sep>self.assertIsInstance(NSImageNameDotMac unicode)<line_sep>self.assertIsInstance(NSImageNameComputer unicode)<line_sep>self.assertIsInstance(NSImageNameFolderBurnable unicode)<line_sep>self.assertIsInstance(NSImageNameFolderSmart unicode)<line_sep>self.assertIsInstance(NSImageNameNetwork unicode)<line_sep>self.assertIsInstance(NSImageNameMultipleDocuments unicode)<line_sep>self.assertIsInstance(NSImageNameUserAccounts unicode)<line_sep>self.assertIsInstance(NSImageNamePreferencesGeneral unicode)<line_sep>self.assertIsInstance(NSImageNameAdvanced unicode)<line_sep>self.assertIsInstance(NSImageNameInfo unicode)<line_sep>self.assertIsInstance(NSImageNameFontPanel unicode)<line_sep>self.assertIsInstance(NSImageNameColorPanel 
unicode)<line_sep>self.assertIsInstance(NSImageNameUser unicode)<line_sep>self.assertIsInstance(NSImageNameUserGroup unicode)<line_sep>self.assertIsInstance(NSImageNameEveryone unicode)<block_end><def_stmt>testMethods self<block_start>self.assertResultIsBOOL(NSImage.setName_)<line_sep>self.assertArgIsBOOL(NSImage.setScalesWhenResized_ 0)<line_sep>self.assertResultIsBOOL(NSImage.scalesWhenResized)<line_sep>self.assertArgIsBOOL(NSImage.setDataRetained_ 0)<line_sep>self.assertResultIsBOOL(NSImage.isDataRetained)<line_sep>self.assertArgIsBOOL(NSImage.setCachedSeparately_ 0)<line_sep>self.assertResultIsBOOL(NSImage.isCachedSeparately)<line_sep>self.assertArgIsBOOL(NSImage.setCacheDepthMatchesImageDepth_ 0)<line_sep>self.assertResultIsBOOL(NSImage.cacheDepthMatchesImageDepth)<line_sep>self.assertArgIsBOOL(NSImage.setUsesEPSOnResolutionMismatch_ 0)<line_sep>self.assertResultIsBOOL(NSImage.usesEPSOnResolutionMismatch)<line_sep>self.assertArgIsBOOL(NSImage.setPrefersColorMatch_ 0)<line_sep>self.assertResultIsBOOL(NSImage.prefersColorMatch)<line_sep>self.assertArgIsBOOL(NSImage.setMatchesOnMultipleResolution_ 0)<line_sep>self.assertResultIsBOOL(NSImage.matchesOnMultipleResolution)<line_sep>self.assertResultIsBOOL(NSImage.drawRepresentation_inRect_)<line_sep>self.assertResultIsBOOL(NSImage.isValid)<line_sep>self.assertResultIsBOOL(NSImage.canInitWithPasteboard_)<line_sep>self.assertResultIsBOOL(NSImage.isFlipped)<line_sep>self.assertArgIsBOOL(NSImage.setFlipped_ 0)<line_sep>self.assertResultIsBOOL(NSImage.isTemplate)<line_sep>self.assertArgIsBOOL(NSImage.setTemplate_ 0)<block_end><def_stmt>testProtocols self<block_start>self.assertArgHasType(TestNSImageHelper.image_didLoadPartOfRepresentation_withValidRows_ 2 objc._C_NSInteger)<line_sep>self.assertArgHasType(TestNSImageHelper.image_didLoadRepresentation_withStatus_ 2 objc._C_NSUInteger)<block_end>@min_os_level('10.6')<def_stmt>testMethods10_6 
self<block_start>self.assertArgHasType(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_ 0 NSRect.__typestr__)<line_sep>self.assertArgIsBOOL(NSImage.drawInRect_fromRect_operation_fraction_respectFlipped_hints_ 4)<line_sep>self.assertArgIsBOOL(NSImage.lockFocusFlipped_ 0)<line_sep>self.assertArgHasType(NSImage.initWithCGImage_size_ 1 NSSize.__typestr__)<line_sep>self.assertArgHasType(NSImage.CGImageForProposedRect_context_hints_ 0 b'o^'+NSRect.__typestr__)<line_sep>self.assertArgHasType(NSImage.bestRepresentationForRect_context_hints_ 0 NSRect.__typestr__)<line_sep>self.assertResultIsBOOL(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_)<line_sep>self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_ 0 NSRect.__typestr__)<line_sep>self.assertArgHasType(NSImage.hitTestRect_withImageDestinationRect_context_hints_flipped_ 1 NSRect.__typestr__)<block_end>@min_os_level('10.7')<def_stmt>testMethods10_7 self<block_start>self.assertResultIsBOOL(NSImage.matchesOnlyOnBestFittingAxis)<line_sep>self.assertArgIsBOOL(NSImage.setMatchesOnlyOnBestFittingAxis_ 0)<block_end>@min_os_level('10.8')<def_stmt>testMethods10_8 self<block_start>self.assertArgIsBOOL(NSImage.imageWithSize_flipped_drawingHandler_ 1)<line_sep>self.assertArgIsBlock(NSImage.imageWithSize_flipped_drawingHandler_ 2 objc._C_NSBOOL+NSRect.__typestr__)<block_end>@min_os_level('10.6')<def_stmt>testConstants10_6 self<block_start>self.assertIsInstance(NSImageHintCTM unicode)<line_sep>self.assertIsInstance(NSImageHintInterpolation unicode)<line_sep>self.assertIsInstance(NSImageNameFolder unicode)<line_sep>self.assertIsInstance(NSImageNameMobileMe unicode)<line_sep>self.assertIsInstance(NSImageNameUserGuest unicode)<line_sep>self.assertIsInstance(NSImageNameMenuOnStateTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameMenuMixedStateTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameApplicationIcon 
unicode)<line_sep>self.assertIsInstance(NSImageNameTrashEmpty unicode)<line_sep>self.assertIsInstance(NSImageNameTrashFull unicode)<line_sep>self.assertIsInstance(NSImageNameHomeTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameBookmarksTemplate unicode)<line_sep>self.assertIsInstance(NSImageNameCaution unicode)<line_sep>self.assertIsInstance(NSImageNameStatusAvailable unicode)<line_sep>self.assertIsInstance(NSImageNameStatusPartiallyAvailable unicode)<line_sep>self.assertIsInstance(NSImageNameStatusUnavailable unicode)<line_sep>self.assertIsInstance(NSImageNameStatusNone unicode)<block_end>@min_os_level('10.8')<def_stmt>testConstants10_8 self<block_start>self.assertIsInstance(NSImageNameShareTemplate unicode)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
#Input: nums = [1,2,3]
#Output: [[],[1],[2],[1,2],[3],[1,3],[2,3],[1,2,3]]
<def_stmt>subsets self nums:List[int]<arrow>List[List[int]]<block_start>self.result=[]<line_sep>self.helper(nums 0 [])<line_sep><return>self.result<block_end><def_stmt>helper self nums start subset<block_start>self.result.append(subset[::])<for_stmt>i range(start len(nums))<block_start>subset.append(nums[i])<line_sep>self.helper(nums i+1 subset)<line_sep>subset.pop()<block_end><block_end> |
"""
lib
"""<line_sep> |
# Copyright 2018 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functionality for working with probability spaces and random variables.
Basic recap of probability theory, and thus of classes in this file:
* A probability space is a (finite or infinite) set Omega with a probability
measure defined on this.
* A random variable is a mapping from a probability space to another measure
space.
* An event is a measurable set in a sample space.
For example, suppose a bag contains 3 balls: two red balls, and one white ball.
This could be represented by a discrete probability space of size 3 with
elements {1, 2, 3}, with equal measure assigned to all 3 elements; and a random
variable that maps 1->red, 2->red, and 3->white. Then the probability of drawing
a red ball is the measure in the probability space of the inverse under the
random variable mapping of {red}, i.e., of {1, 2}, which is 2/3.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>abc<import_stmt>itertools<line_sep># Dependency imports
<import_stmt>six<import_from_stmt>six.moves zip<import_stmt>sympy<line_sep>@six.add_metaclass(abc.ABCMeta)<class_stmt>Event(object)<block_start>"""Represents an event in a measure space."""<block_end>@six.add_metaclass(abc.ABCMeta)<class_stmt>ProbabilitySpace(object)<block_start>"""Represents a probability space."""<line_sep>@abc.abstractmethod<def_stmt>probability self event<block_start>"""Returns the probability of an event."""<block_end><block_end>@six.add_metaclass(abc.ABCMeta)<class_stmt>RandomVariable(object)<block_start>"""Random variable; a mapping from a probability space to a measure space."""<line_sep>@abc.abstractmethod<def_stmt>__call__ self event<block_start>"""Maps an `_Event` in the probability space to one in the sample space."""<block_end>@abc.abstractmethod<def_stmt>inverse self event<block_start>"""Maps event in the sample space back to the inverse in the prob. space."""<block_end><block_end><class_stmt>DiscreteEvent(Event)<block_start>"""Set of discrete values."""<def_stmt>__init__ self values<block_start>self._values=values<block_end>@property<def_stmt>values self<block_start><return>self._values<block_end><block_end><class_stmt>FiniteProductEvent(Event)<block_start>"""Event consisting of cartesian product of events."""<def_stmt>__init__ self events<block_start>"""Initializes a `FiniteProductEvent`.
Args:
events: Tuple of `Event`s; resulting event will be cartesian product of
these.
"""<line_sep>self._events=events<block_end>@property<def_stmt>events self<block_start><return>self._events<block_end><def_stmt>all_sequences self<block_start>"""Returns iterator of sequences by selecting a single event in each coord.
This assumes that every component event is an instance of `DiscreteEvent`.
Returns:
Iterator over tuples of values.
Raises:
ValueError: If one of the component events is not a `DiscreteEvent`.
"""<if_stmt><not>all(isinstance(event DiscreteEvent)<for>event self._events)<block_start><raise>ValueError('Not all component events are DiscreteEvents')<block_end>values_list=[event.values<for>event self._events]<line_sep><return>itertools.product(*values_list)<block_end><block_end><class_stmt>CountLevelSetEvent(Event)<block_start>"""Event of all sequences with fixed number of different values occurring."""<def_stmt>__init__ self counts<block_start>"""Initializes `CountLevelSetEvent`.
E.g., to construct the event of getting two red balls and one green ball,
pass `counts = {red: 2, green: 1}`. (Then `all_sequences()` would return
`[(red, red, green), (red, green, red), (green, red, red)]`.
Args:
counts: Dictionary mapping values to the number of times they occur in a
sequence.
"""<line_sep>self._counts=counts<line_sep>self._all_sequences=<none><block_end>@property<def_stmt>counts self<block_start><return>self._counts<block_end><def_stmt>all_sequences self<block_start>"""Returns all sequences generated by this level set."""<if_stmt>self._all_sequences<is><none># Generate via dynamic programming.
<block_start>cache={}# dict mapping tuple -> list of tuples
labels=list(self._counts.keys())<def_stmt>generate counts<block_start>"""Returns list of tuples for given `counts` of labels."""<if_stmt>sum(counts)<eq>0<block_start><return>[()]<block_end>counts=tuple(counts)<if_stmt>counts<in>cache<block_start><return>cache[counts]<block_end>generated=[]<for_stmt>i,count enumerate(counts)<block_start><if_stmt>count<eq>0<block_start><continue><block_end>counts_minus=list(counts)<line_sep>counts_minus[i]<augsub>1<line_sep>counts_minus=tuple(counts_minus)<line_sep>extensions=generate(counts_minus)<line_sep>generated<augadd>[tuple([labels[i]]+list(extension))<for>extension extensions]<block_end>cache[counts]=generated<line_sep><return>generated<block_end>self._all_sequences=generate(list(self._counts.values()))<block_end><return>self._all_sequences<block_end><block_end><class_stmt>SequenceEvent(Event)<block_start>"""Collection of sequences."""<def_stmt>__init__ self sequences<block_start>self._sequences=sequences<block_end><def_stmt>all_sequences self<block_start><return>self._sequences<block_end><block_end><def_stmt>normalize_weights weights<block_start>"""Normalizes the weights (as sympy.Rational) in dictionary of weights."""<line_sep>weight_sum=sum(six.itervalues(weights))<line_sep><return>{i:sympy.Rational(weight weight_sum)<for>i,weight six.iteritems(weights)}<block_end><class_stmt>DiscreteProbabilitySpace(ProbabilitySpace)<block_start>"""Discrete probability space."""<def_stmt>__init__ self weights=<none><block_start>"""Initializes an `DiscreteProbabilitySpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
"""<line_sep>self._weights=normalize_weights(weights)<block_end><def_stmt>probability self event<block_start><if_stmt>isinstance(event DiscreteEvent)<block_start><return>sum(self._weights[value]<for>value event.values<if>value<in>self._weights)<block_end><else_stmt><block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end><block_end>@property<def_stmt>weights self<block_start>"""Returns dictionary of probability of each element."""<line_sep><return>self._weights<block_end><block_end><class_stmt>FiniteProductSpace(ProbabilitySpace)<block_start>"""Finite cartesian product of probability spaces."""<def_stmt>__init__ self spaces<block_start>"""Initializes a `FiniteProductSpace`.
Args:
spaces: List of `ProbabilitySpace`.
"""<line_sep>self._spaces=spaces<block_end><def_stmt>all_spaces_equal self<block_start><return>all([self._spaces[0]<eq>space<for>space self._spaces])<block_end><def_stmt>probability self event# Specializations for optimization.
<block_start><if_stmt>isinstance(event FiniteProductEvent)<block_start><assert_stmt>len(self._spaces)<eq>len(event.events)<line_sep><return>sympy.prod([space.probability(event_slice)<for>space,event_slice zip(self._spaces event.events)])<block_end><if_stmt>isinstance(event CountLevelSetEvent)<and>self.all_spaces_equal()<block_start>space=self._spaces[0]<line_sep>counts=event.counts<line_sep>probabilities={value:space.probability(DiscreteEvent({value}))<for>value six.iterkeys(counts)}<line_sep>num_events=sum(six.itervalues(counts))<assert_stmt>num_events<eq>len(self._spaces)<line_sep># Multinomial coefficient:
coeff=(sympy.factorial(num_events)/sympy.prod([sympy.factorial(i)<for>i six.itervalues(counts)]))<line_sep><return>coeff<times>sympy.prod([pow(probabilities[value] counts[value])<for>value six.iterkeys(counts)])<block_end><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end>@property<def_stmt>spaces self<block_start>"""Returns list of spaces."""<line_sep><return>self._spaces<block_end><block_end><class_stmt>SampleWithoutReplacementSpace(ProbabilitySpace)<block_start>"""Probability space formed by sampling discrete space without replacement."""<def_stmt>__init__ self weights n_samples<block_start>"""Initializes a `SampleWithoutReplacementSpace`.
Args:
weights: Dictionary mapping values to relative probability of selecting
that value. This will be normalized.
n_samples: Number of samples to draw.
Raises:
ValueError: If `n_samples > len(weights)`.
"""<if_stmt>n_samples<g>len(weights)<block_start><raise>ValueError('n_samples is more than number of discrete elements')<block_end>self._weights=normalize_weights(weights)<line_sep>self._n_samples=n_samples<block_end>@property<def_stmt>n_samples self<block_start>"""Number of samples to draw."""<line_sep><return>self._n_samples<block_end><def_stmt>probability self event<block_start><try_stmt><block_start>all_sequences=event.all_sequences()<block_end><except_stmt>AttributeError<block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end>probability_sum=0<for_stmt>sequence all_sequences<block_start><if_stmt>len(sequence)<ne>len(set(sequence))<block_start><continue># not all unique, so not "without replacement".
<block_end>p_sequence=1<line_sep>removed_prob=0<for_stmt>i sequence<block_start>p=self._weights[i]<if>i<in>self._weights<else>0<if_stmt>p<eq>0<block_start>p_sequence=0<line_sep><break><block_end>p_sequence<augmul>p/(1-removed_prob)<line_sep>removed_prob<augadd>p<block_end>probability_sum<augadd>p_sequence<block_end><return>probability_sum<block_end><block_end><class_stmt>IdentityRandomVariable(RandomVariable)<block_start>"""Identity map of a probability space."""<def_stmt>__call__ self event<block_start><return>event<block_end><def_stmt>inverse self event<block_start><return>event<block_end><block_end><class_stmt>DiscreteRandomVariable(RandomVariable)<block_start>"""Specialization to discrete random variable.
This is simply a mapping from a discrete space to a discrete space (dictionary
lookup).
"""<def_stmt>__init__ self mapping<block_start>"""Initializes `DiscreteRandomVariable` from `mapping` dict."""<line_sep>self._mapping=mapping<line_sep>self._inverse={}<for_stmt>key,value six.iteritems(mapping)<block_start><if_stmt>value<in>self._inverse<block_start>self._inverse[value].add(key)<block_end><else_stmt><block_start>self._inverse[value]=set([key])<block_end><block_end><block_end><def_stmt>__call__ self event<block_start><if_stmt>isinstance(event DiscreteEvent)<block_start><return>DiscreteEvent({self._mapping[value]<for>value event.values})<block_end><else_stmt><block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end><block_end><def_stmt>inverse self event<block_start><if_stmt>isinstance(event DiscreteEvent)<block_start>set_=set()<for_stmt>value event.values<block_start><if_stmt>value<in>self._inverse<block_start>set_.update(self._inverse[value])<block_end><block_end><return>DiscreteEvent(set_)<block_end><else_stmt><block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end><block_end><block_end><class_stmt>FiniteProductRandomVariable(RandomVariable)<block_start>"""Product random variable.
This has the following semantics. Let this be X = (X_1, ..., X_n). Then
X(w) = (X_1(w_1), ..., X_n(w_n))
(the sample space is assumed to be of sequence type).
"""<def_stmt>__init__ self random_variables<block_start>"""Initializes a `FiniteProductRandomVariable`.
Args:
random_variables: Tuple of `RandomVariable`.
"""<line_sep>self._random_variables=random_variables<block_end><def_stmt>__call__ self event<block_start><if_stmt>isinstance(event FiniteProductEvent)<block_start><assert_stmt>len(event.events)<eq>len(self._random_variables)<line_sep>zipped=list(zip(self._random_variables event.events))<line_sep><return>FiniteProductEvent([random_variable(sub_event)<for>random_variable,sub_event zipped])<block_end><else_stmt><block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end><block_end><def_stmt>inverse self event# Specialization for `FiniteProductEvent`; don't need to take all sequences.
<block_start><if_stmt>isinstance(event FiniteProductEvent)<block_start><assert_stmt>len(event.events)<eq>len(self._random_variables)<line_sep>zipped=list(zip(self._random_variables event.events))<line_sep><return>FiniteProductEvent(tuple(random_variable.inverse(sub_event)<for>random_variable,sub_event zipped))<block_end># Try fallback of mapping each sequence separately.
<try_stmt><block_start>all_sequences=event.all_sequences()<block_end><except_stmt>AttributeError<block_start><raise>ValueError('Unhandled event type {}'.format(type(event)))<block_end>mapped=set()<for_stmt>sequence all_sequences<block_start><assert_stmt>len(sequence)<eq>len(self._random_variables)<line_sep>zipped=list(zip(self._random_variables sequence))<line_sep>mapped_sequence=FiniteProductEvent(tuple(random_variable.inverse(DiscreteEvent({element}))<for>random_variable,element zipped))<line_sep>mapped.update(mapped_sequence.all_sequences())<block_end><return>SequenceEvent(mapped)<block_end><block_end> |
<import_from_stmt>pygears gear datagear alternative module<import_from_stmt>pygears.typing.qround get_out_type get_cut_bits<import_from_stmt>pygears.typing Uint code Bool Int Fixp Ufixp<line_sep>@datagear<def_stmt>qround din * fract=0 cut_bits=b'get_cut_bits(din, fract)' signed=b'din.signed'<arrow>b'get_out_type(din, fract)'<block_start>res=code(din Int<if>signed<else>Uint)+(Bool(1)<lshift>(cut_bits-1))<line_sep><return>code(res<rshift>cut_bits module().tout)<block_end># @datagear
# def qround_even(din,
# *,
# fract=0,
# cut_bits=b'get_cut_bits(din, fract)',
# signed=b'din.signed') -> b'get_out_type(din, fract)':
# val_coded = code(din, Int if signed else Uint)
# round_bit = val_coded[cut_bits]
# res = val_coded + Uint([round_bit] + [~round_bit] * (cut_bits - 1))
# return code(res[cut_bits:])
@gear<def_stmt>truncate din * nbits=2<arrow>b'din'<block_start><pass><block_end>@gear<def_stmt>round_half_up din * nbits=2<arrow>b'din'<block_start><pass><block_end>@gear<def_stmt>round_to_zero din * nbits=2<arrow>b'din'<block_start><pass><block_end>@gear<async_keyword><def_stmt>round_to_even din * nbits=2<arrow>b'din'<block_start><async_keyword><with_stmt>din<as>d<block_start><return>round(float(d)/(2<power>nbits))<times>(2<power>nbits)<block_end><block_end> |
<import_stmt>numpy<as>np<import_from_stmt>numpy.testing assert_allclose<import_stmt>pytest<import_from_stmt>mne._ola _COLA _Interp2 _Storer<def_stmt>test_interp_2pt <block_start>"""Test our two-point interpolator."""<line_sep>n_pts=200<assert_stmt>n_pts%50<eq>0<line_sep>feeds=[# test a bunch of feeds to make sure they don't break things
[n_pts] [50]<times>(n_pts<floordiv>50) [10]<times>(n_pts<floordiv>10) [5]<times>(n_pts<floordiv>5) [2]<times>(n_pts<floordiv>2) [1]<times>n_pts ]<line_sep># ZOH
values=np.array([10 -10])<line_sep>expected=np.full(n_pts 10)<for_stmt>feed feeds<block_start>expected[-1]=10<line_sep>interp=_Interp2([0 n_pts] values 'zero')<line_sep>out=np.concatenate([interp.feed(f)[0]<for>f feed])<line_sep>assert_allclose(out expected)<line_sep>interp=_Interp2([0 n_pts-1] values 'zero')<line_sep>expected[-1]=-10<line_sep>out=np.concatenate([interp.feed(f)[0]<for>f feed])<line_sep>assert_allclose(out expected)<block_end># linear and inputs of different sizes
values=[np.arange(2)[: np.newaxis np.newaxis] np.array([20 10])]<line_sep>expected=[np.linspace(0 1 n_pts endpoint=<false>)[np.newaxis np.newaxis :] np.linspace(20 10 n_pts endpoint=<false>)]<for_stmt>feed feeds<block_start>interp=_Interp2([0 n_pts] values 'linear')<line_sep>outs=[interp.feed(f)<for>f feed]<line_sep>outs=[np.concatenate([o[0]<for>o outs] axis=-1) np.concatenate([o[1]<for>o outs] axis=-1)]<line_sep>assert_allclose(outs[0] expected[0] atol=1e-7)<line_sep>assert_allclose(outs[1] expected[1] atol=1e-7)<block_end># cos**2 and more interesting bounds
values=np.array([10 -10])<line_sep>expected=np.full(n_pts 10.)<line_sep>expected[-5:]=-10<line_sep>cos=np.cos(np.linspace(0 np.pi/2. n_pts-9 endpoint=<false>))<line_sep>expected[4:-5]=cos<power>2<times>20-10<for_stmt>feed feeds<block_start>interp=_Interp2([4 n_pts-5] values 'cos2')<line_sep>out=np.concatenate([interp.feed(f)[0]<for>f feed])<line_sep>assert_allclose(out expected atol=1e-7)<block_end>out=interp.feed(10)[0]<line_sep>assert_allclose(out [values[-1]]<times>10 atol=1e-7)<line_sep># hann and broadcasting
n_hann=n_pts-9<line_sep>expected[4:-5]=np.hanning(2<times>n_hann+1)[n_hann:-1]<times>20-10<line_sep>expected=np.array([expected expected[::-1]<times>0.5])<line_sep>values=np.array([values values[::-1]<times>0.5]).T<for_stmt>feed feeds<block_start>interp=_Interp2([4 n_pts-5] values 'hann')<line_sep>out=np.concatenate([interp.feed(f)[0]<for>f feed] axis=-1)<line_sep>assert_allclose(out expected atol=1e-7)<block_end># one control point and None support
values=[np.array([10]) <none>]<for_stmt>start [0 50 99 100 1000]<block_start>interp=_Interp2([start] values 'zero')<line_sep>out,none=interp.feed(n_pts)<assert_stmt>none<is><none><line_sep>expected=np.full(n_pts 10.)<line_sep>assert_allclose(out expected)<block_end><block_end>@pytest.mark.parametrize('ndim' (1 2 3))<def_stmt>test_cola ndim<block_start>"""Test COLA processing."""<line_sep>sfreq=1000.<line_sep>rng=np.random.RandomState(0)<def_stmt>processor x<block_start><return>(x/2. )<block_end># halve the signal
<for_stmt>n_total (999 1000 1001)<block_start>signal=rng.randn(n_total)<line_sep>out=rng.randn(n_total)# shouldn't matter
<for_stmt>_ range(ndim-1)<block_start>signal=signal[np.newaxis]<line_sep>out=out[np.newaxis]<block_end><for_stmt>n_samples (99 100 101 102 n_total-n_total<floordiv>2+1 n_total)<block_start><for_stmt>window ('hann' 'bartlett' 'boxcar' 'triang')# A few example COLA possibilities
<block_start>n_overlaps=()<if_stmt>window<in>('hann' 'bartlett')<or>n_samples%2<eq>0<block_start>n_overlaps<augadd>((n_samples+1)<floordiv>2 )<block_end><if_stmt>window<eq>'boxcar'<block_start>n_overlaps<augadd>(0 )<block_end><for_stmt>n_overlap n_overlaps# can pass callable or ndarray
<block_start><for_stmt>storer (out _Storer(out))<block_start>cola=_COLA(processor storer n_total n_samples n_overlap sfreq window)<line_sep>n_input=0<line_sep># feed data in an annoying way
<while_stmt>n_input<l>n_total<block_start>next_len=min(rng.randint(1 30) n_total-n_input)<line_sep>cola.feed(signal[<ellipsis> n_input:n_input+next_len])<line_sep>n_input<augadd>next_len<block_end>assert_allclose(out signal/2. atol=1e-7)<block_end><block_end><block_end><block_end><block_end><block_end> |
# Copyright (c) 2011-2021, <NAME>
# License: MIT License
<import_stmt>pytest<import_stmt>ezdxf<import_from_stmt>ezdxf.tools.test load_entities<import_from_stmt>ezdxf.sections.table Table<import_from_stmt>ezdxf.lldxf.tagwriter TagCollector<line_sep>@pytest.fixture(scope="module")<def_stmt>table <block_start>doc=ezdxf.new()<line_sep><return>doc.appids<block_end><def_stmt>test_table_entry_dxf_type table<block_start><assert_stmt>table.entry_dxftype<eq>"APPID"<block_end><def_stmt>test_ac1009_load_table <block_start>doc=ezdxf.new("R12")<line_sep>entities=list(load_entities(AC1009TABLE "TABLES"))<line_sep>table=Table(doc entities[1:-1])# without SECTION tags and ENDTAB
<assert_stmt>len(table)<eq>10<block_end><def_stmt>test_load_table_with_invalid_table_entry <block_start>"""This LAYERS table has an invalid APPID table entry, which should be
ignored at the loading stage.
"""<line_sep>doc=ezdxf.new("R12")<line_sep>entities=list(load_entities(INVALID_TABLE_ENTRY "TABLES"))<line_sep>table=Table(doc entities[1:-1])# without SECTION tags and ENDTAB
<assert_stmt>len(table)<eq>0<block_end><def_stmt>test_ac1009_write table<block_start>collector=TagCollector(dxfversion="AC1009")<line_sep>table.export_dxf(collector)<line_sep>tags=collector.tags<assert_stmt>tags[0]<eq>(0 "TABLE")<assert_stmt>tags[1]<eq>(2 "APPID")<line_sep># exporting table entries is tested by associated class tests
<assert_stmt>tags[-1]<eq>(0 "ENDTAB")<block_end><def_stmt>test_ac1024_load_table <block_start>doc=ezdxf.new("R2010")<line_sep>entities=list(load_entities(AC1024TABLE "TABLES"))<line_sep>table=Table(doc entities[1:-1])# without SECTION tags and ENDTAB
<assert_stmt>10<eq>len(table)<block_end><def_stmt>test_ac1024_write table<block_start>collector=TagCollector(dxfversion="R2004")<line_sep>table.export_dxf(collector)<line_sep>tags=collector.tags<assert_stmt>tags[0]<eq>(0 "TABLE")<assert_stmt>tags[1]<eq>(2 "APPID")<line_sep># exporting table entries is tested by associated class tests
<assert_stmt>tags[-1]<eq>(0 "ENDTAB")<block_end><def_stmt>test_get_table_entry table<block_start>entry=table.get("ACAD")<assert_stmt>"ACAD"<eq>entry.dxf.name<block_end><def_stmt>test_entry_names_are_case_insensitive table<block_start>entry=table.get("acad")<assert_stmt>"ACAD"<eq>entry.dxf.name<block_end><def_stmt>test_duplicate_entry table<block_start>new_entry=table.duplicate_entry("ACAD" "ACAD2018")<assert_stmt>new_entry.dxf.name<eq>"ACAD2018"<line_sep>entry2=table.get("ACAD2018")<assert_stmt>new_entry.dxf.handle<eq>entry2.dxf.handle<line_sep>new_entry2=table.duplicate_entry("ACAD2018" "ACAD2019")<line_sep>new_entry.dxf.flags=71<line_sep>new_entry2.dxf.flags=17<line_sep># really different entities
<assert_stmt>new_entry.dxf.flags<eq>71<assert_stmt>new_entry2.dxf.flags<eq>17<block_end><def_stmt>test_create_vport_table <block_start>doc=ezdxf.new()<assert_stmt>len(doc.viewports)<eq>1<line_sep># standard viewport exists
<assert_stmt>"*Active"<in>doc.viewports<line_sep># create a multi-viewport configuration
# create two entries with same name
vp1=doc.viewports.new("V1")<line_sep>vp2=doc.viewports.new("V1")<assert_stmt>len(doc.viewports)<eq>3<line_sep># get multi-viewport configuration as list
conf=doc.viewports.get_config("V1")<assert_stmt>len(conf)<eq>2<line_sep># check handles
vports=[vp1 vp2]<assert_stmt>conf[0]<in>vports<assert_stmt>conf[1]<in>vports<assert_stmt>"Test"<not><in>doc.viewports<with_stmt>pytest.raises(ezdxf.DXFTableEntryError)<block_start>_=doc.viewports.get_config("test")<block_end># delete: ignore not existing configurations
<with_stmt>pytest.raises(ezdxf.DXFTableEntryError)<block_start>doc.viewports.delete_config("test")<block_end># delete multi config
doc.viewports.delete_config("V1")<assert_stmt>len(doc.viewports)<eq>1<block_end>AC1009TABLE="""0
SECTION
2
TABLES
0
TABLE
2
APPID
70
10
0
APPID
2
ACAD
70
0
0
APPID
2
ACADANNOPO
70
0
0
APPID
2
ACADANNOTATIVE
70
0
0
APPID
2
ACAD_DSTYLE_DIMJAG
70
0
0
APPID
2
ACAD_DSTYLE_DIMTALN
70
0
0
APPID
2
ACAD_MLEADERVER
70
0
0
APPID
2
ACAECLAYERSTANDARD
70
0
0
APPID
2
ACAD_EXEMPT_FROM_CAD_STANDARDS
70
0
0
APPID
2
ACAD_DSTYLE_DIMBREAK
70
0
0
APPID
2
ACAD_PSEXT
70
0
0
ENDTAB
0
ENDSEC
"""<line_sep>AC1024TABLE=""" 0
SECTION
2
TABLES
0
TABLE
2
APPID
5
9
330
0
100
AcDbSymbolTable
70
10
0
APPID
5
12
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD
70
0
0
APPID
5
DD
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
AcadAnnoPO
70
0
0
APPID
5
DE
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
AcadAnnotative
70
0
0
APPID
5
DF
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_DSTYLE_DIMJAG
70
0
0
APPID
5
E0
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_DSTYLE_DIMTALN
70
0
0
APPID
5
107
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_MLEADERVER
70
0
0
APPID
5
1B5
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
AcAecLayerStandard
70
0
0
APPID
5
1BA
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_EXEMPT_FROM_CAD_STANDARDS
70
0
0
APPID
5
237
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_DSTYLE_DIMBREAK
70
0
0
APPID
5
28E
330
9
100
AcDbSymbolTableRecord
100
AcDbRegAppTableRecord
2
ACAD_PSEXT
70
0
0
ENDTAB
0
ENDSEC
"""<line_sep>INVALID_TABLE_ENTRY="""0
SECTION
2
TABLES
0
TABLE
2
LAYERS
70
10
0
APPID
2
ACAD
70
0
0
ENDTAB
0
ENDSEC
"""<line_sep> |
<import_stmt>KratosMultiphysics.KratosUnittest<as>KratosUnittest<import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.TrilinosApplication<as>KratosTrilinos<class_stmt>TestTrilinosMatrix(KratosUnittest.TestCase)<block_start><def_stmt>test_resize self<block_start>comm=KratosTrilinos.CreateEpetraCommunicator(KratosMultiphysics.DataCommunicator.GetDefault())<line_sep>space=KratosTrilinos.TrilinosSparseSpace()<line_sep>pb=space.CreateEmptyVectorPointer(comm)<line_sep>space.ResizeVector(pb 2)<line_sep>n=space.Size(pb.GetReference())<line_sep>self.assertEqual(n 2)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>KratosUnittest.main()<block_end> |
<import_from_stmt>clpy.indexing *# NOQA
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: v3/asset/ip/v4/geolocation/geolocation.proto
<import_stmt>sys<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='v3/asset/ip/v4/geolocation/geolocation.proto' package='v3.asset.ip.v4.geolocation' syntax='proto2' serialized_options=<none> serialized_pb=_b('\n,v3/asset/ip/v4/geolocation/geolocation.proto\x12\x1av3.asset.ip.v4.geolocation\"\x94\x02\n\x07Message\x12\x0c\n\x04host\x18\x01 \x02(\t\x12\x0c\n\x04mask\x18\x02 \x01(\t\x12\x12\n\x07version\x18\x03 \x02(\x05:\x01\x34\x12\x11\n\tcontinent\x18\x05 \x01(\t\x12\x16\n\x0e\x63ontinent_code\x18\x06 \x01(\t\x12\x0f\n\x07\x63ountry\x18\x07 \x01(\t\x12\x14\n\x0c\x63ountry_code\x18\x08 \x01(\t\x12\x0e\n\x06region\x18\t \x01(\t\x12\x13\n\x0bregion_name\x18\n \x01(\t\x12\x0c\n\x04\x63ity\x18\x0b \x01(\t\x12\x0b\n\x03zip\x18\x0c \x01(\t\x12\x10\n\x08latitude\x18\r \x01(\x02\x12\x11\n\tlongitude\x18\x0e \x01(\x02\x12\x10\n\x08timezone\x18\x0f \x01(\t\x12\x10\n\x08\x64istrict\x18\x10 \x01(\t'))<line_sep>_MESSAGE=_descriptor.Descriptor(name='Message' full_name='v3.asset.ip.v4.geolocation.Message' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='host' full_name='v3.asset.ip.v4.geolocation.Message.host' index=0 number=1 type=9 cpp_type=9 label=2 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='mask' full_name='v3.asset.ip.v4.geolocation.Message.mask' index=1 number=2 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='version' full_name='v3.asset.ip.v4.geolocation.Message.version' index=2 number=3 type=5 cpp_type=1 label=2 has_default_value=<true> default_value=4 message_type=<none> enum_type=<none> 
containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='continent' full_name='v3.asset.ip.v4.geolocation.Message.continent' index=3 number=5 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='continent_code' full_name='v3.asset.ip.v4.geolocation.Message.continent_code' index=4 number=6 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='country' full_name='v3.asset.ip.v4.geolocation.Message.country' index=5 number=7 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='country_code' full_name='v3.asset.ip.v4.geolocation.Message.country_code' index=6 number=8 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='region' full_name='v3.asset.ip.v4.geolocation.Message.region' index=7 number=9 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='region_name' full_name='v3.asset.ip.v4.geolocation.Message.region_name' 
index=8 number=10 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='city' full_name='v3.asset.ip.v4.geolocation.Message.city' index=9 number=11 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='zip' full_name='v3.asset.ip.v4.geolocation.Message.zip' index=10 number=12 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='latitude' full_name='v3.asset.ip.v4.geolocation.Message.latitude' index=11 number=13 type=2 cpp_type=6 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='longitude' full_name='v3.asset.ip.v4.geolocation.Message.longitude' index=12 number=14 type=2 cpp_type=6 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='timezone' full_name='v3.asset.ip.v4.geolocation.Message.timezone' index=13 number=15 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) 
_descriptor.FieldDescriptor(name='district' full_name='v3.asset.ip.v4.geolocation.Message.district' index=14 number=16 type=9 cpp_type=9 label=1 has_default_value=<false> default_value=_b("").decode('utf-8') message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> serialized_options=<none> file=DESCRIPTOR) ] extensions=[] nested_types=[] enum_types=[] serialized_options=<none> is_extendable=<false> syntax='proto2' extension_ranges=[] oneofs=[] serialized_start=77 serialized_end=353 )<line_sep>DESCRIPTOR.message_types_by_name['Message']=_MESSAGE<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>Message=_reflection.GeneratedProtocolMessageType('Message' (_message.Message ) dict(DESCRIPTOR=_MESSAGE __module__='v3.asset.ip.v4.geolocation.geolocation_pb2'# @@protoc_insertion_point(class_scope:v3.asset.ip.v4.geolocation.Message)
))<line_sep>_sym_db.RegisterMessage(Message)<line_sep># @@protoc_insertion_point(module_scope)
|
<if_stmt><true><block_start><import_stmt>numpy<as>np<line_sep>d=3<line_sep>K=50<line_sep>N=10<power>6<line_sep>a=np.zeros(3)<line_sep>b=np.ones(3)<line_sep>orders=np.array([K<for>i range(d)])<line_sep>coeffs=np.random.random([k+2<for>k orders])<line_sep>points=np.random.random((N d))# each line is a vector
points_c=points.T.copy()# each column is a vector
vals=np.zeros(N)<line_sep>print(points.max().max())<line_sep>print(points.min().min())<import_stmt>time<import_from_stmt>alternative_implementations *<import_from_stmt>eval_cubic_splines_cython vec_eval_cubic_spline_3<as>rr<line_sep>vec_eval_cubic_spline_3(a b orders coeffs points vals)# warmup
vec_eval_cubic_spline_3_inlined(a b orders coeffs points vals)# warmup
vec_eval_cubic_spline_3_inlined_columns(a b orders coeffs points_c vals)<line_sep># warmup
vec_eval_cubic_spline_3_kernel(a b orders coeffs points vals)# warmup
vec_eval_cubic_spline_3_inlined_lesswork(orders coeffs points vals Ad dAd)<line_sep># rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a b orders coeffs points vals)<line_sep>t1=time.time()<line_sep>vec_eval_cubic_spline_3(a b orders coeffs points vals)<line_sep>t2=time.time()<line_sep>vec_eval_cubic_spline_3_inlined(a b orders coeffs points vals)<line_sep>t3=time.time()<line_sep>vec_eval_cubic_spline_3_inlined_columns(a b orders coeffs points_c vals)<line_sep>t4=time.time()<line_sep>vec_eval_cubic_spline_3_kernel(a b orders coeffs points vals)<line_sep>t5=time.time()<line_sep>vec_eval_cubic_spline_3_inlined_lesswork(orders coeffs points vals Ad dAd)<line_sep>t6=time.time()<line_sep># rr(a,b,orders,coeffs,points,vals,Ad,dAd)
rr(a b orders coeffs points vals)<line_sep>t7=time.time()<line_sep>print("one function call per point: {}".format(t2-t1))<line_sep>print("inlined (points in rows): {}".format(t3-t2))<line_sep>print("inlined (points in columns): {}".format(t4-t3))<line_sep>print("kernel: {}".format(t5-t4))<line_sep>print("less work: {}".format(t6-t5))<line_sep>print("cython: {}".format(t7-t6))<line_sep>print(vals[:10 0])<block_end> |
<import_from_stmt>django.contrib admin<import_from_stmt>django.contrib.auth.admin UserAdmin<import_from_stmt>django.contrib.auth.models User<line_sep>UserAdmin.list_display=('username' 'email' 'first_name' 'last_name' 'is_active' 'date_joined')<line_sep>admin.site.unregister(User)<line_sep>admin.site.register(User UserAdmin)<line_sep> |
<import_from_stmt>.attribute_dict AttributeDict<line_sep>#from .experiment_schedule import mount_experiment_heap, get_free_gpus, pop_half_gpu, pop_one_gpu
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>ctppsRPAlignmentCorrectionsDataESSourceXML=cms.ESSource("CTPPSRPAlignmentCorrectionsDataESSourceXML" verbosity=cms.untracked.uint32(0) MeasuredFiles=cms.vstring() RealFiles=cms.vstring() MisalignedFiles=cms.vstring())<line_sep> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<import_from_stmt>config gamma lr<def_stmt>flat_grad grads<block_start>grad_flatten=[]<for_stmt>grad grads<block_start>grad_flatten.append(grad.view(-1))<block_end>grad_flatten=torch.cat(grad_flatten)<line_sep><return>grad_flatten<block_end><def_stmt>flat_hessian hessians<block_start>hessians_flatten=[]<for_stmt>hessian hessians<block_start>hessians_flatten.append(hessian.contiguous().view(-1))<block_end>hessians_flatten=torch.cat(hessians_flatten).data<line_sep><return>hessians_flatten<block_end><def_stmt>flat_params model<block_start>params=[]<for_stmt>param model.parameters()<block_start>params.append(param.data.view(-1))<block_end>params_flatten=torch.cat(params)<line_sep><return>params_flatten<block_end><def_stmt>update_model model new_params<block_start>index=0<for_stmt>params model.parameters()<block_start>params_length=len(params.view(-1))<line_sep>new_param=new_params[index:index+params_length]<line_sep>new_param=new_param.view(params.size())<line_sep>params.data.copy_(new_param)<line_sep>index<augadd>params_length<block_end><block_end><def_stmt>kl_divergence net old_net states<block_start>policy=net(states)<line_sep>old_policy=old_net(states).detach()<line_sep>kl=old_policy<times>torch.log(old_policy/policy)<line_sep>kl=kl.sum(1 keepdim=<true>)<line_sep><return>kl<block_end><def_stmt>fisher_vector_product net states p cg_damp=0.1<block_start>kl=kl_divergence(net net states)<line_sep>kl=kl.mean()<line_sep>kl_grad=torch.autograd.grad(kl net.parameters() create_graph=<true>)# create_graph is True if we need higher order derivative products
kl_grad=flat_grad(kl_grad)<line_sep>kl_grad_p=(kl_grad<times>p.detach()).sum()<line_sep>kl_hessian_p=torch.autograd.grad(kl_grad_p net.parameters())<line_sep>kl_hessian_p=flat_hessian(kl_hessian_p)<line_sep><return>kl_hessian_p+cg_damp<times>p.detach()<block_end><def_stmt>conjugate_gradient net states loss_grad n_step=10 residual_tol=1e-10<block_start>x=torch.zeros(loss_grad.size())<line_sep>r=loss_grad.clone()<line_sep>p=loss_grad.clone()<line_sep>r_dot_r=torch.dot(r r)<for_stmt>i range(n_step)<block_start>A_dot_p=fisher_vector_product(net states p)<line_sep>alpha=r_dot_r/torch.dot(p A_dot_p)<line_sep>x<augadd>alpha<times>p<line_sep>r<augsub>alpha<times>A_dot_p<line_sep>new_r_dot_r=torch.dot(r r)<line_sep>betta=new_r_dot_r/r_dot_r<line_sep>p=r+betta<times>p<line_sep>r_dot_r=new_r_dot_r<if_stmt>r_dot_r<l>residual_tol<block_start><break><block_end><block_end><return>x<block_end><class_stmt>TNPG(nn.Module)<block_start><def_stmt>__init__ self num_inputs num_outputs<block_start>super(TNPG self).__init__()<line_sep>self.t=0<line_sep>self.num_inputs=num_inputs<line_sep>self.num_outputs=num_outputs<line_sep>self.fc_1=nn.Linear(num_inputs 128)<line_sep>self.fc_2=nn.Linear(128 num_outputs)<for_stmt>m self.modules()<block_start><if_stmt>isinstance(m nn.Linear)<block_start>nn.init.xavier_uniform(m.weight)<block_end><block_end><block_end><def_stmt>forward self input<block_start>x=torch.tanh(self.fc_1(input))<line_sep>policy=F.softmax(self.fc_2(x))<line_sep><return>policy<block_end>@classmethod<def_stmt>train_model cls net transitions<block_start>states,actions,rewards,masks=transitions.state transitions.action transitions.reward transitions.mask<line_sep>states=torch.stack(states)<line_sep>actions=torch.stack(actions)<line_sep>rewards=torch.Tensor(rewards)<line_sep>masks=torch.Tensor(masks)<line_sep>returns=torch.zeros_like(rewards)<line_sep>running_return=0<for_stmt>t 
reversed(range(len(rewards)))<block_start>running_return=rewards[t]+gamma<times>running_return<times>masks[t]<line_sep>returns[t]=running_return<block_end>policies=net(states)<line_sep>policies=policies.view(-1 net.num_outputs)<line_sep>policy_actions=(policies<times>actions.detach()).sum(dim=1)<line_sep>loss=(policy_actions<times>returns).mean()<line_sep>loss_grad=torch.autograd.grad(loss net.parameters())<line_sep>loss_grad=flat_grad(loss_grad)<line_sep>step_dir=conjugate_gradient(net states loss_grad.data)<line_sep>params=flat_params(net)<line_sep>new_params=params+lr<times>step_dir<line_sep>update_model(net new_params)<line_sep><return>-loss<block_end><def_stmt>get_action self input<block_start>policy=self.forward(input)<line_sep>policy=policy[0].data.numpy()<line_sep>action=np.random.choice(self.num_outputs 1 p=policy)[0]<line_sep><return>action<block_end><block_end> |
"""
Vasu is running up a stone staircase with N stones, and can hop(jump) either 1 step, 2 steps or 3 steps at a time.
You have to count, how many possible ways Vasu can run up to the stone stairs.
Input Format:
Input contains integer N that is number of steps
Constraints:
1<= N <=30
Output Format:
Output for each integer N the no of possible ways w.
"""<def_stmt>hop N<block_start><if_stmt>(N<eq>1<or>N<eq>0)<block_start><return>1<block_end><elif_stmt>(N<eq>2)<block_start><return>2<block_end><else_stmt><block_start><return>hop(N-3)+hop(N-2)+hop(N-1)<block_end><block_end>N=int(input())<line_sep>print(hop(N))<line_sep> |
<import_from_stmt>clpy.padding *# NOQA
|
""" Collection of Active Learning Scorers """<line_sep># Copyright (c) 2020. Lightly AG and its affiliates.
# All Rights Reserved
<import_from_stmt>lightly.active_learning.scorers.scorer Scorer<import_from_stmt>lightly.active_learning.scorers.classification ScorerClassification<import_from_stmt>lightly.active_learning.scorers.detection ScorerObjectDetection<import_from_stmt>lightly.active_learning.scorers.semantic_segmentation ScorerSemanticSegmentation<line_sep> |
<import_stmt>os<import_stmt>launch<import_stmt>launch_ros.actions<import_from_stmt>ament_index_python.packages get_package_share_directory<def_stmt>generate_launch_description <block_start>graphbasedslam_param_dir=launch.substitutions.LaunchConfiguration('graphbasedslam_param_dir' default=os.path.join(get_package_share_directory('graph_based_slam') 'param' 'graphbasedslam.yaml'))<line_sep>graphbasedslam=launch_ros.actions.Node(package='graph_based_slam' executable='graph_based_slam_node' parameters=[graphbasedslam_param_dir] output='screen')<line_sep><return>launch.LaunchDescription([launch.actions.DeclareLaunchArgument('graphbasedslam_param_dir' default_value=graphbasedslam_param_dir description='Full path to graphbasedslam parameter file to load') graphbasedslam ])<block_end> |
<import_stmt>supriya.nonrealtime<def_stmt>test_01 <block_start>session=supriya.nonrealtime.Session()<assert_stmt>session.offsets<eq>[float("-inf") 0.0]<assert_stmt>session.duration<eq>0.0<block_end><def_stmt>test_02 <block_start>session=supriya.nonrealtime.Session()<with_stmt>session.at(0)<block_start>session.add_group()<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 float("inf")]<assert_stmt>session.duration<eq>0.0<block_end><def_stmt>test_03 <block_start>session=supriya.nonrealtime.Session()<with_stmt>session.at(23.5)<block_start>session.add_group()<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 float("inf")]<assert_stmt>session.duration<eq>23.5<block_end><def_stmt>test_04 <block_start>session=supriya.nonrealtime.Session()<with_stmt>session.at(23.5)<block_start>session.add_group(duration=1.0)<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 24.5]<assert_stmt>session.duration<eq>24.5<block_end><def_stmt>test_05 <block_start>session=supriya.nonrealtime.Session()<with_stmt>session.at(0)<block_start>session.add_group()<block_end><with_stmt>session.at(23.5)<block_start>session.add_group(duration=1.0)<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 24.5 float("inf")]<assert_stmt>session.duration<eq>24.5<block_end><def_stmt>test_06 <block_start>session=supriya.nonrealtime.Session(padding=11.0)<assert_stmt>session.offsets<eq>[float("-inf") 0.0]<assert_stmt>session.duration<eq>0.0<block_end><def_stmt>test_07 <block_start>session=supriya.nonrealtime.Session(padding=11.0)<with_stmt>session.at(0)<block_start>session.add_group()<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 float("inf")]<assert_stmt>session.duration<eq>0.0<block_end><def_stmt>test_08 <block_start>session=supriya.nonrealtime.Session(padding=11.0)<with_stmt>session.at(23.5)<block_start>session.add_group()<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 
float("inf")]<assert_stmt>session.duration<eq>34.5<block_end><def_stmt>test_09 <block_start>session=supriya.nonrealtime.Session(padding=11.0)<with_stmt>session.at(23.5)<block_start>session.add_group(duration=1.0)<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 24.5]<assert_stmt>session.duration<eq>35.5<block_end><def_stmt>test_10 <block_start>session=supriya.nonrealtime.Session(padding=11.0)<with_stmt>session.at(0)<block_start>session.add_group()<block_end><with_stmt>session.at(23.5)<block_start>session.add_group(duration=1.0)<block_end><assert_stmt>session.offsets<eq>[float("-inf") 0.0 23.5 24.5 float("inf")]<assert_stmt>session.duration<eq>35.5<block_end> |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_from_stmt>collections deque<import_from_stmt>threading Event<class_stmt>BufferedQueue(object)<block_start>"""
Thread safe buffered queue
"""<def_stmt>__init__ self<block_start>self.__queue=deque()<line_sep>self.__data_ready_event=Event()<line_sep>self.__queue_end=<false># sign that buffer is empty
self.__queue_feeder_end=<false># EOF sign
<block_end><def_stmt>__notify_ready self<block_start>"""
Notify reader that data is ready to be consumed
"""<line_sep>self.__queue_end=<false><line_sep>self.__data_ready_event.set()<block_end><def_stmt>notify_end self<block_start>"""
Notify queue about end of producer stream, allow consumer to read buffer to the end
"""<line_sep>self.__queue_feeder_end=<true><line_sep>self.__notify_ready()<block_end><def_stmt>put self item<block_start>"""
Add object to the buffer
"""<if_stmt>self.__queue_feeder_end<block_start><raise>IndexError("'notify_end' was called, queue is locked for writing")<block_end>self.__queue.append(item)<line_sep>self.__notify_ready()<block_end><def_stmt>get self timeout=<none><block_start>"""
Read data from buffer at least in `timeout` seconds. If no data ready in `timeout`, would be returned None.
:param timeout: amount of time to wait for data availability
:return: data or None if no data were read in `timeout` or no more data available (buffer is empty)
"""<try_stmt><block_start><if_stmt><not>self.__queue_feeder_end<block_start>self.__data_ready_event.wait(timeout)<block_end><return>self.__queue.popleft()<block_end><except_stmt>IndexError<block_start><if_stmt>timeout<block_start><return><none><block_end>self.__queue_end=<true><block_end><finally_stmt><block_start><if_stmt>self.count<eq>0<block_start>self.__data_ready_event.clear()<if_stmt>self.__queue_feeder_end<block_start>self.__queue_end=<true><block_end><block_end><block_end><block_end><def_stmt>reset self<block_start>"""
Clear instance state and data
"""<line_sep>self.__data_ready_event.clear()<line_sep>self.__queue.clear()<line_sep>self.__queue_feeder_end=<false><line_sep>self.__queue_end=<false><block_end>@property<def_stmt>empty self<block_start><if_stmt>self.__queue_feeder_end<and>self.count<eq>0<block_start><return><true><block_end><return>self.__queue_end<block_end>@property<def_stmt>count self<block_start><return>len(self.__queue)<block_end><block_end> |
<import_from_stmt>pympler.garbagegraph start_debug_garbage<import_from_stmt>pympler web<class_stmt>Leaf(object)<block_start><pass><block_end><class_stmt>Branch(object)<block_start><def_stmt>__init__ self root<block_start>self.root=root<line_sep>self.leaf=Leaf()<block_end><block_end><class_stmt>Root(object)<block_start><def_stmt>__init__ self num_branches<block_start>self.branches=[Branch(self)<for>_ range(num_branches)]<block_end><block_end>start_debug_garbage()<line_sep>tree=Root(2)<del_stmt>tree<line_sep>web.start_profiler(debug=<true>)<line_sep> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.