content stringlengths 0 1.55M |
|---|
"""Utilities for pandas."""<import_from_stmt>.core FirstColFtClassifier# noqa: F401
<import_from_stmt>.core IdxBasedFtClassifier# noqa: F401
<import_from_stmt>.core FirstObjFtClassifier# noqa: F401
<import_from_stmt>.core ColLblBasedFtClassifier# noqa: F401
<import_from_stmt>.core SeriesFtClassifier# noqa: F401
<import_from_stmt>._version get_versions<line_sep>__version__=get_versions()['version']<del_stmt>get_versions<for_stmt>name ['get_versions' '_version' 'core' 'name']<block_start><try_stmt><block_start>globals().pop(name)<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end> |
<import_stmt>unittest<import_stmt>clpy<import_from_stmt>clpy.random distributions<import_from_stmt>clpy testing<line_sep>@testing.parameterize(*testing.product({'shape':[(4 3 2) (3 2)] 'loc_shape':[() (3 2)] 'scale_shape':[() (3 2)] }))@testing.gpu<class_stmt>TestDistributions(unittest.TestCase)<block_start>_multiprocess_can_split_=<true><def_stmt>check_distribution self dist_func loc_dtype scale_dtype dtype<block_start>loc=clpy.ones(self.loc_shape dtype=loc_dtype)<line_sep>scale=clpy.ones(self.scale_shape dtype=scale_dtype)<line_sep>out=dist_func(loc scale self.shape dtype)<line_sep>self.assertEqual(self.shape out.shape)<line_sep>self.assertEqual(out.dtype dtype)<block_end>@clpy.testing.for_float_dtypes('dtype' no_float16=<true>)@clpy.testing.for_float_dtypes('loc_dtype')@clpy.testing.for_float_dtypes('scale_dtype')<def_stmt>test_normal self loc_dtype scale_dtype dtype<block_start>self.check_distribution(distributions.normal loc_dtype scale_dtype dtype)<block_end><block_end> |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>migrate.changeset constraint<import_stmt>sqlalchemy<def_stmt>upgrade migrate_engine<block_start>meta=sqlalchemy.MetaData(bind=migrate_engine)<line_sep>resource=sqlalchemy.Table('resource' meta autoload=<true>)<line_sep>resource_properties_data=sqlalchemy.Table('resource_properties_data' meta autoload=<true>)<line_sep>attr_data_id=sqlalchemy.Column('attr_data_id' sqlalchemy.Integer)<line_sep>attr_data_id.create(resource)<line_sep>res_fkey=constraint.ForeignKeyConstraint(columns=[resource.c.attr_data_id] refcolumns=[resource_properties_data.c.id] name='rsrc_attr_data_ref')<line_sep>res_fkey.create()<block_end> |
<import_stmt>pytest<import_stmt>unittest<import_from_stmt>pydu.dict AttrDict LookupDict CaseInsensitiveDict OrderedDefaultDict attrify<class_stmt>TestAttrDict<block_start><def_stmt>test_attr_access_with_init self<block_start>d=AttrDict(key=1)<assert_stmt>d['key']<eq>1<assert_stmt>d.key<eq>1<block_end><def_stmt>test_attr_access_without_init self<block_start>d=AttrDict()<line_sep>d['key']=1<assert_stmt>d['key']<eq>1<assert_stmt>d.key<eq>1<line_sep>d.anotherkey=1<assert_stmt>d.anotherkey<eq>1<assert_stmt>d['anotherkey']<eq>1<block_end><def_stmt>test_attr_delete self<block_start>d=AttrDict(key=1)<del_stmt>d.key<with_stmt>pytest.raises(AttributeError)<block_start><del_stmt>d.key<block_end><block_end><def_stmt>test_repr self<block_start>d=AttrDict()<assert_stmt>repr(d)<eq>'<AttrDict {}>'<block_end><block_end><class_stmt>TestLooUpDict<block_start><def_stmt>test_key_exist self<block_start>d=LookupDict()<line_sep>d['key']=1<assert_stmt>d['key']<eq>1<block_end><def_stmt>test_key_not_exist self<block_start>d=LookupDict()<assert_stmt>d['key']<is><none><block_end><block_end><class_stmt>TestCaseInsensitiveDict(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.d=CaseInsensitiveDict()<line_sep>self.d['Accept']=1<block_end><def_stmt>test_ci_dict_set self<block_start><assert_stmt>self.d['aCCept']<eq>1<assert_stmt>list(self.d)<eq>['Accept']<block_end><def_stmt>test_ci_dict_del self<block_start><del_stmt>self.d['accept']<assert_stmt><not>self.d<block_end><def_stmt>test_ci_dict_copy_and_equal self<block_start>d=self.d.copy()<assert_stmt>d<eq>self.d<block_end><block_end><class_stmt>TestOrderedDefaultDict<block_start><def_stmt>test_default_normal self<block_start>d=OrderedDefaultDict(int)<assert_stmt>d[1]<eq>0<assert_stmt>d['a']<eq>0<line_sep>d[2]=2<assert_stmt>d[2]<eq>2<assert_stmt>list(d.keys())<eq>[1 'a' 2]<line_sep>d=OrderedDefaultDict(int a=1)<assert_stmt>d['a']<eq>1<block_end><def_stmt>test_default_factory_not_callable 
self<block_start><with_stmt>pytest.raises(TypeError)<block_start>OrderedDefaultDict('notcallable')<block_end><block_end><def_stmt>test_default_factory_none self<block_start>d=OrderedDefaultDict()<with_stmt>pytest.raises(KeyError)<block_start>d[1]<block_end><block_end><def_stmt>test_copy self<block_start>d1=OrderedDefaultDict(int a=[])<line_sep>d2=d1.copy()<assert_stmt>d2['a']<eq>[]<line_sep>d1['a'].append(1)<assert_stmt>d2['a']<eq>[1]<block_end><def_stmt>test_deepcopy self<block_start><import_stmt>copy<line_sep>d1=OrderedDefaultDict(int a=[])<line_sep>d2=copy.deepcopy(d1)<assert_stmt>d2['a']<eq>[]<line_sep>d1['a'].append(1)<assert_stmt>d2['a']<eq>[]<block_end><def_stmt>test_repr self<block_start>d=OrderedDefaultDict(int a=1)<assert_stmt>repr(d).startswith('OrderedDefaultDict')<block_end><block_end><def_stmt>test_attrify <block_start>attrd=attrify({'a':[1 2 {'b':'b'}] 'c':'c' })<assert_stmt>attrd.a<eq>[1 2 {'b':'b'}]<assert_stmt>attrd.a[2].b<eq>'b'<assert_stmt>attrd.c<eq>'c'<line_sep>attrd=attrify((1 2))<assert_stmt>attrd<eq>(1 2)<line_sep>attrd=attrify({'a':1 'b':(1 2)})<assert_stmt>attrd.a<eq>1<assert_stmt>attrd.b<eq>(1 2)<block_end> |
<import_from_stmt>gym_env.dota_game DotaGame TEAM_RADIANT TEAM_DIRE<import_from_stmt>dotaservice.protos.dota_shared_enums_pb2 DOTA_GAMEMODE_1V1MID<import_from_stmt>agents.dota_agent PPOAgent<import_from_stmt>multiprocessing Process<import_from_stmt>sys platform<import_stmt>time<import_stmt>os<import_stmt>pathlib<line_sep># path example
DOTA_CLINET_PATH_MAC="~/Library/Application Support/Steam/steamapps/common/dota 2 beta/game"<line_sep>DOTA_CLINET_PATH_WINDOWS=r'E:\SteamLibrary\steamapps\common\dota 2 beta\game'<line_sep>DOTA_CLINET_PATH_LINUX="~/.steam/steam/steamapps/common/dota 2 beta/game"<line_sep>TMP_PATH_WINDOWS=str(pathlib.Path(__file__).parent.resolve())+r'\tmp'<line_sep>LAST_ORDER_PROJECT_PATH_MAC=pathlib.Path(__file__).parent.resolve()<line_sep>LAST_ORDER_PROJECT_PATH_WINDOWS=pathlib.Path(__file__).parent.resolve()<line_sep>LAST_ORDER_PROJECT_PATH_LINUX=pathlib.Path(__file__).parent.resolve()<line_sep>print(LAST_ORDER_PROJECT_PATH_WINDOWS)<def_stmt>dota_process_exists <block_start><if_stmt>platform<eq>'win32'<block_start><return>len(os.popen("tasklist /v | findstr dota2.exe").read())<ne>0<block_end><else_stmt><block_start><return>len(os.popen("ps aux | grep dota2 | grep -v grep").read())<ne>0<block_end><block_end><def_stmt>run_human_vs_ai dota_game:DotaGame team_id:int player_id:int opponent_player_id:int<block_start><if_stmt>platform<eq>'darwin'<block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_MAC<block_end><elif_stmt>platform<eq>'win32'<block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_WINDOWS<block_end><else_stmt><block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_LINUX<block_end>agent=PPOAgent(dota_game team_id player_id opponent_player_id "" "self_eval" )<line_sep>agent.run()<block_end><def_stmt>run_dota <block_start>dota_game=DotaGame(host_timescale=1 ticks_per_observation=6 game_mode=DOTA_GAMEMODE_1V1MID 
host_mode="HOST_MODE_GUI_MENU")<if_stmt>platform<eq>'darwin'<block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_MAC<block_end><elif_stmt>platform<eq>'win32'<block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_WINDOWS<block_end><else_stmt><block_start>dota_game.session_folder=LAST_ORDER_PROJECT_PATH_LINUX<block_end><try_stmt><block_start>dota_game.stop_dota_pids()<line_sep>dota_game.run_dota()<line_sep>time.sleep(10)<block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>dota_game.stop_dota_pids()<block_end><block_end><def_stmt>supervisor <block_start><while_stmt><true><block_start><if_stmt><not>dota_process_exists()<block_start>Process(target=run_dota).run()<line_sep>dota_game=DotaGame(host_timescale=1 ticks_per_observation=6 game_mode=DOTA_GAMEMODE_1V1MID host_mode="HOST_MODE_GUI_MENU")<line_sep>dp=Process(target=run_human_vs_ai args=(dota_game TEAM_RADIANT 1 0))<line_sep>dp.start()<block_end>time.sleep(20)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>supervisor()<block_end> |
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''
Defines the `cache` decorator.
See its documentation for more details.
'''<line_sep># todo: examine thread-safety
<import_stmt>datetime<as>datetime_module<import_from_stmt>python_toolbox misc_tools<import_from_stmt>python_toolbox binary_search<import_from_stmt>python_toolbox decorator_tools<import_from_stmt>python_toolbox.sleek_reffing SleekCallArgs<import_from_stmt>python_toolbox.third_party.decorator decorator<as>decorator_<line_sep>infinity=float('inf')<class_stmt>CLEAR_ENTIRE_CACHE(misc_tools.NonInstantiable)<block_start>'''Sentinel object for clearing the entire cache.'''<block_end><def_stmt>_get_now <block_start>'''
Get the current datetime.
This is specified as a function to make testing easier.
'''<line_sep><return>datetime_module.datetime.now()<block_end>@decorator_tools.helpful_decorator_builder<def_stmt>cache max_size=infinity time_to_keep=<none><block_start>'''
Cache a function, saving results so they won't have to be computed again.
This decorator understands function arguments. For example, it understands
that for a function like this:
@cache()
def f(a, b=2):
return whatever
The calls `f(1)` or `f(1, 2)` or `f(b=2, a=1)` are all identical, and a
cached result saved for one of these calls will be used for the others.
All the arguments are sleekreffed to prevent memory leaks. Sleekref is a
variation of weakref. Sleekref is when you try to weakref an object, but if
it's non-weakreffable, like a `list` or a `dict`, you maintain a normal,
strong reference to it. (See documentation of
`python_toolbox.sleek_reffing` for more details.) Thanks to sleekreffing
you can avoid memory leaks when using weakreffable arguments, but if you
ever want to use non-weakreffable arguments you are still able to.
(Assuming you don't mind the memory leaks.)
You may optionally specify a `max_size` for maximum number of cached
results to store; old entries are thrown away according to a
least-recently-used alogrithm. (Often abbreivated LRU.)
You may optionally specific a `time_to_keep`, which is a time period after
which a cache entry will expire. (Pass in either a `timedelta` object or
keyword arguments to create one.)
'''<line_sep># todo idea: figure how how complex the function's argspec is, and then
# compile a function accordingly, so functions with a simple argspec won't
# have to go through so much shit. update: probably it will help only for
# completely argumentless function. so do one for those.
<import_from_stmt>python_toolbox.nifty_collections OrderedDict<if_stmt>time_to_keep<is><not><none><block_start><if_stmt>max_size<ne>infinity<block_start><raise>NotImplementedError<block_end><if_stmt><not>isinstance(time_to_keep datetime_module.timedelta)<block_start><try_stmt><block_start>time_to_keep=datetime_module.timedelta(**time_to_keep)<block_end><except_stmt>Exception<as>exception<block_start><raise>TypeError('`time_limit` must be either a `timedelta` object or a '<concat>'dict of keyword arguments for constructing a '<concat>'`timedelta` object.')<from>exception<block_end><block_end><assert_stmt>isinstance(time_to_keep datetime_module.timedelta)<block_end><def_stmt>decorator function# In case we're being given a function that is already cached:
<block_start><if_stmt>getattr(function 'is_cached' <false>)<block_start><return>function<block_end><if_stmt>max_size<eq>infinity<block_start><if_stmt>time_to_keep<block_start>sorting_key_function=<lambda>sleek_call_args:cached._cache[sleek_call_args][1]<def_stmt>remove_expired_entries <block_start>almost_cutting_point=binary_search.binary_search_by_index(list(cached._cache.keys()) _get_now() sorting_key_function rounding=binary_search.LOW)<if_stmt>almost_cutting_point<is><not><none><block_start>cutting_point=almost_cutting_point+1<for_stmt>key list(cached._cache.keys())[:cutting_point]<block_start><del_stmt>cached._cache[key]<block_end><block_end><block_end>@misc_tools.set_attributes(_cache=OrderedDict())<def_stmt>cached function *args **kwargs<block_start>remove_expired_entries()<line_sep>sleek_call_args=SleekCallArgs(cached._cache function *args **kwargs)<try_stmt><block_start><return>cached._cache[sleek_call_args][0]<block_end><except_stmt>KeyError<block_start>value=function(*args **kwargs)<line_sep>cached._cache[sleek_call_args]=(value _get_now()+time_to_keep)<line_sep>cached._cache.sort(key=sorting_key_function)<line_sep><return>value<block_end><block_end><block_end><else_stmt># not time_to_keep
<block_start>@misc_tools.set_attributes(_cache={})<def_stmt>cached function *args **kwargs<block_start>sleek_call_args=SleekCallArgs(cached._cache function *args **kwargs)<try_stmt><block_start><return>cached._cache[sleek_call_args]<block_end><except_stmt>KeyError<block_start>cached._cache[sleek_call_args]=value=function(*args **kwargs)<line_sep><return>value<block_end><block_end><block_end><block_end><else_stmt># max_size < infinity
<block_start>@misc_tools.set_attributes(_cache=OrderedDict())<def_stmt>cached function *args **kwargs<block_start>sleek_call_args=SleekCallArgs(cached._cache function *args **kwargs)<try_stmt><block_start>result=cached._cache[sleek_call_args]<line_sep>cached._cache.move_to_end(sleek_call_args)<line_sep><return>result<block_end><except_stmt>KeyError<block_start>cached._cache[sleek_call_args]=value=function(*args **kwargs)<if_stmt>len(cached._cache)<g>max_size<block_start>cached._cache.popitem(last=<false>)<block_end><return>value<block_end><block_end><block_end>result=decorator_(cached function)<def_stmt>cache_clear key=CLEAR_ENTIRE_CACHE<block_start><if_stmt>key<is>CLEAR_ENTIRE_CACHE<block_start>cached._cache.clear()<block_end><else_stmt><block_start><try_stmt><block_start><del_stmt>cached._cache[key]<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end>result.cache_clear=cache_clear<line_sep>result.is_cached=<true><line_sep><return>result<block_end><return>decorator<block_end> |
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
"""A script to evaluate test values for special functions in high precision.
This scripts looks for .csv files in /test/Tests/Data/SpecialFunctionsValues.
These files are expected to contain sets of arguments and expected result values
for some special functions.
Whenever the script encounters a file for which it has a defined function,
it evaluates that function for every set of arguments present in that file
and replaces the expected result in the file with the one it computed,
except for Infinite or NaN results, which are preserved.
.csv files are expected to have the header of the form
arg0,arg1,...,argN,expectedresult
use comma as a value separator, dot as a decimal separator, and
"Infinity", "-Infinity", and "NaN" to designate the corresponding values.
The correspondence between file names and functions is set in the pair_info
dictionary within the script.
To add a new test case, add a new row to the csv file using zero for the expectedresult.
Then run this script to replace the dummy value.
"""<import_from_future_stmt> division<import_stmt>os<import_stmt>csv<import_from_stmt>mpmath *<import_stmt>time<line_sep>mp.pretty=<true><line_sep>mp.dps=500<line_sep>output_dps=50<def_stmt>normal_cdf_moment_ratio n x<block_start><if_stmt>x<l>0<block_start><return>power(2 -0.5-n/2)<times>hyperu(n/2+0.5 0.5 x<times>x/2)<block_end><return>exp(x<times>x/4)<times>pcfu(0.5+n -x)<block_end><def_stmt>normal_cdf2 x y r<block_start>"""
This function produces correct results for inputs currently present in /test/Tests/Data/SpecialFunctionsValues.
Other inputs may fall into areas where currently present algorithms produce incorrect results and may require modifying this function.
"""<if_stmt>x<eq>-inf<or>y<eq>-inf<block_start><return>mpf('0')<block_end><if_stmt>x<eq>inf<block_start><return>ncdf(y)<block_end><if_stmt>y<eq>inf<block_start><return>ncdf(x)<block_end><if_stmt>r<eq>mpf('1')<block_start><return>ncdf(min(x y))<block_end><if_stmt>r<eq>mpf('-1')<block_start><if_stmt>x<le>-y<block_start><return>mpf('0')<block_end><elif_stmt>x<g>y<block_start><return>ncdf(y)-ncdf(-x)<block_end><else_stmt><block_start><return>ncdf(x)-ncdf(-y)<block_end><block_end><if_stmt>abs(y)<g>abs(x)<block_start>z=x<line_sep>x=y<line_sep>y=z<block_end># Avoid quadrature with r < 0 since it is sometimes inaccurate.
<if_stmt>r<l>0<and>x-y<l>0# phi(x,y,r) = phi(inf,y,r) - phi(-x,y,-r)
# phi(x,y,r) = phi(x,inf,r) - phi(x,-y,-r)
<block_start><return>ncdf(x)-normal_cdf2(x -y -r)<block_end><if_stmt>x<g>0<and>-x+y<le>0<block_start><return>ncdf(y)-normal_cdf2(-x y -r)<block_end><if_stmt>x+y<g>0# phi(x,y,r) = phi(-x,-y,r) + phi(x,y,-1)
<block_start><return>normal_cdf2(-x -y r)+normal_cdf2(x y -1)<block_end><def_stmt>f t<block_start><if_stmt>abs(t)<eq>mpf('1')# When t = -1, (x*x+y*y-2*t*x*y) = (x+y)^2 >= 0
# When t = 1, (x*x+y*y-2*t*x*y) = (x-y)^2 >= 0
<block_start><return>mpf('0')<block_end>omt2=(1-t)<times>(1+t)<line_sep><return>1/(2<times>pi<times>sqrt(omt2))<times>exp(-(x<times>x+y<times>y-2<times>t<times>x<times>y)/(2<times>omt2))<block_end>omr2=(1+r)<times>(1-r)<line_sep>ymrx=y-r<times>x<def_stmt>f2 t<block_start><return>npdf(t-x)<times>normal_cdf((ymrx+r<times>t)/omr2)<block_end># This integral excludes normal_cdf2(x,y,-1)
# which will be zero when x+y <= 0
result,err=safe_quad(f [-1 r])<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(result)<block_start>result,err=safe_quad(f2 [0 inf])<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(result)<block_start>print(f"Suspiciously big error when evaluating an integral for normal_cdf2({nstr(x)}, {nstr(y)}, {nstr(r)}).")<line_sep>print(f"Integral: {nstr(result)}")<line_sep>print(f"Integral error estimate: {nstr(err)}")<block_end><block_end><return>result<block_end><def_stmt>safe_quad f points<block_start>verbose=<false><line_sep># get a quick estimate of the result
estimate=quad(f points maxdegree=1 verbose=verbose)<if_stmt>verbose<block_start>print(f"Rescaling integrand by {nstr(1/estimate)}")<block_end>result,err=quad(<lambda>x:f(x)/estimate points error=<true> verbose=verbose)<line_sep>result<augmul>estimate<line_sep>err<augmul>estimate<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(result)<block_start>estimate=result<if_stmt>verbose<block_start>print(f"Rescaling integrand by {nstr(1/estimate)}")<block_end>result,err=quad(<lambda>x:f(x)/estimate points error=<true> verbose=verbose)<line_sep>result<augmul>estimate<line_sep>err<augmul>estimate<block_end><return>result err<block_end><def_stmt>normal_cdf2_ln x y r<block_start><return>ln(normal_cdf2(x y r))<block_end><def_stmt>normal_cdf2_ratio_ln x y r sqrtomr2<block_start><if_stmt>sqrtomr2<l>0.618<block_start>omr2=sqrtomr2<times>sqrtomr2<line_sep>r=sign(r)<times>sqrt(1-omr2)<block_end><else_stmt><block_start>omr2=1-r<times>r<block_end><return>normal_cdf2_ln(x y r)+(x<times>x+y<times>y-2<times>r<times>x<times>y)/2/omr2+log(2<times>pi)<block_end><def_stmt>logistic_gaussian m v<block_start><if_stmt>m<eq>inf<block_start><if_stmt>v<eq>inf<block_start><return>inf<block_end><return>mpf('1.0')<block_end><if_stmt>v<eq>inf<block_start><return>mpf('0.5')<block_end>logEpsilon=log(eps)<if_stmt>2<times>m+4<times>v<l>logEpsilon<block_start><return>mpf(exp(m+v/2)<times>(1-exp(m+1.5<times>v)<times>(1-exp(m+2.5<times>v))))<block_end>tanhm=tanh(m)<line_sep># Not really a precise threshold, but fine for our data
<if_stmt>tanhm<eq>mpf('1.0')<block_start><return>tanhm<block_end># The integration routine below is obtained by substituting x = atanh(t)*sqrt(v)
# into the definition of logistic_gaussian
#
# f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / (1 + mpmath.exp(-x))
# result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
#
# Such substitution makes mpmath.quad call much faster.
# mpmath.quad uses exponential spacing between quadrature points, so we want the transformation to grow like log(x).
sqrtv=sqrt(v)<line_sep>misqrtv=m/sqrtv<line_sep>scale=max(10 m+sqrtv)/sqrtv<def_stmt>f t<block_start>x=scale<times>atanh(t)<line_sep><return>exp(-(x-misqrtv)<power>2/2)/(1+exp(-x<times>sqrtv))/(1-t<times>t)<block_end>coef=scale/sqrt(2<times>pi)<line_sep>points=[-1 0 1]<line_sep>int,err=safe_quad(f points)<line_sep>result=coef<times>int<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(int)<block_start>print(f"Suspiciously big error when evaluating an integral for logistic_gaussian({nstr(m)}, {nstr(v)}).")<line_sep>print(f"Integral: {nstr(int)}")<line_sep>print(f"integral error estimate: {nstr(err)}")<line_sep>print(f"Coefficient: {nstr(coef)}")<line_sep>print(f"Result (Coefficient * Integral): {nstr(result)}")<block_end><return>result<block_end><def_stmt>logistic_gaussian_deriv m v<block_start><if_stmt>m<eq>inf<or>m<eq>-inf<or>v<eq>inf<block_start><return>mpf('0.0')<block_end># The integration routine below is obtained by substituting x = atanh(t)
# into the definition of logistic_gaussian'
#
# f = lambda x: mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) / ((1 + mpmath.exp(-x)) * (1 + mpmath.exp(x)))
# result = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf) * mpmath.quad(f, [-mpmath.inf, mpmath.inf])
#
# Such substitution makes mpmath.quad call much faster.
<def_stmt>f t<block_start>one_minus_t_squared=1-t<times>t<line_sep><return>exp(-(atanh(t)-m)<power>2/(2<times>v))/(one_minus_t_squared+sqrt(one_minus_t_squared))<block_end>coef=0.5/sqrt(2<times>pi<times>v)<line_sep>int,err=safe_quad(f [-1 1])<line_sep>result=coef<times>int<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(int)<block_start>print(f"Suspiciously big error when evaluating an integral for logistic_gaussian'({m}, {v}).")<line_sep>print(f"Integral: {int}")<line_sep>print(f"integral error estimate: {err}")<line_sep>print(f"Coefficient: {coef}")<line_sep>print(f"Result (Coefficient * Integral): {result}")<block_end><return>result<block_end><def_stmt>logistic_gaussian_deriv2 m v<block_start><if_stmt>m<eq>inf<or>m<eq>-inf<or>v<eq>inf<or>m<eq>mpf(0)<block_start><return>mpf(0)<block_end># The integration routine below is obtained by substituting x = atanh(t)
# into the definition of logistic_gaussian''
#
# def f(x):
# expx = mpmath.exp(x)
# one_plus_expx = 1 + expx
# return mpmath.exp(-(x - mmpf) * (x - mmpf) / (2 * vmpf)) * (1 - expx) / ((1 + mpmath.exp(-x)) * one_plus_expx * one_plus_expx)
# coef = 1 / mpmath.sqrt(2 * mpmath.pi * vmpf)
# int = mpmath.quad(f, [-mpmath.inf, mpmath.inf])
# result = coef * int
#
# Such substitution makes mpmath.quad call much faster.
<def_stmt>f t<block_start>one_minus_t=1-t<line_sep>one_minus_t_squared=1-t<times>t<line_sep>sqrt_one_minus_t_squared=sqrt(one_minus_t_squared)<line_sep><return>exp(-(atanh(t)-m)<power>2/(2<times>v))<times>(one_minus_t-sqrt_one_minus_t_squared)/((one_minus_t_squared+sqrt_one_minus_t_squared)<times>(one_minus_t+sqrt_one_minus_t_squared))<block_end>coef=0.5/sqrt(2<times>pi<times>v)<line_sep>int,err=safe_quad(f [-1 1])<line_sep>result=coef<times>int<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(int)<block_start>print(f"Suspiciously big error when evaluating an integral for logistic_gaussian''({m}, {v}).")<line_sep>print(f"Integral: {nstr(int)}")<line_sep>print(f"integral error estimate: {nstr(err)}")<line_sep>print(f"Coefficient: {nstr(coef)}")<line_sep>print(f"Result (Coefficient * Integral): {nstr(result)}")<block_end><return>result<block_end><def_stmt>normal_cdf x<block_start>"""
An alternate way of computing ncdf that avoids the bugs in ncdf
"""<line_sep><return>0.5<times>gammainc(0.5 x<times>x/2 inf)/gamma(0.5)<block_end><def_stmt>normal_pdf_ln x<block_start><return>-x<times>x/2-log(sqrt(2<times>pi))<block_end><def_stmt>normal_cdf_integral x y r<block_start><if_stmt>x<eq>-inf<or>y<eq>-inf<block_start><return>mpf('0.0')<block_end><if_stmt>x<eq>inf<block_start><return>inf<block_end><if_stmt>y<eq>inf<block_start>result=normal_cdf2(x y r)<if_stmt>x<g>0<block_start><return>result<times>x+exp(normal_pdf_ln(x)-log(ncdf(x)))<block_end><else_stmt><block_start><return>result<times>normal_cdf_moment_ratio(mpf('1.0') x)<times>exp(normal_pdf_ln(x)-log(ncdf(x)))<block_end><block_end><if_stmt>r<eq>mpf(1)<block_start><if_stmt>x<le>y<block_start><return>normal_cdf_moment_ratio(mpf('1.0') x)<times>exp(normal_pdf_ln(x))<block_end><else_stmt><block_start>npdfy=exp(normal_pdf_ln(y))<line_sep><return>(normal_cdf_moment_ratio(mpf('1.0') y)+(x-y)<times>ncdf(y)/npdfy)<times>npdfy<block_end><block_end><if_stmt>r<eq>mpf(-1)<block_start><if_stmt>x+y<le>0<block_start><return>mpf(0)<block_end><else_stmt><block_start><return>x<times>normal_cdf2(x y r)+npdf(x)-npdf(y)<block_end><block_end># This area separation works well for inputs currently present in /test/Tests/Data/SpecialFunctionsValues
# Other inputs may require making this more accurate
<if_stmt>x<g>0<and>y<g>0<and>1+r<l>mpf('1e-12')<block_start><return>normal_cdf_integral(x y -1)-normal_cdf_integral(-x -y r)<block_end>omr2=(1-r)<times>(1+r)<line_sep>sqrtomr2=sqrt(omr2)<line_sep># This is accurate when x >= 0 and r >= 0
<if_stmt><true>#x >= 0 and r >= 0:
<block_start><return>x<times>normal_cdf2(x y r)+exp(normal_pdf_ln(x)+log(ncdf((y-r<times>x)/sqrtomr2)))+r<times>exp(normal_pdf_ln(y)+log(ncdf((x-r<times>y)/sqrtomr2)))<block_end># try quadrature on the integral definition
<def_stmt>f t<block_start><return>t<times>npdf(t-x)<times>normal_cdf((y-r<times>(x-t))/omr2)<block_end>result,err=safe_quad(f [0 inf])<if_stmt>mpf(10)<power>output_dps<times>abs(err)<g>abs(result)<block_start>print(f"Suspiciously big error when evaluating an integral for normal_cdf_integral({x}, {y}, {r}).")<line_sep>print(f"Integral: {nstr(result)}")<line_sep>print(f"integral error estimate: {nstr(err)}")<block_end><return>result<block_end><def_stmt>normal_cdf_integral_ratio x y r<block_start>int_z=normal_cdf_integral(x y r)<if_stmt>int_z<eq>mpf(0)<block_start><return>int_z<block_end>z=normal_cdf2(x y r)<line_sep><return>int_z/z<block_end><def_stmt>beta_cdf x a b<block_start><if_stmt>x<le>0<block_start><return>0<block_end><if_stmt>x<ge>1<block_start><return>1<block_end><return>betainc(a b 0 x regularized=<true>)<block_end>pair_info={'BesselI.csv':besseli 'BetaCdf.csv':beta_cdf 'Digamma.csv':digamma 'Erfc.csv':erfc 'ExpMinus1.csv':expm1 'ExpMinus1RatioMinus1RatioMinusHalf.csv':<lambda>x:((exp(x)-1)/x-1)/x-0.5<if>x<ne>mpf(0)<else>mpf(0) 'Gamma.csv':gamma 'GammaLn.csv':loggamma 'GammaLnSeries.csv':<lambda>x:loggamma(x)-(x-0.5)<times>log(x)+x-0.5<times>log(2<times>pi) 'GammaLower.csv':<lambda>s x:gammainc(s 0 x regularized=<true>)<if>s<ne>inf<else>mpf(0) 'GammaUpper.csv':<lambda>s x:gammainc(s x inf) 'GammaUpperRegularized.csv':<lambda>s x:gammainc(s x inf regularized=<true>)<if>s<ne>inf<else>mpf(1) 'GammaUpperScale.csv':<lambda>s x:x<power>s<times>exp(-x)/gamma(s) 'Log1MinusExp.csv':<lambda>x:log(1-exp(x)) 'Log1Plus.csv':log1p 'LogExpMinus1.csv':<lambda>x:log(exp(x)-1) 'Logistic.csv':<lambda>x:1/(1+exp(-x)) 'logisticGaussian.csv':logistic_gaussian 'logisticGaussianDeriv.csv':logistic_gaussian_deriv 'logisticGaussianDeriv2.csv':logistic_gaussian_deriv2 'LogisticLn.csv':<lambda>x:-log(1+exp(-x)) 'LogSumExp.csv':<lambda>x y:log(exp(x)+exp(y)) 'NormalCdf.csv':ncdf 'NormalCdf2.csv':normal_cdf2 'NormalCdfIntegral.csv':normal_cdf_integral 
'NormalCdfIntegralRatio.csv':normal_cdf_integral_ratio 'NormalCdfInv.csv':<lambda>x:-sqrt(mpf(2))<times>erfinv(1-2<times>x) 'NormalCdfLn.csv':<lambda>x:log(ncdf(x)) 'NormalCdfLn2.csv':normal_cdf2_ln 'NormalCdfLogit.csv':<lambda>x:log(ncdf(x))-log(ncdf(-x)) 'NormalCdfMomentRatio.csv':normal_cdf_moment_ratio 'NormalCdfRatioLn2.csv':normal_cdf2_ratio_ln 'Tetragamma.csv':<lambda>x:polygamma(2 x) 'Trigamma.csv':<lambda>x:polygamma(1 x) 'XMinusLog1Plus.csv':<lambda>x:x-log(1+x) }<def_stmt>float_str_csharp_to_python s<block_start><return>s.replace('NaN' 'nan').replace('Infinity' 'inf')<block_end><def_stmt>float_str_python_to_csharp s<block_start><return>s.replace('nan' 'NaN').replace('inf' 'Infinity').replace('inf' 'Infinity')<block_end>dir=os.path.join(os.path.dirname(os.path.realpath(__file__)) '..' '..' '..' 'test' 'Tests' 'data' 'SpecialFunctionsValues')<with_stmt>os.scandir(dir)<as>it<block_start><for_stmt>entry it<block_start><if_stmt>entry.name.endswith('.csv')<and>entry.is_file()<block_start>print(f'Processing {entry.name}...')<if_stmt>entry.name<not><in>pair_info.keys()<or>pair_info[entry.name]<eq><none><block_start>print("Don't know how to process. 
Skipping.")<line_sep><continue><block_end>f=pair_info[entry.name]<with_stmt>open(entry.path)<as>csvfile<block_start>reader=csv.DictReader(csvfile delimiter=',')<line_sep>fieldnames=reader.fieldnames<line_sep>arg_count=len(fieldnames)-1<line_sep>newrows=[]<for_stmt>row reader<block_start><if_stmt>entry.name<eq>'NormalCdfRatioLn2.csv'<block_start>sqrtomr2=mpf(float_str_csharp_to_python(row['arg3']))<line_sep>r=mpf(float_str_csharp_to_python(row['arg2']))<if_stmt>sqrtomr2<l>0.618<block_start>row['arg2']=nstr(sign(r)<times>sqrt(1-sqrtomr2<times>sqrtomr2) output_dps)<block_end><block_end>newrow=dict(row)<line_sep>args=[]<for_stmt>i range(arg_count)<block_start>args.append(mpf(float_str_csharp_to_python(row[f'arg{i}'])))<block_end>result_in_file=row['expectedresult']<line_sep>verbose=<true><if_stmt>result_in_file<eq>'Infinity'<or>result_in_file<eq>'-Infinity'<or>result_in_file<eq>'NaN'<block_start>newrow['expectedresult']=result_in_file<block_end><else_stmt><block_start><try_stmt><block_start><if_stmt>verbose<block_start>print(f'{entry.name}{args}')<line_sep>startTime=time.time()<block_end>result=f(*args)<if_stmt>verbose<block_start>elapsed=time.time()-startTime<line_sep>print(f'({elapsed} seconds elapsed)')<line_sep>nprint(result output_dps)<block_end><block_end><except_stmt>ValueError<block_start>print(f'ValueError for args {args}. Setting result to NaN.')<line_sep>result=mpf('nan')<block_end>newrow['expectedresult']=float_str_python_to_csharp(nstr(result output_dps))<block_end>newrows.append(newrow)<block_end><block_end><with_stmt>open(entry.path 'w' newline='')<as>csvfile<block_start>writer=csv.DictWriter(csvfile fieldnames=fieldnames delimiter=',')<line_sep>writer.writeheader()<line_sep>writer.writerows(newrows)<block_end><block_end><block_end><block_end> |
<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_stmt>numpy asarray<import_from_stmt>scipy.spatial Voronoi<import_from_stmt>scipy.spatial Delaunay<line_sep>__all__=['delaunay_from_points_numpy' 'voronoi_from_points_numpy' ]<def_stmt>delaunay_from_points_numpy points<block_start>"""Computes the delaunay triangulation for a list of points using Numpy.
Parameters
----------
points : sequence of tuple
XYZ coordinates of the original points.
boundary : sequence of tuples
list of ordered points describing the outer boundary (optional)
holes : list of sequences of tuples
list of polygons (ordered points describing internal holes (optional)
Returns
-------
list
The faces of the triangulation.
Each face is a triplet of indices referring to the list of point coordinates.
Examples
--------
>>>
"""<line_sep>xyz=asarray(points)<line_sep>d=Delaunay(xyz[: 0:2])<line_sep><return>d.simplices<block_end><def_stmt>voronoi_from_points_numpy points<block_start>"""Generate a voronoi diagram from a set of points.
Parameters
----------
points : list of list of float
XYZ coordinates of the voronoi sites.
Returns
-------
Examples
--------
>>>
"""<line_sep>points=asarray(points)<line_sep>voronoi=Voronoi(points)<line_sep><return>voronoi<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>numpy.random randint<class_stmt>p<block_start>train_both=<true># whether to train just one model or both
sparse_signal=<false># train on incorrect data points or not
signal_strength=1.0# how much to weight kl-divergence
starting_folder='../models/init_models/'# folder that store initial models
num_iters=3# how many epochs to train for
seed=42# random seed
bias=""<line_sep>out_dir='../models/trained_models/'# directory to save to
# exporting ########
pid=''.join(["%s"%randint(0 9)<for>num range(0 20)])<def_stmt>_str self<block_start>vals=vars(p)<line_sep><return>'pid='+vals['pid']<block_end><def_stmt>_dict self<block_start><return>{attr:val<for>(attr val) vars(p).items()<if><not>attr.startswith('_')}<block_end><block_end> |
# Copyright 2016 Rackspace
# Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>sqlalchemy MetaData select Table and_ not_<def_stmt>has_migrations engine<block_start>"""Returns true if at least one data row can be migrated.
There are rows left to migrate if:
#1 There exists a row with visibility not set yet.
Or
#2 There exists a private image with active members but its visibility
isn't set to 'shared' yet.
Note: This method can return a false positive if data migrations
are running in the background as it's being called.
"""<line_sep>meta=MetaData(engine)<line_sep>images=Table('images' meta autoload=<true>)<line_sep>rows_with_null_visibility=(select([images.c.id]).where(images.c.visibility<eq><none>).limit(1).execute())<if_stmt>rows_with_null_visibility.rowcount<eq>1<block_start><return><true><block_end>image_members=Table('image_members' meta autoload=<true>)<line_sep>rows_with_pending_shared=(select([images.c.id]).where(and_(images.c.visibility<eq>'private' images.c.id.in_(select([image_members.c.image_id]).distinct().where(not_(image_members.c.deleted))))).limit(1).execute())<if_stmt>rows_with_pending_shared.rowcount<eq>1<block_start><return><true><block_end><return><false><block_end><def_stmt>_mark_all_public_images_with_public_visibility images<block_start>migrated_rows=(images.update().values(visibility='public').where(images.c.is_public).execute())<line_sep><return>migrated_rows.rowcount<block_end><def_stmt>_mark_all_non_public_images_with_private_visibility images<block_start>migrated_rows=(images.update().values(visibility='private').where(not_(images.c.is_public)).execute())<line_sep><return>migrated_rows.rowcount<block_end><def_stmt>_mark_all_private_images_with_members_as_shared_visibility images image_members<block_start>migrated_rows=(images.update().values(visibility='shared').where(and_(images.c.visibility<eq>'private' images.c.id.in_(select([image_members.c.image_id]).distinct().where(not_(image_members.c.deleted))))).execute())<line_sep><return>migrated_rows.rowcount<block_end><def_stmt>_migrate_all engine<block_start>meta=MetaData(engine)<line_sep>images=Table('images' meta autoload=<true>)<line_sep>image_members=Table('image_members' meta autoload=<true>)<line_sep>num_rows=_mark_all_public_images_with_public_visibility(images)<line_sep>num_rows<augadd>_mark_all_non_public_images_with_private_visibility(images)<line_sep>num_rows<augadd>_mark_all_private_images_with_members_as_shared_visibility(images 
image_members)<line_sep><return>num_rows<block_end><def_stmt>migrate engine<block_start>"""Set visibility column based on is_public and image members."""<line_sep><return>_migrate_all(engine)<block_end> |
f=plt.figure(figsize=(6 6))<line_sep>plt.scatter(pres.swing_full lp.weights.lag_spatial(w pres.swing_full))<line_sep>plt.plot((-.3 .1) (-.3 .1) color='k')<line_sep>plt.title('$I = {:.3f} \ \ (p < {:.3f})$'.format(moran.I moran.p_sim))<line_sep> |
"""
Build the CUDA-STREAM benchmark for multiple CUDA compute capabilities.
Make each build available as a SCI-F application.
"""<line_sep>Stage0<augadd>baseimage(image='nvcr.io/nvidia/cuda:9.1-devel-centos7' _as='devel')<line_sep># Install the GNU compiler
Stage0<augadd>gnu(fortran=<false>)<line_sep># Install SCI-F
Stage0<augadd>pip(packages=['scif'] upgrade=<true>)<line_sep># Download a single copy of the source code
Stage0<augadd>packages(ospackages=['ca-certificates' 'git'])<line_sep>Stage0<augadd>shell(commands=['cd /var/tmp' 'git clone --depth=1 https://github.com/bcumming/cuda-stream.git cuda-stream'])<line_sep># Build CUDA-STREAM as a SCI-F application for each CUDA compute capability
<for_stmt>cc ['35' '60' '70']<block_start>binpath='/scif/apps/cc{}/bin'.format(cc)<line_sep>stream=scif(name='cc{}'.format(cc))<line_sep>stream<augadd>comment('CUDA-STREAM built for CUDA compute capability {}'.format(cc))<line_sep>stream<augadd>shell(commands=['nvcc -std=c++11 -ccbin=g++ -gencode arch=compute_{0},code=\\"sm_{0},compute_{0}\\" -o {1}/stream /var/tmp/cuda-stream/stream.cu'.format(cc binpath)])<line_sep>stream<augadd>environment(variables={'PATH':'{}:$PATH'.format(binpath)})<line_sep>stream<augadd>label(metadata={'COMPUTE_CAPABILITY':cc})<line_sep>stream<augadd>shell(commands=['stream'] _test=<true>)<line_sep>stream<augadd>runscript(commands=['stream'])<line_sep>Stage0<augadd>stream<block_end># Runtime stage
Stage1<augadd>baseimage(image='nvcr.io/nvidia/cuda:9.1-base-centos7')<line_sep># Install SCI-F
Stage1<augadd>pip(packages=['scif'] upgrade=<true>)<line_sep># Install runtime components from the first stage
Stage1<augadd>Stage0.runtime()<line_sep> |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Decay learning rate per epoch."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<class_stmt>Controller(object)<block_start>"""Controll learning rate per epoch.
Args:
learning_rate_init: A float value, the initial learning rate
decay_start_epoch: int, the epoch to start decay
decay_rate: A float value, the rate to decay the current learning rate
decay_patient_epoch: int, decay learning rate if results have not been
improved for 'decay_patient_epoch'
lower_better: If True, the lower, the better.
If False, the higher, the better.
worst_value: A flaot value, the worst value of evaluation
"""<def_stmt>__init__ self learning_rate_init decay_start_epoch decay_rate decay_patient_epoch=1 lower_better=<true> worst_value=1<block_start>self.learning_rate_init=learning_rate_init<line_sep>self.decay_start_epoch=decay_start_epoch<line_sep>self.decay_rate=decay_rate<line_sep>self.decay_patient_epoch=decay_patient_epoch<line_sep>self.not_improved_epoch=0<line_sep>self.lower_better=lower_better<line_sep>self.best_value=worst_value<block_end><def_stmt>decay_lr self learning_rate epoch value<block_start>"""Decay learning rate per epoch.
Args:
learning_rate: A float value, the current learning rete
epoch: int, the current epoch
value: A value to evaluate
Returns:
learning_rate_decayed: A float value, the decayed learning rate
"""<if_stmt><not>self.lower_better<block_start>value<augmul>-1<block_end><if_stmt>epoch<l>self.decay_start_epoch<block_start><if_stmt>value<l>self.best_value# Update
<block_start>self.best_value=value<block_end><return>learning_rate<block_end><if_stmt>value<l>self.best_value# Improved
<block_start>self.best_value=value<line_sep>self.not_improved_epoch=0<line_sep><return>learning_rate<block_end><elif_stmt>self.not_improved_epoch<l>self.decay_patient_epoch# Not improved, but learning rate will be not decayed
<block_start>self.not_improved_epoch<augadd>1<line_sep><return>learning_rate<block_end><else_stmt># Not improved, and learning rate will be decayed
<block_start>self.not_improved_epoch=0<line_sep>learning_rate_decayed=learning_rate<times>self.decay_rate<line_sep><return>learning_rate_decayed<block_end><block_end><block_end> |
# Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> print_function<import_stmt>sys<import_stmt>time<import_stmt>os<import_stmt>os.path<import_stmt>logging<import_stmt>shutil<import_stmt>re<import_stmt>json<import_stmt>multiprocessing<import_stmt>functools<import_from_stmt>collections namedtuple<import_stmt>yaml<import_stmt>gevent.core<import_stmt>minemeld.loader<line_sep>__all__=['load_config' 'validate_config' 'resolve_prototypes']<line_sep># disables construction of timestamp objects
yaml.SafeLoader.add_constructor(u'tag:yaml.org,2002:timestamp' yaml.SafeLoader.construct_yaml_str)<line_sep>LOG=logging.getLogger(__name__)<line_sep>COMMITTED_CONFIG='committed-config.yml'<line_sep>RUNNING_CONFIG='running-config.yml'<line_sep>PROTOTYPE_ENV='MINEMELD_PROTOTYPE_PATH'<line_sep>MGMTBUS_NUM_CONNS_ENV='MGMTBUS_NUM_CONNS'<line_sep>FABRIC_NUM_CONNS_ENV='FABRIC_NUM_CONNS'<line_sep>CHANGE_ADDED=0<line_sep>CHANGE_DELETED=1<line_sep>CHANGE_INPUT_ADDED=2<line_sep>CHANGE_INPUT_DELETED=3<line_sep>CHANGE_OUTPUT_ENABLED=4<line_sep>CHANGE_OUTPUT_DISABLED=5<line_sep>_ConfigChange=namedtuple('_ConfigChange' ['nodename' 'nodeclass' 'change' 'detail'])<line_sep>_Config=namedtuple('_Config' ['nodes' 'fabric' 'mgmtbus' 'changes'])<class_stmt>MineMeldConfigChange(_ConfigChange)<block_start><def_stmt>__new__ _cls nodename nodeclass change detail=<none><block_start><return>_ConfigChange.__new__(_cls nodename=nodename nodeclass=nodeclass change=change detail=detail)<block_end><block_end><class_stmt>MineMeldConfig(_Config)<block_start><def_stmt>as_nset self<block_start>result=set()<for_stmt>nname,nvalue self.nodes.iteritems()<block_start>result.add(json.dumps([nname nvalue.get('class' <none>)] sort_keys=<true>))<block_end><return>result<block_end><def_stmt>compute_changes self oconfig<block_start><if_stmt>oconfig<is><none># oconfig is None, mark everything as added
<block_start><for_stmt>nodename,nodeattrs self.nodes.iteritems()<block_start>self.changes.append(MineMeldConfigChange(nodename=nodename nodeclass=nodeattrs['class'] change=CHANGE_ADDED))<block_end><return><block_end>my_nset=self.as_nset()<line_sep>other_nset=oconfig.as_nset()<line_sep>deleted=other_nset-my_nset<line_sep>added=my_nset-other_nset<line_sep>untouched=my_nset&other_nset<line_sep># mark delted as deleted
<for_stmt>snode deleted<block_start>nodename,nodeclass=json.loads(snode)<line_sep>change=MineMeldConfigChange(nodename=nodename nodeclass=nodeclass change=CHANGE_DELETED detail=oconfig.nodes[nodename])<line_sep>self.changes.append(change)<block_end># mark added as added
<for_stmt>snode added<block_start>nodename,nodeclass=json.loads(snode)<line_sep>change=MineMeldConfigChange(nodename=nodename nodeclass=nodeclass change=CHANGE_ADDED)<line_sep>self.changes.append(change)<block_end># check inputs/output for untouched
<for_stmt>snode untouched<block_start>nodename,nodeclass=json.loads(snode)<line_sep>my_inputs=set(self.nodes[nodename].get('inputs' []))<line_sep>other_inputs=set(oconfig.nodes[nodename].get('inputs' []))<line_sep>iadded=my_inputs-other_inputs<line_sep>ideleted=other_inputs-my_inputs<for_stmt>i iadded<block_start>change=MineMeldConfigChange(nodename=nodename nodeclass=nodeclass change=CHANGE_INPUT_ADDED detail=i)<line_sep>self.changes.append(change)<block_end><for_stmt>i ideleted<block_start>change=MineMeldConfigChange(nodename=nodename nodeclass=nodeclass change=CHANGE_INPUT_DELETED detail=i)<line_sep>self.changes.append(change)<block_end>my_output=self.nodes[nodename].get('output' <false>)<line_sep>other_output=oconfig.nodes[nodename].get('output' <false>)<if_stmt>my_output<eq>other_output<block_start><continue><block_end>change_type=CHANGE_OUTPUT_DISABLED<if_stmt>my_output<block_start>change_type=CHANGE_OUTPUT_ENABLED<block_end>change=MineMeldConfigChange(nodename=nodename nodeclass=nodeclass change=change_type)<line_sep>self.changes.append(change)<block_end><block_end>@classmethod<def_stmt>from_dict cls dconfig=<none><block_start><if_stmt>dconfig<is><none><block_start>dconfig={}<block_end>fabric=dconfig.get('fabric' <none>)<if_stmt>fabric<is><none><block_start>fabric_num_conns=int(os.getenv(FABRIC_NUM_CONNS_ENV 50))<line_sep>fabric={'class':'ZMQRedis' 'config':{'num_connections':fabric_num_conns 'priority':gevent.core.MINPRI# pylint:disable=E1101
}}<block_end>mgmtbus=dconfig.get('mgmtbus' <none>)<if_stmt>mgmtbus<is><none><block_start>mgmtbus_num_conns=int(os.getenv(MGMTBUS_NUM_CONNS_ENV 10))<line_sep>mgmtbus={'transport':{'class':'ZMQRedis' 'config':{'num_connections':mgmtbus_num_conns 'priority':gevent.core.MAXPRI# pylint:disable=E1101
}} 'master':{} 'slave':{}}<block_end>nodes=dconfig.get('nodes' <none>)<if_stmt>nodes<is><none><block_start>nodes={}<block_end><return>cls(nodes=nodes fabric=fabric mgmtbus=mgmtbus changes=[])<block_end><block_end><def_stmt>_load_node_prototype protoname paths<block_start>proto_module,proto_name=protoname.rsplit('.' 1)<line_sep>pmodule=<none><line_sep>pmprotos={}<for_stmt>p paths<block_start>pmpath=os.path.join(p proto_module+'.yml')<try_stmt><block_start><with_stmt>open(pmpath 'r')<as>pf<block_start>pmodule=yaml.safe_load(pf)<if_stmt>pmodule<is><none><block_start>pmodule={}<block_end><block_end><block_end><except_stmt>IOError<block_start>pmodule=<none><line_sep><continue><block_end>pmprotos=pmodule.get('prototypes' {})<if_stmt>proto_name<not><in>pmprotos<block_start>pmodule=<none><line_sep><continue><block_end><if_stmt>'class'<not><in>pmprotos[proto_name]<block_start>pmodule=<none><line_sep><continue><block_end><return>pmprotos[proto_name]<block_end><raise>RuntimeError('Unable to load prototype %s: '<concat>' not found'%(protoname))<block_end><def_stmt>_load_config_from_file f<block_start>valid=<true><line_sep>config=yaml.safe_load(f)<if_stmt><not>isinstance(config dict)<and>config<is><not><none><block_start><raise>ValueError('Invalid config YAML type')<block_end><return>valid MineMeldConfig.from_dict(config)<block_end><def_stmt>_load_and_validate_config_from_file path<block_start>valid=<false><line_sep>config=<none><if_stmt>os.path.isfile(path)<block_start><try_stmt><block_start><with_stmt>open(path 'r')<as>cf<block_start>valid,config=_load_config_from_file(cf)<block_end><if_stmt><not>valid<block_start>LOG.error('Invalid config file {}'.format(path))<block_end><block_end><except_stmt>(RuntimeError IOError)<block_start>LOG.exception('Error loading config {}, config ignored'.format(path))<line_sep>valid,config=<false> 
<none><block_end><block_end><if_stmt>valid<and>config<is><not><none><block_start>valid=resolve_prototypes(config)<block_end><if_stmt>valid<and>config<is><not><none><block_start>vresults=validate_config(config)<if_stmt>len(vresults)<ne>0<block_start>LOG.error('Invalid config {}: {}'.format(path ', '.join(vresults)))<line_sep>valid=<false><block_end><block_end><return>valid config<block_end><def_stmt>_destroy_node change installed_nodes=<none> installed_nodes_gcs=<none><block_start>LOG.info('Destroying {!r}'.format(change))<line_sep>destroyed_name=change.nodename<line_sep>destroyed_class=change.nodeclass<if_stmt>destroyed_class<is><none><block_start>LOG.error('Node {} with no class destroyed'.format(destroyed_name))<line_sep><return>1<block_end># load node class GC from entry_point or from "gc" staticmethod of class
node_gc=<none><line_sep>mmep=installed_nodes_gcs.get(destroyed_class <none>)<if_stmt>mmep<is><none><block_start>mmep=installed_nodes.get(destroyed_class <none>)<try_stmt><block_start>nodep=mmep.ep.load()<if_stmt>hasattr(nodep 'gc')<block_start>node_gc=nodep.gc<block_end><block_end><except_stmt>ImportError<block_start>LOG.exception("Error checking node class {} for gc method".format(destroyed_class))<block_end><block_end><else_stmt><block_start><try_stmt><block_start>node_gc=mmep.ep.load()<block_end><except_stmt>ImportError<block_start>LOG.exception("Error resolving gc for class {}".format(destroyed_class))<block_end><block_end><if_stmt>node_gc<is><none><block_start>LOG.error('Node {} with class {} with no garbage collector destroyed'.format(destroyed_name destroyed_class))<line_sep><return>1<block_end><try_stmt><block_start>node_gc(destroyed_name config=change.detail.get('config' <none>))<block_end><except_stmt><block_start>LOG.exception('Exception destroying old node {} of class {}'.format(destroyed_name destroyed_class))<line_sep><return>1<block_end><return>0<block_end><def_stmt>_destroy_old_nodes config# this destroys resources used by destroyed nodes
# a nodes has been destroyed if a node with same
# name & config does not exist in the new config
# the case of different node config but same and name
# and class is handled by node itself
<block_start>destroyed_nodes=[c<for>c config.changes<if>c.change<eq>CHANGE_DELETED]<line_sep>LOG.info('Destroyed nodes: {!r}'.format(destroyed_nodes))<if_stmt>len(destroyed_nodes)<eq>0<block_start><return><block_end>installed_nodes=minemeld.loader.map(minemeld.loader.MM_NODES_ENTRYPOINT)<line_sep>installed_nodes_gcs=minemeld.loader.map(minemeld.loader.MM_NODES_GCS_ENTRYPOINT)<line_sep>dpool=multiprocessing.Pool()<line_sep>_bound_destroy_node=functools.partial(_destroy_node installed_nodes=installed_nodes installed_nodes_gcs=installed_nodes_gcs)<line_sep>dpool.imap_unordered(_bound_destroy_node destroyed_nodes)<line_sep>dpool.close()<line_sep>dpool.join()<line_sep>dpool=<none><block_end><def_stmt>_load_config_from_dir path<block_start>ccpath=os.path.join(path COMMITTED_CONFIG)<line_sep>rcpath=os.path.join(path RUNNING_CONFIG)<line_sep>ccvalid,cconfig=_load_and_validate_config_from_file(ccpath)<line_sep>rcvalid,rcconfig=_load_and_validate_config_from_file(rcpath)<if_stmt><not>rcvalid<and><not>ccvalid# both running and canidate are not valid
<block_start>print("At least one of" RUNNING_CONFIG "or" COMMITTED_CONFIG "should exist in" path file=sys.stderr)<line_sep>sys.exit(1)<block_end><elif_stmt>rcvalid<and><not>ccvalid# running is valid but candidate is not
<block_start><return>rcconfig<block_end><elif_stmt><not>rcvalid<and>ccvalid# candidate is valid while running is not
<block_start>LOG.info('Switching to candidate config')<line_sep>cconfig.compute_changes(rcconfig)<line_sep>LOG.info('Changes in config: {!r}'.format(cconfig.changes))<line_sep>_destroy_old_nodes(cconfig)<if_stmt>rcconfig<is><not><none><block_start>shutil.copyfile(rcpath '{}.{}'.format(rcpath int(time.time())))<block_end>shutil.copyfile(ccpath rcpath)<line_sep><return>cconfig<block_end><elif_stmt>rcvalid<and>ccvalid<block_start>LOG.info('Switching to candidate config')<line_sep>cconfig.compute_changes(rcconfig)<line_sep>LOG.info('Changes in config: {!r}'.format(cconfig.changes))<line_sep>_destroy_old_nodes(cconfig)<line_sep>shutil.copyfile(rcpath '{}.{}'.format(rcpath int(time.time())))<line_sep>shutil.copyfile(ccpath rcpath)<line_sep><return>cconfig<block_end><block_end><def_stmt>_detect_cycles nodes# using Topoligical Sorting to detect cycles in graph, see Wikipedia
<block_start>graph={}<line_sep>S=set()<line_sep>L=[]<for_stmt>n nodes<block_start>graph[n]={'inputs':[] 'outputs':[]}<block_end><for_stmt>n,v nodes.iteritems()<block_start><for_stmt>i v.get('inputs' [])<block_start><if_stmt>i<in>graph<block_start>graph[i]['outputs'].append(n)<line_sep>graph[n]['inputs'].append(i)<block_end><block_end><block_end><for_stmt>n,v graph.iteritems()<block_start><if_stmt>len(v['inputs'])<eq>0<block_start>S.add(n)<block_end><block_end><while_stmt>len(S)<ne>0<block_start>n=S.pop()<line_sep>L.append(n)<for_stmt>m graph[n]['outputs']<block_start>graph[m]['inputs'].remove(n)<if_stmt>len(graph[m]['inputs'])<eq>0<block_start>S.add(m)<block_end><block_end>graph[n]['outputs']=[]<block_end>nedges=0<for_stmt>n,v graph.iteritems()<block_start>nedges<augadd>len(v['inputs'])<line_sep>nedges<augadd>len(v['outputs'])<block_end><return>nedges<eq>0<block_end><def_stmt>resolve_prototypes config# retrieve prototype dir from environment
# used for main library and local library
<block_start>paths=os.getenv(PROTOTYPE_ENV <none>)<if_stmt>paths<is><none><block_start><raise>RuntimeError('Unable to load prototypes: %s '<concat>'environment variable not set'%(PROTOTYPE_ENV))<block_end>paths=paths.split(':')<line_sep># add prototype dirs from extension to paths
prototypes_entrypoints=minemeld.loader.map(minemeld.loader.MM_PROTOTYPES_ENTRYPOINT)<for_stmt>epname,mmep prototypes_entrypoints.iteritems()<block_start><if_stmt><not>mmep.loadable<block_start>LOG.info('Prototypes entrypoint {} not loadable'.format(epname))<line_sep><continue><block_end><try_stmt><block_start>ep=mmep.ep.load()<line_sep># we add prototype paths in front, to let extensions override default protos
paths.insert(0 ep())<block_end><except_stmt><block_start>LOG.exception('Exception retrieving path from prototype entrypoint {}'.format(epname))<block_end><block_end># resolve all prototypes
valid=<true><line_sep>nodes_config=config.nodes<for_stmt>_,nconfig nodes_config.iteritems()<block_start><if_stmt>'prototype'<in>nconfig<block_start><try_stmt><block_start>nproto=_load_node_prototype(nconfig['prototype'] paths)<block_end><except_stmt>RuntimeError<as>e<block_start>LOG.error('Error loading prototype {}: {}'.format(nconfig['prototype'] str(e)))<line_sep>valid=<false><line_sep><continue><block_end>nconfig.pop('prototype')<line_sep>nconfig['class']=nproto['class']<line_sep>nproto_config=nproto.get('config' {})<line_sep>nproto_config.update(nconfig.get('config' {}))<line_sep>nconfig['config']=nproto_config<block_end><block_end><return>valid<block_end><def_stmt>validate_config config<block_start>result=[]<line_sep>nodes=config.nodes<for_stmt>n nodes.keys()<block_start><if_stmt>re.match('^[a-zA-Z0-9_\-]+$' n)<is><none># pylint:disable=W1401
<block_start>result.append('%s node name is invalid'%n)<block_end><block_end><for_stmt>n,v nodes.iteritems()<block_start><for_stmt>i v.get('inputs' [])<block_start><if_stmt>i<not><in>nodes<block_start>result.append('%s -> %s is unknown'%(n i))<line_sep><continue><block_end><if_stmt><not>nodes[i].get('output' <false>)<block_start>result.append('%s -> %s output disabled'%(n i))<block_end><block_end><block_end>installed_nodes=minemeld.loader.map(minemeld.loader.MM_NODES_ENTRYPOINT)<for_stmt>n,v nodes.iteritems()<block_start>nclass=v.get('class' <none>)<if_stmt>nclass<is><none><block_start>result.append('No class in {}'.format(n))<line_sep><continue><block_end>mmep=installed_nodes.get(nclass <none>)<if_stmt>mmep<is><none><block_start>result.append('Unknown node class {} in {}'.format(nclass n))<line_sep><continue><block_end><if_stmt><not>mmep.loadable<block_start>result.append('Class {} in {} not safe to load'.format(nclass n))<block_end><block_end><if_stmt><not>_detect_cycles(nodes)<block_start>result.append('loop detected')<block_end><return>result<block_end><def_stmt>load_config config_path<block_start><if_stmt>os.path.isdir(config_path)<block_start><return>_load_config_from_dir(config_path)<block_end># this is just a file, as we can't do a delta
# we just load it and mark all the nodes as added
valid,config=_load_and_validate_config_from_file(config_path)<if_stmt><not>valid<block_start><raise>RuntimeError('Invalid config')<block_end>config.compute_changes(<none>)<line_sep><return>config<block_end> |
# Generated by Django 3.2.11 on 2022-02-02 07:10
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('engine' '0048_auto_20211112_1918') ]<line_sep>operations=[migrations.AlterField(model_name='labeledshape' name='type' field=models.CharField(choices=[('rectangle' 'RECTANGLE') ('polygon' 'POLYGON') ('polyline' 'POLYLINE') ('points' 'POINTS') ('ellipse' 'ELLIPSE') ('cuboid' 'CUBOID')] max_length=16) ) migrations.AlterField(model_name='trackedshape' name='type' field=models.CharField(choices=[('rectangle' 'RECTANGLE') ('polygon' 'POLYGON') ('polyline' 'POLYLINE') ('points' 'POINTS') ('ellipse' 'ELLIPSE') ('cuboid' 'CUBOID')] max_length=16) ) ]<block_end> |
# Problem: https://www.hackerrank.com/challenges/s10-weighted-mean/problem
# Score: 30
n=int(input())<line_sep>arr=list(map(int input().split()))<line_sep>weights=list(map(int input().split()))<line_sep>print(round(sum([arr[x]<times>weights[x]<for>x range(len(arr))])/sum(weights) 1))<line_sep> |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FeatureColumns and many Column"""<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_future_stmt> unicode_literals<import_stmt>os<import_stmt>sys<import_stmt>struct<import_from_stmt>six.moves zip map<import_stmt>itertools<import_stmt>gzip<import_from_stmt>functools partial<import_stmt>six<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>glob glob<import_from_stmt>propeller.data.feature_column FeatureColumns<as>FCBase<import_from_stmt>propeller.paddle.data.functional Dataset<import_stmt>multiprocessing<line_sep>log=logging.getLogger(__name__)<line_sep>__all__=['FeatureColumns']<class_stmt>FeatureColumns(FCBase)<block_start>"""A Dataset Factory object"""<def_stmt>build_dataset self *args **kwargs<block_start>"""
build `Dataset` from `data_dir` or `data_file`
if `use_gz`, will try to convert data_files to gz format and save to `gz_dir`, if `gz_dir` not given, will create one.
"""<line_sep>ds=super(FeatureColumns self).build_dataset(*args **kwargs)<line_sep>ds.__class__=Dataset<line_sep><return>ds<block_end><def_stmt>build_dataset_from_stdin self *args **kwargs<block_start>"""doc"""<line_sep>ds=super(FeatureColumns self).build_dataset_from_stdin(*args **kwargs)<line_sep>ds.__class__=Dataset<line_sep><return>ds<block_end><block_end> |
<import_stmt>sqlalchemy.ext.declarative<as>dec<import_from_stmt>requests Session<line_sep>SqlAlchemyBase=dec.declarative_base()<class_stmt>BaseMixins<block_start><def_stmt>update self *args **kwarg<block_start>self.__init__(*args **kwarg)<block_end>@classmethod<def_stmt>get_ref cls_type session:Session match_value:str match_attr:str="id"<block_start>eff_ref=getattr(cls_type match_attr)<line_sep><return>session.query(cls_type).filter(eff_ref<eq>match_value).one_or_none()<block_end><block_end> |
<import_from_stmt>torch.nn.utils clip_grad<import_from_stmt>mmcv.runner.hooks.hook Hook<class_stmt>OptimizerHook(Hook)<block_start><def_stmt>__init__ self grad_clip=<none> mean_teacher=<none><block_start>self.grad_clip=grad_clip<line_sep>self.mean_teacher=mean_teacher<block_end><def_stmt>clip_grads self params<block_start>clip_grad.clip_grad_norm_(filter(<lambda>p:p.requires_grad params) **self.grad_clip)<block_end><def_stmt>after_train_iter self runner<block_start>runner.optimizer.zero_grad()<line_sep>runner.outputs['loss'].backward()<if_stmt>self.grad_clip<is><not><none><block_start>self.clip_grads(runner.model.parameters())<block_end>runner.optimizer.step()<line_sep>#mean teacher
<if_stmt>self.mean_teacher<block_start><for_stmt>k,v runner.model.module.state_dict().items()<block_start><if_stmt>k.find('num_batches_tracked')<eq>-1<block_start>runner.teacher_dict[k]=self.mean_teacher.alpha<times>runner.teacher_dict[k]+(1-self.mean_teacher.alpha)<times>v<block_end><else_stmt><block_start>runner.teacher_dict[k]=1<times>v<block_end><block_end><block_end><block_end><block_end> |
<import_from_stmt>models.basic_model BasicModel<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>copy<def_stmt>apply_noise weights noise_config<block_start><if_stmt>noise_config<is><none><block_start><return>weights<block_end>noise_type=noise_config.get('type' 'normal')<if_stmt>noise_type<eq>'normal'<block_start>ratio=noise_config.get('ratio' 1e-3)<line_sep>std=np.std(weights)<line_sep>noise=np.random.normal(0 std<times>ratio size=weights.shape)<block_end><elif_stmt>noise_type<eq>'uniform'<block_start>ratio=noise_config.get('ratio' 1e-3)<line_sep>mean,_max=np.mean(weights) np.max(weights)<line_sep>width=(_max-mean)<times>ratio<line_sep>noise=np.random.uniform(-width width size=weights.shape)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><return>weights+noise<block_end><def_stmt>get_layer_by_name name<block_start><if_stmt>name<eq>'conv'<block_start><return>ConvLayer<block_end><elif_stmt>name<eq>'fc'<block_start><return>FCLayer<block_end><elif_stmt>name<eq>'pool'<block_start><return>PoolLayer<block_end><else_stmt><block_start><raise>ValueError('Unknown layer type: %s'%name)<block_end><block_end><def_stmt>get_magnifier old_size indices<block_start>_l=np.zeros(old_size)<for_stmt>x indices<block_start>_l[x]<augadd>1<block_end>magnifier=(1.0/_l)[indices]<line_sep><return>magnifier<block_end><def_stmt>get_random_remapping old_size new_size<block_start>base=np.arange(old_size)<line_sep>indices=np.concatenate([base np.random.choice(base new_size-old_size)])<line_sep>magnifier=get_magnifier(old_size indices)<line_sep><return>indices magnifier<block_end><class_stmt>BaseLayer<block_start>"""
_id, batch normalization, activation, dropout, ready
"""<def_stmt>__init__ self _id use_bn=<true> activation='relu' keep_prob=1.0 ready=<true> pre_activation=<true><block_start>self._id=_id<line_sep>self.use_bn=use_bn<line_sep>self.activation=activation<line_sep>self.keep_prob=keep_prob<line_sep>self.ready=ready<line_sep>self.pre_activation=pre_activation<line_sep>self._scope=<none><line_sep>self._init=<none><line_sep>self.output_op=<none><block_end>@property<def_stmt>id self<block_start><return>self._id<block_end>@id.setter<def_stmt>id self value<block_start>self._id=value<block_end>@property<def_stmt>init self<block_start><return>self._init<block_end>@property<def_stmt>param_initializer self<block_start><if_stmt>self._init<is><none><block_start><return><none><block_end>param_initializer={}<for_stmt>key self.variable_list.keys()<block_start><if_stmt>self._init[key]<is><not><none><block_start>param_initializer[key]=tf.constant_initializer(self._init[key])<block_end><block_end><if_stmt>len(param_initializer)<eq>0<block_start>param_initializer=<none><block_end><return>param_initializer<block_end><def_stmt>renew_init self net:BasicModel<block_start><if_stmt>net<is><none><block_start><return>copy.deepcopy(self._init)<block_end>self._init={}<for_stmt>key,var_name self.variable_list.items()<block_start>var=net.graph.get_tensor_by_name('%s/%s'%(self._scope var_name))<line_sep>self._init[key]=net.sess.run(var)<block_end><if_stmt>len(self._init)<eq>0<block_start>self._init=<none><block_end><return>copy.deepcopy(self._init)<block_end><def_stmt>copy self<block_start><return>self.set_from_config(self.get_config() layer_init=copy.deepcopy(self._init))<block_end><def_stmt>get_config self<block_start><return>{'_id':self.id 'use_bn':self.use_bn 'activation':self.activation 'keep_prob':self.keep_prob 'pre_activation':self.pre_activation }<block_end>@property<def_stmt>variable_list self<block_start>"""
beta: mean scale
gamma: variance scale
y = gamma * (x - moving_mean) / sqrt(epsilon + moving_variance) + beta
"""<if_stmt>self.use_bn<block_start><return>{'moving_mean':'BatchNorm/moving_mean:0' 'moving_variance':'BatchNorm/moving_variance:0' 'beta':'BatchNorm/beta:0' 'gamma':'BatchNorm/gamma:0' }<block_end><else_stmt><block_start><return>{}<block_end><block_end>@staticmethod<def_stmt>set_from_config layer_config layer_init<block_start><raise>NotImplementedError<block_end><def_stmt>build self _input net store_output_op<block_start><raise>NotImplementedError<block_end><def_stmt>prev_widen self indices magnifier noise=<none><block_start><raise>NotImplementedError<block_end><def_stmt>set_identity_layer self strict param noise<block_start><raise>NotImplementedError<block_end><def_stmt>widen_bn self indices magnifier noise=<none><block_start><if_stmt>self.use_bn<block_start>self._init['beta']=self._init['beta'][indices]<line_sep>self._init['gamma']=self._init['gamma'][indices]<line_sep>self._init['moving_mean']=self._init['moving_mean'][indices]<line_sep>self._init['moving_variance']=self._init['moving_variance'][indices]<block_end><block_end><def_stmt>set_bn_identity self strict=<true> param=<none> noise=<none><block_start><if_stmt>self.use_bn<block_start><if_stmt>strict<block_start>self._init['moving_mean']=param['moving_mean']<line_sep>self._init['moving_variance']=param['moving_variance']<line_sep>self._init['beta']=self._init['moving_mean']<line_sep>self._init['gamma']=np.sqrt(self._init['moving_variance']+param['epsilon'])<block_end><else_stmt># use default initialization for batch normalization layer
<block_start>self._init['moving_mean'],self._init['moving_variance']=<none> <none><line_sep>self._init['beta'],self._init['gamma']=<none> <none><block_end><block_end><block_end><block_end><class_stmt>ConvLayer(BaseLayer)<block_start><def_stmt>__init__ self _id filter_num kernel_size=3 strides=1 use_bn=<true> activation='relu' keep_prob=1.0 ready=<true> pre_activation=<true> **kwargs<block_start>BaseLayer.__init__(self _id use_bn activation keep_prob ready pre_activation)<line_sep>self.filter_num=filter_num<line_sep>self.kernel_size=kernel_size<line_sep>self.strides=strides<block_end>@property<def_stmt>layer_str self<block_start><return>'C%d,%d,%d'%(self.filter_num self.kernel_size self.strides)<block_end>@property<def_stmt>variable_list self<block_start>var_list={'kernel':'kernel:0'}<line_sep>var_list.update(super(ConvLayer self).variable_list)<line_sep><return>var_list<block_end><def_stmt>get_config self<block_start><return>{'name':'conv' 'filter_num':self.filter_num 'kernel_size':self.kernel_size 'strides':self.strides **super(ConvLayer self).get_config() }<block_end>@staticmethod<def_stmt>set_from_config layer_config layer_init=<none><block_start>conv_layer=ConvLayer(**layer_config)<line_sep>conv_layer._init=layer_init<line_sep><return>conv_layer<block_end><def_stmt>build self _input net:BasicModel store_output_op=<false><block_start>output=_input<if_stmt><not>self.ready<block_start><return>output<block_end><with_stmt>tf.variable_scope(self._id)<block_start>self._scope=tf.get_variable_scope().name<line_sep>param_initializer=self.param_initializer<if_stmt>self.pre_activation# batch normalization
<block_start><if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<line_sep># convolutional
output=BasicModel.conv2d(output self.filter_num self.kernel_size self.strides param_initializer=param_initializer)<block_end><else_stmt># convolutional
<block_start>output=BasicModel.conv2d(output self.filter_num self.kernel_size self.strides param_initializer=param_initializer)<line_sep># batch normalization
<if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<block_end># dropout
output=BasicModel.dropout(output self.keep_prob net.is_training)<block_end><if_stmt>store_output_op<block_start>self.output_op=output<block_end><return>output<block_end><def_stmt>widen_filters self new_filter_num noise=<none><block_start>"""
Increase the filter number of a conv layer while preserving the functionality
Proposed in 'Net2Net': https://arxiv.org/abs/1511.05641
"""<assert_stmt>new_filter_num<g>self.filter_num 'Invalid new filter number: %d'%new_filter_num<assert_stmt>self._init<is><not><none> 'Uninitialized layer'<line_sep>old_size,new_size=self.filter_num new_filter_num<line_sep>indices,magnifier=get_random_remapping(old_size new_size)<line_sep># more filters
self.filter_num=new_filter_num<line_sep>new_kernel=self._init['kernel'][: : : indices]<line_sep>new_kernel[: : : old_size:]=apply_noise(new_kernel[: : : old_size:] noise.get('wider'))<line_sep>self._init['kernel']=new_kernel<if_stmt><not>self.pre_activation# widen batch norm variables if use batch norm
<block_start>self.widen_bn(indices magnifier noise=noise)<block_end><return>indices magnifier<block_end><def_stmt>prev_widen self indices magnifier noise=<none><block_start><assert_stmt>self._init<is><not><none> 'Uninitialized layer'<line_sep># rescale kernel
self._init['kernel']=self._init['kernel'][: : indices :]<times>magnifier.reshape([1 1 -1 1])<if_stmt>self.pre_activation<block_start>self.widen_bn(indices magnifier noise=noise)<block_end><block_end><def_stmt>set_identity_layer self strict=<true> param=<none> noise=<none><block_start>self._init={}<line_sep>self.set_bn_identity(strict param noise=noise)<line_sep>mid=self.kernel_size<floordiv>2<line_sep>self._init['kernel']=np.zeros([self.kernel_size self.kernel_size self.filter_num self.filter_num])<line_sep>self._init['kernel'][mid mid]=np.eye(self.filter_num)<line_sep>self._init['kernel']=apply_noise(self._init['kernel'] noise.get('deeper'))<line_sep>self.ready=<true><block_end><def_stmt>remap self indices noise=<none><block_start>self.filter_num=len(indices)<line_sep>self._init['kernel']=self._init['kernel'][: : : indices]<line_sep>self._init['kernel']=apply_noise(self._init['kernel'] noise.get('wider'))<if_stmt><not>self.pre_activation<block_start>self.widen_bn(indices <none> noise=noise)<block_end><return>self<block_end><block_end><class_stmt>FCLayer(BaseLayer)<block_start><def_stmt>__init__ self _id units use_bn=<true> use_bias=<false> activation='relu' keep_prob=1.0 ready=<true> pre_activation=<false> **kwargs<block_start>BaseLayer.__init__(self _id use_bn activation keep_prob ready pre_activation)<line_sep>self.units=units<line_sep>self.use_bias=use_bias<block_end>@property<def_stmt>layer_str self<block_start><return>'FC%d'%self.units<block_end>@property<def_stmt>variable_list self<block_start>var_list={'W':'W:0'}<if_stmt>self.use_bias<block_start>var_list['bias']='bias:0'<block_end>var_list.update(super(FCLayer self).variable_list)<line_sep><return>var_list<block_end><def_stmt>get_config self<block_start><return>{'name':'fc' 'units':self.units 'use_bias':self.use_bias **super(FCLayer self).get_config() }<block_end>@staticmethod<def_stmt>set_from_config layer_config 
layer_init=<none><block_start>fc_layer=FCLayer(**layer_config)<line_sep>fc_layer._init=layer_init<line_sep><return>fc_layer<block_end><def_stmt>build self _input net:BasicModel store_output_op=<false><block_start>output=_input<if_stmt><not>self.ready<block_start><return>output<block_end><with_stmt>tf.variable_scope(self._id)<block_start>self._scope=tf.get_variable_scope().name<line_sep>param_initializer=self.param_initializer<line_sep># flatten if not
output=BasicModel.flatten(output)<if_stmt>self.pre_activation# batch normalization
<block_start><if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<line_sep># FC
output=BasicModel.fc_layer(output self.units self.use_bias param_initializer=param_initializer)<block_end><else_stmt># FC
<block_start>output=BasicModel.fc_layer(output self.units self.use_bias param_initializer=param_initializer)<line_sep># batch normalization
<if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<block_end># dropout
output=BasicModel.dropout(output self.keep_prob net.is_training)<block_end><if_stmt>store_output_op<block_start>self.output_op=output<block_end><return>output<block_end><def_stmt>widen_units self new_units_num noise=<none><block_start>"""
Increase the units number of a fc layer while preserving the functionality
Proposed in 'Net2Net': https://arxiv.org/abs/1511.05641
W: [in_dim, out_units]
bias: [out_units]
"""<assert_stmt>new_units_num<g>self.units 'Invalid new units number: %d'%new_units_num<assert_stmt>self._init<is><not><none> 'Uninitialized layer'<line_sep>old_size,new_size=self.units new_units_num<line_sep>indices,magnifier=get_random_remapping(old_size new_size)<line_sep># more units
self._init['W']=self._init['W'][: indices]<line_sep>self._init['W'][: old_size:]=apply_noise(self._init['W'][: old_size:] noise.get('wider'))<line_sep>self.units=new_units_num<line_sep># widen bias variable if exist
<if_stmt>self.use_bias<block_start>self._init['bias']=self._init['bias'][indices]<line_sep>self._init['bias'][old_size:]=apply_noise(self._init['bias'][old_size:] noise.get('wider'))<block_end><if_stmt><not>self.pre_activation# widen batch norm variables if use batch norm
<block_start>self.widen_bn(indices magnifier noise=noise)<block_end><return>indices magnifier<block_end><def_stmt>prev_widen self indices magnifier noise=<none><block_start><assert_stmt>self._init<is><not><none> 'Uninitialized layer'<line_sep># rescale W
self._init['W']=self._init['W'][indices]<times>magnifier.reshape([-1 1])<if_stmt>self.pre_activation<block_start>self.widen_bn(indices magnifier noise=noise)<block_end><block_end><def_stmt>set_identity_layer self strict=<true> param=<none> noise=<none><block_start>self._init={}<line_sep>self.set_bn_identity(strict param noise=noise)<if_stmt>self.use_bias<block_start>self._init['bias']=[0.0]<times>self.units<block_end>self._init['W']=np.eye(self.units)<line_sep>self._init['W']=apply_noise(self._init['W'] noise.get('deeper'))<line_sep>self.ready=<true><block_end><def_stmt>remap self indices noise=<none><block_start>self.units=len(indices)<line_sep>self._init['W']=self._init['W'][: indices]<line_sep>self._init['W']=apply_noise(self._init['W'] noise.get('wider'))<if_stmt>self.use_bias<block_start>self._init['bias']=self._init['bias'][indices]<block_end><if_stmt><not>self.pre_activation<block_start>self.widen_bn(indices <none> noise=noise)<block_end><return>self<block_end><block_end><class_stmt>PoolLayer(BaseLayer)<block_start><def_stmt>__init__ self _id _type kernel_size=2 strides=2 use_bn=<false> activation=<none> keep_prob=1.0 ready=<true> pre_activation=<true> **kwargs<block_start>BaseLayer.__init__(self _id use_bn activation keep_prob ready pre_activation)<line_sep>self._type=_type<line_sep>self.kernel_size=kernel_size<line_sep>self.strides=strides<block_end>@property<def_stmt>layer_str self<block_start><return>'P%d,%d'%(self.kernel_size self.strides)<block_end><def_stmt>get_config self<block_start><return>{'name':'pool' '_type':self._type 'kernel_size':self.kernel_size 'strides':self.strides **super(PoolLayer self).get_config() }<block_end>@staticmethod<def_stmt>set_from_config layer_config layer_init=<none><block_start>pool_layer=PoolLayer(**layer_config)<line_sep>pool_layer._init=layer_init<line_sep><return>pool_layer<block_end><def_stmt>build self _input net:BasicModel 
store_output_op=<false><block_start>output=_input<if_stmt><not>self.ready<block_start><return>output<block_end><with_stmt>tf.variable_scope(self._id)<block_start>self._scope=tf.get_variable_scope().name<line_sep>param_initializer=self.param_initializer<if_stmt>self.pre_activation# batch normalization
<block_start><if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<line_sep># Pooling
<if_stmt>self._type<eq>'avg'<block_start>output=BasicModel.avg_pool(output k=self.kernel_size s=self.strides)<block_end><elif_stmt>self._type<eq>'max'<block_start>output=BasicModel.max_pool(output k=self.kernel_size s=self.strides)<block_end><else_stmt><block_start><raise>ValueError('Do not support the pooling type: %s'%self._type)<block_end><block_end><else_stmt># Pooling
<block_start><if_stmt>self._type<eq>'avg'<block_start>output=BasicModel.avg_pool(output k=self.kernel_size s=self.strides)<block_end><elif_stmt>self._type<eq>'max'<block_start>output=BasicModel.max_pool(output k=self.kernel_size s=self.strides)<block_end><else_stmt><block_start><raise>ValueError('Do not support the pooling type: %s'%self._type)<block_end># batch normalization
<if_stmt>self.use_bn<block_start>output=BasicModel.batch_norm(output net.is_training net.net_config.bn_epsilon net.net_config.bn_decay param_initializer=param_initializer)<block_end># activation
output=BasicModel.activation(output self.activation)<block_end># dropout
output=BasicModel.dropout(output self.keep_prob net.is_training)<block_end><if_stmt>store_output_op<block_start>self.output_op=output<block_end><return>output<block_end><def_stmt>set_identity_layer self strict=<true> param=<none> noise=<none><block_start><raise>ValueError('Pooling layer can never be an identity layer')<block_end><def_stmt>prev_widen self indices magnifier noise=<none><block_start>self.widen_bn(indices magnifier noise=noise)<block_end><block_end> |
#implements Caesar substitution cipher
#Author: <NAME>
#Created: 2012-04-28
<import_from_stmt>pycipher.base Cipher<class_stmt>Caesar(Cipher)<block_start>"""The Caesar Cipher has a key consisting of an integer 1-25.
This cipher encrypts a letter according to the following equation::
c = (p + key)%26
where c is the ciphertext letter, p the plaintext letter.
For more details on the Caesar cipher, see http://www.practicalcryptography.com/ciphers/caesar-cipher/
:param key: The additive key. Allowable values are integers 0-25.
"""<def_stmt>__init__ self key=13<block_start>self.key=key%26<block_end><def_stmt>encipher self string keep_punct=<false><block_start>r"""Encipher string using Caesar cipher according to initialised key.
Example::
ciphertext = Caesar(3).encipher(plaintext)
:param string: The string to encipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The enciphered string.
"""<if_stmt><not>keep_punct<block_start>string=self.remove_punctuation(string)<block_end>ret=''<for_stmt>c string<block_start><if_stmt>c.isalpha()<block_start>ret<augadd>self.i2a(self.a2i(c)+self.key)<block_end><else_stmt><block_start>ret<augadd>c<block_end><block_end><return>ret<block_end><def_stmt>decipher self string keep_punct=<false><block_start>r"""Decipher string using Caesar cipher according to initialised key.
Example::
plaintext = Caesar(3).decipher(ciphertext)
:param string: The string to decipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The deciphered string.
"""<if_stmt><not>keep_punct<block_start>string=self.remove_punctuation(string)<block_end>ret=''<for_stmt>c string<block_start><if_stmt>c.isalpha()<block_start>ret<augadd>self.i2a(self.a2i(c)-self.key)<block_end><else_stmt><block_start>ret<augadd>c<block_end><block_end><return>ret<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>print('use "import pycipher" to access functions')<block_end> |
# The following code is designed to perform a searchlight at every voxel in the brain looking at the difference in pattern similarity between musical genres (i.e. classical and jazz). In the study where the data was obtained, subjects were required to listen to a set of 16 songs twice (two runs) in an fMRI scanner. The 16 songs consisted of 8 jazz songs and 8 classical songs. The goal of this searchlight is to find voxels that seem to represent distinct information about these different musical genres. Presumably, these voxels would be found in the auditory cortex which happens to be the most organized system in the brain for processing sound information.
<import_stmt>numpy<as>np<import_stmt>time<import_from_stmt>mpi4py MPI<import_from_stmt>nilearn.image load_img<import_stmt>sys<import_from_stmt>brainiak.searchlight.searchlight Searchlight<import_from_stmt>scipy stats<import_from_stmt>scipy.sparse random<import_stmt>os<line_sep># MPI variables
comm=MPI.COMM_WORLD<line_sep>rank=comm.rank<line_sep>size=comm.size<line_sep># Generate random data
<if_stmt>rank<eq>0<block_start>np.random.seed(0)<line_sep>data1_rand=np.random.rand(91 109 91 16)<line_sep>data2_rand=np.random.rand(91 109 91 16)<line_sep>classical=np.random.rand(2600)<line_sep>jazz=np.random.rand(2600)<line_sep>d1_reshape=np.reshape(data1_rand (91<times>109<times>91 16))<line_sep>d2_reshape=np.reshape(data2_rand (91<times>109<times>91 16))<line_sep>a1=load_img('a1plus_2mm.nii.gz')<line_sep>a1_vec=np.reshape(a1.get_data() (91<times>109<times>91))<line_sep>a1_idx=np.nonzero(a1_vec)<for_stmt>i range(8)<block_start>d1_reshape[a1_idx[0] i]<augadd>classical<line_sep>d1_reshape[a1_idx[0] i+8]<augadd>jazz<line_sep>d2_reshape[a1_idx[0] i]<augadd>classical<line_sep>d2_reshape[a1_idx[0] i+8]<augadd>jazz<block_end>data1=np.reshape(d1_reshape (91 109 91 16))<line_sep>data2=np.reshape(d2_reshape (91 109 91 16))<line_sep># Flatten data, then zscore data, then reshape data back into MNI coordinate space
data1=stats.zscore(np.reshape(data1 (91<times>109<times>91 16)))<line_sep>data1=np.reshape(data1 (91 109 91 16))<line_sep>data2=stats.zscore(np.reshape(data2 (91<times>109<times>91 16)))<line_sep>data2=np.reshape(data2 (91 109 91 16))<block_end><else_stmt><block_start>data1=<none><line_sep>data2=<none><block_end># Load mask
mask_img=load_img('MNI152_T1_2mm_brain_mask.nii')<line_sep>mask_img=mask_img.get_data()<line_sep># Definte function that takes the difference between within vs. between genre comparisons
<def_stmt>corr2_coeff AB msk myrad bcast_var<block_start><if_stmt><not>np.all(msk)<block_start><return><none><block_end>A,B=(AB[0] AB[1])<line_sep>A=A.reshape((-1 A.shape[-1]))<line_sep>B=B.reshape((-1 B.shape[-1]))<line_sep>corrAB=np.corrcoef(A.T B.T)[16: :16]<line_sep>classical_within=np.mean(corrAB[0:8 0:8])<line_sep>jazz_within=np.mean(corrAB[8:16 8:16])<line_sep>classJazz_between=np.mean(corrAB[8:16 0:8])<line_sep>jazzClass_between=np.mean(corrAB[0:8 8:16])<line_sep>within_genre=np.mean([classical_within jazz_within])<line_sep>between_genre=np.mean([classJazz_between jazzClass_between])<line_sep>diff=within_genre-between_genre<line_sep><return>diff<block_end>comm.Barrier()<line_sep>begin_time=time.time()<line_sep>comm.Barrier()<line_sep># Create and run searchlight
sl=Searchlight(sl_rad=1 max_blk_edge=5)<line_sep>sl.distribute([data1 data2] mask_img)<line_sep>sl.broadcast(<none>)<line_sep>global_outputs=sl.run_searchlight(corr2_coeff)<line_sep>comm.Barrier()<line_sep>end_time=time.time()<line_sep>comm.Barrier()<line_sep># Plot searchlight results
<if_stmt>rank<eq>0<block_start>print('Searchlight Done: ' end_time-begin_time)<line_sep>maxval=np.max(global_outputs[np.not_equal(global_outputs <none>)])<line_sep>minval=np.min(global_outputs[np.not_equal(global_outputs <none>)])<line_sep>global_outputs=np.array(global_outputs dtype=np.float)<line_sep>print(global_outputs)<line_sep># Save searchlight images
out_dir="searchlight_images"<if_stmt><not>os.path.exists(out_dir)<block_start>os.makedirs(out_dir)<block_end><import_stmt>matplotlib.pyplot<as>plt<for_stmt>(cnt img) enumerate(global_outputs)<block_start>plt.imshow(img vmin=minval vmax=maxval)<line_sep>plt.colorbar()<line_sep>plt.savefig('searchlight_images/'+'img'+str(cnt)+'.png')<line_sep>plt.clf()<block_end><block_end> |
# SPDX-FileCopyrightText: 2021 <NAME> for Adafruit Industries
# SPDX-License-Identifier: Unlicense
"""
CircuitPython Essentials Storage CP Filesystem code.py file
"""<import_stmt>time<import_stmt>board<import_stmt>microcontroller<import_stmt>neopixel<line_sep>pixel=neopixel.NeoPixel(board.NEOPIXEL 1)<try_stmt><block_start><with_stmt>open("/temperature.txt" "a")<as>temp_log<block_start><while_stmt><true># The microcontroller temperature in Celsius. Include the
# math to do the C to F conversion here, if desired.
<block_start>temperature=microcontroller.cpu.temperature<line_sep># Write the temperature to the temperature.txt file every 10 seconds.
temp_log.write('{0:.2f}\n'.format(temperature))<line_sep>temp_log.flush()<line_sep># Blink the NeoPixel on every write...
pixel.fill((255 0 0))<line_sep>time.sleep(1)# ...for one second.
pixel.fill((0 0 0))# Then turn it off...
time.sleep(9)<block_end><block_end><block_end># ...for the other 9 seconds.
<except_stmt>OSError<as>e# When the filesystem is NOT writable by CircuitPython...
<block_start>delay=0.5# ...blink the NeoPixel every half second.
<if_stmt>e.args[0]<eq>28# If the file system is full...
<block_start>delay=0.15# ...blink the NeoPixel every 0.15 seconds!
<block_end><while_stmt><true><block_start>pixel.fill((255 0 0))<line_sep>time.sleep(delay)<line_sep>pixel.fill((0 0 0))<line_sep>time.sleep(delay)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_stmt>itertools<import_from_stmt>collections Iterable<import_stmt>torch<class_stmt>GeneralCollateFunction(object)<block_start>"""A Generic `Collate_fn`.
For finetuning-train.
"""<def_stmt>__init__ self trfms times<block_start>"""Initialize a `GeneralCollateFunction`.
Args:
trfms (list): A list of torchvision transforms.
times (int): Specify the augment times. (0 or 1 for not to augment)
"""<line_sep>super(GeneralCollateFunction self).__init__()<line_sep>self.trfms=trfms<line_sep>self.times=times<block_end><def_stmt>method self batch<block_start>"""Apply transforms and augmentations on a batch.
The images and targets in a batch are augmented by the number of `self.times` and the targets are augmented
to match the shape of images.
Args:
batch (list of tuple): A batch returned by dataset.
Returns:
tuple: A tuple of (images, targets), here len(images)=len(targets).
"""<try_stmt><block_start>images,targets=zip(*batch)<line_sep>images=list(itertools.chain.from_iterable([[image]<times>self.times<for>image images]))<line_sep>images=[self.trfms(image).unsqueeze(0)<for>image images]<line_sep>targets=list(itertools.chain.from_iterable([[target]<times>self.times<for>target targets]))<line_sep>targets=[torch.tensor([target])<for>target targets]<assert_stmt>len(images)<eq>len(targets) "Inconsistent number of images and labels!"<line_sep>images=torch.cat(images)<line_sep>targets=torch.tensor(targets dtype=torch.int64)<line_sep><return>images targets<block_end><except_stmt>TypeError<block_start><raise>TypeError("Error, probably because the transforms are passed to the dataset, the transforms should be "<concat>"passed to the collate_fn")<block_end><block_end><def_stmt>__call__ self batch<block_start><return>self.method(batch)<block_end><block_end><class_stmt>FewShotAugCollateFunction(object)<block_start>"""`Collate_fn` for few-shot dataloader.
For finetuning-val, finetuning-test and meta/metric-train/val/test.
"""<def_stmt>__init__ self trfms times times_q way_num shot_num query_num episode_size<block_start>"""Initialize a `FewShotAugCollateFunction`.
Args:
trfms (list or tuple of list): A torchvision transfrom list of a tuple of 2 torchvision transform list.
if `list`, both support and query images will be applied the same transforms, otherwise the 1st one will
apply to support images and the 2nd one will apply to query images.
times (int): Augment times of support iamges
times_q (int ): Augment times of query images
way_num (int): Few-shot way setting
shot_num (int): Few-shot shot setting
query_num (int): Few-shot query setting
episode_size (int): Few-shot episode size setting
"""<line_sep>super(FewShotAugCollateFunction self).__init__()<try_stmt><block_start>self.trfms_support,self.trfms_query=trfms<block_end><except_stmt>Exception<block_start>self.trfms_support=self.trfms_query=trfms<block_end># Allow different trfms: when single T, apply to S and Q equally;
# When trfms=(T,T), apply to S and Q separately;
self.times=1<if>times<eq>0<else>times<line_sep>self.times_q=1<if>times_q<eq>0<else>times_q<line_sep>self.way_num=way_num<line_sep>self.shot_num=shot_num<line_sep>self.query_num=query_num<line_sep>self.shot_aug=self.shot_num<times>self.times<line_sep>self.query_aug=self.query_num<times>self.times_q<line_sep>self.episode_size=episode_size<block_end><def_stmt>method self batch<block_start>"""Apply transforms and augmentations on a **few-shot** batch.
The samples of query and support are augmented separately.
For example: if aug_times=5, then 01234 -> 0000011111222223333344444.
Args:
batch (list of tuple): A batch returned by a few-shot dataset.
Returns:
tuple: a tuple of (images, gt_labels).
"""<try_stmt><block_start>images,labels=zip(*batch)<line_sep># images = [img_label_tuple[0] for img_label_tuple in batch] # 111111222222 (5s1q for example)
images_split_by_label=[images[index:index+self.shot_num+self.query_num]<for>index range(0 len(images) self.shot_num+self.query_num)]<line_sep># 111111; 222222 ;
images_split_by_label_type=[[spt_qry[:self.shot_num] spt_qry[self.shot_num:]]<for>spt_qry images_split_by_label]<line_sep># 11111,1;22222,2; == [shot, query]
# aug support
# fixme: should have a elegant method
# 1111111111,1;2222222222,2 (aug_time = 2 for example)
<for_stmt>cls images_split_by_label_type<block_start>cls[0]=cls[0]<times>self.times# aug support
cls[1]=cls[1]<times>self.times_q# aug query
<block_end># flatten and apply trfms
flat=<lambda>t:[x<for>sub t<for>x flat(sub)]<if>isinstance(t Iterable)<else>[t]<line_sep>images=flat(images_split_by_label_type)<line_sep># 1111111111122222222222
# images = [self.trfms(image) for image in images] # list of tensors([c, h, w])
images=[self.trfms_support(image)<if>index%(self.shot_aug+self.query_aug)<l>self.shot_aug<else>self.trfms_query(image)<for>index,image enumerate(images)]<line_sep># list of tensors([c, h, w])
images=torch.stack(images)# [b', c, h, w] <- b' = b after aug
# labels
# global_labels = torch.tensor(labels,dtype=torch.int64)
# global_labels = torch.tensor(labels,dtype=torch.int64).reshape(self.episode_size,self.way_num,
# self.shot_num*self.times+self.query_num)
global_labels=torch.tensor(labels dtype=torch.int64).reshape(self.episode_size self.way_num self.shot_num+self.query_num)<line_sep>global_labels=(global_labels[<ellipsis> 0].unsqueeze(-1).repeat(1 1 self.shot_num<times>self.times+self.query_num<times>self.times_q ))<line_sep><return>images global_labels<line_sep># images.shape = [e*w*(q+s) x c x h x w], global_labels.shape = [e x w x (q+s)]
<block_end><except_stmt>TypeError<block_start><raise>TypeError("Error, probably because the transforms are passed to the dataset, the transforms should be "<concat>"passed to the collate_fn")<block_end><block_end><def_stmt>__call__ self batch<block_start><return>self.method(batch)<block_end><block_end> |
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>.multi_pose MultiPoseTrainer<line_sep>train_factory={'multi_pose':MultiPoseTrainer }<line_sep> |
<import_from_future_stmt> print_function<line_sep>#!/usr/bin/env python
""" An illustration of using the NSGA-II multi-objective optimization algorithm
on Constrained Multi-Objective Optimization benchmark function. """<line_sep>__author__='<NAME>, <EMAIL>'<import_from_stmt>pybrain.optimization ConstMultiObjectiveGA<import_from_stmt>pybrain.rl.environments.functions.multiobjective ConstDeb ConstSrn ConstOsy ConstTnk ConstBnh<import_stmt>pylab<import_from_stmt>scipy zeros array<line_sep># The Deb function
#f = ConstDeb()
# The Srinivas & Deb function
#f = ConstSrn()
# The Osyczka & Kundu function
#f = ConstOsy()
# The Tanaka function
#f = ConstTnk()
# The Binh & Korn function
f=ConstBnh()<line_sep># start at the origin
x0=zeros(f.indim)<line_sep>x0=array([min_<for>min_,max_ f.xbound])<line_sep># the optimization for a maximum of 25 generations
n=ConstMultiObjectiveGA(f x0 storeAllEvaluations=<true> populationSize=100 eliteProportion=1.0 topProportion=1.0 mutationProb=1.0 mutationStdDev=0.3 storeAllPopulations=<true> allowEquality=<false>)<line_sep>print('Start Learning')<line_sep>n.learn(50)<line_sep>print('End Learning')<line_sep># plotting the results (blue = all evaluated points, red = resulting pareto front)
print('Plotting the Results')<line_sep>print('All Evaluations.... take some time')<for_stmt>x n._allEvaluations<block_start><if_stmt>x[1]<block_start>pylab.plot([x[0][0]] [x[0][1]] 'b.')<block_end><else_stmt><block_start>pylab.plot([x[0][0]] [x[0][1]] 'r.')<block_end><block_end><for_stmt>x n.bestEvaluation<block_start>pylab.plot([x[0][0]] [x[0][1]] 'go')<block_end>pylab.show()<line_sep>print('Pareto Front')<for_stmt>x n.bestEvaluation<block_start>pylab.plot([x[0][0]] [x[0][1]] 'go')<block_end>pylab.show()<line_sep>print('===========')<line_sep>print('= Results =')<line_sep>print('===========')<line_sep>'''
i=0
for gen in n._allGenerations:
print 'Generation: ',i
for j in range(len(gen[1])):
print gen[1].keys()[j],gen[1].values()[j]
i+=1
'''<line_sep>print('Population size ' n.populationSize)<line_sep>print('Elitism Proportion ' n.eliteProportion)<line_sep>print('Mutation Probability ' n.mutationProb)<line_sep>print('Mutation Std Deviation ' n.mutationStdDev)<line_sep>print('Objective Evaluation number ' n.numEvaluations)<line_sep>print('last generation Length of bestEvaluation ' len(n.bestEvaluation))<line_sep>print('Best Evaluable : Best Evaluation')<for_stmt>i range(len(n.bestEvaluation))<block_start><assert_stmt>len(n.bestEvaluation)<eq>len(n.bestEvaluable)<line_sep>print(n.bestEvaluable[i] ':' n.bestEvaluation[i])<block_end> |
<import_from_stmt>dassl.utils Registry check_availability<line_sep>EVALUATOR_REGISTRY=Registry("EVALUATOR")<def_stmt>build_evaluator cfg **kwargs<block_start>avai_evaluators=EVALUATOR_REGISTRY.registered_names()<line_sep>check_availability(cfg.TEST.EVALUATOR avai_evaluators)<if_stmt>cfg.VERBOSE<block_start>print("Loading evaluator: {}".format(cfg.TEST.EVALUATOR))<block_end><return>EVALUATOR_REGISTRY.get(cfg.TEST.EVALUATOR)(cfg **kwargs)<block_end> |
<import_from_stmt>hybrid_astar_planner.HybridAStar.hybrid_astar_wrapper apply_hybrid_astar<import_stmt>numpy<as>np<import_from_stmt>pylot.planning.planner Planner<class_stmt>HybridAStarPlanner(Planner)<block_start>"""Wrapper around the Hybrid A* planner.
Note:
Details can be found at `Hybrid A* Planner`_.
Args:
world: (:py:class:`~pylot.planning.world.World`): A reference to the
planning world.
flags (absl.flags): Object to be used to access absl flags.
.. _Hybrid A* Planner:
https://github.com/erdos-project/hybrid_astar_planner
"""<def_stmt>__init__ self world flags logger<block_start>super().__init__(world flags logger)<line_sep>self._hyperparameters={"step_size":flags.step_size_hybrid_astar "max_iterations":flags.max_iterations_hybrid_astar "completion_threshold":flags.completion_threshold "angle_completion_threshold":flags.angle_completion_threshold "rad_step":flags.rad_step "rad_upper_range":flags.rad_upper_range "rad_lower_range":flags.rad_lower_range "obstacle_clearance":flags.obstacle_clearance_hybrid_astar "lane_width":flags.lane_width_hybrid_astar "radius":flags.radius "car_length":flags.car_length "car_width":flags.car_width }<block_end><def_stmt>run self timestamp ttd=<none><block_start>"""Runs the planner.
Note:
The planner assumes that the world is up-to-date.
Returns:
:py:class:`~pylot.planning.waypoints.Waypoints`: Waypoints of the
planned trajectory.
"""<line_sep>obstacle_list=self._world.get_obstacle_list()<if_stmt>len(obstacle_list)<eq>0# Do not use Hybrid A* if there are no obstacles.
<block_start>output_wps=self._world.follow_waypoints(self._flags.target_speed)<block_end><else_stmt># Hybrid a* does not take into account the driveable region.
# It constructs search space as a top down, minimum bounding
# rectangle with padding in each dimension.
<block_start>self._logger.debug("@{}: Hyperparameters: {}".format(timestamp self._hyperparameters))<line_sep>initial_conditions=self._compute_initial_conditions(obstacle_list)<line_sep>self._logger.debug("@{}: Initial conditions: {}".format(timestamp initial_conditions))<line_sep>path_x,path_y,_,success=apply_hybrid_astar(initial_conditions self._hyperparameters)<if_stmt>success<block_start>self._logger.debug("@{}: Hybrid A* succeeded".format(timestamp))<line_sep>speeds=[self._flags.target_speed]<times>len(path_x)<line_sep>self._logger.debug("@{}: Hybrid A* Path X: {}".format(timestamp path_x.tolist()))<line_sep>self._logger.debug("@{}: Hybrid A* Path Y: {}".format(timestamp path_y.tolist()))<line_sep>self._logger.debug("@{}: Hybrid A* Speeds: {}".format(timestamp speeds))<line_sep>output_wps=self.build_output_waypoints(path_x path_y speeds)<block_end><else_stmt><block_start>self._logger.error("@{}: Hybrid A* failed. "<concat>"Sending emergency stop.".format(timestamp))<line_sep>output_wps=self._world.follow_waypoints(0)<block_end><block_end><return>output_wps<block_end><def_stmt>_compute_initial_conditions self obstacles<block_start>ego_transform=self._world.ego_transform<line_sep>start=np.array([ego_transform.location.x ego_transform.location.y np.deg2rad(ego_transform.rotation.yaw) ])<line_sep>self._world.waypoints.remove_completed(ego_transform.location)<line_sep>end_index=min(self._flags.num_waypoints_ahead len(self._world.waypoints.waypoints)-1)<if_stmt>end_index<l>0# If no more waypoints left. Then our location is our end wp.
<block_start>self._logger.debug("@{}: No more waypoints left")<line_sep>end_wp=ego_transform<block_end><else_stmt><block_start>end_wp=self._world.waypoints.waypoints[end_index]<block_end>end=np.array([end_wp.location.x end_wp.location.y np.deg2rad(ego_transform.rotation.yaw)])<line_sep>initial_conditions={"start":start "end":end "obs":obstacles }<line_sep><return>initial_conditions<block_end><block_end> |
<import_stmt>unittest<import_stmt>numpy<import_from_stmt>pyscf.pbc gto<import_from_stmt>pyscf.pbc scf cc<import_from_stmt>pyscf cc<as>mol_cc<import_from_stmt>pyscf.pbc.tools.pbc super_cell<line_sep>#from pyscf import lib
#from pyscf.pbc import gto
#from pyscf.pbc import scf,cc
#from pyscf.pbc.cc import kccsd_uhf
#from pyscf.pbc.cc import kccsd
#from pyscf.pbc.cc import eom_kccsd_ghf
#from pyscf.pbc.cc import eom_kccsd_uhf
#from pyscf.pbc.lib import kpts_helper
#from pyscf.pbc.tools.pbc import super_cell
# generating the cell
cell=gto.M(unit='B' a=[[0. 3.37013733 3.37013733] [3.37013733 0. 3.37013733] [3.37013733 3.37013733 0.]] mesh=[13]<times>3 atom='''He 0 0 0
He 1.68506866 1.68506866 1.68506866''' basis=[[0 (1. 1.)] [0 (.5 1.)]] verbose=0 )<line_sep>nmp=[3 3 1]<line_sep># treating supercell at gamma point
supcell=super_cell(cell nmp)<line_sep>gmf=scf.UHF(supcell exxdiv=<none>)<line_sep>ehf=gmf.kernel()<line_sep>gcc=cc.UCCSD(gmf)<line_sep>ecc,t1,t2=gcc.kernel()<line_sep>print('UHF energy (supercell) %f \n'%(float(ehf)/numpy.prod(nmp)+4.343308413289))<line_sep>print('UCCSD correlation energy (supercell) %f \n'%(float(ecc)/numpy.prod(nmp)+0.009470753047083676))<line_sep># treating mesh of k points
kpts=cell.make_kpts(nmp)<line_sep>kpts<augsub>kpts[0]<line_sep>kmf=scf.KUHF(cell kpts exxdiv=<none>)<line_sep>ehf=kmf.kernel()<line_sep>kcc=cc.KUCCSD(kmf)<line_sep>ecc,t1,t2=kcc.kernel()<line_sep>print('UHF energy (kpts) %f \n'%(float(ehf+4.343308413289)))<line_sep>print('UCCSD correlation energy (kpts) %f \n'%(float(ecc+0.009470753047083676)))<line_sep> |
<import_stmt>warnings<import_stmt>collections<import_from_stmt>functools partial<import_stmt>numpy<as>np<import_from_stmt>menpo.base name_of_callable<import_from_stmt>menpo.shape TriMesh<import_from_stmt>menpo.transform PiecewiseAffine<def_stmt>check_diagonal diagonal<block_start>r"""
Checks that the diagonal length used to normalize the images' size is
``>= 20``.
Parameters
----------
diagonal : `int`
The value to check.
Returns
-------
diagonal : `int`
The value if it's correct.
Raises
------
ValueError
diagonal must be >= 20 or None
"""<if_stmt>diagonal<is><not><none><and>diagonal<l>20<block_start><raise>ValueError("diagonal must be >= 20 or None")<block_end><return>diagonal<block_end><def_stmt>check_landmark_trilist image transform group=<none><block_start>r"""
Checks that the provided image has a triangulated shape (thus an isntance of
`menpo.shape.TriMesh`) and the transform is `menpo.transform.PiecewiseAffine`
Parameters
----------
image : `menpo.image.Image` or subclass
The input image.
transform : `menpo.transform.PiecewiseAffine`
The transform object.
group : `str` or ``None``, optional
The group of the shape to check.
Raises
------
Warning
The given images do not have an explicit triangulation applied. A
Delaunay Triangulation will be computed and used for warping. This may
be suboptimal and cause warping artifacts.
"""<line_sep>shape=image.landmarks[group].lms<line_sep>check_trilist(shape transform)<block_end><def_stmt>check_trilist shape transform<block_start>r"""
Checks that the provided shape is triangulated (thus an isntance of
`menpo.shape.TriMesh`) and the transform is `menpo.transform.PiecewiseAffine`
Parameters
----------
shape : `menpo.shape.TriMesh`
The input shape (usually the reference/mean shape of a model).
transform : `menpo.transform.PiecewiseAffine`
The transform object.
Raises
------
Warning
The given images do not have an explicit triangulation applied. A
Delaunay Triangulation will be computed and used for warping. This may
be suboptimal and cause warping artifacts.
"""<if_stmt><not>isinstance(shape TriMesh)<and>isinstance(transform PiecewiseAffine)<block_start>warnings.warn('The given images do not have an explicit triangulation '<concat>'applied. A Delaunay Triangulation will be computed '<concat>'and used for warping. This may be suboptimal and cause '<concat>'warping artifacts.')<block_end><block_end><def_stmt>check_scales scales<block_start>r"""
Checks that the provided `scales` argument is either `int` or `float` or an
iterable of those. It makes sure that it returns a `list` of `scales`.
Parameters
----------
scales : `int` or `float` or `list/tuple` of those
The value to check.
Returns
-------
scales : `list` of `int` or `float`
The scales in a list.
Raises
------
ValueError
scales must be an int/float or a list/tuple of int/float
"""<if_stmt>isinstance(scales (int float))<block_start><return>[scales]<block_end><elif_stmt>len(scales)<eq>1<and>isinstance(scales[0] (int float))<block_start><return>list(scales)<block_end><elif_stmt>len(scales)<g>1<block_start><return>check_scales(scales[0])+check_scales(scales[1:])<block_end><else_stmt><block_start><raise>ValueError("scales must be an int/float or a list/tuple of "<concat>"int/float")<block_end><block_end><def_stmt>check_multi_scale_param n_scales types param_name param<block_start>r"""
General function for checking a parameter defined for multiple scales. It
raises an error if the parameter is not an iterable with the correct size and
correct types.
Parameters
----------
n_scales : `int`
The number of scales.
types : `tuple`
The `tuple` of variable types that the parameter is allowed to have.
param_name : `str`
The name of the parameter.
param : `types`
The parameter value.
Returns
-------
param : `list` of `types`
The list of values per scale.
Raises
------
ValueError
{param_name} must be in {types} or a list/tuple of {types} with the same
length as the number of scales
"""<line_sep>error_msg="{0} must be in {1} or a list/tuple of "<concat>"{1} with the same length as the number "<concat>"of scales".format(param_name types)<line_sep># Could be a single value - or we have an error
<if_stmt>isinstance(param types)<block_start><return>[param]<times>n_scales<block_end><elif_stmt><not>isinstance(param collections.Iterable)<block_start><raise>ValueError(error_msg)<block_end># Must be an iterable object
len_param=len(param)<line_sep>isinstance_all_in_param=all(isinstance(p types)<for>p param)<if_stmt>len_param<eq>1<and>isinstance_all_in_param<block_start><return>list(param)<times>n_scales<block_end><elif_stmt>len_param<eq>n_scales<and>isinstance_all_in_param<block_start><return>list(param)<block_end><else_stmt><block_start><raise>ValueError(error_msg)<block_end><block_end><def_stmt>check_callable callables n_scales<block_start>r"""
Checks the callable type per level.
Parameters
----------
callables : `callable` or `list` of `callables`
The callable to be used per scale.
n_scales : `int`
The number of scales.
Returns
-------
callable_list : `list`
A `list` of callables.
Raises
------
ValueError
callables must be a callable or a list/tuple of callables with the same
length as the number of scales
"""<if_stmt>callable(callables)<block_start><return>[callables]<times>n_scales<block_end><elif_stmt>len(callables)<eq>1<and>np.alltrue([callable(f)<for>f callables])<block_start><return>list(callables)<times>n_scales<block_end><elif_stmt>len(callables)<eq>n_scales<and>np.alltrue([callable(f)<for>f callables])<block_start><return>list(callables)<block_end><else_stmt><block_start><raise>ValueError("callables must be a callable or a list/tuple of "<concat>"callables with the same length as the number "<concat>"of scales")<block_end><block_end><def_stmt>check_patch_shape patch_shape n_scales<block_start>r"""
Function for checking a multi-scale `patch_shape` parameter value.
Parameters
----------
patch_shape : `list/tuple` of `int/float` or `list` of those
The patch shape per scale
n_scales : `int`
The number of scales.
Returns
-------
patch_shape : `list` of `list/tuple` of `int/float`
The list of patch shape per scale.
Raises
------
ValueError
patch_shape must be a list/tuple of int or a list/tuple of lit/tuple of
int/float with the same length as the number of scales
"""<if_stmt>len(patch_shape)<eq>2<and>isinstance(patch_shape[0] int)<block_start><return>[patch_shape]<times>n_scales<block_end><elif_stmt>len(patch_shape)<eq>1<block_start><return>check_patch_shape(patch_shape[0] 1)<block_end><elif_stmt>len(patch_shape)<eq>n_scales<block_start>l1=check_patch_shape(patch_shape[0] 1)<line_sep>l2=check_patch_shape(patch_shape[1:] n_scales-1)<line_sep><return>l1+l2<block_end><else_stmt><block_start><raise>ValueError("patch_shape must be a list/tuple of int or a "<concat>"list/tuple of lit/tuple of int/float with the "<concat>"same length as the number of scales")<block_end><block_end><def_stmt>check_max_components max_components n_scales var_name<block_start>r"""
Checks the maximum number of components per scale. It must be ``None`` or
`int` or `float` or a `list` of those containing ``1`` or ``{n_scales}``
elements.
Parameters
----------
max_components : ``None`` or `int` or `float` or a `list` of those
The value to check.
n_scales : `int`
The number of scales.
var_name : `str`
The name of the variable.
Returns
-------
max_components : `list` of ``None`` or `int` or `float`
The list of max components per scale.
Raises
------
ValueError
{var_name} must be None or an int > 0 or a 0 <= float <= 1 or a list of
those containing 1 or {n_scales} elements
"""<line_sep>str_error=("{} must be None or an int > 0 or a 0 <= float <= 1 or "<concat>"a list of those containing 1 or {} elements").format(var_name n_scales)<if_stmt><not>isinstance(max_components (list tuple))<block_start>max_components_list=[max_components]<times>n_scales<block_end><elif_stmt>len(max_components)<eq>1<block_start>max_components_list=[max_components[0]]<times>n_scales<block_end><elif_stmt>len(max_components)<eq>n_scales<block_start>max_components_list=max_components<block_end><else_stmt><block_start><raise>ValueError(str_error)<block_end><for_stmt>comp max_components_list<block_start><if_stmt>comp<is><not><none><block_start><if_stmt><not>isinstance(comp int)<block_start><if_stmt><not>isinstance(comp float)<block_start><raise>ValueError(str_error)<block_end><block_end><block_end><block_end><return>max_components_list<block_end><def_stmt>check_max_iters max_iters n_scales<block_start>r"""
Function that checks the value of a `max_iters` parameter defined for
multiple scales. It must be `int` or `list` of `int`.
Parameters
----------
max_iters : `int` or `list` of `int`
The value to check.
n_scales : `int`
The number of scales.
Returns
-------
max_iters : `list` of `int`
The list of values per scale.
Raises
------
ValueError
max_iters can be integer, integer list containing 1 or {n_scales}
elements or None
"""<if_stmt>type(max_iters)<is>int<block_start>max_iters=[np.round(max_iters/n_scales)<for>_ range(n_scales)]<block_end><elif_stmt>len(max_iters)<eq>1<and>n_scales<g>1<block_start>max_iters=[np.round(max_iters[0]/n_scales)<for>_ range(n_scales)]<block_end><elif_stmt>len(max_iters)<ne>n_scales<block_start><raise>ValueError('max_iters can be integer, integer list '<concat>'containing 1 or {} elements or '<concat>'None'.format(n_scales))<block_end><return>np.require(max_iters dtype=np.int)<block_end><def_stmt>check_sampling sampling n_scales<block_start>r"""
Function that checks the value of a `sampling` parameter defined for
multiple scales. It must be `int` or `ndarray` or `list` of those.
Parameters
----------
sampling : `int` or `ndarray` or `list` of those
The value to check.
n_scales : `int`
The number of scales.
Returns
-------
sampling : `list` of `int` or `ndarray`
The list of values per scale.
Raises
------
ValueError
A sampling list can only contain 1 element or {n_scales} elements
ValueError
sampling can be an integer or ndarray, a integer or ndarray list
containing 1 or {n_scales} elements or None
"""<if_stmt>(isinstance(sampling (list tuple))<and>np.alltrue([isinstance(s (np.ndarray np.int))<or>sampling<is><none><for>s sampling]))<block_start><if_stmt>len(sampling)<eq>1<block_start><return>sampling<times>n_scales<block_end><elif_stmt>len(sampling)<eq>n_scales<block_start><return>sampling<block_end><else_stmt><block_start><raise>ValueError('A sampling list can only '<concat>'contain 1 element or {} '<concat>'elements'.format(n_scales))<block_end><block_end><elif_stmt>isinstance(sampling (np.ndarray np.int))<or>sampling<is><none><block_start><return>[sampling]<times>n_scales<block_end><else_stmt><block_start><raise>ValueError('sampling can be an integer or ndarray, '<concat>'a integer or ndarray list '<concat>'containing 1 or {} elements or '<concat>'None'.format(n_scales))<block_end><block_end><def_stmt>set_models_components models n_components<block_start>r"""
Function that sets the number of active components to a list of models.
Parameters
----------
models : `list` or `class`
The list of models per scale.
n_components : `int` or `float` or ``None`` or `list` of those
The number of components per model.
Raises
------
ValueError
n_components can be an integer or a float or None or a list containing 1
or {n_scales} of those
"""<if_stmt>n_components<is><not><none><block_start>n_scales=len(models)<if_stmt>type(n_components)<is>int<or>type(n_components)<is>float<block_start><for_stmt>am models<block_start>am.n_active_components=n_components<block_end><block_end><elif_stmt>len(n_components)<eq>1<and>n_scales<g>1<block_start><for_stmt>am models<block_start>am.n_active_components=n_components[0]<block_end><block_end><elif_stmt>len(n_components)<eq>n_scales<block_start><for_stmt>am,n zip(models n_components)<block_start>am.n_active_components=n<block_end><block_end><else_stmt><block_start><raise>ValueError('n_components can be an integer or a float '<concat>'or None or a list containing 1 or {} of '<concat>'those'.format(n_scales))<block_end><block_end><block_end><def_stmt>check_model model cls<block_start>r"""
Function that checks whether the provided `class` object is a subclass of
the provided base `class`.
Parameters
----------
model : `class`
The object.
cls : `class`
The required base class.
Raises
------
ValueError
Model must be a {cls} instance.
"""<if_stmt><not>isinstance(model cls)<block_start><raise>ValueError('Model must be a {} instance.'.format(name_of_callable(cls)))<block_end><block_end><def_stmt>check_algorithm_cls algorithm_cls n_scales base_algorithm_cls<block_start>r"""
Function that checks whether the `list` of `class` objects defined per scale
are subclasses of the provided base `class`.
Parameters
----------
algorithm_cls : `class` or `list` of `class`
The list of objects per scale.
n_scales : `int`
The number of scales.
base_algorithm_cls : `class`
The required base class.
Raises
------
ValueError
algorithm_cls must be a subclass of {base_algorithm_cls} or a list/tuple
of {base_algorithm_cls} subclasses with the same length as the number of
scales {n_scales}
"""<if_stmt>(isinstance(algorithm_cls partial)<and>base_algorithm_cls<in>algorithm_cls.func.mro())<block_start><return>[algorithm_cls]<times>n_scales<block_end><elif_stmt>(isinstance(algorithm_cls type)<and>base_algorithm_cls<in>algorithm_cls.mro())<block_start><return>[algorithm_cls]<times>n_scales<block_end><elif_stmt>len(algorithm_cls)<eq>1<block_start><return>check_algorithm_cls(algorithm_cls[0] n_scales base_algorithm_cls)<block_end><elif_stmt>len(algorithm_cls)<eq>n_scales<block_start><return>[check_algorithm_cls(a 1 base_algorithm_cls)[0]<for>a algorithm_cls]<block_end><else_stmt><block_start><raise>ValueError("algorithm_cls must be a subclass of {} or a "<concat>"list/tuple of {} subclasses with the same length "<concat>"as the number of scales {}".format(base_algorithm_cls base_algorithm_cls n_scales))<block_end><block_end><def_stmt>check_graph graph graph_types param_name n_scales<block_start>r"""
Checks the provided graph per pyramidal level. The graph must be a
subclass of `graph_types` or a `list` of those.
Parameters
----------
graph : `graph` or `list` of `graph` types
The graph argument to check.
graph_types : `graph` or `tuple` of `graphs`
The `tuple` of allowed graph types.
param_name : `str`
The name of the graph parameter.
n_scales : `int`
The number of pyramidal levels.
Returns
-------
graph : `list` of `graph` types
The graph per scale in a `list`.
Raises
------
ValueError
{param_name} must be a list of length equal to the number of scales.
ValueError
{param_name} must be a list of {graph_types_str}. {} given instead.
"""<line_sep># check if the provided graph is a list
<if_stmt><not>isinstance(graph list)<block_start>graphs=[graph]<times>n_scales<block_end><elif_stmt>len(graph)<eq>1<block_start>graphs=graph<times>n_scales<block_end><elif_stmt>len(graph)<eq>n_scales<block_start>graphs=graph<block_end><else_stmt><block_start><raise>ValueError('{} must be a list of length equal to the number of '<concat>'scales.'.format(param_name))<block_end># check if the provided graph_types is a list
<if_stmt><not>isinstance(graph_types list)<block_start>graph_types=[graph_types]<block_end># check each member of the graphs list
<for_stmt>g graphs<block_start><if_stmt>g<is><not><none><block_start><if_stmt>type(g)<not><in>graph_types<block_start>graph_types_str=' or '.join(gt.__name__<for>gt graph_types)<line_sep><raise>ValueError('{} must be a list of {}. {} given '<concat>'instead.'.format(param_name graph_types_str type(g).__name__))<block_end><block_end><block_end><return>graphs<block_end> |
# -*- coding: utf-8 -*-
# Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_stmt>codecs<import_from_stmt>getpass getpass<import_from_stmt>os.path abspath expanduser<import_stmt>re<import_stmt>socket<import_from_stmt>.compat input on_win<import_from_stmt>.path split_filename strip_pkg_extension<import_from_stmt>.._vendor.auxlib.decorators memoize<import_from_stmt>.._vendor.urllib3.exceptions LocationParseError<import_from_stmt>.._vendor.urllib3.util.url Url parse_url<try_stmt># pragma: py2 no cover
# Python 3
<block_start><import_from_stmt>urllib.parse quote quote_plus unquote unquote_plus <block_end><except_stmt>ImportError# pragma: py3 no cover
# Python 2
<block_start><import_from_stmt>urllib quote quote_plus unquote unquote_plus <block_end># NOQA
<def_stmt>hex_octal_to_int ho<block_start>ho=ord(ho)<line_sep>o0=ord('0')<line_sep>o9=ord('9')<line_sep>oA=ord('A')<line_sep>oF=ord('F')<line_sep>res=ho-o0<if>ho<ge>o0<and>ho<le>o9<else>(ho-oA+10)<if>ho<ge>oA<and>ho<le>oF<else><none><line_sep><return>res<block_end>@memoize<def_stmt>percent_decode path# This is not fast so avoid when we can.
<block_start><if_stmt>'%'<not><in>path<block_start><return>path<block_end>ranges=[]<for_stmt>m re.finditer(r'(%[0-9A-F]{2})' path)<block_start>ranges.append((m.start() m.end()))<block_end><if_stmt><not>len(ranges)<block_start><return>path<block_end># Sorry! Correctness is more important than speed at the moment.
# Should use a map + lambda eventually.
result=b''<line_sep>skips=0<for_stmt>i,c enumerate(path)<block_start><if_stmt>skips<g>0<block_start>skips<augsub>1<line_sep><continue><block_end>c=c.encode('ascii')<line_sep>emit=c<if_stmt>c<eq>b'%'<block_start><for_stmt>r ranges<block_start><if_stmt>i<eq>r[0]<block_start><import_stmt>struct<line_sep>emit=struct.pack("B" hex_octal_to_int(path[i+1])<times>16+hex_octal_to_int(path[i+2]))<line_sep>skips=2<line_sep><break><block_end><block_end><block_end><if_stmt>emit<block_start>result<augadd>emit<block_end><block_end><return>codecs.utf_8_decode(result)[0]<block_end>file_scheme='file://'<line_sep># Keeping this around for now, need to combine with the same function in conda/common/path.py
"""
def url_to_path(url):
assert url.startswith(file_scheme), "{} is not a file-scheme URL".format(url)
decoded = percent_decode(url[len(file_scheme):])
if decoded.startswith('/') and decoded[2] == ':':
# A Windows path.
decoded.replace('/', '\\')
return decoded
"""<line_sep>@memoize<def_stmt>path_to_url path<block_start><if_stmt><not>path<block_start><raise>ValueError('Not allowed: %r'%path)<block_end><if_stmt>path.startswith(file_scheme)<block_start><try_stmt><block_start>path.decode('ascii')<block_end><except_stmt>UnicodeDecodeError<block_start><raise>ValueError('Non-ascii not allowed for things claiming to be URLs: %r'%path)<block_end><return>path<block_end>path=abspath(expanduser(path)).replace('\\' '/')<line_sep># We do not use urljoin here because we want to take our own
# *very* explicit control of how paths get encoded into URLs.
# We should not follow any RFCs on how to encode and decode
# them, we just need to make sure we can represent them in a
# way that will not cause problems for whatever amount of
# urllib processing we *do* need to do on them (which should
# be none anyway, but I doubt that is the case). I have gone
# for ASCII and % encoding of everything not alphanumeric or
# not in `!'()*-._/:`. This should be pretty save.
#
# To avoid risking breaking the internet, this code only runs
# for `file://` URLs.
#
percent_encode_chars="!'()*-._/\\:"<line_sep>percent_encode=<lambda>s:"".join(["%%%02X"%ord(c) c][c<l>"{"<and>c.isalnum()<or>c<in>percent_encode_chars]<for>c s)<if_stmt>any(ord(char)<ge>128<for>char path)<block_start>path=percent_encode(path.decode('unicode-escape')<if>hasattr(path 'decode')<else>bytes(path "utf-8").decode('unicode-escape'))<block_end># https://blogs.msdn.microsoft.com/ie/2006/12/06/file-uris-in-windows/
<if_stmt>len(path)<g>1<and>path[1]<eq>':'<block_start>path=file_scheme+'/'+path<block_end><else_stmt><block_start>path=file_scheme+path<block_end><return>path<block_end>@memoize<def_stmt>urlparse url<block_start><if_stmt>on_win<and>url.startswith('file:')<block_start>url.replace('\\' '/')<block_end><return>parse_url(url)<block_end><def_stmt>url_to_s3_info url<block_start>"""Convert an s3 url to a tuple of bucket and key.
Examples:
>>> url_to_s3_info("s3://bucket-name.bucket/here/is/the/key")
('bucket-name.bucket', '/here/is/the/key')
"""<line_sep>parsed_url=parse_url(url)<assert_stmt>parsed_url.scheme<eq>'s3' "You can only use s3: urls (not %r)"%url<line_sep>bucket,key=parsed_url.host parsed_url.path<line_sep><return>bucket key<block_end><def_stmt>is_url url<block_start>"""
Examples:
>>> is_url(None)
False
>>> is_url("s3://some/bucket")
True
"""<if_stmt><not>url<block_start><return><false><block_end><try_stmt><block_start><return>urlparse(url).scheme<is><not><none><block_end><except_stmt>LocationParseError<block_start><return><false><block_end><block_end><def_stmt>is_ipv4_address string_ip<block_start>"""
Examples:
>>> [is_ipv4_address(ip) for ip in ('8.8.8.8', '192.168.10.10', '255.255.255.255')]
[True, True, True]
>>> [is_ipv4_address(ip) for ip in ('8.8.8', '192.168.10.10.20', '256.255.255.255', '::1')]
[False, False, False, False]
"""<try_stmt><block_start>socket.inet_aton(string_ip)<block_end><except_stmt>socket.error<block_start><return><false><block_end><return>string_ip.count('.')<eq>3<block_end><def_stmt>is_ipv6_address string_ip<block_start>"""
Examples:
>> [is_ipv6_address(ip) for ip in ('::1', '2001:db8:85a3::370:7334', '1234:'*7+'1234')]
[True, True, True]
>> [is_ipv6_address(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""<try_stmt><block_start>inet_pton=socket.inet_pton<block_end><except_stmt>AttributeError<block_start><return>is_ipv6_address_win_py27(string_ip)<block_end><try_stmt><block_start>inet_pton(socket.AF_INET6 string_ip)<block_end><except_stmt>socket.error<block_start><return><false><block_end><return><true><block_end><def_stmt>is_ipv6_address_win_py27 string_ip<block_start>"""
Examples:
>>> [is_ipv6_address_win_py27(ip) for ip in ('::1', '1234:'*7+'1234')]
[True, True]
>>> [is_ipv6_address_win_py27(ip) for ip in ('192.168.10.10', '1234:'*8+'1234')]
[False, False]
"""<line_sep># python 2.7 on windows does not have socket.inet_pton
<return>bool(re.match(r""# lgtm [py/regex/unmatchable-dollar]
<concat>r"^(((?=.*(::))(?!.*\3.+\3))\3?|[\dA-F]{1,4}:)"<concat>r"([\dA-F]{1,4}(\3|:\b)|\2){5}"<concat>r"(([\dA-F]{1,4}(\3|:\b|$)|\2){2}|"<concat>r"(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})\Z" string_ip flags=re.DOTALL|re.IGNORECASE))<block_end><def_stmt>is_ip_address string_ip<block_start>"""
Examples:
>> is_ip_address('192.168.10.10')
True
>> is_ip_address('::1')
True
>> is_ip_address('www.google.com')
False
"""<line_sep><return>is_ipv4_address(string_ip)<or>is_ipv6_address(string_ip)<block_end><def_stmt>join *args<block_start>start='/'<if><not>args[0]<or>args[0].startswith('/')<else>''<line_sep><return>start+'/'.join(y<for>y (x.strip('/')<for>x args<if>x)<if>y)<block_end>join_url=join<def_stmt>has_scheme value<block_start><return>re.match(r'[a-z][a-z0-9]{0,11}://' value)<block_end><def_stmt>strip_scheme url<block_start>"""
Examples:
>>> strip_scheme("https://www.conda.io")
'www.conda.io'
>>> strip_scheme("s3://some.bucket/plus/a/path.ext")
'some.bucket/plus/a/path.ext'
"""<line_sep><return>url.split('://' 1)[-1]<block_end><def_stmt>mask_anaconda_token url<block_start>_,token=split_anaconda_token(url)<line_sep><return>url.replace(token "<TOKEN>" 1)<if>token<else>url<block_end><def_stmt>split_anaconda_token url<block_start>"""
Examples:
>>> split_anaconda_token("https://1.2.3.4/t/tk-123-456/path")
(u'https://1.2.3.4/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/t//path")
(u'https://1.2.3.4/path', u'')
>>> split_anaconda_token("https://some.domain/api/t/tk-123-456/path")
(u'https://some.domain/api/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/conda/t/tk-123-456/path")
(u'https://1.2.3.4/conda/path', u'tk-123-456')
>>> split_anaconda_token("https://1.2.3.4/path")
(u'https://1.2.3.4/path', None)
>>> split_anaconda_token("https://10.2.3.4:8080/conda/t/tk-123-45")
(u'https://10.2.3.4:8080/conda', u'tk-123-45')
"""<line_sep>_token_match=re.search(r'/t/([a-zA-Z0-9-]*)' url)<line_sep>token=_token_match.groups()[0]<if>_token_match<else><none><line_sep>cleaned_url=url.replace('/t/'+token '' 1)<if>token<is><not><none><else>url<line_sep><return>cleaned_url.rstrip('/') token<block_end><def_stmt>split_platform known_subdirs url<block_start>"""
Examples:
>>> from conda.base.constants import KNOWN_SUBDIRS
>>> split_platform(KNOWN_SUBDIRS, "https://1.2.3.4/t/tk-123/linux-ppc64le/path")
(u'https://1.2.3.4/t/tk-123/path', u'linux-ppc64le')
"""<line_sep>_platform_match=_split_platform_re(known_subdirs).search(url)<line_sep>platform=_platform_match.groups()[0]<if>_platform_match<else><none><line_sep>cleaned_url=url.replace('/'+platform '' 1)<if>platform<is><not><none><else>url<line_sep><return>cleaned_url.rstrip('/') platform<block_end>@memoize<def_stmt>_split_platform_re known_subdirs<block_start>_platform_match_regex=r'/(%s)(?:/|$)'%r'|'.join(r'%s'%d<for>d known_subdirs)<line_sep><return>re.compile(_platform_match_regex re.IGNORECASE)<block_end><def_stmt>has_platform url known_subdirs<block_start>url_no_package_name,_=split_filename(url)<if_stmt><not>url_no_package_name<block_start><return><none><block_end>maybe_a_platform=url_no_package_name.rsplit('/' 1)[-1]<line_sep><return>maybe_a_platform<in>known_subdirs<and>maybe_a_platform<or><none><block_end><def_stmt>split_scheme_auth_token url<block_start>"""
Examples:
>>> split_scheme_auth_token("https://u:p@conda.io/t/x1029384756/more/path")
('conda.io/more/path', 'https', 'u:p', 'x1029384756')
>>> split_scheme_auth_token(None)
(None, None, None, None)
"""<if_stmt><not>url<block_start><return><none> <none> <none> <none><block_end>cleaned_url,token=split_anaconda_token(url)<line_sep>url_parts=urlparse(cleaned_url)<line_sep>remainder_url=Url(host=url_parts.host port=url_parts.port path=url_parts.path query=url_parts.query).url<line_sep><return>remainder_url url_parts.scheme url_parts.auth token<block_end><def_stmt>split_conda_url_easy_parts known_subdirs url# scheme, auth, token, platform, package_filename, host, port, path, query
<block_start>cleaned_url,token=split_anaconda_token(url)<line_sep>cleaned_url,platform=split_platform(known_subdirs cleaned_url)<line_sep>_,ext=strip_pkg_extension(cleaned_url)<line_sep>cleaned_url,package_filename=cleaned_url.rsplit('/' 1)<if>ext<else>(cleaned_url <none>)<line_sep># TODO: split out namespace using regex
url_parts=urlparse(cleaned_url)<line_sep><return>(url_parts.scheme url_parts.auth token platform package_filename url_parts.host url_parts.port url_parts.path url_parts.query)<block_end>@memoize<def_stmt>get_proxy_username_and_pass scheme<block_start>username=input("\n%s proxy username: "%scheme)<line_sep>passwd=getpass("Password: ")<line_sep><return>username passwd<block_end><def_stmt>add_username_and_password url username password<block_start>url_parts=parse_url(url)._asdict()<line_sep>url_parts['auth']=username+':'+quote(password '')<line_sep><return>Url(**url_parts).url<block_end><def_stmt>maybe_add_auth url auth force=<false><block_start>"""Add auth if the url doesn't currently have it.
By default, does not replace auth if it already exists. Setting ``force`` to ``True``
overrides this behavior.
Examples:
>>> maybe_add_auth("https://www.conda.io", "user:passwd")
'https://user:passwd@www.conda.io'
>>> maybe_add_auth("https://www.conda.io", "")
'https://www.conda.io'
"""<if_stmt><not>auth<block_start><return>url<block_end>url_parts=urlparse(url)._asdict()<if_stmt>url_parts['auth']<and><not>force<block_start><return>url<block_end>url_parts['auth']=auth<line_sep><return>Url(**url_parts).url<block_end><def_stmt>maybe_unquote url<block_start><return>unquote_plus(remove_auth(url))<if>url<else>url<block_end><def_stmt>remove_auth url<block_start>url_parts=parse_url(url)._asdict()<if_stmt>url_parts['auth']<block_start><del_stmt>url_parts['auth']<block_end><return>Url(**url_parts).url<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>doctest<line_sep>doctest.testmod()<block_end> |
"""pypyr step yaml definition for commands - domain specific language."""<import_stmt>shlex<import_stmt>subprocess<import_stmt>logging<import_from_stmt>pypyr.errors ContextError<import_from_stmt>pypyr.utils types<line_sep># logger means the log level will be set correctly
logger=logging.getLogger(__name__)<class_stmt>CmdStep()<block_start>"""A pypyr step that represents a command runner step.
This models a step that takes config like this:
cmd: <<cmd string>>
OR, as a dict
cmd:
run: str. mandatory. command + args to execute.
save: bool. defaults False. save output to cmdOut.
If save is True, will save the output to context as follows:
cmdOut:
returncode: 0
stdout: 'stdout str here. None if empty.'
stderr: 'stderr str here. None if empty.'
cmdOut.returncode is the exit status of the called process. Typically 0
means OK. A negative value -N indicates that the child was terminated by
signal N (POSIX only).
The run_step method does the actual work. init loads the yaml.
"""<def_stmt>__init__ self name context<block_start>"""Initialize the CmdStep.
The step config in the context dict looks like this:
cmd: <<cmd string>>
OR, as a dict
cmd:
run: str. mandatory. command + args to execute.
save: bool. optional. defaults False. save output to cmdOut.
cwd: str/path. optional. if specified, change the working
directory just for the duration of the command.
Args:
name: Unique name for step. Likely __name__ of calling step.
context: pypyr.context.Context. Look for config in this context
instance.
"""<assert_stmt>name ("name parameter must exist for CmdStep.")<assert_stmt>context ("context param must exist for CmdStep.")<line_sep># this way, logs output as the calling step, which makes more sense
# to end-user than a mystery steps.dsl.blah logging output.
self.logger=logging.getLogger(name)<line_sep>context.assert_key_has_value(key='cmd' caller=name)<line_sep>self.context=context<line_sep>self.is_save=<false><line_sep>cmd_config=context.get_formatted('cmd')<if_stmt>isinstance(cmd_config str)<block_start>self.cmd_text=cmd_config<line_sep>self.cwd=<none><line_sep>self.logger.debug("Processing command string: %s" cmd_config)<block_end><elif_stmt>isinstance(cmd_config dict)<block_start>context.assert_child_key_has_value(parent='cmd' child='run' caller=name)<line_sep>self.cmd_text=cmd_config['run']<line_sep>self.is_save=types.cast_to_bool(cmd_config.get('save' <false>))<line_sep>cwd_string=cmd_config.get('cwd' <none>)<if_stmt>cwd_string<block_start>self.cwd=cwd_string<line_sep>self.logger.debug("Processing command string in dir "<concat>"%s: %s" self.cwd self.cmd_text)<block_end><else_stmt><block_start>self.cwd=<none><line_sep>self.logger.debug("Processing command string: %s" self.cmd_text)<block_end><block_end><else_stmt><block_start><raise>ContextError(f"{name} cmd config should be either a simple "<concat>"string cmd='mycommandhere' or a dictionary "<concat>"cmd={'run': 'mycommandhere', 'save': False}.")<block_end><block_end><def_stmt>run_step self is_shell<block_start>"""Run a command.
Runs a program or executable. If is_shell is True, executes the command
through the shell.
Args:
is_shell: bool. defaults False. Set to true to execute cmd through
the default shell.
"""<assert_stmt>is_shell<is><not><none> ("is_shell param must exist for CmdStep.")<line_sep># why? If shell is True, it is recommended to pass args as a string
# rather than as a sequence.
<if_stmt>is_shell<block_start>args=self.cmd_text<block_end><else_stmt><block_start>args=shlex.split(self.cmd_text)<block_end><if_stmt>self.is_save<block_start>completed_process=subprocess.run(args cwd=self.cwd shell=is_shell # capture_output=True,only>py3.7
stdout=subprocess.PIPE stderr=subprocess.PIPE # text=True, only>=py3.7,
universal_newlines=<true>)<line_sep>self.context['cmdOut']={'returncode':completed_process.returncode 'stdout':(completed_process.stdout.rstrip()<if>completed_process.stdout<else><none>) 'stderr':(completed_process.stderr.rstrip()<if>completed_process.stderr<else><none>)}<line_sep># when capture is true, output doesn't write to stdout
self.logger.info("stdout: %s" completed_process.stdout)<if_stmt>completed_process.stderr<block_start>self.logger.error("stderr: %s" completed_process.stderr)<block_end># don't swallow the error, because it's the Step swallow decorator
# responsibility to decide to ignore or not.
completed_process.check_returncode()<block_end><else_stmt># check=True throws CalledProcessError if exit code != 0
<block_start>subprocess.run(args shell=is_shell check=<true> cwd=self.cwd)<block_end><block_end><block_end> |
<import_from_stmt>t_1000.application T1000<line_sep> |
# -*- coding: utf-8 -*-
<import_stmt>pytest<import_from_stmt>girder.exceptions AccessException<import_from_stmt>girder.models.setting Setting<import_from_stmt>girder.models.user User<import_from_stmt>girder.settings SettingKey<import_from_stmt>pytest_girder.assertions assertStatus assertStatusOk<def_stmt>testInitializeOtp user# The logic for the server hostname as the issuer cannot be tested here, since there is no
# current request, but that logic is explicitly tested in testOtpApiWorkflow
<block_start>Setting().set(SettingKey.BRAND_NAME 'Branded Girder')<line_sep>otpUris=User().initializeOtp(user)<line_sep># A URI for TOTP should be returned
<assert_stmt>otpUris['totpUri'].startswith('otpauth://')<assert_stmt>user['login']<in>otpUris['totpUri']<assert_stmt>'issuer=Branded%20Girder'<in>otpUris['totpUri']<line_sep># OTP should not be enabled yet, since it's not finalized
<assert_stmt>user['otp']['enabled']<is><false><line_sep># TOTP parameters should be generated
<assert_stmt>'totp'<in>user['otp']<block_end><def_stmt>testHasOtpEnabled user<block_start><assert_stmt>User().hasOtpEnabled(user)<is><false><line_sep>User().initializeOtp(user)<line_sep># OTP is not yet enabled
<assert_stmt>User().hasOtpEnabled(user)<is><false><line_sep>user['otp']['enabled']=<true><assert_stmt>User().hasOtpEnabled(user)<is><true><block_end><def_stmt>_tokenFromTotpUri totpUri valid=<true># Create an external TOTP instance
<block_start><import_from_stmt>passlib.totp TOTP<line_sep>totp=TOTP.from_uri(totpUri)<line_sep># Generate a valid token
otpToken=totp.generate().token<if_stmt><not>valid# Increment the token by 1 to invalidate it
<block_start>otpToken='%06d'%((int(otpToken)+1)%int(1e6))<block_end><return>otpToken<block_end><def_stmt>testVerifyOtp server user# Enable OTP
<block_start>otpUris=User().initializeOtp(user)<line_sep>user['otp']['enabled']=<true><line_sep># Generate an invalid token
otpToken=_tokenFromTotpUri(otpUris['totpUri'] <false>)<with_stmt>pytest.raises(AccessException)<block_start>User().verifyOtp(user otpToken)<block_end># Generate a valid token
otpToken=_tokenFromTotpUri(otpUris['totpUri'])<line_sep># Verify the token, which should succeed without raising an exception
User().verifyOtp(user otpToken)<line_sep># Re-verify the same token, which should fail
# The "server" fixture is necessary for this to work
<with_stmt>pytest.raises(AccessException)<block_start>User().verifyOtp(user otpToken)<block_end><block_end><def_stmt>testAuthenticateWithOtp user# Providing an unnecessary token should fail
<block_start><with_stmt>pytest.raises(AccessException)<block_start>User().authenticate('user' 'password' '<PASSWORD>')<block_end># Enable OTP and save user
otpUris=User().initializeOtp(user)<line_sep>user['otp']['enabled']=<true><line_sep>User().save(user)<line_sep># Providing no token should now fail
<with_stmt>pytest.raises(AccessException)<block_start>User().authenticate('user' 'password')<block_end># Generate a valid token
otpToken=_tokenFromTotpUri(otpUris['totpUri'])<line_sep># Authenticate successfully with the valid token
User().authenticate('user' 'password' otpToken)<block_end><def_stmt>testAuthenticateWithOtpConcatenated user# Non-OTP-user authentication should still succeed with "otpToken=True"
<block_start>User().authenticate('user' 'password' <true>)<line_sep># Enable OTP and save user
otpUris=User().initializeOtp(user)<line_sep>user['otp']['enabled']=<true><line_sep>User().save(user)<line_sep># Authentication should now fail
<with_stmt>pytest.raises(AccessException)<block_start>User().authenticate('user' 'password' <true>)<block_end># Generate a valid token
otpToken=_tokenFromTotpUri(otpUris['totpUri'])<line_sep># Authenticate successfully with the valid token
User().authenticate('user' 'password'+otpToken <true>)<block_end><def_stmt>testOtpApiWorkflow server user# Try to finalize OTP before it's been initialized
<block_start>resp=server.request(path='/user/%s/otp'%user['_id'] method='PUT' user=user additionalHeaders=[('Girder-OTP' '123456')])<line_sep># This should fail cleanly
assertStatus(resp 400)<assert_stmt>'not initialized'<in>resp.json['message']<line_sep># Try to disable OTP before it's been enabled
resp=server.request(path='/user/%s/otp'%user['_id'] method='DELETE' user=user)<line_sep># This should fail cleanly
assertStatus(resp 400)<assert_stmt>'not enabled'<in>resp.json['message']<line_sep># Initialize OTP
resp=server.request(path='/user/%s/otp'%user['_id'] method='POST' user=user)<line_sep>assertStatusOk(resp)<line_sep># Save the URI
totpUri=resp.json['totpUri']<line_sep># Test the logic for server hostname as OTP URI issuer
<assert_stmt>'issuer=127.0.0.1'<in>totpUri<line_sep># Login without an OTP
resp=server.request(path='/user/authentication' method='GET' basicAuth='<PASSWORD>:password')<line_sep># Since OTP has not been finalized, this should still succeed
assertStatusOk(resp)<line_sep># Finalize without an OTP
resp=server.request(path='/user/%s/otp'%user['_id'] method='PUT' user=user)<line_sep>assertStatus(resp 400)<assert_stmt>'Girder-OTP'<in>resp.json['message']<line_sep># Finalize with an invalid OTP
resp=server.request(path='/user/%s/otp'%user['_id'] method='PUT' user=user additionalHeaders=[('Girder-OTP' _tokenFromTotpUri(totpUri <false>))])<line_sep>assertStatus(resp 403)<assert_stmt>'validation failed'<in>resp.json['message']<line_sep># Finalize with a valid OTP
resp=server.request(path='/user/%s/otp'%user['_id'] method='PUT' user=user additionalHeaders=[('Girder-OTP' _tokenFromTotpUri(totpUri))])<line_sep>assertStatusOk(resp)<line_sep># The valid token from this time period was used to finalize OTP; to prevent having to wait for
# the next time period, flush the rateLimitBuffer
<import_from_stmt>girder.utility._cache rateLimitBuffer<line_sep>rateLimitBuffer.invalidate()<line_sep># Login without an OTP
resp=server.request(path='/user/authentication' method='GET' basicAuth='user:password')<line_sep>assertStatus(resp 401)<assert_stmt>'Girder-OTP'<in>resp.json['message']<line_sep># Login with an invalid OTP
resp=server.request(path='/user/authentication' method='GET' basicAuth='user:password' additionalHeaders=[('Girder-OTP' _tokenFromTotpUri(totpUri <false>))])<line_sep>assertStatus(resp 401)<assert_stmt>'Token did not match'<in>resp.json['message']<line_sep># Login with a valid OTP
resp=server.request(path='/user/authentication' method='GET' basicAuth='user:password' additionalHeaders=[('Girder-OTP' _tokenFromTotpUri(totpUri))])<line_sep>assertStatusOk(resp)<line_sep># Disable OTP
resp=server.request(path='/user/%s/otp'%user['_id'] method='DELETE' user=user)<line_sep>assertStatusOk(resp)<block_end> |
'''Entry point and command line parsing for ig.'''<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>ig colors graph serve walk<def_stmt>setup_logging <block_start>'''Sets up the root logger.'''<line_sep>handler=logging.StreamHandler(sys.stderr)<line_sep>formatter=logging.Formatter('[%(levelname)s] %(message)s')<line_sep>handler.setFormatter(formatter)<line_sep>log=logging.getLogger(__package__)<line_sep>log.addHandler(handler)<line_sep>log.setLevel(logging.INFO)<line_sep><return>log<block_end><def_stmt>parse_arguments args<block_start>'''
Sets up the command line argument parser and parses arguments.
Args:
args: The list of argumnets passed to the command line
Returns:
The parsed arguments.
'''<line_sep>parser=argparse.ArgumentParser(description='Visualize C++ include graphs')<line_sep>parser.add_argument('directories' nargs='+' help='The directories to inspect')<line_sep>parser.add_argument('--pattern' action='append' default=['*.[ch]pp' '*.[ch]'] dest='patterns' help='The file (glob) patterns to look for')<line_sep>parser.add_argument('-i' '-I' '--prefix' action='append' dest='prefixes' default=[os.getcwd()] help='An include path for headers to recognize')<line_sep>parser.add_argument('-v' '--verbose' action='store_true' help='Turn on verbose output')<line_sep>parser.add_argument('-p' '--port' type=int default=8080 help='The port to serve the visualization on')<line_sep>parser.add_argument('-o' '--open' action='store_true' help='Open the webpage immediately')<line_sep>parser.add_argument('-j' '--json' action='store_true' help='Print the graph JSON instead of serving it')<line_sep>parser.add_argument('-d' '--dir' dest='directory' help='The directory to store the served files in. 
If '<concat>'not supplied, a temporary directory is created.')<line_sep>parser.add_argument('--relation' choices=['includes' 'included-by'] default='included-by' help='The relation of edges in the graph')<line_sep>parser.add_argument('--min-degree' type=float default=0.1 help='The initial minimum degree nodes should have to '<concat>'be displayed')<line_sep>parser.add_argument('--group-granularity' type=int default=2 help='How coarse to group nodes (by folder)')<line_sep>parser.add_argument('--full-path' action='store_true' help='If set, shows the full path for nodes')<line_sep>parser.add_argument('--colors' type=<lambda>p:colors.Colors(map(int p.split(','))) default='234, 82, 77' help='The base RGB colors separated by commas')<line_sep>parser.add_argument('--color-variation' type=int default=200 help='The variation in RGB around the base colors')<line_sep>parser.add_argument('--color-alpha-min' type=float default=0.7 help='The minimum alpha value for colors')<line_sep>args=parser.parse_args(args)<line_sep># Necessary for standard includes
args.prefixes.append('')<if_stmt><not>(0<le>args.color_alpha_min<le>1)<block_start><raise>RuntimeError('--color-alpha-min must be in interval [0, 1]')<block_end>args.colors.variation=args.color_variation<line_sep>args.colors.alpha_min=args.color_alpha_min<line_sep><return>args<block_end><def_stmt>make_json args graph_json<block_start>'''
Creates the JSON payload for the visualization.
Args:
args: The command line arguments.
graph_json: The JSON dict from the graph.
Returns:
The payload.
'''<if_stmt>args.json<block_start>print(graph_json)<line_sep>sys.exit(0)<block_end># Additional settings to configure the visualization
settings=dict(initialDegree=args.min_degree)<line_sep><return>dict(settings=settings graph=graph_json)<block_end><def_stmt>main <block_start>log=setup_logging()<line_sep>args=parse_arguments(sys.argv[1:])<if_stmt>args.verbose<block_start>log.setLevel(logging.DEBUG)<block_end>log.debug('Received arguments: %s' args)<line_sep>include_graph=graph.Graph(args.relation args.full_path args.colors args.group_granularity)<line_sep>walk.walk(include_graph args)<if_stmt>include_graph.is_empty<block_start>log.debug('Could not find a single node, exiting')<line_sep>sys.exit(-1)<block_end>json=make_json(args include_graph.to_json())<with_stmt>serve.Server(args.directory)<as>server<block_start>server.write(json)<line_sep>server.run(args.open args.port)<block_end>log.info('Shutting down')<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>random<import_stmt>re<import_stmt>subprocess<import_stmt>time<import_from_stmt>.background schedule<import_from_stmt>.nlp.ner NamedEntitiyRecognizer<import_from_stmt>.skills.bus Bus<import_from_stmt>.skills.feed FeedNotifier<import_from_stmt>.skills.github GithubManager<import_from_stmt>.skills.humor Humor<import_from_stmt>.skills.maxim Maxim<import_from_stmt>.skills.naver Naver<import_from_stmt>.skills.question AttentionQuestion<import_from_stmt>.skills.question HappyQuestion<import_from_stmt>.skills.rescue_time RescueTime<import_from_stmt>.skills.samhangsi.generator SamhangSiGenerator<import_from_stmt>.skills.summary Summary<import_from_stmt>.skills.todoist TodoistManager<import_from_stmt>.skills.toggl TogglManager<import_from_stmt>.skills.trello TrelloManager<import_from_stmt>.skills.twitter TwitterManager<import_from_stmt>.skills.weather Weather<import_from_stmt>.slack.slackbot SlackerAdapter<import_from_stmt>.slack.resource MsgResource<import_from_stmt>.utils.arrow ArrowUtil<import_from_stmt>.utils.data_handler DataHandler<import_from_stmt>.utils.data_loader SkillData<import_from_stmt>.utils.data_loader FeedData<import_from_stmt>.utils.logger Logger<import_from_stmt>.utils.member Member<class_stmt>Functions(object)<block_start>IDEA_LIST="Inbox"<line_sep>KANBAN_TASKS="Tasks"<line_sep>KANBAN_DOING="Doing"<line_sep>KANBAN_DONE="Done"<line_sep>KANBAN_BREAK="Break"<def_stmt>__init__ self slackbot=<none><block_start>self.data_handler=DataHandler()<line_sep>self.registered=RegisteredFuctions().list<line_sep>self.logger=Logger().get_logger()<if_stmt>slackbot<is><none><block_start>self.slackbot=SlackerAdapter()<block_end><else_stmt><block_start>self.slackbot=slackbot<block_end><block_end><def_stmt>check_go_to_bed self<block_start>summary=Summary()<line_sep>summary.check_go_to_bed()<line_sep>summary.check_commit_count()<line_sep>self._reset_data()<block_end><def_stmt>_reset_data self<block_start>self.data_handler.edit_cache(("feed_links" []) 
fname="cache_feed.json")# NOTE: hard-code
self.data_handler.edit_cache(("tweet_ids" []) fname="cache_feed.json")<line_sep>FeedData().reset()<line_sep>SkillData().reset()<block_end><def_stmt>feed_notify self<block_start>"""
keyword: [["피드", "알다"], ["피드", "있다"], ["새 소식", "있다"]]
description: "Feed"
icon: ":spock-hand: "
"""<line_sep>self.slackbot.send_message(text=MsgResource.FEED_ACK)<line_sep>feed_notifier=FeedNotifier()<line_sep>feed_notifier.notify_all()<line_sep>twitter_manager=TwitterManager()<line_sep>twitter_manager.notify_popular_tweet()<block_end><def_stmt>health_check self<block_start>bot_id=self.slackbot.get_bot_id()<if_stmt>self.slackbot.is_active(bot_id)<block_start>self.logger.info("Healthy.")<block_end><else_stmt># NOTE: restart with script.
<block_start>subprocess.call("sh ~/restart_kino.sh" shell=<true>)<block_end><block_end><def_stmt>holiday_setting self<block_start>"""
keyword: ["휴일", "쉬는 날", "holiday"]
description: "Holiday!"
icon: ":relaxed: "
"""<line_sep>Summary().record_holiday(<true>)<line_sep>self.slackbot.send_message(text=MsgResource.HOLIDAY)<block_end><def_stmt>good_morning self<block_start>"""
keyword: ["굿모닝", "좋은 아침", "good morning"]
description: "Good Morning"
icon: ":sunrise: "
"""<line_sep>self.slackbot.send_message(text=MsgResource.PROFILE_WAKE_UP)<line_sep>self.forecast(timely="daily")<line_sep>trello=TrelloManager()<line_sep>trello.clean_board(except_list_name=[self.IDEA_LIST self.KANBAN_BREAK])<line_sep>self.kanban_sync()<block_end><def_stmt>good_night self<block_start>"""
keyword: ["굿나잇", "굿밤", "자다", "good night"]
description: "Good Night"
icon: ":night_with_stars: "
"""<line_sep>self.slackbot.send_message(text=MsgResource.PROFILE_GO_TO_BED)<line_sep>summary=Summary()<line_sep>summary.check_commit_count()<line_sep>self._reset_data()<block_end><def_stmt>activity_task_sync self<block_start>"""
keyword: ["토글 싱크"]
description: "Toggl <-> Task Sync"
icon: ":tornado: "
"""<line_sep>toggl=TogglManager(slackbot=self.slackbot)<line_sep>toggl.sync_task()<block_end><def_stmt>air_quality self<block_start>"""
keyword: ["공기질", "미세먼지", "air quality"]
description: "Air quality forecast. (can use only Korea [airkoreaPy](https://github.com/DongjunLee/airkoreaPy))"
icon: ":factory: "
"""<line_sep>weather=Weather(slackbot=self.slackbot)<line_sep>weather.air_quality()<block_end><def_stmt>attention_question self text:str=<none><block_start>"""
keyword: [["집중도", "조사"], ["집중도", "확인"], ["attention", "question"]]
description: "Attention survey after do task."
icon: ":writing_hand: "
"""<line_sep>attention=AttentionQuestion(slackbot=self.slackbot)<line_sep>attention.question()<block_end><def_stmt>attention_report self timely:str="daily"<block_start>"""
keyword: [["집중도", "리포트"], ["attention", "report"]]
description: "Attention Report."
icon: ":writing_hand: "
"""<if_stmt>timely<is><none><block_start>timely="daily"<block_end>attention=AttentionQuestion(slackbot=self.slackbot)<line_sep>attention.report(timely=timely)<block_end><def_stmt>bus_stop self station_id:str=<none> real_time:str=<none><block_start>"""
keyword: [["버스", "도착"], ["버스", "언제"], ["버스", "조회"]]
description: "Bus arrival information. (can use only Korea (gbus api))"
icon: ":oncoming_bus: "
"""<if_stmt>real_time<is><none><block_start>real_time=<false><block_end>bus=Bus(slackbot=self.slackbot)<line_sep>bus.arrive_info(station_id real_time=real_time)<block_end><def_stmt>forecast self timely:str="current"<block_start>"""
keyword: ["날씨", "예보", "weather", "forecast"]
description: "Weather forecast. (using [darksky](https://darksky.net/))"
icon: ":sun_with_face: "
"""<if_stmt>timely<is><none><block_start>timely="current"<block_end>weather=Weather(slackbot=self.slackbot)<line_sep>weather.forecast(timely=timely)<line_sep>self.air_quality()<block_end><def_stmt>github_commit self timely:str="daily"<block_start>"""
keyword: ["커밋", "commit", "깃헙", "github"]
description: "Check [Github](https://github.com) push count."
icon: ":octocat: "
"""<if_stmt>timely<is><none><block_start>timely="daily"<block_end>github=GithubManager(slackbot=self.slackbot)<line_sep>github.commit(timely=timely)<block_end><def_stmt>happy_question self<block_start>"""
keyword: [["행복도", "조사"], ["행복도", "확인"], ["happy", "question"]]
description: "Happiness survey."
icon: ":smile: "
"""<line_sep>happy=HappyQuestion(slackbot=self.slackbot)<line_sep>happy.question()<block_end><def_stmt>happy_report self timely:str="daily"<block_start>"""
keyword: [["행복도", "리포트"], ["happy", "report"]]
description: "Happiness Report."
icon: ":smile: "
"""<if_stmt>timely<is><none><block_start>timely="daily"<block_end>happy=HappyQuestion(slackbot=self.slackbot)<line_sep>happy.report(timely=timely)<block_end><def_stmt>honeyjam self<block_start>"""
keyword: [["재밌는", "이야기"], ["개그"]]
description: "**Easter Egg** - Korea Azae Humor (using [honeyjam](https://github.com/DongjunLee/honeyjam))."
icon: ":honey_pot: "
"""<line_sep>humor=Humor()<line_sep>question,answer=humor.honeyjam()<line_sep>self.slackbot.send_message(text=MsgResource.HUMOR_QUESTION(question=question))<line_sep>time.sleep(2)<line_sep>self.slackbot.send_message(text=MsgResource.HUMOR_ANSWER(answer=answer))<line_sep>haha_num=random.randint(1 5)<line_sep>self.slackbot.send_message(text=MsgResource.HUMOR_END(haha_num))<line_sep>sorry_index=random.randint(1 100)<if_stmt>sorry_index<l>25<block_start>time.sleep(1)<line_sep>self.slackbot.send_message(text=MsgResource.HUMOR_SORRY)<block_end><block_end><def_stmt>jenkins_build self job_name:str=<none> branch:str=<none><block_start>"""
keyword: ["배포", "deploy"]
description: "Build a registered project for Jenkins."
icon: ":building_construction: "
"""<line_sep>jenkins=JenkinsClient()<line_sep>jenkins.build(job_name branch)<block_end><def_stmt>kanban_sync self<block_start>"""
keyword: [["칸반", "싱크"], ["kanban", "sync"]]
description: "Todoist's tasks and Kanban board's card Syncing."
icon: ":clipboard: "
"""<line_sep>self.slackbot.send_message(text=MsgResource.KANBAN_SYNC)<line_sep>todoist=TodoistManager(slackbot=self.slackbot)<line_sep>today_label_tasks=todoist.get_today_tasks_with_label()<line_sep>trello=TrelloManager()<line_sep>task_list=trello.get_list_by_name(self.KANBAN_TASKS)<line_sep>task_list.archive_all_cards()<for_stmt>task today_label_tasks<block_start>card_name=task["label"]+" - "+task["content"]<line_sep>task_list.add_card(re.sub(r" \d+분" "" card_name))<block_end><block_end><def_stmt>keep_idea self hashtag:str=<none><block_start>"""
keyword: [["keep", "idea"], ["킵", "아이디어"], ["아이디어", "저장"], ["아이디어", "기억"]]
description: "Keep idea in Trello board's inbox list."
icon: ":thinking_face: "
"""<if_stmt>hashtag<is><none><block_start>self.slackbot.send_message(text=MsgResource.HASHTAG_NOT_FOUND)<line_sep><return><block_end>trello=TrelloManager()<line_sep>trello.add_card(self.IDEA_LIST hashtag)<line_sep>self.slackbot.send_message(text=MsgResource.ADD_IDEA)<block_end><def_stmt>maxim_nietzsche self<block_start>"""
keyword: [["니체", "명언"], ["nietzsche", "maxim"]]
description: "Nietzsche's Maxim."
icon: ":scales: "
"""<line_sep>maxim=Maxim(slackbot=self.slackbot)<line_sep>maxim.nietzsche()<block_end><def_stmt>remind_idea self<block_start>"""
keyword: [["remind", "idea"], ["리마인드", "아이디어"]]
description: "Remind Trello's inbox card randomly pick."
icon: ":thinking_face: "
"""<line_sep>trello=TrelloManager()<line_sep>idea=trello.get_random_card_name()<if_stmt>idea<is><none><block_start>self.slackbot.send_message(text=MsgResource.EMPTY_IDEA)<block_end><else_stmt><block_start>self.slackbot.send_message(text=MsgResource.REMIND_IDEA(idea=idea))<block_end><block_end><def_stmt>rescuetime_efficiency self timely:str="daily"<block_start>"""
keyword: ["레스큐타임 효율성", "작업 효율", "생산성 차트", ["rescuetime", "chart"]]
description: "RescueTime Efficiency Chart"
icon: ":chart_with_upwards_trend: "
"""<if_stmt>timely<is><none><block_start>timely="daily"<block_end>rescuetime=RescueTime(slackbot=self.slackbot)<line_sep>rescuetime.efficiency(timely=timely)<block_end><def_stmt>samhangsi self samhangsi_tag:str=<none><block_start>"""
keyword: ["삼행시"]
description: "I am thinking about the Samhangsi with the kor ballad! (using [char-rnn-tensorflow](https://github.com/DongjunLee/char-rnn-tensorflow))"
icon: ":musical_score: "
"""<line_sep>word=samhangsi_tag[1:]<line_sep>non_hangul=re.findall("[^ ㄱ-ㅣ가-힣]+" word)<if_stmt>len(non_hangul)<g>0<block_start>self.slackbot.send_message(text=MsgResource.SAMHANGSI_ONLY_KOR)<line_sep><return><block_end>self.slackbot.send_message(text=MsgResource.SAMHANGSI_PREPARE(word=word))<line_sep>generator=SamhangSiGenerator()<line_sep>generator.load_model()<line_sep>result=generator.generate(word)<line_sep>self.slackbot.send_message(text=result)<block_end><def_stmt>send_message self text:str=<none><block_start>"""
keyword: []
description: "Send a text message."
icon: ":speech_balloon: "
"""<line_sep>self.slackbot.send_message(text=text)<block_end><def_stmt>today_briefing self<block_start>"""
keyword: [["하루", "브리핑"], ["오늘하루", "브리핑"], ["today", "briefing"]]
description: "Today Briefing - brief Todoist tasks"
icon: ":city_sunset: "
"""<line_sep>todoist=TodoistManager(slackbot=self.slackbot)<line_sep>todoist.schedule()<block_end><def_stmt>today_summary self timely:str=<none><block_start>"""
keyword: [["하루", "마무리"], ["하루", "요약"], ["today", "summary"]]
description: "Today summary - **toggl_report**, **rescuetime_efficiency**, **happy_report**, **attention_report**, **github_commit**"
icon: ":night_with_stars: "
"""<line_sep>self.slackbot.send_message(text=MsgResource.TODAY_SUMMARY)<line_sep># self.todoist_feedback()
self.toggl_report(timely=timely)<line_sep>self.rescuetime_efficiency(timely=timely)<line_sep>self.happy_report(timely=timely)<line_sep>self.attention_report(timely=timely)<line_sep>self.github_commit(timely=timely)<block_end><def_stmt>todoist_feedback self<block_start>"""
keyword: [["할일", "피드백"], ["todoist", "feedback"]]
description: "Feedback from Todoist activity."
icon: ":memo: "
"""<line_sep>todoist=TodoistManager(slackbot=self.slackbot)<line_sep>todoist.feedback()<block_end><def_stmt>todoist_remain self<block_start>"""
keyword: [["남은", "작업"], ["remain", "task"]]
description: "Show todoist's remaining tasks."
icon: ":page_with_curl: "
"""<line_sep>todoist=TodoistManager(slackbot=self.slackbot)<line_sep>todoist.remain_task()<block_end><def_stmt>toggl_checker self<block_start>"""
keyword: [["작업", "시간"], ["시간", "체크"], ["task", "time", "check"]]
description: "Toggl time checker Every 30 minutes."
icon: ":bell: "
"""<line_sep>toggl=TogglManager(slackbot=self.slackbot)<line_sep>toggl.check_toggl_timer()<block_end><def_stmt>toggl_report self kind:str="chart" timely:str="daily"<block_start>"""
keyword: [["작업", "리포트"], ["task", "report"]]
description: "Toggl task Report."
icon: ":bar_chart: "
"""<if_stmt>kind<is><none><block_start>kind="chart"<block_end><if_stmt>timely<is><none><block_start>timely="daily"<block_end>toggl=TogglManager(slackbot=self.slackbot)<line_sep>toggl.report(kind=kind timely=timely)<block_end><def_stmt>toggl_timer self description:str=<none><block_start>"""
keyword: ["toggl"]
description: "Toggl Timer."
icon: ":watch: "
"""<line_sep>toggl=TogglManager(slackbot=self.slackbot)<line_sep>toggl.timer(description=description)<block_end><def_stmt>total_chart self<block_start>"""
keyword: [["종합", "차트"], ["overall", "chart"], ["total", "chart"]]
description: "Overall chart - weekly productivity, happiness, overall score chart."
icon: ":chart: "
"""<line_sep>summary=Summary(slackbot=self.slackbot)<line_sep>summary.total_chart()<block_end><def_stmt>total_score self<block_start>"""
keyword: [["종합", "점수"], ["overall", "score"], ["total", "score"]]
description: "Overall score - Productivity (RescueTime, Github Commit, Todoist, Toggl), Mean happiness, mean attention, Exercise, Diary."
icon: ":chart: "
"""<line_sep>summary=Summary(slackbot=self.slackbot)<line_sep>summary.total_score()<block_end><def_stmt>translate self english:str="" source:str="en" target:str="ko"<block_start>"""
keyword: ["번역", "translate"]
description: "Language translation using [Naver Papago api](https://developers.naver.com/docs/nmt/reference/)."
icon: ":crystal_ball: "
"""<if_stmt>source<is><none><block_start>source="en"<block_end><if_stmt>target<is><none><block_start>target="ko"<block_end>naver=Naver(slackbot=self.slackbot)<line_sep>naver.translate(english source=source target=target)<block_end><block_end><class_stmt>RegisteredFuctions(object)<block_start><class_stmt>__List<block_start><def_stmt>__init__ self<block_start>self.list=DataHandler().read_file("skills.json")<block_end><block_end>instance=<none><def_stmt>__init__ self<block_start><if_stmt><not>RegisteredFuctions.instance<block_start>RegisteredFuctions.instance=RegisteredFuctions.__List()<block_end><block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.instance name)<block_end><block_end><class_stmt>FunctionRunner(object)<block_start><def_stmt>__init__ self text=<none><block_start>self.input=text<line_sep>self.functions=Functions().registered<line_sep>self.logger=Logger().get_logger()<block_end><def_stmt>load_function self start_time=<none> end_time=<none> func_name=<none> params=<none> repeat=<false> day_of_week=<none> not_holiday=<false> <block_start><if_stmt>not_holiday<and>Summary().is_holiday()<block_start><return><block_end><if_stmt><not>ArrowUtil.is_today_day_of_week(day_of_week)<block_start><return><block_end><if_stmt><not>repeat<block_start>self.__excute(func_name params)<line_sep><return>schedule.CancelJob<block_end><elif_stmt>(repeat)<and>(ArrowUtil.is_between(start_time end_time))<block_start>self.__excute(func_name params)<block_end><block_end><def_stmt>__excute self func_name params<block_start>self.logger.info("load_function: "+str(func_name)+", "+str(params))<line_sep>getattr(Functions() func_name)(**params)<block_end><def_stmt>filter_f_params self text func_name<block_start>ner=NamedEntitiyRecognizer()<line_sep>func_param_list=ner.skills[func_name]["params"]<line_sep>params={k:ner.parse(v text)<for>k,v 
ner.params.items()}<line_sep>member=Member()<line_sep>member_name=member.get_names(text)<line_sep>params["member"]=member_name<line_sep>f_params={}<if_stmt>params<is><not><none><block_start><for_stmt>k,v params.items()<block_start><if_stmt>k<in>func_param_list<and>v<is><not><none><block_start>f_params[k]=v<block_end><block_end><block_end><return>f_params<block_end><block_end> |
<import_stmt>os<import_stmt>hashlib<import_stmt>importlib<import_stmt>zipfile<def_stmt>zipdir path out_path timestamp=<none># zipf is zipfile handle
<block_start>zipf=zipfile.ZipFile(out_path 'w' zipfile.ZIP_DEFLATED)<for_stmt>root,dirs,files os.walk(path)<block_start><for_stmt>file files<block_start>src=os.path.join(root file)<if_stmt>timestamp<is><none><block_start>dst=os.path.relpath(os.path.join(root file) os.path.join(path '..'))<block_end><else_stmt><block_start>dst=os.path.relpath(os.path.join(f'{root}_{timestamp}' file) os.path.join(path '..'))<block_end>zipf.write(src dst)<block_end><block_end>zipf.close()<block_end># def module_to_bytes(path, ignore=['__pycache__']):
# # zipf is zipfile handle
# cont = b''
# for root, dirs, files in os.walk(path):
# for file in files:
# for i in ignore:
# src = os.path.join(root, file)
# if i not in src:
# with open(src, 'rb') as f:
# cont += f.read()
# return cont
<def_stmt>get_module_hash path ignore=['__pycache__']# zipf is zipfile handle
<block_start>sha=hashlib.sha256()<for_stmt>root,dirs,files os.walk(path)<block_start><for_stmt>file files<block_start><for_stmt>i ignore<block_start>src=os.path.join(root file)<if_stmt>i<not><in>src<block_start><with_stmt>open(src 'rb')<as>f<block_start>sha.update(f.read())<line_sep># cont += f.read()
<block_end><block_end><block_end><block_end><block_end><return>sha.hexdigest()<block_end><def_stmt>import_by_string full_name<block_start>module_name,unit_name=full_name.rsplit('.' 1)<line_sep>mod=importlib.import_module(module_name)<line_sep><return>getattr(mod unit_name)<block_end><def_stmt>exec_dyn_class idx class_name<block_start>my_class=import_by_string(class_name)<line_sep>instance=my_class(idx)<line_sep><return>instance._run(ret_success=<true>)<block_end><def_stmt>get_import_name_by_script script_name timestamp=<none><block_start>mod_name=os.path.splitext(script_name)[0]<if_stmt>timestamp<is><not><none><block_start>mod_list=mod_name.split('.')<line_sep>mod_list[0]=f'{mod_list[0]}_{timestamp}'<line_sep>mod_name='.'.join(mod_list)<block_end><return>f'{mod_name}.LostScript'<block_end> |
<import_stmt>os<import_from_stmt>unittest TestCase<line_sep># most of the features of this script are already tested indirectly when
# running vensim and xmile integration tests
_root=os.path.dirname(__file__)<class_stmt>TestErrors(TestCase)<block_start><def_stmt>test_canonical_file_not_found self<block_start><import_from_stmt>pysd.tools.benchmarking runner<with_stmt>self.assertRaises(FileNotFoundError)<as>err<block_start>runner(os.path.join(_root "more-tests/not_existent.mdl"))<block_end>self.assertIn('Canonical output file not found.' str(err.exception))<block_end><def_stmt>test_non_valid_model self<block_start><import_from_stmt>pysd.tools.benchmarking runner<with_stmt>self.assertRaises(ValueError)<as>err<block_start>runner(os.path.join(_root "more-tests/not_vensim/test_not_vensim.txt"))<block_end>self.assertIn('Modelfile should be *.mdl or *.xmile' str(err.exception))<block_end><def_stmt>test_non_valid_outputs self<block_start><import_from_stmt>pysd.tools.benchmarking load_outputs<with_stmt>self.assertRaises(ValueError)<as>err<block_start>load_outputs(os.path.join(_root "more-tests/not_vensim/test_not_vensim.txt"))<block_end>self.assertIn("Not able to read '" str(err.exception))<line_sep>self.assertIn("more-tests/not_vensim/test_not_vensim.txt'." str(err.exception))<block_end><def_stmt>test_different_frames_error self<block_start><import_from_stmt>pysd.tools.benchmarking load_outputs assert_frames_close<with_stmt>self.assertRaises(AssertionError)<as>err<block_start>assert_frames_close(load_outputs(os.path.join(_root "data/out_teacup.csv")) load_outputs(os.path.join(_root "data/out_teacup_modified.csv")))<block_end>self.assertIn("Following columns are not close:\n\tTeacup Temperature" str(err.exception))<line_sep>self.assertNotIn("Column 'Teacup Temperature' is not close." 
str(err.exception))<line_sep>self.assertNotIn("Actual values:\n\t" str(err.exception))<line_sep>self.assertNotIn("Expected values:\n\t" str(err.exception))<with_stmt>self.assertRaises(AssertionError)<as>err<block_start>assert_frames_close(load_outputs(os.path.join(_root "data/out_teacup.csv")) load_outputs(os.path.join(_root "data/out_teacup_modified.csv")) verbose=<true>)<block_end>self.assertIn("Following columns are not close:\n\tTeacup Temperature" str(err.exception))<line_sep>self.assertIn("Column 'Teacup Temperature' is not close." str(err.exception))<line_sep>self.assertIn("Actual values:\n\t" str(err.exception))<line_sep>self.assertIn("Expected values:\n\t" str(err.exception))<block_end><def_stmt>test_different_frames_warning self<block_start><import_from_stmt>warnings catch_warnings<import_from_stmt>pysd.tools.benchmarking load_outputs assert_frames_close<with_stmt>catch_warnings(record=<true>)<as>ws<block_start>assert_frames_close(load_outputs(os.path.join(_root "data/out_teacup.csv")) load_outputs(os.path.join(_root "data/out_teacup_modified.csv")) assertion="warn")<line_sep># use only user warnings
wu=[w<for>w ws<if>issubclass(w.category UserWarning)]<line_sep>self.assertEqual(len(wu) 1)<line_sep>self.assertIn("Following columns are not close:\n\tTeacup Temperature" str(wu[0].message))<line_sep>self.assertNotIn("Column 'Teacup Temperature' is not close." str(wu[0].message))<line_sep>self.assertNotIn("Actual values:\n\t" str(wu[0].message))<line_sep>self.assertNotIn("Expected values:\n\t" str(wu[0].message))<block_end><with_stmt>catch_warnings(record=<true>)<as>ws<block_start>assert_frames_close(load_outputs(os.path.join(_root "data/out_teacup.csv")) load_outputs(os.path.join(_root "data/out_teacup_modified.csv")) assertion="warn" verbose=<true>)<line_sep># use only user warnings
wu=[w<for>w ws<if>issubclass(w.category UserWarning)]<line_sep>self.assertEqual(len(wu) 1)<line_sep>self.assertIn("Following columns are not close:\n\tTeacup Temperature" str(wu[0].message))<line_sep>self.assertIn("Column 'Teacup Temperature' is not close." str(wu[0].message))<line_sep>self.assertIn("Actual values:\n\t" str(wu[0].message))<line_sep>self.assertIn("Expected values:\n\t" str(wu[0].message))<block_end><block_end><def_stmt>test_transposed_frame self<block_start><import_from_stmt>pysd.tools.benchmarking load_outputs assert_frames_close<line_sep>assert_frames_close(load_outputs(os.path.join(_root "data/out_teacup.csv")) load_outputs(os.path.join(_root "data/out_teacup_transposed.csv") transpose=<true>))<block_end><def_stmt>test_load_columns self<block_start><import_from_stmt>pysd.tools.benchmarking load_outputs<line_sep>out0=load_outputs(os.path.join(_root "data/out_teacup.csv"))<line_sep>out1=load_outputs(os.path.join(_root "data/out_teacup.csv") columns=["Room Temperature" "Teacup Temperature"])<line_sep>out2=load_outputs(os.path.join(_root "data/out_teacup_transposed.csv") transpose=<true> columns=["Heat Loss to Room"])<line_sep>self.assertEqual(set(out1.columns) set(["Room Temperature" "Teacup Temperature"]))<line_sep>self.assertEqual(set(out2.columns) set(["Heat Loss to Room"]))<line_sep>self.assertTrue((out0.index<eq>out1.index).all())<line_sep>self.assertTrue((out0.index<eq>out2.index).all())<block_end><def_stmt>test_different_cols self<block_start><import_from_stmt>warnings catch_warnings<import_from_stmt>pysd.tools.benchmarking assert_frames_close<import_stmt>pandas<as>pd<line_sep>d1=pd.DataFrame({'a':[1 2] 'b':[3 4] 'd':[6 7]})<line_sep>d2=pd.DataFrame({'a':[1 2]})<line_sep>d3=pd.DataFrame({'a':[1 2] 'c':[3 4]})<with_stmt>self.assertRaises(ValueError)<as>err<block_start>assert_frames_close(actual=d1 expected=d2)<block_end>self.assertIn("Columns from actual and expected values must be equal." 
str(err.exception))<with_stmt>catch_warnings(record=<true>)<as>ws<block_start>assert_frames_close(actual=d1 expected=d2 assertion="warn")<line_sep># use only user warnings
wu=[w<for>w ws<if>issubclass(w.category UserWarning)]<line_sep>self.assertEqual(len(wu) 1)<line_sep>self.assertIn("'b'" str(wu[0].message))<line_sep>self.assertIn("'d'" str(wu[0].message))<line_sep>self.assertIn("from actual values not found in expected values." str(wu[0].message))<block_end><with_stmt>catch_warnings(record=<true>)<as>ws<block_start>assert_frames_close(expected=d1 actual=d2 assertion="warn")<line_sep># use only user warnings
wu=[w<for>w ws<if>issubclass(w.category UserWarning)]<line_sep>self.assertEqual(len(wu) 1)<line_sep>self.assertIn("'b'" str(wu[0].message))<line_sep>self.assertIn("'d'" str(wu[0].message))<line_sep>self.assertIn("from expected values not found in actual values." str(wu[0].message))<block_end><with_stmt>catch_warnings(record=<true>)<as>ws<block_start>assert_frames_close(actual=d1 expected=d3 assertion="warn")<line_sep># use only user warnings
wu=[w<for>w ws<if>issubclass(w.category UserWarning)]<line_sep>self.assertEqual(len(wu) 1)<line_sep>self.assertIn("'b'" str(wu[0].message))<line_sep>self.assertIn("'d'" str(wu[0].message))<line_sep>self.assertIn("from actual values not found in expected values." str(wu[0].message))<line_sep>self.assertIn("Columns 'c' from expected values not found in actual "<concat>"values." str(wu[0].message))<block_end><block_end><def_stmt>test_invalid_input self<block_start><import_from_stmt>pysd.tools.benchmarking assert_frames_close<with_stmt>self.assertRaises(TypeError)<as>err<block_start>assert_frames_close(actual=[1 2] expected=[1 2])<block_end>self.assertIn("Inputs must both be pandas DataFrames." str(err.exception))<block_end><block_end> |
<import_from_stmt>abc abstractmethod abstractproperty<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>collections.abc<import_stmt>logging<import_stmt>pkg_resources<import_stmt>uuid<import_from_stmt>urllib.parse urlparse<import_from_stmt>typing Set List<import_stmt>threading<import_stmt>numpy<as>np<import_from_stmt>frozendict frozendict<import_stmt>pyarrow<as>pa<import_stmt>vaex<import_stmt>vaex.execution<import_stmt>vaex.settings<import_stmt>vaex.utils<import_from_stmt>vaex.array_types data_type<import_from_stmt>.column Column ColumnIndexed supported_column_types<import_from_stmt>. array_types<import_from_stmt>vaex encoding<line_sep>logger=logging.getLogger('vaex.dataset')<line_sep>opener_classes=[]<line_sep>HASH_VERSION="1"<line_sep>HASH_VERSION_KEY="version"<line_sep>chunk_size_default=vaex.settings.main.chunk.size<or>1024<power>2<line_sep>_dataset_types={}<line_sep>lock=threading.Lock()<def_stmt>register cls name=<none><block_start>name=name<or>getattr(cls 'snake_name')<or>cls.__name__<line_sep>_dataset_types[name]=cls<line_sep><return>cls<block_end>@encoding.register('dataset')<class_stmt>dataset_encoding<block_start>@staticmethod<def_stmt>encode encoding dataset<block_start><return>dataset.encode(encoding)<block_end>@staticmethod<def_stmt>decode encoding dataset_spec<block_start>dataset_spec=dataset_spec.copy()<line_sep>type=dataset_spec.pop('dataset_type')<line_sep>cls=_dataset_types[type]<line_sep><return>cls.decode(encoding dataset_spec)<block_end><block_end><def_stmt>open path fs_options={} fs=<none> *args **kwargs<block_start>failures=[]<with_stmt>lock# since we cache, make this thread save
<block_start><if_stmt><not>opener_classes<block_start><for_stmt>entry pkg_resources.iter_entry_points(group='vaex.dataset.opener')<block_start>logger.debug('trying opener: '+entry.name)<try_stmt><block_start>opener=entry.load()<line_sep>opener_classes.append(opener)<block_end><except_stmt>Exception<as>e<block_start>logger.exception('issue loading '+entry.name)<line_sep>failures.append((e entry))<block_end><block_end><block_end><block_end># first the quick path
<for_stmt>opener opener_classes<block_start><if_stmt>opener.quick_test(path fs_options=fs_options fs=fs)<block_start><if_stmt>opener.can_open(path fs_options=fs_options fs=fs *args **kwargs)<block_start><return>opener.open(path fs_options=fs_options fs=fs *args **kwargs)<block_end><block_end><block_end># otherwise try all openers
<for_stmt>opener opener_classes<block_start><try_stmt><block_start><if_stmt>opener.can_open(path fs_options=fs_options fs=fs *args **kwargs)<block_start><return>opener.open(path fs_options=fs_options fs=fs *args **kwargs)<block_end><block_end><except_stmt>Exception<as>e<block_start>failures.append((e opener))<block_end><block_end>failures="\n".join([f'\n-----{who}-----\n:'+vaex.utils.format_exception_trace(e)<for>e,who failures])<if_stmt>failures<block_start><raise>IOError(f'Cannot open {path}, failures: {failures}.')<block_end><else_stmt><block_start><raise>IOError(f'Cannot open {path} nobody knows how to read it.')<block_end><block_end><def_stmt>_to_bytes ar<block_start><try_stmt><block_start><return>ar.view(np.uint8)<block_end><except_stmt>ValueError<block_start><return>ar.copy().view(np.uint8)<block_end><block_end><def_stmt>hash_combine *hashes<block_start>hasher=vaex.utils.create_hasher(large_data=<false>)<for_stmt>hash hashes<block_start>hasher.update(hash.encode())<block_end><return>hasher.hexdigest()<block_end><def_stmt>hash_slice hash start end<block_start>hasher=vaex.utils.create_hasher(hash.encode() large_data=<false>)<line_sep>slice=np.array([start end] dtype=np.int64)<line_sep>hasher.update(_to_bytes(slice))<line_sep><return>hasher.hexdigest()<block_end><def_stmt>hash_array_data ar# this function should stay consistent with all future versions
# since this is the expensive part of the hashing
<block_start><if_stmt>isinstance(ar np.ndarray)<block_start>ar=ar.ravel()<if_stmt>ar.dtype<eq>np.object_<block_start><return>{"type":"numpy" "data":str(uuid.uuid4()) "mask":<none>}<block_end><if_stmt>np.ma.isMaskedArray(ar)<block_start>data_byte_ar=_to_bytes(ar.data)<line_sep>hasher=vaex.utils.create_hasher(data_byte_ar large_data=<true>)<line_sep>hash_data={"type":"numpy" "data":hasher.hexdigest() "mask":<none>}<if_stmt>ar.mask<is><not><true><and>ar.mask<is><not><false><and>ar.mask<is><not>np.True_<and>ar.mask<is><not>np.False_<block_start>mask_byte_ar=_to_bytes(ar.mask)<line_sep>hasher=vaex.utils.create_hasher(mask_byte_ar large_data=<true>)<line_sep>hash_data["mask"]=hasher.hexdigest()<block_end><return>hash_data<block_end><else_stmt><block_start><try_stmt><block_start>byte_ar=_to_bytes(ar)<block_end><except_stmt>ValueError<block_start>byte_ar=ar.copy().view(np.uint8)<block_end>hasher=vaex.utils.create_hasher(byte_ar large_data=<true>)<line_sep>hash_data={"type":"numpy" "data":hasher.hexdigest() "mask":<none>}<block_end><block_end><elif_stmt>isinstance(ar (pa.Array pa.ChunkedArray))<block_start>hasher=vaex.utils.create_hasher(large_data=<true>)<line_sep>buffer_hashes=[]<line_sep>hash_data={"type":"arrow" "buffers":buffer_hashes}<if_stmt>isinstance(ar pa.ChunkedArray)<block_start>chunks=ar.chunks<block_end><else_stmt><block_start>chunks=[ar]<block_end><for_stmt>chunk chunks<block_start><for_stmt>buffer chunk.buffers()<block_start><if_stmt>buffer<is><not><none><block_start>hasher.update(memoryview(buffer))<line_sep>buffer_hashes.append(hasher.hexdigest())<block_end><else_stmt><block_start>buffer_hashes.append(<none>)<block_end><block_end><block_end><block_end><elif_stmt>isinstance(ar vaex.column.Column)<block_start>hash_data={"type":"column" "fingerprint":ar.fingerprint()}<block_end><else_stmt><block_start><raise>TypeError<block_end><return>hash_data<block_end><def_stmt>hash_array ar hash_info=<none> return_info=<false># this function can change over time, as it 
builds on top of the expensive part
# (hash_array_data), so we can cheaply calculate new hashes if we pass on hash_info
<block_start><if_stmt>hash_info<is><none><block_start>hash_info=hash_array_data(ar)<block_end><if_stmt>hash_info.get(HASH_VERSION_KEY)<eq>HASH_VERSION# TODO: semver check?
<block_start><return>hash_info['hash'] hash_info<block_end><if_stmt>isinstance(ar np.ndarray)<block_start><if_stmt>ar.dtype<eq>np.object_<block_start><return>hash_info['data']# uuid, so always unique
<block_end><if_stmt>np.ma.isMaskedArray(ar)<block_start><if_stmt><not>(hash_info['type']<eq>'numpy'<and>hash_info['data']<and>hash_info['mask'])<block_start>hash_info=hash_array_data(ar)<block_end><block_end><else_stmt><block_start><if_stmt><not>(hash_info['type']<eq>'numpy'<and>hash_info['data'])<block_start>hash_info=hash_array_data(ar)<block_end><block_end>keys=[HASH_VERSION hash_info['type'] hash_info['data']]<if_stmt>hash_info['mask']<block_start>keys.append(hash_info['mask'])<block_end><block_end><elif_stmt>isinstance(ar vaex.array_types.supported_arrow_array_types)<block_start><if_stmt><not>(hash_info['type']<eq>'arrow'<and>hash_info['buffers'])<block_start>hash_info=hash_array_data(ar)<block_end>keys=[HASH_VERSION]<line_sep>keys.extend(["NO_BUFFER"<if><not>b<else>b<for>b hash_info['buffers']])<block_end><elif_stmt>isinstance(ar vaex.column.Column)<block_start><if_stmt><not>(hash_info['type']<eq>'column')<block_start>hash_info=hash_array_data(ar)<block_end>keys=[HASH_VERSION]<line_sep>keys.append(hash_info['fingerprint'])<block_end>hasher=vaex.utils.create_hasher(large_data=<false>)# small amounts of data
<for_stmt>key keys<block_start>hasher.update(key.encode('ascii'))<block_end>hash=hasher.hexdigest()<if_stmt>return_info<block_start>hash_info['hash']=hash<line_sep>hash_info[HASH_VERSION_KEY]=HASH_VERSION<line_sep><return>hash hash_info<block_end><else_stmt><block_start><return>hash<block_end><block_end><def_stmt>to_supported_array ar<block_start><if_stmt><not>isinstance(ar supported_column_types)<block_start>ar=np.asanyarray(ar)<block_end><if_stmt>isinstance(ar np.ndarray)<and>ar.dtype.kind<eq>'U'<block_start>ar=vaex.column.ColumnArrowLazyCast(ar pa.string())<block_end><elif_stmt>isinstance(ar np.ndarray)<and>ar.dtype.kind<eq>'O'<block_start>ar_data=ar<if_stmt>np.ma.isMaskedArray(ar)<block_start>ar_data=ar.data<block_end><try_stmt># "k != k" is a way to detect NaN's and NaT's
<block_start>types=list({type(k)<for>k ar_data<if>k<is><not><none><and>k<eq>k})<block_end><except_stmt>ValueError# If there is an array value in the column, Numpy throws a ValueError
# "The truth value of an array with more than one element is ambiguous".
# We don't handle this by default as it is a bit slower.
<block_start><def_stmt>is_missing k<block_start><if_stmt>k<is><none><block_start><return><true><block_end><try_stmt># a way to detect NaN's and NaT
<block_start><return><not>(k<eq>k)<block_end><except_stmt>ValueError# if a value is an array, this will fail, and it is a non-missing
<block_start><return><false><block_end><block_end>types=list({type(k)<for>k ar_data<if>k<is><not>is_missing(k)})<block_end><if_stmt>len(types)<eq>1<and>issubclass(types[0] str)# TODO: how do we know it should not be large_string?
# self._dtypes_override[valid_name] = pa.string()
<block_start>ar=vaex.column.ColumnArrowLazyCast(ar pa.string())<block_end><if_stmt>len(types)<eq>0# can only be if all nan right?
<block_start>ar=ar.astype(np.float64)<block_end><block_end><return>ar<block_end><def_stmt>_concat_chunk_list list_of_chunks<block_start>dict_of_list_of_arrays=collections.defaultdict(list)<for_stmt>chunks list_of_chunks<block_start><for_stmt>name,array chunks.items()<block_start><if_stmt>isinstance(array pa.ChunkedArray)<block_start>dict_of_list_of_arrays[name].extend(array.chunks)<block_end><else_stmt><block_start>dict_of_list_of_arrays[name].append(array)<block_end><block_end><block_end>chunks={name:vaex.array_types.concat(arrays)<for>name,arrays dict_of_list_of_arrays.items()}<line_sep><return>chunks<block_end><def_stmt>_slice_of_chunks chunks_ready_list chunk_size<block_start>current_row_count=0<line_sep>chunks_current_list=[]<while_stmt>current_row_count<l>chunk_size<and>chunks_ready_list<block_start>chunks_current=chunks_ready_list.pop(0)<line_sep>chunk=list(chunks_current.values())[0]<line_sep># chunks too large, split, and put back a part
<if_stmt>current_row_count+len(chunk)<g>chunk_size<block_start>strict=<true><if_stmt>strict<block_start>needed_length=chunk_size-current_row_count<line_sep>current_row_count<augadd>needed_length<assert_stmt>current_row_count<eq>chunk_size<line_sep>chunks_head={name:vaex.array_types.slice(chunk 0 needed_length)<for>name,chunk chunks_current.items()}<line_sep>chunks_current_list.append(chunks_head)<line_sep>chunks_extra={name:vaex.array_types.slice(chunk needed_length)<for>name,chunk chunks_current.items()}<line_sep>chunks_ready_list.insert(0 chunks_extra)# put back the extra in front
<block_end><else_stmt><block_start>current_row_count<augadd>len(chunk)<line_sep>chunks_current_list.append(chunks_current)<block_end><block_end><else_stmt><block_start>current_row_count<augadd>len(chunk)<line_sep>chunks_current_list.append(chunks_current)<block_end><block_end><return>chunks_current_list current_row_count<block_end><def_stmt>chunk_rechunk chunk_iter chunk_size<block_start>chunks_ready_list=[]<line_sep>i1=i2=0<for_stmt>_,_,chunks chunk_iter<block_start>chunks_ready_list.append(chunks)<line_sep>total_row_count=sum([len(list(k.values())[0])<for>k chunks_ready_list])<if_stmt>total_row_count<g>chunk_size<block_start>chunks_current_list,current_row_count=vaex.dataset._slice_of_chunks(chunks_ready_list chunk_size)<line_sep>i2<augadd>current_row_count<line_sep>chunks=vaex.dataset._concat_chunk_list(chunks_current_list)<line_sep><yield>i1 i2 chunks<line_sep>i1=i2<block_end><block_end><while_stmt>chunks_ready_list<block_start>chunks_current_list,current_row_count=vaex.dataset._slice_of_chunks(chunks_ready_list chunk_size)<line_sep>i2<augadd>current_row_count<line_sep>chunks=vaex.dataset._concat_chunk_list(chunks_current_list)<line_sep><yield>i1 i2 chunks<line_sep>i1=i2<block_end><block_end><def_stmt>_rechunk chunk_iter chunk_size<block_start><def_stmt>wrapper <block_start>i1=i2=0<for_stmt>chunks chunk_iter<block_start>i2<augadd>len(list(chunks.values())[0])<line_sep><yield>i1 i2 chunks<line_sep>i1=i2<block_end><block_end><yield><from>chunk_rechunk(wrapper() chunk_size)<block_end><def_stmt>empty_chunk_iterator start end chunk_size<block_start>length=end-start<line_sep>i1=0<line_sep>i2=min(length i1+chunk_size)<while_stmt>i1<l>length<block_start><yield>i1 i2 {}<line_sep>i1=i2<line_sep>i2=min(length i1+chunk_size)<block_end><block_end><class_stmt>Dataset(collections.abc.Mapping)<block_start><def_stmt>__init__ 
self<block_start>super().__init__()<line_sep>self._columns=frozendict()<line_sep>self._row_count=<none><line_sep>self._id=str(uuid.uuid4())<line_sep>self._cached_fingerprint=<none><block_end><def_stmt>__repr__ self<block_start><import_stmt>yaml<line_sep>data=self.__repr_data__()<line_sep><return>yaml.dump(data sort_keys=<false> indent=4)<block_end><def_stmt>__repr_data__ self<block_start>state=self.__getstate__()<def_stmt>normalize v<block_start><if_stmt>isinstance(v Dataset)<block_start><return>v.__repr_data__()<block_end><if_stmt>isinstance(v frozendict)<block_start><return>dict(v)<block_end><if_stmt>isinstance(v vaex.dataframe.DataFrame)<block_start><return>{'type':'dataframe' 'repr':repr(v)}<block_end><if_stmt>isinstance(v np.ndarray)<block_start><return>v.tolist()<block_end><return>v<block_end><return>{'type':self.snake_name **{k:normalize(v)<for>k,v state.items()<if><not>k.startswith('_')}}<block_end>@property<def_stmt>id self<block_start>'''id that uniquely identifies a dataset at runtime'''<line_sep><return>self.fingerprint<block_end>@property<def_stmt>fingerprint self<block_start>'''id that uniquely identifies a dataset cross runtime, might be more expensive and require hasing'''<if_stmt>self._cached_fingerprint<is><none><block_start>self._cached_fingerprint=self._fingerprint<block_end><return>self._cached_fingerprint<block_end>@abstractproperty<def_stmt>_fingerprint self<block_start><pass><block_end><def_stmt>encode self encoding<block_start><if_stmt><not>encoding.has_object_spec(self.id)<block_start>spec=self._encode(encoding)<line_sep>encoding.set_object_spec(self.id spec)<block_end><return>{'dataset_type':self.snake_name 'object-id':self.id}<block_end>@classmethod<def_stmt>decode cls encoding spec<block_start>id=spec['object-id']<if_stmt><not>encoding.has_object(id)<block_start>spec=encoding.get_object_spec(id)<line_sep>ds=cls._decode(encoding spec)<line_sep>encoding.set_object(id 
ds)<block_end><return>encoding.get_object(id)<block_end>@abstractmethod<def_stmt>_create_columns self<block_start><pass><block_end>@property<def_stmt>name self# TODO: in the future, we might want to use self.fingerprint or self.id
<block_start><return>"no-name"<block_end><def_stmt>__getstate__ self<block_start>state=self.__dict__.copy()<del_stmt>state['_columns']<del_stmt>state['_cached_fingerprint']<line_sep><return>state<block_end><def_stmt>__setstate__ self state<block_start>self.__dict__.update(state)<line_sep>self._cached_fingerprint=<none><line_sep>self._create_columns()<block_end><def_stmt>schema self array_type=<none><block_start><return>{name:vaex.array_types.data_type(col)<for>name,col self.items()}<block_end><def_stmt>shapes self<block_start><return>{name:self.shape(name)<for>name,col self.items()}<block_end><def_stmt>_set_row_count self<block_start><if_stmt><not>self._columns<block_start><return><block_end>values=list(self._columns.values())<line_sep>self._row_count=len(values[0])<for_stmt>name,value list(self._columns.items())[1:]<block_start><if_stmt>len(value)<ne>self._row_count<block_start><raise>ValueError(f'First columns has length {self._row_count}, while column {name} has length {len(value)}')<block_end><block_end><block_end>@property<def_stmt>row_count self<block_start><return>self._row_count<block_end><def_stmt>project self *names<block_start>all=set(self)<line_sep>drop=all-set(names)<line_sep># we want a deterministic order for fingerprints
drop=list(drop)<line_sep>drop.sort()<line_sep><return>self.dropped(*list(drop))<block_end><def_stmt>concat self *others resolver='flexible'<block_start>datasets=[]<if_stmt>isinstance(self DatasetConcatenated)<block_start>datasets.extend(self.datasets)<block_end><else_stmt><block_start>datasets.extend([self])<block_end><for_stmt>other others<block_start><if_stmt>isinstance(other DatasetConcatenated)<block_start>datasets.extend(other.datasets)<block_end><else_stmt><block_start>datasets.extend([other])<block_end><block_end><return>DatasetConcatenated(datasets resolver=resolver)<block_end><def_stmt>take self indices masked=<false><block_start><return>DatasetTake(self indices masked=masked)<block_end><def_stmt>renamed self renaming<block_start><return>DatasetRenamed(self renaming)<block_end><def_stmt>merged self rhs<block_start><return>DatasetMerged(self rhs)<block_end><def_stmt>dropped self *names<block_start><return>DatasetDropped(self names)<block_end><def_stmt>__getitem__ self item<block_start><if_stmt>isinstance(item slice)<block_start><assert_stmt>item.step<in>[1 <none>]<line_sep><return>self.slice(item.start<or>0 item.stop<or>self.row_count)<block_end><return>self._columns[item]<block_end><def_stmt>__len__ self<block_start><return>len(self._columns)<block_end><def_stmt>__iter__ self<block_start><return>iter(self._columns)<block_end><def_stmt>get_data self i1 i2 names<block_start><raise>NotImplementedError<block_end><def_stmt>__eq__ self rhs<block_start><if_stmt><not>isinstance(rhs Dataset)<block_start><return>NotImplemented<block_end># simple case, if fingerprints are equal, the data is equal
<if_stmt>self.fingerprint<eq>rhs.fingerprint<block_start><return><true><block_end># but no the other way around
keys=set(self)<line_sep>keys_hashed=set(self._ids)<line_sep>missing=keys^keys_hashed<if_stmt>missing<block_start><return>self.fingerprint<eq>rhs.fingerprint<block_end>keys=set(rhs)<line_sep>keys_hashed=set(rhs._ids)<line_sep>missing=keys^keys_hashed<if_stmt>missing<block_start><return>self.fingerprint<eq>rhs.fingerprint<block_end><return>self._ids<eq>rhs._ids<block_end><def_stmt>__hash__ self<block_start>keys=set(self)<line_sep>keys_hashed=set(self._ids)<line_sep>missing=keys^keys_hashed<if_stmt>missing# if we don't have hashes for all columns, we just use the fingerprint
<block_start><return>hash(self.fingerprint)<block_end><return>hash(tuple(self._ids.items()))<block_end><def_stmt>_default_lazy_chunk_iterator self array_map columns chunk_size reverse=<false><block_start>chunk_size=chunk_size<or>1024<power>2<line_sep>chunk_count=(self.row_count+chunk_size-1)<floordiv>chunk_size<line_sep>chunks=range(chunk_count)<if_stmt>reverse<block_start>chunks=reversed(chunks)<block_end><for_stmt>i chunks<block_start>i1=i<times>chunk_size<line_sep>i2=min((i+1)<times>chunk_size self.row_count)<def_stmt>reader i1=i1 i2=i2<block_start>chunks={k:array_map[k][i1:i2]<for>k columns}<line_sep>length=i2-i1<for_stmt>name,chunk chunks.items()<block_start><assert_stmt>len(chunk)<eq>length f'Oops, got a chunk ({name}) of length {len(chunk)} while it is expected to be of length {length} (at {i1}-{i2}'<block_end><return>chunks<block_end><yield>i1 i2 reader<block_end><block_end><def_stmt>_default_chunk_iterator self array_map columns chunk_size reverse=<false><block_start><for_stmt>i1,i2,reader self._default_lazy_chunk_iterator(array_map columns chunk_size reverse)<block_start><yield>i1 i2 reader()<block_end><block_end>@abstractmethod<def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><pass><block_end>@abstractmethod<def_stmt>is_masked self column<block_start><pass><block_end>@abstractmethod<def_stmt>shape self column<block_start><pass><block_end>@abstractmethod<def_stmt>close self<block_start>'''Close file handles or other resources, the DataFrame will not be in a usable state afterwards.'''<line_sep><pass><block_end>@abstractmethod<def_stmt>slice self start end<block_start><pass><block_end>@abstractmethod<def_stmt>hashed self<block_start><pass><block_end>@abstractmethod<def_stmt>leafs self<arrow>List["Dataset"]<block_start><pass><block_end><block_end><class_stmt>DatasetDecorator(Dataset)<block_start><def_stmt>__init__ self original<block_start>super().__init__()<line_sep>self.original=original<block_end><def_stmt>leafs 
self<arrow>List[Dataset]<block_start><return>self.original.leafs()<block_end><def_stmt>close self<block_start>self.original.close()<block_end><def_stmt>is_masked self column<block_start><return>self.original.is_masked(column)<block_end><def_stmt>shape self column<block_start><return>self.original.shape(column)<block_end><block_end><class_stmt>ColumnProxy(vaex.column.Column)<block_start>'''To give the Dataset._columns object useful containers for debugging'''<line_sep>ds:Dataset<def_stmt>__init__ self ds name type<block_start>self.ds=ds<line_sep>self.name=name<line_sep>self.dtype=type<block_end><def_stmt>_fingerprint self<block_start>fp=vaex.cache.fingerprint(self.ds.fingerprint self.name)<line_sep><return>f'column-proxy-{fp}'<block_end><def_stmt>__len__ self<block_start><return>self.ds.row_count<block_end><def_stmt>to_numpy self<block_start>values=self[:]<line_sep><return>np.array(values)<block_end><def_stmt>__getitem__ self item<block_start><if_stmt>isinstance(item slice)<block_start>array_chunks=[]<line_sep>ds=self.ds.__getitem__(item)<for_stmt>chunk_start,chunk_end,chunks ds.chunk_iterator([self.name])<block_start>ar=chunks[self.name]<if_stmt>isinstance(ar pa.ChunkedArray)<block_start>array_chunks.extend(ar.chunks)<block_end><else_stmt><block_start>array_chunks.append(ar)<block_end><block_end><if_stmt>len(array_chunks)<eq>1<block_start><return>array_chunks[0]<block_end><if_stmt>len(array_chunks)<eq>0<block_start><return>vaex.dtype(self.dtype).create_array([])<block_end><return>vaex.array_types.concat(array_chunks)<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><block_end><block_end>@register<class_stmt>DatasetRenamed(DatasetDecorator)<block_start>snake_name='rename'<def_stmt>__init__ self original renaming<block_start>super().__init__(original)<line_sep>self.renaming=renaming<line_sep>self.reverse={v:k<for>k,v renaming.items()}<line_sep>self._create_columns()<line_sep>self._ids=frozendict({renaming.get(name name):ar<for>name,ar 
original._ids.items()})<line_sep>self._set_row_count()<block_end><def_stmt>renamed self renaming# # {'a': 'x', 'b': 'y'} and {'x': 'a', 'b': 'z', 'c', 'q'} -> {'b': 'z', 'c': 'q'}
<block_start>resulting={}<line_sep>renaming=renaming.copy()# we'll modify in place
<for_stmt>old,new self.renaming.items()<block_start><if_stmt>new<in>renaming<block_start><if_stmt>old<eq>renaming[new]<block_start><pass># e.g. x->a->x
<block_end><else_stmt><block_start>resulting[old]=renaming[new]<block_end><del_stmt>renaming[new]# we already covered this
<block_end><else_stmt># e.g. x->a->a
<block_start>resulting[old]=new<block_end><block_end># e.g. x->x->a
resulting.update(renaming)<line_sep><return>DatasetRenamed(self.original resulting)<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.fingerprint self.renaming)<line_sep><return>f'dataset-{self.snake_name}-{self.original.fingerprint}'<block_end><def_stmt>_create_columns self<block_start>self._columns=frozendict({self.renaming.get(name name):ar<for>name,ar self.original.items()})<block_end><def_stmt>_encode self encoding<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep><return>{'renaming':dict(self.renaming) 'dataset':dataset_spec}<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<line_sep><return>cls(dataset spec['renaming'])<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><for_stmt>name columns<block_start><if_stmt>name<in>self.renaming<block_start>rename=self.renaming[name]<line_sep><raise>KeyError(f'Oops, you tried to get column {name}, but you renamed it to {rename}')<block_end><block_end>columns=[self.reverse.get(name name)<for>name columns]<for_stmt>i1,i2,chunks self.original.chunk_iterator(columns chunk_size reverse=reverse)<block_start><yield>i1 i2 {self.renaming.get(name name):ar<for>name,ar chunks.items()}<block_end><block_end><def_stmt>is_masked self column<block_start><return>self.original.is_masked(self.reverse.get(column column))<block_end><def_stmt>shape self column<block_start><return>self.original.shape(self.reverse.get(column column))<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>type(self)(self.original.slice(start end) self.renaming)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() 
self.renaming)<block_end><block_end>@register<class_stmt>DatasetConcatenated(Dataset)<block_start>snake_name="concat"<def_stmt>__init__ self datasets resolver<block_start>super().__init__()<line_sep>self.datasets=datasets<line_sep>self.resolver=resolver<if_stmt>self.resolver<eq>'strict'<block_start><for_stmt>dataset datasets[1:]<block_start><if_stmt>set(dataset)<ne>set(datasets[0])<block_start>l=set(dataset)<line_sep>r=set(datasets[0])<line_sep>diff=l^r<line_sep><raise>NameError(f'Concatenating datasets with different names: {l} and {r} (difference: {diff})')<block_end><block_end>self._schema=datasets[0].schema()<line_sep>self._shapes=datasets[0].shapes()<for_stmt>dataset datasets[1:]<block_start><if_stmt>dataset.shapes()<ne>self._shapes<block_start><raise>ValueError(f'Cannot concatenate with different shapes: {self._shapes} != {dataset.shapes()}')<block_end><block_end><for_stmt>dataset datasets[1:]<block_start>schema=dataset.schema()<if_stmt>dataset.schema()<ne>self._schema<block_start><raise>ValueError(f'Cannot concatenate with different schemas: {self._shapes} != {dataset.shapes()}')<block_end><block_end><block_end><elif_stmt>self.resolver<eq>'flexible'<block_start>schemas=[ds.schema()<for>ds datasets]<line_sep>shapes=[ds.shapes()<for>ds datasets]<line_sep># try to keep the order of the original dataset
schema_list_map={}<for_stmt>schema schemas<block_start><for_stmt>name,type schema.items()<block_start><if_stmt>name<not><in>schema_list_map<block_start>schema_list_map[name]=[]<block_end><block_end><block_end><for_stmt>name,type_list schema_list_map.items()<block_start><for_stmt>schema schemas# None means it is means the column is missing
<block_start>type_list.append(schema.get(name))<block_end><block_end><import_from_stmt>.schema resolver_flexible<line_sep># shapes
shape_list_map={}<for_stmt>shape shapes<block_start><for_stmt>name,type shape.items()<block_start><if_stmt>name<not><in>shape_list_map<block_start>shape_list_map[name]=[]<block_end><block_end><block_end><for_stmt>name,shape_list shape_list_map.items()<block_start><for_stmt>shapes_ shapes# None means it is means the column is missing
<block_start>shape_list.append(shapes_.get(name))<block_end><block_end>self._schema={}<line_sep>self._shapes={}<for_stmt>name shape_list_map<block_start>self._schema[name],self._shapes[name]=resolver_flexible.resolve(schema_list_map[name] shape_list_map[name])<block_end><block_end><else_stmt><block_start><raise>ValueError(f'Invalid resolver {resolver}, choose between "strict" or "flexible"')<block_end>self._create_columns()<line_sep>self._set_row_count()<block_end>@property<def_stmt>_fingerprint self<block_start>ids=[ds.fingerprint<for>ds self.datasets]<line_sep>id=vaex.cache.fingerprint(*ids)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end><def_stmt>_create_columns self<block_start>columns={}<line_sep>hashes={}<for_stmt>name self._schema<block_start>columns[name]=ColumnProxy(self name self._schema[name])<if_stmt>all(name<in>ds._ids<for>ds self.datasets)<block_start>hashes[name]=hash_combine(*[ds._ids[name]<for>ds self.datasets])<block_end><block_end>self._columns=frozendict(columns)<line_sep>self._ids=frozendict(hashes)<block_end><def_stmt>_encode self encoding skip=set()<block_start>datasets=encoding.encode_list('dataset' self.datasets)<line_sep>spec={'dataset_type':self.snake_name 'datasets':datasets 'resolver':self.resolver}<line_sep><return>spec<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>datasets=encoding.decode_list('dataset' spec['datasets'])<line_sep>ds=cls(datasets spec['resolver'])<line_sep><return>ds<block_end><def_stmt>is_masked self column<block_start><for_stmt>dataset self.datasets<block_start><if_stmt>column<not><in>dataset<block_start><return><true><block_end><block_end><return>any(k.is_masked(column)<for>k self.datasets)<block_end><def_stmt>shape self column<block_start><return>self._shapes[column]<block_end><def_stmt>_set_row_count self<block_start>self._row_count=sum(ds.row_count<for>ds self.datasets)<block_end><def_stmt>schema self 
array_type=<none><block_start><return>self._schema.copy()<block_end><def_stmt>_chunk_iterator_non_strict self columns chunk_size=<none> reverse=<false> start=0 end=<none><block_start>end=self.row_count<if>end<is><none><else>end<line_sep>offset=0<for_stmt>dataset self.datasets<block_start>present=[k<for>k columns<if>k<in>dataset]<line_sep># skip over whole datasets
<if_stmt>start<ge>offset+dataset.row_count<block_start>offset<augadd>dataset.row_count<line_sep><continue><block_end># we are past the end
<if_stmt>end<le>offset<block_start><break><block_end><for_stmt>i1,i2,chunks dataset.chunk_iterator(present chunk_size=chunk_size reverse=reverse)# chunks = {name: vaex.array_types.to_arrow(ar) for name, ar in chunks.items()}
<block_start>length=i2-i1<line_sep>chunk_start=offset<line_sep>chunk_end=offset+length<if_stmt>start<ge>chunk_end# we didn't find the beginning yet
<block_start>offset<augadd>length<line_sep><continue><block_end><if_stmt>end<le>chunk_start# we are past the end
# assert False
<block_start><break><block_end><if_stmt>start<g>chunk_start# this means we have to cut off a piece of the beginning
<block_start><if_stmt>end<l>chunk_end# AND the end
<block_start>length=end-chunk_start# without the start cut off
length<augsub>start-chunk_start# correcting for the start cut off
<assert_stmt>length<g>0<line_sep>chunks={name:vaex.array_types.slice(ar start-chunk_start length)<for>name,ar chunks.items()}<for_stmt>name,ar chunks.items()<block_start><assert_stmt>len(ar)<eq>length f'Oops, array was expected to be of length {length} but was {len(ar)}'<block_end><block_end><else_stmt><block_start>length<augsub>start-chunk_start# correcting for the start cut off
<assert_stmt>length<g>0<line_sep>chunks={name:vaex.array_types.slice(ar start-chunk_start)<for>name,ar chunks.items()}<for_stmt>name,ar chunks.items()<block_start><assert_stmt>len(ar)<eq>length f'Oops, array was expected to be of length {length} but was {len(ar)}'<block_end><block_end><block_end><else_stmt><block_start><if_stmt>end<l>chunk_end# we only need to cut off a piece of the end
<block_start>length=end-chunk_start<assert_stmt>length<g>0<line_sep>chunks={name:vaex.array_types.slice(ar 0 length)<for>name,ar chunks.items()}<for_stmt>name,ar chunks.items()<block_start><assert_stmt>len(ar)<eq>length f'Oops, array was expected to be of length {length} but was {len(ar)}'<block_end><block_end><block_end><import_from_stmt>.schema resolver_flexible<line_sep>allchunks={name:resolver_flexible.align(length chunks.get(name) self._schema[name] self._shapes[name])<for>name columns}<line_sep><yield>{k:allchunks[k]<for>k columns}<line_sep>offset<augadd>(i2-i1)<block_end><block_end><block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false> start=0 end=<none><block_start>chunk_size=chunk_size<or>1024<times>1024<line_sep>i1=0<line_sep>i1=i2=0<if_stmt><not>columns<block_start>end=self.row_count<if>end<is><none><else>end<line_sep><yield><from>empty_chunk_iterator(start end chunk_size)<block_end><else_stmt><block_start>chunk_iterator=self._chunk_iterator_non_strict(columns chunk_size reverse=reverse start=start end=self.row_count<if>end<is><none><else>end)<line_sep><yield><from>_rechunk(chunk_iterator chunk_size)<block_end><block_end><def_stmt>close self<block_start><for_stmt>ds self.datasets<block_start>ds.close()<block_end><block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end># TODO: we can be smarter here, and trim off some datasets
<return>DatasetSliced(self start=start end=end)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)([dataset.hashed()<for>dataset self.datasets] resolver=self.resolver)<block_end><def_stmt>leafs self<arrow>List[Dataset]<block_start><return>[self]<block_end># def leafs(self) -> List[Dataset]:
# leafs = list()
# for ds in self.datasets:
# leafs.extend(ds.leafs())
# return leafs
<block_end>@register<class_stmt>DatasetTake(DatasetDecorator)<block_start>snake_name="take"<def_stmt>__init__ self original indices masked<block_start>super().__init__(original)<line_sep>self.indices=indices<line_sep>self.masked=masked<line_sep>self._lazy_hash_index=<none><line_sep>self._create_columns()<line_sep>self._set_row_count()<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.fingerprint self._hash_index self.masked)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end>@property<def_stmt>_hash_index self<block_start><if_stmt>self._lazy_hash_index<is><none><block_start>self._lazy_hash_index=hash_array(self.indices)<block_end><return>self._lazy_hash_index<block_end><def_stmt>_create_columns self# if the columns in ds already have a ColumnIndex
# we could do, direct_indices = df.column['bla'].indices[indices]
# which should be shared among multiple ColumnIndex'es, so we store
# them in this dict
<block_start>direct_indices_map={}<line_sep>columns={}<line_sep>hashes={}<for_stmt>name,column self.original.items()<block_start>columns[name]=ColumnIndexed.index(column self.indices direct_indices_map masked=self.masked)<if_stmt>name<in>self.original._ids<block_start>hashes[name]=hash_combine(self._hash_index self.original._ids[name])<block_end><block_end>self._columns=frozendict(columns)<line_sep>self._ids=frozendict(hashes)<block_end><def_stmt>_encode self encoding skip=set()<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep>spec={'dataset_type':self.snake_name 'dataset':dataset_spec}<line_sep>spec['indices']=encoding.encode('array' self.indices)<line_sep>spec['masked']=self.masked<line_sep><return>spec<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<line_sep>indices=encoding.decode('array' spec['indices'])<line_sep>ds=cls(dataset indices spec['masked'])<line_sep><return>ds<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false># TODO: we may be able to do this slightly more efficient by first
# materializing the columns
<block_start><yield><from>self._default_chunk_iterator(self._columns columns chunk_size reverse=reverse)<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>DatasetSlicedArrays(self start=start end=end)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self.indices self.masked)<block_end><block_end>@register<class_stmt>DatasetFiltered(DatasetDecorator)<block_start>snake_name='filter'<def_stmt>__init__ self original filter expected_length=<none> state=<none> selection=<none><block_start>super().__init__(original)<line_sep>self._filter=filter<line_sep>self._lazy_hash_filter=<none><line_sep>self._create_columns()<line_sep>self._row_count=np.sum(self._filter).item()<line_sep>self.state=state<line_sep>self.selection=selection<if_stmt>expected_length<is><not><none><block_start><if_stmt>expected_length<ne>self._row_count<block_start><raise>ValueError(f'Expected filter to have {expected_length} true values, but counted {self._row_count}')<block_end><block_end><block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.id self._hash_index self.state self.selection)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end>@property<def_stmt>_hash_index self<block_start><if_stmt>self._lazy_hash_filter<is><none><block_start>self._lazy_hash_filter=hash_array(self._filter)<block_end><return>self._lazy_hash_filter<block_end><def_stmt>_create_columns self<block_start>columns={name:vaex.dataset.ColumnProxy(self name data_type(col))<for>name,col self.original._columns.items()}<line_sep>hashes={}<for_stmt>name,column self.original.items()<block_start><if_stmt>name<in>self.original._ids<block_start>hashes[name]=hash_combine(self._hash_index 
self.original._ids[name])<block_end><block_end>self._columns=frozendict(columns)<line_sep>self._ids=frozendict(hashes)<block_end><def_stmt>_encode self encoding skip=set()<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep>spec={'dataset':dataset_spec}<if_stmt>self.state<is><not><none><and>self.selection<is><not><none><block_start>spec['state']=encoding.encode('dataframe-state' self.state)<line_sep>spec['selection']=encoding.encode('selection' self.selection)<block_end>spec['filter_array']=encoding.encode('array' self._filter)<line_sep><return>spec<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<if_stmt>'filter_array'<in>spec<block_start>filter=encoding.decode('array' spec['filter_array'])<line_sep>ds=cls(dataset filter)<block_end><else_stmt><block_start>state=encoding.decode('dataframe-state' spec['state'])<line_sep>selection=encoding.decode('selection' spec['selection'])<line_sep>df=vaex.from_dataset(dataset)<line_sep>df.state_set(state)<line_sep>df.set_selection(vaex.dataframe.FILTER_SELECTION_NAME selection)<line_sep>df._push_down_filter()<line_sep>filter=df.dataset.filter<line_sep>ds=cls(dataset filter state=state selection=selection)<block_end><return>ds<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start>chunk_size=chunk_size<or>1024<power>2<if_stmt><not>columns<block_start>end=self.row_count<line_sep>length=end<line_sep>i1=i2=0<line_sep>i2=min(length i1+chunk_size)<while_stmt>i1<l>length<block_start><yield>i1 i2 {}<line_sep>i1=i2<line_sep>i2=min(length i1+chunk_size)<block_end><return><block_end><def_stmt>filtered_chunks <block_start><for_stmt>i1,i2,chunks self.original.chunk_iterator(columns chunk_size=chunk_size reverse=reverse)<block_start>chunks_filtered={name:vaex.array_types.filter(ar self._filter[i1:i2])<for>name,ar 
chunks.items()}<line_sep><yield>chunks_filtered<block_end><block_end><yield><from>_rechunk(filtered_chunks() chunk_size)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self._filter)<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end>expected_length=end-start<line_sep>mask=vaex.superutils.Mask(memoryview(self._filter))<line_sep>start,end=mask.indices(start end-1)<line_sep>end<augadd>1<line_sep>filter=self._filter[start:end]<assert_stmt>filter.sum()<eq>expected_length<line_sep><return>type(self)(self.original.slice(start end) filter)<block_end><block_end>@register<class_stmt>DatasetSliced(DatasetDecorator)<block_start>snake_name="slice"<def_stmt>__init__ self original start end<block_start>super().__init__(original)<line_sep>self.start=start<line_sep>self.end=end<line_sep>self._row_count=end-start<line_sep>self._create_columns()<line_sep># self._ids = {}
self._ids=frozendict({name:hash_slice(hash start end)<for>name,hash original._ids.items()})<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.fingerprint self.start self.end)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end><def_stmt>leafs self<arrow>List[Dataset]# we don't want to propagate slicing
<block_start><return>[self]<block_end><def_stmt>_encode self encoding skip=set()<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep><return>{'dataset':dataset_spec 'start':self.start 'end':self.end}<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<line_sep><return>cls(dataset spec['start'] spec['end'])<block_end><def_stmt>_create_columns self<block_start>self._columns={name:vaex.dataset.ColumnProxy(self name data_type(col))<for>name,col self.original._columns.items()}<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><yield><from>self.original.chunk_iterator(columns chunk_size=chunk_size reverse=reverse start=self.start end=self.end)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self.start self.end)<block_end><def_stmt>slice self start end<block_start>length=end-start<line_sep>start<augadd>self.start<line_sep>end=start+length<if_stmt>end<g>self.original.row_count<block_start><raise>IndexError(f'Slice end ({end}) if larger than number of rows: {self.original.row_count}')<block_end><return>type(self)(self.original start end)<block_end><block_end>@register<class_stmt>DatasetSlicedArrays(DatasetDecorator)<block_start>snake_name='slice_arrays'<def_stmt>__init__ self original start end<block_start>super().__init__(original)<line_sep># maybe we want to avoid slicing twice, and collapse it to 1?
self.start=start<line_sep>self.end=end<line_sep># TODO: this is the old dataframe.trim method, we somehow need to test/capture that
# if isinstance(column, array_types.supported_array_types): # real array
# df.columns[name] = column[self._index_start:self._index_end]
# else:
# df.columns[name] = column.trim(self._index_start, self._index_end)
self._create_columns()<line_sep>self._ids=frozendict({name:hash_slice(hash start end)<for>name,hash original._ids.items()})<line_sep>self._set_row_count()<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.fingerprint self.start self.end)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end><def_stmt>leafs self<arrow>List[Dataset]# we don't want to propagate slicing
<block_start><return>[self]<block_end><def_stmt>_create_columns self<block_start>columns={}<for_stmt>name,column self.original.items()<block_start><if_stmt>isinstance(column array_types.supported_array_types)# real array
<block_start>column=column[self.start:self.end]<block_end><else_stmt><block_start>column=column.trim(self.start self.end)<block_end>columns[name]=column<block_end>self._columns=frozendict(columns)<block_end><def_stmt>_encode self encoding skip=set()<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep><return>{'dataset':dataset_spec 'start':self.start 'end':self.end}<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<line_sep><return>cls(dataset spec['start'] spec['end'])<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><yield><from>self._default_chunk_iterator(self._columns columns chunk_size reverse=reverse)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self.start self.end)<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end>length=end-start<line_sep>start<augadd>self.start<line_sep>end=start+length<if_stmt>end<g>self.original.row_count<block_start><raise>IndexError(f'Slice end ({end}) if larger than number of rows: {self.original.row_count}')<block_end><return>type(self)(self.original start end)<block_end><block_end>@register<class_stmt>DatasetDropped(DatasetDecorator)<block_start>snake_name="drop"<def_stmt>__init__ self original names<block_start>super().__init__(original)<line_sep>self._dropped_names=tuple(names)<line_sep>self._create_columns()<line_sep>self._ids=frozendict({name:ar<for>name,ar original._ids.items()<if>name<not><in>names})<line_sep>self._set_row_count()<block_end><def_stmt>dropped self *names<block_start><return>DatasetDropped(self.original self._dropped_names+names)<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.original.fingerprint 
self._dropped_names)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end><def_stmt>_create_columns self<block_start>self._columns=frozendict({name:ar<for>name,ar self.original.items()<if>name<not><in>self._dropped_names})<block_end><def_stmt>_encode self encoding<block_start>dataset_spec=encoding.encode('dataset' self.original)<line_sep><return>{'dataset':dataset_spec 'names':list(self._dropped_names)}<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>dataset=encoding.decode('dataset' spec['dataset'])<line_sep>ds=cls(dataset spec['names'])<line_sep><return>ds<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><for_stmt>column columns<block_start><if_stmt>column<in>self._dropped_names<block_start><raise>KeyError(f'Oops, you tried to get column {column} while it is actually dropped')<block_end><block_end><yield><from>self.original.chunk_iterator(columns chunk_size=chunk_size reverse=reverse)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self._dropped_names)<block_end><def_stmt>close self<block_start>self.original.close()<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>type(self)(self.original.slice(start end) self._dropped_names)<block_end><block_end>@register<class_stmt>DatasetMerged(Dataset)<block_start>snake_name="merge"<def_stmt>__init__ self left right<block_start>super().__init__()<line_sep>self.left=left<line_sep>self.right=right<if_stmt>self.left.row_count<ne>self.right.row_count<block_start><raise>ValueError(f'Merging datasets with unequal row counts ({self.left.row_count} != {self.right.row_count})')<block_end>self._row_count=self.left.row_count<line_sep>overlap=set(left)&set(right)<if_stmt>overlap<block_start><raise>NameError(f'Duplicate names: 
{overlap}')<block_end>self._create_columns()<line_sep>self._ids=frozendict({**left._ids **right._ids})<line_sep>self._set_row_count()<block_end>@property<def_stmt>_fingerprint self<block_start>id=vaex.cache.fingerprint(self.left.fingerprint self.right.fingerprint)<line_sep><return>f'dataset-{self.snake_name}-{id}'<block_end><def_stmt>leafs self<arrow>List[Dataset]<block_start><return>self.left.leafs()+self.right.leafs()<block_end><def_stmt>_create_columns self# TODO: for DatasetArray, we might want to just do this?
# self._columns = frozendict({**left._columns, **right._columns})
<block_start>self._columns={**{name:ColumnProxy(self.left name data_type(col))<for>name,col self.left._columns.items()} **{name:ColumnProxy(self.right name data_type(col))<for>name,col self.right._columns.items()}}<block_end><def_stmt>_encode self encoding skip=set()<block_start>dataset_spec_left=encoding.encode('dataset' self.left)<line_sep>dataset_spec_right=encoding.encode('dataset' self.right)<line_sep>spec={'left':dataset_spec_left 'right':dataset_spec_right}<line_sep><return>spec<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>left=encoding.decode('dataset' spec['left'])<line_sep>right=encoding.decode('dataset' spec['right'])<line_sep>ds=cls(left right)<line_sep><return>ds<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start>columns_left=[k<for>k columns<if>k<in>self.left]<line_sep>columns_right=[k<for>k columns<if>k<in>self.right]<if_stmt><not>columns_left<block_start><yield><from>self.right.chunk_iterator(columns chunk_size reverse=reverse)<block_end><elif_stmt><not>columns_right<block_start><yield><from>self.left.chunk_iterator(columns chunk_size reverse=reverse)<block_end><else_stmt><block_start><for_stmt>(i1 i2 ichunks),(j1 j2 jchunks) zip(self.left.chunk_iterator(columns_left chunk_size reverse=reverse) self.right.chunk_iterator(columns_right chunk_size reverse=reverse))# TODO: if one of the datasets does not respect the chunk_size (e.g. parquet)
# this might fail
<block_start><assert_stmt>i1<eq>j1<assert_stmt>i2<eq>j2<line_sep><yield>i1 i2 {**ichunks **jchunks}<block_end><block_end><block_end><def_stmt>is_masked self column<block_start><if_stmt>column<in>self.left<block_start><return>self.left.is_masked(column)<block_end><else_stmt><block_start><return>self.right.is_masked(column)<block_end><block_end><def_stmt>shape self column<block_start><if_stmt>column<in>self.left<block_start><return>self.left.shape(column)<block_end><else_stmt><block_start><return>self.right.shape(column)<block_end><block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.left.hashed() self.right.hashed())<block_end><def_stmt>close self<block_start>self.left.close()<line_sep>self.right.close()<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>type(self)(self.left.slice(start end) self.right.slice(start end))<block_end><block_end>@register<class_stmt>DatasetArrays(Dataset)<block_start>snake_name="arrays"<def_stmt>__init__ self mapping=<none> hashed=<true> **kwargs<block_start>super().__init__()<if_stmt>mapping<is><none><block_start>mapping={}<block_end>columns={**mapping **kwargs}<line_sep>columns={key:to_supported_array(ar)<for>key,ar columns.items()}<line_sep># TODO: we finally want to get rid of datasets with no columns
self._columns=frozendict(columns)<if_stmt>hashed<block_start>self._ids=frozendict({key:hash_array(array)<for>key,array self._columns.items()})<block_end><else_stmt><block_start>self._ids=frozendict()<block_end>self._set_row_count()<block_end>@property<def_stmt>id self<block_start><try_stmt># requires hashing and is expensive
<block_start><return>self.fingerprint<block_end><except_stmt>ValueError<block_start><return>f'dataset-{self.snake_name}-uuid4-{self._id}'<block_end><block_end>@property<def_stmt>_fingerprint self<block_start>keys=set(self)<line_sep>keys_hashed=set(self._ids)<line_sep>missing=keys^keys_hashed<if_stmt>missing# if we don't have hashes for all columns, we do it like id
<block_start><return>f'dataset-{self.snake_name}-uuid4-{self._id}'<block_end># self.__hash__() # invoke just to check we don't have missing hashes
# but Python's hash functions are not deterministic (cross processs)
fp=vaex.cache.fingerprint(tuple(self._ids.items()))<line_sep><return>f'dataset-{self.snake_name}-hashed-{fp}'<block_end><def_stmt>leafs self<arrow>List[Dataset]<block_start><return>[self]<block_end><def_stmt>_encode self encoding<block_start>arrays=encoding.encode_dict('array' self._columns)<line_sep>spec={'dataset_type':self.snake_name 'arrays':arrays}<if_stmt>self._ids<block_start>fingerprints=dict(self._ids)<line_sep>spec['fingerprints']=fingerprints<block_end><return>spec<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start>arrays=encoding.decode_dict('array' spec['arrays'])<line_sep>ds=cls(arrays)<if_stmt>'fingerprints'<in>spec<block_start>ds._ids=frozendict(spec['fingerprints'])<block_end><return>ds<block_end><def_stmt>__getstate__ self<block_start>state=self.__dict__.copy()<line_sep># here, we actually DO want to keep the columns
# del state['_columns']
<return>state<block_end><def_stmt>__setstate__ self state<block_start>super().__setstate__(state)<block_end><def_stmt>_create_columns self<block_start><pass><block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><yield><from>self._default_chunk_iterator(self._columns columns chunk_size reverse=reverse)<block_end><def_stmt>is_masked self column<block_start>ar=self._columns[column]<if_stmt><not>isinstance(ar np.ndarray)<block_start>ar=ar[0:1]# take a small piece
<block_end><if_stmt>isinstance(ar np.ndarray)<block_start><return>np.ma.isMaskedArray(ar)<block_end><else_stmt><block_start><return><false><block_end><block_end># an arrow array always has null value options
<def_stmt>shape self column<block_start>ar=self._columns[column]<if_stmt><not>isinstance(ar np.ndarray)<block_start>ar=ar[0:1]# take a small piece
<block_end><if_stmt>isinstance(ar vaex.array_types.supported_arrow_array_types)<block_start><return>tuple()<block_end><else_stmt><block_start><return>ar.shape[1:]<block_end><block_end><def_stmt>merged self rhs# TODO: if we don't allow emtpy datasets, we can remove this method
<block_start><if_stmt>len(self)<eq>0<block_start><return>rhs<block_end><if_stmt>len(rhs)<eq>0<block_start><return>self<block_end># TODO: this is where we want to check if both are array like
# and have faster version of merged
<return>DatasetMerged(self rhs)<block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>DatasetSlicedArrays(self start=start end=end)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end>new=type(self)(self._columns)<line_sep>new._ids=frozendict({key:hash_array(array)<for>key,array new._columns.items()})<line_sep><return>new<block_end><def_stmt>close self<block_start><pass><block_end><block_end># nothing to do, maybe drop a refcount?
# TODO: we might want to really get rid of these, since we want to avoid copying them over the network?
# def dropped(self, names):
<class_stmt>DatasetFile(Dataset)<block_start>"""Datasets that map to a file can keep their ids/hashes in the file itself,
or keep them in a meta file.
"""<def_stmt>__init__ self path write=<false> fs_options={} fs=<none><block_start>super().__init__()<line_sep>self.path=path<line_sep>self.fs_options=fs_options<line_sep>self.fs=fs<line_sep>self.write=write<line_sep>self._columns={}<line_sep>self._ids={}<line_sep>self._frozen=<false><line_sep>self._hash_calculations=0# track it for testing purposes
self._hash_info={}<line_sep>self._hash_cache_needs_write=<false><line_sep>self._read_hashes()<block_end>@property<def_stmt>name self<block_start>base,ext,fs_options=vaex.file.split_ext(self.path)<line_sep>base=os.path.basename(base)<line_sep><return>base<block_end>@property<def_stmt>_fingerprint self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start>fingerprint=vaex.cache.fingerprint(dict(self._ids))<line_sep><return>f'dataset-{self.snake_name}-hashed-{fingerprint}'<block_end><else_stmt># TODO: if the dataset is hashed, return a fingerprint based on that
<block_start>fingerprint=vaex.file.fingerprint(self.path fs_options=self.fs_options fs=self.fs)<line_sep><return>f'dataset-{self.snake_name}-{fingerprint}'<block_end><block_end><def_stmt>leafs self<arrow>List[Dataset]<block_start><return>[self]<block_end><def_stmt>_create_columns self<block_start><pass><block_end>@classmethod<def_stmt>quick_test cls path fs_options={} fs=<none> *args **kwargs<block_start><return><false><block_end>@classmethod<def_stmt>open cls path *args **kwargs<block_start><return>cls(path *args **kwargs)<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start><yield><from>self._default_chunk_iterator(self._columns columns chunk_size reverse=reverse)<block_end><def_stmt>is_masked self column<block_start>ar=self._columns[column]<if_stmt><not>isinstance(ar np.ndarray)<block_start>ar=ar[0:1]# take a small piece
<block_end><if_stmt>isinstance(ar np.ndarray)<block_start><return>np.ma.isMaskedArray(ar)<block_end><else_stmt><block_start><return><false><block_end><block_end># an arrow array always has null value options
<def_stmt>shape self column<block_start>ar=self._columns[column]<if_stmt><not>isinstance(ar np.ndarray)<block_start>ar=ar[0:1]# take a small piece
<block_end><if_stmt>isinstance(ar vaex.array_types.supported_arrow_array_types)<block_start><return>tuple()<block_end><else_stmt><block_start><return>ar.shape[1:]<block_end><block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>DatasetSlicedArrays(self start=start end=end)<block_end><def_stmt>_read_hashes self<block_start>path_hashes=Path(self.path+'.d')/'hashes.yaml'<try_stmt><block_start>exists=path_hashes.exists()<block_end><except_stmt>OSError# happens for windows py<38
<block_start>exists=<false><block_end><if_stmt>exists<block_start><with_stmt>path_hashes.open()<as>f<block_start>hashes=vaex.utils.yaml_load(f)<if_stmt>hashes<is><none><block_start><raise>ValueError(f'{path_hashes} was probably truncated due to another process writing.')<block_end>self._hash_info=hashes.get('columns' {})<block_end><block_end><block_end><def_stmt>_freeze self<block_start>self._ids=frozendict(self._ids)<line_sep>self._columns=frozendict(self._columns)<line_sep>self._set_row_count()<line_sep>self._frozen=<true><if_stmt>self._hash_cache_needs_write<block_start>self._write_hash_info()<block_end><block_end><def_stmt>encode self encoding skip=set()<block_start>spec={'dataset_type':self.snake_name 'write':self.write 'path':self.path 'fs_options':self.fs_options 'fs':self.fs}<line_sep><return>spec<block_end><def_stmt>__getstate__ self# we don't have the columns in the state, since we should be able
# to get them from disk again
<block_start><return>{'write':self.write 'path':self.path 'fs_options':self.fs_options 'fs':self.fs '_ids':dict(self._ids)# serialize the hases as non-frozen dict
}<block_end><def_stmt>__setstate__ self state<block_start>super().__setstate__(state)<line_sep># 'ctor' like initialization
self._frozen=<false><line_sep>self._hash_calculations=0<line_sep>self._columns={}<line_sep>self._hash_info={}<line_sep>self._hash_cache_needs_write=<false><line_sep>self._read_hashes()<block_end><def_stmt>add_column self name data<block_start>self._columns[name]=data<if_stmt>self.write<block_start><return># the columns don't include the final data
# the hashes will be done in .freeze()
<block_end>hash_info=self._hash_info.get(name)<if_stmt>hash_info<block_start>hash_info_previous=hash_info.copy()<line_sep>hash,hash_info=hash_array(data hash_info return_info=<true>)<if_stmt>hash_info_previous<ne>hash_info<block_start>self._hash_cache_needs_write=<true><block_end>self._ids[name]=hash<line_sep>self._hash_info[name]=hash_info<block_end><block_end># always update the information
@property<def_stmt>_local_hash_path self# TODO: support s3 and gcs
# TODO: fallback directory when a user cannot write
<block_start><if_stmt>Path(self.path).exists()<block_start>directory=Path(self.path+'.d')<line_sep>directory.mkdir(exist_ok=<true>)<block_end><else_stmt><block_start>o=urlparse(self.path)<line_sep>directory=Path(vaex.utils.get_private_dir('dataset' o.scheme o.netloc o.path[1:]))<block_end><return>directory/'hashes.yaml'<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end>cls=type(self)<line_sep># use pickle protocol to clone
new=cls.__new__(cls)<line_sep>new.__setstate__(self.__getstate__())<line_sep>hashes={}<line_sep>disk_cached_hashes={}<for_stmt>name,column new.items()<block_start>hash_info=self._hash_info.get(name)<if_stmt>hash_info<is><none><block_start>logging.warning(f'Calculating hash for column {name} of length {len(column)} (1 time operation, will be cached on disk)')<line_sep>hash_info=hash_array_data(column)<block_end>hash,hash_info=hash_array(column hash_info return_info=<true>)<line_sep>new._hash_calculations<augadd>1<line_sep>hashes[name]=hash<line_sep>disk_cached_hashes[name]=hash_info<block_end>new._ids=frozendict(hashes)<line_sep>new._hash_info=frozendict(disk_cached_hashes)<line_sep>path_hashes=new._local_hash_path<line_sep># TODO: without this check, if multiple processes are writing (e.g. tests/execution_test.py::test_task_sum with ray)
# this leads to a race condition, where we write the file, and while truncated, _read_hases() fails (because the file exists)
# if new._hash_info != new._ids:
new._write_hash_info()<line_sep><return>new<block_end><def_stmt>_write_hash_info self<block_start><if_stmt>self._hash_info# TODO: file lock
<block_start>path_hashes=self._local_hash_path<with_stmt>path_hashes.open('w')<as>f<block_start>vaex.utils.yaml_dump(f {'columns':dict(self._hash_info)})<block_end><block_end><block_end><block_end><class_stmt>DatasetCached(DatasetDecorator)<block_start>snake_name="cached"<line_sep>shared_cache={}<def_stmt>__init__ self original names cache=<none> to_numpy=<false><block_start>super(DatasetCached self).__init__(original)<line_sep>self.original=original<line_sep>self.names=names<line_sep>self._shared=cache<is><none><or>cache<is>self.shared_cache<line_sep>self.cache=cache<if>cache<is><not><none><else>self.shared_cache<line_sep>self.to_numpy=to_numpy<line_sep>self._create_columns()<line_sep>self._row_count=self.original.row_count<block_end>@property<def_stmt>_fingerprint self<block_start><return>self.original.fingerprint<block_end><def_stmt>_create_columns self<block_start>columns={}<line_sep>schema=self.original.schema()<for_stmt>name,column self.original.items()<block_start>columns[name]=ColumnProxy(self name schema[name])<block_end>self._columns=frozendict(columns)<line_sep>self._ids=frozendict(self.original._ids)<block_end><def_stmt>_encode self encoding skip=set()<block_start><raise>NotImplementedError("cannot serialize cache")<block_end>@classmethod<def_stmt>_decode cls encoding spec<block_start><raise>NotImplementedError("cannot serialize cache")<block_end><def_stmt>chunk_iterator self columns chunk_size=<none> reverse=<false><block_start>chunk_size=chunk_size<or>chunk_size_default<line_sep>columns_all=set(columns)<line_sep>columns_cachable=columns_all&set(self.names)<line_sep># avoids asking the cache twice, by using .get() and then testing for None
columns_cached={name:self.cache.get(self._cache_key(name))<for>name columns_cachable}<line_sep>columns_cached={name:array<for>name,array columns_cached.items()<if>array<is><not><none>}<line_sep>columns_to_cache=columns_cachable-set(columns_cached)<line_sep>column_required=columns_all-set(columns_cached)<line_sep>cache_chunks={name:[]<for>name columns_to_cache}<def_stmt>cached_iterator <block_start>chunks_list=[chunks<for>name,chunks columns_cached.items()]<line_sep># chunks_list is of form [[ar1x, ar2x, a3x], [ar1y, ar2y, a3y]]
# and now we want to yield
# * i1, i2 {'x': ar1x, 'y': ar1y}
# * i1, i2 {'x': ar2x, 'y': ar2y}
# * i1, i2 {'x': ar3x, 'y': ar3y}
names=[name<for>name,chunks columns_cached.items()]<line_sep>i1=0<line_sep>i2=0<for_stmt>chunks zip(*chunks_list)<block_start>i2<augadd>len(chunks[0])<for_stmt>chunk chunks<block_start><assert_stmt>len(chunk)<eq>len(chunks[0])<block_end><yield>i1 i2 dict(zip(names chunks))<line_sep>i1=i2<block_end><block_end><if_stmt>columns_cached<block_start>cached_iter=chunk_rechunk(cached_iterator() chunk_size)<block_end><else_stmt><block_start>cached_iter=empty_chunk_iterator(0 self.row_count chunk_size)<block_end><if_stmt>column_required<block_start>original_iter=self.original.chunk_iterator(column_required chunk_size reverse=reverse)<block_end><else_stmt><block_start>original_iter=empty_chunk_iterator(0 self.row_count chunk_size)<block_end>original_iter=list(original_iter)<line_sep>cached_iter=list(cached_iter)<for_stmt>(o1 o2 ochunks),(c1 c2 cchunks) zip(original_iter cached_iter)<block_start><assert_stmt>o1<eq>c1<assert_stmt>o2<eq>c2<line_sep><yield>o1 o2 {**ochunks **cchunks}<for_stmt>name columns_to_cache<block_start><if_stmt>self.to_numpy<block_start>ochunks={k:vaex.array_types.to_numpy(v)<for>k,v ochunks.items()}<block_end>cache_chunks[name].append(ochunks[name])<block_end><block_end># we write it too the cache in 1 go
<for_stmt>name columns_to_cache<block_start>self.cache[self._cache_key(name)]=cache_chunks[name]<block_end><block_end><def_stmt>slice self start end<block_start><if_stmt>start<eq>0<and>end<eq>self.row_count<block_start><return>self<block_end><return>type(self)(self.original.slice(start end) self.names cache=self.cache)<block_end><def_stmt>hashed self<block_start><if_stmt>set(self._ids)<eq>set(self)<block_start><return>self<block_end><return>type(self)(self.original.hashed() self.names cache=self.cache)<block_end><def_stmt>_cache_key self name<block_start><return>f"{self.fingerprint}-{name}"<block_end><block_end> |
<import_stmt>os<import_stmt>sys<import_stmt>argparse<import_stmt>time<import_stmt>json<import_stmt>tvm<import_stmt>torch<import_from_stmt>flextensor.utils Config<import_from_stmt>flextensor.task Task TASK_TABLE<import_from_stmt>flextensor.scheduler schedule schedule_with_config<import_from_stmt>flextensor.measure _evaluate<import_from_stmt>flextensor.configs.conv2d_config *<line_sep>shape_dict={"yolo":yolo_shapes "google":google_shapes "squeeze":squeeze_shapes "res":res_shapes "vgg-16":vgg_16_shapes "vgg-19":vgg_19_shapes}<def_stmt>optimize prefix from_ shapes target="llvm" dev_id=0 trials=100 timeout=4.0 parallel=1 method="searching" use_model=<false> logfile=sys.stdout<block_start>ret=dict()<for_stmt>i,shape enumerate(shapes)<block_start>print("Optimize {} convolution layer {} shape {}".format(prefix i+1+from_ shape) flush=<true>)<line_sep>batch,in_channel,height,width,out_channel,_,k_h,k_w,_,stride,padding,_,_=shape<line_sep># create an empty task but has the correct key we want
task=Task("gemm_conv2d" prefix+str(i+from_) <none> (batch in_channel height width out_channel k_h stride padding 1 1) target dev_id)<line_sep>beg=time.time()<line_sep>s,bufs,configs=schedule(task.key op_trial=trials timeout=timeout op_stop=30 parallel=parallel method=method use_model=use_model trials=[trials<floordiv>10 trials<floordiv>5 trials trials<floordiv>5])<line_sep>end=time.time()<line_sep># print(tvm.lower(s, bufs, simple_mode=True))
print("######################################")<line_sep>print("op schedules:")<for_stmt>config configs.op_config_lst<block_start>print("----------------------------------")<for_stmt>name,value config.items()<block_start><if_stmt>value<block_start>print(name value)<block_end><block_end><block_end>print("######################################")<line_sep>print("graph schedules:")<for_stmt>name,value configs.graph_config.items()<block_start><if_stmt>value<block_start>print(name value)<block_end><block_end>ret[task.key]=configs<line_sep>string=json.dumps(configs)<line_sep>line=task.key+":"+string<line_sep>print(line file=logfile flush=<true>)<line_sep>s,bufs=schedule_with_config(task.key configs)<line_sep>time_cost=_evaluate(s bufs target task.dev_id 10)<line_sep>print("######################################")<line_sep>print("Use" time_cost "ms")<line_sep>print("Cost" end-beg "s")<line_sep>print()<block_end><return>ret<block_end><def_stmt>test task_key configs dev_id=<none><block_start>task=TASK_TABLE[task_key]<line_sep>s,bufs=schedule_with_config(task_key configs)<line_sep>dev_id=dev_id<if>dev_id<is><not><none><else>task.dev_id<line_sep>time_cost=_evaluate(s bufs task.target dev_id 10)<line_sep>print(task_key "use" time_cost "ms")<line_sep>print()<block_end><def_stmt>schedule_with_config_local <block_start><with_stmt>open("/home/retina/skw/work/AutoScheduler/gemm_conv.log" 'r')<as>f<block_start><block_end><block_end>"""
(1, 3, 448, 448, 64, 3, 7, 7, 1, 2, 3, 1, 1)
Conv 2d on cpu: 0.011640000343322753s
Conv 2d on cuda: 0.006447720527648926s
(1, 64, 112, 112, 192, 64, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.010520696640014648s
Conv 2d on cuda: 0.006895184516906738s
(1, 192, 56, 56, 128, 192, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.00572810173034668s
Conv 2d on cuda: 0.005124855041503906s
(1, 128, 56, 56, 256, 128, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.005372405052185059s
Conv 2d on cuda: 0.003541111946105957s
(1, 256, 56, 56, 256, 256, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.00752255916595459s
Conv 2d on cuda: 0.0071736335754394535s
(1, 256, 56, 56, 512, 256, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.014762544631958007s
Conv 2d on cuda: 0.006854510307312012s
(1, 512, 28, 28, 256, 512, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.0043433189392089845s
Conv 2d on cuda: 0.0035385370254516603s
(1, 256, 28, 28, 512, 256, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.005109810829162597s
Conv 2d on cuda: 0.0018965244293212891s
(1, 512, 28, 28, 512, 512, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.004613542556762695s
Conv 2d on cuda: 0.003508114814758301s
(1, 512, 28, 28, 1024, 512, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.015011453628540039s
Conv 2d on cuda: 0.0038038253784179687s
(1, 1024, 14, 14, 512, 1024, 1, 1, 1, 1, 0, 1, 1)
Conv 2d on cpu: 0.003091883659362793s
Conv 2d on cuda: 0.001864314079284668s
(1, 512, 14, 14, 1024, 512, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.007311129570007324s
Conv 2d on cuda: 0.0012821674346923829s
(1, 1024, 14, 14, 1024, 1024, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.020050597190856934s
Conv 2d on cuda: 0.0026390790939331056s
(1, 1024, 14, 14, 1024, 1024, 3, 3, 1, 2, 1, 1, 1)
Conv 2d on cpu: 0.0181943416595459s
Conv 2d on cuda: 0.002562427520751953s
(1, 1024, 7, 7, 1024, 1024, 3, 3, 1, 1, 1, 1, 1)
Conv 2d on cpu: 0.018287014961242676s
Conv 2d on cuda: 0.0017349958419799806s
"""<if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-s" "--shapes" help="Use which shapes [yolo, google, res, squeeze, vgg-16, vgg-19]" type=str default="")<line_sep>parser.add_argument("-f" "--from_" help="From which shape" type=int default=0)<line_sep>parser.add_argument("-t" "--to" help="To which shape" type=int default=-1)<line_sep>parser.add_argument("-l" "--log" help="Log file name" type=str default="")<line_sep>parser.add_argument("--test" help="test file name" type=str default="")<line_sep>parser.add_argument("--trials" help="number of trials for op" type=int default=100)<line_sep>parser.add_argument("--target" help="target device type" type=str default="llvm")<line_sep>parser.add_argument("--device" help="target device number" type=int default=0)<line_sep>parser.add_argument("--timeout" help="timeout" type=float default=4.0)<line_sep>parser.add_argument("--parallel" help="parallel" type=int default=1)<line_sep>parser.add_argument("--use_model" help="use performance model" action="store_true")<line_sep>parser.add_argument("--method" help="how to schedule" type=str default="searching")<line_sep>parser.add_argument("--test_torch" help="whether to test torch implementation" type=bool default=<false>)<line_sep>args=parser.parse_args()<if_stmt>args.shapes<ne>""<block_start>shapes=shape_dict[args.shapes]<if_stmt>args.to<l>0<block_start>end=len(shapes)<block_end><else_stmt><block_start>end=args.to<block_end><if_stmt>args.log<ne>""<block_start><with_stmt>open(args.log "a")<as>flog<block_start>ret=optimize(args.shapes args.from_ shapes[args.from_:end] target=args.target dev_id=args.device timeout=args.timeout trials=args.trials parallel=args.parallel method=args.method use_model=args.use_model logfile=flog)<block_end><block_end><else_stmt><block_start>ret=optimize(args.shapes args.from_ shapes[args.from_:end] target=args.target dev_id=args.device timeout=args.timeout trials=args.trials parallel=args.parallel 
method=args.method use_model=args.use_model logfile=sys.stdout)<block_end><block_end><if_stmt>args.test<ne>""<block_start><with_stmt>open(args.test "r")<as>fin<block_start><for_stmt>line fin<block_start>name,string=line.split(":" 1)<line_sep>obj=json.loads(string)<line_sep>configs=Config(obj[0] obj[1])<line_sep>test(name configs dev_id=args.device)<block_end><block_end><block_end><if_stmt>args.test_torch<block_start><assert_stmt>args.shapes<ne>""<line_sep>shapes=shape_dict[args.shapes]<line_sep>""" Warm up """<line_sep>batch,in_channel,height,width,out_channel,_,k_h,k_w,_,stride,padding,dilation,groups=shapes[0]<line_sep>conv2d=torch.nn.Conv2d(in_channel out_channel (k_h k_w) stride=stride padding=padding dilation=dilation groups=groups).cuda()<line_sep>img=torch.rand((batch in_channel height width)).cuda()<line_sep>res=conv2d(img)<for_stmt>shape shapes<block_start>print(shape)<line_sep>batch,in_channel,height,width,out_channel,_,k_h,k_w,_,stride,padding,dilation,groups=shape<line_sep>start_time=time.time()<line_sep>conv2d=torch.nn.Conv2d(in_channel out_channel (k_h k_w) stride=stride padding=padding dilation=dilation groups=groups)<for_stmt>i range(args.trials)<block_start>img=torch.rand((batch in_channel height width))<line_sep>res=conv2d(img)<block_end>cpu_time=time.time()-start_time<line_sep>print("Conv 2d on cpu: {}s".format(cpu_time/args.trials))<line_sep>start_time=time.time()<line_sep>conv2d=conv2d.cuda()<for_stmt>i range(args.trials)<block_start>img=torch.rand((batch in_channel height width)).cuda()<line_sep>res=conv2d(img)<block_end>cuda_time=time.time()-start_time<line_sep>print("Conv 2d on cuda: {}s".format(cuda_time/args.trials))<block_end><block_end><block_end> |
""" test for app action functionality """<import_from_stmt>unittest.mock patch<import_from_stmt>django.template.response TemplateResponse<import_from_stmt>django.test TestCase<import_from_stmt>django.test.client RequestFactory<import_from_stmt>bookwyrm.tests.validate_html validate_html<import_from_stmt>bookwyrm models views<class_stmt>ImportManualReviewViews(TestCase)<block_start>"""goodreads import views"""<def_stmt>setUp self<block_start>"""we need basic test data and mocks"""<line_sep>self.factory=RequestFactory()<with_stmt>patch("bookwyrm.suggested_users.rerank_suggestions_task.delay") patch("bookwyrm.activitystreams.populate_stream_task.delay") patch("bookwyrm.lists_stream.populate_lists_task.delay")<block_start>self.local_user=models.User.objects.create_user("<EMAIL>" "<EMAIL>" "password" local=<true> localname="mouse" )<block_end>models.SiteSettings.objects.create()<line_sep>self.job=models.ImportJob.objects.create(user=self.local_user mappings={})<line_sep>work=models.Work.objects.create(title="Test Work")<line_sep>self.book=models.Edition.objects.create(title="Example Edition" remote_id="https://example.com/book/1" parent_work=work )<block_end><def_stmt>test_import_troubleshoot_get self<block_start>"""there are so many views, this just makes sure it LOADS"""<line_sep>view=views.ImportManualReview.as_view()<line_sep>request=self.factory.get("")<line_sep>request.user=self.local_user<with_stmt>patch("bookwyrm.tasks.app.AsyncResult")<as>async_result<block_start>async_result.return_value=[]<line_sep>result=view(request self.job.id)<block_end>self.assertIsInstance(result TemplateResponse)<line_sep>validate_html(result.render())<line_sep>self.assertEqual(result.status_code 200)<block_end><def_stmt>test_approve_item self<block_start>"""a guess is correct"""<line_sep>import_item=models.ImportItem.objects.create(index=0 job=self.job book_guess=self.book fail_reason="no match" data={} normalized_data={} 
)<line_sep>request=self.factory.post("")<line_sep>request.user=self.local_user<with_stmt>patch("bookwyrm.importers.importer.import_item_task.delay")<as>mock<block_start>views.approve_import_item(request self.job.id import_item.id)<block_end>self.assertEqual(mock.call_count 1)<line_sep>import_item.refresh_from_db()<line_sep>self.assertIsNone(import_item.fail_reason)<line_sep>self.assertIsNone(import_item.book_guess)<line_sep>self.assertEqual(import_item.book.id self.book.id)<block_end><def_stmt>test_delete_item self<block_start>"""a guess is correct"""<line_sep>import_item=models.ImportItem.objects.create(index=0 job=self.job book_guess=self.book fail_reason="no match" data={} normalized_data={} )<line_sep>request=self.factory.post("")<line_sep>request.user=self.local_user<line_sep>views.delete_import_item(request self.job.id import_item.id)<line_sep>import_item.refresh_from_db()<line_sep>self.assertEqual(import_item.fail_reason "no match")<line_sep>self.assertIsNone(import_item.book_guess)<line_sep>self.assertIsNone(import_item.book)<block_end><block_end> |
<import_stmt>os<import_stmt>argparse<import_stmt>pyautogui<import_stmt>time<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("-p" "--path" help="absolute path to store screenshot." default=r"./images")<line_sep>parser.add_argument("-t" "--type" help="h (in hour) or m (in minutes) or s (in seconds)" default='h')<line_sep>parser.add_argument("-f" "--frequency" help="frequency for taking screenshot per h/m/s." default=1 type=int)<line_sep>args=parser.parse_args()<line_sep>sec=0.<if_stmt>args.type<eq>'h'<block_start>sec=60<times>60/args.frequency<block_end><elif_stmt>args.type<eq>'m'<block_start>sec=60/args.frequency<block_end><if_stmt>sec<l>1.<block_start>sec=1.<block_end><if_stmt>os.path.isdir(args.path)<ne><true><block_start>os.mkdir(args.path)<block_end><try_stmt><block_start><while_stmt><true><block_start>t=time.localtime()<line_sep>current_time=time.strftime("%H_%M_%S" t)<line_sep>file=current_time+".jpg"<line_sep>image=pyautogui.screenshot(os.path.join(args.path file))<line_sep>print(f"{file} saved successfully.\n")<line_sep>time.sleep(sec)<block_end><block_end><except_stmt>KeyboardInterrupt<block_start>print("End of script by user interrupt")<block_end> |
<import_from_stmt>functools partial<import_from_stmt>six reraise<import_from_stmt>characteristic attributes<import_from_stmt>effect sync_performer Effect ComposedDispatcher TypeDispatcher base_dispatcher <import_from_stmt>treq get<import_from_stmt>pyrsistent PClass field<import_from_stmt>txeffect deferred_performer<import_from_stmt>eliot startAction Message<import_from_stmt>eliot.twisted DeferredContext<line_sep># This is from https://github.com/radix/effect/pull/46
@attributes(['results' 'exc_info'] apply_immutable=<true>)<class_stmt>SequenceFailed(Exception object)<block_start>"""
Raised if an effect in a :class:``Sequence`` fails.
:ivar list results: The list of successful results.
:ivar error: The error result of the last run effect.
"""<def_stmt>__str__ self# Exception has a custom __str__ that looks at arguments pass to it's
# init. Since we don't pass any, it is useless. The following will
# duplicate the class name in the traceback, but is better than
# otherwise.
<block_start><return>repr(self)<block_end><block_end>@attributes(["effects"] apply_with_init=<false> apply_immutable=<true>)<class_stmt>Sequence(object)<block_start>"""
Runs a sequence of effects serially.
:returns list: The list of results of the effects.
:raises SequenceFailed: If one of the effects fails.
"""<def_stmt>__init__ self effects<block_start>"""
:param effects: The list of effects to execute in sequence.
"""<line_sep>self.effects=effects<block_end><block_end><def_stmt>sequence effects<block_start>"""
Given multiple Effects, return one Effect that represents the sequence of
all of their effects. The result of the aggregate Effect will be a list of
their results, in the same order as the input to this function. If any
child effect fails, the first such failure will be propagated as a
:class:`SequenceFailed` exception.
:param effects: Effects which should be performed in sequence.
:return: An Effect that results in a list of results, or which fails with
a :class:`SequenceFailed`.
"""<line_sep><return>Effect(Sequence(list(effects)))<block_end>@sync_performer<def_stmt>perform_sequence dispatcher intent<block_start>"""Performer for :class:`Sequence`."""<line_sep>effects=list(intent.effects)<if_stmt><not>effects<block_start><return>[]<block_end>results=[]<def_stmt>succeed next_effect result<block_start>results.append(result)<line_sep><return>next_effect<block_end><def_stmt>fail result<block_start>reraise(SequenceFailed SequenceFailed(results=results exc_info=result) result[2])<block_end><def_stmt>reducer next_effect effect<block_start><return>effect.on(success=partial(succeed next_effect) error=fail)<block_end><return>reduce(reducer reversed(effects) results)<block_end><class_stmt>HTTPGet(PClass)<block_start>"""
Intent for HTTP GET requests.
:ivar bytes url: The URL to make a GET request to.
"""<line_sep>url=field(type=bytes mandatory=<true>)<block_end><def_stmt>http_get url<block_start>"""
Wrapper to create an :class:`HTTPGet` Effect.
:param bytes url: The url to make a GET request to.
:returns: The ``Effect`` of making a GET request to ``url``.
"""<line_sep><return>Effect(HTTPGet(url=url))<block_end>@deferred_performer<def_stmt>treq_get dispatcher intent<block_start>"""
Performer to execute an HTTP GET.
:param dispatcher: The dispatcher used to dispatch this performance.
:param HTTPGet intent: The intent to be performed.
"""<line_sep>action=startAction(action_type=u"flocker:provision:_effect:treq_get")<with_stmt>action.context()<block_start>Message.log(url=intent.url)<line_sep># Do not use persistent HTTP connections, because they will not be
# cleaned up by the end of the test.
d=DeferredContext(get(intent.url persistent=<false>))<line_sep>d.addActionFinish()<line_sep><return>d.result<block_end><block_end>dispatcher=ComposedDispatcher([TypeDispatcher({Sequence:perform_sequence HTTPGet:treq_get }) base_dispatcher ])<line_sep> |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of decomposable attention model.
https://arxiv.org/abs/1606.01933.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>language.common.layers common_layers<import_from_stmt>language.common.utils tensor_utils<import_stmt>tensorflow.compat.v1<as>tf<def_stmt>decomposable_attention emb1 len1 emb2 len2 hidden_size hidden_layers dropout_ratio mode epsilon=1e-8<block_start>"""See https://arxiv.org/abs/1606.01933.
Args:
emb1: A Tensor with shape [batch_size, max_len1, emb_size] representing the
first input sequence.
len1: A Tensor with shape [batch_size], indicating the true sequence length
of `emb1`. This is required due to padding.
emb2: A Tensor with shape [batch_size, max_len2, emb_size] representing the
second input sequence.
len2: A Tensor with shape [batch_size], indicating the true sequence length
of `emb1`. This is required due to padding.
hidden_size: An integer indicating the size of each hidden layer in the
feed-forward neural networks.
hidden_layers: An integer indicating the number of hidden layers in the
feed-forward neural networks.
dropout_ratio: The probability of dropping out each unit in the activation.
This can be None, and is only applied during training.
mode: One of the keys from tf.estimator.ModeKeys.
epsilon: A small positive constant to add to masks for numerical stability.
Returns:
final_emb: A Tensor with shape [batch_size, hidden_size].
"""<line_sep># [batch_size, maxlen1]
mask1=tf.sequence_mask(len1 tensor_utils.shape(emb1 1) dtype=tf.float32)<line_sep># [batch_size, maxlen2]
mask2=tf.sequence_mask(len2 tensor_utils.shape(emb2 1) dtype=tf.float32)<with_stmt>tf.variable_scope("attend")<block_start>projected_emb1=common_layers.ffnn(emb1 [hidden_size]<times>hidden_layers dropout_ratio mode)<block_end><with_stmt>tf.variable_scope("attend" reuse=<true>)<block_start>projected_emb2=common_layers.ffnn(emb2 [hidden_size]<times>hidden_layers dropout_ratio mode)<block_end># [batch_size, maxlen1, maxlen2]
attention_scores=tf.matmul(projected_emb1 projected_emb2 transpose_b=<true>)<line_sep>attention_weights1=tf.nn.softmax(attention_scores+tf.log(tf.expand_dims(mask2 1)+epsilon) 2)<line_sep>attention_weights2=tf.nn.softmax(attention_scores+tf.log(tf.expand_dims(mask1 2)+epsilon) 1)<line_sep># [batch_size, maxlen1, emb_size]
attended_emb1=tf.matmul(attention_weights1 emb2)<line_sep># [batch_size, maxlen2, emb_size]
attended_emb2=tf.matmul(attention_weights2 emb1 transpose_a=<true>)<with_stmt>tf.variable_scope("compare")<block_start>compared_emb1=common_layers.ffnn(tf.concat([emb1 attended_emb1] -1) [hidden_size]<times>hidden_layers dropout_ratio mode)<block_end><with_stmt>tf.variable_scope("compare" reuse=<true>)<block_start>compared_emb2=common_layers.ffnn(tf.concat([emb2 attended_emb2] -1) [hidden_size]<times>hidden_layers dropout_ratio mode)<block_end>compared_emb1<augmul>tf.expand_dims(mask1 -1)<line_sep>compared_emb2<augmul>tf.expand_dims(mask2 -1)<line_sep># [batch_size, hidden_size]
aggregated_emb1=tf.reduce_sum(compared_emb1 1)<line_sep>aggregated_emb2=tf.reduce_sum(compared_emb2 1)<with_stmt>tf.variable_scope("aggregate")<block_start>final_emb=common_layers.ffnn(tf.concat([aggregated_emb1 aggregated_emb2] -1) [hidden_size]<times>hidden_layers dropout_ratio mode)<block_end><return>final_emb<block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("WRITE")<line_sep>process.source=cms.Source("EmptySource" numberEventsInLuminosityBlock=cms.untracked.uint32(4))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(20))<line_sep>process.out=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string("multi_lumi.root"))<line_sep>process.o=cms.EndPath(process.out)<line_sep> |
<import_from_future_stmt> absolute_import print_function unicode_literals<import_from_stmt>builtins dict str<import_stmt>os<import_stmt>logging<import_from_stmt>indra get_config<line_sep># Before the import, we have to deal with the CLASSPATH to avoid clashes
# with Eidos.
<def_stmt>_set_classpath <block_start>clp=os.environ.get('CLASSPATH')<line_sep>eip=get_config('EIDOSPATH')<line_sep>rep=get_config('REACHPATH')<line_sep>clp_parts=clp.split(':')<if>clp<else>[]<line_sep>new_clp_parts=[]<line_sep>has_reach=<false><line_sep># Look at all the parts of the CLASSPATH
<for_stmt>part clp_parts# If Eidos is on the CLASSPATH, remove it
<block_start><if_stmt><not>eip<or>os.path.abspath(part)<ne>eip<block_start>new_clp_parts.append(part)<block_end># If REACH is not on the CLASSPATH, add it
<if_stmt>rep<and>os.path.abspath(part)<eq>rep<block_start>has_reach=<true><block_end><block_end><if_stmt>rep<and><not>has_reach<block_start>new_clp_parts.append(rep)<block_end># Set the new CLASSPATH
new_clp=':'.join(new_clp_parts)<line_sep>os.environ['CLASSPATH']=new_clp<block_end>_set_classpath()<import_from_stmt>indra.java_vm autoclass JavaException<line_sep>logger=logging.getLogger(__name__)<class_stmt>ReachOfflineReadingError(Exception)<block_start><pass><block_end><class_stmt>ReachReader(object)<block_start>"""The ReachReader wraps a singleton instance of the REACH reader.
This allows calling the reader many times without having to wait for it to
start up each time.
Attributes
----------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""<def_stmt>__init__ self<block_start>self.api_ruler=<none><block_end><def_stmt>get_api_ruler self<block_start>"""Return the existing reader if it exists or launch a new one.
Returns
-------
api_ruler : org.clulab.reach.apis.ApiRuler
An instance of the REACH ApiRuler class (java object).
"""<if_stmt>self.api_ruler<is><none><block_start><try_stmt><block_start>self.api_ruler=autoclass('org.clulab.reach.export.apis.ApiRuler')<block_end><except_stmt>JavaException<as>e<block_start><raise>ReachOfflineReadingError(e)<block_end><block_end><return>self.api_ruler<block_end><block_end> |
<import_from_stmt>jmetal.algorithm.singleobjective.simulated_annealing SimulatedAnnealing<import_from_stmt>jmetal.operator BitFlipMutation<import_from_stmt>jmetal.problem OneMax<import_from_stmt>jmetal.util.solution print_function_values_to_file print_variables_to_file<import_from_stmt>jmetal.util.termination_criterion StoppingByEvaluations<if_stmt>__name__<eq>'__main__'<block_start>problem=OneMax(number_of_bits=1024)<line_sep>max_evaluations=20000<line_sep>algorithm=SimulatedAnnealing(problem=problem mutation=BitFlipMutation(probability=1.0/problem.number_of_bits) termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))<line_sep>algorithm.run()<line_sep>result=algorithm.get_result()<line_sep># Save results to file
print_function_values_to_file(result 'FUN.'+algorithm.get_name()+"."+problem.get_name())<line_sep>print_variables_to_file(result 'VAR.'+algorithm.get_name()+"."+problem.get_name())<line_sep>print('Algorithm: '+algorithm.get_name())<line_sep>print('Problem: '+problem.get_name())<line_sep>print('Solution: '+result.get_binary_string())<line_sep>print('Fitness: '+str(result.objectives[0]))<line_sep>print('Computing time: '+str(algorithm.total_computing_time))<block_end> |
"""
owtf.managers.plugin
~~~~~~~~~~~~~~~~~~~~
This module manages the plugins and their dependencies
"""<import_stmt>imp<import_stmt>json<import_stmt>os<import_from_stmt>owtf.models.plugin Plugin<import_from_stmt>owtf.models.test_group TestGroup<import_from_stmt>owtf.settings PLUGINS_DIR<import_from_stmt>owtf.utils.error abort_framework<import_from_stmt>owtf.utils.file FileOperations<line_sep>TEST_GROUPS=["web" "network" "auxiliary"]<def_stmt>get_test_groups_config file_path<block_start>"""Reads the test groups from a config file
.. note::
This needs to be a list instead of a dictionary to preserve order in python < 2.7
:param file_path: The path to the config file
:type file_path: `str`
:return: List of test groups
:rtype: `list`
"""<line_sep>test_groups=[]<line_sep>config_file=FileOperations.open(file_path "r").read().splitlines()<for_stmt>line config_file<block_start><if_stmt>"#"<eq>line[0]<block_start><continue># Skip comments
<block_end><try_stmt><block_start>code,priority,descrip,hint,url=line.strip().split(" | ")<block_end><except_stmt>ValueError<block_start>abort_framework("Problem in Test Groups file: '{!s}' -> Cannot parse line: {!s}".format(file_path line))<block_end><if_stmt>len(descrip)<l>2<block_start>descrip=hint<block_end><if_stmt>len(hint)<l>2<block_start>hint=""<block_end>test_groups.append({"code":code "priority":priority "descrip":descrip "hint":hint "url":url })<block_end><return>test_groups<block_end><def_stmt>load_test_groups session file_default file_fallback plugin_group<block_start>"""Load test groups into the DB.
:param test_groups_file: The path to the test groups config
:type test_groups_file: `str`
:param plugin_group: Plugin group to load
:type plugin_group: `str`
:return: None
:rtype: None
"""<line_sep>file_path=file_default<if_stmt><not>os.path.isfile(file_default)<block_start>file_path=file_fallback<block_end>test_groups=get_test_groups_config(file_path)<for_stmt>group test_groups<block_start>session.merge(TestGroup(code=group["code"] priority=group["priority"] descrip=group["descrip"] hint=group["hint"] url=group["url"] group=plugin_group ))<block_end>session.commit()<block_end><def_stmt>load_plugins session<block_start>"""Loads the plugins from the filesystem and updates their info.
.. note::
Walks through each sub-directory of `PLUGINS_DIR`.
For each file, loads it thanks to the imp module.
Updates the database with the information for each plugin:
+ 'title': the title of the plugin
+ 'name': the name of the plugin
+ 'code': the internal code of the plugin
+ 'group': the group of the plugin (ex: web)
+ 'type': the type of the plugin (ex: active, passive, ...)
+ 'descrip': the description of the plugin
+ 'file': the filename of the plugin
+ 'internet_res': does the plugin use internet resources?
:return: None
:rtype: None
"""<line_sep># TODO: When the -t, -e or -o is given to OWTF command line, only load
# the specific plugins (and not all of them like below).
# Retrieve the list of the plugins (sorted) from the directory given by
# 'PLUGIN_DIR'.
plugins=[]<for_stmt>root,_,files os.walk(PLUGINS_DIR)<block_start>plugins.extend([os.path.join(root filename)<for>filename files<if>filename.endswith("py")])<block_end>plugins=sorted(plugins)<line_sep># Retrieve the information of the plugin.
<for_stmt>plugin_path plugins# Only keep the relative path to the plugin
<block_start>plugin=plugin_path.replace(PLUGINS_DIR "")<line_sep># TODO: Using os.path.sep might not be portable especially on
# Windows platform since it allows '/' and '\' in the path.
# Retrieve the group, the type and the file of the plugin.
# Ensure all empty strings are removed from the list
chunks=list(filter(<none> plugin.split(os.path.sep)))<line_sep># TODO: Ensure that the variables group, type and file exist when
# the length of chunks is less than 3.
<if_stmt>len(chunks)<eq>3<block_start>group,type,file=chunks<block_end># Retrieve the internal name and code of the plugin.
name,code=os.path.splitext(file)[0].split("@")<line_sep># Only load the plugin if in XXX_TEST_GROUPS configuration (e.g. web_testgroups.cfg)
<if_stmt>session.query(TestGroup).get(code)<is><none><block_start><continue><block_end># Load the plugin as a module.
filename,pathname,desc=imp.find_module(os.path.splitext(os.path.basename(plugin_path))[0] [os.path.dirname(plugin_path)] )<line_sep>plugin_module=imp.load_module(os.path.splitext(file)[0] filename pathname desc)<line_sep># Try te retrieve the `attr` dictionary from the module and convert
# it to json in order to save it into the database.
attr=<none><try_stmt><block_start>attr=json.dumps(plugin_module.ATTR)<block_end><except_stmt>AttributeError# The plugin didn't define an attr dict.
<block_start><pass><block_end># Save the plugin into the database.
session.merge(Plugin(key="{!s}@{!s}".format(type code) group=group type=type title=name.title().replace("_" " ") name=name code=code file=file descrip=plugin_module.DESCRIPTION attr=attr ))<block_end>session.commit()<block_end><def_stmt>get_types_for_plugin_group session plugin_group<block_start>"""Get available plugin types for a plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of available plugin types
:rtype: `list`
"""<line_sep>plugin_types=session.query(Plugin.type).filter_by(group=plugin_group).distinct().all()<line_sep>plugin_types=[i[0]<for>i plugin_types]<line_sep><return>plugin_types<block_end><def_stmt>plugin_gen_query session criteria<block_start>"""Generate a SQLAlchemy query based on the filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return:
:rtype:
"""<line_sep>query=session.query(Plugin).join(TestGroup)<if_stmt>criteria.get("type" <none>)<block_start><if_stmt>isinstance(criteria["type"] str)<block_start>query=query.filter(Plugin.type<eq>criteria["type"])<block_end><if_stmt>isinstance(criteria["type"] list)<block_start>query=query.filter(Plugin.type.in_(criteria["type"]))<block_end><block_end><if_stmt>criteria.get("group" <none>)<block_start><if_stmt>isinstance(criteria["group"] str)<block_start>query=query.filter(Plugin.group<eq>criteria["group"])<block_end><if_stmt>isinstance(criteria["group"] list)<block_start>query=query.filter(Plugin.group.in_(criteria["group"]))<block_end><block_end><if_stmt>criteria.get("code" <none>)<block_start><if_stmt>isinstance(criteria["code"] str)<block_start>query=query.filter(Plugin.code<eq>criteria["code"])<block_end><if_stmt>isinstance(criteria["code"] list)<block_start>query=query.filter(Plugin.code.in_(criteria["code"]))<block_end><block_end><if_stmt>criteria.get("name" <none>)<block_start><if_stmt>isinstance(criteria["name"] str)<block_start>query=query.filter(Plugin.name<eq>criteria["name"])<block_end><if_stmt>isinstance(criteria["name"] list)<block_start>query=query.filter(Plugin.name.in_(criteria["name"]))<block_end><block_end><return>query.order_by(TestGroup.priority.desc())<block_end><def_stmt>get_all_plugin_dicts session criteria=<none><block_start>"""Get plugin dicts based on filter criteria
:param criteria: Filter criteria
:type criteria: `dict`
:return: List of plugin dicts
:rtype: `list`
"""<if_stmt>criteria<is><none><block_start>criteria={}<block_end><if_stmt>"code"<in>criteria<block_start>criteria["code"]=Plugin.name_to_code(session criteria["code"])<block_end>query=plugin_gen_query(session criteria)<line_sep>plugin_obj_list=query.all()<line_sep>plugin_dicts=[]<for_stmt>obj plugin_obj_list<block_start>plugin_dicts.append(obj.to_dict())<block_end><return>plugin_dicts<block_end><def_stmt>get_plugins_by_type session plugin_type<block_start>"""Get plugins based on type argument
:param plugin_type: Plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""<line_sep><return>get_all_plugin_dicts(session {"type":plugin_type})<block_end><def_stmt>get_plugins_by_group session plugin_group<block_start>"""Get plugins by plugin group
:param plugin_group: Plugin group
:type plugin_group: `str`
:return: List of plugin dicts
:rtype: `list`
"""<line_sep><return>get_all_plugin_dicts(session {"group":plugin_group})<block_end><def_stmt>get_plugins_by_group_type session plugin_group plugin_type<block_start>"""Get plugins by group and plugin type
:param plugin_group: Plugin group
:type plugin_group: `str`
:param plugin_type: plugin type
:type plugin_type: `str`
:return: List of plugin dicts
:rtype: `list`
"""<line_sep><return>get_all_plugin_dicts(session {"type":plugin_type "group":plugin_group})<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>pyriemann.utils.geodesic geodesic_riemann geodesic_euclid geodesic_logeuclid geodesic <import_from_stmt>pyriemann.utils.mean mean_riemann mean_logeuclid mean_euclid<import_stmt>pytest<import_from_stmt>pytest approx<def_stmt>get_geod_func <block_start>geod_func=[geodesic_riemann geodesic_euclid geodesic_logeuclid]<for_stmt>gf geod_func<block_start><yield>gf<block_end><block_end><def_stmt>get_geod_name <block_start>geod_name=["riemann" "euclid" "logeuclid"]<for_stmt>gn geod_name<block_start><yield>gn<block_end><block_end>@pytest.mark.parametrize("geodesic_func" [geodesic_riemann geodesic_euclid geodesic_logeuclid])<class_stmt>GeodesicFuncTestCase<block_start><def_stmt>test_simple_mat self geodesic_func get_covmats<block_start>n_channels=3<if_stmt>geodesic_func<is>geodesic_euclid<block_start>A=1.0<times>np.eye(n_channels)<line_sep>B=2.0<times>np.eye(n_channels)<line_sep>Ctrue=1.5<times>np.eye(n_channels)<block_end><else_stmt><block_start>A=0.5<times>np.eye(n_channels)<line_sep>B=2<times>np.eye(n_channels)<line_sep>Ctrue=np.eye(n_channels)<block_end>self.geodesic_0(geodesic_func A B)<line_sep>self.geodesic_1(geodesic_func A B)<line_sep>self.geodesic_middle(geodesic_func A B Ctrue)<block_end><def_stmt>test_random_mat self geodesic_func get_covmats<block_start>n_trials,n_channels=2 5<line_sep>covmats=get_covmats(n_trials n_channels)<line_sep>A,B=covmats[0] covmats[1]<if_stmt>geodesic_func<is>geodesic_euclid<block_start>Ctrue=mean_euclid(covmats)<block_end><elif_stmt>geodesic_func<is>geodesic_logeuclid<block_start>Ctrue=mean_logeuclid(covmats)<block_end><elif_stmt>geodesic_func<is>geodesic_riemann<block_start>Ctrue=mean_riemann(covmats)<block_end>self.geodesic_0(geodesic_func A B)<line_sep>self.geodesic_1(geodesic_func A B)<line_sep>self.geodesic_middle(geodesic_func A B Ctrue)<block_end><block_end><class_stmt>TestGeodesicFunc(GeodesicFuncTestCase)<block_start><def_stmt>geodesic_0 self geodesic_func A 
B<block_start><assert_stmt>geodesic_func(A B 0)<eq>approx(A)<block_end><def_stmt>geodesic_1 self geodesic_func A B<block_start><assert_stmt>geodesic_func(A B 1)<eq>approx(B)<block_end><def_stmt>geodesic_middle self geodesic_func A B Ctrue<block_start><assert_stmt>geodesic_func(A B 0.5)<eq>approx(Ctrue)<block_end><block_end>@pytest.mark.parametrize("metric" get_geod_name())<def_stmt>test_distance_wrapper_simple metric<block_start>n_channels=3<if_stmt>metric<eq>"euclid"<block_start>A=1.0<times>np.eye(n_channels)<line_sep>B=2.0<times>np.eye(n_channels)<line_sep>Ctrue=1.5<times>np.eye(n_channels)<block_end><else_stmt><block_start>A=0.5<times>np.eye(n_channels)<line_sep>B=2<times>np.eye(n_channels)<line_sep>Ctrue=np.eye(n_channels)<block_end><assert_stmt>geodesic(A B 0.5 metric=metric)<eq>approx(Ctrue)<block_end>@pytest.mark.parametrize("met, gfunc" zip(get_geod_name() get_geod_func()))<def_stmt>test_distance_wrapper_random met gfunc get_covmats<block_start>n_trials,n_channels=2 5<line_sep>covmats=get_covmats(n_trials n_channels)<line_sep>A,B=covmats[0] covmats[1]<if_stmt>gfunc<is>geodesic_euclid<block_start>Ctrue=mean_euclid(covmats)<block_end><elif_stmt>gfunc<is>geodesic_logeuclid<block_start>Ctrue=mean_logeuclid(covmats)<block_end><elif_stmt>gfunc<is>geodesic_riemann<block_start>Ctrue=mean_riemann(covmats)<block_end><assert_stmt>geodesic(A B 0.5 metric=met)<eq>approx(Ctrue)<block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>.._common *<import_from_stmt>.util md5 md5x cmd5x<line_sep># vms
# src=1702633101b340d8917a69cf8a4b8c7c
# salt=t6hrq6k0n6n6k6qdh6tje6wpb62v7654
# salt=u6fnp3eok0dpftcq9qbr4n9svk8tqh7u
# src=02020031010000000000
# salt=3sj8xof48xof4tk9f4tk9ypgk9ypg5ul
<def_stmt>gettmts tvid vid<block_start>tm=int(time.time()<times>1000)<line_sep>key='d5fb4bd9d50c4be6948c97edd7254b0e'<line_sep>host='https://cache.m.iqiyi.com'<line_sep>params={'src':'76f90cbd92f94a2e925d83e8ccd22cb7' 'sc':md5(str(tm)+key+vid) 't':tm}<line_sep>req_url='{host}/tmts/{tvid}/{vid}/'.format(**vars())<line_sep><return>get_response(req_url params=params).json()<block_end><def_stmt>getdash tvid vid bid=500<block_start>cmd5x_null=cmd5x('')<line_sep>tm=int(time.time()<times>1000)<line_sep>host='https://cache.video.iqiyi.com'<line_sep>params=urlencode({#'uid': '',
'k_uid':get_random_id(32 'k_uid') # necessary
#'dfp': dfp,
#'pck': '',
#'bop': '{{"version":"10.0","dfp":"{dfp}"}}'.format(dfp=dfp),
# keys above are relative to cookies
'tvid':tvid 'bid':bid 'vid':vid 'src':'01010031010000000000' 'vt':0 'rs':1 'ori':'pcw' 'ps':1 'pt':0 'd':0 's':'' 'lid':'' 'cf':'' 'ct':'' 'authKey':cmd5x('{cmd5x_null}{tm}{tvid}'.format(**vars())) 'k_tag':1 'ost':0 'ppt':0 'locale':'zh_cn' 'prio':'{"ff":"f4v","code":2}' 'k_err_retries':0 'up':'' 'qd_v':2 'tm':tm 'qdy':'a' 'qds':0 'ut':0 # 600 bid isn't available
# relative to encode
#'k_ft1': ,
#'k_ft4': ,
#'k_ft5': ,
})<line_sep>src='/dash?'+params<line_sep>vf=cmd5x(src)<line_sep>req_url='{host}{src}&vf={vf}'.format(**vars())<line_sep><return>get_response(req_url).json()<block_end><def_stmt>getvps tvid vid<block_start>tm=int(time.time()<times>1000)<line_sep>host='http://cache.video.qiyi.com'<line_sep>params=urlencode({'tvid':tvid 'vid':vid 'v':0 'qypid':'{}_12'.format(tvid) 'src':'01012001010000000000' 't':tm 'k_tag':1 'k_uid':get_random_id(32 'k_uid') 'rs':1 })<line_sep>src='/vps?'+params<line_sep>vf=md5x(src)<line_sep>req_url='{host}{src}&vf={vf}'.format(**vars())<line_sep><return>get_response(req_url).json()<block_end><class_stmt>Iqiyi(Extractor)<block_start>name='爱奇艺 (Iqiyi)'<line_sep>vd_2_id=dict(sum([[(vd id)<for>vd vds]<for>id,vds {'4K':[10 19] 'BD':[5 18 600] 'TD':[4 17 500] 'HD':[2 14 21 75 300] 'SD':[1 200] 'LD':[96 100]}.items()] []))<line_sep>id_2_profile={'4K':'4K' 'BD':'1080p' 'TD':'720p' 'HD':'540p' 'SD':'360p' 'LD':'210p'}<def_stmt>prepare self<block_start>info=MediaInfo(self.name)<if_stmt>self.url<and><not>self.vid<block_start>vid=match(self.url 'curid=([^_]+)_([\w]+)')<if_stmt>vid<block_start>self.vid=vid<try_stmt><block_start>info_json=get_response('http://pcw-api.iqiyi.com/video/video/playervideoinfo' params={'tvid':self.vid[0]}).json()<line_sep>info.title=info_json['data']['vn']<block_end><except_stmt><block_start>self.vid=<none><block_end><block_end><block_end><def_stmt>get_vid <block_start>html=get_content(self.url)<line_sep>video_info=match1(html ":video-info='(.+?)'")<if_stmt>video_info<block_start>video_info=json.loads(video_info)<line_sep>self.vid=str(video_info['tvId']) str(video_info['vid'])<line_sep>info.title=video_info['name']<block_end><else_stmt><block_start>tvid=match1(html 'tvId:\s*"([^"]+)' 'data-video-tvId="([^"]+)' '''\['tvid'\]\s*=\s*"([^"]+)''' '"tvId":\s*([^,]+)')<line_sep>videoid=match1(html 'data-video-vid="([^"]+)' 'vid"?\'?\]?\s*(?:=|:)\s*"?\'?([^"\',]+)')<if_stmt><not>(tvid<and>videoid)<block_start>url=match1(html 
'(www\.iqiyi\.com/v_\w+\.html)')<if_stmt>url<block_start>self.url='https://'+url<line_sep><return>get_vid()<block_end><block_end>self.vid=(tvid videoid)<line_sep>info.title=match1(html '<title>([^<]+)').split('-')[0]<block_end><block_end><if_stmt>self.url<and><not>self.vid<block_start>get_vid()<block_end>tvid,vid=self.vid<assert_stmt>tvid<and>vid "can't play this video!!"<def_stmt>push_stream_vd vs<block_start>vd=vs['vd']<line_sep>stream=self.vd_2_id[vd]<line_sep>stream_profile=self.id_2_profile[stream]<line_sep>fmt=vs.get('fileFormat')<if_stmt>fmt<block_start>stream<augadd>'-'+fmt<block_end>m3u8=vs['m3utx']<line_sep>info.streams[stream]={'video_profile':stream_profile 'container':'m3u8' 'src':[m3u8] 'size':0}<block_end><def_stmt>push_stream_bid url_prefix bid container fs_array size<block_start>stream=self.vd_2_id[bid]<line_sep>real_urls=[]<for_stmt>seg_info fs_array<block_start>url=url_prefix+seg_info['l']<line_sep>json_data=get_response(url).json()<line_sep>down_url=json_data['l']<line_sep>real_urls.append(down_url)<block_end>stream_profile=self.id_2_profile[stream]<line_sep>info.streams[stream]={'video_profile':stream_profile 'container':container 'src':real_urls 'size':size}<block_end><def_stmt>fetch_tmts #raise
# try use tmts first
# less http requests, get results quickly
<block_start>tmts_data=gettmts(tvid vid)<assert_stmt>tmts_data['code']<eq>'A00000'<line_sep>vs_array=tmts_data['data']['vidl']<for_stmt>vs vs_array<block_start>push_stream_vd(vs)<block_end>vip_conf=tmts_data['data'].get('ctl' {}).get('configs')<if_stmt>vip_conf<block_start><for_stmt>vds (('5' '18') ('10' '19'))<block_start><for_stmt>vd vds<block_start><if_stmt>vd<in>vip_conf<block_start>tmts_data=gettmts(tvid vip_conf[vd]['vid'])<if_stmt>tmts_data['code']<eq>'A00000'<block_start>push_stream_vd(tmts_data['data'])<line_sep><break><block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>fetch_vps # use vps as preferred fallback
<block_start>vps_data=getvps(tvid vid)<assert_stmt>vps_data['code']<eq>'A00000'<line_sep>url_prefix=vps_data['data']['vp'].get('du')<assert_stmt>url_prefix<line_sep>vs_array=vps_data['data']['vp']['tkl'][0]['vs']<for_stmt>vs vs_array<block_start>bid=vs['bid']<line_sep>fs_array=vs['fs']<line_sep>size=vs['vsize']<line_sep>push_stream_bid(url_prefix bid 'flv' fs_array size)<block_end><block_end><def_stmt>fetch_dash # use dash as fallback
<block_start><for_stmt>bid (500 300 200 100)<block_start>dash_data=getdash(tvid vid bid)<assert_stmt>dash_data['code']<eq>'A00000'<line_sep>url_prefix=dash_data['data'].get('dd')<if_stmt>url_prefix<is><none><block_start><continue><block_end>streams=dash_data['data']['program']['video']<for_stmt>stream streams<block_start><if_stmt>'fs'<in>stream<block_start>_bid=stream['bid']<line_sep>container=stream['ff']<line_sep>fs_array=stream['fs']<line_sep>size=stream['vsize']<line_sep>push_stream_bid(url_prefix _bid container fs_array size)<line_sep><break><block_end><block_end><block_end><block_end><for_stmt>fetch (fetch_tmts fetch_vps fetch_dash)<block_start><try_stmt><block_start>fetch()<line_sep><break><block_end><except_stmt>AssertionError<block_start><break><block_end><except_stmt>Exception<as>e<block_start>self.logger.debug(e exc_info=<true>)<line_sep><continue><block_end><block_end><assert_stmt>info.streams "can't play this video!!"<line_sep><return>info<block_end><def_stmt>prepare_list self<block_start>html=get_content(self.url)<line_sep><return>matchall(html 'data-tvid=\"([^\"]+)\" data-vid=\"([^\"]+)\"')<block_end><block_end>site=Iqiyi()<line_sep> |
"""
Stitches submodels together.
"""<import_stmt>numpy<as>np<import_stmt>time os<import_stmt>itertools<import_from_stmt>functools partial<import_from_stmt>collections defaultdict namedtuple<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<line_sep># Custom modules
<import_from_stmt>src hyperprior<import_from_stmt>src.loss losses<import_from_stmt>src.helpers maths datasets utils<import_from_stmt>src.network encoder generator discriminator hyper<import_from_stmt>src.loss.perceptual_similarity perceptual_loss<as>ps<import_from_stmt>default_config ModelModes ModelTypes hific_args directories<line_sep>Intermediates=namedtuple("Intermediates" ["input_image" # [0, 1] (after scaling from [0, 255])
"reconstruction" # [0, 1]
"latents_quantized" # Latents post-quantization.
"n_bpp" # Differential entropy estimate.
"q_bpp"])<line_sep># Shannon entropy estimate.
Disc_out=namedtuple("disc_out" ["D_real" "D_gen" "D_real_logits" "D_gen_logits"])<class_stmt>Model(nn.Module)<block_start><def_stmt>__init__ self args logger storage_train=defaultdict(list) storage_test=defaultdict(list) model_mode=ModelModes.TRAINING model_type=ModelTypes.COMPRESSION<block_start>super(Model self).__init__()<line_sep>"""
Builds hific model from submodels in network.
"""<line_sep>self.args=args<line_sep>self.model_mode=model_mode<line_sep>self.model_type=model_type<line_sep>self.logger=logger<line_sep>self.log_interval=args.log_interval<line_sep>self.storage_train=storage_train<line_sep>self.storage_test=storage_test<line_sep>self.step_counter=0<if_stmt>self.args.use_latent_mixture_model<is><true><block_start>self.args.latent_channels=self.args.latent_channels_DLMM<block_end><if_stmt><not>hasattr(ModelTypes self.model_type.upper())<block_start><raise>ValueError("Invalid model_type: [{}]".format(self.model_type))<block_end><if_stmt><not>hasattr(ModelModes self.model_mode.upper())<block_start><raise>ValueError("Invalid model_mode: [{}]".format(self.model_mode))<block_end>self.image_dims=self.args.image_dims# Assign from dataloader
self.batch_size=self.args.batch_size<line_sep>self.entropy_code=<false><if_stmt>model_mode<eq>ModelModes.EVALUATION<block_start>self.entropy_code=<true><block_end>self.Encoder=encoder.Encoder(self.image_dims self.batch_size C=self.args.latent_channels channel_norm=self.args.use_channel_norm)<line_sep>self.Generator=generator.Generator(self.image_dims self.batch_size C=self.args.latent_channels n_residual_blocks=self.args.n_residual_blocks channel_norm=self.args.use_channel_norm sample_noise=self.args.sample_noise noise_dim=self.args.noise_dim)<if_stmt>self.args.use_latent_mixture_model<is><true><block_start>self.Hyperprior=hyperprior.HyperpriorDLMM(bottleneck_capacity=self.args.latent_channels likelihood_type=self.args.likelihood_type mixture_components=self.args.mixture_components entropy_code=self.entropy_code)<block_end><else_stmt><block_start>self.Hyperprior=hyperprior.Hyperprior(bottleneck_capacity=self.args.latent_channels likelihood_type=self.args.likelihood_type entropy_code=self.entropy_code)<block_end>self.amortization_models=[self.Encoder self.Generator]<line_sep>self.amortization_models.extend(self.Hyperprior.amortization_models)<line_sep># Use discriminator if GAN mode enabled and in training/validation
self.use_discriminator=(self.model_type<eq>ModelTypes.COMPRESSION_GAN<and>(self.model_mode<ne>ModelModes.EVALUATION))<if_stmt>self.use_discriminator<is><true><block_start><assert_stmt>self.args.discriminator_steps<g>0 'Must specify nonzero training steps for D!'<line_sep>self.discriminator_steps=self.args.discriminator_steps<line_sep>self.logger.info('GAN mode enabled. Training discriminator for {} steps.'.format(self.discriminator_steps))<line_sep>self.Discriminator=discriminator.Discriminator(image_dims=self.image_dims context_dims=self.args.latent_dims C=self.args.latent_channels)<line_sep>self.gan_loss=partial(losses.gan_loss args.gan_loss_type)<block_end><else_stmt><block_start>self.discriminator_steps=0<line_sep>self.Discriminator=<none><block_end>self.squared_difference=torch.nn.MSELoss(reduction='none')<line_sep># Expects [-1,1] images or [0,1] with normalize=True flag
self.perceptual_loss=ps.PerceptualLoss(model='net-lin' net='alex' use_gpu=torch.cuda.is_available() gpu_ids=[args.gpu])<block_end><def_stmt>store_loss self key loss<block_start><assert_stmt>type(loss)<eq>float 'Call .item() on loss before storage'<if_stmt>self.training<is><true><block_start>storage=self.storage_train<block_end><else_stmt><block_start>storage=self.storage_test<block_end><if_stmt>self.writeout<is><true><block_start>storage[key].append(loss)<block_end><block_end><def_stmt>compression_forward self x<block_start>"""
Forward pass through encoder, hyperprior, and decoder.
Inputs
x: Input image. Format (N,C,H,W), range [0,1],
or [-1,1] if args.normalize_image is True
torch.Tensor
Outputs
intermediates: NamedTuple of intermediate values
"""<line_sep>image_dims=tuple(x.size()[1:])# (C,H,W)
<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>)<block_start>n_encoder_downsamples=self.Encoder.n_downsampling_layers<line_sep>factor=2<power>n_encoder_downsamples<line_sep>x=utils.pad_factor(x x.size()[2:] factor)<block_end># Encoder forward pass
y=self.Encoder(x)<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>)<block_start>n_hyperencoder_downsamples=self.Hyperprior.analysis_net.n_downsampling_layers<line_sep>factor=2<power>n_hyperencoder_downsamples<line_sep>y=utils.pad_factor(y y.size()[2:] factor)<block_end>hyperinfo=self.Hyperprior(y spatial_shape=x.size()[2:])<line_sep>latents_quantized=hyperinfo.decoded<line_sep>total_nbpp=hyperinfo.total_nbpp<line_sep>total_qbpp=hyperinfo.total_qbpp<line_sep># Use quantized latents as input to G
reconstruction=self.Generator(latents_quantized)<if_stmt>self.args.normalize_input_image<is><true><block_start>reconstruction=torch.tanh(reconstruction)<block_end># Undo padding
<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>)<block_start>reconstruction=reconstruction[: : :image_dims[1] :image_dims[2]]<block_end>intermediates=Intermediates(x reconstruction latents_quantized total_nbpp total_qbpp)<line_sep><return>intermediates hyperinfo<block_end><def_stmt>discriminator_forward self intermediates train_generator<block_start>""" Train on gen/real batches simultaneously. """<line_sep>x_gen=intermediates.reconstruction<line_sep>x_real=intermediates.input_image<line_sep># Alternate between training discriminator and compression models
<if_stmt>train_generator<is><false><block_start>x_gen=x_gen.detach()<block_end>D_in=torch.cat([x_real x_gen] dim=0)<line_sep>latents=intermediates.latents_quantized.detach()<line_sep>latents=torch.repeat_interleave(latents 2 dim=0)<line_sep>D_out,D_out_logits=self.Discriminator(D_in latents)<line_sep>D_out=torch.squeeze(D_out)<line_sep>D_out_logits=torch.squeeze(D_out_logits)<line_sep>D_real,D_gen=torch.chunk(D_out 2 dim=0)<line_sep>D_real_logits,D_gen_logits=torch.chunk(D_out_logits 2 dim=0)<line_sep><return>Disc_out(D_real D_gen D_real_logits D_gen_logits)<block_end><def_stmt>distortion_loss self x_gen x_real# loss in [0,255] space but normalized by 255 to not be too big
# - Delegate scaling to weighting
<block_start>sq_err=self.squared_difference(x_gen<times>255. x_real<times>255.)# / 255.
<return>torch.mean(sq_err)<block_end><def_stmt>perceptual_loss_wrapper self x_gen x_real normalize=<true><block_start>""" Assumes inputs are in [0, 1] if normalize=True, else [-1, 1] """<line_sep>LPIPS_loss=self.perceptual_loss.forward(x_gen x_real normalize=normalize)<line_sep><return>torch.mean(LPIPS_loss)<block_end><def_stmt>compression_loss self intermediates hyperinfo<block_start>x_real=intermediates.input_image<line_sep>x_gen=intermediates.reconstruction<if_stmt>self.args.normalize_input_image<is><true># [-1.,1.] -> [0.,1.]
<block_start>x_real=(x_real+1.)/2.<line_sep>x_gen=(x_gen+1.)/2.<block_end>distortion_loss=self.distortion_loss(x_gen x_real)<line_sep>perceptual_loss=self.perceptual_loss_wrapper(x_gen x_real normalize=<true>)<line_sep>weighted_distortion=self.args.k_M<times>distortion_loss<line_sep>weighted_perceptual=self.args.k_P<times>perceptual_loss<line_sep>weighted_rate,rate_penalty=losses.weighted_rate_loss(self.args total_nbpp=intermediates.n_bpp total_qbpp=intermediates.q_bpp step_counter=self.step_counter ignore_schedule=self.args.ignore_schedule)<line_sep>weighted_R_D_loss=weighted_rate+weighted_distortion<line_sep>weighted_compression_loss=weighted_R_D_loss+weighted_perceptual<line_sep># Bookkeeping
<if_stmt>(self.step_counter%self.log_interval<eq>1)<block_start>self.store_loss('rate_penalty' rate_penalty)<line_sep>self.store_loss('distortion' distortion_loss.item())<line_sep>self.store_loss('perceptual' perceptual_loss.item())<line_sep>self.store_loss('n_rate' intermediates.n_bpp.item())<line_sep>self.store_loss('q_rate' intermediates.q_bpp.item())<line_sep>self.store_loss('n_rate_latent' hyperinfo.latent_nbpp.item())<line_sep>self.store_loss('q_rate_latent' hyperinfo.latent_qbpp.item())<line_sep>self.store_loss('n_rate_hyperlatent' hyperinfo.hyperlatent_nbpp.item())<line_sep>self.store_loss('q_rate_hyperlatent' hyperinfo.hyperlatent_qbpp.item())<line_sep>self.store_loss('weighted_rate' weighted_rate.item())<line_sep>self.store_loss('weighted_distortion' weighted_distortion.item())<line_sep>self.store_loss('weighted_perceptual' weighted_perceptual.item())<line_sep>self.store_loss('weighted_R_D' weighted_R_D_loss.item())<line_sep>self.store_loss('weighted_compression_loss_sans_G' weighted_compression_loss.item())<block_end><return>weighted_compression_loss<block_end><def_stmt>GAN_loss self intermediates train_generator=<false><block_start>"""
train_generator: Flag to send gradients to generator
"""<line_sep>disc_out=self.discriminator_forward(intermediates train_generator)<line_sep>D_loss=self.gan_loss(disc_out mode='discriminator_loss')<line_sep>G_loss=self.gan_loss(disc_out mode='generator_loss')<line_sep># Bookkeeping
<if_stmt>(self.step_counter%self.log_interval<eq>1)<block_start>self.store_loss('D_gen' torch.mean(disc_out.D_gen).item())<line_sep>self.store_loss('D_real' torch.mean(disc_out.D_real).item())<line_sep>self.store_loss('disc_loss' D_loss.item())<line_sep>self.store_loss('gen_loss' G_loss.item())<line_sep>self.store_loss('weighted_gen_loss' (self.args.beta<times>G_loss).item())<block_end><return>D_loss G_loss<block_end><def_stmt>compress self x silent=<false><block_start>"""
* Pass image through encoder to obtain latents: x -> Encoder() -> y
* Pass latents through hyperprior encoder to obtain hyperlatents:
y -> hyperencoder() -> z
* Encode hyperlatents via nonparametric entropy model.
* Pass hyperlatents through mean-scale hyperprior decoder to obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Encode latents via entropy model derived from (mean, scale) hyperprior output.
"""<assert_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>) (f'Set model mode to {ModelModes.EVALUATION} for compression.')<line_sep>spatial_shape=tuple(x.size()[2:])<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>)<block_start>n_encoder_downsamples=self.Encoder.n_downsampling_layers<line_sep>factor=2<power>n_encoder_downsamples<line_sep>x=utils.pad_factor(x x.size()[2:] factor)<block_end># Encoder forward pass
y=self.Encoder(x)<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>)<block_start>n_hyperencoder_downsamples=self.Hyperprior.analysis_net.n_downsampling_layers<line_sep>factor=2<power>n_hyperencoder_downsamples<line_sep>y=utils.pad_factor(y y.size()[2:] factor)<block_end>compression_output=self.Hyperprior.compress_forward(y spatial_shape)<line_sep>attained_hbpp=32<times>len(compression_output.hyperlatents_encoded)/np.prod(spatial_shape)<line_sep>attained_lbpp=32<times>len(compression_output.latents_encoded)/np.prod(spatial_shape)<line_sep>attained_bpp=32<times>((len(compression_output.hyperlatents_encoded)+len(compression_output.latents_encoded))/np.prod(spatial_shape))<if_stmt>silent<is><false><block_start>self.logger.info('[ESTIMATED]')<line_sep>self.logger.info(f'BPP: {compression_output.total_bpp:.3f}')<line_sep>self.logger.info(f'HL BPP: {compression_output.hyperlatent_bpp:.3f}')<line_sep>self.logger.info(f'L BPP: {compression_output.latent_bpp:.3f}')<line_sep>self.logger.info('[ATTAINED]')<line_sep>self.logger.info(f'BPP: {attained_bpp:.3f}')<line_sep>self.logger.info(f'HL BPP: {attained_hbpp:.3f}')<line_sep>self.logger.info(f'L BPP: {attained_lbpp:.3f}')<block_end><return>compression_output<block_end><def_stmt>decompress self compression_output<block_start>"""
* Recover z* from compressed message.
* Pass recovered hyperlatents through mean-scale hyperprior decoder obtain mean,
scale over latents: z -> hyperdecoder() -> (mu, sigma).
* Use latent entropy model to recover y* from compressed image.
* Pass quantized latent through generator to obtain the reconstructed image.
y* -> Generator() -> x*.
"""<assert_stmt>self.model_mode<eq>ModelModes.EVALUATION<and>(self.training<is><false>) (f'Set model mode to {ModelModes.EVALUATION} for decompression.')<line_sep>latents_decoded=self.Hyperprior.decompress_forward(compression_output device=utils.get_device())<line_sep># Use quantized latents as input to G
reconstruction=self.Generator(latents_decoded)<if_stmt>self.args.normalize_input_image<is><true><block_start>reconstruction=torch.tanh(reconstruction)<block_end># Undo padding
image_dims=compression_output.spatial_shape<line_sep>reconstruction=reconstruction[: : :image_dims[0] :image_dims[1]]<if_stmt>self.args.normalize_input_image<is><true># [-1.,1.] -> [0.,1.]
<block_start>reconstruction=(reconstruction+1.)/2.<block_end>reconstruction=torch.clamp(reconstruction min=0. max=1.)<line_sep><return>reconstruction<block_end><def_stmt>forward self x train_generator=<false> return_intermediates=<false> writeout=<true><block_start>self.writeout=writeout<line_sep>losses=dict()<if_stmt>train_generator<is><true># Define a 'step' as one cycle of G-D training
<block_start>self.step_counter<augadd>1<block_end>intermediates,hyperinfo=self.compression_forward(x)<if_stmt>self.model_mode<eq>ModelModes.EVALUATION<block_start>reconstruction=intermediates.reconstruction<if_stmt>self.args.normalize_input_image<is><true># [-1.,1.] -> [0.,1.]
<block_start>reconstruction=(reconstruction+1.)/2.<block_end>reconstruction=torch.clamp(reconstruction min=0. max=1.)<line_sep><return>reconstruction intermediates.q_bpp<block_end>compression_model_loss=self.compression_loss(intermediates hyperinfo)<if_stmt>self.use_discriminator<is><true># Only send gradients to generator when training generator via
# `train_generator` flag
<block_start>D_loss,G_loss=self.GAN_loss(intermediates train_generator)<line_sep>weighted_G_loss=self.args.beta<times>G_loss<line_sep>compression_model_loss<augadd>weighted_G_loss<line_sep>losses['disc']=D_loss<block_end>losses['compression']=compression_model_loss<line_sep># Bookkeeping
<if_stmt>(self.step_counter%self.log_interval<eq>1)<block_start>self.store_loss('weighted_compression_loss' compression_model_loss.item())<block_end><if_stmt>return_intermediates<is><true><block_start><return>losses intermediates<block_end><else_stmt><block_start><return>losses<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>compress_test=<false><if_stmt>compress_test<is><true><block_start>model_mode=ModelModes.EVALUATION<block_end><else_stmt><block_start>model_mode=ModelModes.TRAINING<block_end>logger=utils.logger_setup(logpath=os.path.join(directories.experiments 'logs') filepath=os.path.abspath(__file__))<line_sep>device=utils.get_device()<line_sep>logger.info(f'Using device {device}')<line_sep>storage_train=defaultdict(list)<line_sep>storage_test=defaultdict(list)<line_sep>model=Model(hific_args logger storage_train storage_test model_mode=model_mode model_type=ModelTypes.COMPRESSION_GAN)<line_sep>model.to(device)<line_sep>logger.info(model)<line_sep>transform_param_names=list()<line_sep>transform_params=list()<line_sep>logger.info('ALL PARAMETERS')<for_stmt>n,p model.named_parameters()<block_start><if_stmt>('Encoder'<in>n)<or>('Generator'<in>n)<block_start>transform_param_names.append(n)<line_sep>transform_params.append(p)<block_end><if_stmt>('analysis'<in>n)<or>('synthesis'<in>n)<block_start>transform_param_names.append(n)<line_sep>transform_params.append(p)<block_end>logger.info(f'{n} - {p.shape}')<block_end>logger.info('AMORTIZATION PARAMETERS')<line_sep>amortization_named_parameters=itertools.chain.from_iterable([am.named_parameters()<for>am model.amortization_models])<for_stmt>n,p amortization_named_parameters<block_start>logger.info(f'{n} - {p.shape}')<block_end>logger.info('AMORTIZATION PARAMETERS')<for_stmt>n,p zip(transform_param_names transform_params)<block_start>logger.info(f'{n} - {p.shape}')<block_end>logger.info('HYPERPRIOR PARAMETERS')<for_stmt>n,p 
model.Hyperprior.hyperlatent_likelihood.named_parameters()<block_start>logger.info(f'{n} - {p.shape}')<block_end><if_stmt>compress_test<is><false><block_start>logger.info('DISCRIMINATOR PARAMETERS')<for_stmt>n,p model.Discriminator.named_parameters()<block_start>logger.info(f'{n} - {p.shape}')<block_end><block_end>logger.info("Number of trainable parameters: {}".format(utils.count_parameters(model)))<line_sep>logger.info("Estimated size: {} MB".format(utils.count_parameters(model)<times>4./10<power>6))<line_sep>B=10<line_sep>shape=[B 3 256 256]<line_sep>x=torch.randn(shape).to(device)<line_sep>start_time=time.time()<if_stmt>compress_test<is><true><block_start>model.eval()<line_sep>logger.info('Starting compression with input shape {}'.format(shape))<line_sep>compression_output=model.compress(x)<line_sep>reconstruction=model.decompress(compression_output)<line_sep>logger.info(f"n_bits: {compression_output.total_bits}")<line_sep>logger.info(f"bpp: {compression_output.total_bpp}")<line_sep>logger.info(f"MSE: {torch.mean(torch.square(reconstruction-x)).item()}")<block_end><else_stmt><block_start>logger.info('Starting forward pass with input shape {}'.format(shape))<line_sep>losses=model(x)<line_sep>compression_loss,disc_loss=losses['compression'] losses['disc']<block_end>logger.info('Delta t {:.3f}s'.format(time.time()-start_time))<block_end> |
##############################################################################
#
# Copyright (c) 2005 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""ZODB connection registry
"""<class_stmt>ConnectionRegistry<block_start>'''ZODB connection registry
This registry can hold either ZODB.Connection objects or OFS.Application
objects. In the latter case, a close operation will close the REQUEST as
well as the Connection referenced by the Application's _p_jar attribute.
'''<def_stmt>__init__ self<block_start>self._conns=[]<block_end><def_stmt>register self conn<block_start>self._conns.append(conn)<block_end><def_stmt>contains self conn<block_start><return>conn<in>self._conns<block_end><def_stmt>__len__ self<block_start><return>len(self._conns)<block_end><def_stmt>count self<block_start><return>len(self)<block_end><def_stmt>close self conn<block_start><if_stmt>self.contains(conn)<block_start>self._conns.remove(conn)<block_end>self._do_close(conn)<block_end><def_stmt>closeAll self<block_start><for_stmt>conn self._conns<block_start>self._do_close(conn)<block_end>self._conns=[]<block_end><def_stmt>_do_close self conn<block_start><if_stmt>hasattr(conn 'close')<block_start>conn.close()<block_end><else_stmt><block_start>conn.REQUEST.close()<line_sep>conn._p_jar.close()<block_end><block_end><block_end>registry=ConnectionRegistry()<line_sep>register=registry.register<line_sep>contains=registry.contains<line_sep>count=registry.count<line_sep>close=registry.close<line_sep>closeAll=registry.closeAll<line_sep> |
<import_from_stmt>clpy.manipulation.transpose *# NOQA
|
{"targets":[{"target_name":"fuse" "include_dirs":["<!(node -e \"require('napi-macros')\")" "<!(node -e \"require('fuse-shared-library/include')\")" ] "libraries":["<!(node -e \"require('fuse-shared-library/lib')\")" ] "sources":["fuse-native.c"] 'xcode_settings':{'OTHER_CFLAGS':['-g' '-O3' '-Wall']} 'cflags':['-g' '-O3' '-Wall'] } {"target_name":"postinstall" "type":"none" "dependencies":["fuse"] "copies":[{"destination":"build/Release" "files":["<!(node -e \"require('fuse-shared-library/lib')\")"] }]}]}<line_sep> |
# Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Courier utilities."""<import_from_stmt>typing Any Callable Text<import_from_stmt>absl logging<import_stmt>courier<def_stmt>_should_expose_method func:Callable[<ellipsis> Any] method_name:Text<arrow>bool<block_start><return>(callable(func)<and><not>method_name.startswith('_')<and>method_name<ne>'set_courier_server')<block_end><def_stmt>make_courier_server instance:Any *courier_args **courier_kwargs<arrow>courier.Server<block_start>"""Builds a courier.Server for an instance.
Args:
instance: The instance that the courier server associates with.
*courier_args: positional arguments to pass to courier.Server().
**courier_kwargs: keyword arguments to pass to courier.Server().
Returns:
A courier.Server object.
"""<line_sep>server=courier.Server(*courier_args **courier_kwargs)<line_sep># Bind all non-private user-defined local methods.
<for_stmt>method_name dir(instance)<block_start><if_stmt>method_name.startswith('_')<block_start><continue><block_end>func=getattr(instance method_name)<line_sep>logging.info('Binding: %s' method_name)<if_stmt>_should_expose_method(func method_name)<block_start>server.Bind(method_name func)<block_end><block_end><return>server<block_end> |
<import_stmt>pytest<line_sep>@pytest.fixture<def_stmt>experiment_chip_control testapp lab award ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-10-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'control_type':'input library'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_chip_H3K4me3 testapp lab award target_H3K4me3 ileum experiment_chip_control<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-10-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K4me3['uuid'] 'possible_controls':[experiment_chip_control['@id']]}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_chip_H3K27me3 testapp lab award target_H3K27me3 experiment_chip_control ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K27me3['uuid'] 'possible_controls':[experiment_chip_control['uuid']]}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_chip_CTCF testapp lab award target_CTCF k562<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-10-08' 'biosample_ontology':k562['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_CTCF['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_rna testapp lab award h1<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-10-08' 'assay_term_name':'RNA-seq' 'biosample_ontology':h1['uuid']}<line_sep><return>testapp.post_json('/experiment' item 
status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_dnase testapp lab award heart<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-10-08' 'assay_term_name':'DNase-seq' 'biosample_ontology':heart['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>ctrl_experiment testapp lab award cell_free<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'biosample_ontology':cell_free['uuid'] 'status':'in progress' 'assay_term_name':'ChIP-seq'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_no_read_length testapp experiment bam_file file_fastq_no_read_length replicate_1_1 base_library analysis_step_bam analysis_step_version_bam analysis_step_run_bam encode_lab <block_start>testapp.patch_json(replicate_1_1['@id'] {'experiment':experiment['@id'] 'library':base_library['@id'] })<line_sep>testapp.patch_json(file_fastq_no_read_length['@id'] {'dataset':experiment['@id'] 'replicate':replicate_1_1['@id'] })<line_sep>testapp.patch_json(bam_file['@id'] {'dataset':experiment['@id'] 'step_run':analysis_step_run_bam['@id'] 'assembly':'GRCh38' 'lab':encode_lab['@id'] 'derived_from':[file_fastq_no_read_length['@id']] })<line_sep>testapp.patch_json(experiment['@id'] {'status':'released' 'date_released':'2016-01-01' 'assay_term_name':'long read RNA-seq' })<line_sep><return>testapp.get(experiment['@id']+'@@index-data')<block_end>@pytest.fixture<def_stmt>file_exp lab award testapp experiment ileum<block_start>item={'lab':lab['uuid'] 'award':award['uuid'] 'assay_term_name':'RAMPAGE' 'biosample_ontology':ileum['uuid'] 'possible_controls':[experiment['uuid']] 'status':'released' 'date_released':'2016-01-01'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>file_exp2 lab award testapp 
ileum<block_start>item={'lab':lab['uuid'] 'award':award['uuid'] 'assay_term_name':'RAMPAGE' 'biosample_ontology':ileum['uuid'] 'status':'released' 'date_released':'2016-01-01'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_RNA_seq testapp lab award ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'RNA-seq'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_RRBS testapp lab award ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'assay_term_name':'RRBS' 'biosample_ontology':ileum['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_WGBS testapp lab award ileum<block_start>item={'award':award['uuid'] 'biosample_ontology':ileum['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'assay_term_name':'whole-genome shotgun bisulfite sequencing'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_control testapp lab award ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'control_type':'control'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K27me3 testapp lab award target_H3K27me3 ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 
'target':target_H3K27me3['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K36me3 testapp lab award target_H3K36me3 ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K36me3['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K4me1 testapp lab award target_H3K4me1 ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K4me1['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K4me3 testapp lab award target_H3K4me3 ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K4me3['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K27ac testapp lab award target_H3K27ac ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 'assay_term_name':'ChIP-seq' 'target':target_H3K27ac['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>reference_experiment_chip_seq_H3K9me3 testapp lab award target_H3K9me3 ileum<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'status':'released' 'date_released':'2019-01-08' 'biosample_ontology':ileum['uuid'] 
'assay_term_name':'ChIP-seq' 'target':target_H3K9me3['uuid']}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_pipeline_error testapp lab award cell_free<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'ChIP-seq' 'biosample_ontology':cell_free['uuid'] 'internal_status':'pipeline error' }<line_sep><return>item<block_end>@pytest.fixture<def_stmt>experiment_no_error testapp lab award cell_free<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'ChIP-seq' 'biosample_ontology':cell_free['uuid'] 'internal_status':'release ready' }<line_sep><return>item<block_end>@pytest.fixture<def_stmt>experiment_1 testapp lab award cell_free<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'RNA-seq' 'biosample_ontology':cell_free['uuid'] 'status':'in progress'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_2 testapp lab award cell_free<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'RNA-seq' 'biosample_ontology':cell_free['uuid'] 'status':'in progress'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>base_experiment_submitted testapp lab award cell_free<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'RNA-seq' 'biosample_ontology':cell_free['uuid'] 'status':'submitted' 'date_submitted':'2015-07-23' }<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_1_0 root experiment file 
file_ucsc_browser_composite<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<assert_stmt>root.get_by_uuid(file['uuid']).properties['dataset']<eq>str(item.uuid)<assert_stmt>root.get_by_uuid(file_ucsc_browser_composite['uuid']).properties['dataset']<ne>str(item.uuid)<line_sep>properties.update({'schema_version':'1' 'files':[file['uuid'] file_ucsc_browser_composite['uuid']]})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_2_0 <block_start><return>{'schema_version':'2' 'encode2_dbxrefs':['wgEncodeEH002945'] 'geo_dbxrefs':['GSM99494'] }<block_end>@pytest.fixture<def_stmt>experiment_3 <block_start><return>{'schema_version':'3' 'status':"DELETED" }<block_end>@pytest.fixture<def_stmt>experiment_6 <block_start><return>{'schema_version':'6' 'dataset_type':'experiment' }<block_end>@pytest.fixture<def_stmt>experiment_7 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'7' 'dbxrefs':['UCSC-ENCODE-cv:K562' 'UCSC-ENCODE-cv:K562'] 'aliases':['testing:123' 'testing:123']})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_10 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'10' 'status':'in progress' 'aliases':['andrew-fire:my_experiment' 'j-michael-cherry:Lib:XZ:20100107:11--ChIP:XZ:20100104:09:AdiposeNuclei:H3K4Me3' 'roadmap-epigenomics:Bisulfite-Seq analysis of ucsf-4* stem cell line from UCSF-4||Tue Apr 16 16:10:36 -0500 2013||85822' 'encode:[this is]_qu#ite:bad" ' 'manuel-garber:10% DMSO for 2 hours' 'UCSC_encode_db:Illumina_HiSeq_2000' 'encode:Illumina_HiSeq_2000']})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_13 root 
experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'13' 'status':'proposed' })<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_14 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'14' 'biosample_type':'in vitro sample' })<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_15 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'15' 'biosample_type':'immortalized cell line'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_16 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'16' 'biosample_type':'immortalized cell line' 'status':'ready for review'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_17 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'17' 'biosample_type':'immortalized cell line' 'status':'started'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_21 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'21' 'biosample_type':'induced pluripotent stem cell line' 'status':'started'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_22 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'22' 'biosample_type':'primary cell' 'biosample_term_id':'CL:0000765' 
'biosample_term_name':'erythroblast' 'internal_tags':['cre_inputv10' 'cre_inputv11' 'ENCYCLOPEDIAv3'] 'status':'started'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_25 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'25' 'assay_term_name':'ISO-seq'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_26 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'26' 'assay_term_name':'single-nuclei ATAC-seq'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_27 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'27' 'experiment_classification':['functional genomics assay']})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment testapp lab award cell_free<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'RNA-seq' 'biosample_ontology':cell_free['uuid']}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>base_experiment testapp lab award heart<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'RNA-seq' 'biosample_ontology':heart['uuid'] 'status':'in progress'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_with_RNA_library testapp base_experiment base_replicate base_library <block_start>testapp.patch_json(base_library['@id'] {'nucleic_acid_term_name':'RNA'})<line_sep>testapp.patch_json(base_replicate['@id'] {'library':base_library['@id']})<line_sep><return>testapp.get(base_experiment['@id']+'@@index-data')<block_end>@pytest.fixture<def_stmt>ChIP_experiment testapp lab 
award cell_free target base_matched_set<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'ChIP-seq' 'biosample_ontology':cell_free['uuid'] 'target':target['@id'] 'possible_controls':[base_matched_set['@id']]}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>micro_rna_experiment testapp base_experiment replicate_1_1 replicate_2_1 library_1 library_2 biosample_1 biosample_2 mouse_donor_1_6 file_fastq_3 file_fastq_4 file_bam_1_1 file_bam_2_1 file_tsv_1_1 file_tsv_1_2 spearman_correlation_quality_metric micro_rna_quantification_quality_metric_1_2 micro_rna_mapping_quality_metric_2_1 analysis_step_run_bam analysis_step_version_bam analysis_step_bam pipeline_bam <block_start>testapp.patch_json(file_fastq_3['@id'] {'read_length':20})<line_sep>testapp.patch_json(file_fastq_4['@id'] {'read_length':100})<line_sep>testapp.patch_json(file_bam_1_1['@id'] {'step_run':analysis_step_run_bam['@id'] 'assembly':'mm10'})<line_sep>testapp.patch_json(file_bam_2_1['@id'] {'step_run':analysis_step_run_bam['@id'] 'assembly':'mm10'})<line_sep>testapp.patch_json(pipeline_bam['@id'] {'title':'microRNA-seq pipeline'})<line_sep>testapp.patch_json(spearman_correlation_quality_metric['@id'] {'quality_metric_of':[file_tsv_1_1['@id'] file_tsv_1_2['@id']]})<line_sep>testapp.patch_json(biosample_1['@id'] {'donor':mouse_donor_1_6['@id']})<line_sep>testapp.patch_json(biosample_2['@id'] {'donor':mouse_donor_1_6['@id']})<line_sep>testapp.patch_json(biosample_1['@id'] {'organism':'/organisms/mouse/'})<line_sep>testapp.patch_json(biosample_2['@id'] {'organism':'/organisms/mouse/'})<line_sep>testapp.patch_json(biosample_1['@id'] {'model_organism_sex':'mixed'})<line_sep>testapp.patch_json(biosample_2['@id'] {'model_organism_sex':'mixed'})<line_sep>testapp.patch_json(library_1['@id'] {'biosample':biosample_1['@id']})<line_sep>testapp.patch_json(library_2['@id'] 
{'biosample':biosample_2['@id']})<line_sep>testapp.patch_json(replicate_1_1['@id'] {'library':library_1['@id']})<line_sep>testapp.patch_json(replicate_2_1['@id'] {'library':library_2['@id']})<line_sep>testapp.patch_json(file_tsv_1_1['@id'] {'output_type':'microRNA quantifications'})<line_sep>testapp.patch_json(file_tsv_1_2['@id'] {'output_type':'microRNA quantifications'})<line_sep>testapp.patch_json(base_experiment['@id'] {'status':'released' 'date_released':'2016-01-01' 'assay_term_name':'microRNA-seq'})<line_sep><return>testapp.get(base_experiment['@id']+'@@index-data')<block_end>@pytest.fixture<def_stmt>experiment_with_analysis testapp lab award heart analysis_1<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'ChIP-seq' 'status':'in progress' 'biosample_ontology':heart['uuid'] 'analyses':[analysis_1['@id']]}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_with_analysis_2 testapp lab award heart analysis_2<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'ChIP-seq' 'status':'in progress' 'biosample_ontology':heart['uuid'] 'analyses':[analysis_2['@id']]}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_28 testapp lab award heart<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'Mint-ChIP-seq' 'biosample_ontology':heart['uuid'] 'status':'in progress'}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_v28 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'28' 'internal_status':'pipeline error' 'pipeline_error_detail':'The pipeline didn\'t work for reasons' 'notes':'Insert essential details 
here'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>ATAC_experiment testapp lab award cell_free<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'ATAC-seq' 'biosample_ontology':cell_free['uuid']}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_29 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'29' 'assay_term_name':'single cell isolation followed by RNA-seq'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_mint_chip testapp lab award heart target_H3K27me3 experiment_chip_control<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'Mint-ChIP-seq' 'biosample_ontology':heart['uuid'] 'status':'in progress' 'target':target_H3K27me3['uuid'] 'possible_controls':[experiment_chip_control['uuid']]}<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>ATAC_experiment_replicated testapp lab award heart<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'ATAC-seq' 'biosample_ontology':heart['uuid']}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_30 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'30' 'analyses':[{'files':['/files/ENCFF881NAX/' '/files/ENCFF674HJF/']} {'files':['/files/ENCFF282TIA/' '/files/ENCFF910JDS/']} ] 'notes':'Previous notes.'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>ChIA_PET_experiment testapp lab encode4_award heart<block_start>item={'lab':lab['@id'] 'award':encode4_award['@id'] 'assay_term_name':'ChIA-PET' 'biosample_ontology':heart['uuid']}<line_sep><return>testapp.post_json('/experiment' 
item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_31 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'31' 'assay_term_name':'single-nucleus RNA-seq'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>BruChase_2h testapp lab award heart<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'BruChase' 'biosample_ontology':heart['uuid']}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>BruChase_6h testapp lab award heart<block_start>item={'lab':lab['@id'] 'award':award['@id'] 'assay_term_name':'BruChase' 'biosample_ontology':heart['uuid']}<line_sep><return>testapp.post_json('/experiment' item).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>single_cell_ATAC_experiment root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'32' 'assay_term_name':'single-cell ATAC-seq'})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_33 root experiment analysis_released<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'33' 'analysis_objects':[analysis_released['uuid']]})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>base_single_cell_experiment_submitted testapp lab award heart<block_start>item={'award':award['uuid'] 'lab':lab['uuid'] 'assay_term_name':'single-cell RNA sequencing assay' 'biosample_ontology':heart['uuid'] 'status':'submitted' 'date_submitted':'2015-07-23' }<line_sep><return>testapp.post_json('/experiment' item status=201).json['@graph'][0]<block_end>@pytest.fixture<def_stmt>experiment_34 root 
experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'34' 'internal_tags':['RegulomeDB']})<line_sep><return>properties<block_end>@pytest.fixture<def_stmt>experiment_35 root experiment<block_start>item=root.get_by_uuid(experiment['uuid'])<line_sep>properties=item.properties.copy()<line_sep>properties.update({'schema_version':'35' 'assay_term_name':'Capture Hi-C'})<line_sep><return>properties<block_end> |
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for resource estimation"""<import_stmt>inspect<import_stmt>pennylane<as>qml<def_stmt>_get_absolute_import_path fn<block_start><return>f"{inspect.getmodule(fn).__name__}.{fn.__name__}"<block_end><def_stmt>specs qnode max_expansion=<none><block_start>"""Resource information about a quantum circuit.
This transform converts a QNode into a callable that provides resource information
about the circuit.
Args:
qnode (.QNode): the QNode to calculate the specifications for
Keyword Args:
max_expansion (int): The number of times the internal circuit should be expanded when
calculating the specification. Defaults to ``qnode.max_expansion``.
Returns:
A function that has the same argument signature as ``qnode``. This function
returns a dictionary of information about qnode structure.
**Example**
.. code-block:: python3
x = np.array([0.1, 0.2])
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(x, add_ry=True):
qml.RX(x[0], wires=0)
qml.CNOT(wires=(0,1))
if add_ry:
qml.RY(x[1], wires=1)
return qml.probs(wires=(0,1))
>>> qml.specs(circuit)(x, add_ry=False)
{'gate_sizes': defaultdict(int, {1: 1, 2: 1}),
'gate_types': defaultdict(int, {'RX': 1, 'CNOT': 1}),
'num_operations': 2,
'num_observables': 1,
'num_diagonalizing_gates': 0,
'num_used_wires': 2,
'depth': 2,
'num_device_wires': 2,
'device_name': 'default.qubit.autograd',
'diff_method': 'backprop'}
.. UsageDetails::
``qml.specs`` can also be used with :class:`~.beta.qnode`:
.. code-block:: python3
x = np.array([0.1, 0.2])
dev = qml.device('default.qubit', wires=2)
@qml.beta.qnode(dev, diff_method="parameter-shift", shift=np.pi / 4)
def circuit(x, add_ry=True):
qml.RX(x[0], wires=0)
qml.CNOT(wires=(0,1))
if add_ry:
qml.RY(x[1], wires=1)
return qml.probs(wires=(0,1))
>>> qml.specs(circuit)(x, add_ry=False)
{'gate_sizes': defaultdict(int, {1: 1, 2: 1}),
'gate_types': defaultdict(int, {'RX': 1, 'CNOT': 1}),
'num_operations': 2,
'num_observables': 1,
'num_diagonalizing_gates': 0,
'num_used_wires': 2,
'depth': 2,
'num_trainable_params': 1,
'num_device_wires': 2,
'device_name': 'default.qubit',
'diff_method': 'parameter-shift',
'expansion_strategy': 'gradient',
'gradient_options': {'shift': 0.7853981633974483},
'interface': 'autograd',
'gradient_fn': 'pennylane.gradients.parameter_shift.param_shift',
'num_gradient_executions': 2}
"""<def_stmt>specs_qnode *args **kwargs<block_start>"""Returns information on the structure and makeup of provided QNode.
Dictionary keys:
* ``"num_operations"``
* ``"num_observables"``
* ``"num_diagonalizing_gates"``
* ``"gate_sizes"``: dictionary mapping gate number of wires to number of occurances
* ``"gate_types"``: dictionary mapping gate types to number of occurances
* ``"num_used_wires"``: number of wires used by the circuit
* ``"num_device_wires"``: number of wires in device
* ``"depth"``: longest path in directed acyclic graph representation
* ``"dev_short_name"``: name of QNode device
* ``"diff_method"``
Potential Additional Information:
* ``"num_trainable_params"``: number of individual scalars that are trainable
* ``"num_parameter_shift_executions"``: number of times circuit will execute when
calculating the derivative
Returns:
dict[str, Union[defaultdict,int]]: dictionaries that contain QNode specifications
"""<line_sep>initial_max_expansion=qnode.max_expansion<line_sep>qnode.max_expansion=max_expansion<try_stmt><block_start>qnode.construct(args kwargs)<block_end><finally_stmt><block_start>qnode.max_expansion=initial_max_expansion<block_end><if_stmt>isinstance(qnode qml.QNode)# TODO: remove when the old QNode is removed
<block_start><return>qnode.specs<block_end>info=qnode.qtape.specs.copy()<line_sep>info["num_device_wires"]=qnode.device.num_wires<line_sep>info["device_name"]=qnode.device.short_name<line_sep>info["expansion_strategy"]=qnode.expansion_strategy<line_sep>info["gradient_options"]=qnode.gradient_kwargs<line_sep>info["interface"]=qnode.interface<line_sep>info["diff_method"]=(_get_absolute_import_path(qnode.diff_method)<if>callable(qnode.diff_method)<else>qnode.diff_method)<if_stmt>isinstance(qnode.gradient_fn qml.gradients.gradient_transform)<block_start>info["gradient_fn"]=_get_absolute_import_path(qnode.gradient_fn)<try_stmt><block_start>info["num_gradient_executions"]=len(qnode.gradient_fn(qnode.qtape)[0])<block_end><except_stmt>Exception<as>e# pylint: disable=broad-except
# In the case of a broad exception, we don't want the `qml.specs` transform
# to fail. Instead, we simply indicate that the number of gradient executions
# is not supported for the reason specified.
<block_start>info["num_gradient_executions"]=f"NotSupported: {str(e)}"<block_end><block_end><else_stmt><block_start>info["gradient_fn"]=qnode.gradient_fn<block_end><return>info<block_end><return>specs_qnode<block_end> |
<import_from_stmt>django.utils.functional cached_property<import_from_stmt>redis.exceptions ConnectionError ResponseError<import_from_stmt>experiments.redis_client get_redis_client<line_sep>COUNTER_CACHE_KEY='experiments:participants:%s'<line_sep>COUNTER_FREQ_CACHE_KEY='experiments:freq:%s'<class_stmt>Counters(object)<block_start>@cached_property<def_stmt>_redis self<block_start><return>get_redis_client()<block_end><def_stmt>increment self key participant_identifier count=1<block_start><if_stmt>count<eq>0<block_start><return><block_end><try_stmt><block_start>cache_key=COUNTER_CACHE_KEY%key<line_sep>freq_cache_key=COUNTER_FREQ_CACHE_KEY%key<line_sep>new_value=self._redis.hincrby(cache_key participant_identifier count)<line_sep># Maintain histogram of per-user counts
<if_stmt>new_value<g>count<block_start>self._redis.hincrby(freq_cache_key new_value-count -1)<block_end>self._redis.hincrby(freq_cache_key new_value 1)<block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><pass><block_end><block_end><def_stmt>clear self key participant_identifier<block_start><try_stmt># Remove the direct entry
<block_start>cache_key=COUNTER_CACHE_KEY%key<line_sep>pipe=self._redis.pipeline()<line_sep>freq,_=pipe.hget(cache_key participant_identifier).hdel(cache_key participant_identifier).execute()<line_sep># Remove from the histogram
freq_cache_key=COUNTER_FREQ_CACHE_KEY%key<line_sep>self._redis.hincrby(freq_cache_key freq<or>0 -1)<block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><pass><block_end><block_end><def_stmt>get self key<block_start><try_stmt><block_start>cache_key=COUNTER_CACHE_KEY%key<line_sep><return>self._redis.hlen(cache_key)<block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><return>0<block_end><block_end><def_stmt>get_frequency self key participant_identifier<block_start><try_stmt><block_start>cache_key=COUNTER_CACHE_KEY%key<line_sep>freq=self._redis.hget(cache_key participant_identifier)<line_sep><return>int(freq)<if>freq<else>0<block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><return>0<block_end><block_end><def_stmt>get_frequencies self key<block_start><try_stmt><block_start>freq_cache_key=COUNTER_FREQ_CACHE_KEY%key<line_sep># In some cases when there are concurrent updates going on, there can
# briefly be a negative result for some frequency count. We discard these
# as they shouldn't really affect the result, and they are about to become
# zero anyway.
<return>dict((int(k) int(v))<for>(k v) self._redis.hgetall(freq_cache_key).items()<if>int(v)<g>0)<block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><return>dict()<block_end><block_end><def_stmt>reset self key<block_start><try_stmt><block_start>cache_key=COUNTER_CACHE_KEY%key<line_sep>self._redis.delete(cache_key)<line_sep>freq_cache_key=COUNTER_FREQ_CACHE_KEY%key<line_sep>self._redis.delete(freq_cache_key)<line_sep><return><true><block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><return><false><block_end><block_end><def_stmt>reset_pattern self pattern_key#similar to above, but can pass pattern as arg instead
<block_start><try_stmt><block_start>cache_key=COUNTER_CACHE_KEY%pattern_key<for_stmt>key self._redis.keys(cache_key)<block_start>self._redis.delete(key)<block_end>freq_cache_key=COUNTER_FREQ_CACHE_KEY%pattern_key<for_stmt>key self._redis.keys(freq_cache_key)<block_start>self._redis.delete(key)<block_end><return><true><block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><return><false><block_end><block_end><def_stmt>reset_prefix self key_prefix# Delete all data in redis for a given key prefix
<block_start><import_from_stmt>experiments.utils grouper<try_stmt><block_start><for_stmt>key_pattern [COUNTER_CACHE_KEY COUNTER_FREQ_CACHE_KEY]<block_start>match="%s:*"%(key_pattern%key_prefix)<line_sep>key_iter=self._redis.scan_iter(match)<line_sep># Delete keys in groups of 1000 to prevent problems with long
# running experiments having many participants
<for_stmt>keys grouper(key_iter 1000)# The last group will be padded with None to reach the specified batch
# size, so these are filtered out here
<block_start>self._redis.delete(*filter(<none> keys))<block_end><block_end><block_end><except_stmt>(ConnectionError ResponseError)# Handle Redis failures gracefully
<block_start><pass><block_end><block_end><block_end> |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/structured_output | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow.compat.v2<as>tf<import_from_stmt>tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model common<class_stmt>TestModule(tf.Module)# The fNNNN name prefixes in this file are such that the sorted order of the
# functions in the resulting MLIR output match the order in the source file,
# allowing us to conveniently co-locate the CHECK's with the code they are
# checking.
#
# Note: CHECK-DAG doesn't work with CHECK-SAME/CHECK-NEXT.
# Check index paths for results.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = []})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0000_single_return"]
<block_start>@tf.function(input_signature=[])<def_stmt>f0000_single_return self<block_start><return>tf.constant(1.0 shape=[1])<block_end># Check index paths for results with multiple return values.
# Note that semantically in Python, multiple return values are equivalent
# to returning a tuple/list.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0001_multiple_results_no_punctuation"]
@tf.function(input_signature=[])<def_stmt>f0001_multiple_results_no_punctuation self<block_start><return>tf.constant(1.0 shape=[1]) tf.constant(1.0 shape=[2])<block_end># Check index paths for results written explicitly with parentheses.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0002_multiple_results_parentheses"]
@tf.function(input_signature=[])<def_stmt>f0002_multiple_results_parentheses self<block_start><return>(tf.constant(1.0 shape=[1]) tf.constant(1.0 shape=[2]))<block_end># Check index paths for results written explicitly with brackets.
# This is semantically equivalent to the earlier test without parentheses,
# but this test serves as documentation of this behavior for the purposes
# of tf_saved_model users.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0003_multiple_results_brackets"]
@tf.function(input_signature=[])<def_stmt>f0003_multiple_results_brackets self<block_start><return>[tf.constant(1.0 shape=[1]) tf.constant(1.0 shape=[2])]<block_end># Check index paths for lists.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = [0, 0]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = [0, 1]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0004_list_2_elements"]
@tf.function(input_signature=[])<def_stmt>f0004_list_2_elements self<block_start><return>[[tf.constant(1.0 shape=[1]) tf.constant(1.0 shape=[2])]]<block_end># Check index paths for dicts.
# Keys are linearized in sorted order, matching `tf.nest.flatten`.
# More thorough testing of this is in structured_input.py. The underlying code
# path for linearization is shared, so no need to replicate that testing here.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}() -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]},
# CHECK-SAME: tensor<2xf32> {tf_saved_model.index_path = ["y"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0005_dict_2_keys"]
@tf.function(input_signature=[])<def_stmt>f0005_dict_2_keys self<block_start><return>{'x':tf.constant(1.0 shape=[1]) 'y':tf.constant(1.0 shape=[2]) }<block_end># Check index paths for outputs are correctly handled in the presence of
# multiple return statements.
#
# CHECK: func {{@[a-zA-Z_0-9]+}}(
# CHECK-SAME: %arg0: tensor<f32> {tf_saved_model.index_path = [0]}
# CHECK-SAME: ) -> (
# CHECK-SAME: tensor<1xf32> {tf_saved_model.index_path = ["x"]})
# CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["f0006_multiple_return_statements"]
@tf.function(input_signature=[tf.TensorSpec([] tf.float32)])<def_stmt>f0006_multiple_return_statements self x<block_start><if_stmt>x<g>3.<block_start><return>{'x':tf.constant(1.0 shape=[1])}<block_end><else_stmt><block_start><return>{'x':tf.constant(1.0 shape=[1])}<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>common.do_test(TestModule)<block_end> |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
<import_from_stmt>atom.api Bool Enum List Typed ForwardTyped observe<import_from_stmt>enaml.core.declarative d_<import_from_stmt>.action Action<import_from_stmt>.action_group ActionGroup<import_from_stmt>.constraints_widget ConstraintsWidget ProxyConstraintsWidget<class_stmt>ProxyToolBar(ProxyConstraintsWidget)<block_start>""" The abstract definition of a proxy ToolBar object.
"""<line_sep>#: A reference to the ToolBar declaration.
declaration=ForwardTyped(<lambda>:ToolBar)<def_stmt>set_button_style self style<block_start><raise>NotImplementedError<block_end><def_stmt>set_movable self movable<block_start><raise>NotImplementedError<block_end><def_stmt>set_floatable self floatable<block_start><raise>NotImplementedError<block_end><def_stmt>set_floating self floating<block_start><raise>NotImplementedError<block_end><def_stmt>set_dock_area self area<block_start><raise>NotImplementedError<block_end><def_stmt>set_allowed_dock_areas self areas<block_start><raise>NotImplementedError<block_end><def_stmt>set_orientation self orientation<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>ToolBar(ConstraintsWidget)<block_start>""" A widget which displays a row of tool buttons.
A ToolBar is typically used as a child of a MainWindow where it can
be dragged and docked in various locations in the same fashion as a
DockPane. However, a ToolBar can also be used as the child of a
Container and layed out with constraints, though in this case it will
lose its ability to be docked.
"""<line_sep>#: The button style to apply to actions added to the tool bar.
button_style=d_(Enum('icon_only' 'text_only' 'text_beside_icon' 'text_under_icon'))<line_sep>#: Whether or not the tool bar is movable by the user. This value
#: only has meaning if the tool bar is the child of a MainWindow.
movable=d_(Bool(<true>))<line_sep>#: Whether or not the tool bar can be floated as a separate window.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
floatable=d_(Bool(<true>))<line_sep>#: A boolean indicating whether or not the tool bar is floating.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
floating=d_(Bool(<false>))<line_sep>#: The dock area in the MainWindow where the tool bar is docked.
#: This value only has meaning if the tool bar is the child of a
#: MainWindow.
dock_area=d_(Enum('top' 'right' 'left' 'bottom'))<line_sep>#: The areas in the MainWindow where the tool bar can be docked
#: by the user. This value only has meaning if the tool bar is the
#: child of a MainWindow.
allowed_dock_areas=d_(List(Enum('top' 'right' 'left' 'bottom' 'all') ['all'] ))<line_sep>#: The orientation of the toolbar. This only has meaning when the
#: toolbar is not a child of a MainWindow and is used as part of
#: a constraints based layout.
orientation=d_(Enum('horizontal' 'vertical'))<line_sep>#: Whether or not to automatically adjust the 'hug_width' and
#: 'hug_height' values based on the value of 'orientation'.
auto_hug=d_(Bool(<true>))<line_sep>#: A reference to the ProxyToolBar object.
proxy=Typed(ProxyToolBar)<def_stmt>items self<block_start>""" Get the items defined on the tool bar.
"""<line_sep>allowed=(Action ActionGroup)<line_sep><return>[c<for>c self.children<if>isinstance(c allowed)]<block_end>#--------------------------------------------------------------------------
# Observers
#--------------------------------------------------------------------------
@observe('button_style' 'movable' 'floatable' 'floating' 'dock_area' 'allowed_dock_areas' 'orientation')<def_stmt>_update_proxy self change<block_start>""" An observer which sends state change to the proxy.
"""<line_sep># The superclass handler implementation is sufficient.
super(ToolBar self)._update_proxy(change)<block_end>#--------------------------------------------------------------------------
# DefaultValue Handlers
#--------------------------------------------------------------------------
<def_stmt>_default_hug_width self<block_start>""" Get the default hug width for the slider.
The default hug width is computed based on the orientation.
"""<if_stmt>self.orientation<eq>'horizontal'<block_start><return>'ignore'<block_end><return>'strong'<block_end><def_stmt>_default_hug_height self<block_start>""" Get the default hug height for the slider.
The default hug height is computed based on the orientation.
"""<if_stmt>self.orientation<eq>'vertical'<block_start><return>'ignore'<block_end><return>'strong'<block_end>#--------------------------------------------------------------------------
# PostSetAttr Handlers
#--------------------------------------------------------------------------
<def_stmt>_post_setattr_orientation self old new<block_start>""" Post setattr the orientation for the tool bar.
If auto hug is enabled, the hug values will be updated.
"""<if_stmt>self.auto_hug<block_start><if_stmt>new<eq>'vertical'<block_start>self.hug_width='strong'<line_sep>self.hug_height='ignore'<block_end><else_stmt><block_start>self.hug_width='ignore'<line_sep>self.hug_height='strong'<block_end><block_end><block_end><block_end> |
<def_stmt>f <block_start><def_stmt>g <block_start>__class__<block_end><block_end> |
<import_from_stmt>hyperparameter_hunter Environment CVExperiment<import_from_stmt>hyperparameter_hunter.utils.learning_utils get_toy_classification_data<import_from_stmt>xgboost XGBClassifier<def_stmt>execute <block_start>env=Environment(train_dataset=get_toy_classification_data() results_path="HyperparameterHunterAssets" metrics=["roc_auc_score"] cv_type="StratifiedKFold" cv_params=dict(n_splits=5 shuffle=<true> random_state=32) )<line_sep>experiment=CVExperiment(model_initializer=XGBClassifier model_init_params=dict(objective="reg:linear" max_depth=3 n_estimators=100 subsample=0.5) )<block_end><if_stmt>__name__<eq>"__main__"<block_start>execute()<block_end> |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The base class of all system node handlers."""<import_stmt>abc<import_from_stmt>tfx.orchestration metadata<import_from_stmt>tfx.proto.orchestration pipeline_pb2<import_from_stmt>ml_metadata.proto metadata_store_pb2<class_stmt>SystemNodeHandler(abc.ABC)<block_start>"""SystemNodeHandler is the base class of all system nodes' handler."""<line_sep>@abc.abstractmethod<def_stmt>run self mlmd_connection:metadata.Metadata pipeline_node:pipeline_pb2.PipelineNode pipeline_info:pipeline_pb2.PipelineInfo pipeline_runtime_spec:pipeline_pb2.PipelineRuntimeSpec<arrow>metadata_store_pb2.Execution<block_start>"""Runs the system node and return the Execution.
Args:
mlmd_connection: ML metadata connection.
pipeline_node: The specification of the node that this launcher lauches.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
Returns:
The execution of the run.
"""<line_sep><pass><block_end><block_end> |
"""
This module provides a functions that, given an oracle function that returns
``True`` when a message is properly padded and ``False`` otherwise, will
decrypt or encrypt a given message assuming that the underlying cipher
operates in CBC mode.
"""<import_from_future_stmt> print_function division<import_stmt>functools<import_stmt>multiprocessing<import_stmt>threading<import_stmt>os<import_from_stmt>six.moves map range<line_sep>__all__=['padding_oracle_decrypt' 'padding_oracle_encrypt']<def_stmt>interruptable_iter event iterable<block_start><for_stmt>value iterable<block_start><yield>value<if_stmt>event.is_set()<block_start><break><block_end><block_end><block_end><def_stmt>consult_oracle oracle chunk block is_last_byte<block_start><if_stmt><not>oracle(bytes(chunk+block))<block_start><return><false><block_end><if_stmt>is_last_byte<block_start>chunk[-2]<augxor>0x01<if_stmt><not>oracle(bytes(chunk+block))<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>check_padding_decrypt event oracle block_len chunk block plain i j<block_start><if_stmt>event.is_set()<block_start><return><none><block_end>chunk,plain=chunk[:] plain[:]<line_sep>plain[i]=j<line_sep>chunk[i]<augxor>j<if_stmt>consult_oracle(oracle chunk block i<eq>block_len-1)<block_start>event.set()<line_sep><return>plain<block_end><block_end><def_stmt>decrypt_block oracle block_len alphabet pool progress params<block_start>start,prev,block,prefix,suffix,is_last_block=params<if_stmt>pool<is><not><none><block_start>event_factory=multiprocessing.Manager().Event<line_sep>map_func=pool.imap_unordered<block_end><else_stmt><block_start>event_factory=threading.Event<line_sep>map_func=map<block_end>plain=bytearray([0]<times>block_len)<for_stmt>i,j enumerate(prefix)<block_start>plain[i]=j<if_stmt>progress<is><not><none><block_start>progress(start+i j)<block_end><block_end><for_stmt>i,j enumerate(reversed(suffix))<block_start>plain[block_len-i-1]=j<if_stmt>progress<is><not><none><block_start>progress(start+block_len-i-1 j)<block_end><block_end>in_padding=is_last_block<and><not>suffix<line_sep>i=block_len-1-len(suffix)<while_stmt>i<ge>len(prefix)<block_start>chunk=prev[:]<for_stmt>k range(i 
block_len)<block_start>chunk[k]<augxor>plain[k]^(block_len-i)<block_end>event=event_factory()<line_sep>f=functools.partial(check_padding_decrypt event oracle block_len chunk block plain i)<if_stmt>in_padding<block_start>_alphabet=range(1 17)<block_end><else_stmt><block_start>_alphabet=alphabet<block_end><for_stmt>result map_func(f interruptable_iter(event _alphabet))<block_start><if_stmt>result<is><not><none><block_start>plain=result<block_end><block_end><if_stmt><not>event.is_set()<block_start><raise>RuntimeError('Oracle is unstable')<block_end><if_stmt>in_padding<block_start>in_padding=<false><line_sep>pad_value=plain[-1]<for_stmt>j range(block_len-pad_value i)<block_start>plain[j]=pad_value<if_stmt>progress<is><not><none><block_start>progress(start+j pad_value)<block_end><block_end>i<augsub>pad_value<block_end><else_stmt><block_start><if_stmt>progress<is><not><none><block_start>progress(start+i plain[i])<block_end>i<augsub>1<block_end><block_end><return>plain<block_end><def_stmt>block_pairs block_len data known_prefix known_suffix<block_start>data_len=len(data)<line_sep>suffix_len=len(known_suffix)<for_stmt>prev,start,suffix_start zip(range(data_len-block_len<times>2 -1 -block_len) range(data_len-block_len -1 -block_len) range(suffix_len-block_len -data_len-1 -block_len))<block_start><yield>(prev data[prev:start] data[start:start+block_len] known_prefix[prev:start] known_suffix[max(suffix_start 0):max(suffix_start+block_len 0)] start+block_len<eq>data_len)<block_end><block_end><def_stmt>padding_oracle_decrypt oracle ciphertext known_prefix=b'' known_suffix=b'' block_size=128 alphabet=<none> pool=<none> block_pool=<none> progress=<none><block_start>"""
Decrypt ciphertext using an oracle function that returns ``True`` if the
provided ciphertext is correctly PKCS#7 padded after decryption. The
cipher needs to operate in CBC mode.
Args:
oracle(callable): The oracle function. Will be called repeatedly with
a chunk of ciphertext.
ciphertext(bytes): The data to decrypt. Should include the IV at the
start.
known_prefix(bytes): If the start of the plaintext is known, it can be
provided to skip decrypting the known prefix.
known_suffix(bytes): If the end of the plaintext is known, it can be
provided to skip decrypting the known suffix. Should include
padding.
block_size(int): The cipher's block size in bits.
alphabet(bytes): Optimize decryption if you know which characters the
plaintext will consist of.
pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the decryption. This pool is used to call the oracle
function. Fairly heavy due to the required inter-process state
synchronization. If ``None`` (the default), no multiprocessing
will be used.
block_pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the decryption. This pool is used to decrypt entire
blocks in parallel. When decrypting ciphertext consisting of
multiple blocks, it is usually more efficient than using the
``pool`` argument. If ``None`` (the default), no multiprocessing
will be used.
progress(callable): A callable that will be called each time a new
byte is decrypted. Is called with the positition of the character
in the plaintext result and the character itself.
Returns:
bytes: The decrypted data with its PKCS#7 padding stripped.
Raises:
RuntimeError: Raised if the oracle behaves unpredictable.
Example:
>>> from pwny import *
>>> with multiprocessing.Pool(5) as pool:
>>> print(padding_oracle_decrypt(oracle_function, encrypted_data, pool=pool))
b'decrypted data'
"""<line_sep>block_len=block_size<floordiv>8<assert_stmt>len(ciphertext)%block_len<eq>0<and>len(ciphertext)<ge>2<times>block_len<line_sep>known_prefix=bytearray(known_prefix)<line_sep>known_suffix=bytearray(known_suffix)<if_stmt>alphabet<is><none><block_start>alphabet=bytearray(range(256))<block_end><if_stmt>block_pool<is><not><none><block_start>map_func=block_pool.imap<block_end><else_stmt><block_start>map_func=map<block_end>plaintext=bytearray()<line_sep>decrypt_func=functools.partial(decrypt_block oracle block_len alphabet pool progress)<for_stmt>plain map_func(decrypt_func block_pairs(block_len bytearray(ciphertext) known_prefix known_suffix))<block_start>plaintext[0:0]=plain<block_end><return>bytes(plaintext[:-plaintext[-1]])<block_end><def_stmt>check_padding_encrypt event oracle block_len chunk block i j<block_start>chunk=chunk[:]<line_sep>chunk[i]=j<if_stmt>consult_oracle(oracle chunk block i<eq>block_len-1)<block_start>event.set()<line_sep><return>chunk<block_end><block_end><def_stmt>encrypt_block oracle block_len block plain pool<block_start><if_stmt>pool<is><not><none><block_start>event_factory=multiprocessing.Manager().Event<line_sep>map_func=pool.imap_unordered<block_end><else_stmt><block_start>event_factory=threading.Event<line_sep>map_func=map<block_end>cipher=bytearray([0]<times>block_len)<for_stmt>i range(block_len-1 -1 -1)<block_start>chunk=cipher[:]<for_stmt>k range(i+1 block_len)<block_start>chunk[k]<augxor>block_len-i<block_end>event=event_factory()<line_sep>f=functools.partial(check_padding_encrypt event oracle block_len chunk block i)<for_stmt>result map_func(f interruptable_iter(event range(256)))<block_start><if_stmt>result<is><not><none><block_start>cipher[i]=result[i]^(block_len-i)<block_end><block_end><if_stmt><not>event.is_set()<block_start><raise>RuntimeError('Oracle is unstable')<block_end><block_end><for_stmt>k,p enumerate(plain)<block_start>cipher[k]<augxor>p<block_end><return>cipher<block_end><def_stmt>padding_oracle_encrypt oracle 
plaintext block_size=128 pool=<none><block_start>"""
Encrypt plaintext using an oracle function that returns ``True`` if the
provided ciphertext is correctly PKCS#7 padded after decryption. The
cipher needs to operate in CBC mode.
Args:
oracle(callable): The oracle function. Will be called repeatedly with
a chunk of ciphertext.
plaintext(bytes): The plaintext data to encrypt.
block_size(int): The cipher's block size in bits.
pool(multiprocessing.Pool): A multiprocessing pool to use to
parallelize the encryption. This pool is used to call the oracle
function. Fairly heavy due to the required inter-process state
synchronization. If ``None`` (the default), no multiprocessing
will be used.
Returns:
bytes: The encrypted data.
Raises:
RuntimeError: Raised if the oracle behaves unpredictable.
"""<line_sep>plaintext=bytearray(plaintext)<line_sep>block_len=block_size<floordiv>8<line_sep>padding_len=block_len-(len(plaintext)%block_len)<line_sep>plaintext.extend([padding_len]<times>padding_len)<line_sep>ciphertext=bytearray()<line_sep>chunk=bytearray(os.urandom(block_len))<line_sep>ciphertext[0:0]=chunk<for_stmt>plain_start range(len(plaintext)-block_len -1 -block_len)<block_start>plain=plaintext[plain_start:plain_start+block_len]<line_sep>chunk=ciphertext[0:0]=encrypt_block(oracle block_len chunk plain pool)<block_end><return>bytes(ciphertext)<block_end> |
import hydra
import hydra.utils as utils
from pathlib import Path
import torch
import numpy as np
from tqdm import tqdm
import soundfile as sf
from model_encoder import Encoder, Encoder_lf0
from model_decoder import Decoder_ac
from model_encoder import SpeakerEncoder as Encoder_spk
import os
import random
from glob import glob
import subprocess
from spectrogram import logmelspectrogram
import kaldiio
import resampy
import pyworld as pw


def select_wavs(paths, min_dur=2, max_dur=8):
    """Filter audio paths by duration.

    Args:
        paths: Iterable of audio file paths readable by ``soundfile``.
        min_dur: Minimum duration in seconds (inclusive).
        max_dur: Maximum duration in seconds (inclusive).

    Returns:
        list: The paths whose duration lies in ``[min_dur, max_dur]``.
    """
    selected = []
    for p in paths:
        x, fs = sf.read(p)
        dur = len(x) / fs
        # BUG FIX: the upper bound was hard-coded to 8 seconds, silently
        # ignoring the max_dur parameter; use the parameter instead.
        if min_dur <= dur <= max_dur:
            selected.append(p)
    return selected


def extract_logmel(wav_path, mean, std, sr=16000):
    """Extract a normalized log-mel spectrogram and a normalized log-F0 track.

    Args:
        wav_path: Path to an audio file readable by ``soundfile``.
        mean: Per-mel-bin mean used to normalize the spectrogram.
        std: Per-mel-bin standard deviation used to normalize the spectrogram.
        sr: Target sample rate; the audio is resampled to this rate if needed.

    Returns:
        tuple: ``(mel, lf0)`` where ``mel`` is the (frames, 80) normalized
        log-mel spectrogram and ``lf0`` is a float32 vector of per-frame
        normalized log-F0 (zero on unvoiced frames).
    """
    # wav, fs = librosa.load(wav_path, sr=sr)
    wav, fs = sf.read(wav_path)
    if fs != sr:
        wav = resampy.resample(wav, fs, sr, axis=0)
        fs = sr
    # wav, _ = librosa.effects.trim(wav, top_db=15)
    # duration = len(wav)/fs
    # Generalized: was `assert fs == 16000`, which broke any other `sr`;
    # after the resample above, fs == sr is the actual invariant.
    assert fs == sr
    # Normalize peak amplitude to at most 1.0 before analysis.
    peak = np.abs(wav).max()
    if peak > 1.0:
        wav /= peak
    mel = logmelspectrogram(
        x=wav,
        fs=fs,
        n_mels=80,
        n_fft=400,
        n_shift=160,
        win_length=400,
        window='hann',
        fmin=80,
        fmax=7600,
    )
    mel = (mel - mean) / (std + 1e-8)
    tlen = mel.shape[0]
    # Hop size in milliseconds (160 samples per frame), so that pyworld's
    # F0 frames line up with the spectrogram frames.
    frame_period = 160 / fs * 1000
    f0, timeaxis = pw.dio(wav.astype('float64'), fs, frame_period=frame_period)
    f0 = pw.stonemask(wav.astype('float64'), f0, timeaxis, fs)
    f0 = f0[:tlen].reshape(-1).astype('float32')
    nonzeros_indices = np.nonzero(f0)
    lf0 = f0.copy()
    # log is only valid on voiced frames (f0 > 0); unvoiced frames stay 0.
    lf0[nonzeros_indices] = np.log(f0[nonzeros_indices])
    # Renamed from `mean, std` — the original shadowed the function's own
    # mel-statistics parameters with the log-F0 statistics.
    lf0_mean = np.mean(lf0[nonzeros_indices])
    lf0_std = np.std(lf0[nonzeros_indices])
    lf0[nonzeros_indices] = (lf0[nonzeros_indices] - lf0_mean) / (lf0_std + 1e-8)
    return mel, lf0


@hydra.main(config_path="config/convert.yaml")
def convert(cfg):
    """Convert a handful of source utterances to two unseen target speakers.

    Loads the encoder/decoder weights named in ``cfg.checkpoint``, extracts
    log-mel and log-F0 features for each source utterance, swaps in a target
    speaker embedding, writes the converted log-mel features to a Kaldi
    ark/scp pair, and finally invokes ``parallel-wavegan-decode`` to
    synthesize waveforms into the output directory.
    """
    # Source speaker (can be any unseen speaker); absolute dataset paths.
    src_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p225/*mic1.flac')
    src_wav_paths = select_wavs(src_wav_paths)
    # Target speakers (can be any unseen speakers).
    tar1_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p231/*mic1.flac')
    tar2_wav_paths = glob('/Dataset/VCTK-Corpus/wav48_silence_trimmed/p243/*mic1.flac')
    # tar1_wav_paths = select_wavs(tar1_wav_paths)
    # tar2_wav_paths = select_wavs(tar2_wav_paths)
    # Keep a single, deterministic reference utterance per target speaker.
    tar1_wav_paths = [sorted(tar1_wav_paths)[0]]
    tar2_wav_paths = [sorted(tar2_wav_paths)[0]]
    print('len(src):', len(src_wav_paths),
          'len(tar1):', len(tar1_wav_paths),
          'len(tar2):', len(tar2_wav_paths))

    # Output directory name encodes the experiment path and checkpoint step,
    # e.g. test/<exp>-<run>-<steps>.
    tmp = cfg.checkpoint.split('/')
    steps = tmp[-1].split('-')[-1].split('.')[0]
    out_dir = f'test/{tmp[-3]}-{tmp[-2]}-{steps}'
    out_dir = Path(utils.to_absolute_path(out_dir))
    out_dir.mkdir(exist_ok=True, parents=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    encoder = Encoder(**cfg.model.encoder)
    encoder_lf0 = Encoder_lf0()
    encoder_spk = Encoder_spk()
    decoder = Decoder_ac(dim_neck=64)
    encoder.to(device)
    encoder_lf0.to(device)
    encoder_spk.to(device)
    decoder.to(device)

    print("Load checkpoint from: {}:".format(cfg.checkpoint))
    checkpoint_path = utils.to_absolute_path(cfg.checkpoint)
    checkpoint = torch.load(checkpoint_path, map_location=lambda storage, loc: storage)
    encoder.load_state_dict(checkpoint["encoder"])
    encoder_spk.load_state_dict(checkpoint["encoder_spk"])
    decoder.load_state_dict(checkpoint["decoder"])
    # NOTE(review): no weights are restored into encoder_lf0 here — presumably
    # it is parameter-free or its state is not checkpointed; confirm against
    # the training script.
    encoder.eval()
    encoder_spk.eval()
    decoder.eval()

    # Per-mel-bin statistics computed during training; extract_logmel uses
    # them to normalize every spectrogram.
    mel_stats = np.load('./data/mel_stats.npy')
    mean = mel_stats[0]
    std = mel_stats[1]
    feat_writer = kaldiio.WriteHelper(
        "ark,scp:{o}.ark,{o}.scp".format(o=str(out_dir) + '/feats.1'))
    for i, src_wav_path in tqdm(enumerate(src_wav_paths, 1)):
        if i > 10:  # convert only the first 10 source utterances
            break
        mel, lf0 = extract_logmel(src_wav_path, mean, std)
        # Alternate between the two target speakers, odd indices -> tar2.
        if i % 2 == 1:
            ref_wav_path = random.choice(tar2_wav_paths)
            tar = 'tarMale_'
        else:
            ref_wav_path = random.choice(tar1_wav_paths)
            tar = 'tarFemale_'
        # NOTE(review): `tar` is never used below — it looks like it was meant
        # to prefix out_filename; kept for now to preserve intent visibly.
        ref_mel, _ = extract_logmel(ref_wav_path, mean, std)
        mel = torch.FloatTensor(mel.T).unsqueeze(0).to(device)
        lf0 = torch.FloatTensor(lf0).unsqueeze(0).to(device)
        ref_mel = torch.FloatTensor(ref_mel.T).unsqueeze(0).to(device)
        out_filename = os.path.basename(src_wav_path).split('.')[0]
        with torch.no_grad():
            z, _, _, _ = encoder.encode(mel)
            lf0_embs = encoder_lf0(lf0)
            spk_embs = encoder_spk(ref_mel)
            output = decoder(z, lf0_embs, spk_embs)
            logmel = output.squeeze(0).cpu().numpy()
            # Write converted features plus the source/reference mels for
            # side-by-side inspection.
            feat_writer[out_filename] = logmel
            feat_writer[out_filename + '_src'] = mel.squeeze(0).cpu().numpy().T
            feat_writer[out_filename + '_ref'] = ref_mel.squeeze(0).cpu().numpy().T
        subprocess.call(['cp', src_wav_path, out_dir])
    feat_writer.close()

    print('synthesize waveform...')
    cmd = ['parallel-wavegan-decode', '--checkpoint',
           '/vocoder/checkpoint-3000000steps.pkl',
           '--feats-scp', f'{str(out_dir)}/feats.1.scp',
           '--outdir', str(out_dir)]
    subprocess.call(cmd)


if __name__ == "__main__":
    convert()
<class_stmt>ListView(Control IComponent IDisposable IOleControl IOleObject IOleInPlaceObject IOleInPlaceActiveObject IOleWindow IViewObject IViewObject2 IPersist IPersistStreamInit IPersistPropertyBag IPersistStorage IQuickActivate ISupportOleDropSource IDropTarget ISynchronizeInvoke IWin32Window IArrangedElement IBindableComponent)<block_start>"""
Represents a Windows list view control,which displays a collection of items that can be displayed using one of four different views.
ListView()
"""<def_stmt>AccessibilityNotifyClients self *args<block_start>"""
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,objectID: int,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control .
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
objectID: The identifier of the System.Windows.Forms.AccessibleObject.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
AccessibilityNotifyClients(self: Control,accEvent: AccessibleEvents,childID: int)
Notifies the accessibility client applications of the specified
System.Windows.Forms.AccessibleEvents for the specified child control.
accEvent: The System.Windows.Forms.AccessibleEvents to notify the accessibility client applications of.
childID: The child System.Windows.Forms.Control to notify of the accessible event.
"""<line_sep><pass><block_end><def_stmt>ArrangeIcons self value=<none><block_start>"""
ArrangeIcons(self: ListView)
Arranges items in the control when they are displayed as icons based on the value of the
System.Windows.Forms.ListView.Alignment property.
ArrangeIcons(self: ListView,value: ListViewAlignment)
Arranges items in the control when they are displayed as icons with a specified alignment
setting.
value: One of the System.Windows.Forms.ListViewAlignment values.
"""<line_sep><pass><block_end><def_stmt>AutoResizeColumn self columnIndex headerAutoResize<block_start>"""
AutoResizeColumn(self: ListView,columnIndex: int,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the given column as indicated by the resize style.
columnIndex: The zero-based index of the column to resize.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""<line_sep><pass><block_end><def_stmt>AutoResizeColumns self headerAutoResize<block_start>"""
AutoResizeColumns(self: ListView,headerAutoResize: ColumnHeaderAutoResizeStyle)
Resizes the width of the columns as indicated by the resize style.
headerAutoResize: One of the System.Windows.Forms.ColumnHeaderAutoResizeStyle values.
"""<line_sep><pass><block_end><def_stmt>BeginUpdate self<block_start>"""
BeginUpdate(self: ListView)
Prevents the control from drawing until the System.Windows.Forms.ListView.EndUpdate method is
called.
"""<line_sep><pass><block_end><def_stmt>Clear self<block_start>"""
Clear(self: ListView)
Removes all items and columns from the control.
"""<line_sep><pass><block_end><def_stmt>CreateAccessibilityInstance self *args<block_start>"""
CreateAccessibilityInstance(self: Control) -> AccessibleObject
Creates a new accessibility object for the control.
Returns: A new System.Windows.Forms.AccessibleObject for the control.
"""<line_sep><pass><block_end><def_stmt>CreateControlsInstance self *args<block_start>"""
CreateControlsInstance(self: Control) -> ControlCollection
Creates a new instance of the control collection for the control.
Returns: A new instance of System.Windows.Forms.Control.ControlCollection assigned to the control.
"""<line_sep><pass><block_end><def_stmt>CreateHandle self *args<block_start>""" CreateHandle(self: ListView) """<line_sep><pass><block_end><def_stmt>DefWndProc self *args<block_start>"""
DefWndProc(self: Control,m: Message) -> Message
Sends the specified message to the default window procedure.
m: The Windows System.Windows.Forms.Message to process.
"""<line_sep><pass><block_end><def_stmt>DestroyHandle self *args<block_start>"""
DestroyHandle(self: Control)
Destroys the handle associated with the control.
"""<line_sep><pass><block_end><def_stmt>Dispose self<block_start>"""
Dispose(self: ListView,disposing: bool)
Releases the unmanaged resources used by the System.Windows.Forms.ListView and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""<line_sep><pass><block_end><def_stmt>EndUpdate self<block_start>"""
EndUpdate(self: ListView)
Resumes drawing of the list view control after drawing is suspended by the
System.Windows.Forms.ListView.BeginUpdate method.
"""<line_sep><pass><block_end><def_stmt>EnsureVisible self index<block_start>"""
EnsureVisible(self: ListView,index: int)
Ensures that the specified item is visible within the control,scrolling the contents of the
control if necessary.
index: The zero-based index of the item to scroll into view.
"""<line_sep><pass><block_end><def_stmt>FindItemWithText self text includeSubItemsInSearch=<none> startIndex=<none> isPrefixSearch=<none><block_start>"""
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int,isPrefixSearch: bool) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
isPrefixSearch: true to allow partial matches; otherwise,false.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str,includeSubItemsInSearch: bool,startIndex: int) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem or
System.Windows.Forms.ListViewItem.ListViewSubItem,if indicated,that begins with the specified
text value. The search starts at the specified index.
text: The text to search for.
includeSubItemsInSearch: true to include subitems in the search; otherwise,false.
startIndex: The index of the item at which to start the search.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
FindItemWithText(self: ListView,text: str) -> ListViewItem
Finds the first System.Windows.Forms.ListViewItem that begins with the specified text value.
text: The text to search for.
Returns: The first System.Windows.Forms.ListViewItem that begins with the specified text value.
"""<line_sep><pass><block_end><def_stmt>FindNearestItem self *__args<block_start>"""
FindNearestItem(self: ListView,searchDirection: SearchDirectionHint,x: int,y: int) -> ListViewItem
Finds the next item from the given x- and y-coordinates,searching in the specified direction.
searchDirection: One of the System.Windows.Forms.SearchDirectionHint values.
x: The x-coordinate for the point at which to begin searching.
y: The y-coordinate for the point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given coordinates,searching in the
specified direction.
FindNearestItem(self: ListView,dir: SearchDirectionHint,point: Point) -> ListViewItem
Finds the next item from the given point,searching in the specified direction
dir: One of the System.Windows.Forms.SearchDirectionHint values.
point: The point at which to begin searching.
Returns: The System.Windows.Forms.ListViewItem that is closest to the given point,searching in the
specified direction.
"""<line_sep><pass><block_end><def_stmt>GetAccessibilityObjectById self *args<block_start>"""
GetAccessibilityObjectById(self: Control,objectId: int) -> AccessibleObject
Retrieves the specified System.Windows.Forms.AccessibleObject.
objectId: An Int32 that identifies the System.Windows.Forms.AccessibleObject to retrieve.
Returns: An System.Windows.Forms.AccessibleObject.
"""<line_sep><pass><block_end><def_stmt>GetAutoSizeMode self *args<block_start>"""
GetAutoSizeMode(self: Control) -> AutoSizeMode
Retrieves a value indicating how a control will behave when its
System.Windows.Forms.Control.AutoSize property is enabled.
Returns: One of the System.Windows.Forms.AutoSizeMode values.
"""<line_sep><pass><block_end><def_stmt>GetItemAt self x y<block_start>"""
GetItemAt(self: ListView,x: int,y: int) -> ListViewItem
Retrieves the item at the specified location.
x: The x-coordinate of the location to search for an item (expressed in client coordinates).
y: The y-coordinate of the location to search for an item (expressed in client coordinates).
Returns: A System.Windows.Forms.ListViewItem that represents the item at the specified position. If there
is no item at the specified location,the method returns null.
"""<line_sep><pass><block_end><def_stmt>GetItemRect self index portion=<none><block_start>"""
GetItemRect(self: ListView,index: int,portion: ItemBoundsPortion) -> Rectangle
Retrieves the specified portion of the bounding rectangle for a specific item within the list
view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
portion: One of the System.Windows.Forms.ItemBoundsPortion values that represents a portion of the
System.Windows.Forms.ListViewItem for which to retrieve the bounding rectangle.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle for the specified portion of
the specified System.Windows.Forms.ListViewItem.
GetItemRect(self: ListView,index: int) -> Rectangle
Retrieves the bounding rectangle for a specific item within the list view control.
index: The zero-based index of the item within the System.Windows.Forms.ListView.ListViewItemCollection
whose bounding rectangle you want to return.
Returns: A System.Drawing.Rectangle that represents the bounding rectangle of the specified
System.Windows.Forms.ListViewItem.
"""<line_sep><pass><block_end><def_stmt>GetScaledBounds self *args<block_start>"""
GetScaledBounds(self: Control,bounds: Rectangle,factor: SizeF,specified: BoundsSpecified) -> Rectangle
Retrieves the bounds within which the control is scaled.
bounds: A System.Drawing.Rectangle that specifies the area for which to retrieve the display bounds.
factor: The height and width of the control's bounds.
specified: One of the values of System.Windows.Forms.BoundsSpecified that specifies the bounds of the
control to use when defining its size and position.
Returns: A System.Drawing.Rectangle representing the bounds within which the control is scaled.
"""<line_sep><pass><block_end><def_stmt>GetService self *args<block_start>"""
GetService(self: Component,service: Type) -> object
Returns an object that represents a service provided by the System.ComponentModel.Component or
by its System.ComponentModel.Container.
service: A service provided by the System.ComponentModel.Component.
Returns: An System.Object that represents a service provided by the System.ComponentModel.Component,or
null if the System.ComponentModel.Component does not provide the specified service.
"""<line_sep><pass><block_end><def_stmt>GetStyle self *args<block_start>"""
GetStyle(self: Control,flag: ControlStyles) -> bool
Retrieves the value of the specified control style bit for the control.
flag: The System.Windows.Forms.ControlStyles bit to return the value from.
Returns: true if the specified control style bit is set to true; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>GetTopLevel self *args<block_start>"""
GetTopLevel(self: Control) -> bool
Determines if the control is a top-level control.
Returns: true if the control is a top-level control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>HitTest self *__args<block_start>"""
HitTest(self: ListView,x: int,y: int) -> ListViewHitTestInfo
Provides item information,given x- and y-coordinates.
x: The x-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
y: The y-coordinate at which to retrieve the item information. The coordinate is relative to the
upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
HitTest(self: ListView,point: Point) -> ListViewHitTestInfo
Provides item information,given a point.
point: The System.Drawing.Point at which to retrieve the item information. The coordinates are relative
to the upper-left corner of the control.
Returns: A System.Windows.Forms.ListViewHitTestInfo.
"""<line_sep><pass><block_end><def_stmt>InitLayout self *args<block_start>"""
InitLayout(self: Control)
Called after the control has been added to another container.
"""<line_sep><pass><block_end><def_stmt>InvokeGotFocus self *args<block_start>"""
InvokeGotFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>InvokeLostFocus self *args<block_start>"""
InvokeLostFocus(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the event to.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>InvokeOnClick self *args<block_start>"""
InvokeOnClick(self: Control,toInvoke: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event for the specified control.
toInvoke: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Click event to.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>InvokePaint self *args<block_start>"""
InvokePaint(self: Control,c: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>InvokePaintBackground self *args<block_start>"""
InvokePaintBackground(self: Control,c: Control,e: PaintEventArgs)
Raises the PaintBackground event for the specified control.
c: The System.Windows.Forms.Control to assign the System.Windows.Forms.Control.Paint event to.
e: An System.Windows.Forms.PaintEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>IsInputChar self *args<block_start>"""
IsInputChar(self: Control,charCode: Char) -> bool
Determines if a character is an input character that the control recognizes.
charCode: The character to test.
Returns: true if the character should be sent directly to the control and not preprocessed; otherwise,
false.
"""<line_sep><pass><block_end><def_stmt>IsInputKey self *args<block_start>"""
IsInputKey(self: ListView,keyData: Keys) -> bool
keyData: One of the System.Windows.Forms.Keys values.
Returns: true if the specified key is a regular input key; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>MemberwiseClone self *args<block_start>"""
MemberwiseClone(self: MarshalByRefObject,cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity,which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone,which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""<line_sep><pass><block_end><def_stmt>NotifyInvalidate self *args<block_start>"""
NotifyInvalidate(self: Control,invalidatedArea: Rectangle)
Raises the System.Windows.Forms.Control.Invalidated event with a specified region of the control
to invalidate.
invalidatedArea: A System.Drawing.Rectangle representing the area to invalidate.
"""<line_sep><pass><block_end><def_stmt>OnAfterLabelEdit self *args<block_start>"""
OnAfterLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.AfterLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnAutoSizeChanged self *args<block_start>"""
OnAutoSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.AutoSizeChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnBackColorChanged self *args<block_start>"""
OnBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnBackgroundImageChanged self *args<block_start>"""
OnBackgroundImageChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnBackgroundImageLayoutChanged self *args<block_start>"""
OnBackgroundImageLayoutChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnBeforeLabelEdit self *args<block_start>"""
OnBeforeLabelEdit(self: ListView,e: LabelEditEventArgs)
Raises the System.Windows.Forms.ListView.BeforeLabelEdit event.
e: A System.Windows.Forms.LabelEditEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnBindingContextChanged self *args<block_start>"""
OnBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnCacheVirtualItems self *args<block_start>"""
OnCacheVirtualItems(self: ListView,e: CacheVirtualItemsEventArgs)
Raises the System.Windows.Forms.ListView.CacheVirtualItems event.
e: A System.Windows.Forms.CacheVirtualItemsEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnCausesValidationChanged self *args<block_start>"""
OnCausesValidationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CausesValidationChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnChangeUICues self *args<block_start>"""
OnChangeUICues(self: Control,e: UICuesEventArgs)
Raises the System.Windows.Forms.Control.ChangeUICues event.
e: A System.Windows.Forms.UICuesEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnClick self *args<block_start>"""
OnClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Click event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnClientSizeChanged self *args<block_start>"""
OnClientSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ClientSizeChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnColumnClick self *args<block_start>"""
OnColumnClick(self: ListView,e: ColumnClickEventArgs)
Raises the System.Windows.Forms.ListView.ColumnClick event.
e: A System.Windows.Forms.ColumnClickEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnColumnReordered self *args<block_start>"""
OnColumnReordered(self: ListView,e: ColumnReorderedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnReordered event.
e: The System.Windows.Forms.ColumnReorderedEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnColumnWidthChanged self *args<block_start>"""
OnColumnWidthChanged(self: ListView,e: ColumnWidthChangedEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanged event.
e: A System.Windows.Forms.ColumnWidthChangedEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnColumnWidthChanging self *args<block_start>"""
OnColumnWidthChanging(self: ListView,e: ColumnWidthChangingEventArgs)
Raises the System.Windows.Forms.ListView.ColumnWidthChanging event.
e: A System.Windows.Forms.ColumnWidthChangingEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnContextMenuChanged self *args<block_start>"""
OnContextMenuChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnContextMenuStripChanged self *args<block_start>"""
OnContextMenuStripChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ContextMenuStripChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnControlAdded self *args<block_start>"""
OnControlAdded(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlAdded event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnControlRemoved self *args<block_start>"""
OnControlRemoved(self: Control,e: ControlEventArgs)
Raises the System.Windows.Forms.Control.ControlRemoved event.
e: A System.Windows.Forms.ControlEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnCreateControl self *args<block_start>"""
OnCreateControl(self: Control)
Raises the System.Windows.Forms.Control.CreateControl method.
"""<line_sep><pass><block_end><def_stmt>OnCursorChanged self *args<block_start>"""
OnCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDockChanged self *args<block_start>"""
OnDockChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DockChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDoubleClick self *args<block_start>"""
OnDoubleClick(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DoubleClick event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDpiChangedAfterParent self *args<block_start>""" OnDpiChangedAfterParent(self: Control,e: EventArgs) """<line_sep><pass><block_end><def_stmt>OnDpiChangedBeforeParent self *args<block_start>""" OnDpiChangedBeforeParent(self: Control,e: EventArgs) """<line_sep><pass><block_end><def_stmt>OnDragDrop self *args<block_start>"""
OnDragDrop(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragDrop event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDragEnter self *args<block_start>"""
OnDragEnter(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragEnter event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDragLeave self *args<block_start>"""
OnDragLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.DragLeave event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDragOver self *args<block_start>"""
OnDragOver(self: Control,drgevent: DragEventArgs)
Raises the System.Windows.Forms.Control.DragOver event.
drgevent: A System.Windows.Forms.DragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDrawColumnHeader self *args<block_start>"""
OnDrawColumnHeader(self: ListView,e: DrawListViewColumnHeaderEventArgs)
Raises the System.Windows.Forms.ListView.DrawColumnHeader event.
e: A System.Windows.Forms.DrawListViewColumnHeaderEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDrawItem self *args<block_start>"""
OnDrawItem(self: ListView,e: DrawListViewItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawItem event.
e: A System.Windows.Forms.DrawListViewItemEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnDrawSubItem self *args<block_start>"""
OnDrawSubItem(self: ListView,e: DrawListViewSubItemEventArgs)
Raises the System.Windows.Forms.ListView.DrawSubItem event.
e: A System.Windows.Forms.DrawListViewSubItemEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnEnabledChanged self *args<block_start>"""
OnEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnEnter self *args<block_start>"""
OnEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Enter event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnFontChanged self *args<block_start>"""
OnFontChanged(self: ListView,e: EventArgs)
Raises the FontChanged event.
e: The System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnForeColorChanged self *args<block_start>"""
OnForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnGiveFeedback self *args<block_start>"""
OnGiveFeedback(self: Control,gfbevent: GiveFeedbackEventArgs)
Raises the System.Windows.Forms.Control.GiveFeedback event.
gfbevent: A System.Windows.Forms.GiveFeedbackEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnGotFocus self *args<block_start>"""
OnGotFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.GotFocus event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnHandleCreated self *args<block_start>"""
OnHandleCreated(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnHandleDestroyed self *args<block_start>"""
OnHandleDestroyed(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnHelpRequested self *args<block_start>"""
OnHelpRequested(self: Control,hevent: HelpEventArgs)
Raises the System.Windows.Forms.Control.HelpRequested event.
hevent: A System.Windows.Forms.HelpEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnImeModeChanged self *args<block_start>"""
OnImeModeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ImeModeChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnInvalidated self *args<block_start>"""
OnInvalidated(self: Control,e: InvalidateEventArgs)
Raises the System.Windows.Forms.Control.Invalidated event.
e: An System.Windows.Forms.InvalidateEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemActivate self *args<block_start>"""
OnItemActivate(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.ItemActivate event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemCheck self *args<block_start>"""
OnItemCheck(self: ListView,ice: ItemCheckEventArgs)
Raises the System.Windows.Forms.ListView.ItemCheck event.
ice: An System.Windows.Forms.ItemCheckEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemChecked self *args<block_start>"""
OnItemChecked(self: ListView,e: ItemCheckedEventArgs)
Raises the System.Windows.Forms.ListView.ItemChecked event.
e: An System.Windows.Forms.ItemCheckedEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemDrag self *args<block_start>"""
OnItemDrag(self: ListView,e: ItemDragEventArgs)
Raises the System.Windows.Forms.ListView.ItemDrag event.
e: An System.Windows.Forms.ItemDragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemMouseHover self *args<block_start>"""
OnItemMouseHover(self: ListView,e: ListViewItemMouseHoverEventArgs)
Raises the System.Windows.Forms.ListView.ItemMouseHover event.
e: A System.Windows.Forms.ListViewItemMouseHoverEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnItemSelectionChanged self *args<block_start>"""
OnItemSelectionChanged(self: ListView,e: ListViewItemSelectionChangedEventArgs)
Raises the System.Windows.Forms.ListView.ItemSelectionChanged event.
e: A System.Windows.Forms.ListViewItemSelectionChangedEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnKeyDown self *args<block_start>"""
OnKeyDown(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyDown event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnKeyPress self *args<block_start>"""
OnKeyPress(self: Control,e: KeyPressEventArgs)
Raises the System.Windows.Forms.Control.KeyPress event.
e: A System.Windows.Forms.KeyPressEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnKeyUp self *args<block_start>"""
OnKeyUp(self: Control,e: KeyEventArgs)
Raises the System.Windows.Forms.Control.KeyUp event.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnLayout self *args<block_start>"""
OnLayout(self: Control,levent: LayoutEventArgs)
Raises the System.Windows.Forms.Control.Layout event.
levent: A System.Windows.Forms.LayoutEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnLeave self *args<block_start>"""
OnLeave(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Leave event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnLocationChanged self *args<block_start>"""
OnLocationChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LocationChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnLostFocus self *args<block_start>"""
OnLostFocus(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.LostFocus event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMarginChanged self *args<block_start>"""
OnMarginChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MarginChanged event.
e: A System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseCaptureChanged self *args<block_start>"""
OnMouseCaptureChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseCaptureChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseClick self *args<block_start>"""
OnMouseClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseDoubleClick self *args<block_start>"""
OnMouseDoubleClick(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDoubleClick event.
e: An System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseDown self *args<block_start>"""
OnMouseDown(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseDown event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseEnter self *args<block_start>"""
OnMouseEnter(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseEnter event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseHover self *args<block_start>"""
OnMouseHover(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.Control.MouseHover event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseLeave self *args<block_start>"""
OnMouseLeave(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseMove self *args<block_start>"""
OnMouseMove(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseMove event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseUp self *args<block_start>"""
OnMouseUp(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseUp event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMouseWheel self *args<block_start>"""
OnMouseWheel(self: Control,e: MouseEventArgs)
Raises the System.Windows.Forms.Control.MouseWheel event.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnMove self *args<block_start>"""
OnMove(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Move event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnNotifyMessage self *args<block_start>"""
OnNotifyMessage(self: Control,m: Message)
Notifies the control of Windows messages.
m: A System.Windows.Forms.Message that represents the Windows message.
"""<line_sep><pass><block_end><def_stmt>OnPaddingChanged self *args<block_start>"""
OnPaddingChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.PaddingChanged event.
e: A System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnPaint self *args<block_start>"""
OnPaint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnPaintBackground self *args<block_start>"""
OnPaintBackground(self: Control,pevent: PaintEventArgs)
Paints the background of the control.
pevent: A System.Windows.Forms.PaintEventArgs that contains information about the control to paint.
"""<line_sep><pass><block_end><def_stmt>OnParentBackColorChanged self *args<block_start>"""
OnParentBackColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackColorChanged event when the
System.Windows.Forms.Control.BackColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentBackgroundImageChanged self *args<block_start>"""
OnParentBackgroundImageChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BackgroundImageChanged event when the
System.Windows.Forms.Control.BackgroundImage property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentBindingContextChanged self *args<block_start>"""
OnParentBindingContextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.BindingContextChanged event when the
System.Windows.Forms.Control.BindingContext property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentChanged self *args<block_start>"""
OnParentChanged(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentCursorChanged self *args<block_start>"""
OnParentCursorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.CursorChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentEnabledChanged self *args<block_start>"""
OnParentEnabledChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.EnabledChanged event when the
System.Windows.Forms.Control.Enabled property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentFontChanged self *args<block_start>"""
OnParentFontChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.FontChanged event when the
System.Windows.Forms.Control.Font property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentForeColorChanged self *args<block_start>"""
OnParentForeColorChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.ForeColorChanged event when the
System.Windows.Forms.Control.ForeColor property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentRightToLeftChanged self *args<block_start>"""
OnParentRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event when the
System.Windows.Forms.Control.RightToLeft property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnParentVisibleChanged self *args<block_start>"""
OnParentVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event when the
System.Windows.Forms.Control.Visible property value of the control's container changes.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnPreviewKeyDown self *args<block_start>"""
OnPreviewKeyDown(self: Control,e: PreviewKeyDownEventArgs)
Raises the System.Windows.Forms.Control.PreviewKeyDown event.
e: A System.Windows.Forms.PreviewKeyDownEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnPrint self *args<block_start>"""
OnPrint(self: Control,e: PaintEventArgs)
Raises the System.Windows.Forms.Control.Paint event.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnQueryContinueDrag self *args<block_start>"""
OnQueryContinueDrag(self: Control,qcdevent: QueryContinueDragEventArgs)
Raises the System.Windows.Forms.Control.QueryContinueDrag event.
qcdevent: A System.Windows.Forms.QueryContinueDragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnRegionChanged self *args<block_start>"""
OnRegionChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RegionChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnResize self *args<block_start>"""
OnResize(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnRetrieveVirtualItem self *args<block_start>"""
OnRetrieveVirtualItem(self: ListView,e: RetrieveVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.RetrieveVirtualItem event.
e: A System.Windows.Forms.RetrieveVirtualItemEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnRightToLeftChanged self *args<block_start>"""
OnRightToLeftChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.RightToLeftChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnRightToLeftLayoutChanged self *args<block_start>"""
OnRightToLeftLayoutChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.RightToLeftLayoutChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnSearchForVirtualItem self *args<block_start>"""
OnSearchForVirtualItem(self: ListView,e: SearchForVirtualItemEventArgs)
Raises the System.Windows.Forms.ListView.SearchForVirtualItem event.
e: A System.Windows.Forms.SearchForVirtualItemEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnSelectedIndexChanged self *args<block_start>"""
OnSelectedIndexChanged(self: ListView,e: EventArgs)
Raises the System.Windows.Forms.ListView.SelectedIndexChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnSizeChanged self *args<block_start>"""
OnSizeChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.SizeChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnStyleChanged self *args<block_start>"""
OnStyleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.StyleChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnSystemColorsChanged self *args<block_start>"""
OnSystemColorsChanged(self: ListView,e: EventArgs)
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnTabIndexChanged self *args<block_start>"""
OnTabIndexChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabIndexChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnTabStopChanged self *args<block_start>"""
OnTabStopChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TabStopChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnTextChanged self *args<block_start>"""
OnTextChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.TextChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnValidated self *args<block_start>"""
OnValidated(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.Validated event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnValidating self *args<block_start>"""
OnValidating(self: Control,e: CancelEventArgs)
Raises the System.Windows.Forms.Control.Validating event.
e: A System.ComponentModel.CancelEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>OnVirtualItemsSelectionRangeChanged self *args<block_start>"""
OnVirtualItemsSelectionRangeChanged(self: ListView,e: ListViewVirtualItemsSelectionRangeChangedEventArgs)
Raises the System.Windows.Forms.ListView.VirtualItemsSelectionRangeChanged event.
e: A System.Windows.Forms.ListViewVirtualItemsSelectionRangeChangedEventArgs that contains the
event data.
"""<line_sep><pass><block_end><def_stmt>OnVisibleChanged self *args<block_start>"""
OnVisibleChanged(self: Control,e: EventArgs)
Raises the System.Windows.Forms.Control.VisibleChanged event.
e: An System.EventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>ProcessCmdKey self *args<block_start>"""
ProcessCmdKey(self: Control,msg: Message,keyData: Keys) -> (bool,Message)
Processes a command key.
msg: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the character was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessDialogChar self *args<block_start>"""
ProcessDialogChar(self: Control,charCode: Char) -> bool
Processes a dialog character.
charCode: The character to process.
Returns: true if the character was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessDialogKey self *args<block_start>"""
ProcessDialogKey(self: Control,keyData: Keys) -> bool
Processes a dialog key.
keyData: One of the System.Windows.Forms.Keys values that represents the key to process.
Returns: true if the key was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessKeyEventArgs self *args<block_start>"""
ProcessKeyEventArgs(self: Control,m: Message) -> (bool,Message)
Processes a key message and generates the appropriate control events.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessKeyMessage self *args<block_start>"""
ProcessKeyMessage(self: Control,m: Message) -> (bool,Message)
Processes a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessKeyPreview self *args<block_start>"""
ProcessKeyPreview(self: Control,m: Message) -> (bool,Message)
Previews a keyboard message.
m: A System.Windows.Forms.Message,passed by reference,that represents the window message to
process.
Returns: true if the message was processed by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>ProcessMnemonic self *args<block_start>"""
ProcessMnemonic(self: Control,charCode: Char) -> bool
Processes a mnemonic character.
charCode: The character to process.
Returns: true if the character was processed as a mnemonic by the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>RaiseDragEvent self *args<block_start>"""
RaiseDragEvent(self: Control,key: object,e: DragEventArgs)
Raises the appropriate drag event.
key: The event to raise.
e: A System.Windows.Forms.DragEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>RaiseKeyEvent self *args<block_start>"""
RaiseKeyEvent(self: Control,key: object,e: KeyEventArgs)
Raises the appropriate key event.
key: The event to raise.
e: A System.Windows.Forms.KeyEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>RaiseMouseEvent self *args<block_start>"""
RaiseMouseEvent(self: Control,key: object,e: MouseEventArgs)
Raises the appropriate mouse event.
key: The event to raise.
e: A System.Windows.Forms.MouseEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>RaisePaintEvent self *args<block_start>"""
RaisePaintEvent(self: Control,key: object,e: PaintEventArgs)
Raises the appropriate paint event.
key: The event to raise.
e: A System.Windows.Forms.PaintEventArgs that contains the event data.
"""<line_sep><pass><block_end><def_stmt>RealizeProperties self *args<block_start>"""
RealizeProperties(self: ListView)
Initializes the properties of the System.Windows.Forms.ListView control that manage the
appearance of the control.
"""<line_sep><pass><block_end><def_stmt>RecreateHandle self *args<block_start>"""
RecreateHandle(self: Control)
Forces the re-creation of the handle for the control.
"""<line_sep><pass><block_end><def_stmt>RedrawItems self startIndex endIndex invalidateOnly<block_start>"""
RedrawItems(self: ListView,startIndex: int,endIndex: int,invalidateOnly: bool)
Forces a range of System.Windows.Forms.ListViewItem objects to be redrawn.
startIndex: The index for the first item in the range to be redrawn.
endIndex: The index for the last item of the range to be redrawn.
invalidateOnly: true to invalidate the range of items; false to invalidate and repaint the items.
"""<line_sep><pass><block_end><def_stmt>RescaleConstantsForDpi self *args<block_start>""" RescaleConstantsForDpi(self: Control,deviceDpiOld: int,deviceDpiNew: int) """<line_sep><pass><block_end><def_stmt>ResetMouseEventArgs self *args<block_start>"""
ResetMouseEventArgs(self: Control)
Resets the control to handle the System.Windows.Forms.Control.MouseLeave event.
"""<line_sep><pass><block_end><def_stmt>RtlTranslateAlignment self *args<block_start>"""
RtlTranslateAlignment(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
RtlTranslateAlignment(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
RtlTranslateAlignment(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""<line_sep><pass><block_end><def_stmt>RtlTranslateContent self *args<block_start>"""
RtlTranslateContent(self: Control,align: ContentAlignment) -> ContentAlignment
Converts the specified System.Drawing.ContentAlignment to the appropriate
System.Drawing.ContentAlignment to support right-to-left text.
align: One of the System.Drawing.ContentAlignment values.
Returns: One of the System.Drawing.ContentAlignment values.
"""<line_sep><pass><block_end><def_stmt>RtlTranslateHorizontal self *args<block_start>"""
RtlTranslateHorizontal(self: Control,align: HorizontalAlignment) -> HorizontalAlignment
Converts the specified System.Windows.Forms.HorizontalAlignment to the appropriate
System.Windows.Forms.HorizontalAlignment to support right-to-left text.
align: One of the System.Windows.Forms.HorizontalAlignment values.
Returns: One of the System.Windows.Forms.HorizontalAlignment values.
"""<line_sep><pass><block_end><def_stmt>RtlTranslateLeftRight self *args<block_start>"""
RtlTranslateLeftRight(self: Control,align: LeftRightAlignment) -> LeftRightAlignment
Converts the specified System.Windows.Forms.LeftRightAlignment to the appropriate
System.Windows.Forms.LeftRightAlignment to support right-to-left text.
align: One of the System.Windows.Forms.LeftRightAlignment values.
Returns: One of the System.Windows.Forms.LeftRightAlignment values.
"""<line_sep><pass><block_end><def_stmt>ScaleControl self *args<block_start>"""
ScaleControl(self: Control,factor: SizeF,specified: BoundsSpecified)
Scales a control's location,size,padding and margin.
factor: The factor by which the height and width of the control will be scaled.
specified: A System.Windows.Forms.BoundsSpecified value that specifies the bounds of the control to use
when defining its size and position.
"""<line_sep><pass><block_end><def_stmt>ScaleCore self *args<block_start>"""
ScaleCore(self: Control,dx: Single,dy: Single)
This method is not relevant for this class.
dx: The horizontal scaling factor.
dy: The vertical scaling factor.
"""<line_sep><pass><block_end><def_stmt>Select self<block_start>"""
Select(self: Control,directed: bool,forward: bool)
Activates a child control. Optionally specifies the direction in the tab order to select the
control from.
directed: true to specify the direction of the control to select; otherwise,false.
forward: true to move forward in the tab order; false to move backward in the tab order.
"""<line_sep><pass><block_end><def_stmt>SetAutoSizeMode self *args<block_start>"""
SetAutoSizeMode(self: Control,mode: AutoSizeMode)
Sets a value indicating how a control will behave when its System.Windows.Forms.Control.AutoSize
property is enabled.
mode: One of the System.Windows.Forms.AutoSizeMode values.
"""<line_sep><pass><block_end><def_stmt>SetBoundsCore self *args<block_start>"""
SetBoundsCore(self: Control,x: int,y: int,width: int,height: int,specified: BoundsSpecified)
Performs the work of setting the specified bounds of this control.
x: The new System.Windows.Forms.Control.Left property value of the control.
y: The new System.Windows.Forms.Control.Top property value of the control.
width: The new System.Windows.Forms.Control.Width property value of the control.
height: The new System.Windows.Forms.Control.Height property value of the control.
specified: A bitwise combination of the System.Windows.Forms.BoundsSpecified values.
"""<line_sep><pass><block_end><def_stmt>SetClientSizeCore self *args<block_start>"""
SetClientSizeCore(self: Control,x: int,y: int)
Sets the size of the client area of the control.
x: The client area width,in pixels.
y: The client area height,in pixels.
"""<line_sep><pass><block_end><def_stmt>SetStyle self *args<block_start>"""
SetStyle(self: Control,flag: ControlStyles,value: bool)
Sets a specified System.Windows.Forms.ControlStyles flag to either true or false.
flag: The System.Windows.Forms.ControlStyles bit to set.
value: true to apply the specified style to the control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>SetTopLevel self *args<block_start>"""
SetTopLevel(self: Control,value: bool)
Sets the control as the top-level control.
value: true to set the control as the top-level control; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>SetVisibleCore self *args<block_start>"""
SetVisibleCore(self: Control,value: bool)
Sets the control to the specified visible state.
value: true to make the control visible; otherwise,false.
"""<line_sep><pass><block_end><def_stmt>SizeFromClientSize self *args<block_start>"""
SizeFromClientSize(self: Control,clientSize: Size) -> Size
Determines the size of the entire control from the height and width of its client area.
clientSize: A System.Drawing.Size value representing the height and width of the control's client area.
Returns: A System.Drawing.Size value representing the height and width of the entire control.
"""<line_sep><pass><block_end><def_stmt>Sort self<block_start>"""
Sort(self: ListView)
Sorts the items of the list view.
"""<line_sep><pass><block_end><def_stmt>ToString self<block_start>"""
ToString(self: ListView) -> str
Returns a string representation of the System.Windows.Forms.ListView control.
Returns: A string that states the control type,the count of items in the System.Windows.Forms.ListView
control,and the type of the first item in the System.Windows.Forms.ListView,if the count is
not 0.
"""<line_sep><pass><block_end><def_stmt>UpdateBounds self *args<block_start>"""
UpdateBounds(self: Control,x: int,y: int,width: int,height: int,clientWidth: int,clientHeight: int)
Updates the bounds of the control with the specified size,location,and client size.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
clientWidth: The client System.Drawing.Size.Width of the control.
clientHeight: The client System.Drawing.Size.Height of the control.
UpdateBounds(self: Control,x: int,y: int,width: int,height: int)
Updates the bounds of the control with the specified size and location.
x: The System.Drawing.Point.X coordinate of the control.
y: The System.Drawing.Point.Y coordinate of the control.
width: The System.Drawing.Size.Width of the control.
height: The System.Drawing.Size.Height of the control.
UpdateBounds(self: Control)
Updates the bounds of the control with the current size and location.
"""<line_sep><pass><block_end><def_stmt>UpdateExtendedStyles self *args<block_start>"""
UpdateExtendedStyles(self: ListView)
Updates the extended styles applied to the list view control.
"""<line_sep><pass><block_end><def_stmt>UpdateStyles self *args<block_start>"""
UpdateStyles(self: Control)
Forces the assigned styles to be reapplied to the control.
"""<line_sep><pass><block_end><def_stmt>UpdateZOrder self *args<block_start>"""
UpdateZOrder(self: Control)
Updates the control in its parent's z-order.
"""<line_sep><pass><block_end><def_stmt>WndProc self *args<block_start>"""
WndProc(self: ListView,m: Message) -> Message
Overrides System.Windows.Forms.Control.WndProc(System.Windows.Forms.Message@).
m: The Windows System.Windows.Forms.Message to process.
"""<line_sep><pass><block_end><def_stmt>__enter__ self *args<block_start>"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""<line_sep><pass><block_end><def_stmt>__exit__ self *args<block_start>"""
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""<line_sep><pass><block_end><def_stmt>__init__ self *args<block_start>""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """<line_sep><pass><block_end><def_stmt>__str__ self *args<block_start><pass><block_end>Activation=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the type of action the user must take to activate an item.
Get: Activation(self: ListView) -> ItemActivation
Set: Activation(self: ListView)=value
"""<line_sep>Alignment=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the alignment of items in the control.
Get: Alignment(self: ListView) -> ListViewAlignment
Set: Alignment(self: ListView)=value
"""<line_sep>AllowColumnReorder=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the user can drag column headers to reorder columns in the control.
Get: AllowColumnReorder(self: ListView) -> bool
Set: AllowColumnReorder(self: ListView)=value
"""<line_sep>AutoArrange=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets whether icons are automatically kept arranged.
Get: AutoArrange(self: ListView) -> bool
Set: AutoArrange(self: ListView)=value
"""<line_sep>BackColor=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the background color.
Get: BackColor(self: ListView) -> Color
Set: BackColor(self: ListView)=value
"""<line_sep>BackgroundImageLayout=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets an System.Windows.Forms.ImageLayout value.
Get: BackgroundImageLayout(self: ListView) -> ImageLayout
Set: BackgroundImageLayout(self: ListView)=value
"""<line_sep>BackgroundImageTiled=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the background image of the System.Windows.Forms.ListView should be tiled.
Get: BackgroundImageTiled(self: ListView) -> bool
Set: BackgroundImageTiled(self: ListView)=value
"""<line_sep>BorderStyle=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the border style of the control.
Get: BorderStyle(self: ListView) -> BorderStyle
Set: BorderStyle(self: ListView)=value
"""<line_sep>CanEnableIme=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a value indicating whether the System.Windows.Forms.Control.ImeMode property can be set to an active value,to enable IME support.
"""<line_sep>CanRaiseEvents=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Determines if events can be raised on the control.
"""<line_sep>CheckBoxes=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether a check box appears next to each item in the control.
Get: CheckBoxes(self: ListView) -> bool
Set: CheckBoxes(self: ListView)=value
"""<line_sep>CheckedIndices=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the indexes of the currently checked items in the control.
Get: CheckedIndices(self: ListView) -> CheckedIndexCollection
"""<line_sep>CheckedItems=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the currently checked items in the control.
Get: CheckedItems(self: ListView) -> CheckedListViewItemCollection
"""<line_sep>Columns=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the collection of all column headers that appear in the control.
Get: Columns(self: ListView) -> ColumnHeaderCollection
"""<line_sep>CreateParams=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""This property is not relevant for this class.
"""<line_sep>DefaultCursor=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the default cursor for the control.
"""<line_sep>DefaultImeMode=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the default Input Method Editor (IME) mode supported by the control.
"""<line_sep>DefaultMargin=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the space,in pixels,that is specified by default between controls.
"""<line_sep>DefaultMaximumSize=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the length and height,in pixels,that is specified as the default maximum size of a control.
"""<line_sep>DefaultMinimumSize=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the length and height,in pixels,that is specified as the default minimum size of a control.
"""<line_sep>DefaultPadding=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the internal spacing,in pixels,of the contents of a control.
"""<line_sep>DefaultSize=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>DesignMode=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode.
"""<line_sep>DoubleBuffered=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>Events=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the list of event handlers that are attached to this System.ComponentModel.Component.
"""<line_sep>FocusedItem=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the item in the control that currently has focus.
Get: FocusedItem(self: ListView) -> ListViewItem
Set: FocusedItem(self: ListView)=value
"""<line_sep>FontHeight=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the height of the font of the control.
"""<line_sep>ForeColor=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the foreground color.
Get: ForeColor(self: ListView) -> Color
Set: ForeColor(self: ListView)=value
"""<line_sep>FullRowSelect=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether clicking an item selects all its subitems.
Get: FullRowSelect(self: ListView) -> bool
Set: FullRowSelect(self: ListView)=value
"""<line_sep>GridLines=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether grid lines appear between the rows and columns containing the items and subitems in the control.
Get: GridLines(self: ListView) -> bool
Set: GridLines(self: ListView)=value
"""<line_sep>Groups=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the collection of System.Windows.Forms.ListViewGroup objects assigned to the control.
Get: Groups(self: ListView) -> ListViewGroupCollection
"""<line_sep>HeaderStyle=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the column header style.
Get: HeaderStyle(self: ListView) -> ColumnHeaderStyle
Set: HeaderStyle(self: ListView)=value
"""<line_sep>HideSelection=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the selected item in the control remains highlighted when the control loses focus.
Get: HideSelection(self: ListView) -> bool
Set: HideSelection(self: ListView)=value
"""<line_sep>HotTracking=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the text of an item or subitem has the appearance of a hyperlink when the mouse pointer passes over it.
Get: HotTracking(self: ListView) -> bool
Set: HotTracking(self: ListView)=value
"""<line_sep>HoverSelection=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether an item is automatically selected when the mouse pointer remains over the item for a few seconds.
Get: HoverSelection(self: ListView) -> bool
Set: HoverSelection(self: ListView)=value
"""<line_sep>ImeModeBase=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the IME mode of a control.
"""<line_sep>InsertionMark=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets an object used to indicate the expected drop location when an item is dragged within a System.Windows.Forms.ListView control.
Get: InsertionMark(self: ListView) -> ListViewInsertionMark
"""<line_sep>Items=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a collection containing all items in the control.
Get: Items(self: ListView) -> ListViewItemCollection
"""<line_sep>LabelEdit=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the user can edit the labels of items in the control.
Get: LabelEdit(self: ListView) -> bool
Set: LabelEdit(self: ListView)=value
"""<line_sep>LabelWrap=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether item labels wrap when items are displayed in the control as icons.
Get: LabelWrap(self: ListView) -> bool
Set: LabelWrap(self: ListView)=value
"""<line_sep>LargeImageList=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as large icons in the control.
Get: LargeImageList(self: ListView) -> ImageList
Set: LargeImageList(self: ListView)=value
"""<line_sep>ListViewItemSorter=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the sorting comparer for the control.
Get: ListViewItemSorter(self: ListView) -> IComparer
Set: ListViewItemSorter(self: ListView)=value
"""<line_sep>MultiSelect=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether multiple items can be selected.
Get: MultiSelect(self: ListView) -> bool
Set: MultiSelect(self: ListView)=value
"""<line_sep>OwnerDraw=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the System.Windows.Forms.ListView control is drawn by the operating system or by code that you provide.
Get: OwnerDraw(self: ListView) -> bool
Set: OwnerDraw(self: ListView)=value
"""<line_sep>Padding=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the space between the System.Windows.Forms.ListView control and its contents.
Get: Padding(self: ListView) -> Padding
Set: Padding(self: ListView)=value
"""<line_sep>RenderRightToLeft=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""This property is now obsolete.
"""<line_sep>ResizeRedraw=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the control redraws itself when resized.
"""<line_sep>RightToLeftLayout=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the control is laid out from right to left.
Get: RightToLeftLayout(self: ListView) -> bool
Set: RightToLeftLayout(self: ListView)=value
"""<line_sep>ScaleChildren=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a value that determines the scaling of child controls.
"""<line_sep>Scrollable=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether a scroll bar is added to the control when there is not enough room to display all items.
Get: Scrollable(self: ListView) -> bool
Set: Scrollable(self: ListView)=value
"""<line_sep>SelectedIndices=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the indexes of the selected items in the control.
Get: SelectedIndices(self: ListView) -> SelectedIndexCollection
"""<line_sep>SelectedItems=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets the items that are selected in the control.
Get: SelectedItems(self: ListView) -> SelectedListViewItemCollection
"""<line_sep>ShowFocusCues=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a value indicating whether the control should display focus rectangles.
"""<line_sep>ShowGroups=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether items are displayed in groups.
Get: ShowGroups(self: ListView) -> bool
Set: ShowGroups(self: ListView)=value
"""<line_sep>ShowItemToolTips=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether ToolTips are shown for the System.Windows.Forms.ListViewItem objects contained in the System.Windows.Forms.ListView.
Get: ShowItemToolTips(self: ListView) -> bool
Set: ShowItemToolTips(self: ListView)=value
"""<line_sep>ShowKeyboardCues=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets a value indicating whether the user interface is in the appropriate state to show or hide keyboard accelerators.
"""<line_sep>SmallImageList=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the System.Windows.Forms.ImageList to use when displaying items as small icons in the control.
Get: SmallImageList(self: ListView) -> ImageList
Set: SmallImageList(self: ListView)=value
"""<line_sep>Sorting=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the sort order for items in the control.
Get: Sorting(self: ListView) -> SortOrder
Set: Sorting(self: ListView)=value
"""<line_sep>StateImageList=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the System.Windows.Forms.ImageList associated with application-defined states in the control.
Get: StateImageList(self: ListView) -> ImageList
Set: StateImageList(self: ListView)=value
"""<line_sep>Text=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""This property is not relevant for this class.
Get: Text(self: ListView) -> str
Set: Text(self: ListView)=value
"""<line_sep>TileSize=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the size of the tiles shown in tile view.
Get: TileSize(self: ListView) -> Size
Set: TileSize(self: ListView)=value
"""<line_sep>TopItem=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the first visible item in the control.
Get: TopItem(self: ListView) -> ListViewItem
Set: TopItem(self: ListView)=value
"""<line_sep>UseCompatibleStateImageBehavior=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether the System.Windows.Forms.ListView uses state image behavior that is compatible with the .NET Framework 1.1 or the .NET Framework 2.0.
Get: UseCompatibleStateImageBehavior(self: ListView) -> bool
Set: UseCompatibleStateImageBehavior(self: ListView)=value
"""<line_sep>View=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets how items are displayed in the control.
Get: View(self: ListView) -> View
Set: View(self: ListView)=value
"""<line_sep>VirtualListSize=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets the number of System.Windows.Forms.ListViewItem objects contained in the list when in virtual mode.
Get: VirtualListSize(self: ListView) -> int
Set: VirtualListSize(self: ListView)=value
"""<line_sep>VirtualMode=property(<lambda>self:object() <lambda>self v:<none> <lambda>self:<none>)<line_sep>"""Gets or sets a value indicating whether you have provided your own data-management operations for the System.Windows.Forms.ListView control.
Get: VirtualMode(self: ListView) -> bool
Set: VirtualMode(self: ListView)=value
"""<line_sep>AfterLabelEdit=<none><line_sep>BackgroundImageLayoutChanged=<none><line_sep>BeforeLabelEdit=<none><line_sep>CacheVirtualItems=<none><line_sep>CheckedIndexCollection=<none><line_sep>CheckedListViewItemCollection=<none><line_sep>ColumnClick=<none><line_sep>ColumnHeaderCollection=<none><line_sep>ColumnReordered=<none><line_sep>ColumnWidthChanged=<none><line_sep>ColumnWidthChanging=<none><line_sep>DrawColumnHeader=<none><line_sep>DrawItem=<none><line_sep>DrawSubItem=<none><line_sep>ItemActivate=<none><line_sep>ItemCheck=<none><line_sep>ItemChecked=<none><line_sep>ItemDrag=<none><line_sep>ItemMouseHover=<none><line_sep>ItemSelectionChanged=<none><line_sep>ListViewItemCollection=<none><line_sep>PaddingChanged=<none><line_sep>Paint=<none><line_sep>RetrieveVirtualItem=<none><line_sep>RightToLeftLayoutChanged=<none><line_sep>SearchForVirtualItem=<none><line_sep>SelectedIndexChanged=<none><line_sep>SelectedIndexCollection=<none><line_sep>SelectedListViewItemCollection=<none><line_sep>TextChanged=<none><line_sep>VirtualItemsSelectionRangeChanged=<none><block_end> |
import torch
import torch.nn as nn

from fpconv.pointnet2.pointnet2_modules import PointnetFPModule, PointnetSAModule
import fpconv.pointnet2.pytorch_utils as pt_utils
from fpconv.base import AssemRes_BaseBlock
from fpconv.fpconv import FPConv4x4_BaseBlock, FPConv6x6_BaseBlock

# Per-level hyper-parameters. Index k is the k-th set-abstraction
# (downsampling) level; RADIUS/NSAMPLE carry one extra entry because each
# SA level also passes the *next* level's radius/nsample to its
# downsampling branch (radius_ds / nsample_ds below).
NPOINTS = [8192, 2048, 512, 128]
RADIUS = [0.1, 0.2, 0.4, 0.8, 1.6]
NSAMPLE = [32, 32, 32, 32, 16]
MLPS = [[64, 64], [128, 128], [256, 256], [512, 512], [1024, 1024]]
FP_MLPS = [[128, 128], [256, 128], [512, 256], [1024, 512]]
CLS_FC = [128]
DP_RATIO = 0.5


def get_model(num_class, input_channels=3):
    """Factory: build a Pointnet2SSG segmentation network.

    Parameters
    ----------
    num_class : int
        Number of semantic classes predicted per point.
    input_channels : int
        Per-point input feature channels (xyz not included).
    """
    return Pointnet2SSG(num_class, input_channels)


class Pointnet2SSG(nn.Module):
    """PointNet++-style single-scale-grouping segmentation network whose
    set-abstraction levels are built from FPConv blocks.

    Parameters
    ----------
    num_class : int
        Number of output classes per point.
    input_channels : int
        Per-point input feature channels (not including xyz).
    use_xyz : bool
        Forwarded to the FPConv blocks (presumably controls whether xyz is
        concatenated to point features -- confirm in fpconv.base).
    """

    def __init__(self, num_class, input_channels=3, use_xyz=False):
        super().__init__()
        print(NPOINTS)

        self.SA_modules = nn.ModuleList()
        # Stem: full-resolution FPConv block (npoint=None -> no downsampling).
        self.conv0 = AssemRes_BaseBlock(
            CONV_BASE=FPConv6x6_BaseBlock,
            npoint=None,
            radius=RADIUS[0],
            nsample=NSAMPLE[0],
            channel_list=[input_channels] + MLPS[0],
            use_xyz=use_xyz)

        channel_in = MLPS[0][-1]
        skip_channel_list = [channel_in]
        for k in range(len(NPOINTS)):
            mlps = [MLPS[k + 1].copy()]
            channel_out = 0
            for idx in range(len(mlps)):
                mlps[idx] = [channel_in] + mlps[idx]
                channel_out += mlps[idx][-1]
            print(mlps[0], RADIUS[k], RADIUS[k + 1])
            # First two levels use the larger 6x6 FPConv kernel; deeper
            # (sparser) levels switch to the cheaper 4x4 kernel.
            conv_base = FPConv6x6_BaseBlock if k < 2 else FPConv4x4_BaseBlock
            self.SA_modules.append(
                AssemRes_BaseBlock(
                    CONV_BASE=conv_base,
                    npoint=NPOINTS[k],
                    nsample=NSAMPLE[k],
                    radius=RADIUS[k],
                    channel_list=mlps[0],
                    nsample_ds=NSAMPLE[k + 1],
                    radius_ds=RADIUS[k + 1],
                    use_xyz=use_xyz))
            skip_channel_list.append(channel_out)
            channel_in = channel_out

        # Feature-propagation (decoder) levels; FP_modules[k] upsamples to
        # the resolution of encoder level k.
        self.FP_modules = nn.ModuleList()
        for k in range(len(FP_MLPS)):
            pre_channel = FP_MLPS[k + 1][-1] if k + 1 < len(FP_MLPS) else channel_out
            mlp = [pre_channel + skip_channel_list[k]] + FP_MLPS[k]
            print(mlp)
            self.FP_modules.append(PointnetFPModule(mlp=mlp))

        # Per-point classification head: Conv2d stack + dropout after the
        # first layer, final layer without BN/activation (raw logits).
        cls_layers = []
        pre_channel = FP_MLPS[0][-1]
        for k in range(len(CLS_FC)):
            cls_layers.append(pt_utils.Conv2d(pre_channel, CLS_FC[k], bn=True))
            pre_channel = CLS_FC[k]
        cls_layers.append(pt_utils.Conv2d(pre_channel, num_class, activation=None, bn=False))
        # BUGFIX: dropout ratio was hard-coded to 0.5, silently ignoring
        # the module-level DP_RATIO constant (same value today, but the
        # two could diverge). Use the constant.
        cls_layers.insert(1, nn.Dropout(DP_RATIO))
        self.cls_layer = nn.Sequential(*cls_layers)

    def _break_up_pc(self, pc):
        """Split a point-cloud tensor into coordinates and features.

        Parameters
        ----------
        pc : torch.Tensor, shape (B, N, 3 + C)
            First three channels are xyz; the rest (if any) are features.

        Returns
        -------
        (xyz, features) : xyz is (B, N, 3); features is (B, C, N), or
        None when the cloud carries no extra channels.
        """
        xyz = pc[..., 0:3].contiguous()
        features = (pc[..., 3:].transpose(1, 2).contiguous()
                    if pc.size(-1) > 3 else None)
        return xyz, features

    def forward(self, pointcloud: torch.cuda.FloatTensor):
        """Predict per-point class logits.

        Parameters
        ----------
        pointcloud : torch.Tensor, shape (B, N, 3 + input_channels)

        Returns
        -------
        torch.Tensor, shape (B, N, num_class)
        """
        xyz, features = self._break_up_pc(pointcloud)
        _, features = self.conv0(xyz, features)

        # Encoder: record per-level xyz/features for the skip connections.
        l_xyz, l_features = [xyz], [features]
        for i in range(len(self.SA_modules)):
            li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
            l_xyz.append(li_xyz)
            l_features.append(li_features)

        # Decoder: propagate features back up, innermost level first.
        for i in range(-1, -(len(self.FP_modules) + 1), -1):
            l_features[i - 1] = self.FP_modules[i](
                l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i])

        fn_feats = l_features[0].unsqueeze(-1)  # B, C, N, 1
        pred_cls = self.cls_layer(fn_feats).squeeze(-1).transpose(1, 2).contiguous()  # B, N, C
        return pred_cls
import random
import threading

import numpy as np
from PIL import Image, ImageOps, ImageFilter
from tensorflow.keras.preprocessing.image import ImageDataGenerator, DirectoryIterator


class SegDataset(object):
    """
    Segmentation base dataset (inference only).

    Parameters
    ----------
    root : str
        Path to data folder.
    mode : str
        Only 'test' and 'demo' are accepted; 'train'/'val' are rejected
        because this implementation provides no training support.
    transform : callable
        A function that transforms the image.
    base_size : int
        Base shorter-side size used by the random-scale augmentation.
    crop_size : int
        Side length of the square crops produced by the transforms.
    """

    def __init__(self, root, mode, transform, base_size=520, crop_size=480):
        super(SegDataset, self).__init__()
        # BUGFIX: the original had two consecutive asserts -- the first
        # allowed ("train", "val", "test", "demo") and the second then
        # restricted to ("test", "demo"), contradicting the docstring.
        # Keep only the effective (restrictive) check and document it.
        assert mode in ("test", "demo")
        self.root = root
        self.mode = mode
        self.transform = transform
        self.base_size = base_size
        self.crop_size = crop_size

    def _val_sync_transform(self, image, mask):
        """Deterministic validation transform: resize the short side to
        crop_size, center-crop a crop_size square, then convert to arrays.
        Returns (image_array, int32_mask_array)."""
        outsize = self.crop_size
        short_size = outsize
        w, h = image.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = image.size
        x1 = int(round(0.5 * (w - outsize)))
        y1 = int(round(0.5 * (h - outsize)))
        image = image.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    def _sync_transform(self, image, mask):
        """Random augmentation applied jointly to image and mask:
        horizontal mirror, random short-edge rescale, zero-padding to
        crop_size, random crop and (PSP-style) gaussian blur.
        Returns (image_array, int32_mask_array)."""
        # random mirror
        if random.random() < 0.5:
            image = image.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = image.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        image = image.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad with zeros when the rescaled image is smaller than the crop
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            image = ImageOps.expand(image, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop of crop_size x crop_size
        w, h = image.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
        # final transform
        image, mask = self._img_transform(image), self._mask_transform(mask)
        return image, mask

    @staticmethod
    def _img_transform(image):
        # PIL image -> numpy array (dtype inferred from the image mode).
        return np.array(image)

    @staticmethod
    def _mask_transform(mask):
        # PIL mask -> int32 label array.
        return np.array(mask).astype(np.int32)

    def __getitem__(self, index):
        raise NotImplementedError

    def __len__(self):
        raise NotImplementedError


class SegDirectoryIterator(DirectoryIterator):
    """Keras DirectoryIterator that yields (image, mask) batches from a
    SegDataset instead of scanning class sub-directories.

    NOTE(review): DirectoryIterator.__init__ is deliberately NOT called;
    only set_processing_attrs() runs and the minimal Iterator state is
    rebuilt by hand below, so arguments such as `directory`, `classes`
    and `follow_links` are accepted for signature compatibility but are
    otherwise ignored.
    """

    allowed_class_modes = {'categorical', 'binary', 'sparse', 'input', None}

    def __init__(self, directory, image_data_generator,
                 target_size=(256, 256), color_mode='rgb',
                 classes=None, class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format='channels_last', save_to_dir=None,
                 save_prefix='', save_format='png',
                 follow_links=False, subset=None,
                 interpolation='nearest', dtype='float32',
                 dataset=None):
        super(SegDirectoryIterator, self).set_processing_attrs(
            image_data_generator, target_size, color_mode, data_format,
            save_to_dir, save_prefix, save_format, subset, interpolation)
        self.dataset = dataset
        self.class_mode = class_mode
        self.dtype = dtype
        # Manually recreate the state that Iterator.__init__ would set up.
        self.n = len(self.dataset)
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        self.batch_index = 0
        self.total_batches_seen = 0
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()

    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.

        # Arguments
            index_array: Array of sample indices to include in batch.

        # Returns:
            A `(batch_x, batch_y)` pair of numpy arrays whose shapes are
            taken from the first sample (assumes all samples in a batch
            share one shape -- TODO confirm against the dataset's
            transforms). Returns `(None, None)` for an empty index_array,
            as the original implementation did.
        """
        batch_x = None
        batch_y = None
        for i, j in enumerate(index_array):
            x, y = self.dataset[j]
            if batch_x is None:
                # Allocate lazily so the per-sample shape does not have
                # to be known up front.
                batch_x = np.zeros((len(index_array),) + x.shape, dtype=self.dtype)
                batch_y = np.zeros((len(index_array),) + y.shape, dtype=np.int32)
            batch_x[i] = x
            batch_y[i] = y
        return batch_x, batch_y


class SegImageDataGenerator(ImageDataGenerator):
    """ImageDataGenerator whose flow_from_directory returns a
    SegDirectoryIterator backed by an explicit `dataset` argument."""

    def flow_from_directory(self, directory,
                            target_size=(256, 256), color_mode='rgb',
                            classes=None, class_mode='categorical',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None, save_prefix='',
                            save_format='png', follow_links=False,
                            subset=None, interpolation='nearest',
                            dataset=None):
        return SegDirectoryIterator(
            directory, self,
            target_size=target_size, color_mode=color_mode,
            classes=classes, class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir, save_prefix=save_prefix,
            save_format=save_format, follow_links=follow_links,
            subset=subset, interpolation=interpolation,
            dataset=dataset)
''' Agents: stop/random/shortest/seq2seq '''<import_stmt>json<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>random<import_from_stmt>collections namedtuple<import_stmt>torch<import_stmt>torch.nn<as>nn<import_from_stmt>torch.autograd Variable<import_stmt>torch.nn.functional<as>F<import_stmt>torch.distributions<as>D<import_from_stmt>utils vocab_pad_idx vocab_eos_idx flatten structured_map try_cuda<line_sep>#from env import FOLLOWER_MODEL_ACTIONS, FOLLOWER_ENV_ACTIONS, IGNORE_ACTION_INDEX, LEFT_ACTION_INDEX, RIGHT_ACTION_INDEX, START_ACTION_INDEX, END_ACTION_INDEX, FORWARD_ACTION_INDEX, index_action_tuple
# Snapshot of one node in the follower's search: the chain of
# prev_inference_state links traces the path back to the start state.
InferenceState = namedtuple("InferenceState", "prev_inference_state, world_state, observation, flat_index, last_action, last_action_embedding, action_count, score, h_t, c_t, last_alpha")

# Minimal lisp-style linked-list cell.
Cons = namedtuple("Cons", "first, rest")


def cons_to_list(cons):
    """Flatten a non-empty Cons chain into a plain Python list."""
    items = [cons.first]
    node = cons.rest
    while node is not None:
        items.append(node.first)
        node = node.rest
    return items


def backchain_inference_states(last_inference_state):
    """Walk prev_inference_state links from a final search node back to
    the root and return the trajectory in forward order.

    Returns (states, observations, actions, scores, attentions); the
    actions, scores and attentions lists drop the entry for the start
    action, and each score is the incremental score of that step.
    """
    states, observations, actions, scores, attentions = [], [], [], [], []
    previous_score = None
    node = last_inference_state
    while node is not None:
        states.append(node.world_state)
        observations.append(node.observation)
        actions.append(node.last_action)
        attentions.append(node.last_alpha)
        if previous_score is not None:
            # Incremental score gained by the step leading out of this node.
            scores.append(previous_score - node.score)
        previous_score = node.score
        node = node.prev_inference_state
    scores.append(previous_score)
    states.reverse()
    observations.reverse()
    actions.reverse()
    scores.reverse()
    attentions.reverse()
    # Exclude the start-action entries.
    return states, observations, actions[1:], scores[1:], attentions[1:]
<def_stmt>least_common_viewpoint_path inf_state_a inf_state_b# return inference states traversing from A to X, then from Y to B,
# where X and Y are the least common ancestors of A and B respectively that share a viewpointId
<block_start>path_to_b_by_viewpoint={}<line_sep>b=inf_state_b<line_sep>b_stack=Cons(b <none>)<while_stmt>b<is><not><none><block_start>path_to_b_by_viewpoint[b.world_state.viewpointId]=b_stack<line_sep>b=b.prev_inference_state<line_sep>b_stack=Cons(b b_stack)<block_end>a=inf_state_a<line_sep>path_from_a=[a]<while_stmt>a<is><not><none><block_start>vp=a.world_state.viewpointId<if_stmt>vp<in>path_to_b_by_viewpoint<block_start>path_to_b=cons_to_list(path_to_b_by_viewpoint[vp])<assert_stmt>path_from_a[-1].world_state.viewpointId<eq>path_to_b[0].world_state.viewpointId<line_sep><return>path_from_a+path_to_b[1:]<block_end>a=a.prev_inference_state<line_sep>path_from_a.append(a)<block_end><raise>AssertionError("no common ancestor found")<block_end><def_stmt>batch_instructions_from_encoded encoded_instructions max_length reverse=<false> sort=<false># encoded_instructions: list of lists of token indices (should not be padded, or contain BOS or EOS tokens)
#seq_tensor = np.array(encoded_instructions)
# make sure pad does not start any sentence
<block_start>num_instructions=len(encoded_instructions)<line_sep>seq_tensor=np.full((num_instructions max_length) vocab_pad_idx)<line_sep>seq_lengths=[]<for_stmt>i,inst enumerate(encoded_instructions)<block_start><if_stmt>len(inst)<g>0<block_start><assert_stmt>inst[-1]<ne>vocab_eos_idx<block_end><if_stmt>reverse<block_start>inst=inst[::-1]<block_end>inst=np.concatenate((inst [vocab_eos_idx]))<line_sep>inst=inst[:max_length]<line_sep>seq_tensor[i :len(inst)]=inst<line_sep>seq_lengths.append(len(inst))<block_end>seq_tensor=torch.from_numpy(seq_tensor)<if_stmt>sort<block_start>seq_lengths,perm_idx=torch.from_numpy(np.array(seq_lengths)).sort(0 <true>)<line_sep>seq_lengths=list(seq_lengths)<line_sep>seq_tensor=seq_tensor[perm_idx]<block_end>mask=(seq_tensor<eq>vocab_pad_idx)[: :max(seq_lengths)]<line_sep>ret_tp=try_cuda(Variable(seq_tensor requires_grad=<false>).long()) try_cuda(mask.byte()) seq_lengths<if_stmt>sort<block_start>ret_tp=ret_tp+(list(perm_idx) )<block_end><return>ret_tp<block_end><class_stmt>BaseAgent(object)<block_start>''' Base class for an R2R agent to generate and save trajectories. '''<def_stmt>__init__ self env results_path<block_start>self.env=env<line_sep>self.results_path=results_path<line_sep>random.seed(1)<line_sep>self.results={}<line_sep>self.losses=[]<block_end># For learning agents
<def_stmt>write_results self<block_start>results={}<for_stmt>key,item self.results.items()<block_start>results[key]={'instr_id':item['instr_id'] 'trajectory':item['trajectory'] }<block_end><with_stmt>open(self.results_path 'w')<as>f<block_start>json.dump(results f)<block_end><block_end><def_stmt>rollout self<block_start>''' Return a list of dicts containing instr_id:'xx', path:[(viewpointId, heading_rad, elevation_rad)] '''<line_sep><raise>NotImplementedError<block_end>@staticmethod<def_stmt>get_agent name<block_start><return>globals()[name+"Agent"]<block_end><def_stmt>test self<block_start>self.env.reset_epoch()<line_sep>self.losses=[]<line_sep>self.results={}<line_sep># We rely on env showing the entire batch before repeating anything
#print 'Testing %s' % self.__class__.__name__
looped=<false><line_sep>rollout_scores=[]<line_sep>beam_10_scores=[]<while_stmt><true><block_start>rollout_results=self.rollout()<line_sep># if self.feedback == 'argmax':
# beam_results = self.beam_search(1, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# assert rollout_traj['instr_id'] == beam_trajs[0]['instr_id']
# assert rollout_traj['trajectory'] == beam_trajs[0]['trajectory']
# assert np.allclose(rollout_traj['score'], beam_trajs[0]['score'])
# print("passed check: beam_search with beam_size=1")
#
# self.env.set_beam_size(10)
# beam_results = self.beam_search(10, load_next_minibatch=False)
# assert len(rollout_results) == len(beam_results)
# for rollout_traj, beam_trajs in zip(rollout_results, beam_results):
# rollout_score = rollout_traj['score']
# rollout_scores.append(rollout_score)
# beam_score = beam_trajs[0]['score']
# beam_10_scores.append(beam_score)
# # assert rollout_score <= beam_score
# self.env.set_beam_size(1)
# # print("passed check: beam_search with beam_size=10")
# if self.feedback == 'teacher' and self.beam_size == 1:
# rollout_loss = self.loss
# path_obs, path_actions, encoded_instructions = self.env.gold_obs_actions_and_instructions(self.episode_len, load_next_minibatch=False)
# for i in range(len(rollout_results)):
# assert rollout_results[i]['actions'] == path_actions[i]
# assert [o1['viewpoint'] == o2['viewpoint']
# for o1, o2 in zip(rollout_results[i]['observations'], path_obs[i])]
# trajs, loss = self._score_obs_actions_and_instructions(path_obs, path_actions, encoded_instructions)
# for traj, rollout in zip(trajs, rollout_results):
# assert traj['instr_id'] == rollout['instr_id']
# assert traj['actions'] == rollout['actions']
# assert np.allclose(traj['score'], rollout['score'])
# assert np.allclose(rollout_loss.data[0], loss.data[0])
# print('passed score test')
<for_stmt>result rollout_results<block_start><if_stmt>result['instr_id']<in>self.results<block_start>looped=<true><block_end><else_stmt><block_start>self.results[result['instr_id']]=result<block_end><block_end><if_stmt>looped<block_start><break><block_end><block_end># if self.feedback == 'argmax':
# print("avg rollout score: ", np.mean(rollout_scores))
# print("avg beam 10 score: ", np.mean(beam_10_scores))
<return>self.results<block_end><block_end><def_stmt>path_element_from_observation ob<block_start><return>(ob['viewpoint'] ob['heading'] ob['elevation'])<block_end><class_stmt>StopAgent(BaseAgent)<block_start>''' An agent that doesn't move! '''<def_stmt>rollout self<block_start>world_states=self.env.reset()<line_sep>obs=self.env.observe(world_states)<line_sep>traj=[{'instr_id':ob['instr_id'] 'trajectory':[path_element_from_observation(ob)]}<for>ob obs]<line_sep><return>traj<block_end><block_end><class_stmt>RandomAgent(BaseAgent)<block_start>''' An agent that picks a random direction then tries to go straight for
five viewpoint steps and then stops. '''<def_stmt>rollout self<block_start>world_states=self.env.reset()<line_sep>obs=self.env.observe(world_states)<line_sep>traj=[{'instr_id':ob['instr_id'] 'trajectory':[path_element_from_observation(ob)]}<for>ob obs]<line_sep>ended=[<false>]<times>len(obs)<line_sep>self.steps=[0]<times>len(obs)<for_stmt>t range(6)<block_start>actions=[]<for_stmt>i,ob enumerate(obs)<block_start><if_stmt>self.steps[i]<ge>5<block_start>actions.append(0)# do nothing, i.e. end
ended[i]=<true><block_end><elif_stmt>self.steps[i]<eq>0<block_start>a=np.random.randint(len(ob['adj_loc_list'])-1)+1<line_sep>actions.append(a)# choose a random adjacent loc
self.steps[i]<augadd>1<block_end><else_stmt><block_start><assert_stmt>len(ob['adj_loc_list'])<g>1<line_sep>actions.append(1)# go forward
self.steps[i]<augadd>1<block_end><block_end>world_states=self.env.step(world_states actions obs)<line_sep>obs=self.env.observe(world_states)<for_stmt>i,ob enumerate(obs)<block_start><if_stmt><not>ended[i]<block_start>traj[i]['trajectory'].append(path_element_from_observation(ob))<block_end><block_end><block_end><return>traj<block_end><block_end><class_stmt>ShortestAgent(BaseAgent)<block_start>''' An agent that always takes the shortest path to goal. '''<def_stmt>rollout self<block_start>world_states=self.env.reset()<line_sep>#obs = self.env.observe(world_states)
all_obs,all_actions=self.env.shortest_paths_to_goals(world_states 20)<line_sep><return>[{'instr_id':obs[0]['instr_id'] # end state will appear twice because stop action is a no-op, so exclude it
'trajectory':[path_element_from_observation(ob)<for>ob obs[:-1]]}<for>obs all_obs]<block_end><block_end><class_stmt>Seq2SeqAgent(BaseAgent)<block_start>''' An agent based on an LSTM seq2seq model with attention. '''<line_sep># For now, the agent can't pick which forward move to make - just the one in the middle
# env_actions = FOLLOWER_ENV_ACTIONS
# start_index = START_ACTION_INDEX
# ignore_index = IGNORE_ACTION_INDEX
# forward_index = FORWARD_ACTION_INDEX
# end_index = END_ACTION_INDEX
feedback_options=['teacher' 'argmax' 'sample']<def_stmt>__init__ self env results_path encoder decoder episode_len=10 beam_size=1 reverse_instruction=<true> max_instruction_length=80<block_start>super(Seq2SeqAgent self).__init__(env results_path)<line_sep>self.encoder=encoder<line_sep>self.decoder=decoder<line_sep>self.episode_len=episode_len<line_sep>self.losses=[]<line_sep>self.criterion=nn.CrossEntropyLoss(ignore_index=-1)<line_sep>self.beam_size=beam_size<line_sep>self.reverse_instruction=reverse_instruction<line_sep>self.max_instruction_length=max_instruction_length<block_end># @staticmethod
# def n_inputs():
# return len(FOLLOWER_MODEL_ACTIONS)
#
# @staticmethod
# def n_outputs():
# return len(FOLLOWER_MODEL_ACTIONS)-2 # Model doesn't output start or ignore
<def_stmt>_feature_variables self obs beamed=<false><block_start>''' Extract precomputed features into variable. '''<line_sep>feature_lists=list(zip(*[ob['feature']<for>ob (flatten(obs)<if>beamed<else>obs)]))<assert_stmt>len(feature_lists)<eq>len(self.env.image_features_list)<line_sep>batched=[]<for_stmt>featurizer,feature_list zip(self.env.image_features_list feature_lists)<block_start>batched.append(featurizer.batch_features(feature_list))<block_end><return>batched<block_end><def_stmt>_action_variable self obs# get the maximum number of actions of all sample in this batch
<block_start>max_num_a=-1<for_stmt>i,ob enumerate(obs)<block_start>max_num_a=max(max_num_a len(ob['adj_loc_list']))<block_end>is_valid=np.zeros((len(obs) max_num_a) np.float32)<line_sep>action_embedding_dim=obs[0]['action_embedding'].shape[-1]<line_sep>action_embeddings=np.zeros((len(obs) max_num_a action_embedding_dim) dtype=np.float32)<for_stmt>i,ob enumerate(obs)<block_start>adj_loc_list=ob['adj_loc_list']<line_sep>num_a=len(adj_loc_list)<line_sep>is_valid[i 0:num_a]=1.<for_stmt>n_a,adj_dict enumerate(adj_loc_list)<block_start>action_embeddings[i :num_a :]=ob['action_embedding']<block_end><block_end><return>(Variable(torch.from_numpy(action_embeddings) requires_grad=<false>).cuda() Variable(torch.from_numpy(is_valid) requires_grad=<false>).cuda() is_valid)<block_end><def_stmt>_teacher_action self obs ended<block_start>''' Extract teacher actions into variable. '''<line_sep>a=torch.LongTensor(len(obs))<for_stmt>i,ob enumerate(obs)# Supervised teacher only moves one axis at a time
<block_start>a[i]=ob['teacher']<if><not>ended[i]<else>-1<block_end><return>try_cuda(Variable(a requires_grad=<false>))<block_end><def_stmt>_proc_batch self obs beamed=<false><block_start>encoded_instructions=[ob['instr_encoding']<for>ob (flatten(obs)<if>beamed<else>obs)]<line_sep><return>batch_instructions_from_encoded(encoded_instructions self.max_instruction_length reverse=self.reverse_instruction)<block_end><def_stmt>rollout self<block_start><if_stmt>self.beam_size<eq>1<block_start><return>self._rollout_with_loss()<block_end><else_stmt><block_start><assert_stmt>self.beam_size<ge>1<line_sep>beams,_,_=self.beam_search(self.beam_size)<line_sep><return>[beam[0]<for>beam beams]<block_end><block_end><def_stmt>_score_obs_actions_and_instructions self path_obs path_actions encoded_instructions<block_start>batch_size=len(path_obs)<assert_stmt>len(path_actions)<eq>batch_size<assert_stmt>len(encoded_instructions)<eq>batch_size<for_stmt>path_o,path_a zip(path_obs path_actions)<block_start><assert_stmt>len(path_o)<eq>len(path_a)+1<block_end>seq,seq_mask,seq_lengths,perm_indices=batch_instructions_from_encoded(encoded_instructions self.max_instruction_length reverse=self.reverse_instruction sort=<true>)<line_sep>loss=0<line_sep>ctx,h_t,c_t=self.encoder(seq seq_lengths)<line_sep>u_t_prev=self.decoder.u_begin.expand(batch_size -1)# init action
ended=np.array([<false>]<times>batch_size)<line_sep>sequence_scores=try_cuda(torch.zeros(batch_size))<line_sep>traj=[{'instr_id':path_o[0]['instr_id'] 'trajectory':[path_element_from_observation(path_o[0])] 'actions':[] 'scores':[] 'observations':[path_o[0]] 'instr_encoding':path_o[0]['instr_encoding']}<for>path_o path_obs]<line_sep>obs=<none><for_stmt>t range(self.episode_len)<block_start>next_obs=[]<line_sep>next_target_list=[]<for_stmt>perm_index,src_index enumerate(perm_indices)<block_start>path_o=path_obs[src_index]<line_sep>path_a=path_actions[src_index]<if_stmt>t<l>len(path_a)<block_start>next_target_list.append(path_a[t])<line_sep>next_obs.append(path_o[t])<block_end><else_stmt><block_start>next_target_list.append(-1)<line_sep>next_obs.append(obs[perm_index])<block_end><block_end>obs=next_obs<line_sep>target=try_cuda(Variable(torch.LongTensor(next_target_list) requires_grad=<false>))<line_sep>f_t_list=self._feature_variables(obs)# Image features from obs
all_u_t,is_valid,_=self._action_variable(obs)<assert_stmt>len(f_t_list)<eq>1 'for now, only work with MeanPooled feature'<line_sep>h_t,c_t,alpha,logit,alpha_v=self.decoder(u_t_prev all_u_t f_t_list[0] h_t c_t ctx seq_mask)<line_sep># Mask outputs of invalid actions
logit[is_valid<eq>0]=-float('inf')<line_sep># Supervised training
loss<augadd>self.criterion(logit target)<line_sep># Determine next model inputs
a_t=torch.clamp(target min=0)# teacher forcing
# update the previous action
u_t_prev=all_u_t[np.arange(batch_size) a_t :].detach()<line_sep>action_scores=-F.cross_entropy(logit target ignore_index=-1 reduce=<false>).data<line_sep>sequence_scores<augadd>action_scores<line_sep># Save trajectory output
<for_stmt>perm_index,src_index enumerate(perm_indices)<block_start>ob=obs[perm_index]<if_stmt><not>ended[perm_index]<block_start>traj[src_index]['trajectory'].append(path_element_from_observation(ob))<line_sep>traj[src_index]['score']=float(sequence_scores[perm_index])<line_sep>traj[src_index]['scores'].append(action_scores[perm_index])<line_sep>traj[src_index]['actions'].append(a_t.data[perm_index])<line_sep># traj[src_index]['observations'].append(ob)
<block_end><block_end># Update ended list
<for_stmt>i range(batch_size)<block_start>action_idx=a_t[i].data[0]<if_stmt>action_idx<eq>0<block_start>ended[i]=<true><block_end><block_end># Early exit if all ended
<if_stmt>ended.all()<block_start><break><block_end><block_end><return>traj loss<block_end><def_stmt>_rollout_with_loss self<block_start>initial_world_states=self.env.reset(sort=<true>)<line_sep>initial_obs=self.env.observe(initial_world_states)<line_sep>initial_obs=np.array(initial_obs)<line_sep>batch_size=len(initial_obs)<line_sep># get mask and lengths
seq,seq_mask,seq_lengths=self._proc_batch(initial_obs)<line_sep># Forward through encoder, giving initial hidden state and memory cell for decoder
# TODO consider not feeding this into the decoder, and just using attention
self.loss=0<line_sep>feedback=self.feedback<line_sep>ctx,h_t,c_t=self.encoder(seq seq_lengths)<line_sep># Record starting point
traj=[{'instr_id':ob['instr_id'] 'trajectory':[path_element_from_observation(ob)] 'actions':[] 'scores':[] 'observations':[ob] 'instr_encoding':ob['instr_encoding']}<for>ob initial_obs]<line_sep>obs=initial_obs<line_sep>world_states=initial_world_states<line_sep># Initial action
u_t_prev=self.decoder.u_begin.expand(batch_size -1)# init action
ended=np.array([<false>]<times>batch_size)# Indices match permuation of the model, not env
# Do a sequence rollout and calculate the loss
env_action=[<none>]<times>batch_size<line_sep>sequence_scores=try_cuda(torch.zeros(batch_size))<for_stmt>t range(self.episode_len)<block_start>f_t_list=self._feature_variables(obs)# Image features from obs
all_u_t,is_valid,_=self._action_variable(obs)<assert_stmt>len(f_t_list)<eq>1 'for now, only work with MeanPooled feature'<line_sep>h_t,c_t,alpha,logit,alpha_v=self.decoder(u_t_prev all_u_t f_t_list[0] h_t c_t ctx seq_mask)<line_sep># Mask outputs of invalid actions
logit[is_valid<eq>0]=-float('inf')<line_sep># Supervised training
target=self._teacher_action(obs ended)<line_sep>self.loss<augadd>self.criterion(logit target)<line_sep># Determine next model inputs
<if_stmt>feedback<eq>'teacher'# turn -1 (ignore) to 0 (stop) so that the action is executable
<block_start>a_t=torch.clamp(target min=0)<block_end><elif_stmt>feedback<eq>'argmax'<block_start>_,a_t=logit.max(1)# student forcing - argmax
a_t=a_t.detach()<block_end><elif_stmt>feedback<eq>'sample'<block_start>probs=F.softmax(logit dim=1)# sampling an action from model
# Further mask probs where agent can't move forward
# Note input to `D.Categorical` does not have to sum up to 1
# http://pytorch.org/docs/stable/torch.html#torch.multinomial
probs[is_valid<eq>0]=0.<line_sep>m=D.Categorical(probs)<line_sep>a_t=m.sample()<block_end><else_stmt><block_start>sys.exit('Invalid feedback option')<block_end># update the previous action
u_t_prev=all_u_t[np.arange(batch_size) a_t :].detach()<line_sep>action_scores=-F.cross_entropy(logit a_t ignore_index=-1 reduce=<false>).data<line_sep>sequence_scores<augadd>action_scores<line_sep># dfried: I changed this so that the ended list is updated afterward; this causes <end> to be added as the last action, along with its score, and the final world state will be duplicated (to more closely match beam search)
# Make environment action
<for_stmt>i range(batch_size)<block_start>action_idx=a_t[i].data[0]<line_sep>env_action[i]=action_idx<block_end>world_states=self.env.step(world_states env_action obs)<line_sep>obs=self.env.observe(world_states)<line_sep># print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, world_states[0], a_t.data[0], sequence_scores[0]))
# Save trajectory output
<for_stmt>i,ob enumerate(obs)<block_start><if_stmt><not>ended[i]<block_start>traj[i]['trajectory'].append(path_element_from_observation(ob))<line_sep>traj[i]['score']=sequence_scores[i]<line_sep>traj[i]['scores'].append(action_scores[i])<line_sep>traj[i]['actions'].append(a_t.data[i])<line_sep>traj[i]['observations'].append(ob)<block_end><block_end># Update ended list
<for_stmt>i range(batch_size)<block_start>action_idx=a_t[i].data[0]<if_stmt>action_idx<eq>0<block_start>ended[i]=<true><block_end><block_end># Early exit if all ended
<if_stmt>ended.all()<block_start><break><block_end><block_end>#self.losses.append(self.loss.data[0] / self.episode_len)
# shouldn't divide by the episode length because of masking
self.losses.append(self.loss.data[0])<line_sep><return>traj<block_end><def_stmt>beam_search self beam_size load_next_minibatch=<true> mask_undo=<false><block_start><assert_stmt>self.env.beam_size<ge>beam_size<line_sep>world_states=self.env.reset(sort=<true> beamed=<true> load_next_minibatch=load_next_minibatch)<line_sep>obs=self.env.observe(world_states beamed=<true>)<line_sep>batch_size=len(world_states)<line_sep># get mask and lengths
seq,seq_mask,seq_lengths=self._proc_batch(obs beamed=<true>)<line_sep># Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t=self.encoder(seq seq_lengths)<line_sep>completed=[]<for_stmt>_ range(batch_size)<block_start>completed.append([])<block_end>beams=[[InferenceState(prev_inference_state=<none> world_state=ws[0] observation=o[0] flat_index=i last_action=-1 last_action_embedding=self.decoder.u_begin.view(-1) action_count=0 score=0.0 h_t=<none> c_t=<none> last_alpha=<none>)]<for>i,(ws o) enumerate(zip(world_states obs))]<line_sep># Do a sequence rollout and calculate the loss
<for_stmt>t range(self.episode_len)<block_start>flat_indices=[]<line_sep>beam_indices=[]<line_sep>u_t_list=[]<for_stmt>beam_index,beam enumerate(beams)<block_start><for_stmt>inf_state beam<block_start>beam_indices.append(beam_index)<line_sep>flat_indices.append(inf_state.flat_index)<line_sep>u_t_list.append(inf_state.last_action_embedding)<block_end><block_end>u_t_prev=torch.stack(u_t_list dim=0)<assert_stmt>len(u_t_prev.shape)<eq>2<line_sep>flat_obs=flatten(obs)<line_sep>f_t_list=self._feature_variables(flat_obs)# Image features from obs
all_u_t,is_valid,is_valid_numpy=self._action_variable(flat_obs)<assert_stmt>len(f_t_list)<eq>1 'for now, only work with MeanPooled feature'<line_sep>h_t,c_t,alpha,logit,alpha_v=self.decoder(u_t_prev all_u_t f_t_list[0] h_t[flat_indices] c_t[flat_indices] ctx[beam_indices] seq_mask[beam_indices])<line_sep># Mask outputs of invalid actions
logit[is_valid<eq>0]=-float('inf')<line_sep># # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
<if_stmt>mask_undo<block_start>masked_logit=logit.clone()<block_end><else_stmt><block_start>masked_logit=logit<block_end>log_probs=F.log_softmax(logit dim=1).data<line_sep># force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#action_scores, action_indices = log_probs.topk(min(beam_size, logit.size()[1]), dim=1)
_,action_indices=masked_logit.data.topk(min(beam_size logit.size()[1]) dim=1)<line_sep>action_scores=log_probs.gather(1 action_indices)<assert_stmt>action_scores.size()<eq>action_indices.size()<line_sep>start_index=0<line_sep>new_beams=[]<assert_stmt>len(beams)<eq>len(world_states)<line_sep>all_successors=[]<for_stmt>beam_index,(beam beam_world_states beam_obs) enumerate(zip(beams world_states obs))<block_start>successors=[]<line_sep>end_index=start_index+len(beam)<assert_stmt>len(beam_world_states)<eq>len(beam)<assert_stmt>len(beam_obs)<eq>len(beam)<if_stmt>beam<block_start><for_stmt>inf_index,(inf_state world_state ob action_score_row action_index_row) enumerate(zip(beam beam_world_states beam_obs action_scores[start_index:end_index] action_indices[start_index:end_index]))<block_start>flat_index=start_index+inf_index<for_stmt>action_score,action_index zip(action_score_row action_index_row)<block_start><if_stmt>is_valid_numpy[flat_index action_index]<eq>0<block_start><continue><block_end>successors.append(InferenceState(prev_inference_state=inf_state world_state=world_state # will be updated later after successors are pruned
observation=ob # will be updated later after successors are pruned
flat_index=flat_index last_action=action_index last_action_embedding=all_u_t[flat_index action_index].detach() action_count=inf_state.action_count+1 score=float(inf_state.score+action_score) h_t=<none> c_t=<none> last_alpha=alpha[flat_index].data))<block_end><block_end><block_end>start_index=end_index<line_sep>successors=sorted(successors key=<lambda>t:t.score reverse=<true>)[:beam_size]<line_sep>all_successors.append(successors)<block_end>successor_world_states=[[inf_state.world_state<for>inf_state successors]<for>successors all_successors]<line_sep>successor_env_actions=[[inf_state.last_action<for>inf_state successors]<for>successors all_successors]<line_sep>successor_last_obs=[[inf_state.observation<for>inf_state successors]<for>successors all_successors]<line_sep>successor_world_states=self.env.step(successor_world_states successor_env_actions successor_last_obs beamed=<true>)<line_sep>successor_obs=self.env.observe(successor_world_states beamed=<true>)<line_sep>all_successors=structured_map(<lambda>inf_state world_state obs:inf_state._replace(world_state=world_state observation=obs) all_successors successor_world_states successor_obs nested=<true>)<line_sep># if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
<for_stmt>beam_index,successors enumerate(all_successors)<block_start>new_beam=[]<for_stmt>successor successors<block_start><if_stmt>successor.last_action<eq>0<or>t<eq>self.episode_len-1<block_start>completed[beam_index].append(successor)<block_end><else_stmt><block_start>new_beam.append(successor)<block_end><block_end><if_stmt>len(completed[beam_index])<ge>beam_size<block_start>new_beam=[]<block_end>new_beams.append(new_beam)<block_end>beams=new_beams<line_sep>world_states=[[inf_state.world_state<for>inf_state beam]<for>beam beams]<line_sep>obs=[[inf_state.observation<for>inf_state beam]<for>beam beams]<line_sep># Early exit if all ended
<if_stmt><not>any(beam<for>beam beams)<block_start><break><block_end><block_end>trajs=[]<for_stmt>this_completed completed<block_start><assert_stmt>this_completed<line_sep>this_trajs=[]<for_stmt>inf_state sorted(this_completed key=<lambda>t:t.score reverse=<true>)[:beam_size]<block_start>path_states,path_observations,path_actions,path_scores,path_attentions=backchain_inference_states(inf_state)<line_sep># this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory=[path_element_from_observation(ob)<for>ob path_observations]<line_sep>this_trajs.append({'instr_id':path_observations[0]['instr_id'] 'instr_encoding':path_observations[0]['instr_encoding'] 'trajectory':trajectory 'observations':path_observations 'actions':path_actions 'score':inf_state.score 'scores':path_scores 'attentions':path_attentions})<block_end>trajs.append(this_trajs)<block_end>traversed_lists=<none># todo
<return>trajs completed traversed_lists<block_end><def_stmt>state_factored_search self completion_size successor_size load_next_minibatch=<true> mask_undo=<false> first_n_ws_key=4<block_start><assert_stmt>self.env.beam_size<ge>successor_size<line_sep>world_states=self.env.reset(sort=<true> beamed=<true> load_next_minibatch=load_next_minibatch)<line_sep>initial_obs=self.env.observe(world_states beamed=<true>)<line_sep>batch_size=len(world_states)<line_sep># get mask and lengths
seq,seq_mask,seq_lengths=self._proc_batch(initial_obs beamed=<true>)<line_sep># Forward through encoder, giving initial hidden state and memory cell for decoder
ctx,h_t,c_t=self.encoder(seq seq_lengths)<line_sep>completed=[]<line_sep>completed_holding=[]<for_stmt>_ range(batch_size)<block_start>completed.append({})<line_sep>completed_holding.append({})<block_end>state_cache=[{ws[0][0:first_n_ws_key]:(InferenceState(prev_inference_state=<none> world_state=ws[0] observation=o[0] flat_index=<none> last_action=-1 last_action_embedding=self.decoder.u_begin.view(-1) action_count=0 score=0.0 h_t=h_t[i] c_t=c_t[i] last_alpha=<none>) <true>)}<for>i,(ws o) enumerate(zip(world_states initial_obs))]<line_sep>beams=[[inf_state<for>world_state,(inf_state expanded) sorted(instance_cache.items())]<for>instance_cache state_cache]<line_sep># sorting is a noop here since each instance_cache should only contain one
# traversed_lists = None
# list of inference states containing states in order of the states being expanded
last_expanded_list=[]<line_sep>traversed_lists=[]<for_stmt>beam beams<block_start><assert_stmt>len(beam)<eq>1<line_sep>first_state=beam[0]<line_sep>last_expanded_list.append(first_state)<line_sep>traversed_lists.append([first_state])<block_end><def_stmt>update_traversed_lists new_visited_inf_states<block_start><assert_stmt>len(new_visited_inf_states)<eq>len(last_expanded_list)<assert_stmt>len(new_visited_inf_states)<eq>len(traversed_lists)<for_stmt>instance_index,instance_states enumerate(new_visited_inf_states)<block_start>last_expanded=last_expanded_list[instance_index]<line_sep># todo: if this passes, shouldn't need traversed_lists
<assert_stmt>last_expanded.world_state.viewpointId<eq>traversed_lists[instance_index][-1].world_state.viewpointId<for_stmt>inf_state instance_states<block_start>path_from_last_to_next=least_common_viewpoint_path(last_expanded inf_state)<line_sep># path_from_last should include last_expanded's world state as the first element, so check and drop that
<assert_stmt>path_from_last_to_next[0].world_state.viewpointId<eq>last_expanded.world_state.viewpointId<assert_stmt>path_from_last_to_next[-1].world_state.viewpointId<eq>inf_state.world_state.viewpointId<line_sep>traversed_lists[instance_index].extend(path_from_last_to_next[1:])<line_sep>last_expanded=inf_state<block_end>last_expanded_list[instance_index]=last_expanded<block_end><block_end># Do a sequence rollout and calculate the loss
<while_stmt>any(len(comp)<l>completion_size<for>comp completed)<block_start>beam_indices=[]<line_sep>u_t_list=[]<line_sep>h_t_list=[]<line_sep>c_t_list=[]<line_sep>flat_obs=[]<for_stmt>beam_index,beam enumerate(beams)<block_start><for_stmt>inf_state beam<block_start>beam_indices.append(beam_index)<line_sep>u_t_list.append(inf_state.last_action_embedding)<line_sep>h_t_list.append(inf_state.h_t.unsqueeze(0))<line_sep>c_t_list.append(inf_state.c_t.unsqueeze(0))<line_sep>flat_obs.append(inf_state.observation)<block_end><block_end>u_t_prev=torch.stack(u_t_list dim=0)<assert_stmt>len(u_t_prev.shape)<eq>2<line_sep>f_t_list=self._feature_variables(flat_obs)# Image features from obs
all_u_t,is_valid,is_valid_numpy=self._action_variable(flat_obs)<line_sep>h_t=torch.cat(h_t_list dim=0)<line_sep>c_t=torch.cat(c_t_list dim=0)<assert_stmt>len(f_t_list)<eq>1 'for now, only work with MeanPooled feature'<line_sep>h_t,c_t,alpha,logit,alpha_v=self.decoder(u_t_prev all_u_t f_t_list[0] h_t c_t ctx[beam_indices] seq_mask[beam_indices])<line_sep># Mask outputs of invalid actions
logit[is_valid<eq>0]=-float('inf')<line_sep># # Mask outputs where agent can't move forward
# no_forward_mask = [len(ob['navigableLocations']) <= 1 for ob in flat_obs]
<if_stmt>mask_undo<block_start>masked_logit=logit.clone()<block_end><else_stmt><block_start>masked_logit=logit<block_end>log_probs=F.log_softmax(logit dim=1).data<line_sep># force ending if we've reached the max time steps
# if t == self.episode_len - 1:
# action_scores = log_probs[:,self.end_index].unsqueeze(-1)
# action_indices = torch.from_numpy(np.full((log_probs.size()[0], 1), self.end_index))
# else:
#_, action_indices = masked_logit.data.topk(min(successor_size, logit.size()[1]), dim=1)
_,action_indices=masked_logit.data.topk(logit.size()[1] dim=1)# todo: fix this
action_scores=log_probs.gather(1 action_indices)<assert_stmt>action_scores.size()<eq>action_indices.size()<line_sep>start_index=0<assert_stmt>len(beams)<eq>len(world_states)<line_sep>all_successors=[]<for_stmt>beam_index,(beam beam_world_states) enumerate(zip(beams world_states))<block_start>successors=[]<line_sep>end_index=start_index+len(beam)<assert_stmt>len(beam_world_states)<eq>len(beam)<if_stmt>beam<block_start><for_stmt>inf_index,(inf_state world_state action_score_row) enumerate(zip(beam beam_world_states log_probs[start_index:end_index]))<block_start>flat_index=start_index+inf_index<for_stmt>action_index,action_score enumerate(action_score_row)<block_start><if_stmt>is_valid_numpy[flat_index action_index]<eq>0<block_start><continue><block_end>successors.append(InferenceState(prev_inference_state=inf_state world_state=world_state # will be updated later after successors are pruned
observation=flat_obs[flat_index] # will be updated later after successors are pruned
flat_index=<none> last_action=action_index last_action_embedding=all_u_t[flat_index action_index].detach() action_count=inf_state.action_count+1 score=inf_state.score+action_score h_t=h_t[flat_index] c_t=c_t[flat_index] last_alpha=alpha[flat_index].data))<block_end><block_end><block_end>start_index=end_index<line_sep>successors=sorted(successors key=<lambda>t:t.score reverse=<true>)<line_sep>all_successors.append(successors)<block_end>successor_world_states=[[inf_state.world_state<for>inf_state successors]<for>successors all_successors]<line_sep>successor_env_actions=[[inf_state.last_action<for>inf_state successors]<for>successors all_successors]<line_sep>successor_last_obs=[[inf_state.observation<for>inf_state successors]<for>successors all_successors]<line_sep>successor_world_states=self.env.step(successor_world_states successor_env_actions successor_last_obs beamed=<true>)<line_sep>all_successors=structured_map(<lambda>inf_state world_state:inf_state._replace(world_state=world_state) all_successors successor_world_states nested=<true>)<line_sep># if all_successors[0]:
# print("t: %s\tstate: %s\taction: %s\tscore: %s" % (t, all_successors[0][0].world_state, all_successors[0][0].last_action, all_successors[0][0].score))
<assert_stmt>len(all_successors)<eq>len(state_cache)<line_sep>new_beams=[]<for_stmt>beam_index,(successors instance_cache) enumerate(zip(all_successors state_cache))# early stop if we've already built a sizable completion list
<block_start>instance_completed=completed[beam_index]<line_sep>instance_completed_holding=completed_holding[beam_index]<if_stmt>len(instance_completed)<ge>completion_size<block_start>new_beams.append([])<line_sep><continue><block_end><for_stmt>successor successors<block_start>ws_keys=successor.world_state[0:first_n_ws_key]<if_stmt>successor.last_action<eq>0<or>successor.action_count<eq>self.episode_len<block_start><if_stmt>ws_keys<not><in>instance_completed_holding<or>instance_completed_holding[ws_keys][0].score<l>successor.score<block_start>instance_completed_holding[ws_keys]=(successor <false>)<block_end><block_end><else_stmt><block_start><if_stmt>ws_keys<not><in>instance_cache<or>instance_cache[ws_keys][0].score<l>successor.score<block_start>instance_cache[ws_keys]=(successor <false>)<block_end><block_end><block_end># third value: did this come from completed_holding?
uncompleted_to_consider=((ws_keys inf_state <false>)<for>(ws_keys (inf_state expanded)) instance_cache.items()<if><not>expanded)<line_sep>completed_to_consider=((ws_keys inf_state <true>)<for>(ws_keys (inf_state expanded)) instance_completed_holding.items()<if><not>expanded)<import_stmt>itertools<import_stmt>heapq<line_sep>to_consider=itertools.chain(uncompleted_to_consider completed_to_consider)<line_sep>ws_keys_and_inf_states=heapq.nlargest(successor_size to_consider key=<lambda>pair:pair[1].score)<line_sep>new_beam=[]<for_stmt>ws_keys,inf_state,is_completed ws_keys_and_inf_states<block_start><if_stmt>is_completed<block_start><assert_stmt>instance_completed_holding[ws_keys]<eq>(inf_state <false>)<line_sep>instance_completed_holding[ws_keys]=(inf_state <true>)<if_stmt>ws_keys<not><in>instance_completed<or>instance_completed[ws_keys].score<l>inf_state.score<block_start>instance_completed[ws_keys]=inf_state<block_end><block_end><else_stmt><block_start>instance_cache[ws_keys]=(inf_state <true>)<line_sep>new_beam.append(inf_state)<block_end><block_end><if_stmt>len(instance_completed)<ge>completion_size<block_start>new_beams.append([])<block_end><else_stmt><block_start>new_beams.append(new_beam)<block_end><block_end>beams=new_beams<line_sep># Early exit if all ended
<if_stmt><not>any(beam<for>beam beams)<block_start><break><block_end>world_states=[[inf_state.world_state<for>inf_state beam]<for>beam beams]<line_sep>successor_obs=self.env.observe(world_states beamed=<true>)<line_sep>beams=structured_map(<lambda>inf_state obs:inf_state._replace(observation=obs) beams successor_obs nested=<true>)<line_sep>update_traversed_lists(beams)<block_end>completed_list=[]<for_stmt>this_completed completed<block_start>completed_list.append(sorted(this_completed.values() key=<lambda>t:t.score reverse=<true>)[:completion_size])<block_end>completed_ws=[[inf_state.world_state<for>inf_state comp_l]<for>comp_l completed_list]<line_sep>completed_obs=self.env.observe(completed_ws beamed=<true>)<line_sep>completed_list=structured_map(<lambda>inf_state obs:inf_state._replace(observation=obs) completed_list completed_obs nested=<true>)<line_sep># TODO: consider moving observations and this update earlier so that we don't have to traverse as far back
update_traversed_lists(completed_list)<line_sep># TODO: sanity check the traversed lists here
trajs=[]<for_stmt>this_completed completed_list<block_start><assert_stmt>this_completed<line_sep>this_trajs=[]<for_stmt>inf_state this_completed<block_start>path_states,path_observations,path_actions,path_scores,path_attentions=backchain_inference_states(inf_state)<line_sep># this will have messed-up headings for (at least some) starting locations because of
# discretization, so read from the observations instead
## path = [(obs.viewpointId, state.heading, state.elevation)
## for state in path_states]
trajectory=[path_element_from_observation(ob)<for>ob path_observations]<line_sep>this_trajs.append({'instr_id':path_observations[0]['instr_id'] 'instr_encoding':path_observations[0]['instr_encoding'] 'trajectory':trajectory 'observations':path_observations 'actions':path_actions 'score':inf_state.score 'scores':path_scores 'attentions':path_attentions})<block_end>trajs.append(this_trajs)<block_end># completed_list: list of lists of final inference states corresponding to the candidates, one list per instance
# traversed_lists: list of "physical states" that the robot has explored, one per instance
<return>trajs completed_list traversed_lists<block_end><def_stmt>set_beam_size self beam_size<block_start><if_stmt>self.env.beam_size<l>beam_size<block_start>self.env.set_beam_size(beam_size)<block_end>self.beam_size=beam_size<block_end><def_stmt>test self use_dropout=<false> feedback='argmax' allow_cheat=<false> beam_size=1<block_start>''' Evaluate once on each instruction in the current environment '''<if_stmt><not>allow_cheat# permitted for purpose of calculating validation loss only
<block_start><assert_stmt>feedback<in>['argmax' 'sample']# no cheating by using teacher at test time!
<block_end>self.feedback=feedback<if_stmt>use_dropout<block_start>self.encoder.train()<line_sep>self.decoder.train()<block_end><else_stmt><block_start>self.encoder.eval()<line_sep>self.decoder.eval()<block_end>self.set_beam_size(beam_size)<line_sep><return>super(Seq2SeqAgent self).test()<block_end><def_stmt>train self encoder_optimizer decoder_optimizer n_iters feedback='teacher'<block_start>''' Train for a given number of iterations '''<assert_stmt>all(f<in>self.feedback_options<for>f feedback.split("+"))<line_sep>self.feedback=feedback<line_sep>self.encoder.train()<line_sep>self.decoder.train()<line_sep>self.losses=[]<line_sep>it=range(1 n_iters+1)<try_stmt><block_start><import_stmt>tqdm<line_sep>it=tqdm.tqdm(it)<block_end><except_stmt><block_start><pass><block_end><for_stmt>_ it<block_start>encoder_optimizer.zero_grad()<line_sep>decoder_optimizer.zero_grad()<line_sep>self._rollout_with_loss()<line_sep>self.loss.backward()<line_sep>encoder_optimizer.step()<line_sep>decoder_optimizer.step()<block_end><block_end><def_stmt>_encoder_and_decoder_paths self base_path<block_start><return>base_path+"_enc" base_path+"_dec"<block_end><def_stmt>save self path<block_start>''' Snapshot models '''<line_sep>encoder_path,decoder_path=self._encoder_and_decoder_paths(path)<line_sep>torch.save(self.encoder.state_dict() encoder_path)<line_sep>torch.save(self.decoder.state_dict() decoder_path)<block_end><def_stmt>load self path **kwargs<block_start>''' Loads parameters (but not training state) '''<line_sep>encoder_path,decoder_path=self._encoder_and_decoder_paths(path)<line_sep>self.encoder.load_state_dict(torch.load(encoder_path **kwargs))<line_sep>self.decoder.load_state_dict(torch.load(decoder_path **kwargs))<block_end><block_end> |
<import_from_stmt>dataclasses dataclass<import_stmt>discord<line_sep>@dataclass(init=<false>)<class_stmt>TwitchProfile<block_start><def_stmt>__init__ self **kwargs<block_start>self.id=kwargs.get("id")<line_sep>self.login=kwargs.get("login")<line_sep>self.display_name=kwargs.get("display_name")<line_sep>self.acc_type=kwargs.get("acc_type")<line_sep>self.broadcaster_type=kwargs.get("broadcaster_type")<line_sep>self.description=kwargs.get("description")<line_sep>self.profile_image_url=kwargs.get("profile_image_url")<line_sep>self.offline_image_url=kwargs.get("offline_image_url")<line_sep>self.view_count=kwargs.get("view_count")<block_end>@classmethod<def_stmt>from_json cls data:dict<block_start>data=data["data"][0]<line_sep><return>cls(**data)<block_end><def_stmt>make_user_embed self<arrow>discord.Embed# makes the embed for a twitch profile
<block_start>em=discord.Embed(colour=int("6441A4" 16))<line_sep>em.description=self.description<line_sep>url="https://twitch.tv/{}".format(self.login)<line_sep>em.set_author(name=self.display_name url=url icon_url=self.profile_image_url)<line_sep>em.set_image(url=self.offline_image_url)<line_sep>em.set_thumbnail(url=self.profile_image_url)<line_sep>footer_text="{} Viewer count".format(self.view_count)<line_sep>em.set_footer(text=footer_text icon_url=self.profile_image_url)<line_sep><return>em<block_end><block_end>@dataclass(init=<false>)<class_stmt>TwitchFollower<block_start><def_stmt>__init__ self **kwargs<block_start>self.from_id=kwargs.get("from_id")<line_sep>self.to_id=kwargs.get("to_id")<line_sep>self.followed_at=kwargs.get("followed_at")<block_end>@classmethod<def_stmt>from_json cls data:dict<block_start><return>cls(**data)<block_end><block_end> |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
<import_from_stmt>typing List<import_from_stmt>botbuilder.schema Activity<import_from_stmt>botbuilder.dialogs.choices Choice ListStyle<class_stmt>PromptOptions<block_start>"""
Contains settings to pass to a :class:`Prompt` object when the prompt is started.
"""<def_stmt>__init__ self prompt:Activity=<none> retry_prompt:Activity=<none> choices:List[Choice]=<none> style:ListStyle=<none> validations:object=<none> number_of_attempts:int=0 <block_start>"""
Sets the initial prompt to send to the user as an :class:`botbuilder.schema.Activity`.
:param prompt: The initial prompt to send to the user
:type prompt: :class:`botbuilder.schema.Activity`
:param retry_prompt: The retry prompt to send to the user
:type retry_prompt: :class:`botbuilder.schema.Activity`
:param choices: The choices to send to the user
:type choices: :class:`List`
:param style: The style of the list of choices to send to the user
:type style: :class:`ListStyle`
:param validations: The prompt validations
:type validations: :class:`Object`
:param number_of_attempts: The number of attempts allowed
:type number_of_attempts: :class:`int`
"""<line_sep>self.prompt=prompt<line_sep>self.retry_prompt=retry_prompt<line_sep>self.choices=choices<line_sep>self.style=style<line_sep>self.validations=validations<line_sep>self.number_of_attempts=number_of_attempts<block_end><block_end> |
<import_stmt>pandas<as>pd<import_from_stmt>pandas_datareader data<line_sep>start_date='2014-01-01'<line_sep>end_date='2018-01-01'<line_sep>SRC_DATA_FILENAME='goog_data.pkl'<try_stmt><block_start>goog_data2=pd.read_pickle(SRC_DATA_FILENAME)<block_end><except_stmt>FileNotFoundError<block_start>goog_data2=data.DataReader('GOOG' 'yahoo' start_date end_date)<line_sep>goog_data2.to_pickle(SRC_DATA_FILENAME)<block_end>goog_data=goog_data2.tail(620)<line_sep>close=goog_data['Close']<line_sep>'''
Standard Deviation is a statistical calculation
used to measure the variability. In trading this value is known
as volatility. A low standard deviation indicates that the data
points tend to be very close to the mean, whereas high standard
deviation indicates that the data points are spread out over a large
range of values.
n = number of periods
Calculate the moving average.
The formula is:
d = ((P1-MA)^2 + (P2-MA)^2 + ... (Pn-MA)^2)/n
Pn is the price you pay for the nth interval
n is the number of periods you select
Take the square root of d. This gives you the standard deviation.
stddev = sqrt(d)
'''<import_stmt>statistics<as>stats<import_stmt>math<as>math<line_sep>time_period=20# look back period
history=[]# history of prices
sma_values=[]# to track moving average values for visualization purposes
stddev_values=[]# history of computed stdev values
<for_stmt>close_price close<block_start>history.append(close_price)<if_stmt>len(history)<g>time_period# we track at most 'time_period' number of prices
<block_start><del_stmt>(history[0])<block_end>sma=stats.mean(history)<line_sep>sma_values.append(sma)<line_sep>variance=0# variance is square of standard deviation
<for_stmt>hist_price history<block_start>variance=variance+((hist_price-sma)<power>2)<block_end>stdev=math.sqrt(variance/len(history))<line_sep>stddev_values.append(stdev)<block_end>goog_data=goog_data.assign(ClosePrice=pd.Series(close index=goog_data.index))<line_sep>goog_data=goog_data.assign(StandardDeviationOver20Days=pd.Series(stddev_values index=goog_data.index))<line_sep>close_price=goog_data['ClosePrice']<line_sep>stddev=goog_data['StandardDeviationOver20Days']<import_stmt>matplotlib.pyplot<as>plt<line_sep>fig=plt.figure()<line_sep>ax1=fig.add_subplot(211 ylabel='Google price in $')<line_sep>close_price.plot(ax=ax1 color='g' lw=2. legend=<true>)<line_sep>ax2=fig.add_subplot(212 ylabel='Stddev in $')<line_sep>stddev.plot(ax=ax2 color='b' lw=2. legend=<true>)<line_sep>ax2.axhline(y=stats.mean(stddev_values) color='k')<line_sep>plt.show()<line_sep> |
<import_stmt>random<import_stmt>pecan<import_from_stmt>pecan expose response request<line_sep>_body=pecan.x_test_body<line_sep>_headers=pecan.x_test_headers<class_stmt>TestController<block_start><def_stmt>__init__ self account_id<block_start>self.account_id=account_id<block_end>@expose(content_type='text/plain')<def_stmt>test self<block_start>user_agent=request.headers['User-Agent']# NOQA
limit=request.params.get('limit' '10')# NOQA
response.headers.update(_headers)<line_sep><return>_body<block_end><block_end><class_stmt>HelloController<block_start>@expose()<def_stmt>_lookup self account_id *remainder<block_start><return>TestController(account_id) remainder<block_end><block_end><class_stmt>RootController<block_start>@expose(content_type='text/plain')<def_stmt>index self<block_start>response.headers.update(_headers)<line_sep><return>_body<block_end>hello=HelloController()<block_end> |
########################################################################################################
## pyFAST - Fingerprint and Similarity Thresholding in python
##
## <NAME>
## 11/14/2016
##
## (see Yoon et. al. 2015, Sci. Adv. for algorithm details)
##
########################################################################################################
##
## Feature Extraction (Fingerprinting)
##
########################################################################################################
<import_stmt>numpy<as>np<import_stmt>pywt<as>wt<import_from_stmt>sklearn.preprocessing normalize<import_from_stmt>scipy.signal spectrogram<import_from_stmt>scipy.misc imresize<def_stmt>init_feature_extractor params ntimes<block_start>feats=FeatureExtractor(sampling_rate=params['fingerprint']['sampling_rate'] window_length=params['fingerprint']['spec_length'] window_lag=params['fingerprint']['spec_lag'] fingerprint_length=params['fingerprint']['fp_length'] fingerprint_lag=params['fingerprint']['fp_lag'] min_freq=params['fingerprint']["min_freq"] max_freq=params['fingerprint']["max_freq"] nfreq=params['fingerprint']['nfreq'] ntimes=ntimes)<line_sep><return>feats<block_end><class_stmt>FeatureExtractor(object)<block_start><def_stmt>__init__ self sampling_rate window_length window_lag fingerprint_length fingerprint_lag min_freq=0 max_freq=<none> nfreq=32 ntimes=64<block_start>self.sampling_rate=sampling_rate#/ sampling rate
self.window_len=window_length#/ length of window (seconds) used in spectrogram
self.window_lag=window_lag#/ window lag (seconds) used in spectrogram
self.fp_len=fingerprint_length#/ width of fingerprint (samples)
self.fp_lag=fingerprint_lag#/ lag between fingerprints (samples)
self.max_freq=self._initialize_frequencies(max_freq)#/ minimum and maximum frequencies for bandpass filter
self.min_freq=min_freq<line_sep>self.new_d1=int(nfreq)#/ number of frequency / time bins in fingerprints (must be power of 2) - TODO: error checking
self.new_d2=int(ntimes)<line_sep>self.d1=<none>#/ dimension of spectral images prior to resizing
self.d2=<none><line_sep>self.haar_means=<none><line_sep>self.haar_stddevs=<none><line_sep>self.haar_medians=<none><line_sep>self.haar_absdevs=<none><block_end><def_stmt>_initialize_frequencies self max_freq#/ initializes data structure
<block_start><if_stmt>max_freq<is><none><block_start>max_freq=self.sampling_rate/2.0<block_end><return>max_freq<block_end><def_stmt>update self field value<block_start><if_stmt>hasattr(self field)<block_start>setattr(self field value)<block_end><else_stmt><block_start>print('WARNING: object has no attribute: '+field)<line_sep>print('object has the following attributes:'+self.__dict__.keys())<block_end><return><block_end><def_stmt>get_params self<block_start>mdict=dict()<for_stmt>k self.__dict__.keys()<block_start><if_stmt>k<not><in>['haar_means' 'haar_stddevs' 'haar_absdevs' 'haar_medians']<block_start>mdict[k]=self.__dict__[k]<block_end><block_end><return>mdict<block_end>#/ returns indicies for overlapping windows
<def_stmt>get_window_params self N L dL<block_start>idx0=np.asarray(range(0 N+1 dL))<line_sep>idx2=np.asarray(range(L N+1 dL))<line_sep>nWindows=len(idx2)<line_sep>idx1=idx0[0:nWindows]<line_sep><return>nWindows idx1 idx2<block_end>########################################################################
## FOR COMPUTING FINGERPRINTS ##
########################################################################
#/ computes spectrogram from continous timeseries data
<def_stmt>data_to_spectrogram self x_data window_type='hanning'<block_start>f,t,Sxx=spectrogram(x_data fs=self.sampling_rate window=window_type nperseg=int(self.sampling_rate<times>self.window_len) noverlap=int(self.sampling_rate<times>(self.window_len-self.window_lag)))<line_sep># Truncate spectrogram, keep only passband frequencies
<if_stmt>self.min_freq<g>0<block_start>fidx_keep=(f<ge>self.min_freq)<line_sep>Sxx=Sxx[fidx_keep :]<line_sep>f=f[fidx_keep]<block_end><if_stmt>self.max_freq<l>f[-1]<block_start>fidx_keep=(f<le>self.max_freq)<line_sep>Sxx=Sxx[fidx_keep :]<line_sep>f=f[fidx_keep]<block_end>self.frequencies=f<line_sep>self.times=t<line_sep><return>f t Sxx<block_end>#/ breaks spectrogram into overlapping spectral images
<def_stmt>spectrogram_to_spectral_images self Sxx<block_start>nFreq,nTimes=np.shape(Sxx)<line_sep>nWindows,idx1,idx2=self.get_window_params(nTimes self.fp_len self.fp_lag)<line_sep>spectral_images=np.zeros([nWindows nFreq self.fp_len])<for_stmt>i range(nWindows)<block_start>spectral_images[i : :]=Sxx[: idx1[i]:idx2[i]]<block_end>self.nwindows=nWindows<line_sep>nWindows,self.d1,self.d2=np.shape(spectral_images)<line_sep>#self.new_d1, self.new_d2 = np.exp2(np.floor(np.log2([self.d1, self.d2])))
<return>spectral_images nWindows idx1 idx2<block_end>#/ resizes each spectral image to specified dimensions
<def_stmt>_resize_spectral_images self spectral_images new_d1 new_d2<block_start>new_spectral_images=np.zeros([self.nwindows new_d1 new_d2])<for_stmt>i range(self.nwindows)<block_start>new_spectral_images[i : :]=imresize(spectral_images[i : :] (new_d1 new_d2) interp='bilinear' mode='F')<block_end><return>new_spectral_images<block_end>#/ reshapes output from PyWavelets 2d wavelet transform into image
<def_stmt>_unwrap_wavelet_coeffs self coeffs<block_start>L=len(coeffs)<line_sep>cA=coeffs[0]<for_stmt>i range(1 L)<block_start>(cH cV cD)=coeffs[i]<line_sep>cA=np.concatenate((np.concatenate((cA cV) axis=1) np.concatenate((cH cD) axis=1)) axis=0)<block_end><return>cA<block_end>#/ computes wavelet transform for each spectral image
<def_stmt>spectral_images_to_wavelet self spectral_images wavelet=wt.Wavelet('db1')<block_start><if_stmt>(int(self.new_d1)<ne>self.d1)<or>(int(self.new_d2)<ne>self.d2)<block_start>spectral_images=self._resize_spectral_images(spectral_images self.new_d1 self.new_d2)<block_end>haar_images=np.zeros([self.nwindows self.new_d1 self.new_d2])<for_stmt>i range(self.nwindows)<block_start>coeffs=wt.wavedec2(spectral_images[i : :] wavelet)<line_sep>haar_images[i : :]=self._unwrap_wavelet_coeffs(coeffs)<block_end><return>haar_images<block_end>#/ computes (normalized) haar_images from continous timeseries data
<def_stmt>data_to_haar_images self x_data<block_start>f,t,Sxx=self.data_to_spectrogram(x_data)<line_sep>spectral_images,nWindows,idx1,idx2=self.spectrogram_to_spectral_images(Sxx)<line_sep>haar_images=self.spectral_images_to_wavelet(spectral_images)<line_sep>haar_images=normalize(self._images_to_vectors(haar_images) axis=1)<line_sep><return>haar_images nWindows idx1 idx2 Sxx t<block_end>#/ converts set of images to array of vectors
<def_stmt>_images_to_vectors self images<block_start>N,d1,d2=np.shape(images)<line_sep>vectors=np.zeros([N d1<times>d2])<for_stmt>i range(N)<block_start>vectors[i :]=np.reshape(images[i : :] (1 d1<times>d2))<block_end><return>vectors<block_end>#/ converts set of vectors into set of images (of dimension d1 x d2)
<def_stmt>_vectors_to_images self vectors d1 d2<block_start>N,D=np.shape(vectors)<if_stmt>D<ne>d1<times>d2<block_start>print('warning: invalid dimensions')<line_sep><return>vectors<block_end><else_stmt><block_start>images=np.zeros([N d1 d2])<for_stmt>i range(N)<block_start>images[i : :]=np.reshape(vectors[i :] (d1 d2))<block_end><return>images<block_end><block_end><def_stmt>compute_haar_stats self haar_images type=<none><block_start><if_stmt>type<is>'MAD'<block_start>shape=haar_images.shape<line_sep>medians=[]<for_stmt>i range(shape[1])<block_start>medians.append(np.median(haar_images[: i]))<block_end>self.haar_medians=np.array(medians)<line_sep>mad=[]<for_stmt>i range(shape[1])<block_start>tmp=abs(haar_images[: i]-medians[i])<line_sep>mad.append(np.median(tmp))<block_end>self.haar_absdevs=np.array(mad)<line_sep><return>self.haar_medians self.haar_absdevs<block_end><if_stmt>type<is>'Zscore'<block_start>self.haar_means=np.mean(haar_images axis=0)<line_sep>self.haar_stddevs=np.std(haar_images axis=0)<line_sep><return>self.haar_means self.haar_stddevs<block_end><block_end><def_stmt>standardize_haar self haar_images type='MAD'<block_start><if_stmt>type<is>'Zscore'<block_start>haar_images=(haar_images-self.haar_means)/self.haar_stddevs<line_sep><return>haar_images<block_end><elif_stmt>type<is>'MAD'<block_start>haar_images=(haar_images-self.haar_medians)/self.haar_absdevs<line_sep><return>haar_images<block_end><else_stmt><block_start>print('Warning: invalid type - select type MAD or Zscore')<line_sep><return><none><block_end><block_end><def_stmt>binarize_vectors_topK_sign self coeff_vectors K<block_start>self.K=K<line_sep>N,M=np.shape(coeff_vectors)<line_sep>binary_vectors=np.zeros((N 2<times>M) dtype=bool)<for_stmt>i range(N)<block_start>idx=np.argsort(abs(coeff_vectors[i :]))[-K:]<line_sep>binary_vectors[i idx]=coeff_vectors[i idx]<g>0<line_sep>binary_vectors[i idx+M]=coeff_vectors[i idx]<l>0<block_end><return>binary_vectors<block_end><def_stmt>vectors_to_topK_sign 
self coeff_vectors K<block_start>self.K=K<line_sep>N,M=np.shape(coeff_vectors)<line_sep>sign_vectors=np.zeros([N M])<for_stmt>i range(N)<block_start>idx=np.argsort(abs(coeff_vectors[i :]))[-K:]<line_sep>sign_vectors[i idx]=np.sign(coeff_vectors[i idx])<block_end><return>sign_vectors<block_end><def_stmt>sign_to_binary self vector<block_start>L=len(vector)<line_sep>new_vec=np.zeros((L 2) dtype=bool)<line_sep>new_vec[: 0]=vector<g>0<line_sep>new_vec[: 1]=vector<l>0<line_sep><return>np.reshape(new_vec (1 2<times>L))<block_end><def_stmt>binarize_vectors_topK self coeff_vectors K<block_start>self.K=K<line_sep>N,M=np.shape(coeff_vectors)<line_sep>sign_vectors=np.zeros((N M) dtype=bool)<for_stmt>i range(N)<block_start>idx=np.argsort(coeff_vectors[i :])[-K:]<line_sep>sign_vectors[i idx]=1<block_end><return>sign_vectors<block_end><def_stmt>jaccard_sim self vec1 vec2<block_start><return>sum(vec1&vec2)/(1.0<times>sum(vec1|vec2))<block_end><block_end> |
<import_stmt>csv<import_stmt>os<def_stmt>remove_if_exist path<block_start><if_stmt>os.path.exists(path)<block_start>os.remove(path)<block_end><block_end><def_stmt>load_metadata path<block_start>res={}<line_sep>headers=<none><with_stmt>open(path newline='')<as>csvfile<block_start>reader=csv.reader(csvfile delimiter=',' quotechar='"')<for_stmt>row reader<block_start><if_stmt>headers<is><none><block_start>headers=row<line_sep><continue><block_end>item={}<line_sep>uid=row[0]<for_stmt>index,token enumerate(row)<block_start><if_stmt>index<ne>0<block_start>item[headers[index]]=token<block_end><block_end>res[uid]=item<block_end><block_end><return>res<block_end><def_stmt>load_specter_embeddings path<block_start>res={}<line_sep>dim=<none><with_stmt>open(path newline='')<as>csvfile<block_start>reader=csv.reader(csvfile delimiter=',')<for_stmt>row reader<block_start>uid=row[0]<line_sep>vector=row[1:]<line_sep>res[uid]=vector<if_stmt>dim<is><none><block_start>dim=len(vector)<block_end><else_stmt><block_start><assert_stmt>dim<eq>len(vector) "Embedding dimension mismatch"<block_end><block_end><block_end><return>res dim<block_end><def_stmt>save_index_to_uid_file index_to_uid index path<block_start>remove_if_exist(path)<with_stmt>open(path 'w')<as>f<block_start><for_stmt>index,uid enumerate(index_to_uid)<block_start>f.write(f"{index} {uid}\n")<block_end><block_end><block_end> |
"""
Use the ``RelocationModel`` class to choose movers based on
relocation rates.
"""<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>. util<line_sep>logger=logging.getLogger(__name__)<def_stmt>find_movers choosers rates rate_column<block_start>"""
Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""<line_sep>logger.debug('start: find movers for relocation')<line_sep>relocation_rates=pd.Series(np.zeros(len(choosers)) index=choosers.index)<for_stmt>_,row rates.iterrows()<block_start>indexes=util.filter_table(choosers row ignore={rate_column}).index<line_sep>relocation_rates.loc[indexes]=row[rate_column]<block_end>movers=relocation_rates.index[relocation_rates<g>np.random.random(len(choosers))]<line_sep>logger.debug('picked {} movers for relocation'.format(len(movers)))<line_sep>logger.debug('finish: find movers for relocation')<line_sep><return>movers<block_end><class_stmt>RelocationModel(object)<block_start>"""
Find movers within a population according to a table of
relocation rates.
Parameters
----------
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object, optional
Name of column in `rates` table that contains relocation rates.
If not given 'probability_of_relocating' is used.
"""<def_stmt>__init__ self rates rate_column=<none><block_start>self.relocation_rates=rates<line_sep>self.rate_column=rate_column<or>'probability_of_relocating'<block_end><def_stmt>find_movers self choosers<block_start>"""
Select movers from among a table of `choosers` according to the
stored relocation rates.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index.
"""<line_sep><return>find_movers(choosers self.relocation_rates self.rate_column)<block_end><block_end> |
<import_stmt>distutils.version<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>tensorflow.contrib.rnn<as>rnn<import_stmt>feudal_networks.policies.policy<as>policy<import_stmt>feudal_networks.policies.policy_utils<as>policy_utils<import_from_stmt>feudal_networks.models.models SingleStepLSTM<import_from_stmt>feudal_networks.policies.configs.feudal_config config<import_from_stmt>feudal_networks.policies.feudal_batch_processor FeudalBatchProcessor<class_stmt>FeudalPolicy(policy.Policy)<block_start>"""
Policy of the Feudal network architecture.
"""<def_stmt>__init__ self obs_space act_space global_step<block_start>self.global_step=global_step<line_sep>self.obs_space=obs_space<line_sep>self.act_space=act_space<line_sep>self.config=config<line_sep>self.k=config.k#Dimensionality of w
self.g_dim=config.g_dim<line_sep>self.c=config.c<line_sep>self.batch_processor=FeudalBatchProcessor(self.c)<line_sep>self._build_model()<block_end><def_stmt>_build_model self<block_start>"""
Builds the manager and worker models.
"""<with_stmt>tf.variable_scope('FeUdal')<block_start>self._build_placeholders()<line_sep>self._build_perception()<line_sep>self._build_manager()<line_sep>self._build_worker()<line_sep>self._build_loss()<line_sep>self.var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES tf.get_variable_scope().name)<block_end># for v in self.var_list:
# print v.name
self.state_in=[self.worker_lstm.state_in[0] self.worker_lstm.state_in[1] self.manager_lstm.state_in[0] self.manager_lstm.state_in[1]]<line_sep>self.state_out=[self.worker_lstm.state_out[0] self.worker_lstm.state_out[1] self.manager_lstm.state_out[0] self.manager_lstm.state_out[1]]<line_sep># for v in self.var_list:
# print v
<block_end><def_stmt>_build_placeholders self#standard for all policies
<block_start>self.obs=tf.placeholder(tf.float32 [<none>]+list(self.obs_space))<line_sep>self.r=tf.placeholder(tf.float32 (<none> ))<line_sep>self.ac=tf.placeholder(tf.float32 (<none> self.act_space))<line_sep>self.adv=tf.placeholder(tf.float32 [<none>])#unused
#specific to FeUdal
self.prev_g=tf.placeholder(tf.float32 (<none> <none> self.g_dim))<line_sep>self.ri=tf.placeholder(tf.float32 (<none> ))<line_sep>self.s_diff=tf.placeholder(tf.float32 (<none> self.g_dim))<block_end><def_stmt>_build_perception self<block_start>conv1=tf.layers.conv2d(inputs=self.obs filters=16 kernel_size=[8 8] activation=tf.nn.elu strides=4)<line_sep>conv2=tf.layers.conv2d(inputs=conv1 filters=32 kernel_size=[4 4] activation=tf.nn.elu strides=2)<line_sep>flattened_filters=policy_utils.flatten(conv2)<line_sep>self.z=tf.layers.dense(inputs=flattened_filters units=256 activation=tf.nn.elu)<block_end><def_stmt>_build_manager self<block_start><with_stmt>tf.variable_scope('manager')# Calculate manager internal state
<block_start>self.s=tf.layers.dense(inputs=self.z units=self.g_dim activation=tf.nn.elu)<line_sep># Calculate manager output g
x=tf.expand_dims(self.s [0])<line_sep>self.manager_lstm=SingleStepLSTM(x self.g_dim step_size=tf.shape(self.obs)[:1])<line_sep>g_hat=self.manager_lstm.output<line_sep>self.g=tf.nn.l2_normalize(g_hat dim=1)<line_sep>self.manager_vf=self._build_value(g_hat)<line_sep># self.manager_vf = tf.Print(self.manager_vf,[self.manager_vf])
<block_end><block_end><def_stmt>_build_worker self<block_start><with_stmt>tf.variable_scope('worker')<block_start>num_acts=self.act_space<line_sep># Calculate U
self.worker_lstm=SingleStepLSTM(tf.expand_dims(self.z [0]) size=num_acts<times>self.k step_size=tf.shape(self.obs)[:1])<line_sep>flat_logits=self.worker_lstm.output<line_sep>self.worker_vf=self._build_value(flat_logits)<line_sep>U=tf.reshape(flat_logits [-1 num_acts self.k])<line_sep># Calculate w
cut_g=tf.stop_gradient(self.g)<line_sep>cut_g=tf.expand_dims(cut_g [1])<line_sep>gstack=tf.concat([self.prev_g cut_g] axis=1)<line_sep>self.last_c_g=gstack[: 1:]<line_sep># print self.last_c_g
gsum=tf.reduce_sum(gstack axis=1)<line_sep>phi=tf.get_variable("phi" (self.g_dim self.k))<line_sep>w=tf.matmul(gsum phi)<line_sep>w=tf.expand_dims(w [2])<line_sep># Calculate policy and sample
logits=tf.reshape(tf.matmul(U w) [-1 num_acts])<line_sep>self.pi=tf.nn.softmax(logits)<line_sep>self.log_pi=tf.nn.log_softmax(logits)<line_sep>self.sample=policy_utils.categorical_sample(tf.reshape(logits [-1 num_acts]) num_acts)[0 :]<block_end><block_end><def_stmt>_build_value self input<block_start><with_stmt>tf.variable_scope('VF')<block_start>hidden=tf.layers.dense(inputs=input units=self.config.vf_hidden_size activation=tf.nn.elu)<line_sep>w=tf.get_variable("weights" (self.config.vf_hidden_size 1))<line_sep><return>tf.matmul(hidden w)<block_end><block_end><def_stmt>_build_loss self<block_start>cutoff_vf_manager=tf.reshape(tf.stop_gradient(self.manager_vf) [-1])<line_sep>dot=tf.reduce_sum(tf.multiply(self.s_diff self.g) axis=1)<line_sep>gcut=tf.stop_gradient(self.g)<line_sep>mag=tf.norm(self.s_diff axis=1)<times>tf.norm(gcut axis=1)+.0001<line_sep>dcos=dot/mag<line_sep>manager_loss=-tf.reduce_sum((self.r-cutoff_vf_manager)<times>dcos)<line_sep>cutoff_vf_worker=tf.reshape(tf.stop_gradient(self.worker_vf) [-1])<line_sep>log_p=tf.reduce_sum(self.log_pi<times>self.ac [1])<line_sep>worker_loss=(self.r+self.config.alpha<times>self.ri-cutoff_vf_worker)<times>log_p<line_sep>worker_loss=-tf.reduce_sum(worker_loss axis=0)<line_sep>Am=self.r-self.manager_vf<line_sep>manager_vf_loss=.5<times>tf.reduce_sum(tf.square(Am))<line_sep>Aw=(self.r+self.config.alpha<times>self.ri)-self.worker_vf<line_sep>worker_vf_loss=.5<times>tf.reduce_sum(tf.square(Aw))<line_sep>entropy=-tf.reduce_sum(self.pi<times>self.log_pi)<line_sep>beta=tf.train.polynomial_decay(config.beta_start self.global_step end_learning_rate=config.beta_end decay_steps=config.decay_steps power=1)<line_sep># worker_loss = tf.Print(worker_loss,[manager_loss,worker_loss,manager_vf_loss,worker_vf_loss,entropy])
self.loss=worker_loss+manager_loss+worker_vf_loss+manager_vf_loss-entropy<times>beta<line_sep>bs=tf.to_float(tf.shape(self.obs)[0])<line_sep>tf.summary.scalar("model/manager_loss" manager_loss/bs)<line_sep>tf.summary.scalar("model/worker_loss" worker_loss/bs)<line_sep>tf.summary.scalar("model/value_mean" tf.reduce_mean(self.manager_vf))<line_sep>tf.summary.scalar("model/value_loss" manager_vf_loss/bs)<line_sep>tf.summary.scalar("model/value_loss_scaled" manager_vf_loss/bs<times>.5)<line_sep>tf.summary.scalar("model/entropy" entropy/bs)<line_sep>tf.summary.scalar("model/entropy_loss_scaleed" -entropy/bs<times>beta)<line_sep># tf.summary.scalar("model/grad_global_norm", tf.global_norm(grads))
tf.summary.scalar("model/var_global_norm" tf.global_norm(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES tf.get_variable_scope().name)))<line_sep>tf.summary.scalar("model/beta" beta)<line_sep>tf.summary.image("model/state" self.obs)<line_sep>self.summary_op=tf.summary.merge_all()<block_end><def_stmt>get_initial_features self<block_start><return>np.zeros((1 1 self.g_dim) np.float32) self.worker_lstm.state_init+self.manager_lstm.state_init<block_end><def_stmt>act self ob g cw hw cm hm<block_start>sess=tf.get_default_session()<line_sep><return>sess.run([self.sample self.manager_vf self.g self.s self.last_c_g]+self.state_out {self.obs:[ob] self.state_in[0]:cw self.state_in[1]:hw self.state_in[2]:cm self.state_in[3]:hm self.prev_g:g})<block_end><def_stmt>value self ob g cw hw cm hm<block_start>sess=tf.get_default_session()<line_sep><return>sess.run(self.manager_vf {self.obs:[ob] self.state_in[0]:cw self.state_in[1]:hw self.state_in[2]:cm self.state_in[3]:hm self.prev_g:g})[0]<block_end><def_stmt>update_batch self batch<block_start><return>self.batch_processor.process_batch(batch)<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-Sdstor
GUID : afe654eb-0a83-4eb4-948f-d4510ec39c30
"""<import_from_stmt>construct Int8sl Int8ul Int16ul Int16sl Int32sl Int32ul Int64sl Int64ul Bytes Double Float32l Struct<import_from_stmt>etl.utils WString CString SystemTime Guid<import_from_stmt>etl.dtyp Sid<import_from_stmt>etl.parsers.etw.core Etw declare guid<line_sep>@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30") event_id=100 version=0)<class_stmt>Microsoft_Windows_Sdstor_100_0(Etw)<block_start>pattern=Struct("Port"/Int8ul "Bus"/Int8ul "Target"/Int8ul "LUN"/Int8ul "RequestDuration"/Int64ul "CDBLength"/Int32ul "CDB"/Bytes(<lambda>this:this.CDBLength) "SrbStatus"/Int8ul "Irp"/Int64ul "OriginalIrp"/Int64ul)<block_end>@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30") event_id=101 version=0)<class_stmt>Microsoft_Windows_Sdstor_101_0(Etw)<block_start>pattern=Struct("PackedCommandCount"/Int32ul "NumIrpsPacked"/Int32ul)<block_end>@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30") event_id=102 version=0)<class_stmt>Microsoft_Windows_Sdstor_102_0(Etw)<block_start>pattern=Struct("PackedCommandCount"/Int32ul "NumIrpsPacked"/Int32ul)<block_end>@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30") event_id=105 version=0)<class_stmt>Microsoft_Windows_Sdstor_105_0(Etw)<block_start>pattern=Struct("ResultCode"/Int32ul)<block_end>@declare(guid=guid("afe654eb-0a83-4eb4-948f-d4510ec39c30") event_id=107 version=0)<class_stmt>Microsoft_Windows_Sdstor_107_0(Etw)<block_start>pattern=Struct("LBA"/Int64ul "Length"/Int32ul)<block_end> |
# Time: O(n^2 * k)
# Space: O(k)
<class_stmt>Solution(object)<block_start><def_stmt>maxVacationDays self flights days<block_start>"""
:type flights: List[List[int]]
:type days: List[List[int]]
:rtype: int
"""<if_stmt><not>days<or><not>flights<block_start><return>0<block_end>dp=[[0]<times>len(days)<for>_ xrange(2)]<for_stmt>week reversed(xrange(len(days[0])))<block_start><for_stmt>cur_city xrange(len(days))<block_start>dp[week%2][cur_city]=days[cur_city][week]+dp[(week+1)%2][cur_city]<for_stmt>dest_city xrange(len(days))<block_start><if_stmt>flights[cur_city][dest_city]<eq>1<block_start>dp[week%2][cur_city]=max(dp[week%2][cur_city] days[dest_city][week]+dp[(week+1)%2][dest_city])<block_end><block_end><block_end><block_end><return>dp[0][0]<block_end><block_end> |
"""
This module contains functions for ML-matcher combiner selection.
Note: This is not going to be there for the first release of py_entitymatching.
"""<import_stmt>itertools<import_stmt>six<import_from_stmt>py_entitymatching.matcherselector.mlmatcherselection select_matcher<import_from_stmt>py_entitymatching.matcher.ensemblematcher EnsembleMatcher<def_stmt>selector_matcher_combiner matchers combiners x=<none> y=<none> table=<none> exclude_attrs=<none> target_attr=<none> weights=<none> threshold=<none> k=5<block_start><if_stmt><not>isinstance(matchers list)<block_start>matchers=[matchers]<block_end><if_stmt><not>isinstance(combiners list)<block_start>combiners=[combiners]<block_end>matcher_list=get_matcher_list(matchers combiners weights threshold)<line_sep><return>select_matcher(matcher_list x=x y=y table=table exclude_attrs=exclude_attrs target_attr=target_attr k=k)<block_end><def_stmt>get_matcher_list matchers combiners weights threshold<block_start>ensemble_len=range(2 len(matchers)+1)<line_sep>matcher_list=[]<line_sep>matcher_list.extend(matchers)<for_stmt>l ensemble_len<block_start>iter_combns=itertools.combinations(six.moves.xrange(0 len(matchers)) l)<for_stmt>ic iter_combns<block_start><for_stmt>c combiners<block_start>m=[matchers[i]<for>i ic]<if_stmt>c<is>'Weighted'<block_start>em=EnsembleMatcher(m voting=c weights=weights threshold=threshold)<block_end><else_stmt><block_start>em=EnsembleMatcher(m voting=c)<block_end>matcher_list.append(em)<block_end><block_end><block_end><return>matcher_list<block_end> |
"""unit tests for utils.py"""<import_stmt>os<import_from_stmt>xdfile utils<line_sep>TEST_DIRECTORY=os.path.abspath(os.path.dirname(__file__))<def_stmt>test_find_files <block_start>mygen=utils.find_files(TEST_DIRECTORY)<for_stmt>fullfn,contents mygen# It should throw out anything starting with '.'
<block_start><assert_stmt><not>fullfn.startswith('.')<block_end><block_end> |
# coding=utf-8
# Copyright 2018 Google LLC & <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface for GAN models that can be trained using the Estimator API."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>abc<import_stmt>six<import_stmt>tensorflow<as>tf<line_sep>@six.add_metaclass(abc.ABCMeta)<class_stmt>AbstractGAN(object)<block_start>"""Interface for GAN models that can be training using the Estimator API."""<def_stmt>__init__ self dataset parameters model_dir<block_start>super(AbstractGAN self).__init__()<line_sep>self._dataset=dataset<line_sep>self._parameters=parameters<line_sep>self._model_dir=model_dir<block_end><def_stmt>as_estimator self run_config batch_size use_tpu<block_start>"""Returns a TPUEstimator for this GAN."""<line_sep><return>tf.contrib.tpu.TPUEstimator(config=run_config use_tpu=use_tpu model_fn=self.model_fn train_batch_size=batch_size)<block_end>@abc.abstractmethod<def_stmt>as_module_spec self params mode<block_start>"""Returns the generator network as TFHub module spec."""<block_end>@abc.abstractmethod<def_stmt>input_fn self params mode<block_start>"""Input function that retuns a `tf.data.Dataset` object.
This function will be called once for each host machine.
Args:
params: Python dictionary with parameters given to TPUEstimator.
Additional TPUEstimator will set the key `batch_size` with the batch
size for this host machine and `tpu_contextu` with a TPUContext
object.
mode: `tf.estimator.MoedeKeys` value.
Returns:
A `tf.data.Dataset` object with batched features and labels.
"""<block_end>@abc.abstractmethod<def_stmt>model_fn self features labels params mode<block_start>"""Constructs the model for the given features and mode.
This interface only requires implementing the TRAIN mode.
On TPUs the model_fn should construct a graph for a single TPU core.
Wrap the optimizer with a `tf.contrib.tpu.CrossShardOptimizer` to do
synchronous training with all TPU cores.c
Args:
features: A dictionary with the feature tensors.
labels: Tensor will labels. Will be None if mode is PREDICT.
params: Dictionary with hyperparameters passed to TPUEstimator.
Additional TPUEstimator will set 3 keys: `batch_size`, `use_tpu`,
`tpu_context`. `batch_size` is the batch size for this core.
mode: `tf.estimator.ModeKeys` value (TRAIN, EVAL, PREDICT). The mode
should be passed to the TPUEstimatorSpec and your model should be
build this mode.
Returns:
A `tf.contrib.tpu.TPUEstimatorSpec`.
"""<block_end><block_end> |
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>CondCore.DBCommon.CondDBCommon_cfi *<line_sep>PoolDBESSourcebtagMuJetsWpNoTtbar=cms.ESSource("PoolDBESSource" CondDBCommon toGet=cms.VPSet(#
# working points
#
cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVL_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVL_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVL_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVM_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVM_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVM_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVT_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1L_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1L_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1M_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1M_WP')) 
cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVV1T_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVV1T_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1L_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1L_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1M_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1M_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARCSVSLV1T_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARCSVSLV1T_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPL_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPL_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPL_WP')) 
cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPM_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPM_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPM_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARJPT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARJPT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARJPT_WP')) cms.PSet(record=cms.string('PerformancePayloadRecord') tag=cms.string('PerformancePayloadFromBinnedTFormula_MUJETSWPBTAGNOTTBARTCHPT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_T')) cms.PSet(record=cms.string('PerformanceWPRecord') tag=cms.string('PerformanceWorkingPoint_MUJETSWPBTAGNOTTBARTCHPT_v10_offline') label=cms.untracked.string('MUJETSWPBTAGNOTTBARTCHPT_WP')) ))<line_sep>PoolDBESSourcebtagMuJetsWpNoTtbar.connect='frontier://FrontierProd/CMS_COND_PAT_000'<line_sep> |
'Test of label scope and label exporter'<import_stmt>numpy<as>np<import_from_stmt>videocore.assembler qpu get_label_positions<import_from_stmt>videocore.driver Driver<line_sep>@qpu<def_stmt>given_jmp asm<block_start>mov(ra0 uniform)<line_sep>mov(r0 0)<line_sep>L.entry<line_sep>jmp(reg=ra0)<line_sep>nop()<line_sep>nop()<line_sep>nop()<line_sep>iadd(r0 r0 1)<line_sep>L.test<line_sep>iadd(r0 r0 4)<line_sep>setup_vpm_write()<line_sep>mov(vpm r0)<line_sep>setup_dma_store(nrows=1)<line_sep>start_dma_store(uniform)<line_sep>wait_dma_store()<line_sep>exit()<block_end><def_stmt>test_given_jump <block_start>lbls=get_label_positions(given_jmp)<line_sep>entry_pc=0<line_sep>test_pc=0<for_stmt>lbl,pc lbls<block_start><if_stmt>lbl.name<eq>'entry'<block_start>entry_pc=pc<block_end><if_stmt>lbl.name<eq>'test'<block_start>test_pc=pc<block_end><block_end><with_stmt>Driver()<as>drv<block_start>X=drv.alloc((1 16) 'int32')<line_sep>X[:]=1234<line_sep>drv.execute(n_threads=1 program=drv.program(given_jmp) uniforms=[test_pc-entry_pc-32 X.address])<assert_stmt>np.all(X<eq>4)<block_end><block_end>@qpu<def_stmt>with_namespace asm<block_start>mov(r0 0)<with_stmt>namespace('ns1')<block_start>jmp(L.test)<line_sep>nop()<line_sep>nop()<line_sep>nop()<line_sep>iadd(r0 r0 10)<line_sep>L.test<line_sep>iadd(r0 r0 1)<with_stmt>namespace('nested')<block_start>jmp(L.test)<line_sep>nop()<line_sep>nop()<line_sep>nop()<line_sep>iadd(r0 r0 10)<line_sep>L.test<line_sep>iadd(r0 r0 1)<block_end><block_end><with_stmt>namespace('ns2')<block_start>jmp(L.test)<line_sep>nop()<line_sep>nop()<line_sep>nop()<line_sep>iadd(r0 r0 10)<line_sep>L.test<line_sep>iadd(r0 r0 1)<block_end>jmp(L.test)<line_sep>nop()<line_sep>nop()<line_sep>nop()<line_sep>iadd(r0 r0 10)<line_sep>L.test<line_sep>iadd(r0 r0 1)<line_sep>setup_vpm_write()<line_sep>mov(vpm r0)<line_sep>setup_dma_store(nrows=1)<line_sep>start_dma_store(uniform)<line_sep>wait_dma_store()<line_sep>exit()<block_end><def_stmt>test_with_namespace 
<block_start><with_stmt>Driver()<as>drv<block_start>X=drv.alloc((1 16) 'int32')<line_sep>X[:]=1234<line_sep>drv.execute(n_threads=1 program=drv.program(with_namespace) uniforms=[X.address])<assert_stmt>np.all(X<eq>4)<block_end><block_end> |
<import_from_stmt>KratosMultiphysics *<import_stmt>KratosMultiphysics.KratosUnittest<as>UnitTest<import_stmt>KratosMultiphysics.kratos_utilities<as>kratos_utils<try_stmt><block_start><import_from_stmt>KratosMultiphysics.FluidDynamicsApplication *<line_sep>have_fluid_dynamics=<true><block_end><except_stmt>ImportError<block_start>have_fluid_dynamics=<false><block_end><import_stmt>filecmp<import_stmt>os<class_stmt>WorkFolderScope(object)<block_start>'''Auxiliary class to define a work folder for the tests.'''<def_stmt>__init__ self work_folder<block_start>self.currentPath=os.getcwd()<line_sep>self.scope=os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)) work_folder))<block_end><def_stmt>__enter__ self<block_start>os.chdir(self.scope)<block_end><def_stmt>__exit__ self exc_type exc_value traceback<block_start>os.chdir(self.currentPath)<block_end><block_end>@UnitTest.skipUnless(have_fluid_dynamics "Missing required application: FluidDynamicsApplication")<class_stmt>TestGiDIOGaussPoints(UnitTest.TestCase)<block_start>'''Tests related to GiD I/O Gauss point results printing.'''<def_stmt>setUp self<block_start>self.setModelPart()<line_sep>self.workFolder="auxiliar_files_for_python_unittest/gid_io"<block_end><def_stmt>tearDown self<block_start><with_stmt>WorkFolderScope(self.workFolder)<block_start><for_stmt>suffix ['_0.post.res' '_0.post.msh']<block_start>kratos_utils.DeleteFileIfExisting(self.output_file_name+suffix)<block_end><block_end><block_end><def_stmt>setModelPart self<block_start>self.model=Model()<line_sep>modelPart=self.model.CreateModelPart("Test ModelPart")<line_sep>modelPart.AddNodalSolutionStepVariable(DISTANCE)<line_sep>modelPart.AddNodalSolutionStepVariable(VELOCITY)<line_sep>nodes=list()<line_sep>nodes.append(modelPart.CreateNewNode(1 0.0 0.0 0.0))<line_sep>nodes.append(modelPart.CreateNewNode(2 1.0 0.0 0.0))<line_sep>nodes.append(modelPart.CreateNewNode(3 1.0 1.0 0.0))<line_sep>nodes.append(modelPart.CreateNewNode(4 0.0 1.0 
0.0))<line_sep>nodes.append(modelPart.CreateNewNode(5 0.5 0.5 1.0))<for_stmt>node nodes<block_start>rx=node.X-0.5<line_sep>rz=node.Z-0.5<line_sep>r=(rx<power>2+rz<power>2)<power>0.5<line_sep>vel=Array3()<line_sep>vel[0]=-rz/r<line_sep>vel[1]=0.0<line_sep>vel[2]=rx/r<line_sep>node.SetSolutionStepValue(VELOCITY 0 vel)<line_sep>node.SetSolutionStepValue(DISTANCE 0 r)<block_end>properties=modelPart.GetProperties()[0]<line_sep>modelPart.CreateNewElement("VMS3D" 1 [1 2 4 5] properties)<line_sep>modelPart.CreateNewElement("VMS3D" 2 [2 3 4 5] properties)<line_sep>modelPart.CreateNewCondition("MonolithicWallCondition3D" 1 [1 5 4] properties)<line_sep>modelPart.CreateNewCondition("MonolithicWallCondition3D" 2 [1 2 5] properties)<line_sep>modelPart.CreateNewCondition("MonolithicWallCondition3D" 3 [2 3 5] properties)<line_sep>modelPart.CreateNewCondition("MonolithicWallCondition3D" 4 [3 4 5] properties)<line_sep>modelPart.SetBufferSize(2)<line_sep>self.modelPart=modelPart<block_end><def_stmt>deactivateSome self<block_start><for_stmt>elem self.modelPart.Elements<block_start><if_stmt>elem.Id%2<eq>0<block_start>elem.Set(ACTIVE <false>)<block_end><block_end><for_stmt>cond self.modelPart.Conditions<block_start><if_stmt>cond.Id%2<eq>0<block_start>cond.Set(ACTIVE <false>)<block_end><block_end><block_end><def_stmt>initializeOutputFile self<block_start>self.gid_io=GidIO(self.output_file_name self.post_mode MultiFileFlag.SingleFile WriteDeformedMeshFlag.WriteUndeformed WriteConditionsFlag.WriteConditions)<line_sep>self.gid_io.InitializeMesh(0)<line_sep>self.gid_io.WriteMesh(self.modelPart.GetMesh())<line_sep>self.gid_io.FinalizeMesh()<line_sep>self.gid_io.InitializeResults(0.0 self.modelPart.GetMesh())<block_end><def_stmt>writeResults self label<block_start>self.gid_io.WriteNodalResults(VELOCITY self.modelPart.Nodes label 0)<line_sep>self.gid_io.PrintOnGaussPoints(VORTICITY self.modelPart label)<line_sep>self.gid_io.PrintOnGaussPoints(NORMAL self.modelPart 
label)<line_sep>self.gid_io.PrintFlagsOnGaussPoints(ACTIVE "ACTIVE" self.modelPart label)<block_end><def_stmt>finalizeOutputFile self<block_start>self.gid_io.FinalizeResults()<block_end><def_stmt>outputMatchesReferenceSolution self<block_start>msh_file_matches=filecmp.cmp(self.reference_file_name+'_0.post.msh' self.output_file_name+'_0.post.msh')<line_sep>res_file_matches=filecmp.cmp(self.reference_file_name+'_0.post.res' self.output_file_name+'_0.post.res')<line_sep><return>msh_file_matches<and>res_file_matches<block_end><def_stmt>test_write_active_only self<block_start>self.post_mode=GiDPostMode.GiD_PostAscii<line_sep>self.output_file_name="test_gid_io_gp_active_only"<line_sep>self.reference_file_name="ref_gid_io_gp_active_only"<line_sep>self.deactivateSome()<with_stmt>WorkFolderScope(self.workFolder)<block_start>self.initializeOutputFile()<line_sep>self.writeResults(0.0)<line_sep>self.finalizeOutputFile()<line_sep>self.assertTrue(self.outputMatchesReferenceSolution())<block_end><block_end><def_stmt>test_write_dynamic_deactivation self<block_start>self.post_mode=GiDPostMode.GiD_PostAscii<line_sep>self.output_file_name="test_gid_io_gp_dynamic_deactivation"<line_sep>self.reference_file_name="ref_gid_io_gp_dynamic_deactivation"<with_stmt>WorkFolderScope(self.workFolder)<block_start>self.initializeOutputFile()<line_sep>self.writeResults(0.0)<line_sep>self.deactivateSome()<line_sep>self.writeResults(1.0)<line_sep>self.finalizeOutputFile()<line_sep>self.assertTrue(self.outputMatchesReferenceSolution())<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test=TestGiDIOGaussPoints()<line_sep>test.setUp()<line_sep>test.test_write_active_only()<line_sep>test.tearDown()<line_sep>test.setUp()<line_sep>test.test_write_dynamic_deactivation()<line_sep>test.tearDown()<block_end> |
# __all__ = ['core','utils','constants','render']
<import_from_stmt>.core BayesianTracker __version__<line_sep> |
"""
Test job metadata
System tests for operations on stored job metadata
"""<import_from_stmt>..conftest TEST_SPIDER_NAME<import_from_stmt>.conftest start_job<def_stmt>_assertMetadata meta1 meta2<block_start><def_stmt>_clean m<block_start><return>dict((k v)<for>k,v m.items()<if>k<ne>'updated_time')<block_end>meta1=_clean(meta1)<line_sep>meta2=_clean(meta2)<assert_stmt>meta1<eq>meta2<block_end><def_stmt>test_basic hsclient hsproject<block_start>job=hsproject.push_job(TEST_SPIDER_NAME)<assert_stmt>'auth'<not><in>job.metadata<assert_stmt>'state'<in>job.metadata<assert_stmt>job.metadata['spider']<eq>TEST_SPIDER_NAME<line_sep># set some metadata and forget it
job.metadata['foo']='bar'<assert_stmt>job.metadata['foo']<eq>'bar'<line_sep>job.metadata.expire()<assert_stmt>'foo'<not><in>job.metadata<line_sep># set it again and persist it
job.metadata['foo']='bar'<assert_stmt>job.metadata['foo']<eq>'bar'<line_sep>job.metadata.save()<assert_stmt>job.metadata['foo']<eq>'bar'<line_sep>job.metadata.expire()<assert_stmt>job.metadata['foo']<eq>'bar'<line_sep># refetch the job and compare its metadata
job2=hsclient.get_job(job.key)<line_sep>_assertMetadata(job2.metadata job.metadata)<line_sep># delete foo but do not persist it
<del_stmt>job.metadata['foo']<assert_stmt>'foo'<not><in>job.metadata<line_sep>job.metadata.expire()<assert_stmt>job.metadata.get('foo')<eq>'bar'<line_sep># persist it to be sure it is not removed
job.metadata.save()<assert_stmt>job.metadata.get('foo')<eq>'bar'<line_sep># and finally delete again and persist it
<del_stmt>job.metadata['foo']<assert_stmt>'foo'<not><in>job.metadata<line_sep>job.metadata.save()<assert_stmt>'foo'<not><in>job.metadata<line_sep>job.metadata.expire()<assert_stmt>'foo'<not><in>job.metadata<line_sep>job2=hsclient.get_job(job.key)<line_sep>_assertMetadata(job.metadata job2.metadata)<block_end><def_stmt>test_updating hsproject<block_start>job=hsproject.push_job(TEST_SPIDER_NAME)<assert_stmt>job.metadata.get('foo')<is><none><line_sep>job.update_metadata({'foo':'bar'})<line_sep># metadata attr should change
<assert_stmt>job.metadata.get('foo')<eq>'bar'<line_sep># as well as actual metadata
job=hsproject.get_job(job.key)<assert_stmt>job.metadata.get('foo')<eq>'bar'<line_sep>job.update_metadata({'foo':<none>})<assert_stmt><not>job.metadata.get('foo' <false>)<line_sep># there are ignored fields like: auth, _key, state
state=job.metadata['state']<line_sep>job.update_metadata({'state':'running'})<assert_stmt>job.metadata['state']<eq>state<block_end><def_stmt>test_representation hsproject<block_start>job=hsproject.push_job(TEST_SPIDER_NAME)<line_sep>meta=job.metadata<assert_stmt>str(meta)<ne>repr(meta)<assert_stmt>meta<eq>eval(str(meta))<assert_stmt>meta.__class__.__name__<in>repr(meta)<assert_stmt>meta.__class__.__name__<not><in>str(meta)<block_end><def_stmt>test_jobauth hsclient hsproject<block_start>job=hsproject.push_job(TEST_SPIDER_NAME)<assert_stmt>job.jobauth<is><none><assert_stmt>job.auth<eq>hsproject.auth<assert_stmt>job.items.auth<eq>hsproject.auth<line_sep>samejob=hsclient.get_job(job.key)<assert_stmt>samejob.auth<is><none><assert_stmt>samejob.jobauth<is><none><assert_stmt>samejob.items.auth<eq>hsproject.auth<block_end><def_stmt>test_authtoken hsproject<block_start>pendingjob=hsproject.push_job(TEST_SPIDER_NAME)<line_sep>runningjob=start_job(hsproject)<assert_stmt>pendingjob.key<eq>runningjob.key<assert_stmt>runningjob.jobauth<assert_stmt>runningjob.jobauth<eq>runningjob.auth<assert_stmt>runningjob.auth[0]<eq>runningjob.key<assert_stmt>runningjob.auth[1]<block_end> |
<import_from_stmt>typing Optional List<import_from_stmt>platypush.message.response Response<class_stmt>PrinterResponse(Response)<block_start><def_stmt>__init__ self *args name:str printer_type:int info:str uri:str state:int is_shared:bool state_message:Optional[str]=<none> state_reasons:Optional[List[str]]=<none> location:Optional[str]=<none> uri_supported:Optional[str]=<none> make_and_model:Optional[str]=<none> **kwargs<block_start>super().__init__(*args output={'name':name 'printer_type':printer_type 'info':info 'uri':uri 'state':state 'is_shared':is_shared 'state_message':state_message 'state_reasons':state_reasons 'location':location 'uri_supported':uri_supported 'make_and_model':make_and_model } **kwargs)<block_end><block_end><class_stmt>PrintersResponse(Response)<block_start><def_stmt>__init__ self *args printers:List[PrinterResponse] **kwargs<block_start>super().__init__(*args output={p.output['name']:p.output<for>p printers} **kwargs)<block_end><block_end><class_stmt>PrinterJobAddedResponse(Response)<block_start><def_stmt>__init__ self *args printer:str job_id:int **kwargs<block_start>super().__init__(*args output={'printer':printer 'job_id':job_id } **kwargs)<block_end><block_end># vim:sw=4:ts=4:et:
|
################################################################################
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
"""Some helper functions for implementing quantized layers"""<import_stmt>copy<import_from_stmt>ft_tensorflow_quantization.python.layers.tensor_quantizer QuantDescriptor<class_stmt>QuantMixin()<block_start>"""Mixin class for adding basic quantization logic to quantized modules"""<line_sep>default_quant_desc_input=QuantDescriptor('input')<line_sep>default_quant_desc_kernel=QuantDescriptor('kernel' axis=-1)<line_sep>@classmethod<def_stmt>set_default_quant_desc_input cls value<block_start>"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""<if_stmt><not>isinstance(value QuantDescriptor)<block_start><raise>ValueError("{} is not an instance of QuantDescriptor!")<block_end>cls.default_quant_desc_input=copy.deepcopy(value)<block_end>@classmethod<def_stmt>set_default_quant_desc_kernel cls value<block_start>"""
Args:
value: An instance of :func:`QuantDescriptor <quantization.QuantDescriptor>`
"""<if_stmt><not>isinstance(value QuantDescriptor)<block_start><raise>ValueError("{} is not an instance of QuantDescriptor!")<block_end>cls.default_quant_desc_kernel=copy.deepcopy(value)<block_end><block_end><def_stmt>pop_quant_desc_in_kwargs quant_cls **kwargs<block_start>"""Pop quant descriptors in kwargs
If there is no descriptor in kwargs, the default one in quant_cls will be used
Arguments:
quant_cls: A class that has default quantization descriptors
Keyword Arguments:
quant_desc_input: An instance of QuantDescriptor. Quantization descriptor of input.
quant_desc_kernel: An instance of QuantDescriptor. Quantization descriptor of kernel.
"""<line_sep>quant_desc_input=kwargs.pop('quant_desc_input' quant_cls.default_quant_desc_input)<line_sep>quant_desc_kernel=kwargs.pop('quant_desc_kernel' quant_cls.default_quant_desc_kernel)<line_sep># base layers may use kwargs, so do not check if anything is left in **kwargs
<return>quant_desc_input quant_desc_kernel<block_end> |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""<import_from_stmt>django.conf settings<import_stmt>env<import_from_stmt>api.client BKComponentClient<line_sep>NODEMAN_API_ENTRY=env.BK_NODEMAN_API_ENTRY<or>"{}/{}".format(settings.BK_PAAS_ESB_HOST "api/c/compapi/v2/nodeman")<line_sep>NODEMAN_API_ENTRY_V2=env.BK_NODEMAN_API_ENTRY<or>"{}/{}".format(settings.BK_PAAS_ESB_HOST "api/c/compapi/{bk_api_ver}/nodeman/api".format(bk_api_ver=settings.DEFAULT_BK_API_VER) )<def_stmt>_get_nodeman_api api_name<block_start><return>"{}/{}/".format(NODEMAN_API_ENTRY api_name)<block_end><def_stmt>_get_nodeman_api_v2 api_name<block_start><return>"{}/{}/".format(NODEMAN_API_ENTRY_V2 api_name)<block_end><class_stmt>BKNodeManClient(BKComponentClient)<block_start><def_stmt>create_task self bk_biz_id bk_cloud_id node_type op_type creator hosts<block_start><return>self._request(method="post" url=_get_nodeman_api("create_task") data={"bk_biz_id":bk_biz_id "bk_cloud_id":bk_cloud_id "node_type":node_type "op_type":op_type "creator":creator "hosts":hosts } )<block_end><def_stmt>get_task_info self bk_biz_id job_id<block_start><return>self._request(method="get" url=_get_nodeman_api("get_task_info") data={"bk_biz_id":bk_biz_id "job_id":job_id} )<block_end><def_stmt>get_log self host_id bk_biz_id<block_start><return>self._request(method="get" url=_get_nodeman_api("get_log") data={"host_id":host_id "bk_biz_id":bk_biz_id} )<block_end><def_stmt>search_host_plugin self bk_biz_id pagesize conditions<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("plugin/search") data={"bk_biz_id":bk_biz_id "pagesize":pagesize "conditions":conditions} )<block_end><def_stmt>job_install self job_type hosts **kwargs<block_start>data={"job_type":job_type "hosts":hosts}<line_sep>data.update(kwargs)<line_sep><return>self._request(method="post" url=_get_nodeman_api_v2("job/install") data=data)<block_end><def_stmt>remove_host self bk_biz_id bk_host_id is_proxy<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("remove_host") 
data={"bk_biz_id":bk_biz_id "bk_host_id":bk_host_id "is_proxy":is_proxy} # 是否移除PROXY
)<block_end><def_stmt>job_operate self job_type bk_biz_id bk_host_id<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("job/operate") data={"job_type":job_type "bk_biz_id":bk_biz_id "bk_host_id":bk_host_id} )<block_end><def_stmt>job_details self job_id<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("job/details") data={"job_id":job_id})<block_end><def_stmt>get_job_log self job_id instance_id<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("job/log") data={"job_id":job_id "instance_id":instance_id} )<block_end><def_stmt>cloud_list self<block_start>print(_get_nodeman_api_v2("cloud"))<line_sep><return>self._request(method="get" url=_get_nodeman_api_v2("cloud") data={})<block_end><def_stmt>ap_list self<block_start><return>self._request(method="get" url=_get_nodeman_api_v2("ap") data={})<block_end><def_stmt>plugin_operate self params:dict<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("plugin/operate") data=params)<block_end><def_stmt>plugin_process self category<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("plugin/process") data={"category":category})<block_end><def_stmt>plugin_package self name os<block_start><return>self._request(method="post" url=_get_nodeman_api_v2("plugin/package") data={"name":name "os":os})<block_end><def_stmt>get_rsa_public_key self executor<block_start><return>self._request(method="post" url=_get_nodeman_api("core/api/encrypt_rsa/fetch_public_keys") data={"bk_app_code":settings.APP_CODE "bk_app_secret":settings.SECRET_KEY "bk_username":executor "names":["DEFAULT"] } )<block_end><block_end> |
<import_from_stmt>collections defaultdict<import_from_stmt>game_logic game_service game_decider<import_from_stmt>game_logic.game_decider Decision<import_from_stmt>game_logic.models.player Player<import_from_stmt>game_logic.models.roll Roll<class_stmt>GameRound<block_start><def_stmt>__init__ self game_id:str player1:Player player2:Player p1_roll:Roll p2_roll:Roll<block_start>self.p2_roll=p2_roll<line_sep>self.p1_roll=p1_roll<line_sep>self.game_id=game_id<line_sep>self.player1=player1<line_sep>self.player2=player2<line_sep>self.decision_p1_to_p2=<none><line_sep>history=game_service.get_game_history(game_id)<line_sep>self.round=len(history)<floordiv>2+1<line_sep>self.player1_wins=GameRound.count_wins(self.player1 history)<line_sep>self.player2_wins=GameRound.count_wins(self.player2 history)<line_sep>self.WIN_COUNT_MIN=3<line_sep>self.PLAY_COUNT_MIN=5<line_sep>self.is_over=game_service.is_game_over(game_id)<block_end><def_stmt>play self<block_start><if_stmt>self.is_over<block_start><raise>Exception("Game is already over, cannot play further.")<block_end>d=game_decider.decide(self.p1_roll self.p2_roll)<line_sep>self.decision_p1_to_p2=d<line_sep>self.record_roll(d self.player1 self.p1_roll self.player1_wins)<line_sep>self.record_roll(d.reversed() self.player2 self.p2_roll self.player2_wins)<line_sep>print("RECORDING ROUND")<line_sep>print("Player 1: {}, prior wins {}, outcome: {}".format(self.p1_roll.name self.player1_wins d))<line_sep>print("Player 2: {}, prior wins {}, outcome: {}".format(self.p2_roll.name self.player2_wins d.reversed()))<line_sep>print()<line_sep>self.is_over=game_service.is_game_over(self.game_id)<block_end><def_stmt>record_roll self decision:Decision player:Player roll:Roll win_count:int<block_start>final_round_candidate=self.round<ge>self.PLAY_COUNT_MIN<and>win_count+1<ge>self.WIN_COUNT_MIN<line_sep>wins_game=final_round_candidate<and>decision<eq>Decision.win<line_sep>game_service.record_roll(player roll self.game_id wins_game 
self.round)<block_end>@staticmethod<def_stmt>count_wins player history<block_start>grouped_moves=defaultdict(list)<for_stmt>h history<block_start>grouped_moves[h.roll_number].append(h)<block_end>win_count=0<for_stmt>rnd_data grouped_moves.values()<block_start><if_stmt>len(rnd_data)<ne>2<block_start><continue><block_end>player_move=[m<for>m rnd_data<if>m.player_id<eq>player.id][0]<line_sep>opponent_move=[m<for>m rnd_data<if>m.player_id<ne>player.id][0]<line_sep>player_roll=game_service.find_roll_by_id(player_move.roll_id)<line_sep>opponent_roll=game_service.find_roll_by_id(opponent_move.roll_id)<if_stmt>game_decider.decide(player_roll opponent_roll)<eq>Decision.win<block_start>win_count<augadd>1<block_end><block_end><return>win_count<block_end><block_end> |
<import_from_stmt>panel.io.state state<def_stmt>test_as_cached_key_only <block_start><global>i<line_sep>i=0<def_stmt>test_fn <block_start><global>i<line_sep>i<augadd>1<line_sep><return>i<block_end><assert_stmt>state.as_cached('test' test_fn)<eq>1<assert_stmt>state.as_cached('test' test_fn)<eq>1<line_sep>state.cache.clear()<block_end><def_stmt>test_as_cached_key_and_kwarg <block_start><global>i<line_sep>i=0<def_stmt>test_fn a<block_start><global>i<line_sep>i<augadd>1<line_sep><return>i<block_end><assert_stmt>state.as_cached('test' test_fn a=1)<eq>1<assert_stmt>state.as_cached('test' test_fn a=1)<eq>1<assert_stmt>state.as_cached('test' test_fn a=2)<eq>2<assert_stmt>state.as_cached('test' test_fn a=1)<eq>1<assert_stmt>state.as_cached('test' test_fn a=2)<eq>2<line_sep>state.cache.clear()<block_end> |
# -*- coding: utf-8 -*-
# @Author: luoling
# @Date: 2019-12-06 10:41:34
# @Last Modified by: luoling
# @Last Modified time: 2019-12-18 17:52:49
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_stmt>torch.nn<as>nn<def_stmt>cross_entropy2d input target weight=<none> reduction='none'<block_start>n,c,h,w=input.size()<line_sep>nt,ht,wt=target.size()<line_sep># Handle inconsistent size between input and target
<if_stmt>h<ne>ht<or>w<ne>wt<block_start>input=F.interpolate(input size=(ht wt) mode="bilinear" align_corners=<true>)<block_end># https://zhuanlan.zhihu.com/p/76583143
input=input.transpose(1 2).transpose(2 3).contiguous().view(-1 c)<line_sep>target=target.view(-1)<line_sep># https://www.cnblogs.com/marsggbo/p/10401215.html
loss=F.cross_entropy(input target weight=weight reduction=reduction ignore_index=250)<line_sep><return>loss<block_end><def_stmt>bootstrapped_cross_entropy2d input target K=100000 weight=<none> size_average=<true><block_start>"""High-performance semantic segmentation using very deep fully convolutional networks"""<line_sep>batch_size=input.size()[0]<def_stmt>_bootstrap_xentropy_single input target K weight=<none> size_average=<true><block_start>n,c,h,w=input.size()<line_sep>input=input.transpose(1 2).transpose(2 3).contiguous().view(-1 c)<line_sep>target=target.view(-1)<line_sep>loss=F.cross_entropy(input target weight=weight reduce=<false> size_average=<false> ignore_index=250)<line_sep>topk_loss,_=loss.topk(K)<line_sep>reduced_topk_loss=topk_loss.sum()/K<line_sep><return>reduced_topk_loss<block_end>loss=0.0<line_sep># Bootstrap from each image not entire batch
<for_stmt>i range(batch_size)<block_start>loss<augadd>_bootstrap_xentropy_single(input=torch.unsqueeze(input[i] 0) target=torch.unsqueeze(target[i] 0) K=K weight=weight size_average=size_average )<block_end><return>loss/float(batch_size)<block_end><class_stmt>DiceLoss(nn.Module)<block_start><def_stmt>__init__ self smooth=1. ignore_index=255<block_start>super(DiceLoss self).__init__()<line_sep>self.ignore_index=ignore_index<line_sep>self.smooth=smooth<block_end>@staticmethod<def_stmt>make_one_hot labels classes<block_start>one_hot=torch.cuda.FloatTensor(labels.size()[0] classes labels.size()[2] labels.size()[3]).zero_()<line_sep>target=one_hot.scatter_(1 labels.data 1)<line_sep><return>target<block_end><def_stmt>forward self output target<block_start><if_stmt>self.ignore_index<not><in>range(target.min() target.max())<block_start><if_stmt>(target<eq>self.ignore_index).sum()<g>0<block_start>target[target<eq>self.ignore_index]=target.min()<block_end><block_end>target=self.make_one_hot(target.unsqueeze(dim=1) classes=output.size()[1])<line_sep>output=F.softmax(output dim=1)<line_sep>output_flat=output.contiguous().view(-1)<line_sep>target_flat=target.contiguous().view(-1)<line_sep>intersection=(output_flat<times>target_flat).sum()<line_sep>loss=1-((2.<times>intersection+self.smooth)/(output_flat.sum()+target_flat.sum()+self.smooth))<line_sep><return>loss<block_end><block_end><class_stmt>CriterionAll(nn.Module)<block_start>"""Segmentation aware and Edge aware loss."""<def_stmt>__init__ self alpha=50 ignore_index=255<block_start>super(CriterionAll self).__init__()<line_sep>self.ignore_index=ignore_index<line_sep>self.criterion=nn.CrossEntropyLoss(ignore_index=ignore_index)<line_sep>self.weighted_criterion=nn.CrossEntropyLoss(ignore_index=ignore_index reduction='none')<line_sep>self.alpha=alpha<block_end><def_stmt>parsing_loss self preds target<block_start>h,w=target[0].size(1) target[0].size(2)<line_sep>pos_num=torch.sum(target[1]<eq>1 
dtype=torch.float)<line_sep>neg_num=torch.sum(target[1]<eq>0 dtype=torch.float)<line_sep>weight_pos=neg_num/(pos_num+neg_num)<line_sep>weight_neg=pos_num/(pos_num+neg_num)<line_sep>weights=torch.tensor([weight_neg weight_pos])<line_sep>loss=0<line_sep># Edge-aware branch
preds_edge=preds[1][0]<line_sep>scale_pred=F.interpolate(input=preds_edge size=(h w) mode='bilinear' align_corners=<true>)<line_sep>loss<augadd>F.cross_entropy(scale_pred target[1] weights.cuda() ignore_index=self.ignore_index)<line_sep># Segmentation-aware branch
preds_parsing=preds[0]<if_stmt>isinstance(preds_parsing list)<block_start><for_stmt>idx,pred_parsing enumerate(preds_parsing)<block_start>scale_pred=F.interpolate(input=pred_parsing size=(h w) mode='bilinear' align_corners=<true>)<if_stmt>idx<eq>len(preds_parsing)-1# Is that the last term ?
<block_start>loss<augadd>(torch.mul(self.weighted_criterion(scale_pred target[0]) torch.where(target[1]<eq>0 torch.Tensor([1]).cuda() torch.Tensor([1+self.alpha]).cuda()))).mean()<block_end><else_stmt><block_start>loss<augadd>self.criterion(scale_pred target[0])<block_end><block_end><block_end><else_stmt><block_start>scale_pred=F.interpolate(input=preds_parsing size=(h w) mode='bilinear' align_corners=<true>)<line_sep>loss<augadd>self.criterion(scale_pred target[0])<block_end><return>loss<block_end><def_stmt>forward self preds target<block_start>loss=self.parsing_loss(preds target)<line_sep><return>loss<block_end><block_end> |
# Generated by Django 3.0.8 on 2020-07-14 20:35
<import_stmt>django.contrib.postgres.fields<import_stmt>django.core.validators<import_from_stmt>django.db migrations models<import_stmt>pydis_site.apps.api.models.bot.user<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('api' '0055_merge_20200714_2027') ]<line_sep>operations=[migrations.AlterField(model_name='user' name='roles' field=django.contrib.postgres.fields.ArrayField(base_field=models.BigIntegerField(validators=[django.core.validators.MinValueValidator(limit_value=0 message='Role IDs cannot be negative.') pydis_site.apps.api.models.bot.user._validate_existing_role]) blank=<true> default=list help_text='IDs of roles the user has on the server' size=<none>) ) ]<block_end> |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>json<import_stmt>unittest<import_from_stmt>core results_merger<class_stmt>ResultMergerTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.sample_json_string='''
{
"interrupted": false,
"num_failures_by_type": {},
"seconds_since_epoch": 10.0,
"tests": {},
"version": 3
}
'''<block_end><def_stmt>test_json_version_check_exception self<block_start>json_string='{"seconds_since_epoch": 1.0, "version": 2}'<line_sep>result=json.loads(json_string)<with_stmt>self.assertRaises(results_merger.MergeException)<as>c<block_start>results_merger.merge_test_results([result])<block_end>self.assertTrue('Unsupported version'<in>str(c.exception) 'Version check failure message is not in exception. Exception: %s'%c.exception)<block_end><def_stmt>test_json_required_field_check_exception self<block_start>json_string='{"seconds_since_epoch": 1.0, "version": 3}'<line_sep>result=json.loads(json_string)<with_stmt>self.assertRaises(results_merger.MergeException)<as>c<block_start>results_merger.merge_test_results([result])<block_end>self.assertTrue('Invalid json test results'<in>str(c.exception) 'Required key check failure message is not in exception. Exception: %s'%c.exception)<block_end><def_stmt>test_json_merge_tests self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_3=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS"
},
"Story-2": {
"actual": "SKIP"
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-3": {
"actual": "FAIL"
}
},
"Benchmark-2": {
"Story-1": {
"actual": "SKIP"
}
}
}
''')<line_sep>result_3['tests']=json.loads('''
{
"Benchmark-2": {
"Story-2": {
"actual": "PASS"
}
},
"Benchmark-3": {
"Story-1": {
"actual": "PASS"
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2 result_3])<line_sep>self.assertEqual(len(merged_results['tests']) 3)<line_sep>self.assertEqual(len(merged_results['tests']['Benchmark-1']) 3)<line_sep>self.assertEqual(len(merged_results['tests']['Benchmark-2']) 2)<line_sep>self.assertEqual(len(merged_results['tests']['Benchmark-3']) 1)<block_end><def_stmt>test_json_merge_tests_non_dict_exception self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS"
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL"
}
}
}
''')<with_stmt>self.assertRaises(results_merger.MergeException)<as>c<block_start>results_merger.merge_test_results([result_1 result_2])<block_end>self.assertTrue('not mergable'<in>str(c.exception) 'Merge failure message is not in exception. Exception: %s'%c.exception)<block_end><def_stmt>test_json_merge_interrupted self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_2['interrupted']=<true><line_sep>merged_results=results_merger.merge_test_results([result_1 result_2])<line_sep>self.assertEqual(merged_results['interrupted'] <true>)<block_end><def_stmt>test_json_merge_seconds_since_epoch self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_2['seconds_since_epoch']=5.0<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2])<line_sep>self.assertEqual(merged_results['seconds_since_epoch'] 5.0)<block_end><def_stmt>test_json_merge_nums self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['num_failures_by_type']=json.loads('''
{
"PASS": 1,
"SKIP": 5
}
''')<line_sep>result_2['num_failures_by_type']=json.loads('''
{
"PASS": 3,
"FAIL": 2
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2])<line_sep>self.assertEqual(merged_results['num_failures_by_type']['PASS'] 4)<line_sep>self.assertEqual(merged_results['num_failures_by_type']['SKIP'] 5)<line_sep>self.assertEqual(merged_results['num_failures_by_type']['FAIL'] 2)<block_end><def_stmt>test_json_merge_tests_cross_device self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS PASS",
"artifacts": {
"logs.txt": [
"123/1/logs.txt",
"123/2/logs.txt"
],
"trace.html": [
"123/1/trace.html",
"123/2/trace.html"
]
},
"expected": "PASS",
"is_unexpected": false,
"shard": 0,
"time": 1.0,
"times": [
1.0,
1.1
]
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt",
"456/2/logs.txt"
],
"screenshot.png": [
"456/1/screenshot.png"
]
},
"expected": "PASS",
"is_unexpected": true,
"shard": 1,
"time": 1.0,
"times": [
1.0,
1.2
]
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual(len(merged_results['tests']) 1)<line_sep>self.assertEqual(len(merged_results['tests']['Benchmark-1']) 1)<line_sep>self.assertIn('FAIL' merged_results['tests']['Benchmark-1']['Story-1']['actual'].split())<line_sep>self.assertIn('PASS' merged_results['tests']['Benchmark-1']['Story-1']['actual'].split())<line_sep>self.assertEqual(4 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['logs.txt']))<line_sep>self.assertEqual(2 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['trace.html']))<line_sep>self.assertEqual(1 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['screenshot.png']))<line_sep>self.assertEqual(4 len(merged_results['tests']['Benchmark-1']['Story-1']['times']))<line_sep>self.assertNotIn('shard' merged_results['tests']['Benchmark-1']['Story-1'])<line_sep>self.assertEqual(<true> merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])<block_end><def_stmt>test_json_merge_tests_cross_device_actual_pass self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual('PASS PASS' merged_results['tests']['Benchmark-1']['Story-1']['actual'])<line_sep>self.assertEqual(<false> merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])<block_end><def_stmt>test_json_merge_tests_cross_device_actual_fail self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "FAIL PASS PASS",
"expected": "PASS",
"is_unexpected": true
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"is_unexpected": false
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertIn('PASS' merged_results['tests']['Benchmark-1']['Story-1']['actual'])<line_sep>self.assertIn('FAIL' merged_results['tests']['Benchmark-1']['Story-1']['actual'])<line_sep>self.assertEqual(<true> merged_results['tests']['Benchmark-1']['Story-1']['is_unexpected'])<block_end><def_stmt>test_json_merge_tests_cross_device_artifacts self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"123/1/logs.txt"
]
}
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt"
],
"trace.html": [
"123/1/trace.html"
]
}
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual(2 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['logs.txt']))<line_sep>self.assertEqual(1 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['trace.html']))<block_end><def_stmt>test_json_merge_tests_cross_device_artifacts_missing self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS"
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"artifacts": {
"logs.txt": [
"456/1/logs.txt"
],
"trace.html": [
"123/1/trace.html"
]
}
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual(1 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['logs.txt']))<line_sep>self.assertEqual(1 len(merged_results['tests']['Benchmark-1']['Story-1']['artifacts']['trace.html']))<block_end><def_stmt>test_json_merge_tests_cross_device_times self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 10.0,
"times": [10.0, 15.0, 25.0]
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 20.0,
"times": [20.0, 30.0]
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual(5 len(merged_results['tests']['Benchmark-1']['Story-1']['times']))<line_sep>self.assertEqual(10.0 merged_results['tests']['Benchmark-1']['Story-1']['time'])<block_end><def_stmt>test_json_merge_tests_cross_device_times_missing self<block_start>result_1=json.loads(self.sample_json_string)<line_sep>result_2=json.loads(self.sample_json_string)<line_sep>result_1['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS"
}
}
}
''')<line_sep>result_2['tests']=json.loads('''
{
"Benchmark-1": {
"Story-1": {
"actual": "PASS",
"expected": "PASS",
"time": 20.0,
"times": [20.0, 30.0]
}
}
}
''')<line_sep>merged_results=results_merger.merge_test_results([result_1 result_2] <true>)<line_sep>self.assertEqual(2 len(merged_results['tests']['Benchmark-1']['Story-1']['times']))<line_sep>self.assertEqual(20.0 merged_results['tests']['Benchmark-1']['Story-1']['time'])<block_end><block_end> |
"""
Line styles
-----------
The :meth:`pygmt.Figure.plot` method can plot lines in different styles.
The default line style is a 0.25-point wide, black, solid line, and can be
customized with the ``pen`` parameter.
A *pen* in GMT has three attributes: *width*, *color*, and *style*.
The *style* attribute controls the appearance of the line.
Giving "dotted" or "." yields a dotted line, whereas a dashed pen is requested
with "dashed" or "-". Also combinations of dots and dashes, like ".-" for a
dot-dashed line, are allowed.
For more advanced *pen* attributes, see the GMT cookbook
:gmt-docs:`cookbook/features.html#wpen-attrib`.
"""<import_stmt>numpy<as>np<import_stmt>pygmt<line_sep># Generate a two-point line for plotting
x=np.array([0 7])<line_sep>y=np.array([9 9])<line_sep>fig=pygmt.Figure()<line_sep>fig.basemap(region=[0 10 0 10] projection="X15c/8c" frame='+t"Line Styles"')<line_sep># Plot the line using the default line style
fig.plot(x=x y=y)<line_sep>fig.text(x=x[-1] y=y[-1] text="solid (default)" justify="ML" offset="0.2c/0c")<line_sep># Plot the line using different line styles
<for_stmt>linestyle ["1p,red,-" # dashed line
"1p,blue,." # dotted line
"1p,lightblue,-." # dash-dotted line
"2p,blue,..-" # dot-dot-dashed line
"2p,tomato,--." # dash-dash-dotted line
# A pattern of 4-point-long line segments and 2-point-long gaps between
# segments, with pattern offset by 2 points from the origin
"2p,tomato,4_2:2p" ]<block_start>y<augsub>1# Move the current line down
fig.plot(x=x y=y pen=linestyle)<line_sep>fig.text(x=x[-1] y=y[-1] text=linestyle justify="ML" offset="0.2c/0c")<block_end># Plot the line like a railway track (black/white).
# The trick here is plotting the same line twice but with different line styles
y<augsub>1# move the current line down
fig.plot(x=x y=y pen="5p,black")<line_sep>fig.plot(x=x y=y pen="4p,white,20p_20p")<line_sep>fig.text(x=x[-1] y=y[-1] text="5p,black" justify="ML" offset="0.2c/0.2c")<line_sep>fig.text(x=x[-1] y=y[-1] text="4p,white,20p_20p" justify="ML" offset="0.2c/-0.2c")<line_sep>fig.show()<line_sep> |
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing base test results classes."""<line_sep># The test passed.
PASS='SUCCESS'<line_sep># The test was intentionally skipped.
SKIP='SKIPPED'<line_sep># The test failed.
FAIL='FAILURE'<line_sep># The test caused the containing process to crash.
CRASH='CRASH'<line_sep># The test timed out.
TIMEOUT='TIMEOUT'<line_sep># The test ran, but we couldn't determine what happened.
UNKNOWN='UNKNOWN'<line_sep># The test did not run.
NOTRUN='NOTRUN'<line_sep> |
"""Test the collective reducescatter API on a distributed Ray cluster."""<import_stmt>pytest<import_stmt>ray<import_stmt>cupy<as>cp<import_stmt>torch<import_from_stmt>ray.util.collective.tests.util create_collective_multigpu_workers init_tensors_for_gather_scatter_multigpu<line_sep>@pytest.mark.parametrize("tensor_backend" ["cupy" "torch"])@pytest.mark.parametrize("array_size" [2 2<power>5 2<power>10 2<power>15 2<power>20 [2 2] [5 5 5]])<def_stmt>test_reducescatter_different_array_size ray_start_distributed_multigpu_2_nodes_4_gpus array_size tensor_backend<block_start>world_size=2<line_sep>num_gpu_per_worker=2<line_sep>actual_world_size=world_size<times>num_gpu_per_worker<line_sep>actors,_=create_collective_multigpu_workers(world_size)<line_sep>init_tensors_for_gather_scatter_multigpu(actors array_size=array_size tensor_backend=tensor_backend)<line_sep>results=ray.get([a.do_reducescatter_multigpu.remote()<for>a actors])<for_stmt>i range(world_size)<block_start><for_stmt>j range(num_gpu_per_worker)<block_start><if_stmt>tensor_backend<eq>"cupy"<block_start><assert_stmt>(results[i][j]<eq>cp.ones(array_size dtype=cp.float32)<times>actual_world_size).all()<block_end><else_stmt><block_start><assert_stmt>(results[i][j]<eq>torch.ones(array_size dtype=torch.float32).cuda(j)<times>actual_world_size).all()<block_end><block_end><block_end><block_end><def_stmt>test_reducescatter_torch_cupy ray_start_distributed_multigpu_2_nodes_4_gpus<block_start>world_size=2<line_sep>num_gpu_per_worker=2<line_sep>actual_world_size=world_size<times>num_gpu_per_worker<line_sep>shape=[10 10]<line_sep>actors,_=create_collective_multigpu_workers(world_size)<line_sep># tensor is pytorch, list is cupy
<for_stmt>i,a enumerate(actors)<block_start>ray.get([a.set_buffer.remote(shape tensor_type0="torch" tensor_type1="torch")])<line_sep>ray.get([a.set_list_buffer.remote(shape tensor_type0="cupy" tensor_type1="cupy")])<block_end>results=ray.get([a.do_reducescatter_multigpu.remote()<for>a actors])<for_stmt>i range(world_size)<block_start><for_stmt>j range(num_gpu_per_worker)<block_start><assert_stmt>(results[i][j]<eq>torch.ones(shape dtype=torch.float32).cuda(j)<times>actual_world_size).all()<block_end><block_end># tensor is cupy, list is pytorch
<for_stmt>i,a enumerate(actors)<block_start>ray.get([a.set_buffer.remote(shape tensor_type0="cupy" tensor_type1="cupy")])<line_sep>ray.get([a.set_list_buffer.remote(shape tensor_type0="torch" tensor_type1="torch")])<block_end>results=ray.get([a.do_reducescatter_multigpu.remote()<for>a actors])<for_stmt>i range(world_size)<block_start><for_stmt>j range(num_gpu_per_worker)<block_start><assert_stmt>(results[i][j]<eq>cp.ones(shape dtype=cp.float32)<times>actual_world_size).all()<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>pytest<import_stmt>sys<line_sep>sys.exit(pytest.main(["-v" "-x" __file__]))<block_end> |
<import_stmt>re<import_from_stmt>os environ<import_stmt>boto3<import_stmt>pytest<import_from_stmt>botocore.exceptions ClientError<import_from_stmt>moto mock_efs<import_from_stmt>tests.test_efs.junk_drawer has_status_code<line_sep>ARN_PATT=r"^arn:(?P<Partition>[^:\n]*):(?P<Service>[^:\n]*):(?P<Region>[^:\n]*):(?P<AccountID>[^:\n]*):(?P<Ignore>(?P<ResourceType>[^:\/\n]*)[:\/])?(?P<Resource>.*)$"<line_sep>STRICT_ARN_PATT=r"^arn:aws:[a-z]+:[a-z]{2}-[a-z]+-[0-9]:[0-9]+:[a-z-]+\/[a-z0-9-]+$"<line_sep>SAMPLE_1_PARAMS={"CreationToken":"myFileSystem1" "PerformanceMode":"generalPurpose" "Backup":<true> "Encrypted":<true> "Tags":[{"Key":"Name" "Value":"Test Group1"}] }<line_sep>SAMPLE_2_PARAMS={"CreationToken":"myFileSystem2" "PerformanceMode":"generalPurpose" "Backup":<true> "AvailabilityZoneName":"us-west-2b" "Encrypted":<true> "ThroughputMode":"provisioned" "ProvisionedThroughputInMibps":60 "Tags":[{"Key":"Name" "Value":"Test Group1"}] }<line_sep>@pytest.fixture(scope="function")<def_stmt>aws_credentials <block_start>"""Mocked AWS Credentials for moto."""<line_sep>environ["AWS_ACCESS_KEY_ID"]="testing"<line_sep>environ["AWS_SECRET_ACCESS_KEY"]="testing"<line_sep>environ["AWS_SECURITY_TOKEN"]="testing"<line_sep>environ["AWS_SESSION_TOKEN"]="testing"<block_end>@pytest.fixture(scope="function")<def_stmt>efs aws_credentials<block_start><with_stmt>mock_efs()<block_start><yield>boto3.client("efs" region_name="us-east-1")<block_end><block_end># Testing Create
# ==============
<def_stmt>test_create_file_system_correct_use efs<block_start><import_from_stmt>datetime datetime<line_sep>creation_token="<PASSWORD>"<line_sep>create_fs_resp=efs.create_file_system(CreationToken=creation_token Tags=[{"Key":"Name" "Value":"Test EFS Container"}] )<line_sep># Check the response.
<assert_stmt>has_status_code(create_fs_resp 201)<assert_stmt>create_fs_resp["CreationToken"]<eq>creation_token<assert_stmt>"fs-"<in>create_fs_resp["FileSystemId"]<assert_stmt>isinstance(create_fs_resp["CreationTime"] datetime)<assert_stmt>create_fs_resp["LifeCycleState"]<eq>"available"<assert_stmt>create_fs_resp["Tags"][0]<eq>{"Key":"Name" "Value":"Test EFS Container"}<assert_stmt>create_fs_resp["ThroughputMode"]<eq>"bursting"<assert_stmt>create_fs_resp["PerformanceMode"]<eq>"generalPurpose"<assert_stmt>create_fs_resp["Encrypted"]<eq><false><assert_stmt>create_fs_resp["NumberOfMountTargets"]<eq>0<for_stmt>key_name ["Value" "ValueInIA" "ValueInStandard"]<block_start><assert_stmt>key_name<in>create_fs_resp["SizeInBytes"]<assert_stmt>create_fs_resp["SizeInBytes"][key_name]<eq>0<block_end><assert_stmt>re.match(STRICT_ARN_PATT create_fs_resp["FileSystemArn"])<line_sep># Check the (lack of the) backup policy.
<with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.describe_backup_policy(FileSystemId=create_fs_resp["FileSystemId"])<block_end>resp=exc_info.value.response<assert_stmt>resp["ResponseMetadata"]["HTTPStatusCode"]<eq>404<assert_stmt>"PolicyNotFound"<in>resp["Error"]["Message"]<line_sep># Check the arn in detail
match_obj=re.match(ARN_PATT create_fs_resp["FileSystemArn"])<line_sep>arn_parts=match_obj.groupdict()<assert_stmt>arn_parts["ResourceType"]<eq>"file-system"<assert_stmt>arn_parts["Resource"]<eq>create_fs_resp["FileSystemId"]<assert_stmt>arn_parts["Service"]<eq>"elasticfilesystem"<assert_stmt>arn_parts["AccountID"]<eq>create_fs_resp["OwnerId"]<block_end><def_stmt>test_create_file_system_aws_sample_1 efs<block_start>resp=efs.create_file_system(**SAMPLE_1_PARAMS)<line_sep>resp_metadata=resp.pop("ResponseMetadata")<assert_stmt>resp_metadata["HTTPStatusCode"]<eq>201<assert_stmt>set(resp.keys())<eq>{"OwnerId" "CreationToken" "Encrypted" "PerformanceMode" "FileSystemId" "FileSystemArn" "CreationTime" "LifeCycleState" "NumberOfMountTargets" "SizeInBytes" "Tags" "ThroughputMode" }<assert_stmt>resp["Tags"]<eq>[{"Key":"Name" "Value":"Test Group1"}]<assert_stmt>resp["PerformanceMode"]<eq>"generalPurpose"<assert_stmt>resp["Encrypted"]<line_sep>policy_resp=efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])<assert_stmt>policy_resp["BackupPolicy"]["Status"]<eq>"ENABLED"<block_end><def_stmt>test_create_file_system_aws_sample_2 efs<block_start>resp=efs.create_file_system(**SAMPLE_2_PARAMS)<line_sep>resp_metadata=resp.pop("ResponseMetadata")<assert_stmt>resp_metadata["HTTPStatusCode"]<eq>201<assert_stmt>set(resp.keys())<eq>{"AvailabilityZoneId" "AvailabilityZoneName" "PerformanceMode" "ProvisionedThroughputInMibps" "SizeInBytes" "Tags" "ThroughputMode" "CreationTime" "CreationToken" "Encrypted" "LifeCycleState" "FileSystemId" "FileSystemArn" "NumberOfMountTargets" "OwnerId" 
}<assert_stmt>resp["ProvisionedThroughputInMibps"]<eq>60<assert_stmt>resp["AvailabilityZoneId"]<eq>"usw2-az1"<assert_stmt>resp["AvailabilityZoneName"]<eq>"us-west-2b"<assert_stmt>resp["ThroughputMode"]<eq>"provisioned"<line_sep>policy_resp=efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])<assert_stmt>policy_resp["BackupPolicy"]["Status"]<eq>"ENABLED"<block_end><def_stmt>test_create_file_system_az_name_given_backup_default efs<block_start>resp=efs.create_file_system(AvailabilityZoneName="us-east-1e")<line_sep>policy_resp=efs.describe_backup_policy(FileSystemId=resp["FileSystemId"])<assert_stmt>policy_resp["BackupPolicy"]["Status"]<eq>"ENABLED"<block_end><def_stmt>test_create_file_system_no_creation_token_given efs# Note that from the API docs, it would seem this should create an error. However it
# turns out that botocore just automatically assigns a UUID.
<block_start>resp=efs.create_file_system()<assert_stmt>resp["ResponseMetadata"]["HTTPStatusCode"]<eq>201<assert_stmt>"CreationToken"<in>resp<block_end><def_stmt>test_create_file_system_file_system_already_exists efs<block_start>efs.create_file_system(CreationToken="foo")<with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.create_file_system(CreationToken="foo")<block_end>resp=exc_info.value.response<assert_stmt>resp["ResponseMetadata"]["HTTPStatusCode"]<eq>409<assert_stmt>"FileSystemAlreadyExists"<in>resp["Error"]["Message"]<block_end># Testing Describe
# ================
<def_stmt>test_describe_file_systems_minimal_case efs# Create the file system.
<block_start>create_fs_resp=efs.create_file_system(CreationToken="foobar")<line_sep>create_fs_resp.pop("ResponseMetadata")<line_sep># Describe the file systems.
desc_fs_resp=efs.describe_file_systems()<line_sep>desc_fs_resp_metadata=desc_fs_resp.pop("ResponseMetadata")<assert_stmt>desc_fs_resp_metadata["HTTPStatusCode"]<eq>200<line_sep># Check the list results.
fs_list=desc_fs_resp["FileSystems"]<assert_stmt>len(fs_list)<eq>1<line_sep>file_system=fs_list[0]<assert_stmt>set(file_system.keys())<eq>{"CreationTime" "CreationToken" "Encrypted" "LifeCycleState" "PerformanceMode" "SizeInBytes" "Tags" "ThroughputMode" "FileSystemId" "FileSystemArn" "NumberOfMountTargets" "OwnerId" }<assert_stmt>file_system["FileSystemId"]<eq>create_fs_resp["FileSystemId"]<line_sep># Pop out the timestamps and see if the rest of the description is the same.
create_fs_resp["SizeInBytes"].pop("Timestamp")<line_sep>file_system["SizeInBytes"].pop("Timestamp")<assert_stmt>file_system<eq>create_fs_resp<block_end><def_stmt>test_describe_file_systems_aws_create_sample_2 efs<block_start>efs.create_file_system(**SAMPLE_2_PARAMS)<line_sep># Describe the file systems.
desc_resp=efs.describe_file_systems()<line_sep>desc_fs_resp_metadata=desc_resp.pop("ResponseMetadata")<assert_stmt>desc_fs_resp_metadata["HTTPStatusCode"]<eq>200<line_sep># Check the list results.
fs_list=desc_resp["FileSystems"]<assert_stmt>len(fs_list)<eq>1<line_sep>file_system=fs_list[0]<assert_stmt>set(file_system.keys())<eq>{"AvailabilityZoneId" "AvailabilityZoneName" "CreationTime" "CreationToken" "Encrypted" "LifeCycleState" "PerformanceMode" "ProvisionedThroughputInMibps" "SizeInBytes" "Tags" "ThroughputMode" "FileSystemId" "FileSystemArn" "NumberOfMountTargets" "OwnerId" }<assert_stmt>file_system["ProvisionedThroughputInMibps"]<eq>60<assert_stmt>file_system["AvailabilityZoneId"]<eq>"usw2-az1"<assert_stmt>file_system["AvailabilityZoneName"]<eq>"us-west-2b"<assert_stmt>file_system["ThroughputMode"]<eq>"provisioned"<block_end><def_stmt>test_describe_file_systems_paging efs# Create several file systems.
<block_start><for_stmt>i range(10)<block_start>efs.create_file_system(CreationToken="foobar_{}".format(i))<block_end># First call (Start)
# ------------------
# Call the tested function
resp1=efs.describe_file_systems(MaxItems=4)<line_sep># Check the response status
<assert_stmt>has_status_code(resp1 200)<line_sep># Check content of the result.
resp1.pop("ResponseMetadata")<assert_stmt>set(resp1.keys())<eq>{"NextMarker" "FileSystems"}<assert_stmt>len(resp1["FileSystems"])<eq>4<line_sep>fs_id_set_1={fs["FileSystemId"]<for>fs resp1["FileSystems"]}<line_sep># Second call (Middle)
# --------------------
# Get the next marker.
resp2=efs.describe_file_systems(MaxItems=4 Marker=resp1["NextMarker"])<line_sep># Check the response status
resp2_metadata=resp2.pop("ResponseMetadata")<assert_stmt>resp2_metadata["HTTPStatusCode"]<eq>200<line_sep># Check the response contents.
<assert_stmt>set(resp2.keys())<eq>{"NextMarker" "FileSystems" "Marker"}<assert_stmt>len(resp2["FileSystems"])<eq>4<assert_stmt>resp2["Marker"]<eq>resp1["NextMarker"]<line_sep>fs_id_set_2={fs["FileSystemId"]<for>fs resp2["FileSystems"]}<assert_stmt>fs_id_set_1&fs_id_set_2<eq>set()<line_sep># Third call (End)
# ----------------
# Get the last marker results
resp3=efs.describe_file_systems(MaxItems=4 Marker=resp2["NextMarker"])<line_sep># Check the response status
resp3_metadata=resp3.pop("ResponseMetadata")<assert_stmt>resp3_metadata["HTTPStatusCode"]<eq>200<line_sep># Check the response contents.
<assert_stmt>set(resp3.keys())<eq>{"FileSystems" "Marker"}<assert_stmt>len(resp3["FileSystems"])<eq>2<assert_stmt>resp3["Marker"]<eq>resp2["NextMarker"]<line_sep>fs_id_set_3={fs["FileSystemId"]<for>fs resp3["FileSystems"]}<assert_stmt>fs_id_set_3&(fs_id_set_1|fs_id_set_2)<eq>set()<block_end><def_stmt>test_describe_file_systems_invalid_marker efs<block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.describe_file_systems(Marker="fiddlesticks")<block_end>resp=exc_info.value.response<assert_stmt>has_status_code(resp 400)<assert_stmt>"BadRequest"<in>resp["Error"]["Message"]<block_end><def_stmt>test_describe_file_systems_invalid_creation_token efs<block_start>resp=efs.describe_file_systems(CreationToken="fizzle")<assert_stmt>has_status_code(resp 200)<assert_stmt>len(resp["FileSystems"])<eq>0<block_end><def_stmt>test_describe_file_systems_invalid_file_system_id efs<block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.describe_file_systems(FileSystemId="fs-29879313")<block_end>resp=exc_info.value.response<assert_stmt>has_status_code(resp 404)<assert_stmt>"FileSystemNotFound"<in>resp["Error"]["Message"]<block_end><def_stmt>test_describe_file_system_creation_token_and_file_system_id efs<block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.describe_file_systems(CreationToken="<PASSWORD>" FileSystemId="fs-07987987")<block_end>resp=exc_info.value.response<assert_stmt>has_status_code(resp 400)<assert_stmt>"BadRequest"<in>resp["Error"]["Message"]<block_end># Testing Delete
# ==============
<def_stmt>test_delete_file_system_minimal_case efs# Create the file system
<block_start>resp=efs.create_file_system()<line_sep># Describe the file system, prove it shows up.
desc1=efs.describe_file_systems()<assert_stmt>len(desc1["FileSystems"])<eq>1<assert_stmt>resp["FileSystemId"]<in>{fs["FileSystemId"]<for>fs desc1["FileSystems"]}<line_sep># Delete the file system.
del_resp=efs.delete_file_system(FileSystemId=resp["FileSystemId"])<assert_stmt>has_status_code(del_resp 204)<line_sep># Check that the file system is no longer there.
desc2=efs.describe_file_systems()<assert_stmt>len(desc2["FileSystems"])<eq>0<block_end><def_stmt>test_delete_file_system_invalid_file_system_id efs<block_start><with_stmt>pytest.raises(ClientError)<as>exc_info<block_start>efs.delete_file_system(FileSystemId="fs-2394287")<block_end>resp=exc_info.value.response<assert_stmt>has_status_code(resp 404)<assert_stmt>"FileSystemNotFound"<in>resp["Error"]["Message"]<block_end> |
# -*- coding: utf-8 -*-
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("MUSCLEFITMUONPRODUCER")<line_sep>process.load("FWCore.MessageService.MessageLogger_cfi")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1000))<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring("file:/home/demattia/3C83C26B-8B91-DF11-9CE6-90E6BAE8CC13.root"))<line_sep>process.poolDBESSource=cms.ESSource("PoolDBESSource" BlobStreamerName=cms.untracked.string('TBufferBlobStreamingService') DBParameters=cms.PSet(messageLevel=cms.untracked.int32(2) authenticationPath=cms.untracked.string('/afs/cern.ch/cms/DB/conddb')) timetype=cms.untracked.string('runnumber') # connect = cms.string('sqlite_file:dummyScale.db'),
connect=cms.string('oracle://cms_orcoff_prod/CMS_COND_31X_PHYSICSTOOLS') toGet=cms.VPSet(cms.PSet(record=cms.string('MuScleFitDBobjectRcd') tag=cms.string('MuScleFit_Scale_JPsi_1_3_invNb_innerTrack'))))<line_sep>process.MuScleFitMuonProducer=cms.EDProducer('MuScleFitMuonProducer' MuonLabel=cms.InputTag("muons") DbObjectLabel=cms.untracked.string("") PatMuons=cms.bool(<false>))<line_sep>process.out=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('myOutputFile.root'))<line_sep>process.p=cms.Path(process.MuScleFitMuonProducer)<line_sep>process.e=cms.EndPath(process.out)<line_sep> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.