content
stringlengths
0
1.55M
# pylint: disable=unused-wildcard-import, wildcard-import # # This file is part of FaceDancer. # <import_stmt>asyncio<import_from_stmt>typing Iterable<import_from_stmt>. default_main<import_from_stmt>..future *<import_from_stmt>..classes.hid.usage *<import_from_stmt>..classes.hid.descriptor *<import_from_stmt>..classes.hid.keyboard *<line_sep># Specifies how many simultaneously keys we want to support. KEY_ROLLOVER=8<line_sep>@use_inner_classes_automatically<class_stmt>USBKeyboardDevice(USBDevice)<block_start>""" Simple USB keyboard device. """<line_sep>name:str="USB keyboard device"<line_sep>product_string:str="Non-suspicious Keyboard"<class_stmt>KeyboardConfiguration(USBConfiguration)<block_start>""" Primary USB configuration: act as a keyboard. """<class_stmt>KeyboardInterface(USBInterface)<block_start>""" Core HID interface for our keyboard. """<line_sep>name:str="USB keyboard interface"<line_sep>class_number:int=3<class_stmt>KeyEventEndpoint(USBEndpoint)<block_start>number:int=3<line_sep>direction:USBDirection=USBDirection.IN<line_sep>transfer_type:USBTransferType=USBTransferType.INTERRUPT<line_sep>interval:int=10<block_end># # Raw descriptors -- TODO: build these from their component parts. # <class_stmt>USBClassDescriptor(USBClassDescriptor)<block_start>number:int=USBDescriptorTypeNumber.HID<line_sep>raw:bytes=b'\x09\x21\x10\x01\x00\x01\x22\x2b\x00'<block_end><class_stmt>ReportDescriptor(HIDReportDescriptor)<block_start>fields:tuple=(# Identify ourselves as a keyboard. USAGE_PAGE(HIDUsagePage.GENERIC_DESKTOP) USAGE(HIDGenericDesktopUsage.KEYBOARD) COLLECTION(HIDCollection.APPLICATION) USAGE_PAGE(HIDUsagePage.KEYBOARD) # Modifier keys. # These span the full range of modifier key codes (left control to right meta), # and each has two possible values (0 = unpressed, 1 = pressed). 
USAGE_MINIMUM(KeyboardKeys.LEFTCTRL) USAGE_MAXIMUM(KeyboardKeys.RIGHTMETA) LOGICAL_MINIMUM(0) LOGICAL_MAXIMUM(1) REPORT_SIZE(1) REPORT_COUNT(KeyboardKeys.RIGHTMETA-KeyboardKeys.LEFTCTRL+1) INPUT(variable=<true>) # One byte of constant zero-padding. # This is required for compliance; and Windows will ignore this report # if the zero byte isn't present. REPORT_SIZE(8) REPORT_COUNT(1) INPUT(constant=<true>) # Capture our actual, pressed keyboard keys. # Support a standard, 101-key keyboard; which has # keycodes from 0 (NONE) to 101 (COMPOSE). # # We provide the capability to press up to eight keys # simultaneously. Setting the REPORT_COUNT effectively # sets the key rollover; so 8 reports means we can have # up to eight keys pressed at once. USAGE_MINIMUM(KeyboardKeys.NONE) USAGE_MAXIMUM(KeyboardKeys.COMPOSE) LOGICAL_MINIMUM(KeyboardKeys.NONE) LOGICAL_MAXIMUM(KeyboardKeys.COMPOSE) REPORT_SIZE(8) REPORT_COUNT(KEY_ROLLOVER) INPUT() # End the report. END_COLLECTION() )<block_end>@class_request_handler(number=USBStandardRequests.GET_INTERFACE)@to_this_interface<def_stmt>handle_get_interface_request self request# Silently stall GET_INTERFACE class requests. <block_start>request.stall()<block_end><block_end><block_end><def_stmt>__post_init__ self<block_start>super().__post_init__()<line_sep># Keep track of any pressed keys, and any pressed modifiers. self.active_keys=set()<line_sep>self.modifiers=0<block_end><def_stmt>_generate_hid_report self<arrow>bytes<block_start>""" Generates a single HID report for the given keyboard state. """<line_sep># If we have active keypresses, compose a set of scancodes from them. scancodes=list(self.active_keys)[:KEY_ROLLOVER]+[0]<times>(KEY_ROLLOVER-len(self.active_keys))<line_sep><return>bytes([self.modifiers 0 *scancodes])<block_end><def_stmt>handle_data_requested self endpoint:USBEndpoint<block_start>""" Provide data once per host request. 
"""<line_sep>report=self._generate_hid_report()<line_sep>endpoint.send(report)<block_end># # User-facing API. # <def_stmt>key_down self code:KeyboardKeys<block_start>""" Marks a given key as pressed; should be a scancode from KeyboardKeys. """<line_sep>self.active_keys.add(code)<block_end><def_stmt>key_up self code:KeyboardKeys<block_start>""" Marks a given key as released; should be a scancode from KeyboardKeys. """<line_sep>self.active_keys.remove(code)<block_end><def_stmt>modifier_down self code:KeyboardModifiers<block_start>""" Marks a given modifier as pressed; should be a flag from KeyboardModifiers. """<if_stmt>code<is><not><none><block_start>self.modifiers<augor>code<block_end><block_end><def_stmt>modifier_up self code:KeyboardModifiers<block_start>""" Marks a given modifier as released; should be a flag from KeyboardModifiers. """<if_stmt>code<is><not><none><block_start>self.modifiers<augand>~code<block_end><block_end><async_keyword><def_stmt>type_scancode self code:KeyboardKeys duration:float=0.1 modifiers:KeyboardModifiers=<none><block_start>""" Presses, and then releases, a single key. Parameters: code -- The keyboard key to be pressed's scancode. duration -- How long the given key should be pressed, in seconds. modifiers -- Any modifier keys that should be held while typing. """<line_sep>self.modifier_down(modifiers)<line_sep>self.key_down(code)<line_sep><await>asyncio.sleep(duration)<line_sep>self.key_up(code)<line_sep>self.modifier_up(modifiers)<block_end><async_keyword><def_stmt>type_scancodes self *codes:Iterable[KeyboardKeys] duration:float=0.1<block_start>""" Presses, and then releases, a collection of keys, in order. Parameters: *code -- The keyboard keys to be pressed's scancodes. duration -- How long each key should be pressed, in seconds. 
"""<for_stmt>code codes<block_start><await>self.type_scancode(code duration=duration)<block_end><block_end><async_keyword><def_stmt>type_letter self letter:str duration:float=0.1 modifiers:KeyboardModifiers=<none><block_start>""" Attempts to type a single letter, based on its ASCII string representation. Parameters: letter -- A single-character string literal, to be typed. duration -- How long each key should be pressed, in seconds. modifiers -- Any modifier keys that should be held while typing. """<line_sep>shift,code=KeyboardKeys.get_scancode_for_ascii(letter)<line_sep>modifiers=shift<if>modifiers<is><none><else>modifiers|shift<line_sep><await>self.type_scancode(code modifiers=modifiers duration=duration)<block_end><async_keyword><def_stmt>type_letters self *letters:Iterable[str] duration:float=0.1<block_start>""" Attempts to type a string of letters, based on ASCII string representations. Parameters: *letters -- A collection of single-character string literal, to be typed in order. duration -- How long each key should be pressed, in seconds. """<for_stmt>letter letters<block_start><await>self.type_letter(letter duration=duration)<block_end><block_end><async_keyword><def_stmt>type_string self to_type:str * duration:float=0.1 modifiers:KeyboardModifiers=<none><block_start>""" Attempts to type a python string into the remote host. Parameters: letter -- A collection of single-character string literal, to be typed in order. duration -- How long each key should be pressed, in seconds. modifiers -- Any modifier keys that should be held while typing. """<line_sep>self.modifier_down(modifiers)<for_stmt>letter to_type<block_start><await>self.type_letter(letter duration=duration)<block_end>self.modifier_up(modifiers)<block_end><def_stmt>all_keys_up self * include_modifiers:bool=<true><block_start>""" Releases all keys currently pressed. Parameters: include_modifiers -- If set to false, modifiers will be left at their current states. 
"""<line_sep>self.active_keys.clear()<if_stmt>include_modifiers<block_start>self.all_modifiers_up()<block_end><block_end><def_stmt>all_modifiers_up self<block_start>""" Releases all modifiers currently held. """<line_sep>self.modifiers=0<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>default_main(USBKeyboardDevice)<block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>argparse<import_stmt>h5py<import_stmt>json<import_stmt>os<import_stmt>scipy.misc<import_stmt>sys<import_stmt>re<import_stmt>fnmatch<import_stmt>datetime<import_from_stmt>PIL Image<import_stmt>numpy<as>np<line_sep>''' srun --mem 10000 python lib/datasets/wider/convert_face_to_coco.py --dataset cs6-train-det '''<def_stmt>add_path path<block_start><if_stmt>path<not><in>sys.path<block_start>sys.path.insert(0 path)<block_end><block_end>this_dir=os.path.dirname(__file__)<line_sep>add_path(this_dir)<line_sep># print(this_dir) add_path(os.path.join(this_dir '..' '..'))<import_stmt>utils<import_stmt>utils.boxes<as>bboxs_util<import_stmt>utils.face_utils<as>face_util<def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description='Convert dataset')<line_sep>parser.add_argument('--dataset' help="wider" default='wider' type=str)<line_sep>parser.add_argument('--outdir' help="output dir for json files" default='' type=str)<line_sep>parser.add_argument('--datadir' help="data dir for annotations to be converted" default='' type=str)<line_sep>parser.add_argument('--imdir' help="root directory for loading dataset images" default='' type=str)<line_sep>parser.add_argument('--annotfile' help="directly specify the annotations file" default='' type=str)<line_sep>parser.add_argument('--thresh' help="specify the confidence threshold on detections" default=-1 type=float)<line_sep># if len(sys.argv) == 1: # parser.print_help() # sys.exit(1) <return>parser.parse_args()<block_end><def_stmt>convert_wider_annots data_dir out_dir data_set='WIDER' conf_thresh=0.5<block_start>"""Convert from WIDER FDDB-style format to COCO bounding box"""<line_sep># http://cocodataset.org/#format-data: [x,w,width,height] 
json_name='wider_face_train_annot_coco_style.json'<line_sep>img_id=0<line_sep>ann_id=0<line_sep>cat_id=1<line_sep>print('Starting %s'%data_set)<line_sep>ann_dict={}<line_sep>categories=[{"id":1 "name":'face'}]<line_sep>images=[]<line_sep>annotations=[]<line_sep>ann_file=os.path.join(data_dir 'wider_face_train_annot.txt')<line_sep>wider_annot_dict=face_util.parse_wider_gt(ann_file)# [im-file] = [[x,y,w,h], ...] <for_stmt>filename wider_annot_dict.keys()<block_start><if_stmt>len(images)%50<eq>0<block_start>print("Processed %s images, %s annotations"%(len(images) len(annotations)))<block_end>image={}<line_sep>image['id']=img_id<line_sep>img_id<augadd>1<line_sep>im=Image.open(os.path.join(data_dir filename))<line_sep>image['width']=im.height<line_sep>image['height']=im.width<line_sep>image['file_name']=filename<line_sep>images.append(image)<for_stmt>gt_bbox wider_annot_dict[filename]<block_start>ann={}<line_sep>ann['id']=ann_id<line_sep>ann_id<augadd>1<line_sep>ann['image_id']=image['id']<line_sep>ann['segmentation']=[]<line_sep>ann['category_id']=cat_id# 1:"face" for WIDER ann['iscrowd']=0<line_sep>ann['area']=gt_bbox[2]<times>gt_bbox[3]<line_sep>ann['bbox']=gt_bbox<line_sep>annotations.append(ann)<block_end><block_end>ann_dict['images']=images<line_sep>ann_dict['categories']=categories<line_sep>ann_dict['annotations']=annotations<line_sep>print("Num categories: %s"%len(categories))<line_sep>print("Num images: %s"%len(images))<line_sep>print("Num annotations: %s"%len(annotations))<with_stmt>open(os.path.join(out_dir json_name) 'w' encoding='utf8')<as>outfile<block_start>outfile.write(json.dumps(ann_dict))<block_end><block_end><def_stmt>convert_cs6_annots ann_file im_dir out_dir data_set='CS6-subset' conf_thresh=0.5<block_start>"""Convert from WIDER FDDB-style format to COCO bounding box"""<line_sep># cs6 subsets <if_stmt>data_set<eq>'CS6-subset'<block_start>json_name='cs6-subset_face_train_annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-subset-score'# 
include "scores" as soft-labels <block_start>json_name='cs6-subset_face_train_score-annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-subset-gt'<block_start>json_name='cs6-subset-gt_face_train_annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-train-gt'# full train set of CS6 (86 videos) <block_start>json_name='cs6-train-gt.json'<block_end><elif_stmt>data_set<eq>'CS6-train-det-score'# soft-labels used in distillation <block_start>json_name='cs6-train-det-score_face_train_annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-train-det-score-0.5'# soft-labels used in distillation, keeping dets with score > 0.5 <block_start>json_name='cs6-train-det-score-0.5_face_train_annot_coco_style.json'<line_sep>conf_thresh=0.5<block_end><elif_stmt>data_set<eq>'CS6-train-det'<block_start>json_name='cs6-train-det_face_train_annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-train-det-0.5'<block_start>json_name='cs6-train-det-0.5_face_train_annot_coco_style.json'<block_end><elif_stmt>data_set<eq>'CS6-train-easy-hp'<block_start>json_name='cs6-train-easy-hp.json'<block_end><elif_stmt>data_set<eq>'CS6-train-easy-gt'<block_start>json_name='cs6-train-easy-gt.json'<block_end><elif_stmt>data_set<eq>'CS6-train-easy-det'<block_start>json_name='cs6-train-easy-det.json'<block_end><elif_stmt>data_set<eq>'CS6-train-hp'<block_start>json_name='cs6-train-hp.json'<block_end><else_stmt><block_start><raise>NotImplementedError<block_end>img_id=0<line_sep>ann_id=0<line_sep>cat_id=1<line_sep>print('Starting %s'%data_set)<line_sep>ann_dict={}<line_sep>categories=[{"id":1 "name":'face'}]<line_sep>images=[]<line_sep>annotations=[]<line_sep>wider_annot_dict=face_util.parse_wider_gt(ann_file)# [im-file] = [[x,y,w,h], ...] 
<for_stmt>filename wider_annot_dict.keys()<block_start><if_stmt>len(images)%50<eq>0<block_start>print("Processed %s images, %s annotations"%(len(images) len(annotations)))<block_end><if_stmt>'score'<in>data_set<block_start>dets=np.array(wider_annot_dict[filename])<if_stmt><not>any(dets[: 4]<g>conf_thresh)<block_start><continue><block_end><block_end>image={}<line_sep>image['id']=img_id<line_sep>img_id<augadd>1<line_sep>im=Image.open(os.path.join(im_dir filename))<line_sep>image['width']=im.height<line_sep>image['height']=im.width<line_sep>image['file_name']=filename<line_sep>images.append(image)<for_stmt>gt_bbox wider_annot_dict[filename]<block_start>ann={}<line_sep>ann['id']=ann_id<line_sep>ann_id<augadd>1<line_sep>ann['image_id']=image['id']<line_sep>ann['segmentation']=[]<line_sep>ann['category_id']=cat_id# 1:"face" for WIDER ann['iscrowd']=0<line_sep>ann['area']=gt_bbox[2]<times>gt_bbox[3]<line_sep>ann['bbox']=gt_bbox[:4]<line_sep>ann['dataset']=data_set<line_sep>score=gt_bbox[4]<if_stmt>score<l>conf_thresh<block_start><continue><block_end><if_stmt>'hp'<in>data_set<block_start>ann['score']=score# for soft-label distillation ann['source']=gt_bbox[5]<block_end># annot source: {1: detection, 2:tracker} <if_stmt>data_set<eq>'CS6-train-easy-det'<block_start><if_stmt>gt_bbox[5]<ne>1<block_start><continue># ignore if annot source is not detection (i.e. 
skip HP) <block_end><block_end>annotations.append(ann)<block_end><block_end>ann_dict['images']=images<line_sep>ann_dict['categories']=categories<line_sep>ann_dict['annotations']=annotations<line_sep>print("Num categories: %s"%len(categories))<line_sep>print("Num images: %s"%len(images))<line_sep>print("Num annotations: %s"%len(annotations))<with_stmt>open(os.path.join(out_dir json_name) 'w' encoding='utf8')<as>outfile<block_start>outfile.write(json.dumps(ann_dict indent=2))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>args=parse_args()<if_stmt>args.dataset<eq>"wider"<block_start>convert_wider_annots(args.datadir args.outdir)<block_end># -------------------------------------------------------------------------- # CS6 Train GT # -------------------------------------------------------------------------- <elif_stmt>args.dataset<eq>"cs6-subset"<block_start>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-subset')<block_end><elif_stmt>args.dataset<eq>"cs6-subset-score"<block_start>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-subset-score')<block_end><elif_stmt>args.dataset<eq>"cs6-subset-gt"<block_start>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-subset-gt')<block_end><elif_stmt>args.dataset<eq>"cs6-train-gt"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='data/CS6_annot/annot-format-GT/cs6_gt_annot_train.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-gt')<line_sep># Distillation scores for CS6-Train detections (conf 0.25) <block_end><elif_stmt>args.dataset<eq>"cs6-train-det-score"# set defaults if inputs args are empty 
<block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='data/CS6_annot/annot-format-GT/cs6_det_annot_train_scores.txt'<block_end><block_end># -------------------------------------------------------------------------- # CS6 Train unlabeled # -------------------------------------------------------------------------- # Pseudo-labels from CS6-Train <elif_stmt>args.dataset<eq>"cs6-train-det"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.25.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-det')<block_end><elif_stmt>args.dataset<eq>"cs6-train-det-0.5"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='data/CS6_annot/annot-format-GT/cs6_det_annot_train_conf-0.50.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-det-0.5')<line_sep># Hard positives from CS6-Train <block_end><elif_stmt>args.dataset<eq>"cs6-train-hp"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='Outputs/tracklets/hp-res-cs6/hp_cs6_train.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-hp' conf_thresh=0.5)<block_end># -------------------------------------------------------------------------- # CS6 "EASY" set # -------------------------------------------------------------------------- 
<elif_stmt>args.dataset<eq>"cs6-train-easy-hp"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='Outputs/tracklets/hp-res-cs6/hp_cs6_easy.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-easy-hp')<block_end><elif_stmt>args.dataset<eq>"cs6-train-easy-gt"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='data/CS6_annot/annot-format-GT/cs6_gt_annot_train-easy.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-easy-gt')<block_end><elif_stmt>args.dataset<eq>"cs6-train-easy-det"# set defaults if inputs args are empty <block_start><if_stmt><not>args.annotfile<block_start>args.annotfile='Outputs/tracklets/hp-res-cs6/hp_cs6_train_easy.txt'<block_end><if_stmt><not>args.imdir<block_start>args.imdir='data/CS6_annot'<block_end><if_stmt><not>args.outdir<block_start>args.outdir='data/CS6_annot'<block_end>convert_cs6_annots(args.annotfile args.imdir args.outdir data_set='CS6-train-easy-det')<block_end><else_stmt><block_start>print("Dataset not supported: %s"%args.dataset)<block_end><block_end>
# Copyright 2020 Petuum, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>kubernetes_asyncio<as>kubernetes<import_from_stmt>aiohttp web<import_stmt>logging<import_from_stmt>adaptdl.sched_hints SCHED_HINTS<import_from_stmt>adaptdl_sched.config get_supervisor_port<line_sep>LOG=logging.getLogger(__name__)<line_sep>LOG.setLevel(logging.INFO)<class_stmt>Supervisor<block_start>""" Supervisor provides a simple REST interface for several functionalities. Currently, it has two endpoints: 1. /hints for jobs to send scheduling hints. 2. /discover for finding the pod IPs of a job. """<def_stmt>__init__ self port host='0.0.0.0'<block_start>self._host=host<line_sep>self._port=port<line_sep>self._core_api=kubernetes.client.CoreV1Api()<line_sep>self._objs_api=kubernetes.client.CustomObjectsApi()<block_end><async_keyword><def_stmt>_handle_healthz self request# Health check. <block_start><return>web.Response()<block_end><async_keyword><def_stmt>_handle_discover self request# Long-polling endpoint used for discovering pod IPs for a given job. 
<block_start>namespace=request.match_info["namespace"]<line_sep>name=request.match_info["name"]<line_sep>group=request.match_info["group"]<line_sep>timeout=int(request.query.get("timeout" "30"))<line_sep>pod_ip_list=<none><async_keyword><with_stmt>kubernetes.watch.Watch()<as>w<block_start>stream=w.stream(self._core_api.list_namespaced_pod namespace label_selector="adaptdl/job={}".format(name) field_selector="status.podIP!=" timeout_seconds=timeout)<async_keyword><for_stmt>event stream<block_start>pod=event["object"]<line_sep>replicas=int(pod.metadata.annotations["adaptdl/replicas"])<line_sep>rank=int(pod.metadata.annotations["adaptdl/rank"])<if_stmt>pod.metadata.annotations["adaptdl/group"]<eq>group<block_start><if_stmt>pod_ip_list<is><none><block_start>pod_ip_list=[<none>]<times>replicas<block_end>pod_ip_list[rank]=pod.status.pod_ip<if_stmt>all(pod_ip<is><not><none><for>pod_ip pod_ip_list)<block_start><return>web.json_response(pod_ip_list)<block_end><block_end><block_end><block_end><return>web.json_response(status=408)<block_end># Timeout. <async_keyword><def_stmt>_handle_report self request<block_start>namespace=request.match_info['namespace']<line_sep>name=request.match_info['name']<line_sep>hints=<await>request.json()<line_sep># Drop all unrecognized fields. TODO: validate each client-sent field. hints={k:hints[k]<for>k SCHED_HINTS<if>k<in>hints}<line_sep># Patch only the train field to avoid conflicts with controller. 
patch={"status":{"train":hints}}<line_sep>LOG.info("Patch AdaptDLJob %s/%s: %s" namespace name patch)<line_sep><await>self._objs_api.patch_namespaced_custom_object_status("adaptdl.petuum.com" "v1" namespace "adaptdljobs" name patch)<line_sep><return>web.Response()<block_end><def_stmt>run self<block_start>self.app=web.Application()<line_sep>self.app.add_routes([web.get('/healthz' self._handle_healthz) web.get('/discover/{namespace}/{name}/{group}' self._handle_discover) web.put('/hints/{namespace}/{name}' self._handle_report) ])<line_sep>LOG.info("%s %s" self._host self._port)<line_sep>web.run_app(self.app host=self._host port=self._port)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>logging.basicConfig()<line_sep>kubernetes.config.load_incluster_config()<line_sep>supervisor=Supervisor(get_supervisor_port())<line_sep>supervisor.run()<block_end>
<import_stmt>numpy<as>np<import_stmt>scipy.sparse<as>spar<import_stmt>scipy.sparse.linalg<as>sparla<def_stmt>is_psd_within_tol A tol<block_start>""" Return True if we can certify that A is PSD (up to tolerance "tol"). First we check if A is PSD according to the Gershgorin Circle Theorem. If Gershgorin is inconclusive, then we use an iterative method (from ARPACK, as called through SciPy) to estimate extremal eigenvalues of certain shifted versions of A. The shifts are chosen so that the signs of those eigenvalues tell us the signs of the eigenvalues of A. If there are numerical issues then it's possible that this function returns False even when A is PSD. If you know that you're in that situation, then you should replace A by A = cvxpy.atoms.affine.wraps.psd_wrap(A). Parameters ---------- A : Union[np.ndarray, spar.spmatrx] Symmetric (or Hermitian) NumPy ndarray or SciPy sparse matrix. tol : float Nonnegative. Something very small, like 1e-10. """<if_stmt>gershgorin_psd_check(A tol)<block_start><return><true><block_end><def_stmt>SA_eigsh sigma<block_start><return>sparla.eigsh(A k=1 sigma=sigma which='SA' return_eigenvectors=<false>)<line_sep># Returns the eigenvalue w[i] of A where 1/(w[i] - sigma) is minimized. # # If A - sigma*I is PSD, then w[i] should be equal to the largest # eigenvalue of A. # # If A - sigma*I is not PSD, then w[i] should be the largest eigenvalue # of A where w[i] - sigma < 0. # # We should only call this function with sigma < 0. In this case, if # A - sigma*I is not PSD then A is not PSD, and w[i] < -abs(sigma) is # a negative eigenvalue of A. If A - sigma*I is PSD, then we obviously # have that the smallest eigenvalue of A is >= sigma. <block_end>ev=np.NaN<try_stmt><block_start>ev=SA_eigsh(-tol)# might return np.NaN, or raise exception <block_end><finally_stmt><block_start><if_stmt>np.isnan(ev).all()# will be NaN if A has an eigenvalue which is exactly -tol # (We might also hit this code block for other reasons.) 
<block_start>temp=tol-np.finfo(A.dtype).eps<line_sep>ev=SA_eigsh(-temp)<block_end><block_end><return>np.all(ev<ge>-tol)<block_end><def_stmt>gershgorin_psd_check A tol<block_start>""" Use the Gershgorin Circle Theorem https://en.wikipedia.org/wiki/Gershgorin_circle_theorem As a sufficient condition for A being PSD with tolerance "tol". The computational complexity of this function is O(nnz(A)). Parameters ---------- A : Union[np.ndarray, spar.spmatrx] Symmetric (or Hermitian) NumPy ndarray or SciPy sparse matrix. tol : float Nonnegative. Something very small, like 1e-10. Returns ------- True if A is PSD according to the Gershgorin Circle Theorem. Otherwise, return False. """<if_stmt>isinstance(A spar.spmatrix)<block_start>diag=A.diagonal()<if_stmt>np.any(diag<l>-tol)<block_start><return><false><block_end>A_shift=A-spar.diags(diag)<line_sep>A_shift=np.abs(A_shift)<line_sep>radii=np.array(A_shift.sum(axis=0)).ravel()<line_sep><return>np.all(diag-radii<ge>-tol)<block_end><elif_stmt>isinstance(A np.ndarray)<block_start>diag=np.diag(A)<if_stmt>np.any(diag<l>-tol)<block_start><return><false><block_end>A_shift=A-np.diag(diag)<line_sep>A_shift=np.abs(A_shift)<line_sep>radii=A_shift.sum(axis=0)<line_sep><return>np.all(diag-radii<ge>-tol)<block_end><else_stmt><block_start><raise>ValueError()<block_end><block_end>
<import_stmt>unittest<import_from_stmt>kivy3.loaders OBJLoader<class_stmt>OBJLoaderTest(unittest.TestCase)<block_start><pass><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_from_stmt>all_models.models TbAdminTeamPermissionRelation<class_stmt>TeamPermissionRelationService(object)<block_start>@staticmethod<def_stmt>updateTeamPermission teamPermissionData<block_start>tbModel=TbAdminTeamPermissionRelation.objects.filter(id=teamPermissionData["id"])<line_sep>tbModel.update(**teamPermissionData)<block_end><block_end>
<import_from_stmt>vedacore.misc build_from_cfg registry<def_stmt>build_loss cfg<block_start><return>build_from_cfg(cfg registry 'loss')<block_end>
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the ``klio job run`` pipeline wrapper."""

import pytest

from klio_core import config
from klio_cli import __version__ as klio_cli_version
from klio_cli import cli
from klio_cli.commands.job import run as run_job


@pytest.fixture
def mock_os_environ(mocker):
    """Pin ``USER`` so environment-dependent assertions are stable."""
    return mocker.patch.dict(run_job.base.os.environ, {"USER": "cookiemonster"})


@pytest.fixture
def klio_config():
    """A minimal but valid KlioConfig for a streaming test job."""
    conf = {
        "job_name": "test-job",
        "version": 1,
        "pipeline_options": {
            "worker_harness_container_image": "test-image",
            "region": "some-region",
            "project": "test-project",
        },
        "job_config": {
            "inputs": [
                {
                    "topic": "foo-topic",
                    "subscription": "foo-sub",
                    "data_location": "foo-input-location",
                }
            ],
            "outputs": [
                {
                    "topic": "foo-topic-output",
                    "data_location": "foo-output-location",
                }
            ],
        },
    }
    return config.KlioConfig(conf)


@pytest.fixture
def docker_runtime_config():
    return cli.DockerRuntimeConfig(
        image_tag="foo-123",
        force_build=False,
        config_file_override="klio-job2.yaml",
    )


@pytest.fixture
def run_job_config():
    return cli.RunJobConfig(direct_runner=False, update=False, git_sha="12345678")


@pytest.fixture
def mock_docker_client(mocker):
    """A docker client whose container exits 0 and yields two log lines."""
    client = mocker.Mock()
    container = mocker.Mock()
    container.wait.return_value = {"StatusCode": 0}
    container.logs.return_value = [b"a log line\n", b"another log line\n"]
    client.containers.run.return_value = container
    return client


@pytest.fixture
def run_pipeline(
    klio_config,
    docker_runtime_config,
    run_job_config,
    mock_docker_client,
    mock_os_environ,
    monkeypatch,
):
    """A RunPipeline wired to the mocked docker client."""
    pipeline = run_job.RunPipeline(
        job_dir="/test/dir/jobs/test_run_job",
        klio_config=klio_config,
        docker_runtime_config=docker_runtime_config,
        run_job_config=run_job_config,
    )
    monkeypatch.setattr(pipeline, "_docker_client", mock_docker_client)
    return pipeline


@pytest.mark.parametrize(
    "direct_runner,db_url",
    ((True, None), (False, "https://foo"), (False, None)),
)
def test_run_docker_container(
    direct_runner, db_url, run_pipeline, run_job_config, caplog, mocker, monkeypatch
):
    """Running the container streams logs; Dataflow runs also log a dashboard."""
    run_job_config = run_job_config._replace(direct_runner=direct_runner)
    monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)

    mock_sd_utils = mocker.Mock()
    mock_sd_utils.get_stackdriver_group_url.return_value = db_url
    monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)

    run_flags = {"a": "flag"}
    run_pipeline._run_docker_container(run_flags)

    run_pipeline._docker_client.containers.run.assert_called_once_with(**run_flags)
    container = run_pipeline._docker_client.containers.run.return_value
    container.logs.assert_called_once_with(stream=True)

    if not direct_runner:
        # Dataflow runs look up the Stackdriver dashboard and log one line.
        mock_sd_utils.get_stackdriver_group_url.assert_called_once_with(
            "test-project", "test-job", "some-region"
        )
        assert 1 == len(caplog.records)
    else:
        mock_sd_utils.get_stackdriver_group_url.assert_not_called()
        assert not len(caplog.records)


def test_failure_in_docker_container_returns_nonzero(
    run_pipeline, run_job_config, caplog, mocker, monkeypatch
):
    """A non-zero container exit status is propagated to the caller."""
    mock_sd_utils = mocker.Mock()
    monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)

    container_run = run_pipeline._docker_client.containers.run
    container_run.return_value.wait.return_value = {"StatusCode": 1}

    run_flags = {"a": "flag"}
    assert run_pipeline._run_docker_container(run_flags) == 1

    container_run.assert_called_once_with(**run_flags)
    container = run_pipeline._docker_client.containers.run.return_value
    container.logs.assert_called_once_with(stream=True)
    mock_sd_utils.get_stackdriver_group_url.assert_not_called()


def test_run_docker_container_dashboard_raises(
    run_pipeline, caplog, mocker, monkeypatch
):
    """A dashboard lookup failure does not abort the container run."""
    mock_sd_utils = mocker.Mock()
    mock_sd_utils.get_stackdriver_group_url.side_effect = Exception("fuu")
    monkeypatch.setattr(run_job, "sd_utils", mock_sd_utils)

    run_flags = {"a": "flag"}
    run_pipeline._run_docker_container(run_flags)

    run_pipeline._docker_client.containers.run.assert_called_once_with(**run_flags)
    container = run_pipeline._docker_client.containers.run.return_value
    container.logs.assert_called_once_with(stream=True)
    mock_sd_utils.get_stackdriver_group_url.assert_called_once_with(
        "test-project", "test-job", "some-region"
    )
    assert 1 == len(caplog.records)


def test_get_environment(run_pipeline):
    """The container environment carries credentials, project, and versions."""
    gcreds = "/usr/gcloud/application_default_credentials.json"
    expected = {
        "PYTHONPATH": "/usr/src/app",
        "GOOGLE_APPLICATION_CREDENTIALS": gcreds,
        "USER": "cookiemonster",
        "GOOGLE_CLOUD_PROJECT": "test-project",
        "COMMIT_SHA": "12345678",
        "KLIO_CLI_VERSION": klio_cli_version,
    }
    assert expected == run_pipeline._get_environment()


@pytest.mark.parametrize("config_file", (None, "klio-job2.yaml"))
@pytest.mark.parametrize(
    "image_tag,exp_image_flags",
    ((None, []), ("foo-123", ["--image-tag", "foo-123"])),
)
@pytest.mark.parametrize(
    "update,exp_update_flag",
    ((True, ["--update"]), (False, ["--no-update"]), (None, [])),
)
@pytest.mark.parametrize(
    "direct_runner,exp_runner_flag",
    ((False, []), (True, ["--direct-runner"])),
)
def test_get_command(
    direct_runner,
    exp_runner_flag,
    update,
    exp_update_flag,
    image_tag,
    exp_image_flags,
    config_file,
    run_pipeline,
    monkeypatch,
):
    """Every combination of run options maps to the expected CLI flags."""
    run_job_config = run_pipeline.run_job_config._replace(
        direct_runner=direct_runner, update=update
    )
    monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)

    runtime_config = run_pipeline.docker_runtime_config._replace(
        image_tag=image_tag, config_file_override=config_file
    )
    monkeypatch.setattr(run_pipeline, "docker_runtime_config", runtime_config)

    expected = ["run"] + exp_update_flag + exp_runner_flag + exp_image_flags
    # Flag ordering is not part of the contract; compare sorted.
    assert sorted(expected) == sorted(run_pipeline._get_command())


@pytest.mark.parametrize("direct_runner", (True, False))
def test_setup_docker_image(
    direct_runner, run_pipeline, mock_docker_client, mocker, monkeypatch
):
    """The image is pushed to GCR only for Dataflow (non-direct) runs."""
    run_job_config = run_pipeline.run_job_config._replace(direct_runner=direct_runner)
    monkeypatch.setattr(run_pipeline, "run_job_config", run_job_config)

    mock_super = mocker.Mock()
    monkeypatch.setattr(
        run_job.base.BaseDockerizedPipeline, "_setup_docker_image", mock_super
    )
    mock_docker_utils = mocker.Mock()
    monkeypatch.setattr(run_job, "docker_utils", mock_docker_utils)

    run_pipeline._setup_docker_image()

    mock_super.assert_called_once_with()
    if not direct_runner:
        mock_docker_utils.push_image_to_gcr.assert_called_once_with(
            "test-image:foo-123",
            "foo-123",
            mock_docker_client,
        )
    else:
        mock_docker_utils.push_image_to_gcr.assert_not_called()
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

"""
Unit tests for pinpoint_send_email_message_email_api.py.
"""

import boto3
from botocore.exceptions import ClientError
import pytest

import pinpoint_send_email_message_email_api as api


@pytest.mark.parametrize('error_code', [None, 'TestException'])
def test_send_email_message(make_stubber, error_code):
    """On success the message id is returned; on failure the stubbed
    ClientError propagates with its original error code."""
    client = boto3.client('pinpoint-email')
    stubber = make_stubber(client)

    # Fixed test payload for the stubbed SendEmail call.
    sender = 'test-sender'
    to_addresses = ['test-to']
    cc_addresses = ['test-cc']
    char_set = 'test-charset'
    subject = 'test-subject'
    html_message = '<p>test html</p>'
    text_message = 'test-message'
    message_id = 'test-id'

    stubber.stub_send_email(
        sender, to_addresses, cc_addresses, char_set, subject,
        html_message, text_message, message_id, error_code=error_code)

    if error_code is None:
        got_message_id = api.send_email_message(
            client, sender, to_addresses, cc_addresses, char_set, subject,
            html_message, text_message)
        assert got_message_id == message_id
    else:
        with pytest.raises(ClientError) as exc_info:
            api.send_email_message(
                client, sender, to_addresses, cc_addresses, char_set, subject,
                html_message, text_message)
        assert exc_info.value.response['Error']['Code'] == error_code
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved

import argparse
import os
from pathlib import Path

import progressbar
import torch
import torchaudio
import tqdm


def findAllSeqs(dirName, extension='.flac', loadCache=False):
    r"""List every sequence with the given extension under ``dirName``.

    Returns:
        outSequences: list of ``(speaker_index, relative_path)`` tuples, where
            the path is relative to ``dirName``.
        outSpeakers: the speaker labels, ordered by their index.

    Speaker labels are taken from the first directory level:
        \dirName
            \speaker_label
                \..
                    ...
                        seqName.extension

    When ``loadCache`` is set, a previously saved listing is reused if
    available; the listing is (re)saved to the cache file either way.
    """
    cache_path = os.path.join(dirName, '_seqs_cache.txt')
    if loadCache:
        try:
            sequences, speakers = torch.load(cache_path)
            print(f'Loaded from cache {cache_path} successfully')
            return sequences, speakers
        except OSError as err:
            print(f'Ran in an error while loading {cache_path}: {err}')
        print('Could not load cache, rebuilding')

    # Normalize so that stripping the prefix yields paths relative to dirName.
    if not dirName.endswith(os.sep):
        dirName += os.sep
    prefix_len = len(dirName)

    speaker_index = {}
    sequences = []
    for root, _, filenames in tqdm.tqdm(os.walk(dirName)):
        matching = [f for f in filenames if f.endswith(extension)]
        if not matching:
            continue
        # The top-level directory component identifies the speaker.
        speaker_label = root[prefix_len:].split(os.sep)[0]
        speaker = speaker_index.setdefault(speaker_label, len(speaker_index))
        for filename in matching:
            sequences.append((speaker, os.path.join(root[prefix_len:], filename)))

    speakers = [None] * len(speaker_index)
    for label, index in speaker_index.items():
        speakers[index] = label

    # Best-effort cache save; a failure only costs a rebuild next time.
    try:
        torch.save((sequences, speakers), cache_path)
        print(f'Saved cache file at {cache_path}')
    except OSError as err:
        print(f'Ran in an error while saving {cache_path}: {err}')
    return sequences, speakers


def get_file_duration_ms(path_file):
    """Return the duration of an audio file, expressed in milliseconds.

    NOTE(review): the ``//`` truncates to whole seconds before scaling, so
    the result is quantized to multiples of 1000 ms -- preserved as-is;
    confirm downstream consumers tolerate this.
    """
    info = torchaudio.info(path_file)[0]
    return 1000 * (info.length // info.rate)


def get_lst(path_db, file_list):
    """Build ``(id, full_path, duration_ms)`` rows for each file in file_list.

    NOTE(review): the id column is the full path as well.
    """
    bar = progressbar.ProgressBar(maxval=len(file_list))
    bar.start()
    root = Path(path_db)
    entries = []
    for index, file_name in enumerate(file_list):
        bar.update(index)
        full_path = str(root / file_name)
        duration = get_file_duration_ms(full_path)
        entries.append((full_path, full_path, int(duration)))
    bar.finish()
    return entries


def save_lst(data, path_out):
    """Write one space-separated ``id path duration`` line per entry."""
    with open(path_out, 'w') as out_file:
        out_file.writelines(
            ' '.join((seq_id, path, str(val))) + '\n'
            for seq_id, path, val in data)


def reorder_vad(path_vad, lst):
    """Move each produced ``.vad`` next to its wav; drop auxiliary outputs."""
    vad_root = Path(path_vad)
    for seq_id, full_path_wav, _ in lst:
        produced = (vad_root / seq_id).with_suffix('.vad')
        produced.replace(Path(full_path_wav).with_suffix('.vad'))
        # The VAD tool also emits .fwt/.tsc/.sts side files we don't need.
        for aux_suffix in ('.fwt', '.tsc', '.sts'):
            produced.with_suffix(aux_suffix).unlink(missing_ok=True)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Build the vad inputs")
    parser.add_argument('path_db', type=str,
                        help="Path to the dataset directory")
    parser.add_argument('path_out', type=str)
    parser.add_argument('--ignore_cache', action='store_true')
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--extension', type=str, default='.wav')
    args = parser.parse_args()

    seq_list, _ = findAllSeqs(args.path_db,
                              extension=args.extension,
                              loadCache=not args.ignore_cache)
    if args.debug:
        seq_list = seq_list[:10]
    # Keep only the relative paths (drop the speaker indices).
    seq_list = [item[1] for item in seq_list]

    vad_data = get_lst(args.path_db, seq_list)
    save_lst(vad_data, args.path_out)
"""The KEF Wireless Speakers component."""
import numpy as np

import keras.backend as K

K.set_image_data_format('channels_last')


class DataConfig(object):
    """Input frame configuration and data augmentation setup.

    Each augmentation parameter comes in two flavours: a pool of candidate
    values sampled from at training time (e.g. ``angles``) and a single
    ``fixed_*`` value used for deterministic transforms.

    All pool parameters default to ``None`` and are replaced by a fresh
    per-instance default inside ``__init__``; this avoids the shared
    mutable-default-argument pitfall while keeping the effective defaults
    unchanged.
    """

    def __init__(self,
                 crop_resolution=(256, 256),
                 image_channels=(3,),
                 angles=None,
                 fixed_angle=0,
                 scales=None,
                 fixed_scale=1,
                 trans_x=None,
                 fixed_trans_x=0,
                 trans_y=None,
                 fixed_trans_y=0,
                 hflips=None,
                 fixed_hflip=0,
                 chpower=None,
                 fixed_chpower=1,
                 geoocclusion=None,
                 fixed_geoocclusion=None,
                 subsampling=None,
                 fixed_subsampling=1):
        self.crop_resolution = crop_resolution
        self.image_channels = image_channels
        if K.image_data_format() == 'channels_last':
            self.input_shape = crop_resolution + image_channels
        else:
            self.input_shape = image_channels + crop_resolution
        # Fresh defaults per instance (see class docstring).
        self.angles = [0] if angles is None else angles
        self.fixed_angle = fixed_angle
        self.scales = [1] if scales is None else scales
        self.fixed_scale = fixed_scale
        self.trans_x = [0] if trans_x is None else trans_x
        self.trans_y = [0] if trans_y is None else trans_y
        self.fixed_trans_x = fixed_trans_x
        self.fixed_trans_y = fixed_trans_y
        self.hflips = [0, 1] if hflips is None else hflips
        self.fixed_hflip = fixed_hflip
        self.chpower = (0.01 * np.array(range(90, 110 + 1, 2))
                        if chpower is None else chpower)
        self.fixed_chpower = fixed_chpower
        self.geoocclusion = geoocclusion
        self.fixed_geoocclusion = fixed_geoocclusion
        self.subsampling = [1] if subsampling is None else subsampling
        self.fixed_subsampling = fixed_subsampling

    def get_fixed_config(self):
        """Return the deterministic (non-augmented) transform parameters."""
        return {'angle': self.fixed_angle,
                'scale': self.fixed_scale,
                'transx': self.fixed_trans_x,
                'transy': self.fixed_trans_y,
                'hflip': self.fixed_hflip,
                'chpower': self.fixed_chpower,
                'geoocclusion': self.fixed_geoocclusion,
                'subspl': self.fixed_subsampling}

    def random_data_generator(self):
        """Sample one random augmentation configuration.

        The sampling order below is kept stable so seeded runs remain
        reproducible.
        """
        angle = DataConfig._getrand(self.angles)
        scale = DataConfig._getrand(self.scales)
        trans_x = DataConfig._getrand(self.trans_x)
        trans_y = DataConfig._getrand(self.trans_y)
        hflip = DataConfig._getrand(self.hflips)
        # One independent power factor per color channel.
        chpower = (DataConfig._getrand(self.chpower),
                   DataConfig._getrand(self.chpower),
                   DataConfig._getrand(self.chpower))
        geoocclusion = self.__get_random_geoocclusion()
        subsampling = DataConfig._getrand(self.subsampling)

        return {'angle': angle,
                'scale': scale,
                'transx': trans_x,
                'transy': trans_y,
                'hflip': hflip,
                'chpower': chpower,
                'geoocclusion': geoocclusion,
                'subspl': subsampling}

    def __get_random_geoocclusion(self):
        """Sample a random occlusion box ``(xmin, ymin, xmax, ymax)`` fully
        inside the crop, or return None when occlusion is disabled."""
        if self.geoocclusion is None:
            return None
        w = int(DataConfig._getrand(self.geoocclusion) / 2)
        h = int(DataConfig._getrand(self.geoocclusion) / 2)
        xmin = w + 1
        xmax = self.crop_resolution[0] - xmin
        ymin = h + 1
        ymax = self.crop_resolution[1] - ymin
        x = DataConfig._getrand(range(xmin, xmax, 5))
        y = DataConfig._getrand(range(ymin, ymax, 5))
        return (x - w, y - h, x + w, y + h)

    @staticmethod
    def _getrand(x):
        """Pick a uniformly random element from an indexable pool."""
        return x[np.random.randint(0, len(x))]


# Data generation and configuration setup
mpii_sp_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=np.array(range(-40, 40 + 1, 5)),
    scales=np.array([0.7, 1., 1.3]),
)

pennaction_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=np.array(range(-30, 30 + 1, 5)),
    scales=np.array([0.7, 1.0, 1.3]),
    trans_x=np.array(range(-40, 40 + 1, 5)),
    trans_y=np.array(range(-10, 10 + 1, 5)),
    subsampling=[4, 6, 8],
    fixed_subsampling=6)

pennaction_pe_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=np.array(range(-40, 40 + 1, 5)),
    scales=np.array([0.7, 1.0, 1.3, 2.0]),
    trans_x=np.array(range(-40, 40 + 1, 5)),
    trans_y=np.array(range(-10, 10 + 1, 5)),
)

human36m_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=np.array(range(-10, 10 + 1, 5)),
    scales=np.array([0.8, 1.0, 1.2]),
    trans_x=np.array(range(-20, 20 + 1, 5)),
    trans_y=np.array(range(-4, 4 + 1, 1)),
    geoocclusion=np.array(range(20, 90)),
)

ntu_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=[0],
    scales=np.array([0.7, 1.0, 1.3]),
    trans_x=range(-40, 40 + 1, 5),
    trans_y=range(-10, 10 + 1, 5),
    subsampling=[3, 4, 5],
    fixed_subsampling=4)

ntu_pe_dataconf = DataConfig(
    crop_resolution=(256, 256),
    angles=np.array(range(-10, 10 + 1, 5)),
    scales=np.array([0.7, 1.0, 1.3, 2.0]),
    trans_x=np.array(range(-40, 40 + 1, 5)),
    trans_y=np.array(range(-10, 10 + 1, 5)),
)


class ModelConfig(object):
    """Hyperparameters for models."""

    def __init__(self, input_shape, poselayout,
                 num_actions=None,
                 num_pyramids=8,
                 action_pyramids=None,  # list of pyramids to perform AR
                 num_levels=4,
                 kernel_size=(5, 5),
                 growth=96,
                 image_div=8,
                 predict_rootz=False,
                 downsampling_type='maxpooling',
                 pose_replica=False,
                 num_pose_features=128,
                 num_visual_features=128,
                 sam_alpha=1,
                 dbg_decoupled_pose=False,
                 dbg_decoupled_h=False):
        # None sentinels keep the effective defaults ([] and [1, 2]) while
        # giving each instance its own list (no shared mutable defaults).
        if num_actions is None:
            num_actions = []
        if action_pyramids is None:
            action_pyramids = [1, 2]
        # Explicit validation instead of `assert`: asserts are stripped
        # under `python -O`, and isinstance is the proper type check.
        if not isinstance(num_actions, list):
            raise TypeError('num_actions should be a list')

        self.input_shape = input_shape
        self.num_joints = poselayout.num_joints
        self.dim = poselayout.dim
        self.num_actions = num_actions
        self.num_pyramids = num_pyramids
        self.action_pyramids = action_pyramids
        self.num_levels = num_levels
        self.kernel_size = kernel_size
        self.growth = growth
        self.image_div = image_div
        self.predict_rootz = predict_rootz
        self.downsampling_type = downsampling_type
        self.pose_replica = pose_replica
        self.num_pose_features = num_pose_features
        self.num_visual_features = num_visual_features
        self.sam_alpha = sam_alpha

        """Debugging flags."""
        self.dbg_decoupled_pose = dbg_decoupled_pose
        self.dbg_decoupled_h = dbg_decoupled_h


# Aliases.
mpii_dataconf = mpii_sp_dataconf
"""A sensor that monitors trends in other components."""
from homeassistant.const import Platform

# Domain name of this integration.
DOMAIN = "trend"

# Platforms provided by this integration (binary_sensor only).
PLATFORMS = [Platform.BINARY_SENSOR]
# coding=utf-8 # Author: <NAME> <<EMAIL>> # # License: BSD 3 clause <import_stmt>functools<import_stmt>math<import_stmt>warnings<import_from_stmt>abc abstractmethod ABCMeta<import_stmt>numpy<as>np<import_from_stmt>scipy.stats mode<import_from_stmt>sklearn.base BaseEstimator ClassifierMixin<import_from_stmt>sklearn.ensemble BaseEnsemble BaggingClassifier<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.neighbors KNeighborsClassifier<import_from_stmt>sklearn.preprocessing LabelEncoder<import_from_stmt>sklearn.utils.validation check_X_y check_is_fitted check_array check_random_state <import_from_stmt>deslib.util KNNE<import_from_stmt>deslib.util faiss_knn_wrapper<import_from_stmt>deslib.util.dfp frienemy_pruning_preprocessed<import_from_stmt>deslib.util.instance_hardness hardness_region_competence<class_stmt>BaseDS(BaseEstimator ClassifierMixin)<block_start>"""Base class for a dynamic classifier selection (dcs) and dynamic ensemble selection (des) methods. All dcs and des techniques should inherit from this class. Warning: This class should not be used directly. Use derived classes instead. 
"""<line_sep>__metaclass__=ABCMeta<line_sep>@abstractmethod<def_stmt>__init__ self pool_classifiers=<none> k=7 DFP=<false> with_IH=<false> safe_k=<none> IH_rate=0.30 needs_proba=<false> random_state=<none> knn_classifier='knn' DSEL_perc=0.5 knne=<false> n_jobs=-1 voting=<none><block_start>self.pool_classifiers=pool_classifiers<line_sep>self.k=k<line_sep>self.DFP=DFP<line_sep>self.with_IH=with_IH<line_sep>self.safe_k=safe_k<line_sep>self.IH_rate=IH_rate<line_sep>self.needs_proba=needs_proba<line_sep>self.random_state=random_state<line_sep>self.knn_classifier=knn_classifier<line_sep>self.DSEL_perc=DSEL_perc<line_sep>self.knne=knne<line_sep>self.n_jobs=n_jobs<line_sep>self.voting=voting<line_sep># Check optional dependency <if_stmt>knn_classifier<eq>'faiss'<and><not>faiss_knn_wrapper.is_available()<block_start><raise>ImportError('Using knn_classifier="faiss" requires that the FAISS library '<concat>'be installed.Please check the Installation Guide.')<block_end><block_end><def_stmt>fit self X y<block_start>"""Prepare the DS model by setting the KNN algorithm and pre-processing the information required to apply the DS methods Parameters ---------- X : array of shape (n_samples, n_features) The input data. y : array of shape (n_samples) class labels of each example in X. Returns ------- self """<line_sep>self.random_state_=check_random_state(self.random_state)<line_sep>X,y=check_X_y(X y)<line_sep># Check if the pool of classifiers is None. # If yes, use a BaggingClassifier for the pool. <if_stmt>self.pool_classifiers<is><none><block_start>X_dsel,y_dsel=self._fit_pool_classifiers(X y)<block_end><else_stmt><block_start>self._check_base_classifier_fitted()<line_sep>self.pool_classifiers_=self.pool_classifiers<line_sep>X_dsel=X<line_sep>y_dsel=y<block_end>self.n_classifiers_=len(self.pool_classifiers_)<line_sep># allow base models with feature subspaces. 
<if_stmt>hasattr(self.pool_classifiers_ "estimators_features_")<block_start>self.estimator_features_=np.array(self.pool_classifiers_.estimators_features_)<block_end><else_stmt><block_start>indices=np.arange(X.shape[1])<line_sep>self.estimator_features_=np.tile(indices (self.n_classifiers_ 1))<block_end># check if the input parameters are correct. self._setup_label_encoder(y)<line_sep>y_dsel=self.enc_.transform(y_dsel)<line_sep>self._set_dsel(X_dsel y_dsel)<line_sep>self._set_region_of_competence_algorithm()<line_sep>self._validate_parameters()<line_sep>self.roc_algorithm_.fit(X_dsel y_dsel)<line_sep>self.BKS_DSEL_=self._predict_base(self.DSEL_data_)<line_sep>self.DSEL_processed_=self.BKS_DSEL_<eq>y_dsel[: np.newaxis]<line_sep><return>self<block_end><def_stmt>get_competence_region self query k=<none><block_start>"""Compute the region of competence of the query sample using the data belonging to DSEL. Parameters ---------- query : array of shape (n_samples, n_features) The test examples. k : int (Default = self.k) The number of neighbors used to in the region of competence. Returns ------- dists : array of shape (n_samples, k) The distances between the query and each sample in the region of competence. The vector is ordered in an ascending fashion. idx : array of shape (n_samples, k) Indices of the instances belonging to the region of competence of the given query sample. """<if_stmt>k<is><none><block_start>k=self.k_<block_end>dists,idx=self.roc_algorithm_.kneighbors(query n_neighbors=k return_distance=<true>)<line_sep><return>np.atleast_2d(dists) np.atleast_2d(idx)<block_end>@abstractmethod<def_stmt>estimate_competence self competence_region distances=<none> predictions=<none><block_start>"""estimate the competence of each base classifier :math:`c_{i}` the classification of the query sample :math:`\\mathbf{x}`. Returns an array containing the level of competence estimated for each base classifier. 
The size of the vector is equals to the size of the generated_pool of classifiers. Parameters ---------- competence_region : array of shape (n_samples, n_neighbors) Indices of the k nearest neighbors according for each test sample. distances : array of shape (n_samples, n_neighbors) Distances of the k nearest neighbors according for each test sample. predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for all test examples Returns ------- competences : array (n_classifiers) containing the competence level estimated for each base classifier """<line_sep><pass><block_end>@abstractmethod<def_stmt>select self competences<block_start>"""Select the most competent classifier for the classification of the query sample x. The most competent classifier (dcs) or an ensemble with the most competent classifiers (des) is returned Parameters ---------- competences : array of shape (n_samples, n_classifiers) The estimated competence level of each base classifier for test example Returns ------- selected_classifiers : array containing the selected base classifiers for each test sample """<line_sep><pass><block_end>@abstractmethod<def_stmt>classify_with_ds self predictions probabilities=<none> neighbors=<none> distances=<none> DFP_mask=<none><block_start>"""Predicts the label of the corresponding query sample. Returns the predicted label. Parameters ---------- predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for all test examples probabilities : array of shape (n_samples, n_classifiers, n_classes) Probabilities estimates of each base classifier for all test examples (For methods that always require probabilities from the base classifiers) neighbors : array of shape (n_samples, n_neighbors) Indices of the k nearest neighbors. 
distances : array of shape (n_samples, n_neighbors) Distances from the k nearest neighbors to the query DFP_mask : array of shape (n_samples, n_classifiers) Mask containing 1 for the selected base classifier and 0 otherwise. Returns ------- predicted_label : array of shape (n_samples) The predicted label for each query """<line_sep><pass><block_end>@abstractmethod<def_stmt>predict_proba_with_ds self predictions probabilities neighbors=<none> distances=<none> DFP_mask=<none><block_start>"""Predicts the posterior probabilities of the corresponding query sample. Returns the probability estimates of each class. Parameters ---------- predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for all test examples probabilities : array of shape (n_samples, n_classifiers, n_classes) The predictions of each base classifier for all samples (For methods that always require probabilities from the base classifiers). neighbors : array of shape (n_samples, n_neighbors) Indices of the k nearest neighbors. distances : array of shape (n_samples, n_neighbors) Distances from the k nearest neighbors to the query DFP_mask : array of shape (n_samples, n_classifiers) Mask containing 1 for the selected base classifier and 0 otherwise. Returns ------- predicted_proba: array of shape (n_samples, n_classes) Posterior probabilities estimates for each test example. """<line_sep><pass><block_end><def_stmt>predict self X<block_start>"""Predict the class label for each sample in X. Parameters ---------- X : array of shape (n_samples, n_features) The input data. Returns ------- predicted_labels : array of shape (n_samples) Predicted class label for each sample in X. 
"""<line_sep>X=self._check_predict(X)<line_sep>preds=np.empty(X.shape[0] dtype=np.intp)<line_sep>need_proba=self.needs_proba<or>self.voting<eq>'soft'<line_sep>base_preds,base_probas=self._preprocess_predictions(X need_proba)<line_sep># predict all agree ind_disagreement,ind_all_agree=self._split_agreement(base_preds)<if_stmt>ind_all_agree.size<block_start>preds[ind_all_agree]=base_preds[ind_all_agree 0]<block_end># predict with IH <if_stmt>ind_disagreement.size<block_start>distances,ind_ds_classifier,neighbors=self._IH_prediction(X ind_disagreement preds is_proba=<false>)<line_sep># Predict with DS - Check if there are still samples to be labeled. <if_stmt>ind_ds_classifier.size<block_start>DFP_mask=self._get_DFP_mask(neighbors)<line_sep>inds,sel_preds,sel_probas=self._prepare_indices_DS(base_preds base_probas ind_disagreement ind_ds_classifier)<line_sep>preds_ds=self.classify_with_ds(sel_preds sel_probas neighbors distances DFP_mask)<line_sep>preds[inds]=preds_ds<block_end><block_end><return>self.classes_.take(preds)<block_end><def_stmt>_check_predict self X<block_start>check_is_fitted(self ["DSEL_processed_" "DSEL_data_" "DSEL_target_"])<line_sep>X=check_array(X)<if_stmt>self.n_features_<ne>X.shape[1]<block_start><raise>ValueError("Number of features of the model must "<concat>"match the input. Model n_features is {0} and "<concat>"input n_features is {1}."<concat>"".format(self.n_features_ X.shape[1]))<block_end><return>X<block_end><def_stmt>predict_proba self X<block_start>"""Estimates the posterior probabilities for sample in X. Parameters ---------- X : array of shape (n_samples, n_features) The input data. Returns ------- predicted_proba : array of shape (n_samples, n_classes) Probabilities estimates for each sample in X. 
"""<line_sep>X=self._check_predict(X)<line_sep>self._check_predict_proba()<line_sep>probas=np.zeros((X.shape[0] self.n_classes_))<line_sep>base_preds,base_probas=self._preprocess_predictions(X <true>)<line_sep># predict all agree ind_disagreement,ind_all_agree=self._split_agreement(base_preds)<if_stmt>ind_all_agree.size<block_start>probas[ind_all_agree]=base_probas[ind_all_agree].mean(axis=1)<block_end># predict with IH <if_stmt>ind_disagreement.size<block_start>distances,ind_ds_classifier,neighbors=self._IH_prediction(X ind_disagreement probas is_proba=<true>)<line_sep># Predict with DS - Check if there are still samples to be labeled. <if_stmt>ind_ds_classifier.size<block_start>DFP_mask=self._get_DFP_mask(neighbors)<line_sep>inds,sel_preds,sel_probas=self._prepare_indices_DS(base_preds base_probas ind_disagreement ind_ds_classifier)<line_sep>probas_ds=self.predict_proba_with_ds(sel_preds sel_probas neighbors distances DFP_mask)<line_sep>probas[inds]=probas_ds<block_end><block_end><return>probas<block_end><def_stmt>_preprocess_predictions self X req_proba<block_start><if_stmt>req_proba<block_start>base_probabilities=self._predict_proba_base(X)<line_sep>base_predictions=base_probabilities.argmax(axis=2)<block_end><else_stmt><block_start>base_probabilities=<none><line_sep>base_predictions=self._predict_base(X)<block_end><return>base_predictions base_probabilities<block_end><def_stmt>_split_agreement self base_predictions<block_start>all_agree_vector=BaseDS._all_classifier_agree(base_predictions)<line_sep>ind_all_agree=np.where(all_agree_vector)[0]<line_sep>ind_disagreement=np.where(~all_agree_vector)[0]<line_sep><return>ind_disagreement ind_all_agree<block_end><def_stmt>_IH_prediction self X ind_disagree predicted_proba is_proba=<false><block_start>X_DS=X[ind_disagree 
:]<line_sep>distances,region_competence=self.get_competence_region(X_DS)<if_stmt>self.with_IH<block_start>ind_hard,ind_easy=self._split_easy_samples(region_competence)<line_sep>distances,region_competence=self._predict_easy_samples(X_DS distances ind_disagree ind_easy region_competence predicted_proba is_proba)<block_end><else_stmt># IH was not considered. So all samples go to predict with DS <block_start>ind_hard=np.arange(ind_disagree.size)<block_end><return>distances ind_hard region_competence<block_end><def_stmt>_split_easy_samples self neighbors<block_start>hardness=hardness_region_competence(neighbors self.DSEL_target_ self.safe_k)<line_sep># Get the index associated with the easy and hard samples. # easy samples are classified by the knn. easy_samples_mask=hardness<l>self.IH_rate<line_sep>ind_knn_classifier=np.where(easy_samples_mask)[0]<line_sep>ind_ds_classifier=np.where(~easy_samples_mask)[0]<line_sep><return>ind_ds_classifier ind_knn_classifier<block_end><def_stmt>_predict_easy_samples self X_DS distances ind_disagreement ind_easy neighbors predictions is_proba<block_start><if_stmt>ind_easy.size# Accessing which samples in the original array. <block_start>ind_knn_original_matrix=ind_disagreement[ind_easy]<if_stmt>is_proba<block_start>predictions[ind_knn_original_matrix]=self.roc_algorithm_.predict_proba(X_DS[ind_easy])<block_end><else_stmt><block_start>y_neighbors=self.DSEL_target_[neighbors[ind_easy :self.safe_k]]<line_sep>predictions_knn,_=mode(y_neighbors axis=1)<line_sep>predictions[ind_knn_original_matrix]=predictions_knn.reshape(-1 )<block_end>neighbors=np.delete(neighbors ind_easy axis=0)<line_sep>distances=np.delete(distances ind_easy axis=0)<block_end><return>distances neighbors<block_end><def_stmt>_prepare_indices_DS self base_predictions base_probabilities ind_disagreement ind_ds_classifier# Get the real indices_ of the samples that will be classified # using a DS algorithm. 
<block_start>ind_ds_original_matrix=ind_disagreement[ind_ds_classifier]<if_stmt>base_probabilities<is><not><none><block_start>selected_probas=base_probabilities[ind_ds_original_matrix]<block_end><else_stmt><block_start>selected_probas=<none><block_end>selected_preds=base_predictions[ind_ds_original_matrix]<line_sep><return>ind_ds_original_matrix selected_preds selected_probas<block_end><def_stmt>_get_DFP_mask self neighbors<block_start><if_stmt>self.DFP<block_start>DFP_mask=frienemy_pruning_preprocessed(neighbors self.DSEL_target_ self.DSEL_processed_)<block_end><else_stmt><block_start>DFP_mask=np.ones((neighbors.shape[0] self.n_classifiers_))<block_end><return>DFP_mask<block_end><def_stmt>_fit_pool_classifiers self X y<block_start><if_stmt>len(X)<l>2<block_start><raise>ValueError('More than one sample is needed '<concat>'if the pool of classifiers is not informed.')<block_end># Split the dataset into training (for the base classifier) and # DSEL (for DS) X_train,X_dsel,y_train,y_dsel=train_test_split(X y test_size=self.DSEL_perc random_state=self.random_state_)<line_sep>self.pool_classifiers_=BaggingClassifier(random_state=self.random_state_ n_jobs=self.n_jobs)<line_sep>self.pool_classifiers_.fit(X_train y_train)<line_sep><return>X_dsel y_dsel<block_end><def_stmt>_check_label_encoder self# Check if base classifiers are not using LabelEncoder (the case for # scikit-learn's ensembles): <block_start><if_stmt>isinstance(self.pool_classifiers_ BaseEnsemble)<block_start><if_stmt>np.array_equal(self.pool_classifiers_.classes_ self.pool_classifiers_[0].classes_)<block_start>self.base_already_encoded_=<false><block_end><else_stmt><block_start>self.base_already_encoded_=<true><block_end><block_end><else_stmt><block_start>self.base_already_encoded_=<false><block_end><block_end><def_stmt>_compute_highest_possible_IH self<block_start>highest_IH=(self.safe_k-math.ceil(self.safe_k/self.n_classes_))/self.safe_k<line_sep><return>highest_IH<block_end><def_stmt>_validate_ih 
self<block_start>highest_IH=self._compute_highest_possible_IH()<if_stmt>self.IH_rate<g>highest_IH<block_start>warnings.warn("IH_rate is bigger than the highest possible IH." category=RuntimeWarning)<block_end><block_end><def_stmt>_validate_k self# validate safe_k <block_start><if_stmt>self.k<is><none><block_start>self.k_=self.n_samples_<block_end><elif_stmt>self.k<g>self.n_samples_<block_start>msg="k is bigger than DSEL size. Using All DSEL examples "<concat>"for competence estimation."<line_sep>warnings.warn(msg category=RuntimeWarning)<line_sep>self.k_=self.n_samples_-1<block_end><else_stmt><block_start>self.k_=self.k<block_end># Validate safe_k <if_stmt>self.with_IH<and>self.safe_k<is><none><block_start>self.safe_k=self.k<block_end><block_end><def_stmt>_setup_label_encoder self y<block_start>self._check_label_encoder()<line_sep>self.enc_=LabelEncoder()<line_sep>self.enc_.fit(y)<line_sep>self.classes_=self.enc_.classes_<block_end><def_stmt>_encode_base_labels self y<block_start><if_stmt>self.base_already_encoded_<block_start><return>y<block_end><else_stmt><block_start><return>self.enc_.transform(y)<block_end><block_end><def_stmt>_set_dsel self X y<block_start>"""Pre-Process the input X and y data into the dynamic selection dataset(DSEL) and get information about the structure of the data (e.g., n_classes, n_samples, classes) Parameters ---------- X : array of shape (n_samples, n_features) The Input data. y : array of shape (n_samples) class labels of each sample in X. 
"""<line_sep>self.DSEL_data_=X<line_sep>self.DSEL_target_=y<line_sep>self.n_classes_=self.classes_.size<line_sep>self.n_features_=X.shape[1]<line_sep>self.n_samples_=self.DSEL_target_.size<block_end><def_stmt>_set_region_of_competence_algorithm self<block_start><if_stmt>self.knn_classifier<is><none><or>self.knn_classifier<in>['knn' 'sklearn']<block_start>knn_class=functools.partial(KNeighborsClassifier n_jobs=self.n_jobs algorithm="auto")<block_end><elif_stmt>self.knn_classifier<eq>'faiss'<block_start>knn_class=functools.partial(faiss_knn_wrapper.FaissKNNClassifier n_jobs=self.n_jobs algorithm="brute")<block_end><elif_stmt>callable(self.knn_classifier)<block_start>knn_class=self.knn_classifier<block_end><else_stmt><block_start><raise>ValueError('"knn_classifier" should be one of the following '<concat>'["knn", "faiss", None] or an estimator class.')<block_end><if_stmt>self.knne<block_start>self.knn_class_=functools.partial(KNNE knn_classifier=knn_class n_jobs=self.n_jobs algorithm="auto")<block_end><else_stmt><block_start>self.knn_class_=knn_class<block_end>self.roc_algorithm_=self.knn_class_(n_neighbors=self.k)<block_end><def_stmt>_preprocess_dsel self<block_start>"""Compute the prediction of each base classifier for all samples in DSEL. Used to speed-up the test phase, by not requiring to re-classify training samples during test. Returns ------- DSEL_processed_ : array of shape (n_samples, n_classifiers). Each element indicates whether the base classifier predicted the correct label for the corresponding sample (True), otherwise (False). BKS_DSEL_ : array of shape (n_samples, n_classifiers) Predicted labels of each base classifier for all samples in DSEL. """<line_sep>BKS_dsel=self._predict_base(self.DSEL_data_)<line_sep>processed_dsel=BKS_dsel<eq>self.DSEL_target_[: np.newaxis]<line_sep><return>processed_dsel BKS_dsel<block_end><def_stmt>_predict_base self X<block_start>""" Get the predictions of each base classifier in the pool for all samples in X. 
Parameters ---------- X : array of shape (n_samples, n_features) The test examples. Returns ------- predictions : array of shape (n_samples, n_classifiers) The predictions of each base classifier for all samples in X. """<line_sep>predictions=np.zeros((X.shape[0] self.n_classifiers_) dtype=np.intp)<for_stmt>index,clf enumerate(self.pool_classifiers_)<block_start>labels=clf.predict(X[: self.estimator_features_[index]])<line_sep>predictions[: index]=self._encode_base_labels(labels)<block_end><return>predictions<block_end><def_stmt>_predict_proba_base self X<block_start>""" Get the predictions (probabilities) of each base classifier in the pool for all samples in X. Parameters ---------- X : array of shape (n_samples, n_features) The test examples. Returns ------- probabilities : array of shape (n_samples, n_classifiers, n_classes) Probabilities estimates of each base classifier for all test samples. """<line_sep>probas=np.zeros((X.shape[0] self.n_classifiers_ self.n_classes_))<for_stmt>index,clf enumerate(self.pool_classifiers_)<block_start>probas[: index]=clf.predict_proba(X[: self.estimator_features_[index]])<block_end><return>probas<block_end>@staticmethod<def_stmt>_all_classifier_agree predictions<block_start>"""Check whether there is a difference in opinion among the classifiers in the generated_pool. Parameters ---------- predictions : array of shape (n_samples, n_classifiers) Predictions of the base classifiers for the test examples Returns ------- array of shape (classes) containing True if all classifiers in the generated_pool agrees on the same label, otherwise False. """<line_sep><return>np.all(predictions<eq>predictions[: 0].reshape(-1 1) axis=1)<block_end><def_stmt>_validate_parameters self<block_start>"""Verify if the input parameters are correct (generated_pool and k) raises an error if k < 1 or generated_pool is not fitted. 
"""<if_stmt>self.k<is><not><none><block_start><if_stmt><not>isinstance(self.k int)<block_start><raise>TypeError("parameter k should be an integer")<block_end><if_stmt>self.k<le>1<block_start><raise>ValueError("parameter k must be higher than 1."<concat>"input k is {} ".format(self.k))<block_end><block_end><if_stmt>self.safe_k<is><not><none><block_start><if_stmt><not>isinstance(self.safe_k int)<block_start><raise>TypeError("parameter safe_k should be an integer")<block_end><if_stmt>self.safe_k<le>1<block_start><raise>ValueError("parameter safe_k must be higher than 1."<concat>"input safe_k is {} ".format(self.safe_k))<block_end><block_end># safe_k should be equals or lower the neighborhood size k. <if_stmt>self.safe_k<is><not><none><and>self.k<is><not><none><block_start><if_stmt>self.safe_k<g>self.k<block_start><raise>ValueError("parameter safe_k must be equal or less than parameter k."<concat>"input safe_k is {} and k is {}".format(self.k self.safe_k))<block_end><block_end><if_stmt><not>isinstance(self.IH_rate float)<block_start><raise>TypeError("parameter IH_rate should be a float between [0.0, 0.5]")<block_end><if_stmt>self.IH_rate<l>0<or>self.IH_rate<g>0.5<block_start><raise>ValueError("Parameter IH_rate should be between [0.0, 0.5]."<concat>"IH_rate = {}".format(self.IH_rate))<block_end>self._validate_pool_classifiers()<line_sep># validate the value of k self._validate_k()<line_sep># validate the IH <if_stmt>self.with_IH<block_start>self._validate_ih()<block_end><block_end><def_stmt>_validate_pool_classifiers self<block_start>""" Check the estimator and the n_estimator attribute, set the `base_estimator_` attribute. Raises ------- ValueError If the pool of classifiers is empty. 
"""<if_stmt>self.n_classifiers_<le>1<block_start><raise>ValueError("n_classifiers must be greater than one, "<concat>"got {}.".format(self.n_classifiers_))<block_end><block_end><def_stmt>_check_predict_proba self<block_start>""" Checks if each base classifier in the pool implements the predict_proba method. Raises ------- ValueError If the base classifiers do not implements the predict_proba method. """<for_stmt>clf self.pool_classifiers_<block_start><if_stmt>"predict_proba"<not><in>dir(clf)<block_start><raise>ValueError("All base classifiers should output probability estimates")<block_end><block_end><block_end><def_stmt>_check_base_classifier_fitted self<block_start>""" Checks if each base classifier in the pool is fitted. Raises ------- NotFittedError: If any of the base classifiers is not yet fitted. """<for_stmt>clf self.pool_classifiers<block_start>check_is_fitted(clf "classes_")<block_end><block_end><block_end>
from DaPy.core import Series, SeriesSet
from DaPy.core import is_seq
from copy import copy


def proba2label(seq, labels):
    """Convert probability estimates into class labels.

    Multi-column inputs are treated as one-vs-all scores; single-column
    inputs as binary probabilities.
    """
    if not hasattr(seq, 'shape'):
        seq = SeriesSet(seq)
    if seq.shape[1] > 1:
        return clf_multilabel(seq, labels)
    return clf_binlabel(seq, labels)


def clf_multilabel(seq, groupby=None):
    """Map each row's argmax column index to a label through *groupby*."""
    if is_seq(groupby):
        # A sequence of label names becomes {column index: name}.
        groupby = dict(enumerate(map(str, groupby)))
    if not groupby:
        groupby = dict()
    assert isinstance(groupby, dict), '`labels` must be a list of str or dict object.'
    winners = seq.argmax(axis=1).T.tolist()[0]
    # Fall back to the raw index when no label is registered for it.
    return Series(groupby.get(int(ind), ind) for ind in winners)


def clf_binlabel(seq, labels, cutpoint=0.5):
    """Threshold binary probabilities at *cutpoint* into labels[0]/labels[1]."""
    return Series(labels[0] if proba >= cutpoint else labels[1] for proba in seq)


class BaseClassifier(object):
    """Shared behaviour for DaPy classifiers (label storage and prediction)."""

    def __init__(self):
        self._labels = []

    @property
    def labels(self):
        # Return a copy so callers cannot mutate internal state.
        return copy(self._labels)

    def _calculate_accuracy(self, predict, target):
        """Fraction of rows whose argmax matches between predict and target."""
        pred_labels = predict.argmax(axis=1).T.tolist()[0]
        targ_labels = target.argmax(axis=1).T.tolist()[0]
        hits = sum(1.0 for p, t in zip(pred_labels, targ_labels) if p == t)
        return hits / len(predict)

    def predict_proba(self, X):
        """Predict raw probability estimates for new data.

        Parameter
        ---------
        X : matrix
            The new data that you expect to predict.

        Return
        ------
        Matrix : the predicted probabilities for your data.
        """
        X = self._engine.mat(X)
        return self._forecast(X)

    def predict(self, X):
        """Predict new data with the fitted model and return labels.

        Parameter
        ---------
        X : matrix
            The data that you expect to predict.

        Return
        ------
        Series : the label of each record.
        """
        return proba2label(self.predict_proba(X), self._labels)
""" This tests ensures that there is no memory leakage when params.cpp:ExecuteMulti function does conversion of Unicode to Bytes. In ExecuteMulti function after DoExecute label SQLExecute returns One scenario where SQLParamData function will be used is when there is a varchar(max), a parameter with an unknown size in the INSERT INTO query. In this case, a unicode string is being added to a varchar(max) field. In order to execute the INSERT INTO query, SQLExecute is used. SQLExecute will return SQL_NEED_DATA (SQL_NEED_DATA = 99). Then SQLParamData will be used to create a SQL parameter and will return SQL_NEED_DATA too. When PyUnicode_Check(pInfo->cell) is true, a conversion of Unicode to Bytes is required before it can be used by SQLPutData. During this conversion a new PyObject, called bytes, is created and assigned to objCell. This object never gets Py_XDECREF, and the data will stay stuck in the memory without a reference. This memory leak is only visible when using varchar(max) because varchar(max) required additional allocation of memory that correspond to the size of the input while varchar(100) for example will not case another SQL_NEED_DATA status. 
To see how to reproduce the memory leak, look at https://github.com/mkleehammer/pyodbc/issues/802 """<import_stmt>os<import_stmt>unittest<import_stmt>psutil<import_from_stmt>tests3.testutils add_to_path load_setup_connection_string<line_sep>add_to_path()<import_stmt>pyodbc<line_sep>KB=1024<line_sep>MB=KB<times>1024<line_sep>CONNECTION_STRING=<none><line_sep>CONNECTION_STRING_ERROR_MESSAGE=(r"Please create tmp\setup.cfg file or set a valid value to CONNECTION_STRING.")<line_sep>process=psutil.Process()<def_stmt>memory <block_start><return>process.memory_info().vms<block_end><class_stmt>SQLPutDataUnicodeToBytesMemoryLeakTestCase(unittest.TestCase)<block_start>driver=pyodbc<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>filename=os.path.splitext(os.path.basename(__file__))[0]<line_sep>cls.connection_string=(load_setup_connection_string(filename)<or>CONNECTION_STRING)<if_stmt><not>cls.connection_string<block_start><return>ValueError(CONNECTION_STRING_ERROR_MESSAGE)<block_end><block_end><def_stmt>test__varchar_max__inserting_many_rows__same_memory_usage self<block_start>varchar_limit="max"<line_sep>num_rows=50_000<line_sep>data=[(i f"col{i:06}" 3.14159265<times>(i+1))<for>i range(num_rows)]<line_sep>table_name="pd_test"<line_sep>col_names=["id" "txt_col" "float_col"]<line_sep>ins_sql=f"INSERT INTO {table_name} ({','.join(col_names)}) VALUES ({','.join('?'<times>len(col_names))})"<with_stmt>pyodbc.connect(self.connection_string autocommit=<true>)<as>cnxn# First time adds memory, not related to the test. 
<block_start>self.action(cnxn data ins_sql table_name varchar_limit)<for_stmt>iteration range(3)<block_start>start_memory=memory()<line_sep>self.action(cnxn data ins_sql table_name varchar_limit)<line_sep>end_memory=memory()<line_sep>memory_diff=end_memory-start_memory<line_sep>self.assertLess(memory_diff 100<times>KB)<block_end><block_end><block_end><def_stmt>action self cnxn data ins_sql table_name varchar_limit<block_start>crsr=cnxn.cursor()<line_sep>crsr.execute(f"DROP TABLE IF EXISTS {table_name}")<line_sep>crsr.execute(f"CREATE TABLE {table_name} (id int, txt_col varchar({varchar_limit}), float_col float(53))")<line_sep>crsr.fast_executemany=<true><line_sep>crsr.executemany(ins_sql data)<line_sep>crsr.close()<block_end><block_end><def_stmt>main <block_start>unittest.main()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
""" Copyright (c) 2021, NVIDIA CORPORATION. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<line_sep>""" These APIs are used along with TensorFlow 1.x. They are similar to tf.get_variable() """<import_from_stmt>tensorflow.python.framework ops<import_stmt>threading<line_sep>_SparseOperationKitEmbeddingLayerStoreKey="SparseOperationKitEmbeddingLayerStore"<class_stmt>_EmbeddingLayerStore(threading.local)<block_start><def_stmt>__init__ self<block_start>super(_EmbeddingLayerStore self).__init__()<line_sep>self._embedding_layer_container=dict()<block_end><def_stmt>_create_embedding self name constructor **kwargs<block_start><if_stmt>constructor<is><none><block_start><raise>ValueError("embedding_layer: '{}' does not exist and "<concat>"cannot create it with constructor: "<concat>"{}".format(name constructor))<block_end>embedding_layer=constructor(**kwargs)<line_sep>self._embedding_layer_container[name]=embedding_layer<line_sep><return>embedding_layer<block_end><def_stmt>get_embedding self name constructor=<none> **kwargs<block_start>emb=self._embedding_layer_container.get(name <none>)<line_sep><return>emb<or>self._create_embedding(name constructor=constructor **kwargs)<block_end><block_end><def_stmt>_get_embedding_store <block_start>emb_store=ops.get_collection(_SparseOperationKitEmbeddingLayerStoreKey)<if_stmt><not>emb_store<block_start>emb_store=_EmbeddingLayerStore()<line_sep>ops.add_to_collection(_SparseOperationKitEmbeddingLayerStoreKey 
emb_store)<block_end><else_stmt><block_start>emb_store=emb_store[0]<block_end><return>emb_store<block_end><def_stmt>get_embedding name constructor=<none> **kwargs<block_start>""" This method is used to get or create a embedding layer. Parameters ---------- name: string unique name used to identify embedding layer. constructor: SOK.embedding_layer the construction function used to create a new embedding layer. When creating a new embedding layer, constructor(**kwargs) will be called. kwargs: keyword arguments for new embedding layer creation. Returns ------- embedding_layer: embedding layer created by constructor(**kwargs) Examples -------- .. code-block:: python # here to create a new embedding layer. emb_layer = sok.get_embedding(name="Emb", sok.All2AllDenseEmbedding, max_vocabulary_size_per_gpu=1024, embedding_vec_size=16, slot_num=10, nnz_per_slot=1, dynamic_input=False) outputs = emb_layer(inputs) ... # here to reuse already created embedding layer. emb_layer = sok.get_embedding(name="Emb") outputs_1 = emb_layer(inputs) ... """<line_sep><return>_get_embedding_store().get_embedding(name=name constructor=constructor **kwargs)<block_end>
import re

import sublime
import sublime_plugin

from ..lib.scope_data import COMPILED_HEADS
from ..lib import syntax_paths
from ..lib import inhibit_word_completions

__all__ = (
    'SyntaxDefCompletionsListener',
    'PackagedevCommitScopeCompletionCommand',
)

# a list of kinds used to denote the different kinds of completions
KIND_HEADER_BASE = (sublime.KIND_ID_NAMESPACE, 'K', 'Header Key')
KIND_HEADER_DICT = (sublime.KIND_ID_NAMESPACE, 'D', 'Header Dict')
KIND_HEADER_LIST = (sublime.KIND_ID_NAMESPACE, 'L', 'Header List')
KIND_BRANCH = (sublime.KIND_ID_NAVIGATION, 'b', 'Branch Point')
KIND_CONTEXT = (sublime.KIND_ID_KEYWORD, 'c', 'Context')
KIND_FUNCTION = (sublime.KIND_ID_FUNCTION, 'f', 'Function')
KIND_FUNCTION_TRUE = (sublime.KIND_ID_FUNCTION, 'f', 'Function')
KIND_FUNCTION_FALSE = (sublime.KIND_ID_FUNCTION, 'f', 'Function')
KIND_CAPTURUE = (sublime.KIND_ID_FUNCTION, 'c', 'Captures')
KIND_SCOPE = (sublime.KIND_ID_NAMESPACE, 's', 'Scope')
KIND_VARIABLE = (sublime.KIND_ID_VARIABLE, 'v', 'Variable')

PACKAGE_NAME = __package__.split('.')[0]


def status(msg, window=None, console=False):
    """Show *msg* in the status bar; optionally echo it to the console.

    BUG FIX: `window` was missing from the signature although every call
    site passes it positionally. Previously `status(msg, window)` bound the
    window object to `console` (silently enabling console output) and
    `status(msg, window, console=True)` raised TypeError.
    """
    msg = "[%s] %s" % (PACKAGE_NAME, msg)
    sublime.status_message(msg)
    if console:
        print(msg)


def format_static_completions(templates):
    """Build CompletionItems for the fixed key templates, choosing a snippet
    suffix based on the value shape each key expects."""

    def format_item(trigger, kind, details):
        if kind in (KIND_HEADER_DICT, KIND_CAPTURUE, KIND_CONTEXT):
            completion_format = sublime.COMPLETION_FORMAT_SNIPPET
            suffix = ":\n "
        elif kind is KIND_HEADER_LIST:
            completion_format = sublime.COMPLETION_FORMAT_SNIPPET
            suffix = ":\n - "
        elif kind is KIND_FUNCTION_TRUE:
            completion_format = sublime.COMPLETION_FORMAT_SNIPPET
            suffix = ": ${1:true}"
        elif kind is KIND_FUNCTION_FALSE:
            completion_format = sublime.COMPLETION_FORMAT_SNIPPET
            suffix = ": ${1:false}"
        else:
            completion_format = sublime.COMPLETION_FORMAT_TEXT
            suffix = ": "

        return sublime.CompletionItem(
            trigger=trigger,
            kind=kind,
            details=details,
            completion=trigger + suffix,
            completion_format=completion_format,
        )

    return [format_item(*template) for template in templates]


def format_completions(items, annotation="", kind=sublime.KIND_AMBIGUOUS):
    """Build CompletionItems whose details link to the defining line."""
    format_string = ("Defined at line "
                     "<a href='subl:goto_line {{\"line\": \"{0}\"}}'>{0}</a>")
    return [
        sublime.CompletionItem(
            trigger=trigger,
            annotation=annotation,
            kind=kind,
            details=format_string.format(row) if row is not None else "",
        )
        for trigger, row in items
    ]


class SyntaxDefCompletionsListener(sublime_plugin.ViewEventListener):

    # NOTE: a duplicate ('name', ...) entry was removed here; it produced a
    # doubled completion in the popup.
    base_completions_root = format_static_completions(templates=(
        # base keys
        ('name', KIND_HEADER_BASE, "The display name of the syntax."),
        ('scope', KIND_HEADER_BASE, "The main scope of the syntax."),
        ('version', KIND_HEADER_BASE, "The sublime-syntax version."),
        ('extends', KIND_HEADER_BASE, "The syntax which is to be extended."),
        ('first_line_match', KIND_HEADER_BASE,
         "The pattern to identify a file by content."),
        # dict keys
        ('variables', KIND_HEADER_DICT, 'The variables definitions.'),
        ('contexts', KIND_HEADER_DICT, 'The syntax contexts.'),
        # list keys
        ('file_extensions', KIND_HEADER_LIST, "The list of file extensions."),
        ('hidden_extensions', KIND_HEADER_LIST,
         "The list of hidden file extensions."),
    ))

    base_completions_contexts = format_static_completions(templates=(
        # meta functions
        ('meta_append', KIND_FUNCTION_TRUE,
         "Add rules to the end of the inherit context."),
        ('meta_content_scope', KIND_FUNCTION,
         "A scope to apply to the content of a context."),
        ('meta_include_prototype', KIND_FUNCTION_FALSE,
         "Flag to in-/exclude `prototype`"),
        ('meta_prepend', KIND_FUNCTION_TRUE,
         "Add rules to the beginning of the inherit context."),
        ('meta_scope', KIND_FUNCTION, "A scope to apply to the full context."),
        ('clear_scopes', KIND_FUNCTION, "Clear meta scopes."),
        # matching tokens
        ('match', KIND_FUNCTION, "Pattern to match tokens."),
        # scoping
        ('scope', KIND_FUNCTION, "The scope to apply if a token matches"),
        ('captures', KIND_CAPTURUE, "Assigns scopes to the capture groups."),
        # contexts
        ('push', KIND_FUNCTION, "Push a context onto the stack."),
        ('set', KIND_FUNCTION, "Set a context onto the stack."),
        ('pop', KIND_FUNCTION_TRUE, 'Pop context(s) from the stack.'),
        ('with_prototype', KIND_FUNCTION,
         "Rules to prepend to each context."),
        # branching
        ('branch_point', KIND_FUNCTION,
         "Name of the point to rewind to if a branch fails."),
        ('branch', KIND_FUNCTION, "Push branches onto the stack."),
        ('fail', KIND_FUNCTION, "Fail the current branch."),
        # embedding
        ('embed', KIND_FUNCTION, "A context or syntax to embed."),
        ('embed_scope', KIND_FUNCTION,
         "A scope to apply to the embedded syntax."),
        ('escape', KIND_FUNCTION,
         "A pattern to denote the end of the embedded syntax."),
        ('escape_captures', KIND_CAPTURUE,
         "Assigns scopes to the capture groups."),
        # including
        ('include', KIND_FUNCTION, "Includes a context."),
        ('apply_prototype', KIND_FUNCTION_TRUE,
         "Apply prototype of included syntax."),
    ))

    # These instance variables are for communicating
    # with our PostCompletionsListener instance.
    base_suffix = None

    @classmethod
    def applies_to_primary_view_only(cls):
        return False

    @classmethod
    def is_applicable(cls, settings):
        return settings.get('syntax') == syntax_paths.SYNTAX_DEF

    @inhibit_word_completions
    def on_query_completions(self, prefix, locations):

        def match_selector(selector, offset=0):
            """Verify scope for each location."""
            return all(self.view.match_selector(point + offset, selector)
                       for point in locations)

        # None of our business
        if not match_selector("- comment - (source.regexp - keyword.other.variable)"):
            return None

        # Scope name completions based on our scope_data database
        if match_selector("meta.expect-scope, meta.scope", -1):
            return self._complete_scope(prefix, locations)

        # Auto-completion for include values using the 'contexts' keys and for
        if match_selector("meta.expect-context-list-or-content"
                          " | meta.context-list-or-content", -1):
            return (self._complete_keyword(prefix, locations)
                    + self._complete_context(prefix, locations))

        # Auto-completion for include values using the 'contexts' keys
        if match_selector("meta.expect-context-list | meta.expect-context"
                          " | meta.include | meta.context-list", -1):
            return self._complete_context(prefix, locations) or None

        # Auto-completion for branch points with 'fail' key
        if match_selector("meta.expect-branch-point-reference"
                          " | meta.branch-point-reference", -1):
            return self._complete_branch_point()

        # Auto-completion for variables in match patterns using 'variables' keys
        if match_selector("keyword.other.variable"):
            return self._complete_variable()

        # Standard completions for unmatched regions
        return self._complete_keyword(prefix, locations)

    def _line_prefix(self, point):
        """Return the text of point's line up to (exclusive) point."""
        _, col = self.view.rowcol(point)
        line = self.view.substr(self.view.line(point))
        return line[:col]

    def _complete_context(self, prefix, locations):
        # Verify that we're not looking for an external include
        for point in locations:
            line_prefix = self._line_prefix(point)
            real_prefix = re.search(r"[^,\[ ]*$", line_prefix).group(0)
            if real_prefix.startswith("scope:") or "/" in real_prefix:
                return []  # Don't show any completions here
            elif real_prefix != prefix:
                # print("Unexpected prefix mismatch: {} vs {}".format(real_prefix, prefix))
                return []

        return format_completions(
            [(self.view.substr(r), self.view.rowcol(r.begin())[0] + 1)
             for r in self.view.find_by_selector("entity.name.function.context")],
            annotation="",
            kind=KIND_CONTEXT,
        )

    def _complete_keyword(self, prefix, locations):

        def match_selector(selector, offset=0):
            """Verify scope for each location."""
            return all(self.view.match_selector(point + offset, selector)
                       for point in locations)

        prefixes = set()
        for point in locations:
            # Ensure that we are completing a key name everywhere
            line_prefix = self._line_prefix(point)
            real_prefix = re.sub(r"^ +(- +)*", " ", line_prefix)  # collapse leading whitespace
            prefixes.add(real_prefix)

        if len(prefixes) != 1:
            return None
        else:
            real_prefix = next(iter(prefixes))

        # (Supposedly) all keys start their own line
        match = re.match(r"^(\s*)[\w-]*$", real_prefix)
        if not match:
            return None
        elif not match.group(1):
            return self.base_completions_root
        elif match_selector("meta.block.contexts"):
            return self.base_completions_contexts
        else:
            return None

    def _complete_scope(self, prefix, locations):
        # Determine entire prefix
        window = self.view.window()
        prefixes = set()
        for point in locations:
            *_, real_prefix = self._line_prefix(point).rpartition(" ")
            prefixes.add(real_prefix)

        if len(prefixes) > 1:
            return None
        else:
            real_prefix = next(iter(prefixes))

        # Tokenize the current selector
        tokens = real_prefix.split(".")
        if len(tokens) <= 1:
            # No work to be done here, just return the heads
            return COMPILED_HEADS.to_completion()

        base_scope_completion = self._complete_base_scope(tokens[-1])
        # Browse the nodes and their children
        nodes = COMPILED_HEADS
        for i, token in enumerate(tokens[:-1]):
            node = nodes.find(token)
            if not node:
                status("`%s` not found in scope naming conventions"
                       % '.'.join(tokens[:i + 1]), window)
                break
            nodes = node.children
            if not nodes:
                status("No nodes available in scope naming conventions after `%s`"
                       % '.'.join(tokens[:-1]), window)
                break
        else:
            # Offer to complete from conventions or base scope
            return nodes.to_completion() + base_scope_completion

        # Since we don't have anything to offer,
        # just complete the base scope appendix/suffix.
        return base_scope_completion

    def _complete_base_scope(self, last_token):
        window = self.view.window()
        regions = self.view.find_by_selector("meta.scope string - meta.block")
        if len(regions) != 1:
            status("Warning: Could not determine base scope uniquely",
                   window, console=True)
            self.base_suffix = None
            return []

        base_scope = self.view.substr(regions[0])
        *_, base_suffix = base_scope.rpartition(".")
        # Only useful when the base scope suffix is not already the last one
        # In this case it is even useful to inhibit other completions completely
        if last_token == base_suffix:
            self.base_suffix = None
            return []

        self.base_suffix = base_suffix
        return format_completions([(base_suffix, None)], "base suffix",
                                  KIND_SCOPE)

    def _complete_variable(self):
        return format_completions(
            [(self.view.substr(r), self.view.rowcol(r.begin())[0] + 1)
             for r in self.view.find_by_selector("entity.name.constant")],
            annotation="",
            kind=KIND_VARIABLE,
        )

    def _complete_branch_point(self):
        return format_completions(
            [(self.view.substr(r), self.view.rowcol(r.begin())[0] + 1)
             for r in self.view.find_by_selector("entity.name.label.branch-point")],
            annotation="",
            kind=KIND_BRANCH,
        )


class PackagedevCommitScopeCompletionCommand(sublime_plugin.TextCommand):

    def run(self, edit):
        self.view.run_command("commit_completion")

        # Don't add duplicated dot, if scope is edited in the middle.
        if self.view.substr(self.view.sel()[0].a) == ".":
            return

        # Check if the completed value was the base suffix
        # and don't re-open auto complete in that case.
        listener = sublime_plugin.find_view_event_listener(
            self.view, SyntaxDefCompletionsListener)
        if listener and listener.base_suffix:
            point = self.view.sel()[0].a
            region = sublime.Region(point - len(listener.base_suffix) - 1,
                                    point)
            if self.view.substr(region) == "." + listener.base_suffix:
                return

        # Insert a . and trigger next completion
        self.view.run_command('insert', {'characters': "."})
        self.view.run_command('auto_complete', {'disable_auto_insert': True})
"""Operator to delete an Export Collection by name."""<import_from_stmt>bpy.props StringProperty<import_from_stmt>bpy.types Operator<import_from_stmt>..functions get_export_collection_by_name<class_stmt>EmbarkDeleteExportCollection(Operator)# pylint: disable=too-few-public-methods <block_start>"""Deletes the named Export Collection, but leaves all contained objects in the scene."""<line_sep>bl_idname="object.embark_delete_export_collection"<line_sep>bl_label="Delete Export Collection"<line_sep>bl_description="Deletes this Export Collection, but leaves all contained objects in the scene"<line_sep>bl_options={'REGISTER' 'UNDO'}<line_sep>collection_name:StringProperty(options={'HIDDEN'})<def_stmt>execute self context<block_start>"""Deletes the named Collection."""<line_sep>collection=get_export_collection_by_name(self.collection_name)<if_stmt><not>collection<block_start>self.report({'ERROR'} f"Failed to find an Export Collection named '{self.collection_name}'")<line_sep><return>{'CANCELLED'}<block_end>collection.delete()<line_sep>self.report({'INFO'} f"Deleted Export Collection '{self.collection_name}'")<line_sep><return>{'FINISHED'}<block_end><block_end>
print("Привет".find("т"))<line_sep>print("Привет".find("П"))<line_sep>print("Привет".rfind("т"))<line_sep>print("Привет".rfind("П"))<line_sep>print("Привет".index("т"))<line_sep>print("Привет".index("П"))<line_sep>
# Calculates a^b
def power(a, b):
    """Return ``a`` raised to the non-negative integer power ``b``.

    Uses simple recursion: a^0 == 1 and a^b == a * a^(b-1).

    Raises
    ------
    ValueError
        If ``b`` is negative (the unguarded original recursed until
        RecursionError on negative exponents).
    """
    if b < 0:
        raise ValueError("exponent must be non-negative")
    if b == 0:
        return 1
    return a * power(a, b - 1)


if __name__ == "__main__":
    # Guarded so importing this module (e.g. from tests) does not block
    # on stdin.
    x = int(input())
    y = int(input())
    print(power(x, y))
# Copyright 2020 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Build and install `tensorboard_data_server` into your virtualenv. This bundles the `//tensorboard/data/server` binary built with Bazel. To uninstall, just `pip uninstall tensorboard_data_server`. """<import_stmt>os<import_stmt>pathlib<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<def_stmt>_shell *args **kwargs<block_start>kwargs.setdefault("check" <true>)<line_sep><return>subprocess.run(*args **kwargs)<block_end><def_stmt>main <block_start><if_stmt><not>os.environ.get("VIRTUAL_ENV")<block_start>sys.stderr.write("Not in a virtualenv. 
You probably don't want to do this.\n")<line_sep>sys.exit(1)<block_end>tmpdir=tempfile.TemporaryDirectory()<line_sep>thisdir=pathlib.Path(os.path.dirname(__file__))<line_sep>server_binary=thisdir/".."/"server"<line_sep>build_script=thisdir/"build"<try_stmt><block_start>result=_shell([build_script "--server-binary=%s"%(server_binary ) "--out-dir=%s"%(tmpdir.name ) ] capture_output=<true> )<block_end><except_stmt>subprocess.CalledProcessError<as>e<block_start>sys.stdout.buffer.write(e.stdout)<line_sep>sys.stdout.flush()<line_sep>sys.stderr.buffer.write(e.stderr)<line_sep>sys.stderr.flush()<line_sep><raise><block_end>lines=result.stdout.decode("utf-8").splitlines()<if_stmt>len(lines)<ne>1<block_start><raise>RuntimeError("Expected one line of stdout; got: %r"%lines)<block_end>wheel=lines[0]<line_sep>_shell(["pip" "uninstall" "-y" "tensorboard_data_server"])<line_sep>_shell(["pip" "install" "--" wheel])<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
'''
    Program Description: Calculate factorial of a given number
'''


def calculate_factorial(n):
    """Return n! for a non-negative integer n.

    Raises
    ------
    ValueError
        If n is negative.
    """
    if n == 0:
        return 1
    if n < 0:
        raise ValueError
    fact = 1
    for x in range(1, n + 1):
        fact *= x
    return fact


if __name__ == "__main__":
    # Guarded so importing this module (e.g. from tests) does not block
    # on stdin; a non-numeric or negative input falls into the handler
    # below (int() and calculate_factorial both raise ValueError).
    print('N = ', end='')
    try:
        result = calculate_factorial(int(input()))
        print('Output =', result)
    except ValueError:
        print('Only positive numbers are supported')
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import runtime.temp_file as temp_file
import runtime.testing as testing
from runtime.feature.column import NumericColumn
from runtime.feature.field_desc import FieldDesc
from runtime.local import evaluate, explain, pred, train


class TestXGBoostTrain(unittest.TestCase):
    """End-to-end train / predict / explain / evaluate round-trip for an
    XGBoost model on the iris dataset (MySQL backend only)."""

    @unittest.skipUnless(testing.get_driver() == "mysql",
                         "skip non mysql tests")
    def test_train(self):
        ds = testing.get_datasource()
        original_sql = """SELECT * FROM iris.train
        TO TRAIN xgboost.gbtree
        WITH objective="multi:softmax", num_boost_round=20, num_class=3,
        validation.select="SELECT * FROM iris.test"
        INTO iris.xgboost_train_model_test;
        """
        select = "SELECT * FROM iris.train"
        val_select = "SELECT * FROM iris.test"
        train_params = {"num_boost_round": 20}
        model_params = {"num_class": 3, "objective": "multi:softmax"}

        # Train, then verify multi-class error on both splits.
        with temp_file.TemporaryDirectory(as_cwd=True):
            eval_result = train(ds, original_sql, select, val_select,
                                "xgboost.gbtree", "", None,
                                NumericColumn(FieldDesc(name="class")),
                                model_params, train_params, None,
                                "iris.xgboost_train_model_test", None)
            self.assertLess(eval_result['train']['merror'][-1], 0.01)
            self.assertLess(eval_result['validate']['merror'][-1], 0.01)

        # Predict with the trained model.
        with temp_file.TemporaryDirectory(as_cwd=True):
            pred_original_sql = """SELECT * FROM iris.test
            TO PREDICT iris.xgboost_pred_result.pred_val
            USING iris.xgboost_train_model_test;"""
            pred(ds, pred_original_sql, "SELECT * FROM iris.test",
                 "iris.xgboost_train_model_test", "pred_val", model_params,
                 "iris.xgboost_pred_result")

        # Explain the trained model.
        with temp_file.TemporaryDirectory(as_cwd=True):
            explain_original_sql = """SELECT * FROM iris.test
            TO EXPLAIN iris.xgboost_train_model_test
            INTO iris.xgboost_explain_result;"""
            explain(ds, explain_original_sql, "SELECT * FROM iris.test",
                    "iris.xgboost_train_model_test", model_params,
                    "iris.xgboost_explain_result")

        # Evaluate the trained model.
        with temp_file.TemporaryDirectory(as_cwd=True):
            evaluate_original_sql = """SELECT * FROM iris.test
            TO EVALUATE iris.xgboost_train_model_test
            WITH label_col=class
            INTO iris.xgboost_evaluate_result;"""
            evaluate(ds, evaluate_original_sql, "SELECT * FROM iris.test",
                     "class", "iris.xgboost_train_model_test", model_params,
                     "iris.xgboost_evaluate_result")


if __name__ == '__main__':
    unittest.main()
#Incomplete ordering <class_stmt>PartOrdered(object)<block_start><def_stmt>__eq__ self other<block_start><return>self<is>other<block_end><def_stmt>__ne__ self other<block_start><return>self<is><not>other<block_end><def_stmt>__hash__ self<block_start><return>id(self)<block_end><def_stmt>__lt__ self other<block_start><return><false><block_end><block_end>#Don't blame a sub-class for super-class's sins. <class_stmt>DerivedPartOrdered(PartOrdered)<block_start><pass><block_end>
<import_stmt>numpy<as>np<import_from_stmt>gtsam SfmTrack<import_from_stmt>gtsfm.common.image Image<import_stmt>gtsfm.utils.images<as>image_utils<def_stmt>test_get_average_point_color <block_start>""" Ensure 3d point color is computed as mean of RGB per 2d measurement."""<line_sep># random point; 2d measurements below are dummy locations (not actual projection) triangulated_pt=np.array([1 2 1])<line_sep>track_3d=SfmTrack(triangulated_pt)<line_sep># in camera 0 track_3d.add_measurement(idx=0 m=np.array([130 80]))<line_sep># in camera 1 track_3d.add_measurement(idx=1 m=np.array([10 60]))<line_sep>img0=np.zeros((100 200 3) dtype=np.uint8)<line_sep>img0[80 130]=np.array([40 50 60])<line_sep>img1=np.zeros((100 200 3) dtype=np.uint8)<line_sep>img1[60 10]=np.array([60 70 80])<line_sep>images={0:Image(img0) 1:Image(img1)}<line_sep>r,g,b=image_utils.get_average_point_color(track_3d images)<assert_stmt>r<eq>50<assert_stmt>g<eq>60<assert_stmt>b<eq>70<block_end><def_stmt>test_get_downsampling_factor_per_axis_leaveintact <arrow><none><block_start>"""Ensure that image is left intact, when shorter side is smaller than max_resolution."""<line_sep>img_h=700<line_sep>img_w=1500<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=800<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_downsampling_factor_per_axis(img_h img_w max_resolution)<assert_stmt>scale_u<eq>1.0<assert_stmt>scale_v<eq>1.0<assert_stmt>new_h<eq>700<assert_stmt>new_w<eq>1500<block_end><def_stmt>test_get_rescaling_factor_per_axis_upsample <arrow><none><block_start>"""Ensure that max resolution constraint is met, when upsampling image. Resize a 700x1500 image, so that the shorter image side is EXACTLY 800 px. 
"""<line_sep>img_h=700<line_sep>img_w=1500<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=800<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_rescaling_factor_per_axis(img_h img_w max_resolution)<line_sep># 8/7 will not give a clean integer division <assert_stmt>np.isclose(scale_u 1.1427 atol=4)<assert_stmt>np.isclose(scale_v 1.1429 atol=4)<assert_stmt>new_h<eq>800<assert_stmt>new_w<eq>1714<block_end><def_stmt>test_get_downsampling_factor_per_axis <arrow><none><block_start>"""Ensure that max resolution constraint is met, when downsampling image. Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px. Image is in landscape mode. """<line_sep>img_h=700<line_sep>img_w=1500<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=600<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_downsampling_factor_per_axis(img_h img_w max_resolution)<line_sep># Note that 600 / 700 = 0.85714 # 1500 * 0.85714 = 1285.7, which we round up to 1286. <assert_stmt>np.isclose(scale_u 0.8573 atol=4)<assert_stmt>np.isclose(scale_v 0.8571 atol=4)<assert_stmt>new_h<eq>600<assert_stmt>new_w<eq>1286<block_end><def_stmt>test_get_rescaling_factor_per_axis_downsample <arrow><none><block_start>"""Ensure that max resolution constraint is met, when downsampling image. Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px. Image is in landscape mode. """<line_sep>img_h=700<line_sep>img_w=1500<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=600<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_rescaling_factor_per_axis(img_h img_w max_resolution)<line_sep># Note that 600 / 700 = 0.85714 # 1500 * 0.85714 = 1285.7, which we round up to 1286. 
<assert_stmt>np.isclose(scale_u 0.8573 atol=4)<assert_stmt>np.isclose(scale_v 0.8571 atol=4)<assert_stmt>new_h<eq>600<assert_stmt>new_w<eq>1286<block_end><def_stmt>test_get_downsampling_factor_per_axis_portrait <arrow><none><block_start>"""Ensure that max resolution constraint is met, when downsampling image. Resize a 700x1500 image, so that the shorter image side is AT MOST 600 px. Image is in portrait mode. """<line_sep>img_h=1500<line_sep>img_w=700<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=600<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_downsampling_factor_per_axis(img_h img_w max_resolution)<line_sep># Note that 600 / 700 = 0.85714 # 1500 * 0.85714 = 1285.7, which we round up to 1286. <assert_stmt>np.isclose(scale_u 0.8571 atol=4)<assert_stmt>np.isclose(scale_v 0.8573 atol=4)<assert_stmt>new_h<eq>1286<assert_stmt>new_w<eq>600<block_end><def_stmt>test_get_rescaling_factor_per_axis_downsample_portrait <arrow><none><block_start>"""Ensure that max resolution constraint is met, when downsampling image. Resize a 700x1500 image, so that the shorter image side is EXACTLY 600 px. Image is in portrait mode. """<line_sep>img_h=1500<line_sep>img_w=700<line_sep>img=Image(np.zeros((img_h img_w 3) dtype=np.uint8))<line_sep>max_resolution=600<line_sep>scale_u,scale_v,new_h,new_w=image_utils.get_rescaling_factor_per_axis(img_h img_w max_resolution)<line_sep># Note that 600 / 700 = 0.85714 # 1500 * 0.85714 = 1285.7, which we round up to 1286. <assert_stmt>np.isclose(scale_v 0.8571 atol=4)<assert_stmt>np.isclose(scale_u 0.8573 atol=4)<assert_stmt>new_h<eq>1286<assert_stmt>new_w<eq>600<block_end>
""" Script for visualizing a robot from a URDF. Author: <NAME> """<import_stmt>argparse<import_stmt>urdfpy<if_stmt>__name__<eq>'__main__'# Parse Args <block_start>parser=argparse.ArgumentParser(description='Visualize a robot from a URDF file')<line_sep>parser.add_argument('urdf' type=str help='Path to URDF file that describes the robot')<line_sep>parser.add_argument('-a' action='store_true' help='Visualize robot articulation')<line_sep>parser.add_argument('-c' action='store_true' help='Use collision geometry')<line_sep>args=parser.parse_args()<line_sep>robot=urdfpy.URDF.load(args.urdf)<if_stmt>args.a<block_start>robot.animate(use_collision=args.c)<block_end><else_stmt><block_start>robot.show(use_collision=args.c)<block_end><block_end>
<import_stmt>requests<import_from_stmt>json loads<import_from_stmt>termcolor colored<import_from_stmt>configparser RawConfigParser<def_stmt>init domain<block_start>PDCH=[]<line_sep>print(colored("[*]-Searching Project Discovery Chaos..." "yellow"))<line_sep>parser=RawConfigParser()<line_sep>parser.read("config.ini")<line_sep>CHAOS_KEY=parser.get("PDChaos" "CHAOS_API_KEY")<if_stmt>CHAOS_KEY<eq>""<block_start>print(" \__" colored("No Project Discovery Chaos API key configured" "red"))<line_sep><return>[]<block_end>headers={"user-agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:88.0) Gecko/20100101 Firefox/88.0" "Authorization":CHAOS_KEY}<line_sep>url="https://dns.projectdiscovery.io/dns/{0}/subdomains".format(domain)<try_stmt><block_start>response=requests.get(url headers=headers).text<line_sep>subdomains=loads(response)["subdomains"]<for_stmt>subdomain subdomains<block_start><if_stmt>subdomain<block_start>PDCH.append("{0}.{1}".format(subdomain domain))<block_end><block_end>PDCH=set(PDCH)<line_sep>print(" \__ {0}: {1}".format(colored("Subdomains found" "cyan") colored(len(PDCH) "yellow")))<line_sep><return>PDCH<block_end><except_stmt>requests.exceptions.RequestException<as>err<block_start>print(" \__" colored(err "red"))<line_sep><return>[]<block_end><except_stmt>requests.exceptions.HTTPError<as>errh<block_start>print(" \__" colored(errh "red"))<line_sep><return>[]<block_end><except_stmt>requests.exceptions.ConnectionError<as>errc<block_start>print(" \__" colored(errc "red"))<line_sep><return>[]<block_end><except_stmt>requests.exceptions.Timeout<as>errt<block_start>print(" \__" colored(errt "red"))<line_sep><return>[]<block_end><except_stmt>Exception<block_start>print(" \__" colored("Something went wrong!" "red"))<line_sep><return>[]<block_end><block_end>
'''OpenGL extension ANGLE.program_binary This module customises the behaviour of the OpenGL.raw.GLES2.ANGLE.program_binary to provide a more Python-friendly API Overview (from the spec) This extension makes available a program binary format, PROGRAM_BINARY_ANGLE. It enables retrieving and loading of pre-linked ANGLE program objects. The official definition of this extension is available here: http://www.opengl.org/registry/specs/ANGLE/program_binary.txt '''<import_from_stmt>OpenGL platform constant arrays<import_from_stmt>OpenGL extensions wrapper<import_stmt>ctypes<import_from_stmt>OpenGL.raw.GLES2 _types _glgets<import_from_stmt>OpenGL.raw.GLES2.ANGLE.program_binary *<import_from_stmt>OpenGL.raw.GLES2.ANGLE.program_binary _EXTENSION_NAME<def_stmt>glInitProgramBinaryANGLE <block_start>'''Return boolean indicating whether this extension is available'''<import_from_stmt>OpenGL extensions<line_sep><return>extensions.hasGLExtension(_EXTENSION_NAME)<block_end>### END AUTOGENERATED SECTION
<import_stmt>argparse<import_stmt>os<class_stmt>Opts<block_start><def_stmt>__init__ self<block_start>self.parser=argparse.ArgumentParser()<block_end><def_stmt>init self<block_start>self.parser.add_argument('-expID' default='default' help='Experiment ID')<line_sep>self.parser.add_argument('-data' default='default' help='Input data folder')<line_sep>self.parser.add_argument('-nThreads' default=4 type=int help='Number of threads')<line_sep>self.parser.add_argument('-expDir' default='../exp' help='Experiments directory')<line_sep>self.parser.add_argument('-scaleAugFactor' default=0.25 type=float help='Scale augment factor')<line_sep>self.parser.add_argument('-rotAugProb' default=0.4 type=float help='Rotation augment probability')<line_sep>self.parser.add_argument('-flipAugProb' default=0.5 type=float help='Flip augment probability')<line_sep>self.parser.add_argument('-rotAugFactor' default=30 type=float help='Rotation augment factor')<line_sep>self.parser.add_argument('-colorAugFactor' default=0.2 type=float help='Colo augment factor')<line_sep>self.parser.add_argument('-imgSize' default=368 type=int help='Number of threads')<line_sep>self.parser.add_argument('-hmSize' default=46 type=int help='Number of threads')<line_sep>self.parser.add_argument('-DEBUG' type=int default=0 help='Debug')<line_sep>self.parser.add_argument('-sigmaPAF' default=5 type=int help='Width of PAF')<line_sep>self.parser.add_argument('-sigmaHM' default=7 type=int help='Std. 
of Heatmap')<line_sep>self.parser.add_argument('-variableWidthPAF' dest='variableWidthPAF' action='store_true' help='Variable width PAF based on length of part')<line_sep>self.parser.add_argument('-dataset' default='coco' help='Dataset')<line_sep>self.parser.add_argument('-model' default='vgg' help='Model')<line_sep>self.parser.add_argument('-batchSize' default=8 type=int help='Batch Size')<line_sep>self.parser.add_argument('-LR' default=1e-3 type=float help='Learn Rate')<line_sep>self.parser.add_argument('-nEpoch' default=150 type=int help='Number of Epochs')<line_sep>self.parser.add_argument('-dropLR' type=float default=50 help='Drop LR')<line_sep>self.parser.add_argument('-valInterval' type=int default=1 help='Val Interval')<line_sep>self.parser.add_argument('-loadModel' default='none' help='Load pre-trained')<line_sep>self.parser.add_argument('-train' dest='train' action='store_true' help='Train')<line_sep>self.parser.add_argument('-vizOut' dest='vizOut' action='store_true' help='Visualize output?')<line_sep>self.parser.add_argument('-criterionHm' default='mse' help='Heatmap Criterion')<line_sep>self.parser.add_argument('-criterionPaf' default='mse' help='PAF Criterion')<block_end><def_stmt>parse self<block_start>self.init()<line_sep>self.opt=self.parser.parse_args()<line_sep>self.opt.saveDir=os.path.join(self.opt.expDir self.opt.expID)<if_stmt>self.opt.DEBUG<g>0<block_start>self.opt.nThreads=1<block_end>args=dict((name getattr(self.opt name))<for>name dir(self.opt)<if><not>name.startswith('_'))<if_stmt><not>os.path.exists(self.opt.saveDir)<block_start>os.makedirs(self.opt.saveDir)<block_end>file_name=os.path.join(self.opt.saveDir 'opt.txt')<with_stmt>open(file_name 'wt')<as>opt_file<block_start>opt_file.write('==> Args:\n')<for_stmt>k,v sorted(args.items())<block_start>opt_file.write(' %s: %s\n'%(str(k) str(v)))<block_end><block_end><return>self.opt<block_end><block_end>
<class_stmt>n_A(flatdata.archive.Archive)<block_start>_SCHEMA="""namespace n { archive A { } } """<line_sep>_NAME="A"<line_sep>_RESOURCES={"A.archive":flatdata.archive.ResourceSignature(container=flatdata.resources.RawData initializer=<none> schema=_SCHEMA is_optional=<false> doc="Archive signature") }<def_stmt>__init__ self resource_storage<block_start>flatdata.archive.Archive.__init__(self resource_storage)<block_end><block_end>
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_stmt>os<import_stmt>shutil<import_stmt>io<import_stmt>unittest<import_stmt>mock<import_from_stmt>ebcli.operations spotops<class_stmt>TestSpotOps(unittest.TestCase)<block_start>@mock.patch('ebcli.operations.spotops.prompt_for_instance_types')<def_stmt>test_get_spot_instance_types_from_customer__success self prompt_for_instance_types_mock <block_start>enable_spot=<true><line_sep>interactive=<true><line_sep>prompt_for_instance_types_mock.return_value='t2.micro, t3.micro'<line_sep>self.assertEqual('t2.micro, t3.micro' spotops.get_spot_instance_types_from_customer(interactive enable_spot))<block_end>@mock.patch('ebcli.operations.spotops.prompt_for_instance_types')<def_stmt>test_get_spot_instance_types_from_customer__test_for_prompting self prompt_for_instance_types_mock <block_start>prompt_for_instance_types_mock.return_value=''<line_sep>self.assertEqual('' spotops.get_spot_instance_types_from_customer(interactive=<true> enable_spot=<true> ))<line_sep>prompt_for_instance_types_mock.assert_called_once_with()<block_end>@mock.patch('ebcli.operations.spotops.prompt_for_instance_types')<def_stmt>test_get_spot_instance_types_from_customer__enable_spot_not_passed self prompt_for_instance_types_mock <block_start>enable_spot=<none><line_sep>interactive=<true><line_sep>prompt_for_instance_types_mock.assert_not_called()<line_sep>self.assertFalse(spotops.get_spot_instance_types_from_customer(interactive 
enable_spot))<block_end>@mock.patch('ebcli.operations.spotops.prompt_for_instance_types')<def_stmt>test_get_spot_instance_types_from_customer__interactive_is_disabled self prompt_for_instance_types_mock <block_start>enable_spot=<false><line_sep>interactive=<false><line_sep>prompt_for_instance_types_mock.assert_not_called()<line_sep>self.assertFalse(spotops.get_spot_instance_types_from_customer(interactive enable_spot))<block_end><block_end>
<import_stmt>os<import_from_stmt>io BytesIO<import_stmt>tarfile<import_stmt>tempfile<import_from_stmt>six.moves urllib<import_stmt>time<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>cv2 pdb glob argparse<import_stmt>tensorflow<as>tf<line_sep># code borrowed from: https://github.com/senguptaumd/Background-Matting/blob/master/test_segmentation_deeplab.py ## setup #################### <def_stmt>create_pascal_label_colormap <block_start>"""Creates a label colormap used in PASCAL VOC segmentation benchmark. Returns: A Colormap for visualizing segmentation results. """<line_sep>colormap=np.zeros((256 3) dtype=int)<line_sep>ind=np.arange(256 dtype=int)<for_stmt>shift reversed(range(8))<block_start><for_stmt>channel range(3)<block_start>colormap[: channel]<augor>((ind<rshift>channel)&1)<lshift>shift<block_end>ind<augrshift>3<block_end><return>colormap<block_end><def_stmt>label_to_color_image label<block_start>"""Adds color defined by the dataset colormap to the label. Args: label: A 2D array with integer type, storing the segmentation label. Returns: result: A 2D array with floating type. The element of the array is the color indexed by the corresponding element in the input label to the PASCAL color map. Raises: ValueError: If label is not of rank 2 or its value is larger than color map maximum entry. 
"""<if_stmt>label.ndim<ne>2<block_start><raise>ValueError('Expect 2-D input label')<block_end>colormap=create_pascal_label_colormap()<if_stmt>np.max(label)<ge>len(colormap)<block_start><raise>ValueError('label value too large.')<block_end><return>colormap[label]<block_end>LABEL_NAMES=np.asarray(['background' 'aeroplane' 'bicycle' 'bird' 'boat' 'bottle' 'bus' 'car' 'cat' 'chair' 'cow' 'diningtable' 'dog' 'horse' 'motorbike' 'person' 'pottedplant' 'sheep' 'sofa' 'train' 'tv'])<line_sep>FULL_LABEL_MAP=np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES) 1)<line_sep>FULL_COLOR_MAP=label_to_color_image(FULL_LABEL_MAP)<line_sep>MODEL_NAME='xception_coco_voctrainval'# @param ['mobilenetv2_coco_voctrainaug', 'mobilenetv2_coco_voctrainval', 'xception_coco_voctrainaug', 'xception_coco_voctrainval'] _DOWNLOAD_URL_PREFIX='http://download.tensorflow.org/models/'<line_sep>_MODEL_URLS={'mobilenetv2_coco_voctrainaug':'deeplabv3_mnv2_pascal_train_aug_2018_01_29.tar.gz' 'mobilenetv2_coco_voctrainval':'deeplabv3_mnv2_pascal_trainval_2018_01_29.tar.gz' 'xception_coco_voctrainaug':'deeplabv3_pascal_train_aug_2018_01_04.tar.gz' 'xception_coco_voctrainval':'deeplabv3_pascal_trainval_2018_01_04.tar.gz' }<line_sep>_TARBALL_NAME=_MODEL_URLS[MODEL_NAME]<class_stmt>DeepLabModel(object)<block_start>"""Class to load deeplab model and run inference."""<line_sep>INPUT_TENSOR_NAME='ImageTensor:0'<line_sep>OUTPUT_TENSOR_NAME='SemanticPredictions:0'<line_sep>INPUT_SIZE=513<line_sep>FROZEN_GRAPH_NAME='frozen_inference_graph'<def_stmt>__init__ self tarball_path#"""Creates and loads pretrained deeplab model.""" <block_start>self.graph=tf.Graph()<line_sep>graph_def=<none><line_sep># Extract frozen graph from tar archive. 
tar_file=tarfile.open(tarball_path)<for_stmt>tar_info tar_file.getmembers()<block_start><if_stmt>self.FROZEN_GRAPH_NAME<in>os.path.basename(tar_info.name)<block_start>file_handle=tar_file.extractfile(tar_info)<line_sep>graph_def=tf.GraphDef.FromString(file_handle.read())<line_sep><break><block_end><block_end>tar_file.close()<if_stmt>graph_def<is><none><block_start><raise>RuntimeError('Cannot find inference graph in tar archive.')<block_end><with_stmt>self.graph.as_default()<block_start>tf.import_graph_def(graph_def name='')<block_end>self.sess=tf.Session(graph=self.graph)<block_end><def_stmt>run self image<block_start>"""Runs inference on a single image. Args: image: A PIL.Image object, raw input image. Returns: resized_image: RGB image resized from original input image. seg_map: Segmentation map of `resized_image`. """<line_sep>width,height=image.size<line_sep>resize_ratio=1.0<times>self.INPUT_SIZE/max(width height)<line_sep>target_size=(int(resize_ratio<times>width) int(resize_ratio<times>height))<line_sep>resized_image=image.convert('RGB').resize(target_size Image.ANTIALIAS)<line_sep>batch_seg_map=self.sess.run(self.OUTPUT_TENSOR_NAME feed_dict={self.INPUT_TENSOR_NAME:[np.asarray(resized_image)]})<line_sep>seg_map=batch_seg_map[0]<line_sep><return>resized_image seg_map<block_end><block_end><def_stmt>process_masks img_paths save_paths model_dir='deeplab_model'# download model if not already have it <block_start><if_stmt><not>os.path.exists(model_dir)<block_start>tf.gfile.MakeDirs(model_dir)<block_end>download_path=os.path.join(model_dir _TARBALL_NAME)<if_stmt><not>os.path.exists(download_path)<block_start>print('downloading model to %s, this might take a while...'%download_path)<line_sep>urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX+_MODEL_URLS[MODEL_NAME] download_path)<line_sep>print('download completed! 
loading DeepLab model...')<block_end>MODEL=DeepLabModel(download_path)<line_sep>print('model loaded successfully!')<line_sep># process images <for_stmt>i range(0 len(img_paths))<block_start>start_time=time.time()<if_stmt>i%500<eq>0<block_start>print(f"{i+1}/{len(img_paths)}")<block_end>start_read=time.time()<line_sep>image=Image.open(img_paths[i])<line_sep>start_read=time.time()<line_sep>res_im,seg=MODEL.run(image)<line_sep>seg=cv2.resize(seg.astype(np.uint8) image.size)<line_sep>mask_sel=(seg<eq>15).astype(np.float32)<line_sep>save_dir=os.path.dirname(save_paths[i])<line_sep>os.makedirs(save_dir exist_ok=<true>)<line_sep># dilate the boundary a bit because as the mask is not accurate kernel=np.ones((3 3))<line_sep>mask_sel=cv2.dilate(mask_sel kernel=kernel iterations=1)<line_sep>start_read=time.time()<line_sep>cv2.imwrite(save_paths[i] (255<times>mask_sel).astype(np.uint8))<block_end>print("finish mask processing.")<block_end><def_stmt>process_bbox_masks img_paths save_paths bboxes model_dir='deeplab_model' mul=1.0<block_start><if_stmt><not>os.path.exists(model_dir)<block_start>tf.gfile.MakeDirs(model_dir)<block_end>download_path=os.path.join(model_dir _TARBALL_NAME)<if_stmt><not>os.path.exists(download_path)<block_start>print('downloading model to %s, this might take a while...'%download_path)<line_sep>urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX+_MODEL_URLS[MODEL_NAME] download_path)<line_sep>print('download completed! 
loading DeepLab model...')<block_end>MODEL=DeepLabModel(download_path)<line_sep>print('model loaded successfully!')<line_sep># process images <for_stmt>i range(0 len(img_paths))<block_start><if_stmt>i%500<eq>0<block_start>print(f"{i+1}/{len(img_paths)}")<block_end>start_read=time.time()<line_sep>image=Image.open(img_paths[i])<line_sep>W,H=image.size<line_sep># crop by bounding box cx,cy,box_len=bboxes[i]<line_sep>cx,cy=int(cx) int(cy)<line_sep>box_len=int(box_len<times>0.5<times>mul)<line_sep>left=max(cx-box_len 0)<line_sep>top=max(cy-box_len 0)<line_sep>right=min(cx+box_len W)<line_sep>bot=min(cy+box_len H)<line_sep>cropped=image.crop((left top right bot))<line_sep>res_im,seg=MODEL.run(cropped)<line_sep>seg=cv2.resize(seg.astype(np.uint8) cropped.size)<line_sep>mask_sel=((seg<eq>15).astype(np.uint8))<line_sep>mask=np.zeros((H W) dtype=np.uint8)<line_sep>mask[top:bot left:right]=mask_sel<line_sep># put the cropped part back to the original image save_dir=os.path.dirname(save_paths[i])<line_sep>os.makedirs(save_dir exist_ok=<true>)<line_sep># dilate the boundary a bit because as the mask is not accurate kernel=np.ones((3 3))<line_sep>mask=cv2.dilate(mask kernel=kernel iterations=1)[<ellipsis> <none>]<line_sep>cv2.imwrite(save_paths[i] (255<times>mask).astype(np.uint8))<block_end>print("finish mask processing.")<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>deepdish<as>dd<import_stmt>imageio<line_sep>parser=argparse.ArgumentParser(description='Arguments for masks extraction')<line_sep>parser.add_argument("-b" "--base_path" type=str #default="data/h36m/h36m_full", default='data/h36m/' help='base directory')<line_sep>parser.add_argument("-t" "--type" type=str default='h36m' help='type of data to process')<line_sep>parser.add_argument("-c" "--camera_id" type=int default=<none> help='camera to extract')<line_sep>parser.add_argument("-s" "--subject" type=str default="S9" help='subject to extract')<line_sep>parser.add_argument("-r" "--res" type=float 
default=1.0 help='mask resolution')<line_sep>parser.add_argument("--h5_path" type=str default=<none> help='path to a .h5 file, mainly for MonPerfCap')<line_sep>args=parser.parse_args()<line_sep>base_path=args.base_path#"data/h36m/h36m_full" #if args.h5_path is None: <if_stmt>args.type<eq>'h36m'<block_start>subject=args.subject# "S9" camera_id=args.camera_id# -1 cameras=["54138969" "55011271" "58860488" "60457274"]<line_sep>camera=<none><if_stmt>camera_id<is><not><none><block_start>camera=cameras[camera_id]<if_stmt>subject<ne>'S1'<block_start>h5_name=os.path.join(base_path f"{subject}-camera=[{camera}]-subsample=5.h5")<block_end><else_stmt><block_start>h5_name=os.path.join(base_path f"{subject}-camera=[{camera}]-subsample=1.h5")<block_end><block_end><else_stmt><block_start>h5_name=os.path.join(base_path f"{subject}_SPIN_rect_output-maxmin.h5")<block_end>print(h5_name)<line_sep>img_paths=dd.io.load(h5_name "/img_path")<line_sep>bboxes=dd.io.load(h5_name "/bbox_params")<line_sep>img_paths=[os.path.join(base_path img_path)<for>img_path img_paths]<line_sep>mask_paths=[img_path.replace(f"{subject}" f"{subject}m_")<for>img_path img_paths]<line_sep>#process_masks(img_paths, mask_paths, res=args.res) #process_masks(img_paths, mask_paths) process_bbox_masks(img_paths mask_paths bboxes mul=1.1)<line_sep>""" cameras = ["54138969", "55011271", "58860488", "60457274"] base_path = args.base_path subject = args.subject h5_name = os.path.join(base_path, f"{subject}_processed.h5") img_paths = dd.io.load(h5_name, "/img_path") img_paths = [os.path.join(base_path, img_path) for img_path in img_paths] mask_paths = [img_path.replace(f"{subject}", f"{subject}m") for img_path in img_paths] process_masks(img_paths, mask_paths, res=args.res) """<block_end><elif_stmt>args.type<eq>'perfcap'<block_start><import_from_stmt>load_perfcap read_spin_data<line_sep>processed_est=read_spin_data(args.h5_path)<line_sep>img_paths=processed_est["img_path"]<line_sep>img_paths=[os.path.join(args.base_path 
img_path)<for>img_path img_paths]<line_sep>save_paths=[img_path.replace("/images/" "/masks/")<for>img_path img_paths]<line_sep>process_bbox_masks(img_paths save_paths processed_est["bboxes"])<block_end><elif_stmt>args.type<eq>'3dhp'<block_start><import_from_stmt>load_3dhp read_3dhp_spin_data<line_sep>subject=args.subject# "S9" processed_est=read_3dhp_spin_data(args.h5_path subject=subject)<line_sep>img_paths=processed_est["img_path"]<line_sep>img_paths=[os.path.join(args.base_path img_path)<for>img_path img_paths]<line_sep>save_paths=[img_path.replace("/imageSequence/" "/masks/")<for>img_path img_paths]<line_sep>process_bbox_masks(img_paths save_paths processed_est["bboxes"])<block_end><block_end>
<import_from_stmt>datetime datetime<import_from_stmt>typing List Optional<import_from_stmt>pydantic BaseModel<class_stmt>SuccessResponse(BaseModel)<block_start>message:str="Successful"<block_end><class_stmt>SearchWindowSizeResponse(BaseModel)<block_start>sizes:List[int]<block_end><class_stmt>AvailableSymbolsResponse(BaseModel)<block_start>symbols:List[str]<block_end><class_stmt>MatchResponse(BaseModel)<block_start>symbol:str<line_sep>distance:float<line_sep>start_date:str<line_sep>end_date:str<line_sep>todays_value:Optional[float]<line_sep>future_value:Optional[float]<line_sep>change:Optional[float]<line_sep>values:Optional[List[float]]<block_end><class_stmt>TopKSearchResponse(BaseModel)<block_start>matches:List[MatchResponse]=[]<line_sep>forecast_type:str<line_sep>forecast_confidence:float<line_sep>anchor_symbol:str<line_sep>anchor_values:Optional[List[float]]<line_sep>window_size:int<line_sep>top_k:int<line_sep>future_size:int<block_end><class_stmt>DataRefreshResponse(BaseModel)<block_start>message:str="Last (most recent) refresh"<line_sep>date:datetime<block_end><class_stmt>IsReadyResponse(BaseModel)<block_start>is_ready:bool<block_end>
# Copyright 2021 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mahalanobis metric."""<import_stmt>numpy<as>np<import_stmt>datasets<line_sep>_DESCRIPTION=""" Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. <NAME> in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """<line_sep>_CITATION="""\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={<NAME>, <NAME> <NAME> <NAME>{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """<line_sep>_KWARGS_DESCRIPTION=""" Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. 
Examples: >>> mahalanobis_metric = datasets.load_metric("mahalanobis") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """<line_sep>@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION _KWARGS_DESCRIPTION)<class_stmt>Mahalanobis(datasets.Metric)<block_start><def_stmt>_info self<block_start><return>datasets.MetricInfo(description=_DESCRIPTION citation=_CITATION inputs_description=_KWARGS_DESCRIPTION features=datasets.Features({"X":datasets.Sequence(datasets.Value("float" id="sequence") id="X") }) )<block_end><def_stmt>_compute self X reference_distribution# convert to numpy arrays <block_start>X=np.array(X)<line_sep>reference_distribution=np.array(reference_distribution)<line_sep># Assert that arrays are 2D <if_stmt>len(X.shape)<ne>2<block_start><raise>ValueError("Expected `X` to be a 2D vector")<block_end><if_stmt>len(reference_distribution.shape)<ne>2<block_start><raise>ValueError("Expected `reference_distribution` to be a 2D vector")<block_end><if_stmt>reference_distribution.shape[0]<l>2<block_start><raise>ValueError("Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")<block_end># Get mahalanobis distance for each prediction X_minus_mu=X-np.mean(reference_distribution)<line_sep>cov=np.cov(reference_distribution.T)<try_stmt><block_start>inv_covmat=np.linalg.inv(cov)<block_end><except_stmt>np.linalg.LinAlgError<block_start>inv_covmat=np.linalg.pinv(cov)<block_end>left_term=np.dot(X_minus_mu inv_covmat)<line_sep>mahal_dist=np.dot(left_term X_minus_mu.T).diagonal()<line_sep><return>{"mahalanobis":mahal_dist}<block_end><block_end>
# Note: Run this file from Arena directory (the one above /tafl) <import_stmt>Arena<import_from_stmt>MCTS MCTS<import_from_stmt>tafl.TaflGame TaflGame display<import_from_stmt>tafl.TaflPlayers *<line_sep>#from tafl.keras.NNet import NNetWrapper as NNet <import_stmt>numpy<as>np<import_from_stmt>utils *<line_sep>""" use this script to play any two agents against each other, or play manually with any agent. """<line_sep>g=TaflGame("Brandubh")<line_sep># all players rp=RandomTaflPlayer(g).play<line_sep>gp=GreedyTaflPlayer(g).play<line_sep>hp=HumanTaflPlayer(g).play<line_sep># nnet players #n1 = NNet(g) #n1.load_checkpoint('./pretrained_models/tafl/keras/','6x100x25_best.pth.tar') #args1 = dotdict({'numMCTSSims': 50, 'cpuct':1.0}) #mcts1 = MCTS(g, n1, args1) #n1p = lambda x: np.argmax(mcts1.getActionProb(x, temp=0)) arena=Arena.Arena(hp gp g display=display)<line_sep>#arena = Arena.Arena(gp, rp, g, display=display) print(arena.playGames(2 verbose=<true>))<line_sep>
'''
Given a string S, consider all duplicated substrings: (contiguous) substrings
of S that occur 2 or more times. (The occurrences may overlap.)

Return any duplicated substring that has the longest possible length.
(If S does not have a duplicated substring, the answer is "".)

Example 1:  Input: "banana"  Output: "ana"
Example 2:  Input: "abcd"    Output: ""

Note: 2 <= S.length <= 10^5; S consists of lowercase English letters.

Approach: build a suffix array by prefix doubling, then compute the LCP array
with Kasai's algorithm; the longest duplicated substring starts at the suffix
whose LCP with its sorted-order neighbour is maximal.
'''


class Suffix(object):
    """One suffix during prefix doubling: its start index plus a rank pair."""

    def __init__(self):
        self.index = 0           # starting position of the suffix in s
        self.first_rank = -1     # rank of the first half (first 2^k chars)
        self.adjacent_rank = -1  # rank of the second half; -1 past end of s

    def __lt__(self, other):
        # Order by (first_rank, adjacent_rank): the first 2^(k+1) characters.
        if self.first_rank == other.first_rank:
            return self.adjacent_rank < other.adjacent_rank
        return self.first_rank < other.first_rank


def create_suffix_array(s):
    """Return the suffix array of s: suffix start indices in sorted order."""
    N = len(s)
    suffix_array = []
    # Seed ranks from the first two characters of every suffix.
    for index, char in enumerate(s):
        suffix_obj = Suffix()
        suffix_obj.index = index
        suffix_obj.first_rank = ord(char) - ord('a')
        suffix_obj.adjacent_rank = ord(s[index + 1]) - ord('a') if (index + 1 < N) else -1
        suffix_array.append(suffix_obj)
    suffix_array.sort()

    no_char = 4      # 2 * number of characters already compared
    index_map = {}   # suffix start index -> current position in suffix_array
    while no_char < 2 * N:
        # Re-rank: suffixes with equal (first_rank, adjacent_rank) pairs keep
        # the same rank; prev_rank holds the *old* first_rank of the previous
        # element so ties are detected against pre-update values.
        rank = 0
        prev_rank, suffix_array[0].first_rank = suffix_array[0].first_rank, rank
        index_map[suffix_array[0].index] = 0
        for index in range(1, N):
            if (suffix_array[index].first_rank == prev_rank
                    and suffix_array[index].adjacent_rank == suffix_array[index - 1].adjacent_rank):
                suffix_array[index].first_rank = rank
            else:
                rank += 1
                prev_rank, suffix_array[index].first_rank = suffix_array[index].first_rank, rank
            index_map[suffix_array[index].index] = index
        # The next round's second-half rank is the rank of the suffix starting
        # no_char//2 characters ahead.
        # BUGFIX 1: use integer division -- `no_char / 2` is a float on
        # Python 3, so `index_map[adjacent_index]` raised KeyError.
        # BUGFIX 2: take `.first_rank` -- the original assigned the whole
        # Suffix object, which made the subsequent sort() compare a Suffix
        # against an int (-1) and corrupted the ordering.
        for index in range(N):
            adjacent_index = suffix_array[index].index + (no_char // 2)
            suffix_array[index].adjacent_rank = (
                suffix_array[index_map[adjacent_index]].first_rank
                if adjacent_index < N else -1)
        suffix_array.sort()
        no_char *= 2
    return [suffix.index for suffix in suffix_array]


def lcp_w_suffix_str(array, s):
    """Kasai's algorithm.

    Returns lcp_array where lcp_array[i] is the length of the longest common
    prefix of the suffixes at sorted positions i and i+1 (0 for the last one).
    """
    N = len(array)
    lcp_array = [0] * N
    inv_suffix = [0] * N  # inverse permutation: start index -> sorted position
    for index in range(N):
        inv_suffix[array[index]] = index
    maxLen = 0
    # Walk suffixes in text order so the previous LCP can be reused minus one.
    for index in range(N):
        if inv_suffix[index] == N - 1:
            # Last suffix in sorted order has no successor to compare against.
            maxLen = 0
            continue
        index_j = array[inv_suffix[index] + 1]
        while (index + maxLen < N and index_j + maxLen < N
               and s[index + maxLen] == s[index_j + maxLen]):
            maxLen += 1
        lcp_array[inv_suffix[index]] = maxLen
        if maxLen > 0:
            maxLen -= 1  # dropping one leading char loses at most one match
    return lcp_array


class Solution(object):
    def longestDupSubstring(self, S):
        """
        :type S: str
        :rtype: str
        """
        suffix_array = create_suffix_array(S)
        lcp_array = lcp_w_suffix_str(suffix_array, S)
        # The answer is the maximal LCP entry; it starts at that suffix.
        start, end = 0, 0
        for index in range(len(S)):
            if lcp_array[index] > end:
                end = lcp_array[index]
                start = suffix_array[index]
        if end == 0:
            return ""
        return S[start:start + end]
import numpy as np
import torch as th
import torch.nn as nn

from rls.nn.mlps import MLP
from rls.nn.represent_nets import RepresentationNetwork


class QattenMixer(nn.Module):
    """Qatten-style mixing network: combines per-agent Q-values into a joint
    Q_tot via multi-head attention conditioned on the global state.

    Shape notation used in the comments below:
        T = time steps, B = batch size, N = n_agents, H = n_attention_head.
    """

    def __init__(self,
                 n_agents: int,
                 state_spec,
                 rep_net_params,
                 agent_own_state_size: bool,
                 query_hidden_units: int,
                 query_embed_dim: int,
                 key_embed_dim: int,
                 head_hidden_units: int,
                 n_attention_head: int,
                 constrant_hidden_units: int,
                 is_weighted: bool = True):
        super().__init__()
        self.n_agents = n_agents
        # Encodes the raw global state into a feature vector of size h_dim.
        self.rep_net = RepresentationNetwork(obs_spec=state_spec,
                                             rep_net_params=rep_net_params)
        self.u_dim = agent_own_state_size  # TODO: implement this
        self.query_embed_dim = query_embed_dim
        self.key_embed_dim = key_embed_dim
        self.n_attention_head = n_attention_head
        self.is_weighted = is_weighted

        # One query MLP (state feature -> query) and one key projection
        # (agent own-state slice -> key) per attention head.
        self.query_embedding_layers = nn.ModuleList()
        self.key_embedding_layers = nn.ModuleList()
        for i in range(self.n_attention_head):
            self.query_embedding_layers.append(
                MLP(input_dim=self.rep_net.h_dim,
                    hidden_units=query_hidden_units,
                    layer='linear',
                    act_fn='relu',
                    output_shape=query_embed_dim))
            self.key_embedding_layers.append(
                nn.Linear(self.u_dim, self.key_embed_dim))

        # Scaled dot-product normalizer, as in standard attention.
        self.scaled_product_value = np.sqrt(self.query_embed_dim)

        # Produces the per-head (non-negative after abs) weights w_h;
        # only used when is_weighted is True.
        self.head_embedding_layer = MLP(input_dim=self.rep_net.h_dim,
                                        hidden_units=head_hidden_units,
                                        layer='linear',
                                        act_fn='relu',
                                        output_shape=n_attention_head)
        # State-dependent constant/bias c added to the head sum.
        self.constrant_value_layer = MLP(input_dim=self.rep_net.h_dim,
                                         hidden_units=constrant_hidden_units,
                                         layer='linear',
                                         act_fn='relu',
                                         output_shape=1)

    def forward(self, q_values, state, **kwargs):
        """Mix per-agent Q-values into Q_tot.

        params:
            q_values: [T, B, 1, N]
            state: [T, B, *]
        returns:
            q_tot: [T, B, 1]
        """
        time_step = q_values.shape[0]   # T
        batch_size = q_values.shape[1]  # B
        # state: [T, B, *]
        state_feat, _ = self.rep_net(state, **kwargs)  # [T, B, *]
        us = self._get_us(state_feat)  # [T, B, N, *]
        # Per head: softmax attention over agents -> lambda weights.
        q_lambda_list = []
        for i in range(self.n_attention_head):
            state_embedding = self.query_embedding_layers[i](state_feat)  # [T, B, *]
            u_embedding = self.key_embedding_layers[i](us)  # [T, B, N, *]
            state_embedding = state_embedding.unsqueeze(-2)  # [T, B, 1, *]
            u_embedding = u_embedding.swapaxes(-1, -2)  # [T, B, *, N]
            # Scaled dot product between the state query and each agent key.
            raw_lambda = (state_embedding @ u_embedding) / self.scaled_product_value  # [T, B, 1, N]
            q_lambda = raw_lambda.softmax(dim=-1)  # [T, B, 1, N]
            q_lambda_list.append(q_lambda)  # H * [T, B, 1, N]
        q_lambda_list = th.cat(q_lambda_list, dim=-2)  # [T, B, H, N]
        q_lambda_list = q_lambda_list.swapaxes(-1, -2)  # [T, B, N, H]
        # Per-head mixed Q: attention-weighted sum of agent Q-values.
        q_h = q_values @ q_lambda_list  # [T, B, 1, H]
        if self.is_weighted:
            # shape: [-1, n_attention_head, 1]
            # |w_h| keeps the head weights non-negative (monotonic mixing).
            w_h = th.abs(self.head_embedding_layer(state_feat))  # [T, B, H]
            w_h = w_h.unsqueeze(-1)  # [T, B, H, 1]
            sum_q_h = q_h @ w_h  # [T, B, 1, 1]
            sum_q_h = sum_q_h.view(time_step, batch_size, 1)  # [T, B, 1]
        else:
            sum_q_h = q_h.sum(-1)  # [T, B, 1]
        c = self.constrant_value_layer(state_feat)  # [T, B, 1]
        q_tot = sum_q_h + c  # [T, B, 1]
        return q_tot

    def _get_us(self, state_feat):
        """Slice per-agent "own state" features out of the state feature.

        Assumes the first n_agents * u_dim entries of state_feat are the
        concatenated per-agent own-state features -- TODO confirm against the
        environment's state layout.
        """
        time_step = state_feat.shape[0]   # T
        batch_size = state_feat.shape[1]  # B
        agent_own_state_size = self.u_dim
        # no_grad: attention keys are computed from a detached state slice.
        with th.no_grad():
            us = state_feat[:, :, :agent_own_state_size * self.n_agents].view(
                time_step, batch_size, self.n_agents, agent_own_state_size)  # [T, B, N, *]
        return us
## Just the JSON decoder, because I only need a decoder for now.
#from rpython.rlib.unicodedata import unicodedb_6_2_0 as unicodedb
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib import rfile
from rpython.rlib.rstring import UnicodeBuilder
from rpython.rlib.objectmodel import specialize, always_inline
from space import *
from space import numbers
from stdlib import fs
import space
import pathobj
import os

# NOTE: this module is RPython (translated, not run on CPython), so the
# explicit idioms below (len(x) > 0 checks, u'' literals, encode/decode
# round-trips) are deliberate RPython-compatible style.

# The runtime-visible `json` module; entries are registered via @builtin.
module = Module(u'json', {}, frozen=True)


def builtin(deco):
    """Decorator factory: wrap fn with the signature decorator `deco` and
    publish it on the `json` module under its own name (with any trailing
    underscore stripped)."""
    def _deco_(fn):
        name = fn.__name__.rstrip('_').decode('utf-8')
        module.setattr_force(name, Builtin(deco(fn), name))
        return fn
    return _deco_


@builtin(signature(Object, Object, Dict, optional=1))
def write_file(pathname, obj, config):
    """json.write_file(path, obj[, config]): stringify obj and write it,
    followed by a newline, to the given path. IOErrors are re-raised as
    language-level errors carrying the path and strerror text."""
    name = pathobj.os_stringify(pathname).encode('utf-8')
    try:
        fd = rfile.create_file(name, "wb")
        try:
            # TODO: sort of defeats the purpose of
            # incremental encoder.
            fd.write(configured_stringify(obj, config).encode('utf-8'))
            fd.write('\n')
        finally:
            fd.close()
    except IOError as error:
        message = os.strerror(error.errno).decode('utf-8')
        raise OldError(u"%s: %s" % (pathobj.stringify(pathname), message))
    return null


@builtin(signature(Object, Dict, optional=1))
def write_string(obj, config):
    """json.write_string(obj[, config]): stringify obj to a String object."""
    return space.String(configured_stringify(obj, config))


def configured_stringify(obj, config):
    """Stringify obj: compact single-line output when config is None,
    otherwise pretty-printed via the Scanner/Printer pair using the
    `margin`, `indent` and `sort_keys` entries of config."""
    if config is None:
        ub = UnicodeBuilder()
        quick_stringify(ub, obj)
        return ub.build()
    margin = space.to_int(get_config(config, u"margin", space.Integer(80)))
    scan = Scanner(Printer(margin))
    scan.indent = space.to_int(get_config(config, u"indent", space.Integer(2)))
    scan.sort_keys = space.is_true(get_config(config, u"sort_keys", space.false))
    stringify(scan, obj)
    scan.finish()
    return scan.printer.result.build()


def get_config(config, text, default):
    # Dict lookup keyed by a runtime String; falls back to `default`.
    return config.data.get(space.String(text), default)


def quick_stringify(ub, obj):
    """Compact (no-whitespace) recursive stringifier appending to ub."""
    if isinstance(obj, space.Dict):
        ub.append(u"{")
        more = False  # True once at least one pair was emitted (comma control)
        for key, value in obj.data.iteritems():
            if not isinstance(key, String):
                raise unwind(LError(u"json supports only strings as keys: " + key.repr()))
            if more:
                ub.append(u",")
            ub.append(escape_string(key.string))
            ub.append(u':')
            quick_stringify(ub, value)
            more = True
        ub.append(u"}")
    elif isinstance(obj, space.List):
        ub.append(u"[")
        more = False
        for item in obj.contents:
            if more:
                ub.append(u",")
            quick_stringify(ub, item)
            more = True
        ub.append(u"]")
    elif isinstance(obj, space.String):
        ub.append(escape_string(obj.string))
    elif isinstance(obj, space.Integer):
        ub.append(numbers.integer_to_string(obj.value, 10))
    elif isinstance(obj, space.Float):
        ub.append(numbers.float_to_string(obj.number))
    elif obj is space.null:
        ub.append(u"null")
    elif obj is space.true:
        ub.append(u"true")
    elif obj is space.false:
        ub.append(u"false")
    else:
        raise unwind(LError(u"no handler for: " + obj.repr()))


def stringify(scan, obj):
    """Pretty-printing stringifier: emits Left/Right grouping tokens and
    Blank break points into the Scanner instead of raw text."""
    if isinstance(obj, space.Dict):
        scan.left().text(u"{").blank(u"", scan.indent)
        more = False
        if scan.sort_keys:
            # Collect the pairs, sort them by key, then emit in order.
            pairs = []
            for key, value in obj.data.iteritems():
                if not isinstance(key, String):
                    raise unwind(LError(u"json supports only strings as keys: " + key.repr()))
                pairs.append((key, value))
            sorter = JSONKeySort(pairs, len(pairs))
            sorter.sort()
            for key, value in sorter.list:
                if more:
                    scan.text(u",").blank(u" ", scan.indent)
                scan.left()
                scan.text(escape_string(key.string) + u': ')
                stringify(scan, value)
                scan.right()
                more = True
        else:
            # Unsorted path: same emission, straight from dict iteration order.
            for key, value in obj.data.iteritems():
                if not isinstance(key, String):
                    raise unwind(LError(u"json supports only strings as keys: " + key.repr()))
                if more:
                    scan.text(u",").blank(u" ", scan.indent)
                scan.left()
                scan.text(escape_string(key.string) + u': ')
                stringify(scan, value)
                scan.right()
                more = True
        scan.blank(u"", 0).text(u"}").right()
    elif isinstance(obj, space.List):
        scan.left().text(u"[").blank(u"", scan.indent)
        more = False
        for item in obj.contents:
            if more:
                scan.text(u",").blank(u" ", scan.indent)
            stringify(scan, item)
            more = True
        scan.blank(u"", 0).text(u"]").right()
    elif isinstance(obj, space.String):
        scan.text(escape_string(obj.string))
    elif isinstance(obj, space.Integer):
        scan.text(numbers.integer_to_string(obj.value, 10))
    elif isinstance(obj, space.Float):
        scan.text(numbers.float_to_string(obj.number))
    elif obj is space.null:
        scan.text(u"null")
    elif obj is space.true:
        scan.text(u"true")
    elif obj is space.false:
        scan.text(u"false")
    else:
        raise unwind(LError(u"no handler for: " + obj.repr()))


TimSort = make_timsort_class()


class JSONKeySort(TimSort):
    # Sorts (key, value) pairs by the key's unicode string.
    def lt(self, a, b):
        return a[0].string < b[0].string


# This is the easiest point of failure in your stringifier program.
def escape_string(string):
    """Return `string` as a double-quoted JSON string literal, escaping
    backslash, double quote, control characters and codepoints > 0xFF."""
    out = UnicodeBuilder()
    out.append(u'"')
    for ch in string:
        n = ord(ch)
        if 0x20 <= n and n <= 0x7E or 0xFF < n:  # remove the last part in cond if you don't want
            if ch == u'\\':                      # unicode printed out for some reason.
                ch = u'\\\\'
            elif ch == u'"':
                ch = u'\\"'
        else:
            # \uXXXX escape, or the short \b \t \n \f \r form when available.
            a = u"0123456789abcdef"[n >> 12]
            b = u"0123456789abcdef"[n >> 8 & 15]
            c = u"0123456789abcdef"[n >> 4 & 15]
            d = u"0123456789abcdef"[n & 15]
            ch = u'u' + a + b + c + d
            ch = u'\\' + character_escapes.get(n, ch)
        out.append(ch)
    out.append(u'"')
    return out.build()


# Codepoint -> short escape letter (used without the leading backslash).
character_escapes = {8: u'b', 9: u't', 10: u'n', 12: u'f', 13: u'r'}


# The scanner runs three line widths before the printer and checks how many
# spaces the blanks and groups take. This allows the printer determine
# whether the line or grouping should be broken into multiple lines.
class Scanner(object):
    def __init__(self, printer):
        self.printer = printer
        self.stream = []        # tokens buffered ahead of the printer
        self.stack = []         # open Left tokens awaiting their Right
        self.lastblank = None   # most recent Blank, closed on next Blank/finish
        self.left_total = 1
        self.right_total = 1    # makes sure we won't treat the first
                                # item differently than others.
        self.sort_keys = False
        self.indent = 2

    # Convenience wrappers that feed one token into scan(); all chainable.
    def left(self):
        return self.scan(Left())

    def right(self):
        return self.scan(Right())

    def blank(self, text, indent):
        return self.scan(Blank(text, indent))

    def text(self, text):
        return self.scan(Text(text))

    def scan(self, x):
        """Measure token x (sizes are stored as -right_total and completed
        when the matching Right/next Blank arrives), then flush buffered
        tokens to the printer once the lookahead exceeds 3 * margin."""
        if isinstance(x, Left):
            x.size = -self.right_total
            self.stack.append(x)
        elif isinstance(x, Right):
            if len(self.stack) > 0:
                self.stack.pop().size += self.right_total
        elif isinstance(x, Blank):
            if self.lastblank is not None:
                self.lastblank.size += self.right_total
            self.lastblank = x
            x.size = -self.right_total
            self.right_total += len(x.text)
        elif isinstance(x, Text):
            self.right_total += len(x.text)
        self.stream.append(x)
        while len(self.stream) > 0 and self.right_total - self.left_total > 3*self.printer.margin:
            self.left_total += self.printer.scan(self.stream.pop(0))
        return self

    def finish(self):
        # Close the dangling blank and drain the remaining buffer.
        if self.lastblank is not None:  # Well.. of course.
            self.lastblank.size += self.right_total  # I didn't figure this out earlier.
        while len(self.stream) > 0:
            self.printer.scan(self.stream.pop(0))
# Printer keeps the track of layout during printing.
class Printer:
    """Consumes measured tokens from the Scanner and emits the final text,
    breaking at Blanks whose measured size does not fit on the line."""

    def __init__(self, margin):
        self.margin = margin
        self.layout = Layout(None, margin, False)  # current group's layout frame
        self.spaceleft = margin   # space remaining on the current line
        self.spaces = margin      # indentation anchor of the current group
        self.result = UnicodeBuilder()

    def scan(self, x):
        """Print one token; returns its width so the Scanner can advance
        left_total."""
        if isinstance(x, Left):
            # Open a group; force breaks inside it if the whole group
            # (x.size) cannot fit on what is left of the line.
            self.layout = Layout(self.layout, self.spaces,
                                 x.size < 0 or self.spaceleft < x.size)
        elif isinstance(x, Right):
            if self.layout.parent:
                self.layout = self.layout.parent
        elif isinstance(x, Blank):
            if x.size < 0 or self.spaceleft < x.size or self.layout.force_break:
                # Break: newline plus indentation relative to the group anchor.
                self.spaces = self.layout.spaces - x.indent
                self.spaceleft = self.spaces
                self.result.append(u'\n' + u' '*(self.margin - self.spaces))
            else:
                # Fits: print the blank's inline text instead of breaking.
                self.result.append(x.text)
                self.spaceleft -= len(x.text)
        elif isinstance(x, Text):
            self.result.append(x.text)
            self.spaceleft -= len(x.text)
        return len(x)


# These small objects are scanner and printer internals.
class Layout(object):
    # One stacked layout frame per open group.
    def __init__(self, parent, spaces, force_break):
        self.parent = parent
        self.spaces = spaces
        self.force_break = force_break


# These objects are mutated by the scanner, so they cannot be
# reused. Users of the pretty printer should not create them themselves.
class ScannerToken:
    """Base class for pretty-printer stream tokens; len() is the printed width."""
    def __len__(self):
        return 0


class Text(ScannerToken):
    # A literal run of text.
    def __init__(self, text):
        self.text = text

    def __len__(self):
        return len(self.text)


class Left(ScannerToken):
    # Opens a group; .size is filled in by the Scanner.
    def __init__(self):
        self.size = 0


class Right(ScannerToken):
    # Closes the innermost group.
    pass


class Blank(ScannerToken):
    # A potential break point: prints .text inline, or a newline + indent.
    def __init__(self, text, indent=0):
        self.text = text
        self.indent = indent
        self.size = 0

    def __len__(self):
        return len(self.text)


@builtin(signature(Object))
def read_file(path):
    """json.read_file(path): read the file contents and decode them as JSON."""
    sobj = fs.read_file([path])
    assert isinstance(sobj, String)
    return read_string(sobj)


@builtin(signature(String))
def read_string(string):
    """json.read_string(s): decode one JSON document using the table-driven
    pushdown parser (states/gotos/catcode below)."""
    stack = []              # goto stack for nested values
    ctx = ParserContext()
    state = 0x00
    for ch in string.string:
        # Codepoints above 0x7E share the catcode of 0x7E ("other char").
        cat = catcode[min(ord(ch), 0x7E)]
        state = parse_char(cat, ch, stack, state, ctx)
    # Feed one trailing space so a document ending in a number/literal
    # is terminated and reduced.
    state = parse_char(catcode[32], u' ', stack, state, ctx)
    if state != 0x00:
        raise unwind(LError(u"JSON decode error: truncated"))
    if len(ctx.ds) != 1:
        raise unwind(LError(u"JSON decode error: too many objects"))
    return ctx.ds.pop()


class ParserContext:
    def __init__(self):
        self.ds = []                # data stack
        self.ss = UnicodeBuilder()  # string stack
        self.es = UnicodeBuilder()  # escape stack


@always_inline
def parse_char(cat, ch, stack, state, ctx):
    """Advance the parser by one input character.

    Each states[state][cat] entry packs (action << 8) | next_state:
    0xffff means syntax error; action bit 0x80 pushes gotos[state] before
    acting; next_state 0xFF pops the goto stack (end of a nested value) and
    re-dispatches the same character in the popped state.
    """
    while True:
        code = states[state][cat]
        action = code >> 8 & 0xFF
        code = code & 0xFF
        if action == 0xFF and code == 0xFF:
            raise unwind(LError(u"JSON decode error: syntax"))
        elif action >= 0x80:  # shift
            stack.append(gotos[state])
            action -= 0x80
        if action > 0:
            decode_json(action, ch, ctx)
        if code == 0xFF:
            state = stack.pop()
        else:
            state = code
            return state


@always_inline
def decode_json(action, ch, ctx):
    """Execute one semantic action number produced by the parse tables."""
    if action == 0x1:    # push list
        ctx.ds.append(space.List([]))
    elif action == 0x2:  # push object to ds
        ctx.ds.append(space.Dict())
    elif action == 0x3:  # pop & append
        val = ctx.ds.pop()
        top = ctx.ds[len(ctx.ds)-1]
        assert isinstance(top, List)  # we can trust this.
        top.contents.append(val)
    elif action == 0x4:  # pop pop & setitem
        val = ctx.ds.pop()
        key = ctx.ds.pop()
        top = ctx.ds[len(ctx.ds)-1]
        assert isinstance(top, Dict)  # again..
        top.data[key] = val
    elif action == 0x5:  # push null
        ctx.ds.append(space.null)
    elif action == 0x6:  # push true
        ctx.ds.append(space.true)
    elif action == 0x7:  # push false
        ctx.ds.append(space.false)
    elif action == 0x8:  # push string (built from ss; both builders reset)
        val = ctx.ss.build()
        ctx.ds.append(space.String(val))
        ctx.ss = UnicodeBuilder()
        ctx.es = UnicodeBuilder()
    elif action == 0x9:  # push int
        val = int(ctx.ss.build().encode('utf-8'))
        ctx.ds.append(space.Integer(val))
        ctx.ss = UnicodeBuilder()
    elif action == 0xA:  # push float
        val = float(ctx.ss.build().encode('utf-8'))
        ctx.ds.append(space.Float(val))
        ctx.ss = UnicodeBuilder()
    elif action == 0xB:  # push ch to ss
        ctx.ss.append(ch)
    elif action == 0xC:  # push ch to es
        ctx.es.append(ch)
    elif action == 0xD:  # push escape (\b \t \n \f \r)
        ctx.ss.append(unichr(escape_characters[ch]))
    elif action == 0xE:  # push unicode point (\uXXXX collected in es)
        ctx.ss.append(unichr(int(ctx.es.build().encode('utf-8'), 16)))
    else:
        # This is very unlikely to happen.
        assert False, "JSON decoder bug"


# Non-trivial escape characters. At worst you can
# 'switch' or 'if/else' them into do_action -function.
escape_characters = {'b': 8, 't': 9, 'n': 10, 'f': 12, 'r': 13}

# generated by build_tables.py program: http://github.com/cheery/json_algorithm
# MACHINE-GENERATED parse tables -- do not edit by hand; regenerate instead.
# states[state][catcode] -> (action << 8) | next_state, see parse_char above.
states = [
    [0xffff, 0x0000, 0x801a, 0xffff, 0xffff, 0x8b29, 0xffff, 0xffff, 0x8b28, 0x8b22, 0xffff, 0xffff, 0xffff, 0x810e, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x8009, 0xffff, 0x8001, 0xffff, 0xffff, 0x8005, 0xffff, 0x8212, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0002, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0003, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0004, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff, 0x05ff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0006, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0007, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0008, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff, 0x06ff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x000a, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x000b, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x000c, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x000d, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff, 0x07ff],
    [0xffff, 0x000e, 0x801a, 0xffff, 0xffff, 0x8b29, 0xffff, 0xffff, 0x8b28, 0x8b22, 0xffff, 0xffff, 0xffff, 0x810e, 0xffff, 0x0011, 0xffff, 0xffff, 0xffff, 0x8009, 0xffff, 0x8001, 0xffff, 0xffff, 0x8005, 0xffff, 0x8212, 0xffff],
    [0xffff, 0x000f, 0xffff, 0xffff, 0x0310, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0311, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0x0010, 0x801a, 0xffff, 0xffff, 0x8b29, 0xffff, 0xffff, 0x8b28, 0x8b22, 0xffff, 0xffff, 0xffff, 0x810e, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x8009, 0xffff, 0x8001, 0xffff, 0xffff, 0x8005, 0xffff, 0x8212, 0xffff],
    [0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff],
    [0xffff, 0x0012, 0x801a, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0019],
    [0xffff, 0x0013, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0014, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0x0014, 0x801a, 0xffff, 0xffff, 0x8b29, 0xffff, 0xffff, 0x8b28, 0x8b22, 0xffff, 0xffff, 0xffff, 0x810e, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x8009, 0xffff, 0x8001, 0xffff, 0xffff, 0x8005, 0xffff, 0x8212, 0xffff],
    [0xffff, 0x0015, 0xffff, 0xffff, 0x0416, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0419],
    [0xffff, 0x0016, 0x801a, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0x0017, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0018, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0x0018, 0x801a, 0xffff, 0xffff, 0x8b29, 0xffff, 0xffff, 0x8b28, 0x8b22, 0xffff, 0xffff, 0xffff, 0x810e, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x8009, 0xffff, 0x8001, 0xffff, 0xffff, 0x8005, 0xffff, 0x8212, 0xffff],
    [0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff, 0x00ff],
    [0x0b1a, 0x0b1a, 0x0021, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x001b, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a, 0x0b1a],
    [0xffff, 0xffff, 0x0b1a, 0xffff, 0xffff, 0xffff, 0xffff, 0x0b1a, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0b1a, 0xffff, 0xffff, 0x0d1a, 0xffff, 0x0d1a, 0xffff, 0x0d1a, 0x0d1a, 0xffff, 0x0d1a, 0x801c, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0c1d, 0x0c1d, 0xffff, 0x0c1d, 0x0c1d, 0xffff, 0xffff, 0xffff, 0x0c1d, 0x0c1d, 0x0c1d, 0x0c1d, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0c1e, 0x0c1e, 0xffff, 0x0c1e, 0x0c1e, 0xffff, 0xffff, 0xffff, 0x0c1e, 0x0c1e, 0x0c1e, 0x0c1e, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0c1f, 0x0c1f, 0xffff, 0x0c1f, 0x0c1f, 0xffff, 0xffff, 0xffff, 0x0c1f, 0x0c1f, 0x0c1f, 0x0c1f, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0c20, 0x0c20, 0xffff, 0x0c20, 0x0c20, 0xffff, 0xffff, 0xffff, 0x0c20, 0x0c20, 0x0c20, 0x0c20, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff, 0x0eff],
    [0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff, 0x08ff],
    [0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x0b23, 0x09ff, 0x0b22, 0x0b22, 0x09ff, 0x09ff, 0x0b25, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x0b25, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0b24, 0x0b24, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0b24, 0x0b24, 0x0aff, 0x0aff, 0x0b25, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0b25, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff],
    [0xffff, 0xffff, 0xffff, 0x0b26, 0xffff, 0x0b26, 0xffff, 0xffff, 0x0b27, 0x0b27, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0b27, 0x0b27, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
    [0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0b27, 0x0b27, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff, 0x0aff],
    [0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x0b23, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x0b25, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x0b25, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff, 0x09ff],
    [0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x0b28, 0x0b22, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff],
]
# gotos[state]: the return state pushed when shifting into a nested value.
gotos = [0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
         15, 255, 15, 255, 19, 255, 21, 255, 23, 255, 21, 255, 255, 26, 255,
         255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255]
# catcode[codepoint]: character category (column index into `states` rows).
catcode = [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 5, 6, 7,
           8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10, 0, 0, 0, 0, 0,
           0, 11, 11, 11, 11, 12, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0,
           0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 14, 15, 0, 0,
           0, 16, 17, 11, 11, 18, 19, 0, 0, 0, 0, 0, 20, 0, 21, 0,
           0, 0, 22, 23, 24, 25, 0, 0, 0, 0, 0, 26, 0, 27, 0]
<import_from_stmt>. _compressor<import_from_stmt>. bzip2<import_from_stmt>. gzip<import_from_stmt>. lzma<import_from_stmt>. zlib<line_sep>
from fairseq.models.transformer_lm import *
from torch.nn import CrossEntropyLoss
from typing import Any, Dict, List, Optional, Tuple
from torch import Tensor


class TransformerLanguageModelWrapper(TransformerLanguageModel):
    """TransformerLanguageModel whose decoder is a TransformerDecoderWrapper,
    which adds a `predict` pass operating on precomputed input embeddings."""

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure all arguments are present in older models
        base_lm_architecture(args)

        if args.decoder_layers_to_keep:
            args.decoder_layers = len(args.decoder_layers_to_keep.split(","))

        if getattr(args, "max_target_positions", None) is None:
            args.max_target_positions = getattr(
                args, "tokens_per_sample", DEFAULT_MAX_TARGET_POSITIONS)

        # Token-embedding choice mirrors upstream fairseq's
        # TransformerLanguageModel.build_model.
        if args.character_embeddings:
            embed_tokens = CharacterTokenEmbedder(
                task.source_dictionary,
                eval(args.character_filters),  # NOTE(review): eval of a config string, inherited from upstream fairseq
                args.character_embedding_dim,
                args.decoder_embed_dim,
                args.char_embedder_highway_layers,
            )
        elif args.adaptive_input:
            embed_tokens = AdaptiveInput(
                len(task.source_dictionary),
                task.source_dictionary.pad(),
                args.decoder_input_dim,
                args.adaptive_input_factor,
                args.decoder_embed_dim,
                options.eval_str_list(args.adaptive_input_cutoff, type=int),
                args.quant_noise_pq,
                args.quant_noise_pq_block_size,
            )
        else:
            embed_tokens = cls.build_embedding(
                args, task.source_dictionary, args.decoder_input_dim)

        if args.tie_adaptive_weights:
            # Tying adaptive input and adaptive softmax requires matching geometry.
            assert args.adaptive_input
            assert args.adaptive_input_factor == args.adaptive_softmax_factor
            assert (args.adaptive_softmax_cutoff == args.adaptive_input_cutoff), \
                "{} != {}".format(args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
            assert args.decoder_input_dim == args.decoder_output_dim

        # Decoder-only LM: no encoder attention.
        decoder = TransformerDecoderWrapper(
            args, task.target_dictionary, embed_tokens, no_encoder_attn=True)
        return cls(decoder)


class TransformerDecoderWrapper(TransformerDecoder):
    def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
        super(TransformerDecoderWrapper, self).__init__(
            args, dictionary, embed_tokens, no_encoder_attn)
        self.use_parallel = False

    def predict(self, prev_output_tokens, inputs_embeds, attention_mask, labels,
                encoder_out=None, incremental_state=None,
                full_context_alignment=False, alignment_layer=None,
                alignment_heads=None):
        """Forward pass from precomputed token embeddings to LM logits.

        prev_output_tokens: token ids, used only for positions/padding mask.
        inputs_embeds: precomputed embeddings replacing embed_tokens lookup.
        attention_mask: accepted for interface compatibility but not used here.
        labels: if given, also returns the shifted cross-entropy loss.
        Returns (lm_logits, loss) where loss is None when labels is None.

        NOTE(review): the target device is hard-coded to "cuda:0" throughout;
        confirm whether this should follow the module's own device instead.
        """
        prev_output_tokens = prev_output_tokens.to("cuda:0")
        inputs_embeds = inputs_embeds.to("cuda:0")
        # embed positions
        positions = (self.embed_positions(prev_output_tokens, incremental_state=None)
                     if self.embed_positions is not None else None)
        prev_output_tokens = prev_output_tokens.to("cuda:0")

        # Scale the given embeddings exactly as the stock forward scales
        # embed_tokens output, then apply the usual projections/dropout.
        x = self.embed_scale * inputs_embeds
        if self.quant_noise is not None:
            x = self.quant_noise(x)
        if self.project_in_dim is not None:
            x = self.project_in_dim(x)
        if positions is not None:
            x += positions.to("cuda:0")
        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        # B x T x C -> T x B x C for the decoder layers.
        x = x.transpose(0, 1)
        self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)

        # decoder layers
        attn: Optional[Tensor] = None
        #inner_states: List[Optional[Tensor]] = [x]
        for idx, layer in enumerate(self.layers):
            # Causal mask only when decoding the full context at once.
            if incremental_state is None and not full_context_alignment:
                self_attn_mask = self.buffered_future_mask(x)
            else:
                self_attn_mask = None
            x, layer_attn, _ = layer(
                x,
                encoder_out.encoder_out if encoder_out is not None else None,
                encoder_out.encoder_padding_mask if encoder_out is not None else None,
                incremental_state,
                self_attn_mask=self_attn_mask,
                self_attn_padding_mask=self_attn_padding_mask,
                need_attn=bool((idx == alignment_layer)),
                need_head_weights=bool((idx == alignment_layer)),
            )
            #inner_states.append(x)
            if layer_attn is not None and idx == alignment_layer:
                attn = layer_attn.float().to(x)

        if attn is not None:
            if alignment_heads is not None:
                attn = attn[:alignment_heads]
            # average probabilities over heads
            attn = attn.mean(dim=0)

        x = x.to("cuda:0")
        if self.layer_norm is not None:
            x = self.layer_norm(x)
        # T x B x C -> B x T x C
        x = x.transpose(0, 1)
        if self.project_out_dim is not None:
            x = self.project_out_dim(x)
        lm_logits = self.output_layer(x)

        # Shift so that tokens < n predict n
        shift_logits = lm_logits[..., :-1, :].contiguous()
        if labels is not None:
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
                            shift_labels.view(-1))
            # return loss, lm_logits
            return lm_logits, loss
        else:
            return lm_logits, None
"""Tests for the logix_driver.py file.

The Logix Driver is beholden to the CIPDriver interface.  Only tests which
bind it to that interface should be allowed here; tests binding to another
interface such as Socket are an anti-pattern.

There are quite a few methods in the LogixDriver which are difficult to read
or test due to both code clarity issues and it being inconvenient.  Also the
vast majority of methods are private; private methods should not be tested
directly — rather their effects on public methods should be tested.

    pytest --cov=pycomm3 --cov-branch tests/offline/

    Name                      Stmts   Miss Branch BrPart  Cover
    ----------------------------------------------------------
    pycomm3/logix_driver.py     798    718    346      0     7%

We're currently at 7% test coverage; the goal is >=50% and then the same for
the rest of the modules.
"""
from unittest import mock

import pytest

from pycomm3.cip_driver import CIPDriver
from pycomm3.const import MICRO800_PREFIX, SUCCESS
from pycomm3.exceptions import CommError, PycommError, RequestError
from pycomm3.logix_driver import LogixDriver, encode_value
from pycomm3.packets import RequestPacket, ResponsePacket
from pycomm3.socket_ import Socket
from pycomm3.tag import Tag
from pycomm3.custom_types import ModuleIdentityObject

CONNECT_PATH = '192.168.1.100/1'

# Canned identity objects: three ControlLogix firmware revisions (v21 is the
# cutoff for instance-id tag addressing) and one Micro800.
IDENTITY_CLX_V20 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller',
                    'product_code': 0,
                    'revision': {'major': 20, 'minor': 0},
                    'status': b'00',
                    'serial': '00000000',
                    'product_name': '1756-L55'}

IDENTITY_CLX_V21 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller',
                    'product_code': 0,
                    'revision': {'major': 21, 'minor': 0},
                    'status': b'00',
                    'serial': '00000000',
                    'product_name': '1756-L62'}

IDENTITY_CLX_V32 = {'vendor': 'Rockwell Automation/Allen-Bradley',
                    'product_type': 'Programmable Logic Controller',
                    'product_code': 0,
                    'revision': {'major': 32, 'minor': 0},
                    'status': b'00',
                    'serial': '00000000',
                    'product_name': '1756-L85'}

IDENTITY_M8000 = {'encap_protocol_version': 1,
                  'ip_address': '192.168.1.124',
                  'product_code': 259,
                  'product_name': '2080-LC50-48QWBS',
                  'product_type': 'Programmable Logic Controller',
                  'revision': {'major': 12, 'minor': 11},
                  'serial': '12345678',
                  'state': 2,
                  'status': b'4\x00',
                  'vendor': 'Rockwell Automation/Allen-Bradley'}


def test_open_call_init_driver_open():
    """`open()` must invoke `_initialize_driver` (and the base open)."""
    with mock.patch.object(CIPDriver, 'open') as mock_open, \
         mock.patch.object(LogixDriver, '_initialize_driver') as mock_init:
        driver = LogixDriver(CONNECT_PATH)
        driver.open()
        assert mock_open.called
        assert mock_init.called


def test_open_call_init_driver_with():
    """Entering the context manager must also invoke `_initialize_driver`."""
    with mock.patch.object(CIPDriver, 'open') as mock_open, \
         mock.patch.object(LogixDriver, '_initialize_driver') as mock_init:
        with LogixDriver(CONNECT_PATH):
            ...
        assert mock_open.called
        assert mock_init.called


@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21, IDENTITY_CLX_V32])
def test_logix_init_for_version_support_instance_ids_large_connection(identity):
    """Firmware >= v21 enables instance-id tag addressing; older does not."""
    with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
         mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
         mock.patch.object(LogixDriver, 'get_plc_name') as mock_get_name:
        mock_identity.return_value = identity
        mock_get_info.return_value = identity  # this is the ListIdentity response
                                               # not the same as module identity, but
                                               # has all the fields needed for the test

        plc = LogixDriver(CONNECT_PATH)
        plc._initialize_driver(False, False)
        assert plc._micro800 is False
        assert plc._cfg['use_instance_ids'] == (identity['revision']['major'] >= 21)
        assert mock_get_info.called
        assert mock_get_name.called


@pytest.mark.parametrize('identity', [IDENTITY_M8000, ])
def test_logix_init_micro800(identity):
    """Micro800s skip instance ids, the program-name lookup and the CIP path."""
    with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
         mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
         mock.patch.object(LogixDriver, 'get_plc_name') as mock_get_name:
        mock_identity.return_value = identity
        mock_get_info.return_value = identity

        plc = LogixDriver(CONNECT_PATH)
        plc._initialize_driver(False, False)
        assert plc._micro800 is True
        assert plc._cfg['use_instance_ids'] is False
        assert mock_get_info.called
        assert not mock_get_name.called
        assert not plc._cfg['cip_path']


@pytest.mark.parametrize('identity', [IDENTITY_CLX_V20, IDENTITY_CLX_V21,
                                      IDENTITY_CLX_V32, IDENTITY_M8000])
def test_logix_init_calls_get_tag_list_if_init_tags(identity):
    """`init_tags=True` must trigger a tag-list upload on open()."""
    with mock.patch.object(LogixDriver, '_list_identity') as mock_identity, \
         mock.patch.object(LogixDriver, 'get_plc_info') as mock_get_info, \
         mock.patch.object(LogixDriver, 'get_plc_name'), \
         mock.patch.object(CIPDriver, 'open'), \
         mock.patch.object(LogixDriver, 'get_tag_list') as mock_tag:
        mock_identity.return_value = identity
        mock_get_info.return_value = identity
        driver = LogixDriver(CONNECT_PATH, init_info=False, init_tags=True)
        driver._target_is_connected = True
        driver.open()
    assert mock_tag.called


def test_logix_context_manager_calls_open_and_close():
    with mock.patch.object(LogixDriver, 'open') as mock_open, \
         mock.patch.object(LogixDriver, 'close') as mock_close:
        with LogixDriver(CONNECT_PATH, init_info=False, init_tags=False):
            pass

        assert mock_open.called
        assert mock_close.called


def test__exit__returns_false_on_commerror():
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert ld.__exit__(None, None, None) is True  # Exit with no exception


def test__exit__returns_true_on_no_error_and_no_exc_type():
    with mock.patch.object(LogixDriver, 'close'):
        ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        assert ld.__exit__(None, None, None) is True


def test__exit__returns_false_on_no_error_and_exc_type():
    with mock.patch.object(LogixDriver, 'close'):
        ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        assert ld.__exit__('Some Exc Type', None, None) is False


def test__repr___ret_str():
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    _repr = repr(ld)
    # BUGFIX: previously asserted the *builtin* `repr` (always truthy) instead
    # of the computed value, making the first assertion vacuous.
    assert _repr
    assert isinstance(_repr, str)


def test_default_logix_tags_are_empty_dict():
    """Show that LogixDriver tags are an empty dict on init."""
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert ld.tags == dict()


def test_logix_connected_false_on_init_with_false_init_params():
    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    assert ld.connected is False


def test_clx_get_plc_time_sends_packet():
    with mock.patch.object(LogixDriver, 'send') as mock_send, \
         mock.patch('pycomm3.cip_driver.with_forward_open'):
        ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        ld.get_plc_time()
        assert mock_send.called


def test_clx_set_plc_time_sends_packet():
    with mock.patch.object(LogixDriver, 'send') as mock_send, \
         mock.patch('pycomm3.cip_driver.with_forward_open'):
        ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
        ld.set_plc_time()
        assert mock_send.called


# TODO: all of the tag list associated tests
@pytest.mark.skip(reason="""tag parsing is extremely complex, and it's \
nearly impossible to test this without also reverse-engineering it""")
def test__get_tag_list_returns_expected_user_tags():
    EXPECTED_USER_TAGS = [{
        'tag_type': 'struct',  # bit 15 is a 1
        'instance_id': 1,
        'tag_name': b"\x00\x01",
        'symbol_type': "",
        'symbol_address': "",
        'symbol_object_address': "",
        'software_control': "",
        'external_access': "",
        'dimensions': ["", "", ""],
    }]
    TEST_RESPONSE = ResponsePacket()
    # Response layout:
    #  0 -> 4 are the 'instance', dint
    #  4 -> 6 is the 'tag_length', uint, used internally
    #  8 -> 'tag_length' is 'tag_name'
    #  8+tag_length  -> 10+tag_length is 'symbol_type' uint
    # 10+tag_length  -> 14+tag_length is 'symbol_address' udint
    # 14+tag_length  -> 18+tag_length is 'symbol_object_address' udint
    # 18+tag_length  -> 22+tag_length is 'software_control' udint
    # 'dim1', 'dim2' and 'dim3' are the next 12 bytes, udint
    TEST_RESPONSE.data = b"\x00\x00\x00\x01" + b"\x00\x01" + b"\x00\x01" + b"\x00\x00\x00\x00\x00\x10"
    TEST_RESPONSE.command = "Something"
    TEST_RESPONSE.command_status = SUCCESS

    ld = LogixDriver(CONNECT_PATH, init_info=False, init_tags=False)
    with mock.patch.object(RequestPacket, 'send') as mock_send, \
         mock.patch.object(CIPDriver, '_forward_open'), \
         mock.patch.object(LogixDriver, '_parse_instance_attribute_list'):
        mock_send.return_value = TEST_RESPONSE
        actual_tags = ld.get_tag_list()

    assert EXPECTED_USER_TAGS == actual_tags
from __future__ import print_function, absolute_import, division, unicode_literals

from .compat import no_limit_int
from .anchor import Anchor

if False:  # MYPY
    from typing import Text, Any, Dict, List

__all__ = ['ScalarInt', 'BinaryInt', 'OctalInt', 'HexInt', 'HexCapsInt', 'DecimalInt']


class ScalarInt(no_limit_int):
    """An int that carries YAML round-trip metadata.

    Remembers the formatted width, the underscore grouping of the original
    scalar, and an optional anchor, so the value can be re-dumped the way it
    was written.
    """

    def __new__(cls, *args, **kw):
        width = kw.pop('width', None)
        underscore = kw.pop('underscore', None)
        anchor = kw.pop('anchor', None)
        value = no_limit_int.__new__(cls, *args, **kw)
        value._width = width
        value._underscore = underscore
        if anchor is not None:
            value.yaml_set_anchor(anchor, always_dump=True)
        return value

    # In-place arithmetic must produce a new instance (ints are immutable)
    # while carrying the formatting metadata over to the result.

    def __iadd__(self, other):
        result = type(self)(self + other)
        result._width = self._width
        result._underscore = (
            self._underscore[:] if self._underscore is not None else None
        )
        return result

    def __ifloordiv__(self, other):
        result = type(self)(self // other)
        result._width = self._width
        result._underscore = (
            self._underscore[:] if self._underscore is not None else None
        )
        return result

    def __imul__(self, other):
        result = type(self)(self * other)
        result._width = self._width
        result._underscore = (
            self._underscore[:] if self._underscore is not None else None
        )
        return result

    def __ipow__(self, other):
        result = type(self)(self ** other)
        result._width = self._width
        result._underscore = (
            self._underscore[:] if self._underscore is not None else None
        )
        return result

    def __isub__(self, other):
        result = type(self)(self - other)
        result._width = self._width
        result._underscore = (
            self._underscore[:] if self._underscore is not None else None
        )
        return result

    @property
    def anchor(self):
        # Lazily attach an Anchor object on first access.
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any=False):
        """Return the anchor if one should be dumped, else None."""
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None

    def yaml_set_anchor(self, value, always_dump=False):
        self.anchor.value = value
        self.anchor.always_dump = always_dump


class BinaryInt(ScalarInt):
    """Int originally written in binary (0b...) notation."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        return ScalarInt.__new__(cls, value, width=width,
                                 underscore=underscore, anchor=anchor)


class OctalInt(ScalarInt):
    """Int originally written in octal notation."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        return ScalarInt.__new__(cls, value, width=width,
                                 underscore=underscore, anchor=anchor)


class HexInt(ScalarInt):
    """Int originally written in lowercase hex notation."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        return ScalarInt.__new__(cls, value, width=width,
                                 underscore=underscore, anchor=anchor)


class HexCapsInt(ScalarInt):
    """Int originally written in uppercase hex notation."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        return ScalarInt.__new__(cls, value, width=width,
                                 underscore=underscore, anchor=anchor)


class DecimalInt(ScalarInt):
    """Int originally written in decimal notation."""

    def __new__(cls, value, width=None, underscore=None, anchor=None):
        return ScalarInt.__new__(cls, value, width=width,
                                 underscore=underscore, anchor=anchor)
<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_from_stmt>fnmatch fnmatch<import_stmt>sys<import_from_stmt>unittest.suite _call_if_exists _DebugResult _isnotsuite TestSuite<import_from_stmt>unittest util<import_stmt>unittest<import_from_stmt>io StringIO<import_from_stmt>green.config default_args<import_from_stmt>green.output GreenStream<import_from_stmt>green.result ProtoTest<class_stmt>GreenTestSuite(TestSuite)<block_start>""" This version of a test suite has two important functions: 1) It brings Python 3.x-like features to Python 2.7 2) It adds Green-specific features (see customize()) """<line_sep>args=<none><def_stmt>__init__ self tests=() args=<none># You should either set GreenTestSuite.args before instantiation, or # pass args into __init__ <block_start>self._removed_tests=0<line_sep>self.allow_stdout=default_args.allow_stdout<line_sep>self.full_test_pattern="test"+default_args.test_pattern<line_sep>self.customize(args)<line_sep>super(GreenTestSuite self).__init__(tests)<block_end><def_stmt>addTest self test<block_start>""" Override default behavior with some green-specific behavior. """<if_stmt>(self.full_test_pattern# test can actually be suites and things. Only tests have # _testMethodName <and>getattr(test "_testMethodName" <false>)# Fake test cases (generated for module import failures, for example) # do not start with 'test'. We still want to see those fake cases. <and>test._testMethodName.startswith("test"))<block_start><if_stmt><not>fnmatch(test._testMethodName self.full_test_pattern)<block_start><return><block_end><block_end>super(GreenTestSuite self).addTest(test)<block_end><def_stmt>customize self args<block_start>""" Green-specific behavior customization via an args dictionary from the green.config module. If you don't pass in an args dictionary, then this class acts like TestSuite from Python 3.x. 
"""<line_sep># Set a new args on the CLASS <if_stmt>args<block_start>self.args=args<block_end># Use the class args <if_stmt>self.args<and>getattr(self.args "allow_stdout" <none>)<block_start>self.allow_stdout=self.args.allow_stdout<block_end><if_stmt>self.args<and>getattr(self.args "test_pattern" <none>)<block_start>self.full_test_pattern="test"+self.args.test_pattern<block_end><block_end><def_stmt>_removeTestAtIndex self index<block_start>""" Python 3.x-like version of this function for Python 2.7's sake. """<line_sep>test=self._tests[index]<if_stmt>hasattr(test "countTestCases")<block_start>self._removed_tests<augadd>test.countTestCases()<block_end>self._tests[index]=<none><block_end><def_stmt>countTestCases self<block_start>""" Python 3.x-like version of this function for Python 2.7's sake. """<line_sep>cases=self._removed_tests<for_stmt>test self<block_start><if_stmt>test<block_start>cases<augadd>test.countTestCases()<block_end><block_end><return>cases<block_end><def_stmt>_handleClassSetUpPre38 self test result# pragma: nocover <block_start>previousClass=getattr(result "_previousTestClass" <none>)<line_sep>currentClass=test.__class__<if_stmt>currentClass<eq>previousClass<block_start><return><block_end><if_stmt>result._moduleSetUpFailed<block_start><return><block_end><if_stmt>getattr(currentClass "__unittest_skip__" <false>)# pragma: no cover <block_start><return><block_end><try_stmt><block_start>currentClass._classSetupFailed=<false><block_end><except_stmt>TypeError# pragma: no cover # test may actually be a function # so its class will be a builtin-type <block_start><pass><block_end>setUpClass=getattr(currentClass "setUpClass" <none>)<if_stmt>setUpClass<is><not><none><block_start>_call_if_exists(result "_setupStdout")<try_stmt><block_start>setUpClass()<block_end># Upstream Python forgets to take SkipTest into account 
<except_stmt>unittest.case.SkipTest<as>e<block_start>currentClass.__unittest_skip__=<true><line_sep>currentClass.__unittest_skip_why__=str(e)<block_end># -- END of fix <except_stmt>Exception<as>e# pragma: no cover <block_start><if_stmt>isinstance(result _DebugResult)<block_start><raise><block_end>currentClass._classSetupFailed=<true><line_sep>className=util.strclass(currentClass)<line_sep>errorName="setUpClass (%s)"%className<line_sep>self._addClassOrModuleLevelException(result e errorName)<block_end><finally_stmt><block_start>_call_if_exists(result "_restoreStdout")<block_end><block_end><block_end><def_stmt>_handleClassSetUpPost38 self test result# pragma: no cover -- because it's just like *Pre38 <block_start>previousClass=getattr(result "_previousTestClass" <none>)<line_sep>currentClass=test.__class__<if_stmt>currentClass<eq>previousClass<block_start><return><block_end><if_stmt>result._moduleSetUpFailed<block_start><return><block_end><if_stmt>getattr(currentClass "__unittest_skip__" <false>)<block_start><return><block_end><try_stmt><block_start>currentClass._classSetupFailed=<false><block_end><except_stmt>TypeError# test may actually be a function # so its class will be a builtin-type <block_start><pass><block_end>setUpClass=getattr(currentClass "setUpClass" <none>)<if_stmt>setUpClass<is><not><none><block_start>_call_if_exists(result "_setupStdout")<try_stmt><block_start>setUpClass()<block_end># Upstream Python forgets to take SkipTest into account <except_stmt>unittest.case.SkipTest<as>e<block_start>currentClass.__unittest_skip__=<true><line_sep>currentClass.__unittest_skip_why__=str(e)<block_end># -- END of fix <except_stmt>Exception<as>e<block_start><if_stmt>isinstance(result _DebugResult)<block_start><raise><block_end>currentClass._classSetupFailed=<true><line_sep>className=util.strclass(currentClass)<line_sep>self._createClassOrModuleLevelException(result e "setUpClass" className)<block_end><finally_stmt><block_start>_call_if_exists(result 
"_restoreStdout")<if_stmt>currentClass._classSetupFailed<is><true><block_start>currentClass.doClassCleanups()<if_stmt>len(currentClass.tearDown_exceptions)<g>0<block_start><for_stmt>exc currentClass.tearDown_exceptions<block_start>self._createClassOrModuleLevelException(result exc[1] "setUpClass" className info=exc)<block_end><block_end><block_end><block_end><block_end><block_end><if_stmt>sys.version_info<l>(3 8)# pragma: no cover <block_start>_handleClassSetUp=_handleClassSetUpPre38<block_end><else_stmt><block_start>_handleClassSetUp=_handleClassSetUpPost38<block_end><def_stmt>run self result<block_start>""" Emulate unittest's behavior, with Green-specific changes. """<line_sep>topLevel=<false><if_stmt>getattr(result "_testRunEntered" <false>)<is><false><block_start>result._testRunEntered=topLevel=<true><block_end><for_stmt>index,test enumerate(self)<block_start><if_stmt>result.shouldStop<block_start><break><block_end><if_stmt>_isnotsuite(test)<block_start>self._tearDownPreviousClass(test result)<line_sep>self._handleModuleFixture(test result)<line_sep>self._handleClassSetUp(test result)<line_sep>result._previousTestClass=test.__class__<if_stmt>getattr(test.__class__ "_classSetupFailed" <false>)<or>getattr(result "_moduleSetUpFailed" <false>)<block_start><continue><block_end><if_stmt><not>self.allow_stdout<block_start>captured_stdout=StringIO()<line_sep>captured_stderr=StringIO()<line_sep>saved_stdout=sys.stdout<line_sep>saved_stderr=sys.stderr<line_sep>sys.stdout=GreenStream(captured_stdout)<line_sep>sys.stderr=GreenStream(captured_stderr)<block_end><block_end>test(result)<if_stmt>_isnotsuite(test)<block_start><if_stmt><not>self.allow_stdout<block_start>sys.stdout=saved_stdout<line_sep>sys.stderr=saved_stderr<line_sep>result.recordStdout(test captured_stdout.getvalue())<line_sep>result.recordStderr(test captured_stderr.getvalue())<block_end># Since we're intercepting the stdout/stderr out here at the # suite level, we need to poke the test result and let it know 
# when we're ready to transmit results back up to the parent # process. I would rather just do it automatically at test # stop time, but we don't have the captured stuff at that # point. Messy...but the only other alternative I can think of # is monkey-patching loaded TestCases -- which could be from # unittest or twisted or some other custom subclass. result.finalize()<block_end>self._removeTestAtIndex(index)<block_end># Green's subprocesses have handled all actual tests and sent up the # result, but unittest expects to be able to add teardown errors to # the result still, so we'll need to watch for that ourself. errors_before=len(result.errors)<if_stmt>topLevel<block_start>self._tearDownPreviousClass(<none> result)<line_sep>self._handleModuleTearDown(result)<line_sep>result._testRunEntered=<false><block_end># Special handling for class/module tear-down errors. startTest() and # finalize() both trigger communication between the subprocess and # the runner process. addError() <if_stmt>errors_before<ne>len(result.errors)<block_start>difference=len(result.errors)-errors_before<line_sep>result.errors,new_errors=(result.errors[:-difference] result.errors[-difference:] )<for_stmt>(test err) new_errors# test = ProtoTest() <block_start>test.module=result._previousTestClass.__module__<line_sep>test.class_name=result._previousTestClass.__name__<line_sep># test.method_name = 'some method name' test.is_class_or_module_teardown_error=<true><line_sep>test.name="Error in class or module teardown"<line_sep># test.docstr_part = 'docstr part' # error_holder.description result.startTest(test)<line_sep>result.addError(test err)<line_sep>result.stopTest(test)<line_sep>result.finalize()<block_end><block_end><return>result<block_end><block_end>
from quart import g, jsonify
from http import HTTPStatus

from lnbits.decorators import api_validate_post_request, api_check_wallet_key
from lnbits.core.crud import get_user

from . import tipjar_ext
from .helpers import get_charge_details
from .crud import (
    create_tipjar,
    get_tipjar,
    create_tip,
    get_tipjars,
    get_tip,
    get_tips,
    update_tip,
    update_tipjar,
    delete_tip,
    delete_tipjar,
)
from ..satspay.crud import create_charge


@tipjar_ext.route("/api/v1/tipjars", methods=["POST"])
@api_check_wallet_key("invoice")
@api_validate_post_request(
    schema={
        "name": {"type": "string", "required": True},
        "wallet": {"type": "string", "required": True},
        "webhook": {"type": "string"},
        "onchain": {"type": "string"},
    }
)
async def api_create_tipjar():
    """Create a tipjar, which holds data about how/where to post tips"""
    try:
        tipjar = await create_tipjar(**g.data)
    except Exception as e:
        return jsonify({"message": str(e)}), HTTPStatus.INTERNAL_SERVER_ERROR
    return jsonify(tipjar._asdict()), HTTPStatus.CREATED


@tipjar_ext.route("/api/v1/tips", methods=["POST"])
@api_validate_post_request(
    schema={
        "name": {"type": "string"},
        "sats": {"type": "integer", "required": True},
        "tipjar": {"type": "integer", "required": True},
        "message": {"type": "string"},
    }
)
async def api_create_tip():
    """Take data from tip form and return satspay charge"""
    sats = g.data["sats"]
    message = g.data.get("message", "")[:144]
    if not message:
        message = "No message"
    tipjar_id = g.data["tipjar"]
    tipjar = await get_tipjar(tipjar_id)
    # BUGFIX: an unknown tipjar id previously crashed with AttributeError on
    # `tipjar.webhook` (HTTP 500); return a clean 404 instead, matching the
    # other handlers in this module.
    if not tipjar:
        return (
            jsonify({"message": "TipJar does not exist."}),
            HTTPStatus.NOT_FOUND,
        )
    webhook = tipjar.webhook
    charge_details = await get_charge_details(tipjar.id)
    name = g.data.get("name", "")[:25]
    # Ensure that description string can be split reliably
    name = name.replace('"', "''")
    if not name:
        name = "Anonymous"
    description = f'"{name}": {message}'
    charge = await create_charge(
        amount=sats,
        webhook=webhook,
        description=description,
        **charge_details,
    )
    await create_tip(
        id=charge.id,
        wallet=tipjar.wallet,
        message=message,
        name=name,
        sats=g.data["sats"],
        tipjar=g.data["tipjar"],
    )
    return (jsonify({"redirect_url": f"/satspay/{charge.id}"}), HTTPStatus.OK)


@tipjar_ext.route("/api/v1/tipjars", methods=["GET"])
@api_check_wallet_key("invoice")
async def api_get_tipjars():
    """Return list of all tipjars assigned to wallet with given invoice key"""
    wallet_ids = (await get_user(g.wallet.user)).wallet_ids
    tipjars = []
    for wallet_id in wallet_ids:
        new_tipjars = await get_tipjars(wallet_id)
        tipjars += new_tipjars if new_tipjars else []
    return (
        jsonify([tipjar._asdict() for tipjar in tipjars] if tipjars else []),
        HTTPStatus.OK,
    )


@tipjar_ext.route("/api/v1/tips", methods=["GET"])
@api_check_wallet_key("invoice")
async def api_get_tips():
    """Return list of all tips assigned to wallet with given invoice key"""
    wallet_ids = (await get_user(g.wallet.user)).wallet_ids
    tips = []
    for wallet_id in wallet_ids:
        new_tips = await get_tips(wallet_id)
        tips += new_tips if new_tips else []
    return (
        jsonify([tip._asdict() for tip in tips] if tips else []),
        HTTPStatus.OK,
    )


@tipjar_ext.route("/api/v1/tips/<tip_id>", methods=["PUT"])
@api_check_wallet_key("invoice")
async def api_update_tip(tip_id=None):
    """Update a tip with the data given in the request"""
    if tip_id:
        tip = await get_tip(tip_id)
        if not tip:
            return (
                jsonify({"message": "Tip does not exist."}),
                HTTPStatus.NOT_FOUND,
            )
        if tip.wallet != g.wallet.id:
            return (jsonify({"message": "Not your tip."}), HTTPStatus.FORBIDDEN)
        tip = await update_tip(tip_id, **g.data)
    else:
        return (
            jsonify({"message": "No tip ID specified"}),
            HTTPStatus.BAD_REQUEST,
        )
    return jsonify(tip._asdict()), HTTPStatus.CREATED


@tipjar_ext.route("/api/v1/tipjars/<tipjar_id>", methods=["PUT"])
@api_check_wallet_key("invoice")
async def api_update_tipjar(tipjar_id=None):
    """Update a tipjar with the data given in the request"""
    if tipjar_id:
        tipjar = await get_tipjar(tipjar_id)
        if not tipjar:
            return (
                jsonify({"message": "TipJar does not exist."}),
                HTTPStatus.NOT_FOUND,
            )
        if tipjar.wallet != g.wallet.id:
            return (jsonify({"message": "Not your tipjar."}), HTTPStatus.FORBIDDEN)
        tipjar = await update_tipjar(tipjar_id, **g.data)
    else:
        return (jsonify({"message": "No tipjar ID specified"}), HTTPStatus.BAD_REQUEST)
    return jsonify(tipjar._asdict()), HTTPStatus.CREATED


@tipjar_ext.route("/api/v1/tips/<tip_id>", methods=["DELETE"])
@api_check_wallet_key("invoice")
async def api_delete_tip(tip_id):
    """Delete the tip with the given tip_id"""
    tip = await get_tip(tip_id)
    if not tip:
        return (jsonify({"message": "No tip with this ID!"}), HTTPStatus.NOT_FOUND)
    if tip.wallet != g.wallet.id:
        return (
            jsonify({"message": "Not authorized to delete this tip!"}),
            HTTPStatus.FORBIDDEN,
        )
    await delete_tip(tip_id)
    return "", HTTPStatus.NO_CONTENT


@tipjar_ext.route("/api/v1/tipjars/<tipjar_id>", methods=["DELETE"])
@api_check_wallet_key("invoice")
async def api_delete_tipjar(tipjar_id):
    """Delete the tipjar with the given tipjar_id"""
    tipjar = await get_tipjar(tipjar_id)
    if not tipjar:
        return (jsonify({"message": "No tipjar with this ID!"}), HTTPStatus.NOT_FOUND)
    if tipjar.wallet != g.wallet.id:
        return (
            jsonify({"message": "Not authorized to delete this tipjar!"}),
            HTTPStatus.FORBIDDEN,
        )
    await delete_tipjar(tipjar_id)
    return "", HTTPStatus.NO_CONTENT
# Copyright 2018 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#            http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
PubSubTopic provider implements GCP PubSub Topic specific translations.

Supports V1, CFT versions.
"""

import providers.baseprovider as base


# ----- PubSub Subscription ----- #

class PubSubSubscriptionBase(base.BaseProvider):
    """
    Common implementation shared across all PubSub Subscription versions.
    This class should not be used outside of its child classes.
    """

    def __init__(self, dm_api, gcloud_stage, gcloud_flags=''):
        base.BaseProvider.__init__(
            self, "pubsub", "subscriptions", dm_api, gcloud_stage, gcloud_flags
        )

    def get_new(self):
        # Base class is abstract; factory method intentionally yields nothing.
        return None  # not supposed to run


class PubSubSubscriptionV1(PubSubSubscriptionBase):
    """PubSub-subscription V1 API provider."""

    def __init__(self, gcloud_flags=''):
        PubSubSubscriptionBase.__init__(
            self, "gcp-types/pubsub-v1:projects.subscriptions", "", gcloud_flags
        )

    def get_new(self):
        return PubSubSubscriptionV1()


class PubSubSubscriptionCFT(PubSubSubscriptionBase):
    """PubSub-Subscription CFT API provider - DO NOT USE DIRECTLY."""

    def __init__(self, gcloud_flags=''):
        PubSubSubscriptionBase.__init__(
            self, "../templates/pubsub/pubsub.py", " ", gcloud_flags
        )

    def get_new(self):
        return PubSubSubscriptionCFT()

    def fill_properties(self):
        # The CFT template expects the topic name plus a list of
        # subscription property dicts.
        self.base_yaml['properties']['topic'] = self.properties['topic']
        self.base_yaml['properties']['subscriptions'] = [self.properties]


# ----- PubSub Topics ----- #

class PubSubTopicBase(base.BaseProvider):
    """
    Common implementation shared across all PubSub Topic versions.
    This class should not be used outside of its child classes.
    """

    def __init__(self, dm_api, gcloud_stage, gcloud_flags=''):
        base.BaseProvider.__init__(
            self, "pubsub", "topics", dm_api, gcloud_stage, gcloud_flags
        )

    def get_new(self):
        # Base class is abstract; factory method intentionally yields nothing.
        return None  # not supposed to run


class PubSubTopicV1(PubSubTopicBase):
    """PubSub-topic V1 API provider."""

    def __init__(self, gcloud_flags=''):
        PubSubTopicBase.__init__(
            self, "gcp-types/pubsub-v1:projects.topics", "", gcloud_flags
        )

    def get_new(self):
        return PubSubTopicV1()


class PubSubTopicCFT(PubSubTopicBase):
    """PubSub-topic CFT API provider."""

    def __init__(self, gcloud_flags=''):
        PubSubTopicBase.__init__(
            self, "../templates/pubsub/pubsub.py", " ", gcloud_flags
        )

    def get_new(self):
        return PubSubTopicCFT()

    def fill_properties(self):
        self.base_yaml['properties']['topic'] = self.properties
        self.base_yaml['properties']['subscriptions'] = []
        self.get_subscriptions()

    def get_subscriptions(self):
        """Attach every subscription that targets this topic.

        Sub-optimal implementation: lists *all* subscriptions and filters
        by topic name.
        """
        all_subscriptions = PubSubSubscriptionCFT().get_list()
        matching = []
        for sub in all_subscriptions:
            if (sub.base_yaml['properties']['topic']
                    == self.base_yaml['properties']['topic']['name']):
                matching.append(sub.base_yaml['properties']['subscriptions'][0])
        self.base_yaml['properties']['subscriptions'] = matching
# -*- coding: utf-8 -*- # # Enteletaor - https://github.com/cr0hn/enteletaor # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the # following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#
import os
import six
import logging

from .utils import get_server_type

if six.PY2:
    from .cracker import cracking
else:
    # from .cracker3 import cracking
    from .cracker import cracking

# Reconfigure AMQP LOGGER: level 100 is above CRITICAL, so it is silenced.
logging.getLogger('amqp').setLevel(100)

log = logging.getLogger()


# ----------------------------------------------------------------------
def cmd_brute_main(config):
    """Entry point for the 'brute' command.

    Resolves the wordlist (falling back to the bundled ones), detects the
    broker type/status on config.target and, if it requires auth, launches
    the password cracker.
    """
    # --------------------------------------------------------------------------
    # Check requisites
    # --------------------------------------------------------------------------
    if not config.target:
        logging.error(" <!> target option, '-t', is required")
        return
    if not config.wordlist:
        logging.error(" <!> wordlist option, '-w', is required")
        return

    # Fix wordlist path: if it is not a file on disk, look it up among the
    # wordlists shipped with the tool.
    if not os.path.exists(config.wordlist):
        wordlist_base = os.path.join(os.path.dirname(__file__), "..", "..",
                                     "resources", "wordlist")

        # Try to find into internal wordlists
        internal_wordlists = [
            x for x in os.listdir(os.path.abspath(wordlist_base))
            if "readme" not in x.lower()
        ]

        wordlist_choice = "%s.txt" % config.wordlist \
            if ".txt" not in config.wordlist else config.wordlist

        # Is wordlist available?
        if wordlist_choice not in internal_wordlists:
            log.error(" <!> Wordlist '%s' not found." % wordlist_choice)
            return

        # Fix wordlist path
        config.wordlist = os.path.abspath(
            os.path.join(wordlist_base, wordlist_choice))

    # --------------------------------------------------------------------------
    # Preparing scan
    # --------------------------------------------------------------------------
    server_type, status, port = get_server_type(config)

    if status != "closed":
        log.error(" - Detected '%s' server with '%s'."
                  % ('unknown' if server_type is None else server_type,
                     status))

    # BUGFIX: server_type may be None (see the 'unknown' fallback above), in
    # which case .lower() raised AttributeError. Guard before dereferencing.
    if server_type and server_type.lower() == "rabbitmq":
        log.error(" - Set user to '%s'" % config.user)

    # --------------------------------------------------------------------------
    # Do brute
    # --------------------------------------------------------------------------
    if status == "auth":
        log.error(" - Starting bruteforcer using wordlist: '%s'"
                  % config.wordlist)
        cracking(server_type, port, config)
    elif status == "open":
        log.error(" - '%s' '%s' server is open. No password cracking need"
                  % (server_type, config.target))
    else:
        log.error(" - Not detected brokers in '%s'." % config.target)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# NOTE(review): documentation-only pass; all code below is left byte-identical
# since the file is tool-generated and will be regenerated by tfgen.

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities

__all__ = ['DefaultServiceAccountsArgs', 'DefaultServiceAccounts']


@pulumi.input_type
class DefaultServiceAccountsArgs:
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 project: pulumi.Input[str],
                 restore_policy: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a DefaultServiceAccounts resource.
        :param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
               Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        :param pulumi.Input[str] project: The project ID where service accounts are created.
        :param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
               Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
               If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
               If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "project", project)
        # restore_policy is optional; only record it when explicitly provided.
        if restore_policy is not None:
            pulumi.set(__self__, "restore_policy", restore_policy)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """
        The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
        Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def project(self) -> pulumi.Input[str]:
        """
        The project ID where service accounts are created.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: pulumi.Input[str]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="restorePolicy")
    def restore_policy(self) -> Optional[pulumi.Input[str]]:
        """
        The action to be performed in the default service accounts on the resource destroy.
        Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
        If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
        If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        """
        return pulumi.get(self, "restore_policy")

    @restore_policy.setter
    def restore_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "restore_policy", value)


@pulumi.input_type
class _DefaultServiceAccountsState:
    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 restore_policy: Optional[pulumi.Input[str]] = None,
                 service_accounts: Optional[pulumi.Input[Mapping[str, Any]]] = None):
        """
        Input properties used for looking up and filtering DefaultServiceAccounts resources.
        :param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
               Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        :param pulumi.Input[str] project: The project ID where service accounts are created.
        :param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
               Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
               If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
               If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        :param pulumi.Input[Mapping[str, Any]] service_accounts: The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
        """
        # All state fields are optional: any subset may be used for lookup.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if restore_policy is not None:
            pulumi.set(__self__, "restore_policy", restore_policy)
        if service_accounts is not None:
            pulumi.set(__self__, "service_accounts", service_accounts)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
        Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        The project ID where service accounts are created.
        """
        return pulumi.get(self, "project")

    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)

    @property
    @pulumi.getter(name="restorePolicy")
    def restore_policy(self) -> Optional[pulumi.Input[str]]:
        """
        The action to be performed in the default service accounts on the resource destroy.
        Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
        If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
        If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        """
        return pulumi.get(self, "restore_policy")

    @restore_policy.setter
    def restore_policy(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "restore_policy", value)

    @property
    @pulumi.getter(name="serviceAccounts")
    def service_accounts(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
        """
        return pulumi.get(self, "service_accounts")

    @service_accounts.setter
    def service_accounts(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "service_accounts", value)


class DefaultServiceAccounts(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 action: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 restore_policy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Allows management of Google Cloud Platform project default service accounts.

        When certain service APIs are enabled, Google Cloud Platform automatically creates service accounts to help get started, but
        this is not recommended for production environments as per [Google's documentation](https://cloud.google.com/iam/docs/service-accounts#default).
        See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.

        > **WARNING** Some Google Cloud products do not work if the default service accounts are deleted so it is better to
        `DEPRIVILEGE` as Google **CAN NOT** recover service accounts that have been deleted for more than 30 days.
        Also Google recommends using the `constraints/iam.automaticIamGrantsForDefaultServiceAccounts` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html)
        to disable automatic IAM Grants to default service accounts.

        > This resource works on a best-effort basis, as no API formally describes the default service accounts
        and it is for users who are unable to use constraints. If the default service accounts change their name
        or additional service accounts are added, this resource will need to be updated.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gcp as gcp

        my_project = gcp.projects.DefaultServiceAccounts("myProject",
            action="DELETE",
            project="my-project-id")
        ```
        To enable the default service accounts on the resource destroy:

        ```python
        import pulumi
        import pulumi_gcp as gcp

        my_project = gcp.projects.DefaultServiceAccounts("myProject",
            action="DISABLE",
            project="my-project-id",
            restore_policy="REVERT")
        ```

        ## Import

        This resource does not support import

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
               Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        :param pulumi.Input[str] project: The project ID where service accounts are created.
        :param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
               Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
               If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
               If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: DefaultServiceAccountsArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Allows management of Google Cloud Platform project default service accounts.

        When certain service APIs are enabled, Google Cloud Platform automatically creates service accounts to help get started, but
        this is not recommended for production environments as per [Google's documentation](https://cloud.google.com/iam/docs/service-accounts#default).
        See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.

        > **WARNING** Some Google Cloud products do not work if the default service accounts are deleted so it is better to
        `DEPRIVILEGE` as Google **CAN NOT** recover service accounts that have been deleted for more than 30 days.
        Also Google recommends using the `constraints/iam.automaticIamGrantsForDefaultServiceAccounts` [constraint](https://www.terraform.io/docs/providers/google/r/google_organization_policy.html)
        to disable automatic IAM Grants to default service accounts.

        > This resource works on a best-effort basis, as no API formally describes the default service accounts
        and it is for users who are unable to use constraints. If the default service accounts change their name
        or additional service accounts are added, this resource will need to be updated.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_gcp as gcp

        my_project = gcp.projects.DefaultServiceAccounts("myProject",
            action="DELETE",
            project="my-project-id")
        ```
        To enable the default service accounts on the resource destroy:

        ```python
        import pulumi
        import pulumi_gcp as gcp

        my_project = gcp.projects.DefaultServiceAccounts("myProject",
            action="DISABLE",
            project="my-project-id",
            restore_policy="REVERT")
        ```

        ## Import

        This resource does not support import

        :param str resource_name: The name of the resource.
        :param DefaultServiceAccountsArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single Args
        # object, or individual keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(DefaultServiceAccountsArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 action: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 restore_policy: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means we are rehydrating an existing resource (via
        # get()): the caller supplies __props__ and no new inputs are built.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = DefaultServiceAccountsArgs.__new__(DefaultServiceAccountsArgs)

            if action is None and not opts.urn:
                raise TypeError("Missing required property 'action'")
            __props__.__dict__["action"] = action
            if project is None and not opts.urn:
                raise TypeError("Missing required property 'project'")
            __props__.__dict__["project"] = project
            __props__.__dict__["restore_policy"] = restore_policy
            __props__.__dict__["service_accounts"] = None
        super(DefaultServiceAccounts, __self__).__init__(
            'gcp:projects/defaultServiceAccounts:DefaultServiceAccounts',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            action: Optional[pulumi.Input[str]] = None,
            project: Optional[pulumi.Input[str]] = None,
            restore_policy: Optional[pulumi.Input[str]] = None,
            service_accounts: Optional[pulumi.Input[Mapping[str, Any]]] = None) -> 'DefaultServiceAccounts':
        """
        Get an existing DefaultServiceAccounts resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] action: The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
               Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        :param pulumi.Input[str] project: The project ID where service accounts are created.
        :param pulumi.Input[str] restore_policy: The action to be performed in the default service accounts on the resource destroy.
               Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
               If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
               If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        :param pulumi.Input[Mapping[str, Any]] service_accounts: The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _DefaultServiceAccountsState.__new__(_DefaultServiceAccountsState)

        __props__.__dict__["action"] = action
        __props__.__dict__["project"] = project
        __props__.__dict__["restore_policy"] = restore_policy
        __props__.__dict__["service_accounts"] = service_accounts
        return DefaultServiceAccounts(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Output[str]:
        """
        The action to be performed in the default service accounts. Valid values are: `DEPRIVILEGE`, `DELETE`, `DISABLE`.
        Note that `DEPRIVILEGE` action will ignore the REVERT configuration in the restore_policy
        """
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        The project ID where service accounts are created.
        """
        return pulumi.get(self, "project")

    @property
    @pulumi.getter(name="restorePolicy")
    def restore_policy(self) -> pulumi.Output[Optional[str]]:
        """
        The action to be performed in the default service accounts on the resource destroy.
        Valid values are NONE, REVERT and REVERT_AND_IGNORE_FAILURE. It is applied for any action but in the DEPRIVILEGE.
        If set to REVERT it attempts to restore all default SAs but the DEPRIVILEGE action.
        If set to REVERT_AND_IGNORE_FAILURE it is the same behavior as REVERT but ignores errors returned by the API.
        """
        return pulumi.get(self, "restore_policy")

    @property
    @pulumi.getter(name="serviceAccounts")
    def service_accounts(self) -> pulumi.Output[Mapping[str, Any]]:
        """
        The Service Accounts changed by this resource. It is used for `REVERT` the `action` on the destroy.
        """
        return pulumi.get(self, "service_accounts")
# coding=utf-8 <import_from_stmt>.pocofw Poco<line_sep>
import json

from .base import BaseResource


class JobResource(BaseResource):
    """REST resource exposing the collection of job instances.

    Relies on BaseResource for `db`, `session` and `scheduler`; the
    boilerplate pass-through __init__ overrides were removed since they
    only forwarded to super().
    """

    def on_get(self, req, resp):
        """List all job instances."""
        resp.content_type = 'application/json'
        resp.body = json.dumps(
            self.db.jobs.list(req.context['user'], req.params, self.session))

    def on_post(self, req, resp):
        """Create new or delete job instance.

        The operation is chosen by the 'action' request parameter:
        'delete' removes the job; any other value stores it.
        """
        resp.content_type = 'application/json'
        action = req.params.get('action')
        if action == 'delete':
            resp.body = json.dumps(
                self.db.jobs.delete(req.context['user'], req.params,
                                    self.session, self.scheduler))
        else:
            resp.body = json.dumps(
                self.db.jobs.store(req.context['user'], req.params,
                                   self.session, self.scheduler))


class JobDetailResource(BaseResource):
    """REST resource exposing a single job instance."""

    def on_get(self, req, resp):
        """Get details of specific job instance."""
        resp.content_type = 'application/json'
        resp.body = json.dumps(
            self.db.jobs.detail(req.context['user'], req.params, self.session))
"""Sampler components."""<import_from_stmt>.undersampler Undersampler<import_from_stmt>.oversampler Oversampler<line_sep>
""" This module tests dbm_demo/rbm.yaml """<import_stmt>os<import_from_stmt>pylearn2.testing skip<import_from_stmt>pylearn2.testing no_debug_mode<import_from_stmt>pylearn2.config yaml_parse<line_sep>@no_debug_mode<def_stmt>train_yaml yaml_file<block_start>train=yaml_parse.load(yaml_file)<line_sep>train.main_loop()<block_end><def_stmt>train yaml_file_path save_path<block_start>yaml=open("{0}/rbm.yaml".format(yaml_file_path) 'r').read()<line_sep>hyper_params={'detector_layer_dim':5 'monitoring_batches':2 'train_stop':500 'max_epochs':7 'save_path':save_path}<line_sep>yaml=yaml%(hyper_params)<line_sep>train_yaml(yaml)<block_end><def_stmt>test_dbm <block_start>skip.skip_if_no_data()<line_sep>yaml_file_path=os.path.abspath(os.path.join(os.path.dirname(__file__) '../dbm_demo'))<line_sep>save_path=os.path.dirname(os.path.realpath(__file__))<line_sep>train(yaml_file_path save_path)<try_stmt><block_start>os.remove("{}/dbm.pkl".format(save_path))<block_end><except_stmt><block_start><pass><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>test_dbm()<block_end>
# RUN: %{python} %s <import_from_future_stmt> print_function<import_stmt>time<import_stmt>sys<line_sep>print("Running infinite loop")<line_sep>sys.stdout.flush()# Make sure the print gets flushed so it appears in lit output. <while_stmt><true><block_start><pass><block_end>
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import copy
import inspect

import torch

# Do not import any poptorch.* here: it will break the poptorch module
from . import _impl
from ._logging import logger


class ArgsParser:
    """Maps the (args, kwargs) a user passes to a wrapped model's forward()
    onto a flat, positional argument list suitable for tracing."""

    class Args:
        """Flattened argument list plus the index of the first parameter
        that defaulted to None (used when re-applying trailing defaults)."""

        def __init__(self):
            self._args = []
            self.first_none = None

        def clone(self):
            """Return a shallow copy of this argument set."""
            clone = ArgsParser.Args()
            clone._args = copy.copy(self._args)  # pylint: disable=protected-access
            clone.first_none = self.first_none
            return clone

        def _forEach(self, data, fn):
            # Recursively apply fn to every leaf of nested tuples/lists/dicts,
            # preserving the container structure.
            if isinstance(data, (tuple, list)):
                return type(data)(self._forEach(d, fn) for d in data)
            if isinstance(data, dict):
                return {
                    key: self._forEach(value, fn)
                    for key, value in data.items()
                }
            return fn(data)

        def _forEachMatched(self, data, condition, doOnTrue, conditionMatches):
            # Like _forEach, but only transforms leaves satisfying `condition`
            # and records (via conditionMatches) whether any leaf matched.
            if isinstance(data, (tuple, list)):
                return type(data)(
                    self._forEachMatched(d, condition, doOnTrue,
                                         conditionMatches) for d in data)
            if isinstance(data, dict):
                return {
                    key: self._forEachMatched(value, condition, doOnTrue,
                                              conditionMatches)
                    for key, value in data.items()
                }
            if condition(data):
                conditionMatches.setTrue()
                return doOnTrue(data)
            return data

        def forEachMatchedAtLeastOnce(self, condition, doOnTrue=None):
            """Apply doOnTrue to every leaf matching condition.

            :returns: True if at least one leaf matched.
            """
            class ConditionMatches:
                # Mutable bool cell shared with the recursive walk.
                def __init__(self):
                    self._matches = False

                def __bool__(self):
                    return self._matches

                def setTrue(self):
                    self._matches = True

            matches = ConditionMatches()
            self._args = self._forEachMatched(self._args, condition, doOnTrue,
                                              matches)
            return bool(matches)

        def forEach(self, fn):
            """Apply fn to every leaf argument in place."""
            self._args = self._forEach(self._args, fn)

        def asTuple(self):
            return tuple(self._args)

    def __init__(self, model):
        # Combine args and kwargs: read the signature of the user's forward()
        # (unwrapping the optimizer wrapper if present).
        if isinstance(model, _impl.OptimizerWrapper):
            sig = inspect.signature(model.model.forward)
        else:
            sig = inspect.signature(model.forward)

        self._has_variadic_arguments = any(
            p.kind in [p.VAR_POSITIONAL, p.VAR_KEYWORD]
            for p in sig.parameters.values())
        self._varnames = list(sig.parameters.keys())
        # Defaults align with the *tail* of _varnames (parameters without
        # defaults must come first in Python signatures).
        self._defaults = [p.default for p in sig.parameters.values()]
        self._warned_not_contiguous_input = False

    def __call__(self, args, kwargs, fast_path=False):
        """Checks the inputs are of a supported type. Inputs must be
        tensors or tuples/lists of tensors. Will convert list to tuples
        as we can't natively support lists in the JIT.

        :param fast_path: when True (execution), skip list conversion and
            warnings done during the compilation pass.
        """
        in_tensors = ArgsParser.Args()
        assert self._has_variadic_arguments or len(args) + len(kwargs) <= len(
            self._varnames), ("Too many arguments provided: expected %s (%d) "
                              "but got %d") % (self._varnames,
                                               len(self._varnames),
                                               len(args) + len(kwargs))
        first_optional = len(self._varnames) - len(self._defaults)
        none_passed = []

        # Make sure all the arguments provided are allowed.
        for k in kwargs.keys():
            assert k in self._varnames, (
                f"{k} is not a valid parameter."
                f"Allowed values are {self._varnames}")

        for i, name in enumerate(self._varnames):
            if i < len(args):
                has_list = self._errorOnDictReturnTrueIfList(args[i], name, [])
                # Non fast path for compilation, fast path for executing.
                if not fast_path:
                    if has_list:
                        logger.warning(
                            "Lists as inputs only have partial support, they "
                            "can be accessed but full Python functionality is "
                            "not enabled. Consider changing input to tuple.")
                    data = self._convertLists(args[i])
                    in_tensors._args.append(data)
                else:
                    in_tensors._args.append(args[i])
                assert name not in kwargs, ("Parameter %s was passed more "
                                            "than once") % name
            elif name in kwargs:
                assert not none_passed, (
                    "Torch doesn't support passing tensors (%s)"
                    " after the following parameters have defaulted to None."
                    " %s") % (name, ", ".join(none_passed))
                has_list = self._errorOnDictReturnTrueIfList(
                    kwargs[name], name, [])
                # Non fast path for compilation, fast path for executing.
                if not fast_path:
                    if has_list:
                        logger.warning(
                            "Lists as inputs only have partial support, they "
                            "can be accessed but full Python functionality is "
                            "not enabled. Consider changing input to tuple.")
                    kwargs[name] = self._convertLists(kwargs[name])
                in_tensors._args.append(kwargs[name])
            else:
                assert i >= first_optional, ("Mandatory parameter %s "
                                             "missing") % name
                value = self._defaults[i - first_optional]
                if value is None:
                    if in_tensors.first_none is None:
                        in_tensors.first_none = i
                    none_passed.append("%s (%d)" % (name, i))
                if not none_passed:
                    in_tensors._args.append(value)

        if in_tensors.first_none is None:
            in_tensors.first_none = len(self._varnames)

        # filter-out trailing None arguments when they default to None
        # Extending this to any argument set to its default value has
        # proven problematic - the trace may be computed with fewer
        # inputs than intended.
        for i in reversed(range(len(in_tensors._args))):
            if in_tensors._args[i] is not None:
                break
            if self._defaults[i] is not None:
                break
            in_tensors._args.pop()
            if in_tensors.first_none == i:
                in_tensors.first_none = None

        # assert we are not passing None parameters to avoid a cryptic error
        assert None not in in_tensors._args, \
            "'None' may not be passed as explicit model argument. It may " \
            "only be used as default initialiser"

        # Tracing requires contiguous tensors; convert (and warn once).
        if in_tensors.forEachMatchedAtLeastOnce(
                condition=lambda t: isinstance(t, torch.Tensor) and not t.
                is_contiguous(),
                doOnTrue=lambda t: t.contiguous()):
            if not self._warned_not_contiguous_input:
                logger.warning("At least one input tensor is not contiguous: "
                               "non-contiguous tensors will be converted.")
                self._warned_not_contiguous_input = True

        return in_tensors

    def _convertLists(self, input):
        # Recursively turn every tuple/list into a tuple: lists cannot be
        # natively represented in the JIT.
        if isinstance(input, (tuple, list)):
            return tuple(self._convertLists(d) for d in input)
        return input

    def _errorOnDictReturnTrueIfList(self, data, arg_name, stack_list):
        """Raise TypeError if `data` contains a dict at any nesting level.

        :param stack_list: indices walked so far, used to point at the
            offending element in the error message.
        :returns: True if `data` is, or contains, a list.
        """
        has_list = False
        if isinstance(data, (tuple, list)):
            for idx, d in enumerate(data):
                stack_list.append(idx)
                # BUGFIX: was `has_list &= ...`. Starting from False, `&=`
                # always discarded the recursive result, so a list nested
                # inside a tuple was never reported. `|=` accumulates
                # "a list was seen anywhere in the structure".
                has_list |= self._errorOnDictReturnTrueIfList(
                    d, arg_name, stack_list)
                stack_list.pop()
            if isinstance(data, list):
                has_list = True
        if isinstance(data, dict):
            path = [str(s) for s in stack_list]
            end_msg = arg_name
            if path:
                end_msg += "[" + "][".join(path) + "]"
            end_msg += " = " + str(data)
            raise TypeError("Dictionaries are not supported as input arguments,"
                            " including when nested in tuples.\nReceived dict "
                            + end_msg)
        return has_list
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep># Remove duplicates from the electron list electronsNoDuplicates=cms.EDFilter("DuplicatedElectronCleaner" ## reco electron input source electronSource=cms.InputTag("gsfElectrons") )<line_sep>
# Time: O(n) # Space: O(1) <import_stmt>operator<import_from_stmt>functools reduce<class_stmt>Solution(object)<block_start>""" :type nums: List[int] :rtype: int """<def_stmt>singleNumber self A<block_start><return>reduce(operator.xor A)<block_end><block_end>
# Generated by Django 2.2.10 on 2020-04-29 14:01 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('api' '0024_auto_20200423_1601') ]<line_sep>operations=[migrations.AlterField(model_name='imagereport' name='status' field=models.CharField(choices=[('pending_review' 'pending_review') ('mature_filtered' 'mature_filtered') ('deindexed' 'deindexed') ('no_action' 'no_action')] default='pending_review' max_length=20) ) migrations.DeleteModel(name='ImageTags' ) ]<block_end>
<import_stmt>pytest<import_from_stmt>streaming_form_data.validators MaxSizeValidator ValidationError<def_stmt>test_max_size_validator_empty_input <block_start>validator=MaxSizeValidator(0)<with_stmt>pytest.raises(ValidationError)<block_start>validator('x')<block_end><block_end><def_stmt>test_max_size_validator_normal <block_start>validator=MaxSizeValidator(5)<for_stmt>char 'hello'<block_start>validator(char)<block_end><with_stmt>pytest.raises(ValidationError)<block_start>validator('x')<block_end><block_end>
<import_stmt>sys<import_stmt>os<import_stmt>numpy.random<import_from_stmt>amuse.test amusetest<import_from_stmt>amuse.units units nbody_system<import_from_stmt>amuse.ext.boss_bodenheimer bb79_cloud<line_sep>numpy.random.seed(1234567)<class_stmt>BossBodenheimerTests(amusetest.TestCase)<block_start><def_stmt>test1 self<block_start>numpy.random.seed(1234)<line_sep>mc=bb79_cloud(targetN=1000).result<line_sep>self.assertEqual(len(mc) 1000)<line_sep>ek=mc.kinetic_energy()<line_sep>ep=mc.potential_energy(G=nbody_system.G)<line_sep>eth=mc.thermal_energy()<line_sep>self.assertAlmostEqual(eth/ep -0.25 2)<line_sep>self.assertAlmostEqual(ek/ep -0.2 2)<block_end><def_stmt>test2 self<block_start>numpy.random.seed(1234)<line_sep>convert=nbody_system.nbody_to_si(1.|units.MSun 3.2e16|units.cm)<line_sep>mc=bb79_cloud(targetN=1000 convert_nbody=convert).result<line_sep>self.assertEqual(len(mc) 1000)<line_sep>ek=mc.kinetic_energy()<line_sep>ep=mc.potential_energy()<line_sep>eth=mc.thermal_energy()<line_sep>self.assertAlmostEqual(eth/ep -0.25 2)<line_sep>self.assertAlmostEqual(ek/ep -0.2 2)<block_end><block_end>
<import_from_stmt>raytkTools RaytkTools<line_sep># noinspection PyUnreachableCode <if_stmt><false># noinspection PyUnresolvedReferences <block_start><import_from_stmt>_stubs *<import_from_stmt>..ropEditor.ropEditor ROPEditor<line_sep>iop.ropEditor=ROPEditor(COMP())<block_end><class_stmt>CreateRopDialog<block_start><def_stmt>__init__ self ownerComp:'COMP'<block_start>self.ownerComp=ownerComp<block_end><def_stmt>_setMessageText self message<block_start>dat=self.ownerComp.op('set_messageText')<line_sep>dat.clear()<line_sep>dat.write(message<or>'')<block_end><def_stmt>Open self _=<none><block_start>self.ownerComp.op('window').par.winopen.pulse()<line_sep>self.ownerComp.op('typeName_field').par.Value0=''<line_sep>self._setMessageText('')<block_end><def_stmt>Close self _=<none><block_start>self.ownerComp.op('window').par.winclose.pulse()<line_sep>self._setMessageText('')<block_end><def_stmt>Create self<block_start>self._setMessageText('')<line_sep>category=self.ownerComp.op('category_dropmenu').par.Value0.eval()<line_sep>name=self.ownerComp.op('typeName_field').par.Value0.eval()<try_stmt><block_start>rop=RaytkTools().createNewRopType(typeName=name category=category)<block_end><except_stmt>Exception<as>err<block_start>self._setMessageText(str(err))<line_sep><return><block_end>iop.ropEditor.LoadROP(rop)<line_sep>self.Close()<block_end><block_end>
# TODO(TF-747): Reenable. # """Checks that tutorial notebooks behave as expected. # """ # # import unittest # import os # import shutil # import tempfile # # from flaky import flaky # # from notebook_tester import NotebookTestRunner # # # class TutorialNotebookTests(unittest.TestCase): # @classmethod # def setUpClass(cls): # cls.tmp_dir = tempfile.mkdtemp() # git_url = 'https://github.com/tensorflow/swift.git' # os.system('git clone %s %s -b jupyter-test-branch' % (git_url, cls.tmp_dir)) # # @classmethod # def tearDownClass(cls): # shutil.rmtree(cls.tmp_dir) # # @flaky(max_runs=5, min_passes=1) # def test_iris(self): # notebook = os.path.join(self.tmp_dir, 'docs', 'site', 'tutorials', # 'model_training_walkthrough.ipynb') # runner = NotebookTestRunner(notebook, verbose=False) # runner.run() # self.assertEqual([], runner.unexpected_errors) # all_stdout = '\n\n'.join(runner.stdout) # self.assertIn('Epoch 100:', all_stdout) # self.assertIn('Example 2 prediction:', all_stdout)
<import_stmt>jsonlines<import_stmt>sys<import_stmt>csv<line_sep>expl={}<with_stmt>open(sys.argv[2] 'rb')<as>f<block_start><for_stmt>item jsonlines.Reader(f)<block_start>expl[item['id']]=item['explanation']['open-ended']<block_end><block_end><with_stmt>open(sys.argv[1] 'rb')<as>f<block_start><with_stmt>open(sys.argv[3] 'w')<as>wf<block_start>wfw=csv.writer(wf delimiter=',' quotechar='"')<line_sep>wfw.writerow(['id' 'question' 'choice_0' 'choice_1' 'choice_2' 'choice_3' 'choice_4' 'label' 'human_expl_open-ended'])<for_stmt>item jsonlines.Reader(f)<block_start>label=-1<if_stmt>(item['answerKey']<eq>'A')<block_start>label=0<block_end><elif_stmt>(item['answerKey']<eq>'B')<block_start>label=1<block_end><elif_stmt>(item['answerKey']<eq>'C')<block_start>label=2<block_end><elif_stmt>(item['answerKey']<eq>'D')<block_start>label=3<block_end><else_stmt><block_start>label=4<block_end>wfw.writerow([item['id'] item['question']['stem'] item['question']['choices'][0]['text'] item['question']['choices'][1]['text'] item['question']['choices'][2]['text'] item['question']['choices'][3]['text'] item['question']['choices'][4]['text'] label expl[item['id']]])<block_end><block_end><block_end>
<import_from_stmt>stream CStream<import_from_stmt>tokenizer L2<import_from_stmt>data Expr Literal Position<line_sep>#import space #table = { # u'(': u'lp', u')': u'rp', # u'[': u'lb', u']': u'rb', # u'{': u'lc', u'}': u'rc', # u'and': u'and', u'or': u'or', u'not': u'not', # u'=': u'let', u':=': u'set', # u'<': u'chain', # u'>': u'chain', # u'<=': u'chain', # u'>=': u'chain', # u'==': u'chain', # u'!=': u'chain', # u'^': u'op', u'&': u'op', u'<<': u'op', # u'>>': u'op', u'!': u'op', u'*': u'op', # u'/': u'op', u'%': u'op', u'+': u'op', # u'-': u'op', u'|': u'op', u'++': u'op', # u':': u'symbol', # u'.': u'dot'} #binops = { # u'|': 10, # u'^': 10, # u'&': 20, # u'<<': 30, u'>>': 40, # u'++': 40, u'+': 40, u'-': 40, # u'*': 50, u'/': 50, u'%': 50, #} #right_binding = [] #prefixes = { # u'~': 90, # u'-': 90, # u'+': 90, #} #postfixes = { # u'!': 100, #} # #def read(source): # exps = [] # ts = L2(CStream(source), table) # while ts.filled: # if ts.position.col != 0: # raise space.Error(u"%s: layout error" % ts.first.start.repr()) # exps.append(toplevel(ts, 0)) # return exps # #def toplevel(ts, col): # head = expression(ts) # if head.dcf is not None and ts.filled: # if head.stop.lno == ts.position.lno: # head.dcf.capture = [toplevel(ts, col)] # elif ts.position.col > col: # head.dcf.capture = exps = [] # scol = ts.position.col # while ts.filled and ts.position.col == scol: # exp = toplevel(ts, scol) # exps.append(exp) # while ts.filled and ts.position.lno == exp.stop.lno and ts.position.col > scol: # exps.append(toplevel(ts, scol)) # return head # #def expressions(ts): # exps = [] # while ts.filled: # if match_some(ts.first, [u'rp', u'rb', u'rc']): # break # exps.append(expression(ts)) # return exps # #def expression(ts): # left = expression_and(ts) # if match(ts.first, u'or'): # op = ts.advance() # op.name = u'symbol' # right = expression(ts) # return Expr(left.start, right.stop, u'form', [op, left, right]) # return left # #def expression_and(ts): # left = 
expression_chain(ts) # if match(ts.first, u'and'): # op = ts.advance() # op.name = u'symbol' # right = expression_and(ts) # return Expr(left.start, right.stop, u'form', [op, left, right]) # return left # #def expression_chain(ts): # left = expression_bare(ts, 0) # if match(ts.first, u'chain'): # exps = [left] # while match(ts.first, u'chain'): # op = ts.advance() # op.name = u'symbol' # exps.append(op) # exps.append(expression_bare(ts, 0)) # left = Expr(exps[0].start, exps[len(exps)-1].stop, u'chain', exps) # return left # #def expression_bare(ts, rbp): # if on_prefix(ts): # op = ts.advance() # exp = expression_bare(ts, prefixes[op.value]) # op.name = u'symbol' # op.value = op.value+u'expr' # left = Expr(op.start, exp.stop, u'form', [op, exp]) # else: # left = terminal(ts) # while ts.filled: # if match(ts.first, u'dot'): # dot = ts.advance() # symbol = ts.advance() # if not match(symbol, u'symbol'): # raise space.Error(u"%s: expected symbol" % symbol.start.repr()) # left = Expr(left.start, symbol.stop, u'attr', [left, symbol]) # elif match(ts.first, u'lb') and left.stop.eq(ts.first.start): # lb = ts.advance() # exps = expressions(ts) # if not match(ts.first, u'rb'): # raise space.Error(u"%s: [] truncates at %s" % (lb.start.repr(), ts.position.repr())) # rb = ts.advance() # left = Expr(left.start, rb.stop, u'index', [left] + exps) # elif match_some(ts.first, [u'let', u'set']): # let = ts.advance() # exp = expression(ts) # left = Expr(left.start, exp.stop, let.name, [left, exp]) # elif match(ts.first, u'op') and match(ts.second, u'let') and ts.first.value in binops: # aug = ts.advance() # aug.name = u'symbol' # let = ts.advance() # exp = expression(ts) # left = Expr(left.start, exp.stop, u'aug', [aug, left, exp]) # else: # break # while ts.filled: # if on_binop(left, ts) and rbp < binops.get(ts.first.value, 0): # op = ts.advance() # op.name = u'symbol' # lbp = binops.get(op.value, 0) # right = expression_bare(ts, lbp - (ts.first.value in right_binding)) # left = 
Expr(left.start, right.stop, u'form', [op, left, right]) # elif on_postfix(left, ts) and rbp < postfixes.get(ts.first.value, 0): # op = ts.advance() # op.name = u'symbol' # lbp = postfixes.get(op.value, 0) # op.value = u'expr'+op.value # left = Expr(left.start, op.stop, u'form', [op, left]) # else: # break # return left # #def terminal(ts): # if match_some(ts.first, [u'symbol', u'string', u'int', u'hex', u'float']): # return ts.advance() # elif match(ts.first, u'lp'): # lp = ts.advance() # exps = expressions(ts) # if not match(ts.first, u'rp'): # raise space.Error(u"%s: form truncates at %s" % (lp.start.repr(), ts.position.repr())) # rp = ts.advance() # exp = Expr(lp.start, rp.stop, u'form', exps) # exp.dcf = exp # return exp # elif match(ts.first, u'lb'): # lb = ts.advance() # exps = expressions(ts) # if not match(ts.first, u'rb'): # raise space.Error(u"%s: list truncates at %s" % (lb.start.repr(), ts.position.repr())) # rb = ts.advance() # exp = Expr(lb.start, rb.stop, u'list', exps) # exp.dcf = exp # return exp # elif match(ts.first, u'lc'): # lc = ts.advance() # if match(ts.second, u'rc'): # exp = ts.advance() # exp.name = u'symbol' # else: # exp = expression(ts) # rc = ts.advance() # return exp # elif match(ts.first, u'not'): # op = ts.advance() # op.name = u'symbol' # exp = expression_chain(ts) # return Expr(op.start, exp.stop, u'form', [op, exp]) # if ts.filled: # raise space.Error(u"%s: expected term, got %s" % (ts.position.repr(), ts.first.value)) # raise space.Error(u"%s: expected term, got eof" % ts.position.repr()) # #def match_some(t, names): # return t is not None and t.name in names # #def match(t, name): # return t is not None and t.name == name # #def on_prefix(ts): # if match(ts.first, u'op') and ts.second is not None: # return ts.first.stop.eq(ts.second.start) # return False # #def on_binop(left, ts): # if match(ts.first, u'op') and ts.second is not None: # l = left.stop.eq(ts.first.start) # r = ts.first.stop.eq(ts.second.start) # return l == r # 
return False # #def on_postfix(left, ts): # if match(ts.first, u'op'): # l = left.stop.eq(ts.first.start) # r = ts.second is not None and ts.first.stop.eq(ts.second.start) # return l and not r # return False
"""In this module are stored generated bindings for standard schema like WSDL or SOAP."""<line_sep>
<import_stmt>unittest<import_from_stmt>descriptastorus MolFileIndex<import_stmt>os shutil<import_stmt>logging<import_stmt>datahook<line_sep>TEST_DIR="test1"<class_stmt>TestCase(unittest.TestCase)<block_start><def_stmt>setUp self<block_start><if_stmt>os.path.exists(TEST_DIR)<block_start>shutil.rmtree(TEST_DIR ignore_errors=<true>)<block_end>index=self.index=MolFileIndex.MakeSmilesIndex(os.path.join(datahook.datadir "../data/test1.smi") TEST_DIR hasHeader=<true> smilesColumn="smiles" nameColumn="name")<block_end><def_stmt>tearDown self<block_start><if_stmt>os.path.exists(TEST_DIR)<block_start>shutil.rmtree(TEST_DIR ignore_errors=<true>)<block_end><block_end><def_stmt>testIndexing self<block_start>logging.info("Running index test")<line_sep>self.assertEqual(self.index.N 14)<line_sep>self.assertEqual(self.index.getMol(12) 'c1ccccc1CCCCCCCCCCCC')<line_sep>self.assertEqual(self.index.getName(12) '13')<line_sep>self.assertEqual(self.index.getRDMol(13) <none>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>logging.getLogger().setLevel(logging.INFO)<line_sep>unittest.main()<block_end>
# Remove a encoding from the models file # Import required modules <import_stmt>sys<import_stmt>os<import_stmt>json<import_stmt>builtins<import_from_stmt>i18n _<line_sep># Get the absolute path and the username path=os.path.dirname(os.path.realpath(__file__))+"/.."<line_sep>user=builtins.howdy_user<line_sep># Check if enough arguments have been passed <if_stmt><not>builtins.howdy_args.arguments<block_start>print(_("Please add the ID of the model you want to remove as an argument"))<line_sep>print(_("For example:"))<line_sep>print("\n\thowdy remove 0\n")<line_sep>print(_("You can find the IDs by running:"))<line_sep>print("\n\thowdy list\n")<line_sep>sys.exit(1)<block_end># Check if the models file has been created yet <if_stmt><not>os.path.exists(path+"/models")<block_start>print(_("Face models have not been initialized yet, please run:"))<line_sep>print("\n\thowdy add\n")<line_sep>sys.exit(1)<block_end># Path to the models file enc_file=path+"/models/"+user+".dat"<line_sep># Try to load the models file and abort if the user does not have it yet <try_stmt><block_start>encodings=json.load(open(enc_file))<block_end><except_stmt>FileNotFoundError<block_start>print(_("No face model known for the user {}, please run:").format(user))<line_sep>print("\n\thowdy add\n")<line_sep>sys.exit(1)<block_end># Tracks if a encoding with that id has been found found=<false><line_sep># Get the ID from the cli arguments id=builtins.howdy_args.arguments[0]<line_sep># Loop though all encodings and check if they match the argument <for_stmt>enc encodings<block_start><if_stmt>str(enc["id"])<eq>id# Only ask the user if there's no -y flag <block_start><if_stmt><not>builtins.howdy_args.y# Double check with the user <block_start>print(_('This will remove the model called "{label}" for {user}').format(label=enc["label"] user=user))<line_sep>ans=input(_("Do you want to continue [y/N]: "))<line_sep># Abort if the answer isn't yes <if_stmt>(ans.lower()<ne>"y")<block_start>print(_('\nInterpreting 
as a "NO", aborting'))<line_sep>sys.exit(1)<block_end># Add a padding empty line print()<block_end># Mark as found and print an enter found=<true><line_sep><break><block_end><block_end># Abort if no matching id was found <if_stmt><not>found<block_start>print(_("No model with ID {id} exists for {user}").format(id=id user=user))<line_sep>sys.exit(1)<block_end># Remove the entire file if this encoding is the only one <if_stmt>len(encodings)<eq>1<block_start>os.remove(path+"/models/"+user+".dat")<line_sep>print(_("Removed last model, howdy disabled for user"))<block_end><else_stmt># A place holder to contain the encodings that will remain <block_start>new_encodings=[]<line_sep># Loop though all encodings and only add those that don't need to be removed <for_stmt>enc encodings<block_start><if_stmt>str(enc["id"])<ne>id<block_start>new_encodings.append(enc)<block_end><block_end># Save this new set to disk <with_stmt>open(enc_file "w")<as>datafile<block_start>json.dump(new_encodings datafile)<block_end>print(_("Removed model {}").format(id))<block_end>
<import_stmt>re<import_from_stmt>data_gen.tts.data_gen_utils PUNCS<import_from_stmt>g2p_en G2p<import_stmt>unicodedata<import_from_stmt>g2p_en.expand normalize_numbers<import_from_stmt>nltk pos_tag<import_from_stmt>nltk.tokenize TweetTokenizer<import_from_stmt>data_gen.tts.txt_processors.base_text_processor BaseTxtProcessor<class_stmt>EnG2p(G2p)<block_start>word_tokenize=TweetTokenizer().tokenize<def_stmt>__call__ self text# preprocessing <block_start>words=EnG2p.word_tokenize(text)<line_sep>tokens=pos_tag(words)# tuples of (word, tag) # steps prons=[]<for_stmt>word,pos tokens<block_start><if_stmt>re.search("[a-z]" word)<is><none><block_start>pron=[word]<block_end><elif_stmt>word<in>self.homograph2features# Check homograph <block_start>pron1,pron2,pos1=self.homograph2features[word]<if_stmt>pos.startswith(pos1)<block_start>pron=pron1<block_end><else_stmt><block_start>pron=pron2<block_end><block_end><elif_stmt>word<in>self.cmu# lookup CMU dict <block_start>pron=self.cmu[word][0]<block_end><else_stmt># predict for oov <block_start>pron=self.predict(word)<block_end>prons.extend(pron)<line_sep>prons.extend([" "])<block_end><return>prons[:-1]<block_end><block_end><class_stmt>TxtProcessor(BaseTxtProcessor)<block_start>g2p=EnG2p()<line_sep>@staticmethod<def_stmt>preprocess_text text<block_start>text=normalize_numbers(text)<line_sep>text=''.join(char<for>char unicodedata.normalize('NFD' text)<if>unicodedata.category(char)<ne>'Mn')<line_sep># Strip accents text=text.lower()<line_sep>text=re.sub("[\'\"()]+" "" text)<line_sep>text=re.sub("[-]+" " " text)<line_sep>text=re.sub(f"[^ a-z{PUNCS}]" "" text)<line_sep>text=re.sub(f" ?([{PUNCS}]) ?" r"\1" text)# !! -> ! text=re.sub(f"([{PUNCS}])+" r"\1" text)# !! -> ! text=text.replace("i.e." "that is")<line_sep>text=text.replace("i.e." "that is")<line_sep>text=text.replace("etc." 
"etc")<line_sep>text=re.sub(f"([{PUNCS}])" r" \1 " text)<line_sep>text=re.sub(rf"\s+" r" " text)<line_sep><return>text<block_end>@classmethod<def_stmt>process cls txt pre_align_args<block_start>txt=cls.preprocess_text(txt).strip()<line_sep>phs=cls.g2p(txt)<line_sep>phs_=[]<line_sep>n_word_sep=0<for_stmt>p phs<block_start><if_stmt>p.strip()<eq>''<block_start>phs_<augadd>['|']<line_sep>n_word_sep<augadd>1<block_end><else_stmt><block_start>phs_<augadd>p.split(" ")<block_end><block_end>phs=phs_<assert_stmt>n_word_sep+1<eq>len(txt.split(" ")) (phs f"\"{txt}\"")<line_sep><return>phs txt<block_end><block_end>
<import_from_stmt>sp_api.base AccessTokenClient<import_from_stmt>sp_api.base Credentials CredentialProvider<import_from_stmt>sp_api.base AuthorizationError<import_from_stmt>sp_api.base.credential_provider FromCodeCredentialProvider<line_sep>refresh_token='<refresh_token>'<line_sep>lwa_app_id='<lwa_app_id>'<line_sep>lwa_client_secret='<lwa_client_secret>'<line_sep>aws_secret_key='<aws_secret_access_key>'<line_sep>aws_access_key='<aws_access_key_id>'<line_sep>role_arn='<role_arn>'<def_stmt>test_auth_exception <block_start>e=AuthorizationError(200 'Foo' 999)<assert_stmt>e.status_code<eq>999<assert_stmt>e.error_code<eq>200<assert_stmt>e.message<eq>'Foo'<block_end><def_stmt>test_credentials <block_start>x=CredentialProvider()<assert_stmt>x.credentials.lwa_app_id<is><not><none><assert_stmt>x.credentials.lwa_client_secret<is><not><none><assert_stmt>x.credentials.aws_secret_key<is><not><none><assert_stmt>x.credentials.aws_access_key<is><not><none><block_end><def_stmt>test_auth_client <block_start>client=AccessTokenClient(credentials=CredentialProvider(credentials=dict(refresh_token=refresh_token lwa_app_id=lwa_app_id lwa_client_secret=lwa_client_secret aws_secret_key=aws_secret_key aws_access_key=aws_access_key role_arn=role_arn )).credentials)<line_sep>x=client._auth_code_request_body('foo')<assert_stmt>x.get('grant_type')<eq>'authorization_code'<try_stmt><block_start>client.authorize_auth_code('foo')<block_end><except_stmt>AuthorizationError<as>e<block_start><assert_stmt>isinstance(e AuthorizationError)<block_end><try_stmt><block_start>client._request('https://jsonplaceholder.typicode.com/posts/1' {} {})<block_end><except_stmt>AuthorizationError<as>e<block_start><assert_stmt>isinstance(e AuthorizationError)<block_end><block_end>
apiAttachAvailable=u'API Kullanilabilir'<line_sep>apiAttachNotAvailable=u'Kullanilamiyor'<line_sep>apiAttachPendingAuthorization=u'Yetkilendirme Bekliyor'<line_sep>apiAttachRefused=u'Reddedildi'<line_sep>apiAttachSuccess=u'Basarili oldu'<line_sep>apiAttachUnknown=u'Bilinmiyor'<line_sep>budDeletedFriend=u'Arkadas Listesinden Silindi'<line_sep>budFriend=u'Arkadas'<line_sep>budNeverBeenFriend=u'Arkadas Listesinde Hi\xe7 Olmadi'<line_sep>budPendingAuthorization=u'Yetkilendirme Bekliyor'<line_sep>budUnknown=u'Bilinmiyor'<line_sep>cfrBlockedByRecipient=u'\xc7agri alici tarafindan engellendi'<line_sep>cfrMiscError=u'Diger Hata'<line_sep>cfrNoCommonCodec=u'Genel codec yok'<line_sep>cfrNoProxyFound=u'Proxy bulunamadi'<line_sep>cfrNotAuthorizedByRecipient=u'Ge\xe7erli kullanici alici tarafindan yetkilendirilmemis'<line_sep>cfrRecipientNotFriend=u'Alici bir arkadas degil'<line_sep>cfrRemoteDeviceError=u'Uzak ses aygitinda problem var'<line_sep>cfrSessionTerminated=u'Oturum sonlandirildi'<line_sep>cfrSoundIOError=u'Ses G/\xc7 hatasi'<line_sep>cfrSoundRecordingError=u'Ses kayit hatasi'<line_sep>cfrUnknown=u'Bilinmiyor'<line_sep>cfrUserDoesNotExist=u'Kullanici/telefon numarasi mevcut degil'<line_sep>cfrUserIsOffline=u'\xc7evrim Disi'<line_sep>chsAllCalls=u'Eski Diyalog'<line_sep>chsDialog=u'Diyalog'<line_sep>chsIncomingCalls=u'\xc7oklu Sohbet Kabul\xfc Gerekli'<line_sep>chsLegacyDialog=u'Eski Diyalog'<line_sep>chsMissedCalls=u'Diyalog'<line_sep>chsMultiNeedAccept=u'\xc7oklu Sohbet Kabul\xfc Gerekli'<line_sep>chsMultiSubscribed=u'\xc7oklu Abonelik'<line_sep>chsOutgoingCalls=u'\xc7oklu Abonelik'<line_sep>chsUnknown=u'Bilinmiyor'<line_sep>chsUnsubscribed=u'Aboneligi Silindi'<line_sep>clsBusy=u'Mesgul'<line_sep>clsCancelled=u'Iptal Edildi'<line_sep>clsEarlyMedia=u'Early Media y\xfcr\xfct\xfcl\xfcyor'<line_sep>clsFailed=u'\xdczg\xfcn\xfcz, arama basarisiz!'<line_sep>clsFinished=u'Bitirildi'<line_sep>clsInProgress=u'Arama Yapiliyor'<line_sep>clsLocalHold=u'Yerel 
Beklemede'<line_sep>clsMissed=u'Cevapsiz Arama'<line_sep>clsOnHold=u'Beklemede'<line_sep>clsRefused=u'Reddedildi'<line_sep>clsRemoteHold=u'Uzak Beklemede'<line_sep>clsRinging=u'ariyor'<line_sep>clsRouting=u'Y\xf6nlendirme'<line_sep>clsTransferred=u'Bilinmiyor'<line_sep>clsTransferring=u'Bilinmiyor'<line_sep>clsUnknown=u'Bilinmiyor'<line_sep>clsUnplaced=u'Asla baglanmadi'<line_sep>clsVoicemailBufferingGreeting=u'Selamlama Ara Bellege Aliniyor'<line_sep>clsVoicemailCancelled=u'Sesli Posta Iptal Edildi'<line_sep>clsVoicemailFailed=u'Sesli Mesaj Basarisiz'<line_sep>clsVoicemailPlayingGreeting=u'Selamlama Y\xfcr\xfct\xfcl\xfcyor'<line_sep>clsVoicemailRecording=u'Sesli Mesaj Kaydediliyor'<line_sep>clsVoicemailSent=u'Sesli Posta G\xf6nderildi'<line_sep>clsVoicemailUploading=u'Sesli Posta Karsiya Y\xfckleniyor'<line_sep>cltIncomingP2P=u'Gelen Esler Arasi Telefon \xc7agrisi'<line_sep>cltIncomingPSTN=u'Gelen Telefon \xc7agrisi'<line_sep>cltOutgoingP2P=u'Giden Esler Arasi Telefon \xc7agrisi'<line_sep>cltOutgoingPSTN=u'Giden Telefon \xc7agrisi'<line_sep>cltUnknown=u'Bilinmiyor'<line_sep>cmeAddedMembers=u'Eklenen \xdcyeler'<line_sep>cmeCreatedChatWith=u'Sohbet Olusturuldu:'<line_sep>cmeEmoted=u'Bilinmiyor'<line_sep>cmeLeft=u'Birakilan'<line_sep>cmeSaid=u'Ifade'<line_sep>cmeSawMembers=u'G\xf6r\xfclen \xdcyeler'<line_sep>cmeSetTopic=u'Konu Belirleme'<line_sep>cmeUnknown=u'Bilinmiyor'<line_sep>cmsRead=u'Okundu'<line_sep>cmsReceived=u'Alindi'<line_sep>cmsSending=u'G\xf6nderiliyor...'<line_sep>cmsSent=u'G\xf6nderildi'<line_sep>cmsUnknown=u'Bilinmiyor'<line_sep>conConnecting=u'Baglaniyor'<line_sep>conOffline=u'\xc7evrim Disi'<line_sep>conOnline=u'\xc7evrim I\xe7i'<line_sep>conPausing=u'Duraklatiliyor'<line_sep>conUnknown=u'Bilinmiyor'<line_sep>cusAway=u'Uzakta'<line_sep>cusDoNotDisturb=u'Rahatsiz Etmeyin'<line_sep>cusInvisible=u'G\xf6r\xfcnmez'<line_sep>cusLoggedOut=u'\xc7evrim Disi'<line_sep>cusNotAvailable=u'Kullanilamiyor'<line_sep>cusOffline=u'\xc7evrim 
Disi'<line_sep>cusOnline=u'\xc7evrim I\xe7i'<line_sep>cusSkypeMe=u'Skype Me'<line_sep>cusUnknown=u'Bilinmiyor'<line_sep>cvsBothEnabled=u'Video G\xf6nderme ve Alma'<line_sep>cvsNone=u'Video Yok'<line_sep>cvsReceiveEnabled=u'Video Alma'<line_sep>cvsSendEnabled=u'Video G\xf6nderme'<line_sep>cvsUnknown=u''<line_sep>grpAllFriends=u'T\xfcm Arkadaslar'<line_sep>grpAllUsers=u'T\xfcm Kullanicilar'<line_sep>grpCustomGroup=u'\xd6zel'<line_sep>grpOnlineFriends=u'\xc7evrimi\xe7i Arkadaslar'<line_sep>grpPendingAuthorizationFriends=u'Yetkilendirme Bekliyor'<line_sep>grpProposedSharedGroup=u'Proposed Shared Group'<line_sep>grpRecentlyContactedUsers=u'Son Zamanlarda Iletisim Kurulmus Kullanicilar'<line_sep>grpSharedGroup=u'Shared Group'<line_sep>grpSkypeFriends=u'Skype Arkadaslari'<line_sep>grpSkypeOutFriends=u'SkypeOut Arkadaslari'<line_sep>grpUngroupedFriends=u'Gruplanmamis Arkadaslar'<line_sep>grpUnknown=u'Bilinmiyor'<line_sep>grpUsersAuthorizedByMe=u'Tarafimdan Yetkilendirilenler'<line_sep>grpUsersBlockedByMe=u'Engellediklerim'<line_sep>grpUsersWaitingMyAuthorization=u'Yetkilendirmemi Bekleyenler'<line_sep>leaAddDeclined=u'Ekleme Reddedildi'<line_sep>leaAddedNotAuthorized=u'Ekleyen Kisinin Yetkisi Olmali'<line_sep>leaAdderNotFriend=u'Ekleyen Bir Arkadas Olmali'<line_sep>leaUnknown=u'Bilinmiyor'<line_sep>leaUnsubscribe=u'Aboneligi Silindi'<line_sep>leaUserIncapable=u'Kullanicidan Kaynaklanan Yetersizlik'<line_sep>leaUserNotFound=u'Kullanici Bulunamadi'<line_sep>olsAway=u'Uzakta'<line_sep>olsDoNotDisturb=u'Rahatsiz Etmeyin'<line_sep>olsNotAvailable=u'Kullanilamiyor'<line_sep>olsOffline=u'\xc7evrim Disi'<line_sep>olsOnline=u'\xc7evrim I\xe7i'<line_sep>olsSkypeMe=u'Skype 
Me'<line_sep>olsSkypeOut=u'SkypeOut'<line_sep>olsUnknown=u'Bilinmiyor'<line_sep>smsMessageStatusComposing=u'Composing'<line_sep>smsMessageStatusDelivered=u'Delivered'<line_sep>smsMessageStatusFailed=u'Failed'<line_sep>smsMessageStatusRead=u'Read'<line_sep>smsMessageStatusReceived=u'Received'<line_sep>smsMessageStatusSendingToServer=u'Sending to Server'<line_sep>smsMessageStatusSentToServer=u'Sent to Server'<line_sep>smsMessageStatusSomeTargetsFailed=u'Some Targets Failed'<line_sep>smsMessageStatusUnknown=u'Unknown'<line_sep>smsMessageTypeCCRequest=u'Confirmation Code Request'<line_sep>smsMessageTypeCCSubmit=u'Confirmation Code Submit'<line_sep>smsMessageTypeIncoming=u'Incoming'<line_sep>smsMessageTypeOutgoing=u'Outgoing'<line_sep>smsMessageTypeUnknown=u'Unknown'<line_sep>smsTargetStatusAcceptable=u'Acceptable'<line_sep>smsTargetStatusAnalyzing=u'Analyzing'<line_sep>smsTargetStatusDeliveryFailed=u'Delivery Failed'<line_sep>smsTargetStatusDeliveryPending=u'Delivery Pending'<line_sep>smsTargetStatusDeliverySuccessful=u'Delivery Successful'<line_sep>smsTargetStatusNotRoutable=u'Not Routable'<line_sep>smsTargetStatusUndefined=u'Undefined'<line_sep>smsTargetStatusUnknown=u'Unknown'<line_sep>usexFemale=u'Kadin'<line_sep>usexMale=u'Erkek'<line_sep>usexUnknown=u'Bilinmiyor'<line_sep>vmrConnectError=u'Baglanti Hatasi'<line_sep>vmrFileReadError=u'Dosya Okuma Hatasi'<line_sep>vmrFileWriteError=u'Dosya Yazma Hatasi'<line_sep>vmrMiscError=u'Diger Hata'<line_sep>vmrNoError=u'Hata Yok'<line_sep>vmrNoPrivilege=u'Sesli Posta \xd6nceligi Yok'<line_sep>vmrNoVoicemail=u'B\xf6yle Bir Sesli Posta Yok'<line_sep>vmrPlaybackError=u'Y\xfcr\xfctme Hatasi'<line_sep>vmrRecordingError=u'Kayit Hatasi'<line_sep>vmrUnknown=u'Bilinmiyor'<line_sep>vmsBlank=u'Bos'<line_sep>vmsBuffering=u'Ara bellege aliniyor'<line_sep>vmsDeleting=u'Siliniyor'<line_sep>vmsDownloading=u'Karsidan Y\xfckleniyor'<line_sep>vmsFailed=u'Basarisiz Oldu'<line_sep>vmsNotDownloaded=u'Karsidan 
Y\xfcklenmedi'<line_sep>vmsPlayed=u'Y\xfcr\xfct\xfcld\xfc'<line_sep>vmsPlaying=u'Y\xfcr\xfct\xfcl\xfcyor'<line_sep>vmsRecorded=u'Kaydedildi'<line_sep>vmsRecording=u'Sesli Mesaj Kaydediliyor'<line_sep>vmsUnknown=u'Bilinmiyor'<line_sep>vmsUnplayed=u'Y\xfcr\xfct\xfclmemis'<line_sep>vmsUploaded=u'Karsiya Y\xfcklendi'<line_sep>vmsUploading=u'Karsiya Y\xfckleniyor'<line_sep>vmtCustomGreeting=u'\xd6zel Selamlama'<line_sep>vmtDefaultGreeting=u'Varsayilan Selamlama'<line_sep>vmtIncoming=u'gelen sesli mesaj'<line_sep>vmtOutgoing=u'Giden'<line_sep>vmtUnknown=u'Bilinmiyor'<line_sep>vssAvailable=u'Kullanilabilir'<line_sep>vssNotAvailable=u'Kullanilamiyor'<line_sep>vssPaused=u'Duraklatildi'<line_sep>vssRejected=u'Reddedildi'<line_sep>vssRunning=u'\xc7alisiyor'<line_sep>vssStarting=u'Basliyor'<line_sep>vssStopping=u'Durduruluyor'<line_sep>vssUnknown=u'Bilinmiyor'<line_sep>
<import_from_stmt>masonite.middleware Middleware<class_stmt>__class__(Middleware)<block_start><def_stmt>before self request response<block_start><return>request<block_end><def_stmt>after self request response<block_start><return>request<block_end><block_end>
# Integration tests for the Jenkins Attack Framework API-token plugins:
# CreateAPIToken, DeleteAPIToken and ListAPITokens.
#
# Each *Test class drives a plugin end-to-end through
# TestFramework.basic_test_harness(argv, [expected output regexes], expected
# exit code); the *ParserTest classes only verify argparse help output.  The
# Combined* classes are order-dependent (test_1..test_4 share a random token
# name created in setUpClass).

import random
import string
import unittest
import warnings

from libs import jenkinslib
from libs.JAF.BaseCommandLineParser import BaseCommandLineParser
from libs.JAF.plugin_CreateAPIToken import CreateAPIToken, CreateAPITokenParser
from libs.JAF.plugin_DeleteAPIToken import DeleteAPIToken, DeleteAPITokenParser
from libs.JAF.plugin_ListAPITokens import ListAPITokens, ListAPITokensParser

from .configuration import (
    server,
    user_admin,
    user_bad,
    user_noaccess,
    user_normal,
    user_read_job_access,
    user_read_no_job_access,
)
from .helpers import DummyWebServer, TestFramework


class CreateAPITokenTest(unittest.TestCase, TestFramework):
    def setUp(self):
        # The plugin leaves sockets open to the (dummy) servers, which is noisy
        # under unittest; silence ResourceWarning for each test.
        warnings.simplefilter("ignore", ResourceWarning)
        self.testcommand = "CreateAPIToken"
        self.TestParserClass = CreateAPITokenParser
        self.TestClass = CreateAPIToken

    def test_invalid_url(self):
        """Make sure that calling with invalid url fails gracefully"""
        # Port 59321 has no listener at all.
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_url_bad_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
        # DummyWebServer listens on 59322 speaking plain HTTP; we talk HTTPS.
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_url_and_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_jenkins_invalid_creds(self):
        """Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds)"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_unprivileged_creds(self):
        """Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
        """Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
        # Non-admin users may not create tokens for other users (-U).
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )


class CreateAPITokenParserTest(unittest.TestCase, TestFramework):
    def setUp(self):
        self.testcommand = "CreateAPIToken"
        self.TestClass = CreateAPIToken
        self.TestParserClass = CreateAPITokenParser

    def test_no_args(self):
        """Ensure that calling with no arguments results in help output and not an error"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand],
            [
                r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
                r"Jenkins Attack Framework",
                r"positional arguments:",
            ],
        )


class DeleteAPITokenTest(unittest.TestCase, TestFramework):
    def setUp(self):
        warnings.simplefilter("ignore", ResourceWarning)
        self.testcommand = "DeleteAPIToken"
        self.TestParserClass = DeleteAPITokenParser
        self.TestClass = DeleteAPIToken

    def test_invalid_url(self):
        """Make sure that calling with invalid url fails gracefully"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_url_bad_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_url_and_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_jenkins_invalid_creds(self):
        """Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds)"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_unprivileged_creds(self):
        """Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
        """Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )


class DeleteAPITokenParserTest(unittest.TestCase, TestFramework):
    def setUp(self):
        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

    def test_no_args(self):
        """Ensure that calling with no arguments results in help output and not an error"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand],
            [
                r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
                r"Jenkins Attack Framework",
                r"positional arguments:",
            ],
        )


class ListAPITokensTest(unittest.TestCase, TestFramework):
    def setUp(self):
        warnings.simplefilter("ignore", ResourceWarning)
        self.testcommand = "ListAPITokens"
        self.TestParserClass = ListAPITokensParser
        self.TestClass = ListAPITokens

    def test_invalid_url(self):
        """Make sure that calling with invalid url fails gracefully"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59321/", "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_url_bad_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins or right protocol) fails gracefully"""
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "https://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_url_and_protocol(self):
        """Make sure that calling with valid url (that isn't Jenkins but right protocol) fails gracefully"""
        with DummyWebServer():
            self.basic_test_harness(
                ["jaf.py", self.testcommand, "-s", "http://127.0.0.1:59322/", "-a", user_bad],
                [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
                1,
            )

    def test_valid_jenkins_invalid_creds(self):
        """Make sure that calling with valid jenkins (but bad creds) fails gracefully"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_bad],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_anonymous_creds(self):
        """Make sure that calling with valid jenkins (but no creds)"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_unprivileged_creds(self):
        """Make sure that calling with valid jenkins (unprivileged creds) returns expected results"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_noaccess],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )

    def test_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
        # NOTE(review): the three assignments below repeat what setUp() already
        # did; they look copied from the Combined* classes and are redundant here.
        self.testcommand = "ListAPITokens"
        self.TestClass = ListAPITokens
        self.TestParserClass = ListAPITokensParser

        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
            [r"Current API Tokens:"],
        )

    def test_valid_jenkins_valid_normal_creds_with_user_argument(self):
        """Make sure that calling with valid jenkins (normal creds) and user flag returns expected results"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_normal, "-U", user_admin],
            [r"- \w+: Invalid Credentials or unable to access Jenkins server."],
            1,
        )


class ListAPITokensParserTest(unittest.TestCase, TestFramework):
    def setUp(self):
        self.testcommand = "ListAPITokens"
        self.TestClass = ListAPITokens
        self.TestParserClass = ListAPITokensParser

    def test_no_args(self):
        """Ensure that calling with no arguments results in help output and not an error"""
        self.basic_test_harness(
            ["jaf.py", self.testcommand],
            [
                r"usage: jaf.py {0} \[-h\]".format(self.testcommand),
                r"Jenkins Attack Framework",
                r"positional arguments:",
            ],
        )


class CombinedAPITokenNormalUserCredentialsTest(unittest.TestCase, TestFramework):
    # Ordered create -> list -> delete-list -> delete lifecycle against the
    # caller's own account; the shared random token name ties the steps together.
    @classmethod
    def setUpClass(cls):
        cls.token_name = "testtoken" + "".join(
            random.choices(string.ascii_letters + string.digits, k=26)
        )

    def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
        """Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
        self.testcommand = "CreateAPIToken"
        self.TestClass = CreateAPIToken
        self.TestParserClass = CreateAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_read_no_job_access,
                self.token_name,
            ],
            [r"Your new API Token is: "],
        )

    def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
        self.testcommand = "ListAPITokens"
        self.TestClass = ListAPITokens
        self.TestParserClass = ListAPITokensParser

        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
            [r"Token Name: " + self.token_name],
        )

    def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
        # DeleteAPIToken without a token argument lists the deletable tokens.
        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            ["jaf.py", self.testcommand, "-s", server, "-a", user_read_no_job_access],
            [r"Token Name: " + self.token_name],
        )

    def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""
        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_read_no_job_access,
                self.token_name,
            ],
            [r"Token Deleted Successfully."],
        )


# For now this is commented out because we can only test this on a cloudbees federated setup, which we don't have
'''
class CombinedAPITokenNormalUserCookieTest(unittest.TestCase, TestFramework):
    """
    We need to specifically test auth with cookies because code has to do extra work to derive the logged-in user's username
    """

    @classmethod
    def setUpClass(cls):
        cls.token_name = "testtoken" + "".join(
            random.choices(string.ascii_letters + string.digits, k=26)
        )

        try:
            js = jenkinslib.Jenkins(
                server,
                username=user_read_no_job_access.split(':')[0],
                password=':'.join(user_read_no_job_access.split(':')[1:]),
                timeout=30,
            )

            cls.cookie = js.get_cookie()
        except Exception:
            print(cls.cookie)
            #Failure will cause tests to fail, so we ignore here
            pass

    def test_1_valid_jenkins_valid_read_no_job_creds_token_create(self):
        """Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""

        self.testcommand = "CreateAPIToken"
        self.TestClass = CreateAPIToken
        self.TestParserClass = CreateAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                self.cookie,
                self.token_name,
            ],
            [r"Your new API Token is: "],
        )

    def test_2_valid_jenkins_valid_read_no_job_creds_token_list(self):
        """Make sure that calling CreateAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""

        self.testcommand = "ListAPITokens"
        self.TestClass = ListAPITokens
        self.TestParserClass = ListAPITokensParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                self.cookie,
            ],
            [r"Token Name: " + self.token_name],
        )

    def test_3_valid_jenkins_valid_read_no_job_creds_token_delete_list(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""

        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                self.cookie,
            ],
            [r"Token Name: " + self.token_name],
        )

    def test_4_valid_jenkins_valid_read_no_job_creds_token_delete(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (read only [no job access] creds) returns expected results"""

        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                self.cookie,
                self.token_name,
            ],
            [r"Token Deleted Successfully."],
        )
'''


class CombinedAPITokenAdminUserTest(unittest.TestCase, TestFramework):
    # Same lifecycle as the NormalUser variant, but an admin manipulates the
    # tokens of another user via the -U flag.
    @classmethod
    def setUpClass(cls):
        cls.token_name = "testtoken" + "".join(
            random.choices(string.ascii_letters + string.digits, k=26)
        )

    def test_1_valid_jenkins_valid_admin_creds_token_create_other_user(self):
        """Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
        self.testcommand = "CreateAPIToken"
        self.TestClass = CreateAPIToken
        self.TestParserClass = CreateAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_admin,
                "-U",
                user_read_no_job_access,
                self.token_name,
            ],
            [r"Your new API Token is: "],
        )

    def test_2_valid_jenkins_valid_admin_creds_token_list_other_user(self):
        """Make sure that calling CreateAPIToken with valid jenkins (admin creds) returns expected results"""
        self.testcommand = "ListAPITokens"
        self.TestClass = ListAPITokens
        self.TestParserClass = ListAPITokensParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_admin,
                "-U",
                user_read_no_job_access,
            ],
            [r"Token Name: " + self.token_name],
        )

    def test_3_valid_jenkins_valid_admin_creds_token_delete_list_other_user(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_admin,
                "-U",
                user_read_no_job_access,
            ],
            [r"Token Name: " + self.token_name],
        )

    def test_4_valid_jenkins_valid_admin_creds_token_delete_other_user(self):
        """Make sure that calling DeleteAPIToken with valid jenkins (admin creds) returns expected results"""
        self.testcommand = "DeleteAPIToken"
        self.TestClass = DeleteAPIToken
        self.TestParserClass = DeleteAPITokenParser

        self.basic_test_harness(
            [
                "jaf.py",
                self.testcommand,
                "-s",
                server,
                "-a",
                user_admin,
                "-U",
                user_read_no_job_access,
                self.token_name,
            ],
            [r"Token Deleted Successfully."],
        )


if __name__ == "__main__":
    unittest.main()
import asyncio
import json
import logging
from typing import List, Set

import websockets


class BrowserWebsocketServer:
    """
    The BrowserWebsocketServer manages our connection to our browser extension,
    brokering messages between Google Meet and our plugin's EventHandler.

    We expect browser tabs (and our websockets) to come and go, and our plugin is
    long-lived, so we have a lot of exception handling to do here to keep the
    plugin running. Most actions are "best effort".

    We also have to handle the possibility of multiple browser websockets at the
    same time, e.g. in case the user refreshes their Meet window and we have stale
    websockets hanging around, or if we have multiple Meet tabs.
    """

    def __init__(self):
        """
        Remember to call start() before attempting to use your new instance!
        """
        self._logger = logging.getLogger(__name__)

        # All currently-connected sockets to the browser extension; used to
        # broadcast outbound messages from this plugin to the extension.
        self._ws_clients: Set[websockets.WebSocketServerProtocol] = set()

        # EventHandlers registered to receive inbound events from the
        # browser extension.
        self._handlers: List["EventHandler"] = []

    def start(self, hostname: str, port: int):
        """
        Create the websocket server on the given interface/port.

        Returns the object produced by websockets.serve(); the caller is
        expected to schedule it on an event loop.  (Fix: this was annotated
        ``-> None`` even though the serve handle was always returned.)
        """
        return websockets.serve(self._message_receive_loop, hostname, port)

    async def send_to_clients(self, message: str) -> None:
        """
        Send a message from our plugin to the Chrome extension. We broadcast to
        any connections we have, in case the user has multiple Meet windows/tabs
        open.  Delivery is best-effort: per-client send failures are not raised.
        """
        if self._ws_clients:
            self._logger.info(f"Broadcasting message to connected browser clients: {message}")
            # Fix: asyncio.wait() on bare coroutines is deprecated since 3.8
            # and removed in 3.11; gather() schedules the sends explicitly.
            # return_exceptions=True preserves the old best-effort semantics.
            await asyncio.gather(
                *(client.send(message) for client in self._ws_clients),
                return_exceptions=True,
            )
        else:
            # Fix: Logger.warn() is a deprecated alias of Logger.warning().
            self._logger.warning(
                (
                    "There were no active browser extension clients to send our"
                    f" message to! Message: {message}"
                )
            )

    def register_event_handler(self, handler: "EventHandler") -> None:
        """
        Register your EventHandler to have it receive callbacks whenever we get
        an event over the wire from the browser extension.
        """
        self._handlers.append(handler)

    def num_connected_clients(self) -> int:
        """Return the number of currently-open browser extension connections."""
        return len(self._ws_clients)

    def _register_client(self, ws: websockets.WebSocketServerProtocol) -> None:
        """Track a newly-connected browser websocket."""
        self._ws_clients.add(ws)
        self._logger.info(
            (
                f"{ws.remote_address} has connected to our browser websocket."
                f" We now have {len(self._ws_clients)} active connection(s)."
            )
        )

    async def _unregister_client(self, ws: websockets.WebSocketServerProtocol) -> None:
        """Close and forget a browser websocket (best effort)."""
        try:
            await ws.close()
        except Exception:
            # Fix: narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.
            self._logger.exception("Exception while closing browser websocket connection.")

        self._ws_clients.discard(ws)
        self._logger.info(
            (
                f"{ws.remote_address} has disconnected from our browser websocket."
                f" We now have {len(self._ws_clients)} active connection(s) remaining."
            )
        )

    async def _message_receive_loop(self, ws: websockets.WebSocketServerProtocol, uri: str) -> None:
        """
        Loop of waiting for and processing inbound websocket messages, until the
        connection dies. Each connection will create one of these coroutines.
        """
        self._register_client(ws)
        try:
            async for message in ws:
                self._logger.info(
                    f"Received inbound message from browser extension. Message: {message}"
                )
                await self._process_inbound_message(message)
        except Exception:
            self._logger.exception(
                "BrowserWebsocketServer encountered an exception while waiting for inbound messages."
            )
        finally:
            await self._unregister_client(ws)

        # If that was the last browser connection, let our handlers know.
        if not self._ws_clients:
            for handler in self._handlers:
                try:
                    await handler.on_all_browsers_disconnected()
                except Exception:
                    # Fix: "mananger" typo in the log message.
                    self._logger.exception(
                        "Connection manager received an exception from EventHandler!"
                    )

    async def _process_inbound_message(self, message: str) -> None:
        """
        Process one individual inbound websocket message, fanning the parsed
        JSON event out to every registered EventHandler.
        """
        try:
            parsed_event = json.loads(message)
        except ValueError:
            # json.JSONDecodeError is a ValueError; anything else should surface.
            self._logger.exception(
                f"Failed to parse browser websocket message as JSON. Message: {message}"
            )
            return

        for handler in self._handlers:
            try:
                await handler.on_browser_event(parsed_event)
            except Exception:
                self._logger.exception(
                    "Connection manager received an exception from EventHandler!"
                )
# -*- coding: utf-8 -*- # System SYSTEM_LANGUAGE_KEY='System/Language'<line_sep>SYSTEM_THEME_KEY='System/Theme'<line_sep>SYSTEM_THEME_DEFAULT='System'<line_sep># File FILE_SAVE_TO_DIR_KEY='File/SaveToDir'<line_sep>FILE_SAVE_TO_DIR_DEFAULT=''<line_sep>FILE_FILENAME_PREFIX_FORMAT_KEY='File/FilenamePrefixFormat'<line_sep>FILE_FILENAME_PREFIX_FORMAT_DEFAULT='{id}_{year}_{author}_{title}'<line_sep>FILE_OVERWRITE_EXISTING_FILE_KEY='File/OverwriteExistingFile'<line_sep>FILE_OVERWRITE_EXISTING_FILE_DEFAULT=<false><line_sep># Network NETWORK_SCIHUB_URL_KEY='Network/SciHubURL'<line_sep>NETWORK_SCIHUB_URL_DEFAULT='https://sci-hub.se'<line_sep>NETWORK_SCIHUB_URLS_KEY='Network/SciHubURLs'<line_sep>NETWORK_SCIHUB_URLS_DEFAULT=['https://sci-hub.se' 'https://sci-hub.st']<line_sep>NETWORK_TIMEOUT_KEY='Network/Timeout'<line_sep>NETWORK_TIMEOUT_DEFAULT=3000<line_sep>NETWORK_RETRY_TIMES_KEY='Network/RetryTimes'<line_sep>NETWORK_RETRY_TIMES_DEFAULT=3<line_sep>NETWORK_PROXY_ENABLE_KEY='Network/ProxyEnable'<line_sep>NETWORK_PROXY_ENABLE_DEFAULT=<false><line_sep>NETWORK_PROXY_TYPE_KEY='Network/ProxyType'<line_sep>NETWORK_PROXY_TYPE_DEFAULT='http'<line_sep>NETWORK_PROXY_HOST_KEY='Network/ProxyHost'<line_sep>NETWORK_PROXY_HOST_DEFAULT='127.0.0.1'<line_sep>NETWORK_PROXY_PORT_KEY='Network/ProxyPort'<line_sep>NETWORK_PROXY_PORT_DEFAULT='7890'<line_sep>NETWORK_PROXY_USERNAME_KEY='Network/ProxyUsername'<line_sep>NETWORK_PROXY_USERNAME_DEFAULT=''<line_sep>NETWORK_PROXY_PASSWORD_KEY='Network/ProxyPassword'<line_sep>NETWORK_PROXY_PASSWORD_DEFAULT=''<line_sep>
"""Test app with multiple hierarchy levels above the actual models.py file"""<line_sep>
# Train an LSTM that predicts purchase (1) vs. abandon (0) from a clickstream
# session, tracking the run with Weights & Biases.

import json

import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.client import device_lib
from sklearn.model_selection import train_test_split
from wandb.keras import WandbCallback

from utils import return_json_file_content


def session_indexed(s):
    """
    Converts a session (of actions) to indices and adds start/end tokens

    :param s: list of actions in a session (i.e 'add','detail', etc)
    :return: list of int token ids, bracketed by the start (0) and end (1) ids
    """
    # assign an integer to each possible action token
    action_to_idx = {'start': 0, 'end': 1, 'add': 2, 'remove': 3, 'purchase': 4, 'detail': 5, 'view': 6}
    return [action_to_idx['start']] + [action_to_idx[e] for e in s] + [action_to_idx['end']]


def train_lstm_model(x,
                     y,
                     epochs=200,
                     patience=10,
                     lstm_dim=48,
                     batch_size=128,
                     lr=1e-3):
    """
    Train an LSTM to predict purchase (1) or abandon (0)

    :param x: session sequences
    :param y: target labels
    :param epochs: num training epochs
    :param patience: early stopping patience
    :param lstm_dim: lstm units
    :param batch_size: batch size
    :param lr: learning rate
    :return: (model architecture as JSON string, model weights, fitted model)
    """
    # Verfiy if GPU/CPU is being used
    print("Print out system device...")
    print(device_lib.list_local_devices())
    print("Starting training now...")

    X_train, X_test, y_train, y_test = train_test_split(x, y)

    # pad sequences for training in batches
    # Padding value 7 is outside the 0-6 action vocabulary; with one-hot
    # depth=7 below it becomes an all-zero vector, which Masking() then skips.
    max_len = max(len(_) for _ in x)
    X_train = pad_sequences(X_train, padding="post", value=7, maxlen=max_len)
    X_test = pad_sequences(X_test, padding="post", value=7, maxlen=max_len)

    # convert to one-hot
    X_train = tf.one_hot(X_train, depth=7)
    X_test = tf.one_hot(X_test, depth=7)

    y_train = np.array(y_train)
    y_test = np.array(y_test)

    # Define Model
    model = keras.Sequential()
    model.add(keras.layers.InputLayer(input_shape=(None, 7)))
    # Masking layer ignores padded time-steps
    model.add(keras.layers.Masking())
    model.add(keras.layers.LSTM(lstm_dim))
    # Single sigmoid unit: binary purchase/abandon probability.
    model.add(keras.layers.Dense(1, activation='sigmoid'))
    model.summary()

    # Some Hyper Params
    opt = keras.optimizers.Adam(learning_rate=lr)
    loss = keras.losses.BinaryCrossentropy()
    es = keras.callbacks.EarlyStopping(monitor='val_loss',
                                       patience=patience,
                                       verbose=1,
                                       restore_best_weights=True)

    # Include wandb callback for tracking
    callbacks = [es, WandbCallback()]
    model.compile(optimizer=opt,
                  loss=loss,
                  metrics=['accuracy'])

    # Train Model
    model.fit(X_train,
              y_train,
              validation_data=(X_test, y_test),
              batch_size=batch_size,
              epochs=epochs,
              callbacks=callbacks)

    # return trained model
    # NB: to store model as Metaflow Artifact it needs to be pickle-able!
    return model.to_json(), model.get_weights(), model


def make_predictions(predictor):
    """
    Run the served model on one hard-coded example session.

    :param predictor: deployed predictor exposing .predict({'instances': ...})
    :return: raw prediction payload from the predictor
    """
    # load test data: one token-id sequence one-hot encoded to depth 7,
    # matching the (None, 7) input the model was trained on.
    test_inp = {'instances': tf.one_hot(np.array([[0, 1, 1, 3, 4, 5]]),
                                        on_value=1,
                                        off_value=0,
                                        depth=7).numpy()}
    # make predictions
    preds = predictor.predict(test_inp)

    return preds
""" The script expects the MViT (MDef-DETR or MDETR) detections in .txt format. For example, there should be, One .txt file for each image and each line in the file represents a detection. The format of a single detection should be "<label> <confidence> <x1> <y1> <x2> <y2> Please see the 'mvit_detections' for reference. """<import_stmt>os<import_stmt>argparse<import_stmt>xml.etree.ElementTree<as>ET<import_from_stmt>fvcore.common.file_io PathManager<import_stmt>numpy<as>np<import_stmt>time<import_stmt>cv2<import_from_stmt>nms nms<line_sep>TASK1_TRAIN_LIST="t1_train.txt"<line_sep>TASK2_TRAIN_LIST="t2_train.txt"<line_sep>TASK3_TRAIN_LIST="t3_train.txt"<line_sep>TASK4_TRAIN_LIST="t4_train.txt"<def_stmt>read_image_list path<block_start><with_stmt>open(path 'r')<as>f<block_start>lines=f.read()<block_end>images=lines.split('\n')<line_sep><return>images[:-1]<block_end>TASK1_TRAIN_IMAGES=read_image_list(TASK1_TRAIN_LIST)<line_sep>TASK2_TRAIN_IMAGES=read_image_list(TASK2_TRAIN_LIST)<line_sep>TASK3_TRAIN_IMAGES=read_image_list(TASK3_TRAIN_LIST)<line_sep>TASK4_TRAIN_IMAGES=read_image_list(TASK4_TRAIN_LIST)<line_sep>TASK1_KNOWN_CLASSES=["aeroplane" "bicycle" "bird" "boat" "bottle" "bus" "car" "cat" "chair" "cow" "diningtable" "dog" "horse" "motorbike" "person" "pottedplant" "sheep" "sofa" "train" "tvmonitor" "airplane" "dining table" "motorcycle" "potted plant" "couch" "tv"]<line_sep>TASK2_KNOWN_CLASSES=TASK1_KNOWN_CLASSES+["truck" "traffic light" "fire hydrant" "stop sign" "parking meter" "bench" "elephant" "bear" "zebra" "giraffe" "backpack" "umbrella" "handbag" "tie" "suitcase" "microwave" "oven" "toaster" "sink" "refrigerator"]<line_sep>TASK3_KNOWN_CLASSES=TASK2_KNOWN_CLASSES+["frisbee" "skis" "snowboard" "sports ball" "kite" "baseball bat" "baseball glove" "skateboard" "surfboard" "tennis racket" "banana" "apple" "sandwich" "orange" "broccoli" "carrot" "hot dog" "pizza" "donut" "cake"]<line_sep>TASK4_KNOWN_CLASSES=TASK3_KNOWN_CLASSES+["bed" "toilet" "laptop" "mouse" 
"remote" "keyboard" "cell phone" "book" "clock" "vase" "scissors" "teddy bear" "hair drier" "toothbrush" "wine glass" "cup" "fork" "knife" "spoon" "bowl"]<def_stmt>parse_arguments <block_start>""" Parse the command line arguments """<line_sep>ap=argparse.ArgumentParser()<line_sep>ap.add_argument("-ann" "--annotations_dir_path" required=<true> help="Path to the directory containing the original annotations in pascal VOC format.")<line_sep>ap.add_argument("-det" "--detections_dir_path" required=<true> help="Path to the directory containing the detections generated using class agnostic object "<concat>"detector. One .txt file for each image where each line in the file represents a detection."<concat>"The format of a single detection should be "<concat>"<label> <confidence> <x1> <y1> <x2> <y2>")<line_sep>ap.add_argument("-o" "--output_dir_path" required=<true> help="The output dir path to save the updated annotations.")<line_sep>ap.add_argument("-det_conf" "--detection_confidence_threshold" required=<false> type=float default=0.5 help="The confidence threshold to filter potential detections at first step. 
All detections with "<concat>"confidence less than this threshold value will be ignored.")<line_sep>ap.add_argument("-iou" "--iou_thresh_unk" required=<false> type=float default=0.5 help="All detections, having an overlap greater than iou_thresh with any of the ground truths, "<concat>"will be ignored.")<line_sep>ap.add_argument("-nms" "--apply_nms" required=<false> type=bool default=<false> help="Flag to decide either to apply NMS on detections before assigning them unknown/gt or not.")<line_sep>ap.add_argument("-iou_nms" "--iou_thresh_nms" required=<false> type=float default=0.2 help="IOU threshold for NMS.")<line_sep>args=vars(ap.parse_args())<line_sep><return>args<block_end><def_stmt>parse_voc_gt_kn path<block_start>image_name=os.path.basename(path).split('.')[0]<if_stmt>os.path.exists(path)<block_start><with_stmt>PathManager.open(path)<as>f<block_start>tree=ET.parse(f)<block_end>boxes=[]<for_stmt>obj tree.findall("object")<block_start>cls=obj.find("name").text<if_stmt>image_name<in>TASK1_TRAIN_IMAGES<block_start><if_stmt>cls<not><in>TASK1_KNOWN_CLASSES<block_start><continue><block_end><block_end><elif_stmt>image_name<in>TASK2_TRAIN_IMAGES<block_start><if_stmt>cls<not><in>TASK2_KNOWN_CLASSES<block_start><continue><block_end><block_end><elif_stmt>image_name<in>TASK3_TRAIN_IMAGES<block_start><if_stmt>cls<not><in>TASK3_KNOWN_CLASSES<block_start><continue><block_end><block_end><elif_stmt>image_name<in>TASK4_TRAIN_IMAGES<block_start><if_stmt>cls<not><in>TASK4_KNOWN_CLASSES<block_start><continue><block_end><block_end><else_stmt># Not a training image <block_start><return>boxes tree <false><block_end>bbox=obj.find("bndbox")<line_sep>bbox=[float(bbox.find(x).text)<for>x ["xmin" "ymin" "xmax" "ymax"]]<line_sep># Original annotations are integers in the range [1, W or H] # Assuming they mean 1-based pixel indices (inclusive), # a box with annotation (xmin=1, xmax=W) covers the whole image. 
# In coordinate space this is represented by (xmin=0, xmax=W) bbox[0]<augsub>1.0<line_sep>bbox[1]<augsub>1.0<line_sep>boxes.append(bbox)<block_end><block_end><else_stmt># No annotation file found, create an empty xml node and return <block_start>image_name=f"{os.path.basename(path).split('.')[0]}.jpg"<line_sep>image_path=f"{os.path.dirname(os.path.dirname(path))}/JPEGImages/{image_name}"<line_sep>img=cv2.imread(image_path)<line_sep>h,w,c=img.shape<line_sep>node_root=ET.Element('annotation')<line_sep>node_folder=ET.SubElement(node_root 'folder')<line_sep>node_folder.text='VOC2007'<line_sep>node_filename=ET.SubElement(node_root 'filename')<line_sep>node_filename.text=image_name<line_sep>node_size=ET.SubElement(node_root 'size')<line_sep>node_width=ET.SubElement(node_size 'width')<line_sep>node_width.text=str(int(w))<line_sep>node_height=ET.SubElement(node_size 'height')<line_sep>node_height.text=str(int(h))<line_sep>node_depth=ET.SubElement(node_size 'depth')<line_sep>node_depth.text=str(int(c))<line_sep>tree=ET.ElementTree(node_root)<line_sep>boxes=[]<block_end><return>boxes tree <true><block_end><def_stmt>parse_det_txt path conf_thresh=0.5<block_start><if_stmt>os.path.exists(path)<block_start><with_stmt>open(path "r")<as>f<block_start>lines=f.readlines()<block_end>boxes=[]<line_sep>scores=[]<for_stmt>line lines<block_start>content=line.rstrip().split(' ')<line_sep>bbox=content[2:]<line_sep># Only keep the boxes with score >= conf_thresh det_conf=float(content[1])<if_stmt>det_conf<ge>conf_thresh<block_start>boxes.append([int(b)<for>b bbox])<line_sep>scores.append(det_conf)<block_end><block_end><return>boxes scores<block_end><else_stmt><block_start><return>[] []<block_end><block_end><def_stmt>class_agnostic_nms boxes scores iou=0.7# boxes = non_max_suppression_fast(np.array(boxes), iou) <block_start>boxes=nms(np.array(boxes) np.array(scores) iou)<line_sep><return>list(boxes)<block_end><def_stmt>get_unk_det gt det 
iou<block_start><if_stmt><not>gt<block_start><return>det<block_end>gt=np.array(gt)<line_sep>unk_det=[]<for_stmt>dl det<block_start>d=np.array(dl)<line_sep>ixmin=np.maximum(gt[: 0] d[0])<line_sep>iymin=np.maximum(gt[: 1] d[1])<line_sep>ixmax=np.minimum(gt[: 2] d[2])<line_sep>iymax=np.minimum(gt[: 3] d[3])<line_sep>iw=np.maximum(ixmax-ixmin+1.0 0.0)<line_sep>ih=np.maximum(iymax-iymin+1.0 0.0)<line_sep>inters=iw<times>ih<line_sep>uni=((d[2]-d[0]+1.0)<times>(d[3]-d[1]+1.0)+(gt[: 2]-gt[: 0]+1.0)<times>(gt[: 3]-gt[: 1]+1.0)-inters)<line_sep>overlaps=inters/uni<line_sep>ov_max=np.max(overlaps)<if_stmt>ov_max<l>iou<block_start>unk_det.append(dl)<block_end><block_end><return>unk_det<block_end><def_stmt>main ann_dir det_dir out_dir det_conf_thesh iou_thresh nms=<false> iou_thresh_nms=0.7<block_start>files=os.listdir(det_dir)<line_sep>start=time.time()<for_stmt>i,file_name enumerate(files)<block_start><if_stmt>i%100<eq>0<block_start>print(f"On image no. {i}. Time: {time.time()-start}")<line_sep>start=time.time()<block_end>ann_file_path=f"{ann_dir}/{file_name.split('.')[0]}.xml"<line_sep>ref_det_file_path=f"{det_dir}/{file_name.split('.')[0]}.txt"<line_sep>out_ann_file_path=f"{out_dir}/{file_name.split('.')[0]}.xml"<line_sep>gt_boxes,ann_tree,train=parse_voc_gt_kn(ann_file_path)# Read the ground truth bounding boxes # Only add the unknown detections if training image <if_stmt><not>train# Copy the original annotation file <block_start>ann_tree.write(out_ann_file_path encoding='latin-1')<line_sep><continue><block_end>det_boxes,scores=parse_det_txt(ref_det_file_path conf_thresh=det_conf_thesh)# Read the detections <if_stmt>nms<block_start>det_boxes=class_agnostic_nms(det_boxes scores iou_thresh_nms)# Apply NMS if prompted to do so <block_end>det_unk=get_unk_det(gt_boxes det_boxes iou_thresh)# Get the potential unknown detections # Create the updated annotation file <for_stmt>det det_unk<block_start>object=ET.SubElement(ann_tree.getroot() 
'object')<line_sep>name=ET.SubElement(object "name")<line_sep>name.text="unknown"<line_sep>pose=ET.SubElement(object "pose")<line_sep>pose.text="Unspecified"<line_sep>truncated=ET.SubElement(object "truncated")<line_sep>truncated.text="2"<line_sep>difficult=ET.SubElement(object "difficult")<line_sep>difficult.text="0"<line_sep>bndbox=ET.SubElement(object "bndbox")<line_sep>xmin=ET.SubElement(bndbox "xmin")<line_sep>xmin.text=str(int(det[0]))<line_sep>ymin=ET.SubElement(bndbox "ymin")<line_sep>ymin.text=str(int(det[1]))<line_sep>xmax=ET.SubElement(bndbox "xmax")<line_sep>xmax.text=str(int(det[2]))<line_sep>ymax=ET.SubElement(bndbox "ymax")<line_sep>ymax.text=str(int(det[3]))<block_end># Save the updated annotations ann_tree.write(out_ann_file_path encoding='latin-1')<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_arguments()<line_sep>annotations_dir=args["annotations_dir_path"]<line_sep>detections_dir=args["detections_dir_path"]<line_sep>output_dir=args["output_dir_path"]<if_stmt><not>os.path.exists(output_dir)<block_start>os.makedirs(output_dir)<block_end>conf_threshold_det=args["detection_confidence_threshold"]<line_sep>iou_threshold_unk=args["iou_thresh_unk"]<line_sep>apply_nms=args["apply_nms"]<line_sep>iou_threshold_nms=args["iou_thresh_nms"]<line_sep>main(annotations_dir detections_dir output_dir conf_threshold_det iou_threshold_unk apply_nms iou_threshold_nms)<block_end>
# Expected parser output for a LACP bundle: Port-channel1 with two member
# interfaces (Gi0/0/1 fully up and collecting, Gi0/0/7 not yet collecting).
expected_output = {
    "interfaces": {
        "Port-channel1": {
            "name": "Port-channel1",
            "protocol": "lacp",
            "members": {
                "GigabitEthernet0/0/1": {
                    "activity": "Active",
                    "age": 18,
                    "aggregatable": True,
                    "collecting": True,
                    "defaulted": False,
                    "distributing": True,
                    "expired": False,
                    "flags": "FA",
                    "interface": "GigabitEthernet0/0/1",
                    "lacp_port_priority": 100,
                    "oper_key": 1,
                    "port_num": 2,
                    "port_state": 63,
                    "synchronization": True,
                    "system_id": "00127,6487.88ff.68ef",
                    "timeout": "Short",
                },
                "GigabitEthernet0/0/7": {
                    "activity": "Active",
                    "age": 0,
                    "aggregatable": True,
                    "collecting": False,
                    "defaulted": False,
                    "distributing": False,
                    "expired": False,
                    "flags": "FA",
                    "interface": "GigabitEthernet0/0/7",
                    "lacp_port_priority": 200,
                    "oper_key": 1,
                    "port_num": 1,
                    "port_state": 15,
                    "synchronization": True,
                    "system_id": "00127,6487.88ff.68ef",
                    "timeout": "Short",
                },
            },
        }
    }
}
"""App utilities tests."""
import sys

import pytest
from django.apps import apps

from ..models import Camera

pytestmark = pytest.mark.django_db


def _fire_app_ready():
    # Re-run the ready() hooks of both apps, mirroring a fresh startup.
    for app_name in ("locations", "cameras"):
        apps.get_app_config(app_name).ready()


@pytest.mark.parametrize(
    "testargs, output",
    [
        (["python", "manage.py", "runserver"], 4),
        (["python", "manage.py", "makemigration"], 0),
        (["python", "manage.py", "migrate"], 0),
        (["python", "manage.py", "test"], 0),
        (["pytest"], 0),
    ],
)
def test_app(monkeypatch, testargs, output):
    """Demo cameras are created only when the process was started via runserver."""
    monkeypatch.setattr(sys, "argv", testargs)
    _fire_app_ready()
    assert Camera.objects.count() == output


@pytest.mark.parametrize(
    "testenv, output",
    [
        ("true", 4),
        ("True", 4),
        ("1", 4),
        ("false", 0),
        ("False", 0),
        ("0", 0),
        ("random_string", 4),
    ],
)
def test_app_not_create_demo(monkeypatch, testenv, output):
    """The CREATE_DEMO env var gates demo-object creation under runserver."""
    monkeypatch.setenv("CREATE_DEMO", testenv)
    monkeypatch.setattr(sys, "argv", ["python", "manage.py", "runserver"])
    _fire_app_ready()
    assert Camera.objects.count() == output
__copyright__ = "Copyright (c) 2021 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"

from typing import Tuple, Dict, Union

import numpy as np
from jina.executors.decorators import single
from jina.executors.crafters import BaseCrafter

from .helper import _crop_image, _move_channel_axis, _load_image


class CenterImageCropper(BaseCrafter):
    """Crop the image with the center crop box.

    The coordinate is the same coordinate-system as in :py:mod:`PIL.Image`.

    :param target_size: Desired output size. If size is a sequence like
        (h, w), the output size will be matched to this. If size is an int,
        the output will have the same height and width as the `target_size`.
    :param channel_axis: Axis for channel
    :param args: Additional positional arguments
    :param kwargs: Additional keyword arguments
    """

    def __init__(
        self,
        target_size: Union[Tuple[int, int], int] = 224,
        channel_axis: int = -1,
        *args,
        **kwargs,
    ):
        """Set Constructor."""
        super().__init__(*args, **kwargs)
        self.target_size = target_size
        self.channel_axis = channel_axis

    @single
    def craft(self, blob: 'np.ndarray', *args, **kwargs) -> Dict:
        """Crop the input image array around its center.

        :param blob: The ndarray of the image
        :param args: Additional positional arguments
        :param kwargs: Additional keyword arguments
        :return: A dict with the cropped image
        """
        image = _load_image(blob, self.channel_axis)
        cropped, top, left = _crop_image(image, self.target_size, how='center')
        # Restore the caller's channel layout before returning.
        restored = _move_channel_axis(np.asarray(cropped), -1, self.channel_axis)
        return dict(
            offset=0,
            blob=restored.astype('float32'),
            location=(top, left),
        )
import requests
import os

# Download each URL into data/temp/, echoing its content type and filename.
for url in (
    'https://www.python.org/static/community_logos/python-logo.png',
    'http://www.post.japanpost.jp/zipcode/dl/oogaki/zip/13tokyo.zip',
):
    response = requests.get(url)
    print(response.headers['Content-Type'])
    # image/png for the logo, application/zip for the postal-code archive

    filename = os.path.basename(url)
    print(filename)
    # python-logo.png / 13tokyo.zip

    with open('data/temp/' + filename, 'wb') as f:
        f.write(response.content)
import falcon
import six

from monitorrent.settings_manager import SettingsManager


# noinspection PyUnusedLocal
class SettingsNotifyOn(object):
    """REST resource exposing the enabled external-notification levels."""

    def __init__(self, settings_manager):
        """
        :type settings_manager: SettingsManager
        """
        self.settings_manager = settings_manager

    def on_get(self, req, resp):
        """Return the currently enabled notification levels as a JSON array."""
        resp.json = self.settings_manager.get_external_notifications_levels()

    def on_put(self, req, resp):
        """Replace the enabled notification levels.

        The request body must be a non-empty JSON array of strings, each
        naming an existing level; otherwise 400 Bad Request is raised.
        """
        # Rejects None, [] and other empty/falsy bodies in one check.
        if not req.json:
            raise falcon.HTTPBadRequest('BodyRequired', 'Expecting not empty JSON body')
        if not isinstance(req.json, list) or not all(isinstance(i, six.text_type) for i in req.json):
            raise falcon.HTTPBadRequest('ArrayOfStringExpected', 'Expecting list of string values')
        existing_levels = self.settings_manager.get_existing_external_notifications_levels()
        unknown_levels = [l for l in req.json if l not in existing_levels]
        if unknown_levels:
            # Fixed typo in the user-facing message ("unknow" -> "unknown").
            raise falcon.HTTPBadRequest('UnknownLevels', '{0} are unknown levels'.format(unknown_levels))
        self.settings_manager.set_external_notifications_levels(req.json)
        resp.status = falcon.HTTP_NO_CONTENT
"""From the project root directory (containing data files), this can be run with: Boolean logic evaluation: python -m spinn.models.fat_classifier --training_data_path ../bl-data/pbl_train.tsv \ --eval_data_path ../bl-data/pbl_dev.tsv SST sentiment (Demo only, model needs a full GloVe embeddings file to do well): python -m spinn.models.fat_classifier --data_type sst --training_data_path sst-data/train.txt \ --eval_data_path sst-data/dev.txt --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \ --model_dim 10 --word_embedding_dim 5 SNLI entailment (Demo only, model needs a full GloVe embeddings file to do well): python -m spinn.models.fat_classifier --data_type snli --training_data_path snli_1.0/snli_1.0_dev.jsonl \ --eval_data_path snli_1.0/snli_1.0_dev.jsonl --embedding_data_path spinn/tests/test_embedding_matrix.5d.txt \ --model_dim 10 --word_embedding_dim 5 Note: If you get an error starting with "TypeError: ('Wrong number of dimensions..." during development, there may already be a saved checkpoint in ckpt_path that matches the name of the model you're developing. Move or delete it as appropriate. """<import_from_stmt>functools partial<import_stmt>os<import_stmt>pprint<import_stmt>sys<import_stmt>gflags<import_from_stmt>theano tensor<as>T<import_stmt>theano<import_stmt>numpy<as>np<import_from_stmt>spinn afs_safe_logger<import_from_stmt>spinn util<import_from_stmt>spinn.data.boolean load_boolean_data<import_from_stmt>spinn.data.sst load_sst_data<import_from_stmt>spinn.data.snli load_snli_data<import_stmt>spinn.fat_stack<import_stmt>spinn.plain_rnn<import_stmt>spinn.cbow<line_sep>FLAGS=gflags.FLAGS<def_stmt>build_sentence_model cls vocab_size seq_length tokens transitions num_classes training_mode ground_truth_transitions_visible vs initial_embeddings=<none> project_embeddings=<false> ss_mask_gen=<none> ss_prob=0.0<block_start>""" Construct a classifier which makes use of some hard-stack model. Args: cls: Hard stack class to use (from e.g. 
`spinn.fat_stack`) vocab_size: seq_length: Length of each sequence provided to the stack model tokens: Theano batch (integer matrix), `batch_size * seq_length` transitions: Theano batch (integer matrix), `batch_size * seq_length` num_classes: Number of output classes training_mode: A Theano scalar indicating whether to act as a training model with dropout (1.0) or to act as an eval model with rescaling (0.0). ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access to ground truth transitions. This can be disabled at evaluation time to force Model 1 (or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0. vs: Variable store. """<line_sep># Prepare layer which performs stack element composition. <if_stmt>cls<is>spinn.plain_rnn.RNN<block_start><if_stmt>FLAGS.use_gru<block_start>compose_network=partial(util.GRULayer initializer=util.HeKaimingInitializer())<block_end><else_stmt><block_start>compose_network=partial(util.LSTMLayer initializer=util.HeKaimingInitializer())<block_end>embedding_projection_network=<none><block_end><elif_stmt>cls<is>spinn.cbow.CBOW<block_start>compose_network=<none><line_sep>embedding_projection_network=<none><block_end><else_stmt><block_start><if_stmt>FLAGS.lstm_composition<block_start><if_stmt>FLAGS.use_gru<block_start>compose_network=partial(util.TreeGRULayer initializer=util.HeKaimingInitializer())<block_end><else_stmt><block_start>compose_network=partial(util.TreeLSTMLayer initializer=util.HeKaimingInitializer())<block_end><block_end><else_stmt><block_start><assert_stmt><not>FLAGS.connect_tracking_comp "Can only connect tracking and composition unit while using TreeLSTM"<line_sep>compose_network=partial(util.ReLULayer initializer=util.HeKaimingInitializer())<block_end><if_stmt>project_embeddings<block_start>embedding_projection_network=util.Linear<block_end><else_stmt><block_start><assert_stmt>FLAGS.word_embedding_dim<eq>FLAGS.model_dim "word_embedding_dim must equal 
model_dim unless a projection layer is used."<line_sep>embedding_projection_network=util.IdentityLayer<block_end><block_end># Build hard stack which scans over input sequence. sentence_model=cls(FLAGS.model_dim FLAGS.word_embedding_dim vocab_size seq_length compose_network embedding_projection_network training_mode ground_truth_transitions_visible vs predict_use_cell=FLAGS.predict_use_cell use_tracking_lstm=FLAGS.use_tracking_lstm tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim X=tokens transitions=transitions initial_embeddings=initial_embeddings embedding_dropout_keep_rate=FLAGS.embedding_keep_rate ss_mask_gen=ss_mask_gen ss_prob=ss_prob connect_tracking_comp=FLAGS.connect_tracking_comp context_sensitive_shift=FLAGS.context_sensitive_shift context_sensitive_use_relu=FLAGS.context_sensitive_use_relu use_input_batch_norm=<false>)<line_sep># Extract top element of final stack timestep. <if_stmt>FLAGS.lstm_composition<or>cls<is>spinn.plain_rnn.RNN<block_start>sentence_vector=sentence_model.final_representations[: :FLAGS.model_dim/2].reshape((-1 FLAGS.model_dim/2))<line_sep>sentence_vector_dim=FLAGS.model_dim/2<block_end><else_stmt><block_start>sentence_vector=sentence_model.final_representations.reshape((-1 FLAGS.model_dim))<line_sep>sentence_vector_dim=FLAGS.model_dim<block_end>sentence_vector=util.BatchNorm(sentence_vector sentence_vector_dim vs "sentence_vector" training_mode)<line_sep>sentence_vector=util.Dropout(sentence_vector FLAGS.semantic_classifier_keep_rate training_mode)<line_sep># Feed forward through a single output layer logits=util.Linear(sentence_vector sentence_vector_dim num_classes vs name="semantic_classifier" use_bias=<true>)<line_sep><return>sentence_model.transitions_pred logits<block_end><def_stmt>build_sentence_pair_model cls vocab_size seq_length tokens transitions num_classes training_mode ground_truth_transitions_visible vs initial_embeddings=<none> project_embeddings=<false> ss_mask_gen=<none> ss_prob=0.0<block_start>""" 
Construct a classifier which makes use of some hard-stack model. Args: cls: Hard stack class to use (from e.g. `spinn.fat_stack`) vocab_size: seq_length: Length of each sequence provided to the stack model tokens: Theano batch (integer matrix), `batch_size * seq_length` transitions: Theano batch (integer matrix), `batch_size * seq_length` num_classes: Number of output classes training_mode: A Theano scalar indicating whether to act as a training model with dropout (1.0) or to act as an eval model with rescaling (0.0). ground_truth_transitions_visible: A Theano scalar. If set (1.0), allow the model access to ground truth transitions. This can be disabled at evaluation time to force Model 1 (or 2S) to evaluate in the Model 2 style with predicted transitions. Has no effect on Model 0. vs: Variable store. """<line_sep># Prepare layer which performs stack element composition. <if_stmt>cls<is>spinn.plain_rnn.RNN<block_start><if_stmt>FLAGS.use_gru<block_start>compose_network=partial(util.GRULayer initializer=util.HeKaimingInitializer())<block_end><else_stmt><block_start>compose_network=partial(util.LSTMLayer initializer=util.HeKaimingInitializer())<block_end>embedding_projection_network=<none><block_end><elif_stmt>cls<is>spinn.cbow.CBOW<block_start>compose_network=<none><line_sep>embedding_projection_network=<none><block_end><else_stmt><block_start><if_stmt>FLAGS.lstm_composition<block_start><if_stmt>FLAGS.use_gru<block_start>compose_network=partial(util.TreeGRULayer initializer=util.HeKaimingInitializer())<block_end><else_stmt><block_start>compose_network=partial(util.TreeLSTMLayer initializer=util.HeKaimingInitializer())<block_end><block_end><else_stmt><block_start><assert_stmt><not>FLAGS.connect_tracking_comp "Can only connect tracking and composition unit while using TreeLSTM"<line_sep>compose_network=partial(util.ReLULayer 
initializer=util.HeKaimingInitializer())<block_end><if_stmt>project_embeddings<block_start>embedding_projection_network=util.Linear<block_end><else_stmt><block_start><assert_stmt>FLAGS.word_embedding_dim<eq>FLAGS.model_dim "word_embedding_dim must equal model_dim unless a projection layer is used."<line_sep>embedding_projection_network=util.IdentityLayer<block_end><block_end># Split the two sentences premise_tokens=tokens[: : 0]<line_sep>hypothesis_tokens=tokens[: : 1]<line_sep>premise_transitions=transitions[: : 0]<line_sep>hypothesis_transitions=transitions[: : 1]<line_sep># Build two hard stack models which scan over input sequences. premise_model=cls(FLAGS.model_dim FLAGS.word_embedding_dim vocab_size seq_length compose_network embedding_projection_network training_mode ground_truth_transitions_visible vs predict_use_cell=FLAGS.predict_use_cell use_tracking_lstm=FLAGS.use_tracking_lstm tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim X=premise_tokens transitions=premise_transitions initial_embeddings=initial_embeddings embedding_dropout_keep_rate=FLAGS.embedding_keep_rate ss_mask_gen=ss_mask_gen ss_prob=ss_prob connect_tracking_comp=FLAGS.connect_tracking_comp context_sensitive_shift=FLAGS.context_sensitive_shift context_sensitive_use_relu=FLAGS.context_sensitive_use_relu use_attention=FLAGS.use_attention initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state)<line_sep>premise_stack_tops=premise_model.stack_tops<if>FLAGS.use_attention<ne>"None"<else><none><line_sep>premise_tracking_c_state_final=premise_model.tracking_c_state_final<if>cls<not><in>[spinn.plain_rnn.RNN spinn.cbow.CBOW]<else><none><line_sep>hypothesis_model=cls(FLAGS.model_dim FLAGS.word_embedding_dim vocab_size seq_length compose_network embedding_projection_network training_mode ground_truth_transitions_visible vs predict_use_cell=FLAGS.predict_use_cell use_tracking_lstm=FLAGS.use_tracking_lstm tracking_lstm_hidden_dim=FLAGS.tracking_lstm_hidden_dim X=hypothesis_tokens 
transitions=hypothesis_transitions initial_embeddings=initial_embeddings embedding_dropout_keep_rate=FLAGS.embedding_keep_rate ss_mask_gen=ss_mask_gen ss_prob=ss_prob connect_tracking_comp=FLAGS.connect_tracking_comp context_sensitive_shift=FLAGS.context_sensitive_shift context_sensitive_use_relu=FLAGS.context_sensitive_use_relu use_attention=FLAGS.use_attention premise_stack_tops=premise_stack_tops is_hypothesis=<true> initialize_hyp_tracking_state=FLAGS.initialize_hyp_tracking_state premise_tracking_c_state_final=premise_tracking_c_state_final)<line_sep># Extract top element of final stack timestep. <if_stmt>FLAGS.use_attention<eq>"None"<or>FLAGS.use_difference_feature<or>FLAGS.use_product_feature<block_start>premise_vector=premise_model.final_representations<line_sep>hypothesis_vector=hypothesis_model.final_representations<if_stmt>(FLAGS.lstm_composition<and>cls<is><not>spinn.cbow.CBOW)<or>cls<is>spinn.plain_rnn.RNN<block_start>premise_vector=premise_vector[: :FLAGS.model_dim/2].reshape((-1 FLAGS.model_dim/2))<line_sep>hypothesis_vector=hypothesis_vector[: :FLAGS.model_dim/2].reshape((-1 FLAGS.model_dim/2))<line_sep>sentence_vector_dim=FLAGS.model_dim/2<block_end><else_stmt><block_start>premise_vector=premise_vector.reshape((-1 FLAGS.model_dim))<line_sep>hypothesis_vector=hypothesis_vector.reshape((-1 FLAGS.model_dim))<line_sep>sentence_vector_dim=FLAGS.model_dim<block_end><block_end><if_stmt>FLAGS.use_attention<ne>"None"# Use the attention weighted representation <block_start>h_dim=FLAGS.model_dim/2<line_sep>mlp_input=hypothesis_model.final_weighed_representation.reshape((-1 h_dim))<line_sep>mlp_input_dim=h_dim<block_end><else_stmt># Create standard MLP features <block_start>mlp_input=T.concatenate([premise_vector hypothesis_vector] axis=1)<line_sep>mlp_input_dim=2<times>sentence_vector_dim<block_end><if_stmt>FLAGS.use_difference_feature<block_start>mlp_input=T.concatenate([mlp_input premise_vector-hypothesis_vector] 
axis=1)<line_sep>mlp_input_dim<augadd>sentence_vector_dim<block_end><if_stmt>FLAGS.use_product_feature<block_start>mlp_input=T.concatenate([mlp_input premise_vector<times>hypothesis_vector] axis=1)<line_sep>mlp_input_dim<augadd>sentence_vector_dim<block_end>mlp_input=util.BatchNorm(mlp_input mlp_input_dim vs "sentence_vectors" training_mode)<line_sep>mlp_input=util.Dropout(mlp_input FLAGS.semantic_classifier_keep_rate training_mode)<if_stmt>FLAGS.classifier_type<eq>"ResNet"<block_start>features=util.Linear(mlp_input mlp_input_dim FLAGS.sentence_pair_combination_layer_dim vs name="resnet/linear" use_bias=<true>)<line_sep>features_dim=FLAGS.sentence_pair_combination_layer_dim<for_stmt>layer range(FLAGS.num_sentence_pair_combination_layers)<block_start>features=util.HeKaimingResidualLayerSet(features features_dim vs training_mode name="resnet/"+str(layer) dropout_keep_rate=FLAGS.semantic_classifier_keep_rate depth=FLAGS.resnet_unit_depth initializer=util.HeKaimingInitializer())<line_sep>features=util.BatchNorm(features features_dim vs "combining_mlp/"+str(layer) training_mode)<line_sep>features=util.Dropout(features FLAGS.semantic_classifier_keep_rate training_mode)<block_end><block_end><elif_stmt>FLAGS.classifier_type<eq>"Highway"<block_start>features=util.Linear(mlp_input mlp_input_dim FLAGS.sentence_pair_combination_layer_dim vs name="resnet/linear" use_bias=<true>)<line_sep>features_dim=FLAGS.sentence_pair_combination_layer_dim<for_stmt>layer range(FLAGS.num_sentence_pair_combination_layers)<block_start>features=util.HighwayLayer(features features_dim vs training_mode name="highway/"+str(layer) dropout_keep_rate=FLAGS.semantic_classifier_keep_rate initializer=util.HeKaimingInitializer())<line_sep>features=util.BatchNorm(features features_dim vs "combining_mlp/"+str(layer) training_mode)<line_sep>features=util.Dropout(features FLAGS.semantic_classifier_keep_rate training_mode)<block_end><block_end><else_stmt># Apply a combining MLP 
<block_start>features=mlp_input<line_sep>features_dim=mlp_input_dim<for_stmt>layer range(FLAGS.num_sentence_pair_combination_layers)<block_start>features=util.ReLULayer(features features_dim FLAGS.sentence_pair_combination_layer_dim vs name="combining_mlp/"+str(layer) initializer=util.HeKaimingInitializer())<line_sep>features_dim=FLAGS.sentence_pair_combination_layer_dim<line_sep>features=util.BatchNorm(features features_dim vs "combining_mlp/"+str(layer) training_mode)<line_sep>features=util.Dropout(features FLAGS.semantic_classifier_keep_rate training_mode)<block_end><block_end># Feed forward through a single output layer logits=util.Linear(features features_dim num_classes vs name="semantic_classifier" use_bias=<true>)<line_sep><return>premise_model.transitions_pred hypothesis_model.transitions_pred logits<block_end><def_stmt>build_cost logits targets<block_start>""" Build a classification cost function. """<line_sep># Clip gradients coming from the cost function. logits=theano.gradient.grad_clip(logits -1.<times>FLAGS.clipping_max_value FLAGS.clipping_max_value)<line_sep>predicted_dist=T.nnet.softmax(logits)<line_sep>costs=T.nnet.categorical_crossentropy(predicted_dist targets)<line_sep>cost=costs.mean()<line_sep>pred=T.argmax(logits axis=1)<line_sep>acc=1.-T.mean(T.cast(T.neq(pred targets) theano.config.floatX))<line_sep><return>cost acc<block_end><def_stmt>build_transition_cost logits targets num_transitions<block_start>""" Build a parse action prediction cost function. """<line_sep># swap seq_length dimension to front so that we can scan per timestep logits=T.swapaxes(logits 0 1)<line_sep>targets=targets.T<def_stmt>cost_t logits tgt num_transitions# TODO(jongauthier): Taper down xent cost as we proceed through # sequence? 
<block_start>predicted_dist=T.nnet.softmax(logits)<line_sep>cost=T.nnet.categorical_crossentropy(predicted_dist tgt)<line_sep>pred=T.argmax(logits axis=1)<line_sep>error=T.neq(pred tgt)<line_sep><return>cost error<block_end>results,_=theano.scan(cost_t [logits targets] non_sequences=[num_transitions])<line_sep>costs,errors=results<line_sep># Create a mask that selects only transitions that involve real data. unrolling_length=T.shape(costs)[0]<line_sep>padding=unrolling_length-num_transitions<line_sep>padding=T.reshape(padding (1 -1))<line_sep>rng=T.arange(unrolling_length)+1<line_sep>rng=T.reshape(rng (-1 1))<line_sep>mask=T.gt(rng padding)<line_sep># Compute acc using the mask acc=1.0-(T.sum(errors<times>mask dtype=theano.config.floatX)/T.sum(num_transitions dtype=theano.config.floatX))<line_sep># Compute cost directly, since we *do* want a cost incentive to get the padding # transitions right. cost=T.mean(costs)<line_sep><return>cost acc<block_end><def_stmt>evaluate eval_fn eval_set logger step# Evaluate <block_start>acc_accum=0.0<line_sep>action_acc_accum=0.0<line_sep>eval_batches=0.0<for_stmt>(eval_X_batch eval_transitions_batch eval_y_batch eval_num_transitions_batch) eval_set[1]<block_start>acc_value,action_acc_value=eval_fn(eval_X_batch eval_transitions_batch eval_y_batch eval_num_transitions_batch 0.0 # Eval mode: Don't apply dropout. int(FLAGS.allow_gt_transitions_in_eval) # Allow GT transitions to be used according to flag. 
float(FLAGS.allow_gt_transitions_in_eval))<line_sep># If flag not set, used scheduled sampling # p(ground truth) = 0.0, # else SS p(ground truth) = 1.0 acc_accum<augadd>acc_value<line_sep>action_acc_accum<augadd>action_acc_value<line_sep>eval_batches<augadd>1.0<block_end>logger.Log("Step: %i\tEval acc: %f\t %f\t%s"%(step acc_accum/eval_batches action_acc_accum/eval_batches eval_set[0]))<line_sep><return>acc_accum/eval_batches<block_end><def_stmt>evaluate_expanded eval_fn eval_set eval_path logger step sentence_pair_data ind_to_word predict_transitions<block_start>""" Write the gold parses and predicted parses in the files <eval_out_path>.gld and <eval_out_path>.tst respectively. These files can be given as inputs to Evalb to evaluate parsing performance - evalb -p evalb_spinn.prm <eval_out_path>.gld <eval_out_path>.tst TODO(SB): Set up for RNN and Model0 on non-sentence-pair data; port support to classifier.py. """<line_sep># TODO: Prune out redundant code, make usable on Model0 as well. acc_accum=0.0<line_sep>action_acc_accum=0.0<line_sep>eval_batches=0.0<line_sep>eval_gold_path=eval_path+".gld"<line_sep>eval_out_path=eval_path+".tst"<line_sep>eval_lbl_path=eval_path+".lbl"<with_stmt>open(eval_gold_path "w")<as>eval_gold open(eval_out_path "w")<as>eval_out<block_start><if_stmt>FLAGS.write_predicted_label<block_start>label_out=open(eval_lbl_path "w")<block_end><if_stmt>sentence_pair_data<block_start><for_stmt>(eval_X_batch eval_transitions_batch eval_y_batch eval_num_transitions_batch) eval_set[1]<block_start>acc_value,action_acc_value,sem_logit_values,logits_pred_hyp,logits_pred_prem=eval_fn(eval_X_batch eval_transitions_batch eval_y_batch eval_num_transitions_batch 0.0 # Eval mode: Don't apply dropout. int(FLAGS.allow_gt_transitions_in_eval) # Allow GT transitions to be used according to flag. 
float(FLAGS.allow_gt_transitions_in_eval))<line_sep># adjust visibility of GT acc_accum<augadd>acc_value<line_sep>action_acc_accum<augadd>action_acc_value<line_sep>eval_batches<augadd>1.0<line_sep># write each predicted transition to file <for_stmt>orig_transitions,pred_logit_hyp,pred_logit_prem,tokens,true_class,example_sem_logits zip(eval_transitions_batch logits_pred_hyp logits_pred_prem eval_X_batch eval_y_batch sem_logit_values)<block_start><if_stmt>predict_transitions<block_start>orig_hyp_transitions,orig_prem_transitions=orig_transitions.T<line_sep>pred_hyp_transitions=pred_logit_hyp.argmax(axis=1)<line_sep>pred_prem_transitions=pred_logit_prem.argmax(axis=1)<block_end><else_stmt><block_start>orig_hyp_transitions=orig_prem_transitions=pred_hyp_transitions=pred_prem_transitions=<none><block_end>hyp_tokens,prem_tokens=tokens.T<line_sep>hyp_words=[ind_to_word[t]<for>t hyp_tokens]<line_sep>prem_words=[ind_to_word[t]<for>t prem_tokens]<line_sep>eval_gold.write(util.TransitionsToParse(orig_hyp_transitions hyp_words)+"\n")<line_sep>eval_out.write(util.TransitionsToParse(pred_hyp_transitions hyp_words)+"\n")<line_sep>eval_gold.write(util.TransitionsToParse(orig_prem_transitions prem_words)+"\n")<line_sep>eval_out.write(util.TransitionsToParse(pred_prem_transitions prem_words)+"\n")<line_sep>predicted_class=np.argmax(example_sem_logits)<line_sep>exp_logit_values=np.exp(example_sem_logits)<line_sep>class_probs=exp_logit_values/np.sum(exp_logit_values)<line_sep>class_probs_repr="\t".join(map(<lambda>p:"%.8f"%(p ) class_probs))<if_stmt>FLAGS.write_predicted_label<block_start>label_out.write(str(true_class<eq>predicted_class)+"\t"+str(true_class)+"\t"+str(predicted_class)+"\t"+class_probs_repr+"\n")<block_end><block_end><block_end><block_end><else_stmt><block_start><for_stmt>(eval_X_batch eval_transitions_batch eval_y_batch eval_num_transitions_batch) eval_set[1]<block_start>acc_value,action_acc_value,sem_logit_values,logits_pred=eval_fn(eval_X_batch 
eval_transitions_batch eval_y_batch eval_num_transitions_batch 0.0 # Eval mode: Don't apply dropout. int(FLAGS.allow_gt_transitions_in_eval) # Allow GT transitions to be used according to flag. float(FLAGS.allow_gt_transitions_in_eval))<line_sep># adjust visibility of GT acc_accum<augadd>acc_value<line_sep>action_acc_accum<augadd>action_acc_value<line_sep>eval_batches<augadd>1.0<line_sep># write each predicted transition to file <for_stmt>orig_transitions,pred_logit,tokens,true_class,example_sem_logits zip(eval_transitions_batch logits_pred eval_X_batch eval_y_batch sem_logit_values)<block_start>words=[ind_to_word[t]<for>t tokens]<line_sep>eval_gold.write(util.TransitionsToParse(orig_transitions words)+"\n")<line_sep>eval_out.write(util.TransitionsToParse(pred_logit.argmax(axis=1) words)+"\n")<line_sep>predicted_class=np.argmax(example_sem_logits)<line_sep>exp_logit_values=np.exp(example_sem_logits)<line_sep>class_probs=exp_logit_values/np.sum(exp_logit_values)<line_sep>class_probs_repr="\t".join(map(<lambda>p:"%.3f"%(p ) class_probs))<if_stmt>FLAGS.write_predicted_label<block_start>label_out.write(str(true_class<eq>predicted_class)+"\t"+str(true_class)+"\t"+str(predicted_class)+"\t"+class_probs_repr+"\n")<block_end><block_end><block_end><block_end><block_end>logger.Log("Written gold parses in %s"%(eval_gold_path))<line_sep>logger.Log("Written predicted parses in %s"%(eval_out_path))<if_stmt>FLAGS.write_predicted_label<block_start>logger.Log("Written predicted labels in %s"%(eval_lbl_path))<line_sep>label_out.close()<block_end>logger.Log("Step: %i\tEval acc: %f\t %f\t%s"%(step acc_accum/eval_batches action_acc_accum/eval_batches eval_set[0]))<block_end><def_stmt>run only_forward=<false><block_start>logger=afs_safe_logger.Logger(os.path.join(FLAGS.log_path 
FLAGS.experiment_name)+".log")<if_stmt>FLAGS.data_type<eq>"bl"<block_start>data_manager=load_boolean_data<block_end><elif_stmt>FLAGS.data_type<eq>"sst"<block_start>data_manager=load_sst_data<block_end><elif_stmt>FLAGS.data_type<eq>"snli"<block_start>data_manager=load_snli_data<block_end><else_stmt><block_start>logger.Log("Bad data type.")<line_sep><return><block_end>pp=pprint.PrettyPrinter(indent=4)<line_sep>logger.Log("Flag values:\n"+pp.pformat(FLAGS.FlagValuesDict()))<line_sep># Load the data. raw_training_data,vocabulary=data_manager.load_data(FLAGS.training_data_path)<line_sep># Load the eval data. raw_eval_sets=[]<if_stmt>FLAGS.eval_data_path<block_start><for_stmt>eval_filename FLAGS.eval_data_path.split(":")<block_start>eval_data,_=data_manager.load_data(eval_filename)<line_sep>raw_eval_sets.append((eval_filename eval_data))<block_end><block_end># Prepare the vocabulary. <if_stmt><not>vocabulary<block_start>logger.Log("In open vocabulary mode. Using loaded embeddings without fine-tuning.")<line_sep>train_embeddings=<false><line_sep>vocabulary=util.BuildVocabulary(raw_training_data raw_eval_sets FLAGS.embedding_data_path logger=logger sentence_pair_data=data_manager.SENTENCE_PAIR_DATA)<block_end><else_stmt><block_start>logger.Log("In fixed vocabulary mode. Training embeddings.")<line_sep>train_embeddings=<true><block_end># Load pretrained embeddings. <if_stmt>FLAGS.embedding_data_path<block_start>logger.Log("Loading vocabulary with "+str(len(vocabulary))+" words from "+FLAGS.embedding_data_path)<line_sep>initial_embeddings=util.LoadEmbeddingsFromASCII(vocabulary FLAGS.word_embedding_dim FLAGS.embedding_data_path)<block_end><else_stmt><block_start>initial_embeddings=<none><block_end># Trim dataset, convert token sequences to integer sequences, crop, and # pad. 
logger.Log("Preprocessing training data.")<line_sep>training_data=util.PreprocessDataset(raw_training_data vocabulary FLAGS.seq_length data_manager eval_mode=<false> logger=logger sentence_pair_data=data_manager.SENTENCE_PAIR_DATA for_rnn=FLAGS.model_type<eq>"RNN"<or>FLAGS.model_type<eq>"CBOW")<line_sep>training_data_iter=util.MakeTrainingIterator(training_data FLAGS.batch_size)<line_sep>eval_iterators=[]<for_stmt>filename,raw_eval_set raw_eval_sets<block_start>logger.Log("Preprocessing eval data: "+filename)<line_sep>e_X,e_transitions,e_y,e_num_transitions=util.PreprocessDataset(raw_eval_set vocabulary FLAGS.seq_length data_manager eval_mode=<true> logger=logger sentence_pair_data=data_manager.SENTENCE_PAIR_DATA for_rnn=FLAGS.model_type<eq>"RNN"<or>FLAGS.model_type<eq>"CBOW")<line_sep>eval_iterators.append((filename util.MakeEvalIterator((e_X e_transitions e_y e_num_transitions) FLAGS.batch_size)))<block_end># Set up the placeholders. y=T.vector("y" dtype="int32")<line_sep>lr=T.scalar("lr")<line_sep>training_mode=T.scalar("training_mode")# 1: Training with dropout, 0: Eval ground_truth_transitions_visible=T.scalar("ground_truth_transitions_visible" dtype="int32")<line_sep>logger.Log("Building model.")<line_sep>vs=util.VariableStore(default_initializer=util.UniformInitializer(FLAGS.init_range) logger=logger)<if_stmt>FLAGS.model_type<eq>"CBOW"<block_start>model_cls=spinn.cbow.CBOW<block_end><elif_stmt>FLAGS.model_type<eq>"RNN"<block_start>model_cls=spinn.plain_rnn.RNN<block_end><else_stmt><block_start>model_cls=getattr(spinn.fat_stack FLAGS.model_type)<block_end># Generator of mask for scheduled sampling numpy_random=np.random.RandomState(1234)<line_sep>ss_mask_gen=T.shared_randomstreams.RandomStreams(numpy_random.randint(999999))<line_sep># Training step number 
ss_prob=T.scalar("ss_prob")<if_stmt>data_manager.SENTENCE_PAIR_DATA<block_start>X=T.itensor3("X")<line_sep>transitions=T.itensor3("transitions")<line_sep>num_transitions=T.imatrix("num_transitions")<line_sep>predicted_premise_transitions,predicted_hypothesis_transitions,logits=build_sentence_pair_model(model_cls len(vocabulary) FLAGS.seq_length X transitions len(data_manager.LABEL_MAP) training_mode ground_truth_transitions_visible vs initial_embeddings=initial_embeddings project_embeddings=(<not>train_embeddings) ss_mask_gen=ss_mask_gen ss_prob=ss_prob)<block_end><else_stmt><block_start>X=T.matrix("X" dtype="int32")<line_sep>transitions=T.imatrix("transitions")<line_sep>num_transitions=T.vector("num_transitions" dtype="int32")<line_sep>predicted_transitions,logits=build_sentence_model(model_cls len(vocabulary) FLAGS.seq_length X transitions len(data_manager.LABEL_MAP) training_mode ground_truth_transitions_visible vs initial_embeddings=initial_embeddings project_embeddings=(<not>train_embeddings) ss_mask_gen=ss_mask_gen ss_prob=ss_prob)<block_end>xent_cost,acc=build_cost(logits y)<line_sep># Set up L2 regularization. l2_cost=0.0<for_stmt>var vs.trainable_vars<block_start>l2_cost<augadd>FLAGS.l2_lambda<times>T.sum(T.sqr(vs.vars[var]))<block_end># Compute cross-entropy cost on action predictions. 
<if_stmt>(<not>data_manager.SENTENCE_PAIR_DATA)<and>FLAGS.model_type<not><in>["Model0" "RNN" "CBOW"]<block_start>transition_cost,action_acc=build_transition_cost(predicted_transitions transitions num_transitions)<block_end><elif_stmt>data_manager.SENTENCE_PAIR_DATA<and>FLAGS.model_type<not><in>["Model0" "RNN" "CBOW"]<block_start>p_transition_cost,p_action_acc=build_transition_cost(predicted_premise_transitions transitions[: : 0] num_transitions[: 0])<line_sep>h_transition_cost,h_action_acc=build_transition_cost(predicted_hypothesis_transitions transitions[: : 1] num_transitions[: 1])<line_sep>transition_cost=p_transition_cost+h_transition_cost<line_sep>action_acc=(p_action_acc+h_action_acc)/2.0# TODO(SB): Average over transitions, not words. <block_end><else_stmt><block_start>transition_cost=T.constant(0.0)<line_sep>action_acc=T.constant(0.0)<block_end>transition_cost=transition_cost<times>FLAGS.transition_cost_scale<line_sep>total_cost=xent_cost+l2_cost+transition_cost<if_stmt>".ckpt"<in>FLAGS.ckpt_path<block_start>checkpoint_path=FLAGS.ckpt_path<block_end><else_stmt><block_start>checkpoint_path=os.path.join(FLAGS.ckpt_path FLAGS.experiment_name+".ckpt")<block_end><if_stmt>os.path.isfile(checkpoint_path)<block_start>logger.Log("Found checkpoint, restoring.")<line_sep>step,best_dev_error=vs.load_checkpoint(checkpoint_path num_extra_vars=2 skip_saved_unsavables=FLAGS.skip_saved_unsavables)<block_end><else_stmt><block_start><assert_stmt><not>only_forward "Can't run an eval-only run without a checkpoint. Supply a checkpoint."<line_sep>step=0<line_sep>best_dev_error=1.0<block_end># Do an evaluation-only run. <if_stmt>only_forward<block_start><if_stmt>FLAGS.eval_output_paths<block_start>eval_output_paths=FLAGS.eval_output_paths.strip().split(":")<assert_stmt>len(eval_output_paths)<eq>len(eval_iterators) "Invalid no. 
of output paths."<block_end><else_stmt><block_start>eval_output_paths=[FLAGS.experiment_name+"-"+os.path.split(eval_set[0])[1]+"-parse"<for>eval_set eval_iterators]<block_end># Load model from checkpoint. logger.Log("Checkpointed model was trained for %d steps."%(step ))<line_sep># Generate function for forward pass. logger.Log("Building forward pass.")<if_stmt>data_manager.SENTENCE_PAIR_DATA<block_start>eval_fn=theano.function([X transitions y num_transitions training_mode ground_truth_transitions_visible ss_prob] [acc action_acc logits predicted_hypothesis_transitions predicted_premise_transitions] on_unused_input='ignore' allow_input_downcast=<true>)<block_end><else_stmt><block_start>eval_fn=theano.function([X transitions y num_transitions training_mode ground_truth_transitions_visible ss_prob] [acc action_acc logits predicted_transitions] on_unused_input='ignore' allow_input_downcast=<true>)<block_end># Generate the inverse vocabulary lookup table. ind_to_word={v:k<for>k,v vocabulary.iteritems()}<line_sep># Do a forward pass and write the output to disk. <for_stmt>eval_set,eval_out_path zip(eval_iterators eval_output_paths)<block_start>logger.Log("Writing eval output for %s."%(eval_set[0] ))<line_sep>evaluate_expanded(eval_fn eval_set eval_out_path logger step data_manager.SENTENCE_PAIR_DATA ind_to_word FLAGS.model_type<not><in>["Model0" "RNN" "CBOW"])<block_end><block_end><else_stmt># Train <block_start>new_values=util.RMSprop(total_cost vs.trainable_vars.values() lr)<line_sep>new_values<augadd>[(key vs.nongradient_updates[key])<for>key vs.nongradient_updates]<line_sep># Training open-vocabulary embeddings is a questionable idea right now. Disabled: # new_values.append( # util.embedding_SGD(total_cost, embedding_params, embedding_lr)) # Create training and eval functions. # Unused variable warnings are supressed so that num_transitions can be passed in when training Model 0, # which ignores it. This yields more readable code that is very slightly slower. 
logger.Log("Building update function.")<line_sep>update_fn=theano.function([X transitions y num_transitions lr training_mode ground_truth_transitions_visible ss_prob] [total_cost xent_cost transition_cost action_acc l2_cost acc] updates=new_values on_unused_input='ignore' allow_input_downcast=<true>)<line_sep>logger.Log("Building eval function.")<line_sep>eval_fn=theano.function([X transitions y num_transitions training_mode ground_truth_transitions_visible ss_prob] [acc action_acc] on_unused_input='ignore' allow_input_downcast=<true>)<line_sep>logger.Log("Training.")<line_sep># Main training loop. <for_stmt>step range(step FLAGS.training_steps)<block_start><if_stmt>step%FLAGS.eval_interval_steps<eq>0<block_start><for_stmt>index,eval_set enumerate(eval_iterators)<block_start>acc=evaluate(eval_fn eval_set logger step)<if_stmt>FLAGS.ckpt_on_best_dev_error<and>index<eq>0<and>(1-acc)<l>0.99<times>best_dev_error<and>step<g>1000<block_start>best_dev_error=1-acc<line_sep>logger.Log("Checkpointing with new best dev accuracy of %f"%acc)<line_sep>vs.save_checkpoint(checkpoint_path+"_best" extra_vars=[step best_dev_error])<block_end><block_end><block_end>X_batch,transitions_batch,y_batch,num_transitions_batch=training_data_iter.next()<line_sep>learning_rate=FLAGS.learning_rate<times>(FLAGS.learning_rate_decay_per_10k_steps<power>(step/10000.0))<line_sep>ret=update_fn(X_batch transitions_batch y_batch num_transitions_batch learning_rate 1.0 1.0 np.exp(step<times>np.log(FLAGS.scheduled_sampling_exponent_base)))<line_sep>total_cost_val,xent_cost_val,transition_cost_val,action_acc_val,l2_cost_val,acc_val=ret<if_stmt>step%FLAGS.statistics_interval_steps<eq>0<block_start>logger.Log("Step: %i\tAcc: %f\t%f\tCost: %5f %5f %5f %5f"%(step acc_val action_acc_val total_cost_val xent_cost_val transition_cost_val l2_cost_val))<block_end><if_stmt>step%FLAGS.ckpt_interval_steps<eq>0<and>step<g>0<block_start>vs.save_checkpoint(checkpoint_path extra_vars=[step 
best_dev_error])<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'# Experiment naming. <block_start>gflags.DEFINE_string("experiment_name" "experiment" "")<line_sep># Data types. gflags.DEFINE_enum("data_type" "bl" ["bl" "sst" "snli"] "Which data handler and classifier to use.")<line_sep># Where to store checkpoints gflags.DEFINE_string("ckpt_path" "." "Where to save/load checkpoints. Can be either "<concat>"a filename or a directory. In the latter case, the experiment name serves as the "<concat>"base for the filename.")<line_sep>gflags.DEFINE_string("log_path" "." "A directory in which to write logs.")<line_sep># Data settings. gflags.DEFINE_string("training_data_path" <none> "")<line_sep>gflags.DEFINE_string("eval_data_path" <none> "Can contain multiple file paths, separated "<concat>"using ':' tokens. The first file should be the dev set, and is used for determining "<concat>"when to save the early stopping 'best' checkpoints.")<line_sep>gflags.DEFINE_integer("seq_length" 30 "")<line_sep>gflags.DEFINE_integer("eval_seq_length" 30 "")<line_sep>gflags.DEFINE_string("embedding_data_path" <none> "If set, load GloVe-formatted embeddings from here.")<line_sep># Model architecture settings. 
gflags.DEFINE_enum("model_type" "Model0" ["CBOW" "RNN" "Model0" "Model1" "Model2" "Model2S"] "")<line_sep>gflags.DEFINE_boolean("allow_gt_transitions_in_eval" <false> "Whether to use ground truth transitions in evaluation when appropriate "<concat>"(i.e., in Model 1 and Model 2S.)")<line_sep>gflags.DEFINE_integer("model_dim" 8 "")<line_sep>gflags.DEFINE_integer("word_embedding_dim" 8 "")<line_sep>gflags.DEFINE_integer("tracking_lstm_hidden_dim" 4 "")<line_sep>gflags.DEFINE_boolean("use_tracking_lstm" <true> "Whether to use LSTM in the tracking unit")<line_sep>gflags.DEFINE_boolean("predict_use_cell" <false> "For models which predict parser actions, use "<concat>"both the tracking LSTM hidden and cell values as "<concat>"input to the prediction layer")<line_sep>gflags.DEFINE_enum("use_attention" "None" ["None" "Rocktaschel" "WangJiang" "Thang" "TreeWangJiang" "TreeThang"] "")<line_sep>gflags.DEFINE_boolean("context_sensitive_shift" <false> "Use LSTM hidden state and word embedding to determine the vector to be pushed")<line_sep>gflags.DEFINE_boolean("context_sensitive_use_relu" <false> "Use ReLU Layer to combine embedding and tracking unit hidden state")<line_sep>gflags.DEFINE_float("semantic_classifier_keep_rate" 0.5 "Used for dropout in the semantic task classifier.")<line_sep>gflags.DEFINE_float("embedding_keep_rate" 0.5 "Used for dropout on transformed embeddings.")<line_sep>gflags.DEFINE_boolean("lstm_composition" <true> "")<line_sep>gflags.DEFINE_enum("classifier_type" "MLP" ["MLP" "Highway" "ResNet"] "")<line_sep>gflags.DEFINE_integer("resnet_unit_depth" 2 "")<line_sep># gflags.DEFINE_integer("num_composition_layers", 1, "") gflags.DEFINE_integer("num_sentence_pair_combination_layers" 2 "")<line_sep>gflags.DEFINE_integer("sentence_pair_combination_layer_dim" 1024 "")<line_sep>gflags.DEFINE_float("scheduled_sampling_exponent_base" 0.99 "Used for scheduled sampling, with probability of Model 1 over Model 2 being 
base^#training_steps")<line_sep>gflags.DEFINE_boolean("use_difference_feature" <true> "Supply the sentence pair classifier with sentence difference features.")<line_sep>gflags.DEFINE_boolean("use_product_feature" <true> "Supply the sentence pair classifier with sentence product features.")<line_sep>gflags.DEFINE_boolean("connect_tracking_comp" <true> "Connect tracking unit and composition unit. Can only be true if using LSTM in both units.")<line_sep>gflags.DEFINE_boolean("initialize_hyp_tracking_state" <false> "Initialize the c state of the tracking unit of hypothesis model with the final"<concat>"tracking unit c state of the premise model.")<line_sep>gflags.DEFINE_boolean("use_gru" <false> "Use GRU units instead of LSTM units.")<line_sep># Optimization settings. gflags.DEFINE_integer("training_steps" 500000 "Stop training after this point.")<line_sep>gflags.DEFINE_integer("batch_size" 32 "SGD minibatch size.")<line_sep>gflags.DEFINE_float("learning_rate" 0.001 "Used in RMSProp.")<line_sep>gflags.DEFINE_float("learning_rate_decay_per_10k_steps" 0.75 "Used in RMSProp.")<line_sep>gflags.DEFINE_float("clipping_max_value" 5.0 "")<line_sep>gflags.DEFINE_float("l2_lambda" 1e-5 "")<line_sep>gflags.DEFINE_float("init_range" 0.005 "Mainly used for softmax parameters. Range for uniform random init.")<line_sep>gflags.DEFINE_float("transition_cost_scale" 1.0 "Multiplied by the transition cost.")<line_sep># Display settings. 
gflags.DEFINE_integer("statistics_interval_steps" 100 "Print training set results at this interval.")<line_sep>gflags.DEFINE_integer("eval_interval_steps" 100 "Evaluate at this interval.")<line_sep>gflags.DEFINE_integer("ckpt_interval_steps" 5000 "Update the checkpoint on disk at this interval.")<line_sep>gflags.DEFINE_boolean("ckpt_on_best_dev_error" <true> "If error on the first eval set (the dev set) is "<concat>"at most 0.99 of error at the previous checkpoint, save a special 'best' checkpoint.")<line_sep># Evaluation settings gflags.DEFINE_boolean("expanded_eval_only_mode" <false> "If set, a checkpoint is loaded and a forward pass is done to get the predicted "<concat>"transitions. The inferred parses are written to the supplied file(s) along with example-"<concat>"by-example accuracy information. Requirements: Must specify checkpoint path.")<line_sep>gflags.DEFINE_string("eval_output_paths" <none> "Used when expanded_eval_only_mode is set. The number of supplied paths should be same"<concat>"as the number of eval sets.")<line_sep>gflags.DEFINE_boolean("write_predicted_label" <false> "Write the predicted labels in a <eval_output_name>.lbl file.")<line_sep>gflags.DEFINE_boolean("skip_saved_unsavables" <false> "Assume that variables marked as not savable will appear in checkpoints anyway, and "<concat>"skip them when loading. This should be used only when loading old checkpoints.")<line_sep># Parse command line flags. FLAGS(sys.argv)<line_sep>run(only_forward=FLAGS.expanded_eval_only_mode)<block_end>
<import_from_stmt>.book Book<line_sep>
import cv2
import numpy as np

# Detection thresholds: minimum confidence for a box to be kept, and the
# overlap threshold used by non-maximum suppression.
thres = 0.45
nms_threshold = 0.2

# Default camera capture at 1280x720 with boosted brightness.
cap = cv2.VideoCapture(0)
cap.set(3, 1280)   # frame width
cap.set(4, 720)    # frame height
cap.set(10, 150)   # brightness

# Load the COCO class labels (one label per line).
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')

# SSD MobileNet v3 model configuration and frozen weights.
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'

# OpenCV's built-in DNN detection wrapper, configured for this network's
# expected input size and normalization.
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

# Main capture/detect/draw loop; press 'q' to quit.
while True:
    success, img = cap.read()
    classIds, confs, bbox = net.detect(img, confThreshold=thres)

    # NMSBoxes wants plain lists of boxes and float confidences.
    bbox = list(bbox)
    confs = [float(c) for c in np.array(confs).reshape(1, -1)[0]]
    indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)

    for i in indices:
        i = i[0]  # assumes NMSBoxes returns nested indices — TODO confirm for this OpenCV version
        box = bbox[i]
        x, y, w, h = box[0], box[1], box[2], box[3]
        cv2.rectangle(img, (x, y), (x + w, h + y), color=(0, 255, 0), thickness=2)
        cv2.putText(img, classNames[classIds[i][0] - 1].upper(),
                    (box[0] + 10, box[1] + 30),
                    cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

    cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
"""Forms used in Ahmia."""
import logging

from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.utils.translation import ugettext as _

from .validators import validate_onion_url, validate_status

logger = logging.getLogger("ahmia")


class AddOnionForm(forms.Form):
    """Request to add an onion domain."""
    onion = forms.CharField(
        validators=[validate_onion_url, validate_status],
        widget=forms.TextInput(
            attrs={'placeholder': _('Enter your .onion address here')}))

    def send_new_onion(self):
        """Send a new onion request by email.

        In DEBUG mode the mail is skipped entirely. Mail-transport failures
        are logged rather than propagated so a broken SMTP setup cannot
        break the request view.
        """
        if settings.DEBUG:
            return
        subject = "Hidden service add onion request"
        message = "User requests to add the following onion url {0}".format(
            self.cleaned_data['onion'])
        try:
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,
                      settings.RECIPIENT_LIST, fail_silently=False)
        except IOError as e:
            logger.exception(e)


class ReportOnionForm(forms.Form):
    """Report an onion domain."""
    onion = forms.CharField(
        validators=[validate_onion_url, validate_status],
        widget=forms.TextInput(
            attrs={'placeholder': _('Enter your .onion address here')}))

    def send_abuse_report(self):
        """Send an abuse report by email.

        Mirrors ``AddOnionForm.send_new_onion``: skipped in DEBUG, and mail
        failures are logged instead of propagating (previously this method
        let ``send_mail`` errors escape, unlike its sibling).
        """
        if settings.DEBUG:
            return
        subject = "Hidden service abuse notice"
        message = "User sent abuse notice for onion url {0}".format(
            self.cleaned_data['onion'])
        try:
            send_mail(subject, message, settings.DEFAULT_FROM_EMAIL,
                      settings.RECIPIENT_LIST, fail_silently=False)
        except IOError as e:
            logger.exception(e)
from rest_framework import authentication, exceptions

from profiles.models import Token


class TokenAuthentication(authentication.TokenAuthentication):
    """
    A custom authentication scheme which enforces Token expiration times.
    """
    model = Token
    keyword = 'Bearer'

    def authenticate_credentials(self, key):
        """Resolve ``key`` to a ``(user, token)`` pair.

        Rejects unknown tokens, expired tokens, and tokens belonging to
        deactivated users, via guard clauses.
        """
        token_model = self.get_model()
        try:
            token = token_model.objects.prefetch_related('user').get(key=key)
        except token_model.DoesNotExist:
            raise exceptions.AuthenticationFailed("Invalid token")

        # Enforce the Token's expiration time, if one has been set.
        if token.is_expired():
            raise exceptions.AuthenticationFailed("Token expired")

        if not token.user.is_active:
            raise exceptions.AuthenticationFailed("User inactive")

        return token.user, token
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Utility to import classes."""

import importlib


def ImportClass(path_to_module):
  """Import the module give the path to its file and the class.

  Args:
    path_to_module: A string specifying the location of the module.
        E.g. makani.analysis.my_checks.MyModule. Everything before the last
        dot is the module path; the last component is the attribute name.

  Returns:
    The module object.

  Raises:
    AttributeError: If the module imports cleanly but does not define the
        requested attribute.
  """
  class_path = path_to_module.split('.')
  class_name = class_path[-1]
  module_path = '.'.join(class_path[:-1])
  module = importlib.import_module(module_path)
  try:
    cls = getattr(module, class_name)
    return cls
  except AttributeError as e:
    # Fixed: the original used Python-2-only ``except AttributeError, e``
    # syntax and ``e.message`` (removed in Python 3); format the exception
    # object itself instead.
    raise AttributeError(('Cannot import "%s" from "%s" because of '
                          'AttributeError: %s.')
                         % (class_name, path_to_module, e))
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester

# Harvesting client for the noise-rate monitoring elements.
noiseratesClient = DQMEDHarvester(
    "NoiseRatesClient",
    # To dump harvested MEs to a file, set e.g. 'NoiseRatesHarvestingME.root';
    # an empty string disables file output.
    outputFile=cms.untracked.string(''),
    DQMDirName=cms.string("/"),  # root directory
)
import hashlib
import os
import pickle
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file
from zoltpy import util
from zoltpy.connection import ZoltarConnection
from zoltpy.covid19 import COVID_TARGETS, covid19_row_validator, validate_quantile_csv_file
import glob
import json
import sys

# When invoked as ``python <script> update`` only forecasts whose checksum
# changed since the last recorded run are re-validated.
UPDATE = False
if len(sys.argv) > 1 and sys.argv[1].lower() == 'update':
    print('Only updating')
    UPDATE = True


def get_filename_from_path(path):
    """Return the final component of *path* (also echoed to the log)."""
    print(path, path.split(os.path.sep)[-1])
    return path.split(os.path.sep)[-1]


# Lazily loaded mapping of forecast file name -> md5 checksum.
g_db = None


def get_db():
    """Load (once) and return the validated-file checksum database."""
    global g_db
    if g_db is None:
        g_db = json.load(open('code/zoltar_scripts/validated_file_db.json'))
    return g_db


def dump_db():
    """Write the in-memory checksum database back to disk."""
    global g_db
    with open('code/zoltar_scripts/validated_file_db.json', 'w') as fw:
        json.dump(g_db, fw, indent=4)


# Walk every model directory and validate each CSV forecast it contains.
list_of_model_directories = os.listdir('./data-processed/')
for directory in list_of_model_directories:
    if "." in directory:
        continue  # skip hidden/system entries

    path = './data-processed/' + directory + '/'
    for forecast in glob.glob(path + "*.csv"):
        # Checksum of the forecast file as it exists right now.
        with open(forecast, "rb") as f:
            checksum = hashlib.md5(f.read()).hexdigest()

        db = get_db()
        # In update mode, skip files whose checksum is unchanged.
        if UPDATE and db.get(get_filename_from_path(forecast), None) == checksum:
            continue

        errors_from_validation = validate_quantile_csv_file(forecast)
        if "no errors" == errors_from_validation:
            # Record the new checksum only when it actually changed.
            if db.get(get_filename_from_path(forecast), None) != checksum:
                db[get_filename_from_path(forecast)] = checksum
        else:
            print(errors_from_validation)

print('Dumping db')
dump_db()
# -*- coding: utf-8 -*- """ Test module imports =================== """<import_stmt>sys<def_stmt>test_module_imports <block_start><try_stmt><block_start><import_stmt>ahrs<block_end><except_stmt><block_start>sys.exit("[ERROR] Package AHRS not found. Go to root directory of package and type:\n\n\tpip install .\n")<block_end><try_stmt><block_start><import_stmt>numpy scipy matplotlib<block_end><except_stmt>ModuleNotFoundError<block_start>sys.exit("[ERROR] You don't have the required packages. Try reinstalling the package.")<block_end><block_end>
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RpcRequest
from aliyunsdkmarket.endpoint import endpoint_data


class DescribeCommoditiesRequest(RpcRequest):
    """RPC request for the Market ``DescribeCommodities`` API (2015-11-01).

    Each query parameter has a ``get_X``/``set_X`` accessor pair that reads
    from / writes into the request's query-parameter dict, following the
    Aliyun SDK convention.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'Market', '2015-11-01', 'DescribeCommodities', 'yunmarket')
        # Propagate endpoint-resolution data when the core SDK supports it.
        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    # -- commodity identity -------------------------------------------------

    def get_CommodityId(self):
        return self.get_query_params().get('CommodityId')

    def set_CommodityId(self, CommodityId):
        self.add_query_param('CommodityId', CommodityId)

    def get_CommodityIds(self):
        return self.get_query_params().get('CommodityIds')

    def set_CommodityIds(self, CommodityIds):
        self.add_query_param('CommodityIds', CommodityIds)

    def get_CommodityCategoryIds(self):
        return self.get_query_params().get('CommodityCategoryIds')

    def set_CommodityCategoryIds(self, CommodityCategoryIds):
        self.add_query_param('CommodityCategoryIds', CommodityCategoryIds)

    # -- status filters -----------------------------------------------------

    def get_CommodityStatuses(self):
        return self.get_query_params().get('CommodityStatuses')

    def set_CommodityStatuses(self, CommodityStatuses):
        self.add_query_param('CommodityStatuses', CommodityStatuses)

    def get_CommodityAuditStatuses(self):
        return self.get_query_params().get('CommodityAuditStatuses')

    def set_CommodityAuditStatuses(self, CommodityAuditStatuses):
        self.add_query_param('CommodityAuditStatuses', CommodityAuditStatuses)

    # -- time-window filters ------------------------------------------------

    def get_CommodityGmtModifiedFrom(self):
        return self.get_query_params().get('CommodityGmtModifiedFrom')

    def set_CommodityGmtModifiedFrom(self, CommodityGmtModifiedFrom):
        self.add_query_param('CommodityGmtModifiedFrom', CommodityGmtModifiedFrom)

    def get_CommodityGmtModifiedTo(self):
        return self.get_query_params().get('CommodityGmtModifiedTo')

    def set_CommodityGmtModifiedTo(self, CommodityGmtModifiedTo):
        self.add_query_param('CommodityGmtModifiedTo', CommodityGmtModifiedTo)

    def get_CommodityGmtCreatedFrom(self):
        return self.get_query_params().get('CommodityGmtCreatedFrom')

    def set_CommodityGmtCreatedFrom(self, CommodityGmtCreatedFrom):
        self.add_query_param('CommodityGmtCreatedFrom', CommodityGmtCreatedFrom)

    def get_CommodityGmtCreatedTo(self):
        return self.get_query_params().get('CommodityGmtCreatedTo')

    def set_CommodityGmtCreatedTo(self, CommodityGmtCreatedTo):
        self.add_query_param('CommodityGmtCreatedTo', CommodityGmtCreatedTo)

    def get_CommodityGmtPublishFrom(self):
        return self.get_query_params().get('CommodityGmtPublishFrom')

    def set_CommodityGmtPublishFrom(self, CommodityGmtPublishFrom):
        self.add_query_param('CommodityGmtPublishFrom', CommodityGmtPublishFrom)

    def get_CommodityGmtPublishTo(self):
        return self.get_query_params().get('CommodityGmtPublishTo')

    def set_CommodityGmtPublishTo(self, CommodityGmtPublishTo):
        self.add_query_param('CommodityGmtPublishTo', CommodityGmtPublishTo)

    # -- paging and misc ----------------------------------------------------

    def get_PageNumber(self):
        return self.get_query_params().get('PageNumber')

    def set_PageNumber(self, PageNumber):
        self.add_query_param('PageNumber', PageNumber)

    def get_PageSize(self):
        return self.get_query_params().get('PageSize')

    def set_PageSize(self, PageSize):
        self.add_query_param('PageSize', PageSize)

    def get_Properties(self):
        return self.get_query_params().get('Properties')

    def set_Properties(self, Properties):
        self.add_query_param('Properties', Properties)
# Time: O(m * n)
# Space: O(1)

class Solution(object):
    def shiftGrid(self, grid, k):
        """
        :type grid: List[List[int]]
        :type k: int
        :rtype: List[List[int]]

        Shifts the grid right by k positions in place (last element wraps to
        the front), using the classic triple-reversal rotation so no extra
        buffer is needed.
        """
        cols = len(grid[0])
        total = len(grid) * cols

        def flip(lo, hi):
            # In-place reversal of the flattened half-open range [lo, hi).
            hi -= 1
            while lo < hi:
                r1, c1 = divmod(lo, cols)
                r2, c2 = divmod(hi, cols)
                grid[r1][c1], grid[r2][c2] = grid[r2][c2], grid[r1][c1]
                lo += 1
                hi -= 1

        # Rotate right by k: reverse everything, then reverse each half.
        k %= total
        flip(0, total)
        flip(0, k)
        flip(k, total)
        return grid
# MIT licensed
# Copyright (c) 2013-2020 lilydjwg <<EMAIL>>, et al.

HACKAGE_URL = 'https://hackage.haskell.org/package/%s/preferred.json'

async def get_version(name, conf, *, cache, **kwargs):
    """Return the latest preferred version of a Hackage package.

    The package key defaults to *name* unless overridden by the 'hackage'
    entry in *conf*. Results are fetched through the shared JSON cache.
    """
    package = conf.get('hackage', name)
    info = await cache.get_json(HACKAGE_URL % package)
    # The preferred.json payload lists normal versions newest-first.
    return info['normal-version'][0]
import requests
import progressbar as pb
import os
import cv2
import imageio
from imutils import paths
import numpy as np


def download_file(url, file_name, dest_dir):
    """Download *url* into ``dest_dir/file_name`` with a progress bar.

    Returns the full path to the (possibly pre-existing) file, or None when
    the download fails.
    """
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)
    full_path_to_file = dest_dir + os.path.sep + file_name
    # Skip the network entirely when the target already exists.
    if os.path.exists(full_path_to_file):
        return full_path_to_file
    print("Downloading " + file_name + " from " + url)
    try:
        r = requests.get(url, allow_redirects=True, stream=True)
    except requests.exceptions.RequestException:
        # Narrowed from a bare ``except``: only network/HTTP failures are
        # expected here; anything else should surface as a bug.
        print("Could not establish connection. Download failed")
        return None
    file_size = int(r.headers['Content-Length'])
    chunk_size = 1024
    num_bars = round(file_size / chunk_size)
    bar = pb.ProgressBar(maxval=num_bars).start()
    if r.status_code != requests.codes.ok:
        print("Error occurred while downloading file")
        return None
    count = 0
    with open(full_path_to_file, 'wb') as file:
        for chunk in r.iter_content(chunk_size=chunk_size):
            file.write(chunk)
            bar.update(count)
            count += 1
    bar.finish()  # previously missing: leave the terminal on a clean line
    return full_path_to_file


def get_frames(video_file, save_dir=None, save_prefix='', ext='jpg'):
    """Decode every frame of *video_file* into a list of BGR images.

    When *save_dir* is given, each frame is also written there as
    ``<save_prefix>frame_<n>.<ext>``. Returns None if the file cannot be
    opened.
    """
    video = cv2.VideoCapture(video_file)
    if not video.isOpened():
        print("[ERROR] Could not open video file", video_file)
        video.release()
        return
    frames = []
    frame_count = 0
    while video.isOpened():
        status, frame = video.read()
        if not status:
            break
        frames.append(frame)
        if save_dir:
            frame_count += 1
            out_file = (save_dir + os.path.sep + save_prefix + 'frame_'
                        + str(frame_count) + '.' + ext)
            print('[INFO] Writing file to .. ', out_file)
            cv2.imwrite(out_file, frame)
    video.release()
    return frames


def animate(src, gif_name, reshape=None, fps=25):
    """Assemble a GIF from a list of images or a directory of image files.

    *src* may be a list of BGR arrays or a directory path; frames are
    optionally resized to *reshape* and converted to RGB before encoding.
    """
    if not isinstance(src, list):
        if os.path.isdir(src):
            src = list(paths.list_images(src))
            for idx, image in enumerate(src):
                src[idx] = cv2.imread(image)
    if reshape:
        for idx, image in enumerate(src):
            src[idx] = cv2.resize(image, reshape)
    # imageio expects RGB ordering, OpenCV produces BGR.
    for idx, image in enumerate(src):
        src[idx] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    src = np.array(src)
    imageio.mimsave(gif_name, src, fps=fps)
import re

from cfn_tools import dump_yaml

from templates import ALL, MASTER, CLUSTER, SCHEDULER, WEBSERVER, WORKERSET


def test_if_important_properties_are_specified():
    """Every parameter documents itself and constrains what needs constraining."""
    for template in ALL:
        for specs in template["Parameters"].values():
            assert "Description" in specs
            assert "Type" in specs
            if "AllowedPattern" in specs:
                assert "ConstraintDescription" in specs
            if "MinValue" in specs or "MaxValue" in specs:
                assert "ConstraintDescription" in specs


def test_if_properties_are_in_order():
    """Parameter properties must follow the project's canonical key order."""

    def is_ordered(left, right, array):
        # A missing key imposes no ordering constraint.
        try:
            return array.index(left) < array.index(right)
        except ValueError:
            return True

    ordering = [
        ("Description", "ConstraintDescription"),
        ("ConstraintDescription", "AllowedPattern"),
        ("AllowedPattern", "Default"),
        ("Default", "Type"),
        ("Description", "AllowedValues"),
        ("AllowedValues", "Default"),
        ("ConstraintDescription", "MinValue"),
        ("MinValue", "MaxValue"),
        ("MaxValue", "Default"),
    ]
    for template in ALL:
        for spec in template["Parameters"].values():
            props = list(spec.keys())
            for earlier, later in ordering:
                assert is_ordered(earlier, later, props)


def test_if_default_value_satisfies_pattern():
    """Defaults must match their own AllowedPattern."""
    for template in ALL:
        for specs in template["Parameters"].values():
            if "AllowedPattern" in specs and "Default" in specs:
                assert re.match(specs["AllowedPattern"], specs["Default"])


def test_if_description_ends_in_dot():
    for template in ALL:
        for specs in template["Parameters"].values():
            assert specs["Description"].endswith(".")


def test_if_constraint_description_ends_in_dot():
    for template in ALL:
        for specs in template["Parameters"].values():
            if "ConstraintDescription" in specs:
                assert specs["ConstraintDescription"].endswith(".")


def test_consistency():
    """Parameters shared between parent and child templates must match exactly."""
    pairs = [
        (MASTER, CLUSTER),
        (CLUSTER, SCHEDULER),
        (CLUSTER, WEBSERVER),
        (CLUSTER, WORKERSET),
    ]
    for t_outer, t_inner in pairs:
        for param1, specs1 in t_outer["Parameters"].items():
            for param2, specs2 in t_inner["Parameters"].items():
                if param1 == param2:
                    assert (param1, dump_yaml(specs1)) == (param2, dump_yaml(specs2))
# coding: utf-8 <import_from_future_stmt> unicode_literals division absolute_import print_function<line_sep>__version__='1.2.1'<line_sep>__version_info__=(1 2 1)<line_sep>
'''Check whether the installation conditions of EOSFactory are fulfilled.'''

import sys
import json
import argparse

import eosfactory.core.utils as utils
import eosfactory.core.config as config

# Exit codes reported by ``main`` when run with ``--html``.
IS_ERROR = 2
IS_WARNING = 1


class Checklist():
    '''Verify the dependencies of EOSFactory and report problems.

    Constructing an instance runs all the checks and prints their results,
    either as HTML snippets (``is_html=True``, consumed by the GUI) or as
    plain -- optionally termcolor-colored -- terminal text.

    Args:
        is_html (bool): If set, emit HTML output instead of terminal text.
        error_codes (str): Fault-injection codes used by tests to simulate
            missing dependencies, e.g. ``"psutil eosio wslroot"``.

    Attributes:
        is_error (bool): Set when any check reported an error.
        is_warning (bool): Set when any check reported a warning.
    '''

    def __init__(self, is_html=False, error_codes=""):
        self.is_html = is_html
        self.html_text = ""
        self.is_error = False
        self.is_warning = False
        self.IS_WINDOWS = utils.is_windows_ubuntu()
        self.os_version = utils.os_version()

        self.print_msg("EOSFactory version {}".format(config.VERSION))
        self._check_psutil(error_codes)
        self._check_termcolor(error_codes)
        if self.IS_WINDOWS:
            self._check_wsl(error_codes)
        self._check_eosio(error_codes)
        self._check_eosio_cdt(error_codes)
        self._check_workspace(error_codes)

    def _check_psutil(self, error_codes):
        # Verify that the 'psutil' module is importable.
        try:
            if "psutil" in error_codes:
                import psutil1      # fault injection: force ImportError
            else:
                import psutil
        except ImportError:
            command = "pip3 install --user psutil"
            button = '''
<button
    style="text-align:left;" class="btn ${{BASH_COMMAND}}"; class="btn";
    id="Install psutil";
    title="Install psutil. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
            self.error_msg('''
Module 'psutil' is not installed. Install it: {}
'''.format(button))
            self.print_error('''Module 'psutil' is not installed.
Install it:
''')
            self.print_code("`{}`\n".format(command))

    def _check_termcolor(self, error_codes):
        # Verify that the 'termcolor' module is importable.
        try:
            if "termcolor" in error_codes:
                import termcolor1   # fault injection: force ImportError
            else:
                import termcolor
        except ImportError:
            command = "pip3 install --user termcolor"
            button = '''
<button
    style="text-align:left;" class="btn ${{BASH_COMMAND}}"; class="btn";
    id="Install termcolor";
    title="Install termcolor. Click the button then ENTER in a newly created bash terminal window, to go."
>
{}
</button>
'''.format(command)
            self.error_msg('''
Module 'termcolor' is not installed. Install it: {}
'''.format(button))
            self.print_error('''Module 'termcolor' is not installed.
Install it:
''')
            self.print_code("`{}`\n".format(command))

    def _check_wsl(self, error_codes):
        # Windows-only checks: WSL Ubuntu version and the WSL root directory.
        lsb_release, error = utils.spawn(
            ["lsb_release", "-r", "-s"], raise_exception=False)
        if error:
            self.error_msg(error)
        else:
            if "ubuntuversion" in error_codes:
                lsb_release = "16.4.1"      # fault injection
            ubuntu_version = int(lsb_release.split(".")[0])
            if ubuntu_version < config.UBUNTU_VERSION_MIN:
                msg = '''
WSL Ubuntu version is {}.
EOSIO nodeos can fail with Windows WSL Ubuntu below version 16.
'''.format(lsb_release)
                self.status_msg(self.warning(msg))
                self.print_warning(msg)

        root = config.wsl_root()
        if not root or "wslroot" in error_codes:
            self.error_msg('''Cannot determine the root of the WSL. Set it:
<button
    class="btn ${FIND_WSL}";
    id="";
    title="Click the button to open file dialog. Navigate to a directory containing the Ubuntu file system."
>
Indicate WSL root
</button>
''')
            self.print_error('''Cannot determine the root of the WSL.
To indicate it, use the command:''')
            self.print_code("`python3 -m eosfactory.config --wsl_root`\n")

    def _check_eosio(self, error_codes):
        # Verify that eosio (nodeos) is installed in the expected version.
        eosio_version = config.eosio_version()
        if "eosio" in error_codes:
            eosio_version = ["", "1.8.0"]   # fault injection
        # eosio_version = ["1.3.3", "1.8.0"]

        if eosio_version[0]:
            self.status_msg("Found eosio version {}".format(eosio_version[0]))
            self.print_status("Found eosio version {}".format(eosio_version[0]))

        if not eosio_version[0] or len(eosio_version) > 1 \
                and not self.equal(eosio_version[0], eosio_version[1]):
            command = ""
            if self.os_version == utils.UBUNTU:
                ubuntu_version = utils.spawn(
                    ["lsb_release", "-r", "-s"],
                    raise_exception=False)[0].split(".")[0]
                # BUG FIX: lsb_release output is a string; the former
                # ``ubuntu_version == 16`` compared str to int and was
                # always False, so Ubuntu 16 hosts got the 18.04 package.
                if ubuntu_version.isdigit() and int(ubuntu_version) == 16:
                    command = '''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-16.04_amd64.deb &&\\
sudo apt install ./eosio_1.8.0-1-ubuntu-16.04_amd64.deb
'''
                else:
                    # BUG FIX: 'sudo' was missing before 'apt install',
                    # inconsistently with the 16.04 branch above.
                    command = '''sudo apt remove eosio &&\\
wget https://github.com/eosio/eos/releases/download/v1.8.0/eosio_1.8.0-1-ubuntu-18.04_amd64.deb &&\\
sudo apt install ./eosio_1.8.0-1-ubuntu-18.04_amd64.deb
'''
            elif self.os_version == utils.DARWIN:
                command = '''brew remove eosio &&\\
brew tap eosio/eosio &&\\
brew install eosio
'''
            button = '''
<button
    style="text-align:left;" class="btn ${{BASH_COMMAND}}"; class="btn";
    id="Install eosio v{0}";
    title="Install eosio v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_version[1], command)
            instructions = \
                '<a href="https://github.com/EOSIO/eos">EOSIO installation instructions</a>'

            if eosio_version[0] and len(eosio_version) > 1:
                self.warning_msg('''
NOTE: EOSFactory was tested with version {0} while installed is {1}. Install eosio v{0}:<br>
{2}
'''.format(eosio_version[1], eosio_version[0],
           button if command else instructions))
                self.print_warning(
                    '''NOTE: EOSFactory was tested with version {0} while installed is {1}.
Install eosio v{0}:
'''.format(eosio_version[1], eosio_version[0]))
                self.print_code('''```
{}
```
'''.format(command if command else instructions))
            elif "ignoreeoside" not in error_codes:
                self.warning_msg('''
Cannot determine that eosio is installed as nodeos does not response.<br>
It hangs up sometimes.<br>
EOSFactory expects eosio version {}. Install eosio, if not installed:<br>
{}<br>
'''.format(eosio_version[1], button if command else instructions))
                self.print_warning(
                    '''Cannot determine that eosio is installed as nodeos does not response.
It hangs up sometimes. EOSFactory expects eosio version {}.
Install eosio, if not installed:
'''.format(eosio_version[1]))
                self.print_code('''```
{}
```
'''.format(command if command else instructions))

    def _check_eosio_cdt(self, error_codes):
        # Verify that eosio.cdt (eosio-cpp) is installed in the expected version.
        eosio_cdt_version = config.eosio_cdt_version()
        if "eosio_cdt" in error_codes:
            eosio_cdt_version = ["", "1.6.0"]   # fault injection
        # eosio_cdt_version = ["1.6.1", "1.6.0"]

        if eosio_cdt_version[0]:
            self.status_msg(
                "Found eosio.cdt version {}.".format(eosio_cdt_version[0]))
            self.print_status(
                "Found eosio.cdt version {}.".format(eosio_cdt_version[0]))

        if not eosio_cdt_version[0] or len(eosio_cdt_version) > 1 \
                and not self.equal(eosio_cdt_version[0], eosio_cdt_version[1]):
            command = ""
            if self.os_version == utils.UBUNTU:
                command = '''sudo apt remove eosio.cdt &&\\
wget https://github.com/eosio/eosio.cdt/releases/download/v1.6.1/eosio.cdt_1.6.1-1_amd64.deb &&\\
sudo apt install ./eosio.cdt_1.6.1-1_amd64.deb
'''
            elif self.os_version == utils.DARWIN:
                command = '''brew remove eosio.cdt &&\\
brew tap eosio/eosio.cdt &&\\
brew install eosio.cdt
'''
            button = '''
<button
    style="text-align:left;" class="btn ${{BASH_COMMAND}}"; class="btn";
    id="Install eosio.cdt v{0}";
    title="Install eosio.cdt v{0}. Click the button then ENTER in a newly created bash terminal window, to go."
>
{1}
</button>
'''.format(eosio_cdt_version[1], command)
            instructions = \
                '<a href="https://github.com/EOSIO/eosio.cdt">EOSIO.cdt installation instructions</a>'

            if eosio_cdt_version[0] and len(eosio_cdt_version) > 1 \
                    and eosio_cdt_version[0] != eosio_cdt_version[1]:
                self.warning_msg('''
NOTE: EOSFactory was tested with version {0} while installed is {1}.
Install eosio.cdt v{0}:<br>
{2}
'''.format(eosio_cdt_version[1], eosio_cdt_version[0],
           button if command else instructions))
                self.print_warning(
                    '''NOTE: EOSFactory was tested with version {0} while installed is {1}.
Install eosio v{0}:
'''.format(eosio_cdt_version[1], eosio_cdt_version[0]))
                self.print_code('''```
{}
```
'''.format(command if command else instructions))
            else:
                self.error_msg('''
Cannot determine that eosio.cdt is installed as eosio-cpp does not response.<br>
EOSFactory expects eosio.cdt version {}. Install it, if not installed. {}<br>
'''.format(eosio_cdt_version[1], button if command else instructions))
                self.print_error(
                    '''Cannot determine that eosio.cdt is installed as eosio-cpp does not response.
EOSFactory expects eosio.cdt version {}. Install it, if not installed.
'''.format(eosio_cdt_version[1]))
                self.print_code('''```
{}
```
'''.format(command if command else instructions))

    def _check_workspace(self, error_codes):
        # Verify that the default contract workspace directory is set.
        try:
            contract_workspace_dir = config.contract_workspace_dir()
        except Exception:   # narrowed from a bare 'except:'
            contract_workspace_dir = None
        button = '''
<button
    class="btn ${CHANGE_WORKSPACE}";
    id="${CHANGE_WORKSPACE}";
    title="Set workspace"
>
Set workspace.
</button>
'''
        if not contract_workspace_dir or "workspace" in error_codes:
            self.error_msg('''
Default workspace is not set, or it does not exist.{}
'''.format(button))
        else:
            self.status_msg('''Default workspace is {}.{}
'''.format(contract_workspace_dir, button))

    def just_msg(self, msg):
        # Print a raw message; HTML mode only.
        if self.is_html:
            msg = msg.replace("&&\\", "&&\\<br>")
            print("{}\n".format(msg))

    def print_msg(self, msg):
        # Print a raw message; terminal mode only.
        if not self.is_html:
            print(msg)

    def status_msg(self, msg):
        # Print a status line item; HTML mode only.
        if self.is_html:
            msg = msg.replace("&&\\", "&&\\<br>")
            print("<li>{}</li>\n".format(msg))

    def print_status(self, msg):
        # Print a status message; terminal mode only.
        if not self.is_html:
            msg = msg.replace("<br>", "")
            print(msg)

    def warning(self, msg):
        # Mark the checklist as carrying warnings.  In HTML mode, return
        # the message wrapped in a colored <em> tag; otherwise return None
        # (callers print the terminal variant separately).
        self.is_warning = True
        if self.is_html:
            msg = msg.replace("&&\\", "&&\\<br>")
            return '<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg)

    def warning_msg(self, msg):
        # Print a warning; HTML mode only.  Always sets the warning flag.
        self.is_warning = True
        if self.is_html:
            msg = msg.replace("&&\\", "&&\\<br>")
            print('<em style="color: ${{WARNING_COLOR}}"> {} </em>'.format(msg))

    def print_warning(self, msg):
        # Print a warning; terminal mode only, colored when termcolor exists.
        if not self.is_html:
            msg = msg.replace("<br>", "")
            msg = "WARNING:\n" + msg
            try:
                import termcolor
                msg = termcolor.colored(msg, "yellow")
            except Exception:
                pass
            print(msg)

    def error_msg(self, msg):
        # Print an error; HTML mode only.  Sets the error flag.
        if self.is_html:
            self.is_error = True
            msg = msg.replace("&&\\", "&&\\<br>")
            print('<p style="color: ${{ERROR_COLOR}}">ERROR: {}</p>'.format(msg))

    def print_error(self, msg):
        # Print an error; terminal mode only.  Sets the error flag.
        if not self.is_html:
            self.is_error = True
            msg = msg.replace("<br>", "")
            msg = "ERROR:\n" + msg
            try:
                import termcolor
                msg = termcolor.colored(msg, "magenta")
            except Exception:
                pass
            print(msg)

    def print_code(self, msg):
        # Print a code snippet; terminal mode only.
        if not self.is_html:
            msg = msg.replace("<br>", "")
            try:
                import termcolor
                msg = termcolor.colored(msg, "blue")
            except Exception:
                pass
            print(msg)

    def equal(self, version1, version2):
        # Versions match when their major.minor components agree (the patch
        # level is deliberately ignored).  Slicing also tolerates versions
        # that lack a minor component, where indexing [1] would raise.
        return version1.split(".")[:2] == version2.split(".")[:2]


def main():
    '''Entry point: parse CLI flags and run the requested check or action.'''
    parser = argparse.ArgumentParser(description='''
    Check whether installation conditions are fulfilled.
    ''')
    parser.add_argument(
        "--html", help="Print html output.", action="store_true")
    parser.add_argument("--error", help="Error code", default="")
    parser.add_argument(
        "--wsl_root", help="Show set the root of the WSL and exit.",
        action="store_true")
    parser.add_argument(
        "--dont_set_workspace", help="Ignore empty workspace directory.",
        action="store_true")
    parser.add_argument(
        "--json", help="Bare config JSON and exit.", action="store_true")
    parser.add_argument(
        "--workspace", help="Set contract workspace and exit.",
        action="store_true")
    parser.add_argument(
        "--dependencies", help="Set dependencies and exit.",
        action="store_true")

    args = parser.parse_args()
    if args.json:
        print(json.dumps(
            config.current_config(dont_set_workspace=args.dont_set_workspace),
            sort_keys=True, indent=4))
    elif args.wsl_root:
        config.wsl_root()
    elif args.workspace:
        config.set_contract_workspace_dir()
    elif args.html:
        checklist = Checklist(args.html, args.error)
        if checklist.is_error:
            sys.exit(IS_ERROR)
        elif checklist.is_warning:
            sys.exit(IS_WARNING)
    elif args.dependencies:
        checklist = Checklist(False, args.error)
    else:
        print("Checking dependencies of EOSFactory...")
        checklist = Checklist(False, args.error)
        if not checklist.is_error and not checklist.is_warning:
            print("... all the dependencies are in place.\n\n")
        else:
            print('''Some functionalities of EOSFactory may fail if the indicated errors
are not corrected.
''')
        config.config()


if __name__ == '__main__':
    main()
<import_from_future_stmt> division absolute_import print_function<import_from_stmt>.prototype *<import_from_stmt>.repeating *<line_sep>
<import_stmt>numpy<as>np<line_sep>arr=np.array([[2 5] [1 3]])<line_sep>arr_inv=np.linalg.inv(arr)<line_sep>print(arr_inv)<line_sep># [[ 3. -5.] # [-1. 2.]] mat=np.matrix([[2 5] [1 3]])<line_sep>mat_inv=np.linalg.inv(mat)<line_sep>print(mat_inv)<line_sep># [[ 3. -5.] # [-1. 2.]] mat_inv=mat<power>-1<line_sep>print(mat_inv)<line_sep># [[ 3. -5.] # [-1. 2.]] mat_inv=mat.I<line_sep>print(mat_inv)<line_sep># [[ 3. -5.] # [-1. 2.]] result=mat<times>mat.I<line_sep>print(result)<line_sep># [[1. 0.] # [0. 1.]] # print(arr.I) # AttributeError: 'numpy.ndarray' object has no attribute 'I' arr_s=np.array([[0 0] [1 3]])<line_sep># print(np.linalg.inv(arr_s)) # LinAlgError: Singular matrix arr_pinv=np.linalg.pinv(arr_s)<line_sep>print(arr_pinv)<line_sep># [[0. 0.1] # [0. 0.3]] print(arr_s@arr_inv)<line_sep># [[0. 0.] # [0. 1.]] print(np.linalg.pinv(arr_pinv))<line_sep># [[0. 0.] # [1. 3.]] print(np.linalg.inv(arr))<line_sep># [[ 3. -5.] # [-1. 2.]] print(np.linalg.pinv(arr))<line_sep># [[ 3. -5.] # [-1. 2.]] mat_s=np.mat([[0 0] [1 3]])<line_sep># print(np.linalg.inv(mat_s)) # LinAlgError: Singular matrix # print(mat_s**-1) # LinAlgError: Singular matrix # print(mat_s.I) # LinAlgError: Singular matrix print(np.linalg.pinv(mat_s))<line_sep># [[0. 0.1] # [0. 0.3]]
# ***************************************************************************** # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # See NOTICE file for details. # # ***************************************************************************** <import_stmt>_jpype<import_stmt>jpype<import_from_stmt>jpype.types *<import_from_stmt>jpype java<import_stmt>common<try_stmt><block_start><import_stmt>numpy<as>np<block_end><except_stmt>ImportError<block_start><pass><block_end><class_stmt>CustomizerTestCase(common.JPypeTestCase)<block_start><def_stmt>setUp self<block_start>common.JPypeTestCase.setUp(self)<line_sep>self.fixture=JClass('jpype.common.Fixture')()<block_end><def_stmt>testSticky self<block_start>@jpype.JImplementationFor("jpype.override.A")<class_stmt>_A<block_start>@jpype.JOverride(sticky=<true> rename="remove_")<def_stmt>remove self obj<block_start><pass><block_end><block_end>A=jpype.JClass("jpype.override.A")<line_sep>B=jpype.JClass("jpype.override.B")<line_sep>self.assertEqual(A.remove _A.remove)<line_sep>self.assertEqual(B.remove _A.remove)<line_sep>self.assertEqual(str(A.remove_) "jpype.override.A.remove")<line_sep>self.assertEqual(str(B.remove_) "jpype.override.B.remove")<block_end><block_end>