content
stringlengths
1
1.05M
input_ids
listlengths
1
883k
ratio_char_token
float64
1
22.9
token_count
int64
1
883k
from imutils import face_utils from scipy.spatial import distance import cv2 import dlib import imutils import pygame import time # Initializing the alert sound pygame.mixer.init() alert_sound = pygame.mixer.Sound("alert_sound.wav") default_volume = 0.2 # Eye-Aspect-Ratio data EAR_threshhold = 0.17 # One valid frame is counted when EAR is lower than this value frame_count = 0 # Number of frames when EAR is lower than EAR_threshhold EAR_total_frame = 25 # Having frame_count larger than this value is considered drowsiness # Play the alarm in a given volume # Given an eye landmark, compute its eye_aspect_ratio # Initialize the face detector and Facial landmark predictor detector = dlib.get_frontal_face_detector() predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat") # Access the camera cap = cv2.VideoCapture(0) # Main loop for drowsiness detection while True: # Read the camera input, resize it, and concert it to grayscale frame ret, frame = cap.read() frame = imutils.resize(frame, width=600) raw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Detect faces in grayscale frame bounds = detector(raw,0) for bound in bounds: # Predict facial landmarks for each detected face shape = predictor(raw,bound) # Convert the facial lanmarks into a 1-D numpy array (x, y) shape = face_utils.shape_to_np(shape) # Left and right eyes' indexes for facial landmarks left_eye = shape[42:48] right_eye = shape[36:42] # The main EAR is the average of left and right eye's EAR left_EAR = eye_aspect_ratio(left_eye) right_EAR = eye_aspect_ratio(right_eye) EAR = (left_EAR + right_EAR) / 2 # Draw the facial landmarks for left eye for (x, y) in left_eye: cv2.circle(frame, (x, y), 1, (0, 255, 0), -1) # Draw the facial landmarks for right eye for (x, y) in right_eye: cv2.circle(frame, (x, y), 1, (0, 255, 0), -1) # Alarm when drowsiness is detected if EAR < EAR_threshhold: frame_count += 1 # Volume increases gradually if frame_count >= EAR_total_frame: alert(0.2 + (frame_count - 25) * 
0.2) time.sleep(3) else: frame_count = 0 # Display informations cv2.putText(frame, "Frame: {:.0f}".format(frame_count), (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) cv2.putText(frame, "Eye-Aspect-Ratio: {:.2f}".format(EAR), (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) cv2.putText(frame, "Press Q to exit.", (410, 320), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2) # Display the frame cv2.imshow("Drowsiness_Detector", frame) # Provide a way to exit the program -- pressing "Q" key = cv2.waitKey(1) & 0xFF if key == ord("q"): break cv2.destroyAllWindows()
[ 6738, 545, 26791, 1330, 1986, 62, 26791, 198, 6738, 629, 541, 88, 13, 2777, 34961, 1330, 5253, 198, 11748, 269, 85, 17, 198, 11748, 288, 8019, 198, 11748, 545, 26791, 198, 11748, 12972, 6057, 198, 11748, 640, 198, 198, 2, 20768, 2890,...
2.340963
1,267
#!/usr/bin/env python3 # < trunk-tap.py > # Version 1.0 < 20171022 > # Copyright 2017: Alexander Schreiber < schreiberstein[at]gmail.com > # https://github.com/schreiberstein/trunk-tap.py # MIT License: # ============ # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # See: https://opensource.org/licenses/MIT # Introduction: # ============= # trunk-tap.py is a Linux command line utility to connects a set of 802.1Q VLANs to a TINC VPN/OpenVPN TAP-interface and is designed to be invoked by ifup/ifdown scripts after starting or stopping a VPN connection. # Dependencies (on Debian): python3, iproute2, bridge-utils, vlan (including kernel module '8021q' in /etc/modules) # It reads the filenames from the content of a folder containing files corresponding to the VLAN ID (e.g. '100', '105', ...), then creates VLAN interfaces on a local Ethernet adapter used as "trunk port" (e.g. 'eth1.100', 'eth1.105', ...). # The script then proceeds to generate bridge interfaces for every VLAN ID. (e.g. 
"trunk0.100", "trunk0.105", ...) and attaches the respective Ethernet VLAN interfaces to the bridge. (e.g. 'trunk0.105 <-> eth1.105', ...) # After that, the local infrastructure is ready to be attached to the VPN layer 2 tunnel. # This is achieved by enabling the TAP interface ("up"), creating VLAN interfaces on the TAP adapter (e.g. 'tap0.100', 'tap0.105', ...) and attaching them to the respective bridge. # Illustration: # ============= # (TINC VPN / OpenVPN) # -------- SITE 1 ------- -------- SITE 2 ------- # eth1.100 <-> trunk0.100 <--\ ################ /--> trunk0.100 <-> eth1.100 # eth1.105 <-> trunk0.105 <--->> ---TAP-TUNNEL--- <<---> trunk0.105 <-> eth1.105 # eth1.110 <-> trunk0.110 <--/ ################ \--> trunk0.110 <-> eth1.110 # Hint: Interface names (ethernet adapter, bridge name, ...) do not neccesarily have to be identical among sites. # --------------------------------------------------------------------------------------------------------------- # # Code: # ===== # Import required Python3 modules import os, sys, subprocess from pathlib import Path # Create VLAN-interfaces on trunk interface (e.g. 'eth1.100', 'eth1.105', ...) # Function to remove VLAN interfaces from trunk interface # Function to create main bridge (no VLAN ID - May be used to attach a VLAN/network to provide network to devices without VLAN support (VLAN0 - untagged)) # Function to remove bridge # Creates bridges to be used for VLAN bridging (e.g. 'trunk0.100', 'trunk0.105', ..) 
- illustration: eth1.105 <-> Bridge: trunk0.105 <-> tap0.105 # Function to remove VLAN interfaces from the bridge # Function to bridge the VLANs of the physical interface with the VLANs of the bridge # Create VLAN-interfaces on tap interface # Function to bridge the VLANs of the physical interface with the VLANs of the bridge # Function to enable ("up") the tap interface # Function to disable ("down") the tap interface # Function to remove VLAN interfaces from tap interface # Function to remove members attached by the tap_bridge() function # Function to remove members attached by the bridge() function # ------------------------ # Note: Order of execution # ------------------------ # Start: # ------ # trunk_vlan_add() # bridge_add() # bridge_vlan_add() # bridge() # tap_if_up() # tap_vlan_add() # tap_bridge() # Stop: # ----- # tap_unbridge() # tap_vlan_del() # tap_if_down() # unbridge() # bridge_vlan_del() # bridge_del() # trunk_vlan_del() # Start function - Used to execute all other functions # Stop function - reverses the actions performed by start() # # # # # # # # # # Main function # # # # # # # # # # # Only run main if the script is explicitly executed (e.g. './trunktap.py') if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 198, 2, 1279, 21427, 12, 44335, 13, 9078, 1875, 198, 2, 10628, 352, 13, 15, 1279, 2177, 940, 1828, 1875, 220, 198, 2, 15069, 2177, 25, 10009, 3059, 260, 1856, 1279, 5513, 260, ...
3.342361
1,440
# Copyright 2020 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import contextlib import os import re import subprocess from azurelinuxagent.common.utils import fileutil from tests.tools import patch, data_dir # # Default values for the mocked commands. # # The output comes from an Ubuntu 18 system # _default_commands = [ (r"systemctl --version", '''systemd 237 +PAM +AUDIT +SELINUX +IMA +APPARMOR +SMACK +SYSVINIT +UTMP +LIBCRYPTSETUP +GCRYPT +GNUTLS +ACL +XZ +LZ4 +SECCOMP +BLKID +ELFUTILS +KMOD -IDN2 +IDN -PCRE2 default-hierarchy=hybrid '''), (r"mount -t cgroup", '''cgroup on /sys/fs/cgroup/systemd type cgroup (rw,nosuid,nodev,noexec,relatime,xattr,name=systemd) cgroup on /sys/fs/cgroup/rdma type cgroup (rw,nosuid,nodev,noexec,relatime,rdma) cgroup on /sys/fs/cgroup/cpuset type cgroup (rw,nosuid,nodev,noexec,relatime,cpuset) cgroup on /sys/fs/cgroup/net_cls,net_prio type cgroup (rw,nosuid,nodev,noexec,relatime,net_cls,net_prio) cgroup on /sys/fs/cgroup/perf_event type cgroup (rw,nosuid,nodev,noexec,relatime,perf_event) cgroup on /sys/fs/cgroup/hugetlb type cgroup (rw,nosuid,nodev,noexec,relatime,hugetlb) cgroup on /sys/fs/cgroup/freezer type cgroup (rw,nosuid,nodev,noexec,relatime,freezer) cgroup on /sys/fs/cgroup/memory type cgroup (rw,nosuid,nodev,noexec,relatime,memory) cgroup on /sys/fs/cgroup/pids type cgroup (rw,nosuid,nodev,noexec,relatime,pids) cgroup on /sys/fs/cgroup/devices type cgroup 
(rw,nosuid,nodev,noexec,relatime,devices) cgroup on /sys/fs/cgroup/cpu,cpuacct type cgroup (rw,nosuid,nodev,noexec,relatime,cpu,cpuacct) cgroup on /sys/fs/cgroup/blkio type cgroup (rw,nosuid,nodev,noexec,relatime,blkio) '''), (r"mount -t cgroup2", '''cgroup on /sys/fs/cgroup/unified type cgroup2 (rw,nosuid,nodev,noexec,relatime) '''), (r"systemctl show walinuxagent\.service --property CPUAccounting", '''CPUAccounting=no '''), (r"systemctl show walinuxagent\.service --property MemoryAccounting", '''MemoryAccounting=no '''), (r"systemd-run --unit=([^\s]+) --scope ([^\s]+)", ''' Running scope as unit: TEST_UNIT.scope Thu 28 May 2020 07:25:55 AM PDT '''), ] _default_files = ( (r"/proc/self/cgroup", os.path.join(data_dir, 'cgroups', 'proc_self_cgroup')), (r"/proc/[0-9]+/cgroup", os.path.join(data_dir, 'cgroups', 'proc_pid_cgroup')), (r"/sys/fs/cgroup/unified/cgroup.controllers", os.path.join(data_dir, 'cgroups', 'sys_fs_cgroup_unified_cgroup.controllers')), )
[ 2, 15069, 12131, 5413, 10501, 198, 2, 198, 2, 49962, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 2, 345, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 2, 921, 743, 733...
2.578676
1,163
from __future__ import print_function import os import neat # 2-input XOR inputs and expected outputs. xor_inputs = [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0), (1.0, 1.0)] xor_outputs = [(0.0,),(1.0,),(1.0,),(0.0,)] local_dir = os.path.dirname(__file__) config_path = os.path.join(local_dir, 'config-feedforward') run(config_path)
[ 6738, 11593, 37443, 834, 1330, 3601, 62, 8818, 198, 11748, 28686, 198, 11748, 15049, 198, 198, 2, 362, 12, 15414, 1395, 1581, 17311, 290, 2938, 23862, 13, 198, 87, 273, 62, 15414, 82, 796, 47527, 15, 13, 15, 11, 657, 13, 15, 828, ...
2.231293
147
""" _ _____ _ _ |_| __ | |___ ___| |_ | | __ -| | . | _| '_| |_|_____|_|___|___|_,_| iBlock is a machine learning video game! This game is played on a 8x6 board (48 spaces) and the goal is to fill up the enemy's column with your pieces! Once that happens the game will reset and log all the data for the AI's to observe! In the first few games the AI will take random moves and attempt winning. Once one of the AI's win, the information on how they one gets processed and they try to attempt it again using that information! Rather then focusing on attacking, these AI naturally plays offensively! You will see them defend their base while at the same time try to attack the enemy! The AI also doesn't know which spaces it must fill to win so as it plays it must learn on it's own (this also allows for the creation of custom maps). iBlock has multiple different game options for how to set up the way the AI will play! New gamemodes coming soon! Copyright (c) SavSec 2017 Copyright (c) SavSec iBlock 2017 Format: Encoding: UTF-8 Tab: 2 System: Python 2.7 Modules: sys, time, random License: MIT License Developer: @Russian_Otter - Instagram """ import sys, random, time, argparse parser = argparse.ArgumentParser() parser.add_argument("-i", "--intelligence",help="Activates dynamic machine learning mode for both players",action="store_true") parser.add_argument("-r", "--random",help="Activates random machine learning mode for both players",action="store_true") parser.add_argument("-p", "--pvai",help="Activates Player vs AI mode",action="store_true") parser.add_argument("-R", "--Reset",help="Activates reset mode for both players",action="store_true") parser.add_argument("-sm", "--show-moves",help="Shows the last move for each turn",action="store_true") parser.add_argument("-d", "--display",help="Set to False to disable table display",default=True) parser.add_argument("-pg", "--progress",help="Displays progress graphs", action="store_true") parser.add_argument("-t", "--time",help="Turn 
rate for each player",default=0.05) parser.add_argument("-q", "--quick",help="Plays a 1 match game", action="store_true") parser.add_argument("-H", "--Hide",help="Hides help",action="store_true") args = parser.parse_args() if args.pvai: human_mode = True else: human_mode = False if args.Reset: fresh_start1,fresh_start0 = True,True else: fresh_start0,fresh_start1 = False,False if args.show_moves: show_move = True else: show_move = False if args.progress: progress_graphing = True else: progress_graphing = False display = args.display mtime = float(args.time) if show_move: from time import gmtime, strftime if progress_graphing: """ import matplotlib.pyplot as plt import numpy as np Still in progress """ pass global last_move last_move = ["41"] table = { "1":".", "2":".", "3":".", "4":".", "5":".", "6":".", "7":".", "8":"0", "9":".", "10":".", "11":".", "12":".", "13":".", "14":".", "15":".", "16":".", "17":".", "18":".", "19":".", "20":".", "21":".", "22":".", "23":".", "24":".", "25":".", "26":".", "27":".", "28":".", "29":".", "30":".", "31":".", "32":".", "33":".", "34":".", "35":".", "36":".", "37":".", "38":".", "39":".", "40":".", "41":"1", "42":".", "43":".", "44":".", "45":".", "46":".", "47":".", "48":"." } # up left = -9 # up down = +-8 # right left = +-1 # down right = +9 # up right = -7 # down left = +7 def reset_knowldge(): """ Reseting knowldge wipes all past game history and updates it with random winning moves. """ print "Reseting Knowldge..." time.sleep(1) if not fresh_start0 or not fresh_start1: print "You must change values: \"fresh_start0\" and \"fresh_start1\" to True before reseting." print "Be sure to change those values back to False while not in reset mode." time.sleep(3) if mtime > 0.0009 or display == True: print "Consider Temporarily Changing You Game Settings For Reset:" print "-Speed should be less than 0.0009" print "-Display should be turned off" time.sleep(3) try: iblock(False,False) except: pass print "Reset Complete!" 
time.sleep(1) def random_ai_mode(): """ Random AI mode disables the learning ability of the program which causes it to make random moves. (Personally this is more entiretaining than Intelligence Mode) """ print "Starting Random AI Mode..." if mtime < 0.05: print "Consider changing the frame rate to more than 0.05 while in random mode" time.sleep(3) if display == False: print "Consider changing display to True inorder to view the game in random mode" time.sleep(3) time.sleep(1) try: iblock(False,False) except: print "Game Paused" def intelligent_1v1(): """ This is a 1 match mode to quickly see who wins a fast fight """ print "Starting Intelligent 1v1..." if mtime < 0.005: print "Consider changing the frame rate to more than 0.005 while in intelligence mode" time.sleep(3) if display == False: print "Consider changing display to True inorder to view the game in intelligence mode" time.sleep(3) time.sleep(1) try: iblock(True,True) except: print "Game Paused" def human_vs_iblock(): """ You'll probably loose... """ # Coming Soon # if not args.Hide: print """ _ _____ _ _ |_| __ | |___ ___| |_ | | __ -| | . | _| '_| |_|_____|_|___|___|_,_| """ parser.print_help() print print "Available Game Modes/Options:" print "-Random Mode" print "-Intelligence Mode" print "-1 Match Intelligence Mode" print "-Reset Mode" print "-Human vs Player Mode" print "\n(Enter the function name for the gamemode you want in the python terminal or set your arguments to choose your gamemode)\n" print "Set arguments to \"-H\" to disable this message." time.sleep(0.5) if len(sys.argv) > 1: if args.intelligence: intelligence_mode() sys.exit() if args.random: random_ai_mode() sys.exit() if args.Reset: print "Stop the program once both player's fitness is at your desired stat" reset_knowldge() sys.exit() if args.quick: intelligent_1v1() sys.exit() if human_mode: try: iblockgo() except: print "Game Paused/Stopped"
[ 37811, 198, 4808, 220, 29343, 4808, 220, 220, 220, 220, 220, 220, 220, 220, 4808, 220, 220, 220, 198, 91, 62, 91, 11593, 220, 930, 930, 17569, 46444, 91, 930, 62, 220, 198, 91, 930, 11593, 532, 91, 930, 764, 930, 220, 4808, 91, ...
2.908658
2,102
if __name__ == "__main__": main()
[ 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 1388, 3419, 198 ]
2.105263
19
"""Action Module circuits component to update incidents from QRadar Ariel queries""" import logging from datetime import datetime import time import copy import json from string import Template from pkg_resources import Requirement, resource_filename import resilient_circuits.template_functions as template_functions from query_runner.lib.query_action import QueryRunner from query_runner.lib.qradar_rest_client import QRadarClient from query_runner.lib.misc import SearchTimeout, SearchFailure try: basestring except NameError: basestring = str LOG = logging.getLogger(__name__) CONFIG_DATA_SECTION = 'ariel' def config_section_data(): """sample config data for use in app.config""" section_config_fn = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/app.config.qradar") query_dir = resource_filename(Requirement("rc-qradar-search"), "query_runner/data/queries_ariel") with open(section_config_fn, 'r') as section_config_file: section_config = Template(section_config_file.read()) return section_config.safe_substitute(directory=query_dir) ############################# # Functions for running Query ############################# def _wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval): """ Poll QRadar until search execution finishes """ start_time = time.time() search_status = qradar_client.get_search_status(search_id) if not search_status: # Sometimes it takes a little while to be able to query a search id time.sleep(4) search_status = qradar_client.get_search_status(search_id) while search_status.get("status", "") in ("WAIT", "EXECUTE", "SORTING"): if timeout != 0: if time.time() - start_time > timeout: raise SearchTimeout(search_id, search_status.get("status", "")) time.sleep(polling_interval) search_status = qradar_client.get_search_status(search_id) if search_status.get("status", "") != "COMPLETED": LOG.error(search_status) raise SearchFailure(search_id, search_status.get("status", "")) # end _wait_for_query_to_complete def 
_get_query_results(search_id, qradar_client, item_range): """ Get results from a complete QRadar query """ if item_range: headers = {"Range": item_range} else: headers = None url = "ariel/searches/{0}/results".format(search_id, headers=headers) response = qradar_client.get(url) LOG.debug(response) # Replace "NULL" with "" response = remove_nulls(response) return response # end _get_query_results def remove_nulls(d): """ recursively replace 'NULL' with '' in dictionary """ if isinstance(d, basestring): if d == u'NULL': return u'' else: return d new = {} LOG.debug("d={d} ".format(d=d)) LOG.debug("type of d is {t}".format(t=type(d))) for k, v in d.items(): if isinstance(v, dict): v = remove_nulls(v) elif isinstance(v, list): v = [remove_nulls(v1) for v1 in v] elif isinstance(v, basestring) and v == u'NULL': v = u'' new[k] = v LOG.info("Returning: {n}".format(n=new)) return new def run_search(options, query_definition, event_message): """ Run Ariel search and return result """ # Read the options and construct a QRadar client qradar_url = options.get("qradar_url", "") qradar_token = options.get("qradar_service_token", "") timeout = int(options.get("query_timeout", 600)) polling_interval = int(options.get("polling_interval", 5)) if not all((qradar_url, qradar_token, timeout, polling_interval)): LOG.error("Configuration file missing required values!") raise Exception("Missing Configuration Values") verify = options.get("qradar_verify", "") if verify[:1].lower() in ("0", "f", "n"): verify = False else: verify = True qradar_client = QRadarClient(qradar_url, qradar_token, verify=verify) error = None response = None try: params = {'query_expression': query_definition.query} url = "ariel/searches" response = qradar_client.post(url, params=params) LOG.debug(response) search_id = response.get('search_id', '') if not search_id: error = "Query Failed: " + response.get("message", "No Error Message Found") else: LOG.info("Queued Search %s", search_id) 
_wait_for_query_to_complete(search_id, qradar_client, timeout, polling_interval) # Query Execution Finished, Get Results response = _get_query_results(search_id, qradar_client, query_definition.range) except Exception as exc: if not query_definition.onerror: raise LOG.error(exc) error = u"{}".format(exc) if error: mapdata = copy.deepcopy(event_message) mapdata.update(query_definition.vars) mapdata.update({"query": query_definition.query}) mapdata.update({"error": error}) error_template = json.dumps({"events": [query_definition.onerror]}, indent=2) error_rendered = template_functions.render_json(error_template, mapdata) response = error_rendered if not response or len(response["events"]) == 0: LOG.warn("No data returned from query") if query_definition.default: mapdata = copy.deepcopy(event_message) mapdata.update(query_definition.vars) mapdata.update({"query": query_definition.query}) default_template = json.dumps({"events": [query_definition.default]}, indent=2) default_rendered = template_functions.render_json(default_template, mapdata) response = default_rendered return response # end run_search
[ 37811, 12502, 19937, 24907, 7515, 284, 4296, 10207, 422, 1195, 15546, 283, 33364, 20743, 37811, 198, 11748, 18931, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 11748, 640, 198, 11748, 4866, 198, 11748, 33918, 198, 6738, 4731, 1330, 37350, ...
2.563676
2,285
#! /usr/bin/env python # -*- coding: utf-8 -*- from email import encoders from email.header import Header from email.mime.multipart import MIMEBase, MIMEMultipart from email.mime.text import MIMEText from email.utils import parseaddr, formataddr import smtplib # # from_addr = input('From') password = input('Password') # to_addr = input('To') # smtp_server = input('SMTP server') # # msg = MIMEText('Hello,send by Python...', 'plain', 'utf-8') # HTML msg = MIMEText('<html><body><h1>Hello</h1>' + '<p>send by <a href="http://www.python.org">Python</a>...</p>' + '</body></html>', 'html', 'utf-8') # msg['From'] = _format_addr('Python<%s>' % from_addr) # msg['To'] = _format_addr('<%s>' % to_addr) # msg['Subject'] = Header('SMTP...', 'utf-8').encode() # msg = MIMEMultipart() msg = MIMEMultipart('alternative') msg['From'] = _format_addr('Python<%s>' % from_addr) msg['To'] = _format_addr('<%s>' % to_addr) msg['Subject'] = Header('SMTP', 'utf-8').encode() # MIMEText: msg.attach(MIMEText('send with file...', 'plain', 'utf-8')) msg.attach(MIMEText('<html><body><h1>Hello</h1>' + '<p><img src="cid:0"></p>' + '</body></html>', 'html', 'utf-8')) with open('/Users/doc88/Desktop/banner.png', 'rb') as f: # MIME mime = MIMEBase('image', 'jpeg', filename='banner.png') # mime.add_header('Content-Disposition', 'attachment', filename='banner.png') mime.add_header('Content-ID', '<0>') mime.add_header('X-Attachment-Id', '0') # mime.set_payload(f.read()) # Base64 encoders.encode_base64(mime) # MIMEMultipart: msg.attach(mime) try: # # server = smtplib.SMTP_SSL(smtp_server, 465) # SMTP server.set_debuglevel(1) # server.login(from_addr, password) # # server.sendmail(from_addr, [to_addr], msg.as_string()) # server.quit() print('Success!') except smtplib.SMTPException as e: print('Fail,%s' % e)
[ 2, 0, 1220, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 3053, 1330, 2207, 375, 364, 198, 6738, 3053, 13, 25677, 1330, 48900, 198, 6738, 3053, 13, 76, 524, 13, ...
2.15818
923
#!/usr/bin/env python # encoding=utf-8 """ Copyright (c) 2021 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 2, 21004, 28, 40477, 12, 23, 198, 37811, 198, 15269, 357, 66, 8, 33448, 43208, 21852, 1766, 1539, 43, 8671, 13, 198, 198, 9654, 35389, 1046, 318, 11971, 739, 17996, 272, 6599, 43, 41...
2.94709
189
__author__ = 'schien' from django.contrib import admin from django.contrib.auth.admin import UserAdmin from django.contrib.auth.models import User from django.contrib.admin import BooleanFieldListFilter from api.models import Scan, Measure, InstalledMeasure, MeasureCategory, App, MessageThread, RedirectUrl, TrackableURL, Click, UserProfile, Favourite, \ LoggerMessage from api.models import Device, House, Note, HomeOwnerProfile, Message admin.site.register(RedirectUrl, RedirectUrlAdmin) admin.site.register(TrackableURL) admin.site.register(Click) admin.site.register(House, HouseAdmin) admin.site.register(Message, MessagesAdmin) admin.site.register(MessageThread) admin.site.register(Device, CreatedDateAdmin) admin.site.register(Scan, CreatedDateAdmin) admin.site.register(Note, CreatedDateAdmin) # Define a new User admin # bristol admin.site.register(Measure) admin.site.register(UserProfile, UserProfileAdmin) admin.site.register(HomeOwnerProfile) admin.site.register(InstalledMeasure) admin.site.register(MeasureCategory) admin.site.register(App) admin.site.register(LoggerMessage) # frome # Re-register UserAdmin admin.site.unregister(User) admin.site.register(User, UserAdmin)
[ 198, 198, 834, 9800, 834, 796, 705, 20601, 2013, 6, 198, 6738, 42625, 14208, 13, 3642, 822, 1330, 13169, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 13, 28482, 1330, 11787, 46787, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 184...
3.315217
368
import numpy as np from scipy.sparse import csc_matrix, diags, tril from .basis import Basis __author__ = 'Randall' # TODO: complete this class # todo: compare performance of csr_matrix and csc_matrix to deal with sparse interpolation operators # fixme: interpolation is 25 slower than in matlab when 2 dimensions!! 2x slower with only one
[ 11748, 299, 32152, 355, 45941, 198, 6738, 629, 541, 88, 13, 82, 29572, 1330, 269, 1416, 62, 6759, 8609, 11, 2566, 3775, 11, 491, 346, 198, 6738, 764, 12093, 271, 1330, 6455, 271, 198, 198, 834, 9800, 834, 796, 705, 38918, 439, 6, ...
3.376238
101
import os import os.path import sys # Modified version from Python-3.3. 'env' environ dict override has been added. def which(cmd, mode=os.F_OK | os.X_OK, env=None): """Given a command, mode, and a PATH string, return the path which conforms to the given mode on the PATH, or None if there is no such file. `mode` defaults to os.F_OK | os.X_OK. `env` defaults to os.environ, if not supplied. """ # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. # Short circuit. If we're given a full path which matches the mode # and it exists, we're done here. if _access_check(cmd, mode): return cmd if env is None: env = os.environ path = env.get("PATH", os.defpath).split(os.pathsep) if sys.platform == "win32": # The current directory takes precedence on Windows. if not os.curdir in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. default_pathext = \ '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC' pathext = env.get("PATHEXT", default_pathext).split(os.pathsep) # See if the given file matches any of the expected path extensions. # This will allow us to short circuit when given "python.exe". matches = [cmd for ext in pathext if cmd.lower().endswith(ext.lower())] # If it does match, only test that one, otherwise we have to try # others. files = [cmd] if matches else [cmd + ext.lower() for ext in pathext] else: # On other platforms you don't have things like PATHEXT to tell you # what file suffixes are executable, so just pass on cmd as-is. files = [cmd] seen = set() for dir in path: dir = os.path.normcase(dir) if not dir in seen: seen.add(dir) for thefile in files: name = os.path.join(dir, thefile) if _access_check(name, mode): return name return None
[ 11748, 28686, 198, 11748, 28686, 13, 6978, 198, 11748, 25064, 628, 198, 2, 40499, 2196, 422, 11361, 12, 18, 13, 18, 13, 705, 24330, 6, 551, 2268, 8633, 20957, 468, 587, 2087, 13, 198, 198, 4299, 543, 7, 28758, 11, 4235, 28, 418, 1...
2.453089
874
"""Defines the classes SymbolTable and SymbolTableNode""" import sys from numpy import ones
[ 37811, 7469, 1127, 262, 6097, 38357, 10962, 290, 38357, 10962, 19667, 37811, 198, 11748, 25064, 198, 6738, 299, 32152, 1330, 3392, 628 ]
4.227273
22
from setuptools import setup, version setup( name="NorthNet", version="0.0", author="William E. Robinson", packages = ["NorthNet"], )
[ 6738, 900, 37623, 10141, 1330, 9058, 11, 2196, 198, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 14157, 7934, 1600, 198, 220, 220, 220, 2196, 2625, 15, 13, 15, 1600, 198, 220, 220, 220, 1772, 2625, 17121, 412, 13, 12652, 1600, 198...
2.745455
55
"""Class with high-level methods for processing NAPS and NAPS BE datasets.""" from config import DATA_NAPS_BE_ALL from lib import partition_naps from lib import plot from lib import plot_clusters from lib import plot_clusters_with_probability from lib import plot_setup from lib import read_naps from lib import read_naps_be from lib import reindex_partitions import json import matplotlib.pyplot as plt import numpy as np import os import scipy import sklearn
[ 37811, 9487, 351, 1029, 12, 5715, 5050, 329, 7587, 399, 44580, 290, 399, 44580, 9348, 40522, 526, 15931, 198, 198, 6738, 4566, 1330, 42865, 62, 45, 44580, 62, 12473, 62, 7036, 198, 6738, 9195, 1330, 18398, 62, 77, 1686, 198, 6738, 919...
3.481203
133
""" 1618. Maximum Font to Fit a Sentence in a Screen Medium You are given a string text. We want to display text on a screen of width w and height h. You can choose any font size from array fonts, which contains the available font sizes in ascending order. You can use the FontInfo interface to get the width and height of any character at any available font size. The FontInfo interface is defined as such: interface FontInfo { // Returns the width of character ch on the screen using font size fontSize. // O(1) per call public int getWidth(int fontSize, char ch); // Returns the height of any character on the screen using font size fontSize. // O(1) per call public int getHeight(int fontSize); } The calculated width of text for some fontSize is the sum of every getWidth(fontSize, text[i]) call for each 0 <= i < text.length (0-indexed). The calculated height of text for some fontSize is getHeight(fontSize). Note that text is displayed on a single line. It is guaranteed that FontInfo will return the same value if you call getHeight or getWidth with the same parameters. It is also guaranteed that for any font size fontSize and any character ch: getHeight(fontSize) <= getHeight(fontSize+1) getWidth(fontSize, ch) <= getWidth(fontSize+1, ch) Return the maximum font size you can use to display text on the screen. If text cannot fit on the display with any font size, return -1. Example 1: Input: text = "helloworld", w = 80, h = 20, fonts = [6,8,10,12,14,16,18,24,36] Output: 6 Example 2: Input: text = "leetcode", w = 1000, h = 50, fonts = [1,2,4] Output: 4 Example 3: Input: text = "easyquestion", w = 100, h = 100, fonts = [10,15,20,25] Output: -1 Constraints: 1 <= text.length <= 50000 text contains only lowercase English letters. 1 <= w <= 107 1 <= h <= 104 1 <= fonts.length <= 105 1 <= fonts[i] <= 105 fonts is sorted in ascending order and does not contain duplicates. """ # """ # This is FontInfo's API interface. 
# You should not implement it, or speculate about its implementation # """ #class FontInfo(object): # Return the width of char ch when fontSize is used. # def getWidth(self, fontSize, ch): # """ # :type fontSize: int # :type ch: char # :rtype int # """ # # def getHeight(self, fontSize): # """ # :type fontSize: int # :rtype int # """
[ 37811, 198, 1433, 1507, 13, 22246, 24060, 284, 25048, 257, 11352, 594, 287, 257, 15216, 198, 31205, 198, 198, 1639, 389, 1813, 257, 4731, 2420, 13, 775, 765, 284, 3359, 2420, 319, 257, 3159, 286, 9647, 266, 290, 6001, 289, 13, 921, ...
3.130607
758
''' Copyright 2022 Airbus SAS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' from sos_trades_core.execution_engine.sos_discipline import SoSDiscipline from climateeconomics.core.core_dice.tempchange_model import TempChange from sos_trades_core.tools.post_processing.charts.two_axes_instanciated_chart import InstanciatedSeries, TwoAxesInstanciatedChart from sos_trades_core.tools.post_processing.charts.chart_filter import ChartFilter import pandas as pd
[ 7061, 6, 198, 15269, 33160, 39173, 35516, 198, 198, 26656, 15385, 739, 262, 24843, 13789, 11, 10628, 362, 13, 15, 357, 1169, 366, 34156, 15341, 198, 5832, 743, 407, 779, 428, 2393, 2845, 287, 11846, 351, 262, 13789, 13, 198, 1639, 743...
3.496296
270
#!/usr/bin/env python3 """ The PyMKM example app. """ __author__ = "Andreas Ehrlund" __version__ = "2.0.4" __license__ = "MIT" import os import csv import json import shelve import logging import logging.handlers import pprint import uuid import sys from datetime import datetime import micromenu import progressbar import requests import tabulate as tb from pkg_resources import parse_version from .pymkm_helper import PyMkmHelper from .pymkmapi import PyMkmApi, CardmarketError
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 18, 198, 37811, 198, 464, 9485, 33907, 44, 1672, 598, 13, 198, 37811, 198, 198, 834, 9800, 834, 796, 366, 31258, 292, 412, 11840, 37525, 1, 198, 834, 9641, 834, 796, 366, 17, 13, 15, 13...
3.012422
161
import ee from ee_plugin import Map dataset = ee.Image('CSP/ERGo/1_0/US/physioDiversity') physiographicDiversity = dataset.select('b1') physiographicDiversityVis = { 'min': 0.0, 'max': 1.0, } Map.setCenter(-94.625, 39.825, 7) Map.addLayer( physiographicDiversity, physiographicDiversityVis, 'Physiographic Diversity')
[ 11748, 304, 68, 220, 198, 6738, 304, 68, 62, 33803, 1330, 9347, 220, 198, 198, 19608, 292, 316, 796, 304, 68, 13, 5159, 10786, 34, 4303, 14, 1137, 5247, 14, 16, 62, 15, 14, 2937, 14, 34411, 952, 35, 1608, 11537, 198, 34411, 72, ...
2.522727
132
from django import forms from . import models
[ 6738, 42625, 14208, 1330, 5107, 198, 198, 6738, 764, 1330, 4981, 628 ]
4
12
# -*- coding: utf-8 -*- import base64 print('Choose your choice:') n=''' 1:Encode string to base64 2:Decode base64 to string ''' c=int(eval(input(n))) # if c == 1: #1 print('Type string to be encoded:') inp=input() out = str(base64.encodebytes(inp.encode("utf-8")), "utf-8") print(out) # b if c == 2: print('Type string to be decoded:') inp2=bytes(input(),('utf-8')) dec = base64.decodebytes(inp2) print(dec.decode())
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 2779, 2414, 198, 198, 4798, 10786, 31851, 534, 3572, 25, 11537, 198, 77, 28, 7061, 6, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 352, 25, 483...
2.115556
225
import time, os, sys import scsynth, scosc server = 0 # reference to app's sc server process sndLoader = 0 synthon = 0 # did we start the scsythn process? ##workingpath = os.getcwd() # must be set to the right path in case something special is need sndpath = os.path.join( os.getcwd() , 'sounds' ) synthdefpath = os.path.join( os.getcwd() , 'synthdefs' ) def start( exedir='', port=57110, inputs=2, outputs=2, samplerate=44100, verbose=0, spew=0, startscsynth=0 ) : """ starts scsynth process. interfaces scsynth module. Inits the OSC communication and classes that handle it exe='', exedir='', port=57110, inputs=2, outputs=2, samplerate=44100, verbose=0, spew=0 """ global server, sndLoader # because they are init in this func exe = 'scsynth' # if none is set take workingdir as exedir on mac and windows if sys.platform == 'win32' : exe += '.exe' # add extension if exedir == '' : exedir = 'C:\Program Files\SuperCollider' elif os.uname()[0] == 'Linux' : if exedir == '' : exedir = '/usr/bin' if not os.path.isfile(os.path.join(exedir, exe)): # in case it is in /usr/bin/local print 'Error : /usr/bin/scsynth does not exist. Trying to find scsnth in /usr/local/bin...' exedir = '/usr/local/bin' elif sys.platform == 'darwin': if exedir == '' : exedir = '/Applications/SuperCollider' print "trying to run scsynth from :", exedir server = scsynth.start( #exe = exe, #exedir = exedir, port = port, #inputs = inputs, #outputs = outputs, #samplerate = samplerate, verbose = verbose, spew = spew, ) if startscsynth : # starts scsynth server process global synthon synthon = 1 server.instance = scsynth.startServer( exe = exe, exedir = exedir, port = port, inputs = inputs, outputs = outputs, samplerate = samplerate, verbose = verbose, #spew = spew, ) time.sleep(1) # wait to start up sndLoader = scsynth.Loader(server) # manages sound files def register(address, fun) : """ bind OSC address to function callback """ server.listener.register( address, fun ) # sound buffer related utilities. 
def loadSnd(filename, wait=False) : """ load sound buffer from current sound folder (sc.sndpath) and return buffer's id sends back /b_info labeled OSC message. The arguments to /b_info are as follows: int - buffer number int - number of frames int - number of channels """ abspath = os.path.join( sndpath, filename ) return loadSndAbs(abspath, wait) def unloadSnd(buf_id) : """ unload sound buffer from server memory by buffer id """ sndLoader.unload( buf_id, wait=False ) def loadSndAbs(path, wait=False) : """ same as loadSnd but takes absolute path to snd file """ if os.path.isfile(path) : return sndLoader.load( path, wait, b_query=True ) else : print "file %s does NOT exist" % path return 0 # classes
[ 11748, 640, 11, 28686, 11, 25064, 198, 11748, 629, 28869, 400, 11, 629, 17500, 628, 198, 15388, 796, 657, 1303, 4941, 284, 598, 338, 629, 4382, 1429, 198, 82, 358, 17401, 796, 657, 198, 198, 28869, 400, 261, 796, 657, 1303, 750, 356...
2.219947
1,514
import pytest from fastapi import FastAPI from httpx import AsyncClient from starlette.status import HTTP_200_OK pytestmark = pytest.mark.asyncio
[ 11748, 12972, 9288, 198, 6738, 3049, 15042, 1330, 12549, 17614, 198, 6738, 2638, 87, 1330, 1081, 13361, 11792, 198, 6738, 3491, 21348, 13, 13376, 1330, 14626, 62, 2167, 62, 11380, 198, 198, 9078, 9288, 4102, 796, 12972, 9288, 13, 4102, ...
3.288889
45
import linefinder.linefinder as linefinder import linefinder.config as linefinder_config import linefinder.utils.file_management as file_management ######################################################################## sim_name = 'm12i' '''The simulation to run tracking on.''' tag = '{}_sightline'.format( sim_name ) '''Identifying tag used as part of the filenames. E.g. the IDs file will have the format `ids_{}.hdf5.format( tag )`. ''' # Tracking Parameters tracker_kwargs = { # What particle types to track. Typically just stars and gas. 'p_types': [ 0, 4,], # What snapshots to compile the particle tracks for. 'snum_start': 1, 'snum_end': 600, 'snum_step': 1, } file_manager = file_management.FileManager() sampler_kwargs = { 'ignore_duplicates': True, 'p_types': [ 0, 4 ], 'snapshot_kwargs': { 'sdir': file_manager.get_sim_dir( sim_name ), 'halo_data_dir': file_manager.get_halo_dir( sim_name ), 'main_halo_id': linefinder_config.MAIN_MT_HALO_ID[sim_name], 'ahf_index': 600, 'length_scale_used': 'R_vir', } } visualization_kwargs = { 'install_firefly': True, 'export_to_firefly_kwargs': { 'firefly_dir': '/work/03057/zhafen/firefly_repos/sightline', 'classifications': [ 'is_in_CGM', 'is_CGM_IGM_accretion', 'is_CGM_wind', 'is_CGM_satellite_wind', 'is_CGM_satellite_ISM', ], 'classification_ui_labels': [ 'All', 'IGMAcc', 'Wind', 'SatWind', 'Sat' ], 'tracked_properties': [ 'logT', 'logZ', 'logDen', 'vr_div_v_cool', 'logvr_div_v_cool_offset', ], 'tracked_filter_flags': [ True, ] * 5, 'tracked_colormap_flags': [ True, ] * 5, 'snum': 465, }, } # This is the actual function that runs linefinder. # In general you don't need to touch this function but if you want to, # for example, turn off one of the steps because you're rerunning and you # already did that step, you can do so below. linefinder.run_linefinder_jug( sim_name = sim_name, tag = tag, galdef = '_galdefv3', # The galdef is a set of parameters used for the galaxy linking and # classification steps. 
Don't touch this unless you know what you're doing. tracker_kwargs = tracker_kwargs, sampler_kwargs = sampler_kwargs, visualization_kwargs = visualization_kwargs, run_id_selecting = False, run_id_sampling = False, run_tracking = False, run_galaxy_linking = False, run_classifying = False, )
[ 11748, 1627, 22805, 13, 1370, 22805, 355, 1627, 22805, 198, 11748, 1627, 22805, 13, 11250, 355, 1627, 22805, 62, 11250, 198, 198, 11748, 1627, 22805, 13, 26791, 13, 7753, 62, 27604, 355, 2393, 62, 27604, 198, 198, 29113, 29113, 7804, 19...
2.380692
1,098
# 47. 1+2+3+...+n # 1+2+3+...+nforwhileifelseswitchcaseA?B:C # -*- coding:utf-8 -*-
[ 2, 6298, 13, 352, 10, 17, 10, 18, 10, 986, 10, 77, 198, 198, 2, 352, 10, 17, 10, 18, 10, 986, 10, 77, 1640, 4514, 361, 1424, 274, 42248, 7442, 32, 30, 33, 25, 34, 628, 198, 2, 532, 9, 12, 19617, 25, 40477, 12, 23, 532, 9...
1.72
50
from django.urls import path from .views import AboutTemplateView urlpatterns = [path("", AboutTemplateView.as_view(), name="about")]
[ 6738, 42625, 14208, 13, 6371, 82, 1330, 3108, 628, 198, 6738, 764, 33571, 1330, 7994, 30800, 7680, 198, 198, 6371, 33279, 82, 796, 685, 6978, 7203, 1600, 7994, 30800, 7680, 13, 292, 62, 1177, 22784, 1438, 2625, 10755, 4943, 60, 198 ]
3.341463
41
''' send out activitypub messages ''' from base64 import b64encode from Crypto.PublicKey import RSA from Crypto.Signature import pkcs1_15 from Crypto.Hash import SHA256 from datetime import datetime import json import requests from fedireads import incoming from fedireads.settings import DOMAIN def get_recipients(user, post_privacy, direct_recipients=None): ''' deduplicated list of recipient inboxes ''' recipients = direct_recipients or [] if post_privacy == 'direct': # all we care about is direct_recipients, not followers return recipients # load all the followers of the user who is sending the message followers = user.followers.all() if post_privacy == 'public': # post to public shared inboxes shared_inboxes = set(u.shared_inbox for u in followers) recipients += list(shared_inboxes) # TODO: not every user has a shared inbox # TODO: direct to anyone who's mentioned if post_privacy == 'followers': # don't send it to the shared inboxes inboxes = set(u.inbox for u in followers) recipients += list(inboxes) return recipients def broadcast(sender, activity, recipients): ''' send out an event ''' errors = [] for recipient in recipients: try: sign_and_send(sender, activity, recipient) except requests.exceptions.HTTPError as e: # TODO: maybe keep track of users who cause errors errors.append({ 'error': e, 'recipient': recipient, 'activity': activity, }) return errors def sign_and_send(sender, activity, destination): ''' crpyto whatever and http junk ''' # TODO: handle http[s] with regex inbox_fragment = sender.inbox.replace('https://%s' % DOMAIN, '') now = datetime.utcnow().isoformat() signature_headers = [ '(request-target): post %s' % inbox_fragment, 'host: https://%s' % DOMAIN, 'date: %s' % now ] message_to_sign = '\n'.join(signature_headers) # TODO: raise an error if the user doesn't have a private key signer = pkcs1_15.new(RSA.import_key(sender.private_key)) signed_message = signer.sign(SHA256.new(message_to_sign.encode('utf8'))) signature = { 'keyId': '%s#main-key' % 
sender.actor, 'algorithm': 'rsa-sha256', 'headers': '(request-target) host date', 'signature': b64encode(signed_message).decode('utf8'), } signature = ','.join('%s="%s"' % (k, v) for (k, v) in signature.items()) response = requests.post( destination, data=json.dumps(activity), headers={ 'Date': now, 'Signature': signature, 'Host': 'https://%s' % DOMAIN, 'Content-Type': 'application/activity+json; charset=utf-8', }, ) if not response.ok: response.raise_for_status() incoming.handle_response(response)
[ 7061, 6, 3758, 503, 3842, 12984, 6218, 705, 7061, 198, 6738, 2779, 2414, 1330, 275, 2414, 268, 8189, 198, 6738, 36579, 13, 15202, 9218, 1330, 42319, 198, 6738, 36579, 13, 11712, 1300, 1330, 279, 74, 6359, 16, 62, 1314, 198, 6738, 3657...
2.472107
1,201
from django.shortcuts import render from django.http import HttpResponse from django.template import RequestContext, loader from .models import Question # Create your views here.
[ 6738, 42625, 14208, 13, 19509, 23779, 1330, 8543, 198, 6738, 42625, 14208, 13, 4023, 1330, 367, 29281, 31077, 198, 6738, 42625, 14208, 13, 28243, 1330, 19390, 21947, 11, 40213, 198, 198, 6738, 764, 27530, 1330, 18233, 198, 198, 2, 13610, ...
4
46
from __future__ import division from builtins import str from builtins import range from astropy.utils.misc import isiterable from past.utils import old_div import copy import collections import numpy as np import healpy as hp import astropy.units as u import matplotlib.pyplot as plt import matplotlib as mpl from scipy.stats import poisson from astropy.convolution import Gaussian2DKernel from astropy.convolution import convolve_fft as convolve from astropy.coordinates import Angle from threeML.plugin_prototype import PluginPrototype from threeML.utils.statistics.gammaln import logfactorial from threeML.parallel import parallel_client from threeML.io.logging import setup_logger log = setup_logger(__name__) log.propagate = False from tqdm.auto import tqdm from astromodels import Parameter from hawc_hal.maptree import map_tree_factory from hawc_hal.maptree.map_tree import MapTree from hawc_hal.maptree.data_analysis_bin import DataAnalysisBin from hawc_hal.response import hawc_response_factory from hawc_hal.convolved_source import ConvolvedPointSource, \ ConvolvedExtendedSource3D, ConvolvedExtendedSource2D, ConvolvedSourcesContainer from hawc_hal.healpix_handling import FlatSkyToHealpixTransform from hawc_hal.healpix_handling import SparseHealpix from hawc_hal.healpix_handling import get_gnomonic_projection from hawc_hal.psf_fast import PSFConvolutor from hawc_hal.log_likelihood import log_likelihood from hawc_hal.util import ra_to_longitude def get_saturated_model_likelihood(self): """ Returns the likelihood for the saturated model (i.e. a model exactly equal to observation - background). :return: """ return sum(self._saturated_model_like_per_maptree.values()) def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=None): """ Set the active analysis bins to use during the analysis. It can be used in two ways: - Specifying a range: if the response and the maptree allows it, you can specify a minimum id and a maximum id number. 
This only works if the analysis bins are numerical, like in the normal fHit analysis. For example: > set_active_measurement(bin_id_min=1, bin_id_max=9) - Specifying a list of bins as strings. This is more powerful, as allows to select any bins, even non-contiguous bins. For example: > set_active_measurement(bin_list=[list]) :param bin_id_min: minimum bin (only works for fHit analysis. For the others, use bin_list) :param bin_id_max: maximum bin (only works for fHit analysis. For the others, use bin_list) :param bin_list: a list of analysis bins to use :return: None """ # Check for legal input if bin_id_min is not None: assert bin_id_max is not None, ( "If you provide a minimum bin, you also need to provide a maximum bin." ) # Make sure they are integers bin_id_min = int(bin_id_min) bin_id_max = int(bin_id_max) self._active_planes = [] for this_bin in range(bin_id_min, bin_id_max + 1): this_bin = str(this_bin) if this_bin not in self._all_planes: raise ValueError(f"Bin {this_bin} is not contained in this maptree.") self._active_planes.append(this_bin) else: assert bin_id_max is None, ( "If you provie a maximum bin, you also need to provide a minimum bin." ) assert bin_list is not None self._active_planes = [] for this_bin in bin_list: if not this_bin in self._all_planes: raise ValueError(f"Bin {this_bin} is not contained in this maptree.") self._active_planes.append(this_bin) if self._likelihood_model: self.set_model( self._likelihood_model ) def display(self, verbose=False): """ Prints summary of the current object content. 
""" log.info("Region of Interest: ") log.info("-------------------") self._roi.display() log.info("") log.info("Flat sky projection: ") log.info("--------------------") log.info( f"Width x height {self._flat_sky_projection.npix_width} x {self._flat_sky_projection.npix_height} px" ) #log.info("Width x height: %s x %s px" % (self._flat_sky_projection.npix_width, # self._flat_sky_projection.npix_height)) log.info(f"Pixel sizes: {self._flat_sky_projection.pixel_size} deg") #log.info("Pixel sizes: %s deg" % self._flat_sky_projection.pixel_size) log.info("") log.info("Response: ") log.info("---------") self._response.display(verbose) log.info("") log.info("Map Tree: ") log.info("----------") self._maptree.display() log.info("") #log.info("Active energy/nHit planes ({}):".format(len(self._active_planes))) log.info(f"Active energy/nHit planes ({len(self._active_planes)}):") log.info("-------------------------------") log.info(self._active_planes) def set_model(self, likelihood_model_instance): """ Set the model to be used in the joint minimization. Must be a LikelihoodModel instance. 
""" self._likelihood_model = likelihood_model_instance # Reset self._convolved_point_sources.reset() self._convolved_ext_sources.reset() # For each point source in the model, build the convolution class for source in list(self._likelihood_model.point_sources.values()): this_convolved_point_source = ConvolvedPointSource(source, self._response, self._flat_sky_projection) self._convolved_point_sources.append(this_convolved_point_source) # Samewise for extended sources ext_sources = list(self._likelihood_model.extended_sources.values()) # NOTE: ext_sources evaluate to False if empty if ext_sources: # We will need to convolve self._setup_psf_convolutors() for source in ext_sources: if source.spatial_shape.n_dim == 2: this_convolved_ext_source = ConvolvedExtendedSource2D(source, self._response, self._flat_sky_projection) else: this_convolved_ext_source = ConvolvedExtendedSource3D(source, self._response, self._flat_sky_projection) self._convolved_ext_sources.append(this_convolved_ext_source) def get_excess_background(self, ra, dec, radius): """ Calculates area, excess (data - background) and model counts of source at different distance from the source. :param: radius: radial distance away from the center (degrees). :returns: tuple of numpy.ndarrays for areas, excess, model, and background this information is used in the get_radial_profile function. 
""" radius_radians = np.deg2rad(radius) total_counts = np.zeros(len(self._active_planes), dtype=float) background = np.zeros_like(total_counts) observation = np.zeros_like(total_counts) model = np.zeros_like(total_counts) signal = np.zeros_like(total_counts) area = np.zeros_like(total_counts) n_point_sources = self._likelihood_model.get_number_of_point_sources() n_ext_sources = self._likelihood_model.get_number_of_extended_sources() longitude = ra_to_longitude(ra) latitude = dec center = hp.ang2vec(longitude, latitude, lonlat=True) for i, energy_id in enumerate(self._active_planes): data_analysis_bin = self._maptree[energy_id] this_nside = data_analysis_bin.observation_map.nside pixels_at_radius = hp.query_disc( this_nside, center, radius_radians, inclusive=False, ) # calculate the areas per bin by the product # of pixel area by the number of pixels at each radial bin area[i] = hp.nside2pixarea(this_nside)*pixels_at_radius.shape[0] # NOTE: select active pixels according to each radial bin bin_active_pixel_indexes = np.searchsorted(self._active_pixels[energy_id], pixels_at_radius) # obtain the excess, background, and expected excess at each radial bin data = data_analysis_bin.observation_map.as_partial() bkg = data_analysis_bin.background_map.as_partial() mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial() bin_data = np.array([data[i] for i in bin_active_pixel_indexes]) bin_bkg = np.array([bkg[i] for i in bin_active_pixel_indexes]) bin_model = np.array([mdl[i] for i in bin_active_pixel_indexes]) this_data_tot = np.sum(bin_data) this_bkg_tot = np.sum(bin_bkg) this_model_tot = np.sum(bin_model) background[i] = this_bkg_tot observation[i] = this_data_tot model[i] = this_model_tot signal[i] = this_data_tot - this_bkg_tot return area, signal, model, background def get_radial_profile( self, ra, dec, active_planes=None, max_radius=3.0, n_radial_bins=30, model_to_subtract=None, subtract_model_from_model=False, ): """ Calculates radial profiles 
of data - background & model. :param ra: R.A. of origin for radial profile. :param dec: Declination of origin of radial profile. :param active_planes: List of analysis over which to average; if None, use HAWC default (bins 1-9). :param: max_radius: Radius up to which the radial profile is evaluated; for the disk to calculate the gamma/hadron weights (Default: 3.0). :param n_radial_bins: Number of bins for the radial profile (Default: 30). :param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None). :param subtract_model_from_model: If True and model_to_subtract is not None, subtract model from model too (Defalt: False). :return: np.arrays with the radii, model profile, data profile, data uncertainty, and list of analysis bins used. """ # default is to use all active bins if active_planes is None: active_planes = self._active_planes # Make sure we use bins with data good_planes = [plane_id in active_planes for plane_id in self._active_planes] plane_ids = set(active_planes) & set(self._active_planes) delta_r = 1.0*max_radius/n_radial_bins radii = np.array([delta_r*(r + 0.5) for r in range(0, n_radial_bins)]) # Get area of all pixels in a given circle # The area of each ring is then given by the difference between two # subsequent circe areas. area = np.array( [self.get_excess_background(ra, dec, r + 0.5*delta_r)[0] for r in radii ] ) temp = area[1:] - area[:-1] area[1:] = temp # model # convert 'top hat' excess into 'ring' excesses. 
model = np.array( [self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii] ) temp = model[1:] - model[:-1] model[1:] = temp # signals signal = np.array( [self.get_excess_background(ra, dec, r + 0.5*delta_r)[1] for r in radii] ) temp = signal[1:] - signal[:-1] signal[1:] = temp # backgrounds bkg = np.array( [self.get_excess_background(ra, dec, r + 0.5*delta_r)[3] for r in radii] ) temp = bkg[1:] - bkg[:-1] bkg[1:] = temp counts = signal + bkg if model_to_subtract is not None: this_model = copy.deepcopy(self._likelihood_model) self.set_model(model_to_subtract) model_subtract = np.array( [self.get_excess_background(ra, dec, r + 0.5*delta_r)[2] for r in radii] ) temp = model_subtract[1:] - model_subtract[:-1] model_subtract[1:] = temp signal -= model_subtract if subtract_model_from_model: model -= model_subtract self.set_model(this_model) # NOTE: weights are calculated as expected number of gamma-rays/number of background counts. # here, use max_radius to evaluate the number of gamma-rays/bkg counts. # The weights do not depend on the radius, but fill a matrix anyway so # there's no confusion when multiplying them to the data later. # Weight is normalized (sum of weights over the bins = 1). 
total_excess = np.array( self.get_excess_background(ra, dec, max_radius)[1] )[good_planes] total_model = np.array( self.get_excess_background(ra, dec, max_radius)[2] )[good_planes] total_bkg = np.array( self.get_excess_background(ra, dec, max_radius)[3] )[good_planes] w = np.divide(total_model, total_bkg) weight = np.array([w/np.sum(w) for r in radii]) # restric profiles to the user-specified analysis bins area = area[:, good_planes] signal = signal[:, good_planes] model = model[:, good_planes] counts = counts[:, good_planes] bkg = bkg[:, good_planes] # average over the analysis bins excess_data = np.average(signal/area, weights=weight, axis=1) excess_error = np.sqrt(np.sum(counts*weight*weight/(area*area), axis=1)) excess_model = np.average(model/area, weights=weight, axis=1) return radii, excess_model, excess_data, excess_error, sorted(plane_ids) def plot_radial_profile( self, ra, dec, active_planes=None, max_radius=3.0, n_radial_bins=30, model_to_subtract=None, subtract_model_from_model=False ): """ Plots radial profiles of data - background & model. :param ra: R.A. of origin for radial profile. :param dec: Declination of origin of radial profile. :param active_planes: List of analysis bins over which to average; if None, use HAWC default (bins 1-9). :param max_radius: Radius up to which the radial profile is evaluated; also used as the radius for the disk to calculate the gamma/hadron weights. Default: 3.0 :param model_to_subtract: Another model that is to be subtracted from the data excess (Default: None). :param subtract_model_from_model: If True and model_to_subtract is not None, subtract from model too (Default: False). :return: plot of data - background vs model radial profiles. 
""" ( radii, excess_model, excess_data, excess_error, plane_ids, ) = self.get_radial_profile( ra, dec, active_planes, max_radius, n_radial_bins, model_to_subtract, subtract_model_from_model, ) #font = { # "family":"serif", # "weight":"regular", # "size":12 #} #mpl.rc("font", **font) fig, ax = plt.subplots(figsize=(10,8)) plt.errorbar( radii, excess_data, yerr=excess_error, capsize=0, color="black", label="Excess (data-bkg)", fmt=".", ) plt.plot(radii, excess_model, color="red", label="Model") plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1) plt.axhline(0, color="deepskyblue", linestyle="--") x_limits=[0, max_radius] plt.xlim(x_limits) plt.ylabel(r"Apparent Radial Excess [sr$^{-1}$]") plt.xlabel( f"Distance from source at ({ra:0.2f} $^{{\circ}}$, {dec:0.2f} $^{{\circ}}$)" ) if len(plane_ids) == 1: title = f"Radial Profile, bin {plane_ids[0]}" else: tmptitle=f"Radial Profile, bins \n{plane_ids}" width=70 title="\n".join( tmptitle[i:i+width] for i in range(0, len(tmptitle), width) ) title=tmptitle plt.title(title) ax.grid(True) try: plt.tight_layout() except: pass return fig def display_spectrum(self): """ Make a plot of the current spectrum and its residuals (integrated over space) :return: a matplotlib.Figure """ n_point_sources = self._likelihood_model.get_number_of_point_sources() n_ext_sources = self._likelihood_model.get_number_of_extended_sources() total_counts = np.zeros(len(self._active_planes), dtype=float) total_model = np.zeros_like(total_counts) model_only = np.zeros_like(total_counts) net_counts = np.zeros_like(total_counts) yerr_low = np.zeros_like(total_counts) yerr_high = np.zeros_like(total_counts) for i, energy_id in enumerate(self._active_planes): data_analysis_bin = self._maptree[energy_id] this_model_map_hpx = self._get_expectation(data_analysis_bin, energy_id, n_point_sources, n_ext_sources) this_model_tot = np.sum(this_model_map_hpx) this_data_tot = np.sum(data_analysis_bin.observation_map.as_partial()) this_bkg_tot = 
np.sum(data_analysis_bin.background_map.as_partial()) total_counts[i] = this_data_tot net_counts[i] = this_data_tot - this_bkg_tot model_only[i] = this_model_tot this_wh_model = this_model_tot + this_bkg_tot total_model[i] = this_wh_model if this_data_tot >= 50.0: # Gaussian limit # Under the null hypothesis the data are distributed as a Gaussian with mu = model # and sigma = sqrt(model) # NOTE: since we neglect the background uncertainty, the background is part of the # model yerr_low[i] = np.sqrt(this_data_tot) yerr_high[i] = np.sqrt(this_data_tot) else: # Low-counts # Under the null hypothesis the data are distributed as a Poisson distribution with # mean = model, plot the 68% confidence interval (quantile=[0.16,1-0.16]). # NOTE: since we neglect the background uncertainty, the background is part of the # model quantile = 0.16 mean = this_wh_model y_low = poisson.isf(1-quantile, mu=mean) y_high = poisson.isf(quantile, mu=mean) yerr_low[i] = mean-y_low yerr_high[i] = y_high-mean residuals = old_div((total_counts - total_model), np.sqrt(total_model)) residuals_err = [old_div(yerr_high, np.sqrt(total_model)), old_div(yerr_low, np.sqrt(total_model))] yerr = [yerr_high, yerr_low] return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err) def get_log_like(self): """ Return the value of the log-likelihood with the current values for the parameters """ n_point_sources = self._likelihood_model.get_number_of_point_sources() n_ext_sources = self._likelihood_model.get_number_of_extended_sources() # Make sure that no source has been added since we filled the cache assert (n_point_sources == self._convolved_point_sources.n_sources_in_cache and n_ext_sources == self._convolved_ext_sources.n_sources_in_cache), ( "The number of sources has changed. Please re-assign the model to the plugin." 
) #assert n_point_sources == self._convolved_point_sources.n_sources_in_cache and \ # n_ext_sources == self._convolved_ext_sources.n_sources_in_cache, \ # "The number of sources has changed. Please re-assign the model to the plugin." # This will hold the total log-likelihood total_log_like = 0 for bin_id in self._active_planes: data_analysis_bin = self._maptree[bin_id] this_model_map_hpx = self._get_expectation(data_analysis_bin, bin_id, n_point_sources, n_ext_sources) # Now compare with observation bkg_renorm = list(self._nuisance_parameters.values())[0].value obs = data_analysis_bin.observation_map.as_partial() # type: np.array bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm # type: np.array this_pseudo_log_like = log_likelihood(obs, bkg, this_model_map_hpx) total_log_like += this_pseudo_log_like - self._log_factorials[bin_id] \ - self._saturated_model_like_per_maptree[bin_id] return total_log_like def write(self, response_file_name, map_tree_file_name): """ Write this dataset to disk in HDF format. :param response_file_name: filename for the response :param map_tree_file_name: filename for the map tree :return: None """ self._maptree.write(map_tree_file_name) self._response.write(response_file_name) def get_simulated_dataset(self, name): """ Return a simulation of this dataset using the current model with current parameters. 
:param name: new name for the new plugin instance :return: a HAL instance """ # First get expectation under the current model and store them, if we didn't do it yet if self._clone is None: n_point_sources = self._likelihood_model.get_number_of_point_sources() n_ext_sources = self._likelihood_model.get_number_of_extended_sources() expectations = collections.OrderedDict() for bin_id in self._maptree: data_analysis_bin = self._maptree[bin_id] if bin_id not in self._active_planes: expectations[bin_id] = None else: expectations[bin_id] = self._get_expectation(data_analysis_bin, bin_id, n_point_sources, n_ext_sources) + \ data_analysis_bin.background_map.as_partial() if parallel_client.is_parallel_computation_active(): # Do not clone, as the parallel environment already makes clones clone = self else: clone = copy.deepcopy(self) self._clone = (clone, expectations) # Substitute the observation and background for each data analysis bin for bin_id in self._clone[0]._maptree: data_analysis_bin = self._clone[0]._maptree[bin_id] if bin_id not in self._active_planes: continue else: # Active plane. Generate new data expectation = self._clone[1][bin_id] new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten() # Substitute data data_analysis_bin.observation_map.set_new_values(new_data) # Now change name and return self._clone[0]._name = name # Adjust the name of the nuisance parameter old_name = list(self._clone[0]._nuisance_parameters.keys())[0] new_name = old_name.replace(self.name, name) self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop(old_name) # Recompute biases self._clone[0]._compute_likelihood_biases() return self._clone[0] def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): """ Make a figure containing 4 maps for each active analysis bins with respectively model, data, background and residuals. The model, data and residual maps are smoothed, the background map is not. 
:param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel, for all but background maps :param display_colorbar: whether or not to display the colorbar in the residuals :return: a matplotlib.Figure """ n_point_sources = self._likelihood_model.get_number_of_point_sources() n_ext_sources = self._likelihood_model.get_number_of_extended_sources() # This is the resolution (i.e., the size of one pixel) of the image resolution = 3.0 # arcmin # The image is going to cover the diameter plus 20% padding xsize = self._get_optimal_xsize(resolution) n_active_planes = len(self._active_planes) n_columns = 4 fig, subs = plt.subplots(n_active_planes, n_columns, figsize=(2.7 * n_columns, n_active_planes * 2), squeeze=False) prog_bar = tqdm(total = len(self._active_planes), desc="Smoothing planes") images = ['None'] * n_columns for i, plane_id in enumerate(self._active_planes): data_analysis_bin = self._maptree[plane_id] # Get the center of the projection for this plane this_ra, this_dec = self._roi.ra_dec_center # Make a full healpix map for a second whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense() # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. 
We need to fix that: longitude = ra_to_longitude(this_ra) # Declination is already between -90 and 90 latitude = this_dec # Background and excess maps bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True) # Make all the projections: model, excess, background, residuals proj_model = self._represent_healpix_map(fig, whole_map, longitude, latitude, xsize, resolution, smoothing_kernel_sigma) # Here we removed the background otherwise nothing is visible # Get background (which is in a way "part of the model" since the uncertainties are neglected) proj_data = self._represent_healpix_map(fig, bkg_subtracted, longitude, latitude, xsize, resolution, smoothing_kernel_sigma) # No smoothing for this one (because a goal is to check it is smooth). proj_bkg = self._represent_healpix_map(fig, background_map, longitude, latitude, xsize, resolution, None) proj_residuals = proj_data - proj_model # Common color scale range for model and excess maps vmin = min(np.nanmin(proj_model), np.nanmin(proj_data)) vmax = max(np.nanmax(proj_model), np.nanmax(proj_data)) # Plot model images[0] = subs[i][0].imshow(proj_model, origin='lower', vmin=vmin, vmax=vmax) subs[i][0].set_title('model, bin {}'.format(data_analysis_bin.name)) # Plot data map images[1] = subs[i][1].imshow(proj_data, origin='lower', vmin=vmin, vmax=vmax) subs[i][1].set_title('excess, bin {}'.format(data_analysis_bin.name)) # Plot background map. 
images[2] = subs[i][2].imshow(proj_bkg, origin='lower') subs[i][2].set_title('background, bin {}'.format(data_analysis_bin.name)) # Now residuals images[3] = subs[i][3].imshow(proj_residuals, origin='lower') subs[i][3].set_title('residuals, bin {}'.format(data_analysis_bin.name)) # Remove numbers from axis for j in range(n_columns): subs[i][j].axis('off') if display_colorbar: for j, image in enumerate(images): plt.colorbar(image, ax=subs[i][j]) prog_bar.update(1) fig.set_tight_layout(True) return fig def display_stacked_image(self, smoothing_kernel_sigma=0.5): """ Display a map with all active analysis bins stacked together. :param smoothing_kernel_sigma: sigma for the Gaussian smoothing kernel to apply :return: a matplotlib.Figure instance """ # This is the resolution (i.e., the size of one pixel) of the image in arcmin resolution = 3.0 # The image is going to cover the diameter plus 20% padding xsize = self._get_optimal_xsize(resolution) active_planes_bins = [self._maptree[x] for x in self._active_planes] # Get the center of the projection for this plane this_ra, this_dec = self._roi.ra_dec_center # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. 
We need to fix that: longitude = ra_to_longitude(this_ra) # Declination is already between -90 and 90 latitude = this_dec total = None for i, data_analysis_bin in enumerate(active_planes_bins): # Plot data background_map = data_analysis_bin.background_map.as_dense() this_data = data_analysis_bin.observation_map.as_dense() - background_map idx = np.isnan(this_data) # this_data[idx] = hp.UNSEEN if i == 0: total = this_data else: # Sum only when there is no UNSEEN, so that the UNSEEN pixels will stay UNSEEN total[~idx] += this_data[~idx] delta_coord = (self._roi.data_radius.to("deg").value * 2.0) / 15.0 fig, sub = plt.subplots(1, 1) proj = self._represent_healpix_map(fig, total, longitude, latitude, xsize, resolution, smoothing_kernel_sigma) cax = sub.imshow(proj, origin='lower') fig.colorbar(cax) sub.axis('off') hp.graticule(delta_coord, delta_coord) return fig def inner_fit(self): """ This is used for the profile likelihood. Keeping fixed all parameters in the LikelihoodModel, this method minimize the logLike over the remaining nuisance parameters, i.e., the parameters belonging only to the model for this particular detector. If there are no nuisance parameters, simply return the logLike value. 
""" return self.get_log_like() def get_number_of_data_points(self): """ Return the number of active bins across all active analysis bins :return: number of active bins """ n_points = 0 for bin_id in self._maptree: n_points += self._maptree[bin_id].observation_map.as_partial().shape[0] return n_points def _get_model_map(self, plane_id, n_pt_src, n_ext_src): """ This function returns a model map for a particular bin """ if plane_id not in self._active_planes: raise ValueError( f"{plane_id} not a plane in the current model" ) model_map = SparseHealpix(self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src), self._active_pixels[plane_id], self._maptree[plane_id].observation_map.nside) return model_map def _get_excess(self, data_analysis_bin, all_maps=True): """ This function returns the excess counts for a particular bin if all_maps=True, also returns the data and background maps """ data_map = data_analysis_bin.observation_map.as_dense() bkg_map = data_analysis_bin.background_map.as_dense() excess = data_map - bkg_map if all_maps: return excess, data_map, bkg_map return excess def _write_a_map(self, file_name, which, fluctuate=False, return_map=False): """ This writes either a model map or a residual map, depending on which one is preferred """ which = which.lower() assert which in ['model', 'residual'] n_pt = self._likelihood_model.get_number_of_point_sources() n_ext = self._likelihood_model.get_number_of_extended_sources() map_analysis_bins = collections.OrderedDict() if fluctuate: poisson_set = self.get_simulated_dataset("model map") for plane_id in self._active_planes: data_analysis_bin = self._maptree[plane_id] bkg = data_analysis_bin.background_map obs = data_analysis_bin.observation_map if fluctuate: model_excess = poisson_set._maptree[plane_id].observation_map \ - poisson_set._maptree[plane_id].background_map else: model_excess = self._get_model_map(plane_id, n_pt, n_ext) if which == 'residual': bkg += model_excess if which == 'model': 
obs = model_excess + bkg this_bin = DataAnalysisBin(plane_id, observation_hpx_map=obs, background_hpx_map=bkg, active_pixels_ids=self._active_pixels[plane_id], n_transits=data_analysis_bin.n_transits, scheme='RING') map_analysis_bins[plane_id] = this_bin # save the file new_map_tree = MapTree(map_analysis_bins, self._roi) new_map_tree.write(file_name) if return_map: return new_map_tree def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False): """ This function writes the model map to a file. The interface is based off of HAWCLike for consistency """ if test_return_map: log.warning("test_return_map=True should only be used for testing purposes!") return self._write_a_map(file_name, 'model', poisson_fluctuate, test_return_map) def write_residual_map(self, file_name, test_return_map=False): """ This function writes the residual map to a file. The interface is based off of HAWCLike for consistency """ if test_return_map: log.warning("test_return_map=True should only be used for testing purposes!") return self._write_a_map(file_name, 'residual', False, test_return_map)
[ 6738, 11593, 37443, 834, 1330, 7297, 198, 198, 6738, 3170, 1040, 1330, 965, 198, 6738, 3170, 1040, 1330, 2837, 198, 6738, 6468, 28338, 13, 26791, 13, 44374, 1330, 318, 2676, 540, 198, 6738, 1613, 13, 26791, 1330, 1468, 62, 7146, 198, ...
2.140248
16,756
# File: sshcustodian/sshcustodian.py # -*- coding: utf-8 -*- # Python 2/3 Compatibility from __future__ import (unicode_literals, division, absolute_import, print_function) from six.moves import filterfalse """ This module creates a subclass of the main Custodian class in the Custodian project (github.com/materialsproject/custodian), which is a wrapper that manages jobs running on computing clusters. The Custodian module is part of The Materials Project (materialsproject.org/). This subclass adds the functionality to copy the temporary directory created via monty to the scratch partitions on slave compute nodes, provided that the cluster's filesystem is configured in this way. The implementation invokes a subprocess to utilize the ssh executable installed on the cluster, so it is not particularly elegant or platform independent, nor is this solution likely to be general to all clusters. This is why this modification has not been submitted as a pull request to the main Custodian project. """ # Import modules import logging import subprocess import sys import datetime import time import os import re from itertools import islice, groupby from socket import gethostname from monty.tempfile import ScratchDir from monty.shutil import gzip_dir from monty.json import MontyEncoder from monty.serialization import dumpfn from custodian.custodian import Custodian from custodian.custodian import CustodianError # Module-level logger logger = logging.getLogger(__name__)
[ 2, 9220, 25, 26678, 66, 436, 375, 666, 14, 45824, 66, 436, 375, 666, 13, 9078, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 11361, 362, 14, 18, 46021, 198, 6738, 11593, 37443, 834, 1330, 357, 46903...
3.665049
412
''' Sherlock: Distributed Locks with a choice of backend ==================================================== :mod:`sherlock` is a library that provides easy-to-use distributed inter-process locks and also allows you to choose a backend of your choice for lock synchronization. |Build Status| |Coverage Status| .. |Build Status| image:: https://travis-ci.org/vaidik/sherlock.png :target: https://travis-ci.org/vaidik/sherlock/ .. |Coverage Status| image:: https://coveralls.io/repos/vaidik/incoming/badge.png :target: https://coveralls.io/r/vaidik/incoming Overview -------- When you are working with resources which are accessed by multiple services or distributed services, more than often you need some kind of locking mechanism to make it possible to access some resources at a time. Distributed Locks or Mutexes can help you with this. :mod:`sherlock` provides the exact same facility, with some extra goodies. It provides an easy-to-use API that resembles standard library's `threading.Lock` semantics. Apart from this, :mod:`sherlock` gives you the flexibilty of using a backend of your choice for managing locks. :mod:`sherlock` also makes it simple for you to extend :mod:`sherlock` to use backends that are not supported. Features ++++++++ * API similar to standard library's `threading.Lock`. * Support for With statement, to cleanly acquire and release locks. * Backend agnostic: supports `Redis`_, `Memcached`_ and `Etcd`_ as choice of backends. * Extendable: can be easily extended to work with any other of backend of choice by extending base lock class. Read :ref:`extending`. .. _Redis: http://redis.io .. _Memcached: http://memcached.org .. _Etcd: http://github.com/coreos/etcd Supported Backends and Client Libraries +++++++++++++++++++++++++++++++++++++++ Following client libraries are supported for every supported backend: * Redis: `redis-py`_ * Memcached: `pylibmc`_ * Etcd: `python-etcd`_ .. _redis-py: http://github.com .. _pylibmc: http://github.com .. 
_python-etcd: https://github.com/jplana/python-etcd As of now, only the above mentioned libraries are supported. Although :mod:`sherlock` takes custom client objects so that you can easily provide settings that you want to use for that backend store, but :mod:`sherlock` also checks if the provided client object is an instance of the supported clients and accepts client objects which pass this check, even if the APIs are the same. :mod:`sherlock` might get rid of this issue later, if need be and if there is a demand for that. Installation ------------ Installation is simple. .. code:: bash pip install sherlock .. note:: :mod:`sherlock` will install all the client libraries for all the supported backends. Basic Usage ----------- :mod:`sherlock` is simple to use as at the API and semantics level, it tries to conform to standard library's :mod:`threading.Lock` APIs. .. code-block:: python import sherlock from sherlock import Lock # Configure :mod:`sherlock`'s locks to use Redis as the backend, # never expire locks and retry acquiring an acquired lock after an # interval of 0.1 second. sherlock.configure(backend=sherlock.backends.REDIS, expire=None, retry_interval=0.1) # Note: configuring sherlock to use a backend does not limit you # another backend at the same time. You can import backend specific locks # like RedisLock, MCLock and EtcdLock and use them just the same way you # use a generic lock (see below). In fact, the generic Lock provided by # sherlock is just a proxy that uses these specific locks under the hood. # acquire a lock called my_lock lock = Lock('my_lock') # acquire a blocking lock lock.acquire() # check if the lock has been acquired or not lock.locked() == True # release the lock lock.release() Support for ``with`` statement ++++++++++++++++++++++++++++++ .. code-block:: python # using with statement with Lock('my_lock'): # do something constructive with your locked resource here pass Blocking and Non-blocking API +++++++++++++++++++++++++++++ .. 
code-block:: python # acquire non-blocking lock lock1 = Lock('my_lock') lock2 = Lock('my_lock') # successfully acquire lock1 lock1.acquire() # try to acquire lock in a non-blocking way lock2.acquire(False) == True # returns False # try to acquire lock in a blocking way lock2.acquire() # blocks until lock is acquired to timeout happens Using two backends at the same time +++++++++++++++++++++++++++++++++++ Configuring :mod:`sherlock` to use a backend does not limit you from using another backend at the same time. You can import backend specific locks like RedisLock, MCLock and EtcdLock and use them just the same way you use a generic lock (see below). In fact, the generic Lock provided by :mod:`sherlock` is just a proxy that uses these specific locks under the hood. .. code-block:: python import sherlock from sherlock import Lock # Configure :mod:`sherlock`'s locks to use Redis as the backend sherlock.configure(backend=sherlock.backends.REDIS) # Acquire a lock called my_lock, this lock uses Redis lock = Lock('my_lock') # Now acquire locks in Memcached from sherlock import MCLock mclock = MCLock('my_mc_lock') mclock.acquire() Tests ----- To run all the tests (including integration), you have to make sure that all the databases are running. Make sure all the services are running: .. code:: bash # memcached memcached # redis-server redis-server # etcd (etcd is probably not available as package, here is the simplest way # to run it). wget https://github.com/coreos/etcd/releases/download/<version>/etcd-<version>-<platform>.tar.gz tar -zxvf etcd-<version>-<platform>.gz ./etcd-<version>-<platform>/etcd Run tests like so: .. code:: bash python setup.py test Documentation ------------- Available `here`_. .. _here: http://sher-lock.readthedocs.org Roadmap ------- * Support for `Zookeeper`_ as backend. * Support for `Gevent`_, `Multithreading`_ and `Multiprocessing`_. .. _Zookeeper: http://zookeeper.apache.org/ .. _Gevent: http://www.gevent.org/ .. 
_Multithreading: http://docs.python.org/2/library/multithreading.html .. _Multiprocessing: http://docs.python.org/2/library/multiprocessing.html License ------- See `LICENSE`_. **In short**: This is an open-source project and exists in the public domain for anyone to modify and use it. Just be nice and attribute the credits wherever you can. :) .. _LICENSE: http://github.com/vaidik/sherlock/blob/master/LICENSE.rst Distributed Locking in Other Languages -------------------------------------- * NodeJS - https://github.com/thedeveloper/warlock ''' import etcd import pylibmc import redis def configure(**kwargs): ''' Set basic global configuration for :mod:`sherlock`. :param backend: global choice of backend. This backend will be used for managing locks by :class:`sherlock.Lock` class objects. :param client: global client object to use to connect with backend store. This client object will be used to connect to the backend store by :class:`sherlock.Lock` class instances. The client object must be a valid object of the client library. If the backend has been configured using the `backend` parameter, the custom client object must belong to the same library that is supported for that backend. If the backend has not been set, then the custom client object must be an instance of a valid supported client. In that case, :mod:`sherlock` will set the backend by introspecting the type of provided client object. :param str namespace: provide global namespace :param float expire: provide global expiration time. If expicitly set to `None`, lock will not expire. :param float timeout: provide global timeout period :param float retry_interval: provide global retry interval Basic Usage: >>> import sherlock >>> from sherlock import Lock >>> >>> # Configure sherlock to use Redis as the backend and the timeout for >>> # acquiring locks equal to 20 seconds. 
>>> sherlock.configure(timeout=20, backend=sherlock.backends.REDIS) >>> >>> import redis >>> redis_client = redis.StrictRedis(host='X.X.X.X', port=6379, db=1) >>> sherlock.configure(client=redis_client) ''' _configuration.update(**kwargs) # Create a backends singleton backends = _Backends() # Create a configuration singleton _configuration = _Configuration() # Import important Lock classes from . import lock from .lock import *
[ 7061, 6, 198, 28782, 5354, 25, 4307, 6169, 406, 3320, 351, 257, 3572, 286, 30203, 198, 10052, 4770, 1421, 198, 198, 25, 4666, 25, 63, 82, 372, 5354, 63, 318, 257, 5888, 326, 3769, 2562, 12, 1462, 12, 1904, 9387, 987, 12, 14681, 19...
3.049761
2,934
from typing import List, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import random from skimage.draw import random_shapes import os import json def get_masks_for_training( mask_shapes: List[Tuple] = [(1, 128, 128), (1, 64, 64), (1, 32, 32), (1, 16, 16), (1, 8, 8), (4096,), (365,)], device: str = 'cpu', add_batch_size: bool = False, p_random_mask: float = 0.3) -> List[torch.Tensor]: ''' Method returns random masks similar to 3.2. of the paper :param mask_shapes: (List[Tuple]) Shapes of the features generated by the vgg16 model :param device: (str) Device to store tensor masks :param add_batch_size: (bool) If true a batch size is added to each mask :param p_random_mask: (float) Probability that a random mask is generated else no mask is utilized :return: (List[torch.Tensor]) Generated masks for each feature tensor ''' # Select layer where no masking is used. Every output from the deeper layers get mapped out. Every higher layer gets # masked by a random shape selected_stage = random.choice(list(range(len(mask_shapes))) + [0, 1]) # Make masks masks = [] # Apply spatial varying masks spatial_varying_masks = (np.random.rand() < p_random_mask) \ and (selected_stage < (len(mask_shapes) - 1)) \ and (selected_stage > 0) # Init random mask if spatial_varying_masks: random_mask = random_shapes(tuple(reversed(mask_shapes))[selected_stage + 1][1:], min_shapes=1, max_shapes=4, min_size=min(8, tuple(reversed(mask_shapes))[selected_stage + 1][1] // 2), allow_overlap=True)[0][:, :, 0] # Random mask to torch tensor random_mask = torch.tensor(random_mask, dtype=torch.float32, device=device)[None, :, :] # Change range of mask to [0, 1] random_mask = (random_mask == 255.0).float() # Loop over all shapes for index, mask_shape in enumerate(reversed(mask_shapes)): # Case if spatial varying masks are applied after selected stage if spatial_varying_masks: if index == selected_stage: masks.append(torch.ones(mask_shape, 
dtype=torch.float32, device=device)) elif index < selected_stage: masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device)) else: masks.append(F.interpolate(random_mask[None], size=mask_shape[1:], mode='nearest')[0]) # Case if only one stage is selected else: if index == selected_stage: masks.append(torch.ones(mask_shape, dtype=torch.float32, device=device)) else: masks.append(torch.zeros(mask_shape, dtype=torch.float32, device=device)) # Add batch size dimension if add_batch_size: for index in range(len(masks)): masks[index] = masks[index].unsqueeze(dim=0) # Reverse order of masks to match the features of the vgg16 model masks.reverse() return masks def normalize_0_1_batch(input: torch.tensor) -> torch.tensor: ''' Normalize a given tensor to a range of [-1, 1] :param input: (Torch tensor) Input tensor :return: (Torch tensor) Normalized output tensor ''' input_flatten = input.view(input.shape[0], -1) return ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / ( torch.max(input_flatten, dim=1)[0][:, None, None, None] - torch.min(input_flatten, dim=1)[0][:, None, None, None])) def normalize_m1_1_batch(input: torch.tensor) -> torch.tensor: ''' Normalize a given tensor to a range of [-1, 1] :param input: (Torch tensor) Input tensor :return: (Torch tensor) Normalized output tensor ''' input_flatten = input.view(input.shape[0], -1) return 2 * ((input - torch.min(input_flatten, dim=1)[0][:, None, None, None]) / ( torch.max(input_flatten, dim=1)[0][:, None, None, None] - torch.min(input_flatten, dim=1)[0][:, None, None, None])) - 1
[ 6738, 19720, 1330, 7343, 11, 309, 29291, 11, 4479, 198, 198, 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 11748, 28034, 13, 20471, 13, 45124, 355, 376, 198, 11748, 299, 32152, 355, 45941, 198, 11748, 4738, 198, 6738, ...
2.295233
1,846
import math from typing import Dict, List, Tuple, Union from EasyMCDM.models.MCDM import MCDM # Instant-Runoff Multicriteria Optimization (IRMO)
[ 11748, 10688, 201, 198, 6738, 19720, 1330, 360, 713, 11, 7343, 11, 309, 29291, 11, 4479, 201, 198, 201, 198, 6738, 16789, 44, 8610, 44, 13, 27530, 13, 44, 8610, 44, 1330, 337, 8610, 44, 201, 198, 201, 198, 2, 24470, 12, 10987, 236...
2.763636
55
import numpy as np from rpy2.robjects import FloatVector from rpy2.robjects.packages import importr from rpy2 import robjects stats = importr('stats') base = importr('base')
[ 11748, 299, 32152, 355, 45941, 198, 6738, 374, 9078, 17, 13, 22609, 752, 82, 1330, 48436, 38469, 198, 6738, 374, 9078, 17, 13, 22609, 752, 82, 13, 43789, 1330, 1330, 81, 198, 6738, 374, 9078, 17, 1330, 3857, 752, 82, 198, 34242, 796...
3
60
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import unittest from azure.functions.decorators.constants import TIMER_TRIGGER from azure.functions.decorators.core import BindingDirection, DataType from azure.functions.decorators.timer import TimerTrigger
[ 2, 220, 15069, 357, 66, 8, 5413, 10501, 13, 1439, 2489, 10395, 13, 198, 2, 220, 49962, 739, 262, 17168, 13789, 13, 198, 11748, 555, 715, 395, 198, 198, 6738, 35560, 495, 13, 12543, 2733, 13, 12501, 273, 2024, 13, 9979, 1187, 1330, ...
3.517241
87
import socket import threading import time from threading import Thread import utilities as utils import error_handling as check BUFFER_SIZE = 1024 BROADCAST_MAC = "FF:FF:FF:FF:FF:FF"
[ 11748, 17802, 198, 11748, 4704, 278, 198, 11748, 640, 198, 6738, 4704, 278, 1330, 14122, 198, 11748, 20081, 355, 3384, 4487, 198, 11748, 4049, 62, 4993, 1359, 355, 2198, 198, 198, 19499, 45746, 62, 33489, 796, 28119, 198, 11473, 41048, ...
3.189655
58
import json from great_expectations.core.util import convert_to_json_serializable from great_expectations.types import SerializableDictDot, safe_deep_copy from great_expectations.util import deep_filter_properties_iterable
[ 11748, 33918, 198, 198, 6738, 1049, 62, 1069, 806, 602, 13, 7295, 13, 22602, 1330, 10385, 62, 1462, 62, 17752, 62, 46911, 13821, 198, 6738, 1049, 62, 1069, 806, 602, 13, 19199, 1330, 23283, 13821, 35, 713, 35, 313, 11, 3338, 62, 220...
3.515625
64
# #1. Health check # # Ask user for their temperature. # # If the user enters below 35, then output "not too cold" # # If 35 to 37 (inclusive), output "all right" # # If the temperature over 37, then output "possible fever" # user_temp = float(input('What is your temperature?')) if user_temp < 35: print('not too cold?') elif user_temp >= 35 and user_temp <= 37: print('all right') else: # temperature over 37 print('possible fever')
[ 2, 1303, 16, 13, 3893, 2198, 198, 2, 1303, 16981, 2836, 329, 511, 5951, 13, 198, 2, 1303, 1002, 262, 2836, 14170, 2174, 3439, 11, 788, 5072, 366, 1662, 1165, 4692, 1, 198, 2, 1303, 1002, 3439, 284, 5214, 357, 259, 5731, 828, 5072,...
3.118056
144
import json from tests.factories import (NOWSubmissionFactory, MineFactory, NOWClientFactory, NOWApplicationIdentityFactory)
[ 11748, 33918, 198, 198, 6738, 5254, 13, 22584, 1749, 1330, 357, 45669, 7004, 3411, 22810, 11, 11517, 22810, 11, 20229, 11792, 22810, 11, 198, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, 220, ...
2.644068
59
from output.models.nist_data.list_pkg.non_positive_integer.schema_instance.nistschema_sv_iv_list_non_positive_integer_enumeration_2_xsd.nistschema_sv_iv_list_non_positive_integer_enumeration_2 import ( NistschemaSvIvListNonPositiveIntegerEnumeration2, NistschemaSvIvListNonPositiveIntegerEnumeration2Type, ) __all__ = [ "NistschemaSvIvListNonPositiveIntegerEnumeration2", "NistschemaSvIvListNonPositiveIntegerEnumeration2Type", ]
[ 6738, 5072, 13, 27530, 13, 77, 396, 62, 7890, 13, 4868, 62, 35339, 13, 13159, 62, 24561, 62, 41433, 13, 15952, 2611, 62, 39098, 13, 77, 1023, 2395, 2611, 62, 21370, 62, 452, 62, 4868, 62, 13159, 62, 24561, 62, 41433, 62, 268, 6975...
2.554286
175
import time cache = {} print(cache_compute(1, 2)) print(cache_compute(3, 5)) print(cache_compute(3, 5)) print(cache_compute(6, 7)) print(cache_compute(1, 2))
[ 11748, 640, 628, 198, 198, 23870, 796, 23884, 628, 198, 198, 4798, 7, 23870, 62, 5589, 1133, 7, 16, 11, 362, 4008, 198, 4798, 7, 23870, 62, 5589, 1133, 7, 18, 11, 642, 4008, 198, 4798, 7, 23870, 62, 5589, 1133, 7, 18, 11, 642, ...
2.309859
71
from datetime import datetime import jwt from src import ConfigManager secret = ConfigManager.get_config("DL_COOKIE_SECRET_KEY") secure = ConfigManager.get_config("APP_SECURE")
[ 6738, 4818, 8079, 1330, 4818, 8079, 198, 198, 11748, 474, 46569, 198, 198, 6738, 12351, 1330, 17056, 13511, 198, 198, 21078, 796, 17056, 13511, 13, 1136, 62, 11250, 7203, 19260, 62, 34, 15308, 10008, 62, 23683, 26087, 62, 20373, 4943, 1...
3.192982
57
# Copyright 2020 by Roman Khuramshin <mr.linqu@gmail.com>. # All rights reserved. # This file is part of the Intsa Term Client - X2Go terminal client for Windows, # and is released under the "MIT License Agreement". Please see the LICENSE # file that should have been included as part of this package. import logging import threading import os import time import win32print from .Handler import Handler
[ 2, 15069, 12131, 416, 7993, 5311, 333, 321, 1477, 259, 1279, 43395, 13, 2815, 421, 31, 14816, 13, 785, 28401, 198, 2, 1439, 2489, 10395, 13, 198, 2, 770, 2393, 318, 636, 286, 262, 2558, 11400, 35118, 20985, 532, 1395, 17, 5247, 1209...
3.92233
103
import unittest import sys sys.path.append(".") sys.path.insert(0, '..\\') from calculator.simplecalculator import Calculator if __name__ == '__main__': unittest.main()
[ 11748, 555, 715, 395, 198, 11748, 25064, 198, 17597, 13, 6978, 13, 33295, 7203, 19570, 198, 17597, 13, 6978, 13, 28463, 7, 15, 11, 705, 492, 6852, 11537, 198, 6738, 28260, 13, 36439, 9948, 3129, 1352, 1330, 43597, 628, 198, 361, 11593...
2.868852
61
#!/usr/bin/python3 # -*- coding: utf-8 -*- import datetime import sys import subprocess import os from playsound import playsound # ****************************************************************** # Definitionen # ****************************************************************** filename = 'countdown.txt' audiofile = 'ringing.mp3' settimer = 'add.py' stoptimer = 'stop.py' overlay = 'overlay.py' title = "" zeit = "" command = "" path = "" diff = 0 # ****************************************************************** # Funktionen # ****************************************************************** # ****************************************************************** # Main # ****************************************************************** if __name__ == "__main__": main()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 11748, 4818, 8079, 198, 11748, 25064, 198, 11748, 850, 14681, 198, 11748, 28686, 198, 6738, 5341, 633, 1330, 5341, 63...
4.422222
180
# -*- coding: utf-8 -*- ## Author: Aziz Khan ## License: GPL v3 ## Copyright 2017 Aziz Khan <azez.khan__AT__gmail.com> from rest_framework import serializers from portal.models import Matrix, MatrixAnnotation from django.http import HttpRequest
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2235, 6434, 25, 7578, 528, 11356, 198, 2235, 13789, 25, 38644, 410, 18, 198, 2235, 15069, 220, 2177, 7578, 528, 11356, 1279, 6201, 89, 13, 74, 7637, 834, 1404, 834, 14...
3.08642
81
import torch import torch.nn as nn from generator_model import G1, G2 from helper_functions.Blocks import downBlock, Block3x3_leakRelu from helper_functions.ret_image import Interpolate, condAugmentation from helper_functions.initial_weights import weights_init from helper_functions.losses import KLloss, custom_loss from helper_functions.Blocks import upScale, normalBlock, Residual import helper_functions.config as cfg
[ 11748, 28034, 198, 11748, 28034, 13, 20471, 355, 299, 77, 198, 6738, 17301, 62, 19849, 1330, 402, 16, 11, 402, 17, 198, 6738, 31904, 62, 12543, 2733, 13, 45356, 1330, 866, 12235, 11, 9726, 18, 87, 18, 62, 293, 461, 6892, 84, 198, ...
3.525
120
import numpy as np def NDVI(nir,red): ''' # https://eos.com/make-an-analysis/ndvi/ Inputs: nxm numpy arrays NIR reflection in the near-infrared spectrum RED reflection in the red range of the spectrum ''' num = nir-red dom = nir+red ndvi = np.divide(num,dom) ndvi[np.isnan(ndvi)]=0 # Clean array with nan return(ndvi)
[ 11748, 299, 32152, 355, 45941, 198, 198, 4299, 25524, 12861, 7, 32986, 11, 445, 2599, 198, 220, 220, 220, 705, 7061, 198, 220, 220, 220, 1303, 3740, 1378, 68, 418, 13, 785, 14, 15883, 12, 272, 12, 20930, 14, 358, 8903, 14, 220, 19...
2.171429
175
from starlette.applications import Starlette from starlette.responses import JSONResponse from api import workflow import oyaml as yaml app = Starlette(debug=True)
[ 6738, 3491, 21348, 13, 1324, 677, 602, 1330, 2907, 21348, 198, 6738, 3491, 21348, 13, 16733, 274, 1330, 19449, 31077, 198, 6738, 40391, 1330, 30798, 198, 11748, 35104, 43695, 355, 331, 43695, 198, 198, 1324, 796, 2907, 21348, 7, 24442, ...
3.75
44
# -*- coding: utf-8 -*- DESC = "tsf-2018-03-26" INFO = { "DeletePublicConfig": { "params": [ { "name": "ConfigId", "desc": "ID" } ], "desc": "" }, "DescribeSimpleGroups": { "params": [ { "name": "GroupIdList", "desc": "ID" }, { "name": "ApplicationId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "Limit", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "GroupId", "desc": "ID" }, { "name": "SearchWord", "desc": "" }, { "name": "AppMicroServiceType", "desc": "Mservice mesh, P M" } ], "desc": "" }, "CreateGroup": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "GroupName", "desc": "" }, { "name": "ClusterId", "desc": "ID" }, { "name": "GroupDesc", "desc": "" } ], "desc": "" }, "CreateCluster": { "params": [ { "name": "ClusterName", "desc": "" }, { "name": "ClusterType", "desc": "" }, { "name": "VpcId", "desc": "ID" }, { "name": "ClusterCIDR", "desc": "IPCIDR" }, { "name": "ClusterDesc", "desc": "" }, { "name": "TsfRegionId", "desc": "TSF" }, { "name": "TsfZoneId", "desc": "TSF" }, { "name": "SubnetId", "desc": "ID" } ], "desc": "" }, "DescribePkgs": { "params": [ { "name": "ApplicationId", "desc": "IDID" }, { "name": "SearchWord", "desc": "ID" }, { "name": "OrderBy", "desc": "\"UploadTime\"" }, { "name": "OrderType", "desc": "0/1" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "ModifyContainerReplicas": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "InstanceNum", "desc": "" } ], "desc": "" }, "DescribeConfigSummary": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "SearchWord", "desc": "" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20" } ], "desc": "" }, "DeployContainerGroup": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "Server", "desc": "server" }, { "name": "TagName", "desc": ",v1" }, { "name": "InstanceNum", "desc": "" }, { 
"name": "Reponame", "desc": "/tsf/nginx" }, { "name": "CpuLimit", "desc": " CPU K8S limit request 2 " }, { "name": "MemLimit", "desc": " MiB K8S limit request 2 " }, { "name": "JvmOpts", "desc": "jvm" }, { "name": "CpuRequest", "desc": " CPU K8S request" }, { "name": "MemRequest", "desc": " MiB K8S request" }, { "name": "DoNotStart", "desc": "" }, { "name": "RepoName", "desc": "/tsf/nginx" }, { "name": "UpdateType", "desc": "0: 1:" }, { "name": "UpdateIvl", "desc": "" } ], "desc": "" }, "AddClusterInstances": { "params": [ { "name": "ClusterId", "desc": "ID" }, { "name": "InstanceIdList", "desc": "ID" }, { "name": "OsName", "desc": "" }, { "name": "ImageId", "desc": "ID" }, { "name": "Password", "desc": "" }, { "name": "KeyId", "desc": "" }, { "name": "SgId", "desc": "" }, { "name": "InstanceImportMode", "desc": "RTSFMagent" } ], "desc": "TSF" }, "DescribePodInstances": { "params": [ { "name": "GroupId", "desc": "groupId" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20 1~50" } ], "desc": "" }, "DescribeServerlessGroups": { "params": [ { "name": "SearchWord", "desc": "groupName" }, { "name": "ApplicationId", "desc": "ID" }, { "name": "OrderBy", "desc": " createTimeid name createTime" }, { "name": "OrderType", "desc": "101" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20 1~50" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" } ], "desc": "Serverless" }, "CreateNamespace": { "params": [ { "name": "NamespaceName", "desc": "" }, { "name": "ClusterId", "desc": "ID" }, { "name": "NamespaceDesc", "desc": "" }, { "name": "NamespaceResourceType", "desc": "(DEF)" }, { "name": "NamespaceType", "desc": "(DEFGLOBAL)" }, { "name": "NamespaceId", "desc": "ID" } ], "desc": "" }, "DeleteApplication": { "params": [ { "name": "ApplicationId", "desc": "ID" } ], "desc": "" }, "DeleteMicroservice": { "params": [ { "name": "MicroserviceId", "desc": "ID" } ], "desc": "" }, "StartGroup": { "params": [ { "name": 
"GroupId", "desc": "ID" } ], "desc": "" }, "DeleteNamespace": { "params": [ { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" } ], "desc": "" }, "DescribeGroupInstances": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "SearchWord", "desc": "" }, { "name": "OrderBy", "desc": "" }, { "name": "OrderType", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "DeleteConfig": { "params": [ { "name": "ConfigId", "desc": "ID" } ], "desc": "" }, "DescribePublicConfigSummary": { "params": [ { "name": "SearchWord", "desc": "" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20" } ], "desc": "" }, "DeletePkgs": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "PkgIds", "desc": "ID" } ], "desc": "\n10001000UpperDeleteLimit" }, "RevocationPublicConfig": { "params": [ { "name": "ConfigReleaseId", "desc": "ID" } ], "desc": "" }, "DescribePublicConfigs": { "params": [ { "name": "ConfigId", "desc": "ID" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20" }, { "name": "ConfigIdList", "desc": "ID" }, { "name": "ConfigName", "desc": "" }, { "name": "ConfigVersion", "desc": "" } ], "desc": "" }, "DescribeSimpleClusters": { "params": [ { "name": "ClusterIdList", "desc": "ID" }, { "name": "ClusterType", "desc": "" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20 1~50" }, { "name": "SearchWord", "desc": "idname" } ], "desc": "" }, "CreateServerlessGroup": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "GroupName", "desc": "1~60" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" } ], "desc": "Serverless" }, "DescribeConfigs": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "ConfigId", "desc": "ID" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" }, { "name": "ConfigIdList", "desc": "ID" }, { "name": "ConfigName", "desc": "" }, { "name": 
"ConfigVersion", "desc": "" } ], "desc": "" }, "DescribeConfig": { "params": [ { "name": "ConfigId", "desc": "ID" } ], "desc": "" }, "DescribeMicroservices": { "params": [ { "name": "NamespaceId", "desc": "ID" }, { "name": "SearchWord", "desc": "" }, { "name": "OrderBy", "desc": "" }, { "name": "OrderType", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "StartContainerGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "RemoveInstances": { "params": [ { "name": "ClusterId", "desc": " ID" }, { "name": "InstanceIdList", "desc": " ID " } ], "desc": " TSF " }, "ExpandGroup": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "InstanceIdList", "desc": "ID" } ], "desc": "" }, "DeleteGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "DescribeContainerGroupDetail": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": " " }, "DeleteContainerGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "RollbackConfig": { "params": [ { "name": "ConfigReleaseLogId", "desc": "ID" }, { "name": "ReleaseDesc", "desc": "" } ], "desc": "" }, "ModifyMicroservice": { "params": [ { "name": "MicroserviceId", "desc": " ID" }, { "name": "MicroserviceDesc", "desc": "" } ], "desc": "" }, "CreatePublicConfig": { "params": [ { "name": "ConfigName", "desc": "" }, { "name": "ConfigVersion", "desc": "" }, { "name": "ConfigValue", "desc": "yaml" }, { "name": "ConfigVersionDesc", "desc": "" }, { "name": "ConfigType", "desc": "" } ], "desc": "" }, "DescribeImageTags": { "params": [ { "name": "ApplicationId", "desc": "Id" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20 1~100" }, { "name": "QueryImageIdFlag", "desc": "0: 1:" }, { "name": "SearchWord", "desc": " tag " } ], "desc": "" }, "DescribeServerlessGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "Serverless" }, "DescribeMicroservice": { "params": [ { "name": "MicroserviceId", 
"desc": "ID" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "DescribePublicConfigReleaseLogs": { "params": [ { "name": "NamespaceId", "desc": "ID" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20" } ], "desc": "" }, "DescribeApplicationAttribute": { "params": [ { "name": "ApplicationId", "desc": "ID" } ], "desc": "" }, "RevocationConfig": { "params": [ { "name": "ConfigReleaseId", "desc": "ID" } ], "desc": "" }, "ReleasePublicConfig": { "params": [ { "name": "ConfigId", "desc": "ID" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ReleaseDesc", "desc": "" } ], "desc": "" }, "ReleaseConfig": { "params": [ { "name": "ConfigId", "desc": "ID" }, { "name": "GroupId", "desc": "ID" }, { "name": "ReleaseDesc", "desc": "" } ], "desc": "" }, "DescribeReleasedConfig": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "group" }, "CreateContainGroup": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "GroupName", "desc": "1~60" }, { "name": "InstanceNum", "desc": "" }, { "name": "AccessType", "desc": "0: 1: 2NodePort" }, { "name": "ProtocolPorts", "desc": "" }, { "name": "ClusterId", "desc": "ID" }, { "name": "CpuLimit", "desc": " CPU K8S limit" }, { "name": "MemLimit", "desc": " MiB K8S limit" }, { "name": "GroupComment", "desc": "200" }, { "name": "UpdateType", "desc": "0: 1:" }, { "name": "UpdateIvl", "desc": "" }, { "name": "CpuRequest", "desc": " CPU K8S request" }, { "name": "MemRequest", "desc": " MiB K8S request" } ], "desc": "" }, "DescribePublicConfigReleases": { "params": [ { "name": "ConfigName", "desc": "" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "Limit", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "ConfigId", "desc": "ID" } ], "desc": "" }, "DescribeGroups": { "params": [ { "name": "SearchWord", "desc": "" }, { "name": "ApplicationId", "desc": "ID" }, { "name": "OrderBy", "desc": "" }, { "name": 
"OrderType", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" }, { "name": "GroupResourceTypeList", "desc": "" } ], "desc": "" }, "DescribeSimpleNamespaces": { "params": [ { "name": "NamespaceIdList", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" }, { "name": "Limit", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "NamespaceResourceTypeList", "desc": "" }, { "name": "SearchWord", "desc": "idname" }, { "name": "NamespaceTypeList", "desc": "" }, { "name": "NamespaceName", "desc": "" }, { "name": "IsDefault", "desc": "01" } ], "desc": " " }, "DescribeConfigReleaseLogs": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" }, { "name": "ApplicationId", "desc": "ID" } ], "desc": "" }, "CreateMicroservice": { "params": [ { "name": "NamespaceId", "desc": "ID" }, { "name": "MicroserviceName", "desc": "" }, { "name": "MicroserviceDesc", "desc": "" } ], "desc": "" }, "DescribeDownloadInfo": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "PkgId", "desc": "ID" } ], "desc": "TSFCOSAPICOSCOS APISDK\nCOShttps://cloud.tencent.com/document/product/436" }, "DeployServerlessGroup": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "PkgId", "desc": "ID" }, { "name": "Memory", "desc": " 1Gi 2Gi 4Gi 8Gi 16Gi 1Gi" }, { "name": "InstanceRequest", "desc": " [1, 4] 1" }, { "name": "StartupParameters", "desc": "" } ], "desc": "Serverless" }, "DescribeGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "CreateConfig": { "params": [ { "name": "ConfigName", "desc": "" }, { "name": "ConfigVersion", "desc": "" }, { "name": "ConfigValue", "desc": "" }, { "name": "ApplicationId", "desc": "ID" }, { "name": "ConfigVersionDesc", "desc": "" }, 
{ "name": "ConfigType", "desc": "" } ], "desc": "" }, "DescribeContainerGroups": { "params": [ { "name": "SearchWord", "desc": "groupName" }, { "name": "ApplicationId", "desc": "ID" }, { "name": "OrderBy", "desc": " createTimeid name createTime" }, { "name": "OrderType", "desc": "101" }, { "name": "Offset", "desc": "0" }, { "name": "Limit", "desc": "20 1~50" }, { "name": "ClusterId", "desc": "ID" }, { "name": "NamespaceId", "desc": " ID" } ], "desc": "" }, "DeleteImageTags": { "params": [ { "name": "ImageTags", "desc": "" } ], "desc": "" }, "DescribeClusterInstances": { "params": [ { "name": "ClusterId", "desc": "ID" }, { "name": "SearchWord", "desc": "" }, { "name": "OrderBy", "desc": "" }, { "name": "OrderType", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" } ], "desc": "" }, "CreateApplication": { "params": [ { "name": "ApplicationName", "desc": "" }, { "name": "ApplicationType", "desc": "VCSserverless" }, { "name": "MicroserviceType", "desc": "Mservice meshNG" }, { "name": "ApplicationDesc", "desc": "" }, { "name": "ApplicationLogConfig", "desc": "" }, { "name": "ApplicationResourceType", "desc": "" }, { "name": "ApplicationRuntimeType", "desc": "runtime" } ], "desc": "" }, "StopGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "ShrinkGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "DeployGroup": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "PkgId", "desc": "ID" }, { "name": "StartupParameters", "desc": "" } ], "desc": "" }, "DescribeApplications": { "params": [ { "name": "SearchWord", "desc": "" }, { "name": "OrderBy", "desc": "" }, { "name": "OrderType", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "Limit", "desc": "" }, { "name": "ApplicationType", "desc": "" }, { "name": "MicroserviceType", "desc": "" }, { "name": "ApplicationResourceTypeList", "desc": "" } ], "desc": "" }, "DeleteServerlessGroup": { "params": [ { "name": "GroupId", "desc": 
"groupId" } ], "desc": "Serverless" }, "DescribeUploadInfo": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "PkgName", "desc": "" }, { "name": "PkgVersion", "desc": "" }, { "name": "PkgType", "desc": "" }, { "name": "PkgDesc", "desc": "" } ], "desc": "TSFCOSIdCOS APISDK\nCOShttps://cloud.tencent.com/document/product/436" }, "DescribeConfigReleases": { "params": [ { "name": "ConfigName", "desc": "" }, { "name": "GroupId", "desc": "ID" }, { "name": "NamespaceId", "desc": "ID" }, { "name": "ClusterId", "desc": "ID" }, { "name": "Limit", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "ConfigId", "desc": "ID" }, { "name": "ApplicationId", "desc": "ID" } ], "desc": "" }, "StopContainerGroup": { "params": [ { "name": "GroupId", "desc": "ID" } ], "desc": "" }, "DescribeSimpleApplications": { "params": [ { "name": "ApplicationIdList", "desc": "ID" }, { "name": "ApplicationType", "desc": "" }, { "name": "Limit", "desc": "" }, { "name": "Offset", "desc": "" }, { "name": "MicroserviceType", "desc": "" }, { "name": "ApplicationResourceTypeList", "desc": "" }, { "name": "SearchWord", "desc": "idname" } ], "desc": "" }, "DescribePublicConfig": { "params": [ { "name": "ConfigId", "desc": "ID" } ], "desc": "" }, "ModifyContainerGroup": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "AccessType", "desc": "0: 1: 2NodePort" }, { "name": "ProtocolPorts", "desc": "ProtocolPorts" }, { "name": "UpdateType", "desc": "0: 1:" }, { "name": "UpdateIvl", "desc": "," } ], "desc": "" }, "DescribeApplication": { "params": [ { "name": "ApplicationId", "desc": "ID" } ], "desc": "" }, "ShrinkInstances": { "params": [ { "name": "GroupId", "desc": "ID" }, { "name": "InstanceIdList", "desc": "ID" } ], "desc": "" }, "ModifyUploadInfo": { "params": [ { "name": "ApplicationId", "desc": "ID" }, { "name": "PkgId", "desc": "DescribeUploadInfoID" }, { "name": "Result", "desc": "COS0" }, { "name": "Md5", "desc": "MD5" }, { "name": "Size", "desc": "" } ], "desc": 
"COSTSF\n" }, "AddInstances": { "params": [ { "name": "ClusterId", "desc": "ID" }, { "name": "InstanceIdList", "desc": "ID" }, { "name": "OsName", "desc": "" }, { "name": "ImageId", "desc": "ID" }, { "name": "Password", "desc": "" }, { "name": "KeyId", "desc": "" }, { "name": "SgId", "desc": "" }, { "name": "InstanceImportMode", "desc": "RTSFMagent" } ], "desc": "TSF" } }
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 30910, 34, 796, 366, 912, 69, 12, 7908, 12, 3070, 12, 2075, 1, 198, 10778, 796, 1391, 198, 220, 366, 38727, 15202, 16934, 1298, 1391, 198, 220, 220, 220, 366, 37266, ...
1.675378
15,458
import onnx from pathlib import Path import subprocess import sys cwd_path = Path.cwd() # obtain list of added or modified files in this PR obtain_diff = subprocess.Popen(['git', 'diff', '--name-only', '--diff-filter=AM', 'origin/master', 'HEAD'], cwd=cwd_path, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutput, stderroutput = obtain_diff.communicate() diff_list = stdoutput.split() # identify list of changed onnx models in model Zoo model_list = [str(model).replace("b'","").replace("'", "") for model in diff_list if ".onnx" in str(model)] # run lfs install before starting the tests run_lfs_install() print("\n=== Running ONNX Checker on added models ===\n") # run checker on each model failed_models = [] for model_path in model_list: model_name = model_path.split('/')[-1] print("Testing:", model_name) try: pull_lfs_file(model_path) model = onnx.load(model_path) onnx.checker.check_model(model) print("Model", model_name, "has been successfully checked!") except Exception as e: print(e) failed_models.append(model_path) if len(failed_models) != 0: print(str(len(failed_models)) +" models failed onnx checker.") sys.exit(1) print(len(model_list), "model(s) checked.")
[ 11748, 319, 77, 87, 198, 6738, 3108, 8019, 1330, 10644, 198, 11748, 850, 14681, 198, 11748, 25064, 198, 198, 66, 16993, 62, 6978, 796, 10644, 13, 66, 16993, 3419, 198, 198, 2, 7330, 1351, 286, 2087, 393, 9518, 3696, 287, 428, 4810, ...
2.69181
464
from .utils import Config
[ 6738, 764, 26791, 1330, 17056 ]
5
5
import glob import pandas as pd from tqdm import tqdm from classifier import config
[ 11748, 15095, 198, 11748, 19798, 292, 355, 279, 67, 198, 198, 6738, 256, 80, 36020, 1330, 256, 80, 36020, 198, 6738, 1398, 7483, 1330, 4566, 628 ]
3.307692
26
#!/usr/bin/env python from setuptools import setup, find_packages setup( name="urdu_digit", version="0.0.17", keywords=["urdu", "numeric", "digit", "converter"], description="English to Urdu numeric digit converter.", long_description=open('README.md').read(), project_urls={ 'Homepage': 'https://www.techtum.dev/work-urdu-digit-211001.html', 'Source': 'https://github.com/siphr/urdu-digit', 'Tracker': 'https://github.com/siphr/urdu-digit/issues', }, author="siphr", author_email="pypi@techtum.dev", packages=['urdu_digit'], platforms="any", install_requires=[] )
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 198, 6738, 900, 37623, 10141, 1330, 9058, 11, 1064, 62, 43789, 628, 198, 40406, 7, 198, 220, 220, 220, 1438, 2625, 333, 646, 62, 27003, 1600, 198, 220, 220, 220, 2196, 2625, 15, 13, ...
2.413534
266
import collections Interval = collections.namedtuple("Interval", "start, end")
[ 11748, 17268, 198, 198, 9492, 2100, 796, 17268, 13, 13190, 83, 29291, 7203, 9492, 2100, 1600, 366, 9688, 11, 886, 4943, 628 ]
3.681818
22
import numpy as np import matplotlib.pyplot as plt
[ 11748, 299, 32152, 355, 45941, 198, 11748, 2603, 29487, 8019, 13, 9078, 29487, 355, 458, 83, 628, 628, 628, 628, 628, 198 ]
2.772727
22
# ! /usr/bin/python # -*- coding: utf-8 -*- # ============================================================================= # Copyright 2020 NVIDIA. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= import time import warnings from collections import OrderedDict import numpy as np import onnx import tensorrt as trt from .tensorrt_format import FormatManager from .tensorrt_runner import ( DEFAULT_SHAPE_VALUE, TRT_LOGGER, TensorRTRunnerV2, default_value, find_in_dict, get_input_metadata_from_profile, is_dimension_dynamic, is_shape_dynamic, is_valid_shape_override, send_on_queue, write_timestamped, ) from nemo import logging, logging_mode # Caches data loaded by a DataLoader for use across multiple runners. # ONNX loaders return ONNX models in memory.
[ 2, 5145, 1220, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 2, 38093, 25609, 198, 2, 15069, 12131, 15127, 13, 1439, 6923, 33876, 13, 198, 2, 198, 2, 49962, 739, 262, 24843, ...
3.434466
412
import logging import signal from PySide2 import QtCore import vstreamer_utils
[ 11748, 18931, 198, 11748, 6737, 198, 6738, 9485, 24819, 17, 1330, 33734, 14055, 198, 11748, 410, 5532, 263, 62, 26791, 628 ]
3.809524
21
#Define environment variable FABRIC_UTILS_PATH and provide path to fabric_utils before running import time import os from contrail_fixtures import * import testtools from tcutils.commands import * from fabric.context_managers import settings from tcutils.wrappers import preposttest_wrapper from tcutils.util import * from fabric.api import run from fabric.state import connections import test from upgrade.verify import VerifyFeatureTestCases from base import ResetConfigBaseTest
[ 2, 7469, 500, 2858, 7885, 376, 6242, 41132, 62, 3843, 45484, 62, 34219, 290, 2148, 3108, 284, 9664, 62, 26791, 878, 2491, 198, 11748, 640, 198, 11748, 28686, 198, 6738, 25736, 346, 62, 69, 25506, 1330, 1635, 198, 11748, 1332, 31391, 1...
3.942623
122
from server.services.wiki.pages.templates import OverviewPageTemplates from server.services.wiki.pages.page_service import PageService from server.services.wiki.mediawiki_service import MediaWikiService from server.services.wiki.wiki_text_service import WikiTextService from server.services.wiki.wiki_table_service import WikiTableService from server.services.wiki.wiki_section_service import WikiSectionService from server.models.serializers.document import OverviewPageSchema
[ 6738, 4382, 13, 30416, 13, 15466, 13, 31126, 13, 11498, 17041, 1330, 28578, 9876, 12966, 17041, 198, 6738, 4382, 13, 30416, 13, 15466, 13, 31126, 13, 7700, 62, 15271, 1330, 7873, 16177, 198, 6738, 4382, 13, 30416, 13, 15466, 13, 11431, ...
4.165217
115
# Generated by Django 4.0 on 2022-03-06 02:23 from django.db import migrations, models
[ 2, 2980, 515, 416, 37770, 604, 13, 15, 319, 33160, 12, 3070, 12, 3312, 7816, 25, 1954, 198, 198, 6738, 42625, 14208, 13, 9945, 1330, 15720, 602, 11, 4981, 628 ]
2.966667
30
import imp import os.path from app import db from migrate.versioning import api from config import SQLALCHEMY_DATABASE_URI from config import SQLALCHEMY_MIGRATE_REPO # end of file
[ 11748, 848, 198, 11748, 28686, 13, 6978, 198, 198, 6738, 598, 1330, 20613, 198, 198, 6738, 32492, 13, 9641, 278, 1330, 40391, 198, 198, 6738, 4566, 1330, 16363, 1847, 3398, 3620, 56, 62, 35, 1404, 6242, 11159, 62, 47269, 198, 6738, 45...
2.921875
64
from xmuda.models.SSC2d_proj3d2d import SSC2dProj3d2d from xmuda.data.NYU.nyu_dm import NYUDataModule from xmuda.data.semantic_kitti.kitti_dm import KittiDataModule from xmuda.common.utils.sscMetrics import SSCMetrics from xmuda.data.NYU.params import class_relation_freqs as NYU_class_relation_freqs, class_freq_1_4 as NYU_class_freq_1_4, class_freq_1_8 as NYU_class_freq_1_8, class_freq_1_16 as NYU_class_freq_1_16 import numpy as np import torch import torch.nn.functional as F from xmuda.models.ssc_loss import get_class_weights from tqdm import tqdm import pickle import os #model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/no_mask_255/v12_removeCPThreshold_KLnonzeros_LRDecay30_NYU_1_0.0001_0.0001_CPThreshold0.0_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=030-val/mIoU=0.26983.ckpt" model_path = "/gpfsscratch/rech/kvd/uyl37fq/logs/kitti/v12_ProjectScale2_CPAt1_8_1divlog_LargerFOV_kitti_1_FrusSize_4_WD0_lr0.0001_CEssc_MCAssc_ProportionLoss_CERel_CRCP_Proj_2_4_8/checkpoints/epoch=037-val/mIoU=0.11056.ckpt" class_weights = { '1_4': get_class_weights(NYU_class_freq_1_4).cuda(), '1_8': get_class_weights(NYU_class_freq_1_8).cuda(), '1_16': get_class_weights(NYU_class_freq_1_16).cuda(), } #dataset = "NYU" dataset = "kitti" if dataset == "NYU": NYU_root = "/gpfswork/rech/kvd/uyl37fq/data/NYU/depthbin" NYU_preprocess_dir = "/gpfsscratch/rech/kvd/uyl37fq/precompute_data/NYU" kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti" full_scene_size = (240, 144, 240) output_scene_size = (60, 36, 60) NYUdm = NYUDataModule(NYU_root, NYU_preprocess_dir, batch_size=4, num_workers=3) NYUdm.setup() _C = 12 data_loader = NYUdm.val_dataloader() else: kitti_root = "/gpfswork/rech/kvd/uyl37fq/data/semantic_kitti" kitti_depth_root = "/gpfsscratch/rech/kvd/uyl37fq/Adabin/KITTI/" kitti_logdir = '/gpfsscratch/rech/kvd/uyl37fq/logs/kitti' kitti_tsdf_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/TSDF_pred_depth_adabin/kitti" kitti_label_root = 
"/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/labels/kitti" kitti_occ_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/occupancy_adabin/kitti" kitti_sketch_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/sketch_3D/kitti" kitti_mapping_root = "/gpfsscratch/rech/kvd/uyl37fq/sketch_dataset/mapping_adabin/kitti" full_scene_size = (256, 256, 32) KITTIdm = KittiDataModule(root=kitti_root, data_aug=True, TSDF_root=kitti_tsdf_root, label_root=kitti_label_root, mapping_root=kitti_mapping_root, occ_root=kitti_occ_root, depth_root=kitti_depth_root, sketch_root=kitti_sketch_root, batch_size=1, num_workers=3) KITTIdm.setup() _C = 20 data_loader = KITTIdm.val_dataloader() class_relation_weights = get_class_weights(NYU_class_relation_freqs) model = SSC2dProj3d2d.load_from_checkpoint(model_path) model.cuda() model.eval() count = 0 out_dict = {} count = 0 write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/draw_output/kitti" with torch.no_grad(): for batch in tqdm(data_loader): if dataset == "NYU": y_true = batch['ssc_label_1_4'].detach().cpu().numpy() valid_pix_4 = batch['valid_pix_4'] else: y_true = batch['ssc_label_1_1'].detach().cpu().numpy() # valid_pix_1 = batch['valid_pix_1'] valid_pix_1 = batch['valid_pix_double'] batch['img'] = batch['img'].cuda() pred = model(batch) y_pred = torch.softmax(pred['ssc'], dim=1).detach().cpu().numpy() y_pred = np.argmax(y_pred, axis=1) for i in range(y_true.shape[0]): out_dict = { "y_pred": y_pred[i].astype(np.uint16), "y_true": y_true[i].astype(np.uint16), } if dataset == "NYU": filepath = os.path.join(write_path, batch['name'][i] + ".pkl") out_dict["cam_pose"] = batch['cam_pose'][i].detach().cpu().numpy() out_dict["vox_origin"] = batch['vox_origin'][i].detach().cpu().numpy() elif dataset == "kitti": filepath = os.path.join(write_path, batch['sequence'][i], batch['frame_id'][i] + ".pkl") out_dict['valid_pix_1'] = valid_pix_1[i].detach().cpu().numpy() out_dict['cam_k'] = batch['cam_k'][i].detach().cpu().numpy() out_dict['T_velo_2_cam'] 
= batch['T_velo_2_cam'][i].detach().cpu().numpy() os.makedirs(os.path.join(write_path, batch['sequence'][i]), exist_ok=True) with open(filepath, 'wb') as handle: pickle.dump(out_dict, handle) print("wrote to", filepath) count += 1 # if count == 4: # break # write_path = "/gpfsscratch/rech/kvd/uyl37fq/temp/output" # filepath = os.path.join(write_path, "output.pkl") # with open(filepath, 'wb') as handle: # pickle.dump(out_dict, handle) # print("wrote to", filepath)
[ 6738, 2124, 76, 15339, 13, 27530, 13, 5432, 34, 17, 67, 62, 1676, 73, 18, 67, 17, 67, 1330, 311, 6173, 17, 67, 2964, 73, 18, 67, 17, 67, 198, 6738, 2124, 76, 15339, 13, 7890, 13, 12805, 52, 13, 3281, 84, 62, 36020, 1330, 48166...
1.880773
2,793
# Author: penhe@microsoft.com # Date: 05/30/2019 # """ Data parallel module """ from collections import OrderedDict import numpy as np import torch from torch.cuda.comm import broadcast_coalesced from torch.cuda.comm import reduce_add_coalesced from torch.nn.parallel import parallel_apply from torch.nn.parallel.scatter_gather import scatter_kwargs,gather import torch.cuda.comm as comm import pdb from bert.optimization import BertAdam def optimizer_factory(args, training_steps=None, init_spec=None, no_decay=['bias', 'LayerNorm.weight']): return optimizer_fn
[ 2, 6434, 25, 3112, 258, 31, 40485, 13, 785, 198, 2, 7536, 25, 8870, 14, 1270, 14, 23344, 198, 2, 198, 37811, 6060, 10730, 8265, 198, 37811, 198, 198, 6738, 17268, 1330, 14230, 1068, 35, 713, 198, 11748, 299, 32152, 355, 45941, 198, ...
3.161111
180
from rest_framework import serializers from .models import User from .models import Product from django.contrib.auth import get_user_model
[ 6738, 1334, 62, 30604, 1330, 11389, 11341, 198, 6738, 764, 27530, 1330, 11787, 198, 6738, 764, 27530, 1330, 8721, 198, 6738, 42625, 14208, 13, 3642, 822, 13, 18439, 1330, 651, 62, 7220, 62, 19849, 628, 198 ]
3.916667
36
''' Author: geekli Date: 2020-12-27 10:38:46 LastEditTime: 2020-12-27 10:40:44 LastEditors: your name Description: FilePath: \pythonQT\ch02\multiSinal_button.py ''' import sys from PyQt5.QtWidgets import QApplication, QWidget, QPushButton if __name__ == '__main__': app = QApplication(sys.argv) demo = Demo() demo.show() sys.exit(app.exec_())
[ 7061, 6, 198, 13838, 25, 27314, 4528, 198, 10430, 25, 12131, 12, 1065, 12, 1983, 838, 25, 2548, 25, 3510, 198, 5956, 18378, 7575, 25, 12131, 12, 1065, 12, 1983, 838, 25, 1821, 25, 2598, 198, 5956, 18378, 669, 25, 534, 1438, 198, 1...
2.42953
149
from __main__ import * hm_df = functs_df[~((functs_df.head_type == 'prep') & (functs_df.suffix))].copy()
[ 6738, 11593, 12417, 834, 1330, 1635, 198, 23940, 62, 7568, 796, 1257, 310, 82, 62, 7568, 58, 93, 19510, 12543, 310, 82, 62, 7568, 13, 2256, 62, 4906, 6624, 705, 46012, 11537, 1222, 357, 12543, 310, 82, 62, 7568, 13, 37333, 844, 4008...
2.234043
47
from autodc.components.hpo_optimizer.smac_optimizer import SMACOptimizer from autodc.components.hpo_optimizer.mfse_optimizer import MfseOptimizer from autodc.components.hpo_optimizer.bohb_optimizer import BohbOptimizer from autodc.components.hpo_optimizer.tpe_optimizer import TPEOptimizer
[ 6738, 1960, 375, 66, 13, 5589, 3906, 13, 71, 7501, 62, 40085, 7509, 13, 5796, 330, 62, 40085, 7509, 1330, 9447, 2246, 27871, 320, 7509, 198, 6738, 1960, 375, 66, 13, 5589, 3906, 13, 71, 7501, 62, 40085, 7509, 13, 76, 69, 325, 62, ...
2.719626
107
# -*- coding: utf-8 -*- import sys, select, termios,tty import os if __name__ == "__main__": main()
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 25064, 11, 2922, 11, 3381, 4267, 11, 42852, 198, 11748, 28686, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 1388, 3419 ]
2.428571
42
#!/usr/bin/python3 # Copyright (c) 2019 Bart Massey # [This program is licensed under the "MIT License"] # Please see the file LICENSE in the source # distribution of this software for license terms. # Find maximum and minimum sample in an audio file. import sys import wave as wav # Get the signal file. wavfile = wav.open(sys.argv[1], 'rb') # Channels per frame. channels = wavfile.getnchannels() # Bytes per sample. width = wavfile.getsampwidth() # Sample rate rate = wavfile.getframerate() # Number of frames. frames = wavfile.getnframes() # Length of a frame frame_width = width * channels # Get the signal and check it. max_sample = None min_sample = None wave_bytes = wavfile.readframes(frames) # Iterate over frames. for f in range(0, len(wave_bytes), frame_width): frame = wave_bytes[f : f + frame_width] # Iterate over channels. for c in range(0, len(frame), width): # Build a sample. sample_bytes = frame[c : c + width] # XXX Eight-bit samples are unsigned sample = int.from_bytes(sample_bytes, byteorder='little', signed=(width>1)) # Check extrema. if max_sample == None: max_sample = sample if min_sample == None: min_sample = sample if sample > max_sample: max_sample = sample if sample < min_sample: min_sample = sample wavfile.close() print("min: {} max: {}".format(min_sample, max_sample))
[ 2, 48443, 14629, 14, 8800, 14, 29412, 18, 198, 2, 15069, 357, 66, 8, 13130, 13167, 11066, 4397, 198, 2, 685, 1212, 1430, 318, 11971, 739, 262, 366, 36393, 13789, 8973, 198, 2, 4222, 766, 262, 2393, 38559, 24290, 287, 262, 2723, 198,...
2.473083
613
''' A very simple test application to exercise a round trip of messages through the thywill system. This also illustrates the bare, bare minimum implementation of the 'thywill_interface.py' module - all it does is echo back incoming messages to the client who sent them. '''
[ 7061, 6, 198, 32, 845, 2829, 1332, 3586, 284, 5517, 257, 2835, 5296, 286, 6218, 832, 262, 11906, 10594, 1080, 13, 198, 198, 1212, 635, 21290, 262, 6247, 11, 6247, 5288, 7822, 286, 262, 705, 20057, 10594, 62, 39994, 13, 9078, 6, 8265...
4.365079
63
from tests import app
[ 6738, 5254, 1330, 598, 628 ]
4.6
5
import typing from jk_cachefunccalls import cacheCalls from jk_cmdoutputparsinghelper import ValueParser_ByteWithUnit from .parsing_utils import * from .invoke_utils import run #import jk_json _parserColonKVP = ParseAtFirstDelimiter(delimiter=":", valueCanBeWrappedInDoubleQuotes=False, keysReplaceSpacesWithUnderscores=True) # # Returns: # # [ # { # "<key>": "<value>", # ... # }, # ... # ] # def parse_proc_cpu_info(stdout:str, stderr:str, exitcode:int) -> typing.Tuple[list,dict]: """ processor : 0 vendor_id : GenuineIntel cpu family : 6 model : 92 model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz stepping : 9 microcode : 0x38 cpu MHz : 1000.000 cache size : 1024 KB physical id : 0 siblings : 4 core id : 0 cpu cores : 4 apicid : 0 initial apicid : 0 fpu : yes fpu_exception : yes cpuid level : 21 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities bugs : monitor spectre_v1 spectre_v2 bogomips : 2995.20 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 1 vendor_id : GenuineIntel cpu family : 6 model : 92 model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz stepping : 9 microcode : 0x38 cpu MHz : 800.000 cache size : 1024 KB physical id : 0 siblings : 4 core id : 1 cpu cores : 4 apicid : 2 initial apicid : 2 fpu : yes fpu_exception : yes cpuid level : 21 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 
clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities bugs : monitor spectre_v1 spectre_v2 bogomips : 2995.20 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 2 vendor_id : GenuineIntel cpu family : 6 model : 92 model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz stepping : 9 microcode : 0x38 cpu MHz : 800.000 cache size : 1024 KB physical id : 0 siblings : 4 core id : 2 cpu cores : 4 apicid : 4 initial apicid : 4 fpu : yes fpu_exception : yes cpuid level : 21 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities bugs : monitor spectre_v1 spectre_v2 bogomips : 2995.20 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: processor : 3 vendor_id : GenuineIntel cpu family : 6 model : 92 model name : Intel(R) Pentium(R) CPU J4205 @ 1.50GHz stepping : 9 microcode : 0x38 cpu MHz : 1100.000 cache size : 1024 KB physical id : 0 
siblings : 4 core id : 3 cpu cores : 4 apicid : 6 initial apicid : 6 fpu : yes fpu_exception : yes cpuid level : 21 wp : yes flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc aperfmperf pni pclmulqdq dtes64 monitor ds_cpl vmx est tm2 ssse3 sdbg cx16 xtpr pdcm sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave rdrand lahf_lm 3dnowprefetch intel_pt ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid fsgsbase tsc_adjust smep erms mpx rdseed smap clflushopt sha_ni xsaveopt xsavec xgetbv1 dtherm ida arat pln pts md_clear arch_capabilities bugs : monitor spectre_v1 spectre_v2 bogomips : 2995.20 clflush size : 64 cache_alignment : 64 address sizes : 39 bits physical, 48 bits virtual power management: """ if exitcode != 0: raise Exception() cpuInfos = splitAtEmptyLines(stdout.split("\n")) retExtra = {} ret = [] for group in cpuInfos: d = _parserColonKVP.parseLines(group) if "processor" not in d: for k, v in d.items(): retExtra[k.lower()] = v continue if "cache_size" in d: d["cache_size_kb"] = ValueParser_ByteWithUnit.parse(d["cache_size"]) // 1024 del d["cache_size"] if "bogomips" in d: d["bogomips"] = float(d["apicid"]) elif "BogoMIPS" in d: d["bogomips"] = float(d["BogoMIPS"]) del d["BogoMIPS"] if "bugs" in d: d["bugs"] = d["bugs"].split() if "flags" in d: d["flags"] = sorted(d["flags"].split()) elif "Features" in d: d["flags"] = sorted(d["Features"].split()) del d["Features"] # bool for key in [ "fpu", "fpu_exception", "wp" ]: if key in d: d[key.lower()] = d[key] == "yes" if key != key.lower(): del d[key] # int for key in [ "CPU_architecture", "CPU_revision", "physical_id", "initial_apicid", "cpu_cores", "core_id", "clflush_size", "cache_alignment", "apicid" ]: if key in d: d[key.lower()] = int(d[key]) if key != key.lower(): del d[key] # float for key in [ "cpu_MHz" ]: if key 
in d: d[key.lower()] = float(d[key]) if key != key.lower(): del d[key] # str for key in [ "CPU_implementer", "CPU_part", "CPU_variant" ]: if key in d: d[key.lower()] = d[key] if key != key.lower(): del d[key] d["processor"] = int(d["processor"]) if "siblings" in d: d["siblings"] = int(d["siblings"]) #jk_json.prettyPrint(d) ret.append(d) return ret, retExtra # # # Returns: # # [ # { # "<key>": "<value>", # ... # }, # ... # ] # #
[ 198, 11748, 19720, 198, 198, 6738, 474, 74, 62, 23870, 12543, 535, 5691, 1330, 12940, 34, 5691, 198, 6738, 474, 74, 62, 28758, 22915, 79, 945, 278, 2978, 525, 1330, 11052, 46677, 62, 40778, 3152, 26453, 198, 198, 6738, 764, 79, 945, ...
2.22551
3,042
''' This module provides the Telegram. '''
[ 7061, 6, 198, 1212, 8265, 3769, 262, 50203, 13, 198, 7061, 6, 628 ]
3.384615
13
from testing_framework.report import report from typing import Tuple import html
[ 6738, 4856, 62, 30604, 13, 13116, 1330, 989, 198, 6738, 19720, 1330, 309, 29291, 198, 11748, 27711 ]
4.705882
17
while(True): inp = [int(x) for x in input().split()] if inp[0] == 0 and inp[1] == 0: break print(inp[0]//inp[1], inp[0]%inp[1], "/", inp[1])
[ 4514, 7, 17821, 2599, 198, 220, 220, 220, 287, 79, 796, 685, 600, 7, 87, 8, 329, 2124, 287, 5128, 22446, 35312, 3419, 60, 198, 220, 220, 220, 611, 287, 79, 58, 15, 60, 6624, 657, 290, 287, 79, 58, 16, 60, 6624, 657, 25, 198, ...
1.797753
89
#!/usr/bin/env python import sys import time import rospy import subprocess import actionlib from std_msgs.msg import Float32 from sensor_msgs.msg import Joy from geometry_msgs.msg import Twist, PoseWithCovarianceStamped from actionlib_msgs.msg import GoalStatus, GoalStatusArray from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
[ 2, 48443, 14629, 14, 8800, 14, 24330, 21015, 198, 11748, 25064, 198, 11748, 640, 198, 11748, 686, 2777, 88, 198, 11748, 850, 14681, 198, 11748, 2223, 8019, 198, 198, 6738, 14367, 62, 907, 14542, 13, 19662, 1330, 48436, 2624, 198, 6738, ...
3.31068
103
from backend.domain.contracts import NewClient, NewOrder, NewOrderItem from .new_product import NewProduct
[ 6738, 30203, 13, 27830, 13, 28484, 82, 1330, 968, 11792, 11, 968, 18743, 11, 968, 18743, 7449, 198, 198, 6738, 764, 3605, 62, 11167, 1330, 968, 15667, 198 ]
3.857143
28
from uuid import UUID import json from ..mappings import * def add_doc_audit_entry(session, doc_id, status, data): """"Add an audit entry, requires that a commit be run on the session afterwards """ if not isinstance(doc_id, UUID): raise ValueError("Expecting UUID") if not isinstance(data, dict): raise ValueError("Expecting dict") session.add(FileUsage( document_id=doc_id.bytes, fileusage_type=status, data=json.dumps(data) ))
[ 198, 6738, 334, 27112, 1330, 471, 27586, 198, 198, 11748, 33918, 198, 198, 6738, 11485, 76, 39242, 1330, 1635, 198, 198, 4299, 751, 62, 15390, 62, 3885, 270, 62, 13000, 7, 29891, 11, 2205, 62, 312, 11, 3722, 11, 1366, 2599, 198, 220...
2.490196
204
# versions of libraries used import sys import tweepy import numpy as np import pymongo import emoji import nltk.tokenize import requests print("Python version:{}".format(sys.version)) print("tweepy version:{}".format(tweepy.__version__)) print("pymongo version:{}".format(pymongo.__version__)) print("emoji version:{}".format(emoji.__version__)) print("requests version:{}".format(requests.__version__)) print("numpy version:{}".format(np.__version__)) print("nltk version:{}".format(nltk.__version__))
[ 2, 6300, 286, 12782, 973, 201, 198, 11748, 25064, 201, 198, 11748, 4184, 538, 88, 201, 198, 11748, 299, 32152, 355, 45941, 201, 198, 11748, 279, 4948, 25162, 201, 198, 11748, 44805, 201, 198, 11748, 299, 2528, 74, 13, 30001, 1096, 201...
2.796791
187
count_ones(20)
[ 198, 9127, 62, 1952, 7, 1238, 8 ]
2.142857
7
#!/usr/bin/python # -*- coding: utf-8 -*- import dbm from sklearn.datasets import load_iris from classifer.base import BaseClassifier from classifer.decision_tree import DecisionTreeClassifier import numpy as np def test(): n_classes = 3 plot_colors = "bry" plot_step = 0.02 # Load data iris = load_iris() import matplotlib.pyplot as plt # We only take the two corresponding features pairidx = 0 pair =[0,1] X = iris.data[:, pair] y = iris.target # Shuffle idx = np.arange(X.shape[0]) np.random.seed(13) np.random.shuffle(idx) X = X[idx] y = y[idx] # Standardize mean = X.mean(axis=0) std = X.std(axis=0) X = (X - mean) / std # Train clf = DecisionAdaBoostClassifier(num_rounds=3) # clf = DecisionTreeClassifier() # print X print y clf.train(X, y) # Plot the decision boundary plt.subplot(2, 3, pairidx + 1) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) print '----' print iris.data[:1, ] values = np.c_[xx.ravel(), yy.ravel()] Z = clf.predict(values) print Z print Z.shape print xx.shape Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.xlabel(iris.feature_names[pair[0]]) plt.ylabel(iris.feature_names[pair[1]]) plt.axis("tight") # Plot the training points for i, color in zip(range(n_classes), plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i], cmap=plt.cm.Paired) plt.axis("tight") plt.suptitle("Decision surface of a decision tree using paired features") plt.legend() plt.show() if __name__ == '__main__': test()
[ 2, 48443, 14629, 14, 8800, 14, 29412, 198, 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 11748, 288, 20475, 198, 6738, 1341, 35720, 13, 19608, 292, 1039, 1330, 3440, 62, 29616, 198, 198, 6738, 1398, 7087, 13, 8692, ...
2.105206
922
import numpy as np from bioinfo.assembly.errors import InvalidPair from bioinfo.molecules.sequence import Sequence
[ 11748, 299, 32152, 355, 45941, 198, 198, 6738, 13401, 10951, 13, 41873, 13, 48277, 1330, 17665, 47, 958, 198, 6738, 13401, 10951, 13, 76, 2305, 13930, 13, 43167, 1330, 45835, 628, 198 ]
3.6875
32
from __future__ import annotations from . import _base
[ 6738, 11593, 37443, 834, 1330, 37647, 198, 198, 6738, 764, 1330, 4808, 8692, 628 ]
4.071429
14
import pytest def test_with_fixture(fixture2): assert fixture2 == 2
[ 11748, 12972, 9288, 628, 628, 198, 4299, 1332, 62, 4480, 62, 69, 9602, 7, 69, 9602, 17, 2599, 198, 220, 220, 220, 6818, 29220, 17, 6624, 362, 198 ]
2.714286
28
from searcher import CLIPSearcher from utils import get_args if __name__ == "__main__": args = get_args() cs = CLIPSearcher(device=args.device, store_path=args.store_path) cs.load_dir(args.dir, save_every=args.save_every, recursive=args.recursive, load_new=(not args.dont_load_new)) cs.search(texts=args.texts, images=args.images, results=args.results, outdir=args.outdir)
[ 6738, 9622, 2044, 1330, 43749, 3705, 50194, 198, 6738, 3384, 4487, 1330, 651, 62, 22046, 198, 198, 361, 11593, 3672, 834, 6624, 366, 834, 12417, 834, 1298, 198, 220, 220, 220, 26498, 796, 651, 62, 22046, 3419, 198, 220, 220, 220, 5011...
2.689655
145
"""For entities that have a property template.""" from gemd.entity.link_by_uid import LinkByUID from gemd.entity.setters import validate_list from gemd.entity.template.base_template import BaseTemplate from gemd.entity.template.property_template import PropertyTemplate from gemd.entity.bounds.base_bounds import BaseBounds from typing import Iterable
[ 37811, 1890, 12066, 326, 423, 257, 3119, 11055, 526, 15931, 198, 6738, 16840, 67, 13, 26858, 13, 8726, 62, 1525, 62, 27112, 1330, 7502, 3886, 27586, 198, 6738, 16840, 67, 13, 26858, 13, 2617, 1010, 1330, 26571, 62, 4868, 198, 6738, 16...
3.755319
94
from app.models import Subscriber from flask_wtf import FlaskForm from wtforms import TextAreaField, StringField, IntegerField, EmailField from wtforms.validators import InputRequired, ValidationError from flask import flash # comment form # subscriber form
[ 6738, 598, 13, 27530, 1330, 3834, 1416, 24735, 198, 6738, 42903, 62, 86, 27110, 1330, 46947, 8479, 198, 6738, 266, 83, 23914, 1330, 8255, 30547, 15878, 11, 10903, 15878, 11, 34142, 15878, 11, 9570, 15878, 198, 6738, 266, 83, 23914, 13, ...
4
65
import toml
[ 11748, 284, 4029, 628 ]
3.25
4
""" Retrieve GoDaddy DNS settings via their developer API See also: https://developer.godaddy.com/doc/endpoint/domains#/ """ import os import time from pprint import pprint from typing import List import requests import credential_loaders BASE_URL = "https://api.godaddy.com" # You can easily replace these with a different CredentialLoader to match your key management system API_KEY_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_KEY") API_SECRET_CRED_LOADER = credential_loaders.EnvVarCredentialLoader("GODADDY_API_SECRET") # API_KEY_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_key.txt") # API_SECRET_CRED_LOADER = credential_loaders.PlaintextCredentialLoader("./api_secret.txt") def _get_headers() -> dict: """Get authorization header for GoDaddy Developer API. https://developer.godaddy.com/keys """ api_key = API_KEY_CRED_LOADER.load_credentials() api_secret = API_SECRET_CRED_LOADER.load_credentials() return {"Authorization": "sso-key {}:{}".format(api_key, api_secret)} def _call_endpoint(url_suffix: str, base_url: str = BASE_URL) -> dict: """Call GoDaddy developer API endpoint. Only supports GET endpoints to keep access read-only. 
""" headers = _get_headers() url = os.path.join(base_url, url_suffix) resp = requests.get(url, headers=headers) return resp.json() def get_domains() -> List[str]: """Get list of Domains for this API key.""" ret = _call_endpoint("v1/domains") # Example response: # [{'createdAt': '2016-06-25T03:08:44.000Z', # 'domain': 'mydomain.com', # 'domainId': 12345678, # 'expirationProtected': False, # 'expires': '2020-06-25T03:08:44.000Z', # 'holdRegistrar': False, # 'locked': True, # 'nameServers': None, # 'privacy': False, # 'renewAuto': True, # 'renewDeadline': '2020-08-09T03:08:44.000Z', # 'renewable': True, # 'status': 'ACTIVE', # 'transferProtected': False},] domains = [d["domain"] for d in ret] return domains def get_domain_dns_records(domain): """Get DNS entries for a specific domain Returns: List with format (for example): [ {'data': '160.153.162.20', 'name': '_dmarc', 'ttl': 3600, 'type': 'A'}, {'data': 'ns37.domaincontrol.com', 'name': '@', 'ttl': 3600, 'type': 'NS'}, ...] """ url_suffix = "v1/domains/{}/records".format(domain) ret = _call_endpoint(url_suffix) if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN": # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'} raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}") return ret def print_all_dns_records(): """ Print each domain and its DNS records (for domains linked to this API key).""" for domain in sorted(get_domains()): dns_records = get_domain_dns_records(domain) print(domain) pprint(dns_records) print("*" * 50) # TODO: poor man's rate limiter. improve? time.sleep(2) if __name__ == "__main__": print_all_dns_records()
[ 37811, 198, 9781, 30227, 1514, 48280, 18538, 6460, 2884, 511, 8517, 7824, 198, 198, 6214, 635, 25, 198, 220, 220, 220, 3740, 1378, 16244, 263, 13, 25344, 13218, 13, 785, 14, 15390, 14, 437, 4122, 14, 3438, 1299, 2, 14, 198, 37811, 1...
2.522763
1,274
import cv2 import numpy as np
[ 11748, 220, 269, 85, 17, 201, 198, 11748, 299, 32152, 355, 45941, 220, 201 ]
2.357143
14
# -*- coding: utf-8 -*- from .tables.pronto_soccorsi import table
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 198, 6738, 764, 83, 2977, 13, 1050, 5957, 62, 82, 13966, 35255, 1330, 3084, 628 ]
2.344828
29
# -*- coding: utf-8 -*- # @Author : Administrator # @DateTime : 2021/10/17 20:40 # @FileName : __init__.py # @SoftWare : PyCharm
[ 2, 532, 9, 12, 19617, 25, 3384, 69, 12, 23, 532, 9, 12, 198, 2, 2488, 13838, 220, 220, 1058, 22998, 198, 2, 2488, 10430, 7575, 1058, 33448, 14, 940, 14, 1558, 1160, 25, 1821, 198, 2, 2488, 8979, 5376, 1058, 11593, 15003, 834, 13...
2.357143
56
"""Print how many confirmed COVID-19 cases were reported in a country
between yesterday and today, using the covid19api.com public API."""
import requests
from datetime import date, timedelta

today = date.today()
yesterday = today - timedelta(days=1)

country = "Russia"
endpoint = f"https://api.covid19api.com/country/{country}/status/confirmed"
params = {"from": str(yesterday), "to": str(today)}

# BUGFIX: added a timeout (requests.get without one can hang forever) and an
# explicit HTTP status check, so error pages fail loudly instead of producing
# an opaque JSON decoding error.
response = requests.get(endpoint, params=params, timeout=30)
response.raise_for_status()
data = response.json()

# Each element of the response is one day's record; 'Cases' may be absent.
total_confirmed = sum(day.get("Cases", 0) for day in data)

print(f"\nTotal Confirmed Covid-19 cases in {country}: {total_confirmed}")
[ 11748, 7007, 198, 6738, 4818, 8079, 1330, 3128, 11, 28805, 12514, 198, 198, 40838, 796, 3128, 13, 40838, 3419, 198, 8505, 6432, 796, 1909, 532, 28805, 12514, 7, 12545, 28, 16, 8, 198, 19315, 796, 366, 16347, 1, 198, 437, 4122, 796, ...
3.012121
165
# import models from torchvision from torchvision.models import * # import models from efficientnet from .efficientnet import b0, b1, b2, b3, b4, b5, b6, b7
[ 2, 1330, 4981, 422, 28034, 10178, 198, 6738, 28034, 10178, 13, 27530, 1330, 1635, 198, 2, 1330, 4981, 422, 6942, 3262, 198, 6738, 764, 16814, 3262, 1330, 275, 15, 11, 275, 16, 11, 275, 17, 11, 275, 18, 11, 275, 19, 11, 275, 20, ...
3.078431
51
"""Plot the results from the MPI timing runs.

Usage: python <script> <start_csv> <end_csv>

Produces (and saves as PDF) a 3D rank-pair-vs-time scatter and a 2D
merge-connection diagram from two CSV files of (rankA, rankB, time) rows.
"""
import sys

import numpy as np
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
import matplotlib.pyplot as plt
import matplotlib.markers as mkr

plt_style = 'ggplot'
plt.rcParams['font.size'] = 11
plt.rcParams['font.family'] = 'serif'
plt.rcParams['axes.labelsize'] = 11
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['xtick.labelsize'] = 9
plt.rcParams['ytick.labelsize'] = 9
plt.rcParams['figure.titlesize'] = 12

# Load the CSV files of rank pairings and times into structured arrays with
# fields A (base rank), B (merged rank), t (timestamp).
StartStr = str(sys.argv[1])
EndStr = str(sys.argv[2])
_dtype = {'names': ('A', 'B', 't'), 'formats': ('i4', 'i4', 'f8')}
# BUGFIX: open the files in a context manager instead of leaking the handles.
with open(StartStr) as f:
    start = np.loadtxt(f, delimiter=',', dtype=_dtype)
with open(EndStr) as f:
    end = np.loadtxt(f, delimiter=',', dtype=_dtype)

# Index each (A, B, t) record by the "A:B" rank pair for easy pairing of
# start and end events.
ds = [{'%s:%s' % (a, b): (a, b, t)
       for a, b, t in zip(start['A'], start['B'], start['t'])}]
de = [{'%s:%s' % (a, b): (a, b, t)
       for a, b, t in zip(end['A'], end['B'], end['t'])}]

# Earliest start time over all ranks serves as the t=0 offset.
t0 = np.min(start['t'])

# --- 3D Rank A:B vs time diagram ---
fig = plt.figure()
plt.style.use(plt_style)
fig.clf()
ax = fig.add_subplot(111, projection='3d')
ax.set_zlabel('time [s]')
ax.set_ylabel('Rank To Merge')
ax.set_xlabel('Rank Base')

# Plot the recorded times and connect ranks that have been merged together.
for a in ds[0].keys():
    ax.scatter(ds[0][a][0], ds[0][a][1], ds[0][a][2] - t0, c='r', marker='o')  # start
    ax.scatter(de[0][a][0], de[0][a][1], de[0][a][2] - t0, c='b', marker='x')  # end
    # Line between start and finish of each merge.
    ax.plot([ds[0][a][0], de[0][a][0]],
            [ds[0][a][1], de[0][a][1]],
            [ds[0][a][2] - t0, de[0][a][2] - t0], c='k')

ax.set_zlim3d([0, np.max(end['t']) - t0])
ax.set_ylim3d([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
ax.set_xlim3d([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
plt.show()

# Save the 3D plot output.
# NOTE(review): saving after plt.show() can yield a blank figure on some
# backends -- kept as in the original; confirm against the target backend.
plt.savefig('3d_%s_%s.pdf' % (StartStr, EndStr))
plt.clf()

plt.style.use(plt_style)

# --- 2D connections diagram ---
# Draw horizontal lines to mark the MPI ranks.
# BUGFIX: xrange is Python 2 only (NameError on Python 3); use range.
for ii in range(np.max([start['A'], start['B']])):
    plt.axhline(ii, xmin=0, xmax=1, linewidth=0.5)

# Draw lines between the start and end for reducing 2 data sets.
for a in ds[0].keys():
    plt.plot([ds[0][a][2] - t0, de[0][a][2] - t0],
             [ds[0][a][1], de[0][a][0]],
             linestyle='-', linewidth=0.5, c='k', alpha=0.8)

plt.scatter(start['t'] - t0, start['B'], marker='x', c='r', alpha=0.8)
plt.scatter(end['t'] - t0, end['A'], marker='o', c='b', alpha=0.8)
plt.xlabel('time [s]')
plt.ylabel('MPI rank')
plt.title('%s_%s' % (StartStr, EndStr))
plt.xlim([0, np.max(end['t']) - t0])
plt.ylim([np.min([end['A'], end['B']]), np.max([end['A'], end['B']])])
plt.show()

# Save the 2D plot output.
plt.savefig('2d_%s_%s.pdf' % (StartStr, EndStr))
[ 2, 1212, 2393, 21528, 262, 2482, 422, 262, 4904, 40, 10576, 4539, 198, 11748, 25064, 198, 11748, 299, 32152, 355, 45941, 198, 6738, 285, 489, 62, 25981, 74, 896, 13, 76, 29487, 18, 67, 1330, 12176, 274, 18, 35, 198, 11748, 2603, 294...
2.160061
1,312
from typing import Iterator from entitykb import Span, interfaces, Doc
[ 6738, 19720, 1330, 40806, 1352, 198, 198, 6738, 9312, 32812, 1330, 49101, 11, 20314, 11, 14432, 628, 628, 628 ]
4.052632
19
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # import pytest from click.testing import CliRunner from octavia_cli import entrypoint
[ 2, 198, 2, 15069, 357, 66, 8, 33448, 3701, 26327, 11, 3457, 1539, 477, 2489, 10395, 13, 198, 2, 198, 198, 11748, 12972, 9288, 198, 6738, 3904, 13, 33407, 1330, 1012, 72, 49493, 198, 6738, 19318, 40543, 62, 44506, 1330, 5726, 4122, 6...
3.386364
44
import json import logging import random from datetime import datetime from typing import Optional from paho.mqtt.client import MQTTMessage from enocean.protocol.constants import PACKET from enocean.protocol.packet import RadioPacket from src.command.switch_command import SwitchCommand from src.common.json_attributes import JsonAttributes from src.common.switch_state import SwitchState from src.device.base.cyclic_device import CheckCyclicTask from src.device.base.scene_actor import SceneActor from src.device.eltako.fsr61_eep import Fsr61Eep, Fsr61Action, Fsr61Command from src.device.misc.rocker_switch_tools import RockerSwitchTools, RockerAction, RockerButton from src.enocean_connector import EnoceanMessage from src.tools.enocean_tools import EnoceanTools from src.tools.pickle_tools import PickleTools
[ 11748, 33918, 198, 11748, 18931, 198, 11748, 4738, 198, 6738, 4818, 8079, 1330, 4818, 8079, 198, 6738, 19720, 1330, 32233, 198, 198, 6738, 279, 17108, 13, 76, 80, 926, 13, 16366, 1330, 337, 48, 15751, 12837, 198, 198, 6738, 551, 78, 5...
3.428571
238
import numpy as np import neuroml import neuroml.arraymorph as am
[ 11748, 299, 32152, 355, 45941, 198, 11748, 11943, 296, 75, 198, 11748, 11943, 296, 75, 13, 18747, 24503, 355, 716, 628 ]
3.190476
21