content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
# NOTE(review): `django.conf.urls.url` is deprecated since Django 3.1 and
# removed in Django 4.0; these routes should eventually migrate to
# `django.urls.re_path` (drop-in replacement) or `path`.
from django.conf.urls import url

from . import views

# The API URLs are now determined automatically by the router
# NOTE(review): the comment above looks stale — no DRF router is used here;
# the API routes are declared explicitly with `path()` below.
#
# URL configuration: template-rendered pages first, then the DRF API views.
urlpatterns = [
    # Template-rendered pages.
    url(r"^$", views.index, name="index_page"),
    url(r"^projects/create$", views.project_create_view, name="create_project"),
    url(r"^projects/(?P<pk>\d+)/tasks$", views.project_page_view, name="project_page"),
    url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/annotation$", views.annotate_task_view, name="annotation_page"),
    url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/delete$", views.task_delete_view, name="delete_task"),
    url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/list_annotations$", views.list_annotations_for_task_view, name="list_task_annotations"),
    url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/list_annotations/(?P<annotation_pk>\d+)/review$", views.review_annotation_view, name="review_page"),
    url(r"^projects/(?P<pk>\d+)/tasks/(?P<task_pk>\d+)/annotation/delete$", views.annotation_delete_view, name="delete_annotation"),
    url(r"^projects/(?P<pk>\d+)/edit$", views.project_edit_view, name="edit_project"),
    url(r"^projects/(?P<pk>\d+)/delete$", views.project_delete_view, name="delete_project"),

    # API VIEWS
    path('api/v1/projects/', views.ProjectList.as_view(), name="project-list"),
    path('api/v1/projects/<int:pk>/', views.ProjectDetail.as_view(), name="specific_project"),
    path('api/v1/projects/<int:pk>/tasks', views.ProjectTasks.as_view(), name="project-list-tasks"),
    path('api/v1/root', views.api_root, name="api_root"),
]

# Allow optional format suffixes (e.g. `.json`) on the API endpoints.
urlpatterns = format_suffix_patterns(urlpatterns)
nilq/baby-python
python
"""Flask service exposing metrics endpoints for Prometheus scraping.

Wires a Flask app through agaveflask's `AgaveApi` wrapper, enables CORS,
and registers the `/metrics` and `/cron` resources.
"""
from flask import Flask, render_template
from flask_cors import CORS
from prometheus_client import Summary, MetricsHandler, Counter
from agaveflask.utils import AgaveApi, handle_error
from controllers import MetricsResource, CronResource
from errors import errors

# NOTE(review): `render_template`, `MetricsHandler` and `Counter` are
# imported but unused in this module — confirm before removing.

app = Flask(__name__)
CORS(app)
api = AgaveApi(app, errors=errors)

# Prometheus summary metric for request processing time.
REQUEST_TIME = Summary('request_processing_seconds', 'DESC: Time spent processing request')

# todo - probably should add a basic auth check
# for now, we comment this out because we do not authenticate the calls from prometheus;
# Authn/z
# @app.before_request
# def auth():
#     authn_and_authz()

# Route all error paths through agaveflask's handler so responses use the
# project-wide error format.
api.handle_error = handle_error
api.handle_exception = handle_error
api.handle_user_exception = handle_error

# Resources
api.add_resource(MetricsResource, '/metrics')
api.add_resource(CronResource, '/cron')

if __name__ == '__main__':
    # Bind on all interfaces; debug mode is for development only.
    app.run(host='0.0.0.0', debug=True)
nilq/baby-python
python
#!/usr/local/bin/python
"""Small Python 2 HTTP server that serves files from ./serve with CORS headers.

Prints colorized request details to the terminal. `-p` sets the port
(default 8000); `-d` names a default file to serve when the requested
path is not found in ./serve.
"""
import SimpleHTTPServer, SocketServer, logging, subprocess, sys, glob, re, mimetypes
import argparse as argparse

# Stop traceback on ctrl-c
sys.tracebacklimit = 0

parser = argparse.ArgumentParser()
parser.add_argument("-p", nargs='?', default=8000)
parser.add_argument("-d", nargs='?', default=None)
args = parser.parse_args()

PORT = int(args.p)

# Snapshot of the servable file names (relative to ./serve), taken once at
# startup — files added later are not picked up.
serve_listing = glob.glob("serve/*")
serve_files = []
for f in serve_listing:
    serve_files.append(f.replace("serve/", ""))

# ANSI escape sequences used to colorize terminal output.
ANSI_COLOR_RED = "\x1b[31m"
ANSI_COLOR_GREEN = "\x1b[32m"
ANSI_COLOR_YELLOW = "\x1b[33m"
ANSI_COLOR_BLUE = "\x1b[34m"
ANSI_COLOR_MAGENTA = "\x1b[35m"
ANSI_COLOR_CYAN = "\x1b[36m"
ANSI_COLOR_RESET = "\x1b[0m"

class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):

    def log_message(self, format, *args):
        # Silence the default per-request access log; this script does its
        # own (colorized) logging in do_GET/do_POST.
        pass

    def do_HEAD(self):
        # Masquerade as nginx to suppress server-version information leakage.
        self.server_version = "nginx"
        self.sys_version = ""
        self.send_response(204)
        self.send_header("Access-Control-Allow-Origin", "*")
        # NOTE(review): no self.end_headers() here — the response header
        # block is never terminated, so clients may hang; confirm and fix.

    def do_GET(self):
        # Suppress information leakage & Deal with CORS
        self.server_version = "nginx"
        self.sys_version = ""
        # Terminal width, used to draw a separator line.
        rows, columns = subprocess.check_output(['stty', 'size']).split()
        print "="*int(columns)
        print "> %sRequested GET path: %s%s" % (ANSI_COLOR_MAGENTA, self.path, ANSI_COLOR_RESET)
        for h in self.headers:
            print "> %s%s%s: %s" % (ANSI_COLOR_GREEN, h, ANSI_COLOR_RESET, self.headers[h])
        # Strip the leading "/" and any query string from the request path.
        path = self.path[1:]
        path = re.sub("\?(.|\n)*", "", path)
        if path in serve_files:
            fp = "serve/%s" % path
            # NOTE(review): file handle is never closed (no `with`).
            d = open(fp).read()
            # Fall back to text/plain when the MIME type cannot be guessed.
            t = mimetypes.guess_type(fp)[0] if not mimetypes.guess_type(fp)[0] == None else "text/plain"
            self.send_response(200)
            self.send_header("Access-Control-Allow-Origin", "*")
            self.send_header("Content-type", t)
            self.send_header("Content-length", len(d))
            self.end_headers()
            self.wfile.write(d)
            return
        # Not in ./serve: fall back to the default file given by -d, if any.
        if args.d != None:
            fp = "serve/%s" % args.d
            d = open(fp).read()
            t = mimetypes.guess_type(fp)[0] if not mimetypes.guess_type(fp)[0] == None else "text/plain"
            self.send_response(200)
            self.send_header("Access-Control-Allow-Origin", "*")
            self.send_header("Content-type", t)
            self.send_header("Content-length", len(d))
            self.end_headers()
            self.wfile.write(d)
            return
        self.send_response(404)
        self.send_header("Access-Control-Allow-Origin", "*")
        # NOTE(review): 404 branch also never calls self.end_headers().

    def do_POST(self):
        # Suppress information leakage & Deal with CORS
        self.server_version = "nginx"
        self.sys_version = ""
        rows, columns = subprocess.check_output(['stty', 'size']).split()
        print "="*int(columns)
        print "> %sRequested POST path: %s%s" % (ANSI_COLOR_MAGENTA, self.path, ANSI_COLOR_RESET)
        for h in self.headers:
            print "> %s%s%s: %s" % (ANSI_COLOR_BLUE, h, ANSI_COLOR_RESET, self.headers[h])
        # Echo the POST body to the terminal; assumes Content-Length is set.
        data = self.rfile.read(int(self.headers['Content-Length']))
        print data
        self.send_response(200)

Handler = GetHandler

httpd = SocketServer.TCPServer(("", PORT), Handler)
httpd.serve_forever()
nilq/baby-python
python
import os
import time
import numpy as np
import paddle.fluid as fluid
import config as cfg
from nets.attention_model import attention_train_net
from nets.crnn_ctc_model import ctc_train_net
from utils import data_reader
from utils.utility import get_ctc_feeder_data, get_attention_feeder_data


def main():
    """OCR training driver.

    Selects the network (CRNN-CTC or attention) from ``cfg.use_model``,
    builds the training program, then runs the train loop with periodic
    logging, evaluation and checkpointing, all driven by the settings in
    the ``config`` module.
    """
    # Select the network builder and the matching feed-dict helper.
    if cfg.use_model == "crnn_ctc":
        train_net = ctc_train_net
        get_feeder_data = get_ctc_feeder_data
    else:
        train_net = attention_train_net
        get_feeder_data = get_attention_feeder_data

    # define network
    sum_cost, error_evaluator, inference_program, model_average = train_net(cfg, cfg.data_shape, cfg.num_classes)

    # data reader
    # cycle=True lets the reader loop forever when training is bounded by
    # total_step rather than by epochs.
    train_reader = data_reader.train(batch_size=cfg.batch_size, prefix_path=cfg.train_prefix, cycle=cfg.total_step > 0, model=cfg.use_model)
    test_reader = data_reader.test(prefix_path=cfg.test_prefix, model=cfg.use_model)

    # prepare environment
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # Load the initial (pretrained) model, if one was configured.
    if cfg.init_model:
        fluid.load(program=fluid.default_main_program(), model_path=cfg.init_model, executor=exe, var_list=fluid.io.get_program_parameter(fluid.default_main_program()))
        print("Init model from: %s." % cfg.init_model)

    train_exe = exe
    error_evaluator.reset(exe)
    if cfg.parallel:
        # Multi-card training; metrics from each card are summed below.
        train_exe = fluid.ParallelExecutor(use_cuda=cfg.use_gpu, loss_name=sum_cost.name)

    fetch_vars = [sum_cost] + error_evaluator.metrics

    def train_one_batch(data):
        # Run one training step and return the fetched metric values.
        var_names = [var.name for var in fetch_vars]
        if cfg.parallel:
            results = train_exe.run(var_names, feed=get_feeder_data(data, place))
            # Sum across devices.
            results = [np.array(r).sum() for r in results]
        else:
            results = exe.run(program=fluid.default_main_program(), feed=get_feeder_data(data, place), fetch_list=fetch_vars)
            results = [r[0] for r in results]
        return results

    def test():
        # Evaluate sequence error over the whole test set.
        error_evaluator.reset(exe)
        for data in test_reader():
            exe.run(inference_program, feed=get_feeder_data(data, place))
        _, test_seq_error = error_evaluator.eval(exe)
        return test_seq_error[0]

    def save_model():
        # Persist the current parameters under cfg.model_path/model.
        if not os.path.exists(cfg.model_path):
            os.makedirs(cfg.model_path)
        fluid.save(program=fluid.default_main_program(), model_path=os.path.join(cfg.model_path, "model"))
        print("Saved model to: %s" % cfg.model_path)

    iter_num = 0
    stop = False
    while not stop:
        total_loss = 0.0
        total_seq_error = 0.0
        # train a pass
        for data in train_reader():
            # Stop once the configured number of iterations is exceeded.
            if cfg.total_step < iter_num:
                stop = True
                break
            result = train_one_batch(data)
            total_loss += result[0]
            total_seq_error += result[2]
            iter_num += 1
            # training log
            if iter_num % cfg.log_period == 0:
                print("[%s] - Iter[%d]; Avg loss: %.3f; Avg seq err: %.3f" % (time.asctime(time.localtime(time.time())), iter_num, total_loss / (cfg.log_period * cfg.batch_size), total_seq_error / (cfg.log_period * cfg.batch_size)))
                total_loss = 0.0
                total_seq_error = 0.0
            # evaluate
            if iter_num % cfg.eval_period == 0:
                # When model averaging is enabled, evaluate with the
                # averaged parameters applied temporarily.
                if model_average:
                    with model_average.apply(exe):
                        test_seq_error = test()
                else:
                    test_seq_error = test()
                print("\n[%s] - Iter[%d]; Test seq error: %.3f\n" % (time.asctime(time.localtime(time.time())), iter_num, test_seq_error))
            # save model
            if iter_num % cfg.save_model_period == 0:
                if model_average:
                    with model_average.apply(exe):
                        save_model()
                else:
                    save_model()


if __name__ == "__main__":
    main()
nilq/baby-python
python
"""Voice-controlled robot assistant.

Listens for a Porcupine wake word, extracts commands with Rhino, and acts on
them: navigating to an object located via YOLO + a Kinect depth image fetched
from a Raspberry Pi over a socket, or answering questions via Wikipedia +
gTTS speech output.
"""
# Importing Libraries
import os
import cv2
import time
import struct
import socket
import pyaudio
import freenect
import wikipedia
import playsound
import numpy as np
from gtts import gTTS
from scripts.rhino.rhino import *
from scripts.porcupine.porcupine import *


# Function to get images
def get_image(type, client):
    # 'type' tells the Pi whether to send the RGB image or the depth image.
    # NOTE(review): `open("")` raises, and `IPaddr` is not defined in this
    # scope (it is local to main()) — this prologue cannot run as written;
    # confirm intent before relying on this function.
    path = ""
    file = open(path, 'w')
    file.write(IPaddr)
    file.close()
    # Sending the type of Image
    client.send(type)
    # It will wait until it gets the file name which is passed from the send function
    file_name = client.recv(1024).decode()
    print(file_name)
    # This will open a new file in your python Dir with same file name
    file = open(file_name, 'wb')
    # It will recieve the starting 10 bytes
    data = client.recv(10)
    while data:
        # print(data)
        file.write(data)
        data = client.recv(1024)
    print("Data Recieved Succesfully")
    # NOTE(review): `file` is never closed before cv2.imread reads the same
    # path, and the shared `client` socket is closed here — later calls on
    # the same socket will fail; confirm against the Pi-side protocol.
    client.close()
    # Returning the RGB or depth image read back from disk.
    image = cv2.imread(file_name)
    return image


# Function to check whether the centers are co-incident (not yet implemented).
def co_incident():
    pass


# Function to go to an object
def goTo(slots, net, LABELS, ln, client):
    # Getting the Value of the Key in Dictonary-Slots
    obj = str(slots['ob1'])
    # Initializing the variables
    x = y = z = None
    # Getting the coordinates of the object
    (x, y, z) = getCoordinates(obj, net, LABELS, ln, client)
    # Checking if the object was found or not
    # NOTE(review): getCoordinates() only ever returns a full tuple or loops
    # forever, so this None branch looks unreachable — verify.
    if x == None or y == None or z == None:
        # Speaking that object was not found
        print("None here")
        playsound.playsound('not_found.mp3')
    else:
        # Ensuring the centers co-incident
        co_incident()
        print(x, y, z)
        # Move towards the object until the depth estimate drops below zero.
        while z >= 0.0:
            # Move forward and check the distance again
            send(client, "forward")
            time.sleep(1)
            (x, y, z) = getCoordinates(obj, net, LABELS, ln, client)


# Function to get the coordinates of the given object
def getCoordinates(obj, net, LABELS, ln, client):
    # Keeps grabbing frames until the requested object is detected;
    # never returns if the object is absent.
    while True:
        # Get Images from Rpi
        frame = get_image("image", client)
        # Get Depth Image from Rpi
        depth = get_image("depth", client)
        # Getting the shape of the frame
        (H, W) = frame.shape[:2]
        # Creating blob from image
        blob = cv2.dnn.blobFromImage(frame, 1/255.0, (224, 224), swapRB=True, crop=False)
        net.setInput(blob)
        layerOutputs = net.forward(ln)
        # Initializing lists for displaying the output
        boxes = []
        confidences = []
        classIds = []
        # Looping over each layer's output
        for output in layerOutputs:
            # Looping over each detection
            for detect in output:
                # Extracting ClassID and confidence
                score = detect[5:]
                classID = np.argmax(score)
                confidence = score[classID]
                # Filtering weak detection
                if confidence > 0.5:
                    # Getting bounding rectangle (scaled back to frame size)
                    box = detect[:4] * np.array([W, H, W, H])
                    (centerX, centerY, Width, Height) = box.astype("int")
                    # Getting Top and Left Points
                    x = int(centerX - (Width/2))
                    y = int(centerY - (Height/2))
                    # Adding to lists
                    boxes.append([x, y, int(Width), int(Height)])
                    classIds.append(classID)
                    confidences.append(float(confidence))
        # Non-Maxima Suppression
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.3)
        # Checking Minimum Detection
        if len(idxs) > 0:
            # Looping over indexes
            for i in idxs.flatten():
                x = boxes[i][0]
                y = boxes[i][1]
                w1 = boxes[i][2]
                h1 = boxes[i][3]
                if LABELS[classIds[i]] == obj:
                    # Calculating the coordinates of the box center; cz is a
                    # Kinect raw-depth-to-metres conversion — TODO confirm
                    # the constants against the sensor calibration used.
                    print("Here")
                    cx = int(x + (w1/2))
                    cy = int(y + (h1/2))
                    cz = 0.1236 * np.tan(depth[cy][cx] / 2842.5 + 1.1863)
                    return (cx, cy, cz)


# Function to speak/interact
def speak(slots):
    # Getting the Value of the Key in Dictonary-Slots
    keyword = str(slots['p1'])
    # If the keyword is known
    if keyword == "yourself":
        # Declaring the canned self-introduction text
        splitted = ["Hey, my name is groooot. I am a cute, cute robooooo. I am designed by Gaurav, Harish and Swati, and I work for them. Nice meeting you. I am here to help you, just spell groooooot."]
    # If keyword is not known
    else:
        # Searching
        search_result = wikipedia.summary(keyword)
        # Splitting; only the first paragraph is spoken
        splitted = search_result.split("\n")
    # Text-to-speech model
    speech = gTTS(text=splitted[0], lang='en-in', slow=False)
    # Saving Audio File
    speech.save("speak.mp3")
    # Running Audio file
    playsound.playsound('speak.mp3')


def send(client, dir):
    # Sending data to server
    client.send(dir)
    # Waiting for feedback
    while client.recv(1024) != 'done':
        pass
    # NOTE(review): closing the shared socket here breaks subsequent sends
    # from goTo()'s loop — confirm against the Pi-side protocol.
    client.close()


# Main Function
def main():
    # Initializing Variables
    awake = False
    intent_extraction_is_finalized = False

    # Loading Picovoice Models
    rhino_wakeword = Porcupine(library_path="/home/garima/Gaurav/Blog_2/Integrated/res/libpv_porcupine.so", model_file_path="/home/garima/Gaurav/Blog_2/Integrated/res/porcupine_params.pv", keyword_file_paths=["/home/garima/Gaurav/Blog_2/Integrated/res/hey_groot.ppn"], sensitivities=[0.5])
    rhino_commands = Rhino(library_path="/home/garima/Gaurav/Blog_2/Integrated/res/libpv_rhino.so", model_path="/home/garima/Gaurav/Blog_2/Integrated/res/rhino_params.pv", context_path="/home/garima/Gaurav/Blog_2/Integrated/res/robo.rhn")

    # setup audio
    pa = pyaudio.PyAudio()
    audio_stream = pa.open(rate=rhino_commands.sample_rate, channels=1, format=pyaudio.paInt16, input=True, frames_per_buffer=rhino_commands.frame_length)

    # Loading label, weight and configuration model paths for YOLO
    labelPath = os.path.sep.join(["yolo-coco", "coco.names"])
    weightPath = os.path.sep.join(["yolo-coco", "yolov3.weights"])
    configPath = os.path.sep.join(["yolo-coco", "yolov3.cfg"])
    # Loading Labels
    LABELS = open(labelPath).read().strip().split("\n")
    # Loading YOLO
    net = cv2.dnn.readNetFromDarknet(configPath, weightPath)
    # Determining YOLO output layer
    ln = net.getLayerNames()
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]

    # Setting up Rpi GPIO pins numbering
    # GPIO.setmode(GPIO.BOARD)
    # Declaring Pin modes
    # GPIO.setup(3, GPIO.OUT)
    # GPIO.setup(5, GPIO.OUT)
    # GPIO.setup(11, GPIO.OUT)
    # GPIO.setup(13, GPIO.OUT)

    # Making Commonly used Audio Files
    # Speech to Text
    wake = gTTS(text="At your service friend!", lang="en-in", slow=False)
    error = gTTS(text="I'm tired! I will take a nap.", lang="en-in", slow=False)
    not_found = gTTS(text="Object not found!", lang="en-in", slow=False)
    # NOTE(review): this message reads like it is missing a "don't" —
    # confirm the intended wording (runtime string left untouched).
    not_understood = gTTS(text="I understand your order friend", lang="en-in", slow=False)
    # Saving Audio File
    wake.save("wake.mp3")
    error.save("error.mp3")
    not_found.save("not_found.mp3")
    not_understood.save("unclear.mp3")

    # Sockets Initializing
    network = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Intialising the Port
    port = 12345
    network.bind(('', port))
    hostname = socket.gethostname()
    IPaddr = socket.gethostbyname(hostname)
    network.listen(5)
    # Getting Client host name and the IP address Details
    client, addr = network.accept()
    print("Start")

    # detect commands in continuous loop
    while True:
        # Reading Input
        pcm = audio_stream.read(rhino_commands.frame_length)
        pcm = struct.unpack_from("h" * rhino_commands.frame_length, pcm)
        try:
            # If wake word is not spoken
            if not awake:
                # Processing the voice input
                result = rhino_wakeword.process(pcm)
                # If wake word is the input, result is true
                if result:
                    # Wake Word detected
                    awake = True
                    time.sleep(0.1)
                    print("awake")
                    # playsound.playsound('wake.mp3')
                    # os.system('mpg321 wake.mp3')
                    # time.sleep(5)
                    print("Speak More")
            elif not intent_extraction_is_finalized:
                # Getting Intent Extraction
                intent_extraction_is_finalized = rhino_commands.process(pcm)
            else:
                # If the command is detected
                if rhino_commands.is_understood():
                    # Getting Intent and Slots
                    intent, slots = rhino_commands.get_intent()
                    print(intent)
                    playsound.playsound('wake.mp3')
                    # os.system('mpg321 wake.mp3')
                    # Checking Intent and doing Neccessary Action
                    # If going to an object is the intent
                    if intent == "goTo":
                        # Shift the control to goTo function
                        goTo(slots, net, LABELS, ln, client)
                    # If speaking is the intent
                    elif intent == "speak":
                        # Shift the control to speak function
                        speak(slots)
                    # If coming back is the intent
                    # elif intent == "comeBack":
                    #     comeBack(slots)
                    # If Stop is the intent
                    elif intent == "stop":
                        # Shift the control to stop function
                        # NOTE(review): stop() is not defined in this file —
                        # this branch raises NameError (caught below).
                        stop()
                    # No match
                    else:
                        # Command not found
                        time.sleep(0.1)
                        print("1")
                        playsound.playsound('unclear.mp3')
                # Command not understood
                else:
                    # print("Command not understood")
                    time.sleep(0.1)
                    playsound.playsound('unclear.mp3')
                # Resetting Rhino to detect new command
                rhino_commands.reset()
                awake = False
                intent_extraction_is_finalized = False
        except Exception as e:
            # Any failure plays the error sound and exits the process.
            print(e)
            time.sleep(0.1)
            playsound.playsound('error.mp3')
            exit()
            # os.system('python3 try_1.py')


# Calling Main Function
if __name__ == "__main__":
    main()
nilq/baby-python
python
from .publish_measurement_handler import PublishMeasurementTransactionHandler from .issue_ggo_transaction_handler import IssueGGOTransactionHandler from .transfer_ggo_handler import TransferGGOTransactionHandler from .split_ggo_handler import SplitGGOTransactionHandler from .retire_ggo_handler import RetireGGOTransactionHandler from .settlement_handler import SettlementHandler
nilq/baby-python
python
__author__ = 'guorongxu'

import sys
import re
import math
import logging


def parse_correlation(correlation_file):
    """Read a tab-delimited correlation file into a lookup dict.

    Each line is split on runs of tabs; the full field list is stored under
    the key "<field0>_<field1>" (the two node identifiers).

    Args:
        correlation_file: Path to the tab-delimited correlation file.

    Returns:
        Dict mapping "<node1>_<node2>" to the list of that line's fields.
    """
    correlation_list = {}
    with open(correlation_file) as fp:
        for line in fp:
            fields = re.split(r'\t+', line)
            # Skip malformed lines that lack the two node-id columns.
            if len(fields) >= 2:
                correlation_list[fields[0] + "_" + fields[1]] = fields
    return correlation_list


def parse_cluster(cluster_file, correlation_list):
    """Append matched correlation edges for a cluster file to "<cluster_file>.rep".

    Header lines (first field "cor") are copied through verbatim. For every
    line, both orientations of the node pair in fields 3 and 4 are looked up
    in ``correlation_list``; matches are written out with the correlation
    values (edge fields 2 and 3) substituted in.

    Args:
        cluster_file: Path to the cluster file to process.
        correlation_list: Dict produced by :func:`parse_correlation`.
    """
    # "a" preserves the original append semantics: repeated runs accumulate
    # into the same .rep file. `with` guarantees the handles are closed even
    # on error (the original leaked `filewriter` on exceptions).
    with open(cluster_file + ".rep", "a") as filewriter, open(cluster_file) as fp:
        for line in fp:
            fields = re.split(r'\t+', line)
            if fields[0] == "cor":
                filewriter.write(line)
            # Guard against short/malformed lines instead of raising IndexError.
            if len(fields) > 4:
                forward = fields[3] + "_" + fields[4]
                backward = fields[4] + "_" + fields[3]
                if forward in correlation_list:
                    edge = correlation_list[forward]
                    filewriter.write(edge[2] + "\t" + fields[1] + "\t" + edge[3] + "\t" + fields[3] + "\t" + fields[4])
                if backward in correlation_list:
                    edge = correlation_list[backward]
                    filewriter.write(edge[2] + "\t" + fields[1] + "\t" + edge[3] + "\t" + fields[3] + "\t" + fields[4])


## Main entry
if __name__ == "__main__":
    correlation_file = sys.argv[1]
    cluster_file = sys.argv[2]

    # Fixed: the original used the Python 2 `print` statement
    # (`print correlation_file`), a SyntaxError under Python 3.
    print(correlation_file)
    logging.info("correlation file: %s", correlation_file)
    logging.info("cluster file: %s", cluster_file)

    correlation_list = parse_correlation(correlation_file)
    parse_cluster(cluster_file, correlation_list)
nilq/baby-python
python
"""Create grid-based spatial indexes.

Basic Usage
===========

Calculate the grid index or indices for a geometry provided in well-known
binary format at a given resolution:

Example:
    >>> from shapely.geometry import Point
    >>> pnt = Point(555000, 185000)
    >>> bng_pnt = calculate_bng_index(
            wkb = pnt.wkb,
            resolution = 100,
        )

Changing Resolution
===================

Indices can be calculated for cell sizes of 1m, 10m, 100m, 1000m, 10000m
and 100000m:

Example:
    >>> from shapely.geometry import LineString
    >>> line = LineString([(450750, 175000), (535000, 195250)])
    >>> bng_line = calculate_bng_index(
            wkb = line.wkb,
            resolution = 1000,
        )

Index Creation Options
======================

The ``how`` argument can be used to change the kind of indices created.

Points and Multi-Points
-----------------------
The default and only option for ``how`` is 'intersects'. This returns the
British National Grid index that the point falls within. If the point lies
on an edge or corner of the grid cell then 2 or 4 grid cells indices are
returned as appropriate.

LineStrings and MultiLineStrings
--------------------------------
The default option for ``how`` is 'intersects'. This returns all indices
for the British National Grid cells that the line geometry intersects.

An alternative option is 'bounding box', which returns all indices that
intersect with the bounding box of the line geometry:

Example:
    >>> bng_line = calculate_bng_index(
            wkb = line.wkb,
            resolution = 100,
            how = 'bounding box'
        )

Although bounding boxes are fast to compute, in most cases 'intersects'
will be preferable as bounding box indexing, particularly at higher
resolutions, will lead to considerable redundancy.

Polygons and MultiPolygons
--------------------------
The default option for ``how`` is 'intersects', but alternative options of
'bounding box' and 'contains' are also available.

The 'bounding box' returns the British National Grid indices which
intersect the Polygon bounding box.

The 'contains' option returns one or more tuples containing the indices
that intersect the Polygon and a boolean, where ``true`` indicates that the
grid cell is contained within the Polygon and ``false`` that the grid cell
intersects the Polygon, but doesn't lie within it (e.g. the cell crosses
the Polygon boundary).

Example:
    >>> from shapely.geometry import Polygon
    >>> poly = Polygon([(535000, 175000), (555250, 185000),
                        (556000, 162500), (527500, 160333),
                        (535000, 175000)])
    >>> bng_poly = calculate_bng_index(
            wkb = poly.wkb,
            resolution = 100,
            how = 'contains'
        )

Intended Usage
==============

The top-level ``calculate_bng_index()`` function is intended to be applied
over a column of geometries. The approach will support mixes of geometry
types in a single column. Although it is primarily intended for use in
Spark, we first present an example using ``geopandas`` which may be more
familiar:

Example:
    >>> import geopandas
    >>> gdf = geopandas.read_file('some file of interest')
    >>> bng = gdf.apply(lambda row: calculate_bng_index(row.geometry.wkb, 100), axis = 1)

When using the function in spark, the same approach applies, however you
first need to create a user-defined function (udf).

    >>> from pyspark.sql.functions import udf
    >>> from pyspark.sql.types import StringType, ArrayType
    >>> from typing import Sequence
    >>> @udf(returnType=ArrayType(StringType()))
    >>> def apply_index(wkb: bytearray) -> Sequence[str]:
            return calculate_bng_index(wkb, resolution=100, how='intersects')

This user defined function can then be applied to a spark dataframe,
assuming it stores the geometry in well-known binary format:

Example:
    >>> sdf = spark.read.parquet('some parquet file of interest')
    >>> sdf = sdf.withColumn('bng', apply_index('geometry'))

The intent of the indexing is that it can then be used to benefit
geospatial filtering and joining operations.

Get British National Grid Cell Geometries
=========================================

A top-level helper function is provided for simple translation of British
National Grid references into well-known text that can be plotted. The
resolution is inferred from each reference:

Example:
    >>> import geopandas
    >>> from shapely.wkt import loads
    >>> box = wkt_from_bng("TQ3415")
    >>> gdf = geopandas.GeoDataFrame(geometry = [box])
    >>> gdf.plot()

The ``wkt_from_bng()`` function is also designed to be applied to
collections of references:

Example:
    >>> import geopandas
    >>> from shapely.wkt import loads
    >>> boxes = list(map(wkt_from_bng, ["TQ3415", "SP4087", "SS9015"]))
    >>> gdf = geopandas.GeoDataFrame(geometry = boxes)
    >>> gdf.plot()
"""

from bng_indexer._indexing import calculate_bng_index, wkt_from_bng
nilq/baby-python
python
from enum import IntEnum


class Finger(IntEnum):
    """The five fingers of a hand, indexed 0 (thumb) through 4 (little)."""

    Thumb = 0
    Index = 1
    Middle = 2
    Ring = 3
    Little = 4

    @staticmethod
    def get_array_of_points(finger):
        """Return the landmark-index pairs describing *finger*'s segments.

        Any value that is not one of the first four fingers falls back to
        the little finger's segments, matching the original if/elif chain.
        """
        segments = {
            Finger.Thumb: [(0, 4), (4, 3), (3, 2), (2, 1)],
            Finger.Index: [(0, 8), (8, 7), (7, 6), (6, 5)],
            Finger.Middle: [(0, 12), (12, 11), (11, 10), (10, 9)],
            Finger.Ring: [(0, 16), (16, 15), (15, 14), (14, 13)],
        }
        return segments.get(finger, [(0, 20), (20, 19), (19, 18), (18, 17)])

    @staticmethod
    def get_finger_name(finger):
        """Return the display name for *finger*, or '' when unrecognised."""
        names = {
            Finger.Thumb: 'Thumb',
            Finger.Index: 'Index',
            Finger.Middle: 'Middle',
            Finger.Ring: 'Ring',
            Finger.Little: 'Little',
        }
        return names.get(finger, '')
nilq/baby-python
python
import torch import os import sys import re import logging from os.path import isfile import copy import threading import time import enum from torch.multiprocessing import Pool, Process, set_start_method, Manager, Value, Lock try: set_start_method('spawn') except RuntimeError: pass class CFMode(enum.Enum): MANUAL = 0 AUTO = 1 """ Auto aggressive checkpoint manager for PyTorch Usage : In the training script: Initialize with the model and optimizer in local_rank 0 chk = CFCheckpoint(model=model, optim=optimizer, dl=dl) chk_manager = CFManager(chk, chk_dir='./chk/', freq=CheckFreqMode.AUTO) To initiate a checkpoint at the given frequency (done internally if AUTO, if MANUAL, user code must trigger): Snapshot the state in-memory -------------------------- chk_manager.snapshot() Persist the in-memory snapshot in background -------------------------- chk_manager.persist() On recovery, to resume from a checkpoint: Restores the latest checkpoint in the dir being managed ---------------------------------------- chk = CFCheckpoint(model=model, optim=optimizer, dl=dl) chk_manager = CFManager(chk, chk_dir='./chk/', freq=CheckFreqMode.AUTO) chk_manager.restore() """ class CFManager: """ `chk_dir` : Directory where the checkpoints are managed. Can be local storage path, or any remote storage that exposes POSIX. All checkpoint versions are maintained in this directory, and on start-up, the latest checkpoint version in this dir is restored. `chk` : An instance of the CFCheckpoint class that tracks one or more tractable object for snapshotting. `overwrite` : If true, maintains only the latest version of the checkpoint at any instant, with the exception of checkpoints made at epoch boundaries (controlled by `keep_epoch_chk`). Storage space required is low. If false, keeps all versions of checkpoints written so far, managed by the checkpointing frequency. 
Uses a lot of storage space -TODO: control this by tuning `keep_latest_n` At any point, there can be a max of two active checkpoints if `overwrite` is True - one completed, and one ongoing. `keep_epoch_chk` : If true, keeps a version of the checkpoint taken at epoch boundaries that can be used later to restore the model to the best val accuracy for instance. This is the default behaviour. Although beware this assumes you have enough storage space to maintain `n` versions of the checkpoint, where `n` is the number of epochs you train for. If false, checkpoints are overwritten - at any instant, only the latest checkpoint is maintained. Default is to overwrite iter chk and keep epoch chk. `chk_prefix` : Prefix for the cjheckpoint file """ def __init__( self, chk_dir, chk, keep_epoch_chk = True, overwrite = True, mode = CFMode.AUTO, chk_prefix = 'model_v_'): self.logger = logging.getLogger(__name__) self.chk_dir = chk_dir self.chk = chk self.keep_epoch_chk = keep_epoch_chk self.overwrite = overwrite self.chk_prefix = chk_prefix self.mode = mode self.chk_epoch_subdir = 'epoch' self.mp_manager = Manager() self.snapshot_copy = None self.cpu_side = False # Active snapshot, if true, don't snapshot again self.active_snapshot = Value('i', 0) self.lock = Lock() self.in_progress_snapshot = Value('i', 0) # Handle to the process performing checkpoint # Can be only one at any instant. 
A new checkpoint # cannot start unless the previous one completes self.chk_process = None # `overwrite` supersedes if False if self.overwrite is False and self.keep_epoch_chk is False: self.keep_epoch_chk = True # Global ID of checkpoints being written # Used to format the checkpoint path # Instantiate from chk when restoring self.chk_global_id = -1 # Sorted List of available checkpoints (fnames) self.available_chk_iters = self.mp_manager.list() self.available_chk_epochs = self.mp_manager.list() self.initalize_chk_dir() self.logger.info("Available checkpoints : ") for item in self.available_chk_iters: self.logger.info(item) """ The API to be used by the training code to initiate checkpoint. `additional_snapshot` : The iter, epoch, arch, and DL state must be passed as a map if required. File is saved with the `global_id` suffix Only checkpoints at epoch boundaries are suffixed with epoch# instead of ID `is_epoch` : True if this is epoch boundary """ def save( self, \ additional_snapshot=None, \ is_epoch=False, \ epoch=0, \ synchronous=False, \ profile_full=False, profile_snap=False, use_thread=False, persist=False): s = time.time() self.logger.info("[{}] ENTER SAVE FN".format(time.time())) self.chk_global_id += 1 chk_fname = self.chk_prefix + str(self.chk_global_id) filepath = self._get_full_path(chk_fname) if is_epoch: chk_fname_link = self.chk_prefix + str(self.chk_global_id) + '_' +str(epoch) filepath_link = self._get_full_path(chk_fname, epoch=True) self.logger.info("Writing chk {} at {}".format(self.chk_global_id, filepath)) if synchronous: chk_fname_sync = chk_fname + '_sync' filepath_sync = self._get_full_path(chk_fname_sync) if not is_epoch: self.chk._serialize_and_persist_direct( self.chk.latest_snapshot, filepath_sync, additional_state=additional_snapshot, persist=persist, iter_chk = self.available_chk_iters, overwrite = self.overwrite) else: chk_fname_link_sync = chk_fname_link + '_sync' filepath_link_sync = self._get_full_path(chk_fname_link_sync, 
epoch=True) self.chk._serialize_and_persist_direct( self.chk.latest_snapshot, filepath_sync, additional_state=additional_snapshot, persist=persist, iter_chk = self.available_chk_iters, overwrite = self.overwrite, linkpath=filepath_link_sync, epoch_chk = self.available_chk_epochs) return # Check if there's an ongoing checkpoint operation if self.chk_process is not None: # There is an checkpoint underway. Wait if self.chk_process.is_alive(): self.chk_process.join() # Once complete, initiate the next checkpoint synchronously self.logger.info("[{}] START SNAPSHOT".format(time.time())) success = self.chk._snapshot(self.active_snapshot.value, additional_state=additional_snapshot) if success: with self.lock: self.active_snapshot.value = 1 dur_snap = time.time() -s if profile_snap: return dur_snap, 0 if use_thread: fn = getattr(threading, 'Thread') else: fn = globals()["Process"] print("Function is {}".format(fn)) # Start persist asynchronously if not is_epoch: keywords = { \ 'iter_chk':self.available_chk_iters, \ 'overwrite':self.overwrite} self.chk_process = \ fn(target=self.chk._serialize_and_persist, \ args=[filepath, self.chk.latest_snapshot, self.active_snapshot, self.lock], kwargs=keywords) else: keywords = { \ 'iter_chk':self.available_chk_iters, \ 'overwrite':self.overwrite, \ 'epoch_chk':self.available_chk_epochs,\ 'linkpath': filepath_link} self.chk_process = \ fn(target=self.chk._serialize_and_persist,\ args=[filepath, self.chk.latest_snapshot, self.active_snapshot, self.lock], kwargs=keywords) self.logger.info("[{}] CALL PROCESS NOW".format(time.time())) self.chk_process.start() self.logger.info("[{}] RETURN FROM START".format(time.time())) if profile_full: self.chk_process.join() dur = time.time() -s #time.sleep(1) return dur_snap, dur def save_cpu( self, \ additional_snapshot=None, \ is_epoch=False, \ epoch=0, \ synchronous=False, \ persist=False, profile_snap=False, profile_full=False, use_thread=True): self.logger.info("[{}] ENTER SAVE 
FN".format(time.time())) s = time.time() self.chk_global_id += 1 chk_fname = self.chk_prefix + str(self.chk_global_id) filepath = self._get_full_path(chk_fname) if is_epoch: chk_fname_link = self.chk_prefix + str(self.chk_global_id) + '_' +str(epoch) filepath_link = self._get_full_path(chk_fname, epoch=True) self.logger.info("Writing chk {} at {}".format(self.chk_global_id, filepath)) # Check if there's an ongoing checkpoint operation if self.chk_process is not None: # There is an checkpoint underway. Wait if self.chk_process.is_alive(): self.chk_process.join() self.logger.info("Starting next snapshot {:.2f}s".format(time.time()-s)) # Once complete, initiate the next checkpoint synchronously self.logger.info("[{}] SAVE FN SNAP NOW".format(time.time())) snap_ptr = {} for name, ref in self.chk.tracking_map.items(): snap_ptr[name] = ref.state_dict() # check current snapshot status if self.active_snapshot.value == 1: self.logger.info("ERROR! Active snapshot") return with self.lock: self.in_progress_snapshot.value = 1 self.logger.info("[{}] START SAVE CALL".format(time.time())) if synchronous: self.chk._snapshot_and_persist_async(filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr, iter_chk=self.available_chk_iters, overwrite=self.overwrite) self.logger.info("Returned from save in {:.2f}s".format(time.time()-s)) self.logger.info("[{}] END SAVE".format(time.time())) return if use_thread: fn = getattr(threading, 'Thread') else: fn = globals()["Process"] print("Function is {}".format(fn)) if not is_epoch: keywords = { \ 'iter_chk':self.available_chk_iters, \ 'overwrite':self.overwrite, \ 'profile': profile_snap } self.chk_process = \ fn(target=self.chk._snapshot_and_persist_async, \ args=[filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr], kwargs=keywords) else: keywords = { \ 'iter_chk':self.available_chk_iters, \ 'overwrite':self.overwrite, \ 'epoch_chk':self.available_chk_epochs,\ 'linkpath': filepath_link, \ 
'profile': profile_snap } self.chk_process = \ fn(target=self.chk._snapshot_and_persist_async,\ args=[filepath, self.active_snapshot, self.in_progress_snapshot, self.lock, snap_ptr], kwargs=keywords) self.chk_process.start() if profile_snap or profile_full: self.chk_process.join() dur = time.time() -s #time.sleep(1) self.logger.info("Returned from save in {:.2f}s".format(time.time()-s)) self.logger.info("[{}] END SAVE".format(time.time())) return 0, dur """ Restores the latest checkpoint among all available, or the latest epoch boundary checkpoint corresponding to `epoch` Returns : Map of state of items that were not resumed yet This should be same as the map passed in to the save() call by the DL/script. These restorations are assumed to happen in the script/DL If nothing remains to be restore, returns None """ def restore(self, latest=True, epoch=0, gpu=0): fname = self.get_latest_checkpoint(latest=latest, epoch=epoch) if fname is None: return None filepath = self._get_full_path(fname, epoch=not latest) self.logger.info("Latest checkpoint is {}".format(filepath)) extra_state = self.chk._restore(filepath=filepath, gpu=gpu) return extra_state def initalize_chk_dir(self): if os.path.exists(self.chk_dir): # Get list of all files chk_files = [os.path.splitext(f)[0] for f in os.listdir(self.chk_dir) if isfile(os.path.join(self.chk_dir, f))] self.logger.info(chk_files) chk_files.sort(key=natural_keys) for files in chk_files: self.available_chk_iters.append(files) del chk_files epoch_chk_dir = os.path.join(self.chk_dir, self.chk_epoch_subdir) if os.path.exists(epoch_chk_dir): epoch_chk_files = [os.path.split(f)[0] for f in os.listdir(epoch_chk_dir) if isfile(os.path.join(epoch_chk_dir, f))] epoch_chk_files.sort(key=natural_keys) for files in epoch_chk_files: self.available_chk_epochs.append(files) del epoch_chk_files else: os.makedirs(epoch_chk_dir) else: os.makedirs(self.chk_dir) def get_latest_checkpoint(self, latest=True, epoch=0): """ Returns the full path of the 
latest checkpoint `latest` : If true, return most recent checkpoint If false, return chk corresponding to `epoch` if available """ fname = None if latest and len(self.available_chk_iters) > 0: fname = self.available_chk_iters[-1] elif len(self.available_chk_epochs) > 0: fname = self.available_chk_epochs[-1] return fname def _get_full_path(self, fname, epoch=False): if not epoch: return os.path.join(self.chk_dir, fname + '.chk') else: return os.path.join(self.chk_dir, self.chk_epoch_subdir, fname + '.chk') def weight_update(self): if 'optimizer' in self.chk.tracking_map: optimizer = self.chk.tracking_map['optimizer'] s = time.time() while self.in_progress_snapshot.value == 1: continue # self.logger.info("Progresssss") optimizer.step() #torch.cuda.synchronize() dur = time.time() - s #self.logger.info("Stall to weight update = {}s".format(dur)) else: self.logger.info("NO Optimizer found") # Returns size of tensors of all tractable items in MB @ property def get_chk_size(self): snap_ptr = {} size = 0 for name, ref in self.chk.tracking_map.items(): snap_ptr[name] = ref.state_dict() size += _get_all_size(snap_ptr[name]) return size/1024/1024 def _get_all_size(ele, sz = 0): if torch.is_tensor(ele): sz += ele.nelement()*ele.element_size() elif isinstance(ele, dict): for k,v in ele.items(): sz = _get_all_size(v, sz) elif isinstance(ele, list): for v in ele: sz = _get_all_size(v, sz) else: sz += sys.getsizeof(ele) return sz def _save(filepath, snap): torch.save(snap,filepath) def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): return [ atoi(c) for c in re.split(r'(\d+)', text) ]
nilq/baby-python
python
""" Expose PV data """ # # import logging # from datetime import datetime, timedelta # from typing import List # # from fastapi import APIRouter, Depends # from nowcasting_datamodel.models import PVYield # from nowcasting_datamodel.read.read_pv import get_latest_pv_yield, get_pv_systems # from sqlalchemy.orm.session import Session # # from database import get_session_pv # # logger = logging.getLogger(__name__) # # # router = APIRouter() # # @router.get("/pv_latest", response_model=List[PVYield]) # def get_latest_pv_data(session: Session = Depends(get_session_pv)) -> List[PVYield]: # """Get Latest PV data from specific pv sites # # Only provide PV data received within the last 1 hour # """ # # # get latest pv data # pv_systems_sql = get_pv_systems(session=session) # pv_yields_sql = get_latest_pv_yield(session=session, pv_systems=pv_systems_sql) # # # remove any data older than 1 hours # now_minus_1_hours = datetime.utcnow() - timedelta(hours=1) # pv_yields_sql = [ # pv_yield_sql # for pv_yield_sql in pv_yields_sql # if pv_yield_sql.datetime_utc >= now_minus_1_hours # ] # # # convert to pydantic # pv_yields = [PVYield.from_orm(pv_yield_sql) for pv_yield_sql in pv_yields_sql] # # return pv_yields
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.apps import AppConfig class HostManagementConfig(AppConfig): name = 'host_management'
nilq/baby-python
python
import numpy as np
from scipy.io import readsav


class fts:
    """Disk-centre FTS solar atlas loaded from an IDL save file.

    Attributes set by ``__init__`` (all per wavelength sample):
      ll -- wavelength [m]
      nu -- frequency [Hz]
      ii -- intensity in SI units [W m^-2 m^-1 sr^-1]
      cc -- continuum intensity in SI units [W m^-2 m^-1 sr^-1]

    NOTE(review): assumes the save file provides 'ftswav' [Angstrom],
    'ftsint' and 'ftscnt' [W cm^-2 sr^-1 AA^-1] -- confirm against the
    data file producer.
    """

    ll = None
    ii = None
    cc = None
    nu = None
    # Default atlas file; can now be overridden per instance (see __init__).
    datafile = './fts_disk_center.idlsave'

    def __init__(self, datafile=None):
        """Load and unit-convert the atlas.

        datafile -- optional path to the IDL save file. Defaults to the
            class-level ``datafile`` so existing callers are unaffected.
        """
        if datafile is not None:
            self.datafile = datafile
        # Source units: W / (cm2 ster AA) as emitted at the solar surface.
        t = readsav(self.datafile)
        # Convert to J s-1 m-2 m-1 sr-1 (SI).
        clight = 2.99792458e8  # speed of light [m/s]
        aa_to_m = 1e-10        # Angstrom -> metre
        cm_to_m = 1e-2         # centimetre -> metre
        self.ll = t['ftswav'] * aa_to_m
        self.nu = clight / self.ll
        # From W / (cm2 ster AA) to W / (m2 ster m).
        self.ii = t['ftsint'] * cm_to_m ** (-2) * aa_to_m ** (-1)
        self.cc = t['ftscnt'] * cm_to_m ** (-2) * aa_to_m ** (-1)
nilq/baby-python
python
# Copyright (c) 2019 Cisco and/or its affiliates. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module holding PLRsearch class.""" import logging import math import multiprocessing import time import dill # TODO: Inform pylint about scipy (of correct version) being available. from scipy.special import erfcx, erfc # TODO: Teach FD.io CSIT to use multiple dirs in PYTHONPATH, # then switch to absolute imports within PLRsearch package. # Current usage of relative imports is just a short term workaround. import Integrator # pylint: disable=relative-import from log_plus import log_plus, log_minus # pylint: disable=relative-import import stat_trackers # pylint: disable=relative-import class PLRsearch(object): """A class to encapsulate data relevant for the search method. The context is performance testing of packet processing systems. The system, when being offered a steady stream of packets, can process some of them successfully, other are considered "lost". See docstring of the search method for algorithm description. Two constants are stored as class fields for speed. Method othed than search (and than __init__) are just internal code structure. TODO: Those method names should start with underscore then. 
""" xerfcx_limit = math.pow(math.acos(0), -0.5) log_xerfcx_10 = math.log(xerfcx_limit - math.exp(10) * erfcx(math.exp(10))) def __init__( self, measurer, trial_duration_per_trial, packet_loss_ratio_target, trial_number_offset=0, timeout=1800.0, trace_enabled=False): """Store rate measurer and additional parameters. TODO: Copy AbstractMeasurer from MLRsearch. :param measurer: The measurer to call when searching. :param trial_duration_per_trial: Each trial has larger duration than the previous trial. This is the increment, in seconds. :param packet_loss_ratio_target: The algorithm tries to estimate the offered load leading to this ratio on average. Trial ratio is number of packets lost divided by packets offered. :param trial_number_offset: The "first" trial number will be 1+this. Use this to ensure first iterations have enough time to compute reasonable estimates for later trials to use. :param timeout: The search ends if it lasts more than this many seconds. :type measurer: MLRsearch.AbstractMeasurer :type trial_duration_per_trial: float :type packet_loss_ratio_target: float :type trial_number_offset: int :type timeout: float """ self.measurer = measurer self.trial_duration_per_trial = float(trial_duration_per_trial) self.packet_loss_ratio_target = float(packet_loss_ratio_target) self.trial_number_offset = int(trial_number_offset) self.timeout = float(timeout) self.trace_enabled = bool(trace_enabled) def search(self, min_rate, max_rate): """Perform the search, return average and stdev for throughput estimate. Considering measurer and packet_loss_ratio_target (see __init__), find such an offered load (called critical load) that is expected to hit the target loss ratio in the limit of very long trial duration. As the system is probabilistic (and test duration is finite), the critical ratio is only estimated. Return the average and standard deviation of the estimate. 
In principle, this algorithm performs trial measurements, each with varied offered load (which is constant during the trial). During each measurement, Bayesian inference is performed on all the measurement results so far. When timeout is up, the last estimate is returned, else another trial is performed. It is assumed that the system under test, even though not deterministic, still follows the rule of large numbers. In another words, any growing set of measurements at a particular offered load will converge towards unique (for the given load) packet loss ratio. This means there is a deterministic (but unknown) function mapping the offered load to average loss ratio. This function is called loss ratio function. This also assumes the average loss ratio does not depend on trial duration. The actual probability distribution of loss counts, achieving the average ratio on trials of various duration can be complicated (and can depend on offered load), but simply assuming Poisson distribution will make the algorithm converge. Binomial distribution would be more precise, but Poisson is more practical, as it effectively gives less information content to high ratio results. Even when applying other assumptions on the loss ratio function (increasing function, limit zero ratio when load goes to zero, global upper limit on rate of packets processed), there are still too many different shapes of possible loss functions, which makes full Bayesian reasoning intractable. This implementation radically simplifies things by examining only two shapes, each with finitely many (in this case just two) parameters. In other words, two fitting functions (each with two parameters and one argument). When restricting model space to one of the two fitting functions, the Bayesian inference becomes tractable (even though it needs numerical integration from Integrator class). 
The first measurement is done at the middle between min_rate and max_rate, to help with convergence if max_rate measurements give loss below target. TODO: Fix overflow error and use min_rate instead of the middle. The second measurement is done at max_rate, next few measurements have offered load of previous load minus excess loss rate. This simple rule is found to be good when offered loads so far are way above the critical rate. After few measurements, inference from fitting functions converges faster that this initial "optimistic" procedure. Offered loads close to (limiting) critical rate are the most useful, as linear approximation of the fitting function becomes good enough there (thus reducing the impact of the overall shape of fitting function). After several trials, usually one of the fitting functions has better predictions than the other one, but the algorithm does not track that. Simply, it uses the estimate average, alternating between the functions. Multiple workarounds are applied to try and avoid measurements both in zero loss region and in big loss region, as their results tend to make the critical load estimate worse. The returned average and stdev is a combination of the two fitting estimates. :param min_rate: Avoid measuring at offered loads below this, in packets per second. :param max_rate: Avoid measuring at offered loads above this, in packets per second. :type min_rate: float :type max_rate: float :returns: Average and stdev of critical load estimate. :rtype: 2-tuple of floats """ stop_time = time.time() + self.timeout min_rate = float(min_rate) max_rate = float(max_rate) logging.info("Started search with min_rate %(min)r, max_rate %(max)r", {"min": min_rate, "max": max_rate}) trial_result_list = list() trial_number = self.trial_number_offset focus_trackers = (None, None) transmit_rate = (min_rate + max_rate) / 2.0 lossy_loads = [max_rate] zeros = [0, 0] # Cosecutive zero loss, separately for stretch and erf. 
while 1: trial_number += 1 logging.info("Trial %(number)r", {"number": trial_number}) results = self.measure_and_compute( self.trial_duration_per_trial * trial_number, transmit_rate, trial_result_list, min_rate, max_rate, focus_trackers) measurement, average, stdev, avg1, avg2, focus_trackers = results index = trial_number % 2 zeros[index] += 1 # TODO: Ratio of fill rate to drain rate seems to have # exponential impact. Make it configurable, or is 4:3 good enough? if measurement.loss_fraction >= self.packet_loss_ratio_target: for _ in range(4 * zeros[index]): lossy_loads.append(measurement.target_tr) if measurement.loss_count > 0: zeros[index] = 0 lossy_loads.sort() if stop_time <= time.time(): return average, stdev trial_result_list.append(measurement) if (trial_number - self.trial_number_offset) <= 1: next_load = max_rate elif (trial_number - self.trial_number_offset) <= 3: next_load = (measurement.receive_rate / ( 1.0 - self.packet_loss_ratio_target)) else: index = (trial_number + 1) % 2 next_load = (avg1, avg2)[index] if zeros[index] > 0: if lossy_loads[0] > next_load: diminisher = math.pow(2.0, 1 - zeros[index]) next_load = lossy_loads[0] + diminisher * next_load next_load /= (1.0 + diminisher) # On zero measurement, we need to drain obsoleted low losses # even if we did not use them to increase next_load, # in order to get to usable loses with higher load. if len(lossy_loads) > 3: lossy_loads = lossy_loads[3:] logging.debug("Zeros %(z)r orig %(o)r next %(n)r loads %(s)r", {"z": zeros, "o": (avg1, avg2)[index], "n": next_load, "s": lossy_loads}) transmit_rate = min(max_rate, max(min_rate, next_load)) @staticmethod def lfit_stretch(trace, load, mrr, spread): """Stretch-based fitting function. Return the logarithm of average packet loss per second when the load (argument) is offered to a system with given mrr and spread (parameters). Stretch function is 1/(1+Exp[-x]). 
The average itself is definite integral from zero to load, of shifted and x-scaled stretch function. As the integrator is sensitive to discontinuities, and it calls this function at large areas of parameter space, the implementation has to avoid rounding errors, overflows, and correctly approximate underflows. TODO: Explain how the high-level description has been converted into an implementation full of ifs. :param trace: A multiprocessing-friendly logging function (closure). :param load: Offered load (positive), in packets per second. :param mrr: Parameter of this fitting function, equal to limiting (positive) average number of packets received (as opposed to lost) when offered load is many spreads more than mrr. :param spread: The x-scaling parameter (positive). No nice semantics, roughly corresponds to size of "tail" for loads below mrr. :type trace: function (str, object) -> NoneType :type load: float :type mrr: float :type spread: float :returns: Logarithm of average number of packets lost per second. :rtype: float """ # TODO: What is the fastest way to use such values? 
log_2 = math.log(2) log_3 = math.log(3) log_spread = math.log(spread) # TODO: chi is from https://en.wikipedia.org/wiki/Nondimensionalization chi = (load - mrr) / spread chi0 = -mrr / spread trace("stretch: load", load) trace("mrr", mrr) trace("spread", spread) trace("chi", chi) trace("chi0", chi0) if chi > 0: log_lps = math.log( load - mrr + (log_plus(0, -chi) - log_plus(0, chi0)) * spread) trace("big loss direct log_lps", log_lps) else: two_positive = log_plus(chi, 2 * chi0 - log_2) two_negative = log_plus(chi0, 2 * chi - log_2) if two_positive <= two_negative: log_lps = log_minus(chi, chi0) + log_spread trace("small loss crude log_lps", log_lps) return log_lps two = log_minus(two_positive, two_negative) three_positive = log_plus(two_positive, 3 * chi - log_3) three_negative = log_plus(two_negative, 3 * chi0 - log_3) three = log_minus(three_positive, three_negative) if two == three: log_lps = two + log_spread trace("small loss approx log_lps", log_lps) else: log_lps = math.log(log_plus(0, chi) - log_plus(0, chi0)) log_lps += log_spread trace("small loss direct log_lps", log_lps) return log_lps @staticmethod def lfit_erf(trace, load, mrr, spread): """Erf-based fitting function. Return the logarithm of average packet loss per second when the load (argument) is offered to a system with given mrr and spread (parameters). Erf function is Primitive function to normal distribution density. The average itself is definite integral from zero to load, of shifted and x-scaled erf function. As the integrator is sensitive to discontinuities, and it calls this function at large areas of parameter space, the implementation has to avoid rounding errors, overflows, and correctly approximate underflows. TODO: Explain how the high-level description has been converted into an implementation full of ifs. :param trace: A multiprocessing-friendly logging function (closure). :param load: Offered load (positive), in packets per second. 
:param mrr: Parameter of this fitting function, equal to limiting (positive) average number of packets received (as opposed to lost) when offered load is many spreads more than mrr. :param spread: The x-scaling parameter (positive). No nice semantics, roughly corresponds to size of "tail" for loads below mrr. :type trace: function (str, object) -> NoneType :type load: float :type mrr: float :type spread: float :returns: Logarithm of average number of packets lost per second. :rtype: float """ # Beware, this chi has the sign opposite to the stretch function chi. # TODO: The stretch sign is just to have less minuses. Worth changing? chi = (mrr - load) / spread chi0 = mrr / spread trace("Erf: load", load) trace("mrr", mrr) trace("spread", spread) trace("chi", chi) trace("chi0", chi0) if chi >= -1.0: trace("positive, b roughly bigger than m", None) if chi > math.exp(10): first = PLRsearch.log_xerfcx_10 + 2 * (math.log(chi) - 10) trace("approximated first", first) else: first = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi)) trace("exact first", first) first -= chi * chi second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0)) second -= chi0 * chi0 intermediate = log_minus(first, second) trace("first", first) else: trace("negative, b roughly smaller than m", None) exp_first = PLRsearch.xerfcx_limit + chi * erfcx(-chi) exp_first *= math.exp(-chi * chi) exp_first -= 2 * chi # TODO: Why has the following line chi there (as opposed to chi0)? # In general the functions would be more readable if they explicitly # return math.log(func(chi) - func(chi0)) # for some function "func", at least for some branches. 
second = math.log(PLRsearch.xerfcx_limit - chi * erfcx(chi0)) second -= chi0 * chi0 intermediate = math.log(exp_first - math.exp(second)) trace("exp_first", exp_first) trace("second", second) trace("intermediate", intermediate) result = intermediate + math.log(spread) - math.log(erfc(-chi0)) trace("result", result) return result @staticmethod def find_critical_rate( trace, lfit_func, min_rate, max_rate, loss_ratio_target, mrr, spread): """Given ratio target and parameters, return the achieving offered load. This is basically an inverse function to lfit_func when parameters are fixed. Instead of implementing effective implementation of the inverse function, this implementation uses brute force binary search. It is bisecting (nim_rate, max_rate) interval until the critical load is found (or interval becomes degenerate). This implementation assures min and max rate limits are honored. TODO: Use some method with faster convergence? :param trace: A multiprocessing-friendly logging function (closure). :param lfit_func: Fitting function, typically lfit_spread or lfit_erf. :param min_rate: Lower bound for binary search [pps]. :param max_rate: Upper bound for binary search [pps]. :param loss_ratio_target: Fitting function should return loss rate giving this ratio at the returned load and parameters [1]. :param mrr: The mrr parameter for the fitting function [pps]. :param spread: The spread parameter for the fittinmg function [pps]. :type trace: function (str, object) -> None :type lfit_func: Function from 3 floats to float. :type min_rate: float :type max_rate: float :type log_lps_target: float :type mrr: float :type spread: float :returns: Load [pps] which achieves the target with given parameters. 
:rtype: float """ trace("Finding critical rate for loss_ratio_target", loss_ratio_target) rate_lo = min_rate rate_hi = max_rate loss_ratio = -1 while loss_ratio != loss_ratio_target: rate = (rate_hi + rate_lo) / 2.0 if rate == rate_hi or rate == rate_lo: break loss_rate = math.exp(lfit_func(trace, rate, mrr, spread)) loss_ratio = loss_rate / rate if loss_ratio > loss_ratio_target: trace("halving down", rate) rate_hi = rate elif loss_ratio < loss_ratio_target: trace("halving up", rate) rate_lo = rate trace("found", rate) return rate @staticmethod def log_weight(trace, lfit_func, trial_result_list, mrr, spread): """Return log of weight of trial results by the function and parameters. Integrator assumes uniform distribution, but over different parameters. Weight and likelihood are used interchangeably here anyway. Each trial has an offered load, a duration and a loss count. Fitting function is used to compute the average loss per second. Poisson distribution (with average loss per trial) is used to get likelihood of one trial result, the overal likelihood is a product of all trial likelihoods. As likelihoods can be extremely small, logarithms are tracked instead. TODO: Copy ReceiveRateMeasurement from MLRsearch. :param trace: A multiprocessing-friendly logging function (closure). :param lfit_func: Fitting function, typically lfit_spread or lfit_erf. :param result_list: List of trial measurement results. :param mrr: The mrr parameter for the fitting function. :param spread: The spread parameter for the fittinmg function. :type trace: function (str, object) -> None :type lfit_func: Function from 3 floats to float. :type result_list: list of MLRsearch.ReceiveRateMeasurement :type mrr: float :type spread: float :returns: Logarithm of result weight for given function and parameters. 
:rtype: float """ log_likelihood = 0.0 trace("log_weight for mrr", mrr) trace("spread", spread) for result in trial_result_list: trace("for tr", result.target_tr) trace("lc", result.loss_count) trace("d", result.duration) log_avg_loss_per_second = lfit_func( trace, result.target_tr, mrr, spread) log_avg_loss_per_trial = ( log_avg_loss_per_second + math.log(result.duration)) # Poisson probability computation works nice for logarithms. log_trial_likelihood = ( result.loss_count * log_avg_loss_per_trial - math.exp(log_avg_loss_per_trial)) log_trial_likelihood -= math.lgamma(1 + result.loss_count) log_likelihood += log_trial_likelihood trace("avg_loss_per_trial", math.exp(log_avg_loss_per_trial)) trace("log_trial_likelihood", log_trial_likelihood) return log_likelihood # TODO: Refactor (somehow) so pylint stops complaining about # too many local variables. def measure_and_compute( self, trial_duration, transmit_rate, trial_result_list, min_rate, max_rate, focus_trackers=(None, None), max_samples=None): """Perform both measurement and computation at once. High level steps: Prepare and launch computation worker processes, perform the measurement, stop computation and combine results. Integrator needs a specific function to process (-1, 1) parameters. As our fitting functions use dimensional parameters, so a transformation is performed, resulting in a specific prior distribution over the dimensional parameters. Maximal rate (line rate) is needed for that transformation. Two fitting functions are used, computation is started on temporary worker process per fitting function. After the measurement, average and stdev of the critical rate (not log) of each worker are combined and returned. Raw averages are also returned, offered load for next iteration is chosen based on them. The idea is that one fitting function might be fitting much better, measurements at its avg are best for relevant results (for both), but we do not know which fitting function it is. 
Focus trackers are updated in-place. If a focus tracker in None, new instance is created. TODO: Define class for result object, so that fields are documented. TODO: Re-use processes, instead creating on each computation? TODO: As only one result is needed fresh, figure out a way how to keep the other worker running. This will alow shorter duration per trial. Special handling at first and last measurement will be needed (to properly initialize and to properly combine results). :param trial_duration: Length of the measurement in seconds. :param transmit_rate: Offered load in packets per second. :param trial_result_list: Results of previous measurements. :param min_rate: Practical minimum of possible ofered load. :param max_rate: Practical maximum of possible ofered load. :param focus_trackers: Pair of trackers initialized to speed up the numeric computation. :param max_samples: Limit for integrator samples, for debugging. :type trial_duration: float :type transmit_rate: float :type trial_result_list: list of MLRsearch.ReceiveRateMeasurement :type min_rate: float :type max_rate: float :type focus_trackers: 2-tuple of None or stat_trackers.VectorStatTracker :type max_samples: None or int :returns: Measurement and computation results. :rtype: 6-tuple: ReceiveRateMeasurement, 4 floats, 2-tuple of trackers. """ logging.debug( "measure_and_compute started with self %(self)r, trial_duration " + "%(dur)r, transmit_rate %(tr)r, trial_result_list %(trl)r, " + "max_rate %(mr)r, focus_trackers %(track)r, max_samples %(ms)r", {"self": self, "dur": trial_duration, "tr": transmit_rate, "trl": trial_result_list, "mr": max_rate, "track": focus_trackers, "ms": max_samples}) # Preparation phase. 
dimension = 2 stretch_focus_tracker, erf_focus_tracker = focus_trackers if stretch_focus_tracker is None: stretch_focus_tracker = stat_trackers.VectorStatTracker(dimension) stretch_focus_tracker.unit_reset() if erf_focus_tracker is None: erf_focus_tracker = stat_trackers.VectorStatTracker(dimension) erf_focus_tracker.unit_reset() old_trackers = stretch_focus_tracker.copy(), erf_focus_tracker.copy() def start_computing(fitting_function, focus_tracker): """Just a block of code to be used for each fitting function. Define function for integrator, create process and pipe ends, start computation, return the boss pipe end. :param fitting_function: lfit_erf or lfit_stretch. :param bias_avg: Tuple of floats to start searching around. :param bias_cov: Covariance matrix defining initial focus shape. :type fitting_function: Function from 3 floats to float. :type bias_avg: 2-tuple of floats :type bias_cov: 2-tuple of 2-tuples of floats :returns: Boss end of communication pipe. :rtype: multiprocessing.Connection """ def value_logweight_func(trace, x_mrr, x_spread): """Return log of critical rate and log of likelihood. This is a closure. The ancestor function got trial_result_list as a parameter, and we are accessing it. As integrator has strict conditions on function signature, trial_result_list cannot be an explicit argument of the current function. This is also why we have to define this closure at each invocation of the ancestor function anew. The dimensional spread parameter is the (dimensional) mrr raised to the power of x_spread scaled to interval (0, 1). The dimensional mrr parameter distribution has shape of 1/(1+x^2), but x==1 corresponds to max_rate and 1.0 pps is added to avoid numerical problems in fitting functions. TODO: x^-2 (for x>1.0) might be simpler/nicer prior. :param trace: Multiprocessing-safe logging function (closure). :param x_mrr: The first dimensionless param from (-1, 1) interval. :param x_spread: The second dimensionless param from (-1, 1) interval. 
:type trace: function (str, object) -> None :type x_mrr: float :type x_spread: float :returns: Log of critical rate [pps] and log of likelihood. :rtype: 2-tuple of float """ mrr = max_rate * (1.0 / (x_mrr + 1.0) - 0.5) + 1.0 spread = math.exp((x_spread + 1.0) / 2.0 * math.log(mrr)) logweight = self.log_weight( trace, fitting_function, trial_result_list, mrr, spread) value = math.log(self.find_critical_rate( trace, fitting_function, min_rate, max_rate, self.packet_loss_ratio_target, mrr, spread)) return value, logweight dilled_function = dill.dumps(value_logweight_func) boss_pipe_end, worker_pipe_end = multiprocessing.Pipe() boss_pipe_end.send( (dimension, dilled_function, focus_tracker, max_samples)) worker = multiprocessing.Process( target=Integrator.try_estimate_nd, args=( worker_pipe_end, 10.0, self.trace_enabled)) worker.daemon = True worker.start() return boss_pipe_end erf_pipe = start_computing( self.lfit_erf, erf_focus_tracker) stretch_pipe = start_computing( self.lfit_stretch, stretch_focus_tracker) # Measurement phase. measurement = self.measurer.measure(trial_duration, transmit_rate) # Processing phase. def stop_computing(name, pipe): """Just a block of code to be used for each worker. Send stop object, poll for result, then either unpack response, log messages and return, or raise traceback. TODO: Define class/structure for the return value? :param name: Human friendly worker identifier for logging purposes. :param pipe: Boss end of connection towards worker to stop. :type name: str :type pipe: multiprocessing.Connection :returns: Computed value tracker, actual focus tracker, and number of samples used for this iteration. 
:rtype: 3-tuple of tracker, tracker and int """ pipe.send(None) if not pipe.poll(10.0): raise RuntimeError( "Worker {name} did not finish!".format(name=name)) result_or_traceback = pipe.recv() try: value_tracker, focus_tracker, debug_list, trace_list, sampls = ( result_or_traceback) except ValueError: raise RuntimeError( "Worker {name} failed with the following traceback:\n{tr}" .format(name=name, tr=result_or_traceback)) logging.info("Logs from worker %(name)r:", {"name": name}) for message in debug_list: logging.info(message) for message in trace_list: logging.debug(message) logging.debug("trackers: value %(val)r focus %(foc)r", { "val": value_tracker, "foc": focus_tracker}) return value_tracker, focus_tracker, sampls stretch_value_tracker, stretch_focus_tracker, stretch_samples = ( stop_computing("stretch", stretch_pipe)) erf_value_tracker, erf_focus_tracker, erf_samples = ( stop_computing("erf", erf_pipe)) stretch_avg = stretch_value_tracker.average erf_avg = erf_value_tracker.average # TODO: Take into account secondary stats. stretch_stdev = math.exp(stretch_value_tracker.log_variance / 2) erf_stdev = math.exp(erf_value_tracker.log_variance / 2) avg = math.exp((stretch_avg + erf_avg) / 2.0) var = (stretch_stdev * stretch_stdev + erf_stdev * erf_stdev) / 2.0 var += (stretch_avg - erf_avg) * (stretch_avg - erf_avg) / 4.0 stdev = avg * math.sqrt(var) focus_trackers = (stretch_focus_tracker, erf_focus_tracker) logging.info( "measure_and_compute finished with trial result %(res)r " "avg %(avg)r stdev %(stdev)r stretch %(a1)r erf %(a2)r " "new trackers %(nt)r old trackers %(ot)r stretch samples %(ss)r " "erf samples %(es)r", {"res": measurement, "avg": avg, "stdev": stdev, "a1": math.exp(stretch_avg), "a2": math.exp(erf_avg), "nt": focus_trackers, "ot": old_trackers, "ss": stretch_samples, "es": erf_samples}) return ( measurement, avg, stdev, math.exp(stretch_avg), math.exp(erf_avg), focus_trackers)
nilq/baby-python
python
"""Discord status-rotator: cycles the account's presence text on a timer.

NOTE(review): ``bot.run(..., bot=False)`` logs in as a *user* account (a
"self-bot").  This violates Discord's Terms of Service and is only
supported by old discord.py versions -- confirm this is intentional.
"""
import discord
from itertools import cycle
from discord.ext import commands, tasks

# Status messages rotated through in order; add as many entries as desired,
# e.g. 'Stiizzy cat is hot', 'Name'.
status = cycle(['Add ur text here','ur text here','ur text here','ur text here'])

# The command prefix is never exercised here -- this bot only changes status.
bot = commands.Bot(command_prefix="!")

@bot.event
async def on_ready():
    # Fires once the gateway connection is ready; kick off the rotation loop.
    print("Changing Status started")
    change_status.start()

@tasks.loop(seconds=5)  # Interval between status changes; adjust to taste.
async def change_status():
    # Advance the cycle and publish the next entry as a "Playing ..." activity.
    await bot.change_presence(activity=discord.Game(next(status)))

# Replace with the account token.  made by stiizzy cat -- use at your own
# risk: self-bot accounts can be disabled by Discord.
bot.run("Your Token here", bot=False)
nilq/baby-python
python
import logging
from datetime import datetime
from pprint import pprint

from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import text

from opennem.db import db_connect
from opennem.db.load_fixtures import update_existing_geos
from opennem.db.models.opennem import Facility, Station
from opennem.geo.google_geo import place_search

logger = logging.getLogger(__name__)


def build_address_string(station_record):
    """Return a comma-separated address query string for a station record.

    Joins network name, locality, state and country, skipping any field
    that is empty or ``None``.

    :param station_record: Station model instance with ``network_name``,
        ``locality`` and ``state`` attributes.
    :returns: Address string suitable for a place-search query.
    """
    parts = [
        station_record.network_name,
        station_record.locality,
        station_record.state,
        "Australia",
    ]
    return ", ".join(str(part) for part in parts if part)


def opennem_geocode(limit=None):
    """Geocode stations that have no geometry yet via the Google places API.

    Queries every non-skipped station without a ``geom``, looks its address
    up with :func:`place_search`, and stores the returned place id and a
    WGS84 point geometry on the record.

    :param limit: Optional maximum number of records to process.
    """
    engine = db_connect()
    session = sessionmaker(bind=engine)
    s = session()

    # SQLAlchemy requires ``==`` (not ``is``) for NULL/boolean comparisons.
    records = (
        s.query(Station)
        .filter(Station.geom == None)
        .filter(Station.geocode_skip == False)
    )

    count = 0
    skipped = 0
    records_added = 0

    for r in records:
        geo_address_string = build_address_string(r)

        logger.info("Geocoding record: {}".format(geo_address_string))
        # BUG FIX: a stray ``continue`` here previously skipped the rest of
        # the loop body, so no record was ever geocoded or committed.

        google_result = place_search(geo_address_string)
        logger.debug("Google result: {}".format(google_result))

        if (
            google_result
            and type(google_result) is list
            and len(google_result) > 0
        ):
            result = google_result[0]

            r.place_id = result["place_id"]

            # PostGIS EWKT expects longitude first: POINT(lng lat).
            lat = result["geometry"]["location"]["lat"]
            lng = result["geometry"]["location"]["lng"]
            r.geom = "SRID=4326;POINT({} {})".format(lng, lat)

            r.geocode_processed_at = datetime.now()
            r.geocode_by = "google"
            r.geocode_approved = False

            try:
                s.add(r)
                s.commit()
                records_added += 1
            except IntegrityError as e:
                logger.error(e)
                skipped += 1
            except Exception as e:
                skipped += 1
                logger.error("Error: {}".format(e))
        else:
            # No usable result from the place search.
            skipped += 1

        count += 1
        if limit and count >= limit:
            break

    print(
        "Geocode of opennem records done. Added {} records. Couldn't match {}".format(
            records_added, skipped
        )
    )


if __name__ == "__main__":
    update_existing_geos()
    opennem_geocode()
nilq/baby-python
python
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0

# from http://stackoverflow.com/questions/15896217/django-loading-a-page-that-
# has-external-authentication-changes-the-session-key
class PersistentSessionMiddleware(object):
    """
    Injects the username into REMOTE_USER so that users continue to
    be logged in on views that don't require authentication.
    """
    def __init__(self):
        pass

    def process_request(self, request):
        """Ensure REMOTE_USER carries the authenticated user's name.

        Does nothing for anonymous users or when the header is already
        present; always returns None so request processing continues.
        """
        if not request.user.is_authenticated():
            return None
        if "REMOTE_USER" in request.META:
            return None
        request.META["REMOTE_USER"] = request.user.username
        return None
nilq/baby-python
python
# -*- coding: UTF-8 -*- from mpi4py import MPI from sympy import pi, cos, sin from sympy.abc import x, y from sympy.utilities.lambdify import implemented_function import pytest from sympde.calculus import grad, dot from sympde.calculus import laplace from sympde.topology import ScalarFunctionSpace from sympde.topology import element_of from sympde.topology import NormalVector from sympde.topology import Square from sympde.topology import Union from sympde.expr import BilinearForm, LinearForm, integral from sympde.expr import Norm from sympde.expr import find, EssentialBC from psydac.fem.basic import FemField from psydac.api.discretization import discretize #============================================================================== def get_boundaries(*args): if not args: return () else: assert all(1 <= a <= 4 for a in args) assert len(set(args)) == len(args) boundaries = {1: {'axis': 0, 'ext': -1}, 2: {'axis': 0, 'ext': 1}, 3: {'axis': 1, 'ext': -1}, 4: {'axis': 1, 'ext': 1}} return tuple(boundaries[i] for i in args) #============================================================================== def run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells, degree, comm=None): assert isinstance(dir_zero_boundary , (list, tuple)) assert isinstance(dir_nonzero_boundary, (list, tuple)) #+++++++++++++++++++++++++++++++ # 1. 
Abstract model #+++++++++++++++++++++++++++++++ domain = Square() B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary]) B_dirichlet_i = Union(*[domain.get_boundary(**kw) for kw in dir_nonzero_boundary]) B_dirichlet = Union(B_dirichlet_0, B_dirichlet_i) B_neumann = domain.boundary.complement(B_dirichlet) V = ScalarFunctionSpace('V', domain) u = element_of(V, name='u') v = element_of(V, name='v') nn = NormalVector('nn') # Bilinear form a: V x V --> R a = BilinearForm((u, v), integral(domain, dot(grad(u), grad(v)))) # Linear form l: V --> R l0 = LinearForm(v, integral(domain, f * v)) if B_neumann: l1 = LinearForm(v, integral(B_neumann, v * dot(grad(solution), nn))) l = LinearForm(v, l0(v) + l1(v)) else: l = l0 # Dirichlet boundary conditions bc = [] if B_dirichlet_0: bc += [EssentialBC(u, 0, B_dirichlet_0)] if B_dirichlet_i: bc += [EssentialBC(u, solution, B_dirichlet_i)] # Variational model equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc) # Error norms error = u - solution l2norm = Norm(error, domain, kind='l2') h1norm = Norm(error, domain, kind='h1') #+++++++++++++++++++++++++++++++ # 2. Discretization #+++++++++++++++++++++++++++++++ # Create computational domain from topological domain domain_h = discretize(domain, ncells=ncells, comm=comm) # Discrete spaces Vh = discretize(V, domain_h, degree=degree) # Discretize equation using Dirichlet bc equation_h = discretize(equation, domain_h, [Vh, Vh]) # Discretize error norms l2norm_h = discretize(l2norm, domain_h, Vh) h1norm_h = discretize(h1norm, domain_h, Vh) #+++++++++++++++++++++++++++++++ # 3. 
Solution #+++++++++++++++++++++++++++++++ # Solve linear system x = equation_h.solve() uh = FemField( Vh, x ) # Compute error norms l2_error = l2norm_h.assemble(u=uh) h1_error = h1norm_h.assemble(u=uh) return l2_error, h1_error #============================================================================== def run_laplace_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells, degree, comm=None): assert isinstance(dir_zero_boundary , (list, tuple)) assert isinstance(dir_nonzero_boundary, (list, tuple)) #+++++++++++++++++++++++++++++++ # 1. Abstract model #+++++++++++++++++++++++++++++++ domain = Square() B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary]) B_dirichlet_i = Union(*[domain.get_boundary(**kw) for kw in dir_nonzero_boundary]) B_dirichlet = Union(B_dirichlet_0, B_dirichlet_i) B_neumann = domain.boundary.complement(B_dirichlet) V = ScalarFunctionSpace('V', domain) u = element_of(V, name='u') v = element_of(V, name='v') nn = NormalVector('nn') # Bilinear form a: V x V --> R a = BilinearForm((u, v), integral(domain, dot(grad(u), grad(v)) + u * v)) # Linear form l: V --> R l0 = LinearForm(v, integral(domain, f * v)) if B_neumann: l1 = LinearForm(v, integral(B_neumann, v * dot(grad(solution), nn))) l = LinearForm(v, l0(v) + l1(v)) else: l = l0 # Dirichlet boundary conditions bc = [] if B_dirichlet_0: bc += [EssentialBC(u, 0, B_dirichlet_0)] if B_dirichlet_i: bc += [EssentialBC(u, solution, B_dirichlet_i)] # Variational model equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc) # Error norms error = u - solution l2norm = Norm(error, domain, kind='l2') h1norm = Norm(error, domain, kind='h1') #+++++++++++++++++++++++++++++++ # 2. 
Discretization #+++++++++++++++++++++++++++++++ # Create computational domain from topological domain domain_h = discretize(domain, ncells=ncells, comm=comm) # Discrete spaces Vh = discretize(V, domain_h, degree=degree) # Discretize equation using Dirichlet bc equation_h = discretize(equation, domain_h, [Vh, Vh]) # Discretize error norms l2norm_h = discretize(l2norm, domain_h, Vh) h1norm_h = discretize(h1norm, domain_h, Vh) #+++++++++++++++++++++++++++++++ # 3. Solution #+++++++++++++++++++++++++++++++ # Solve linear system x = equation_h.solve() uh = FemField( Vh, x ) # Compute error norms l2_error = l2norm_h.assemble(u=uh) h1_error = h1norm_h.assemble(u=uh) return l2_error, h1_error #============================================================================== def run_biharmonic_2d_dir(solution, f, dir_zero_boundary, ncells, degree, comm=None): assert isinstance(dir_zero_boundary, (list, tuple)) #+++++++++++++++++++++++++++++++ # 1. Abstract model #+++++++++++++++++++++++++++++++ domain = Square() B_dirichlet_0 = Union(*[domain.get_boundary(**kw) for kw in dir_zero_boundary]) B_dirichlet_i = domain.boundary.complement(B_dirichlet_0) V = ScalarFunctionSpace('V', domain) u = element_of(V, name='u') v = element_of(V, name='v') nn = NormalVector('nn') # Bilinear form a: V x V --> R a = BilinearForm((u, v), integral(domain, laplace(u) * laplace(v))) # Linear form l: V --> R l = LinearForm(v, integral(domain, f * v)) # Essential boundary conditions dn = lambda a: dot(grad(a), nn) bc = [] if B_dirichlet_0: bc += [EssentialBC( u , 0, B_dirichlet_0)] bc += [EssentialBC(dn(u), 0, B_dirichlet_0)] if B_dirichlet_i: bc += [EssentialBC( u , solution , B_dirichlet_i)] bc += [EssentialBC(dn(u), dn(solution), B_dirichlet_i)] # Variational model equation = find(u, forall=v, lhs=a(u, v), rhs=l(v), bc=bc) # Error norms error = u - solution l2norm = Norm(error, domain, kind='l2') h1norm = Norm(error, domain, kind='h1') h2norm = Norm(error, domain, kind='h2') 
#+++++++++++++++++++++++++++++++ # 2. Discretization #+++++++++++++++++++++++++++++++ # Create computational domain from topological domain domain_h = discretize(domain, ncells=ncells, comm=comm) # Discrete spaces Vh = discretize(V, domain_h, degree=degree) # Discretize equation using Dirichlet bc equation_h = discretize(equation, domain_h, [Vh, Vh]) # Discretize error norms l2norm_h = discretize(l2norm, domain_h, Vh) h1norm_h = discretize(h1norm, domain_h, Vh) h2norm_h = discretize(h2norm, domain_h, Vh) #+++++++++++++++++++++++++++++++ # 3. Solution #+++++++++++++++++++++++++++++++ # Solve linear system x = equation_h.solve() uh = FemField( Vh, x ) # Compute error norms l2_error = l2norm_h.assemble(u=uh) h1_error = h1norm_h.assemble(u=uh) h2_error = h2norm_h.assemble(u=uh) return l2_error, h1_error, h2_error ############################################################################### # SERIAL TESTS ############################################################################### #============================================================================== # 2D Poisson's equation #============================================================================== def test_poisson_2d_dir0_1234(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*sin(pi*x)*sin(pi*y) dir_zero_boundary = get_boundaries(1, 2, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021808678604760232 expected_h1_error = 0.013023570720360362 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_234_neu0_1(): solution = cos(0.5*pi*x)*sin(pi*y) f = (5./4.)*pi**2*solution dir_zero_boundary = get_boundaries(2, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, 
dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00015546057796452772 expected_h1_error = 0.00926930278452745 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_134_neu0_2(): solution = sin(0.5*pi*x)*sin(pi*y) f = (5./4.)*pi**2*solution dir_zero_boundary = get_boundaries(1, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.0001554605779481901 expected_h1_error = 0.009269302784527256 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_124_neu0_3(): solution = sin(pi*x)*cos(0.5*pi*y) f = (5./4.)*pi**2*solution dir_zero_boundary = get_boundaries(1, 2, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.0001554605779681901 expected_h1_error = 0.009269302784528678 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_123_neu0_4(): solution = sin(pi*x)*sin(0.5*pi*y) f = (5./4.)*pi**2*solution dir_zero_boundary = get_boundaries(1, 2, 3) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00015546057796339546 expected_h1_error = 0.009269302784526841 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) 
#------------------------------------------------------------------------------ def test_poisson_2d_dir0_24_neu0_13(): solution = cos(0.5*pi*x)*cos(0.5*pi*y) f = (1./2.)*pi**2*solution dir_zero_boundary = get_boundaries(2, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 2.6119892736036942e-05 expected_h1_error = 0.0016032430287934746 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_13_neu0_24(): solution = sin(0.5*pi*x)*sin(0.5*pi*y) f = (1./2.)*pi**2*solution dir_zero_boundary = get_boundaries(1, 3) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 2.611989253883369e-05 expected_h1_error = 0.0016032430287973409 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_4_neu0_123(): solution = cos(pi*x)*cos(0.5*pi*y) f = 5./4.*pi**2*solution dir_zero_boundary = get_boundaries(4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00015494478505412876 expected_h1_error = 0.009242166414700994 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_234_neui_1(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*solution dir_zero_boundary = get_boundaries(2, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error 
= run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021786960672322118 expected_h1_error = 0.01302350067761091 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_134_neui_2(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*solution dir_zero_boundary = get_boundaries(1, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021786960672322118 expected_h1_error = 0.01302350067761091 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_124_neui_3(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*solution dir_zero_boundary = get_boundaries(1, 2, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021786960672322118 expected_h1_error = 0.01302350067761091 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #------------------------------------------------------------------------------ def test_poisson_2d_dir0_123_neui_4(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*solution dir_zero_boundary = get_boundaries(1, 2, 3) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021786960672322118 expected_h1_error = 0.01302350067761091 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) 
#------------------------------------------------------------------------------ def test_poisson_2d_dir0_123_diri_4(): solution = sin(pi * x) * sin(0.5*pi * y) f = 5/4*pi**2 * solution dir_zero_boundary = get_boundaries(1, 2, 3) dir_nonzero_boundary = get_boundaries(4) l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00015292215711784052 expected_h1_error = 0.009293161646614652 assert abs(l2_error - expected_l2_error) < 1.e-7 assert abs(h1_error - expected_h1_error) < 1.e-7 #------------------------------------------------------------------------------ def test_poisson_2d_dir0_13_diri_24(): solution = sin(3*pi/2 * x) * sin(3*pi/2 * y) f = 9/2*pi**2 * solution dir_zero_boundary = get_boundaries(1, 3) dir_nonzero_boundary = get_boundaries(2, 4) l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.0007786454571731944 expected_h1_error = 0.0449669071240554 assert abs(l2_error - expected_l2_error) < 1.e-7 assert abs(h1_error - expected_h1_error) < 1.e-7 #------------------------------------------------------------------------------ def test_poisson_2d_dir0_1234_user_function(): solution = sin(pi*x)*sin(pi*y) # ... # User provides right-hand side in the form of a callable Python function: def f(x, y): from numpy import pi, sin return 2*pi**2*sin(pi*x)*sin(pi*y) # Python function is converted to Sympy's "implemented function" and then # called with symbolic arguments (x, y): f = implemented_function('f', f)(x, y) # ... 
dir_zero_boundary = get_boundaries(1, 2, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.00021808678604760232 expected_h1_error = 0.013023570720360362 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #============================================================================== # 2D "Laplace-like" equation #============================================================================== def test_laplace_2d_neu0_1234(): solution = cos(pi*x)*cos(pi*y) f = (2.*pi**2 + 1.)*solution dir_zero_boundary = get_boundaries() dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_laplace_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2]) expected_l2_error = 0.0002172846538950129 expected_h1_error = 0.012984852988125026 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #============================================================================== # 2D biharmonic equation #============================================================================== def test_biharmonic_2d_dir0_1234(): solution = sin(pi * x)**2 * sin(pi * y)**2 f = laplace(laplace(solution)) dir_zero_boundary = get_boundaries(1, 2, 3, 4) l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f, dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3]) expected_l2_error = 0.00019981371108040476 expected_h1_error = 0.0063205179028178295 expected_h2_error = 0.2123929568623994 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) assert( abs(h2_error - expected_h2_error) < 1.e-7) #------------------------------------------------------------------------------ @pytest.mark.xfail def test_biharmonic_2d_dir0_123_diri_4(): solution = sin(pi * x)**2 * sin(0.5*pi * y)**2 f 
= laplace(laplace(solution)) dir_zero_boundary = get_boundaries(1, 2, 3) l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f, dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3]) print() print(l2_error) print(h1_error) print(h2_error) print() assert False #------------------------------------------------------------------------------ @pytest.mark.xfail def test_biharmonic_2d_dir0_13_diri_24(): solution = sin(3*pi/2 * x)**2 * sin(3*pi/2 * y)**2 f = laplace(laplace(solution)) dir_zero_boundary = get_boundaries(1, 3) l2_error, h1_error, h2_error = run_biharmonic_2d_dir(solution, f, dir_zero_boundary, ncells=[2**3, 2**3], degree=[3, 3]) print() print(l2_error) print(h1_error) print(h2_error) print() assert False ############################################################################### # PARALLEL TESTS ############################################################################### #============================================================================== @pytest.mark.parallel def test_poisson_2d_dir0_1234_parallel(): solution = sin(pi*x)*sin(pi*y) f = 2*pi**2*sin(pi*x)*sin(pi*y) dir_zero_boundary = get_boundaries(1, 2, 3, 4) dir_nonzero_boundary = get_boundaries() l2_error, h1_error = run_poisson_2d(solution, f, dir_zero_boundary, dir_nonzero_boundary, ncells=[2**3, 2**3], degree=[2, 2], comm=MPI.COMM_WORLD) expected_l2_error = 0.00021808678604760232 expected_h1_error = 0.013023570720360362 assert( abs(l2_error - expected_l2_error) < 1.e-7) assert( abs(h1_error - expected_h1_error) < 1.e-7) #============================================================================== # CLEAN UP SYMPY NAMESPACE #============================================================================== def teardown_module(): from sympy import cache cache.clear_cache() def teardown_function(): from sympy import cache cache.clear_cache()
nilq/baby-python
python
# Solution of; # Project Euler Problem 226: A Scoop of Blancmange # https://projecteuler.net/problem=226 # # The blancmange curve is the set of points $(x, y)$ such that $0 \le x \le 1$ # and $y = \sum \limits_{n = 0}^{\infty} {\dfrac{s(2^n x)}{2^n}}$, where # $s(x)$ is the distance from $x$ to the nearest integer. The area under the # blancmange curve is equal to ½, shown in pink in the diagram below. Let C be # the circle with centre $\left ( \frac{1}{4}, \frac{1}{2} \right )$ and # radius $\frac{1}{4}$, shown in black in the diagram. What area under the # blancmange curve is enclosed by C?Give your answer rounded to eight decimal # places in the form 0. abcdefgh # # by lcsm29 http://github.com/lcsm29/project-euler import timed def dummy(n): pass if __name__ == '__main__': n = 1000 i = 10000 prob_id = 226 timed.caller(dummy, n, i, prob_id)
nilq/baby-python
python
from aiohttp import ClientSession
from asyncio import get_event_loop
# NOTE(review): get_event_loop appears unused in this module — confirm no
# other module imports it from here before removing.


class ManagedHTTP:
    """A ClientSession wrapper that transparently (re)creates the session.

    aiohttp recommends constructing a ClientSession from inside a running
    event loop; creating it in __init__ (which may run outside any loop)
    triggers warnings and can bind the session to the wrong loop. The
    session is therefore created lazily on first use and recreated whenever
    it has been closed.
    """

    def __init__(self):
        # Created lazily by ensure_session(); None until the first request.
        self.session = None

    async def ensure_session(self):
        """Guarantee that ``self.session`` is an open ClientSession."""
        if self.session is None or self.session.closed:
            self.session = ClientSession()

    async def request(self, method: str, url: str, *args, **kwargs):
        """Perform an HTTP request, (re)opening the session if needed.

        Returns the aiohttp response object; the caller is responsible for
        releasing/closing it.
        """
        await self.ensure_session()
        return await self.session.request(method, url, *args, **kwargs)
nilq/baby-python
python
"""Test the functions exposed at the top level of the module. This isn't a full test of each method's capabilities, just checking that the method is exposed at the top level namespace. """ import warnings import pytest import uk_politics import uk_politics.exceptions def test_color(): """Check that uk_politics.color works.""" assert uk_politics.color("Liberal Democrats") == "#FAA61A" def test_location(): """Check that uk_politics.Location works. Just creation and comparison here. """ wales = uk_politics.Location(country="Wales") west_glamorgan = uk_politics.elections.COUNTS[0].location assert wales >= west_glamorgan assert not west_glamorgan >= wales def test_find_party(): """Check that uk_politics.find_party works.""" assert uk_politics.find_party( "Tory", return_short_name=True) == "Conservative Party" def test_find_empty(): """Passing an empty string should raise assertion error.""" with pytest.raises(uk_politics.exceptions.PartyNicknameEmpty): assert uk_politics.find_party("") == "" def test_scottish_labour(): """Test variations on Scottish Labour return Labour not SNP. The name is a bit too close to two different parties. """ with warnings.catch_warnings(): warnings.filterwarnings("ignore", module="uk_politics") assert uk_politics.find_party("Scottish Labour") == "Labour Party" assert uk_politics.find_party("Scottish labour") == "Labour Party" def test_rename_gives_warning(caplog): """Test that a bad name prompts a rename warning.""" uk_politics.find_party("labuor") print(caplog.records) expected = "Renaming 'labuor' -> 'Labour Party'" assert expected in caplog.text
nilq/baby-python
python
# Copyright 2018 Google Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from datetime import datetime import json import re import uuid from simpleeval import simple_eval from simpleeval import InvalidExpression from sqlalchemy import Column from sqlalchemy import Integer from sqlalchemy import String from sqlalchemy import DateTime from sqlalchemy import Text from sqlalchemy import Boolean from sqlalchemy import ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.orm import load_only from common import crmint_logging from common.task import Task from controller import inline from controller.database import BaseModel from controller.mailers import NotificationMailer def _parse_num(s): try: return int(s) except ValueError: try: return float(s) # TODO(dulacp) should raise a ValueError exception, not silence it except ValueError: return 0 class Pipeline(BaseModel): __tablename__ = 'pipelines' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) emails_for_notifications = Column(String(255)) status = Column(String(50), nullable=False, default='idle') status_changed_at = Column(DateTime) jobs = relationship('Job', backref='pipeline', lazy='dynamic') run_on_schedule = Column(Boolean, nullable=False, default=False) schedules = relationship('Schedule', lazy='dynamic') params = relationship('Param', lazy='dynamic', order_by='asc(Param.name)') class STATUS(object): IDLE = 'idle' FAILED = 'failed' SUCCEEDED = 'succeeded' STOPPING = 
'stopping' RUNNING = 'running' INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED] def __init__(self, name=None): super(Pipeline, self).__init__() self.name = name @property def state(self): return self.status @property def has_jobs(self): return self.jobs.count() > 0 @property def recipients(self): if self.emails_for_notifications: return self.emails_for_notifications.split() return [] def assign_attributes(self, attributes): for key, value in attributes.items(): if key in ['schedules', 'jobs', 'params']: continue if key == 'run_on_schedule': self.__setattr__(key, value == 'True') continue self.__setattr__(key, value) def save_relations(self, relations): for key, value in relations.items(): if key == 'schedules': self.assign_schedules(value) elif key == 'params': self.assign_params(value) def assign_params(self, parameters): Param.update_list(parameters, self) def assign_schedules(self, arg_schedules): # Remove if records not in list ids for update arg_schedule_ids = [] for arg_schedule in arg_schedules: if arg_schedule.get('id') is not None: # Updating schedule = Schedule.find(arg_schedule.get('id')) schedule.update(cron=arg_schedule['cron']) arg_schedule_ids.append(arg_schedule['id']) else: # Creating schedule = Schedule.create(pipeline_id=self.id, cron=arg_schedule['cron']) arg_schedule_ids.append(schedule.id) # Removing ids_for_removing = [] for schedule in self.schedules: if schedule.id not in arg_schedule_ids: ids_for_removing.append(schedule.id) Schedule.destroy(*ids_for_removing) def populate_params_runtime_values(self): inline.open_session() try: global_context = {} for param in Param.where(pipeline_id=None, job_id=None).all(): global_context[param.name] = param.populate_runtime_value() pipeline_context = global_context.copy() for param in self.params.all(): pipeline_context[param.name] = param.populate_runtime_value(global_context) for job in self.jobs.all(): for param in job.params.all(): param.populate_runtime_value(pipeline_context) inline.close_session() 
return True except (InvalidExpression, TypeError, ValueError, SyntaxError) as e: inline.close_session() from common import crmint_logging job_id = 'N/A' worker_class = 'N/A' if param.job_id is not None: job_id = param.job_id worker_class = param.job.worker_class message = 'Invalid job parameter "%s": %s' % (param.label, e) elif param.pipeline_id is not None: message = 'Invalid pipeline variable "%s": %s' % (param.label, e) else: message = 'Invalid global variable "%s": %s' % (param.label, e) crmint_logging.logger.log_struct({ 'labels': { 'pipeline_id': self.id, 'job_id': job_id, 'worker_class': worker_class, }, 'log_level': 'ERROR', 'message': message, }) return False def set_status(self, status): self.update(status=status, status_changed_at=datetime.now()) def get_ready(self): if not self.populate_params_runtime_values(): return False for job in self.jobs.all(): if not job.get_ready(): return False self.set_status(Pipeline.STATUS.RUNNING) return True def start(self): if self.status not in Pipeline.STATUS.INACTIVE_STATUSES: return False jobs = self.jobs.all() if len(jobs) < 1: return False for job in jobs: if job.status not in Job.STATUS.INACTIVE_STATUSES: return False if not self.get_ready(): return False for job in jobs: job.start() return True def _cancel_all_tasks(self): for job in self.jobs: job.cancel_tasks() def stop(self): if self.status != Pipeline.STATUS.RUNNING: return False for job in self.jobs: job.stop() for job in self.jobs: if job.status not in [Job.STATUS.FAILED, Job.STATUS.SUCCEEDED]: job.set_status(Job.STATUS.STOPPING) self._cancel_all_tasks() return self.job_finished() def start_single_job(self, job): if self.status not in Pipeline.STATUS.INACTIVE_STATUSES: return False if not self.populate_params_runtime_values(): return False if not job.get_ready(): return False self.set_status(Pipeline.STATUS.RUNNING) job.start_as_single() return True def job_finished(self): for job in self.jobs: if job.status == Job.STATUS.STOPPING: 
job.set_status(Job.STATUS.FAILED) for job in self.jobs: if job.status not in Job.STATUS.INACTIVE_STATUSES: return False self._finish() return True def _finish(self): jobs = Job.query.outerjoin((StartCondition, Job.id == StartCondition.preceding_job_id)) jobs = jobs.filter(Job.pipeline_id == self.id) jobs = jobs.filter(StartCondition.preceding_job_id == None) jobs = jobs.options(load_only('status')).all() status = Pipeline.STATUS.SUCCEEDED for job in jobs: # IDLE means the job has not run at all or it has been cancelled if job.status == Job.STATUS.FAILED: status = Pipeline.STATUS.FAILED break self.set_status(status) NotificationMailer().finished_pipeline(self) def import_data(self, data): self.assign_params(data['params']) self.assign_schedules(data['schedules']) job_mapping = {} jobs = [] if data['jobs']: for job_data in data['jobs']: job = Job() job.pipeline_id = self.id job.assign_attributes(job_data) job.save() job.save_relations(job_data) jobs.append(job) job_mapping[job_data['id']] = job.id for job in jobs: index = list(job_mapping.values()).index(job.id) job_id = list(job_mapping.keys())[index] job_data = next((j for j in data['jobs'] if j['id'] == job_id), None) job.assign_hash_start_conditions(job_data['hash_start_conditions'], job_mapping) def is_blocked(self): return (self.run_on_schedule or self.status in [Pipeline.STATUS.RUNNING, Pipeline.STATUS.STOPPING]) def destroy(self): sc_ids = [sc.id for sc in self.schedules] if sc_ids: Schedule.destroy(*sc_ids) for job in self.jobs: job.destroy() param_ids = [p.id for p in self.params.all()] if param_ids: Param.destroy(*param_ids) self.delete() class Job(BaseModel): __tablename__ = 'jobs' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) status = Column(String(50), nullable=False, default='idle') status_changed_at = Column(DateTime) worker_class = Column(String(255)) pipeline_id = Column(Integer, ForeignKey('pipelines.id')) params = relationship('Param', backref='job', 
lazy='dynamic') start_conditions = relationship( 'StartCondition', primaryjoin='Job.id==StartCondition.job_id') dependent_jobs = relationship( 'Job', secondary='start_conditions', primaryjoin='Job.id==StartCondition.preceding_job_id', secondaryjoin='StartCondition.job_id==Job.id') class STATUS(object): IDLE = 'idle' FAILED = 'failed' SUCCEEDED = 'succeeded' RUNNING = 'running' WAITING = 'waiting' STOPPING = 'stopping' INACTIVE_STATUSES = [IDLE, FAILED, SUCCEEDED] def __init__(self, name=None, worker_class=None, pipeline_id=None): super(Job, self).__init__() self.name = name self.worker_class = worker_class self.pipeline_id = pipeline_id def destroy(self): sc_ids = [sc.id for sc in self.start_conditions] if sc_ids: StartCondition.destroy(*sc_ids) dependent_job_sc_ids = [ sc.id for sc in StartCondition.where(preceding_job_id=self.id).all()] if dependent_job_sc_ids: StartCondition.destroy(*dependent_job_sc_ids) param_ids = [p.id for p in self.params.all()] if param_ids: Param.destroy(*param_ids) self.delete() def get_ready(self): if self.status not in Job.STATUS.INACTIVE_STATUSES: return False self.set_status(Job.STATUS.WAITING) return True def _get_task_namespace(self): return 'pipeline=%s_job=%s' % (str(self.pipeline_id), str(self.id)) def _add_task_with_name(self, task_name): task_namespace = self._get_task_namespace() TaskEnqueued.create(task_namespace=task_namespace, task_name=task_name) return True def _delete_task_with_name(self, task_name): """ Returns: Number of remaining tasks in the DB. 
""" task_namespace = self._get_task_namespace() TaskEnqueued.where(task_namespace=task_namespace, task_name=task_name).delete() return self._enqueued_task_count() def cancel_tasks(self): task_namespace = self._get_task_namespace() enqueued_tasks = TaskEnqueued.where(task_namespace=task_namespace) if enqueued_tasks: TaskEnqueued.where(task_namespace=task_namespace).delete() def _enqueued_task_count(self): task_namespace = self._get_task_namespace() return TaskEnqueued.count_in_namespace(task_namespace) def _start_condition_is_fulfilled(self, start_condition): preceding_job_status = start_condition.preceding_job.status if start_condition.condition == StartCondition.CONDITION.SUCCESS: if preceding_job_status == Job.STATUS.FAILED: return False elif start_condition.condition == StartCondition.CONDITION.FAIL: if preceding_job_status == Job.STATUS.SUCCEEDED: return False return True def start_as_single(self): """ Returns: Task object that was added to the task queue, otherwise None. """ if self.status != Job.STATUS.WAITING: return None else: self.set_status(Job.STATUS.RUNNING) return self.run() def start(self): """ Returns: Task object that was added to the task queue, otherwise None. """ # Validates that preceding jobs fulfill the starting conditions. 
for start_condition in self.start_conditions: if self._start_condition_is_fulfilled(start_condition): if start_condition.preceding_job.status not in [ Job.STATUS.SUCCEEDED, Job.STATUS.FAILED]: return None else: # pipeline failure self.set_status(Job.STATUS.FAILED) self.pipeline.update(status=Pipeline.STATUS.FAILED, status_changed_at=datetime.now()) self.pipeline.stop() return None if self.pipeline.status == Pipeline.STATUS.FAILED: return None return self.start_as_single() def run(self): worker_params = dict([(p.name, p.worker_value) for p in self.params]) return self.enqueue(self.worker_class, worker_params) def stop(self): self.cancel_tasks() if self.status == Job.STATUS.WAITING: self.set_status(Job.STATUS.IDLE) return True elif self.status == Job.STATUS.RUNNING: self.set_status(Job.STATUS.STOPPING) return True return False def enqueue(self, worker_class, worker_params, delay=0): if self.status != Job.STATUS.RUNNING: return False name = str(uuid.uuid4()) general_settings = {gs.name: gs.value for gs in GeneralSetting.all()} task = Task(name, self.pipeline_id, self.id, worker_class, worker_params, general_settings) task.enqueue(delay) # Keep track of running tasks. self._add_task_with_name(name) self.save() return True def _start_dependent_jobs(self): if self.dependent_jobs: for job in self.dependent_jobs: job.start() def set_status(self, status): self.update(status=status, status_changed_at=datetime.now()) def _task_completed(self, task_name): """Completes task execution. Returns: True if it was the last tasks to be completed. False otherwise. """ remaining_tasks = self._delete_task_with_name(task_name) return remaining_tasks == 0 def task_succeeded(self, task_name): was_last_task = self._task_completed(task_name) # Updates the job database status if there is no more running tasks. # NB: `was_last_task` acts as a concurrent lock, only one task can # validate this condition. if was_last_task: # Cancel all tasks if one condition doesn't match the success status. 
for job in self.dependent_jobs: for start_condition in job.start_conditions: success_statuses = [ StartCondition.CONDITION.SUCCESS, StartCondition.CONDITION.WHATEVER ] if (start_condition.preceding_job.id == self.id and start_condition.condition not in success_statuses): self.set_status(Job.STATUS.SUCCEEDED) return self.pipeline.stop() self.set_status(Job.STATUS.SUCCEEDED) # We can safely start children jobs, because of our concurrent lock. self._start_dependent_jobs() self.pipeline.job_finished() def task_failed(self, task_name): was_last_task = self._task_completed(task_name) # If no dependent jobs then the pipeline failed if not self.dependent_jobs: self.set_status(Job.STATUS.FAILED) return self.pipeline.stop() # Cancel all tasks if one condition doesn't match the failed status. for job in self.dependent_jobs: for start_condition in job.start_conditions: failed_statuses = [ StartCondition.CONDITION.FAIL, StartCondition.CONDITION.WHATEVER ] if (start_condition.preceding_job.id == self.id and start_condition.condition not in failed_statuses): self.set_status(Job.STATUS.FAILED) return self.pipeline.stop() if was_last_task: self.set_status(Job.STATUS.FAILED) # We can safely start children jobs, because of our concurrent lock. 
self._start_dependent_jobs() self.pipeline.job_finished() def assign_attributes(self, attributes): for key, value in attributes.items(): if key in ['params', 'start_conditions', 'id', 'hash_start_conditions']: continue self.__setattr__(key, value) def save_relations(self, relations): for key, value in relations.items(): if key == 'params': self.assign_params(value) elif key == 'start_conditions': self.assign_start_conditions(value) def add_start_conditions(self, items): for item in items: self.start_conditions.append(item) def assign_params(self, parameters): Param.update_list(parameters, self) def assign_hash_start_conditions(self, arg_start_conditions, job_mapping): for arg_start_condition in arg_start_conditions: preceding_job_id = job_mapping[arg_start_condition['preceding_job_id']] StartCondition.create( job_id=self.id, preceding_job_id=preceding_job_id, condition=arg_start_condition['condition'] ) def assign_start_conditions(self, arg_start_conditions): scs = [] for arg_start_condition in arg_start_conditions: scs.append(StartCondition.parse_value(arg_start_condition)) arg_sc_ids = set([sc['id'] for sc in scs]) cur_sc_ids = set([sc.preceding_job_id for sc in self.start_conditions]) sc_intersection_ids = set(arg_sc_ids) & set(cur_sc_ids) new_sc_ids = set(arg_sc_ids) - set(cur_sc_ids) for v in scs: # Add new start conditions if v['id'] in new_sc_ids: StartCondition.create( job_id=self.id, preceding_job_id=v['id'], condition=v['condition'] ) # Update current start conditions elif v['id'] in sc_intersection_ids: sc = StartCondition.where( job_id=self.id, preceding_job_id=v['id'] ).first() sc.condition = v['condition'] sc.save() # Delete extra start conditions delete_sc_ids = set(cur_sc_ids) - set(arg_sc_ids) StartCondition.where( job_id=self.id, preceding_job_id__in=delete_sc_ids ).delete(synchronize_session=False) class Param(BaseModel): __tablename__ = 'params' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255), nullable=False) 
type = Column(String(50), nullable=False) pipeline_id = Column(Integer, ForeignKey('pipelines.id')) job_id = Column(Integer, ForeignKey('jobs.id')) is_required = Column(Boolean, nullable=False, default=False) description = Column(Text) label = Column(String(255)) value = Column(Text()) runtime_value = Column(Text()) _INLINER_REGEX = re.compile(r'{%.+?%}') def populate_runtime_value(self, context={}): names = context.copy() names.update({'True': True, 'False': False}) value = self.value inliners = self._INLINER_REGEX.findall(value) for inliner in inliners: result = simple_eval(inliner[2:-2], functions=inline.functions, names=names) value = value.replace(inliner, str(result)) if self.job_id is not None: self.update(runtime_value=value) return value @property def worker_value(self): if self.type == 'boolean': return self.runtime_value == '1' if self.type == 'number': return _parse_num(self.runtime_value) if self.type == 'string_list': return self.runtime_value.split('\n') if self.type == 'number_list': return [_parse_num(l) for l in self.runtime_value.split('\n') if l.strip()] return self.runtime_value @property def api_value(self): if self.type == 'boolean': return self.value == '1' return self.value def __init__(self, name=None, type=None): self.name = name self.type = type @classmethod def update_list(cls, parameters, obj=None): arg_param_ids = [] for arg_param in parameters: param = None if arg_param.get('id') is not None: # Updating param = Param.find(arg_param.get('id')) else: # Creating param = Param() if obj and obj.__class__.__name__ == 'Pipeline': param.pipeline_id = obj.id elif obj and obj.__class__.__name__ == 'Job': param.job_id = obj.id param.name = arg_param['name'] try: param.label = arg_param['label'] except KeyError: param.label = arg_param['name'] param.type = arg_param['type'] if arg_param['type'] == 'boolean': param.value = arg_param['value'] else: param.value = str(arg_param['value']).encode('utf-8') param.save() arg_param_ids.append(param.id) # 
Removing ids_for_removing = [] params = obj.params if obj else Param.where(pipeline_id=None, job_id=None).all() for param in params: if param.id not in arg_param_ids: ids_for_removing.append(param.id) Param.destroy(*ids_for_removing) class StartCondition(BaseModel): __tablename__ = 'start_conditions' id = Column(Integer, primary_key=True, autoincrement=True) job_id = Column(Integer, ForeignKey('jobs.id')) preceding_job_id = Column(Integer, ForeignKey('jobs.id')) condition = Column(String(255)) job = relationship('Job', foreign_keys=[job_id]) preceding_job = relationship('Job', foreign_keys=[preceding_job_id]) class CONDITION(object): SUCCESS = 'success' FAIL = 'fail' WHATEVER = 'whatever' def __init__(self, job_id=None, preceding_job_id=None, condition=None): self.job_id = job_id self.preceding_job_id = preceding_job_id self.condition = condition @property def preceding_job_name(self): return self.preceding_job.name @property def value(self): return ','.join([str(self.preceding_job_id), self.condition]) @classmethod def parse_value(cls, value): return { 'id': int(value['preceding_job_id']), 'condition': value['condition'] } class Schedule(BaseModel): __tablename__ = 'schedules' id = Column(Integer, primary_key=True, autoincrement=True) pipeline_id = Column(Integer, ForeignKey('pipelines.id')) cron = Column(String(255)) pipeline = relationship('Pipeline', foreign_keys=[pipeline_id]) class GeneralSetting(BaseModel): __tablename__ = 'general_settings' id = Column(Integer, primary_key=True, autoincrement=True) name = Column(String(255)) value = Column(Text()) class Stage(BaseModel): __tablename__ = 'stages' id = Column(Integer, primary_key=True, autoincrement=True) sid = Column(String(255)) def assign_attributes(self, attributes): for key, value in attributes.items(): self.__setattr__(key, value) class TaskEnqueued(BaseModel): __tablename__ = 'enqueued_tasks' id = Column(Integer, primary_key=True, autoincrement=True) task_namespace = Column(String(60), index=True) 
task_name = Column(String(100), index=True, unique=True) @classmethod def count_in_namespace(cls, task_namespace): count_query = cls.where(task_namespace=task_namespace) return count_query.count()
nilq/baby-python
python
# Generated by Django 3.1.2 on 2020-10-25 14:32 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import uuid class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='YandexKassaPayment', fields=[ ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)), ('period', models.CharField(default='month', max_length=255)), ('status', models.CharField(default='pending', max_length=255)), ('description', models.TextField(blank=True, null=True)), ('value', models.PositiveIntegerField()), ('currency', models.CharField(max_length=255)), ('created_at', models.DateTimeField(auto_now_add=True)), ('user', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, related_name='yandex_kassa_payments', to=settings.AUTH_USER_MODEL)), ], ), ]
nilq/baby-python
python
# Command metadata consumed by the bot's command loader.
name = 'membercount'
aliases = ['members']


async def run(message):
    """Lists the number of people that are in this server"""
    count = message.guild.member_count
    reply = f'There are **{count:,}** people in this server.'
    await message.channel.send(reply)
nilq/baby-python
python
import subprocess as sp
import os
# NOTE(review): os appears unused in this module — confirm before removing.
import shutil
import tempfile
import logging

logger = logging.getLogger(__name__)


class AutoLoader(object):
    """Base class for automatic loaders (e.g. Git)"""
    pass


class Git(AutoLoader):
    """Clones a git repository into a temporary directory.

    The directory is removed when the instance is garbage-collected.
    """

    def __init__(self, url, import_as=None, branch=None):
        """Creates a temporary directory full of a git repo.

        Args:
            url: repository URL passed to ``git clone``.
            import_as: optional target directory name inside the temp dir.
            branch: optional branch to check out (``-b`` flag).

        Raises:
            subprocess.CalledProcessError: if the clone fails.
        """
        self.url = url
        logger.debug("Creating temporary directory")
        self.path = tempfile.mkdtemp()
        logger.info("Importing {} using git".format(self.url))
        git_cmd = self._build_clone_command(self.path, self.url,
                                            import_as, branch)
        sp.check_call(git_cmd)

    @staticmethod
    def _build_clone_command(path, url, import_as=None, branch=None):
        """Build the ``git clone`` argv list.

        Building the list directly (instead of formatting a string and
        splitting on spaces) keeps paths or URLs containing whitespace
        intact.
        """
        cmd = ["/usr/bin/git", "-C", path, "clone", url]
        if import_as is not None:
            cmd.append(import_as)
        if branch is not None:
            cmd.extend(["-b", branch])
        return cmd

    def __del__(self):
        # Guard: if mkdtemp() raised in __init__, self.path never existed.
        path = getattr(self, "path", None)
        if path is not None:
            logger.debug("Deleting temporary directory {}".format(path))
            # ignore_errors: exceptions in a destructor are unrecoverable.
            shutil.rmtree(path, ignore_errors=True)
nilq/baby-python
python
"""Filex.""" import random from utils import timex MIN_INT, MAX_INT = 10 ** 15, 10 ** 16 - 1 def read(file_name): """Read.""" with open(file_name, 'r') as fin: content = fin.read() fin.close() return content def write(file_name, content, mode='w'): """Write.""" with open(file_name, mode) as fout: fout.write(content) fout.close() def get_tmp_file(): """Get tmp file name.""" return '/tmp/tmp.%s.%d' % ( timex.format_time(timex.get_unixtime(), '%Y%m%d%H%M%S'), random.randint(MIN_INT, MAX_INT), )
nilq/baby-python
python
# -*- coding: utf-8 -*- """ fudcon.ui ------ fudcon ui application package """
nilq/baby-python
python
from enigma import eRect, eServiceReference, iServiceInformation, iPlayableService from Screens.Screen import Screen from Screens.ServiceInfo import ServiceInfoList, ServiceInfoListEntry from Components.ActionMap import ActionMap, NumberActionMap from Components.Pixmap import Pixmap from Components.Label import Label from Components.ScrollLabel import ScrollLabel from Tools.Directories import resolveFilename, pathExists, fileExists, SCOPE_MEDIA from Components.Sources.List import List from Components.ServicePosition import ServicePositionGauge from Components.ServiceEventTracker import ServiceEventTracker from Components.Sources.StaticText import StaticText from Components.ConfigList import ConfigList, ConfigListScreen from Components.config import * from Components.FileList import FileList from _ctypes import * import os, re from os import path as os_path #------------------------------------------------------------------------------------------ class MC_VideoInfoView(Screen): skin = """ <screen position="80,130" size="560,320" title="View Video Info" > <widget name="infolist" position="5,5" size="550,310" selectionDisabled="1" /> </screen>""" def __init__(self, session, fullname, name, ref): self.skin = MC_VideoInfoView.skin Screen.__init__(self, session) self["actions"] = ActionMap(["OkCancelActions"], { "cancel": self.close, "ok": self.close }, -1) tlist = [ ] self["infolist"] = ServiceInfoList(tlist) currPlay = self.session.nav.getCurrentService() if currPlay is not None: stitle = currPlay.info().getInfoString(iServiceInformation.sTitle) if stitle == "": stitle = currPlay.info().getName().split('/')[-1] tlist.append(ServiceInfoListEntry("Title: ", stitle)) tlist.append(ServiceInfoListEntry("sNamespace: ", currPlay.info().getInfoString(iServiceInformation.sNamespace))) tlist.append(ServiceInfoListEntry("sProvider: ", currPlay.info().getInfoString(iServiceInformation.sProvider))) tlist.append(ServiceInfoListEntry("sTimeCreate: ", 
currPlay.info().getInfoString(iServiceInformation.sTimeCreate))) tlist.append(ServiceInfoListEntry("sVideoWidth: ", currPlay.info().getInfoString(iServiceInformation.sVideoWidth))) tlist.append(ServiceInfoListEntry("sVideoHeight: ", currPlay.info().getInfoString(iServiceInformation.sVideoHeight))) tlist.append(ServiceInfoListEntry("sDescription: ", currPlay.info().getInfoString(iServiceInformation.sDescription))) class Showiframe(): def __init__(self): lib="/usr/lib/" if fileExists(lib +"libshowiframe.so.0.0.0"): self.showiframe = dlopen(lib +"libshowiframe.so.0.0.0") try: self.showSinglePic = dlsym(self.showiframe, "showSinglePic") self.finishShowSinglePic = dlsym(self.showiframe, "finishShowSinglePic") except OSError, e: self.showSinglePic = dlsym(self.showiframe, "_Z13showSinglePicPKc") self.finishShowSinglePic = dlsym(self.showiframe, "_Z19finishShowSinglePicv") def showStillpicture(self, pic): call_function(self.showSinglePic, (pic, )) def finishStillPicture(self): call_function(self.finishShowSinglePic, ()) def shortname(movie,showing = None): movielist = movie.split('/') for n in movielist: if n is not "": movie = n movie = movie.upper() movieback = movie movie = re.sub("\W720P(.*[^.]+).","",movie) movie = re.sub("\W1080I(.*[^.]+).","",movie) movie = re.sub("\W1080P(.*[^.]+).","",movie) movie = re.sub("\W[(].*?[)](.*[^.]+).","",movie) movie = re.sub("\W[[].*?[]](.*[^.]+).","",movie) movie = re.sub("\W[0-9]{4}","",movie) if not showing: movie = re.sub("\WDVDRIP(.*[^.]+).","",movie) movie = re.sub("\WAC3D(.*[^.]+).","",movie) movie = re.sub("\WAC3(.*[^.]+).","",movie) movie = re.sub("\WX264(.*[^.]+).","",movie) movie = re.sub("\WXVID(.*[^.]+).","",movie) movie = re.sub("\WBLURAY(.*[^.]+).","",movie) movie = re.sub("\WGERMAN(.*[^.]+).","",movie) movie = re.sub("\WCD[0-9]{2}","",movie) movie = re.sub("\WCD[0-9]","",movie) movie = re.sub("\WDVD[0-9]{2}","",movie) movie = re.sub("\WDVD[0-9]","",movie) movie = re.sub("\WDISC[0-9]{2}","",movie) movie = 
re.sub("\WDISC[0-9]","",movie) movie = re.sub("\W[0-9]{2}DISC","",movie) movie = re.sub("\W[0-9]DISC","",movie) # movie = re.sub("\WS[0-9]{2}","",movie) # movie = re.sub("\WE[0-9]{2}","",movie) movie = re.sub("\WSEASON[0-9]{2}","",movie) movie = re.sub("\WSEASON[0-9]","",movie) movie = re.sub("[0-9]{8} ","",movie) movie = re.sub(" -","-",movie) if len(movie) != 0: if movie[0] == '-': moviesplit = movie.split('-')[2:] movie = "".join(moviesplit) movie = movie[1:] replace_list = "rar iso img avi mkv mp4 mpg mpeg mts ogm m2ts pls trp ts vdr vob wmv AC3 AC3D BDRIP BLURAY CAM CAMRIP COMPLETE CUSTOM CUT DC Directors DL DOKU DTS DVDR DVDRIP DVDSCR DVDSCREENER EXTENDED FRENCH FiNNiSH GERMAN HD HDDVD HDDVDRip HDTV INT INTERNAL Int LD LiMiTED MULTi MULTiSUBS NORDIC NTSC PAL PL R1 R5 RECUT REMASTERED REPACK RIP SCREENER SE SEE special.edition SSE STV SUBBED SWEDISH Staffel TC TELECINE TELESYNC TS UNCUT UNRATED WS XXX iTALiAN mvcd rsvcd svcd x264" replacelist = replace_list.upper() replacelist = replacelist.split(' ') for n in replacelist: movie = movie.replace(" ", ".") movie = movie.replace(" " + n + " ", ".") movie = movie.replace("." + n + ".", ".") movie = movie.replace("." + n + "-", ".") movie = movie.replace("." + n + "_", ".") movie = movie.replace("-" + n + ".", ".") movie = movie.replace("-" + n + "-", ".") movie = movie.replace("-" + n + "_", ".") movie = movie.replace("_" + n + ".", ".") movie = movie.replace("_" + n + "-", ".") movie = movie.replace("_" + n + "_", ".") movie = movie.replace("..", ".") movie = movie.replace("..", ".") movie = movie.replace("..", ".") movie = movie.replace("..", ".") for n in replacelist: if movie.upper().endswith("." + n): if movie.__contains__("."): while not movie.endswith("."): movie = movie[:-1] movie = movie[:-1] movie = movie.replace(".", " ") movie = movie.replace("-", " ") movie = movie.replace("_", " ") movie = movie.replace(":", " ") if len(movie) == 0: movie = movieback return movie
nilq/baby-python
python
'''
   Content under Creative Commons Attribution license CC-BY 4.0,
   code under MIT license (c)2018 Sergio Rojas (srojas@usb.ve)

   http://en.wikipedia.org/wiki/MIT_License
   http://creativecommons.org/licenses/by/4.0/

   Created on march, 2018
   Last Modified on: may 15, 2018
'''


def myfuncPrimeFactors(n):
    """
    This function finds and returns the prime factorization of a
    whole number (n >= 1) via the reiterative division method.

    Raises:
        ValueError: if n < 1. (The original loop never terminated for
        0 or negative inputs, since 0 % i == 0 holds forever.)

    Example of usage:
       getPrimeFactors = myfuncPrimeFactors( 716 )
       print(getPrimeFactors)
    """
    if n < 1:
        raise ValueError("n must be a positive whole number, got %r" % (n,))
    factors = []
    i = 2
    # Trial division only needs to run while i*i <= n: any remaining
    # n > 1 after that is itself prime.
    while i * i <= n:
        while n % i == 0:
            factors.append(i)
            n //= i
        i += 1
    if n > 1:
        factors.append(n)
    return factors
nilq/baby-python
python
import hashlib
from settings import SIZE


class Address:
    """An (ip, port) endpoint that maps onto the identifier ring [0, SIZE).

    Ordering and hashing are both derived from a deterministic SHA-256
    digest of the "ip" + "port" string, reduced modulo SIZE.
    """

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port

    def __identity(self):
        # Stable byte representation shared by __hash__ and __eq__.
        return f"{self.ip}{self.port}".encode()

    def __hash__(self):
        """
        Python uses a random hash seed to prevent attackers from
        tar-pitting your application by sending you keys designed to
        collide. To keep hash codes stable across runs for this test
        project, a SHA-256 digest is used instead of the built-in hash.
        """
        digest = hashlib.sha256(self.__identity())
        return int(digest.hexdigest(), 16) % SIZE

    def __eq__(self, other):
        if not isinstance(other, Address):
            return NotImplemented
        return self.__identity() == other.__identity()

    def __lt__(self, other):
        return self.__hash__() < other.__hash__()

    def __gt__(self, other):
        return self.__hash__() > other.__hash__()

    def __le__(self, other):
        return self.__hash__() <= other.__hash__()

    def __ge__(self, other):
        return self.__hash__() >= other.__hash__()

    def __str__(self):
        return f'{self.ip}:{self.port}'

    def __repr__(self):
        return str(self)


# Demo: show where a range of local ports lands on the ring.
for port in range(9000, 9020):
    print(f"{port} -> {Address('127.0.0.1', port).__hash__()}")
nilq/baby-python
python
# -*- coding:utf-8 -*- import logging from time import sleep import bigsuds from networkapi.plugins import exceptions as base_exceptions from networkapi.system.facade import get_value as get_variable log = logging.getLogger(__name__) class Lb(object): def __init__(self, hostname, username, password, session=True): self._hostname = hostname self._username = username self._password = password self._time_reconn = 10 try: self._channel = bigsuds.BIGIP( hostname=self._hostname, username=self._username, password=self._password ) except Exception, e: logging.critical("Unable to connect to BIG-IP. Details: %s" % (e)) raise base_exceptions.CommandErrorException(e) else: log.info('Connected in hostname:%s' % hostname) try: self._version = self._channel.System.SystemInfo.get_version() if self._version[8:len(self._version)].split('.')[0] <= 10: raise base_exceptions.UnsupportedVersion( 'This plugin only supports BIG-IP v11 or above') else: if session: log.info('Try get new session') session_cur = self._channel.System.Session.get_session_timeout() log.info('Session Timeout Current: %s' % session_cur) session_timeout = get_variable("set_session_timeout_plugin_f5", 60) if int(session_cur) > session_timeout: self._channel.System.Session.set_session_timeout(session_timeout) self._channel = self.get_session() except Exception, e: log.error(e) raise base_exceptions.CommandErrorException(e) def get_session(self): try: channel = self._channel.with_session_id() log.info('Session %s', channel) except Exception, e: if 'There are too many existing user sessions.'.lower() in str(e).lower(): self._time_reconn *= 2 log.warning( 'There are too many existing user sessions. ' 'Trying again in %s seconds' % self._time_reconn) sleep(self._time_reconn) self.get_session() else: raise e else: return channel
nilq/baby-python
python
import image, touch, gc, time
import lcd  # BUG FIX: lcd is used by showMap() but was never imported
from machine import I2C, SPI
from board import board_info
from fpioa_manager import fm
from Maix import GPIO
from micropython import const
from sx127x import SX127x

# ----- touchscreen / I2C setup (Sipeed Amigo) -----
board_info = board_info()
i2c = I2C(I2C.I2C3, freq=1000*1000, scl=24, sda=27) # amigo
devices = i2c.scan()
print(devices)
touch.TouchLow.config(i2c)
tmp = touch.Touch(320, 480, 200)

# ----- UI state -----
whichButton = -1       # index of the button currently pressed (-1 = none)
message = "Welcome!"   # status line drawn at the bottom of the screen
rssi = ""
snr = ""
loraPacket = ""        # last received payload (consumed by showMap)

# ----- radio state -----
myFreq = 433e6
mySF = 12
myBW = 7               # index into the bandwidth table in setParameters()
myTX = 17
pingCounter = 0

# ----- menu layout -----
squareWidth = 90
squareHeight = 70
check = [2, 4, 5, 8]   # menu indexes currently drawn with a check mark

################### config ###################
LORA_RST = const(22)
LORA_CS = const(12)
LORA_SPI_SCK = const(19)
LORA_SPI_MOSI = const(7)
LORA_SPI_MISO = const(9)
LORA_SPI_NUM = SPI.SPI1
LORA_SPI_FREQ_KHZ = const(100)
##############################################

# gpio init
fm.register(LORA_RST, fm.fpioa.GPIOHS22, force=True) # RST
fm.register(LORA_CS, fm.fpioa.GPIOHS12, force=True) # CS
# set gpiohs work mode to output mode
cs = GPIO(GPIO.GPIOHS12, GPIO.OUT)
rst = GPIO(GPIO.GPIOHS22, GPIO.IN)
spi1 = SPI(LORA_SPI_NUM, mode=SPI.MODE_MASTER, baudrate=LORA_SPI_FREQ_KHZ * 1000,
           polarity=0, phase=0, bits=8, firstbit=SPI.MSB,
           sck=LORA_SPI_SCK, mosi=LORA_SPI_MOSI, miso=LORA_SPI_MISO)
lora = SX127x(spi=spi1, pin_ss=cs)


def Version():
    """Read the SX127x silicon-version register (0x42); 0x12 is expected."""
    global message
    version = lora.readRegister(0x42)
    print("Version: 0x"+hex(version))
    message = "Version: "+hex(version)
    if version == 0x12:
        message += " [o]"
    else:
        message += " [x]"
    showMap()


def PING():
    """Transmit a numbered PING packet and bump the counter."""
    global pingCounter, message
    payload = 'PING #{0}'.format(pingCounter)
    print("Sending packet: {}".format(payload))
    message = "Sent "+payload
    lora.print(payload)
    pingCounter += 1
    showMap()


def NOP():
    print("NOP")


def _setCheck(onIndex, offIndex):
    # Move the check mark from menu index offIndex to onIndex. Shared
    # helper for the mutually exclusive option buttons below — replaces
    # eight copies of the same count/remove/append dance.
    if offIndex in check:
        check.remove(offIndex)
    if onIndex not in check:
        check.append(onIndex)


def SF10():
    global mySF
    mySF = 10
    _setCheck(3, 4)  # check SF10, clear SF12
    setParameters()


def SF12():
    global mySF
    mySF = 12
    _setCheck(4, 3)  # check SF12, clear SF10
    setParameters()


def BW6():
    global myBW
    myBW = 6
    _setCheck(1, 2)  # check BW6, clear BW7
    setParameters()


def BW7():
    global myBW
    myBW = 7
    _setCheck(2, 1)  # check BW7, clear BW6
    setParameters()


def F433():
    global myFreq
    myFreq = 433e6
    _setCheck(5, 6)  # check 433, clear 868
    setParameters()


def F868():
    global myFreq
    myFreq = 868e6
    _setCheck(6, 5)  # check 868, clear 433
    setParameters()


def Tx10():
    global myTX
    myTX = 10
    _setCheck(7, 8)  # check Tx10, clear Tx17
    setParameters()


def Tx17():
    global myTX
    myTX = 17
    _setCheck(8, 7)  # check Tx17, clear Tx10
    setParameters()


menus = ["ping", "BW6", "BW7", "SF10", "SF12", "433", "868", "Tx10", "Tx17"]
actions = [PING, BW6, BW7, SF10, SF12, F433, F868, Tx10, Tx17]
numMenus = len(menus)


def setParameters():
    """Reset the radio, apply myFreq/myBW/mySF/myTX, read the values back
    for verification and show them in the status line."""
    global mySF, myBW, myFreq, myTX, message
    # hardware reset pulse on the RST line
    rst.value(0)
    time.sleep_ms(10)
    rst.value(1)
    time.sleep_ms(100)
    lora.init()
    fq = round(myFreq/1000000, 3)
    print("Setting freq to: {0} MHz".format(fq))
    lora.setFrequency(myFreq)
    # bandwidth table (Hz), indexed by myBW; out-of-range falls back to 125 kHz
    bins = (7.8E3, 10.4E3, 15.6E3, 20.8E3, 31.25E3, 41.7E3, 62.5E3, 125E3, 250E3, 500E3)
    if myBW < 0 or myBW > 9:
        myBW = 7
    BWrate = bins[myBW]
    print("Setting BW to: "+str(BWrate/1e3)+" KHz / "+str(myBW))
    lora.setSignalBandwidth(BWrate)
    print("Setting SF to: "+str(mySF))
    lora.setSpreadingFactor(mySF)
    print("Setting TX power to: "+str(myTX))
    lora.setTxPower(myTX)
    print("------------------------")
    print("Checking:")
    # Read everything back so the console shows what the chip accepted.
    fq = round(lora.getFrequency()/1000000.0, 3)
    print("• fq: {0} MHz".format(fq))
    sf = lora.getSpreadingFactor()
    print("• sf: "+str(sf))
    bwnum, bw = lora.getSignalBandwidth()
    print("• bw: {0} ie {1} KHz".format(bwnum, (bw/1e3)))
    Pout, Pmax, paboost = lora.getTxPower()
    if paboost:
        paboost = "PA_BOOST pin"
    else:
        paboost = "RFO pin"
    print('Pout {0} dBm, Pmax {1}, {2}'.format(Pout, Pmax, paboost))
    print("------------------------")
    message = "{0} MHz SF{1} BW {2} KHz".format(fq, sf, round(bw/1e3, 1))
    showMap()


def showMap():
    """Redraw the UI: menu grid, status message and, if present, the last
    received LoRa packet (which is consumed here)."""
    global loraPacket
    img = image.Image(size=(320, 480))
    img.draw_rectangle(0, 0, 320, 480, color=(255, 64, 64), fill=True)
    img.draw_string(140, 10, "MENU", color=(255, 255, 255), scale=2)
    for i in range(0, numMenus):
        x = (i % 3) * (squareWidth+10) + 10
        y = int(i/3) * (squareHeight+10) + 50
        if whichButton == i:
            # highlight the button currently under the finger
            img.draw_rectangle(x, y, squareWidth, squareHeight, color=(0, 191, 191), fill=True)
        img.draw_rectangle(x, y, squareWidth, squareHeight, color=(0, 0, 0), thickness=3)
        # BUG FIX: was "clr = color=(255, 255, 255)" — a chained assignment
        # that also created a stray global named "color".
        clr = (255, 255, 255)
        if whichButton == i:
            clr = (33, 33, 33)
        if i in check:
            # check mark
            img.draw_rectangle(x+3, y+3, squareWidth-6, squareHeight-6, color=(0, 0, 255), thickness=3)
        dsp = menus[i]
        offsetX = 45 - (8*len(dsp))  # rough horizontal centering of the label
        img.draw_string(x+offsetX, y+20, dsp, clr, scale=3)
    py = y + squareHeight + 10  # first free row under the menu grid
    ln = len(message)
    if ln > 0:
        myScale = 2
        myWidth = 5 * myScale
        img.draw_string(int((320-ln*myWidth)/2), 470-myScale*10, message, (0, 0, 0), scale=myScale)
    if len(loraPacket) > 0:
        myScale = 2
        pieces = []
        limit = 28  # max characters per displayed line
        # Consume the global packet buffer, wrapping it into screen lines.
        while len(loraPacket) > 0:
            pieces.append(loraPacket[0:limit])
            loraPacket = loraPacket[limit:]
        pieces.append(rssi+" "+snr)
        for piece in pieces:
            img.draw_string(6, py, piece, (255, 222, 222), scale=myScale)
            py += 24
    lcd.rotation(1)
    lcd.mirror(1)
    lcd.display(img)
    gc.collect()


# initial draw and radio configuration
showMap()
setParameters()

while 1:
    tmp.event()
    [(y0, x0, t0), (y1, x1, t1)] = tmp.points
    if x0 != 0 and y0 != 0:
        print("Touch")
        # Track the finger until it lifts, live-highlighting the button
        # under it; (x1, y1) is the most recent touch point.
        while x0 != 0 and y0 != 0:
            saveX = x1
            saveY = y1
            if saveY < 50:
                whichButton = -1  # touch in the title bar: nothing selected
            else:
                x = int((saveX-10)/(squareWidth+10))
                y = int((saveY-50)/(squareHeight+10))
                whichButton = y*3+x
            showMap()
            tmp.event()
            [(y0, x0, t0), (y1, x1, t1)] = tmp.points
        print("Released")
        if saveY < 50:
            print('abort')
        else:
            print(str(saveX)+":"+str(saveY))
            x = int((saveX-10)/(squareWidth+10))
            y = int((saveY-50)/(squareHeight+10))
            index = y*3+x
            if index > (numMenus-1):
                print('abort')
            else:
                print("You selected menu: "+str(index))
                actions[index]()  # run the handler for the released button
        whichButton = -1
        showMap()
        gc.collect()
    if lora.receivedPacket():
        try:
            loraPacket = lora.read_payload().decode()
            rssi = "RSSI: {}".format(lora.packetRssi())
            snr = "SNR: {}".format(lora.packetSNR())
            print("*** Received message *** {} {} {}".format(loraPacket, rssi, snr))
            message = "Incoming!"
            showMap()
        except Exception as e:
            print(e)
    gc.collect()
    time.sleep_ms(30)
nilq/baby-python
python
#!/usr/bin/env python
"""Resolve an AWS VPC by its Name tag and print its id to stdout.

Exits non-zero with a message on stderr when the region is unreachable,
no VPCs exist, the named VPC is missing, or the name is ambiguous.
"""
import argparse
import sys

import boto3
import botocore

parser = argparse.ArgumentParser(description='Check if the given AWS VPC exists.')
parser.add_argument('--region_name', dest='region_name', action='store', required=True,
                    help='AWS Region name, e.g. eu-west-1')
parser.add_argument('--vpc_name', dest='vpc_name', action='store', required=True,
                    help='AWS VPC name, e.g. backend_vpc')
args = parser.parse_args()

try:
    conn_ec2 = boto3.resource('ec2', region_name=args.region_name)
except botocore.exceptions.EndpointConnectionError as e:
    sys.stderr.write("EC2: Could not connect to AWS region: %s, check credentials, IAM role privileges, region name." % args.region_name)
    sys.stderr.write(str(e))
    sys.exit(1)

# Collect the distinct VPC ids of all instances in one pass.
# (The old code had a dead "instances = conn_ec2.instances" assignment and
# deferred the None filter — instances outside a VPC report vpc_id None.)
instances = conn_ec2.instances.filter(Filters=[])
all_vpc_ids = list({instance.vpc_id for instance in instances if instance.vpc_id is not None})

if not all_vpc_ids:
    sys.stderr.write("No VPCs found. Please verify that VPC %s exists and/or create one and try again." % args.vpc_name)
    sys.exit(1)

# Keep only the VPCs whose Name tag matches the requested name; fetch the
# tag list once per VPC instead of twice.
target_vpc = []
for vpc_id in all_vpc_ids:
    tags = conn_ec2.Vpc(vpc_id).tags
    if tags and {'Key': 'Name', 'Value': args.vpc_name} in tags:
        target_vpc.append(vpc_id)

if not target_vpc:
    sys.stderr.write("No VPC found. Please verify that VPC %s exists and/or create one and then try again." % args.vpc_name)
    sys.exit(1)

if len(target_vpc) > 1:
    sys.stderr.write("More than one %s VPC found. Please investigate. There can be only one..." % args.vpc_name)
    sys.exit(1)

sys.stdout.write(target_vpc[0])
nilq/baby-python
python
def arrays(arr):
    """Convert `arr` to a float NumPy array and return it reversed."""
    # Same behavior as numpy.array(arr, float)[::-1], written stepwise.
    as_floats = numpy.array(arr, float)
    return as_floats[::-1]
nilq/baby-python
python
# Copyright 2004-2018 Tom Rothamel <pytom@bishoujo.us> # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # This contains various Displayables that handle events. from __future__ import print_function import renpy.display import renpy.audio from renpy.display.render import render, Render import pygame_sdl2 as pygame import math def compile_event(key, keydown): """ Compiles a keymap entry into a python expression. keydown determines if we are dealing with keys going down (press), or keys going up (release). """ # Lists or tuples get turned into or expressions. if isinstance(key, (list, tuple)): if not key: return "(False)" return "(" + " or ".join([compile_event(i, keydown) for i in key]) + ")" # If it's in config.keymap, compile what's in config.keymap. 
if key in renpy.config.keymap: return compile_event(renpy.config.keymap[key], keydown) if key in renpy.config.default_keymap: return compile_event(renpy.config.default_keymap[key], keydown) if key is None: return "(False)" part = key.split("_") # Deal with the mouse. if part[0] == "mousedown": if keydown: return "(ev.type == %d and ev.button == %d)" % (pygame.MOUSEBUTTONDOWN, int(part[1])) else: return "(False)" if part[0] == "mouseup": if keydown: return "(ev.type == %d and ev.button == %d)" % (pygame.MOUSEBUTTONUP, int(part[1])) else: return "(False)" # Deal with the Joystick / Gamepad. if part[0] == "joy" or part[0] == "pad": return "(False)" # Otherwise, deal with it as a key. if keydown: rv = "(ev.type == %d" % pygame.KEYDOWN else: rv = "(ev.type == %d" % pygame.KEYUP MODIFIERS = { "repeat", "alt", "meta", "shift", "noshift", "ctrl" } modifiers = set() while part[0] in MODIFIERS: modifiers.add(part.pop(0)) key = "_".join(part) if "repeat" in modifiers: rv += " and (ev.repeat)" else: rv += " and (not ev.repeat)" if key not in [ "K_LALT", "K_RALT" ]: if "alt" in modifiers: rv += " and (ev.mod & %d)" % pygame.KMOD_ALT else: rv += " and not (ev.mod & %d)" % pygame.KMOD_ALT if key not in [ "K_LGUI", "K_RGUI" ]: if "meta" in modifiers: rv += " and (ev.mod & %d)" % pygame.KMOD_META else: rv += " and not (ev.mod & %d)" % pygame.KMOD_META if key not in [ "K_LCTRL", "K_RCTRL" ]: if "ctrl" in modifiers: rv += " and (ev.mod & %d)" % pygame.KMOD_CTRL else: rv += " and not (ev.mod & %d)" % pygame.KMOD_CTRL if key not in [ "K_LSHIFT", "K_RSHIFT" ]: if "shift" in modifiers: rv += " and (ev.mod & %d)" % pygame.KMOD_SHIFT if "noshift" in modifiers: rv += " and not (ev.mod & %d)" % pygame.KMOD_SHIFT if len(part) == 1: if len(part[0]) != 1: if renpy.config.developer: raise Exception("Invalid key specifier %s" % key) else: return "(False)" rv += " and ev.unicode == %r)" % part[0] else: if part[0] != "K": if renpy.config.developer: raise Exception("Invalid key specifier %s" % key) 
else: return "(False)" rv += " and ev.key == %d)" % (getattr(pygame.constants, key)) return rv # These store a lambda for each compiled key in the system. event_cache = { } keyup_cache = { } def clear_keymap_cache(): """ :doc: other Clears the keymap cache. This allows changes to :var:`config.keymap` to take effect without restarting Ren'Py. """ event_cache.clear() keyup_cache.clear() def queue_event(name, up=False, **kwargs): """ :doc: other Queues an event with the given name. `Name` should be one of the event names in :var:`config.keymap`, or a list of such names. `up` This should be false when the event begins (for example, when a keyboard button is pressed.) It should be true when the event ends (when the button is released.) The event is queued at the time this function is called. This function will not work to replace an event with another - doing so will change event order. (Use :var:`config.keymap` instead.) This method is threadsafe. """ # Avoid queueing events before we're ready. if not renpy.display.interface: return if not isinstance(name, (list, tuple)): name = [ name ] data = { "eventnames" : name, "up" : up } data.update(kwargs) ev = pygame.event.Event(renpy.display.core.EVENTNAME, data) pygame.event.post(ev) def map_event(ev, keysym): """ :doc: udd_utility Returns true if the pygame event `ev` matches `keysym` `keysym` One of: * The name of a keybinding in :var:`config.keymap`. * A keysym, as documented in the :ref:`keymap` section. * A list containing one or more keysyms. 
""" if ev.type == renpy.display.core.EVENTNAME: if (keysym in ev.eventnames) and not ev.up: return True return False check_code = event_cache.get(keysym, None) if check_code is None: check_code = eval("lambda ev : " + compile_event(keysym, True), globals()) event_cache[keysym] = check_code return check_code(ev) def map_keyup(ev, name): """Returns true if the event matches the named keycode being released.""" if ev.type == renpy.display.core.EVENTNAME: if (name in ev.eventnames) and ev.up: return True check_code = keyup_cache.get(name, None) if check_code is None: check_code = eval("lambda ev : " + compile_event(name, False), globals()) keyup_cache[name] = check_code return check_code(ev) def skipping(ev): """ This handles setting skipping in response to the press of one of the CONTROL keys. The library handles skipping in response to TAB. """ if not renpy.config.allow_skipping: return if not renpy.store._skipping: return if map_event(ev, "skip"): renpy.config.skipping = "slow" renpy.exports.restart_interaction() if map_keyup(ev, "skip") or map_event(ev, "stop_skipping"): renpy.config.skipping = None renpy.exports.restart_interaction() return def inspector(ev): return map_event(ev, "inspector") ############################################################################## # Utility functions for dealing with actions. def predict_action(var): """ Predicts some of the actions that may be caused by a variable. """ if var is None: return if isinstance(var, renpy.ui.Action): var.predict() if isinstance(var, (list, tuple)): for i in var: predict_action(i) def run(action, *args, **kwargs): """ :doc: run :name: renpy.run :args: (action) Run an action or list of actions. A single action is called with no arguments, a list of actions is run in order using this function, and None is ignored. Returns the result of the first action to return a value. 
""" if action is None: return None if isinstance(action, (list, tuple)): rv = None for i in action: new_rv = run(i, *args, **kwargs) if new_rv is not None: rv = new_rv return rv return action(*args, **kwargs) def run_unhovered(var): """ Calls the unhovered method on the variable, if it exists. """ if var is None: return None if isinstance(var, (list, tuple)): for i in var: run_unhovered(i) return f = getattr(var, "unhovered", None) if f is not None: f() def run_periodic(var, st): if isinstance(var, (list, tuple)): rv = None for i in var: v = run_periodic(i, st) if rv is None or v < rv: rv = v return rv if isinstance(var, renpy.ui.Action): return var.periodic(st) def is_selected(action): """ :doc: run Returns true if `action` indicates it is selected, or false otherwise. """ if isinstance(action, (list, tuple)): for i in action: if isinstance(i, renpy.store.SelectedIf): # @UndefinedVariable return i.get_selected() return any(is_selected(i) for i in action) elif isinstance(action, renpy.ui.Action): return action.get_selected() else: return False def is_sensitive(action): """ :doc: run Returns true if `action` indicates it is sensitive, or False otherwise. """ if isinstance(action, (list, tuple)): for i in action: if isinstance(i, renpy.store.SensitiveIf): # @UndefinedVariable return i.get_sensitive() return all(is_sensitive(i) for i in action) elif isinstance(action, renpy.ui.Action): return action.get_sensitive() else: return True def alt(clicked): if isinstance(clicked, (list, tuple)): rv = [ ] for i in clicked: t = alt(i) if t is not None: rv.append(t) if rv: return " ".join(rv) else: return None if isinstance(clicked, renpy.ui.Action): return clicked.alt else: return None ############################################################################## # Special-Purpose Displayables class Keymap(renpy.display.layout.Null): """ This is a behavior that maps keys to actions that are called when the key is pressed. 
The keys are specified by giving the appropriate k_constant from pygame.constants, or the unicode for the key. """ def __init__(self, replaces=None, activate_sound=None, **keymap): if activate_sound is not None: super(Keymap, self).__init__(style='default', activate_sound=activate_sound) else: super(Keymap, self).__init__(style='default') self.keymap = keymap def event(self, ev, x, y, st): for name, action in self.keymap.iteritems(): if map_event(ev, name): renpy.exports.play(self.style.activate_sound) rv = run(action) if rv is not None: return rv raise renpy.display.core.IgnoreEvent() def predict_one_action(self): for i in self.keymap.itervalues(): predict_action(i) class RollForward(renpy.display.layout.Null): """ This behavior implements rollforward. """ def __init__(self, value, **properties): super(RollForward, self).__init__(**properties) self.value = value def event(self, ev, x, y, st): if map_event(ev, "rollforward"): return renpy.exports.roll_forward_core(self.value) class PauseBehavior(renpy.display.layout.Null): """ This is a class implementing the Pause behavior, which is to return a value after a certain amount of time has elapsed. """ voice = False def __init__(self, delay, result=False, voice=False, **properties): super(PauseBehavior, self).__init__(**properties) self.delay = delay self.result = result self.voice = voice def event(self, ev, x, y, st): if st >= self.delay: if self.voice and renpy.config.nw_voice: if (not renpy.config.afm_callback()) or renpy.display.tts.is_active(): renpy.game.interface.timeout(0.05) return # If we have been drawn since the timeout, simply return # true. Otherwise, force a redraw, and return true when # it comes back. 
if renpy.game.interface.drawn_since(st - self.delay): return self.result else: renpy.game.interface.force_redraw = True renpy.game.interface.timeout(max(self.delay - st, 0)) class SoundStopBehavior(renpy.display.layout.Null): """ This is a class implementing the sound stop behavior, which is to return False when a sound is no longer playing on the named channel. """ def __init__(self, channel, result=False, **properties): super(SoundStopBehavior, self).__init__(**properties) self.channel = channel self.result = result def event(self, ev, x, y, st): if not renpy.audio.music.get_playing(self.channel): return self.result renpy.game.interface.timeout(.025) class SayBehavior(renpy.display.layout.Null): """ This is a class that implements the say behavior, which is to return True (ending the interaction) if the user presses space or enter, or clicks the left mouse button. """ focusable = True text = None dismiss_unfocused = [ 'dismiss_unfocused' ] def __init__(self, default=True, afm=None, dismiss=[ 'dismiss' ], allow_dismiss=None, dismiss_unfocused=[ 'dismiss_unfocused' ], **properties): super(SayBehavior, self).__init__(default=default, **properties) if not isinstance(dismiss, (list, tuple)): dismiss = [ dismiss ] if afm is not None: self.afm_length = len(afm) else: self.afm_length = None # What keybindings lead to dismissal? 
self.dismiss = dismiss self.allow_dismiss = allow_dismiss def _tts_all(self): raise renpy.display.tts.TTSRoot() def set_text(self, text): self.text = text self.afm_length = max(text.end - text.start, 1) def event(self, ev, x, y, st): if self.afm_length and renpy.game.preferences.afm_time and renpy.game.preferences.afm_enable: afm_delay = ( 1.0 * ( renpy.config.afm_bonus + self.afm_length ) / renpy.config.afm_characters ) * renpy.game.preferences.afm_time if self.text is not None: afm_delay += self.text.get_time() if st > afm_delay: if renpy.config.afm_callback: if renpy.config.afm_callback() and not renpy.display.tts.is_active(): return True else: renpy.game.interface.timeout(0.1) else: return True else: renpy.game.interface.timeout(afm_delay - st) dismiss = [ (i, True) for i in self.dismiss ] + [ (i, False) for i in self.dismiss_unfocused ] for dismiss_event, check_focus in dismiss: if map_event(ev, dismiss_event): if check_focus and not self.is_focused(): continue if renpy.config.skipping: renpy.config.skipping = None renpy.exports.restart_interaction() raise renpy.display.core.IgnoreEvent() if not renpy.config.enable_rollback_side: rollback_side = "disable" if renpy.mobile: rollback_side = renpy.game.preferences.mobile_rollback_side else: rollback_side = renpy.game.preferences.desktop_rollback_side if ev.type == pygame.MOUSEBUTTONUP: percent = 1.0 * x / renpy.config.screen_width if rollback_side == "left": if percent < renpy.config.rollback_side_size: renpy.exports.rollback() raise renpy.display.core.IgnoreEvent() elif rollback_side == "right": if (1.0 - percent) < renpy.config.rollback_side_size: renpy.exports.rollback() raise renpy.display.core.IgnoreEvent() if renpy.game.preferences.using_afm_enable and \ renpy.game.preferences.afm_enable and \ not renpy.game.preferences.afm_after_click: renpy.game.preferences.afm_enable = False renpy.exports.restart_interaction() raise renpy.display.core.IgnoreEvent() if self.allow_dismiss: if not self.allow_dismiss(): raise 
renpy.display.core.IgnoreEvent() return True skip_delay = renpy.config.skip_delay / 1000.0 if renpy.config.skipping and renpy.config.allow_skipping and renpy.store._skipping: if ev.type == renpy.display.core.TIMEEVENT and st >= skip_delay: if renpy.game.preferences.skip_unseen: return True elif renpy.config.skipping == "fast": return True elif renpy.game.context().seen_current(True): return True else: renpy.config.skipping = False renpy.exports.restart_interaction() else: renpy.game.interface.timeout(skip_delay - st) return None ############################################################################## # Button KEY_EVENTS = ( pygame.KEYDOWN, pygame.KEYUP, pygame.TEXTEDITING, pygame.TEXTINPUT ) class Button(renpy.display.layout.Window): keymap = { } action = None alternate = None longpress_start = None longpress_x = None longpress_y = None role_parameter = None keysym = None alternate_keysym = None def __init__(self, child=None, style='button', clicked=None, hovered=None, unhovered=None, action=None, role=None, time_policy=None, keymap={}, alternate=None, selected=None, sensitive=None, keysym=None, alternate_keysym=None, **properties): if isinstance(clicked, renpy.ui.Action): action = clicked super(Button, self).__init__(child, style=style, **properties) self.action = action self.selected = selected self.sensitive = sensitive self.clicked = clicked self.hovered = hovered self.unhovered = unhovered self.alternate = alternate self.focusable = True # (clicked is not None) or (action is not None) self.role_parameter = role self.keymap = keymap self.keysym = keysym self.alternate_keysym = alternate_keysym self.time_policy_data = None self._duplicatable = False def _duplicate(self, args): if args and args.args: args.extraneous() return self def predict_one_action(self): predict_action(self.clicked) predict_action(self.hovered) predict_action(self.unhovered) predict_action(self.alternate) if self.keymap: for v in self.keymap.itervalues(): predict_action(v) def 
render(self, width, height, st, at): if self.style.time_policy: st, self.time_policy_data = self.style.time_policy(st, self.time_policy_data, self.style) rv = super(Button, self).render(width, height, st, at) if self.clicked: rect = self.style.focus_rect if rect is not None: fx, fy, fw, fh = rect else: fx = self.style.left_margin fy = self.style.top_margin fw = rv.width - self.style.right_margin fh = rv.height - self.style.bottom_margin mask = self.style.focus_mask if mask is True: mask = rv elif mask is not None: try: mask = renpy.display.render.render(mask, rv.width, rv.height, st, at) except: if callable(mask): mask = mask else: raise Exception("Focus_mask must be None, True, a displayable, or a callable.") if mask is not None: fmx = 0 fmy = 0 else: fmx = None fmy = None rv.add_focus(self, None, fx, fy, fw, fh, fmx, fmy, mask) return rv def focus(self, default=False): super(Button, self).focus(default) rv = None if not default: rv = run(self.hovered) self.set_transform_event(self.role + "hover") if self.child is not None: self.child.set_transform_event(self.role + "hover") return rv def unfocus(self, default=False): super(Button, self).unfocus(default) self.longpress_start = None if not default: run_unhovered(self.hovered) run(self.unhovered) self.set_transform_event(self.role + "idle") if self.child is not None: self.child.set_transform_event(self.role + "idle") def is_selected(self): if self.selected is not None: return self.selected return is_selected(self.action) def is_sensitive(self): if self.sensitive is not None: return self.sensitive return is_sensitive(self.action) def per_interact(self): if self.action is not None: if self.is_selected(): role = 'selected_' else: role = '' if self.is_sensitive(): clicked = self.action else: clicked = None role = '' else: role = '' clicked = self.clicked if self.role_parameter is not None: role = self.role_parameter if (role != self.role) or (clicked is not self.clicked): renpy.display.render.invalidate(self) self.role 
= role self.clicked = clicked if self.clicked is not None: self.set_style_prefix(self.role + "idle_", True) self.focusable = True else: self.set_style_prefix(self.role + "insensitive_", True) self.focusable = False super(Button, self).per_interact() def event(self, ev, x, y, st): def handle_click(action): renpy.exports.play(self.style.activate_sound) rv = run(action) if rv is not None: return rv else: raise renpy.display.core.IgnoreEvent() # Call self.action.periodic() timeout = run_periodic(self.action, st) if timeout is not None: renpy.game.interface.timeout(timeout) # If we have a child, try passing the event to it. (For keyboard # events, this only happens if we're focused.) if (not (ev.type in KEY_EVENTS)) or self.style.key_events: rv = super(Button, self).event(ev, x, y, st) if rv is not None: return rv if (self.keysym is not None) and (self.clicked is not None): if map_event(ev, self.keysym): return handle_click(self.clicked) if (self.alternate_keysym is not None) and (self.alternate is not None): if map_event(ev, self.alternate_keysym): return handle_click(self.alternate) # If not focused, ignore all events. if not self.is_focused(): return None # Check the keymap. for name, action in self.keymap.iteritems(): if map_event(ev, name): return run(action) # Handle the longpress event, if necessary. 
if (self.alternate is not None) and renpy.display.touch: if ev.type == pygame.MOUSEBUTTONDOWN and ev.button == 1: self.longpress_start = st self.longpress_x = x self.longpress_y = y renpy.game.interface.timeout(renpy.config.longpress_duration) if self.longpress_start is not None: if math.hypot(x - self.longpress_x, y - self.longpress_y) > renpy.config.longpress_radius: self.longpress_start = None elif st >= (self.longpress_start + renpy.config.longpress_duration): renpy.exports.vibrate(renpy.config.longpress_vibrate) renpy.display.interface.after_longpress() return handle_click(self.alternate) # Ignore as appropriate: if (self.clicked is not None) and map_event(ev, "button_ignore"): raise renpy.display.core.IgnoreEvent() if (self.clicked is not None) and map_event(ev, "button_alternate_ignore"): raise renpy.display.core.IgnoreEvent() # If clicked, if (self.clicked is not None) and map_event(ev, "button_select"): return handle_click(self.clicked) if (self.alternate is not None) and map_event(ev, "button_alternate"): return handle_click(self.alternate) return None def set_style_prefix(self, prefix, root): if root: super(Button, self).set_style_prefix(prefix, root) def _tts(self): return "" def _tts_all(self): rv = self._tts_common(alt(self.action)) if self.is_selected(): rv += " " + renpy.minstore.__("selected") return rv # Reimplementation of the TextButton widget as a Button and a Text # widget. def TextButton(text, style='button', text_style='button_text', clicked=None, **properties): text_properties, button_properties = renpy.easy.split_properties(properties, "text_", "") text = renpy.text.text.Text(text, style=text_style, **text_properties) # @UndefinedVariable return Button(text, style=style, clicked=clicked, **button_properties) class ImageButton(Button): """ Used to implement the guts of an image button. 
""" def __init__(self, idle_image, hover_image=None, insensitive_image=None, activate_image=None, selected_idle_image=None, selected_hover_image=None, selected_insensitive_image=None, selected_activate_image=None, style='image_button', clicked=None, hovered=None, **properties): hover_image = hover_image or idle_image insensitive_image = insensitive_image or idle_image activate_image = activate_image or hover_image selected_idle_image = selected_idle_image or idle_image selected_hover_image = selected_hover_image or hover_image selected_insensitive_image = selected_insensitive_image or insensitive_image selected_activate_image = selected_activate_image or activate_image self.state_children = dict( idle_=renpy.easy.displayable(idle_image), hover_=renpy.easy.displayable(hover_image), insensitive_=renpy.easy.displayable(insensitive_image), activate_=renpy.easy.displayable(activate_image), selected_idle_=renpy.easy.displayable(selected_idle_image), selected_hover_=renpy.easy.displayable(selected_hover_image), selected_insensitive_=renpy.easy.displayable(selected_insensitive_image), selected_activate_=renpy.easy.displayable(selected_activate_image), ) super(ImageButton, self).__init__(None, style=style, clicked=clicked, hovered=hovered, **properties) def visit(self): return self.state_children.values() def get_child(self): return self.style.child or self.state_children[self.style.prefix] # This is used for an input that takes its focus from a button. class HoveredProxy(object): def __init__(self, a, b): self.a = a self.b = b def __call__(self): self.a() if self.b: return self.b() # The currently editable input value. current_input_value = None # Is the current input value active? input_value_active = False # The default input value to use if the currently editable value doesn't # exist. default_input_value = None # A list of input values that exist. input_values = [ ] # A list of inputs that exist in the current interaction. 
inputs = [ ] def input_pre_per_interact(): global input_values global inputs global default_input_value input_values = [ ] inputs = [ ] default_input_value = None def input_post_per_interact(): global current_input_value global input_value_active for i in input_values: if i is current_input_value: break else: current_input_value = default_input_value input_value_active = True for i in inputs: editable = (i.value is current_input_value) and input_value_active and i.value.editable content = i.value.get_text() if (i.editable != editable) or (content != i.content): i.update_text(content, editable) i.caret_pos = len(content) class Input(renpy.text.text.Text): # @UndefinedVariable """ This is a Displayable that takes text as input. """ changed = None prefix = "" suffix = "" caret_pos = 0 old_caret_pos = 0 pixel_width = None default = u"" edit_text = u"" value = None def __init__(self, default="", length=None, style='input', allow=None, exclude=None, prefix="", suffix="", changed=None, button=None, replaces=None, editable=True, pixel_width=None, value=None, **properties): super(Input, self).__init__("", style=style, replaces=replaces, substitute=False, **properties) if value: self.value = value changed = value.set_text default = value.get_text() self.default = unicode(default) self.content = self.default self.length = length self.allow = allow self.exclude = exclude self.prefix = prefix self.suffix = suffix self.changed = changed self.editable = editable self.pixel_width = pixel_width caretprops = { 'color' : None } for i in properties: if i.endswith("color"): caretprops[i] = properties[i] self.caret = renpy.display.image.Solid(xmaximum=1, style=style, **caretprops) self.caret_pos = len(self.content) self.old_caret_pos = self.caret_pos if button: self.editable = False button.hovered = HoveredProxy(self.enable, button.hovered) button.unhovered = HoveredProxy(self.disable, button.unhovered) if isinstance(replaces, Input): self.content = replaces.content self.editable = 
replaces.editable self.caret_pos = replaces.caret_pos self.update_text(self.content, self.editable) def _show(self): if self.default != self.content: self.content = self.default self.caret_pos = len(self.content) self.update_text(self.content, self.editable) def update_text(self, new_content, editable, check_size=False): edit = renpy.display.interface.text_editing old_content = self.content if new_content != self.content or editable != self.editable or edit: renpy.display.render.redraw(self, 0) self.editable = editable # Choose the caret. caret = self.style.caret if caret is None: caret = self.caret # Format text being edited by the IME. if edit: self.edit_text = edit.text edit_text_0 = edit.text[:edit.start] edit_text_1 = edit.text[edit.start:edit.start + edit.length] edit_text_2 = edit.text[edit.start + edit.length:] edit_text = "" if edit_text_0: edit_text += "{u=1}" + edit_text_0.replace("{", "{{") + "{/u}" if edit_text_1: edit_text += "{u=2}" + edit_text_1.replace("{", "{{") + "{/u}" if edit_text_2: edit_text += "{u=1}" + edit_text_2.replace("{", "{{") + "{/u}" else: self.edit_text = "" edit_text = "" def set_content(content): if content == "": content = u"\u200b" if editable: l = len(content) self.set_text([self.prefix, content[0:self.caret_pos].replace("{", "{{"), edit_text, caret, content[self.caret_pos:l].replace("{", "{{"), self.suffix]) else: self.set_text([self.prefix, content.replace("{", "{{"), self.suffix ]) set_content(new_content) if check_size and self.pixel_width: w, _h = self.size() if w > self.pixel_width: self.caret_pos = self.old_caret_pos set_content(old_content) return if new_content != old_content: self.content = new_content if self.changed: self.changed(new_content) # This is needed to ensure the caret updates properly. 
def set_style_prefix(self, prefix, root): if prefix != self.style.prefix: self.update_text(self.content, self.editable) super(Input, self).set_style_prefix(prefix, root) def enable(self): self.update_text(self.content, True) def disable(self): self.update_text(self.content, False) def per_interact(self): global default_input_value if self.value is not None: inputs.append(self) input_values.append(self.value) if self.value.default and (default_input_value is None): default_input_value = self.value def event(self, ev, x, y, st): self.old_caret_pos = self.caret_pos if not self.editable: return None if (ev.type == pygame.KEYDOWN) and (pygame.key.get_mods() & pygame.KMOD_LALT) and (not ev.unicode): return None l = len(self.content) raw_text = None if map_event(ev, "input_backspace"): if self.content and self.caret_pos > 0: content = self.content[0:self.caret_pos-1] + self.content[self.caret_pos:l] self.caret_pos -= 1 self.update_text(content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif map_event(ev, "input_enter"): content = self.content if self.edit_text: content = content[0:self.caret_pos] + self.edit_text + self.content[self.caret_pos:] if self.value: return self.value.enter() if not self.changed: return content elif map_event(ev, "input_left"): if self.caret_pos > 0: self.caret_pos -= 1 self.update_text(self.content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif map_event(ev, "input_right"): if self.caret_pos < l: self.caret_pos += 1 self.update_text(self.content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif map_event(ev, "input_delete"): if self.caret_pos < l: content = self.content[0:self.caret_pos] + self.content[self.caret_pos+1:l] self.update_text(content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif map_event(ev, "input_home"): self.caret_pos = 0 
self.update_text(self.content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif map_event(ev, "input_end"): self.caret_pos = l self.update_text(self.content, self.editable) renpy.display.render.redraw(self, 0) raise renpy.display.core.IgnoreEvent() elif ev.type == pygame.TEXTEDITING: self.update_text(self.content, self.editable, check_size=True) raise renpy.display.core.IgnoreEvent() elif ev.type == pygame.TEXTINPUT: self.edit_text = "" raw_text = ev.text elif ev.type == pygame.KEYDOWN: if ev.unicode and ord(ev.unicode[0]) >= 32: raw_text = ev.unicode elif renpy.display.interface.text_event_in_queue(): raw_text = '' if raw_text is not None: text = "" for c in raw_text: if self.allow and c not in self.allow: continue if self.exclude and c in self.exclude: continue text += c if self.length: remaining = self.length - len(self.content) text = text[:remaining] if text: content = self.content[0:self.caret_pos] + text + self.content[self.caret_pos:l] self.caret_pos += len(text) self.update_text(content, self.editable, check_size=True) raise renpy.display.core.IgnoreEvent() def render(self, width, height, st, at): rv = super(Input, self).render(width, height, st, at) if self.editable: rv.text_input = True return rv # A map from adjustment to lists of displayables that want to be redrawn # if the adjustment changes. adj_registered = { } # This class contains information about an adjustment that can change the # position of content. class Adjustment(renpy.object.Object): """ :doc: ui :name: ui.adjustment class Adjustment objects represent a value that can be adjusted by a bar or viewport. They contain information about the value, the range of the value, and how to adjust the value in small steps and large pages. 
""" def __init__(self, range=1, value=0, step=None, page=None, changed=None, adjustable=None, ranged=None): # @ReservedAssignment """ The following parameters correspond to fields or properties on the adjustment object: `range` The range of the adjustment, a number. `value` The value of the adjustment, a number. `step` The step size of the adjustment, a number. If None, then defaults to 1/10th of a page, if set. Otherwise, defaults to the 1/20th of the range. This is used when scrolling a viewport with the mouse wheel. `page` The page size of the adjustment. If None, this is set automatically by a viewport. If never set, defaults to 1/10th of the range. It's can be used when clicking on a scrollbar. The following parameters control the behavior of the adjustment. `adjustable` If True, this adjustment can be changed by a bar. If False, it can't. It defaults to being adjustable if a `changed` function is given or if the adjustment is associated with a viewport, and not adjustable otherwise. `changed` This function is called with the new value when the value of the adjustment changes. `ranged` This function is called with the adjustment object when the range of the adjustment is set by a viewport. .. method:: change(value) Changes the value of the adjustment to `value`, updating any bars and viewports that use the adjustment. 
""" super(Adjustment, self).__init__() if adjustable is None: if changed: adjustable = True self._value = value self._range = range self._page = page self._step = step self.changed = changed self.adjustable = adjustable self.ranged = ranged def get_value(self): if self._value > self._range: return self._range return self._value def set_value(self, v): self._value = v value = property(get_value, set_value) def get_range(self): return self._range def set_range(self, v): self._range = v if self.ranged: self.ranged(self) range = property(get_range, set_range) # @ReservedAssignment def get_page(self): if self._page is not None: return self._page return self._range / 10 def set_page(self, v): self._page = v page = property(get_page, set_page) def get_step(self): if self._step is not None: return self._step if self._page is not None and self.page > 0: return self._page / 10 if isinstance(self._range, float): return self._range / 10 else: return 1 def set_step(self, v): self._step = v step = property(get_step, set_step) # Register a displayable to be redrawn when this adjustment changes. def register(self, d): adj_registered.setdefault(self, [ ]).append(d) def change(self, value): if value < 0: value = 0 if value > self._range: value = self._range if value != self._value: self._value = value for d in adj_registered.setdefault(self, [ ]): renpy.display.render.redraw(d, 0) if self.changed: return self.changed(value) return None def update(self): """ Updates things that depend on this adjustment without firing the changed handler. """ for d in adj_registered.setdefault(self, [ ]): renpy.display.render.redraw(d, 0) class Bar(renpy.display.core.Displayable): """ Implements a bar that can display an integer value, and respond to clicks on that value. 
""" __version__ = 2 def after_upgrade(self, version): if version < 1: self.adjustment = Adjustment(self.range, self.value, changed=self.changed) # E1101 self.adjustment.register(self) del self.range # E1101 del self.value # E1101 del self.changed # E1101 if version < 2: self.value = None def __init__(self, range=None, # @ReservedAssignment value=None, width=None, height=None, changed=None, adjustment=None, step=None, page=None, bar=None, style=None, vertical=False, replaces=None, hovered=None, unhovered=None, **properties): self.value = None if adjustment is None: if isinstance(value, renpy.ui.BarValue): if isinstance(replaces, Bar): value.replaces(replaces.value) self.value = value adjustment = value.get_adjustment() renpy.game.interface.timeout(0) else: adjustment = Adjustment(range, value, step=step, page=page, changed=changed) if style is None: if self.value is not None: if vertical: style = self.value.get_style()[1] else: style = self.value.get_style()[0] else: if vertical: style = 'vbar' else: style = 'bar' if width is not None: properties['xmaximum'] = width if height is not None: properties['ymaximum'] = height super(Bar, self).__init__(style=style, **properties) self.adjustment = adjustment self.focusable = True # These are set when we are first rendered. self.thumb_dim = 0 self.height = 0 self.width = 0 self.hidden = False self.hovered = hovered self.unhovered = unhovered def per_interact(self): if self.value is not None: adjustment = self.value.get_adjustment() if adjustment.value != self.value: renpy.display.render.invalidate(self) self.adjustment = adjustment self.focusable = self.adjustment.adjustable self.adjustment.register(self) def visit(self): rv = [ ] self.style._visit_bar(rv.append) return rv def render(self, width, height, st, at): # Handle redrawing. 
if self.value is not None: redraw = self.value.periodic(st) if redraw is not None: renpy.display.render.redraw(self, redraw) xminimum = self.style.xminimum yminimum = self.style.yminimum if xminimum is not None: width = max(width, xminimum) height = max(height, yminimum) # Store the width and height for the event function to use. self.width = width self.height = height range = self.adjustment.range # @ReservedAssignment value = self.adjustment.value page = self.adjustment.page if range <= 0: if self.style.unscrollable == "hide": self.hidden = True return renpy.display.render.Render(width, height) elif self.style.unscrollable == "insensitive": self.set_style_prefix("insensitive_", True) self.hidden = False if self.style.bar_invert ^ self.style.bar_vertical: value = range - value bar_vertical = self.style.bar_vertical if bar_vertical: dimension = height else: dimension = width fore_gutter = self.style.fore_gutter aft_gutter = self.style.aft_gutter active = dimension - fore_gutter - aft_gutter if range: thumb_dim = active * page / (range + page) else: thumb_dim = active thumb_offset = abs(self.style.thumb_offset) if bar_vertical: thumb = render(self.style.thumb, width, thumb_dim, st, at) thumb_shadow = render(self.style.thumb_shadow, width, thumb_dim, st, at) thumb_dim = thumb.height else: thumb = render(self.style.thumb, thumb_dim, height, st, at) thumb_shadow = render(self.style.thumb_shadow, thumb_dim, height, st, at) thumb_dim = thumb.width # Remove the offset from the thumb. 
thumb_dim -= thumb_offset * 2 self.thumb_dim = thumb_dim active -= thumb_dim if range: fore_size = active * value / range else: fore_size = active fore_size = int(fore_size) aft_size = active - fore_size fore_size += fore_gutter aft_size += aft_gutter rv = renpy.display.render.Render(width, height) if bar_vertical: if self.style.bar_resizing: foresurf = render(self.style.fore_bar, width, fore_size, st, at) aftsurf = render(self.style.aft_bar, width, aft_size, st, at) rv.blit(thumb_shadow, (0, fore_size - thumb_offset)) rv.blit(foresurf, (0, 0), main=False) rv.blit(aftsurf, (0, height-aft_size), main=False) rv.blit(thumb, (0, fore_size - thumb_offset)) else: foresurf = render(self.style.fore_bar, width, height, st, at) aftsurf = render(self.style.aft_bar, width, height, st, at) rv.blit(thumb_shadow, (0, fore_size - thumb_offset)) rv.blit(foresurf.subsurface((0, 0, width, fore_size)), (0, 0), main=False) rv.blit(aftsurf.subsurface((0, height - aft_size, width, aft_size)), (0, height - aft_size), main=False) rv.blit(thumb, (0, fore_size - thumb_offset)) else: if self.style.bar_resizing: foresurf = render(self.style.fore_bar, fore_size, height, st, at) aftsurf = render(self.style.aft_bar, aft_size, height, st, at) rv.blit(thumb_shadow, (fore_size - thumb_offset, 0)) rv.blit(foresurf, (0, 0), main=False) rv.blit(aftsurf, (width-aft_size, 0), main=False) rv.blit(thumb, (fore_size - thumb_offset, 0)) else: foresurf = render(self.style.fore_bar, width, height, st, at) aftsurf = render(self.style.aft_bar, width, height, st, at) rv.blit(thumb_shadow, (fore_size - thumb_offset, 0)) rv.blit(foresurf.subsurface((0, 0, fore_size, height)), (0, 0), main=False) rv.blit(aftsurf.subsurface((width - aft_size, 0, aft_size, height)), (width-aft_size, 0), main=False) rv.blit(thumb, (fore_size - thumb_offset, 0)) if self.focusable: rv.add_focus(self, None, 0, 0, width, height) return rv def focus(self, default=False): super(Bar, self).focus(default) self.set_transform_event("hover") if 
not default: run(self.hovered) def unfocus(self, default=False): super(Bar, self).unfocus() self.set_transform_event("idle") if not default: run_unhovered(self.hovered) run(self.unhovered) def event(self, ev, x, y, st): if not self.focusable: return None if not self.is_focused(): return None if self.hidden: return None range = self.adjustment.range # @ReservedAssignment old_value = self.adjustment.value value = old_value vertical = self.style.bar_vertical invert = self.style.bar_invert ^ vertical if invert: value = range - value grabbed = (renpy.display.focus.get_grab() is self) just_grabbed = False ignore_event = False if not grabbed and map_event(ev, "bar_activate"): renpy.display.tts.speak(renpy.minstore.__("activate")) renpy.display.focus.set_grab(self) self.set_style_prefix("selected_hover_", True) just_grabbed = True grabbed = True ignore_event = True if grabbed: if vertical: increase = "bar_down" decrease = "bar_up" else: increase = "bar_right" decrease = "bar_left" if map_event(ev, decrease): renpy.display.tts.speak(renpy.minstore.__("decrease")) value -= self.adjustment.step ignore_event = True if map_event(ev, increase): renpy.display.tts.speak(renpy.minstore.__("increase")) value += self.adjustment.step ignore_event = True if ev.type in (pygame.MOUSEMOTION, pygame.MOUSEBUTTONUP, pygame.MOUSEBUTTONDOWN): if vertical: tgutter = self.style.fore_gutter bgutter = self.style.aft_gutter zone_height = self.height - tgutter - bgutter - self.thumb_dim if zone_height: value = (y - tgutter - self.thumb_dim / 2) * range / zone_height else: value = 0 else: lgutter = self.style.fore_gutter rgutter = self.style.aft_gutter zone_width = self.width - lgutter - rgutter - self.thumb_dim if zone_width: value = (x - lgutter - self.thumb_dim / 2) * range / zone_width else: value = 0 ignore_event = True if isinstance(range, int): value = int(value) if value < 0: renpy.display.tts.speak("") value = 0 if value > range: renpy.display.tts.speak("") value = range if invert: value = 
range - value if grabbed and not just_grabbed and map_event(ev, "bar_deactivate"): renpy.display.tts.speak(renpy.minstore.__("deactivate")) self.set_style_prefix("hover_", True) renpy.display.focus.set_grab(None) ignore_event = True if value != old_value: rv = self.adjustment.change(value) if rv is not None: return rv if ignore_event: raise renpy.display.core.IgnoreEvent() else: return None def set_style_prefix(self, prefix, root): if root: super(Bar, self).set_style_prefix(prefix, root) def _tts(self): return "" def _tts_all(self): if self.value is not None: alt = self.value.alt else: alt = "" return self._tts_common(alt) + renpy.minstore.__("bar") class Conditional(renpy.display.layout.Container): """ This class renders its child if and only if the condition is true. Otherwise, it renders nothing. (Well, a Null). Warning: the condition MUST NOT update the game state in any way, as that would break rollback. """ def __init__(self, condition, *args, **properties): super(Conditional, self).__init__(*args, **properties) self.condition = condition self.null = renpy.display.layout.Null() self.state = eval(self.condition, vars(renpy.store)) def render(self, width, height, st, at): if self.state: return render(self.child, width, height, st, at) else: return render(self.null, width, height, st, at) def event(self, ev, x, y, st): state = eval(self.condition, vars(renpy.store)) if state != self.state: renpy.display.render.redraw(self, 0) self.state = state if state: return self.child.event(ev, x, y, st) class TimerState(renpy.python.RevertableObject): """ Stores the state of the timer, which may need to be rolled back. """ # Prevents us from having to worry about our initialization being # rolled back. 
    # Default state: the timer has not fired its first tick and no event is
    # scheduled. (Class attributes of TimerState; instances override them.)
    started = False
    next_event = None


class Timer(renpy.display.layout.Null):
    """
    An invisible displayable that runs `action` after `delay` seconds of
    interaction time, optionally repeating. The mutable timer state lives
    in a TimerState object so that it participates in rollback.
    """

    __version__ = 1

    started = False

    def after_upgrade(self, version):
        # Migrate pre-version-1 saves: move started/next_event off the
        # Timer itself and into a rollback-aware TimerState object.
        if version < 1:
            self.state = TimerState()
            self.state.started = self.started
            self.state.next_event = self.next_event

    def __init__(self, delay, action=None, repeat=False, args=(), kwargs={}, replaces=None, **properties):
        """
        `delay`
            Seconds (interaction time) before the action runs. Must be > 0.
        `action`
            The callable to run; required.
        `repeat`
            If true, the timer re-arms itself after each firing.
        `args`, `kwargs`
            Extra arguments passed to `action`.
        `replaces`
            A Timer from a previous interaction whose state we inherit, so
            a re-shown screen does not restart the countdown.
        """

        super(Timer, self).__init__(**properties)

        if action is None:
            raise Exception("A timer must have an action supplied.")

        if delay <= 0:
            raise Exception("A timer's delay must be > 0.")

        # The delay.
        self.delay = delay

        # Should we repeat the event?
        self.repeat = repeat

        # The time the next event should occur.
        self.next_event = None

        # The function and its arguments.
        self.function = action
        self.args = args
        self.kwargs = kwargs

        # Did we start the timer?
        self.started = False

        # Inherit state from the timer we replace, otherwise start fresh.
        if replaces is not None:
            self.state = replaces.state
        else:
            self.state = TimerState()

    def event(self, ev, x, y, st):

        state = self.state

        # Arm the timer on the first event we see.
        if not state.started:
            state.started = True
            state.next_event = st + self.delay

        # A one-shot timer that already fired: nothing to do.
        if state.next_event is None:
            return

        # Not due yet - ask the interface to wake us when it is.
        if st < state.next_event:
            renpy.game.interface.timeout(state.next_event - st)
            return

        if not self.repeat:
            state.next_event = None
        else:
            # Schedule the next tick; if we fell badly behind, re-anchor
            # to "now" instead of firing a burst of catch-up ticks.
            state.next_event = state.next_event + self.delay
            if state.next_event < st:
                state.next_event = st + self.delay

            renpy.game.interface.timeout(state.next_event - st)

        return run(self.function, *self.args, **self.kwargs)


class MouseArea(renpy.display.core.Displayable):
    """
    An invisible region that runs `hovered` / `unhovered` actions as the
    mouse enters and leaves it.
    """

    # The offset between st and at.
    at_st_offset = 0

    def __init__(self, hovered=None, unhovered=None, replaces=None, **properties):
        super(MouseArea, self).__init__(**properties)

        self.hovered = hovered
        self.unhovered = unhovered

        # Are we hovered right now?
        self.is_hovered = False

        # Inherit hover state across a screen re-show so we don't re-fire
        # the hovered action spuriously.
        if replaces is not None:
            self.is_hovered = replaces.is_hovered

        # Taken from the render.
        # Size of the most recent render; event() hit-tests against these.
        self.width = 0
        self.height = 0

    def render(self, width, height, st, at):
        self.width = width
        self.height = height

        # Remember the st->at offset so event() can render the focus mask
        # at the correct animation time later.
        self.at_st_offset = at - st

        return Render(width, height)

    def event(self, ev, x, y, st):

        # Mouseareas should not handle events when something else is grabbing.
        if renpy.display.focus.get_grab():
            return

        # Hit-test: use the focus mask's opaque pixels when one is styled,
        # otherwise fall back to the rectangular bounds of the last render.
        if self.style.focus_mask is not None:
            crend = renpy.display.render.render(self.style.focus_mask, self.width, self.height, st, self.at_st_offset + st)
            is_hovered = crend.is_pixel_opaque(x, y)
        elif 0 <= x < self.width and 0 <= y < self.height:
            is_hovered = True
        else:
            is_hovered = False

        # Fire the actions only on state transitions, not every event.
        if is_hovered and not self.is_hovered:
            self.is_hovered = True

            return run(self.hovered)

        elif not is_hovered and self.is_hovered:
            self.is_hovered = False

            run_unhovered(self.hovered)
            run(self.unhovered)


class OnEvent(renpy.display.core.Displayable):
    """
    This is a displayable that runs an action in response to a transform
    event. It's used to implement the screen language on statement.
    """

    def __init__(self, event, action=[ ]):
        """
        `event`
            A string giving the event name.

        `action`
            An action or list of actions that are run when the event occurs.
        """

        super(OnEvent, self).__init__()

        self.event_name = event
        self.action = action

    def _handles_event(self, event):
        # Report whether we respond to `event`, so the transform machinery
        # knows to deliver it to us.
        if self.event_name == event:
            return True
        else:
            return False

    def set_transform_event(self, event):
        # Run our action when the matching transform event arrives.
        if event == self.event_name:
            run(self.action)

    def render(self, width, height, st, at):
        # Zero-size render: this displayable is never visible.
        return renpy.display.render.Render(0, 0)
nilq/baby-python
python
#
# -*- coding: utf-8 -*-
"""Tests for django-redis-cache over unix domain sockets.

Each TestCase below re-runs the shared BaseRedisTestCase suite against a
different cache configuration (single vs. sharded backend, hiredis vs.
pure-python protocol parser). The redis servers are expected to listen on
/tmp/redis0.sock .. /tmp/redis2.sock with password 'yadayada', db 15.
"""
from collections import Counter

from tests.testapp.tests.base_tests import BaseRedisTestCase
from tests.testapp.tests.multi_server_tests import MultiServerTests

from django.test import TestCase, override_settings

# Single-server socket location, and the three locations used for sharding.
LOCATION = "unix://:yadayada@/tmp/redis0.sock?db=15"
LOCATIONS = [
    "unix://:yadayada@/tmp/redis0.sock?db=15",
    "unix://:yadayada@/tmp/redis1.sock?db=15",
    "unix://:yadayada@/tmp/redis2.sock?db=15",
]


class SocketTestCase(BaseRedisTestCase, TestCase):
    # Marker base class: all socket-transport test cases derive from this
    # and inherit the full behavioral suite from BaseRedisTestCase.
    pass


# Single backend, hiredis (C) protocol parser.
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': LOCATION,
            'OPTIONS': {
                'DB': 15,
                'PASSWORD': 'yadayada',
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'PICKLE_VERSION': 2,
                'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 2,
                }
            },
        },
    }
)
class SingleHiredisTestCase(SocketTestCase):
    pass


# Single backend, pure-python protocol parser.
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': LOCATION,
            'OPTIONS': {
                'DB': 15,
                'PASSWORD': 'yadayada',
                'PARSER_CLASS': 'redis.connection.PythonParser',
                'PICKLE_VERSION': 2,
                'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 2,
                }
            },
        },
    }
)
class SinglePythonParserTestCase(SocketTestCase):
    pass


# Sharded backend across three servers, hiredis parser.
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'redis_cache.ShardedRedisCache',
            'LOCATION': LOCATIONS,
            'OPTIONS': {
                'DB': 15,
                'PASSWORD': 'yadayada',
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'PICKLE_VERSION': 2,
                'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 2,
                }
            },
        },
    }
)
class MultipleHiredisTestCase(MultiServerTests, SocketTestCase):

    def test_equal_number_of_nodes(self):
        # The consistent-hash sharder should spread its virtual nodes
        # evenly: 16 per physical server (node._node[3] is the socket path).
        counter = Counter(
            [node._node[3] for node in self.cache.sharder._nodes]
        )
        self.assertEqual(counter, {
            '/tmp/redis0.sock': 16,
            '/tmp/redis1.sock': 16,
            '/tmp/redis2.sock': 16,
        })


# Sharded backend across three servers, pure-python parser.
@override_settings(
    CACHES={
        'default': {
            'BACKEND': 'redis_cache.ShardedRedisCache',
            'LOCATION': LOCATIONS,
            'OPTIONS': {
                'DB': 15,
                'PASSWORD': 'yadayada',
                'PARSER_CLASS': 'redis.connection.PythonParser',
                'PICKLE_VERSION': 2,
                'CONNECTION_POOL_CLASS': 'redis.ConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 2,
                }
            },
        },
    }
)
class MultiplePythonParserTestCase(MultiServerTests, SocketTestCase):
    pass
nilq/baby-python
python
"""GRU-based price-prediction model: builders plus save/load helpers.

Models are persisted under trained_models/<MODEL_FOLDER>/ as a Keras JSON
architecture file, an HDF5 weights file, a params.json (optimizer/loss),
and the training history.
"""
import os
import logging

# (windows only for now)
if os.name == 'nt':
    try:
        logging.info('Looking for CUDA and adding it to path...')
        # some python versions fail to load the path variables, so we're
        # doing it manually here before importing tf
        loaddir = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v11.2/bin"
        os.add_dll_directory(loaddir)
        logging.info('Found!')
    except Exception as ex:
        logging.info(f'CUDA not found, this gon be slow af \n{ex}')

import shutil
import json
import numpy as np

# TF import is deliberately delayed until after the CUDA DLL path is set.
logging.info('Loading TensorFlow Libs...')
# noqa: E402
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, GRU
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
logging.info('Done!')

# Save model weights
VERBOSE: int = 1  # 0: no visual feedback, 1: animated progress bar, 2: show number of epoch
# Checkpoint to a fixed working-directory file; save() later moves it into
# the model folder under a symbol-specific name.
checkpointer = ModelCheckpoint(
    filepath="pred_model_weights.hdf5",
    verbose=VERBOSE,
    save_best_only=True)

# Use early stopping to exit training if validation loss is not decreasing
# even after certain epochs (patience)
PATIENCE: int = 20  # For early-stopping
earlystopping = EarlyStopping(
    monitor='loss', mode='min', verbose=VERBOSE, patience=PATIENCE)

# Callback list handed to model.fit() by the training code.
model_metrics = [checkpointer, earlystopping]


# This function builds a sequential model (linear - only one pathway)
def build_sequential(window_len, input_columns, output_size, neurons=3000, activ_func='linear', dropout=0.2, loss='mse', optimizer='adam'):
    """Build and compile a Sequential GRU + dense-stack model.

    window_len / input_columns define the (timesteps, features) input shape;
    output_size is the number of predicted values. Returns the compiled model.
    """
    model = Sequential()
    # model.add(Embedding(window_len, 512))
    model.add(GRU(neurons, input_shape=(window_len, input_columns)))
    model.add(Dense(1024, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(dropout))
    model.add(Dense(units=output_size, activation=activ_func))
    model.compile(loss=loss, optimizer=optimizer)
    return model


# This function builds a nn model using the 'functional api':
# it's more advanced and allows to have multiple pathways (non-linear)
def build(window_len, input_columns, output_size, neurons, activ_func='linear', dropout=0.2, loss='mse', optimizer='adam'):
    """Build and compile the functional-API variant of the model.

    Currently a single GRU followed by three Dense(neurons*2)/Dropout
    stages; the commented lines below are earlier experiments (deeper
    stacks and a second short pathway) kept for reference.
    """
    inputs = keras.Input(shape=(window_len, input_columns))
    # long path
    gru = GRU(neurons)
    x = gru(inputs)
    x = Dense(neurons*2, activation="relu")(x)
    x = Dropout(dropout)(x)
    x = Dense(neurons*2, activation="relu")(x)
    x = Dropout(dropout)(x)
    x = Dense(neurons*2, activation="relu")(x)
    x = Dropout(dropout)(x)
    # Experimental alternatives (disabled): additional relu/linear dense
    # stages of decreasing width, and a second "short path" GRU whose
    # output was Add()-ed to the long path.
    outputs = Dense(output_size, activation=activ_func)(x)

    model = keras.Model(inputs=inputs, outputs=outputs, name="Price_Prediction_Model")
    model.compile(loss=loss, optimizer=optimizer)
    return model


def save(model, history, config):
    """Persist model architecture, weights, params and history.

    Writes the architecture JSON and history to the working directory,
    then moves them (plus the checkpointer's weights file) into
    trained_models/<config.MODEL_FOLDER>/, overwriting any existing folder.
    """
    model_json = model.to_json()
    with open(f"{config.SYMBOL}_pred_model.json", "w") as json_file:
        json_file.write(model_json)
    if history is not None:
        np.save('history.npy', history.history)
    path = os.path.join('trained_models', config.MODEL_FOLDER)
    # NOTE(review): conditional expression used for side effects - creates
    # the folder, or logs+wipes+recreates it when it already exists.
    os.mkdir(path) if not os.path.isdir(path) else [logging.info(
        f'This model version already exists! OVERWRITING!'), shutil.rmtree(path), os.mkdir(path)]
    params = {'optimizer': config.OPTIMIZER, 'loss': config.LOSS}
    with open(os.path.join(path, 'params.json'), 'w') as json_file:
        json.dump(params, json_file)
    # NOTE(review): this rename runs even when history is None, in which
    # case history.npy was never written and this raises FileNotFoundError
    # - confirm callers always pass a history.
    os.rename("history.npy", os.path.join(path, "history.npy"))
    os.rename(f"{config.SYMBOL}_pred_model.json", os.path.join(
        path, f"{config.SYMBOL}_pred_model.json"))
    os.rename("pred_model_weights.hdf5", os.path.join(
        path, f"{config.SYMBOL}_pred_model_weights.hdf5"))
    logging.info(f'Trained model successfully saved to {path}')


def load(model_folder, optimizer=None, loss=None, metrics=None):
    """Load a saved model from trained_models/<model_folder>/ and compile it.

    If optimizer/loss are not given they are read from the folder's
    params.json. Returns the compiled model, or None when the architecture
    or params files are missing.

    NOTE(review): the filenames here are hard-coded to the 'eth' symbol
    ('eth_pred_model.json' / 'eth_pred_model_weights.hdf5'), while save()
    writes f"{config.SYMBOL}_..." - loading only works for models saved
    with SYMBOL == 'eth'. Confirm and parameterize the symbol.
    """
    logging.info(f'Loading model <{model_folder}>...')
    try:
        with open(os.path.join('trained_models', model_folder, 'eth_pred_model.json'), 'r') as json_file:
            json_saved_model = json_file.read()
    except Exception as e:
        logging.info(f'Failed to load the model - model not found!\n{e}')
        return
    model = tf.keras.models.model_from_json(json_saved_model)
    # history=np.load(os.path.join('trained_models', model_folder, 'history.npy'),allow_pickle='TRUE').item()
    if optimizer is None or loss is None:
        logging.info(
            f'No model parameters given - reading from file')
        try:
            with open(os.path.join('trained_models', model_folder, 'params.json')) as json_file:
                params = json.load(json_file)
        except Exception as e:
            logging.info(f'Failed to load the params - file not found! Please provide the optimizer and loss '
                         f'in the function call or in a params.json file\n{e}')
            return
        optimizer = params['optimizer']
        loss = params['loss']
    try:
        logging.info('Loading model weights...')
        model.load_weights(os.path.join(
            'trained_models', model_folder, 'eth_pred_model_weights.hdf5'))
    except Exception as e:
        # NOTE(review): a missing weights file is only logged; execution
        # falls through and returns a compiled model with *untrained*
        # weights - confirm this best-effort behavior is intended.
        logging.info(f'Failed to load the model - weight file not found!\n{e}')
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    logging.info(f'Successfully loaded the model')
    return model
nilq/baby-python
python
"""First-boot setup script: builds a fake filesystem layout under ./MainDrive,
asks for a username/password, stores only the SHA-256 hash of the password,
then hands off to the boot screen."""
import os
import shutil
import getpass
import hashlib

os.system('cls')  # clear the console (Windows 'cls'; no-op elsewhere)
print("Done...")

# Start from a clean slate. A previous run leaves MainDrive non-empty
# (it contains Users/), so os.rmdir() would raise -- remove the whole tree.
if 'MainDrive' in os.listdir('.'):
    shutil.rmtree("MainDrive")
os.mkdir('MainDrive/')
os.mkdir('MainDrive/Users/')

username = input("Username: ")
password = getpass.getpass("Password (No echo): ")

# Store only the SHA-256 digest of the password, never the plaintext.
# (renamed from `hash`, which shadowed the builtin)
pw_hash = hashlib.sha256(password.encode()).hexdigest()

os.mkdir(f'MainDrive/Users/{username}')
if 'ubin' not in os.listdir('.'):
    os.mkdir('ubin')

# Context managers guarantee the files are flushed and closed (the original
# left the handles from open(...).write(...) dangling).
with open(f"MainDrive/Users/{username}/usrdir", "w") as usr_file:
    usr_file.write(username)
with open(f"MainDrive/Users/{username}/pswdir", "w") as psw_file:
    psw_file.write(pw_hash)

os.mkdir(f'MainDrive/Users/{username}/Desktop')

# NOTE(review): passing the plaintext password on the command line exposes it
# in the process list; consider an environment variable or passing the hash.
os.system(f'python3 bootscreen.py --noboot --nologin --username {username} --password {password}')
nilq/baby-python
python
# -*- encoding: utf-8 -*- """ @File : Surprise_SGD.py @Time : 2020/11/21 14:41 @Author : biao chen @Email : 1259319710@qq.com @Software: PyCharm """ from surprise import Dataset from surprise import Reader from surprise import BaselineOnly, KNNBasic from surprise import accuracy from surprise.model_selection import KFold # 数据读取 file_path = 'E:/python/machina/kaggle_practice/week4/data/ratings.csv' reader = Reader(line_format='user item rating timestamp', sep=',', skip_lines=1) data = Dataset.load_from_file(file_path, reader=reader) train_set = data.build_full_trainset() ''' SGD参数: reg:代价函数的正则化项,默认为0.02。 learning_rate:学习率,默认为0.005。 n_epochs:迭代次数,默认为20。 ''' # Baseline算法,使用SGD进行优化 bsl_options = {'method': 'sgd','n_epochs': 5} algo = BaselineOnly(bsl_options=bsl_options) # 定义K折交叉验证迭代器,K=3 kf = KFold(n_splits=3) for trainset, testset in kf.split(data): algo.fit(trainset) predictions = algo.test(testset) accuracy.rmse(predictions, verbose=True) uid = str(196) iid = str(302) pred = algo.predict(uid, iid, r_ui=4, verbose=True) print(pred) # 迭代速度比ALS快
nilq/baby-python
python
######## # Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. import uuid from datetime import datetime from flask import current_app from manager_rest import models, storage_manager from manager_rest.blueprints_manager import tasks, BlueprintsManager import manager_rest.manager_exceptions import manager_rest.workflow_client as wf_client from dsl_parser import constants from handlers import (DeploymentUpdateNodeHandler, DeploymentUpdateNodeInstanceHandler) from validator import StepValidator from utils import extract_ids from constants import STATE, CHANGE_TYPE class DeploymentUpdateManager(object): def __init__(self): self.sm = storage_manager.get_storage_manager() self.workflow_client = wf_client.get_workflow_client() self._node_handler = DeploymentUpdateNodeHandler() self._node_instance_handler = DeploymentUpdateNodeInstanceHandler() self._step_validator = StepValidator() def get_deployment_update(self, deployment_update_id): """Return the deployment update object :param deployment_update_id: :return: """ return self.sm.get_deployment_update(deployment_update_id) def deployment_updates_list(self, include=None, filters=None, pagination=None, sort=None): """Return a list of deployment updates. 
:param include: :param filters: :param pagination: :param sort: :return: """ return self.sm.deployment_updates_list(include=include, filters=filters, pagination=pagination, sort=sort) def stage_deployment_update(self, deployment_id, staged_blueprint): """Stage a deployment update :param deployment_id: the deployment id for the update :param staged_blueprint: the modified blueprint :return: """ self._validate_no_active_updates_per_deployment(deployment_id) deployment_update = models.DeploymentUpdate(deployment_id, staged_blueprint) self.sm.put_deployment_update(deployment_update) return deployment_update def create_deployment_update_step(self, deployment_update_id, operation, entity_type, entity_id): """Create deployment update step :param deployment_update_id: :param operation: add/remove/modify :param entity_type: add/relationship :param entity_id: :return: """ step = models.DeploymentUpdateStep(operation, entity_type, entity_id) dep_update = self.get_deployment_update(deployment_update_id) self._step_validator.validate(dep_update, step) self.sm.put_deployment_update_step(deployment_update_id, step) return step def commit_deployment_update(self, deployment_update_id): """commit the deployment update steps :param deployment_update_id: :return: """ dep_update = self.get_deployment_update(deployment_update_id) # mark deployment update as committing dep_update.state = STATE.COMMITTING self.sm.update_deployment_update(dep_update) # Update the nodes on the storage modified_entity_ids, depup_nodes = \ self._node_handler.handle(dep_update) # Extract changes from raw nodes node_instance_changes = self._extract_changes(dep_update, depup_nodes) # Create (and update for adding step type) node instances # according to the changes in raw_nodes depup_node_instances = \ self._node_instance_handler.handle(dep_update, node_instance_changes) # Saving the needed changes back to sm for future use # (removing entities). 
dep_update.deployment_update_nodes = depup_nodes dep_update.deployment_update_node_instances = depup_node_instances dep_update.modified_entity_ids = modified_entity_ids.to_dict() self.sm.update_deployment_update(dep_update) # Execute update workflow using added and related instances # This workflow will call a finalize_update, since removing entities # should be done after the executions. # The raw_node_instances are being used only for their ids, Thus # They should really hold the finished version for the node instance. self._execute_update_workflow(dep_update, depup_node_instances, modified_entity_ids.to_dict()) return models.DeploymentUpdate(deployment_update_id, dep_update.blueprint) def _validate_no_active_updates_per_deployment(self, deployment_id): """ Validate there are no uncommitted updates for provided deployment. raises conflict error if there are. :param deployment_id: deployment id """ existing_updates = \ self.deployment_updates_list(filters={ 'deployment_id': deployment_id }).items active_update = \ next(iter( [u for u in existing_updates if u.state != STATE.COMMITTED]), None) if active_update: raise manager_rest.manager_exceptions.ConflictError( 'deployment update {0} is not committed yet' .format(active_update.id) ) def _extract_changes(self, dep_update, raw_nodes): """Extracts the changes between the current node_instances and the raw_nodes specified :param dep_update: :param raw_nodes: :return: a dictionary of modification type and node instanced modifed """ deployment_id_filter = \ {'deployment_id': dep_update.deployment_id} # By this point the node_instances aren't updated yet raw_node_instances = \ [instance.to_dict() for instance in self.sm.get_node_instances(filters=deployment_id_filter).items] # project changes in deployment return tasks.modify_deployment( nodes=raw_nodes, previous_node_instances=raw_node_instances, modified_nodes=() ) def _execute_update_workflow(self, dep_update, node_instances, modified_entity_ids): """Executed the 
update workflow :param dep_update: :param node_instances: a dictionary of modification type and modified instances :param modified_entity_ids: the entire modified entities list (by id) :return: """ added_instances = node_instances[CHANGE_TYPE.ADDED_AND_RELATED] extended_instances = node_instances[CHANGE_TYPE.EXTENDED_AND_RELATED] reduced_instances = node_instances[CHANGE_TYPE.REDUCED_AND_RELATED] removed_instances = node_instances[CHANGE_TYPE.REMOVED_AND_RELATED] instance_ids = { # needed in order to finalize the commit 'update_id': dep_update.id, # For any added node instance 'added_instance_ids': extract_ids(added_instances.get(CHANGE_TYPE.AFFECTED)), 'added_target_instances_ids': extract_ids(added_instances.get(CHANGE_TYPE.RELATED)), # encapsulated all the change entity_ids (in a dictionary with # 'node' and 'relationship' keys. 'modified_entity_ids': modified_entity_ids, # Any nodes which were extended (positive modification) 'extended_instance_ids': extract_ids(extended_instances.get(CHANGE_TYPE.AFFECTED)), 'extend_target_instance_ids': extract_ids(extended_instances.get(CHANGE_TYPE.RELATED)), # Any nodes which were reduced (negative modification) 'reduced_instance_ids': extract_ids(reduced_instances.get(CHANGE_TYPE.AFFECTED)), 'reduce_target_instance_ids': extract_ids(reduced_instances.get(CHANGE_TYPE.RELATED)), # Any nodes which were removed as a whole 'removed_instance_ids': extract_ids(removed_instances.get(CHANGE_TYPE.AFFECTED)), 'remove_target_instance_ids': extract_ids(removed_instances.get(CHANGE_TYPE.RELATED)) } return self.execute_workflow(deployment_id=dep_update.deployment_id, workflow_id='update', parameters=instance_ids) def finalize_commit(self, deployment_update_id): """ finalizes the update process by removing any removed node/node instances and updating any reduced node :param deployment_update_id: :return: """ dep_update = self.get_deployment_update(deployment_update_id) self._node_instance_handler.finalize(dep_update) 
self._node_handler.finalize(dep_update) # mark deployment update as committed dep_update.state = STATE.COMMITTED self.sm.update_deployment_update(dep_update) return models.DeploymentUpdate(deployment_update_id, dep_update.blueprint) def execute_workflow(self, deployment_id, workflow_id, parameters=None, allow_custom_parameters=False, force=False): """Executes the specified workflow :param deployment_id: :param workflow_id: :param parameters: :param allow_custom_parameters: :param force: :return: """ deployment = self.sm.get_deployment(deployment_id) blueprint = self.sm.get_blueprint(deployment.blueprint_id) if workflow_id not in deployment.workflows: raise manager_rest.manager_exceptions.NonexistentWorkflowError( 'Workflow {0} does not exist in deployment {1}'.format( workflow_id, deployment_id)) workflow = deployment.workflows[workflow_id] execution_parameters = \ BlueprintsManager._merge_and_validate_execution_parameters( workflow, workflow_id, parameters, allow_custom_parameters) execution_id = str(uuid.uuid4()) new_execution = models.Execution( id=execution_id, status=models.Execution.PENDING, created_at=str(datetime.now()), blueprint_id=deployment.blueprint_id, workflow_id=workflow_id, deployment_id=deployment_id, error='', parameters=BlueprintsManager._get_only_user_execution_parameters( execution_parameters), is_system_workflow=False) self.sm.put_execution(new_execution.id, new_execution) # executing the user workflow workflow_plugins = blueprint.plan[ constants.WORKFLOW_PLUGINS_TO_INSTALL] self.workflow_client.execute_workflow( workflow_id, workflow, workflow_plugins=workflow_plugins, blueprint_id=deployment.blueprint_id, deployment_id=deployment_id, execution_id=execution_id, execution_parameters=execution_parameters) return new_execution # What we need to access this manager in Flask def get_deployment_updates_manager(): """ Get the current app's deployment updates manager, create if necessary """ manager = 
current_app.config.get('deployment_updates_manager') if not manager: current_app.config['deployment_updates_manager'] = \ DeploymentUpdateManager() manager = current_app.config.get('deployment_updates_manager') return manager
nilq/baby-python
python
######################################################################### # Copyright/License Notice (Modified BSD License) # ######################################################################### ######################################################################### # Copyright (c) 2008, Daniel Knaggs # # All rights reserved. # # # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: - # # # # * Redistributions of source code must retain the above copyright # # notice, this list of conditions and the following disclaimer. # # # # * Redistributions in binary form must reproduce the above copyright # # notice, this list of conditions and the following disclaimer in # # the documentation and/or other materials provided with the # # distribution. # # # # * Neither the name of the author nor the names of its contributors # # may be used to endorse or promote products derived from this # # software without specific prior written permission. # # # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# ######################################################################### import serial class GSMDevice(object): def __init__(self, port, speed, bits, parity, stop, timeout): self.ser = serial.Serial() self.ser.baudrate = speed self.ser.bytesize = bits self.ser.parity = parity self.ser.port = port self.ser.stopbits = stop self.ser.timeout = timeout self.ser.open() def changeTimeout(self, newtimeout): self.ser.timeout = newtimeout def dispose(self): self.ser.close() self.ser = None def getBERPercentage(self, index): if index == 0: return "< 0.2%" elif index == 1: return "0.2-0.4%" elif index == 2: return "0.4-0.8%" elif index == 3: return "0.8-1.6%" elif index == 4: return "1.6-3.2%" elif index == 5: return "3.2-6.4%" elif index == 6: return "6.4-12.8%" elif index == 7: return "> 12.8%" elif index == 99: return "Not Known" def receiveChar(self, chars = 1): return self.ser.read(chars) def receiveDualResult(self): blank = self.receiveLine() ir = self.receiveLine() blank = self.receiveLine() frc = self.receiveLine() return ir, frc def receiveLine(self): return self.ser.readline().replace("\r", "").replace("\n", "") def receiveSingleResult(self): blank = self.receiveLine() frc = self.receiveLine() return frc def sendATCommand(self, command = "", skipcheck = False, dualcheck = False): if skipcheck: self.ser.write("AT%s\r" % command) return True else: self.ser.write("AT%s=?\r" % command.split("=")[0].replace("?", "")) if not dualcheck: if self.receiveSingleResult() == "OK": self.ser.write("AT%s\r" % command) return True else: return False else: ir, frc = self.receiveDualResult() if frc == "OK": self.ser.write("AT%s\r" % command) return True else: return False def sendRawCommand(self, command, newline = True): self.ser.write(command) if newline: self.ser.write("\r")
nilq/baby-python
python
from .test_helper import argv_kiwi_tests

import sys
import mock
from mock import patch

import azurectl

from pytest import raises
from azurectl.commands.storage_account import StorageAccountTask
from azurectl.azurectl_exceptions import AzureInvalidCommand


class TestStorageAccountTask:
    """Unit tests for the `azurectl storage account` command task.

    All collaborators (StorageAccount, AzureAccount, Help) are replaced with
    mocks in setup(), so only the argument plumbing from the docopt-style
    command dict into the collaborator calls is exercised.
    """

    def setup(self):
        # Simulate: azurectl --config ../data/config storage account list
        sys.argv = [
            sys.argv[0], '--config', '../data/config',
            'storage', 'account', 'list'
        ]
        self.task = StorageAccountTask()
        self.task.request_wait = mock.Mock()
        azurectl.commands.storage_account.StorageAccount = mock.Mock(
            return_value=mock.Mock()
        )
        azurectl.commands.storage_account.AzureAccount = mock.Mock(
            return_value=mock.Mock()
        )
        azurectl.commands.storage_account.Help = mock.Mock(
            return_value=mock.Mock()
        )

    def teardown(self):
        # Restore the argv expected by the surrounding test suite.
        sys.argv = argv_kiwi_tests

    def __init_command_args(self):
        # Baseline docopt-style argument dict; each test flips only the
        # flags/options it needs.
        self.task.command_args = {
            'create': False,
            'delete': False,
            'help': False,
            'list': False,
            'show': False,
            'update': False,
            'regions': False,
            '--name': None,
            '--description': None,
            '--label': None,
            '--locally-redundant': None,
            '--zone-redundant': None,
            '--geo-redundant': None,
            '--read-access-geo-redundant': None,
            '--new-primary-key': None,
            '--new-secondary-key': None,
            '--wait': True
        }

    def test_process_storage_account_help(self):
        # `help` routes to the manual page for this command.
        self.__init_command_args()
        self.task.command_args['help'] = True
        self.task.process()
        self.task.manual.show.assert_called_once_with(
            'azurectl::storage::account'
        )

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_list(self, mock_out):
        self.__init_command_args()
        self.task.command_args['list'] = True
        self.task.process()
        self.task.storage_account.list.assert_called_once_with()

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_show(self, mock_out):
        self.__init_command_args()
        self.task.command_args['show'] = True
        self.task.command_args['--name'] = 'test'
        self.task.process()
        self.task.storage_account.show.assert_called_once_with(
            self.task.command_args['--name']
        )

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_create(self, mock_out):
        # --locally-redundant maps to the 'Standard_LRS' account type.
        self.__init_command_args()
        self.task.command_args['create'] = True
        self.task.command_args['--name'] = 'testname'
        self.task.command_args['--label'] = 'test-label'
        self.task.command_args['--description'] = 'test-description'
        self.task.command_args['--locally-redundant'] = True
        self.task.process()
        self.task.storage_account.create.assert_called_once_with(
            'testname',
            'test-description',
            'test-label',
            'Standard_LRS'
        )

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_update(self, mock_out):
        # update passes the two optional key-regeneration arguments as None
        # when the corresponding options are not set.
        self.__init_command_args()
        self.task.command_args['update'] = True
        self.task.command_args['--name'] = 'testname'
        self.task.command_args['--label'] = 'test-label'
        self.task.command_args['--description'] = 'test-description'
        self.task.command_args['--locally-redundant'] = True
        self.task.process()
        self.task.storage_account.update.assert_called_once_with(
            'testname',
            'test-description',
            'test-label',
            'Standard_LRS',
            None,
            None
        )

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_delete(self, mock_out):
        self.__init_command_args()
        self.task.command_args['delete'] = True
        self.task.command_args['--name'] = 'test'
        self.task.process()
        self.task.storage_account.delete.assert_called_once_with(
            self.task.command_args['--name']
        )

    def test_storage_account_command_invalid_caps(self):
        # Azure storage account names must be lowercase.
        self.__init_command_args()
        self.task.command_args['--name'] = 'CAPSAREINVALID'
        with raises(AzureInvalidCommand):
            self.task.validate_account_name()

    def test_storage_account_command_invalid_punctuation(self):
        # ... and alphanumeric only.
        self.__init_command_args()
        self.task.command_args['--name'] = 'punctuation-is.bad'
        with raises(AzureInvalidCommand):
            self.task.validate_account_name()

    @patch('azurectl.commands.storage_account.DataOutput')
    def test_process_storage_account_regions(self, mock_out):
        # NOTE(review): asserts on task.account (AzureAccount), not
        # task.storage_account -- confirm `regions` is implemented via
        # AzureAccount.locations('Storage').
        self.__init_command_args()
        self.task.command_args['regions'] = True
        self.task.process()
        self.task.account.locations.assert_called_once_with('Storage')
nilq/baby-python
python
def mySqrt(x):
    """Return the square root of x via Newton's (Babylonian) iteration.

    Args:
        x: a non-negative number.

    Returns:
        An approximation of sqrt(x) with absolute residual |x - r*r| <= 1e-10.
        (x == 0 returns 0 immediately: the loop condition is already false.)

    Raises:
        ValueError: if x is negative -- the original code looped forever
        because |x - r*r| can never reach the tolerance.
    """
    if x < 0:
        raise ValueError("mySqrt() requires a non-negative argument")
    r = x
    precision = 10 ** (-10)
    # Removed leftover debug print of `precision`.
    while abs(x - r * r) > precision:
        r = (r + x / r) / 2  # Newton step for f(r) = r*r - x
    return r


print(mySqrt(25))
print(mySqrt(36))
nilq/baby-python
python
from restkit.handlers.http_mrg_handlers import query_handler as chandler_0 # noqa from restkit.handlers.http_mrg_handlers.http_report_handlers import report_csv_handler as chandler_1 # noqa __all__ = [ 'chandler_0', 'chandler_1', ]
nilq/baby-python
python
from dart_fss.api import filings


def test_get_corp_code():
    """Every key of the first corp-code record is a documented field."""
    records = filings.get_corp_code()
    allowed_fields = ['corp_code', 'corp_name', 'stock_code', 'modify_date']
    for field in records[0].keys():
        assert field in allowed_fields


def test_get_corp_info():
    """Corporation 00126380 reports its registered establishment date."""
    info = filings.get_corp_info('00126380')
    assert info['est_dt'] == '19690113'


def test_download_document():
    """Downloading a known filing into a scratch directory yields a result."""
    import tempfile
    with tempfile.TemporaryDirectory() as scratch_dir:
        downloaded = filings.download_document(scratch_dir, '20190401004781')
        assert downloaded is not None


def test_search_filings():
    """A date-bounded search for final reports returns the expected hit count."""
    results = filings.search_filings(corp_code='00126380',
                                     bgn_de='20190101',
                                     end_de='20190301',
                                     last_reprt_at='Y')
    assert results['total_count'] == 29
nilq/baby-python
python
from urllib.parse import urlencode

import requests

from module_pipedrive.pipedrive import exceptions
from module_pipedrive.pipedrive.activities import Activities
from module_pipedrive.pipedrive.deals import Deals
from module_pipedrive.pipedrive.filters import Filters
from module_pipedrive.pipedrive.leads import Leads
from module_pipedrive.pipedrive.notes import Notes
from module_pipedrive.pipedrive.organizations import Organizations
from module_pipedrive.pipedrive.persons import Persons
from module_pipedrive.pipedrive.pipelines import Pipelines
from module_pipedrive.pipedrive.products import Products
from module_pipedrive.pipedrive.recents import Recents
from module_pipedrive.pipedrive.stages import Stages
from module_pipedrive.pipedrive.users import Users
from module_pipedrive.pipedrive.webhooks import Webhooks


class Client:
    """HTTP client for the Pipedrive REST API.

    Supports two auth modes: OAuth2 bearer tokens (set_access_token) and the
    simpler api_token query parameter (set_api_token). Resource groups
    (deals, persons, ...) are exposed as attributes that share this client.
    """
    BASE_URL = 'https://api-proxy.pipedrive.com/'
    OAUTH_BASE_URL = 'https://oauth.pipedrive.com/oauth/'

    def __init__(self, client_id=None, client_secret=None, domain=None):
        self.client_id = client_id
        self.client_secret = client_secret
        self.access_token = None
        self.api_token = None
        self.activities = Activities(self)
        self.deals = Deals(self)
        self.filters = Filters(self)
        self.leads = Leads(self)
        self.notes = Notes(self)
        self.organizations = Organizations(self)
        self.persons = Persons(self)
        self.pipelines = Pipelines(self)
        self.products = Products(self)
        self.recents = Recents(self)
        self.stages = Stages(self)
        self.users = Users(self)
        self.webhooks = Webhooks(self)
        # A company-specific domain overrides the default proxy base URL.
        if domain:
            if not domain.endswith('/'):
                domain += '/'
            self.BASE_URL = domain + 'v1/'

    def authorization_url(self, redirect_uri, state=None):
        """Build the OAuth2 authorization URL to which the user is redirected."""
        params = {
            'client_id': self.client_id,
            'redirect_uri': redirect_uri,
        }
        if state is not None:
            params['state'] = state
        return self.OAUTH_BASE_URL + 'authorize?' + urlencode(params)

    def exchange_code(self, redirect_uri, code):
        """Exchange an OAuth2 authorization code for an access/refresh token."""
        data = {
            'grant_type': 'authorization_code',
            'code': code,
            'redirect_uri': redirect_uri
        }
        return self._post(self.OAUTH_BASE_URL + 'token', data=data, auth=(self.client_id, self.client_secret))

    def refresh_token(self, refresh_token):
        """Obtain a fresh access token from a refresh token."""
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': refresh_token,
        }
        return self._post(self.OAUTH_BASE_URL + 'token', data=data, auth=(self.client_id, self.client_secret))

    def set_access_token(self, access_token):
        # OAuth2 mode: sent as a Bearer Authorization header on every request.
        self.access_token = access_token

    def set_api_token(self, api_token):
        # Token mode: sent as the api_token query parameter on every request.
        self.api_token = api_token

    def _get(self, url, params=None, **kwargs):
        return self._request('get', url, params=params, **kwargs)

    def _post(self, url, **kwargs):
        return self._request('post', url, **kwargs)

    def _put(self, url, **kwargs):
        return self._request('put', url, **kwargs)

    def _delete(self, url, **kwargs):
        return self._request('delete', url, **kwargs)

    def _request(self, method, url, headers=None, params=None, **kwargs):
        """Send a request with auth applied; caller headers/params win on clash."""
        _headers = {}
        _params = {}
        if self.access_token:
            _headers['Authorization'] = 'Bearer {}'.format(self.access_token)
        if self.api_token:
            _params['api_token'] = self.api_token
        if headers:
            _headers.update(headers)
        if params:
            _params.update(params)
        return self._parse(requests.request(method, url, headers=_headers, params=_params, **kwargs))

    def _parse(self, response):
        """Decode a response; map HTTP error statuses to typed exceptions.

        Non-JSON responses are returned as raw text without status checking.
        """
        status_code = response.status_code
        if 'Content-Type' in response.headers and 'application/json' in response.headers['Content-Type']:
            r = response.json()
        else:
            return response.text
        if not response.ok:
            error = None
            if 'error' in r:
                error = r['error']
            if status_code == 400:
                raise exceptions.BadRequestError(error, response)
            elif status_code == 401:
                raise exceptions.UnauthorizedError(error, response)
            elif status_code == 403:
                raise exceptions.ForbiddenError(error, response)
            elif status_code == 404:
                raise exceptions.NotFoundError(error, response)
            elif status_code == 410:
                raise exceptions.GoneError(error, response)
            elif status_code == 415:
                raise exceptions.UnsupportedMediaTypeError(error, response)
            elif status_code == 422:
                raise exceptions.UnprocessableEntityError(error, response)
            elif status_code == 429:
                raise exceptions.TooManyRequestsError(error, response)
            elif status_code == 500:
                raise exceptions.InternalServerError(error, response)
            elif status_code == 501:
                raise exceptions.NotImplementedError(error, response)
            elif status_code == 503:
                raise exceptions.ServiceUnavailableError(error, response)
            else:
                raise exceptions.UnknownError(error, response)
        return r
python
#!/usr/bin/python """ Copyright (C) International Business Machines Corp., 2005 Author: Dan Smith <danms@us.ibm.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; under version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA """ ## ## These are utility functions for test cases ## import sys import commands import os import pwd import time import pty import select import signal import re import glob TEST_PASS = 0 TEST_FAIL = 255 TEST_SKIP = 77 # We currently advise waiting this many seconds for the ramdisk to # boot inside a domU TEST_DOMU_BOOT_DELAY = 20 if os.environ.get("TEST_VERBOSE"): verbose = True else: verbose = False class TimeoutError(Exception): def __init__(self, msg, outputSoFar): self.msg = msg self.output = outputSoFar def __str__(self): return str(self.msg) def runWithTimeout(cmd, timeout): args = cmd.split() pid, fd = pty.fork(); startTime = time.time() if pid == 0: os.execvp(args[0], args) output = "" while time.time() - startTime < timeout: i, o, e = select.select([fd], [], [], timeout) if fd in i: try: str = os.read(fd, 1) output += str except OSError, e: exitPid, status = os.waitpid(pid, os.WNOHANG) if exitPid == pid: if verbose: print "Child exited with %i" % status return status, output if verbose: print "Command timed out: killing pid %i" % pid os.kill(pid, signal.SIGINT) raise TimeoutError("Command execution time exceeded %i seconds" % timeout, outputSoFar=output) def traceCommand(command, timeout=None, logOutput=True): if 
verbose: print "[dom0] Running `%s'" % command if timeout: status, output = runWithTimeout(command, timeout) else: status, output = commands.getstatusoutput(command) if logOutput and verbose: print output return status, output def getTestName(): script = sys.argv[0] fname = os.path.basename(script) match = re.match("([^\.]+)\.[a-z]+", fname) if match: tname = match.group(1) else: tname = "UNKNOWN" return tname def becomeNonRoot(): """Become a non-root user, or FAIL if this is not possible. This call succeeds if we are already running as a non-root user. """ if os.geteuid() == 0: # Try and become "nobody". This user is commonly in place, but this # could be extended to consider any number of users to be acceptable, # if there are systems where "nobody" is not present. allusers = pwd.getpwall() for u in allusers: if u[0] == "nobody": os.setreuid(u[2], u[2]) break if os.geteuid() == 0: FAIL("Could not become a non-root user") def FAIL(format, *args): print "\nREASON:", (format % args) sys.exit(TEST_FAIL) def SKIP(format, *args): print "\nREASON:", (format % args) sys.exit(TEST_SKIP) def saveLog(logText, filename=None): if not filename: filename = "log"; logfile = open(filename, 'w'); date = commands.getoutput("date"); logfile.write("-- BEGIN XmTest Log @" + date + "\n"); logfile.write(logText); logfile.write("\n-- END XmTest Log\n"); logfile.close(); def waitForBoot(): if verbose: print "[dom0] Waiting %i seconds for domU boot..." 
% TEST_DOMU_BOOT_DELAY time.sleep(TEST_DOMU_BOOT_DELAY) def timeStamp(): name = getTestName() t = time.asctime(time.localtime()) print "*** Test %s started at %s %s" % (name, t, time.tzname[time.daylight]) # # Try to start a domain and attach a console to it to see if # the console system is working # def isConsoleDead(): from XmTestLib import XmTestDomain, DomainError, XmConsole, ConsoleError domain = XmTestDomain() try: console = domain.start() console.runCmd("ls") except DomainError, e: return True except ConsoleError, e: domain.destroy() return True domain.destroy() return False # # We currently can only load as many concurrent HVM domains as loop # devices, need to find how many devices the system has. def getMaxHVMDomains(): nodes = glob.glob("/dev/loop*") maxd = len(nodes) return maxd if __name__ == "__main__": timeStamp() FAIL("foo")
nilq/baby-python
python
r"""Analyze Traffic Images

This executable annotates traffic images to highlight vehicle types and
produces stats and graphs for the amount of time bicycle lanes and bus stops
are blocked by vehicles.

Example usage:
./analyzeimages \
    -path_images ./data/rawimages/
    -path_labels_map data/car_label_map.pbtxt
    -save_directory data/processedimages/
"""
import sys

from matplotlib.ticker import FormatStrFormatter, FuncFormatter

# The TensorFlow object-detection API lives in a checked-out models repo.
sys.path.append('./models-master/research/')
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

import argparse
from argparse import RawTextHelpFormatter
import time
import numpy as np
import os
import tensorflow as tf
import csv
from datetime import datetime
import matplotlib.pyplot as plt
from collections import defaultdict
from io import StringIO
import matplotlib.path as mpltPath
from PIL import Image
import scipy.misc


def processimages(path_images_dir, path_labels_map, save_directory):
    """Run the detector over every image, annotate it, and log lane blockage.

    Writes one CSV row per image (timestamp plus car/truck counts in the bike
    lane and bus stop) and saves the annotated image to ``save_directory``.
    Returns the path of the CSV file produced.
    """
    pathcpkt = 'data/output_inference_graph.pb/frozen_inference_graph.pb'
    csv_file = 'data/csvfile.csv'
    num_classes = 6

    # Load the frozen detection graph.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(pathcpkt, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    # Map class ids to human-readable labels.
    label_map = label_map_util.load_labelmap(path_labels_map)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    f = open(csv_file, 'w')

    def load_image_into_numpy_array(imageconvert):
        # Returns an empty array for images that cannot be reshaped to RGB
        # (e.g. truncated downloads); callers skip those.
        (im_width, im_height) = imageconvert.size
        try:
            return np.array(imageconvert.getdata()).reshape(
                (im_height, im_width, 3)).astype(np.uint8)
        except ValueError:
            return np.array([])

    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            # Tensor handles for inference.
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # Hand-digitized regions of interest, in pixel coordinates of the
            # 352x240 camera frame.
            polygon_right_lane = [(178, 122), (188, 240), (231, 240), (187, 125)]
            polygon_left_lane = [(108, 143), (0, 215), (0, 233), (123, 142), (108, 97)]
            polygon_bus_lane = [(200, 155), (230, 240), (292, 240), (225, 157)]
            pathrightlane = mpltPath.Path(polygon_right_lane)
            pathleftlane = mpltPath.Path(polygon_left_lane)
            pathbuslane = mpltPath.Path(polygon_bus_lane)

            for testpath in os.listdir(path_images_dir):
                start_time = time.time()
                # File names are '<timestamp>.jpg'.
                timestamp = testpath.split(".jpg")[0]
                try:
                    image = Image.open(path_images_dir + '/' + testpath)
                    image_np = load_image_into_numpy_array(image)
                except IOError:
                    print("Issue opening " + testpath)
                    continue
                if image_np.size == 0:
                    print("Skipping image " + testpath)
                    continue

                # The model expects a batch: [1, height, width, 3].
                image_np_expanded = np.expand_dims(image_np, axis=0)
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes,
                     num_detections],
                    feed_dict={image_tensor: image_np_expanded})

                # Draw the detections onto the image for the saved copy.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    min_score_thresh=0.4,
                    use_normalized_coordinates=True,
                    line_thickness=2)

                scores = np.squeeze(scores)
                boxes = np.squeeze(boxes)
                # Hoisted out of the per-detection loop (was re-squeezed on
                # every iteration).
                classes = np.squeeze(classes).astype(np.int32)
                num_cars_in_bikelane, num_cars_in_bus_stop, num_trucks_in_bike_lane, num_trucks_in_bus_stop = 0, 0, 0, 0

                for i in range(boxes.shape[0]):
                    if scores[i] > .4:
                        ymin, xmin, ymax, xmax = tuple(boxes[i].tolist())
                        # Box centre in pixel coordinates (boxes are
                        # normalized to [0, 1]).
                        center_x = (((xmax * 352) - (xmin * 352)) / 2) + (xmin * 352)
                        center_y = (((ymax * 240) - (ymin * 240)) / 2) + (ymin * 240)
                        if classes[i] in category_index.keys():
                            class_name = category_index[classes[i]]['name']
                        else:
                            class_name = 'N/A'
                        points = [(center_x, center_y)]
                        if class_name == 'car':
                            if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):
                                num_cars_in_bikelane += 1
                            elif pathbuslane.contains_points(points):
                                num_cars_in_bus_stop += 1
                        elif class_name == 'truck' or class_name == 'police' or class_name == 'ups':
                            if pathrightlane.contains_points(points) or pathleftlane.contains_points(points):
                                num_trucks_in_bike_lane += 1
                            elif pathbuslane.contains_points(points):
                                num_trucks_in_bus_stop += 1

                # One CSV row per image: timestamp plus blockage counts.
                f.write(timestamp + ',' + str(num_cars_in_bikelane) + ',' +
                        str(num_trucks_in_bike_lane) + ',' +
                        str(num_cars_in_bus_stop) + ',' +
                        str(num_trucks_in_bus_stop) + '\n')
                print("Process Time " + str(time.time() - start_time))
                scipy.misc.imsave(save_directory + testpath, image_np)

    f.close()
    return csv_file


def initialize_datastore():
    """Return [alldata, weekdaydata, weekenddata].

    Each entry is [[cars_blocking_bikelane[24], trucks_blocking_bikelane[24],
    either_blocking_bikelane[24]], [cars_blocking_buslane[24],
    trucks_blocking_buslane[24], either_blocking_buslane[24]]] -- i.e.
    seconds of blockage per hour of day.
    """
    blankarray = [0] * 24
    alldata = [[list(blankarray), list(blankarray), list(blankarray)],
               [list(blankarray), list(blankarray), list(blankarray)]]
    weekdaydata = [[list(blankarray), list(blankarray), list(blankarray)],
                   [list(blankarray), list(blankarray), list(blankarray)]]
    weekenddata = [[list(blankarray), list(blankarray), list(blankarray)],
                   [list(blankarray), list(blankarray), list(blankarray)]]
    return [alldata, weekdaydata, weekenddata]


def weekday(datevalue):
    """Return True for Monday-Friday dates."""
    return datevalue.weekday() < 5


def incrementarray(array, blockagearray, delta_time):
    """Add delta_time seconds to every blockage bucket hit by one CSV row."""
    timestamp_string = (blockagearray[0].split(".jpg"))[0]
    datetime_object = datetime.strptime(timestamp_string, '%Y-%m-%d %H:%M:%S.%f')
    hour = datetime_object.hour
    num_cars_in_bike_lane = int(blockagearray[1])
    num_trucks_in_bike_lane = int(blockagearray[2])
    num_cars_in_bus_stop = int(blockagearray[3])
    num_truck_in_bus_stop = int(blockagearray[4])
    if num_cars_in_bike_lane > 0:
        array[0][0][hour] += delta_time
    if num_trucks_in_bike_lane > 0:
        array[0][1][hour] += delta_time
    if num_cars_in_bike_lane > 0 or num_trucks_in_bike_lane > 0:
        array[0][2][hour] += delta_time
    if num_cars_in_bus_stop > 0:
        array[1][0][hour] += delta_time
    if num_truck_in_bus_stop > 0:
        array[1][1][hour] += delta_time
    if num_cars_in_bus_stop > 0 or num_truck_in_bus_stop > 0:
        array[1][2][hour] += delta_time


def incrementarrays(dataarrays, blockagearray, delta_time):
    """Update the all/weekday/weekend datastores for one CSV row."""
    alldata = dataarrays[0]
    weekdaydata = dataarrays[1]
    weekenddata = dataarrays[2]
    datetime_object = datetime.strptime((blockagearray[0].split(".jpg"))[0],
                                        '%Y-%m-%d %H:%M:%S.%f')
    incrementarray(alldata, blockagearray, delta_time)
    if weekday(datetime_object):
        incrementarray(weekdaydata, blockagearray, delta_time)
    else:
        incrementarray(weekenddata, blockagearray, delta_time)
    return [alldata, weekdaydata, weekenddata]


def buildsaveplot(list_to_graph, title):
    """Save a 24-bar percentage-by-hour chart to output/<title>.png."""
    label = ['', '', '', '', '', '6 am', '', '', '', '', '', '12 noon',
             '', '', '', '', '', '6 Pm', '', '', '', '', '', 'Midnight']
    index = np.arange(len(label))
    plt.bar(index, list_to_graph)
    plt.xticks(index, label, fontsize=10, rotation=30)
    plt.title(title)
    plt.plot()
    plt.ylim([0, 100.0])
    ax = plt.gca()
    ax.yaxis.set_major_formatter(FormatStrFormatter('%.0f%%'))
    plt.savefig("output/" + title.replace(" ", "") + ".png", bbox_inches='tight')
    plt.close()


def analyzeresults(csv_file):
    """Read the per-image CSV and print/plot lane-blockage statistics."""
    total_time_secs, total_time_bike_lane_blocked_secs, total_time_bus_stop_blocked_secs = 0, 0, 0
    # Seconds of footage per hour bucket; seeded with 1 to avoid dividing by
    # zero for hours with no images at all.
    weekdaytotalseconds = [1] * 24
    weekendtotalseconds = [1] * 24
    previous_timestamp = 0
    dataarrays = initialize_datastore()

    data = csv.reader(open(csv_file, 'r'))
    # Rows must be processed in time order so the deltas are meaningful.
    data = sorted(data, key=lambda rowparse: datetime.strptime(
        (rowparse[0].split(".jpg"))[0], '%Y-%m-%d %H:%M:%S.%f'))

    for row in data:
        datetime_object = datetime.strptime((row[0].split(".jpg"))[0],
                                            '%Y-%m-%d %H:%M:%S.%f')
        timestamp = float(datetime_object.strftime('%s'))
        hour = datetime_object.hour
        if previous_timestamp != 0:
            delta_time = timestamp - previous_timestamp
            # Cap gaps (missing images) at 30 s so one outage does not swamp
            # an hour bucket.
            if delta_time > 30:
                print("DELTA TIME LARGE")
                delta_time = 30
            total_time_secs += delta_time
            if weekday(datetime_object):
                weekdaytotalseconds[hour] += delta_time
            else:
                weekendtotalseconds[hour] += delta_time
            dataarrays = incrementarrays(dataarrays, row, delta_time)
        previous_timestamp = timestamp

    # Index 0: bike lane, index 1: bus stop.
    weekendpercentageblocked = [[0] * 24, [0] * 24]
    weekdaypercentageblocked = [[0] * 24, [0] * 24]
    for hour in range(0, 24):
        total_time_bike_lane_blocked_secs += dataarrays[0][0][2][hour]
        total_time_bus_stop_blocked_secs += dataarrays[0][1][2][hour]
        weekdaypercentageblocked[0][hour] = 100 * (dataarrays[1][0][2][hour] / weekdaytotalseconds[hour])
        weekendpercentageblocked[0][hour] = 100 * (dataarrays[2][0][2][hour] / weekendtotalseconds[hour])
        weekdaypercentageblocked[1][hour] = 100 * (dataarrays[1][1][2][hour] / weekdaytotalseconds[hour])
        weekendpercentageblocked[1][hour] = 100 * (dataarrays[2][1][2][hour] / weekendtotalseconds[hour])

    total_time_seven2seven, blockedbikelaneseven2seven, blockedbuslaneseven2seven = 0, 0, 0
    for x in range(7, 19):
        total_time_seven2seven += weekdaytotalseconds[x]
        blockedbikelaneseven2seven += dataarrays[1][0][2][x]
        blockedbuslaneseven2seven += dataarrays[1][1][2][x]

    print("RESULTS \n Total Time " + str(total_time_secs) + " blocked bike lane time " + str(
        total_time_bike_lane_blocked_secs) + "blocked truck lane time" + str(total_time_bus_stop_blocked_secs))
    print("Bike lane blocked " + str(100 * (total_time_bike_lane_blocked_secs / total_time_secs)) + "% of the time")
    print("Bus lane blocked " + str(100 * (total_time_bus_stop_blocked_secs / total_time_secs)) + "% of the time")
    print("Bike lane blocked " + str(
        100 * (blockedbikelaneseven2seven / total_time_seven2seven)) + "% of the time durring weekday from 7 am to 7pm")
    print("Bus lane blocked " + str(
        100 * (blockedbuslaneseven2seven / total_time_seven2seven)) + "% of the time durring weekday from 7 am to 7pm")

    buildsaveplot(weekdaypercentageblocked[0], 'Weekday Bike Lane Percentage Blocked by Hour')
    buildsaveplot(weekdaypercentageblocked[1], 'Weekday Bus Stop Percentage Blocked by Hour')
    buildsaveplot(weekendpercentageblocked[0], 'Weekend Bike Lane Percentage Blocked by Hour')
    buildsaveplot(weekendpercentageblocked[1], 'Weekend Bus Stop Percentage Blocked by Hour')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Analyze traffic images to determine rate of blocking bike'
                    'and bus lanes',
        formatter_class=RawTextHelpFormatter)
    parser.add_argument('-path_images', help='the folder with all the downloaded images in it')
    parser.add_argument('-path_labels_map', help='the file with the integer to label map')
    parser.add_argument('-save_directory', help='the directory you want to save the annotated images to')
    args = parser.parse_args()

    # Fix: the original also called analyzeresults(csv_file), which raised a
    # NameError because csv_file was only defined by the commented-out
    # processimages() call below. Re-enable that line to regenerate detections
    # from raw images before analysing them.
    # csv_file = processimages(args.path_images, args.path_labels_map, args.save_directory)
    analyzeresults('data/analysis10days.csv')
nilq/baby-python
python
import json
import random

from locoloco.models.db_orm import db
from locoloco.models.db_models import User
from locoloco.models.db_models import Country
from locoloco.models.db_models import DistributionCenter
from locoloco.models.db_models import StoreStatus
from locoloco.models.db_models import Store
from locoloco.models.db_models import StoreComponent

# For demo purposes we only generate about 10 stores:
# - distribution center is randomly assigned to the existing 4 dcs
#   (range 1 to 3); dc 4 is reserved for country LU.
# - status is randomly assigned (range 1 to 3).
# - store numbers are taken from the JSON keys.
# - store components are randomly generated.


def _read_json(json_filename):
    """Load and return the parsed contents of a UTF-8 JSON file."""
    with open(json_filename, 'r', encoding='utf-8') as f:
        return json.load(f)


def _persist(objects):
    """Add *objects* to the session, commit, and close the session."""
    db.session.add_all(objects)
    db.session.commit()
    db.session.close()


def load_users_from_json():
    """Importing JSON data to table users"""
    json_object = _read_json('db/json/users.json')
    users = [
        User(provider='locoloco',
             social_id=User.generate_social_id(),
             email_address=user.get('email_address'),
             password=user.get('password'))
        for user in json_object['users']
    ]
    _persist(users)


def load_countries_from_json():
    """Importing JSON data to table countries"""
    json_object = _read_json('db/json/countries.json')
    countries = [
        Country(country_code=country.get('country_code'),
                country_name=country.get('country_name'))
        for country in json_object['countries']
    ]
    _persist(countries)


def load_distribution_centers_from_json():
    """Importing JSON data to table distribution_centers"""
    json_object = _read_json('db/json/distribution_centers.json')
    dcs = [
        DistributionCenter(country_code=dc.get('country_code'),
                           number=dc.get('number'),
                           name=dc.get('name'),
                           tag=dc.get('tag'))
        for dc in json_object['distribution_centers']
    ]
    _persist(dcs)


def load_stores_status_from_json():
    """Importing JSON data to table store_status"""
    json_object = _read_json('db/json/store_status.json')
    statuses = [
        StoreStatus(sequence=status.get('sequence'),
                    name=status.get('name'),
                    description=status.get('description'))
        for status in json_object['store_status']
    ]
    _persist(statuses)


def load_stores_from_json():
    """Importing JSON data to table stores.

    Table dependencies: countries, distribution_centers
    """
    json_object = _read_json('db/json/stores.json')
    stores = []
    for key in json_object.get('stores'):
        store_data = json_object.get('stores').get(key).get('store')
        # There should always be a default user with id 1.
        user_id = 1
        country_code = store_data.get('country_code')
        # LU has its own DC (4); other countries get one of DCs 1-3 at random.
        if country_code == 'LU':
            dc_number = 4
        else:
            dc_number = random.randint(1, 3)
        stores.append(Store(
            user_id=user_id,
            country_code=country_code,
            dc_id=dc_number,
            number=int(key),
            name=store_data.get('name'),
            status_id=random.randint(1, 3),
            street_name=store_data.get('street_name'),
            street_number=store_data.get('street_number'),
            postal_code=store_data.get('postal_code'),
            city=store_data.get('city'),
        ))
    _persist(stores)


# (component_type, hostname template, maximum random count) for the
# components generated for every store.
_COMPONENT_SPECS = (
    ('backoffice', 'Backoffice {}', 2),
    ('network_routers', 'Network Router {}', 3),
    ('network_switches', 'Network Switch {}', 2),
    ('network_access_points', 'Network Access Point {}', 5),
)


def load_store_components_from_json():
    """Importing JSON data to table store_components.

    NOTE(review): the original docstring claimed components are only loaded
    for stores with status Open (sequence 2), but no status filter was ever
    applied -- confirm the intended behaviour.
    """
    json_object = _read_json('db/json/stores.json')
    # One list per component type so rows are still inserted grouped by type,
    # matching the original insert order.
    components = {ctype: [] for ctype, _, _ in _COMPONENT_SPECS}
    for key in json_object.get('stores'):
        store_data = json_object.get('stores').get(key).get('store')
        store_id = Store.get_id(store_data.get('country_code'), int(key))
        for ctype, hostname_template, max_count in _COMPONENT_SPECS:
            # Fix: the original `while i <= random.randint(...)` re-rolled the
            # random upper bound on every iteration; roll the component count
            # once per store/type instead.
            for i in range(1, random.randint(1, max_count) + 1):
                components[ctype].append(StoreComponent(
                    store_id=store_id,
                    component_type=ctype,
                    hostname=hostname_template.format(i),
                    ip_address='127.0.0.1'))
    for ctype, _, _ in _COMPONENT_SPECS:
        db.session.add_all(components[ctype])
    db.session.commit()
    db.session.close()
nilq/baby-python
python
from pdfrw import PdfObject, PdfReader, PdfWriter
import os

# Default document language applied when a PDF has no /Lang entry.
defaultlang = 'en-US'

# Check every PDF in ./files for missing accessibility metadata
# (/Lang, /Title, /MarkInfo) and optionally fix it, saving a copy to ./out.
files = os.listdir('files')
for filename in files:
    print(filename)
    fixlist = []
    # Fix: build paths with os.path.join instead of hard-coded 'files\\'
    # backslashes, which only worked on Windows.
    trailer = PdfReader(os.path.join('files', filename))
    print("Lang: ", trailer.Root.Lang)
    if trailer.Root.Lang is None:
        fixlist.append('Lang')
    print("Title: ", trailer.Info.Title)
    if trailer.Info.Title is None:
        fixlist.append('Title')
    print(trailer.Root.MarkInfo)
    if trailer.Root.MarkInfo is None:
        fixlist.append('MarkInfo')
    print('')
    print('Found issues with these:')
    print(fixlist)
    tofix = input('Do you want to fix all of these issues? y or n')
    if tofix == 'y' or tofix == 'Y':
        print('Fixing this:')
        for fix in fixlist:
            print(fix)
            if fix == 'Lang':
                trailer.Root.Lang = defaultlang
            if fix == 'Title':
                totitle = input('Do you want the title to be: ' + filename.split(".")[0])
                if totitle == 'y' or totitle == 'Y':
                    title = filename.split(".")[0]
                else:
                    title = input('What does the title need to be?')
                trailer.Info.Title = title
            if fix == 'MarkInfo':
                trailer.Root.MarkInfo = PdfObject('<</Marked true>>')
        # Commit the changes to a copy under ./out.
        PdfWriter(os.path.join('out', filename), trailer=trailer).write()
        # NOTE(review): this answer is never used; the loop simply advances
        # to the next file regardless -- confirm the intended flow.
        tofix = input('Do you want to fix anything else in this file? y or n')
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8; -*-

# Copyright (c) 2021, 2022 Oracle and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/
import oci
import os

from ads.common import utils


def api_keys(
    oci_config: str = os.path.join(os.path.expanduser("~"), ".oci", "config"),
    profile: str = "DEFAULT",
    client_kwargs: dict = None,
) -> dict:
    """Build client-creation arguments for API-key authentication.

    Parameters
    ----------
    oci_config : str
        Location of the OCI config file (default ``~/.oci/config``).
    profile : str
        Profile name inside the config file (default ``DEFAULT``).
    client_kwargs : dict
        Extra keyword arguments to apply when instantiating clients.

    Returns
    -------
    dict
        Keys ``config`` (the parsed config), ``signer`` (an API-key
        ``oci.signer.Signer``) and ``client_kwargs`` (passed through).

    Examples
    --------
    >>> from ads.common import auth as authutil
    >>> from ads.common import oci_client as oc
    >>> auth = authutil.api_keys(oci_config="/home/datascience/.oci/config", profile="TEST", client_kwargs={"timeout": 6000})
    >>> oc.OCIClientFactory(**auth).object_storage # Creates Object storage client with timeout set to 6000 using API Key authentication
    """
    loaded = oci.config.from_file(oci_config, profile)
    signer = oci.signer.Signer(
        loaded["tenancy"],
        loaded["user"],
        loaded["fingerprint"],
        loaded["key_file"],
        loaded.get("pass_phrase"),
    )
    return {"config": loaded, "signer": signer, "client_kwargs": client_kwargs}


def resource_principal(client_kwargs=None):
    """Build client-creation arguments for resource-principal authentication.

    Parameters
    ----------
    client_kwargs : dict
        Extra keyword arguments to apply when instantiating clients.

    Returns
    -------
    dict
        Keys ``config`` (empty dict), ``signer`` (a resource-principal
        signer) and ``client_kwargs`` (passed through).

    Examples
    --------
    >>> from ads.common import auth as authutil
    >>> from ads.common import oci_client as oc
    >>> auth = authutil.resource_principal({"timeout": 6000})
    >>> oc.OCIClientFactory(**auth).object_storage # Creates Object Storage client with timeout set to 6000 seconds using resource principal authentication
    """
    rp_signer = oci.auth.signers.get_resource_principals_signer()
    return {"config": {}, "signer": rp_signer, "client_kwargs": client_kwargs}


def default_signer(client_kwargs=None):
    """Build client-creation arguments using the session's default auth mode.

    See the ``ads.set_auth`` API for how the default mode is selected.

    Parameters
    ----------
    client_kwargs : dict
        Extra keyword arguments to apply when instantiating clients.

    Returns
    -------
    dict
        Keys ``config``, ``signer`` and ``client_kwargs`` as produced by
        either ``resource_principal`` or ``api_keys``.

    Examples
    --------
    >>> from ads.common import auth as authutil
    >>> from ads.common import oci_client as oc
    >>> auth = authutil.default_signer()
    >>> oc.OCIClientFactory(**auth).object_storage # Creates Object storage client
    """
    if utils.is_resource_principal_mode():
        return resource_principal(client_kwargs)
    return api_keys(client_kwargs=client_kwargs, profile=utils.oci_key_profile())


def get_signer(oci_config=None, oci_profile=None, **client_kwargs):
    # API-key auth when both config path and profile are supplied,
    # resource-principal auth otherwise.
    if oci_config and oci_profile:
        return api_keys(oci_config, oci_profile, client_kwargs)
    return resource_principal(client_kwargs)
nilq/baby-python
python
import sys

# Fixed-width rule used to visually separate report sections.
_RULE = "--------------------------------------------------"


def print_line(to_file=None):
    """Print a horizontal separator, to stdout or to *to_file* when given."""
    if to_file:
        print(_RULE, file=to_file)
    else:
        print(_RULE)


def print_header():
    """Print the startup banner for the MG benchmark."""
    print_line()
    print("NAS Parallel Benchmark v3.2")
    print(" MG")
    print("[main]: initializing...")
    print_line()
    print()


def print_config(app_data):
    """Print problem settings, stencil coefficients and verification values."""
    nx = ny = nz = app_data['prob_size']

    print_line()
    print("# Problem Settings #")
    print(f"[main]: CLASS = \"{app_data['prob_class']}\"")
    print(f"[main]: top level = {app_data['lt']}")
    print(f"[main]: bottom level = {app_data['lb']}")
    print(f"[main]: grid size = {nx} x {ny} x {nz}")
    print(f"[main]: n-iterations = {app_data['nit']}")

    print()
    print("# Stencil Co-efficients #")
    print(f"[main]: a = {app_data['a']}")
    print(f"[main]: c = {app_data['c']}")

    verify_data = app_data['verify_data']
    print()
    print("# Verification Values #")
    print(f"[main]: threshold = {verify_data['epsilon']}")
    print("[main]: Class \"" + app_data['prob_class'] + "\" "
          + "L2 norm = " + str(verify_data['verify_value']))
    print_line()
    return


def print_init_norm(app_data):
    """Print the initial residual norm and error."""
    print()
    print("# Initial Norms #")
    print(f"[main]: initial norm = {app_data['rnm2']}")
    print(f"[main]: initial err = {app_data['rnmu']}")
    print_line()
    return
nilq/baby-python
python
import asyncio
import os

from telethon import TelegramClient

# Interactive smoke test: prompts for an existing Telethon session and sends
# a single message to confirm the session is still valid.
# Fix: the constants below were annotated with `os.path` (a module object,
# not a type); input() returns str.
TELETHON_SESSION_FILE: str = input("Please insert path to telethon session file: ")
API_ID: int = int(input("Please insert session api id: "))
API_HASH: str = input("Please insert session api hash: ")
TG_USERNAME_RECIPIENT: str = input(
    "Please insert telegram username recipient fro test message: "
)


async def check_telethon():
    """Open the session and send a confirmation message to the recipient."""
    # Telethon expects the session name without the '.session' suffix.
    async with TelegramClient(
        TELETHON_SESSION_FILE.partition('.session')[0],
        api_id=API_ID,
        api_hash=API_HASH
    ) as client:
        await client.send_message(TG_USERNAME_RECIPIENT, "Client success work!")


if __name__ == "__main__":
    asyncio.run(check_telethon())
nilq/baby-python
python
import logging

from sklearn.dummy import DummyClassifier, DummyRegressor

from amlb.benchmark import TaskConfig
from amlb.data import Dataset
from amlb.results import save_predictions
from amlb.utils import Timer, unsparsify

log = logging.getLogger(__name__)


def run(dataset: Dataset, config: TaskConfig):
    """Fit a constant baseline (class prior / median) and save test predictions."""
    log.info("\n**** Constant predictor (sklearn dummy) ****\n")

    is_classification = config.type == 'classification'
    # Optionally use the label/feature-encoded representation of the data.
    encode = config.framework_params.get('_encode', False)

    def _as_arrays(split):
        # Densify the chosen representation of one data split.
        features = split.X_enc if encode else split.X
        target = split.y_enc if encode else split.y
        return unsparsify(features, fmt='array'), unsparsify(target, fmt='array')

    X_train, y_train = _as_arrays(dataset.train)
    X_test, y_test = _as_arrays(dataset.test)

    if is_classification:
        predictor = DummyClassifier(strategy='prior')
    else:
        predictor = DummyRegressor(strategy='median')

    with Timer() as training:
        predictor.fit(X_train, y_train)

    with Timer() as predict:
        predictions = predictor.predict(X_test)
        probabilities = predictor.predict_proba(X_test) if is_classification else None

    save_predictions(dataset=dataset,
                     output_file=config.output_predictions_file,
                     probabilities=probabilities,
                     predictions=predictions,
                     truth=y_test,
                     target_is_encoded=encode)

    return dict(
        models_count=1,
        training_duration=training.duration,
        predict_duration=predict.duration
    )
nilq/baby-python
python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Fine-tunes FasterErnie on the chnsenticorp sentiment classification set.
from functools import partial
import argparse
import os
import random
import time
import distutils.util

import numpy as np
import paddle
import paddle.nn.functional as F
import paddlenlp as ppnlp
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import LinearDecayWithWarmup
from paddlenlp.experimental import FasterErnieForSequenceClassification, to_tensor

# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--save_dir", default='./checkpoint', type=str, help="The output directory where the model checkpoints will be written.")
parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. " "Sequences longer than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--batch_size", default=32, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--epochs", default=3, type=int, help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion", default=0.0, type=float, help="Linear warmup proption over the training process.")
parser.add_argument("--init_from_ckpt", type=str, default=None, help="The path of checkpoint to be loaded.")
parser.add_argument("--seed", type=int, default=1000, help="random seed for initialization")
parser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--use_amp", type=distutils.util.strtobool, default=False, help="Enable mixed precision training.")
parser.add_argument("--scale_loss", type=float, default=2**15, help="The value of scale_loss for fp16.")
parser.add_argument("--save_steps", default=100, type=int, help="The interval steps to save checkppoints.")
parser.add_argument("--logging_steps", default=10, type=int, help="The interval steps to logging.")
args = parser.parse_args()
# yapf: enable


def set_seed(seed):
    """sets random seed"""
    # Seed Python, NumPy and Paddle RNGs for reproducible runs.
    random.seed(seed)
    np.random.seed(seed)
    paddle.seed(seed)


@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
    """Evaluate the model on *data_loader*, print loss/accuracy, and restore
    the model to train mode afterwards."""
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        texts, labels = batch['text'], batch['label']
        texts = to_tensor(texts, "texts")
        logits, predictions = model(texts)
        loss = criterion(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
    accu = metric.accumulate()
    print("eval loss: %.5f, accuracy: %.5f" % (np.mean(losses), accu))
    model.train()
    metric.reset()


def create_dataloader(dataset, mode='train', batch_size=1):
    """Wrap *dataset* in a DataLoader; distributed sampling and shuffling are
    only used for the train split."""

    def trans_fn(example):
        # Keep raw text (FasterErnie tokenizes on-device) and cast the label.
        return {
            "text": example["text"],
            "label": np.array(
                example["label"], dtype="int64")
        }

    dataset.map(trans_fn)

    shuffle = True if mode == 'train' else False
    if mode == 'train':
        batch_sampler = paddle.io.DistributedBatchSampler(
            dataset, batch_size=batch_size, shuffle=shuffle)
    else:
        batch_sampler = paddle.io.BatchSampler(
            dataset, batch_size=batch_size, shuffle=shuffle)
    return paddle.io.DataLoader(dataset=dataset, batch_sampler=batch_sampler)


def do_train():
    """Run the full fine-tuning loop: load data/model, train with optional
    AMP, log periodically, and evaluate + checkpoint every save_steps."""
    paddle.set_device(args.device)
    set_seed(args.seed)

    train_ds, dev_ds = load_dataset("chnsenticorp", splits=["train", "dev"])

    model = FasterErnieForSequenceClassification.from_pretrained(
        'ernie-1.0',
        num_classes=len(train_ds.label_list),
        max_seq_len=args.max_seq_length)

    train_data_loader = create_dataloader(
        train_ds, mode='train', batch_size=args.batch_size)
    dev_data_loader = create_dataloader(
        dev_ds, mode='dev', batch_size=args.batch_size)

    # Optionally warm-start from a previous checkpoint.
    if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):
        state_dict = paddle.load(args.init_from_ckpt)
        model.set_dict(state_dict)

    num_training_steps = len(train_data_loader) * args.epochs

    lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,
                                         args.warmup_proportion)

    # Generate parameter names needed to perform weight decay.
    # All bias and LayerNorm parameters are excluded.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    optimizer = paddle.optimizer.AdamW(
        learning_rate=lr_scheduler,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        apply_decay_param_fun=lambda x: x in decay_params)

    criterion = paddle.nn.loss.CrossEntropyLoss()
    metric = paddle.metric.Accuracy()
    if args.use_amp:
        scaler = paddle.amp.GradScaler(init_loss_scaling=args.scale_loss)

    global_step = 0
    tic_train = time.time()
    total_train_time = 0
    for epoch in range(1, args.epochs + 1):
        for step, batch in enumerate(train_data_loader, start=1):
            texts, labels = batch["text"], batch["label"]
            texts = to_tensor(texts)
            # Forward pass (and loss) under AMP auto-cast when enabled.
            with paddle.amp.auto_cast(
                    args.use_amp,
                    custom_white_list=["fused_feedforward", "fused_attention"]):
                logits, predictions = model(texts)
                loss = criterion(logits, labels)
            probs = F.softmax(logits, axis=1)
            correct = metric.compute(logits, labels)
            metric.update(correct)
            acc = metric.accumulate()

            # Scale the loss before backward when AMP is on; otherwise a
            # plain backward/step.
            if args.use_amp:
                scaler.scale(loss).backward()
                scaler.minimize(optimizer, loss)
            else:
                loss.backward()
                optimizer.step()
            lr_scheduler.step()
            optimizer.clear_grad()
            global_step += 1
            if global_step % args.logging_steps == 0:
                time_diff = time.time() - tic_train
                total_train_time += time_diff
                print(
                    "global step %d, epoch: %d, batch: %d, loss: %.5f, accuracy: %.5f, speed: %.2f step/s"
                    % (global_step, epoch, step, loss, acc,
                       args.logging_steps / time_diff))
                tic_train = time.time()

            if global_step % args.save_steps == 0:
                save_dir = os.path.join(args.save_dir, "model_%d" % global_step)
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                # Evaluate on dev before persisting the checkpoint.
                evaluate(model, criterion, metric, dev_data_loader)
                model.save_pretrained(save_dir)
                tic_train = time.time()

    print("Speed: %.2f steps/s" % (global_step / total_train_time))


if __name__ == "__main__":
    do_train()
nilq/baby-python
python
from PLC.Faults import *
from PLC.Method import Method
from PLC.Parameter import Parameter, Mixed
from PLC.Persons import Person, Persons
from PLC.Auth import Auth


class DeletePerson(Method):
    """
    Mark an existing account as deleted.

    Users and techs can only delete themselves. PIs can only delete
    themselves and other non-PIs at their sites. Admins can delete anyone.

    Returns 1 if successful, faults otherwise.
    """

    roles = ['admin', 'pi', 'user', 'tech']

    accepts = [
        Auth(),
        Mixed(Person.fields['person_id'],
              Person.fields['email'])
    ]

    returns = Parameter(int, '1 if successful')

    def call(self, auth, person_id_or_email):
        # Resolve the target account by numeric id or e-mail address.
        matches = Persons(self.api, [person_id_or_email])
        if not matches:
            raise PLCInvalidArgument("No such account")
        account = matches[0]

        # Accounts mirrored from a peer must be deleted at the peer,
        # not locally.
        if account['peer_id'] is not None:
            raise PLCInvalidArgument("Not a local account")

        # This method requires authentication, so a caller must exist.
        assert self.caller is not None

        # Enforce the role-based rules described in the docstring.
        if not self.caller.can_update(account):
            raise PLCPermissionDenied("Not allowed to delete specified account")

        account.delete()

        # Record details for the event/audit log.
        self.event_objects = {'Person': [account['person_id']]}
        self.message = 'Person %d deleted' % account['person_id']

        return 1
nilq/baby-python
python
#-------------------------------------------------------------------------------
#
#  An abstract base class implementation of the ITemplateDataNameItem interface
#  that looks for all specified values in its input context or optionally any of
#  its sub-contexts and outputs a context containing all such values found.
#
#  Written by: David C. Morrill
#
#  Date: 07/29/2007
#
#  (c) Copyright 2007 by Enthought, Inc.
#
#-------------------------------------------------------------------------------

""" An abstract base class implementation of the ITemplateDataNameItem
    interface that looks for all specified values in its input context or
    optionally any of its sub-contexts and outputs a context containing all
    such values found.
"""

#-------------------------------------------------------------------------------
#  Imports:
#-------------------------------------------------------------------------------

from traits.api \
    import HasPrivateTraits, Instance, Property, provides

from apptools.template.template_traits \
    import TBool

from apptools.template.itemplate_data_context \
    import ITemplateDataContext

from apptools.template.itemplate_data_name_item \
    import ITemplateDataNameItem

from apptools.template.template_impl \
    import Template

from .template_data_context \
    import TemplateDataContext

from .helper \
    import path_for

#-------------------------------------------------------------------------------
#  'AnyDataNameItem' class:
#-------------------------------------------------------------------------------

# Fix: the original called the old-style ``implements(...)`` inside the class
# body, but only ``provides`` is imported from traits.api, which raised a
# NameError at class-definition time. The modern ``@provides`` class decorator
# is the supported equivalent.
@provides(ITemplateDataNameItem)
class AnyDataNameItem(Template):
    """ An abstract base class implementation of the ITemplateDataNameItem
        interface that looks for all specified values in its input context or
        optionally any of its sub-contexts and outputs a context containing
        all such values found.
    """

    #-- 'ITemplateDataNameItem' Interface Implementation -----------------------

    # The data context which this data name item should match against:
    input_data_context = Instance(ITemplateDataContext)

    # The data context containing the data values and/or contexts this data
    # name item matches:
    output_data_context = Instance(ITemplateDataContext)

    # The ITemplateChoice instance representing the current settings of the
    # data name item. This value must be read/write, and must be overridden by
    # subclasses.
    data_name_item_choice = Property

    # The alternative choices the user has for the data name item settings for
    # the current input data context. The list may be empty, in which case the
    # user cannot change the settings of the data name item. This value can be
    # read only, and must be overridden by subclasses.
    data_name_item_choices = Property

    #-- Public Traits ----------------------------------------------------------

    # Should all sub-contexts be included in the search:
    recursive = TBool(False)

    # Should included sub-contexts be flattened into a single context?
    flatten = TBool(False)

    #-- Private Traits ---------------------------------------------------------

    # The current recursive setting:
    current_recursive = TBool(False)

    # The current input data context:
    current_input_data_context = Property

    #-- Abstract Methods (Must be overridden in subclasses) --------------------

    def filter(self, name, value):
        """ Returns **True** if the specified context data *name* and *value*
            should be included in the output context; and **False** otherwise.
        """
        raise NotImplementedError

    #-- Property Implementations -----------------------------------------------

    def _get_data_name_item_choice(self):
        raise NotImplementedError

    def _set_data_name_item_choice(self, value):
        raise NotImplementedError

    def _get_data_name_item_choices(self):
        raise NotImplementedError

    def _get_current_input_data_context(self):
        return self.input_data_context

    #-- Trait Event Handlers ---------------------------------------------------

    def _recursive_changed(self, value):
        """ Handles the primary recursive setting being changed. """
        self.current_recursive = value

    def _input_data_context_changed(self):
        """ Handles the 'input_data_context' trait being changed. """
        self.inputs_changed()

    #-- Private Methods --------------------------------------------------------

    def inputs_changed(self):
        """ Handles any input value being changed. This method should be
            called by subclasses when any of their input values change.
        """
        output_context = None
        input_context = self.current_input_data_context
        if input_context is not None:
            if self.current_recursive and (not self.flatten):
                # Preserve the context hierarchy while filtering.
                # Fix: the original passed a second 'values' argument that
                # _copy_context() does not accept, raising a TypeError on
                # this code path.
                output_context = self._copy_context(input_context)
            else:
                values = {}
                if self.current_recursive:
                    # Flatten all nested contexts into one values dictionary:
                    self._add_context(input_context, values)
                else:
                    self._add_values(input_context, values, '')

                if len(values) > 0:
                    output_context = TemplateDataContext(
                        data_context_path=input_context.data_context_path,
                        data_context_name=input_context.data_context_name,
                        values=values)

        self.output_data_context = output_context

    def _add_values(self, input_context, values, path=''):
        """ Adds all of the matching values in the specified *input_context*
            to the specified *values* dictionary.
        """
        # Filter each name/value in the current input context to see if it
        # should be added to the output values:
        filter = self.filter
        gdcv = input_context.get_data_context_value
        for name in input_context.data_context_values:
            value = gdcv(name)
            if filter(name, value):
                values[path_for(path, name)] = value

    def _add_context(self, input_context, values, path=''):
        """ Adds all of the matching values in the specified *input_context*
            to *values*, then applies itself recursively to all contexts
            contained in the specified *input_context*.
        """
        # Add all of the filtered values in the specified input context:
        self._add_values(input_context, values, path)

        # Now process all of the input context's sub-contexts:
        # NOTE(review): the recursion extends the path with the *parent*
        # context's name, not the sub-context's — looks suspicious but is
        # preserved from the original; confirm against path_for() semantics.
        gdc = input_context.get_data_context
        for name in input_context.data_contexts:
            self._add_context(gdc(name), values,
                              path_for(path, input_context.data_context_name))

    def _copy_context(self, input_context):
        """ Clone the input context so that the result only contains values
            and contexts which contain valid values and are not empty.
        """
        values = {}
        contexts = {}

        # Add all of the filtered values in the specified input context:
        self._add_values(input_context, values)

        # Now process all of the input context's sub-contexts:
        gdc = input_context.get_data_context
        for name in input_context.data_contexts:
            context = self._copy_context(gdc(name))
            if context is not None:
                contexts[name] = context

        if (len(values) == 0) and (len(contexts) == 0):
            return None

        return TemplateDataContext(
            data_context_path=input_context.data_context_path,
            data_context_name=input_context.data_context_name,
            values=values,
            contexts=contexts)
nilq/baby-python
python
import itertools
import math


def Solution():
    """Read T test cases from stdin; for each, read a count N and a line of
    space-separated numbers, then print the truncated (integer part of the)
    difference between the largest and smallest value.
    """
    T = int(input("테스트 수행횟수 입력:"))  # number of test cases
    for _ in range(T):
        N = int(input("입력데이터: "))
        sData = str(input("값 추가 \n"))
        ans = math.trunc(sortMaxMin(sData, N))
        print(ans)


def sortMaxMin(inputData: str, N: int) -> float:
    """Return max - min over the first *N* space-separated numbers in *inputData*.

    The original implementation re-split and re-parsed every token on each
    loop iteration and tracked the extrema by hand (plus round-tripped the
    result through str()); a single parse plus the built-in max()/min() is
    equivalent and O(N).
    """
    values = [float(token) for token in inputData.split(' ')[:N]]
    return max(values) - min(values)
nilq/baby-python
python
#!/usr/bin/python3
"""
We want to use quad trees to store an N x N boolean grid. Each cell in the
grid can only be true or false. The root node represents the whole grid. For
each node, it will be subdivided into four children nodes until the values in
the region it represents are all the same.

Each node has another two boolean attributes : isLeaf and val. isLeaf is true
if and only if the node is a leaf node. The val attribute for a leaf node
contains the value of the region it represents.
"""
__author__ = 'Danyang'


# Definition for a QuadTree node.
class Node:
    def __init__(self, val, isLeaf, topLeft, topRight, bottomLeft, bottomRight):
        self.val = val
        self.isLeaf = isLeaf
        self.topLeft = topLeft
        self.topRight = topRight
        self.bottomLeft = bottomLeft
        self.bottomRight = bottomRight


class Solution:
    def construct(self, grid):
        """
        Build the quad tree bottom-up: recursively construct the four
        quadrants of each square region, then collapse them into a single
        leaf when all four carry the same value.

        :type grid: List[List[int]]
        :rtype: Node
        """
        def build(row, col, size):
            # A 1x1 region is always a leaf holding its cell value.
            if size == 1:
                return Node(grid[row][col], True, None, None, None, None)

            half = size // 2
            tl = build(row, col, half)
            tr = build(row, col + half, half)
            bl = build(row + half, col, half)
            br = build(row + half, col + half, half)

            # Internal nodes carry the sentinel value "*", so the four child
            # values being equal (and not "*") means the region is uniform
            # and can be merged into one leaf.
            if tl.val == tr.val == bl.val == br.val != "*":
                return Node(grid[row][col], True, None, None, None, None)
            return Node("*", False, tl, tr, bl, br)

        return build(0, 0, len(grid))
python
#!/usr/bin/env python

# Small web app to allow a user to top up their personal PaperCut balance.
#
# Add a custom URL to the PaperCut user web page, which is used by end users
# when they want to add credit to their PaperCut personal account. When the
# user clicks on the URL link (in the PaperCut user web page) to the web app,
# the user identification details are passed as part of the URL. This is
# explained at:
# https://www.papercut.com/products/ng/manual/common/topics/customize-user-web-pages.html#customize-user-web-pages-nav-links
#
# The URL needs to be something like
#   http://localhost:8081/simpleTopUpBalance/?user=%user%&return_url=%return_url%
#
# Generally additional security should be provided. For example if the URL is
#   http://localhost:8081/promptForPassword/?user=%user%&return_url=%return_url%
# then the user will need to enter their PaperCut password to access the
# payment system.
#
# Handy Tip: By default the link will open in a separate window. You can edit
# the advanced config property user.web.custom-links and change "_body" to
# "_self". You should then use the %return_url% to return the user to the
# PaperCut MF/NG web interface.
#
# This code is a basic example only. It should not be used for production.

import xmlrpc.client
import sys
from json import load as loadjs
import logging
import traceback

# Bottle does not depend on any external libraries.
# You can just download bottle.py into your project directory using
#   $ wget http://bottlepy.org/bottle.py
from bottle import route, run, template, request, debug, response

# Prefer HTTPS connection.
# If not localhost then this address will need to be whitelisted in PaperCut.
host = "http://localhost:9191/rpc/api/xmlrpc"

# Value defined in advanced config property "auth.webservices.auth-token".
# Should be random.
auth = "token"

proxy = xmlrpc.client.ServerProxy(host)

# For more information on this user database file refer to the custom auth
# and sync demo.
paperCutAccountInfoFile = 'c:\\Program Files\\PaperCut MF\\server\\custom\\config.json'

paperCutAccountData = {}

# The user is sent back to the Summary page as if they had just logged in,
# assuming their session has not timed out.
# Therefore return url should be consistent.
redirect_url = ''


@route('/')
def wrongUrl():
    """Fallback for users who hit the app root directly."""
    return("Please log into PaperCut and set top up your account from there")


@route('/promptForPassword/')
def prompForPassword():
    """Show the password form for the given user, after checking the user
    exists in PaperCut."""
    user = request.query.user or ""
    try:
        if len(user) == 0 or not proxy.api.isUserExists(auth, user):
            return("Can't find user {}".format(user))
    except Exception:
        # Log XML-RPC failures but still render the form.
        logging.error(traceback.format_exc())

    return_url = request.query.return_url or ""
    return template('promptForPassword', user=user, return_url=return_url)


@route('/simpleTopUpBalance/', method='GET')
def promptUser():
    """Validate the password (when an account database is configured) and
    show the deposit form."""
    user = request.query.user or ""
    return_url = request.query.return_url or ""
    password = request.query.password or ""

    # A missing account database (None) means password checking is disabled.
    # NOTE(review): an unknown user raises KeyError here (HTTP 500) — confirm
    # whether a friendlier error page is wanted.
    if paperCutAccountData is None or paperCutAccountData['userdata'][user]['password'] == password:
        return template('promptForDeposit', user=user, return_url=return_url)

    # Password validation failed.
    return template('promptForPassword', user=user,
                    error_text="Invalid password entered", return_url=return_url)


@route("/topUp/")
def topUp(method="GET"):
    """Apply the requested deposit to the user's PaperCut account balance."""
    return_url = request.query.return_url or None
    if request.query.cancel == "cancel":
        if return_url is None:
            return "Cancelled. Please close this tab/window and return to PaperCut"
        else:
            response.set_header("Refresh", "5; url={}".format(return_url))
            return "Cancelled. You will be returned to PaperCut in 5s"

    user = request.query.user
    # NOTE(review): a non-numeric amount raises ValueError (HTTP 500).
    amount = float(request.query.amount)

    # Example of data validation -- not used because our form already does
    # this one.
    if not amount > 0.0:
        return template('promptForDeposit', user=user, return_url=return_url,
                        error_text="Invalid amount \"{}\" entered".format(amount))

    proxy.api.adjustUserAccountBalance(
        auth, user, amount, "Money added by the Simple Top Up Page")

    # Fix: return_url may be None here (query parameter absent), and the
    # original called len(None) which raised TypeError.
    if not return_url:
        return "Updated balance is now {}<br><br>Please close this tab/window and return to PaperCut".format(
            proxy.api.getUserAccountBalance(auth, user))

    # Add refresh with 5s timeout back to PaperCut MF/NG.
    # (Also fixes the "PaperCcut" typo in the user-facing message.)
    response.set_header("Refresh", "5; url={}".format(return_url))
    return "Updated balance is now {}<br><br>You will be returned to PaperCut in 5s".format(
        proxy.api.getUserAccountBalance(auth, user))


# Load the optional account database used for password validation; when the
# file is absent the app falls back to no password checking.
try:
    with open(paperCutAccountInfoFile) as f:
        paperCutAccountData = loadjs(f)
except OSError:
    paperCutAccountData = None

run(host='localhost', port=8081, debug=True, reloader=True)
nilq/baby-python
python
#!/usr/bin/python
#-*- coding: utf-8 -*-

# Build script for the python-polkit bindings.
# Fix: the original imported distutils.core twice and pulled in sys and
# subprocess without using them; the duplicates/dead imports are removed.

from distutils.core import setup, Extension
import os

# Installation prefix, overridable via the "prefix" environment variable.
# (Kept for compatibility; not referenced by the setup() call below.)
prefix = os.environ.get("prefix", "/usr")

setup(name="polkit",
      version="1.0.2",
      description="Python bindings for polkit-1",
      long_description="Python bindings for polkit-1",
      license="GNU GPL2",
      author="Bahadır Kandemir",
      author_email="bahadir@pardus.org.tr",
      url="http://github.com/Pardus-Linux/python-polkit/",
      py_modules=["polkit"],
      ext_modules=[Extension('_polkit',
                             sources=['pypolkit.c'],
                             include_dirs=["/usr/include/polkit-1",
                                           "/usr/include/glib-2.0",
                                           "/usr/lib/glib-2.0/include"],
                             libraries=["polkit-gobject-1", "gio-2.0",
                                        "gobject-2.0", "gmodule-2.0",
                                        "gthread-2.0", "pthread", "rt",
                                        "glib-2.0"],
                             library_dirs=[],
                             )],
      )
nilq/baby-python
python
""" Loading data and events submodule. """ from ..signal import find_events #from .eeg_preprocessing import * import numpy as np import pandas as pd import mne import os # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def read_eeg(filename, path="", eog=('HEOG', 'VEOG'), misc="auto", reference=None, montage="easycap-M1", preload=True, verbose="CRITICAL"): """ Load EEG data into mne.io.Raw file. Parameters ---------- filename : str Filename. path : str File's path. eog : list Names of channels or list of indices that should be designated EOG channels. Values should correspond to the vhdr file. Default is ('HEOG', 'VEOG'), but MNE's default is ('HEOGL', 'HEOGR', 'VEOGb'). misc : list Names of channels or list of indices that should be designated MISC channels. Values should correspond to the electrodes in the vhdr file. If 'auto', units in vhdr file are used for inferring misc channels. Default is 'auto'. reference : str or list re-reference using specific sensors. montage : str Path or instance of montage containing electrode positions. If None, sensor locations are (0,0,0). See the documentation of mne.channels.read_montage() for more information. preload : bool If True, all data are loaded at initialization. If False, data are not read until save. verbose : str Level of verbosity. "DEBUG", "INFO", "WARNING", "ERROR" or "CRITICAL". 
Returns ---------- raw : mne.io.Raw Raw data in FIF format. Example ---------- >>> import neurokit as nk >>> raw = nk.read_eeg("filename") Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - mne *See Also* - mne package: http://martinos.org/mne/dev/index.html """ file = path + filename # Find correct file extension = filename.split(".") if len(extension) == 1: extension = None else: extension = "." + extension[-1] if extension in [".vhdr", ".raw", ".set", ".fif", ".edf"]: file = file.split(".")[0] else: if extension is None: extension = ".vhdr" if os.path.exists(file + extension) is False: extension = ".raw" if os.path.exists(file + extension) is False: extension = ".set" if os.path.exists(file + extension) is False: extension = ".fif" if os.path.exists(file + extension) is False: extension = ".edf" if os.path.exists(file + extension) is False: print("NeuroKit Error: read_eeg(): couldn't find compatible format of data.") return() # Load the data try: if extension == ".vhdr": raw = mne.io.read_raw_brainvision(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose) elif extension == ".raw": raw = mne.io.read_raw_egi(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose) elif extension == ".set": raw = mne.io.read_raw_eeglab(file + extension, eog=eog, misc=misc, montage=montage, preload=preload, verbose=verbose) elif extension == ".fif": raw = mne.io.read_raw_fif(file + extension, preload=preload, verbose=verbose) elif extension == ".edf": raw = mne.io.read_raw_edf(file + extension, preload=preload, verbose=verbose) else: print("NeuroKit Error: read_eeg(): couldn't find compatible reader of data. 
Try to do it manually using mne.") # Re-reference if needed and if not MEG data if True not in ["MEG" in chan for chan in raw.info["ch_names"]]: if reference is None: raw.set_eeg_reference() else: raw.set_eeg_reference(reference) except KeyError: print("NeuroKit Error: read_eeg(): something went wrong. This might be because you have channel names that are missing from the montage definition. Try do read data manually using mne.") except FileNotFoundError: print("NeuroKit Error: read_eeg(): something went wrong, check the file names that are inside your info files (.vhdr, .vmrk, ...)") except: print("NeuroKit Error: read_eeg(): error in data loading. Try to do it manually using mne.") return(raw) # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def eeg_add_channel(raw, channel, sync_index_raw=0, sync_index_channel=0, channel_type=None, channel_name=None): """ Add a channel to a raw m/eeg file. Parameters ---------- raw : mne.io.Raw Raw EEG data. channel : list or numpy.array The channel to be added. sync_index_raw : int or list The index by which to align the two inputs. sync_index_channel : int or list The index by which to align the two inputs. channel_type : str Channel type. Currently supported fields are 'ecg', 'bio', 'stim', 'eog', 'misc', 'seeg', 'ecog', 'mag', 'eeg', 'ref_meg', 'grad', 'emg', 'hbr' or 'hbo'. Returns ---------- raw : mne.io.Raw Raw data in FIF format. 
Example ---------- >>> import neurokit as nk >>> raw = nk.eeg_add_channel(raw, ecg, channel_type="ecg") Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - mne *See Also* - mne: http://martinos.org/mne/dev/index.html """ if channel_name is None: if isinstance(channel, pd.core.series.Series): if channel.name is not None: channel_name = channel.name else: channel_name = "Added_Channel" else: channel_name = "Added_Channel" # Compute the distance between the two signals diff = sync_index_channel - sync_index_raw if diff > 0: channel = list(channel)[diff:len(channel)] channel = channel + [np.nan]*diff if diff < 0: channel = [np.nan]*diff + list(channel) channel = list(channel)[0:len(channel)] # Adjust to raw size if len(channel) < len(raw): channel = list(channel) + [np.nan]*(len(raw)-len(channel)) else: channel = list(channel)[0:len(raw)] # Crop to fit the raw data info = mne.create_info([channel_name], raw.info["sfreq"], ch_types=channel_type) channel = mne.io.RawArray([channel], info) raw.add_channels([channel], force_update_info=True) return(raw) # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def eeg_select_channels(raw, channel_names): """ Select one or several channels by name and returns them in a dataframe. Parameters ---------- raw : mne.io.Raw Raw EEG data. channel_names : str or list Channel's name(s). 
Returns ---------- channels : pd.DataFrame Channel. Example ---------- >>> import neurokit as nk >>> raw = nk.eeg_select_channel(raw, "TP7") Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - mne *See Also* - mne package: http://martinos.org/mne/dev/index.html """ if isinstance(channel_names, list) is False: channel_names = [channel_names] channels, time_index = raw.copy().pick_channels(channel_names)[:] if len(channel_names) > 1: channels = pd.DataFrame(channels.T, columns=channel_names) else: channels = pd.Series(channels[0]) channels.name = channel_names[0] return(channels) # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def eeg_create_events(onsets, conditions=None): """ Create MNE compatible events. Parameters ---------- onsets : list or array Events onsets. conditions : list A list of equal length containing the stimuli types/conditions. Returns ---------- (events, event_id) : tuple MNE-formated events and a dictionary with event's names. Example ---------- >>> import neurokit as nk >>> events, event_id = nk.create_mne_events(events_onset, trigger_list) Authors ---------- Dominique Makowski Dependencies ---------- None """ event_id = {} # Sanity check if len(conditions) != len(onsets): print("NeuroKit Warning: eeg_create_events(): conditions parameter of different length than onsets. 
Aborting.") return() if conditions is None: conditions = ["Event"] * len(onsets) event_names = list(set(conditions)) # event_index = [1, 2, 3, 4, 5, 32, 64, 128] event_index = list(range(len(event_names))) for i in enumerate(event_names): conditions = [event_index[i[0]] if x==i[1] else x for x in conditions] event_id[i[1]] = event_index[i[0]] events = np.array([onsets, [0]*len(onsets), conditions]) return(events, event_id) # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def eeg_add_events(raw, events_channel, conditions=None, treshold="auto", cut="higher", time_index=None, number="all", after=0, before=None, min_duration=1): """ Create MNE compatible events. Parameters ---------- raw : mne.io.Raw Raw EEG data. events_channel : str or array Name of the trigger channel if in the raw, or array of equal length if externally supplied. conditions : list List containing the stimuli types/conditions. treshold : float The treshold value by which to select the events. If "auto", takes the value between the max and the min. cut : str "higher" or "lower", define the events as above or under the treshold. For photosensors, a white screen corresponds usually to higher values. Therefore, if your events were signalled by a black colour, events values would be the lower ones, and you should set the cut to "lower". 
Add a corresponding datetime index, will return an addional array with the onsets as datetimes. number : str or int How many events should it select. after : int If number different than "all", then at what time should it start selecting the events. before : int If number different than "all", before what time should it select the events. min_duration : int The minimum duration of an event (in timepoints). Returns ---------- (raw, events, event_id) : tuple The raw file with events, the mne-formatted events and event_id. Example ---------- >>> import neurokit as nk >>> >>> raw, events, event_id = nk.eeg_add_events(raw, events_channel, conditions) Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) *Dependencies* - pandas *See Also* - mne: http://martinos.org/mne/dev/index.html References ----------- - None """ # Extract the events_channel from raw if needed if isinstance(events_channel, str): try: events_channel = eeg_select_channels(raw, events_channel) except: print("NeuroKit error: eeg_add_events(): Wrong events_channel name provided.") # Find event onsets events = find_events(events_channel, treshold=treshold, cut=cut, time_index=time_index, number=number, after=after, before=before, min_duration=min_duration) # Create mne compatible events events, event_id = eeg_create_events(events["onsets"], conditions) # Add them raw.add_events(events) return(raw, events, event_id) # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # 
============================================================================== def eeg_epochs_to_dict(epochs, include="all", exclude=None, hemisphere="both", include_central=True): """ Convert mne.Epochs object to Python dict. """ data = {} for index, epoch in enumerate(epochs.get_data()): epoch = pd.DataFrame(epoch.T) epoch.columns = epochs.ch_names selection = eeg_select_sensor_area(include=include, exclude=exclude, hemisphere=hemisphere, include_central=include_central) data[index] = epoch[selection] return() # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== def eeg_select_sensor_area(include="all", exclude=None, hemisphere="both", include_central=True): """ Returns list of electrodes names (according to a 10-20 EEG montage). This function is probably not very flexibile. Looking for help to improve it. Parameters ---------- include : str Sensor area to include. exclude : str or None Sensor area to exclude. hemisphere : str Select both hemispheres? "both", "left" or "right". include_central : bool if `hemisphere != "both"`, select the central line? Returns ---------- sensors : list List of sensors corresponding to the selected area. 
Example ---------- >>> import neurokit as nk >>> nk.eeg_select_sensor_area(include="F", exclude="C") Notes ---------- *Authors* - Dominique Makowski (https://github.com/DominiqueMakowski) References ------------ - None """ sensors = ['AF3', 'AF4', 'AF7', 'AF8', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'CP1', 'CP2', 'CP3', 'CP4', 'CP5', 'CP6', 'CPz', 'Cz', 'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'FC1', 'FC2', 'FC3', 'FC4', 'FC5', 'FC6', 'Fp1', 'Fp2', 'FT10', 'FT7', 'FT8', 'FT9', 'O1', 'O2', 'Oz', 'P1', 'P2', 'P3', 'P4', 'P5', 'P6', 'P7', 'P8', 'PO3', 'PO4', 'PO7', 'PO8', 'POz', 'Pz', 'FCz', 'T7', 'T8', 'TP10', 'TP7', 'TP8', 'TP9', 'AFz'] if include != "all": sensors = [s for s in sensors if include in s] if exclude != None: if isinstance(exclude, str): exclude = [exclude] for to_exclude in exclude: sensors = [s for s in sensors if to_exclude not in s] if hemisphere != "both": if include_central == False: if hemisphere == "left": sensors = [s for s in sensors if "1" in s or "3" in s or "5" in s or "7" in s or "9" in s] if hemisphere == "right": sensors = [s for s in sensors if "2" in s or "4" in s or "6" in s or "8" in s or "10" in s] else: if hemisphere == "left": sensors = [s for s in sensors if "1" in s or "3" in s or "5" in s or "7" in s or "9" in s or "z" in s] if hemisphere == "right": sensors = [s for s in sensors if "2" in s or "4" in s or "6" in s or "8" in s or "10" in s or "z" in s] return(sensors) #============================================================================== #============================================================================== #============================================================================== #============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # 
============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== # ============================================================================== #def eeg_create_raws(filename, path, participants=None, runs=None, lowpass_filter=None, highpass_filter=None, notch_filter=False, ica_eog=False, ica_ecg=False, resample=False): # """ # """ # if participants is None: # participants = os.listdir(path) # # raws = {} # Initialize empty dic # for participant in participants: # # if runs is None: # runs = os.listdir(path + "/" + participant + "/") # # raws[participant] = {} # for run in runs: # # Load the participant's file into a raw object # raw = eeg_load_raw(filename=filename, path=path + "/" + participant + "/" + run + "/") # # Filter and downsample # raw = eeg_filter(raw, lowpass=lowpass_filter, highpass=highpass_filter, notch=notch_filter) # # # Apply ICA to remove EOG and ECG artifacts # raw, ica = eeg_ica(raw, eog=ica_eog, ecg=ica_ecg) # # # Resample to 125 points/s # raw = raw.resample(resample) # # # Add data to dict # raws[participant][run] = raw # # return(raws) #
nilq/baby-python
python
"""Minimal json demo: deserialize a JSON document and compare types."""
import json

# A JSON document: a two-element array holding a string and an object.
your_json = '["foo", {"bar":["baz", null, 1.0, 2]}]'
parsed = json.loads(your_json)

# The source text is a str; json.loads turns it into native Python objects.
for obj in (your_json, parsed):
    print(type(obj))
# print(json.dumps(parsed, indent=4, sort_keys=True))
nilq/baby-python
python
# Approach: two parallel stacks. `_values` holds every pushed element and
# `_minima[i]` is the minimum of `_values[:i + 1]`, so the current minimum
# is always `_minima[-1]`. Each operation is O(1); space is O(n).
class MinStack:

    def __init__(self):
        """
        initialize your data structure here.
        """
        self._values = []
        self._minima = []

    def push(self, val: int) -> None:
        # Record the value and the running minimum side by side.
        current_min = min(val, self._minima[-1]) if self._minima else val
        self._values.append(val)
        self._minima.append(current_min)

    def pop(self) -> None:
        # Discard both the value and its associated running minimum.
        self._values.pop()
        self._minima.pop()

    def top(self) -> int:
        return self._values[-1]

    def getMin(self) -> int:
        return self._minima[-1]


# Your MinStack object will be instantiated and called as such:
# obj = MinStack()
# obj.push(val)
# obj.pop()
# param_3 = obj.top()
# param_4 = obj.getMin()
nilq/baby-python
python
from celery.utils.log import get_task_logger
# NOTE(review): the "flask.ext" namespace was removed in Flask 1.0; this
# import presumably targets an older Flask / Flask-Celery stack -- confirm.
from flask.ext.celery import Celery
from datetime import datetime, timedelta
import time

from app import app, db
from models import Agency, Prediction
from nextbus import Nextbus

"""
Celery is a task queue for background task processing. We're using it
for scheduled tasks, which are configured in this file.

The task execution schedule can be found/tweaked in config.py.
"""

# Create new Celery object with configured broker; get other cfg params
celery = Celery(app)
celery.conf.update(app.config)

# This wraps task execution in an app context (for db session, etc)
TaskBase = celery.Task


class ContextTask(TaskBase):
    # Custom task base class: every task body runs inside a Flask
    # application context so it can reach the db session, app.config, etc.
    abstract = True

    def __call__(self, *args, **kwargs):
        with app.app_context():
            return TaskBase.__call__(self, *args, **kwargs)


# Make all subsequently-defined tasks use the app-context wrapper above.
celery.Task = ContextTask

logger = get_task_logger(__name__)

# Task definitions:


@celery.task()
def update_agencies():
    """ Refresh our list of Agencies from NextBus """
    Nextbus.get_agencies(truncate=True)


@celery.task()
def update_routes(agencies=None):
    """ Refresh our list of Routes, Stops, and Directions from Nextbus

    :param agencies: optional iterable of agency tags; defaults to the
                     AGENCIES list from the app config.
    """
    if not agencies:
        agencies = app.config['AGENCIES']
    route_count = 0
    # One NextBus request per agency; get_routes persists the results.
    for agency_tag in agencies:
        route_count += len(Nextbus.get_routes(agency_tag, truncate=True))
    print("update_routes: Got {0} routes for {1} agencies"
          .format(route_count, len(agencies)))


@celery.task()
def update_predictions(agencies=None):
    """ Get the latest vehicle arrival predictions from Nextbus

    :param agencies: optional iterable of agency tags; defaults to the
                     AGENCIES list from the app config.
    """
    start = time.time()
    if not agencies:
        agencies = app.config['AGENCIES']
    # truncate=False: predictions accumulate; stale rows are purged by
    # the delete_stale_predictions task below.
    prediction_count = len(Nextbus.get_predictions(agencies, truncate=False))
    elapsed = time.time() - start
    print("Got {0} predictions for {1} agencies in {2:0.2f} sec."
          .format(prediction_count, len(agencies), elapsed))


@celery.task()
def update_vehicle_locations(agencies=None):
    """ Get the latest vehicle locations (coords/speed/heading) from NextBus

    :param agencies: optional iterable of agency tags; defaults to the
                     AGENCIES list from the app config.
    """
    start = time.time()
    if not agencies:
        agencies = app.config['AGENCIES']
    # truncate=False: locations accumulate; stale rows are purged by
    # the delete_stale_vehicle_locations task below.
    vl_count = len(Nextbus.get_vehicle_locations(agencies, truncate=False))
    elapsed = time.time() - start
    print("Got {0} vehicle locations for {1} agencies in {2:0.2f} seconds."
          .format(vl_count, len(agencies), elapsed))


@celery.task()
def delete_stale_predictions():
    """
    Delete predictions older than PREDICTIONS_MAX_AGE.
    """
    delete = Nextbus.delete_stale_predictions()
    print("{0} stale predictions deleted".format(delete))


@celery.task()
def delete_stale_vehicle_locations():
    """
    Delete vehicle locations older than LOCATIONS_MAX_AGE.
    """
    delete = Nextbus.delete_stale_vehicle_locations()
    print("{0} stale vehicle locations deleted".format(delete))
nilq/baby-python
python
""" Контекстный процессор для меню. """ from .utils import get_menus def menu_processor(request): """ Контекстный процессор для возможности отображения всех меню на сайте. Меню обычно распологаются на нескольких страницах, поэтому вынесено сюда. """ current_path = request.path context = { 'menus': get_menus(current_path), } return context
nilq/baby-python
python
from typing import Union

import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch import Tensor

from .functions import ActivationModule
from activations.utils.utils import _get_auto_axis_layout


def tent_activation(x, delta):
    """
    Functional implementation of TentActivation.

    Computes Tent(x) = max(0, delta - |x|) element-wise.
    """
    return torch.clamp(delta - torch.abs(x), min=0)


class TentActivation(ActivationModule):
    # How instance input distributions are rendered by the plotting helpers.
    distribution_display_mode = "kde"
    # Registry of every instance ever created, consumed by show_all().
    # NOTE(review): shadows the builtin name "list", is shared by all
    # subclasses, and keeps every instance alive for the process lifetime.
    list = []

    def __init__(self, delta: Union[torch.Tensor, float] = 1.0, lb: float = 0.0,
                 ub: float = 500.0, learnable: bool = False):
        """
        Applies element-wise Tent(x) = max(0, delta - |x|)

        :param delta: The delta which is used as initialization
        :param lb: The lower bound of the possible delta values
        :param ub: The upper bound of the possible delta values
        :param learnable: If True, delta becomes a trainable parameter
        """
        super().__init__("tent")
        # delta is always wrapped in an nn.Parameter; requires_grad alone
        # decides whether the optimizer will update it.
        if torch.is_tensor(delta):
            self.delta = nn.Parameter(delta, requires_grad=learnable)
        else:
            self.delta = nn.Parameter(torch.tensor(delta), requires_grad=learnable)
        # self.delta.requires_grad = learnable
        # NOTE(review): lb/ub are stored but not enforced here -- presumably
        # clamping happens elsewhere (e.g. in the training loop); confirm.
        self.lb = lb
        self.ub = ub
        self.learnable = learnable
        self.list.append(self)

    def forward(self, x: Tensor) -> Tensor:
        # Delegates to the functional form above.
        return tent_activation(x, self.delta)

    def extra_repr(self) -> str:
        return f'delta={self.delta}, lb={self.lb}, ub={self.ub}, learnable={self.learnable}'

    def __str__(self):
        return "Tent"

    @classmethod
    def show_all(cls, x=None, fitted_function=True, other_func=None,
                 display=True, tolerance=0.001, title=None, axes=None,
                 layout="auto", writer=None, step=None, colors="#1f77b4"):
        """
        Shows a graph of all instanciated rational functions (or returns \
        it if ``returns=True``).

        Arguments:
                x (range):
                    The range to print the function on.\n
                    Default ``None``
                fitted_function (bool):
                    If ``True``, displays the best fitted function if searched.
                    Otherwise, returns it. \n
                    Default ``True``
                other_funcs (callable):
                    another function to be plotted or a list of other callable
                    functions or a dictionary with the function name as key
                    and the callable as value.
                display (bool):
                    If ``True``, displays the plot.
                    Otherwise, returns the figure. \n
                    Default ``False``
                tolerance (float):
                    If the input histogram is used, it will be pruned. \n
                    Every bin containing less than `tolerance` of the total
                    input is pruned out. (Reduces noise).
                    Default ``0.001``
                title (str):
                    If not None, a title for the figure.
                    Default ``None``
                axes (matplotlib.pyplot.axis):
                    One ax or a list of axes to be plotted on. \n
                    If None, creates them automatically (see `layout`). \n
                    Default ``None``
                layout (tuple or 'auto'):
                    Grid layout of the figure. If "auto", one is generated.\n
                    Default ``"auto"``
                writer (tensorboardX.SummaryWriter):
                    A tensorboardX writer to give the image to, in case of
                    debugging.
                    Default ``None``
                step (int):
                    A step/epoch for tensorboardX writer.
                    If None, incrementing itself.
                    Default ``None``
        """
        if axes is None:
            # Build a fresh grid of axes, one per registered instance.
            if layout == "auto":
                total = len(cls.list)
                layout = _get_auto_axis_layout(total)
            if len(layout) != 2:
                msg = 'layout should be either "auto" or a tuple of size 2'
                raise TypeError(msg)
            figs = tuple(np.flip(np.array(layout)* (2, 3)))
            try:
                # Prefer seaborn styling when available.
                import seaborn as sns
                with sns.axes_style("whitegrid"):
                    fig, axes = plt.subplots(*layout, figsize=figs)
            except ImportError:
                # NOTE(review): RationalImportSeabornWarning is neither defined
                # nor imported in this module -- this fallback would raise
                # NameError; confirm the intended warning class.
                RationalImportSeabornWarning.warn()
                fig, axes = plt.subplots(*layout, figsize=figs)
            if isinstance(axes, plt.Axes):
                axes = np.array([axes])
            # if display:
            # Drop any surplus axes the grid layout created.
            for ax in axes.flatten()[len(cls.list):]:
                ax.remove()
            axes = axes[:len(cls.list)]
        elif isinstance(axes, plt.Axes):
            # A single axis was given: reuse it for every instance.
            axes = np.array([axes for _ in range(len(cls.list))])
            fig = plt.gcf()
        # NOTE(review): if `axes` is passed as an array, `fig` is never bound
        # before the `fig.suptitle` call below -- potential NameError; confirm.
        if isinstance(colors, str):
            colors = [colors]*len(axes.flatten())
        if isinstance(x, list):
            # Per-instance input ranges.
            for rat, ax, x_rat, color in zip(cls.list, axes.flatten(), x, colors):
                rat.show(x_rat, fitted_function, other_func, False,
                         tolerance, title, axis=ax, writer=None,
                         step=step, color=color)
        else:
            # One shared input range (possibly None) for every instance.
            for rat, ax, color in zip(cls.list, axes.flatten(), colors):
                rat.show(x, fitted_function, other_func, False, tolerance,
                         title, axis=ax, writer=None, step=step, color=color)
        if title is not None:
            fig.suptitle(title, y=0.95)
        fig = plt.gcf()
        fig.tight_layout()
        if writer is not None:
            # NOTE(review): cls._step is not defined in this file -- presumably
            # inherited from ActivationModule; confirm.
            if step is None:
                step = cls._step
                cls._step += 1
            writer.add_figure(title, fig, step)
        elif display:
            # plt.legend()
            plt.show()
        else:
            return fig
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import division
from __future__ import print_function

import numpy as np
import operator as op

from NumPyNet.exception import LayerError
from NumPyNet.utils import check_is_fitted
from NumPyNet.layers.base import BaseLayer

__author__ = ['Mattia Ceccarelli', 'Nico Curti']
__email__ = ['mattia.ceccarelli3@studio.unibo.it', 'nico.curti2@unibo.it']


class Route_layer(BaseLayer):

    def __init__(self, input_layers, by_channels=True, **kwargs):
        '''
        Route layer

        For Now the idea is: it takes the selected layers output and
        concatenates them along the batch axis OR the channels axis.
        YOLOv3 implementation always concatenates by channels.

        By definition, this layer can't be used without a Network model.

        Parameters
        ----------
        input_layers: iterable, list of integers, or single integer,
            index of the layers in the network for which inputs have to
            be concatenated.
        by_channels : bool, default True.
            It determines along which dimension the concatenation is
            performed. For example if two inputs with size (b1, w, h, c)
            and (b2, w, h, c) are concatenated with by_channels=False,
            then the final output shape will be (b1 + b2, w, h, c).
            Otherwise, if the shapes are (b, w, h, c1) and (b, w, h, c2)
            and axis=3, the final output size will be (b, w, h, c1 + c2)
            (YOLOv3 model)
        '''
        self.axis = 3 if by_channels else 0

        if isinstance(input_layers, int):
            # BUGFIX: this was stored as "self.input_layer" (singular),
            # leaving "self.input_layers" undefined for a single index and
            # breaking __str__/forward/backward.
            self.input_layers = (input_layers, )
        elif hasattr(input_layers, '__iter__'):
            self.input_layers = tuple(input_layers)
        else:
            raise ValueError('Route Layer : parameter "input_layer" is neither iterable or an integer')

        super(Route_layer, self).__init__()

    def __str__(self):
        return 'route {}'.format(list(self.input_layers))

    def _build(self, previous_layer):
        '''
        Compute this layer's input/output shape from the shapes of the
        source layers: the concatenation axis is summed, every other
        dimension is taken from the last source layer.
        '''
        out_shapes = [x.out_shape for x in previous_layer]
        self.input_shape = list(out_shapes[-1])

        # BUGFIX: removed a leftover debug print that applied np.sum to a
        # bare map object (broken under Python 3).
        if self.axis:
            # concatenation by channels: sum the channel dimension
            self.input_shape[-1] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))
        else:
            # concatenation by batch: sum the batch dimension
            self.input_shape[0] = np.sum(list(map(op.itemgetter(self.axis), out_shapes)))

        self.input_shape = tuple(self.input_shape)
        return self

    def __call__(self, previous_layer):
        # Every source layer must already know its output shape.
        for prev in previous_layer:
            if prev.out_shape is None:
                class_name = self.__class__.__name__
                prev_name = prev.__class__.__name__
                raise LayerError('Incorrect shapes found. Layer {0} cannot be connected to the previous {1} layer.'.format(class_name, prev_name))

        self._build(previous_layer)
        return self

    def forward(self, network):
        '''
        Concatenate along chosen axis the outputs of selected network layers
        In main CNN applications, like YOLOv3, the concatenation happens long channels axis

        Parameters
        ----------
        network : Network object type.

        Returns
        -------
        Route Layer object
        '''
        self.output = np.concatenate([network[layer_idx].output for layer_idx in self.input_layers], axis=self.axis)
        self.delta = np.zeros(shape=self.out_shape, dtype=float)
        return self

    def backward(self, delta, network):
        '''
        Sum self.delta to the correct layer delta on the network

        Parameters
        ----------
        delta : 4-d numpy array, network delta to be backpropagated
        network: Network object type.

        Returns
        -------
        Route layer object
        '''
        check_is_fitted(self, 'delta')

        # NumPyNet implementation
        if self.axis == 3:
            # concatenation by channels: route each channel slice of
            # self.delta back to the layer it came from
            channels_sum = 0
            for idx in self.input_layers:
                channels = network[idx].out_shape[3]
                network[idx].delta += self.delta[..., channels_sum : channels_sum + channels]
                channels_sum += channels

        elif self.axis == 0:
            # concatenation by batch: route each batch slice back.
            # BUGFIX: "self.self.input_layers" typo raised AttributeError.
            batch_sum = 0
            for idx in self.input_layers:
                batches = network[idx].out_shape[0]
                network[idx].delta += self.delta[batch_sum : batch_sum + batches, ...]
                batch_sum += batches

        return self


if __name__ == '__main__':

    layer = Route_layer((1, 2))
    print(layer)
    print(layer.out_shape)
    # TODO the idea is to create a toy model for numpynet and keras, and try some
    # concatenation (mainly by channel, since the batch implementation doesn't really
    # make sense to me)
nilq/baby-python
python
# Fall back to a flat source layout when the package layout isn't installed.
# NOTE(review): bare except silently hides any import error from libs.*.
try:
    from libs.layers import *
    from libs.utils_ft import *
except:
    from layers import *
    from utils_ft import *
import copy
import os
import sys
from collections import defaultdict
from typing import Optional

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn import MultiheadAttention, TransformerEncoderLayer
from torch.nn.init import constant_, xavier_uniform_
# NOTE(review): `summary` appears unused in this chunk -- confirm before removing.
from torchinfo import summary

# Make the package root importable when running this file directly.
current_path = os.path.dirname(os.path.abspath(__file__))
SRC_ROOT = os.path.dirname(current_path)
sys.path.append(SRC_ROOT)

# Config keys that may be absent from a model config dict; _get_setting()
# copies all of them onto the model instance (missing ones become None via
# the defaultdict).
ADDITIONAL_ATTR = ['normalizer', 'raw_laplacian', 'return_latent',
                   'residual_type', 'norm_type', 'norm_eps', 'boundary_condition',
                   'upscaler_size', 'downscaler_size', 'spacial_dim', 'spacial_fc',
                   'regressor_activation', 'attn_activation',
                   'downscaler_activation', 'upscaler_activation',
                   'encoder_dropout', 'decoder_dropout', 'ffn_dropout']


class SimpleTransformerEncoderLayer(nn.Module):
    """Encoder layer: SimpleAttention + FeedForward with configurable
    normalization, residual sign and optional positional encoding."""

    def __init__(self,
                 d_model=96,
                 pos_dim=1,
                 n_head=2,
                 dim_feedforward=512,
                 attention_type='fourier',
                 pos_emb=False,
                 layer_norm=True,
                 attn_norm=None,
                 norm_type='layer',
                 norm_eps=None,
                 batch_norm=False,
                 attn_weight=False,
                 xavier_init: float = 1e-2,
                 diagonal_weight: float = 1e-2,
                 symmetric_init=False,
                 residual_type='add',
                 activation_type='relu',
                 dropout=0.1,
                 ffn_dropout=None,
                 debug=False,
                 ):
        super(SimpleTransformerEncoderLayer, self).__init__()

        dropout = default(dropout, 0.05)
        if attention_type in ['linear', 'softmax']:
            # linear/softmax attention variants use a fixed dropout
            dropout = 0.1
        ffn_dropout = default(ffn_dropout, dropout)
        norm_eps = default(norm_eps, 1e-5)
        # When no post-attention LayerNorm is used, normalize inside attention.
        attn_norm = default(attn_norm, not layer_norm)
        if (not layer_norm) and (not attn_norm):
            attn_norm = True
        norm_type = default(norm_type, 'layer')

        self.attn = SimpleAttention(n_head=n_head,
                                    d_model=d_model,
                                    attention_type=attention_type,
                                    diagonal_weight=diagonal_weight,
                                    xavier_init=xavier_init,
                                    symmetric_init=symmetric_init,
                                    pos_dim=pos_dim,
                                    norm=attn_norm,
                                    norm_type=norm_type,
                                    eps=norm_eps,
                                    dropout=dropout)
        self.d_model = d_model
        self.n_head = n_head
        self.pos_dim = pos_dim
        self.add_layer_norm = layer_norm
        if layer_norm:
            self.layer_norm1 = nn.LayerNorm(d_model, eps=norm_eps)
            self.layer_norm2 = nn.LayerNorm(d_model, eps=norm_eps)
        dim_feedforward = default(dim_feedforward, 2*d_model)
        self.ff = FeedForward(in_dim=d_model,
                              dim_feedforward=dim_feedforward,
                              batch_norm=batch_norm,
                              activation=activation_type,
                              dropout=ffn_dropout,
                              )
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.residual_type = residual_type  # plus or minus
        self.add_pos_emb = pos_emb
        if self.add_pos_emb:
            self.pos_emb = PositionalEncoding(d_model)

        self.debug = debug
        self.attn_weight = attn_weight
        self.__name__ = attention_type.capitalize() + 'TransformerEncoderLayer'

    def forward(self, x, pos=None, weight=None):
        '''
        - x: node feature, (batch_size, seq_len, n_feats)
        - pos: position coords, needed in every head

        Remark:
            - for n_head=1, no need to encode positional
            information if coords are in features
        '''
        if self.add_pos_emb:
            # PositionalEncoding expects (seq_len, batch, d_model)
            x = x.permute((1, 0, 2))
            x = self.pos_emb(x)
            x = x.permute((1, 0, 2))

        if pos is not None and self.pos_dim > 0:
            att_output, attn_weight = self.attn(
                x, x, x, pos=pos, weight=weight)  # encoder no mask
        else:
            att_output, attn_weight = self.attn(x, x, x, weight=weight)

        # residual connection; 'add'/'plus'/None add, anything else subtracts
        if self.residual_type in ['add', 'plus'] or self.residual_type is None:
            x = x + self.dropout1(att_output)
        else:
            x = x - self.dropout1(att_output)
        if self.add_layer_norm:
            x = self.layer_norm1(x)

        x1 = self.ff(x)
        x = x + self.dropout2(x1)

        if self.add_layer_norm:
            x = self.layer_norm2(x)

        if self.attn_weight:
            return x, attn_weight
        else:
            return x


class GalerkinTransformerDecoderLayer(nn.Module):
    r"""
    A lite implementation of the decoder layer based on linear causal attention
    adapted from the TransformerDecoderLayer in PyTorch
    https://github.com/pytorch/pytorch/blob/afc1d1b3d6dad5f9f56b1a4cb335de109adb6018/torch/nn/modules/transformer.py#L359
    """

    def __init__(self, d_model, nhead,
                 pos_dim = 1,
                 dim_feedforward=512,
                 attention_type='galerkin',
                 layer_norm=True,
                 attn_norm=None,
                 norm_type='layer',
                 norm_eps=1e-5,
                 xavier_init: float=1e-2,
                 diagonal_weight: float = 1e-2,
                 dropout=0.05,
                 ffn_dropout=None,
                 activation_type='relu',
                 device=None,
                 dtype=None,
                 debug=False,) -> None:
        factory_kwargs = {'device': device, 'dtype': dtype, }
        super(GalerkinTransformerDecoderLayer, self).__init__()

        ffn_dropout = default(ffn_dropout, dropout)
        self.debug = debug
        # self-attention over the target sequence
        self.self_attn = SimpleAttention(nhead, d_model,
                                         attention_type=attention_type,
                                         pos_dim=pos_dim,
                                         norm=attn_norm,
                                         eps=norm_eps,
                                         norm_type=norm_type,
                                         diagonal_weight=diagonal_weight,
                                         xavier_init=xavier_init,
                                         dropout=dropout,)
        # cross-attention against the encoder memory (causal variant)
        self.multihead_attn = SimpleAttention(nhead, d_model,
                                              attention_type='causal',
                                              pos_dim=pos_dim,
                                              norm=attn_norm,
                                              eps=norm_eps,
                                              norm_type=norm_type,
                                              diagonal_weight=diagonal_weight,
                                              xavier_init=xavier_init,
                                              dropout=dropout,)
        dim_feedforward = default(dim_feedforward, 2*d_model)
        self.ff = FeedForward(in_dim=d_model,
                              dim_feedforward=dim_feedforward,
                              activation=activation_type,
                              dropout=ffn_dropout,
                              )
        self.dropout = nn.Dropout(ffn_dropout)
        # NOTE(review): linear2 appears unused in forward -- confirm.
        self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs)
        self.add_layer_norm = layer_norm
        if self.add_layer_norm:
            self.norm1 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
            self.norm2 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
            self.norm3 = nn.LayerNorm(d_model, eps=norm_eps, **factory_kwargs)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.activation = F.relu

    def forward(self, x: Tensor, memory: Tensor,
                tgt_mask: Optional[Tensor] = None,
                memory_mask: Optional[Tensor] = None,) -> Tensor:
        r"""Pass the inputs (and mask) through the decoder layer.

        Args:
            tgt: the sequence to the decoder layer (required).
            memory: the sequence from the last layer of the encoder (required).
            tgt_mask: the mask for the tgt sequence (optional).
            memory_mask: the mask for the memory sequence (optional).

        Shape:
            see the docs in Transformer class.
        """
        # post-norm when layer norm is enabled, plain residuals otherwise
        if self.add_layer_norm:
            x = self.norm1(x + self._sa_block(x, tgt_mask))
            x = self.norm2(x + self._mha_block(x, memory, memory_mask))
            x = self.norm3(x + self._ff_block(x))
        else:
            x = x + self._sa_block(x, tgt_mask)
            x = x + self._mha_block(x, memory, memory_mask)
            x = x + self._ff_block(x)
        return x

    # self-attention block
    def _sa_block(self, x: Tensor,
                  attn_mask: Optional[Tensor]) -> Tensor:
        x = self.self_attn(x, x, x,
                           attn_mask=attn_mask,)[0]
        return self.dropout1(x)

    # multihead attention block
    def _mha_block(self, x: Tensor, mem: Tensor,
                   attn_mask: Optional[Tensor]) -> Tensor:
        x = self.multihead_attn(x, mem, mem,
                                mask=attn_mask,)[0]
        return self.dropout2(x)

    # feed forward block
    def _ff_block(self, x: Tensor) -> Tensor:
        x = self.ff(x)
        return self.dropout(x)


class _TransformerEncoderLayer(nn.Module):
    r"""
    Taken from official torch implementation:
    https://pytorch.org/docs/stable/_modules/torch/nn/modules/transformer.html#TransformerEncoderLayer
        - add a layer norm switch
        - add an attn_weight output switch
        - batch first

    batch_first has been added in PyTorch 1.9.0
    https://github.com/pytorch/pytorch/pull/55285
    """

    def __init__(self, d_model, nhead,
                 dim_feedforward=2048,
                 dropout=0.1,
                 layer_norm=True,
                 attn_weight=False,
                 ):
        super(_TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.add_layer_norm = layer_norm
        self.attn_weight = attn_weight
        self.activation = nn.ReLU()

    def __setstate__(self, state):
        # keep unpickling of old checkpoints working when 'activation' is absent
        if 'activation' not in state:
            state['activation'] = F.relu
        super(_TransformerEncoderLayer, self).__setstate__(state)

    def forward(self, src: Tensor,
                pos: Optional[Tensor] = None,
                weight: Optional[Tensor] = None,
                src_mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layer.

        Args (modified from torch):
            src: the sequence to the encoder layer (required): (batch_size, seq_len, d_model)
            src_mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.

        Remark:
            PyTorch official implementation: (seq_len, n_batch, d_model) as input
            here we permute the first two dims as input
            so in the first line the dim needs to be permuted then permuted back
        """
        if pos is not None:
            # concatenate position coords onto the features
            src = torch.cat([pos, src], dim=-1)

        # batch-first -> seq-first for nn.MultiheadAttention
        src = src.permute(1, 0, 2)

        if (src_mask is None) or (src_key_padding_mask is None):
            src2, attn_weight = self.self_attn(src, src, src)
        else:
            src2, attn_weight = self.self_attn(src, src, src,
                                               attn_mask=src_mask,
                                               key_padding_mask=src_key_padding_mask)

        src = src + self.dropout1(src2)
        if self.add_layer_norm:
            src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        if self.add_layer_norm:
            src = self.norm2(src)
        # seq-first -> batch-first
        src = src.permute(1, 0, 2)
        if self.attn_weight:
            return src, attn_weight
        else:
            return src


class TransformerEncoderWrapper(nn.Module):
    r"""TransformerEncoder is a stack of N encoder layers
    Modified from pytorch official implementation
    TransformerEncoder's input and output shapes follow
    those of the encoder_layer fed into as this is essentially a wrapper

    Args:
        encoder_layer: an instance of the TransformerEncoderLayer() class (required).
        num_layers: the number of sub-encoder-layers in the encoder (required).
        norm: the layer normalization component (optional).

    Examples::
        >>> encoder_layer = nn.TransformerEncoderLayer(d_model=512, nhead=8)
        >>> transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=6)
        >>> src = torch.rand(10, 32, 512)
        >>> out = transformer_encoder(src)
    """
    __constants__ = ['norm']

    def __init__(self, encoder_layer, num_layers,
                 norm=None,):
        super(TransformerEncoderWrapper, self).__init__()
        # independent copies of the prototype layer
        self.layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for i in range(num_layers)])
        self.num_layers = num_layers
        self.norm = norm

    def forward(self, src: Tensor,
                mask: Optional[Tensor] = None,
                src_key_padding_mask: Optional[Tensor] = None) -> Tensor:
        r"""Pass the input through the encoder layers in turn.

        Args:
            src: the sequence to the encoder (required).
            mask: the mask for the src sequence (optional).
            src_key_padding_mask: the mask for the src keys per batch (optional).

        Shape:
            see the docs in Transformer class.
        """
        output = src
        for mod in self.layers:
            output = mod(output, src_mask=mask,
                         src_key_padding_mask=src_key_padding_mask)

        if self.norm is not None:
            output = self.norm(output)

        return output


class GCN(nn.Module):
    def __init__(self,
                 node_feats=4,
                 out_features=96,
                 num_gcn_layers=2,
                 edge_feats=6,
                 activation=True,
                 raw_laplacian=False,
                 dropout=0.1,
                 debug=False):
        super(GCN, self).__init__()
        '''
        A simple GCN, a wrapper for Kipf and Welling's code
        learnable edge features similar to
        Graph Transformer https://arxiv.org/abs/1911.06455
        but using neighbor agg
        '''
        self.edge_learner = EdgeEncoder(out_dim=out_features,
                                        edge_feats=edge_feats,
                                        raw_laplacian=raw_laplacian
                                        )
        self.gcn_layer0 = GraphConvolution(in_features=node_feats,  # hard coded
                                           out_features=out_features,
                                           debug=debug,
                                           )
        self.gcn_layers = nn.ModuleList([copy.deepcopy(GraphConvolution(
            in_features=out_features,  # hard coded
            out_features=out_features,
            debug=debug
        )) for _ in range(1, num_gcn_layers)])
        self.activation = activation
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(dropout)
        self.edge_feats = edge_feats
        self.debug = debug

    def forward(self, x, edge):
        # x: (batch, seq_len, node_feats) -> channel-first for the conv layers
        x = x.permute(0, 2, 1).contiguous()
        edge = edge.permute([0, 3, 1, 2]).contiguous()
        assert edge.size(1) == self.edge_feats

        # learn a dense edge representation from the raw edge features
        edge = self.edge_learner(edge)

        out = self.gcn_layer0(x, edge)
        for gc in self.gcn_layers[:-1]:
            out = gc(out, edge)
            if self.activation:
                out = self.relu(out)

        # last layer no activation
        out = self.gcn_layers[-1](out, edge)
        return out.permute(0, 2, 1)


class GAT(nn.Module):
    def __init__(self,
                 node_feats=4,
                 out_features=96,
                 num_gcn_layers=2,
                 edge_feats=None,
                 activation=False,
                 debug=False):
        super(GAT, self).__init__()
        '''
        A simple GAT: modified from the official implementation
        '''
        self.gat_layer0 = GraphAttention(in_features=node_feats,
                                         out_features=out_features,
                                         )
        self.gat_layers = nn.ModuleList([copy.deepcopy(GraphAttention(
            in_features=out_features,
            out_features=out_features,
        )) for _ in range(1, num_gcn_layers)])
        self.activation = activation
        self.relu = nn.ReLU()
        self.debug = debug

    def forward(self, x, edge):
        '''
        input: node feats       (-1, seq_len, n_feats)
               edge only takes adj (-1, seq_len, seq_len)
        edge matrix first one in the last dim is graph Lap.
        '''
        # keep only the first edge channel (the graph Laplacian/adjacency)
        edge = edge[..., 0].contiguous()

        out = self.gat_layer0(x, edge)

        for layer in self.gat_layers[:-1]:
            out = layer(out, edge)
            if self.activation:
                out = self.relu(out)

        # last layer no activation
        return self.gat_layers[-1](out, edge)


class PointwiseRegressor(nn.Module):
    def __init__(self, in_dim,  # input dimension
                 n_hidden,
                 out_dim,  # number of target dim
                 num_layers: int = 2,
                 spacial_fc: bool = False,
                 spacial_dim=1,
                 dropout=0.1,
                 activation='silu',
                 return_latent=False,
                 debug=False):
        super(PointwiseRegressor, self).__init__()
        '''
        A wrapper for a simple pointwise linear layers
        '''
        dropout = default(dropout, 0.1)
        self.spacial_fc = spacial_fc
        activ = nn.SiLU() if activation == 'silu' else nn.ReLU()
        if self.spacial_fc:
            # fold the grid coordinates into the first projection
            in_dim = in_dim + spacial_dim
            self.fc = nn.Linear(in_dim, n_hidden)
        self.ff = nn.ModuleList([nn.Sequential(
                                nn.Linear(n_hidden, n_hidden),
                                activ,
                                )])
        for _ in range(num_layers - 1):
            self.ff.append(nn.Sequential(
                nn.Linear(n_hidden, n_hidden),
                activ,
            ))
        self.dropout = nn.Dropout(dropout)
        self.out = nn.Linear(n_hidden, out_dim)
        self.return_latent = return_latent
        self.debug = debug

    def forward(self, x, grid=None):
        '''
        2D:
            Input: (-1, n, n, in_features)
            Output: (-1, n, n, n_targets)
        1D:
            Input: (-1, n, in_features)
            Output: (-1, n, n_targets)
        '''
        if self.spacial_fc:
            x = torch.cat([x, grid], dim=-1)
            x = self.fc(x)

        for layer in self.ff:
            x = layer(x)
            x = self.dropout(x)

        x = self.out(x)

        if self.return_latent:
            return x, None
        else:
            return x


class SpectralRegressor(nn.Module):
    def __init__(self, in_dim,
                 n_hidden,
                 freq_dim,
                 out_dim,
                 modes: int,
                 num_spectral_layers: int = 2,
                 n_grid=None,
                 dim_feedforward=None,
                 spacial_fc=False,
                 spacial_dim=2,
                 return_freq=False,
                 return_latent=False,
                 normalizer=None,
                 activation='silu',
                 last_activation=True,
                 dropout=0.1,
                 debug=False):
        super(SpectralRegressor, self).__init__()
        '''
        A wrapper for both SpectralConv1d and SpectralConv2d
        Ref: Li et 2020 FNO paper
        https://github.com/zongyi-li/fourier_neural_operator/blob/master/fourier_2d.py
        A new implementation incoporating all spacial-based FNO
        in_dim: input dimension, (either n_hidden or spacial dim)
        n_hidden: number of hidden features out from attention to the fourier conv
        '''
        if spacial_dim == 2:  # 2d, function + (x,y)
            spectral_conv = SpectralConv2d
        elif spacial_dim == 1:  # 1d, function + x
            spectral_conv = SpectralConv1d
        else:
            raise NotImplementedError("3D not implemented.")
        activation = default(activation, 'silu')
        self.activation = nn.SiLU() if activation == 'silu' else nn.ReLU()
        dropout = default(dropout, 0.1)
        self.spacial_fc = spacial_fc  # False in Transformer
        if self.spacial_fc:
            self.fc = nn.Linear(in_dim + spacial_dim, n_hidden)
        self.spectral_conv = nn.ModuleList([spectral_conv(in_dim=n_hidden,
                                                          out_dim=freq_dim,
                                                          n_grid=n_grid,
                                                          modes=modes,
                                                          dropout=dropout,
                                                          activation=activation,
                                                          return_freq=return_freq,
                                                          debug=debug)])
        for _ in range(num_spectral_layers - 1):
            self.spectral_conv.append(spectral_conv(in_dim=freq_dim,
                                                    out_dim=freq_dim,
                                                    n_grid=n_grid,
                                                    modes=modes,
                                                    dropout=dropout,
                                                    activation=activation,
                                                    return_freq=return_freq,
                                                    debug=debug))
        if not last_activation:
            self.spectral_conv[-1].activation = Identity()

        self.n_grid = n_grid  # dummy for debug
        self.dim_feedforward = default(dim_feedforward, 2*spacial_dim*freq_dim)
        self.regressor = nn.Sequential(
            nn.Linear(freq_dim, self.dim_feedforward),
            self.activation,
            nn.Linear(self.dim_feedforward, out_dim),
        )
        self.normalizer = normalizer
        self.return_freq = return_freq
        self.return_latent = return_latent
        self.debug = debug

    def forward(self, x, edge=None, pos=None, grid=None):
        '''
        2D:
            Input: (-1, n, n, in_features)
            Output: (-1, n, n, n_targets)
        1D:
            Input: (-1, n, in_features)
            Output: (-1, n, n_targets)
        '''
        x_latent = []
        x_fts = []

        if self.spacial_fc:
            x = torch.cat([x, grid], dim=-1)
            x = self.fc(x)

        for layer in self.spectral_conv:
            if self.return_freq:
                x, x_ft = layer(x)
                x_fts.append(x_ft.contiguous())
            else:
                x = layer(x)

            if self.return_latent:
                x_latent.append(x.contiguous())

        x = self.regressor(x)

        if self.normalizer:
            x = self.normalizer.inverse_transform(x)

        if self.return_freq or self.return_latent:
            return x, dict(preds_freq=x_fts, preds_latent=x_latent)
        else:
            return x


class DownScaler(nn.Module):
    def __init__(self, in_dim,
                 out_dim,
                 dropout=0.1,
                 padding=5,
                 downsample_mode='conv',
                 activation_type='silu',
                 interp_size=None,
                 debug=False):
        super(DownScaler, self).__init__()
        '''
        A wrapper for conv2d/interp downscaler
        '''
        if downsample_mode == 'conv':
            self.downsample = nn.Sequential(Conv2dEncoder(in_dim=in_dim,
                                                          out_dim=out_dim,
                                                          activation_type=activation_type,
                                                          debug=debug),
                                            Conv2dEncoder(in_dim=out_dim,
                                                          out_dim=out_dim,
                                                          padding=padding,
                                                          activation_type=activation_type,
                                                          debug=debug))
        elif downsample_mode == 'interp':
            self.downsample = Interp2dEncoder(in_dim=in_dim,
                                              out_dim=out_dim,
                                              interp_size=interp_size,
                                              activation_type=activation_type,
                                              dropout=dropout,
                                              debug=debug)
        else:
            raise NotImplementedError("downsample mode not implemented.")
        self.in_dim = in_dim
        self.out_dim = out_dim

    def forward(self, x):
        '''
        2D:
            Input: (-1, n, n, in_dim)
            Output: (-1, n_s, n_s, out_dim)
        '''
        n_grid = x.size(1)
        bsz = x.size(0)
        x = x.view(bsz, n_grid, n_grid, self.in_dim)
        # channel-last -> channel-first for conv, then back
        x = x.permute(0, 3, 1, 2)
        x = self.downsample(x)
        x = x.permute(0, 2, 3, 1)
        return x


class UpScaler(nn.Module):
    def __init__(self, in_dim: int,
                 out_dim: int,
                 hidden_dim=None,
                 padding=2,
                 output_padding=0,
                 dropout=0.1,
                 upsample_mode='conv',
                 activation_type='silu',
                 interp_mode='bilinear',
                 interp_size=None,
                 debug=False):
        super(UpScaler, self).__init__()
        '''
        A wrapper for DeConv2d upscaler or interpolation upscaler
        Deconv: Conv1dTranspose
        Interp: interp->conv->interp
        '''
        hidden_dim = default(hidden_dim, in_dim)
        if upsample_mode in ['conv', 'deconv']:
            self.upsample = nn.Sequential(
                DeConv2dBlock(in_dim=in_dim,
                              out_dim=out_dim,
                              hidden_dim=hidden_dim,
                              padding=padding,
                              output_padding=output_padding,
                              dropout=dropout,
                              activation_type=activation_type,
                              debug=debug),
                DeConv2dBlock(in_dim=in_dim,
                              out_dim=out_dim,
                              hidden_dim=hidden_dim,
                              padding=padding*2,
                              output_padding=output_padding,
                              dropout=dropout,
                              activation_type=activation_type,
                              debug=debug))
        elif upsample_mode == 'interp':
            self.upsample = Interp2dUpsample(in_dim=in_dim,
                                             out_dim=out_dim,
                                             interp_mode=interp_mode,
                                             interp_size=interp_size,
                                             dropout=dropout,
                                             activation_type=activation_type,
                                             debug=debug)
        else:
            raise NotImplementedError("upsample mode not implemented.")
        self.in_dim = in_dim
        self.out_dim = out_dim

    def forward(self, x):
        '''
        2D:
            Input: (-1, n_s, n_s, in_dim)
            Output: (-1, n, n, out_dim)
        '''
        # channel-last -> channel-first for conv, then back
        x = x.permute(0, 3, 1, 2)
        x = self.upsample(x)
        x = x.permute(0, 2, 3, 1)
        return x


class SimpleTransformer(nn.Module):
    """Config-driven encoder model: graph feature extractor + attention
    encoder stack + (pointwise or spectral) regressor decoder.

    All hyperparameters come from the kwargs dict; missing keys resolve to
    None through the defaultdict in __init__ (see _get_setting).
    """

    def __init__(self, **kwargs):
        super(SimpleTransformer, self).__init__()
        self.config = defaultdict(lambda: None, **kwargs)
        self._get_setting()
        self._initialize()
        self.__name__ = self.attention_type.capitalize() + 'Transformer'

    def forward(self, node, edge, pos, grid=None, weight=None):
        '''
        seq_len: n, number of grid points
        node_feats: number of features of the inputs
        edge_feats: number of Laplacian matrices (including learned)
        pos_dim: dimension of the Euclidean space
        - node: (batch_size, seq_len, node_feats)
        - pos: (batch_size, seq_len, pos_dim)
        - edge: (batch_size, seq_len, seq_len, edge_feats)
        - weight: (batch_size, seq_len, seq_len): mass matrix prefered
            or (batch_size, seq_len) when mass matrices are not provided

        Remark:
        for classic Transformer: pos_dim = n_hidden = 512
        pos encodings is added to the latent representation
        '''
        x_latent = []
        attn_weights = []

        x = self.feat_extract(node, edge)

        if self.spacial_residual or self.return_latent:
            res = x.contiguous()
            x_latent.append(res)

        for encoder in self.encoder_layers:
            if self.return_attn_weight:
                x, attn_weight = encoder(x, pos, weight)
                attn_weights.append(attn_weight)
            else:
                x = encoder(x, pos, weight)

            if self.return_latent:
                x_latent.append(x.contiguous())

        if self.spacial_residual:
            x = res + x

        x_freq = self.freq_regressor(
            x)[:, :self.pred_len, :] if self.n_freq_targets > 0 else None

        x = self.dpo(x)
        x = self.regressor(x, grid=grid)

        return dict(preds=x,
                    preds_freq=x_freq,
                    preds_latent=x_latent,
                    attn_weights=attn_weights)

    def _initialize(self):
        # build sub-modules in dependency order, then freeze the config
        self._get_feature()
        self._get_encoder()
        if self.n_freq_targets > 0:
            self._get_freq_regressor()
        self._get_regressor()
        if self.decoder_type in ['pointwise', 'convolution']:
            self._initialize_layer(self.regressor)
        self.config = dict(self.config)

    @staticmethod
    def _initialize_layer(layer, gain=1e-2):
        # Xavier for weight matrices, zeros for biases/1-d params
        for param in layer.parameters():
            if param.ndim > 1:
                xavier_uniform_(param, gain=gain)
            else:
                constant_(param, 0)

    def _get_setting(self):
        # copy every config key (plus the optional ADDITIONAL_ATTR keys)
        # onto the instance; absent keys become None via the defaultdict
        all_attr = list(self.config.keys()) + ADDITIONAL_ATTR
        for key in all_attr:
            setattr(self, key, self.config[key])

        self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden)
        self.spacial_dim = default(self.spacial_dim, self.pos_dim)
        self.spacial_fc = default(self.spacial_fc, False)
        self.dropout = default(self.dropout, 0.05)
        self.dpo = nn.Dropout(self.dropout)
        if self.decoder_type == 'attention':
            self.num_encoder_layers += 1
        self.attention_types = ['fourier', 'integral', 'cosine', 'galerkin',
                                'linear', 'softmax']

    def _get_feature(self):
        # choose the graph-based feature extractor (GCN/GAT) or identity
        if self.num_feat_layers > 0 and self.feat_extract_type == 'gcn':
            self.feat_extract = GCN(node_feats=self.node_feats,
                                    edge_feats=self.edge_feats,
                                    num_gcn_layers=self.num_feat_layers,
                                    out_features=self.n_hidden,
                                    activation=self.graph_activation,
                                    raw_laplacian=self.raw_laplacian,
                                    debug=self.debug,
                                    )
        elif self.num_feat_layers > 0 and self.feat_extract_type == 'gat':
            self.feat_extract = GAT(node_feats=self.node_feats,
                                    out_features=self.n_hidden,
                                    num_gcn_layers=self.num_feat_layers,
                                    activation=self.graph_activation,
                                    debug=self.debug,
                                    )
        else:
            self.feat_extract = Identity(in_features=self.node_feats,
                                         out_features=self.n_hidden)

    def _get_encoder(self):
        # custom attention variants use SimpleTransformerEncoderLayer;
        # anything else falls back to the (patched) official torch layer
        if self.attention_type in self.attention_types:
            encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden,
                                                          n_head=self.n_head,
                                                          attention_type=self.attention_type,
                                                          dim_feedforward=self.dim_feedforward,
                                                          layer_norm=self.layer_norm,
                                                          attn_norm=self.attn_norm,
                                                          norm_type=self.norm_type,
                                                          batch_norm=self.batch_norm,
                                                          pos_dim=self.pos_dim,
                                                          xavier_init=self.xavier_init,
                                                          diagonal_weight=self.diagonal_weight,
                                                          symmetric_init=self.symmetric_init,
                                                          attn_weight=self.return_attn_weight,
                                                          residual_type=self.residual_type,
                                                          activation_type=self.attn_activation,
                                                          dropout=self.encoder_dropout,
                                                          ffn_dropout=self.ffn_dropout,
                                                          debug=self.debug)
        else:
            encoder_layer = _TransformerEncoderLayer(d_model=self.n_hidden,
                                                     nhead=self.n_head,
                                                     dim_feedforward=self.dim_feedforward,
                                                     layer_norm=self.layer_norm,
                                                     attn_weight=self.return_attn_weight,
                                                     dropout=self.encoder_dropout
                                                     )

        self.encoder_layers = nn.ModuleList(
            [copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)])

    def _get_freq_regressor(self):
        if self.bulk_regression:
            self.freq_regressor = BulkRegressor(in_dim=self.seq_len,
                                                n_feats=self.n_hidden,
                                                n_targets=self.n_freq_targets,
                                                pred_len=self.pred_len)
        else:
            self.freq_regressor = nn.Sequential(
                nn.Linear(self.n_hidden, self.n_hidden),
                nn.ReLU(),
                nn.Linear(self.n_hidden, self.n_freq_targets),
            )

    def _get_regressor(self):
        if self.decoder_type == 'pointwise':
            self.regressor = PointwiseRegressor(in_dim=self.n_hidden,
                                                n_hidden=self.n_hidden,
                                                out_dim=self.n_targets,
                                                spacial_fc=self.spacial_fc,
                                                spacial_dim=self.spacial_dim,
                                                activation=self.regressor_activation,
                                                dropout=self.decoder_dropout,
                                                debug=self.debug)
        elif self.decoder_type == 'ifft':
            self.regressor = SpectralRegressor(in_dim=self.n_hidden,
                                               n_hidden=self.n_hidden,
                                               freq_dim=self.freq_dim,
                                               out_dim=self.n_targets,
                                               num_spectral_layers=self.num_regressor_layers,
                                               modes=self.fourier_modes,
                                               spacial_dim=self.spacial_dim,
                                               spacial_fc=self.spacial_fc,
                                               dim_feedforward=self.freq_dim,
                                               activation=self.regressor_activation,
                                               dropout=self.decoder_dropout,
                                               )
        else:
            raise NotImplementedError("Decoder type not implemented")

    def get_graph(self):
        # NOTE(review): "self.gragh" looks like a typo and is never assigned
        # anywhere in this chunk -- calling this would raise AttributeError;
        # presumably "self.graph" or the feature extractor was intended.
        return self.gragh

    def get_encoder(self):
        return self.encoder_layers


class 
FourierTransformer2D(nn.Module): def __init__(self, **kwargs): super(FourierTransformer2D, self).__init__() self.config = defaultdict(lambda: None, **kwargs) self._get_setting() self._initialize() self.__name__ = self.attention_type.capitalize() + 'Transformer2D' def forward(self, node, edge, pos, grid, weight=None, boundary_value=None): ''' - node: (batch_size, n, n, node_feats) - pos: (batch_size, n_s*n_s, pos_dim) - edge: (batch_size, n_s*n_s, n_s*n_s, edge_feats) - weight: (batch_size, n_s*n_s, n_s*n_s): mass matrix prefered or (batch_size, n_s*n_s) when mass matrices are not provided (lumped mass) - grid: (batch_size, n-2, n-2, 2) excluding boundary ''' bsz = node.size(0) n_s = int(pos.size(1)**(0.5)) x_latent = [] attn_weights = [] if not self.downscaler_size: node = torch.cat( [node, pos.contiguous().view(bsz, n_s, n_s, -1)], dim=-1) x = self.downscaler(node) x = x.view(bsz, -1, self.n_hidden) x = self.feat_extract(x, edge) x = self.dpo(x) for encoder in self.encoder_layers: if self.return_attn_weight and self.attention_type != 'official': x, attn_weight = encoder(x, pos, weight) attn_weights.append(attn_weight) elif self.attention_type != 'official': x = encoder(x, pos, weight) else: out_dim = self.n_head*self.pos_dim + self.n_hidden x = x.view(bsz, -1, self.n_head, self.n_hidden//self.n_head).transpose(1, 2) x = torch.cat([pos.repeat([1, self.n_head, 1, 1]), x], dim=-1) x = x.transpose(1, 2).contiguous().view(bsz, -1, out_dim) x = encoder(x) if self.return_latent: x_latent.append(x.contiguous()) x = x.view(bsz, n_s, n_s, self.n_hidden) x = self.upscaler(x) if self.return_latent: x_latent.append(x.contiguous()) x = self.dpo(x) if self.return_latent: x, xr_latent = self.regressor(x, grid=grid) x_latent.append(xr_latent) else: x = self.regressor(x, grid=grid) if self.normalizer: x = self.normalizer.inverse_transform(x) if self.boundary_condition == 'dirichlet': x = x[:, 1:-1, 1:-1].contiguous() x = F.pad(x, (0, 0, 1, 1, 1, 1), "constant", 0) if boundary_value 
is not None: assert x.size() == boundary_value.size() x += boundary_value return dict(preds=x, preds_latent=x_latent, attn_weights=attn_weights) def _initialize(self): self._get_feature() self._get_scaler() self._get_encoder() self._get_regressor() self.config = dict(self.config) def cuda(self, device=None): self = super().cuda(device) if self.normalizer: self.normalizer = self.normalizer.cuda(device) return self def cpu(self): self = super().cpu() if self.normalizer: self.normalizer = self.normalizer.cpu() return self def to(self, *args, **kwargs): self = super().to(*args, **kwargs) if self.normalizer: self.normalizer = self.normalizer.to(*args, **kwargs) return self def print_config(self): for a in self.config.keys(): if not a.startswith('__'): print(f"{a}: \t", getattr(self, a)) @staticmethod def _initialize_layer(layer, gain=1e-2): for param in layer.parameters(): if param.ndim > 1: xavier_uniform_(param, gain=gain) else: constant_(param, 0) @staticmethod def _get_pos(pos, downsample): ''' get the downscaled position in 2d ''' bsz = pos.size(0) n_grid = pos.size(1) x, y = pos[..., 0], pos[..., 1] x = x.view(bsz, n_grid, n_grid) y = y.view(bsz, n_grid, n_grid) x = x[:, ::downsample, ::downsample].contiguous() y = y[:, ::downsample, ::downsample].contiguous() return torch.stack([x, y], dim=-1) def _get_setting(self): all_attr = list(self.config.keys()) + ADDITIONAL_ATTR for key in all_attr: setattr(self, key, self.config[key]) self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden) self.dropout = default(self.dropout, 0.05) self.dpo = nn.Dropout(self.dropout) if self.decoder_type == 'attention': self.num_encoder_layers += 1 self.attention_types = ['fourier', 'integral', 'local', 'global', 'cosine', 'galerkin', 'linear', 'softmax'] def _get_feature(self): if self.feat_extract_type == 'gcn' and self.num_feat_layers > 0: self.feat_extract = GCN(node_feats=self.n_hidden, edge_feats=self.edge_feats, num_gcn_layers=self.num_feat_layers, 
out_features=self.n_hidden, activation=self.graph_activation, raw_laplacian=self.raw_laplacian, debug=self.debug, ) elif self.feat_extract_type == 'gat' and self.num_feat_layers > 0: self.feat_extract = GAT(node_feats=self.n_hidden, out_features=self.n_hidden, num_gcn_layers=self.num_feat_layers, activation=self.graph_activation, debug=self.debug, ) else: self.feat_extract = Identity() def _get_scaler(self): if self.downscaler_size: self.downscaler = DownScaler(in_dim=self.node_feats, out_dim=self.n_hidden, downsample_mode=self.downsample_mode, interp_size=self.downscaler_size, dropout=self.downscaler_dropout, activation_type=self.downscaler_activation) else: self.downscaler = Identity(in_features=self.node_feats+self.spacial_dim, out_features=self.n_hidden) if self.upscaler_size: self.upscaler = UpScaler(in_dim=self.n_hidden, out_dim=self.n_hidden, upsample_mode=self.upsample_mode, interp_size=self.upscaler_size, dropout=self.upscaler_dropout, activation_type=self.upscaler_activation) else: self.upscaler = Identity() def _get_encoder(self): if self.attention_type in self.attention_types: encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden, n_head=self.n_head, attention_type=self.attention_type, dim_feedforward=self.dim_feedforward, layer_norm=self.layer_norm, attn_norm=self.attn_norm, batch_norm=self.batch_norm, pos_dim=self.pos_dim, xavier_init=self.xavier_init, diagonal_weight=self.diagonal_weight, symmetric_init=self.symmetric_init, attn_weight=self.return_attn_weight, dropout=self.encoder_dropout, ffn_dropout=self.ffn_dropout, norm_eps=self.norm_eps, debug=self.debug) elif self.attention_type == 'official': encoder_layer = TransformerEncoderLayer(d_model=self.n_hidden+self.pos_dim*self.n_head, nhead=self.n_head, dim_feedforward=self.dim_feedforward, dropout=self.encoder_dropout, batch_first=True, layer_norm_eps=self.norm_eps, ) else: raise NotImplementedError("encoder type not implemented.") self.encoder_layers = nn.ModuleList( 
[copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)]) def _get_regressor(self): if self.decoder_type == 'pointwise': self.regressor = PointwiseRegressor(in_dim=self.n_hidden, n_hidden=self.n_hidden, out_dim=self.n_targets, num_layers=self.num_regressor_layers, spacial_fc=self.spacial_fc, spacial_dim=self.spacial_dim, activation=self.regressor_activation, dropout=self.decoder_dropout, return_latent=self.return_latent, debug=self.debug) elif self.decoder_type == 'ifft2': self.regressor = SpectralRegressor(in_dim=self.n_hidden, n_hidden=self.freq_dim, freq_dim=self.freq_dim, out_dim=self.n_targets, num_spectral_layers=self.num_regressor_layers, modes=self.fourier_modes, spacial_dim=self.spacial_dim, spacial_fc=self.spacial_fc, activation=self.regressor_activation, last_activation=self.last_activation, dropout=self.decoder_dropout, return_latent=self.return_latent, debug=self.debug ) else: raise NotImplementedError("Decoder type not implemented") class FourierTransformer2DLite(nn.Module): ''' A lite model of the Fourier/Galerkin Transformer ''' def __init__(self, **kwargs): super(FourierTransformer2DLite, self).__init__() self.config = defaultdict(lambda: None, **kwargs) self._get_setting() self._initialize() def forward(self, node, edge, pos, grid=None): ''' seq_len: n, number of grid points node_feats: number of features of the inputs pos_dim: dimension of the Euclidean space - node: (batch_size, n*n, node_feats) - pos: (batch_size, n*n, pos_dim) - grid: (batch_size, n, n, pos_dim) Remark: for classic Transformer: pos_dim = n_hidden = 512 pos encodings is added to the latent representation ''' bsz = node.size(0) input_dim = node.size(-1) n_grid = grid.size(1) node = torch.cat([node.view(bsz, -1, input_dim), pos], dim=-1) x = self.feat_extract(node, edge) for encoder in self.encoder_layers: x = encoder(x, pos) x = self.dpo(x) x = x.view(bsz, n_grid, n_grid, -1) x = self.regressor(x, grid=grid) return dict(preds=x, preds_freq=None, preds_latent=None, 
attn_weights=None) def _initialize(self): self._get_feature() self._get_encoder() self._get_regressor() self.config = dict(self.config) def _get_setting(self): all_attr = list(self.config.keys()) + ADDITIONAL_ATTR for key in all_attr: setattr(self, key, self.config[key]) self.dim_feedforward = default(self.dim_feedforward, 2*self.n_hidden) self.spacial_dim = default(self.spacial_dim, self.pos_dim) self.spacial_fc = default(self.spacial_fc, False) self.dropout = default(self.dropout, 0.05) self.dpo = nn.Dropout(self.dropout) if self.decoder_type == 'attention': self.num_encoder_layers += 1 self.attention_types = ['fourier', 'integral', 'cosine', 'galerkin', 'linear', 'softmax'] def _get_feature(self): self.feat_extract = Identity(in_features=self.node_feats, out_features=self.n_hidden) def _get_encoder(self): encoder_layer = SimpleTransformerEncoderLayer(d_model=self.n_hidden, n_head=self.n_head, dim_feedforward=self.dim_feedforward, layer_norm=self.layer_norm, attention_type=self.attention_type, attn_norm=self.attn_norm, norm_type=self.norm_type, xavier_init=self.xavier_init, diagonal_weight=self.diagonal_weight, dropout=self.encoder_dropout, ffn_dropout=self.ffn_dropout, pos_dim=self.pos_dim, debug=self.debug) self.encoder_layers = nn.ModuleList( [copy.deepcopy(encoder_layer) for _ in range(self.num_encoder_layers)]) def _get_regressor(self): self.regressor = SpectralRegressor(in_dim=self.n_hidden, n_hidden=self.n_hidden, freq_dim=self.freq_dim, out_dim=self.n_targets, num_spectral_layers=self.num_regressor_layers, modes=self.fourier_modes, spacial_dim=self.spacial_dim, spacial_fc=self.spacial_fc, dim_feedforward=self.freq_dim, activation=self.regressor_activation, dropout=self.decoder_dropout, ) if __name__ == '__main__': for graph in ['gcn', 'gat']: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') config = defaultdict(lambda: None, node_feats=1, edge_feats=5, pos_dim=1, n_targets=1, n_hidden=96, num_feat_layers=2, num_encoder_layers=2, 
n_head=2, pred_len=0, n_freq_targets=0, dim_feedforward=96*2, feat_extract_type=graph, graph_activation=True, raw_laplacian=True, attention_type='fourier', # no softmax xavier_init=1e-4, diagonal_weight=1e-2, symmetric_init=False, layer_norm=True, attn_norm=False, batch_norm=False, spacial_residual=False, return_attn_weight=True, seq_len=None, bulk_regression=False, decoder_type='ifft', freq_dim=64, num_regressor_layers=2, fourier_modes=16, spacial_dim=1, spacial_fc=True, dropout=0.1, debug=False, ) ft = SimpleTransformer(**config) ft.to(device) batch_size, seq_len = 8, 512 summary(ft, input_size=[(batch_size, seq_len, 1), (batch_size, seq_len, seq_len, 5), (batch_size, seq_len, 1), (batch_size, seq_len, 1)], device=device) layer = TransformerEncoderLayer(d_model=128, nhead=4) print(layer.__class__)
nilq/baby-python
python
# URL routing for the `authen` app: user CRUD-ish endpoints plus token login.
# NOTE(review): `django.conf.urls.url` is deprecated (removed in Django 4.0);
# migrating to `django.urls.re_path` would be a drop-in replacement — confirm
# the project's Django version before changing.
from django.conf.urls import url
from authen import views
from rest_framework.authtoken.views import obtain_auth_token

urlpatterns = [
    # Numeric-pk detail view. Must stay ABOVE the catch-all `.+` pattern
    # below: Django matches patterns in order, and `.+` would also match
    # digits.
    url(r'^api/user/(?P<pk>[0-9]+)/$', views.person_detail),
    url(r'^api/add_group/(?P<pk>[0-9]+)/$', views.add_group),
    # Collection endpoint (no pk).
    url(r'^api/user/$', views.person_list),
    url(r'^$', views.home),
    url(r'^api/add_owner/$', views.add_owner),
    # DRF's built-in view: POST username/password, returns an auth token.
    url(r'^api/login/$', obtain_auth_token),
    # Catch-all pk — presumably an email address lookup, given the view
    # name; verify against `views.get_person_by_email`. Kept last so it
    # never shadows the numeric-pk route above.
    url(r'^api/user/(?P<pk>.+)/$', views.get_person_by_email),
]
nilq/baby-python
python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the streaming (online) DeepSpeech2 model."""
import unittest

import numpy as np
import paddle

from deepspeech.models.ds2_online import DeepSpeech2ModelOnline


class TestDeepSpeech2ModelOnline(unittest.TestCase):
    """Smoke tests: forward loss shape, and full-utterance vs. chunked
    encoder consistency.

    Tests 1-5 previously duplicated two identical configurations verbatim;
    they now share ``_make_model``/``_check_loss`` but keep their original
    names so test selection by name still works.
    """

    def setUp(self):
        paddle.set_device('cpu')
        self.batch_size = 2
        self.feat_dim = 161
        max_len = 210

        # Random audio batch, (B, T, D); force the last item to the full
        # length so at least one sequence exercises max_len.
        audio = np.random.randn(self.batch_size, max_len, self.feat_dim)
        audio_len = np.random.randint(max_len, size=self.batch_size)
        audio_len[-1] = max_len
        # Tiny fixed transcript batch, (B, U).
        text = np.array([[1, 2], [1, 2]])
        text_len = np.array([2] * self.batch_size)

        self.audio = paddle.to_tensor(audio, dtype='float32')
        self.audio_len = paddle.to_tensor(audio_len, dtype='int64')
        self.text = paddle.to_tensor(text, dtype='int32')
        self.text_len = paddle.to_tensor(text_len, dtype='int64')

    def _make_model(self, **overrides):
        """Build a model with the shared test config, applying `overrides`."""
        config = dict(
            feat_size=self.feat_dim,
            dict_size=10,
            num_conv_layers=2,
            num_rnn_layers=3,
            rnn_size=1024,
            num_fc_layers=2,
            fc_layers_size_list=[512, 256],
            use_gru=False)
        config.update(overrides)
        return DeepSpeech2ModelOnline(**config)

    def _check_loss(self, model):
        """Run one forward pass and assert the loss is a scalar."""
        loss = model(self.audio, self.audio_len, self.text, self.text_len)
        self.assertEqual(loss.numel(), 1)

    def _check_chunk_consistency(self, use_gru):
        """Full-utterance and chunk-by-chunk encoding must agree.

        Compares encoder outputs and final hidden state; the final cell
        state is only compared for LSTM (GRU exposes no cell state).
        """
        model = self._make_model(
            num_rnn_layers=1, rnn_direction='forward', use_gru=use_gru)
        model.eval()
        paddle.device.set_device("cpu")
        de_ch_size = 8

        eouts, eouts_lens, final_state_h_box, final_state_c_box = \
            model.encoder(self.audio, self.audio_len)
        (eouts_by_chk_list, eouts_lens_by_chk_list, final_state_h_box_chk,
         final_state_c_box_chk) = model.encoder.forward_chunk_by_chunk(
             self.audio, self.audio_len, de_ch_size)
        eouts_by_chk = paddle.concat(eouts_by_chk_list, axis=1)
        # Computed but unasserted in the original test; kept as a smoke
        # check that chunk lengths can be summed.
        eouts_lens_by_chk = paddle.add_n(eouts_lens_by_chk_list)

        # The chunked pass may emit trailing padded frames; compare only up
        # to the full-utterance decode length.
        decode_max_len = eouts.shape[1]
        eouts_by_chk = eouts_by_chk[:, :decode_max_len, :]
        self.assertEqual(paddle.allclose(eouts_by_chk, eouts), True)
        self.assertEqual(
            paddle.allclose(final_state_h_box, final_state_h_box_chk), True)
        if use_gru is False:
            self.assertEqual(
                paddle.allclose(final_state_c_box, final_state_c_box_chk),
                True)

    def test_ds2_1(self):
        self._check_loss(self._make_model(use_gru=False))

    def test_ds2_2(self):
        self._check_loss(self._make_model(use_gru=True))

    # NOTE: tests 3-5 were byte-for-byte duplicates of tests 1/2 in the
    # original file; kept (delegating to the same helpers) so the suite's
    # test count and names are unchanged.
    def test_ds2_3(self):
        self._check_loss(self._make_model(use_gru=False))

    def test_ds2_4(self):
        self._check_loss(self._make_model(use_gru=True))

    def test_ds2_5(self):
        self._check_loss(self._make_model(use_gru=False))

    def test_ds2_6(self):
        # Bidirectional RNN variant.
        self._check_loss(
            self._make_model(rnn_direction='bidirect', use_gru=False))

    def test_ds2_7(self):
        self._check_chunk_consistency(use_gru=False)

    def test_ds2_8(self):
        self._check_chunk_consistency(use_gru=True)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
# Copyright (c) 2021, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root
# or https://opensource.org/licenses/BSD-3-Clause
"""Unit tests for the CUDA action sampler."""

import os
import unittest

import numpy as np
import torch

from warp_drive.managers.data_manager import CUDADataManager
from warp_drive.managers.function_manager import CUDAFunctionManager, CUDASampler
from warp_drive.utils.common import get_project_root
from warp_drive.utils.constants import Constants
from warp_drive.utils.data_feed import DataFeed

# Force PyTorch to initialize its CUDA context before the CUDA function
# manager loads the fatbin (module-level side effect, kept deliberately).
pytorch_cuda_init_success = torch.cuda.FloatTensor(8)

_CUBIN_FILEPATH = f"{get_project_root()}/warp_drive/cuda_bin"
_ACTIONS = Constants.ACTIONS


class TestActionSampler(unittest.TestCase):
    """Statistical tests for CUDASampler: sampled action frequencies must
    match the supplied categorical distributions, and per-thread RNG streams
    must be independent.
    """

    # Number of sampling rounds used to collect frequency statistics.
    _NUM_SAMPLES = 10000

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.dm = CUDADataManager(num_agents=5, episode_length=1, num_envs=2)
        self.fm = CUDAFunctionManager(
            num_agents=int(self.dm.meta_info("n_agents")),
            num_envs=int(self.dm.meta_info("n_envs")),
        )
        self.fm.load_cuda_from_binary_file(f"{_CUBIN_FILEPATH}/test_build.fatbin")
        self.sampler = CUDASampler(function_manager=self.fm)
        self.sampler.init_random(seed=None)

    def _register_zero_actions(self, name, num_agents, num_actions):
        """Push an all-zero action array named `name` and register it."""
        tensor = DataFeed()
        tensor.add_data(name=name, data=[[0] * num_agents, [0] * num_agents])
        self.dm.push_data_to_device(tensor, torch_accessible=True)
        self.assertTrue(self.dm.is_data_on_device_via_torch(name))
        self.sampler.register_actions(self.dm, name, num_actions)

    def _sample_repeatedly(self, name, distribution, num_agents):
        """Sample `_NUM_SAMPLES` rounds from `distribution`.

        Returns a (_NUM_SAMPLES, n_envs, num_agents) int32 numpy array of
        sampled actions.
        """
        dist_cuda = torch.from_numpy(distribution).float().cuda()
        actions_cuda = torch.from_numpy(
            np.empty((self._NUM_SAMPLES, 2, num_agents), dtype=np.int32)
        ).cuda()
        for i in range(self._NUM_SAMPLES):
            self.sampler.sample(self.dm, dist_cuda, action_name=name)
            actions_cuda[i] = self.dm.data_on_device_via_torch(name)
        return actions_cuda.cpu().numpy()

    def _assert_matches_distribution(self, actions_env, probs_per_agent):
        """Empirical action frequencies match the given probabilities.

        Probabilities of exactly 0 or 1 are checked exactly; anything in
        between is checked as sample mean == given mean with deviation
        < 10% of the mean (the rule the original hand-written deltas
        approximated, applied uniformly here).
        """
        n = float(self._NUM_SAMPLES)
        for agent_id, probs in enumerate(probs_per_agent):
            agent_actions = actions_env[:, agent_id]
            for action_id, p in enumerate(probs):
                hits = (agent_actions == action_id).sum()
                if p == 0.0 or p == 1.0:
                    self.assertEqual(hits, int(p * n))
                else:
                    self.assertAlmostEqual(hits / n, p, delta=0.1 * p)

    def test_agent_action_distribution(self):
        name = f"{_ACTIONS}_a"
        self._register_zero_actions(name, num_agents=5, num_actions=3)

        agent_distribution = np.array(
            [
                [
                    [0.333, 0.333, 0.333],
                    [0.2, 0.5, 0.3],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0],
                ],
                [
                    [0.1, 0.7, 0.2],
                    [0.7, 0.2, 0.1],
                    [0.5, 0.5, 0.0],
                    [0.0, 0.5, 0.5],
                    [0.5, 0.0, 0.5],
                ],
            ]
        )
        actions = self._sample_repeatedly(name, agent_distribution, num_agents=5)
        self._assert_matches_distribution(actions[:, 0], agent_distribution[0])
        self._assert_matches_distribution(actions[:, 1], agent_distribution[1])

    def test_planner_action_distribution(self):
        name = f"{_ACTIONS}_p"
        self._register_zero_actions(name, num_agents=1, num_actions=4)

        planner_distribution = np.array(
            [[[0.25, 0.25, 0.25, 0.25]], [[0.10, 0.60, 0.15, 0.15]]]
        )
        actions = self._sample_repeatedly(name, planner_distribution, num_agents=1)
        self._assert_matches_distribution(actions[:, 0], planner_distribution[0])
        self._assert_matches_distribution(actions[:, 1], planner_distribution[1])

    def test_seed_randomness_across_threads(self):
        name = f"{_ACTIONS}_s"
        self._register_zero_actions(name, num_agents=5, num_actions=4)

        # Uniform distribution for every agent in both envs.
        uniform_distribution = np.full((2, 5, 4), 0.25)
        actions = self._sample_repeatedly(name, uniform_distribution, num_agents=5)
        # If all CUDA threads shared one RNG stream, per-step actions across
        # agents would collapse to identical values (std -> 0); a healthy
        # independent-stream std for uniform {0..3} stays well above 0.9.
        self.assertTrue(actions.std(axis=-1).reshape(-1).mean() > 0.9)
nilq/baby-python
python
# Copyright (c) 2017 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock
from oslo_config import cfg

from vmware_nsx.db import nsxv_db
from vmware_nsx.db import nsxv_models
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.tests.unit.nsx_v import test_plugin

# Dotted path of the plugin class under test.
PLUGIN_NAME = 'vmware_nsx.plugin.NsxVPlugin'


# Run all relevant plugin tests with the metadata proxy enabled.  These
# tests do not specifically exercise the md_proxy; they just verify that
# nothing gets broken when it is turned on.
class NsxVPluginWithMdV2TestCase(test_plugin.NsxVPluginV2TestCase):
    """Base test case that enables the NSX-V metadata proxy via config.

    Subclasses re-run the standard plugin test suites on top of this
    configuration.  Tests that count networks/subnets/routers/firewall
    rules are skipped in subclasses because the metadata proxy creates
    additional internal objects that break those counts.
    """

    def setUp(self, plugin=PLUGIN_NAME,
              ext_mgr=None,
              service_plugins=None):
        # Add the metadata configuration
        cfg.CONF.set_override('mgt_net_moid', 'net-1', group="nsxv")
        cfg.CONF.set_override('mgt_net_proxy_ips', ['2.2.2.2'],
                              group="nsxv")
        cfg.CONF.set_override('mgt_net_proxy_netmask', '255.255.255.0',
                              group="nsxv")
        cfg.CONF.set_override('mgt_net_default_gateway', '1.1.1.1',
                              group="nsxv")
        cfg.CONF.set_override('nova_metadata_ips', ['3.3.3.3'],
                              group="nsxv")

        # Add some mocks required for the md code: edge vNIC allocation
        # and internal-interface updates would otherwise hit the backend.
        mock_alloc_vnic = mock.patch.object(nsxv_db, 'allocate_edge_vnic')
        mock_alloc_vnic_inst = mock_alloc_vnic.start()
        mock_alloc_vnic_inst.return_value = nsxv_models.NsxvEdgeVnicBinding
        mock.patch.object(edge_utils, "update_internal_interface").start()

        super(NsxVPluginWithMdV2TestCase, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr,
            service_plugins=service_plugins)


class TestNetworksWithMdV2(test_plugin.TestNetworksV2,
                           NsxVPluginWithMdV2TestCase):

    # Skip all the tests that count networks, as there is an
    # additional internal network for metadata.
    def test_list_networks_with_sort_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_without_pk_in_fields_pagination_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_sort_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_shared(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_without_pk_in_fields_pagination_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_parameters(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_pagination_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_pagination_reverse_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_pagination_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_pagination_reverse_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_networks_with_fields(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_networks_bulk_wrong_input(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_networks_bulk_native_plugin_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_networks_bulk_native_quotas(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_networks_bulk_emulated_plugin_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")


class TestSubnetsWithMdV2(test_plugin.TestSubnetsV2,
                          NsxVPluginWithMdV2TestCase):

    # Skip all the tests that count subnets, as there is an
    # additional internal subnet for metadata.
    def test_list_subnets_with_sort_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets_with_sort_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets_with_pagination_native(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets_with_parameter(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets_with_pagination_emulated(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets_shared(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_list_subnets(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_subnets_bulk_native_plugin_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_subnets_bulk_native_quotas(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_subnets_bulk_emulated_plugin_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")


class TestExclusiveRouterWithMdTestCase(
    test_plugin.TestExclusiveRouterTestCase,
    NsxVPluginWithMdV2TestCase):

    # Skip all the tests that count firewall rules, as there are
    # some MD specific rules
    def test_router_set_gateway_with_nosnat(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_different_tenants_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_with_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    # Skip all the tests that count routers or ports, as there is
    # an additional router for the md proxy
    def test_router_list_with_pagination_reverse(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_sort(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_pagination(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_add_interface_delete_port_after_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_router_fail_at_the_backend(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_address_scope_snat_rules(self):
        self.skipTest("The test is not suitable for the metadata test case")


class TestVdrWithMdTestCase(test_plugin.TestVdrTestCase,
                            NsxVPluginWithMdV2TestCase):

    # Skip all the tests that count firewall rules, as there are
    # some MD specific rules
    def test_router_set_gateway_with_nosnat(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_different_tenants_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_with_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    # Skip all the tests that count routers or ports, as there is
    # an additional router for the md proxy
    def test_router_list_with_pagination_reverse(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_sort(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_pagination(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_add_interface_delete_port_after_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_router_fail_at_the_backend(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")

    # TODO(asarfaty): fix some mocks so those tests will pass
    def test_router_plr_binding_default_size(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_plr_binding_configured_size(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_plr_binding_default_az(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_plr_binding_with_az(self):
        self.skipTest("The test is not suitable for the metadata test case")


class TestSharedRouterWithMdTestCase(test_plugin.TestSharedRouterTestCase,
                                     NsxVPluginWithMdV2TestCase):

    # Skip all the tests that count firewall rules, as there are
    # some MD specific rules
    def test_router_set_gateway_with_nosnat(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_routers_set_gateway_with_nosnat(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_different_tenants_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_interfaces_with_update_firewall(self):
        self.skipTest("The test is not suitable for the metadata test case")

    # Skip all the tests that count routers or ports, as there is
    # an additional router for the md proxy
    def test_router_list_with_pagination_reverse(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_sort(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list_with_pagination(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_list(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_router_add_interface_delete_port_after_failure(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_create_router_fail_at_the_backend(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_subnet_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")

    def test_floatingip_delete_router_intf_with_port_id_returns_409(self):
        self.skipTest("The test is not suitable for the metadata test case")
nilq/baby-python
python
import re
from model.contact import Contact


def test_contact_info_from_home_page(app, db):
    """Compare the contact list rendered on the home page with the DB.

    For every contact (both lists sorted by id) the address, names, and
    the merged phone/email columns shown on the home page must match what
    the database holds after applying the same normalisation the UI does.
    """
    app.navigation.open_home_page()
    contact_from_home_page = sorted(app.contact.get_contact_list(),
                                    key=Contact.id_or_max)

    def clean(contact):
        # Normalise a DB contact the way the home page renders it:
        # names/address are stripped of surrounding whitespace.
        # NOTE(review): `work` is not copied here, but
        # merge_phones_like_on_home_page() below reads contact.work --
        # verify that Contact defaults `work` to None, otherwise DB work
        # numbers are silently dropped from the comparison.
        return Contact(id=contact.id,
                       firstname=contact.firstname.strip(),
                       lastname=contact.lastname.strip(),
                       address=contact.address.strip(),
                       home=contact.home,
                       mobile=contact.mobile,
                       phone2=contact.phone2,
                       email=contact.email,
                       email2=contact.email2,
                       email3=contact.email3)

    contact_from_db_list = list(map(clean, db.get_contact_list()))
    print("Contacts_from_home_page>>>>", contact_from_home_page)
    print("Contacts_from_DB>>>>", contact_from_db_list)
    # Walk both (sorted) lists in lockstep and compare field by field.
    i = 0
    for item in contact_from_home_page:
        assert item.address == contact_from_db_list[i].address
        assert item.lastname == contact_from_db_list[i].lastname.strip()
        assert item.firstname == contact_from_db_list[i].firstname.strip()
        assert item.all_phones_from_home_page == \
            merge_phones_like_on_home_page(contact_from_db_list[i])
        assert item.all_emails_from_home_page == \
            merge_emails_like_on_home_page(contact_from_db_list[i])
        i += 1


def clear(s):
    # Strip the characters the home page omits from phone numbers:
    # parentheses, spaces, and dashes.
    return re.sub("[() -]", "", s)


def merge_phones_like_on_home_page(contact):
    # Reproduce the home page's phone column: non-empty phones, cleaned,
    # joined with newlines in display order.
    return "\n".join(filter(lambda x: x != "",
                            map(lambda x: clear(x),
                                filter(lambda x: x is not None,
                                       [contact.home, contact.mobile,
                                        contact.work, contact.phone2]))))


def merge_emails_like_on_home_page(contact):
    # Reproduce the home page's email column: non-empty emails, cleaned,
    # joined with newlines in display order.
    return "\n".join(filter(lambda x: x != "",
                            map(lambda x: clear(x),
                                filter(lambda x: x is not None,
                                       [contact.email, contact.email2,
                                        contact.email3]))))


# def test_contacts(app, ormdb):
#     random_index = randrange(app.contact.count())
#     # взять все контакты с главной страницы
#     contact_from_home_page = app.contact.get_contact_list()
#     # взять все записи конатктов из бд
#     contact_from_db = ormdb.get_contact_list()
#     # сравниваем списки, сортируя
#     assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)


# def test_contact_info_on_main_page(app):
#     if app.contact.amount() == 0:
#         app.contact.create(
#             Contact(firstname="TestTest", middlename="Test", lastname="Testing", nickname="testing",
#                     title="test", company="Test test", address="Spb", home="000222111",
#                     mobile="444555222", work="99966655", fax="11122255", email="test@tesr.ru",
#                     email2="test2@test.ru", email3="test3@test.ru", homepage="www.test.ru", bday="15",
#                     bmonth="May", byear="1985", aday="14", amonth="June", ayear="1985",
#                     address2="Spb", phone2="111111", notes="Friend"))
#     random_index = randrange(app.contact.amount())
#     contact_from_home_page = app.contact.get_contact_list()[random_index]
#     contact_from_edit_page = app.contact.get_contact_info_from_edit_page(random_index)
#     assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
#     assert contact_from_home_page.firstname == contact_from_edit_page.firstname
#     assert contact_from_home_page.lastname == contact_from_edit_page.lastname
#     assert contact_from_home_page.address == contact_from_edit_page.address
#     assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
nilq/baby-python
python
# Demo of the built-in list API: removal, copying, searching, and sorting.
x = [0.0, 3.0, 5.0, 2.5, 3.7]
print(type(x))

# Drop the element at index 2 (the 5.0), then the first element equal
# to 2.5, then push 1.2 onto the end.
x.pop(2)
print(x)
x.remove(2.5)
print(x)
x.append(1.2)
print(x)

# Take an independent shallow copy so the mutations below leave x alone.
y = list(x)
print(y)

# Query helpers: occurrence count of 0.0 and position of 3.7.
print(y.count(0.0))
print(y.index(3.7))

# Order the copy ascending, then flip it to descending, then empty it.
y.sort()
print(y)
y.reverse()
print(y)
y.clear()
print(y)
nilq/baby-python
python
import datetime
import itertools
import functools
import io
import os
import pathlib
import string

import tqdm
import pytz
import requests
import apiclient.http
import fuzzywuzzy.fuzz

from .info import Conference, ConferenceInfoSource, Session

from apiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow

# Video publisher variables
YOUTUBE_SCOPE = "https://www.googleapis.com/auth/youtube"
YOUTUBE_UPLOAD_SCOPE = "https://www.googleapis.com/auth/youtube.upload"

# Conference start date, taken from the environment (YEAR/MONTH/DAY).
FIRST_DATE = datetime.date(
    int(os.environ["YEAR"]), int(os.environ["MONTH"]), int(os.environ["DAY"])
)
CONFERENCE_NAME = f"PyCon Taiwan {FIRST_DATE.year}"
TIMEZONE_TAIPEI = pytz.timezone("Asia/Taipei")


def guess_language(s: str) -> str:
    """Guess language of a string.

    The only two possible return values are `zh-hant` and `en`.

    Nothing scientific, just a vaguely educated guess. If more than half
    of the string is ASCII, probably English; otherwise we assume it's
    Chinese.
    """
    if sum(c in string.ascii_letters for c in s) > len(s) / 2:
        return "en"
    return "zh-hant"


def build_body(session: Session) -> dict:
    """Build the YouTube `videos.insert` request body for a session."""
    title = session.render_video_title()
    return {
        "snippet": {
            "title": title,
            "description": session.render_video_description(),
            "tags": [session.conference.name, "PyCon Taiwan", "PyCon", "Python"],
            "defaultAudioLanguage": session.lang,
            "defaultLanguage": guess_language(title),
            "categoryId": "28",
        },
        "status": {
            "license": "creativeCommon",
            "privacyStatus": "unlisted",
            "publishAt": None,
        },
        "recordingDetails": {
            "recordingDate": format_datetime_for_google(session.start)
        },
    }


def format_datetime_for_google(dt: datetime.datetime) -> str:
    """Format a datetime into ISO format for Google API.

    Google API is wierdly strict on the format here. It REQUIRES exactly
    three digits of milliseconds, and only accepts "Z" suffix (not +00:00),
    so we need to roll our own formatting instead relying on `isoformat()`.
    """
    return dt.astimezone(pytz.utc).strftime(r"%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"


def get_match_ratio(session: Session, path: pathlib.Path) -> float:
    """Fuzzy similarity (0-100) between a session title and a file stem."""
    return fuzzywuzzy.fuzz.ratio(session.title, path.stem)


def choose_video(session: Session, video_paths: list) -> pathlib.Path:
    """Look through the file list and choose the one that "looks most like it".

    Raises:
        ValueError: if no file scores at least 70.
    """
    score, match = max((get_match_ratio(session, p), p) for p in video_paths)
    if score < 70:
        raise ValueError("no match")
    return match


def media_batch_reader(file_path, chuncksize=64 * (1 << 20)):
    """Read *file_path* fully into memory in chunks, with a progress bar.

    Args:
        file_path: pathlib.Path of the video file.
        chuncksize: bytes per read (default 64 MiB).

    Returns:
        The complete file content as bytes.
    """
    # NOTE: "Vedio" typo kept intentionally to preserve the exact log output.
    print(f"Reading Vedio from:\n\t{file_path}")
    out = io.BytesIO()
    # Ceiling division so the bar can reach `total` even when the size is
    # not an exact multiple of the chunk size (floor under-counted by one).
    total = -(-file_path.stat().st_size // chuncksize)
    with open(file_path, "rb") as f:
        # BUG FIX: the original passed the bare functools.partial object to
        # tqdm, which is not iterable and raised TypeError at runtime.
        # iter(callable, sentinel) calls f.read repeatedly until it returns
        # b"" at EOF, which is the intended chunked-read loop.
        reader = iter(functools.partial(f.read, chuncksize), b"")
        for block in tqdm.tqdm(reader, total=total):
            out.write(block)
    return out.getvalue()


def upload_video():
    """Upload all matching videos under $VIDEO_ROOT to YouTube.

    Reads conference session info from $URL, fuzzy-matches each session to
    a local video file, uploads it with a resumable request, and moves the
    uploaded file into a `done/` subdirectory.
    """
    print("Uploading videos...")

    # Build the authenticated YouTube connection (interactive OAuth flow).
    flow = InstalledAppFlow.from_client_secrets_file(
        os.environ["OAUTH2_CLIENT_SECRET"], scopes=[YOUTUBE_UPLOAD_SCOPE]
    )
    credentials = flow.run_console()
    youtube = build("youtube", "v3", credentials=credentials)

    # Collect candidate video files.
    VIDEO_ROOT = pathlib.Path(os.environ["VIDEO_ROOT"]).resolve()
    print(f"Reading video files from {VIDEO_ROOT}")
    VIDEO_PATHS = list(
        itertools.chain.from_iterable(
            VIDEO_ROOT.glob(f"*{ext}") for ext in (".avi", ".mp4")
        )
    )
    # Fail fast when the directory has no videos at all.
    assert VIDEO_PATHS
    print(f" {len(VIDEO_PATHS)} files loaded")
    DONE_DIR_PATH = VIDEO_ROOT.joinpath("done")
    DONE_DIR_PATH.mkdir(parents=True, exist_ok=True)

    source = ConferenceInfoSource(
        requests.get(os.environ["URL"]).json(),
        Conference(CONFERENCE_NAME, FIRST_DATE, TIMEZONE_TAIPEI),
    )

    for session in source.iter_sessions():
        body = build_body(session)
        try:
            vid_path = choose_video(session, VIDEO_PATHS)
        except ValueError:
            print(f"No match, ignoring {session.title}")
            continue
        print(f"Uploading {session.title}")
        print(f" {vid_path}")
        media = apiclient.http.MediaInMemoryUpload(
            media_batch_reader(vid_path), resumable=True
        )
        request = youtube.videos().insert(
            part=",".join(body.keys()), body=body, media_body=media
        )
        # Drive the resumable upload, mirroring its progress (0-100) onto
        # a tqdm bar via deltas.
        with tqdm.tqdm(total=100, ascii=True) as progressbar:
            prev = 0
            while True:
                status, response = request.next_chunk()
                if status:
                    curr = int(status.progress() * 100)
                    progressbar.update(curr - prev)
                    prev = curr
                if response:
                    break

        print(f" Done, as: https://youtu.be/{response['id']}")
        new_name = DONE_DIR_PATH.joinpath(vid_path.name)
        print(f" {vid_path} -> {new_name}")
        vid_path.rename(new_name)
nilq/baby-python
python
# Show a happy or sad face on the micro:bit display based on a Kitronik
# clip-detector reading.
from microbit import display
from microbit import Image

from KitronikClipDetector import Detector

# Wrapper object for the Kitronik clip-detector board.
sensor = Detector()

# Poll forever: happy face while the sensor reports "Dark" on pin P2,
# sad face otherwise.
# NOTE(review): the `is True` comparison assumes readDigitalSensor returns
# a real bool; it would misbehave on a truthy non-bool -- confirm against
# the KitronikClipDetector API.
while True:
    if sensor.readDigitalSensor("P2", "Dark") is True:
        display.show(Image.HAPPY)
    else:
        display.show(Image.SAD)
nilq/baby-python
python
"""Urls for the Zinnia entries short link""" from django.conf.urls import url from django.conf.urls import patterns from zinnia.views.shortlink import EntryShortLink urlpatterns = patterns( '', url(r'^(?P<pk>\d+)/$', EntryShortLink.as_view(), name='zinnia_entry_shortlink'), )
nilq/baby-python
python
'''
SOLED Scrapy settings

For simplicity, this file contains only settings considered important or
commonly used. You can find more settings consulting the documentation:

    http://doc.scrapy.org/en/latest/topics/settings.html
    http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
'''

BOT_NAME = 'soledbot'

SPIDER_MODULES = ['soledbot.spiders']
NEWSPIDER_MODULE = 'soledbot.spiders'

# Identity of the bot; to avoid bot detection this should rotate among
# different valid values.
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'

# Stay legal: obey robots.txt.
ROBOTSTXT_OBEY = True

# Delay between requests sent by a spider.
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
DOWNLOAD_DELAY = 3.0

# Disable cookies to prevent bot tracking (some websites might not allow this).
COOKIES_ENABLED = False

CONCURRENT_REQUESTS = 16  # Default is 16

###################### PROXY SETTINGS ######################

# Retry many times since proxies often fail
RETRY_TIMES = 10
# Retry on most error codes since proxies fail for different reasons
RETRY_HTTP_CODES = [500, 503, 504, 400, 403, 404, 408]

DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 90,
    'scrapy_proxies.RandomProxy': 100,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 110,
}

# Proxy list file; one proxy per line, format http://host1:port
PROXY_LIST = './proxy/proxy_list.txt'

# Proxy mode
# 0 = Every requests have different proxy
# 1 = Take only one proxy from the list and assign it to every requests
# 2 = Put a custom proxy to use in the settings
PROXY_MODE = 0

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'quotesbot.middlewares.MyCustomSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'quotesbot.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
#ITEM_PIPELINES = {
#    'quotesbot.pipelines.SomePipeline': 300,
#}

# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
nilq/baby-python
python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

"""conftest.py contains configuration for pytest."""

import functools
import glob
import itertools
import os

import pytest

import gluonnlp as nlp


###############################################################################
# Datasets
###############################################################################
@pytest.fixture(scope="session")
def wikitext2_train_and_counter():
    # Session-scoped: the dataset is downloaded/loaded once per test run.
    path = os.path.join('tests', 'data', 'wikitext-2')
    data = nlp.data.WikiText2(segment='train', root=path)
    counter = nlp.data.utils.Counter(data)
    return data, counter


@pytest.fixture(scope="session")
def wikitext2_test_and_counter():
    path = os.path.join('tests', 'data', 'wikitext-2')
    data = nlp.data.WikiText2(segment='test', root=path)
    counter = nlp.data.utils.Counter(data)
    return data, counter


@pytest.fixture(scope="session")
def wikitext2_val_and_counter():
    path = os.path.join('tests', 'data', 'wikitext-2')
    data = nlp.data.WikiText2(segment='val', root=path)
    counter = nlp.data.utils.Counter(data)
    return data, counter


###############################################################################
# Stream
###############################################################################
@pytest.fixture(params=["prefetch_process", "prefetch_thread", "none"])
def stream_identity_wrappers(request):
    """DataStream wrappers that don't change the content of a Stream.

    All DataStreams included in Gluon-NLP should support being wrapped by
    one of the wrappers returned by this test fixture.

    When writing a test to test some Stream, make sure to parameterize it
    by stream_identity_wrappers so that the stream is tested with all
    possible stream wrappers.
    """
    if request.param == "prefetch_process":
        return functools.partial(
            nlp.data.PrefetchingStream, worker_type='process')
    elif request.param == "prefetch_thread":
        return functools.partial(
            nlp.data.PrefetchingStream, worker_type='thread')
    elif request.param == "none":
        # Identity wrapper: return the stream unchanged.
        return lambda x: x
    else:
        raise RuntimeError


@pytest.fixture(scope="session")
def wikitext2_simpledatasetstream_skipempty_and_counter(
        wikitext2_train_and_counter, wikitext2_test_and_counter,
        wikitext2_val_and_counter):
    # Depends on the three dataset fixtures above only to ensure the
    # *.tokens files exist on disk before globbing for them.
    token_path = os.path.join('tests', 'data', 'wikitext-2/*.tokens')
    assert len(glob.glob(token_path)) == 3
    stream = nlp.data.SimpleDatasetStream(
        nlp.data.CorpusDataset, token_path, skip_empty=True,
        eos=nlp._constants.EOS_TOKEN)
    counter = nlp.data.Counter(
        itertools.chain.from_iterable(itertools.chain.from_iterable(stream)))
    return stream, counter


@pytest.fixture(scope="session")
def wikitext2_simpledatasetstream_skipempty_flatten_and_counter(
        wikitext2_train_and_counter, wikitext2_test_and_counter,
        wikitext2_val_and_counter):
    token_path = os.path.join('tests', 'data', 'wikitext-2/*.tokens')
    assert len(glob.glob(token_path)) == 3
    stream = nlp.data.SimpleDatasetStream(
        nlp.data.CorpusDataset, token_path, flatten=True, skip_empty=True,
        eos=nlp._constants.EOS_TOKEN)
    counter = nlp.data.Counter(
        itertools.chain.from_iterable(itertools.chain.from_iterable(stream)))
    return stream, counter
python
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Serialization helpers for the Amazon Record protobuf / RecordIO format."""
from __future__ import absolute_import

import io
import struct
import sys

import numpy as np
from scipy.sparse import issparse

from sagemaker.amazon.record_pb2 import Record


class numpy_to_record_serializer(object):
    """Serialize a numpy array to RecordIO-wrapped Record protobufs."""

    def __init__(self, content_type="application/x-recordio-protobuf"):
        """
        Args:
            content_type: MIME type advertised for the serialized payload.
        """
        self.content_type = content_type

    def __call__(self, array):
        """Serialize *array* (1D or 2D) into an in-memory RecordIO buffer.

        Args:
            array: numpy array; a 1D array is treated as a single row.

        Returns:
            io.BytesIO positioned at the start of the serialized data.
        """
        if len(array.shape) == 1:
            array = array.reshape(1, array.shape[0])
        assert len(array.shape) == 2, "Expecting a 1 or 2 dimensional array"
        buf = io.BytesIO()
        write_numpy_to_dense_tensor(buf, array)
        buf.seek(0)
        return buf


class record_deserializer(object):
    """Deserialize a RecordIO stream back into Record protobuf objects."""

    def __init__(self, accept="application/x-recordio-protobuf"):
        """
        Args:
            accept: MIME type accepted from the endpoint response.
        """
        self.accept = accept

    def __call__(self, stream, content_type):
        """Read all records from *stream*, always closing it afterwards.

        Args:
            stream: file-like response body.
            content_type: unused; present for the deserializer interface.
        """
        try:
            return read_records(stream)
        finally:
            stream.close()


def _write_feature_tensor(resolved_type, record, vector):
    """Append *vector* to the record's feature tensor of *resolved_type*.

    Args:
        resolved_type: "Int32", "Float64" or "Float32" (see _resolve_type).
        record: Record protobuf being populated.
        vector: iterable of feature values for one row.
    """
    if resolved_type == "Int32":
        record.features["values"].int32_tensor.values.extend(vector)
    elif resolved_type == "Float64":
        record.features["values"].float64_tensor.values.extend(vector)
    elif resolved_type == "Float32":
        record.features["values"].float32_tensor.values.extend(vector)


def _write_label_tensor(resolved_type, record, scalar):
    """Write the single label value *scalar* into the record's label tensor.

    Args:
        resolved_type: "Int32", "Float64" or "Float32".
        record: Record protobuf being populated.
        scalar: label value for one row.
    """
    if resolved_type == "Int32":
        record.label["values"].int32_tensor.values.extend([scalar])
    elif resolved_type == "Float64":
        record.label["values"].float64_tensor.values.extend([scalar])
    elif resolved_type == "Float32":
        record.label["values"].float32_tensor.values.extend([scalar])


def _write_keys_tensor(resolved_type, record, vector):
    """Append sparse column indices *vector* to the feature tensor's keys.

    Args:
        resolved_type: "Int32", "Float64" or "Float32".
        record: Record protobuf being populated.
        vector: column indices of the non-zero entries of one row.
    """
    if resolved_type == "Int32":
        record.features["values"].int32_tensor.keys.extend(vector)
    elif resolved_type == "Float64":
        record.features["values"].float64_tensor.keys.extend(vector)
    elif resolved_type == "Float32":
        record.features["values"].float32_tensor.keys.extend(vector)


def _write_shape(resolved_type, record, scalar):
    """Record the row width *scalar* in the feature tensor's shape field.

    Args:
        resolved_type: "Int32", "Float64" or "Float32".
        record: Record protobuf being populated.
        scalar: number of columns in the (sparse) row.
    """
    if resolved_type == "Int32":
        record.features["values"].int32_tensor.shape.extend([scalar])
    elif resolved_type == "Float64":
        record.features["values"].float64_tensor.shape.extend([scalar])
    elif resolved_type == "Float32":
        record.features["values"].float32_tensor.shape.extend([scalar])


def write_numpy_to_dense_tensor(file, array, labels=None):
    """Writes a numpy array to a dense tensor

    One Record per matrix row is serialized and framed with RecordIO.

    Args:
        file: writable binary file-like object.
        array: 2D numpy array of features (n_samples, n_features).
        labels: optional 1D numpy array of per-row labels.

    Raises:
        ValueError: if array is not 2D, labels is not 1D, or their
            lengths are incompatible.
    """
    # Validate shape of array and labels, resolve array and label types
    if not len(array.shape) == 2:
        raise ValueError("Array must be a Matrix")
    if labels is not None:
        if not len(labels.shape) == 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    # Write each vector in array into a Record in the file object
    record = Record()
    for index, vector in enumerate(array):
        record.Clear()
        _write_feature_tensor(resolved_type, record, vector)
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[index])
        _write_recordio(file, record.SerializeToString())


def write_spmatrix_to_sparse_tensor(file, array, labels=None):
    """Writes a scipy sparse matrix to a sparse tensor

    Each CSR row is serialized as one Record (values + column keys +
    row width) and framed with RecordIO.

    Args:
        file: writable binary file-like object.
        array: scipy sparse matrix (n_samples, n_features).
        labels: optional 1D numpy array of per-row labels.

    Raises:
        TypeError: if array is not a scipy sparse matrix.
        ValueError: on incompatible array/label shapes.
    """
    if not issparse(array):
        raise TypeError("Array must be sparse")

    # Validate shape of array and labels, resolve array and label types
    if not len(array.shape) == 2:
        raise ValueError("Array must be a Matrix")
    if labels is not None:
        if not len(labels.shape) == 1:
            raise ValueError("Labels must be a Vector")
        if labels.shape[0] not in array.shape:
            raise ValueError(
                "Label shape {} not compatible with array shape {}".format(
                    labels.shape, array.shape
                )
            )
        resolved_label_type = _resolve_type(labels.dtype)
    resolved_type = _resolve_type(array.dtype)

    csr_array = array.tocsr()
    n_rows, n_cols = csr_array.shape

    record = Record()
    for row_idx in range(n_rows):
        record.Clear()
        row = csr_array.getrow(row_idx)
        # Write values
        _write_feature_tensor(resolved_type, record, row.data)
        # Write keys
        _write_keys_tensor(resolved_type, record, row.indices.astype(np.uint64))

        # Write labels
        if labels is not None:
            _write_label_tensor(resolved_label_type, record, labels[row_idx])

        # Write shape
        _write_shape(resolved_type, record, n_cols)

        _write_recordio(file, record.SerializeToString())


def read_records(file):
    """Eagerly read a collection of amazon Record protobuf objects from file.

    Args:
        file: readable binary file-like object of RecordIO-framed records.

    Returns:
        list of parsed Record protobuf objects.
    """
    records = []
    for record_data in read_recordio(file):
        record = Record()
        record.ParseFromString(record_data)
        records.append(record)
    return records


# MXNet requires recordio records have length in bytes that's a multiple of 4
# This sets up padding bytes to append to the end of the record, for diferent
# amounts of padding required.
padding = {}
for amount in range(4):
    if sys.version_info >= (3,):
        padding[amount] = bytes([0x00 for _ in range(amount)])
    else:
        padding[amount] = bytearray([0x00 for _ in range(amount)])

# RecordIO magic number marking the start of each framed record.
_kmagic = 0xCED7230A


def _write_recordio(f, data):
    """Writes a single data point as a RecordIO record to the given file.

    Frame layout: 4-byte magic, 4-byte length, payload, zero-padding to a
    4-byte boundary.

    Args:
        f: writable binary file-like object.
        data: serialized record bytes.
    """
    length = len(data)
    f.write(struct.pack("I", _kmagic))
    f.write(struct.pack("I", length))
    # Bytes needed to round the payload length up to a multiple of 4.
    pad = (((length + 3) >> 2) << 2) - length
    f.write(data)
    f.write(padding[pad])


def read_recordio(f):
    """Yield the payload of each RecordIO frame in *f* until EOF.

    Args:
        f: readable binary file-like object.
    """
    while True:
        try:
            read_kmagic, = struct.unpack("I", f.read(4))
        except struct.error:
            # Clean EOF: no more frames to read.
            return
        assert read_kmagic == _kmagic
        len_record, = struct.unpack("I", f.read(4))
        pad = (((len_record + 3) >> 2) << 2) - len_record
        yield f.read(len_record)
        if pad:
            # Skip the zero-padding that rounds the frame to 4 bytes.
            f.read(pad)


def _resolve_type(dtype):
    """Map a numpy dtype to the protobuf tensor type name it serializes as.

    Args:
        dtype: numpy dtype of the array being serialized.

    Raises:
        ValueError: for any dtype other than int, float, or float32.
    """
    if dtype == np.dtype(int):
        return "Int32"
    if dtype == np.dtype(float):
        return "Float64"
    if dtype == np.dtype("float32"):
        return "Float32"
    raise ValueError("Unsupported dtype {} on array".format(dtype))
nilq/baby-python
python
# required for test discovery
nilq/baby-python
python
# Copyright 2021 Victor I. Afolabi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

from typing import Iterable, List, Optional, overload, Tuple, Union

try:
    from typing import Literal
except ImportError:  # PEP 586
    from typing_extensions import Literal

import numpy as np
import pandas as pd

__all__ = [
    'Data',
]

# Features (n_samples, n_features).
_Features = np.ndarray
# Target (n_samples,)
_Target = np.ndarray
# Train & Test data type-hints.
_TrainData = Tuple[_Features, _Target]
_TestData = Tuple[_Features, _Target]


class Data:
    """Thin wrapper around a heart-disease CSV data file.

    The last CSV column is treated as the (binary) target; every other
    column is a feature. Instances are iterable and yield one
    ``(features, target)`` pair per data row.
    """

    def __init__(self, filename: str) -> None:
        self.filename = filename
        # Dataframe object.
        self._df: pd.DataFrame = pd.read_csv(filename)
        # Row cursor used by `self.__next__`.
        self.__current_id = 0

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(filename={self.filename})'

    def __str__(self) -> str:
        return f'{self.__class__.__name__}({self.filename})'

    def __len__(self) -> int:
        return len(self._df)

    def __iter__(self) -> Iterable[Tuple[_Features, _Target]]:
        # Reset the cursor so the dataset can be iterated more than once.
        self.__current_id = 0
        return self

    def __next__(self) -> Tuple[_Features, _Target]:
        # BUGFIX: the cursor must be advanced *after* reading the row.
        # The previous implementation incremented first, which skipped row 0
        # and raised KeyError (instead of StopIteration) past the last row.
        if self.__current_id >= len(self._df):
            raise StopIteration

        # Positional lookup: robust even if the CSV index is not 0..n-1.
        row = self._df.iloc[self.__current_id]
        features = np.array(row[self.feature_names], dtype=np.float32)
        target = np.array(row[self.target_name], dtype=np.float32)

        self.__current_id += 1
        return features, target

    def get_class_name(self, target: Literal[0, 1]) -> str:
        """Returns what numeric class names represents.

        Args:
            target (Literal[0, 1]): Either a 0 or 1.

        Returns:
            str: Returns corresponding class name given target.
        """
        return self.class_names[target]

    @overload
    def has_heart_disease(self, target: int) -> bool:
        ...

    @overload
    def has_heart_disease(self, target: float) -> bool:
        ...

    @overload
    def has_heart_disease(self, target: _Target) -> _Target:
        ...

    def has_heart_disease(
        self, target: Union[int, float, _Target]
    ) -> Union[bool, _Target]:
        """Target lookup if a patient has heart disease or not.

        Args:
            target (float | int | np.ndarray): Single or multiple targets.

        Returns:
            bool | np.ndarray[bool]: Return single or multiple results
                for the lookup.
        """
        # `np.cast[bool](...)` was removed in NumPy 2.0; `np.asarray` with an
        # explicit dtype is the documented, behavior-identical replacement.
        return np.asarray(target, dtype=bool)

    def train_test_split(
        self, test_size: float = 0.2,
        random_state: Optional[int] = None,
        shuffle: bool = True,
    ) -> Tuple[_TrainData, _TestData]:
        """Split features and labels into random train and test subsets.

        Arguments:
            test_size (float): A number between 0.0 and 1.0 that represents
                the proportion of the dataset to include in the test split.
                Defaults to 0.2 (20% of the data).

            random_state (int, optional): Controls the shuffling applied to
                the data before applying the split. Pass int for reproducible
                output across multiple function calls. Defaults to None.

            shuffle (bool): Whether or not to shuffle the data before
                splitting. Defaults to True.

        Returns:
            Tuple[TrainData, TestData]: Containing train-test split of inputs.
        """
        # Imported lazily so scikit-learn is only required when a split is
        # actually requested (every other feature of this class is
        # numpy/pandas only).
        from sklearn.model_selection import train_test_split

        X_train, X_test, y_train, y_test = train_test_split(
            self.features, self.target,
            test_size=test_size,
            random_state=random_state,
            shuffle=shuffle
        )
        #       Train data          Test data
        return (X_train, y_train), (X_test, y_test)

    @property
    def df(self) -> pd.DataFrame:
        """Dataframe object."""
        return self._df

    @property
    def columns(self) -> List[str]:
        """Column names."""
        return self._df.columns.tolist()

    @property
    def feature_names(self) -> List[str]:
        """List of feature names."""
        return self.columns[:-1]

    @property
    def target_name(self) -> str:
        """Target (label) name."""
        return self.columns[-1]

    @property
    def features(self) -> _Features:
        """Features as an array-like (n_samples, n_features)."""
        return np.array(self._df[self.feature_names], dtype=np.float32)

    @property
    def target(self) -> _Target:
        """Target (labels) as an array-like (n_samples,)."""
        return np.array(self._df[self.target_name], dtype=np.float32)

    @property
    def n_classes(self) -> int:
        """Number of classes."""
        return len(self._df[self.target_name].unique())

    @property
    def n_samples(self) -> int:
        """Number of data samples."""
        return len(self._df)

    @property
    def class_names(self) -> List[str]:
        """Human-readable names for the numeric target classes."""
        return ['No Heart disease', 'Has Heart disease']
nilq/baby-python
python
# Created By: Virgil Dupras # Created On: 2009-04-23 # Copyright 2013 Hardcoded Software (http://www.hardcoded.net) # # This software is licensed under the "BSD" License as described in the "LICENSE" file, # which should be included with this package. The terms are also available at # http://www.hardcoded.net/licenses/bsd_license from PyQt4.QtCore import Qt, pyqtSignal from PyQt4.QtGui import QBrush, QFont, QFontMetrics, QTableView, QColor from qtlib.table import Table class ResultsModel(Table): def __init__(self, app, view): model = app.model.result_table Table.__init__(self, model, view) view.horizontalHeader().setSortIndicator(1, Qt.AscendingOrder) app.prefsChanged.connect(self.appPrefsChanged) app.willSavePrefs.connect(self.appWillSavePrefs) def _getData(self, row, column, role): if column.name == 'marked': if role == Qt.CheckStateRole and row.markable: return Qt.Checked if row.marked else Qt.Unchecked return None if role == Qt.DisplayRole: data = row.data_delta if self.model.delta_values else row.data return data[column.name] elif role == Qt.ForegroundRole: if row.isref: return QBrush(Qt.blue) elif row.is_cell_delta(column.name): return QBrush(QColor(255, 142, 40)) # orange elif role == Qt.FontRole: isBold = row.isref font = QFont(self.view.font()) font.setBold(isBold) return font elif role == Qt.EditRole: if column.name == 'name': return row.data[column.name] return None def _getFlags(self, row, column): flags = Qt.ItemIsEnabled | Qt.ItemIsSelectable if column.name == 'marked': if row.markable: flags |= Qt.ItemIsUserCheckable elif column.name == 'name': flags |= Qt.ItemIsEditable return flags def _setData(self, row, column, value, role): if role == Qt.CheckStateRole: if column.name == 'marked': row.marked = bool(value) return True elif role == Qt.EditRole: if column.name == 'name': return self.model.rename_selected(value) return False def sort(self, column, order): column = self.model.COLUMNS[column] self.model.sort(column.name, order == Qt.AscendingOrder) 
#--- Properties @property def power_marker(self): return self.model.power_marker @power_marker.setter def power_marker(self, value): self.model.power_marker = value @property def delta_values(self): return self.model.delta_values @delta_values.setter def delta_values(self, value): self.model.delta_values = value #--- Events def appPrefsChanged(self, prefs): font = self.view.font() font.setPointSize(prefs.tableFontSize) self.view.setFont(font) fm = QFontMetrics(font) self.view.verticalHeader().setDefaultSectionSize(fm.height()+2) def appWillSavePrefs(self): self.model.columns.save_columns() #--- model --> view def invalidate_markings(self): # redraw view # HACK. this is the only way I found to update the widget without reseting everything self.view.scroll(0, 1) self.view.scroll(0, -1) class ResultsView(QTableView): #--- Override def keyPressEvent(self, event): if event.text() == ' ': self.spacePressed.emit() return QTableView.keyPressEvent(self, event) def mouseDoubleClickEvent(self, event): self.doubleClicked.emit(None) # We don't call the superclass' method because the default behavior is to rename the cell. #--- Signals spacePressed = pyqtSignal()
nilq/baby-python
python
# -*- coding: utf-8 -*- from __future__ import print_function, unicode_literals import unittest from nltk.classify.naivebayes import NaiveBayesClassifier class NaiveBayesClassifierTest(unittest.TestCase): def test_simple(self): training_features = [ ({'nice': True, 'good': True}, 'positive'), ({'bad': True, 'mean': True}, 'negative'), ] classifier = NaiveBayesClassifier.train(training_features) result = classifier.prob_classify({'nice': True}) self.assertTrue(result.prob('positive') > result.prob('negative')) self.assertEqual(result.max(), 'positive') result = classifier.prob_classify({'bad': True}) self.assertTrue(result.prob('positive') < result.prob('negative')) self.assertEqual(result.max(), 'negative')
nilq/baby-python
python
# -*- coding: utf-8 -*- import os from youtube_title_parse import get_artist_title class MetaTestSequence(type): def __new__(mcs, name, bases, attrs): def should_skip(test_params): if not test_params: return False if "skip" in test_params and test_params["skip"] is True: return True return False def gen_test(test_name, test_params): input = test_params["input"] expected = test_params["expected"] skip = should_skip(test_params) def test_func(self): if skip: self.skipTest("Currently unsupported") if "options" in test_params: artist, title = get_artist_title( input, options=test_params["options"] ) or (None, None) else: artist, title = get_artist_title(input) or (None, None) self.assertEqual(title, expected[1]) self.assertEqual(artist, expected[0]) test_func.__name__ = test_name return test_func if "test_cases" in attrs and "test_type" in attrs: for idx, test_params in enumerate(attrs["test_cases"]): test_kind = os.path.splitext(os.path.basename(attrs["test_type"]))[0] test_name = "%s_%d" % (test_kind, idx + 1) new_test = gen_test(test_name, test_params) attrs[new_test.__name__] = new_test return type.__new__(mcs, name, bases, attrs)
nilq/baby-python
python
# -*- coding: utf-8 -*- import pickle import numpy as np import pandas as pd import pystan import os, sys import stan_utility import patsy import arviz as az GLMEdata = pd.read_csv('GLMEdata.csv') GLMEdata = GLMEdata[GLMEdata.exp == 1] GLME = stan_utility.compile_model('GLME.stan', model_name="GLME") fixeff_form = "1+SAT+contrast+givenResp+SAT:contrast"#Fixed effects formula raneff_form = fixeff_form #Random effects formula fixeff = np.asarray(patsy.dmatrix(fixeff_form, GLMEdata)) #FE design matrix raneff = np.asarray(patsy.dmatrix(raneff_form, GLMEdata)) #RE design matrix prior_intercept = np.asarray([1,1]) priors_mu = np.repeat(0,4) #Priors on mu for FE priors_sigma = np.repeat(.5,4) # priors on sigma for FE priors_raneff = [0,.5] #Priors on RE Precision_GLME_data = dict( N = len(GLMEdata), P = fixeff.shape[-1], #number of pop level effects J = len(GLMEdata.participant.unique()), n_u = raneff.shape[-1], subj = GLMEdata.participant, X = fixeff, Z_u = raneff, y = GLMEdata.response.get_values(), p_intercept = prior_intercept, p_fmu = priors_mu, p_fsigma = priors_sigma, p_r = priors_raneff ) Precision_fit = GLME.sampling(data=Precision_GLME_data, iter=2000, chains=6, n_jobs=6, warmup = 1000, control=dict(adapt_delta=0.99)) stan_utility.check_treedepth(Precision_fit) stan_utility.check_energy(Precision_fit) stan_utility.check_div(Precision_fit) Precision_fit = az.from_pystan(posterior=Precision_fit, posterior_predictive='y_hat', observed_data="y", log_likelihood='log_lik', coords={'b': fixeff_form.split('+')[1:]}, dims={'raw_beta': ['b']}) Precision_fit.to_netcdf("FittedModels/Precision_Exp1_fit.nc")
nilq/baby-python
python
from django.db import models # Create your models here. MALE = 'M' FEMALE = 'F' UNKNOWN = 'U' GENDER_CHOICES = ( (MALE, 'Male'), (FEMALE, 'Female'), (UNKNOWN, 'Unknown')) class AdmissionsByGender(models.Model): PRIMARY = 'P' SECONDARY = 'S' DIAGNOSIS_CHOICES = ( (PRIMARY, 'Primary'), (SECONDARY, 'Secondary') ) year = models.IntegerField() gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE) admissions = models.IntegerField() diagnosis = models.CharField(max_length=1, choices=DIAGNOSIS_CHOICES, default=PRIMARY) class AdmissionsByAge(models.Model): PRIMARY = 'P' SECONDARY = 'S' DIAGNOSIS_CHOICES = ( (PRIMARY, 'Primary'), (SECONDARY, 'Secondary') ) year = models.IntegerField() total = models.IntegerField() age_under_16 = models.IntegerField() age_16_to_24 = models.IntegerField() age_25_to_34 = models.IntegerField() age_35_to_44 = models.IntegerField() age_45_to_54 = models.IntegerField() age_55_to_64 = models.IntegerField() age_65_to_74 = models.IntegerField() age_75_and_over = models.IntegerField() age_unknown = models.IntegerField() diagnosis = models.CharField(max_length=1, choices=DIAGNOSIS_CHOICES, default=PRIMARY) class SurgeryByGender(models.Model): code = models.FloatField() year = models.IntegerField() gender = models.CharField(max_length=1, choices=GENDER_CHOICES, default=MALE) admissions = models.IntegerField()
nilq/baby-python
python
# !/usr/bin/env python # -*- coding:utf-8 -*- # @Project : stock_quant # @Date : 2022/1/18 23:29 # @Author : Adolf # @File : info_push.py # @Function: import json import requests import logging def post_msg_to_dingtalk(title="", msg="", token="", at=None, type="text"): if at is None: at = [] url = "https://oapi.dingtalk.com/robot/send?access_token=" + token if type == "markdown": # 使用markdown时at不起作用,大佬们有空调一下 data = {"msgtype": "markdown", "markdown": {"title": "[" + title + "]" + title, "text": "" + msg}, "at": {} } if type == "text": data = {"msgtype": "text", "text": {"content": "[" + title + "]" + msg}, "at": {} } data["at"]["atMobiles"] = at json_data = json.dumps(data) try: response = requests.post(url=url, data=json_data, headers={"Content-Type": "application/json"}).json() assert response["errcode"] == 0 except Exception as e: logging.getLogger().error("发送钉钉提醒失败,请检查;{}".format(e))
nilq/baby-python
python
import unittest import json from mock import patch from provider.utils import unicode_encode from provider.execution_context import S3Session from tests.activity.classes_mock import FakeS3Connection from tests import settings_mock class TestS3Session(unittest.TestCase): @patch("provider.execution_context.S3Session.get_full_key") @patch("boto.s3.key.Key.get_contents_as_string") @patch("provider.execution_context.S3Connection") def test_get_value( self, fake_s3_connection, fake_get_contents_as_string, fake_get_full_key ): session_value = b'{"foo": "bar"}' expected = json.loads(unicode_encode(session_value)) fake_get_full_key.return_value = None fake_s3_connection.return_value = FakeS3Connection() fake_get_contents_as_string.return_value = session_value s3_session_object = S3Session(settings_mock, None, None) self.assertEqual(s3_session_object.get_value(None), expected) if __name__ == "__main__": unittest.main()
nilq/baby-python
python
import pyopencl as cl import numpy as np np.set_printoptions(linewidth=128) BOARD_SIZE = 10 SHIP_SIZES = [5,4,3,3,2] STATE_MISS = 0 STATE_HIT = 1 STATE_UNKNOWN = 2 def bool2IntArray(boolArray): ret = [] for array in boolArray: ret.append(np.packbits(array)) return ret def int2BoolArray(boolArray): ret = [] for array in boolArray: ret.append(np.unpackbits(array)) return ret def posible_positions_for_ship(size,exclude_tiles,matrix=False): positions = [] #print exclude_tiles for x in range(BOARD_SIZE-size+1): positionV = [] positionH = [] for j in range(BOARD_SIZE): excludeH = False excludeV = False posH = [] posV = [] for i in range(x,x+size): if [i,j] in exclude_tiles: excludeV = True posH.append([i,j]) if [j,i] in exclude_tiles: excludeH = True posV.append([j,i]) if (matrix): posV = coordinatesToArray(posV) posH = coordinatesToArray(posH) if(not excludeV): positionV.append(posV) if(not excludeH): positionH.append(posH) positions += positionV positions += positionH return positions def coordinatesToArray(position): array = np.zeros([BOARD_SIZE,BOARD_SIZE],dtype=np.bool) for tile in position: array[tile[0]][tile[1]] = True array = array.flatten() array.resize((128,)) return array def get_ship_boards(exclude_tiles=[],matrix = False): shipBoards = [] for shipSize in SHIP_SIZES: posible = posible_positions_for_ship(shipSize,exclude_tiles,matrix) shipBoards.append(posible) return shipBoards def shortInterpolate(ss1,ss2,size): validBoards = [] for s1 in ss1: for s2 in ss2: b = np.logical_or(s1,s2); if(np.count_nonzero(b)==size): validBoards.append(b) return validBoards def opencl_interpolate(bs1,bs2,hits): print "Starting computation" # This is the plan: # We copy our generator boards to opengl memory # We generate an index of each posible combination, so index x represent one board from each ship generator boards # Then we tell the opengl processor to test a number of ix's to see whether they create a valid board ctx = cl.create_some_context() prg = cl.Program(ctx, """ 
__kernel void sum(__global const int *v1,__global const int *v2, __global const int *current_state, uint v1_index, __global int *sum_g ,__global int *valids_g) { int work_item = get_global_id(0); int array_position = work_item & 3; // % 4 int v1_local =v1[v1_index * 4 + array_position]; int v2_local =v2[ work_item ]; int current_state_local = current_state[array_position]; int result = v1_local | v2_local; int overlapping = v1_local & v2_local; int non_matching_hits = (( current_state_local | result) ^ result); valids_g[work_item] = ~(overlapping | non_matching_hits); sum_g[ work_item] = result; } __kernel void join_validity(__global long *valids) { int ix = get_global_id(0); long v1 = valids[2*ix]; long v2 = valids[2*ix + 1]; long invalid = (~v1 | ~v2); if (invalid != 0){ valids[2*ix] = 0; valids[2*ix +1] = 0; } } __kernel void matrix_count(__global const char *v1, __global char *valids_g, uint work_size, __global long *out_matrix) { /*int sector = get_global_id(0); int workers = get_global_size(0); int board_sector = get_global_id(1);*/ int ix = get_global_id(0); int workers = get_global_size(0) >> 4; int sector = ix >> 4; int board_sector = ix & 15; char local_sector; char local_valid; uint board; long sum[8] = {0}; uint work_unit = (work_size >> 9) +1 ; // 512 = 2^9 for (uint board_ix = 0;board_ix < work_unit ; board_ix++){ board = work_unit * sector + board_ix; if (board >= work_size){ break; } local_sector = v1[16*board + board_sector]; local_valid = valids_g[16*board + board_sector]; for (char tile = 0; tile < 8; tile++){ sum[tile] += ((local_sector & (1 << 7 - tile)) && local_valid); } } for (char position = 0; position < 8; position++){ out_matrix[sector* 128 + 8*board_sector + position ] += sum[position]; } } """).build() queue = cl.CommandQueue(ctx) mf = cl.mem_flags total = 0 valid = 0 current_state = np.copy(hits) current_state.resize((128,)) current_state = np.packbits(current_state.astype(np.bool)).astype(np.uint8) s1 = np.array(bs1).astype(np.uint8) 
s2 = np.array(bs2).astype(np.uint8) s1_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=s1) s2_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=s2) current_state_g = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=current_state) workSize= len(bs1) iterations = len(bs2) sum_result_np = np.empty([workSize,16]).astype(np.uint8) sum_result_np_g = cl.Buffer(ctx, mf.WRITE_ONLY, sum_result_np.nbytes) valid_np = np.empty([workSize,16]).astype(np.uint8) valid_np_g = cl.Buffer(ctx, mf.READ_WRITE, valid_np.nbytes) count_matrix = np.zeros([512,128]).astype(np.uint64) count_matrix_g = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=count_matrix) print_limit=1 for step in xrange(iterations): #for step in xrange(1000): tested = float(step)/(iterations)*100 if tested > print_limit: print "Tested: " + str(tested) + "%" print_limit+=1 prg.sum(queue, (workSize * 4,), None, s2_g, s1_g, current_state_g, np.uint32(step), sum_result_np_g, valid_np_g); prg.join_validity(queue, (workSize,), None, valid_np_g); prg.matrix_count(queue, (512 * 16,), None, sum_result_np_g, valid_np_g, np.uint32(workSize),count_matrix_g); cl.enqueue_copy(queue, count_matrix, count_matrix_g) total_matrix = sum(count_matrix) print np.resize(total_matrix,(10,10)) print "" print np.resize(np.ma.masked_array(total_matrix,mask=np.resize(hits,(128,)),fill_value=0).filled(),(10,10)) return np.argmax(np.ma.masked_array(total_matrix,mask=np.resize(hits,(128,)),fill_value=0).filled())
nilq/baby-python
python
from sklearn.metrics import mean_absolute_percentage_error as mape from sklearn.metrics import r2_score def mean_absolute_percentage_error(actual, forecast) -> float: """ calculate mean absolute percentage error (MAPE) :param actual: actual values :param forecast: forecast (prediction) values :return: MAPE """ return mape(actual, forecast) * 100 def r_squared(actual, forecast) -> float: """ calculate r2 (coefficient of determination) :param actual: actual values :param forecast: forecast (prediction) values :return: r2 """ return r2_score(actual, forecast)
nilq/baby-python
python
################################################################################ # # # RUN ALL TESTS AND CHECK FOR ACCURACY # # # ################################################################################ from __future__ import print_function,division import os import sys; sys.dont_write_bytecode = True sys.path.insert(0, '../script/') import util import subprocess as sp import numpy as np import glob import pickle from scipy.interpolate import interp1d as interp import time # SPECIFY EMAIL OPTIONS TO = ['bryan10@illinois.edu'] CC = [] FROM = 'afd.illinois.testing@gmail.com' PASSWORD = 'whatpasswordshouldichoose' SUBJECT = 'BHLIGHT TESTING REPORT' LOGNAME = 'test_all.txt' SEND_REPORT = '-email' in sys.argv TABLE = '-table' in sys.argv FAST = '-fast' in sys.argv VERBOSE = '-verbose' in sys.argv LONG_TEST = '-long' in sys.argv RAD_TEST = '-rad' in sys.argv FLUID_TEST = '-fluid' in sys.argv NU_TEST = '-nu' in sys.argv LIGHT_TEST = '-light' in sys.argv ERROR_THRESHOLD = 0.01 # REPORT TIME FOR TESTING # GET ALL TEST SCRIPTS M_FAST_TESTS = ['tracers1d.py', 'sod.py','table.py','advection1d.py','binning.py'] M_FLUID_TESTS = ['sod.py', 'table.py', 'advection2d.py', 'advection2d.py -mpi'] M_NU_TESTS = ['yedecay.py', 'yedecay.py -antinu', 'yedecay.py -mpi', # 'yedecay.py -mpi -antinu', 'multiscatt.py', 'tracers1d.py'] M_LIGHT_TESTS = ['binning.py', 'brem.py', 'thermalization.py', 'thermalization_mpi.py', 'comptonization.py'] SKIP = ['generate_all_plots.py','test_all.py'] if LONG_TEST: TESTS = glob.glob('*.py') for test in SKIP: if test in TESTS: TESTS.remove(test) elif FAST: TESTS = M_FAST_TESTS elif LIGHT_TEST: TESTS = M_LIGHT_TESTS elif RAD_TEST: TESTS = M_LIGHT_TESTS + M_NU_TESTS elif FLUID_TEST: TESTS = M_FLUID_TESTS elif NU_TEST: TESTS = M_NU_TESTS else: TESTS = M_FLUID_TESTS + M_LIGHT_TESTS + M_NU_TESTS print("") print("********************************************************************************") print("") print(" AUTOMATED TESTING") print("") 
print("********************************************************************************") util.log_output(sys, LOGNAME) DATE = time.strftime('%Y/%m/%d') TIME = time.strftime('%H:%M:%S') MACHINE = os.uname()[1] popen = sp.Popen(['git', 'show', '-s', '--format=%H'], stdout=sp.PIPE, universal_newlines=True) for line in iter(popen.stdout.readline, ""): HASH = line.lstrip().rstrip() popen = sp.Popen(['git', 'branch'], stdout=sp.PIPE, universal_newlines=True) for line in iter(popen.stdout.readline, ""): if line[0] == '*': BRANCH = line[2:].rstrip() print('\n DATE: ' + DATE) print(' TIME: ' + TIME) print(' MACHINE: ' + MACHINE) print(' BRANCH: ' + BRANCH) print(' COMMIT: ' + HASH + '\n') def name_to_args(namestring): """Takes a script name which may contain CLI args and splits out the args. Assumes string is generically of the form '<script name>.py -arg --arg' """ namestring = namestring.rstrip().lstrip() if namestring[-3:] == '.py': return [namestring] args = namestring.split('.py ') args = ([args[0].lstrip().rstrip() + '.py'] + [s.lstrip().rstrip() for s in args[1].split()]) return args # USE INTERPOLATION ON A (ANALYTIC SOLUTION) TO COMPARE TO B def sanitize_array(a): a = np.array(a) # ensure a is a numpy array if len(a.shape) == 1: return a if np.prod(a.shape[1:]) > 1: raise ValueError( "Array should be 1d. Array shape = {}".format(a.shape) ) return a.reshape(a.shape[0]) def L1_norm(xa, ya, xb, yb): # special case for 0d arrays if len(xa) == len(xb) == len(ya) == len(yb) == 1: if np.abs(yb[0]) <= 1e-12: return np.fabs(ya[0] - yb[0]) return np.fabs((ya[0] - yb[0])/yb[0]) xa,ya,xb,yb = [sanitize_array(a) for a in [xa,ya,xb,yb]] if xa[0] > xb[0]: xb = xb[1:] yb = yb[1:] fa = interp(xa, ya) norm = 0. nodenom = np.max(ya) <= 1e-12 for n in range(len(xb)): num = np.fabs(yb[n] - fa(xb[n])) denom = np.fabs((yb[n] + fa(xb[n]))/2.) 
if nodenom: norm += num else: norm += num/denom return (norm/n) FAIL = False for TEST in TESTS: args = name_to_args(TEST) TESTNAME = args[0][:-3] if len(args) == 1 else TEST print(' ' + util.color.BOLD + TESTNAME + util.color.NORMAL) args = [sys.executable] + args + ['-auto'] if TABLE: args += ['-table'] if FAST: args += ['-fast'] popen = sp.Popen(args, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True) for line in iter(popen.stdout.readline, ""): if VERBOSE: print(line.rstrip()) if line.lstrip().rstrip() == 'BUILD SUCCESSFUL': print(' BUILD SUCCESSFUL') print(' RUN FINISHED') popen.wait() if not os.path.isfile('data.p'): raise RuntimeError("Test did not succesfully complete.") with open('data.p', 'rb') as f: data = pickle.load(f) xa = data['SOL'][0] ya = data['SOL'][1] xb = data['CODE'][0] yb = data['CODE'][1] if 'THRESHOLD' in data.keys(): error_threshold = data['THRESHOLD'] else: error_threshold = ERROR_THRESHOLD norm = L1_norm(xa, ya, xb, yb) print(' ERROR: %.2g %%' % (100*norm)) if norm < error_threshold: print(util.color.BOLD + ' PASS' + util.color.NORMAL + '\n') else: print(util.color.WARNING + ' FAIL' + util.color.NORMAL + '\n') FAIL = True sp.call(['rm', 'data.p']) if not SEND_REPORT: sp.call(['rm', LOGNAME]) if FAIL: raise RuntimeError("Tests failed!") else: print("All tests passed!") sys.exit() import smtplib if FAIL: SUBJECT += ' - FAIL' else: SUBJECT += ' - PASS' MESSAGE = '' MFILE = open(LOGNAME, 'rb') for line in MFILE: MESSAGE += line MFILE.close() EMAIL = ('From: %s\r\n' % FROM + 'To: %s\r\n' % ','.join(TO) + 'CC: %s\r\n' % ','.join(CC) + 'Subject: %s\r\n' % SUBJECT + '\r\n' + MESSAGE) ADDRS = TO + CC srvr = smtplib.SMTP('smtp.gmail.com', 587) srvr.ehlo() srvr.starttls() srvr.ehlo() srvr.login(FROM, PASSWORD) srvr.sendmail(FROM, ADDRS, EMAIL) srvr.close() sp.call(['rm', LOGNAME])
nilq/baby-python
python
from functools import lru_cache
from typing import Any, Dict

import numpy
import scipy.special


@lru_cache(maxsize=None)
def binom(n: int, k: int) -> int:
    """Binomial coefficient ``C(n, k)`` as an exact int, memoised.

    Memoisation now uses stdlib :func:`functools.lru_cache` instead of the
    project-local ``mlxtk.util.memoize`` helper — identical call semantics,
    one fewer project dependency.
    """
    return int(scipy.special.binom(n, k))


# @jit
def build_number_state_table_bosonic(N: int, m: int) -> numpy.ndarray:
    """Enumerate all bosonic number states of ``N`` particles in ``m`` modes.

    Returns:
        int64 array of shape ``(C(N+m-1, m-1), m)``; each row is an
        occupation-number vector summing to ``N``. Enumeration starts from
        ``(N, 0, ..., 0)`` and repeatedly moves one particle to the right.
    """
    number_of_states = binom(N + m - 1, m - 1)
    number_states = numpy.zeros((number_of_states, m), numpy.int64)
    number_states[0, 0] = N
    for i in range(number_of_states - 1):
        # Find the right-most mode (excluding the last) that can donate a
        # particle; everything to its right is reset.
        j = m - 2
        stop = False
        while j >= 0 and not stop:
            if number_states[i, j] > 0:
                summation = 0
                for k in range(j):
                    number_states[i + 1, k] = number_states[i, k]
                    summation += number_states[i + 1, k]
                number_states[i + 1, j] = number_states[i, j] - 1
                summation += number_states[i + 1, j]
                # Dump all remaining particles into the next mode.
                number_states[i + 1, j + 1] = N - summation
                stop = True
            j -= 1
    return number_states


class NumberStateLookupTableBosonic:
    """Trie-style lookup from a bosonic number state to its enumeration index.

    Generates states in the same order as
    :func:`build_number_state_table_bosonic` and stores them as nested dicts
    keyed by per-mode occupations, so :meth:`get_index` resolves a state with
    ``O(m)`` dictionary lookups.
    """

    def __init__(self, N: int, m: int):
        self.N = N  # total particle number
        self.m = m  # number of modes
        self.number_of_states = binom(N + m - 1, m - 1)
        self.table = {}  # type: Dict[int, Any]
        self._build()

    def _build(self):
        """Generate every state in order and insert it into the trie."""
        state_i = numpy.zeros(self.m, numpy.int64)
        state_ip1 = numpy.zeros(self.m, numpy.int64)
        state_i[0] = self.N
        self.insert_state(state_i, 0)
        for i in range(self.number_of_states - 1):
            # Same successor rule as build_number_state_table_bosonic.
            j = self.m - 2
            stop = False
            while j >= 0 and not stop:
                if state_i[j] > 0:
                    summation = 0
                    for k in range(j):
                        state_ip1[k] = state_i[k]
                        summation += state_ip1[k]
                    state_ip1[j] = state_i[j] - 1
                    summation += state_ip1[j]
                    state_ip1[j + 1] = self.N - summation
                    stop = True
                j -= 1
            self.insert_state(state_ip1, i + 1)
            state_i[:] = state_ip1[:]
            state_ip1[:] = 0

    def insert_state(self, state: numpy.ndarray, index: int):
        """Insert ``state`` with enumeration ``index`` into the trie."""
        self._insert_state_impl(state, index, 0, self.table)

    def _insert_state_impl(
        self, state: numpy.ndarray, index: int, position: int, current: Dict[int, Any]
    ):
        occupation = state[position]
        # Leaf level: the last mode's occupation maps directly to the index.
        if position == state.shape[0] - 1:
            current[occupation] = index
            return

        if occupation not in current:
            current[occupation] = {}

        self._insert_state_impl(state, index, position + 1, current[occupation])

    def get_index(self, state: numpy.ndarray) -> int:
        """Return the enumeration index of ``state``.

        Raises:
            KeyError: if ``state`` is not a valid ``(N, m)`` number state.
        """
        return self._get_index_impl(state, 0, self.table)

    def _get_index_impl(
        self, state: numpy.ndarray, position: int, current: Dict[int, Any]
    ) -> int:
        occupation = state[position]
        if position == state.shape[0] - 1:
            return current[occupation]
        return self._get_index_impl(state, position + 1, current[occupation])


def get_number_state_index_bosonic(state: numpy.ndarray) -> int:
    """Combinatorial index of a bosonic number state.

    NOTE(review): this function counts from 1 and appears to follow a
    different enumeration convention than ``NumberStateLookupTableBosonic``
    (e.g. for m=2 the first two states both map to 1) — verify against
    callers before mixing the two.
    """
    index = 1
    m = state.shape[0]
    remaining = numpy.sum(state) - 1
    i = 0
    while i < m - 1:
        remaining -= state[i]
        i += 1
        if remaining > 0:
            j = 0
            while j <= remaining:
                index += binom(j + m - i - 1, m - i - 1)
                j += 1
    return index
nilq/baby-python
python
from nltk import word_tokenize from nltk.stem import WordNetLemmatizer import os from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel import numpy as np import nltk from nltk.corpus import stopwords nltk.download('punkt') stop_words = set(stopwords.words('english')) def get_similarity_score(actual,query): class LemmaTokenizer: ignore_tokens = [',', '.', ';', ':', '"', '``', "''", '`'] def __init__(self): self.wnl = WordNetLemmatizer() def __call__(self, doc): return [self.wnl.lemmatize(t) for t in word_tokenize(doc) if t not in self.ignore_tokens] tokenizer=LemmaTokenizer() tokens=str(actual) #tokenizer(tokens) documents=tokens search_terms = str(query) # search_terms = 'sewing machine' # Initialise TfidfVectorizer with the LemmaTokenizer. Also need to lemmatize the stop words as well token_stop = tokenizer(' '.join(stop_words)) vectorizer = TfidfVectorizer(stop_words=token_stop, tokenizer=tokenizer) # Calculate the word frequency, and calculate the cosine similarity of the search terms to the documents vectors = vectorizer.fit_transform([search_terms] + [documents]) cosine_similarities = linear_kernel(vectors[0:1], vectors).flatten() document_scores = [item.item() for item in cosine_similarities[1:]] # convert back to native Python dtypes scores = [(score) for score in zip(document_scores)] perc_scores=scores[0][0]*100 res=" Corupus Similarity Ratio:" + str(round(perc_scores,2))+"%" return res
nilq/baby-python
python
import torch.nn as nn from .torch_helpers import NamedTensor class Module(nn.Module): def register_parameter(self, name, tensor): if isinstance(tensor, NamedTensor): param = nn.Parameter(tensor.values) super(Module, self).register_parameter( "_" + name + "_named", param ) tensor._tensor = param setattr(self, name, tensor) else: super(Module, self).register_parameter(name, tensor) ModuleList = nn.ModuleList class _Update: def rename(self, **kwargs): self._updates = kwargs return self def __call__(self, input): updates = {} if "_updates" not in self.__dict__ else self._updates return input.op(super(_Update, self).forward, **updates) class _Flat: def __call__(self, input): return input.op(super(_Flat, self).forward) class _Loss: def reduce(self, dims): self._reduced = dims return self def __call__(self, input, target): assert "_reduced" in dir(self), "Must call 'reduce' first." return input.reduce2(target, super(_Loss, self).forward, self._reduced) class _Augment: def augment(self, name): self._augment = name return self def forward(self, input): augment = ( "embedding" if "_augment" not in self.__dict__ else self._augment ) return input.augment(super(_Augment, self).forward, augment) _wrap = ["Dropout"] class Dropout(_Flat, nn.Dropout): pass _update = [ "Linear", "Conv1d", "Conv2d", "Conv3d", "MaxPool1d", "MaxPool2d", "MaxPool3d", ] class Linear(_Update, nn.Linear): pass class Conv1d(_Update, nn.Conv1d): pass class Conv2d(_Update, nn.Conv2d): pass class Conv3d(_Update, nn.Conv2d): pass class MaxPool1d(_Update, nn.MaxPool1d): pass class MaxPool2d(_Update, nn.MaxPool2d): pass class MaxPool3d(_Update, nn.MaxPool2d): pass _loss = ["CrossEntropyLoss", "NLLLoss"] class CrossEntropyLoss(_Loss, nn.CrossEntropyLoss): pass class NLLLoss(_Loss, nn.NLLLoss): pass _augment = ["Embedding"] class Embedding(_Augment, nn.Embedding): pass
nilq/baby-python
python
import json

from django.core.urlresolvers import reverse, NoReverseMatch
from django.contrib.admin.templatetags.admin_static import static
from django.template import Template, Context
from django.utils.encoding import force_text
from django.template.loader import get_template
from django.forms.models import BaseModelFormSet
from django.forms import Form, ModelForm, CharField, HiddenInput

from crispy_forms.helper import FormHelper
from crispy_forms.bootstrap import FormActions
from crispy_forms.layout import LayoutObject, Layout, Submit, HTML
from crispy_forms.utils import render_crispy_form, TEMPLATE_PACK

from .helpers import init_chosen_widget, init_dateinput


class DefaultFormActions(LayoutObject):
    """
    Crispy layout object that renders form actions depending on options
    defined in ``form.opts`` property.

    Keyword arguments available in ``opts``:

    :ivar str success_url: Url to redirect to on successful form save.
    :ivar str delete_url: Delete view that's requested on form delete.
    :ivar str delete_success_url: Url to redirect view on successful form deletion.
    :ivar str form_actions_template: Template to render form actions in.
        Default: ``'ajaxviews/_form_controls.html'``
    :ivar int preview_stage: If form preview is displayed render a back button.
    :ivar bool modal_form: True if form is displayed in a bootstrap modal. Default: ``False``
    :ivar bool delete_confirmation: Display a `bootstrap confirmation
        <http://bootstrap-confirmation.js.org/>`_ popover if delete button is clicked.
    :ivar dict form_cfg: Additional data needed to process form save passed through a hidden
        input field. Dictionary is stringified and automatically parsed again when calling
        :func:`FormMixin.cleaned_form_cfg`.
    """
    # noinspection PyUnusedLocal, PyMethodMayBeStatic
    def render(self, form, form_style, context, template_pack=TEMPLATE_PACK):
        success_url = form.opts.get('success_url', '')
        delete_url = form.opts.get('delete_url', '')
        if delete_url:
            # Append the post-delete redirect target, using '&' when the
            # delete url already carries a query string.
            delete_url += '&' if '?' in delete_url else '?'
            delete_url += 'success_url=' + force_text(form.opts.get('delete_success_url', success_url))
        template = get_template(form.opts.get('form_actions_template', 'ajaxviews/_form_controls.html'))
        btn_group = template.render({
            'delete_url': delete_url,
            'success_url': force_text(success_url),
            'modal_form': form.opts.get('modal_form', False),
            'form_preview': form.opts.get('preview_stage', False),
            'delete_confirmation': form.opts.get('delete_confirmation', False),
            # form_cfg travels as a JSON string in a hidden input
            'form_cfg': json.dumps(form.form_cfg) if getattr(form, 'form_cfg', None) else None,
        })
        # Save button plus the rendered button group (delete/cancel/etc.).
        layout_object = FormActions(
            Submit('save', form.opts.get('save_button_name', 'Save')),
            HTML(btn_group),
            style='margin-bottom: 0;'
        )
        return layout_object.render(form, form_style, context)


class DefaultFormHelper(FormHelper):
    """
    Crispy form helper used to define default form action control.

    A ``data-async`` html property is added to the form tag.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # data-async marks the form for ajax submission on the client side
        self.attrs = {'data-async': ''}

    def append_form_actions(self):
        """
        Append form actions to the current layout.
        """
        self.layout.append(DefaultFormActions())

    def add_form_actions_only(self):
        """
        Disable the form tag and add form actions only to the current layout.
        """
        self.form_tag = False
        self.add_layout(Layout(DefaultFormActions()))


# noinspection PyUnresolvedReferences
class FormMixin:
    """
    Mixin that handles instantiation of crispy form helper and options
    passed in the form's init kwargs.

    :ivar dict opts: Options extracted from the init kwargs listed in
        ``form_kwargs``; consumed by :class:`DefaultFormActions`.
    """
    # init kwargs that are popped off and collected into ``self.opts``
    form_kwargs = [
        'success_url',
        'form_action',
        'delete_url',
        'delete_success_url',
        'modal_form',
        'preview_stage',
        'model_data',
        'preview_data',
        'save_button_name',
        'init_chosen_widget',
        'init_date_widget',
        'delete_confirmation',
        'form_actions_template',
    ]

    def __init__(self, *args, **kwargs):
        self._helper_instance = None
        self.form_cfg = kwargs.pop('form_cfg', {})
        self.user = kwargs.pop('user', None)
        self.opts = {}
        # pop recognized options before the base form sees the kwargs
        for key in list(kwargs):
            if key in self.form_kwargs:
                self.opts[key] = kwargs.pop(key)
        super().__init__(*args, **kwargs)

    @property
    def helper(self):
        """
        The :class:`DefaultFormHelper` is instantiated only once when this helper
        property is accessed first. Assign your own form helper if you want to
        override the default behavior. This renders hidden fields and appends
        form actions by default.

        :return: Form helper instance
        """
        if self._helper_instance is not None:
            return self._helper_instance
        if self.form_cfg:
            # carry form_cfg through the round trip as a hidden JSON field
            self.fields['form_cfg'] = CharField(widget=HiddenInput(), required=False)
            self.fields['form_cfg'].initial = json.dumps(self.form_cfg)
        try:
            # only GenericModelForm defines init_add_fields; plain forms don't
            self.init_add_fields()
        except AttributeError:
            pass
        helper = DefaultFormHelper(self)
        if 'form_action' in self.opts:
            helper.form_action = self.opts['form_action']
        helper.render_hidden_fields = True
        helper.append_form_actions()
        self._helper_instance = helper
        return helper

    @helper.setter
    def helper(self, helper):
        self._helper_instance = helper

    @property
    def cleaned_form_cfg(self):
        """
        Loads the stringified ``form_cfg`` in ``cleaned_data`` to return a python
        dictionary object.

        :return: form cfg dictionary
        """
        if 'form_cfg' in self.cleaned_data:
            return json.loads(self.cleaned_data['form_cfg'])
        return {}

    @property
    def layout(self):
        """
        Get or set the crispy form helper layout object. If you set a new layout
        the form actions are appended automatically.
        """
        return self.helper.layout

    @layout.setter
    def layout(self, layout):
        self.helper.add_layout(layout)
        self.helper.append_form_actions()


class SimpleForm(FormMixin, Form):
    """
    Generic form for use without a corresponding model. Also used to display
    a preview before saving a form.

    :ivar object object: Model instance of the preview forms first stage.
    :ivar dict model_data: Cleaned data of the preview forms second stage.
    """
    def __init__(self, *args, **kwargs):
        self.object = kwargs.pop('instance', None)
        self.model_data = kwargs.pop('model_data', None)
        success_message = kwargs.pop('success_message', None)
        super().__init__(*args, **kwargs)
        if success_message is not None:
            self.form_cfg['success_message'] = success_message
        # widget initialization is opt-out via init kwargs
        if self.opts.get('init_chosen_widget', True):
            init_chosen_widget(self.fields.items())
        if self.opts.get('init_date_widget', True):
            init_dateinput(self.fields.items())


class GenericModelForm(FormMixin, ModelForm):
    """
    Generic form for use with a corresponding model.
    """
    # "add related object" link appended to a field's label (opens a modal)
    field_label_addon = """<a class="modal-link form-add-link" href="{0}"><img src="{1}" alt="{2}"/></a>"""

    def __init__(self, *args, **kwargs):
        self.json_cache = kwargs.pop('json_cache', {})
        super().__init__(*args, **kwargs)
        # preselect fields from related object ids passed by the calling
        # view; consumed entries are removed from form_cfg
        for key, value in self.form_cfg.get('related_obj_ids', {}).copy().items():
            field_name = key.replace('_id', '')
            if field_name in self.fields:
                self.fields[field_name].initial = value
                del self.form_cfg['related_obj_ids'][key]
        if self.opts.get('init_chosen_widget', True):
            init_chosen_widget(self.fields.items())
        if self.opts.get('init_date_widget', True):
            init_dateinput(self.fields.items())

    def init_add_fields(self):
        # For each field listed in Meta.add_fields, append an "add" link to
        # its label pointing at the create view for the related model.
        for field_name, url_name in getattr(self.Meta, 'add_fields', {}).items():
            try:
                url = reverse(url_name)
            except NoReverseMatch:
                # view requires the current instance's pk as url argument
                url = reverse(url_name, args=[self.instance.pk])
            # self.fields[field_name].label_suffix = ""  # suffix not supported by django-crispy-forms
            url += '?auto_select_field=' + field_name
            self.fields[field_name].label += self.field_label_addon.format(
                url,
                static('admin/img/icon-addlink.svg'),
                'Add'
            )

    def render_form_actions(self):
        # Render a standalone (tag-less) form that contains only the form
        # actions; used when the actions are placed outside the form body.
        form = Form()
        form.opts = self.opts
        form.helper = DefaultFormHelper(self)
        form.helper.add_form_actions_only()
        return render_crispy_form(form)

    def get_related_obj(self, model, key=None):
        """
        Get model instance with pk of related model from the calling view.

        :param model: Django model class.
        :param key: Keyword argument to get the value used to retrieve the model
            instance. If not specified it expects a single key in ``related_obj_ids``
            that's used.
        :return: Model instance.
        """
        related_obj_dict = self.cleaned_form_cfg.get('related_obj_ids', None)
        if not related_obj_dict:
            return None
        if key:
            related_obj_id = related_obj_dict[key]
        else:
            related_obj_id = list(related_obj_dict.values())[0]
        return model.objects.get(pk=int(related_obj_id))

    def save(self, commit=True):
        instance = super().save(commit=commit)
        # Expose the saved instance so the client can auto-select it in the
        # field that triggered the "add related object" modal.
        if commit and 'auto_select_field' in self.cleaned_form_cfg:
            self.json_cache['auto_select_choice'] = {
                'pk': instance.pk,
                'field': self.form_cfg['auto_select_field'],
                'text': str(instance),
            }
        return instance


class ModelFormSet(BaseModelFormSet):
    """
    Use this form to render form actions at the bottom of the formset.

    :var str form_actions_template: Template to render save and cancel buttons.
        Be sure to use the ``{{ success_url }}`` tag for your cancel button if
        you want to override this template.
    """
    form_actions_template = """
    <input name="save" class="btn btn-primary" type="submit" value="Save">
    <a role="button" class="btn btn-default cancel-btn" href="{{ success_url }}">Cancel</a>
    """

    def __init__(self, *args, **kwargs):
        self._success_url = kwargs.pop('success_url', None)
        super().__init__(*args, **kwargs)

    def render_form_actions(self, **kwargs):
        kwargs['success_url'] = self._success_url
        return Template(self.form_actions_template).render(Context(kwargs))


# helper.form_tag = False
# helper.layout = Layout(
#     TabHolder(
#         Tab(
#             'Basic Information',
#             'first_name',
#             'last_name'
#         ),
#         Tab(
#             'Address',
#             'address1',
#             'address2',
#         ),
#         Tab(
#             'Contact',
#             'email',
#             'mobile',
#         )
#     )
# )

# from djmoney.forms.widgets import MoneyWidget

# class CustomMoneyWidget(MoneyWidget):
#     def format_output(self, rendered_widgets):
#         return ('<div class="row">'
#                     '<div class="col-xs-6 col-sm-10">%s</div>'
#                     '<div class="col-xs-6 col-sm-2">%s</div>'
#                 '</div>') % tuple(rendered_widgets)

# class BookingForm(forms.ModelForm):
#     ...
#     def __init__(self, *args, **kwargs):
#         super(BookingForm, self).__init__(*args, **kwargs)
#         amount, currency = self.fields['amount'].fields
#         self.fields['amount'].widget = CustomMoneyWidget(
#             amount_widget=amount.widget, currency_widget=currency.widget)

# <a class="modal-link pull-right" href="{0}" style="margin-top: -3px; margin-left: 5px;">
#     <img src="{1}" width="15" height="15" alt="{2}"/>
# </a>
nilq/baby-python
python
import ray
import torch
from models import Model
import numpy as np
import random
from atari_wrappers import make_atari, wrap_deepmind


@ray.remote
class Player:
    """Ray actor that plays Atari episodes with an epsilon-greedy policy and
    pushes transitions to a shared replay buffer.

    NOTE(review): several names carry typos ('paly_game', 'palyed_game',
    'continous_self_play', checkpoint key 'episilon_decay'). They are kept
    as-is because remote callers and the checkpoint dict depend on them.
    """

    def __init__(self, checkpoint, replay_buffer, share_storage, test_mode):
        # Build the wrapped Atari env (scaled observations, frame stacking).
        self.game = make_atari(checkpoint["game"]+"NoFrameskip-v4")
        self.game = wrap_deepmind(self.game, scale=True, frame_stack=True)
        self.action_list = checkpoint["action_list"]
        self.epsilon = checkpoint['epsilon']
        self.max_len_episode = checkpoint["max_len_episode"]
        self.checkpoint = checkpoint
        self.training_step = checkpoint["training_step"]
        self.epsilon_decay = checkpoint['episilon_decay']
        # flush transitions to the replay buffer every N steps
        self.update_memory_iter = checkpoint['update_memory_iter']
        self.replay_buffer = replay_buffer
        self.share_storage = share_storage
        self.model = Model().cuda()
        self.model.set_weights(checkpoint["weights"])
        self.test_mode = test_mode
        if self.test_mode:
            # in test mode, only log episode rewards; nothing is stored
            self.palyed_game = 0
            self.epr_writer = open('./log/'+checkpoint["game"]+'.log', 'w')
        print('player init done')

    def continous_self_play(self):
        """Play episodes until the shared 'terminate' flag is set, refreshing
        weights before each episode and decaying epsilon once training starts."""
        print('start play')
        while not ray.get(self.share_storage.get_info.remote("terminate")):
            # pull the latest network weights before every episode
            self.model.set_weights(
                ray.get(self.share_storage.get_info.remote('weights')))
            self.paly_game()
            if ray.get(self.share_storage.get_info.remote("start_training")) == True:
                # multiplicative epsilon decay, applied per episode
                self.epsilon = self.epsilon*(1-self.epsilon_decay)
        if self.test_mode:
            self.epr_writer.close()
        print('end play')

    def paly_game(self):
        """Play one episode, shaping the reward on life loss and periodically
        flushing the transition history to the replay buffer."""
        game_history = GameHistory()
        ep_r = 0
        done = False
        obs = self.game.reset()
        # action 1 is presumably FIRE (serve/start the game) in this Atari
        # env's action set -- TODO confirm against action_list
        self.game.step(1)
        step = 0
        live = 5
        with torch.no_grad():
            while not done:
                fake_done = False
                action_index = self.choose_action(np.array(obs))
                obs_, reward, done, info = self.game.step(
                    self.action_list[action_index])
                if info["ale.lives"] != live:
                    # losing a life is treated as a penalized pseudo-terminal
                    # transition, without actually ending the episode
                    reward = -1
                    live = info["ale.lives"]
                    self.game.step(1)
                    fake_done = True
                if not self.test_mode:
                    game_history.save_transition(
                        np.array(obs), action_index, reward, np.array(obs_), fake_done)
                    # flush either at episode end or every update_memory_iter steps
                    if done or step % self.update_memory_iter == 0:
                        self.replay_buffer.store_memory.remote(game_history)
                        game_history.clear_memory()
                obs = obs_
                ep_r += reward
                step += 1
        if self.test_mode:
            print(self.palyed_game, ep_r, self.epsilon)
            self.epr_writer.write(str(ep_r)+'\n')
            self.epr_writer.flush()
            self.palyed_game += 1

    def choose_action(self, obs):
        """Epsilon-greedy action selection; always greedy in test mode.

        ``obs`` is an HWC observation array that is permuted to CHW and given
        a batch dimension before the forward pass.
        """
        if self.test_mode:
            obs_input = torch.FloatTensor(
                obs).cuda().permute(2, 0, 1).unsqueeze(0)
            action_index = np.argmax(self.model(obs_input).cpu().numpy()[0])
            return action_index
        if random.random() > self.epsilon:
            # exploit: pick the argmax Q-value action
            obs_input = torch.FloatTensor(
                obs).cuda().permute(2, 0, 1).unsqueeze(0)
            action_index = np.argmax(self.model(obs_input).cpu().numpy()[0])
        else:
            # explore: uniform random action
            action_index = random.randint(0, len(self.action_list)-1)
        return action_index


class GameHistory:
    """Simple container accumulating (obs, action, reward, next_obs, done)
    transitions until they are flushed to the replay buffer."""

    def __init__(self) -> None:
        self.trans_history = []

    def save_transition(self, obs, a, r, obs_, done):
        self.trans_history.append([obs, a, r, obs_, done])

    def clear_memory(self):
        self.trans_history = []
nilq/baby-python
python
# Generated by Django 3.0.9 on 2020-08-17 14:55

# NOTE(review): this migration adds per-language (en/hr/it) variants of
# django-filer's File and Image text fields. The *_en/*_hr/*_it naming
# pattern looks like what django-modeltranslation generates -- confirm
# before editing this generated file by hand.

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('filer', '0011_auto_20190418_0137'),
    ]

    operations = [
        migrations.AddField(
            model_name='file',
            name='description_en',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='file',
            name='description_hr',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='file',
            name='description_it',
            field=models.TextField(blank=True, null=True, verbose_name='description'),
        ),
        migrations.AddField(
            model_name='file',
            name='name_en',
            field=models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='name'),
        ),
        migrations.AddField(
            model_name='file',
            name='name_hr',
            field=models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='name'),
        ),
        migrations.AddField(
            model_name='file',
            name='name_it',
            field=models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='name'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_alt_text_en',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default alt text'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_alt_text_hr',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default alt text'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_alt_text_it',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default alt text'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_caption_en',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default caption'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_caption_hr',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default caption'),
        ),
        migrations.AddField(
            model_name='image',
            name='default_caption_it',
            field=models.CharField(blank=True, max_length=255, null=True, verbose_name='default caption'),
        ),
    ]
nilq/baby-python
python
import unittest
import test, app
import nltk
from nltk.tokenize import word_tokenize
import json


class TestGetRAD(unittest.TestCase):
    """Unit tests for the word classifier and the RAD value lookup."""

    def test_word_classifier(self):
        """The POS-tagged token 'coughed' should stem to 'cough'."""
        keyword = 'I coughed a lot and vomited. I also have a headache.'
        words = word_tokenize(keyword)
        # nltk.pos_tag returns (token, tag) pairs; index 1 is 'coughed'
        tag_block = nltk.pos_tag(words)
        print(tag_block)
        # word_classify(tag, word) returns the stemmed word
        t = test.word_classify(tag_block[1][1], tag_block[1][0])
        self.assertEqual(t, 'cough')

    def test_return_value(self):
        """get_rad_value returns 0 for non-symptoms and (0, 1) for symptoms."""
        # returns 0 if the argument is not a symptom word
        r = test.get_rad_value('happy')
        self.assertEqual(r, 0)
        # returns a RAD value if the argument is a symptom word in the dataset
        r = test.get_rad_value('fever')
        self.assertTrue(0 < r < 1)


class TestSteps(unittest.TestCase):
    """Integration tests for the /step1 and /step3 HTTP endpoints.

    Cleanups vs. the original: removed no-op setUpClass/tearDownClass/
    tearDown fixtures, the mutable class attribute shared between tests,
    and a local variable that shadowed the builtin ``str``.
    """

    def setUp(self):
        # create a test client and propagate exceptions to it
        self.app = app.app.test_client()
        self.app.testing = True

    def test_step1(self):
        """/step1 extracts the symptom words from a free-text sentence."""
        symptom_string = 'I have a headache and cough.'
        res = self.app.get('/step1?symptom=' + symptom_string)
        # assert the response data
        self.assertEqual(res.data, b'{"symptoms":["headache","cough"]}\n')

    def test_step3(self):
        """/step3 returns a positive RAD value for every requested symptom."""
        symptom_list = ["headache", "cough"]
        # comma-separated query with a trailing comma, matching the format
        # the endpoint accepts (same output as the original concat loop)
        query = ''.join(s + ',' for s in symptom_list)
        res = self.app.get('/step3?symptom=' + query)
        rad_value = json.loads(res.data)
        # the endpoint double-encodes: 'rad' holds a list with one JSON string
        rad_value = json.loads(rad_value['rad'][0])
        for s in rad_value.keys():
            # assert the RAD value of each symptom exists
            self.assertTrue(rad_value[s] > 0)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python