text stringlengths 38 1.54M |
|---|
from datetime import datetime
import requests

BASEURL = "https://api.untappd.com/v4/user/beers/"


@service
def getdata_untappd(
    entity_id=None,
    user=None,
    sort="date",
    limit="25",
    unit_of_measurement="beers",
    icon="mdi:untappd",
):
    """Fetch a user's beer list from the Untappd API and store it on a sensor.

    Parameters:
        entity_id: Home Assistant entity to write the result to (required).
        user: Untappd user name to query (required).
        sort: Untappd sort order key (see state_match below for known values).
        limit: maximum number of beers to request (kept as a string, as the
            query string expects).
        unit_of_measurement, icon: presentation attributes for the sensor.
    """
    if entity_id is None:
        log.error("No Entity ID Provided")
        return
    if user is None:
        log.error("No User Provided")
        return
    client_id = get_config("clientid")
    client_secret = get_config("clientsecret")
    # Abort early when credentials are missing; concatenating None into the
    # URL below would otherwise raise a TypeError.
    if client_id is None or client_secret is None:
        return
    url = (
        BASEURL
        + user
        + "?client_id="
        + client_id
        + "&client_secret="
        + client_secret
        + "&sort="
        + sort
        + "&limit="
        + limit
    )
    # requests is blocking, so run it in an executor thread per pyscript docs.
    r = task.executor(requests.get, url)
    rd = r.json()
    beer_data = rd["response"]["beers"]["items"]
    data = []
    attributes = {}
    for beer in beer_data:
        # Untappd timestamps look like "Sat, 04 Dec 2021 19:00:00 +0000".
        date_firstcheckin = datetime.strptime(
            beer["first_created_at"], "%a, %d %b %Y %H:%M:%S %z"
        )
        date_recentcheckin = datetime.strptime(
            beer["recent_created_at"], "%a, %d %b %Y %H:%M:%S %z"
        )
        data.append(
            {
                "beer_name": beer["beer"]["beer_name"],
                "beer_style": beer["beer"]["beer_style"],
                "beer_abv": beer["beer"]["beer_abv"],
                "beer_ibu": beer["beer"]["beer_ibu"],
                "beer_rating": beer["beer"]["rating_score"],
                "brewery": beer["brewery"]["brewery_name"],
                "brewery_country": beer["brewery"]["country_name"],
                "rating": beer["rating_score"],
                "count": beer["count"],
                "first_checkin": date_firstcheckin.strftime("%a %-d %b %Y"),
                "recent_checkin": date_recentcheckin.strftime("%a %-d %b %Y"),
            }
        )
    if unit_of_measurement:
        attributes["unit_of_measurement"] = unit_of_measurement
    # Map the API sort key to a human-readable label for the friendly name.
    state_match = {
        "date": "Recent",
        "checkin": "Most Checked In",
        "highest_rated_you": "Highest Rated",
        "lowest_rated_you": "Lowest Rated",
    }
    stype = state_match.get(sort)
    attributes["friendly_name"] = "Untappd: {} Beers - {}".format(stype, user)
    if icon:
        attributes["icon"] = icon
    attributes["data"] = data
    attributes["sort"] = stype
    attributes["user"] = user
    state.set(entity_id, value=len(beer_data), new_attributes=attributes)
def get_config(name):
    """Return *name* from the Pyscript app configuration, or None.

    Logs an error when the key is absent so misconfiguration is visible
    in the Home Assistant log.
    """
    value = pyscript.app_config.get(name)
    if value is None:
        log.error(
            '"%s" is required parameter but not defined in Pyscript configuration for application'
            % name
        )
    return value
# Pyscript startup and app reload
# https://hacs-pyscript.readthedocs.io/en/latest/reference.html?highlight=load#time-trigger
@time_trigger("startup")
def load():
    """Validate required configuration when Pyscript starts or reloads."""
    # Plain string: the original used an f-string with no placeholders.
    log.info("app has started")
    # Check required configuration; get_config logs an error when missing.
    get_config("clientid")
    get_config("clientsecret")
|
import matplotlib.pyplot as plt
import numpy as np

# Sample data: y = 2x sampled at 11 points over [0, 5].
x = np.linspace(0, 5, 11)
y = x * 2

# Single full-size plot.
plt.plot(x, y)
plt.show()

# BUG FIX: the original called plt.show() between subplot() calls, which
# finishes the current figure, so the red and blue panels never appeared
# side by side.  Build the whole 1x2 grid first, then show it once.
plt.subplot(1, 2, 1)
plt.plot(x, y, 'r')
plt.subplot(1, 2, 2)
plt.plot(x, y, 'b')
plt.show()
# System includes
import os
import sys
import types
import cPickle
import pickle
from .. import column_base_backend
class PickleBackend(column_base_backend.ColumnBaseBackend):
    """Column backend that persists column data with cPickle.

    NOTE: this module is Python 2 (cPickle, types.StringTypes).
    """

    def save_to(self, obj, filename):
        """Pickle *obj* to *filename*, a path string or an open file object."""
        if isinstance(filename, types.StringTypes):
            # Context manager ensures the handle is closed (the original
            # leaked it).
            with open(filename, "wb") as fileobj:
                cPickle.dump(obj, fileobj)
        else:
            cPickle.dump(obj, filename)

    def load_from(self, obj, filename):
        """Load pickled column data and insert every item into *obj*.

        BUG FIX: the original discarded the result of cPickle.load() when
        *filename* was a path string, so *obj* was never populated in that
        branch.
        """
        if isinstance(filename, types.StringTypes):
            with open(filename, "rb") as fileobj:
                cb = cPickle.load(fileobj)
        else:
            cb = cPickle.load(filename)
        obj.clear()
        for item in cb.items():
            obj.insert(item)
        cb.clear()
|
import pickle
import constants
from data_utils import *
from evaluate.bc5 import evaluate_bc5
from models.model_cnn import CnnModel
from dataset import Dataset
def main():
    """Build the BC5 datasets, train the CNN model, and evaluate it.

    Side effects: writes pickled train/test Dataset objects under
    constants.PICKLE_DATA and prints the BC5 evaluation result.
    """
    print('Build data')
    # load vocabs
    vocab_words = load_vocab(constants.ALL_WORDS)
    vocab_poses = load_vocab(constants.ALL_POSES)
    vocab_depends = load_vocab(constants.ALL_DEPENDS)
    train = Dataset('data/raw_data/norm_corenlp_sdp_data.train.txt', vocab_words=vocab_words, vocab_poses=vocab_poses, vocab_depends=vocab_depends)
    # Context managers so the pickle files are flushed and closed even on
    # error (the original passed bare open() handles that were never closed).
    with open(constants.PICKLE_DATA + 'train.pickle', 'wb') as handle:
        pickle.dump(train, handle, pickle.HIGHEST_PROTOCOL)
    test = Dataset('data/raw_data/norm_corenlp_sdp_data.test.txt', vocab_words=vocab_words, vocab_poses=vocab_poses, vocab_depends=vocab_depends)
    with open(constants.PICKLE_DATA + 'test.pickle', 'wb') as handle:
        pickle.dump(test, handle, pickle.HIGHEST_PROTOCOL)
    # NOTE(review): validating on the test set leaks test data into early
    # stopping — confirm this is intentional.
    validation = test
    # get pre trained embeddings
    embeddings = get_trimmed_w2v_vectors(constants.TRIMMED_W2V)
    model = CnnModel(
        model_name=constants.MODEL_NAMES.format('cnn', constants.JOB_IDENTITY),
        embeddings=embeddings,
        batch_size=128
    )
    model.build()
    model.load_data(train=train, validation=validation)
    model.run_train(epochs=constants.EPOCHS, early_stopping=constants.EARLY_STOPPING, patience=constants.PATIENCE)
    # Collect pairs predicted positive (class 0), grouped by abstract id and
    # de-duplicated, in the shape evaluate_bc5 expects.
    answer = {}
    identities = test.identities
    y_pred = model.predict(test)
    for i, pred in enumerate(y_pred):
        if pred == 0:
            doc_id, pair = identities[i][0], identities[i][1]
            bucket = answer.setdefault(doc_id, [])
            if pair not in bucket:
                bucket.append(pair)
    print(
        'result: abstract: ', evaluate_bc5(answer)
    )


if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ******************************************************************************
# *
# * Copyright (C) 2015 Kiran Karra <kiran.karra@gmail.com>
# *
# * This program is free software: you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation, either version 3 of the License, or
# * (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program. If not, see <http://www.gnu.org/licenses/>.
# ******************************************************************************
import math
import numpy as np
from scipy.stats import mvn # contains inverse CDF of Multivariate Gaussian
from scipy.stats import norm # contains PDF of Gaussian
from scipy.stats import t
from statsmodels.sandbox.distributions import multivariate as mvt
"""
copulacdf.py contains routines which provide Copula CDF values
"""
def copulacdf(family, u, *args):
    """ Generates values of a requested copula family
    Inputs:
    u -- u is an N-by-P matrix of values in [0,1], representing N
         points in the P-dimensional unit hypercube.
    rho -- a P-by-P correlation matrix, the first argument required for the Gaussian copula
    alpha -- a scalar argument describing the dependency for Frank, Gumbel, and Clayton copula's
    Outputs:
    y -- the value of the Gaussian Copula
    """
    p = u.shape[1]
    num_var_args = len(args)
    family_lc = family.lower()
    rho_expected_shape = (p, p)

    # Elliptical families: validate the correlation matrix, then dispatch.
    if family_lc == 'gaussian':
        if num_var_args != 1:
            raise ValueError("Gaussian family requires one additional argument -- rho (correlation matrix) [P x P]")
        rho = args[0]
        if type(rho) != np.ndarray or rho.shape != rho_expected_shape:
            raise ValueError("Gaussian family requires rho to be of type numpy.ndarray with shape=[P x P]")
        return _gaussian(u, rho)
    if family_lc == 't':
        if num_var_args != 2:
            raise ValueError(
                "T family requires two additional arguments -- rho (correlation matrix) [P x P] and nu [scalar]")
        rho, nu = args
        if type(rho) != np.ndarray or rho.shape != rho_expected_shape:
            raise ValueError("T family requires rho to be of type numpy.ndarray with shape=[P x P]")
        return _t(u, rho, nu)

    # Archimedean families all take a single scalar alpha.
    archimedean = {'clayton': _clayton, 'frank': _frank, 'gumbel': _gumbel}
    if family_lc not in archimedean:
        raise ValueError("Unrecognized family of copula")
    if num_var_args != 1:
        raise ValueError(family_lc.capitalize() +
                         " family requires one additional argument -- alpha [scalar]")
    return archimedean[family_lc](u, args[0])
def _gaussian(u, rho):
    """ Generates values of the Gaussian copula
    Inputs:
    u -- u is an N-by-P matrix of values in [0,1], representing N
         points in the P-dimensional unit hypercube.
    rho -- a P-by-P correlation matrix.
    Outputs:
    y -- the value of the Gaussian Copula
    """
    n = u.shape[0]
    p = u.shape[1]
    # Lower integration bound of -10 standard deviations approximates -inf.
    lo = np.full((1, p), -10)
    # ppf(q, loc=0, scale=1) is the replacement for Matlab's norminv.
    hi = norm.ppf(u)
    mu = np.zeros(p)
    y = np.zeros(n)
    # mvn.mvnun (replacement for Matlab's mvncdf) is not vectorized, so
    # evaluate one row at a time.
    for ii in np.arange(n):
        # BUG FIX: the original assigned the CDF value to "p", clobbering the
        # dimension variable; use a distinct name instead.
        cdf_val, _ = mvn.mvnun(lo, hi[ii, :], mu, rho)
        y[ii] = cdf_val
    return y
def _t(u, rho, nu):
    """ Generates values of the T copula
    Inputs:
    u -- u is an N-by-P matrix of values in [0,1], representing N
         points in the P-dimensional unit hypercube.
    rho -- a P-by-P correlation matrix.
    nu -- degrees of freedom for T Copula
    Outputs:
    y -- the value of the T Copula
    """
    n, p = u.shape
    lo_integration_val = -40  # more accuracy, but slower :/
    lo = np.full((1, p), lo_integration_val)
    hi = t.ppf(u, nu)
    y = np.zeros(n)
    for row in np.arange(n):
        # Clamp each upper limit to -40 so it never drops below the lower
        # integration bound.
        upper = np.maximum(hi[row, :], -40)
        y[row] = mvt.mvstdtprob(lo[0], upper, rho, nu)
    return y
def _clayton(u, alpha):
# C(u1,u2) = (u1^(-alpha) + u2^(-alpha) - 1)^(-1/alpha)
if (alpha < 0):
raise ValueError("Clayton family -- invalid alpha argument. alpha must be >=0")
elif (alpha == 0):
y = np.prod(u, 1)
else:
tmp1 = np.power(u, -alpha)
tmp2 = np.sum(tmp1, 1) - 1
y = np.power(tmp2, -1.0 / alpha)
return y
def _frank(u, alpha):
# C(u1,u2) = -(1/alpha)*log(1 + (exp(-alpha*u1)-1)*(exp(-alpha*u2)-1)/(exp(-alpha)-1))
if (alpha == 0):
y = np.prod(u, 1)
else:
tmp1 = np.exp(-alpha * np.sum(u, 1)) - np.sum(np.exp(-alpha * u), 1)
y = -np.log((np.exp(-alpha) + tmp1) / np.expm1(-alpha)) / alpha;
return y
def _gumbel(u, alpha):
# C(u1,u2) = exp(-( (-log(u1))^alpha + (-log(u2))^alpha )^(1/alpha))
n = u.shape[0]
p = u.shape[1]
if (alpha < 1):
raise ValueError("Gumbel family -- invalid alpha argument. alpha must be >=1")
elif (alpha == 1):
y = np.prod(u, 1)
else:
# TODO: NaN checking like Matlab here would be nice :)
exparg = np.zeros(n)
for ii in np.arange(p):
tmp1 = np.power(-1 * np.log(u[:, ii]), alpha)
exparg = exparg + tmp1
exparg = np.power(exparg, 1.0 / alpha)
y = np.exp(-1 * exparg)
return y
|
# NOTE: Python 2 script (uses the print statement); requires a running ROS
# master and a connected Baxter robot.
import rospy
# baxter_interface - Baxter Python API
import baxter_interface
# Register this script as a ROS node so it can talk to the robot.
rospy.init_node('Hello_Baxter')
# Handle to the robot's right arm.
limb = baxter_interface.Limb('right')
# get the right limb's current joint angles
angles = limb.joint_angles()
# print the current joint angles
print angles
|
# ///*** get some efficiency plots for the MIDESUM set of plots ***///
# NOTE: Python 2 + PyROOT script (uses print statements).
import ROOT
import os
effRebin=1
ROOT.gROOT.SetBatch(True)  # run ROOT headless; no graphics windows
ene=1.05  # beam energy in GeV; selects which message is printed below
det='HPS-EngRun2015-Nominal-v5-0'
#runs=['hps_007809','hps_007807','hps_007808','tritrig_'+det,'wab_'+det]
runs=['hps_005772','wab_'+det,'tritrig-NOSUMCUT_'+det,'wab-beam-tri-zipFix_'+det]
legs=['run5772','wab','tritrig-NOSUMCUT','wab-beam-tri']
col=[1,2,4,3,5]
#postfix='_engrun2015_pass6_useGBL_ECalMatch.root'
postfix='_engrun2015_pass6_TopBot_LeftRight.root'
doesum=False
#histName="h_Ecl_"
xTitles=["Cluster Energy (GeV)","Cluster X Position (mm)","Cluster Y Position (mm)"]
histNames=["h_XvsY_"]
cuts="cop180_"
#cuts="cop180_Holly_"
#cuts="cop180_midESum_"
#cuts="cop180_SuperFid"
# Histogram-name fragments; "found_X" marks that particle X was reconstructed.
posSide="pos_side_found_ele"
eleSide="ele_side_found_pos"
posSideFound="pos_side_found_ele_found_pos"
eleSideFound="ele_side_found_pos_found_ele"
outFile=ROOT.TFile(cuts+"TwoD-EfficiencyResults.root","RECREATE")
for kk in range(0,len(histNames)) :
    histName=histNames[kk]
    xTitle=xTitles[kk]
    effESum=[]
    effEle=[]
    effPos=[]
    for run in runs :
        outpre="plots-"+str(ene)+"GeV/"+str(run)
        ROOT.gStyle.SetOptStat(0)
        ROOT.gStyle.SetOptTitle(0)
        #histFile=ROOT.TFile("OutputHistograms/Data/hps_008087_hpsrun2016_pass0_useGBL_ECalMatch.root")
        # Input files may live under Data/ or MC/; try both.
        # NOTE(review): the two energy branches below are identical apart from
        # the printed message — confirm whether 2.3 GeV should load different
        # paths/postfix.
        if ene==1.05 :
            print 'Loading 1.05GeV File'
            if os.path.isfile("OutputHistograms/Data/"+run+postfix):
                histFile=ROOT.TFile("OutputHistograms/Data/"+run+postfix)
            elif os.path.isfile("OutputHistograms/MC/"+run+postfix):
                histFile=ROOT.TFile("OutputHistograms/MC/"+run+postfix)
            else:
                print "NO FILE FOUND!!!!! "+"OutputHistograms/MC/"+run+postfix
        else :
            print 'Loading 2.3GeV File'
            if os.path.isfile("OutputHistograms/Data/"+run+postfix):
                histFile=ROOT.TFile("OutputHistograms/Data/"+run+postfix)
            elif os.path.isfile("OutputHistograms/MC/"+run+postfix):
                histFile=ROOT.TFile("OutputHistograms/MC/"+run+postfix)
            else:
                print "NO FILE FOUND!!!!! "+"OutputHistograms/MC/"+run+postfix
        #get some histograms
        print histName+cuts+posSide
        pos_side_found_ele=histFile.Get(histName+cuts+posSide)
        print histName+cuts+posSideFound
        pos_side_found_ele_found_pos=histFile.Get(histName+cuts+posSideFound)
        print histName+cuts+eleSide
        ele_side_found_pos=histFile.Get(histName+cuts+eleSide)
        print histName+cuts+eleSideFound
        ele_side_found_pos_found_ele=histFile.Get(histName+cuts+eleSideFound)
        ele_side_found_pos.Print("v")
        #############
        # Positron-side efficiency: (found both) / (found electron), computed
        # in place via TH1::Divide; Sumw2 enables weighted-error tracking.
        denom = pos_side_found_ele.Clone()
        denom.Sumw2()
        numer= pos_side_found_ele_found_pos.Clone();
        numer.Sumw2();
        # eff=ROOT.TH2D()
        numer.Divide(denom)
        numer.SetTitle(histName+run+"_posEff")
        numer.SetName(histName+run+"_posEff")
        outFile.cd()
        numer.Write()
        ct = ROOT.TCanvas("ct","transparent pad",200,10,700,500);
        numer.Draw("colz");
        ct.SaveAs(outpre+histName+cuts+"-positron-efficiency.pdf");
        ###############################
        # Electron-side efficiency, same procedure with sides swapped.
        denom =ele_side_found_pos.Clone();
        denom.Sumw2();
        numer=ele_side_found_pos_found_ele.Clone();
        numer.Sumw2();
        # eff=ROOT.TH2D()
        numer.Divide(denom)
        numer.SetTitle(histName+run+"_eleEff")
        numer.SetName(histName+run+"_eleEff")
        outFile.cd()
        numer.Write()
        ct = ROOT.TCanvas("ct","transparent pad",200,10,700,500);
        numer.Draw("colz");
        ct.SaveAs(outpre+histName+cuts+"-electron-efficiency.pdf");
        ###############################
outFile.Close()
|
# Toy script: build a list of x fives and compare its length against y.
values = []  # renamed from "list" to avoid shadowing the builtin
x = 6
y = 7
# The original reused "x" as the loop variable ("for x in range(x)"),
# clobbering the count; use a throwaway name instead.
for _ in range(x):
    values.append(5)
print(values)
if len(values) != y:
    print("Got em")
else:
    print("No")
import os, sys, transaction, datetime
from ..security import (hashpassword)
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Movie,
Base,
User,
Group,
Profile,
)
def usage(argv):
    """Print a usage message for this console script and exit with status 1."""
    cmd = os.path.basename(argv[0])
    message = ('usage: %s <config_uri> [var=value]\n'
               '(example: "%s development.ini")' % (cmd, cmd))
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Initialize the database: recreate the schema and seed groups/users."""
    if len(argv) < 2:
        usage(argv)
    config_uri = argv[1]
    options = parse_vars(argv[2:])
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Start from a clean schema on every run.
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)

    def _seed_user(username, email):
        # Both seed accounts share every field except username/email.
        # NOTE(review): the password is hard-coded in source — consider
        # moving it to configuration.
        return User(
            username=username,
            email=email,
            password=hashpassword('SKY_CANDY'),
            confirmed=False,
            confirmation_string="foobar",
            terms_accepted=True,
            solicitation=True,
            entry_date=datetime.datetime.today(),
        )

    with transaction.manager:
        # Create Basic User Groups
        admin_group = Group(groupname='admin')
        user_group = Group(groupname='user')
        # Create Admin and Regular Users
        admin_user = _seed_user('admin', 'julien.altenburg@gmail.com')
        regular_user = _seed_user('DanielAlt', 'daniel.j.altenburg@gmail.com')
        # Add Users To Groups
        admin_user.addGroup(user_group)
        admin_user.addGroup(admin_group)
        regular_user.addGroup(user_group)
        # Add Users to Database
        DBSession.add(admin_user)
        DBSession.add(regular_user)
from petisco.base.domain.errors.domain_error import DomainError


class NotAllowed(DomainError):
    """Domain error raised when an action is not permitted."""

    @classmethod
    def get_specify_detail(cls) -> str:
        # Human-readable detail used by DomainError when building its message.
        return "Not Allowed"
|
import csv
import numpy as np
import matplotlib.pyplot as plt
from prettytable import PrettyTable
from datetime import datetime as date
# load data from csv files
def load():
    """Load scheduling input CSVs and build constraint/preference matrices.

    Returns a 4-tuple:
        slot_presentation: slot x presentation matrix; -1 marks slots a
            presentation cannot use.
        presentation_presentation: 1 where two presentations share an examiner.
        presentation_supervisor: presentation x supervisor assignment matrix.
        supervisor_preference: per-supervisor soft-constraint values
            (columns 0-2 filled here; remaining columns left zeroed).
    """
    slot_no = 300
    supervisor_no = 47
    presentation_no = 118
    preference_no = 3
    presentation_supervisor = np.zeros([presentation_no, supervisor_no], dtype=np.int8)
    supervisor_slot = np.zeros([supervisor_no, slot_no], dtype=np.int8)
    supervisor_preference = np.zeros([supervisor_no, 2 * preference_no], dtype=np.int8)

    def supervisor_index(cell):
        # Supervisor ids look like "S0__": strip the 2-char prefix, 0-base.
        return int(cell[2:]) - 1

    # read supExaAssign.csv
    # BUG FIX: raw strings for all Windows-style paths so the backslashes are
    # never treated as escape sequences ('\S', '\H' etc. raise
    # DeprecationWarning today and will become syntax errors).
    with open(r'input_files\SupExaAssign.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            i = int(row[0][1:]) - 1  # only underscores in P___ will be considered
            for col in range(1, 4):
                presentation_supervisor[i][supervisor_index(row[col])] = 1
    presentation_presentation = np.dot(presentation_supervisor, presentation_supervisor.transpose())
    # presentations supervised by same examiners are marked with 1
    presentation_presentation[presentation_presentation >= 1] = 1
    np.fill_diagonal(presentation_presentation, 0)  # mark diagonal with 0 so penalty points can be calculated correctly
    # read HC04.csv (staff unavailability)
    with open(r'input_files\HC04.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for row in csv_reader:
            i = supervisor_index(row[0])
            j = [int(_) - 1 for _ in row[1:]]
            supervisor_slot[i][j] = 1
    slot_presentation = np.dot(supervisor_slot.transpose(), presentation_supervisor.transpose())
    slot_presentation[slot_presentation >= 1] = -1  # unavailable slots for presentation are marked with -1
    # read HC03.csv (venue unavailability)
    with open(r'input_files\HC03.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for row in csv_reader:
            i = [int(_) - 1 for _ in row[1:]]
            slot_presentation[i, :] = -1  # unavailable slots for presentation are marked with -1
    # read SC01.csv (consecutive presentations)
    with open(r'input_files\SC01.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for row in csv_reader:
            supervisor_preference[supervisor_index(row[0])][0] = int(row[1])
    # read SC02.csv (number of days)
    with open(r'input_files\SC02.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for row in csv_reader:
            supervisor_preference[supervisor_index(row[0])][1] = int(row[1])
    # read SC03.csv (change of venue)
    with open(r'input_files\SC03.csv') as file:
        csv_reader = csv.reader(file, delimiter=',')
        for row in csv_reader:
            supervisor_preference[supervisor_index(row[0])][2] = 1 if row[1] == "yes" else 0
    return slot_presentation, presentation_presentation, presentation_supervisor, supervisor_preference
# write result to csv file with timestamp
def write(slot_presentation, supervisor_preference, constraints_count, plot_data):
    """Report scheduling results: progress graph, schedule table, CSV file.

    Args:
        slot_presentation: slot x presentation matrix; 1 marks the scheduled
            presentation for a slot.
        supervisor_preference: per-supervisor preference/statistics columns.
        constraints_count: (final penalty, hard violations, soft violations).
        plot_data: penalty value per iteration for the progress graph.
    """
    timestamp = date.now().strftime("[%Y-%m-%d %H-%M-%S]")
    # plot graph
    title = (f"Improvement of Presentation Scheduling over Iterations\n"
             f"[Hard Constraints Violated:] {constraints_count[1]} "
             f"[Soft Constraints Violated:] {constraints_count[2]}\n"
             f"[Final Penalty Points:] {constraints_count[0]}")
    plt.title(title)
    plt.xlabel("Number of Iterations")
    plt.ylabel("Penalty Points")
    plt.axis([0, len(plot_data), 0, max(plot_data)])
    plt.plot(plot_data, "r--")
    plt.grid(True)
    plt.ioff()
    graph_name = f"graph {timestamp}"
    # BUG FIX: save the figure *before* plt.show(); show() hands the figure to
    # the GUI and tears it down, so saving afterwards wrote a blank image.
    plt.savefig(graph_name)
    plt.show()
    # draw schedule
    venue_no = 4
    time_slot_no = 15
    day_slot_no = venue_no * time_slot_no
    day_no = 5
    slot_no = day_slot_no * day_no
    venues = ["Viva Room", "Meeting Room", "Interaction Room", "BJIM"]
    days = ["Mon", "Tues", "Wed", "Thu", "Fri"]
    schedule = PrettyTable()
    schedule.field_names = ["Day", "Venue",
                            "0900-0930", "0930-1000", "1000-1030",
                            "1030-1100", "1100-1130", "1130-1200",
                            "1200-1230", "1230-1300", "1400-1430",
                            "1430-1500", "1500-1530", "1530-1600",
                            "1600-1630", "1630-1700", "1700-1730"]
    venue = 0
    day = 0
    # Each venue occupies time_slot_no consecutive slots; one table row per
    # venue, cycling through the venues of each day.
    for first_slot in range(0, slot_no, time_slot_no):
        row = []
        if venue == 0:
            row.append(days[day])
        else:
            row.append("")
        row.append(venues[venue])
        for slot in range(first_slot, first_slot + time_slot_no):
            presentation = np.where(slot_presentation[slot] == 1)[0]
            if len(presentation) == 0:
                row.append("")
            else:
                presentation = presentation[0] + 1
                row.append("P" + str(presentation))
        schedule.add_row(row)
        venue += 1
        if venue == venue_no:
            venue = 0
            day += 1
            schedule.add_row([""] * (2 + time_slot_no))
    print("\n", schedule, "\n")
    # print supervisor-related data
    # NOTE(review): columns 3-5 are assumed to be filled by the scheduler
    # before this call (load() only populates columns 0-2) — confirm.
    supervisor_no = supervisor_preference.shape[0]
    for supervisor in range(supervisor_no):
        venue_preference = "No" if supervisor_preference[supervisor][2] else "Yes"
        print(f"[Supervisor S{str(supervisor + 1).zfill(3)}] "
              f"[No. of Continuous Presentations: {supervisor_preference[supervisor][3]}] "
              f"[Day Preference: {supervisor_preference[supervisor][1]}] "
              f"[Days: {supervisor_preference[supervisor][4]}] "
              f"[Venue Change Preference: {venue_preference}] "
              f"[Venue Changes: {supervisor_preference[supervisor][5]}]")
    # write result data to csv file with timestamp
    filename = f"result {timestamp}.csv"
    with open(filename, 'w', newline='') as file:
        writer = csv.writer(file)
        for slot in range(slot_presentation.shape[0]):
            presentation = np.where(slot_presentation[slot] == 1)[0]
            if len(presentation) == 0:  # empty if no presentation is found for the slot
                writer.writerow(["null", ""])
            else:
                presentation = presentation[0] + 1  # Access x in array([x])
                writer.writerow(["P" + str(presentation), ""])
|
import datetime
from sqlalchemy import String, Integer, Column, Text, Date
from sqlalchemy.orm import relationship
from models.BASE import BASE
from utils.db_connection import DbSession
from sqlalchemy.sql import func
class AccUser (BASE):
    """SQLAlchemy model for the ACC_USER account table."""
    __tablename__ = "ACC_USER"
    id = Column('ID', Integer, primary_key=True, autoincrement='ignore_fk' )
    username = Column('USERNAME', String(32))
    # NOTE(review): 32 chars suggests an MD5-style hash is stored here —
    # confirm how passwords are hashed before reuse.
    password = Column('PASSWORD', String(32))
    realname = Column('REALNAME', String(32))
    nickname = Column('NICKNAME', String(32))
    avatar = Column('AVATAR', String(20))
    signature = Column('SIGNATURE', Text)
    wx_id = Column('WX_ID', String(30))
    sex = Column('SEX', Integer, default=0)
    birthday = Column('BIRTHDAY', Date, default=datetime.date(2016, 10, 10))
    mobile = Column('MOBILE', String(15))
    status = Column('STATUS', Integer, default=1)

    def __init__(self, username, password, **kwargs):
        """Create a user; optional profile fields arrive via **kwargs.

        NOTE(review): kwargs.get(...) yields None for absent keys, which
        explicitly assigns None to columns (sex/status/birthday) that declare
        Column defaults — confirm the defaults still apply at insert time.
        """
        self.username = username
        self.password = password
        self.realname = kwargs.get('realname')
        self.nickname = kwargs.get('nickname')
        self.avatar = kwargs.get('avatar')
        self.signature = kwargs.get('signature')
        self.sex = kwargs.get('sex')
        self.wx_id = kwargs.get('wx_id')
        self.birthday = kwargs.get('birthday')
        self.mobile = kwargs.get('mobile')
        self.status = kwargs.get('status')

    def _as_dict(self):
        """Serialize the row to a plain dict (birthday rendered as str).

        NOTE(review): the dict includes the raw 'password' column — strip it
        before returning this to clients.
        """
        res = {}
        for attr in ['id', 'username', 'password', 'realname', 'nickname', 'avatar', 'signature', 'wx_id', 'sex',
                     'mobile', 'status']:
            res[attr] = getattr(self, attr)
        res['birthday'] = str(self.birthday)
        return res
if __name__ == '__main__':
    # Quick manual check: fetch the single user row and dump it as a dict.
    with DbSession() as db_session:
        record = db_session.query(AccUser).one()
        print(record._as_dict())
#!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2020-2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import os
import sys
from _core.constants import HostDrivenTestType
from _core.constants import TestExecType
from _core.constants import ModeType
from _core.constants import DeviceLabelType
from _core.driver.drivers_lite import init_remote_server
from _core.exception import DeviceError
from _core.exception import LiteDeviceError
from _core.exception import ParamError
from _core.exception import ReportException
from _core.exception import ExecuteTerminate
from _core.interface import IDriver
from _core.logger import platform_logger
from _core.plugin import Plugin
from _core.testkit.json_parser import JsonParser
from _core.utils import get_config_value
from _core.utils import do_module_kit_setup
from _core.utils import do_module_kit_teardown
from _core.utils import get_filename_extension
from _core.utils import get_file_absolute_path
from _core.utils import get_kit_instances
from _core.utils import check_result_report
from _core.utils import check_mode
from _core.report.suite_reporter import SuiteReporter
LOG = platform_logger("DeviceTest")
# Recognized host-driven test file extensions.
PY_SUFFIX = ".py"
PYD_SUFFIX = ".pyd"


@Plugin(type=Plugin.DRIVER, id=HostDrivenTestType.device_test)
class DeviceTestDriver(IDriver):
    """
    DeviceTest is a Test that runs a host-driven test on given devices.
    """
    # test driver config
    config = None
    result = ""
    error_message = ""
    py_file = ""

    def __init__(self):
        # Filled in by __init_nfs_server__ for lite/ipcamera devices.
        self.linux_host = ""
        self.linux_directory = ""

    def __check_environment__(self, device_options):
        # No environment restrictions for host-driven tests.
        pass

    def __check_config__(self, config=None):
        # Configuration is validated lazily inside __execute__.
        pass

    def __init_nfs_server__(self, request=None):
        # Delegates to the lite-device helper, which populates
        # self.linux_host / self.linux_directory.
        return init_remote_server(self, request)

    def __execute__(self, request):
        """Run the host-driven test described by *request*.

        Resolves config/kits/test list, executes the tests, records any
        recognized failure into self.error_message, and always finalizes
        the result report in the finally block.
        """
        try:
            # set self.config
            self.config = request.config
            self.config.tmp_id = str(request.uuid)
            self.config.tmp_folder = os.path.join(self.config.report_path,
                                                  "temp")
            self.config.devices = request.get_devices()
            # Device-targeted runs must have at least one device attached.
            if request.get("exectype") == TestExecType.device_test and \
                    not self.config.devices:
                LOG.error("no device", error_no="00104")
                raise ParamError("Load Error[00104]", error_no="00104")
            # get source, json config and kits
            if request.get_config_file():
                source = request.get_config_file()
                LOG.debug("Test config file path: %s" % source)
            else:
                source = request.get_source_string()
                LOG.debug("Test String: %s" % source)
            if not source:
                LOG.error("no config file found for '%s'" %
                          request.get_source_file(), error_no="00102")
                raise ParamError("Load Error(00102)", error_no="00102")
            json_config = JsonParser(source)
            kits = get_kit_instances(json_config, request.config.resource_path,
                                     request.config.testcases_path)
            # create tmp folder
            test_name = request.get_module_name()
            tmp_sub_folder = self._create_tmp_folder(request)
            self.result = "%s.xml" % os.path.join(tmp_sub_folder, test_name)
            # set configs keys
            configs = self._set_configs(json_config, kits, request,
                                        tmp_sub_folder)
            # get test list
            test_list = self._get_test_list(json_config, request, source)
            if not test_list:
                raise ParamError("no test list to run")
            self._run_devicetest(configs, test_list)
        except (ReportException, ModuleNotFoundError, ExecuteTerminate,
                SyntaxError, ValueError, AttributeError, TypeError,
                KeyboardInterrupt, ParamError, DeviceError, LiteDeviceError) \
                as exception:
            error_no = getattr(exception, "error_no", "00000")
            LOG.exception(exception, exc_info=False, error_no=error_no)
            self.error_message = exception
        finally:
            self._handle_finally(request)

    def _get_test_list(self, json_config, request, source):
        """Resolve the list of test files to run, filtered by 'testcase'."""
        test_list = get_config_value('py_file', json_config.get_driver(),
                                     is_list=True)
        # A directly supplied .py/.pyd source file overrides the json config.
        if str(request.root.source.source_file).endswith(PYD_SUFFIX) or \
                str(request.root.source.source_file).endswith(PY_SUFFIX):
            test_list = [request.root.source.source_file]
        if not test_list and os.path.exists(source):
            test_list = _get_dict_test_list(os.path.dirname(source))
        # check test list
        testcase = request.get("testcase")
        testcase_list = []
        if testcase:
            testcase_list = str(testcase).split(";")
        checked_test_list = []
        for index, test in enumerate(test_list):
            # Resolve relative entries against resource/testcases paths.
            if not os.path.exists(test):
                try:
                    absolute_file = get_file_absolute_path(test, [
                        self.config.resource_path, self.config.testcases_path])
                except ParamError as error:
                    LOG.error(error, error_no=error.error_no)
                    continue
            else:
                absolute_file = test
            file_name = get_filename_extension(absolute_file)[0]
            if not testcase_list or file_name in testcase_list:
                checked_test_list.append(absolute_file)
            else:
                LOG.info("test '%s' is ignored", absolute_file)
        if checked_test_list:
            LOG.info("test list: {}".format(checked_test_list))
        else:
            LOG.error("no test list found", error_no="00109")
            raise ParamError("Load Error(00109)", error_no="00109")
        return checked_test_list

    def _set_configs(self, json_config, kits, request, tmp_sub_folder):
        """Assemble the configs dict handed to the DeviceTest runner."""
        configs = dict()
        configs["testargs"] = self.config.testargs or {}
        configs["testcases_path"] = self.config.testcases_path or ""
        configs["request"] = request
        configs["test_name"] = request.get_module_name()
        configs["report_path"] = tmp_sub_folder
        configs["execute"] = get_config_value(
            'execute', json_config.get_driver(), False)
        # NOTE(review): kit setup runs once per attached device — confirm it
        # is intended to repeat when multiple devices are present.
        for device in self.config.devices:
            do_module_kit_setup(request, kits)
            if device.label == DeviceLabelType.ipcamera:
                # add extra keys to configs for ipcamera device
                self.__init_nfs_server__(request=request)
                configs["linux_host"] = self.linux_host
                configs["linux_directory"] = self.linux_directory
        configs["kits"] = kits
        return configs

    def _handle_finally(self, request):
        """Always-run cleanup: kit teardown, device close, report check."""
        from xdevice import Scheduler
        # do kit teardown
        do_module_kit_teardown(request)
        # close device connect
        for device in self.config.devices:
            if device.label == DeviceLabelType.ipcamera or device.label == \
                    DeviceLabelType.watch_gt:
                device.close()
            if device.label == DeviceLabelType.phone:
                device.close()
        # check result report
        report_name = request.root.source.test_name if \
            not request.root.source.test_name.startswith("{") \
            else "report"
        module_name = request.get_module_name()
        if Scheduler.mode != ModeType.decc:
            self.result = check_result_report(
                request.config.report_path, self.result, self.error_message,
                report_name, module_name)
        else:
            # decc mode: only regenerate the report when it is missing from
            # the suite reporter's collected results.
            tmp_list = copy.copy(SuiteReporter.get_report_result())
            if self.result not in [report_path for report_path, _ in tmp_list]:
                if not self.error_message:
                    self.error_message = "Case not execute[01205]"
                self.result = check_result_report(
                    request.config.report_path, self.result,
                    self.error_message, report_name, module_name)

    def _create_tmp_folder(self, request):
        """Create (and return) the per-task temp report folder."""
        if request.root.source.source_file.strip():
            folder_name = "task_%s_%s" % (self.config.tmp_id,
                                          request.root.source.test_name)
        else:
            folder_name = "task_%s_report" % self.config.tmp_id
        tmp_sub_folder = os.path.join(self.config.tmp_folder, folder_name)
        os.makedirs(tmp_sub_folder, exist_ok=True)
        return tmp_sub_folder

    def _run_devicetest(self, configs, test_list):
        """Import and launch the _devicetest runner on the resolved tests."""
        from xdevice import Variables
        # insert paths for loading _devicetest module and testcases
        devicetest_module = os.path.join(Variables.modules_dir, "_devicetest")
        if os.path.exists(devicetest_module):
            sys.path.insert(1, devicetest_module)
        if configs["testcases_path"]:
            sys.path.insert(1, configs["testcases_path"])
        # run devicetest
        from _devicetest.devicetest.main import DeviceTest
        device_test = DeviceTest(test_list=test_list, configs=configs,
                                 devices=self.config.devices, log=LOG)
        device_test.run()

    def __result__(self):
        """Return the result report path ('' when the file does not exist)."""
        if check_mode(ModeType.decc):
            return self.result
        return self.result if os.path.exists(self.result) else ""
def _get_dict_test_list(module_path):
test_list = []
for root, _, files in os.walk(module_path):
for _file in files:
if _file.endswith(".py") or _file.endswith(".pyd"):
test_list.append(os.path.join(root, _file))
return test_list
|
# Monoisotopic residue masses (Daltons), keyed by one-letter amino-acid code.
main_dict = {"A": "71.03711",
             "C": "103.00919",
             "D": "115.02694",
             "E": "129.04259",
             "F": "147.06841",
             "G": "57.02146",
             "H": "137.05891",
             "I": "113.08406",
             "K": "128.09496",
             "L": "113.08406",
             "M": "131.04049",
             "N": "114.04293",
             "P": "97.05276",
             "Q": "128.05858",
             "R": "156.10111",
             "S": "87.03203",
             "T": "101.04768",
             "V": "99.06841",
             "W": "186.07931",
             "Y": "163.06333"}

# Read the protein string; the context manager closes the file (the original
# leaked the handle) and the trailing newline is stripped.
with open("rosalind_prtm (1).txt") as my_file:
    stripped_content = my_file.read().rstrip("\n")

count = len(stripped_content)
weight = 0
for item in stripped_content:
    # Direct O(1) dict lookup instead of scanning all twenty keys per
    # residue; unknown characters contribute zero mass, as before.
    weight += float(main_dict.get(item, 0))

# Python 3 print calls (the original used Python 2 print statements).
print(count)
print("The final weight is: %r" % weight)
|
# ../__init__.py
"""This is the main __init__ that gets imported on plugin_load.
===============================================================================
Source Python
Copyright (C) 2012 Source Python Development Team. All rights reserved.
===============================================================================
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License, version 3.0, as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
As a special exception, the Source.Python Team gives you permission
to link the code of this program (as well as its derivative works) to
"Half-Life 2," the "Source Engine," and any Game MODs that run on software
by the Valve Corporation. You must obey the GNU General Public License in
all respects for all other code used. Additionally, the Source.Python
Development Team grants this exception to all derivative works.
"""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Site-Package Imports
# Configobj
from configobj import ConfigObjError
# Source.Python Imports
# Cvars
from cvars import ConVar
# Hooks - These are imported to implement the exceptions and warnings hooks
from hooks.exceptions import except_hooks
from hooks.warnings import warning_hooks
# =============================================================================
# >> LOGGING SETUP
# =============================================================================
# Use try/except in case the logging values are not integers
try:
    # Import the core settings dictionary
    from core.settings import _core_settings
    # Set the logging level
    ConVar('sp_logging_level').set_int(
        int(_core_settings['LOG_SETTINGS']['level']))
    # Set the logging areas
    ConVar('sp_logging_areas').set_int(
        int(_core_settings['LOG_SETTINGS']['areas']))
# Was an exception raised?
except (ValueError, ConfigObjError):
    # Set the logging level to max (5)
    ConVar('sp_logging_level').set_int(5)
    # Set the logging area to include console, SP logs, and main log
    ConVar('sp_logging_areas').set_int(7)
    # Import the _sp_logger
    from loggers import _sp_logger
    # Log a message about the value
    _sp_logger.log_message(
        '[Source.Python] Plugin did not load properly ' +
        'due to the following error:')
    # Re-raise the error so the loader sees the original failure
    raise
# =============================================================================
# >> TRANSLATIONS SETUP
# =============================================================================
# Import the Language Manager
from translations.manager import language_manager
# Set the default language (at this point _core_settings is known to have
# imported successfully, since the except branch above re-raises)
language_manager._register_default_language(
    _core_settings['BASE_SETTINGS']['language'])
# =============================================================================
# >> INITIALIZE SP COMMAND
# =============================================================================
from core.command import _core_command
# =============================================================================
# >> AUTH SETUP
# =============================================================================
# Get the auth providers that should be loaded (whitespace-separated list)
auth_providers = _core_settings['AUTH_SETTINGS']['providers'].split()
# Should any providers be loaded?
if auth_providers:
    # Load the auth providers
    _core_command.call_command('auth', ['load'] + auth_providers)
# =============================================================================
# >> USER_SETTINGS SETUP
# =============================================================================
from commands.client import client_command_manager
from commands.say import say_command_manager
from settings.menu import _player_settings
# Are there any private user settings say commands?
if _core_settings['USER_SETTINGS']['private_say_commands']:
    # Register the private user settings say commands (comma-separated)
    say_command_manager.register_commands(_core_settings[
        'USER_SETTINGS']['private_say_commands'].split(
        ','), _player_settings._private_send_menu)
# Are there any public user settings say commands?
if _core_settings['USER_SETTINGS']['public_say_commands']:
    # Register the public user settings say commands (comma-separated)
    say_command_manager.register_commands(_core_settings[
        'USER_SETTINGS']['public_say_commands'].split(
        ','), _player_settings._send_menu)
# Are there any client user settings commands?
if _core_settings['USER_SETTINGS']['client_commands']:
    # Register the client user settings commands (comma-separated)
    client_command_manager.register_commands(_core_settings[
        'USER_SETTINGS']['client_commands'].split(
        ','), _player_settings._send_menu)
|
'''
1. Pytest is a testing framework which allows us to write test cases using python.
2. The commands below show common ways to invoke pytest from the command line.
'''
# To execute python file by pytest
python -m pytest 1_pytest.py or pytest 1_pytest.py
# To get print function output on console
python -m pytest 1_pytest.py -s
# To Run particular testcase
python -m pytest 1_pytest.py -s -k test_add
# To Run particular testcases
python -m pytest 1_pytest.py -s -k test_
# To skip particular testcase
pytest 1_pytest.py -s -k " not test_a"
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
###########################
# SemEval-2018 Task 2:
# Multilingual Emoji Detection
# Team: Duluth UROP
# Author: Shuning Jin
# Environment: Python 3.6
# Date: 2018-05-20
# Update: 2020-02-12
###########################
''' Description
File: preprocess.py
Preprocessing text
normalize to lowercase
deal with punctuation, non-ASCII (remove or replace)
tokenize and vectorize
generate scipy spase matrices
'''
import sys
import re
import time
import os
import argparse
from shutil import copyfile, copy
import nltk
from sklearn.feature_extraction.text import CountVectorizer
from scipy.sparse import save_npz
def handle_arguments(cl_arguments):
    """Parse command-line arguments for the preprocessing step.

    :param cl_arguments: argument list (e.g. sys.argv[1:]).
    :return: argparse.Namespace with train_text, train_label, test_text
             (None when omitted) and run_dir attributes.
    """
    parser = argparse.ArgumentParser(description="")
    # argparse ignores `default` on required options, so the contradictory
    # required=True/default=None combinations were dropped.
    parser.add_argument("--train_text", type=str, required=True, help="",)
    parser.add_argument("--train_label", type=str, required=True, help="",)
    parser.add_argument("--test_text", type=str, required=False, default=None, help="",)
    parser.add_argument("--run_dir", type=str, required=True, help="",)
    return parser.parse_args(cl_arguments)
def save_sparse_matrix(path, matrix):
    """Persist *matrix* to *path* in scipy .npz format.

    :param path: destination file path.
    :param matrix: scipy sparse matrix.
    """
    # save_npz is already imported at module level; the function-local
    # re-import in the original was redundant and has been removed.
    save_npz(path, matrix)
    print('Save text as sparse matrix to: {:s}'.format(path))
def save_label(path, label_list):
    """Write one label per line to *path*."""
    with open(path, 'w') as f:
        f.write(''.join('{}\n'.format(entry) for entry in label_list))
    print('Save labels to: {:s}'.format(path))
def load_label(path):
    """Read integer labels, one per line, from *path*.

    :param path: label file path.
    :return: list of ints.
    """
    # A context manager closes the file; the original iterated an open()
    # call directly and leaked the handle.
    with open(path, 'r') as f:
        label_list = [int(str(line).replace('\n', '')) for line in f]
    print('Load labels from: {:s}'.format(path))
    return label_list
def read_text(path):
    """
    read raw text from path, preprocess, return a list of sentences
    """
    # One-pass translation table replacing the chained .replace() calls:
    #   removed (mapped to space): ASCII comma and the non-ASCII
    #   '…', '•', '·', '・', ',', '—', '–'
    #   normalized to ASCII: curly quotes -> ' / ", fullwidth ! -> !
    removed = ',…•·・,—–'
    pairs = [(ch, ' ') for ch in removed]
    pairs += [('’', "'"), ('‘', "'"), ('“', '"'), ('”', '"'), ('!', '!')]
    table = str.maketrans(dict(pairs))

    def clean(raw):
        # lowercase first, then apply the character table in one pass
        return raw.lower().translate(table)

    text = []
    with open(path, 'r') as f:
        for line in f:
            text.append(clean(line).strip('\n').strip(' '))
    print('Read text: {:s}. Total example: {:d}'
          .format(path, len(text)))
    return text
def main(train_text_path, train_label_path, test_text_path, runname):
    """Vectorize train/test text into bag-of-ngram sparse matrices.

    Output goes to experiment/<runname>/preprocess; when the three output
    files already exist, the step is skipped.
    """
    print('\n--- PHASE: PREPROCESSING ---')
    # file logic: experiment/<runname>/preprocess (parents auto-created)
    preprocess_dir = os.path.join('experiment', runname, 'preprocess')
    os.makedirs(preprocess_dir, exist_ok=True)
    existing = set(os.listdir(preprocess_dir))
    if {'train_x_dtm.npz', 'test_x_dtm.npz', 'train_y'} <= existing:
        print('Preprocessed files already exists. Pass this step.')
        return
    # vectorize data: bag of n-grams
    # feature: unigram + bigram, document frequency cutoff = 5
    vect = CountVectorizer(ngram_range=(1, 2), tokenizer=nltk.word_tokenize,
                           min_df=5)
    train_x = read_text(train_text_path)
    train_x_dtm = vect.fit_transform(train_x)
    test_x_dtm = vect.transform(read_text(test_text_path))
    # save text x as sparse matrices; copy label y alongside them
    save_sparse_matrix(os.path.join(preprocess_dir, 'train_x_dtm.npz'), train_x_dtm)
    save_sparse_matrix(os.path.join(preprocess_dir, 'test_x_dtm.npz'), test_x_dtm)
    copy(src=train_label_path, dst=os.path.join(preprocess_dir, 'train_y'))
if __name__ == "__main__":
    # CLI entry point: parse arguments, run preprocessing, report wall time.
    args = handle_arguments(sys.argv[1:])
    start = time.time()
    main(args.train_text, args.train_label, args.test_text, args.run_dir)
    elapsed = time.time() - start
    print("Preprocess time: {:.2f} seconds, {:.2f} minutes".format(
        elapsed, elapsed / 60))
|
import tkinter
from tkinter import *
def float_bin1(my_number, places=3):
    """Return *my_number* as a binary string with *places* fractional bits.

    The input must contain a decimal point when converted with str().
    """
    whole_part, frac_part = str(my_number).split(".")
    bits = (str(bin(int(whole_part))) + ".").replace('0b', '')
    for _ in range(places):
        # double the fraction; the integer part of the result is the next bit
        doubled = '%1.20f' % (float("0." + frac_part) * 2)
        carry, frac_part = doubled.split(".")
        bits += carry
    return bits
def flip1(c):
    """Return '1' when *c* is '0'; any other value yields '0'."""
    if c == '0':
        return '1'
    return '0'
def xor_c1(a, b):
    """XOR of two bit characters: '0' when equal, otherwise '1'."""
    return '1' if a != b else '0'
def xor_c():
    """Button callback: XOR the two space-separated bits in entry1 and show
    the result in the answer widget.

    The original created a throwaway StringVar() that was immediately
    overwritten; it has been removed.
    """
    txt = entry1.get()
    a = txt.split()
    answer["text"] = '0' if (a[0] == a[1]) else '1'
def flip():
    """Button callback: complement the single bit read from entry1.

    The dead IntVar() assignment from the original has been removed.
    """
    c = entry1.get()
    answer["text"] = '1' if (c == '0') else '0'
def binarytoGray():
    """Button callback: convert the binary string in entry1 to Gray code.

    Dead StringVar() assignment and stray semicolons removed.
    """
    binary = entry1.get()
    # first Gray bit equals the first binary bit; each following bit is the
    # XOR of adjacent binary bits
    gray = binary[0]
    for i in range(1, len(binary)):
        gray += xor_c1(binary[i - 1], binary[i])
    answer["text"] = gray
def graytoBinary():
    """Button callback: convert the Gray-code string in entry1 to binary.

    Dead StringVar() assignment and stray semicolons removed.
    """
    gray = entry1.get()
    binary = gray[0]
    for i in range(1, len(gray)):
        # a '0' Gray bit repeats the previous binary bit; '1' flips it
        if gray[i] == '0':
            binary += binary[i - 1]
        else:
            binary += flip1(binary[i - 1])
    answer["text"] = binary
def bcd_to_int():
    """Button callback: pack the decimal integer from entry1 into BCD
    nibbles and show the resulting value.

    NOTE(review): despite its name this function encodes decimal -> BCD
    (each decimal digit becomes a 4-bit nibble), while int_to_bcd decodes
    BCD -> decimal; the two names look swapped — confirm against the UI.

    :raises ValueError: when the input is negative.
    """
    x = int(entry1.get())
    if x < 0:
        raise ValueError("Cannot be a negative integer")
    binstring = ''
    while True:
        q, r = divmod(x, 10)
        # zero-pad each digit's binary form to a 4-bit nibble
        nibble = bin(r).replace('0b', "").zfill(4)
        binstring = nibble + binstring
        if q == 0:
            break
        x = q
    answer["text"] = int(binstring, 2)
def int_to_bcd():
    """Button callback: interpret the integer from entry1 as packed BCD
    nibbles and show the decoded decimal digits.

    NOTE(review): naming looks swapped with bcd_to_int — confirm.

    :raises ValueError: when the input is negative.
    """
    x = int(entry1.get())
    if x < 0:
        raise ValueError("Cannot be a negative integer")
    if x == 0:
        # Bug fix: the original did `return 0` without updating the label,
        # leaving stale text in the answer widget for input 0.
        answer["text"] = 0
        return
    bcdstring = ''
    while x > 0:
        # each low nibble is one decimal digit
        bcdstring = str(x % 16) + bcdstring
        x >>= 4
    answer["text"] = int(bcdstring)
def float_bin():
    """Button callback: show the binary expansion (3 fractional bits) of the
    float read from entry1.

    The body duplicated float_bin1 verbatim; it now delegates to it, and the
    dead StringVar() assignment has been removed.
    """
    answer["text"] = float_bin1(float(entry1.get()), places=3)
def IEEE754():
    """Button callback: show the IEEE-754 single-precision encoding of the
    float in entry1 as (hex string, bit string).

    Dead StringVar() assignment removed; logic unchanged.
    """
    n = float(entry1.get())
    # extract the sign bit and work with the magnitude
    sign = 0
    if n < 0:
        sign = 1
        n = n * (-1)
    p = 30  # fractional binary digits to generate before normalising
    # convert float to binary
    dec = float_bin1(n, places=p)
    dotPlace = dec.find('.')
    onePlace = dec.find('1')
    # normalise so the leading 1 sits immediately left of the binary point
    if onePlace > dotPlace:
        dec = dec.replace(".", "")
        onePlace -= 1
        dotPlace -= 1
    elif onePlace < dotPlace:
        dec = dec.replace(".", "")
        dotPlace -= 1
    mantissa = dec[onePlace + 1:]
    exponent = dotPlace - onePlace
    # bias the exponent (127 for single precision) and render as 8 bits
    exponent_bits = bin(exponent + 127).replace("0b", '')
    mantissa = mantissa[0:23]
    final = str(sign) + exponent_bits.zfill(8) + mantissa
    hstr = '0x%0*X' % ((len(final) + 3) // 4, int(final, 2))
    answer["text"] = (hstr[2:], final)
def hexadecimal():
    """Button callback: show the hexadecimal form (without '0x') of the
    integer read from entry1. Dead StringVar() assignment removed.
    """
    answer["text"] = hex(int(entry1.get()))[2:]
def octal():
    """Button callback: show the octal form (without '0o') of the integer
    read from entry1. Dead StringVar() assignment removed.
    """
    answer["text"] = oct(int(entry1.get()))[2:]
# Build the converter window: an entry for input, ten styled buttons that
# each run one conversion, and a Message widget that displays the result.
root = Tk()
root.title("Converter")
canvas1 = tkinter.Canvas(root, width=400, height=300)
canvas1.pack()
entry1 = tkinter.Entry(root)
canvas1.create_window(200, 140, window=entry1)

# (label, callback, x, y) for every converter button; styling is shared.
_BUTTON_SPECS = [
    ('XOR', xor_c, 170, 400),
    ('Flip', flip, 370, 400),
    ('Gray', binarytoGray, 570, 400),
    ('Binary', graytoBinary, 770, 400),
    ('Decimal', bcd_to_int, 970, 400),
    ('BCD', int_to_bcd, 170, 600),
    ('Floating Binary', float_bin, 370, 600),
    ('Floating point', IEEE754, 570, 600),
    ('Hexadecimal', hexadecimal, 770, 600),
    ('Octal', octal, 970, 600),
]
for label, callback, x, y in _BUTTON_SPECS:
    btn = Button(root, text=label, command=callback,
                 activeforeground='white', bg='black',
                 activebackground='goldenrod1', fg='white',
                 font=('Airal', 12))
    btn.pack()
    btn.place(x=x, y=y)

answer = Message(root, text="Answer")
answer.config(font=('Airal', 15), width=400)
answer.place(x=700, y=200)
mainloop()
|
# NOTE(review): `patterns` was removed from django.conf.urls in Django 1.10;
# the unused `patterns`/`include` imports are dropped so this module keeps
# importing on modern Django versions.
from django.conf.urls import url
from enfermeria.views import HomeView, MenuView
from enfermeria import views

urlpatterns = [
    url(r'^$', HomeView.as_view()),
    # NOTE(review): no trailing '$', so any path starting with 'menu/'
    # matches this view — confirm prefix matching is intended.
    url(r'^menu/', views.Menu_index, name='menu'),
]
|
def days_to_units(days, unit):
    """Render *days* converted to the requested *unit* as a sentence;
    unsupported units yield an error message."""
    factors = {"hours": 24, "minutes": 24 * 60}
    if unit in factors:
        return f"{days} days are {days * factors[unit]} {unit}"
    return "unsupported units"
def validate_and_execute(du_dic):
    """Validate the 'days' entry of *du_dic* and print the conversion or an
    appropriate error message."""
    try:
        days = float(du_dic["days"])
    except ValueError:
        print("Input is not a number. don't ruin my program")
        return
    if days > 0:
        print(days_to_units(days, du_dic["unit"]))
    elif days == 0:
        print("you enter a 0 please enter a valid positive number")
    else:
        print("you entered a negative number, please enter a valid number")
# Read "days:unit" pairs until the user types "exit".
user_input = ""
while True:
    user_input = input("hey user, enter a number of days and conversion \n")
    # Bug fix: typing "exit" used to crash with IndexError because the body
    # split and indexed the input before the loop condition was re-checked.
    if user_input == "exit":
        break
    days_and_units = user_input.split(":")
    if len(days_and_units) < 2:
        print("expected input in the form days:unit")
        continue
    print(days_and_units)
    days_and_units_dic = {"days": days_and_units[0], "unit": days_and_units[1]}
    print(days_and_units_dic)
    validate_and_execute(days_and_units_dic)
|
# Read a 3x3 integer matrix, one value at a time, row by row.
matriz = [[int(input('Digite um valor: ')) for _ in range(3)]
          for _ in range(3)]
print(f'{matriz}')
|
import nsupdate
import util
from api_exception import api_exception
from deployment_role import deployment_role
from entity import entity
class host_record(entity):
    """DNS host (A/AAAA) record entity backed by the BAM SOAP API.

    The original placed every method's documentation as a bare string in the
    class body BEFORE the def, so the methods had no docstrings; they have
    been moved inside the methods they describe.
    """

    def __init__(self, api, soap_entity, soap_client):
        """Instantiate host record.

        :param api: API instance used by the entity to communicate with BAM.
        :param soap_entity: the SOAP (suds) entity returned by the BAM API.
        :param soap_client: the suds client instance.
        """
        super(host_record, self).__init__(api, soap_entity, soap_client)
        self._immutable_properties.append('addressIds')

    def set_addresses(self, addresses):
        """Set the address(es) for a host record.

        :param addresses: list of addresses in the form of strings for the host record. E.g. ['192.168.0.1', '192.168.0.2'].
        """
        self._properties['addresses'] = ','.join(addresses)

    def get_addresses(self):
        """Get the addresses for a host record in the form of a list of strings."""
        return self._properties['addresses'].split(',')

    def get_configuration(self):
        """Get the owning configuration."""
        return self.get_parent_of_type(entity.Configuration)

    def get_zone(self):
        """Get the parent zone."""
        return self.get_parent_of_type('Zone')

    def get_ip4_addresses(self):
        """Get the IP4 address instances associated with the host record."""
        c = self.get_parent_of_type(entity.Configuration)
        res = []
        for a in self.get_addresses():
            if util.is_valid_ipv4_address(a):
                res.append(c.get_ip4_address(a))
        return res

    def get_ip6_addresses(self):
        """Get the IP6 address instances associated with the host record."""
        c = self.get_parent_of_type(entity.Configuration)
        res = []
        for a in self.get_addresses():
            if util.is_valid_ipv6_address(a):
                res.append(c.get_ip6_address(a))
        return res

    def get_view(self):
        """Get the owning view."""
        return self.get_parent_of_type(entity.View)

    def get_full_name(self):
        """Get the fully qualified domain name of the host record."""
        return self._properties['absoluteName']

    def get_specific_ttl(self):
        """If there is a specific TTL set for the host record, return it, otherwise return -1."""
        if 'ttl' in self._properties:
            return int(self._properties['ttl'])
        else:
            return -1

    def get_ttl(self, server=None):
        """Find the applicable TTL for the host record. This involves walking the entity tree in many cases.

        :param server: optional server; when given, server-specific
            zone-default-ttl options take precedence over generic ones.
        """
        if 'ttl' in self._properties:
            return int(self._properties['ttl'])
        # NOTE(review): WebFault is referenced below but never imported in
        # this module (it comes from suds); the except clauses would raise
        # NameError if the SOAP call failed. A `from suds import WebFault`
        # is needed at file level.
        if server is not None:
            # look for server specific option, walking up the entity tree
            node = self.get_parent()
            while node is not None:
                try:
                    opt = self._soap_client.service.getDNSDeploymentOption(
                        node.get_id(), 'zone-default-ttl', server.get_id())
                except WebFault as err:
                    raise api_exception(err.message)
                if util.has_response(opt):
                    return int(opt['value'])
                node = node.get_parent()
        # now look for option for any server (server id 0)
        node = self.get_parent()
        while node is not None:
            try:
                opt = self._soap_client.service.getDNSDeploymentOption(
                    node.get_id(), 'zone-default-ttl', 0)
            except WebFault as err:
                raise api_exception(err.message)
            if util.has_response(opt):
                return int(opt['value'])
            node = node.get_parent()
        # nothing configured anywhere: fall back to the default of 3600
        return 3600

    def dynamic_update_forward(self, tsig_key_file=None):
        """Dynamically update the forward DNS space for a record.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        # forward space
        for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
            s = dr.get_server()
            if s is not None:
                for server_ip in s.get_service_ip4_addresses():
                    for record_addr in self.get_addresses():
                        if util.is_valid_ipv4_address(record_addr):
                            nsupdate.update_a(server_ip, self.get_full_name(), record_addr, self.get_ttl(),
                                              tsig_key_file)
                        elif util.is_valid_ipv6_address(record_addr):
                            nsupdate.update_aaaa(server_ip, self.get_full_name(), record_addr, self.get_ttl(),
                                                 tsig_key_file)
                        else:
                            raise api_exception("invalid IP address", record_addr)

    def dynamic_update_reverse(self, tsig_key_file=None):
        """Dynamically update the reverse DNS space for a record. If there isn't a suitable deployment role set for the relevant network this
        method will do nothing.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        # reverse space (only when the record asks for a reverse entry)
        if self._properties['reverseRecord'] == 'true':
            for a in self.get_ip4_addresses():
                for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
                    s = dr.get_server()
                    if s is not None:
                        for server_ip in s.get_service_ip4_addresses():
                            nsupdate.create_ptr(server_ip, self.get_full_name(), a.get_reverse_name(), self.get_ttl(),
                                                tsig_key_file)
                        for server_ip in s.get_service_ip6_addresses():
                            nsupdate.create_ptr(server_ip, self.get_full_name(), a.get_reverse_name(), self.get_ttl(),
                                                tsig_key_file)

    def dynamic_delete_forward(self, tsig_key_file=None):
        """Dynamically delete the forward DNS space for a record.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
            s = dr.get_server()
            if s is not None:
                for server_ip in s.get_service_ip4_addresses():
                    for record_addr in self.get_addresses():
                        if util.is_valid_ipv4_address(record_addr):
                            nsupdate.delete_a(server_ip, self.get_full_name(), tsig_key_file)
                        elif util.is_valid_ipv6_address(record_addr):
                            nsupdate.delete_aaaa(server_ip, self.get_full_name(), tsig_key_file)
                        else:
                            raise api_exception("invalid IP address", record_addr)

    def dynamic_delete_reverse(self, tsig_key_file=None):
        """Dynamically delete the reverse DNS space for a record. If there isn't a suitable deployment role set for the relevant network this
        method will do nothing.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        if self._properties['reverseRecord'] == 'true':
            # reverse space
            for a in self.get_ip4_addresses():
                for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
                    s = dr.get_server()
                    if s is not None:
                        for server_ip in s.get_service_ip4_addresses():
                            nsupdate.delete_ptr(server_ip, a.get_reverse_name(), tsig_key_file)
                        for server_ip in s.get_service_ip6_addresses():
                            nsupdate.delete_ptr(server_ip, a.get_reverse_name(), tsig_key_file)

    def dynamic_create_forward(self, tsig_key_file=None):
        """Dynamically create the forward DNS space for a record.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
            s = dr.get_server()
            if s is not None:
                for server_ip in s.get_service_ip4_addresses():
                    for record_addr in self.get_addresses():
                        if util.is_valid_ipv4_address(record_addr):
                            nsupdate.create_a(server_ip, self.get_full_name(), record_addr, self.get_ttl(),
                                              tsig_key_file)
                        elif util.is_valid_ipv6_address(record_addr):
                            nsupdate.create_aaaa(server_ip, self.get_full_name(), record_addr, self.get_ttl(),
                                                 tsig_key_file)
                        else:
                            raise api_exception("invalid IP address", record_addr)

    def dynamic_create_reverse(self, tsig_key_file=None):
        """Dynamically create the reverse DNS space for a record. If there isn't a suitable deployment role set for the relevant network this
        method will do nothing.

        :param tsig_key_file: file containing the TSIG key (if any) to use.
        """
        if self._properties['reverseRecord'] == 'true':
            for a in self.get_ip4_addresses():
                for dr in self.get_deployment_roles(types=[deployment_role.MASTER, deployment_role.MASTER_HIDDEN]):
                    s = dr.get_server()
                    if s is not None:
                        for server_ip in s.get_service_ip4_addresses():
                            nsupdate.create_ptr(server_ip, self.get_full_name(), a.get_reverse_name(), self.get_ttl(),
                                                tsig_key_file)
                        for server_ip in s.get_service_ip6_addresses():
                            nsupdate.create_ptr(server_ip, self.get_full_name(), a.get_reverse_name(), self.get_ttl(),
                                                tsig_key_file)
|
from udp import UDPsocket, timeout # import provided class
from enum import Enum, unique
import logging
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
# Protocol parameters.
WINDOWS_SIZE = 8  # sender window size, in segments
MAX_LENGTH = 1024  # maximum payload bytes per segment
MAX_TIMEOUT_RETRY = 5  # consecutive timeouts tolerated before aborting
# Bit flags for the datagram type field (combined with |).
SYN = 0b0100
FIN = 0b0010
ACK = 0b0001
@unique
class STATE(Enum):
    """Connection lifecycle states used by FSM (which is itself unused by
    the connectionless rdt implementation below)."""
    OPENED = 0
    LISTENING = 1
    CONNECTING = 2
    CONNECTED = 3
    CLOSING = 4
    CLOSED = 5
@unique
class EVENT(Enum):
    """Events that drive FSM state transitions."""
    bind = 0
    listen = 1
    accept = 2
    connect_requested = 3
    connect = 4
    close_requested = 5
    close = 6
class FSM(object):
    """
    FSM, but it is useless for connectionless rdt
    """

    def __init__(self, state=None):
        """Initialise from a STATE member, an int, or None (defaults to
        STATE.OPENED).

        :raises ValueError: for any other initial value.
        """
        self._current = None
        if type(state) == STATE:
            self._current = state
        elif type(state) == int:
            self._current = STATE(state)
        elif state is None:
            self._current = STATE.OPENED
        else:
            raise ValueError("Invalid state")

    @property
    def current(self):
        """The current STATE."""
        return self._current

    @current.setter
    def current(self, current: STATE):
        # Bug fix: log BEFORE assigning so the transition records the old
        # state; the original assigned first and logged the new state twice.
        logging.info('Change From %s to %s', self._current, current)
        self._current = current

    def dispatch(self, event: str):
        """Advance the machine according to *event* (an EVENT member).

        :raises ValueError: when the event is not legal in the current state.
        """
        if self._current == STATE.OPENED:
            if event == EVENT.bind:
                self._current = STATE.LISTENING
            elif event == EVENT.listen:
                self._current = STATE.LISTENING
            elif event == EVENT.connect:
                self._current = STATE.CONNECTED
            else:
                raise ValueError("Invalid event")
        elif self._current == STATE.LISTENING:
            if event == EVENT.connect_requested:
                self._current = STATE.CONNECTING
            else:
                raise ValueError("Invalid event")
        elif self._current == STATE.CONNECTING:
            if event == EVENT.accept:
                self._current = STATE.CONNECTED
            else:
                raise ValueError("Invalid event")
        elif self._current == STATE.CONNECTED:
            if event == EVENT.close_requested:
                self._current = STATE.CLOSING
            elif event == EVENT.close:
                self._current = STATE.CLOSED
            else:
                raise ValueError("Invalid event")
        elif self._current == STATE.CLOSING:
            if event == EVENT.close:
                self._current = STATE.CLOSED
            else:
                raise ValueError("Invalid event")
        else:
            logging.warning("Nothing to do")
class datagram(object):
    """A single protocol segment with a fixed 15-byte header.

    Header layout (big-endian, see _decode):
        byte  0     : type flags (SYN/FIN/ACK bits)
        bytes 1-4   : SEQ number
        bytes 5-8   : SEQ_ACK number
        bytes 9-12  : payload length
        bytes 13-14 : checksum (negated byte sum mod 256)
    Remaining bytes are the payload.
    """
    def _set_header(self, offset, value):
        # Overwrite header bytes [offset, offset + len(value)) in place.
        tmp = list(self._header)
        for index, byte in enumerate(value):
            tmp[offset + index] = byte
        self._header = bytes(tmp)
    # For datagram type
    @property
    def dtype(self) -> int:
        return self._dtype
    @dtype.setter
    def dtype(self, dtype: int):
        self._dtype = dtype
        self._set_header(0, self._dtype.to_bytes(1, 'big'))
    # For Seq
    @property
    def seq(self):
        return int.from_bytes(self._seq, 'big')
    @seq.setter
    def seq(self, seq):
        if type(seq) == int:
            self._seq = seq.to_bytes(4, 'big')
            self._set_header(1, self._seq)
        else:
            raise ValueError("Seq number must be an integer")
    # For SEQ_ACK
    @property
    def seq_ack(self):
        return int.from_bytes(self._seq_ack, 'big')
    @seq_ack.setter
    def seq_ack(self, seq_ack):
        if type(seq_ack) == int:
            self._seq_ack = seq_ack.to_bytes(4, 'big')
            self._set_header(5, self._seq_ack)
        else:
            raise ValueError("SEQ_ACK number must be an integer")
    # For LEN (read-only: kept in sync by the payload setter)
    @property
    def length(self):
        return int.from_bytes(self._length, 'big')
    @length.setter
    def length(self, length):
        raise NotImplementedError("Length cannot be set.")
    # For CHECKSUM
    @property
    def checksum(self):
        # Sum header (checksum field zeroed) plus payload, then negate mod
        # 256, so that adding the stored checksum back yields 0 mod 256.
        tmp = self._header[0:13] + b'\x00\x00' + self._payload
        sum = 0
        for byte in tmp:
            sum += byte
        sum = -(sum % 256)
        return (sum & 0xFF)
    @checksum.setter
    def checksum(self, checksum):
        raise NotImplementedError("Checksum cannot be set.")
    @property
    def valid(self):
        # True when the recomputed checksum matches the stored header field.
        return self.checksum == int.from_bytes(self._checksum, 'big')
    # For PAYLOAD
    @property
    def payload(self):
        return self._payload
    @payload.setter
    def payload(self, payload):
        if type(payload) == bytes:
            # Setting the payload also refreshes the length field.
            self._length = len(payload).to_bytes(4, 'big')
            self._set_header(9, self._length)
            self._payload = payload
        else:
            raise TypeError("a bytes-like object is expected")
    def __init__(self, raw_data=None):
        # Decode raw bytes when given; otherwise start from an empty segment.
        if type(raw_data) == bytes:
            self._decode(raw_data)
        else:
            self._header = bytes(15)
            self._dtype = 0
            self._seq = bytes(4)
            self._seq_ack = bytes(4)
            self._length = bytes(4)
            self._checksum = bytes(2)
            self._payload = b''
    def _decode(self, raw_data: bytes):
        # Split raw bytes into header fields; rejects short input.
        if len(raw_data) < 15:
            raise ValueError("Invalid data!")
        self._header = raw_data[0:15]
        self._dtype = self._header[0]
        self._seq = self._header[1: 5]
        self._seq_ack = self._header[5: 9]
        self._length = self._header[9: 13]
        self._checksum = self._header[13: 15]
        self._payload = raw_data[15:]
    def _encode(self):
        # Stamp the freshly computed checksum into the header, then serialize.
        self._set_header(13, self.checksum.to_bytes(2, 'big'))
        return self._header + self._payload
    def __call__(self):
        # Calling the datagram returns its wire representation.
        return self._encode()
    def __str__(self):
        return self.__repr__()
    def __repr__(self):
        try:
            res = "Type:\t{}\nSeq:\t{}\nSEQ_ACK:\t{}\nLENGTH:\t{}\nChecksum:\t{}\nPayload:\t{}".format(
                self.dtype, self.seq, self.seq_ack, self.length, self.checksum, self.payload)
            return res
        except Exception:
            return "Invalid"
class socket(UDPsocket):
    """Reliable-transfer socket built on the provided lossy UDPsocket.

    NOTE(review): the class name shadows the stdlib `socket` module name
    within this file.
    """
    def __init__(self, ):
        super().__init__()
        self.state = FSM(STATE.OPENED)  # FSM is unused by the rdt logic
        self.seq = 0  # next sequence number to send
        self.seq_ack = 0  # next sequence number expected from the peer
    def bind(self, addr):
        self.state.dispatch(EVENT.bind)
        super().bind(addr)
    def accept(self):
        print("Accept is not implemented for connectionless rdt")
        raise NotImplementedError
    def connect(self, addr):
        # Only remembers the peer address; no handshake is performed.
        self.to = addr
    def close(self):
        print("Close is not implemented for connectionless ")
        raise NotImplementedError
    def recvfrom(self, bufsize=2048):
        # Wrap UDPsocket.recvfrom: None -> timeout, corrupt packet -> Exception.
        QvQ = super().recvfrom(bufsize)
        if QvQ is None:
            raise timeout
        data, addr = QvQ
        data = datagram(data)
        if data.valid:
            return data, addr
        raise Exception("Invalid packet")
    def recv(self, bufsize: int):
        # Receive in-order segments until a FIN arrives, cumulatively ACKing;
        # returns the reassembled payload bytes.
        rcvd_data = b''
        # -1 means "no data seen yet": timeouts are ignored until the first
        # segment arrives.
        timeout_count = -1
        expected = self.seq_ack
        ack = datagram()
        logging.info('ready to receive...')
        while True:
            try:
                data, addr = self.recvfrom(bufsize)
                logging.debug('received raw segment')
                timeout_count = 0 # no timeout, reset
                logging.info('expected: #%d, received: #%d', expected, data.seq)
                if data.seq == expected:
                    if data.dtype & FIN:
                        logging.info('FIN Recieved')
                        break
                    else:
                        rcvd_data += data.payload
                        expected += data.length
                # cumulative ACK: sent for every valid segment, in order or not
                ack.seq = self.seq
                ack.seq_ack = expected
                super().sendto(ack(), addr)
            except timeout:
                if timeout_count < 0:
                    continue
                timeout_count += 1
                logging.info('timed out, count=%d', timeout_count)
                if timeout_count > MAX_TIMEOUT_RETRY:
                    raise ConnectionAbortedError('timed out')
            except ValueError:
                # malformed segment: re-ACK the last in-order position
                # NOTE(review): if this fires before any segment has been
                # received, `addr` is unbound and this raises NameError.
                ack.seq = self.seq
                ack.seq_ack = expected
                super().sendto(ack(), addr)
            except Exception as e:
                logging.warning(e)
        self.seq += 1
        self.seq_ack = expected + 1
        # acknowledge the FIN (sent once, no retransmission)
        fin_ack = datagram()
        fin_ack.dtype |= FIN
        fin_ack.dtype |= ACK
        fin_ack.seq = self.seq
        fin_ack.seq_ack = self.seq_ack
        fin_err_count = 0  # NOTE(review): unused in this method
        self.sendto(fin_ack(), addr)
        # self.seq_ack = expected + 1
        logging.info('----------- receipt finished -----------')
        return rcvd_data
    def send(self, content: bytes, reciver_addr):
        # Send *content* in a sliding window of WINDOWS_SIZE segments, then
        # close the transfer with a FIN / FIN-ACK exchange.
        # So grass...
        base = self.seq
        timeout_count = 0
        l, r = 0, 0
        while l < len(content):
            # current window covers content[l:r); resent wholesale on timeout
            r = min(len(content), l + WINDOWS_SIZE * MAX_LENGTH)
            # Calculate all the bytes need to send
            buffer = list(range(l, r, MAX_LENGTH))
            for i in buffer:
                chunk_len = min(MAX_LENGTH, len(content) - i)
                data = datagram()
                data.payload = content[i:i+chunk_len]
                data.seq = base + i
                data.seq_ack = self.seq_ack
                self.sendto(data(), reciver_addr)
            logging.info('Send packet from [%d, %d]' % (buffer[0], buffer[-1]))
            while True:
                try:
                    # assumption: no truncated packets
                    data, addr = self.recvfrom(2048)
                    timeout_count = 0 # no error, reset counter
                    logging.info('#%d acked', data.seq_ack)
                    # cumulative ack, it should be in the window
                    assert buffer[0] <= data.seq_ack - base <= buffer[-1] + MAX_LENGTH + 1
                    l = max(l, data.seq_ack - base)
                    # all acked
                    if r - l == 0:
                        logging.info('Finish sending')
                        break
                except ValueError:
                    logging.info('corrupted ack, ignored')
                except AssertionError:
                    logging.info(
                        'duplicate ack or unexpected segment received')
                except timeout:
                    timeout_count += 1
                    logging.info('timed out, count=%d', timeout_count)
                    if timeout_count > MAX_TIMEOUT_RETRY:
                        raise ConnectionError('timed out')
                    # timeout: leave the ACK loop and resend the window
                    break
                except Exception as e:
                    logging.warning(e)
        # Finish, send FIN and wait for a matching FIN-ACK (best effort)
        fin = datagram()
        fin.dtype |= FIN
        fin.seq = base + len(content)
        fin.seq_ack = self.seq_ack
        fin_err_count = 0
        while True:
            try:
                self.sendto(fin(), reciver_addr)
                data, addr = self.recvfrom(2048)
                if data.dtype & ACK and data.dtype & FIN and data.seq_ack == base + len(content) + 1:
                    break
            except (timeout, ValueError):
                fin_err_count += 1
                if fin_err_count > MAX_TIMEOUT_RETRY:
                    break
            except Exception as e:
                logging.warning(e)
        self.seq = base + len(content) + 1
        logging.info('----------- all sent -----------')
|
import os
import sys
target_version = "3.0.0"


def build_version():
    """Derive a PEP 440 version string from ``git describe``.

    Returns:
        - the tag itself when HEAD sits exactly on a numeric tag,
        - ``<target_version>.post<distance>`` when commits were added on top
          of the target release tag,
        - ``<target_version>.dev<distance>`` otherwise, including whenever
          git is unavailable or the directory is not a repository.
    """
    distance = "0"
    try:
        from subprocess import Popen, PIPE
        # `git describe --long` yields "<tag>-<commits-since-tag>-g<sha>".
        out = Popen(["git", "describe", "--match", "[0-9]*", "--long"],
                    cwd=os.path.dirname(__file__),
                    stdout=PIPE
                    ).communicate()[0]
        # communicate() returns bytes on Python 3; decode before splitting.
        prev_tag, distance, revision = out.decode("utf-8", "replace").strip().split("-")
        if distance == "0":
            return prev_tag
        elif prev_tag == target_version:
            return "%s.post%s" % (target_version, distance)
    except Exception as e:
        # Fixed from the Python 2 statement `print e`, which is a
        # SyntaxError on Python 3.
        print(e)
    return "%s.dev%s" % (target_version, distance)
# Prefer the version recorded at build/release time; fall back to computing
# it from git metadata when running from a source checkout.
try:
    from .version import __version__
except ImportError:
    __version__=build_version()
|
"""
A demonstration of tkinter Variable Class
IntVar, StringVar & BooleanVar
"""
import tkinter as tk
root = tk.Tk()
def show():
print("You entered:")
print(f"Employee Number: {empolyee_number.get()}")
print(f"Login Password: {password.get()}")
print(f"Remember Me: {remember_me.get()}")
print("*" * 30)
empolyee_number = tk.IntVar()
tk.Label(root, text="Employee Number:").grid(row=1, column=1)
tk.Entry(root, width=40, textvariable=empolyee_number).grid(
row=1, column=2, columnspan=2
)
password = tk.StringVar()
tk.Label(root, text="Login Password").grid(row=2, column=1, sticky="w")
tk.Entry(root, width=40, show="*", textvariable=password).grid(
row=2, column=2, columnspan=2
)
password.set("mysecretpassword")
tk.Button(root, text="Login", command=show).grid(row=3, column=3)
remember_me = tk.BooleanVar()
tk.Checkbutton(root, text="Remember Me", variable=remember_me).grid(row=3, column=2)
remember_me.set(True)
root.mainloop()
|
# Generated by Django 3.1 on 2020-08-23 20:52
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a required, unique short-name column to the ``Moneda`` model.

    ``default=''`` is supplied only so existing rows can be populated during
    the schema change; ``preserve_default=False`` removes the default from
    the model afterwards.
    """

    dependencies = [
        ('comun', '0005_auto_20200823_1534'),
    ]
    operations = [
        migrations.AddField(
            model_name='moneda',
            name='nombre_corto',
            field=models.CharField(default='', max_length=5, unique=True),
            preserve_default=False,
        ),
    ]
|
from vector import *
import pygame
from pygame.locals import *
from sys import exit
# Create the display surface and paint the static background once at
# import time; the player class below blits directly onto ``screen``.
screen = pygame.display.set_mode((1024,768),HWSURFACE|DOUBLEBUF,32)
background_img = "background.png"
background = pygame.image.load(background_img).convert()
screen.blit(background,(0,0))
class player(object):
    """Keyboard/mouse-controlled sprite drawn on the module-level ``screen``.

    NOTE(review): everything below is a *class* attribute shared by all
    instances; acceptable for a single-player demo, but per-instance state
    would normally belong in ``__init__``.
    """
    pos = vector(0,0)       # current position (center of the sprite)
    move_vec = vector(0,0)  # current per-frame velocity
    speed = 5
    hist_key = K_1          # last movement key pressed (K_1 = none yet)
    player_img = "player1.png"
    # Pre-rotated copies of the sprite for each facing direction.
    player_sprite_right = pygame.image.load(player_img).convert_alpha()
    player_sprite_up = pygame.transform.rotate(player_sprite_right,90)
    player_sprite_down = pygame.transform.rotate(player_sprite_right,-90)
    player_sprite_left = pygame.transform.rotate(player_sprite_right,180)
    player_sprite = player_sprite_right

    def click_draw(self,destination):
        # Step ``speed`` pixels toward *destination* and draw the sprite.
        # NOTE(review): normalize() of a zero-length vector (already at the
        # destination) may divide by zero -- confirm vector.normalize().
        direction = destination - self.pos
        direction.normalize()
        self.pos += direction*self.speed
        pos = self.pos.tupleit()
        screen.blit(self.player_sprite,pos)

    def kbdraw(self,event):
        # Update velocity/facing from WASD key events, then advance the
        # position and redraw (restoring the background patch first).
        if event.type == KEYDOWN:
            if event.key == K_w:
                self.hist_key = event.key
                self.move_vec = vector(0,-5)
                self.player_sprite = self.player_sprite_up
            if event.key == K_s:
                self.hist_key = event.key
                self.move_vec = vector(0,5)
                self.player_sprite = self.player_sprite_down
            if event.key == K_d:
                self.hist_key = event.key
                self.move_vec = vector(5,0)
                self.player_sprite = self.player_sprite_right
            if event.key == K_a:
                self.hist_key = event.key
                self.move_vec = vector(-5,0)
                self.player_sprite = self.player_sprite_left
            if event.key == K_ESCAPE:
                exit()
        # Stop only when the key that started the motion is released.
        if event.type == KEYUP and event.key == self.hist_key:
            self.move_vec = vector(0,0)
        self.pos += self.move_vec
        sprite_pos = self.pos.tupleit()
        # Center the sprite on its logical position.
        sprite_pos = (sprite_pos[0] - self.player_sprite.get_width()/2,sprite_pos[1] - self.player_sprite.get_height()/2)
        # Erase the previous frame by blitting the matching background patch.
        screen.blit(background,(sprite_pos[0] - 5,sprite_pos[1] - 5),pygame.Rect(sprite_pos[0]-5,sprite_pos[1]-5,60,60))
        screen.blit(self.player_sprite,sprite_pos)
|
# File: Figures.py
# Description: Draws various types of geometric figures
import turtle, math
# draw a line from (x1, y1) to (x2, y2)
def drawLine (ttl, x1, y1, x2, y2):
    """Draw a straight segment from (x1, y1) to (x2, y2) using pen *ttl*.

    The pen is lifted before moving to the start point and lifted again
    after the segment is drawn, so no stray lines are produced.
    """
    start, end = (x1, y1), (x2, y2)
    ttl.penup()
    ttl.goto(*start)
    ttl.pendown()
    ttl.goto(*end)
    ttl.penup()
def drawPolygon (ttl, x, y, num_side, radius):
    """Draw a regular polygon whose first vertex is at (x, y).

    The polygon has *num_side* sides sized as if inscribed in a circle of
    the given *radius*: side length = 2 * radius * sin(pi / n).
    """
    side_length = 2 * radius * math.sin(math.pi / num_side)
    turn_angle = 360 / num_side  # exterior angle turned at each vertex
    ttl.penup()
    ttl.goto(x, y)
    ttl.pendown()
    for _ in range(num_side):
        ttl.forward(side_length)
        ttl.left(turn_angle)
def main():
    """Render the demo scene: four polygons in a row, underlined twice."""
    # put label on top of page
    turtle.title ('Geometric Figures')
    # setup screen size
    turtle.setup (800, 800, 0, 0)
    # create a turtle object
    pen = turtle.Turtle()
    # (color, x offset, number of sides) for each figure, left to right:
    # triangle, square, pentagon, octagon -- all with "radius" 50.
    figures = (
        ('blue', -200, 3),
        ('red', -50, 4),
        ('forest green', 100, 5),
        ('DarkOrchid4', 250, 8),
    )
    for color, x, sides in figures:
        pen.color(color)
        drawPolygon(pen, x, 0, sides, 50)
    # double underline beneath the figures
    pen.color('gold4')
    drawLine(pen, -200, -10, 325, -10)
    drawLine(pen, -200, -15, 325, -15)
    # persist drawing
    turtle.done()

main()
|
# !/usr/bin/env python
# -- coding: utf-8 --
# @Author zengxiaohui
# Datatime:4/30/2021 9:28 AM
# @File:datasets_utils
import random
import math
import cv2
import numpy as np
from imgaug import augmenters as iaa # 引入数据增强的包
def resize_image(img, resize_size=[640, 640]):
    """Scale *img* so its longer side matches the longer target edge.

    Aspect ratio is preserved.  When the computed scale factor is exactly 1
    the input array is returned untouched (no copy, no cv2 call).
    """
    src_h, src_w = img.shape[:2]  # original height/width
    target_edge = max(resize_size[0], resize_size[1])
    scale = target_edge / max(src_h, src_w)
    if scale != 1:
        # Bilinear interpolation, matching the YOLO-style data loaders.
        img = cv2.resize(img, (int(src_w * scale), int(src_h * scale)),
                         interpolation=cv2.INTER_LINEAR)
    return img
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize the image into a stride-multiple rectangle while preserving
    # aspect ratio, padding the borders with ``color``.
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    # NOTE(review): despite the "32" above, this variant pads to multiples
    # of 64 (np.mod(..., 64) below) -- confirm the intended stride.
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)
    # Scale ratio (new / old): take the smaller of the two axis ratios so
    # the whole image fits inside new_shape.
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)
    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))  # size before padding
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch to fill exactly, distorting aspect ratio
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
    dw /= 2  # divide padding into 2 sides
    dh /= 2
    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def imgaug_augmenters(img, hyp):
    """Apply imgaug-based augmentation to *img*, driven by the ``hyp`` dict."""
    seq = iaa.Sequential([
        # Sometimes() applies the wrapped augmenter only to a random subset
        # of images, with probability hyp["iaa_Gaussian_r"].
        iaa.Sometimes(
            hyp["iaa_Gaussian_r"],
            # Gaussian blur with sigma sampled from the configured range.
            iaa.GaussianBlur(sigma=(hyp["iaa_Gaussian_sigma_1"], hyp["iaa_Gaussian_sigma_2"]))
        ),
        # random_order=True shuffles the augmenter order per image.
    ], random_order=True)
    images_aug = seq.augment_image(img)  # use augment_images() for batches
    return images_aug
def cutout(img, num_holes=1):
    """Cutout augmentation: paint random rectangles over *img* in place.

    Masking random patches encourages the network to rely on secondary
    features instead of a few dominant ones.  Returns the mutated image.
    """
    height, width, _ = img.shape
    # Candidate mask scales: one large patch down to many tiny ones.
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
    scale = random.choice(scales)
    for _ in range(num_holes):
        cy = np.random.randint(height)
        cx = np.random.randint(width)
        mask_h = random.randint(1, int(height * scale))
        mask_w = random.randint(1, int(width * scale))
        top = np.clip(max(0, cy - mask_h // 2), 0, height)
        bottom = np.clip(max(0, cy + mask_h // 2), 0, height)
        left = np.clip(max(0, cx - mask_w // 2), 0, width)
        right = np.clip(max(0, cx + mask_w // 2), 0, width)
        # Fill the hole with a random mid-range color.
        img[top:bottom, left:right, :] = [random.randint(64, 191) for _ in range(3)]
    return img
def random_perspective(img, degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
    # Random affine/perspective augmentation composed around the image center.
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]
    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2
    # Center: shift the origin to the image center before transforming.
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)
    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)
    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)
    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped
    return img
|
def unpadPKCS7(s):
    """Strip and validate PKCS#7 padding from *s* (bytes).

    The last byte declares the pad length ``i``; the final ``i`` bytes must
    all equal ``i`` (RFC 5652 section 6.3).

    Raises:
        ValueError: if *s* is empty, the declared pad length is zero, or
            the trailing bytes do not all equal the pad length.
    """
    if not s:
        # An empty buffer cannot carry valid padding; the original code
        # raised IndexError here instead of signalling bad padding.
        raise ValueError('bad padding')
    i = s[-1]
    if i == 0 or s[-i:] != bytes([i] * i):
        raise ValueError('bad padding')
    return s[0:-i]
if __name__ == '__main__':
    # Self-test: a valid pad strips cleanly; malformed pads must raise
    # ValueError (Cryptopals set 2, challenge 15 behaviour).
    print(unpadPKCS7(b'ICE ICE BABY\x04\x04\x04\x04'))
    for malformed in (b'ICE ICE BABY\x05\x05\x05\x05', b'ICE ICE BABY\x01\x02\x03\x04'):
        try:
            unpadPKCS7(malformed)
            raise Exception('passes unexpectedly')
        except ValueError:
            pass
|
"""
This module should contain any and every definitions in use to build the swagger UI,
so that one can update the swagger without touching any other files after the initial integration
"""
# pylint: disable=C0103,invalid-name
from typing import TYPE_CHECKING
from colander import (
Boolean,
DateTime,
Float,
Integer,
MappingSchema as MapSchema,
OneOf,
Range,
SequenceSchema as SeqSchema,
String,
drop
)
from cornice import Service
from weaver import __meta__
from weaver.config import WEAVER_CONFIGURATION_EMS
from weaver.execute import (
EXECUTE_CONTROL_OPTION_ASYNC,
EXECUTE_CONTROL_OPTIONS,
EXECUTE_MODE_ASYNC,
EXECUTE_MODE_AUTO,
EXECUTE_MODE_OPTIONS,
EXECUTE_RESPONSE_OPTIONS,
EXECUTE_RESPONSE_RAW,
EXECUTE_TRANSMISSION_MODE_OPTIONS,
EXECUTE_TRANSMISSION_MODE_REFERENCE
)
from weaver.formats import CONTENT_TYPE_APP_JSON, CONTENT_TYPE_APP_XML, CONTENT_TYPE_TEXT_HTML, CONTENT_TYPE_TEXT_PLAIN
from weaver.owsexceptions import OWSMissingParameterValue
from weaver.sort import JOB_SORT_VALUES, QUOTE_SORT_VALUES, SORT_CREATED, SORT_ID, SORT_PROCESS
from weaver.status import JOB_STATUS_CATEGORIES, STATUS_ACCEPTED, STATUS_COMPLIANT_OGC
from weaver.visibility import VISIBILITY_PUBLIC, VISIBILITY_VALUES
from weaver.wps_restapi.colander_extras import (
DropableNoneSchema,
OneOfCaseInsensitive,
OneOfMappingSchema,
SchemaNodeDefault,
VariableMappingSchema
)
from weaver.wps_restapi.utils import wps_restapi_base_path
# Imports used only by static type annotations; guarded so weaver.typedefs
# is not imported at runtime.
if TYPE_CHECKING:
    from weaver.typedefs import SettingsType, TypedDict
    ViewInfo = TypedDict("ViewInfo", {"name": str, "pattern": str})
class SchemaNode(SchemaNodeDefault):
    """
    Override the default :class:`colander.SchemaNode` to auto-handle ``default`` value substitution if an
    actual value was omitted during deserialization for a field defined with this schema and a ``default`` parameter.
    .. seealso::
        Implementation in :class:`SchemaNodeDefault`.
    """
    @staticmethod
    def schema_type():
        # Abstract on purpose: concrete nodes supply the type per instance,
        # e.g. ``SchemaNode(String(), ...)``.
        raise NotImplementedError
class SequenceSchema(DropableNoneSchema, SeqSchema):
    """
    Override the default :class:`colander.SequenceSchema` to auto-handle dropping missing entry definitions
    when its value is either ``None``, :class:`colander.null` or :class:`colander.drop`.
    """
    schema_type = SeqSchema.schema_type  # keep colander's sequence typing


class MappingSchema(DropableNoneSchema, MapSchema):
    """
    Override the default :class:`colander.MappingSchema` to auto-handle dropping missing field definitions
    when the corresponding value is either ``None``, :class:`colander.null` or :class:`colander.drop`.
    """
    schema_type = MapSchema.schema_type  # keep colander's mapping typing


class ExplicitMappingSchema(MapSchema):
    """
    Original behaviour of :class:`colander.MappingSchema` implementation, where fields referencing
    to ``None`` values are kept as an explicit indication of an *undefined* or *missing* value for this field.
    """
API_TITLE = "Weaver REST API"
API_INFO = {
    "description": __meta__.__description__,
    "contact": {"name": __meta__.__authors__, "email": __meta__.__emails__, "url": __meta__.__source_repository__}
}
# Marker passed as the ``format`` argument of URL-typed string schema nodes.
URL = "url"

#########################################################################
# API endpoints
#########################################################################
api_frontpage_uri = "/"
api_swagger_ui_uri = "/api"
api_swagger_json_uri = "/json"
api_versions_uri = "/versions"
api_conformance_uri = "/conformance"
processes_uri = "/processes"
process_uri = "/processes/{process_id}"
process_package_uri = "/processes/{process_id}/package"
process_payload_uri = "/processes/{process_id}/payload"
process_visibility_uri = "/processes/{process_id}/visibility"
process_jobs_uri = "/processes/{process_id}/jobs"
process_job_uri = "/processes/{process_id}/jobs/{job_id}"
process_quotes_uri = "/processes/{process_id}/quotations"
process_quote_uri = "/processes/{process_id}/quotations/{quote_id}"
process_results_uri = "/processes/{process_id}/jobs/{job_id}/result"
process_exceptions_uri = "/processes/{process_id}/jobs/{job_id}/exceptions"
process_logs_uri = "/processes/{process_id}/jobs/{job_id}/logs"
providers_uri = "/providers"
provider_uri = "/providers/{provider_id}"
provider_processes_uri = "/providers/{provider_id}/processes"
provider_process_uri = "/providers/{provider_id}/processes/{process_id}"
jobs_short_uri = "/jobs"
jobs_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs"
job_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}"
job_exceptions_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}/exceptions"
job_short_uri = "/jobs/{job_id}"
quotes_uri = "/quotations"
quote_uri = "/quotations/{quote_id}"
bills_uri = "/bills"
# NOTE(review): singular "/bill/..." while the collection is "/bills" --
# looks intentional upstream, but verify before relying on it.
bill_uri = "/bill/{bill_id}"
results_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}/result"
results_short_uri = "/jobs/{job_id}/result"
result_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}/result/{result_id}"
result_short_uri = "/jobs/{job_id}/result/{result_id}"
exceptions_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}/exceptions"
exceptions_short_uri = "/jobs/{job_id}/exceptions"
logs_full_uri = "/providers/{provider_id}/processes/{process_id}/jobs/{job_id}/logs"
logs_short_uri = "/jobs/{job_id}/logs"

#########################################################
# API tags
#########################################################
TAG_API = "API"
TAG_JOBS = "Jobs"
TAG_VISIBILITY = "Visibility"
TAG_BILL_QUOTE = "Billing & Quoting"
TAG_PROVIDERS = "Providers"
TAG_PROCESSES = "Processes"
TAG_GETCAPABILITIES = "GetCapabilities"
TAG_DESCRIBEPROCESS = "DescribeProcess"
TAG_EXECUTE = "Execute"
TAG_DISMISS = "Dismiss"
TAG_STATUS = "Status"
TAG_DEPLOY = "Deploy"
TAG_RESULTS = "Results"
TAG_EXCEPTIONS = "Exceptions"
TAG_LOGS = "Logs"
TAG_WPS = "WPS"

###############################################################################
# These "services" are wrappers that allow Cornice to generate the JSON API
###############################################################################
api_frontpage_service = Service(name="api_frontpage", path=api_frontpage_uri)
api_swagger_ui_service = Service(name="api_swagger_ui", path=api_swagger_ui_uri)
api_swagger_json_service = Service(name="api_swagger_json", path=api_swagger_json_uri)
api_versions_service = Service(name="api_versions", path=api_versions_uri)
api_conformance_service = Service(name="api_conformance", path=api_conformance_uri)
processes_service = Service(name="processes", path=processes_uri)
process_service = Service(name="process", path=process_uri)
process_package_service = Service(name="process_package", path=process_package_uri)
process_payload_service = Service(name="process_payload", path=process_payload_uri)
process_visibility_service = Service(name="process_visibility", path=process_visibility_uri)
process_jobs_service = Service(name="process_jobs", path=process_jobs_uri)
process_job_service = Service(name="process_job", path=process_job_uri)
process_quotes_service = Service(name="process_quotes", path=process_quotes_uri)
process_quote_service = Service(name="process_quote", path=process_quote_uri)
process_results_service = Service(name="process_results", path=process_results_uri)
process_exceptions_service = Service(name="process_exceptions", path=process_exceptions_uri)
process_logs_service = Service(name="process_logs", path=process_logs_uri)
providers_service = Service(name="providers", path=providers_uri)
provider_service = Service(name="provider", path=provider_uri)
provider_processes_service = Service(name="provider_processes", path=provider_processes_uri)
provider_process_service = Service(name="provider_process", path=provider_process_uri)
jobs_short_service = Service(name="jobs_short", path=jobs_short_uri)
jobs_full_service = Service(name="jobs_full", path=jobs_full_uri)
job_full_service = Service(name="job_full", path=job_full_uri)
job_short_service = Service(name="job_short", path=job_short_uri)
quotes_service = Service(name="quotes", path=quotes_uri)
quote_service = Service(name="quote", path=quote_uri)
bills_service = Service(name="bills", path=bills_uri)
bill_service = Service(name="bill", path=bill_uri)
results_full_service = Service(name="results_full", path=results_full_uri)
results_short_service = Service(name="results_short", path=results_short_uri)
exceptions_full_service = Service(name="exceptions_full", path=exceptions_full_uri)
exceptions_short_service = Service(name="exceptions_short", path=exceptions_short_uri)
logs_full_service = Service(name="logs_full", path=logs_full_uri)
logs_short_service = Service(name="logs_short", path=logs_short_uri)
#########################################################
# Path parameter definitions
#########################################################
class ProcessPath(MappingSchema):
    # Matches the ``{process_id}`` component of process-scoped URIs.
    process_id = SchemaNode(String(), description="The process id")


class ProviderPath(MappingSchema):
    provider_id = SchemaNode(String(), description="The provider id")


class JobPath(MappingSchema):
    job_id = SchemaNode(String(), description="The job id")


class BillPath(MappingSchema):
    bill_id = SchemaNode(String(), description="The bill id")


class QuotePath(MappingSchema):
    quote_id = SchemaNode(String(), description="The quote id")


class ResultPath(MappingSchema):
    result_id = SchemaNode(String(), description="The result id")
#########################################################
# Generic schemas
#########################################################
class JsonHeader(MappingSchema):
    content_type = SchemaNode(String(), example=CONTENT_TYPE_APP_JSON, default=CONTENT_TYPE_APP_JSON)
    content_type.name = "Content-Type"  # HTTP header name is not a valid identifier


class HtmlHeader(MappingSchema):
    content_type = SchemaNode(String(), example=CONTENT_TYPE_TEXT_HTML, default=CONTENT_TYPE_TEXT_HTML)
    content_type.name = "Content-Type"


class XmlHeader(MappingSchema):
    content_type = SchemaNode(String(), example=CONTENT_TYPE_APP_XML, default=CONTENT_TYPE_APP_XML)
    content_type.name = "Content-Type"


class AcceptHeader(MappingSchema):
    # Accepted response content types; JSON is the default representation.
    Accept = SchemaNode(String(), missing=drop, default=CONTENT_TYPE_APP_JSON, validator=OneOf([
        CONTENT_TYPE_APP_JSON,
        CONTENT_TYPE_APP_XML,
        CONTENT_TYPE_TEXT_HTML,
    ]))


class AcceptLanguageHeader(AcceptHeader):
    AcceptLanguage = SchemaNode(String(), missing=drop)
    AcceptLanguage.name = "Accept-Language"


class KeywordList(SequenceSchema):
    keyword = SchemaNode(String())


class JsonLink(MappingSchema):
    href = SchemaNode(String(), format=URL, description="Reference URL.")
    rel = SchemaNode(String(), description="Relationship of the contained link respective to the current element.")
    type = SchemaNode(String(), missing=drop)
    hreflang = SchemaNode(String(), missing=drop)
    title = SchemaNode(String(), missing=drop)


class MetadataBase(MappingSchema):
    title = SchemaNode(String(), missing=drop)
    role = SchemaNode(String(), format=URL, missing=drop)
    type = SchemaNode(String(), description="Type of metadata entry.")


class MetadataLink(MetadataBase, JsonLink):
    pass


class MetadataValue(MetadataBase):
    value = SchemaNode(String())
    lang = SchemaNode(String())


class Metadata(OneOfMappingSchema):
    # A metadata entry is either a link or a literal value.
    _one_of = (MetadataLink, MetadataValue)


class MetadataList(SequenceSchema):
    item = Metadata()


class JsonLinkList(SequenceSchema):
    item = JsonLink()
class LandingPage(MappingSchema):
    links = JsonLinkList()


class Format(MappingSchema):
    mimeType = SchemaNode(String(), default=CONTENT_TYPE_TEXT_PLAIN)
    schema = SchemaNode(String(), missing=drop)
    encoding = SchemaNode(String(), missing=drop)


class FormatDescription(Format):
    maximumMegabytes = SchemaNode(Integer(), missing=drop)
    default = SchemaNode(Boolean(), missing=drop, default=False)


class FormatDescriptionList(SequenceSchema):
    format = FormatDescription()


class AdditionalParameterValuesList(SequenceSchema):
    values = SchemaNode(String())


class AdditionalParameter(MappingSchema):
    name = SchemaNode(String())
    values = AdditionalParameterValuesList()


class AdditionalParameterList(SequenceSchema):
    item = AdditionalParameter()


class AdditionalParameters(MappingSchema):
    role = SchemaNode(String(), missing=drop)
    parameters = AdditionalParameterList(missing=drop)


class AdditionalParametersList(SequenceSchema):
    additionalParameter = AdditionalParameters()


class Content(MappingSchema):
    href = SchemaNode(String(), format=URL, description="URL to CWL file.", title="href",
                      example="http://some.host/applications/cwl/multisensor_ndvi.cwl")


class Offering(MappingSchema):
    code = SchemaNode(String(), missing=drop, description="Descriptor of represented information in 'content'.")
    content = Content(title="content", missing=drop)


class OWSContext(MappingSchema):
    offering = Offering(title="offering")


class DescriptionType(MappingSchema):
    # Common descriptive fields shared by process/input/output definitions.
    id = SchemaNode(String())
    title = SchemaNode(String(), missing=drop)
    abstract = SchemaNode(String(), missing=drop)
    keywords = KeywordList(missing=drop)
    owsContext = OWSContext(missing=drop, title="owsContext")
    metadata = MetadataList(missing=drop)
    additionalParameters = AdditionalParametersList(missing=drop, title="additionalParameters")
    links = JsonLinkList(missing=drop, title="links")


class MinMaxOccursInt(MappingSchema):
    minOccurs = SchemaNode(Integer(), missing=drop)
    maxOccurs = SchemaNode(Integer(), missing=drop)


class MinMaxOccursStr(MappingSchema):
    minOccurs = SchemaNode(String(), missing=drop)
    maxOccurs = SchemaNode(String(), missing=drop)


class WithMinMaxOccurs(OneOfMappingSchema):
    # Occurrence bounds may be provided as strings or integers.
    _one_of = (MinMaxOccursStr, MinMaxOccursInt)
class ComplexInputType(DescriptionType, WithMinMaxOccurs):
    formats = FormatDescriptionList()


class SupportedCrs(MappingSchema):
    crs = SchemaNode(String(), format=URL)
    default = SchemaNode(Boolean(), missing=drop)


class SupportedCrsList(SequenceSchema):
    item = SupportedCrs()


class BoundingBoxInputType(DescriptionType, WithMinMaxOccurs):
    supportedCRS = SupportedCrsList()


class DataTypeSchema(MappingSchema):
    name = SchemaNode(String())
    reference = SchemaNode(String(), format=URL, missing=drop)


class UomSchema(DataTypeSchema):
    pass


class AllowedValuesList(SequenceSchema):
    allowedValues = SchemaNode(String())


class AllowedValues(MappingSchema):
    allowedValues = AllowedValuesList()


class AllowedRange(MappingSchema):
    minimumValue = SchemaNode(String(), missing=drop)
    maximumValue = SchemaNode(String(), missing=drop)
    spacing = SchemaNode(String(), missing=drop)
    rangeClosure = SchemaNode(String(), missing=drop,
                              validator=OneOf(["closed", "open", "open-closed", "closed-open"]))


class AllowedRangesList(SequenceSchema):
    allowedRanges = AllowedRange()


class AllowedRanges(MappingSchema):
    allowedRanges = AllowedRangesList()


class AnyValue(MappingSchema):
    anyValue = SchemaNode(Boolean(), missing=drop, default=True)


class ValuesReference(MappingSchema):
    valueReference = SchemaNode(String(), format=URL, )


class LiteralDataDomainType(OneOfMappingSchema):
    _one_of = (AllowedValues,
               AllowedRanges,
               ValuesReference,
               AnyValue)  # must be last because it's the most permissive
    defaultValue = SchemaNode(String(), missing=drop)
    dataType = DataTypeSchema(missing=drop)
    uom = UomSchema(missing=drop)


class LiteralDataDomainTypeList(SequenceSchema):
    literalDataDomain = LiteralDataDomainType()


class LiteralInputType(DescriptionType, WithMinMaxOccurs):
    literalDataDomains = LiteralDataDomainTypeList(missing=drop)


class InputType(OneOfMappingSchema):
    _one_of = (
        BoundingBoxInputType,
        ComplexInputType,  # should be 2nd to last because very permissive, but requires format at least
        LiteralInputType,  # must be last because it's the most permissive (all can default if omitted)
    )


class InputTypeList(SequenceSchema):
    input = InputType()


class LiteralOutputType(MappingSchema):
    literalDataDomains = LiteralDataDomainTypeList(missing=drop)


class BoundingBoxOutputType(MappingSchema):
    supportedCRS = SupportedCrsList()


class ComplexOutputType(MappingSchema):
    formats = FormatDescriptionList()


class OutputDataDescriptionType(DescriptionType):
    pass


class OutputType(OneOfMappingSchema, OutputDataDescriptionType):
    _one_of = (
        BoundingBoxOutputType,
        ComplexOutputType,  # should be 2nd to last because very permissive, but requires format at least
        LiteralOutputType,  # must be last because it's the most permissive (all can default if omitted)
    )


class OutputDescriptionList(SequenceSchema):
    item = OutputType()
class JobExecuteModeEnum(SchemaNode):
    # String node restricted to the execution modes; any caller-supplied
    # validator is discarded on purpose.
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(JobExecuteModeEnum, self).__init__(
            self.schema_type(),
            title=kwargs.get("title", "mode"),
            default=kwargs.get("default", EXECUTE_MODE_AUTO),
            example=kwargs.get("example", EXECUTE_MODE_ASYNC),
            validator=OneOf(list(EXECUTE_MODE_OPTIONS)),
            **kwargs)


class JobControlOptionsEnum(SchemaNode):
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(JobControlOptionsEnum, self).__init__(
            self.schema_type(),
            title="jobControlOptions",
            default=kwargs.get("default", EXECUTE_CONTROL_OPTION_ASYNC),
            example=kwargs.get("example", EXECUTE_CONTROL_OPTION_ASYNC),
            validator=OneOf(list(EXECUTE_CONTROL_OPTIONS)),
            **kwargs)


class JobResponseOptionsEnum(SchemaNode):
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(JobResponseOptionsEnum, self).__init__(
            self.schema_type(),
            title=kwargs.get("title", "response"),
            default=kwargs.get("default", EXECUTE_RESPONSE_RAW),
            example=kwargs.get("example", EXECUTE_RESPONSE_RAW),
            validator=OneOf(list(EXECUTE_RESPONSE_OPTIONS)),
            **kwargs)


class TransmissionModeEnum(SchemaNode):
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(TransmissionModeEnum, self).__init__(
            self.schema_type(),
            title=kwargs.get("title", "transmissionMode"),
            default=kwargs.get("default", EXECUTE_TRANSMISSION_MODE_REFERENCE),
            example=kwargs.get("example", EXECUTE_TRANSMISSION_MODE_REFERENCE),
            validator=OneOf(list(EXECUTE_TRANSMISSION_MODE_OPTIONS)),
            **kwargs)


class JobStatusEnum(SchemaNode):
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(JobStatusEnum, self).__init__(
            self.schema_type(),
            default=kwargs.get("default", None),
            example=kwargs.get("example", STATUS_ACCEPTED),
            validator=OneOf(list(JOB_STATUS_CATEGORIES[STATUS_COMPLIANT_OGC])),
            **kwargs)
class JobSortEnum(SchemaNode):
    """Query parameter validating the sort field for job listings."""
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(JobSortEnum, self).__init__(
            # Use ``self.schema_type()`` for consistency with the sibling
            # enum schema nodes (previously a hard-coded ``String()``).
            self.schema_type(),
            default=kwargs.get("default", SORT_CREATED),
            example=kwargs.get("example", SORT_CREATED),
            validator=OneOf(list(JOB_SORT_VALUES)),
            **kwargs)
class QuoteSortEnum(SchemaNode):
    # Sort field for quote listings; enforces the known sort values.
    schema_type = String

    def __init__(self, *args, **kwargs):  # noqa: E811
        kwargs.pop("validator", None)  # ignore passed argument and enforce the validator
        super(QuoteSortEnum, self).__init__(
            self.schema_type(),
            default=kwargs.get("default", SORT_ID),
            example=kwargs.get("example", SORT_PROCESS),
            validator=OneOf(list(QUOTE_SORT_VALUES)),
            **kwargs)


class LaunchJobQuerystring(MappingSchema):
    tags = SchemaNode(String(), default=None, missing=drop,
                      description="Comma separated tags that can be used to filter jobs later")


class VisibilityValue(SchemaNode):
    schema_type = String
    validator = OneOf(list(VISIBILITY_VALUES))
    example = VISIBILITY_PUBLIC


class Visibility(MappingSchema):
    value = VisibilityValue()
#########################################################
# These classes define each of the endpoints parameters
#########################################################
# Request schemas for the basic API metadata endpoints (header-only requests).
class FrontpageEndpoint(MappingSchema):
    header = AcceptHeader()
class VersionsEndpoint(MappingSchema):
    header = AcceptHeader()
class ConformanceEndpoint(MappingSchema):
    header = AcceptHeader()
class SwaggerJSONEndpoint(MappingSchema):
    header = AcceptHeader()
class SwaggerUIEndpoint(MappingSchema):
    pass
class WPSParameters(MappingSchema):
    """Querystring parameters for WPS KVP (Key-Value Pairs) requests."""
    service = SchemaNode(String(), example="WPS", description="Service selection.",
                         validator=OneOfCaseInsensitive(["WPS"]))
    request = SchemaNode(String(), example="GetCapabilities", description="WPS operation to accomplish",
                         validator=OneOfCaseInsensitive(["GetCapabilities", "DescribeProcess", "Execute"]))
    # fixed 'exaple' -> 'example' typos so the examples actually appear in the generated API documentation
    version = SchemaNode(String(), example="1.0.0", default="1.0.0", validator=OneOf(["1.0.0", "2.0.0"]))
    identifier = SchemaNode(String(), example="hello", description="Process identifier.", missing=drop)
    data_inputs = SchemaNode(String(), name="DataInputs", missing=drop, example="message=hi",
                             description="Process execution inputs provided as Key-Value Pairs (KVP).")
class WPSBody(MappingSchema):
    content = SchemaNode(String(), description="XML data inputs provided for WPS POST request.")
class WPSEndpoint(MappingSchema):
    # Complete WPS request definition: headers, KVP querystring and optional XML body.
    header = AcceptHeader()
    querystring = WPSParameters()
    body = WPSBody()
class WPSXMLSuccessBodySchema(MappingSchema):
    pass
class OkWPSResponse(MappingSchema):
    description = "WPS operation successful"
    header = XmlHeader()
    body = WPSXMLSuccessBodySchema()
class WPSXMLErrorBodySchema(MappingSchema):
    pass
class ErrorWPSResponse(MappingSchema):
    description = "Unhandled error occurred on WPS endpoint."
    header = XmlHeader()
    body = WPSXMLErrorBodySchema()
# Request schemas for provider/process/job endpoints.
# Each combines the relevant *Path mixins (provider/process/job id path parameters)
# with the Accept header; bodies are added only where the endpoint takes one.
class ProviderEndpoint(ProviderPath):
    header = AcceptHeader()
class ProviderProcessEndpoint(ProviderPath, ProcessPath):
    header = AcceptHeader()
class ProcessEndpoint(ProcessPath):
    header = AcceptHeader()
class ProcessPackageEndpoint(ProcessPath):
    header = AcceptHeader()
class ProcessPayloadEndpoint(ProcessPath):
    header = AcceptHeader()
class ProcessVisibilityGetEndpoint(ProcessPath):
    header = AcceptHeader()
class ProcessVisibilityPutEndpoint(ProcessPath):
    header = AcceptHeader()
    body = Visibility()
class FullJobEndpoint(ProviderPath, ProcessPath, JobPath):
    header = AcceptHeader()
class ShortJobEndpoint(JobPath):
    header = AcceptHeader()
class ProcessResultsEndpoint(ProcessPath, JobPath):
    header = AcceptHeader()
class FullResultsEndpoint(ProviderPath, ProcessPath, JobPath):
    header = AcceptHeader()
class ShortResultsEndpoint(ProviderPath, ProcessPath, JobPath):
    header = AcceptHeader()
class FullExceptionsEndpoint(ProviderPath, ProcessPath, JobPath):
    header = AcceptHeader()
class ShortExceptionsEndpoint(JobPath):
    header = AcceptHeader()
class ProcessExceptionsEndpoint(ProcessPath, JobPath):
    header = AcceptHeader()
class FullLogsEndpoint(ProviderPath, ProcessPath, JobPath):
    header = AcceptHeader()
class ShortLogsEndpoint(JobPath):
    header = AcceptHeader()
class ProcessLogsEndpoint(ProcessPath, JobPath):
    header = AcceptHeader()
##################################################################
# These classes define schemas for requests that feature a body
##################################################################
class CreateProviderRequestBody(MappingSchema):
    # Body for registering a remote WPS provider.
    id = SchemaNode(String())
    url = SchemaNode(String())
    public = SchemaNode(Boolean())
class InputDataType(MappingSchema):
    id = SchemaNode(String())
class OutputDataType(MappingSchema):
    id = SchemaNode(String())
    format = Format(missing=drop)
class Output(OutputDataType):
    transmissionMode = TransmissionModeEnum(missing=drop)
class OutputList(SequenceSchema):
    output = Output()
class ProviderSummarySchema(MappingSchema):
    """WPS provider summary definition."""
    id = SchemaNode(String())
    url = SchemaNode(String())
    title = SchemaNode(String())
    abstract = SchemaNode(String())
    public = SchemaNode(Boolean())
class ProviderCapabilitiesSchema(MappingSchema):
    """WPS provider capabilities."""
    id = SchemaNode(String())
    url = SchemaNode(String())
    title = SchemaNode(String())
    abstract = SchemaNode(String())
    contact = SchemaNode(String())
    type = SchemaNode(String())
class TransmissionModeList(SequenceSchema):
    item = TransmissionModeEnum(missing=drop)
class JobControlOptionsList(SequenceSchema):
    item = JobControlOptionsEnum(missing=drop)
class ExceptionReportType(MappingSchema):
    code = SchemaNode(String())
    description = SchemaNode(String(), missing=drop)
class ProcessSummary(DescriptionType):
    """WPS process definition."""
    version = SchemaNode(String(), missing=drop)
    jobControlOptions = JobControlOptionsList(missing=drop)
    outputTransmission = TransmissionModeList(missing=drop)
    processDescriptionURL = SchemaNode(String(), format=URL, missing=drop)
class ProcessSummaryList(SequenceSchema):
    item = ProcessSummary()
class ProcessCollection(MappingSchema):
    processes = ProcessSummaryList()
class Process(DescriptionType):
    # Full process description: I/O definitions plus deployment metadata.
    inputs = InputTypeList(missing=drop)
    outputs = OutputDescriptionList(missing=drop)
    visibility = VisibilityValue(missing=drop)
    executeEndpoint = SchemaNode(String(), format=URL, missing=drop)
class ProcessOutputDescriptionSchema(MappingSchema):
    """WPS process output definition."""
    dataType = SchemaNode(String())
    defaultValue = MappingSchema()
    id = SchemaNode(String())
    abstract = SchemaNode(String())
    title = SchemaNode(String())
class JobStatusInfo(MappingSchema):
    # Status details of a single job.
    jobID = SchemaNode(String(), example="a9d14bf4-84e0-449a-bac8-16e598efe807", description="ID of the job.")
    status = JobStatusEnum()
    message = SchemaNode(String(), missing=drop)
    # fixme: use href links (https://github.com/crim-ca/weaver/issues/58) [logs/result/exceptions]
    logs = SchemaNode(String(), missing=drop)
    result = SchemaNode(String(), missing=drop)
    exceptions = SchemaNode(String(), missing=drop)
    expirationDate = SchemaNode(DateTime(), missing=drop)
    estimatedCompletion = SchemaNode(DateTime(), missing=drop)
    duration = SchemaNode(String(), missing=drop, description="Duration of the process execution.")
    nextPoll = SchemaNode(DateTime(), missing=drop)
    percentCompleted = SchemaNode(Integer(), example=0, validator=Range(min=0, max=100))
class JobEntrySchema(OneOfMappingSchema):
    # Either a full status mapping or a plain job-ID string.
    _one_of = (
        JobStatusInfo,
        SchemaNode(String(), description="Job ID."),
    )
    # note:
    #   Since JobId is a simple string (not a dict), no additional mapping field can be added here.
    #   They will be discarded by `OneOfMappingSchema.deserialize()`.
class JobCollection(SequenceSchema):
    item = JobEntrySchema()
class CreatedJobStatusSchema(MappingSchema):
    status = SchemaNode(String(), example=STATUS_ACCEPTED)
    location = SchemaNode(String(), example="http://{host}/weaver/processes/{my-process-id}/jobs/{my-job-id}")
    jobID = SchemaNode(String(), example="a9d14bf4-84e0-449a-bac8-16e598efe807", description="ID of the created job.")
class CreatedQuotedJobStatusSchema(CreatedJobStatusSchema):
    bill = SchemaNode(String(), example="d88fda5c-52cc-440b-9309-f2cd20bcd6a2", description="ID of the created bill.")
class GetPagingJobsSchema(MappingSchema):
    # Paged listing variant of the jobs response.
    jobs = JobCollection()
    limit = SchemaNode(Integer())
    page = SchemaNode(Integer())
class GroupedJobsCategorySchema(MappingSchema):
    category = VariableMappingSchema(description="Grouping values that compose the corresponding job list category.")
    jobs = JobCollection(description="List of jobs that matched the corresponding grouping values.")
    count = SchemaNode(Integer(), description="Number of matching jobs for the corresponding group category.")
class GroupedCategoryJobsSchema(SequenceSchema):
    job_group_category_item = GroupedJobsCategorySchema()
class GetGroupedJobsSchema(MappingSchema):
    groups = GroupedCategoryJobsSchema()
class GetQueriedJobsSchema(OneOfMappingSchema):
    # Jobs listing response: either paged or grouped form, always with a total.
    _one_of = (
        GetPagingJobsSchema,
        GetGroupedJobsSchema,
    )
    total = SchemaNode(Integer(), description="Total number of matched jobs regardless of grouping or paging result.")
class DismissedJobSchema(MappingSchema):
    status = JobStatusEnum()
    jobID = SchemaNode(String(), example="a9d14bf4-84e0-449a-bac8-16e598efe807", description="ID of the job.")
    message = SchemaNode(String(), example="Job dismissed.")
    percentCompleted = SchemaNode(Integer(), example=0)
class QuoteProcessParametersSchema(MappingSchema):
    # Execution parameters submitted when requesting a quote for a process.
    inputs = InputTypeList(missing=drop)
    outputs = OutputDescriptionList(missing=drop)
    mode = JobExecuteModeEnum(missing=drop)
    response = JobResponseOptionsEnum(missing=drop)
class AlternateQuotation(MappingSchema):
    id = SchemaNode(String(), description="Quote ID.")
    title = SchemaNode(String(), description="Name of the quotation.", missing=drop)
    description = SchemaNode(String(), description="Description of the quotation.", missing=drop)
    price = SchemaNode(Float(), description="Process execution price.")
    currency = SchemaNode(String(), description="Currency code in ISO-4217 format.")
    expire = SchemaNode(DateTime(), description="Expiration date and time of the quote in ISO-8601 format.")
    created = SchemaNode(DateTime(), description="Creation date and time of the quote in ISO-8601 format.")
    details = SchemaNode(String(), description="Details of the quotation.", missing=drop)
    estimatedTime = SchemaNode(String(), description="Estimated duration of the process execution.", missing=drop)
class AlternateQuotationList(SequenceSchema):
    step = AlternateQuotation(description="Quote of a workflow step process.")
class Reference(MappingSchema):
    # Input/output passed by reference (href) instead of inline data.
    href = SchemaNode(String())
    mimeType = SchemaNode(String(), missing=drop)
    schema = SchemaNode(String(), missing=drop)
    encoding = SchemaNode(String(), missing=drop)
    body = SchemaNode(String(), missing=drop)
    bodyReference = SchemaNode(String(), missing=drop, format=URL)
class DataEncodingAttributes(MappingSchema):
    # Common media-type attributes shared by the literal data variants below.
    mimeType = SchemaNode(String(), missing=drop)
    schema = SchemaNode(String(), missing=drop)
    encoding = SchemaNode(String(), missing=drop)
class DataFloat(DataEncodingAttributes):
    data = SchemaNode(Float())
class DataInteger(DataEncodingAttributes):
    data = SchemaNode(Integer())
class DataString(DataEncodingAttributes):
    data = SchemaNode(String())
class DataBoolean(DataEncodingAttributes):
    data = SchemaNode(Boolean())
class ValueType(OneOfMappingSchema):
    # A value is either inline literal data (typed) or a reference.
    _one_of = (DataFloat,
               DataInteger,
               DataString,
               DataBoolean,
               Reference)
class Input(InputDataType, ValueType):
    pass
class InputList(SequenceSchema):
    item = Input(missing=drop)
class Execute(MappingSchema):
    # Body of a job execution request.
    inputs = InputList(missing=drop)
    outputs = OutputList()
    mode = SchemaNode(String(), validator=OneOf(list(EXECUTE_MODE_OPTIONS)))
    notification_email = SchemaNode(
        String(),
        missing=drop,
        description="Optionally send a notification email when the job is done.")
    response = SchemaNode(String(), validator=OneOf(list(EXECUTE_RESPONSE_OPTIONS)))
class Quotation(MappingSchema):
    # Full quote details for one process execution.
    id = SchemaNode(String(), description="Quote ID.")
    title = SchemaNode(String(), description="Name of the quotation.", missing=drop)
    description = SchemaNode(String(), description="Description of the quotation.", missing=drop)
    processId = SchemaNode(String(), description="Corresponding process ID.")
    price = SchemaNode(Float(), description="Process execution price.")
    currency = SchemaNode(String(), description="Currency code in ISO-4217 format.")
    expire = SchemaNode(DateTime(), description="Expiration date and time of the quote in ISO-8601 format.")
    created = SchemaNode(DateTime(), description="Creation date and time of the quote in ISO-8601 format.")
    userId = SchemaNode(String(), description="User id that requested the quote.")
    details = SchemaNode(String(), description="Details of the quotation.", missing=drop)
    estimatedTime = SchemaNode(String(), description="Estimated duration of the process execution.", missing=drop)
    processParameters = Execute()
    alternativeQuotations = AlternateQuotationList(missing=drop)
class QuoteProcessListSchema(SequenceSchema):
    step = Quotation(description="Quote of a workflow step process.")
class QuoteSchema(MappingSchema):
    id = SchemaNode(String(), description="Quote ID.")
    process = SchemaNode(String(), description="Corresponding process ID.")
    steps = QuoteProcessListSchema(description="Child processes and prices.")
    total = SchemaNode(Float(), description="Total of the quote including step processes.")
class QuotationList(SequenceSchema):
    item = SchemaNode(String(), description="Bill ID.")
class QuotationListSchema(MappingSchema):
    quotations = QuotationList()
class BillSchema(MappingSchema):
    id = SchemaNode(String(), description="Bill ID.")
    title = SchemaNode(String(), description="Name of the bill.")
    description = SchemaNode(String(), missing=drop)
    price = SchemaNode(Float(), description="Price associated to the bill.")
    currency = SchemaNode(String(), description="Currency code in ISO-4217 format.")
    created = SchemaNode(DateTime(), description="Creation date and time of the bill in ISO-8601 format.")
    userId = SchemaNode(String(), description="User id that requested the quote.")
    quotationId = SchemaNode(String(), description="Corresponding quote ID.", missing=drop)
class BillList(SequenceSchema):
    item = SchemaNode(String(), description="Bill ID.")
class BillListSchema(MappingSchema):
    bills = BillList()
class SupportedValues(MappingSchema):
    pass
class DefaultValues(MappingSchema):
    pass
class Unit(MappingSchema):
    pass
class UnitType(MappingSchema):
    unit = Unit()
class ProcessInputDescriptionSchema(MappingSchema):
    minOccurs = SchemaNode(Integer())
    maxOccurs = SchemaNode(Integer())
    title = SchemaNode(String())
    dataType = SchemaNode(String())
    abstract = SchemaNode(String())
    id = SchemaNode(String())
    defaultValue = SequenceSchema(DefaultValues())
    supportedValues = SequenceSchema(SupportedValues())
class ProcessDescriptionSchema(MappingSchema):
    # Process description as returned by DescribeProcess-like endpoints.
    outputs = SequenceSchema(ProcessOutputDescriptionSchema())
    inputs = SequenceSchema(ProcessInputDescriptionSchema())
    description = SchemaNode(String())
    id = SchemaNode(String())
    label = SchemaNode(String())
class UndeploymentResult(MappingSchema):
    id = SchemaNode(String())
class DeploymentResult(MappingSchema):
    processSummary = ProcessSummary()
class ProcessDescriptionBodySchema(MappingSchema):
    process = ProcessDescriptionSchema()
class ProvidersSchema(SequenceSchema):
    providers_service = ProviderSummarySchema()
class JobOutputSchema(MappingSchema):
    # A single job output: inline 'data' or an 'href' reference.
    id = SchemaNode(String(), description="Job output id corresponding to process description outputs.")
    data = SchemaNode(String(), missing=drop)
    href = SchemaNode(String(), format=URL, missing=drop)
    mimeType = SchemaNode(String(), missing=drop)
    schema = SchemaNode(String(), missing=drop)
    encoding = SchemaNode(String(), missing=drop)
class JobOutputsSchema(SequenceSchema):
    output = JobOutputSchema()
class OutputInfo(OutputDataType, OneOfMappingSchema):
    # Output value: typed literal data or a reference.
    _one_of = (DataFloat,
               DataInteger,
               DataString,
               DataBoolean,
               Reference)
class OutputInfoList(SequenceSchema):
    output = OutputInfo()
class ExceptionTextList(SequenceSchema):
    text = SchemaNode(String())
class ExceptionSchema(MappingSchema):
    Code = SchemaNode(String())
    Locator = SchemaNode(String())
    Text = ExceptionTextList()
class ExceptionsOutputSchema(SequenceSchema):
    exceptions = ExceptionSchema()
class LogsOutputSchema(MappingSchema):
    pass
class FrontpageParameterSchema(MappingSchema):
    name = SchemaNode(String(), example="api")
    enabled = SchemaNode(Boolean(), example=True)
    url = SchemaNode(String(), example="https://weaver-host", missing=drop)
    doc = SchemaNode(String(), example="https://weaver-host/api", missing=drop)
class FrontpageParameters(SequenceSchema):
    param = FrontpageParameterSchema()
class FrontpageSchema(MappingSchema):
    message = SchemaNode(String(), default="Weaver Information", example="Weaver Information")
    configuration = SchemaNode(String(), default="default", example="default")
    parameters = FrontpageParameters()
class SwaggerJSONSpecSchema(MappingSchema):
    pass
class SwaggerUISpecSchema(MappingSchema):
    pass
class VersionsSpecSchema(MappingSchema):
    name = SchemaNode(String(), description="Identification name of the current item.", example="weaver")
    type = SchemaNode(String(), description="Identification type of the current item.", example="api")
    version = SchemaNode(String(), description="Version of the current item.", example="0.1.0")
class VersionsList(SequenceSchema):
    item = VersionsSpecSchema()
class VersionsSchema(MappingSchema):
    versions = VersionsList()
class ConformanceList(SequenceSchema):
    item = SchemaNode(String(), description="Conformance specification link.",
                      example="http://www.opengis.net/spec/wfs-1/3.0/req/core")
class ConformanceSchema(MappingSchema):
    conformsTo = ConformanceList()
#################################
# Local Processes schemas
#################################
class PackageBody(MappingSchema):
    pass
class ExecutionUnit(OneOfMappingSchema):
    """Deployment execution unit: either an external reference or an inline unit definition.
    Derives from 'OneOfMappingSchema' so that '_one_of' alternatives are actually evaluated;
    a plain 'MappingSchema' (as originally declared) silently ignores '_one_of', matching the
    other '_one_of' users in this module (e.g. 'ValueType', 'ProcessDescriptionChoiceType').
    """
    _one_of = (Reference,
               UnitType)
class ExecutionUnitList(SequenceSchema):
    item = ExecutionUnit()
class ProcessOffering(MappingSchema):
    processVersion = SchemaNode(String(), missing=drop)
    process = Process()
    processEndpointWPS1 = SchemaNode(String(), missing=drop, format=URL)
    jobControlOptions = JobControlOptionsList(missing=drop)
    outputTransmission = TransmissionModeList(missing=drop)
class ProcessDescriptionChoiceType(OneOfMappingSchema):
    _one_of = (Reference,
               ProcessOffering)
class Deploy(MappingSchema):
    # Body of a process deployment request.
    processDescription = ProcessDescriptionChoiceType()
    immediateDeployment = SchemaNode(Boolean(), missing=drop, default=True)
    executionUnit = ExecutionUnitList()
    deploymentProfileName = SchemaNode(String(), missing=drop)
    owsContext = OWSContext(missing=drop)
class PostProcessesEndpoint(MappingSchema):
    header = AcceptHeader()
    body = Deploy(title="Deploy")
class PostProcessJobsEndpoint(ProcessPath):
    header = AcceptLanguageHeader()
    body = Execute()
class GetJobsQueries(MappingSchema):
    # Querystring filters supported by the jobs listing endpoints.
    detail = SchemaNode(Boolean(), description="Provide job details instead of IDs.",
                        default=False, example=True, missing=drop)
    # NOTE(review): 'default=False' on this String node looks like a copy/paste from 'detail'
    # (a string default such as None/"" would be expected) — confirm intended behavior.
    groups = SchemaNode(String(), description="Comma-separated list of grouping fields with which to list jobs.",
                        default=False, example="process,service", missing=drop)
    page = SchemaNode(Integer(), missing=drop, default=0)
    limit = SchemaNode(Integer(), missing=drop, default=10)
    status = JobStatusEnum(missing=drop)
    process = SchemaNode(String(), missing=drop, default=None)
    provider = SchemaNode(String(), missing=drop, default=None)
    sort = JobSortEnum(missing=drop)
    tags = SchemaNode(String(), missing=drop, default=None,
                      description="Comma-separated values of tags assigned to jobs")
class GetJobsRequest(MappingSchema):
    header = AcceptHeader()
    querystring = GetJobsQueries()
# Concrete job/bill/quote endpoint request schemas built from the shared mixins.
class GetJobsEndpoint(GetJobsRequest):
    pass
class GetProcessJobsEndpoint(GetJobsRequest, ProcessPath):
    pass
class GetProviderJobsEndpoint(GetJobsRequest, ProviderPath, ProcessPath):
    pass
class GetProcessJobEndpoint(ProcessPath):
    header = AcceptHeader()
class DeleteProcessJobEndpoint(ProcessPath):
    header = AcceptHeader()
class BillsEndpoint(MappingSchema):
    header = AcceptHeader()
class BillEndpoint(BillPath):
    header = AcceptHeader()
class ProcessQuotesEndpoint(ProcessPath):
    header = AcceptHeader()
class ProcessQuoteEndpoint(ProcessPath, QuotePath):
    header = AcceptHeader()
class GetQuotesQueries(MappingSchema):
    page = SchemaNode(Integer(), missing=drop, default=0)
    limit = SchemaNode(Integer(), missing=drop, default=10)
    process = SchemaNode(String(), missing=drop, default=None)
    sort = QuoteSortEnum(missing=drop)
class QuotesEndpoint(MappingSchema):
    header = AcceptHeader()
    querystring = GetQuotesQueries()
class QuoteEndpoint(QuotePath):
    header = AcceptHeader()
class PostProcessQuote(ProcessPath, QuotePath):
    header = AcceptHeader()
    body = MappingSchema(default={})
class PostQuote(QuotePath):
    header = AcceptHeader()
    body = MappingSchema(default={})
class PostProcessQuoteRequestEndpoint(ProcessPath, QuotePath):
    header = AcceptHeader()
    body = QuoteProcessParametersSchema()
#################################
# Provider Processes schemas
#################################
class GetProviders(MappingSchema):
    header = AcceptHeader()
class PostProvider(MappingSchema):
    header = AcceptHeader()
    body = CreateProviderRequestBody()
class GetProviderProcesses(MappingSchema):
    header = AcceptHeader()
class GetProviderProcess(MappingSchema):
    header = AcceptHeader()
class PostProviderProcessJobRequest(MappingSchema):
    """Launching a new process request definition."""
    header = AcceptHeader()
    querystring = LaunchJobQuerystring()
    body = Execute()
#################################
# Responses schemas
#################################
class OWSExceptionResponse(MappingSchema):
    # OWS-style exception report embedded in JSON error responses.
    code = SchemaNode(String(), description="OWS error code.", example="InvalidParameterValue")
    locator = SchemaNode(String(), description="Indication of the element that caused the error.", example="identifier")
    message = SchemaNode(String(), description="Specific description of the error.", example="Invalid process ID.")
class ErrorJsonResponseBodySchema(MappingSchema):
    code = SchemaNode(Integer(), description="HTTP status code.", example=400)
    status = SchemaNode(String(), description="HTTP status detail.", example="400 Bad Request")
    title = SchemaNode(String(), description="HTTP status message.", example="Bad Request")
    description = SchemaNode(String(), description="", example="Process identifier is invalid.")
    exception = OWSExceptionResponse(missing=drop)
class UnauthorizedJsonResponseSchema(MappingSchema):
    header = JsonHeader()
    body = ErrorJsonResponseBodySchema()
class ForbiddenJsonResponseSchema(MappingSchema):
    header = JsonHeader()
    body = ErrorJsonResponseBodySchema()
# Success response schemas (header + typed body) for the API endpoints.
class OkGetFrontpageResponse(MappingSchema):
    header = JsonHeader()
    body = FrontpageSchema()
class OkGetSwaggerJSONResponse(MappingSchema):
    header = JsonHeader()
    body = SwaggerJSONSpecSchema(description="Swagger JSON of weaver API.")
class OkGetSwaggerUIResponse(MappingSchema):
    header = HtmlHeader()
    body = SwaggerUISpecSchema(description="Swagger UI of weaver API.")
class OkGetVersionsResponse(MappingSchema):
    header = JsonHeader()
    body = VersionsSchema()
class OkGetConformanceResponse(MappingSchema):
    header = JsonHeader()
    body = ConformanceSchema()
class OkGetProvidersListResponse(MappingSchema):
    header = JsonHeader()
    body = ProvidersSchema()
class InternalServerErrorGetProvidersListResponse(MappingSchema):
    description = "Unhandled error occurred during providers listing."
class OkGetProviderCapabilitiesSchema(MappingSchema):
    header = JsonHeader()
    body = ProviderCapabilitiesSchema()
class InternalServerErrorGetProviderCapabilitiesResponse(MappingSchema):
    description = "Unhandled error occurred during provider capabilities request."
class NoContentDeleteProviderSchema(MappingSchema):
    header = JsonHeader()
    body = MappingSchema(default={})
class InternalServerErrorDeleteProviderResponse(MappingSchema):
    description = "Unhandled error occurred during provider removal."
class NotImplementedDeleteProviderResponse(MappingSchema):
    description = "Provider removal not supported using referenced storage."
class OkGetProviderProcessesSchema(MappingSchema):
    header = JsonHeader()
    body = ProcessCollection()
class InternalServerErrorGetProviderProcessesListResponse(MappingSchema):
    description = "Unhandled error occurred during provider processes listing."
class GetProcessesQuery(MappingSchema):
    providers = SchemaNode(
        Boolean(), example=True, default=False, missing=drop,
        description="List local processes as well as all sub-processes of all registered providers. "
                    "Applicable only for Weaver in {} mode, false otherwise.".format(WEAVER_CONFIGURATION_EMS))
    detail = SchemaNode(
        Boolean(), example=True, default=True, missing=drop,
        description="Return summary details about each process, or simply their IDs."
    )
class GetProcessesEndpoint(MappingSchema):
    querystring = GetProcessesQuery()
# Response schemas for local process management endpoints.
class OkGetProcessesListResponse(MappingSchema):
    header = JsonHeader()
    body = ProcessCollection()
class InternalServerErrorGetProcessesListResponse(MappingSchema):
    description = "Unhandled error occurred during processes listing."
class OkPostProcessDeployBodySchema(MappingSchema):
    deploymentDone = SchemaNode(Boolean(), description="Indicates if the process was successfully deployed.",
                                default=False, example=True)
    processSummary = ProcessSummary(missing=drop, description="Deployed process summary if successful.")
    failureReason = SchemaNode(String(), missing=drop, description="Description of deploy failure if applicable.")
class OkPostProcessesResponse(MappingSchema):
    header = JsonHeader()
    body = OkPostProcessDeployBodySchema()
class InternalServerErrorPostProcessesResponse(MappingSchema):
    description = "Unhandled error occurred during process deployment."
class OkGetProcessInfoResponse(MappingSchema):
    header = JsonHeader()
    body = ProcessOffering()
class BadRequestGetProcessInfoResponse(MappingSchema):
    description = "Missing process identifier."
    body = MappingSchema(default={})
class InternalServerErrorGetProcessResponse(MappingSchema):
    description = "Unhandled error occurred during process description."
class OkGetProcessPackageSchema(MappingSchema):
    header = JsonHeader()
    body = MappingSchema(default={})
class InternalServerErrorGetProcessPackageResponse(MappingSchema):
    description = "Unhandled error occurred during process package description."
class OkGetProcessPayloadSchema(MappingSchema):
    header = JsonHeader()
    body = MappingSchema(default={})
class InternalServerErrorGetProcessPayloadResponse(MappingSchema):
    description = "Unhandled error occurred during process payload description."
class ProcessVisibilityResponseBodySchema(MappingSchema):
    value = VisibilityValue()
class OkGetProcessVisibilitySchema(MappingSchema):
    header = JsonHeader()
    body = ProcessVisibilityResponseBodySchema()
class InternalServerErrorGetProcessVisibilityResponse(MappingSchema):
    description = "Unhandled error occurred during process visibility retrieval."
class OkPutProcessVisibilitySchema(MappingSchema):
    header = JsonHeader()
    body = ProcessVisibilityResponseBodySchema()
class InternalServerErrorPutProcessVisibilityResponse(MappingSchema):
    description = "Unhandled error occurred during process visibility update."
class OkDeleteProcessUndeployBodySchema(MappingSchema):
    deploymentDone = SchemaNode(Boolean(), description="Indicates if the process was successfully undeployed.",
                                default=False, example=True)
    identifier = SchemaNode(String(), example="workflow")
    failureReason = SchemaNode(String(), missing=drop, description="Description of undeploy failure if applicable.")
class OkDeleteProcessResponse(MappingSchema):
    header = JsonHeader()
    body = OkDeleteProcessUndeployBodySchema()
class InternalServerErrorDeleteProcessResponse(MappingSchema):
    description = "Unhandled error occurred during process deletion."
# Response schemas for provider registration and job submission/monitoring endpoints.
class OkGetProviderProcessDescriptionResponse(MappingSchema):
    header = JsonHeader()
    body = ProcessDescriptionBodySchema()
class InternalServerErrorGetProviderProcessResponse(MappingSchema):
    description = "Unhandled error occurred during provider process description."
class CreatedPostProvider(MappingSchema):
    header = JsonHeader()
    body = ProviderSummarySchema()
class InternalServerErrorPostProviderResponse(MappingSchema):
    description = "Unhandled error occurred during provider process registration."
class NotImplementedPostProviderResponse(MappingSchema):
    description = "Provider registration not supported using referenced storage."
class CreatedLaunchJobResponse(MappingSchema):
    header = JsonHeader()
    body = CreatedJobStatusSchema()
class InternalServerErrorPostProcessJobResponse(MappingSchema):
    description = "Unhandled error occurred during process job submission."
class InternalServerErrorPostProviderProcessJobResponse(MappingSchema):
    description = "Unhandled error occurred during process job submission."
class OkGetProcessJobResponse(MappingSchema):
    header = JsonHeader()
    body = JobStatusInfo()
class OkDeleteProcessJobResponse(MappingSchema):
    header = JsonHeader()
    body = DismissedJobSchema()
class OkGetQueriedJobsResponse(MappingSchema):
    header = JsonHeader()
    body = GetQueriedJobsSchema()
class InternalServerErrorGetJobsResponse(MappingSchema):
    description = "Unhandled error occurred during jobs listing."
class OkDismissJobResponse(MappingSchema):
    header = JsonHeader()
    body = DismissedJobSchema()
class InternalServerErrorDeleteJobResponse(MappingSchema):
    description = "Unhandled error occurred during job dismiss request."
class OkGetJobStatusResponse(MappingSchema):
    header = JsonHeader()
    body = JobStatusInfo()
class InternalServerErrorGetJobStatusResponse(MappingSchema):
    description = "Unhandled error occurred during provider process description."
class Result(MappingSchema):
    # Job results body: the produced outputs plus optional navigation links.
    outputs = OutputInfoList()
    links = JsonLinkList(missing=drop)
class OkGetJobResultsResponse(MappingSchema):
    header = JsonHeader()
    body = Result()
class InternalServerErrorGetJobResultsResponse(MappingSchema):
    description = "Unhandled error occurred during job results listing."
class OkGetOutputResponse(MappingSchema):
    header = JsonHeader()
    body = JobOutputSchema()
class InternalServerErrorGetJobOutputResponse(MappingSchema):
    description = "Unhandled error occurred during job results listing."
# Response schemas for quote, bill and job log/exception endpoints.
class CreatedQuoteExecuteResponse(MappingSchema):
    header = JsonHeader()
    body = CreatedQuotedJobStatusSchema()
class InternalServerErrorPostQuoteExecuteResponse(MappingSchema):
    description = "Unhandled error occurred during quote job execution."
class CreatedQuoteRequestResponse(MappingSchema):
    header = JsonHeader()
    body = QuoteSchema()
class InternalServerErrorPostQuoteRequestResponse(MappingSchema):
    description = "Unhandled error occurred during quote submission."
class OkGetQuoteInfoResponse(MappingSchema):
    header = JsonHeader()
    body = QuoteSchema()
class InternalServerErrorGetQuoteInfoResponse(MappingSchema):
    description = "Unhandled error occurred during quote retrieval."
class OkGetQuoteListResponse(MappingSchema):
    header = JsonHeader()
    body = QuotationListSchema()
class InternalServerErrorGetQuoteListResponse(MappingSchema):
    description = "Unhandled error occurred during quote listing."
class OkGetBillDetailResponse(MappingSchema):
    header = JsonHeader()
    body = BillSchema()
class InternalServerErrorGetBillInfoResponse(MappingSchema):
    description = "Unhandled error occurred during bill retrieval."
class OkGetBillListResponse(MappingSchema):
    header = JsonHeader()
    body = BillListSchema()
class InternalServerErrorGetBillListResponse(MappingSchema):
    description = "Unhandled error occurred during bill listing."
class OkGetJobExceptionsResponse(MappingSchema):
    header = JsonHeader()
    body = ExceptionsOutputSchema()
class InternalServerErrorGetJobExceptionsResponse(MappingSchema):
    description = "Unhandled error occurred during job exceptions listing."
class OkGetJobLogsResponse(MappingSchema):
    header = JsonHeader()
    body = LogsOutputSchema()
class InternalServerErrorGetJobLogsResponse(MappingSchema):
    description = "Unhandled error occurred during job logs listing."
# ---- Response maps: HTTP status code -> schema instance.  These dicts are
# ---- consumed by the Cornice service definitions to document each endpoint.

# API meta endpoints (frontpage, swagger, versions, conformance)
get_api_frontpage_responses = {
    "200": OkGetFrontpageResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
}
get_api_swagger_json_responses = {
    "200": OkGetSwaggerJSONResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
}
get_api_swagger_ui_responses = {
    "200": OkGetSwaggerUIResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
}
get_api_versions_responses = {
    "200": OkGetVersionsResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
}
get_api_conformance_responses = {
    "200": OkGetConformanceResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized")
}
# process endpoints
get_processes_responses = {
    "200": OkGetProcessesListResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorGetProcessesListResponse(),
}
post_processes_responses = {
    # FIXME:
    # status should be 201 when properly modified to match API conformance
    # https://github.com/crim-ca/weaver/issues/14
    "200": OkPostProcessesResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorPostProcessesResponse(),
}
get_process_responses = {
    "200": OkGetProcessInfoResponse(description="success"),
    "400": BadRequestGetProcessInfoResponse(),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProcessResponse(),
}
get_process_package_responses = {
    "200": OkGetProcessPackageSchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProcessPackageResponse(),
}
get_process_payload_responses = {
    "200": OkGetProcessPayloadSchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProcessPayloadResponse(),
}
get_process_visibility_responses = {
    "200": OkGetProcessVisibilitySchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProcessVisibilityResponse(),
}
put_process_visibility_responses = {
    "200": OkPutProcessVisibilitySchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorPutProcessVisibilityResponse(),
}
delete_process_responses = {
    "200": OkDeleteProcessResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorDeleteProcessResponse(),
}
# provider endpoints
get_providers_list_responses = {
    "200": OkGetProvidersListResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProvidersListResponse(),
}
get_provider_responses = {
    "200": OkGetProviderCapabilitiesSchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProviderCapabilitiesResponse(),
}
delete_provider_responses = {
    "204": NoContentDeleteProviderSchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorDeleteProviderResponse(),
    "501": NotImplementedDeleteProviderResponse(),
}
get_provider_processes_responses = {
    "200": OkGetProviderProcessesSchema(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProviderProcessesListResponse(),
}
get_provider_process_responses = {
    "200": OkGetProviderProcessDescriptionResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetProviderProcessResponse(),
}
post_provider_responses = {
    "201": CreatedPostProvider(description="success"),
    "400": MappingSchema(description=OWSMissingParameterValue.explanation),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorPostProviderResponse(),
    "501": NotImplementedPostProviderResponse(),
}
# job endpoints
post_provider_process_job_responses = {
    "201": CreatedLaunchJobResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorPostProviderProcessJobResponse(),
}
post_process_jobs_responses = {
    "201": CreatedLaunchJobResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorPostProcessJobResponse(),
}
get_all_jobs_responses = {
    "200": OkGetQueriedJobsResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobsResponse(),
}
get_single_job_status_responses = {
    "200": OkGetJobStatusResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobStatusResponse(),
}
delete_job_responses = {
    "200": OkDismissJobResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorDeleteJobResponse(),
}
get_job_results_responses = {
    "200": OkGetJobResultsResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobResultsResponse(),
}
get_job_output_responses = {
    "200": OkGetOutputResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobOutputResponse(),
}
get_exceptions_responses = {
    "200": OkGetJobExceptionsResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobExceptionsResponse(),
}
get_logs_responses = {
    "200": OkGetJobLogsResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "403": UnauthorizedJsonResponseSchema(description="forbidden"),
    "500": InternalServerErrorGetJobLogsResponse(),
}
# quote and bill endpoints
get_quote_list_responses = {
    "200": OkGetQuoteListResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorGetQuoteListResponse(),
}
get_quote_responses = {
    "200": OkGetQuoteInfoResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorGetQuoteInfoResponse(),
}
post_quotes_responses = {
    "201": CreatedQuoteRequestResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorPostQuoteRequestResponse(),
}
post_quote_responses = {
    "201": CreatedQuoteExecuteResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorPostQuoteExecuteResponse(),
}
get_bill_list_responses = {
    "200": OkGetBillListResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorGetBillListResponse(),
}
get_bill_responses = {
    "200": OkGetBillDetailResponse(description="success"),
    "401": UnauthorizedJsonResponseSchema(description="unauthorized"),
    "500": InternalServerErrorGetBillInfoResponse(),
}
# raw WPS endpoint
wps_responses = {
    "200": OkWPSResponse(),
    "500": ErrorWPSResponse(),
}
#################################################################
# Utility methods
#################################################################
def service_api_route_info(service_api, settings):
    # type: (Service, SettingsType) -> ViewInfo
    """Build the view-registration info (name and URL pattern) for a REST API service."""
    base_path = wps_restapi_base_path(settings)
    return {
        "name": service_api.name,
        "pattern": "{}{}".format(base_path, service_api.path),
    }
|
from planner_project.common import custom_error
from planner_project.data_access import mysql
from planner_project.sql.backweb import planner_sql,user_sql,upgrade_user_sql
# Fetch a paginated planner list together with the total row count.
def select_planner_list(name, page=1, size=10):
    """Return (rows, total_count) for planners matching *name*, paginated."""
    # normalize non-positive paging values to sane defaults
    if page <= 0:
        page = 1
    if size <= 0:
        size = 10
    like_pattern = "%{}%".format(name)
    total_count = mysql.get_object(planner_sql.select_planner_list_count, (name, name, like_pattern))
    rows = mysql.get_list(planner_sql.select_planner_list, (name, name, like_pattern, (page - 1) * size, size))
    return rows, total_count
# Fetch the detail record for a single planner.
def select_planner_info(userId):
    """Return the planner row identified by *userId*."""
    # NOTE(review): the original wrapped userId in redundant parentheses,
    # which is NOT a tuple; the mysql wrapper evidently accepts a bare scalar.
    return mysql.get_object(planner_sql.select_planner_info, userId)
# Update a planner's account and profile information.
def update_planner(account,phone,userType,name,realName,sex,age,education,address,email,headImage,IDCard,IDCardJust,IDCardBack,ServiceAreaId,ServiceTypeId,userId,Sort,current_user_id):
    """Validate inputs, ensure the account is unique, then update the user
    and planner rows in one statement.

    Returns True when at least one row was changed.
    Raises CustomFlaskErr(500) on missing ids, missing required fields,
    or a duplicate account.
    """
    # both the target user id and the operator id are required
    # (idiom fix: use `is None` rather than `== None`)
    if userId is None or userId == "" or current_user_id is None or current_user_id == "":
        raise custom_error.CustomFlaskErr(status_code=500, message="参数不正确,请刷新后重试")
    # account, display name and real name are mandatory
    if account is None or account == "" or name is None or name == "" or realName is None or realName == "":
        raise custom_error.CustomFlaskErr(status_code=500, message="帐号姓名昵称不能为空")
    # reject the update when the account already belongs to another user
    existing = mysql.get_object(user_sql.select_userid_by_account, (account))
    if existing is not None and existing != "" and existing["Id"] != userId:
        raise custom_error.CustomFlaskErr(status_code=500, message="账号已经存在")
    # parameter order must match the placeholder order in planner_sql.update_planner
    affected = mysql.operate_object(planner_sql.update_planner, (account, phone, userType, current_user_id, userId,
        name, realName, sex, age, education, address, email, headImage, IDCard, IDCardJust, IDCardBack,
        ServiceAreaId, ServiceTypeId, current_user_id, Sort, userId))
    # Disabled: planner-statistics bootstrap for upgraded users (types 2/3).
    #if userType==2 or userType ==3:
    #    mysql.operate_object(upgrade_user_sql.insert_planner_statistics,(userId,current_user_id,current_user_id,userId,userId))
    return affected > 0
# Education-background list for a planner.
def select_planner_education(userid):
    return mysql.get_list(planner_sql.select_planner_education,(userid))
# Education-background detail record.
def select_education_info(id):
    return mysql.get_object(planner_sql.select_education_info,(id))
# Social-background list for a planner.
def select_planner_society(userid):
    return mysql.get_list(planner_sql.select_planner_society,(userid))
# Social-background detail record.
def select_society_info(id):
    return mysql.get_object(planner_sql.select_society_info,(id))
# Resource-background list for a planner.
def select_planner_resour(userid):
    return mysql.get_list(planner_sql.select_planner_resour,(userid))
# Resource-background detail record.
def select_resour_info(id):
    return mysql.get_object(planner_sql.select_resour_info,(id))
# Insert an education-background record (insertuserid is recorded as both
# creator and last modifier).
def insert_education(id,userid,TimeStart,TimeEnd,University,Degree,Sort,insertuserid):
    return mysql.operate_object(planner_sql.insert_education,(id,userid,TimeStart,TimeEnd,University,Degree,Sort
    ,insertuserid,insertuserid))
# Update an education-background record.
def update_education(TimeStart,TimeEnd,University,Degree,Sort,Id,updateuserid):
    return mysql.operate_object(planner_sql.update_education,(TimeStart,TimeEnd,University,Degree,Sort,updateuserid,Id))
# Delete an education-background record (soft delete stamped with updateuserid).
def delete_education(id,updateuserid):
    return mysql.operate_object(planner_sql.delete_education,(updateuserid,id))
# Insert a resource-background record.
def insert_resour(id,userid,TimeStart,TimeEnd,Description,Sort,insertuserid):
    return mysql.operate_object(planner_sql.insert_resour,(id,userid,TimeStart,TimeEnd,Description,Sort
    ,insertuserid,insertuserid))
# Update a resource-background record.
def update_resour(TimeStart,TimeEnd,Description,Sort,Id,updateuserid):
    return mysql.operate_object(planner_sql.update_resour,(TimeStart,TimeEnd,Description,Sort,updateuserid,Id))
# Delete a resource-background record.
def delete_resour(id,updateuserid):
    return mysql.operate_object(planner_sql.delete_resour,(updateuserid,id))
# Insert a social-background record.
def insert_society(id,userid,TimeStart,TimeEnd,Description,Sort,insertuserid):
    return mysql.operate_object(planner_sql.insert_society,(id,userid,TimeStart,TimeEnd,Description,Sort
    ,insertuserid,insertuserid))
# Update a social-background record.
def update_society(TimeStart,TimeEnd,Description,Sort,Id,updateuserid):
    return mysql.operate_object(planner_sql.update_society,(TimeStart,TimeEnd,Description,Sort,updateuserid,Id))
# Delete a social-background record.
def delete_society(id,updateuserid):
    return mysql.operate_object(planner_sql.delete_society,(updateuserid,id))
# Toggle a planner's "big V" (verified/featured) flag.
def update_BigV(isv,id,updateuserid):
    return mysql.operate_object(planner_sql.update_BigV, (isv,updateuserid,id))
    #return mysql.operate_object(planner_sql.update_BigV,(isv,updateuserid,id))
|
import nltk
import random
from nltk.corpus import movie_reviews
from nltk.classify.scikitlearn import SklearnClassifier
import pickle
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.svm import SVC, LinearSVC, NuSVC
from nltk.classify import ClassifierI
from statistics import mode
from nltk.tokenize import word_tokenize
class VoteClassifier(ClassifierI):
    """Majority-vote ensemble over several NLTK/sklearn classifiers.

    classify() returns the modal label across all wrapped classifiers;
    confidence() returns the fraction of classifiers that voted for it.
    """

    def __init__(self, *classifiers):
        self._classifiers = classifiers

    def _votes(self, features):
        # One predicted label per wrapped classifier (deduplicates the
        # identical loops previously repeated in classify and confidence).
        return [c.classify(features) for c in self._classifiers]

    def classify(self, features):
        """Return the majority label for *features*."""
        return mode(self._votes(features))

    def confidence(self, features):
        """Return the share of classifiers agreeing with the majority label."""
        votes = self._votes(features)
        return votes.count(mode(votes)) / len(votes)
# Build the training corpus: each line of the positive/negative review files
# becomes one labelled document, and adjectives from those lines are
# collected to choose the feature vocabulary.
# (fix: files are now closed via context managers; the two identical
# pos/neg loops are merged; the unused whole-corpus tokenizations
# short_pos_words/short_neg_words were dropped.)
with open("positive.txt", "r") as pos_file:
    short_pos = pos_file.read()
with open("negative.txt", "r") as neg_file:
    short_neg = neg_file.read()

documents = []
all_words = []

# j is adjective, r is adverb, and v is verb
# allowed_word_types = ["J","R","V"]
allowed_word_types = ["J"]

# one document per line (positive file first, preserving original order);
# harvest words whose POS tag starts with an allowed letter
for corpus_text, label in ((short_pos, "pos"), (short_neg, "neg")):
    for p in corpus_text.split('\n'):
        documents.append((p, label))
        words = word_tokenize(p)
        pos = nltk.pos_tag(words)
        for w in pos:
            if w[1][0] in allowed_word_types:
                all_words.append(w[0].lower())

all_words = nltk.FreqDist(all_words)
# e.g. all_words['bad'] gives a particular word's frequency
word_features = list(all_words.keys())[:2500]
def find_features(document):
    """Map each vocabulary word to whether it occurs in *document*.

    Returns a dict {feature_word: bool} suitable for NLTK classifiers.
    """
    # build the token set once: membership tests are O(1) instead of a
    # linear scan of the token list for each of the 2500 feature words
    tokens = set(word_tokenize(document))
    return {w: (w in tokens) for w in word_features}
# Vectorize every document, shuffle, and split into train/test sets.
featuresets = [(find_features(rev), category) for (rev, category) in documents]
random.shuffle(featuresets)
training_set = featuresets[:2500]
testing_set = featuresets[2500:5000]
# posterior = prior occurrences x likelihood / evidence  (naive Bayes)
# Train the NLTK naive Bayes classifier.
classifier = nltk.NaiveBayesClassifier.train(training_set)
# reading the pickled classifier (disabled)
#classifier_f = open("naivebayes.pickle","rb")
#classifier = pickle.load(classifier_f)
#classifier_f.close()
# pickling the classifier (disabled)
'''save_classifier = open("naivebayes.pickle","wb")
pickle.dump(classifier, save_classifier)
save_classifier.close()'''
# Train the sklearn-backed classifiers used by the voting ensemble.
#MNB_Classifier
MNB_classifier = SklearnClassifier(MultinomialNB())
MNB_classifier.train(training_set)
#bernoulli
BernoulliNB_classifier = SklearnClassifier(BernoulliNB())
BernoulliNB_classifier.train(training_set)
LogisticRegression_classifier = SklearnClassifier(LogisticRegression())
LogisticRegression_classifier.train(training_set)
SGDClassifier_classifier = SklearnClassifier(SGDClassifier())
SGDClassifier_classifier.train(training_set)
LinearSVC_classifier = SklearnClassifier(LinearSVC())
LinearSVC_classifier .train(training_set)
# SVC / NuSVC were evaluated and left disabled (slow and/or less accurate).
#SVC_classifier = SklearnClassifier(SVC())
#SVC_classifier.train(training_set)
#print("SVC_classifier Algo accuracy: ", (nltk.classify.accuracy(SVC_classifier,testing_set))*100)
#NuSVC_classifier = SklearnClassifier(NuSVC())
#NuSVC_classifier.train(training_set)
#print("NuSVC_classifier Algo accuracy: ", (nltk.classify.accuracy(NuSVC_classifier,testing_set))*100)
# Majority-vote ensemble over the five trained classifiers.
voted_classifier = VoteClassifier(classifier,MNB_classifier,BernoulliNB_classifier,LogisticRegression_classifier,SGDClassifier_classifier)
def sentiment(text):
    """Classify *text* with the voting ensemble; return (label, confidence)."""
    features = find_features(text)
    label = voted_classifier.classify(features)
    confidence = voted_classifier.confidence(features)
    return label, confidence
|
from django.contrib import admin
from django.urls import path
import evalapp.views
# URL routes for the course-evaluation app: landing pages, per-category
# listings, detail pages, CRUD views, and comment CRUD views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', evalapp.views.home, name='home'),
    path('home_new', evalapp.views.home_new, name='home_new'),
    path('new', evalapp.views.new, name='new'),
    path('popular', evalapp.views.popular, name='popular'),
    path('major/', evalapp.views.major, name='major'),
    path('liberal/', evalapp.views.liberal, name='liberal'),
    path('elective/', evalapp.views.elective, name='elective'),
    # detail pages
    path('new_detail/<int:new_id>', evalapp.views.new_detail, name='new_detail'),
    path('new_detail_full/<int:new_full_id>', evalapp.views.new_detail_full, name='new_detail_full'),
    path('popular_detail/<int:popular_id>', evalapp.views.popular_detail, name='popular_detail'),
    path('popular_detail_full/<int:pop_full_id>', evalapp.views.popular_detail_full, name='popular_detail_full'),
    path('major/<int:major_id>', evalapp.views.major_detail, name='major_detail'),
    path('liberal/<int:liberal_id>', evalapp.views.liberal_detail, name='liberal_detail'),
    path('elective/<int:elective_id>', evalapp.views.elective_detail, name='elective_detail'),
    # create new entries
    path('major_new', evalapp.views.major_new, name='major_new'),
    path('liberal_new', evalapp.views.liberal_new, name='liberal_new'),
    path('elective_new', evalapp.views.elective_new, name='elective_new'),
    # edit entries
    path('elective/edit/<int:e_edit_id>', evalapp.views.elective_edit, name='elective_edit'),
    path('liberal/edit/<int:l_edit_id>', evalapp.views.liberal_edit, name='liberal_edit'),
    path('major/edit/<int:m_edit_id>', evalapp.views.major_edit, name='major_edit'),
    # delete entries
    path('elective/delete/<int:elective_id>', evalapp.views.elective_delete, name='elective_delete'),
    path('liberal/delete/<int:liberal_id>', evalapp.views.liberal_delete, name='liberal_delete'),
    path('major/delete/<int:major_id>', evalapp.views.major_delete, name='major_delete'),
    # comment create/delete/edit per category
    path('elective/<int:elective_id>/comment/create', evalapp.views.e_comment_create, name="e_comment_create"),
    path('liberal/<int:liberal_id>/comment/create', evalapp.views.l_comment_create, name="l_comment_create"),
    path('major/<int:major_id>/comment/create', evalapp.views.m_comment_create, name="m_comment_create"),
    path('new/<int:new_id>/comment/create', evalapp.views.new_create, name="new_create"),
    path('elective/<int:elective_id>/comment/<int:comment_id>/delete', evalapp.views.e_comment_delete, name="e_comment_delete"),
    path('liberal/<int:liberal_id>/comment/<int:comment_id>/delete', evalapp.views.l_comment_delete, name="l_comment_delete"),
    path('major/<int:major_id>/comment/<int:comment_id>/delete', evalapp.views.m_comment_delete, name="m_comment_delete"),
    path('elective/<int:elective_id>/comment/<int:comment_id>/edit', evalapp.views.e_comment_edit, name="e_comment_edit"),
    path('liberal/<int:liberal_id>/comment/<int:comment_id>/edit', evalapp.views.l_comment_edit, name="l_comment_edit"),
    path('major/<int:major_id>/comment/<int:comment_id>/edit', evalapp.views.m_comment_edit, name="m_comment_edit"),
    # misc pages
    path('February', evalapp.views.February, name='February'),
    path('February/<int:february_id>', evalapp.views.February_detail, name='February_detail'),
    path('empty', evalapp.views.empty, name='empty'),
    path('search',evalapp.views.search,name='search'),
    path('detail/<int:blog_id>', evalapp.views.detail, name='detail'),
]
|
# -*- coding: utf-8 -*-
###############################################################################
#
# CommitChunkedUpload
# Completes an upload initiated by the ChunkedUpload Choreo.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CommitChunkedUpload(Choreography):
    """Choreo wrapper that completes an upload initiated by the ChunkedUpload Choreo."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the CommitChunkedUpload Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CommitChunkedUpload, self).__init__(temboo_session, '/Library/Dropbox/FilesAndMetadata/CommitChunkedUpload')
    def new_input_set(self):
        # factory for the input container used when executing this Choreo
        return CommitChunkedUploadInputSet()
    def _make_result_set(self, result, path):
        # wrap raw execution output in the typed result set
        return CommitChunkedUploadResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        # track an asynchronous execution of this Choreo
        return CommitChunkedUploadChoreographyExecution(session, exec_id, path)
class CommitChunkedUploadInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CommitChunkedUpload
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # NOTE: generated Temboo boilerplate -- each setter records one named
    # Choreo input; required inputs are AccessToken(+Secret), AppKey/Secret,
    # Path, and UploadID.
    def set_AccessTokenSecret(self, value):
        """
        Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret retrieved during the OAuth process.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('AccessTokenSecret', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('AccessToken', value)
    def set_AppKey(self, value):
        """
        Set the value of the AppKey input for this Choreo. ((required, string) The App Key provided by Dropbox (AKA the OAuth Consumer Key).)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('AppKey', value)
    def set_AppSecret(self, value):
        """
        Set the value of the AppSecret input for this Choreo. ((required, string) The App Secret provided by Dropbox (AKA the OAuth Consumer Secret).)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('AppSecret', value)
    def set_Locale(self, value):
        """
        Set the value of the Locale input for this Choreo. ((optional, string) The metadata returned on successful upload will have its size field translated based on the given locale.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('Locale', value)
    def set_Overwrite(self, value):
        """
        Set the value of the Overwrite input for this Choreo. ((optional, boolean) Indicates what happens when a file already exists at the specified path. If true (the default), the existing file will be overwritten. If false, the new file will be automatically renamed.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('Overwrite', value)
    def set_ParentRevision(self, value):
        """
        Set the value of the ParentRevision input for this Choreo. ((optional, string) The revision of the file you're editing. If this value matches the latest version of the file on the user's Dropbox, that file will be replaced, otherwise a new file will be created.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('ParentRevision', value)
    def set_Path(self, value):
        """
        Set the value of the Path input for this Choreo. ((required, string) The path to the file you want to write to (i.e. /RootFolder/SubFolder/MyFile.txt).)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('Path', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Can be set to xml or json. Defaults to json.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('ResponseFormat', value)
    def set_Root(self, value):
        """
        Set the value of the Root input for this Choreo. ((optional, string) Defaults to "auto" which automatically determines the root folder using your app's permission level. Other options are "sandbox" (App Folder) and "dropbox" (Full Dropbox).)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('Root', value)
    def set_UploadID(self, value):
        """
        Set the value of the UploadID input for this Choreo. ((required, string) Used to identify the chunked upload session you'd like to commit. This is returned from the ChunkedUpload Choreo.)
        """
        super(CommitChunkedUploadInputSet, self)._set_input('UploadID', value)
class CommitChunkedUploadResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CommitChunkedUpload Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; it is
        # kept as-is because renaming would change the keyword-call interface
        # of this generated API.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Dropbox. Corresponds to the ResponseFormat input. Defaults to json.)
        """
        return self._output.get('Response', None)
class CommitChunkedUploadChoreographyExecution(ChoreographyExecution):
    """Handle for an asynchronous execution of the CommitChunkedUpload Choreo."""
    def _make_result_set(self, response, path):
        # wrap the raw response in the Choreo-specific result set
        return CommitChunkedUploadResultSet(response, path)
|
from django.conf.urls import url
from views import Home, SuccessLoginView
# Routes: the landing page and the post-login success page.
urlpatterns = [
    url(r'^$', Home.as_view(), name='home'),
    url(r'^success_login/$', SuccessLoginView.as_view(), name='success_login')
]
###############################################################################
# sender.py
# Names: Stanley Zhang, Olivier Chen
# BU IDs: U99944807, U33604671
###############################################################################
import sys
import socket
import random
SEND_BUFFER_SIZE = 1472
from util import *
def sender(receiver_ip, receiver_port, window_size):
    """Open a UDP socket and reliably send sys.stdin to the receiver.

    Implements a sliding-window protocol over util's PacketHeader
    (type: 0=START, 1=END, 2=DATA, 3=ACK; 16-byte header), resending
    un-ACKed packets in the window on a 0.5 s timeout.
    """
    socket.setdefaulttimeout(0.5)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # send START message
    pkt_header_start = PacketHeader(type=0, seq_num=random.randint(2,10), length = 0)
    pkt_header_start.checksum = compute_checksum(pkt_header_start / "")
    pkt_start = pkt_header_start / ""
    s.sendto(str(pkt_start), (receiver_ip, receiver_port))
    # receive ACK for start and check seq_num (must be same) before moving on
    ack_start, address = s.recvfrom(SEND_BUFFER_SIZE)
    ack_header = PacketHeader(ack_start[:16])
    # BUGFIX: compare against the START packet's seq_num (the original
    # compared ack_header.seq_num with itself, which is never unequal).
    while (ack_header.seq_num != pkt_header_start.seq_num or ack_header.type != 3):
        # BUGFIX: recvfrom returns (data, address); unpack before slicing,
        # otherwise a tuple was sliced instead of the payload bytes.
        ack_start, address = s.recvfrom(SEND_BUFFER_SIZE)
        ack_header = PacketHeader(ack_start[:16])
    # split data into chunks that fit alongside the 16-byte header
    msg = sys.stdin.read()
    msg_chunks = [msg[i:i+SEND_BUFFER_SIZE-16] for i in range(0, len(msg), SEND_BUFFER_SIZE-16)]
    # send chunks as DATA while adjusting seq_num appropriately (starts at 0)
    ack_count = 0
    window = [x for x in range(window_size)]          # seq numbers currently in flight
    isAcked = [False for y in range(window_size)]     # ACK state, parallel to window
    # send first "window_size" number of packets
    for i in range(0,window_size,1):
        data_pkt_header = PacketHeader(type=2, seq_num=i, length=len(msg_chunks[i]))
        data_pkt_header.checksum = compute_checksum(data_pkt_header/ msg_chunks[i])
        data_pkt = data_pkt_header / msg_chunks[i]
        s.sendto(str(data_pkt), (receiver_ip, receiver_port))
    # whenever we receive an ack we check if it is the expected ack and if so we move the sliding window fwd and send next
    while ack_count < len(msg_chunks):
        try:
            ack_data, address = s.recvfrom(SEND_BUFFER_SIZE)
            ack_header = PacketHeader(ack_data[:16])
            ack_checksum = ack_header.checksum
            ack_header.checksum = 0
            computed_checksum = compute_checksum(ack_header / "")
        except socket.timeout:
            # resend all packets in window that haven't been ACK'd
            for i in range(0,len(window),1):
                if (isAcked[i] == False):
                    data_pkt_header = PacketHeader(type=2, seq_num=window[i], length=len(msg_chunks[window[i]]))
                    data_pkt_header.checksum = compute_checksum(data_pkt_header/ msg_chunks[window[i]])
                    data_pkt = data_pkt_header / msg_chunks[window[i]]
                    s.sendto(str(data_pkt), (receiver_ip, receiver_port))
        else:
            if ack_header.type == 3 and ack_header.length == 0 and ack_checksum == computed_checksum: # check checksum, type and length of packet
                # if ack doesn't have same seq_num we sent but is within the window
                if ack_header.seq_num > window[0] and ack_header.seq_num <= window[len(window)-1]:
                    # mark that seq_num as ACK'd (out-of-order ACK)
                    isAcked[ack_header.seq_num - window[0]] = True
                # if ack has same seq_num sent (as expected)
                elif ack_header.seq_num == window[0]:
                    # window is moved forward past every leading ACK'd slot
                    isAcked[ack_header.seq_num - window[0]] = True
                    while isAcked[0]:
                        if len(window) != 0:
                            data_pkt_header = PacketHeader(type=2, seq_num=window[0], length=len(msg_chunks[window[0]]))
                            data_pkt_header.checksum = compute_checksum(data_pkt_header/ msg_chunks[window[0]])
                            data_pkt = data_pkt_header / msg_chunks[window[0]]
                            s.sendto(str(data_pkt), (receiver_ip, receiver_port))
                            window.pop(0)
                            if len(window) != 0:
                                if window[len(window)-1] < len(msg_chunks)-1: # if window has space to move forward, it does so
                                    # window has window_size-1 entries here, so
                                    # window[window_size-2] is its last element
                                    window.append(window[window_size-2]+1)
                        isAcked.pop(0)
                        ack_count += 1
                        isAcked.append(False)
                    # then, send packets in window that haven't already been ACK'd
                    for i in range(0,len(window),1):
                        if isAcked[i] == False:
                            data_pkt_header = PacketHeader(type=2, seq_num=window[i], length=len(msg_chunks[window[i]]))
                            data_pkt_header.checksum = compute_checksum(data_pkt_header/ msg_chunks[window[i]])
                            data_pkt = data_pkt_header / msg_chunks[window[i]]
                            s.sendto(str(data_pkt), (receiver_ip, receiver_port))
    # send END message
    pkt_header_end = PacketHeader(type=1, seq_num=random.randint(2,10), length = 0)
    pkt_header_end.checksum = compute_checksum(pkt_header_end / "")
    pkt_end = pkt_header_end / ""
    s.sendto(str(pkt_end), (receiver_ip, receiver_port))
    # receive ACK for end and check seq_num (must be same) before moving on
    ack_end, address = s.recvfrom(SEND_BUFFER_SIZE)
    ack_header = PacketHeader(ack_end[:16])
    while ack_header.seq_num != pkt_header_end.seq_num or ack_header.type != 3:
        # BUGFIX: unpack (data, address) here too before slicing the payload
        ack_end, address = s.recvfrom(SEND_BUFFER_SIZE)
        ack_header = PacketHeader(ack_end[:16])
def main():
    """Parse command-line arguments and invoke the sender."""
    args = sys.argv
    if len(args) != 4:
        sys.exit("Usage: python sender.py [Receiver IP] [Receiver Port] [Window Size] < [message]")
    sender(args[1], int(args[2]), int(args[3]))
if __name__ == "__main__":
main()
|
# For each test case, count the sign-change boundaries scanning the pancake
# string right-to-left, treating the region beyond the stack as '+'.
for case_no in range(1, int(input()) + 1):
    line = input()
    flips = 0
    prev = '+'
    for ch in line[::-1]:
        if ch != prev:
            prev = ch
            flips += 1
    print('Case #%d:' % (case_no,), flips)
|
import queryer
# ICSD search criteria: composition with stoichiometry ranges, plus the
# total number of elements.
query = {
    'composition': 'Ni:1:1 Ti:2:2',
    'number_of_elements': '2'
}
##query = {'icsd_collection_code': 181801}
# NOTE(review): rebinding "queryer" shadows the imported module, so a second
# Queryer could not be constructed later in this script -- consider renaming.
queryer = queryer.Queryer(query=query)
queryer.perform_icsd_query()
##print queryer.hits
##queryer.quit()
|
# coding:utf8
from flask import render_template,redirect,request,url_for,flash
from flask_login import login_user,logout_user,login_required
from flask_login import current_user # 当前用户
from . import auth
from .forms import LoginForm,RegisterForm,ChangePasswordForm
from ..models import User
from .. import db # 实际上是 from ..__init__ import db
# from ..decorators import admin_required,permission_required
@auth.route('/login/',methods = ['GET','POST'])
def login():
    """Render and process the login form."""
    form = LoginForm()
    if form.validate_on_submit():
        # Look the user up by username.
        # NOTE(review): the flash message below says "email or password"
        # while the lookup is by username — confirm which is intended.
        user = User.query.filter_by(username=form.username.data).first()
        # If the user exists and the submitted password verifies, log in.
        if user is not None and user.verify_password(form.password.data):
            # flask_login manages the session; second arg is "remember me".
            login_user(user,form.remember.data)
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid email or password')
    return render_template('auth/login.html',form=form)
@auth.route('/logout/')
@login_required # route is only reachable while logged in
def logout():
    """Log the current user out and redirect to the index page."""
    logout_user()
    flash('You have been logged out')
    return redirect(url_for('main.index'))
# Registration route
@auth.route('/register/',methods=['GET','POST'])
def register():
    """Create a new account from the registration form, then send the user
    to the login page."""
    form = RegisterForm()
    if form.validate_on_submit():
        user = User(email=form.email.data,
                    username=form.username.data,
                    password=form.password.data)
        db.session.add(user)
        # BUG FIX: without an explicit commit the new user was never
        # persisted (Flask-SQLAlchemy sessions do not auto-commit).
        db.session.commit()
        flash('You can now login.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html',form=form)
@auth.route('/change-password/',methods=['GET','POST'])
@login_required
def change_password():
    """Let the logged-in user change their password after verifying the
    old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        # The old password must verify before we accept the new one.
        if current_user.verify_password(form.old_password.data):
            current_user.password = form.password.data
            # add() registers the modified user with the session...
            db.session.add(current_user)
            # BUG FIX: ...but nothing was committed, so the change was lost
            # at the end of the request (no auto-commit).
            db.session.commit()
            flash('your password has been updated')
            return redirect(url_for('main.index'))
        else:
            # Wrong old password: report and fall through to re-render.
            flash('invalid password')
    return render_template('auth/change_password.html',form=form)
|
from pylab import plot, show, title, xlabel, ylabel, subplot
from scipy import fft, arange
import numpy as np
def googleant2(real, imag):
    """Fast polynomial approximation of the phase angle atan2(imag, real).

    `imag` is expected to be complex (only its imaginary part is used);
    `real` is a plain float. Returns the angle in radians.
    """
    local_q = 0
    local_i = 1
    AMPL_CONV = 1
    y = imag.imag
    # I/Q mixing step; with local_i=1 and local_q=0 this is an identity.
    x = local_i * real + local_q * y
    y = local_i * y - x * local_q
    # Fold the input into the first octant, remembering sign and quadrant.
    sign = 1
    if y < 0:
        sign *= -1
        y *= -1
    offset = 0
    if x == y:
        ratio = 1
    elif x > y:
        ratio = y / x
    else:
        offset = -np.pi / 2
        ratio = x / y
        sign *= -1
    # Rational minimax-style approximation of atan(ratio).
    poly = 0.98419158358617365 + ratio * (0.093485702629671305 + ratio * 0.19556307900617517)
    return sign * (offset + ratio / poly) * AMPL_CONV
def standard(real, imag):
    """Reference phase: numpy's angle of the recombined complex sample."""
    combined = real + imag
    return np.angle(combined)
data=np.empty(0)
stan=np.empty(0)
length=100
# Sweep the phase over 0..360 degrees and compare the fast approximation
# (googleant2) against numpy's exact angle (standard).
for i in range(0,length):
    ticks=(360/length*i)/180*np.pi
    #print('tick:%.1f' % (ticks/np.pi*180))
    imag=np.sin(ticks)*1j
    real=np.cos(ticks)
    #data = real+imag
    print('real+imag:%.1f+%.1fj tick:%.1f' % (real,imag.imag,ticks/np.pi*180))
    data=np.append(data,googleant2(real,imag))
    stan=np.append(stan,standard(real,imag))
# Convert both curves to degrees and plot them plus their difference (error).
datao=data/np.pi*180
stano=stan/np.pi*180
print('data:'+np.array_str(datao)+'\n'+'stan:'+np.array_str(stano))
plot(stano)
plot(datao)
plot(stano-datao)
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Discharge_Water_Temperature_Setpoint import Discharge_Water_Temperature_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Load_Shed_Supply_Water_Temperature_Setpoint import Load_Shed_Supply_Water_Temperature_Setpoint
from brick.brickschema.org.schema._1_0_2.Brick.Medium_Temperature_Hot_Water import Medium_Temperature_Hot_Water
class Medium_Temperature_Hot_Water_Discharge_Temperature_Load_Shed_Setpoint(Discharge_Water_Temperature_Setpoint,Load_Shed_Supply_Water_Temperature_Setpoint,Medium_Temperature_Hot_Water):
    """Auto-generated Brick 1.0.2 tagset class; the multiple inheritance
    mirrors the Brick class hierarchy for this setpoint."""
    # RDF type IRI in the Brick 1.0.2 namespace.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Medium_Temperature_Hot_Water_Discharge_Temperature_Load_Shed_Setpoint
|
import pygame
import sys
from Logic import Logic
pygame.init()
width = 1280
height = 720
window = pygame.display.set_mode((width,height))
pygame.display.set_caption("Langton's Ant")
class Window(object):
    """Owns the main loop: steps the ant simulation, pumps events, redraws."""
    def __init__(self):
        # Runs the (blocking) main loop immediately on construction.
        self.Main()
    def Main(self):
        self.fps = 120
        self.clock = pygame.time.Clock()
        window.fill((0,0,0))
        # Logic(width, height, 1, window) — third arg presumably the
        # cell/scale size; confirm in Logic.
        self.logic = Logic(width,height,1,window)
        while True:
            self.logic.run()
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    pygame.quit()
                    quit()
            pygame.display.update()
            self.clock.tick(self.fps)
|
# Copyright(C) 2018 刘珅珅
# Environment: python 3.6.4
# Date: 2018.4.10
# Import the module and a class from it (demonstrates both import styles).
import car
from car import Car
# Classes accessed through the module need the `car.` prefix.
my_tesla = car.ElectricCar('tesla', 'model s', 2016)
my_tesla.battery.describe_battery()
my_car = Car('audi', 'a4', 2017)
print(my_car.get_descriptive_name())
# works but is slow, full answer took like 1 minute
def main():
    """Run 100 FFT phases over the input digit list and print the first
    eight digits of the result (Advent of Code 2019, day 16 style)."""
    list_digits = [int(x) for x in open('test_input_problem16.txt').read().strip()]
    phases = 100
    #print(list_digits)
    for num in range(phases):
        print(num)
        new_list_digits = []
        for index, digit in enumerate(list_digits):
            repeated_pattern = create_pattern(index, len(list_digits))
            # The puzzle's base pattern is shifted left by one position.
            del repeated_pattern[0]
            # Keep only the last digit of the absolute dot product.
            new_digit = int(str(abs(sum([digit2 * repeated_pattern[x] for x, digit2 in enumerate(list_digits)])))[-1])
            new_list_digits.append(new_digit)
        list_digits = new_list_digits
    #print(list_digits)
    print("".join(str(x) for x in list_digits[0:8]))
def create_pattern(position, length_list):
    """Build the repeating FFT base pattern for a given output position.

    Each of the base values 0, 1, 0, -1 is repeated (position + 1) times;
    the cycle repeats until at least length_list + 1 elements exist, and
    that prefix is returned.
    """
    needed = length_list + 1
    pattern = []
    while len(pattern) < needed:
        for value in (0, 1, 0, -1):
            pattern.extend([value] * (position + 1))
    return pattern[0:needed]
main()  # runs on import as well — there is no __main__ guard
#print(create_pattern(5, 8))
from gsearch.googlesearch import search  # google search auto
# NOTE(review): this second import shadows the gsearch one above; only
# googlesearch.search is actually used below.
from googlesearch import search
import webbrowser
import textract  # Content extract
import pydf
import pyce3
import requests
import time
# Search for the part, scrape each result page, then search again for
# prices using the scraped page title.
for url in search('Servo driver ic Texas instrument'+'.html'):
    print(url)
    print(len(url))
    print(url[int(len(url))-4])
    # Crude check that the URL ends in ".html" (4th char from the end is 'h').
    if str(url[int(len(url))-4]) == 'h':
        try:
            html = requests.get(url).content
            # BUG FIX: the unpacked variable was named `time`, shadowing the
            # `time` module and making the later time.sleep() calls crash.
            encoding, page_time, title, text, next_link = pyce3.parse(url, html)
            print("Encoding:"+ encoding)
            print('='*10)
            print("Title:"+title)
            print("time:"+page_time)
            print('='*10)
            print("Text:"+text)
            print("NextPageLink: ", next_link)
            time.sleep(0.3)
            # Search from the content best on components price
            for url2 in search(str(title)+'price'):
                print("Compoent search for price")
                print(url2)
                try:
                    html = requests.get(url2).content
                    encoding, page_time, title, text, next_link = pyce3.parse(url2, html)
                    print("Encoding:"+ encoding)
                    print('='*10)
                    print("Title:"+title)
                    print("time:"+page_time)
                    print('='*10)
                    print("Text:"+text)
                    print("NextPageLink: ", next_link)
                    time.sleep(0.002)
                except Exception:
                    # Narrowed from a bare except: still best-effort, but no
                    # longer swallows KeyboardInterrupt/SystemExit.
                    print("Different webpage with other document file")
                    #time.sleep(0.002)
        except Exception:
            print("Different webpage with other document file")
    else:
        print("Different webpage with other document file")
|
# coding: utf-8
# In[30]:
import pandas
import numpy as np
import matplotlib.pyplot as plt
import os
get_ipython().magic(u'matplotlib inline')
# In[31]:
import getMTAdata as mta
df, links = mta.getAllDf(n_start=0, n_end=1)
filenames = [os.path.split(l)[1] for l in links]
print links
print filenames
# In[32]:
df.head()
# In[33]:
# or we can read from the csv file
# df = pandas.read_csv("Saturday, April 02, 2016")
df.columns = ['C/A','UNIT','SCP','STATION','LINENAME','DIVISION','DATE','TIME','DESC','IN','OUT']
df = df[['UNIT', 'SCP', 'DATE', 'TIME', "IN", "OUT"]]
df.head(5)
# In[34]:
# Create a coordinates look up table and add a column
geocode = pandas.read_csv('geocoded.csv', header=None)
geocode = geocode.drop_duplicates(0)
geocode = geocode[[0,5,6]]
geocode.columns = ['UNIT', 'LAT', 'LON']
geocode_mapping = {row.values[0]:[row[1], row[2]] for index,row in geocode.iterrows()}
def map(unit):
    """Look up the [lat, lon] pair for a turnstile UNIT; NaN when the unit
    has no geocode entry.

    NOTE(review): this shadows the builtin `map`; the name is kept because
    the apply() call below uses it.
    """
    try:
        return geocode_mapping[unit]
    except KeyError:
        # Only a missing unit is expected here; the previous bare except
        # would also have hidden real bugs (e.g. a misspelled global).
        return np.nan
df['COORD'] = df['UNIT'].apply(map)
df['LAT'] = df['COORD'].apply(lambda x: x[0])
df['LON'] = df['COORD'].apply(lambda x: x[1])
# In[35]:
df.head(5)
# In[36]:
#Add the times as datetime objects
import datetime
df["DATETIME"] = df["DATE"]+ ' ' + df["TIME"]
df["DATETIME"] = pandas.to_datetime(df["DATETIME"])
# uncomment below if we want the day to be int
# df["DAY"] = df.DATETIME.apply(lambda x: int(x.weekday()))
# In[37]:
def classify_day(time):
    """Label a timestamp as 'Weekday' (Mon-Fri) or 'Weekend' (Sat/Sun)."""
    # isoweekday(): Monday == 1 ... Sunday == 7.
    return "Weekday" if time.isoweekday() < 6 else "Weekend"
df["DAY"] = df['DATETIME'].apply(classify_day)
# In[38]:
df.head()
# In[39]:
def classify_time(time):
    """Bucket a timestamp's hour: 5-9 inclusive -> 'Morning',
    18-21 -> 'Evening', anything else -> None.

    NOTE(review): the evening test excludes hour 17 (17 < hour < 22) while
    morning is inclusive (5 <= hour <= 9) — confirm the asymmetry is intended.
    """
    hour = time.hour
    if 5 <= hour <= 9:
        return "Morning"
    if 17 < hour < 22:
        return "Evening"
    return None
df["M_E"] = df['DATETIME'].apply(classify_time)
# In[40]:
df.head()
# In[41]:
get_ipython().run_cell_magic(u'time', u'', u"\n# make a group reduce function to apply to df.groupby\ndef group_manipulation(df):\n reduce_df = pandas.Series()\n # IN = df.IN.values; OUT = df.OUT.values\n # IN = IN[1:] - IN[:-1] # convert from cumulative\n # OUT = OUT[1:] - OUT[:-1]\n # mask = (IN >= 0) & (IN < 1e4) & (OUT >= 0) & (OUT < 1e4)\n reduce_df['UNIT'] = df.iloc[0]['UNIT']\n reduce_df['IN'] = df.IN.sum()\n reduce_df['OUT'] = df.OUT.sum()\n reduce_df['LAT'] = df.iloc[0]['LAT']\n reduce_df['LON'] = df.iloc[0]['LON']\n return reduce_df")
# In[42]:
# define a model to adjust the outflux to conserve mass
def model_outflux(OUT, adjust=1.33):
    """model to adjust outflux for the purpose of mass conservation"""
    # Scale the raw exit count by the calibration factor.
    scaled = OUT * adjust
    return scaled
# In[43]:
def getMasterDF(df, target_DAY='Weekday', target_M_E='Morning', adjust=1.33):
    """
    feed orig. dataframe to perform filtering and grouping then return
    a tableau friendly masterDF
    """
    # filter df to target day and timeslot
    df = df.copy()
    df = df[df['DAY'] == target_DAY]
    df = df[df['M_E'] == target_M_E]
    # group data by UNIT and apply group operation
    # NOTE(review): group_manipulation is defined inside an IPython cell
    # magic below — this only resolves when run as a notebook.
    masterDF = df.groupby('UNIT', as_index=False).apply(group_manipulation)
    masterDF['OUTPRIME'] = masterDF['OUT'].apply(
        lambda x: model_outflux(x, adjust)
    )
    masterDF['FOOTTRAFFIC'] = masterDF.IN + masterDF.OUTPRIME
    masterDF['FLUX'] = (masterDF.IN - masterDF.OUTPRIME) / masterDF.FOOTTRAFFIC
    # Report how unbalanced raw IN vs OUT totals are, and the adjust value
    # that would balance them (Python 2 print statements).
    print "\tsum of IN", masterDF.IN.sum()
    print "\tsum of OUT", masterDF.OUT.sum()
    ratio = float(masterDF.OUT.sum()) / masterDF.IN.sum()
    print "\tTotal OUT is {:%} of total IN".format(ratio)
    adjust = 1. / ratio
    print "\tan ajustment parameter should be applied: ", adjust
    # Output file name encodes the source files plus the day/timeslot slice.
    filename_group = ''.join([f.split('.')[0] for f in filenames]
    ) + '_' + target_DAY + '_' + target_M_E +'.csv'
    print 'saving', filename_group
    print
    masterDF.to_csv(filename_group, index_label=None)
    return masterDF
# In[46]:
target_DAYs = ['Weekday', 'Weekend']
target_M_Es = ['Morning', 'Evening']
df['IN'] = np.concatenate([[0], df['IN'].values[1:] - df['IN'].values[:-1]])
df['OUT'] = np.concatenate([[0], df['OUT'].values[1:] - df['OUT'].values[:-1]])
IN = df.IN.values; OUT = df.OUT.values
mask = (IN >= 0) & (IN < 1e4) & (OUT >= 0) & (OUT < 1e4)
df = df[mask]
for target_DAY in target_DAYs:
for target_M_E in target_M_Es:
print 'working', target_DAY, target_M_E, '...'
masterDF = getMasterDF(df, target_DAY, target_M_E, adjust=1.33)
# In[45]:
masterDF[masterDF['UNIT'] == 'R001']
masterDF
# In[34]:
masterDF.head()
# ---
# ### running till the above will generate data for [weekday, weekend][morning, evening]
# In[28]:
get_ipython().run_cell_magic(u'time', u'', u'# --- this is the same as the above cell to group UNIT\n# --- but it is looping instead of applying group function\n\nmasterDF = pandas.DataFrame(columns=[\'UNIT\', \'IN\', \'OUT\', \'COORDS\'])\nDAY = \'Weekday\'\nM_E = "Morning"\ndf[\'IN\'] = np.concatenate([[0], df[\'IN\'].values[1:] - df[\'IN\'].values[:-1]])\ndf[\'OUT\'] = np.concatenate([[0], df[\'OUT\'].values[1:] - df[\'OUT\'].values[:-1]])\nIN = df.IN.values; OUT = df.OUT.values\nmask = (IN >= 0) & (IN < 1e4) & (OUT >= 0) & (OUT < 1e4)\ndf = df[mask]\nfor unit, group in df.groupby([\'UNIT\']):\n \n # Filter for weekday mornings\n day = group[group.DAY == DAY]\n timeOfDay = day[day.M_E == M_E]\n masterDF.loc[len(masterDF)] = (unit, timeOfDay.IN.sum(), \n timeOfDay.OUT.sum(), group[\'COORD\'].iloc[0])\n\n\nmasterDF.to_csv("SaturdayApril022016-%s-%s"%(DAY, M_E))')
# In[29]:
masterDF.head()
# In[ ]:
masterDF.sum()
# In[189]:
masterDF.to_csv("SaturdayApril022016-%s-%s"%(DAY, M_E))
# In[132]:
df = df[df['UNIT'] == 'R001']
df['IN_PRIME'] = np.concatenate([[0], df['IN'].values[1:] - df['IN'].values[:-1]])
df['OUT_PRIME'] = np.concatenate([[0], df['OUT'].values[1:] - df['OUT'].values[:-1]])
IN = df.IN_PRIME; OUT = df.OUT_PRIME
mask = (IN >= 0) & (IN < 1e4) & (OUT >= 0) & (OUT < 1e4)
df = df[mask]
# In[188]:
print df[(df.M_E == 'Morning') & (df.DAY == 'Weekday')].sum()
# Filter for weekday mornings
day = df[df.DAY == 'Weekday']
timeOfDay = day[day.M_E == "Morning"]
print timeOfDay.sum()
# In[161]:
df
# In[105]:
df.groupby(df.DATETIME.apply(lambda x: x.hour)).sum().IN_PRIME.plot()
#plt.xlim(datetime.datetime(2016, 3,28,0), datetime.datetime(2016, 3, 29, 0))
# In[191]:
input()
# In[ ]:
|
from ROOT import *
import sys
sys.path.append('/home/taylor/Research/codeplot/functions/')
from plottingfunctions import *
# Open the MC file and grab the D -> K pi reconstruction tree.
f = TFile("/home/taylor/Research/root/mfdtokpiworking.root","READ")
t = f.Get("dsprecontree")
# Two observables, both bounded to [-1, 1].
gm1nb = RooRealVar("gm1nb","gm1nb",-1,1)
gm2nb = RooRealVar("gm2nb","gm2nb",-1,1)
nBins = 100
# NOTE(review): lb/rb look swapped — lb gets getMax() (= 1) and rb gets
# getMin() (= -1), so TH2F receives xlow > xup on the x axis; confirm ROOT
# tolerates this or swap the assignments.
lb = gm1nb.getMax()
rb = gm1nb.getMin()
ub = gm2nb.getMax()
bb = gm2nb.getMin()
frame = gm1nb.frame()
h2 = TH2F("h2","h2",nBins,lb,rb,nBins,bb,ub)
#plot_2d(t,"gm1nb","gm2nb","whomi==1","COLZ","From MC: Title","XTitle","YTitle",h2,frame,0.65,"/home/taylor/Research/plots/dtokpi/gmnb2dplot")
plot_2d(t,"gm1nb","gm2nb","","COLZ","From MC: Title","XTitle","YTitle",h2,frame,0.65,"/home/taylor/Research/plots/dtokpi/gmnb2dplot")
|
import csv
import random
import copy
# Converts IP address string to integer
def convertIP(iP):
    """Pack a dotted-quad IPv4 string into a single 32-bit integer."""
    a, b, c, d = iP.split('.')
    # Octet weights are 256**3, 256**2, 256, 1.
    return (int(a) * 16777216) + (int(b) * 65536) + (int(c) * 256) + int(d)
# Split IP addresses, set numeric values to 0 if empty for that instance, attach label
def preprocess(traffic, label):
    """Clean raw capture rows in place: drop non-TCP/UDP rows and rows with
    missing ports, convert IPs to integers, zero out empty numeric fields,
    normalize the HTTP method, and append the class label."""
    dropped = []
    for record in traffic:
        # Snort rules only apply to TCP/UDP; both ports must be present.
        if str(record[4]) != 'TCP' and str(record[4]) != 'UDP':
            dropped.append(record)
            continue
        if record[1] == '' or record[3] == '':
            dropped.append(record)
            continue
        # Numeric IP representation for source and destination.
        record[0] = convertIP(record[0])
        record[2] = convertIP(record[2])
        # TCP length / window size / UDP length default to 0 when absent.
        for idx in (-2, -3, -4):
            if str(record[idx]) == '':
                record[idx] = 0
        # Empty HTTP request methods become the literal 'None'.
        if record[-1] == '':
            record[-1] = 'None'
        record.append(label)
    traffic[:] = [record for record in traffic if record not in dropped]
# Scales data based on minimum and maximum possible values to 0-1
def scaleData(traffic):
    """Scale the numeric fields of each row to [0, 1] in place.

    IPs and ports are scaled by their fixed maxima; lengths and window
    sizes by the maximum observed in this dataset. Also prints the
    categorical value sets and observed maxima for building an ARFF header.
    """
    protocols = []
    httpRequests = []
    maxLength = 0
    maxTCPWindowSize = 0
    maxTCPLength = 0
    maxUDPLength = 0
    # First pass: collect categorical values and per-column maxima.
    for row in traffic:
        if row[4] not in protocols:
            protocols.append(row[4])
        maxLength = max(maxLength, int(row[5]))
        maxTCPWindowSize = max(maxTCPWindowSize, int(row[6]))
        maxTCPLength = max(maxTCPLength, int(row[7]))
        maxUDPLength = max(maxUDPLength, int(row[8]))
        if row[9] not in httpRequests:
            httpRequests.append(row[9])
    # Second pass: rescale numeric columns. Protocol [4], HTTP method [9]
    # and the label [10] stay as-is.
    for row in traffic:
        row[0] = int(row[0]) / 4294967295   # source IP
        row[1] = int(row[1]) / 65536        # source port
        row[2] = int(row[2]) / 4294967295   # destination IP
        row[3] = int(row[3]) / 65536        # destination port
        row[5] = int(row[5]) / maxLength
        row[6] = int(row[6]) / maxTCPWindowSize
        row[7] = int(row[7]) / maxTCPLength
        row[8] = int(row[8]) / maxUDPLength
    print("Protocols: " + str(protocols))
    print("Max Length: " + str(maxLength))
    print("Max TCP Window Size: " + str(maxTCPWindowSize))
    print("Max TCP Length: " + str(maxTCPLength))
    print("Max UDP Length: " + str(maxUDPLength))
    print("HTTP Requests: " + str(httpRequests))
#### Load, label, and preprocess each capture ####
def _load_capture(path, label):
    """Read one Wireshark CSV export, drop its header row, and preprocess.

    DEDUP: replaces six copy-pasted load stanzas with one helper.
    """
    with open(path) as csvfile:
        rows = list(csv.reader(csvfile, delimiter=','))
    rows.pop(0)  # attribute label row
    preprocess(rows, label)
    return rows

normalTraffic = _load_capture("normalTraffic.csv", "Normal")
attackTraffic1 = _load_capture("attackTraffic1.csv", "JexBossExploit")
attackTraffic2 = _load_capture("attackTraffic2.csv", "NeutrinoExploit")
attackTraffic3 = _load_capture("attackTraffic3.csv", "W32/SdbotInfected")
attackTraffic4 = _load_capture("attackTraffic4.csv", "PacketInjection")
attackTraffic5 = _load_capture("attackTraffic5.csv", "Malspam")

# Merge Traffic for signature detection, then scale numeric fields.
allTrafficSig = normalTraffic + attackTraffic1 + attackTraffic2 + attackTraffic3 + attackTraffic4 + attackTraffic5
scaleData(allTrafficSig)

# Copy data for anomaly detection and collapse attack labels to "Abnormal".
allTrafficAn = copy.deepcopy(allTrafficSig)
for row in allTrafficAn:
    if (row[-1] != "Normal"):
        row[-1] = "Abnormal"
random.shuffle(allTrafficSig)
random.shuffle(allTrafficAn)

# Copy data for individual (one-vs-rest) signature detection.
allTraffic1Sig = copy.deepcopy(allTrafficSig)

def _write_csv(path, rows):
    """Write rows to a comma-separated file."""
    with open(path, mode='w', newline='') as handle:
        writer = csv.writer(handle, delimiter=',')
        for row in rows:
            writer.writerow(row)

_write_csv('allTrafficSig.csv', allTrafficSig)
_write_csv('allTrafficAn.csv', allTrafficAn)

# One-vs-rest relabelings: every pass rewrites every row of allTraffic1Sig,
# so the buffer can be reused for each attack class.
for target, out_name in [("JexBossExploit", 'allTrafficJex.csv'),
                         ("NeutrinoExploit", 'allTrafficNeu.csv'),
                         ("W32/SdbotInfected", 'allTrafficW32.csv'),
                         ("PacketInjection", 'allTrafficPIn.csv'),
                         ("Malspam", 'allTrafficMal.csv')]:
    for i in range(0, len(allTrafficSig)):
        if (allTrafficSig[i][-1] == target):
            allTraffic1Sig[i][-1] = target
        else:
            allTraffic1Sig[i][-1] = "Not" + target
    _write_csv(out_name, allTraffic1Sig)
|
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime, Float
from sixteen.models import BaseData
class BaseDate(object):
    """Mixin adding audit timestamps (UTC); update_at refreshes on UPDATE."""
    create_at = Column(DateTime, nullable=False, default=datetime.utcnow)
    update_at = Column(DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)
class Sms(BaseData, BaseDate):
    """Credential pair for the SMS provider."""
    # Explicit table name
    __tablename__ = "sms"
    id = Column(Integer, primary_key=True)
    access_key = Column(String(20))
    secret_key = Column(String(20))
class CoinPrice(BaseData, BaseDate):
    """A coin price entry tied to a subscriber's phone and email.

    NOTE(review): presumably the current/alert price for a coin — confirm
    against the code that writes this table.
    """
    __tablename__ = "coin_price"
    id = Column(Integer, primary_key=True)
    coin = Column(String(10))
    price = Column(Float(10))
    user_phone = Column(String(40))
    email_address = Column(String(40))
class CoinPriceHistory(BaseData, BaseDate):
    """Historical snapshot rows with the same shape as CoinPrice.

    CONSISTENCY FIX: base classes reordered to (BaseData, BaseDate) to
    match Sms and CoinPrice so all models share the same MRO.
    """
    __tablename__ = "coin_price_history"
    id = Column(Integer, primary_key=True)
    coin = Column(String(10))
    price = Column(Float(10))
    user_phone = Column(String(40))
    email_address = Column(String(40))
|
import numpy as np
import random
# Multiply two matrices with user-supplied dimensions.
# First matrix
print("\tMULTIPLICACION DE MATRICES")
print()
valor1=int (input ("ingrese la dimension en A de su primera matriz: "))
valor2=int (input ("ingrese la dimension en B de su primera matriz: "))
dim=(valor1,valor2)
print()
print("\tSU PRIMERA MATRIZ ES: ")
print()
# Random integer entries drawn from [1, 25).
matriz1=np.random.randint(1,25,dim)
print(matriz1)
print()
# Second matrix
valor3=int (input ("ingrese la dimension en A de su segunda matriz: "))
valor4=int (input ("ingrese la dimension en B de su segunda matriz: "))
dim2=(valor3,valor4)
print()
print("\tSU SEGUNDA MATRIZ ES: ")
print()
matriz2=np.random.randint(1,25,dim2)
print(matriz2)
print()
# Matrix product.
# NOTE(review): np.dot raises ValueError unless valor2 == valor3; the
# dimensions are never validated before multiplying.
matriz_mult=np.dot(matriz1,matriz2)
print("\tLA MULTIPLICACION DE LAS 2 MATRICES ES: ")
print ()
print (matriz_mult)
import os
class Menu:
    """Console menu: prints a title and numbered options, then loops until
    the user picks a valid option (returned) or 9 to exit."""
    def __init__(self, lstOpciones, strTitulo, strMenuDescr):
        # NOTE(review): despite the `lst` prefix, lstOpciones is iterated
        # with .items() below, so it must be a dict {number: description}.
        self.lstOpciones = lstOpciones
        self.strTitulo = strTitulo
        self.strMenuDescr = strMenuDescr
        self.OptionSelect = 0
    def show(self):
        os.system("cls")  # Windows-only screen clear
        print(f"\033[1;32;40m")  # ANSI escape: bright green on black
        print(20*":" + f"{self.strTitulo:^30}" + 20*":")
        print(20*":" + f"{self.strMenuDescr:^30}" + 20*":")
        for k, v in self.lstOpciones.items():
            print(k, "::", v)
        print("9 :: Salir")
        while True:
            try:
                self.OptionSelect = int(input("Ingrese su opción: "))
                if self.OptionSelect > 0 and self.OptionSelect < len(self.lstOpciones)+1:
                    return self.OptionSelect
                elif self.OptionSelect == 9:
                    # NOTE(review): choosing 9 breaks the loop and falls off
                    # the end, returning None — callers must handle that.
                    break
                else:
                    print("Ingrese alguna de las opciones mostradas")
            except ValueError:
                print("Ingresa un número entero")
class Periodos:
    """Plain record for an academic period (id, name, description)."""
    def __init__(self, id, nombre, descripcion):
        self.id, self.nombre, self.descripcion = id, nombre, descripcion
class Salon:
    """Plain record for a classroom, linked to a grade by cod_grado."""
    def __init__(self, cod_salon, desc, cod_grado):
        self.cod_salon, self.desc, self.cod_grado = cod_salon, desc, cod_grado
class Periodo:
    """Plain record for a period code and its description."""
    def __init__(self, cod_periodo, desc):
        self.cod_periodo, self.desc = cod_periodo, desc
class Grados:
    """Plain record for a grade code and its description."""
    def __init__(self, cod_grado, desc):
        self.cod_grado, self.desc = cod_grado, desc
class Cursos:
    """Plain record for a course: code, name, grade link, and how many
    grade slots it has."""
    def __init__(self, cod_curso, nombre, cod_grado, numero_notas):
        self.cod_curso, self.nombre = cod_curso, nombre
        self.cod_grado, self.numero_notas = cod_grado, numero_notas
class Notas:
    """Plain record linking an enrollment and course to one grade value."""
    def __init__(self, cod_matricula, cod_curso, nota):
        self.cod_matricula, self.cod_curso, self.nota = cod_matricula, cod_curso, nota
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# @File : __init__.py.py
# @Time : 2020/12/7 22:20
# @Author : QingWen
# @E-mail : hurrsea@outlook.com
import asyncio
from random import randint
from nonebot.typing import Bot, Event
from nonebot.plugin import on_command
from src.utils.rules import is_banned
safe_left = 0
current_bullet_num = 0
players = {}
spin = on_command("rr", rule=is_banned())
@spin.handle()
async def _(bot: Bot, event: Event, state: dict):
    """Russian-roulette group game: loads bullets on the first /rr, then each
    subsequent /rr either fires (1-minute mute, halve points) or is safe
    (award points). Group-chat only."""
    if event.sub_type == "private":
        await spin.finish("此功能仅对群聊开放。")
    else:
        global current_bullet_num, safe_left
        # Prefer the group card name, fall back to the account nickname.
        if event.sender["card"] != "":
            user_name = event.sender["card"]
        else:
            user_name = event.sender["nickname"]
        if user_name not in players:
            players[user_name] = {}
            players[user_name]["point"] = 0
            players[user_name]["death"] = 0
        args = str(event.message).strip()
        if current_bullet_num == 0:
            # Loading phase: accept a single digit '1'..'5'.
            if args:
                if len(args) < 2 and '0' < args < '6':
                    current_bullet_num = int(args)
                    state["bullet"] = current_bullet_num
                    safe_left = safe_couter(current_bullet_num)
                    await bot.send(event, "装填完成")
                else:
                    await spin.finish("请输入可行的数目")
        else:
            if safe_left == 0:
                # Bang: mute the shooter for 60 s, halve their points.
                await bot.send(event, "“砰”")
                await bot.call_api("set_group_ban",
                                   group_id=event.group_id,
                                   user_id=event.user_id,
                                   duration=60)
                players[user_name]["point"] /= 2
                players[user_name]["death"] += 1
                current_bullet_num -= 1
                if current_bullet_num == 0:
                    # Last bullet spent: print the scoreboard and reset.
                    await bot.send(event, "感谢各位的参与,以下是游戏结算:")
                    await asyncio.sleep(1)
                    # Rank ascending by points, ties broken by name.
                    rank = sorted(players.items(), key=lambda x: (x[1]["point"], x[0]))
                    msg = ''
                    for i, data in enumerate(rank):
                        if i != len(players) - 1:
                            msg += ("%s:\nP: %s分 D: %s\n" % (data[0], data[1]["point"], data[1]["death"]))
                        else:
                            msg += ("%s:\nP: %s分 D: %s" % (data[0], data[1]["point"], data[1]["death"]))
                    players.clear()
                    await spin.finish(msg)
                else:
                    msg = "有请下一位,还有 %d 发" % current_bullet_num
                    safe_left = safe_couter(current_bullet_num)
                    await spin.finish(msg)
            else:
                # Safe pull: award points that scale with remaining bullets.
                safe_left -= 1
                msg = "无事发生,还有 %d 发" % current_bullet_num
                players[user_name]["point"] += 300 * current_bullet_num**2 / 30 + 20
                await spin.finish(msg)
@spin.got("bullet", prompt="紧张刺激的禁言转盘活动,请输入要填入的子弹数目(最多5颗)")
async def _bullet(bot: Bot, event: Event, state: dict):
    """Follow-up prompt for /rr: load 1-5 bullets when none are loaded."""
    global current_bullet_num, safe_left
    if not current_bullet_num:
        # Single-character string comparison keeps the input within '1'..'5'.
        # NOTE(review): `&` works here only because both sides are
        # parenthesized booleans — `and` would be the idiomatic choice.
        if (len(state["bullet"]) < 2) & ('0' < state["bullet"] < '6'):
            current_bullet_num = int(state["bullet"])
            state["bullet"] = current_bullet_num
            safe_left = safe_couter(current_bullet_num)
            await bot.send(event, "装填完成")
        else:
            await spin.finish("请输入可行的数目")
def safe_couter(cur_num: int) -> int:
    """
    Compute how many safe pulls remain before the next live round.

    Draws a random chamber offset; if it falls within the loaded range the
    next pull fires (returns 0), otherwise returns the distance to it.

    NOTE(review): "couter" looks like a typo for "counter" (kept — callers
    use this name), and the 6-n+1 arm yields 2..6, never exactly 1 —
    confirm that is intended.
    """
    n = 6 - randint(1, 6)
    if n <= cur_num:
        return 0
    else:
        return 6-n+1
|
from flask import Flask
from flask_migrate import Migrate, MigrateCommand
from configuration import AuthConfiguration
from models import authDb
from flask_script import Manager
from sqlalchemy_utils import create_database, database_exists
authManageApp = Flask(__name__)
authManageApp.config.from_object(AuthConfiguration)
# Wire up migrations and the Flask-Script CLI ("python manage.py db ...").
authMigrate = Migrate(authManageApp, authDb)
authManager = Manager(authManageApp)
authManager.add_command("db", MigrateCommand)
if(__name__ == "__main__"):
    # Create the database on first run, then hand control to the CLI.
    authDb.init_app(authManageApp)
    if not database_exists(AuthConfiguration.SQLALCHEMY_DATABASE_URI):
        create_database(AuthConfiguration.SQLALCHEMY_DATABASE_URI)
    authManager.run()
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from __future__ import division
import numpy as np
from scipy import stats
import matplotlib as mpl
import matplotlib.pyplot as plt
import warnings
mpl.rcParams['axes.unicode_minus'] = False
mpl.rcParams['font.sans-serif'] = 'SimHei'
def rotate(x, y, theta=45):
    """Rotate the 2-D point cloud (x, y) by `theta` degrees about its
    centroid; returns a 2xN array of rotated coordinates."""
    data = np.vstack((x, y))
    mu = np.mean(data, axis=1)
    # BUG FIX: reshape() returns a new array — the previous code discarded
    # its result, so the centering broadcast was wrong and only worked by
    # accident when N == 2. Keep the (2, 1) column-vector shape explicitly.
    mu = mu.reshape((-1, 1))
    data = data - mu
    theta *= (np.pi / 180)
    c = np.cos(theta)
    s = np.sin(theta)
    # Standard 2-D rotation matrix.
    m = np.array(((c, -s), (s, c)))
    return m.dot(data) + mu
def calc_pearson(x, y):
    """Pearson correlation = cov(x, y) / (std(x) * std(y)).

    Both std and cov use the population (biased) form; the standard
    deviation is computed as sqrt(E[v^2] - E[v]^2).
    """
    std_x = np.sqrt(np.mean(x ** 2) - np.mean(x) ** 2)
    std_y = np.sqrt(np.mean(y ** 2) - np.mean(y) ** 2)
    covariance = np.cov(x, y, bias=True)[0, 1]
    return covariance / (std_x * std_y)
def pearson(x, y, tip):
    """Scatter-plot (x, y) under several rotations, labelling each series
    with a Pearson coefficient (Python 2 prints)."""
    clrs = list('rgbmycrgbmycrgbmycrgbmyc')
    plt.figure(figsize=(10, 8), facecolor='w')
    for i, theta in enumerate(np.linspace(0, 90, 6)):
        x_rotate, y_rotate = rotate(x, y, theta)
        # NOTE(review): the coefficient is computed on the ORIGINAL x, y,
        # not the rotated pair, so every legend entry shows the same value —
        # confirm whether stats.pearsonr(x_rotate, y_rotate) was intended.
        _pearson = stats.pearsonr(x, y)[0]
        print '旋转角度:', theta, 'Pearson相关系数:', _pearson
        # NOTE(review): this shadows the builtin `str` inside the loop.
        str = u'相关系数:%.3f' % _pearson
        plt.scatter(x_rotate, y_rotate, s=40, alpha=0.9, linewidths=0.5, c=clrs[i], marker='o', label=str)
    plt.legend(loc='upper left', shadow=True)
    plt.xlabel(u'X')
    plt.ylabel(u'Y')
    plt.title(u'Pearson相关系数与数据分布:%s' % tip, fontsize=18)
    plt.grid(b=True)
    plt.show()
if __name__ == "__main__":
    # Demo dataset: essentially constant y with tiny noise; the commented
    # alternatives below exercise other relationships.
    # NOTE(review): with N = 2 the statistics are degenerate — two points
    # always correlate perfectly (up to sign); a larger N seems intended.
    N = 2
    tip = u'一次函数关系'
    x = np.random.rand(N)
    y = np.zeros(N) + np.random.rand(N)*0.001
    # tip = u'二次函数关系'
    # x = np.random.rand(N)
    # y = x ** 2
    # tip = u'正切关系'
    # x = np.random.rand(N) * 1.4
    # y = np.tan(x)
    # tip = u'二次函数关系'
    # x = np.linspace(-1, 1, 101)
    # y = x ** 2
    # tip = u'椭圆'
    # x, y = np.random.rand(2, N) * 60 - 30
    # y /= 5
    # idx = (x**2 / 900 + y**2 / 36 < 1)
    # x = x[idx]
    # y = y[idx]
    pearson(x, y, tip)
|
import pandas as pd
from src.utility.file_utility import get_directory_files, create_directory, copy_file
from src.utility.system_utility import progress_bar
from src.utility.image_utility import load_image, crop_roi, save_image
from sklearn.model_selection import train_test_split
def get_labels(n_labels, as_string=True):
    """Return the class labels 0..n_labels-1.

    With as_string=True the labels are zero-padded 5-character directory
    names ('00000', '00001', ...), matching the GTSRB folder layout;
    otherwise plain ints are returned.
    """
    if as_string:
        # zfill generalizes the old "'0000' + str(i)" / "'000' + str(i)"
        # branches, which silently produced 6-character names for i >= 100.
        return [str(i).zfill(5) for i in range(n_labels)]
    else:
        return list(range(n_labels))
def get_image_label(label_code, labels):
    """One-hot encode label_code against the ordered label list."""
    encoding = []
    for candidate in labels:
        encoding.append(1 if label_code == candidate else 0)
    return encoding
def create_traing_data_table(folder_path, output_path, img_ext='ppm'):
    """Walk GTSRB-style training folders and write a CSV with one row per
    image: path, label folder, and the four ROI corner coordinates.

    NOTE(review): "traing" looks like a typo for "training"; the name is
    kept because callers may use it.
    """
    directories = get_directory_files(folder_path)
    directories.sort()
    datatable = pd.DataFrame(columns=['image_path', 'label', 'roi_x1', 'roi_y1', 'roi_x2', 'roi_y2'])
    total_count = 0
    for label in directories:
        current_directory = label
        path_label_folder = folder_path + '/' + current_directory
        images = [image for image in get_directory_files(path_label_folder) if img_ext in image]
        images.sort()
        # Per-class ground-truth CSV shipped with the dataset (';'-separated).
        category_df = pd.read_csv(path_label_folder + '/GT-' + current_directory + '.csv', sep=';')
        count = 0
        for img in images:
            img_path = path_label_folder + '/' + img
            # Assumes the sorted images line up row-for-row with the GT CSV
            # — TODO confirm.
            category_df_row = category_df.iloc[count]
            datatable.loc[total_count] = [img_path, label, category_df_row['Roi.X1'], category_df_row['Roi.Y1'],
                                          category_df_row['Roi.X2'], category_df_row['Roi.Y2']]
            count += 1
            total_count += 1
            progress_bar(count, len(images), 'Processing label: ' + label + ' with ' + str(len(images)) + ' images')
        print()
    datatable.to_csv(output_path, index=False, header=True)
def split_train_data(train_out_folder, validation_out_folder, dataset_path, validation_size=0.25, labels=43, roi_folder_suffix='_roi'):
    """Split the training CSV into train/validation image sets and write
    both plain copies and ROI-cropped copies into per-class folders."""
    dataframe = pd.read_csv(dataset_path)
    x_train, x_valid, y_train, y_valid = train_test_split(dataframe['image_path'].values, dataframe['label'].values,
                                                          test_size=validation_size, shuffle=True)
    # Pre-create one zero-padded folder per class in both output trees.
    for i in range(labels):
        if i < 10:
            folder = '0000' + str(i)
        else:
            folder = '000' + str(i)
        create_directory(train_out_folder + '/' + folder)
        create_directory(validation_out_folder + '/' + folder)
    # Simply move images
    copy_images(x_train, y_train, train_out_folder)
    print()
    copy_images(x_valid, y_valid, validation_out_folder)
    # Save images only ROI
    save_images_roi(x_train, y_train, train_out_folder + roi_folder_suffix, dataframe)
    print()
    save_images_roi(x_valid, y_valid, validation_out_folder + roi_folder_suffix, dataframe)
def copy_images(x, y, output_path):
    """Copy every image in x into output_path/<zero-padded label>/ (labels taken from y)."""
    total = x.shape[0]
    for index in range(total):
        label = y[index]
        folder = '0000' + str(label) if label < 10 else '000' + str(label)
        file_name = x[index].split('/')[-1]
        copy_file(x[index], output_path + '/' + folder + '/' + file_name)
        progress_bar(index, total, 'Copying ' + str(total) + ' images in: ' + output_path)
def prepare_test_data(starting_folder, output_folder, data_frame_path, sep=';', label_col='ClassId', labels=43, roi_folder_suffix='_roi'):
    """Sort test images into per-label folders and also write ROI-cropped copies.

    Row i of the ground-truth data frame describes the i-th file (sorted) in
    starting_folder; columns 3:7 hold the ROI bounding box.
    """
    file_names = sorted(get_directory_files(starting_folder))
    ground_truth = pd.read_csv(data_frame_path, sep=sep)
    for label_index in range(labels):
        folder = '0000' + str(label_index) if label_index < 10 else '000' + str(label_index)
        create_directory(output_folder + '/' + folder)
        create_directory(output_folder + roi_folder_suffix + '/' + folder)
    total_rows = ground_truth.shape[0]
    for row_index in range(total_rows):
        label = ground_truth.iloc[row_index][label_col]
        folder = '0000' + str(label) if label < 10 else '000' + str(label)
        image_name = file_names[row_index]
        # Plain copy of the image.
        copy_file(starting_folder + '/' + image_name,
                  output_folder + '/' + folder + '/' + image_name)
        # ROI-cropped copy (box lives in columns 3..6 of the ground truth row).
        roi = ground_truth.iloc[row_index, 3: 7]
        roi_image = crop_roi(load_image(starting_folder + '/' + image_name), roi)
        save_image(output_folder + roi_folder_suffix + '/' + folder + '/' + image_name, roi_image)
        progress_bar(row_index, total_rows,
                     'Copying ' + str(total_rows) + ' images in: ' + output_folder)
    print()
def save_images_roi(x, y, output_path, dataframe):
    """Crop each image in x to its ROI (looked up by path in dataframe) and save it per label."""
    total = x.shape[0]
    for index in range(total):
        label = y[index]
        folder = '0000' + str(label) if label < 10 else '000' + str(label)
        create_directory(output_path + '/' + folder)
        file_name = x[index].split('/')[-1]
        matching_row = dataframe.loc[dataframe['image_path'] == x[index]]
        cropped = crop_roi(load_image(x[index]), matching_row.iloc[0, 2:].values)
        save_image(output_path + '/' + folder + '/' + file_name, cropped)
        progress_bar(index, total, 'Writing ' + str(total) + ' ROI images in: ' + output_path)
import numpy as np
import cv2
"""
The height and width of the result can be specified here.
"""
# Output image dimensions; the sieve size passed below must equal HEIGHT * WIDTH.
HEIGHT = 100
WIDTH = 100
"""
primesfrom2to uses the sieve method to generate an array of prime numbers less than the given parameter n.
"""
def _primesfrom2to(n):
# https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188
""" Input n>=6, Returns a array of primes, 2 <= p < n """
sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)
sieve[0] = False
for i in xrange(int(n**0.5)/3+1):
if sieve[i]:
k=3*i+1|1
sieve[ ((k*k)/3) ::2*k] = False
sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False
return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]
"""
createMatrix creates a matrix containing the value 255 if the index is prime, else it contains 0.
"""
def _createMatrix(n):
    """Return a HEIGHT x WIDTH matrix holding 255 at prime indices and 0 elsewhere.

    n must equal HEIGHT * WIDTH for the final reshape to succeed.
    """
    flat = np.zeros(n)
    # Vectorised assignment over all prime indices below n.
    flat[_primesfrom2to(n)] = 255
    return flat.reshape(HEIGHT, WIDTH)
"""
createImage writes the matrix to a PNG image.
"""
def _createImage(data):
    """Write the given matrix to 'primesArt.png' (working directory) via OpenCV."""
    cv2.imwrite('primesArt.png',data)
"""
This script creates a PNG image wherein every white pixel corresponds to a prime number.
"""
if __name__ == '__main__':
    # Render a HEIGHT x WIDTH image whose white pixels mark the prime indices.
    _createImage(_createMatrix(HEIGHT*WIDTH))
import networkx as nx
import random
from operator import itemgetter
from numpy import *
import sys
######################################################################################################################################################
######################################################################################################################################################
def sort_by_degree(G):
    # Return (node, degree) pairs sorted by ascending degree.
    # NOTE(review): degree(with_labels=True) is networkx 1.x API, removed in
    # 2.x where G.degree() returns a view — confirm the pinned version.
    return sorted(G.degree(with_labels=True).items(),key = itemgetter(1))
######################################################################################################################################################
######################################################################################################################################################
def my_very_simple_dict_reverse_lookup(input_dictionary, input_value):
    """Return the first key whose value equals input_value, or -1 if absent.

    Ported to Python 3 (print is now a function call). The for/else is
    intentional: the else branch runs only when the loop finishes without
    returning, i.e. the value was not found.
    """
    for dict_index in input_dictionary:
        if input_dictionary[dict_index]==input_value:
            return dict_index
    else:
        print('Did not find the requested value in the dictionary')
        return -1
######################################################################################################################################################
######################################################################################################################################################
def my_very_simple_tuple_intersection(tuple1,tuple2):
    """Return a tuple of the elements common to both inputs (set order, unspecified)."""
    common = set(tuple1).intersection(tuple2)
    return tuple(common)
######################################################################################################################################################
######################################################################################################################################################
def make_pairs(input_list):
    """Return all pairs (input_list[k], input_list[m]) with k < m, in index order."""
    return [(first, second)
            for k, first in enumerate(input_list)
            for second in input_list[k + 1:]]
######################################################################################################################################################
######################################################################################################################################################
def dimacs2nx(filename):
    """Parse a DIMACS graph file into a networkx Graph.

    'p' lines declare the node count (nodes 0..N-1 are pre-added), 'e' lines
    add edges, 'c' comment lines are ignored.

    Fixes: the file is now opened with a context manager so the handle is
    closed deterministically (the old open(...).readlines() leaked it), and
    blank lines no longer raise IndexError.
    """
    G = nx.Graph()
    with open(filename) as dimacs_file:
        for line in dimacs_file:
            l = line.split()
            if not l or l[0] == 'c':
                continue
            if l[0] == 'p':
                N = int(l[2])
                for n in range(N):
                    G.add_node(n)
            if l[0] == 'e':
                G.add_edge(int(l[1]), int(l[2]))
    return G
######################################################################################################################################################
######################################################################################################################################################
def tree_decomposition(input_graph):
    """Greedy tree decomposition via the minimum-degree elimination heuristic.

    Repeatedly picks a minimum-degree vertex, forms a tree node from the
    vertex plus its neighbourhood, completes that neighbourhood into a
    clique when the vertex is not simplicial, then eliminates the vertex
    (and any neighbours that already formed a clique with it). Each new tree
    node is wired to earlier tree nodes still "waiting" on a shared graph
    vertex, tracked in tree_connectivity_dictionary.

    NOTE(review): Python 2 code (bare print statement below) and networkx
    1.x API (nx.cliques_containing_node, list-returning neighbors) — confirm
    the pinned environment before running.

    :param input_graph: undirected networkx graph (a copy is consumed)
    :return: networkx Graph whose nodes are tuples of original vertices
    """
    current_graph=input_graph.copy()
    decomposition_tree_vertices=list()
    counter=0;
    decomposition_tree=nx.Graph()
    # Maps each original graph vertex to tree nodes awaiting a connection.
    tree_connectivity_dictionary=dict()
    for graph_vertex in current_graph.nodes():
        tree_connectivity_dictionary[graph_vertex]=[]
    while current_graph.order()>0:
        #print current_graph.order()
        nodes_sorted_by_degree=sort_by_degree(current_graph)
        #print 'nodes_sorted_by_degree', nodes_sorted_by_degree
        minimum_degree_vertex=nodes_sorted_by_degree[0][0]
        #print 'Minimum Degree_vertex' , minimum_degree_vertex
        cliques_of_minimum_degree_vertex=nx.cliques_containing_node(current_graph,minimum_degree_vertex)
        #print 'cliques_of_minimum_degree_vertex',cliques_of_minimum_degree_vertex
        number_of_cliques_containing_vertex=len(cliques_of_minimum_degree_vertex)
        #print 'number_of_cliques_containing_vertex', number_of_cliques_containing_vertex
        minimum_degree_vertex_neighbors=current_graph.neighbors(minimum_degree_vertex)
        #print 'minimum_degree_vertex_neighbors', minimum_degree_vertex_neighbors
        # The new tree node = eliminated vertex plus its whole neighbourhood.
        new_tree_vertex=[minimum_degree_vertex]
        #print 'new_tree_vertex First element: ',new_tree_vertex
        new_tree_vertex.extend(minimum_degree_vertex_neighbors)
        new_tree_vertex=tuple(new_tree_vertex)
        decomposition_tree.add_node(new_tree_vertex)
        #print 'decomposition_tree_vertices',decomposition_tree.nodes()
        if number_of_cliques_containing_vertex>1:
            # Vertex is not simplicial: fill its neighbourhood into a clique.
            #print 'Not Clique, will remove only one vertex'
            pairs_of_neighbors=make_pairs(minimum_degree_vertex_neighbors)
            #print 'pairs_of_neighbors',pairs_of_neighbors
            for additional_edge in pairs_of_neighbors:current_graph.add_edge(additional_edge[0],additional_edge[1])
            toberemoved=[minimum_degree_vertex]
            #print 'toberemoved ', toberemoved
        else:
            toberemoved=[minimum_degree_vertex]
            #print 'Clique detected, will try to remove more than one vertex'
            number_of_clique_edges_per_vertex=len(minimum_degree_vertex_neighbors)
            #print 'number_of_clique_edges_per_vertex',number_of_clique_edges_per_vertex
            #print 'Checking all the vertex`s neighbors...'
            #print 'minimum_degree_vertex_neighbors', minimum_degree_vertex_neighbors
            for temp_vertex in minimum_degree_vertex_neighbors:
                if current_graph.degree(temp_vertex)==number_of_clique_edges_per_vertex:
                    toberemoved.append(temp_vertex)
                    print 'Will ALSO remove vertex ', temp_vertex
        for graph_vertex in new_tree_vertex:
            if graph_vertex in toberemoved:
                current_graph.remove_node(graph_vertex)
                #print 'Removed original graph vertex', graph_vertex
                tree_vertices_waiting=tree_connectivity_dictionary[graph_vertex]
                #print 'For the removed node, tree_vertices_waiting: ' , tree_vertices_waiting
                for tree_vertex_waiting in tree_vertices_waiting:
                    #print 'New Tree vertex: ' , new_tree_vertex
                    #print 'Tree Vertex waiting:', tree_vertex_waiting
                    decomposition_tree.add_edge(new_tree_vertex,tree_vertex_waiting)
                    #print 'Connected tree vertices', new_tree_vertex, 'and ' , tree_vertex_waiting
                    #print 'The tree edges are now: ', decomposition_tree.edges()
                    #print 'THE NUMBER OF TREE EDGES ARE NOW: ', len(decomposition_tree.edges())
                for tree_vertex_waiting in tree_vertices_waiting:
                    common_graph_nodes_between_tree_vertices=list(my_very_simple_tuple_intersection(new_tree_vertex,tree_vertex_waiting))
                    for graph_vertex in common_graph_nodes_between_tree_vertices:
                        tree_connectivity_dictionary[graph_vertex].remove(tree_vertex_waiting)
                        #print 'Removed from dictionary entry', graph_vertex , 'tree node ', tree_vertex_waiting
                        #print 'Now the new dictionary is: ' , tree_connectivity_dictionary
            else:
                tree_connectivity_dictionary[graph_vertex].append(new_tree_vertex)
                #print 'New tree_connectivity_dictionary node appended. New tree_connectivity_dictionary ', tree_connectivity_dictionary
    #print 'tree_connectivity_dictionary: ' , tree_connectivity_dictionary
    #print 'decomposition_tree.nodes: ', decomposition_tree.nodes()
    #print 'decomposition_tree.edges: ', decomposition_tree.edges()
    return decomposition_tree
######################################################################################################################################################
######################################################################################################################################################
def find_tree_leaves(nx_tree_input):
    """Return the degree-1 vertices (leaves) of the tree, in node order."""
    return [vertex for vertex in nx_tree_input.nodes()
            if nx_tree_input.degree(vertex) == 1]
######################################################################################################################################################
######################################################################################################################################################
def find_optimal_tree_root(nx_tree_input):
    """Pick a root for the tree: the first vertex of its graph center."""
    return nx.center(nx_tree_input)[0]
######################################################################################################################################################
######################################################################################################################################################
def find_combinations_list(input_dict_of_lists):
    """Enumerate the Cartesian product of the value-lists of a dict.

    Returns a list of combinations (one element picked per key, keys in dict
    iteration order, first key varying fastest), or -1 if any value list is
    empty.

    Ported to Python 3: print() calls, and floor division (//) in the
    mixed-radix index decoding — the Python 2 '/' would produce float
    indices under Python 3. The unused number_of_sets local was dropped.
    """
    cardinality_dict = dict()
    for k in input_dict_of_lists:
        cardinality_dict[k] = len(input_dict_of_lists[k])
        if cardinality_dict[k] <= 0:
            print('The elements of the list must be strictly positive integers. Exiting....')
            return -1
    print('CARDINALITY DICT', cardinality_dict)
    # repetition_dict[m] = product of cardinalities of all earlier keys,
    # i.e. how many consecutive combinations keep key m's element fixed.
    repetition_dict = dict()
    temp_repetition = 0
    for m in cardinality_dict:
        if temp_repetition == 0:
            repetition_dict[m] = 1
            temp_repetition = cardinality_dict[m]
        else:
            repetition_dict[m] = temp_repetition
            temp_repetition = temp_repetition * cardinality_dict[m]
    total_number_of_combinations = temp_repetition
    print('total_number_of_combinations= ', total_number_of_combinations)
    output_combination_list = list()
    for combination_number in range(total_number_of_combinations):
        current_combination_list = list()
        for current_set in input_dict_of_lists:
            # Mixed-radix decoding of combination_number into one index per key.
            current_combination_list.append(
                input_dict_of_lists[current_set][
                    (combination_number // repetition_dict[current_set]) % cardinality_dict[current_set]])
        output_combination_list.append(current_combination_list)
    return output_combination_list
######################################################################################################################################################
######################################################################################################################################################
def find_tree_structure(nx_tree_input):
    """Orient the tree from its chosen root.

    Walks the root-to-leaf shortest path for every leaf and records each
    parent/child relation along the way.

    :return: [child->parent dict, parent->children-list dict]
    """
    tree_root = find_optimal_tree_root(nx_tree_input)
    child_to_parent = dict()
    parent_to_children = dict()
    for leaf in find_tree_leaves(nx_tree_input):
        path = nx.shortest_path(nx_tree_input, tree_root, leaf)
        for position in range(1, len(path)):
            parent, child = path[position - 1], path[position]
            child_to_parent[child] = parent
            if parent not in parent_to_children:
                parent_to_children[parent] = [child]
            elif child not in parent_to_children[parent]:
                parent_to_children[parent].append(child)
    return [child_to_parent, parent_to_children]
######################################################################################################################################################
######################################################################################################################################################
def Dynamic_Programming_for_decomposed_trees(input_tree,input_dictionary,interaction_dictionary): #Input dictionary= The alternative rotamers for each residue
    """Exact minimisation over a tree decomposition, processed leaves-upward.

    Each tree node's DP table (stored in master_dictionary) maps an
    assignment of the vertices shared with its parent to the best
    [combination, value] achievable in its subtree; the root is computed
    last, after all other nodes, and its table yields the global optimum.

    NOTE(review): Python 2 code — print statements throughout, and
    final_dictionary.keys()[0] would raise on Python 3 (dict views are not
    indexable).

    :return: [best_combination, minimum_value]
    """
    current_tree=input_tree.copy()
    master_dictionary=dict()
    for dummy in current_tree.nodes():
        master_dictionary[dummy]=dict()
    print 'MASTER DICTIONARY = ', master_dictionary
    tree_root=find_optimal_tree_root(input_tree)
    print 'Tree root is ', tree_root
    next_tree_leaves=find_tree_leaves(current_tree)
    current_tree_leaves=find_tree_leaves(current_tree)
    [tree_structure_children_to_parent,tree_structure_parent_to_children]=find_tree_structure(input_tree)
    print 'tree_structure_children_to_parent ',tree_structure_children_to_parent
    print 'tree_structure_parent_to_children: ',tree_structure_parent_to_children
    print' ############################################################################################################################################################'
    while len(current_tree_leaves)>0:
        current_tree_leaves=next_tree_leaves[:]
        next_tree_leaves=list()
        if tree_root in current_tree_leaves: current_tree_leaves.remove(tree_root) #The root HAS to be computed after ALL the other nodes are computed
        print 'REMOVED TREE ROOT'
        print 'Current_tree_leaves ', current_tree_leaves
        for current_node in current_tree_leaves:
            print 'Current Node: ', current_node
            parent_dict=dict()
            children_dict=dict()
            # Collect the already-computed DP tables of this node's children.
            if current_node in tree_structure_parent_to_children:
                for child in tree_structure_parent_to_children[current_node]:
                    children_dict[child]=master_dictionary[child]
            parent_of_node=tree_structure_children_to_parent[current_node]
            if parent_of_node not in next_tree_leaves:next_tree_leaves.append(parent_of_node)
            master_dictionary[current_node]=find_optimal_combination(input_dictionary,interaction_dictionary,current_node,parent_of_node,children_dict)
    #Now, once we are done with all the other nodes, we move on to the tree root
    root_node=tree_root
    # Sentinel -1: the root has no parent.
    parent_of_root=-1
    children_dict=dict()
    for root_child in tree_structure_parent_to_children[root_node]:
        children_dict[root_child]=master_dictionary[root_child]
    master_dictionary[root_node]=find_optimal_combination(input_dictionary,interaction_dictionary,root_node,parent_of_root,children_dict)
    final_dictionary=master_dictionary[root_node] #This is a dictionary of the form: set:value
    print 'FINAL DICTIONARY ', final_dictionary
    best_combination=final_dictionary.keys()[0]
    minimum_value=final_dictionary[best_combination]
    return [best_combination, minimum_value]
######################################################################################################################################################
######################################################################################################################################################
def find_optimal_combination(input_dictionary,interaction_dictionary,current_node,parent_of_node,children_dict):
    """Build the dynamic-programming table for one tree-decomposition node.

    Splits the node's graph vertices into those shared with the parent
    ("iterators") and the rest ("variables"). For every iterator assignment
    it minimises the interaction value over all variable assignments,
    folding in the tables of children whose key sets are covered by the
    current assignment. Returns a dict: iterator-combination ->
    [best full combination, value]; for the root (parent_of_node == -1) the
    dict maps the best combination itself to its value.

    NOTE(review): Python 2 code (print statements, sys.maxint). The block
    nesting below was reconstructed from a whitespace-mangled source —
    verify branch structure against the original before relying on edge
    cases.
    """
    print 'ENTERED find_optimal_combination FUNCTION'
    print 'Input dictionary: ', input_dictionary
    #print 'Interaction dictionary: ', interaction_dictionary
    print 'Current_node: ' , current_node
    print 'Parent of node: ', parent_of_node
    print 'Children_dict: ', children_dict
    if parent_of_node != -1:
        node_with_parent_intersection=tuple( set(current_node) & set(parent_of_node) )
        print 'node_with_parent_intersection ', node_with_parent_intersection
        node_not_parent_elements=tuple( set( current_node) - set(parent_of_node))
        print 'node_not_parent_elements ', node_not_parent_elements
    else:
        # Root: no parent, so every vertex is a free variable.
        node_with_parent_intersection=tuple()
        print 'node_with_parent_intersection ', node_with_parent_intersection
        node_not_parent_elements=current_node
        print 'node_not_parent_elements ', node_not_parent_elements
    if len(children_dict)>0:
        leaf_indicator=0
        print 'leaf_indicator ', leaf_indicator
        children_of_current_node=children_dict.keys()
        print 'children_of_current_node: ', children_of_current_node
    else:
        leaf_indicator=1
        print 'leaf_indicator ', leaf_indicator
    iterator_dictionary=dict()
    variable_dictionary=dict()
    output_dictionary=dict()
    if parent_of_node != -1:
        for iterator in node_with_parent_intersection:
            iterator_dictionary[iterator]=input_dictionary[iterator]
    print 'ITERATOR DICT : ', iterator_dictionary
    for variable in node_not_parent_elements:
        variable_dictionary[variable]=input_dictionary[variable]
    print 'VARIABLE DICT : ', variable_dictionary
    all_iterator_combinations=find_combinations_list(iterator_dictionary)
    all_variable_combinations=find_combinations_list(variable_dictionary)
    print 'ITERATOR COMBINATIONS : ', all_iterator_combinations
    print 'all_variable_combinations : ', all_variable_combinations
    if len(all_iterator_combinations)>0:
        # Non-root case: one table row per assignment of the shared vertices.
        for current_iterator_combination in all_iterator_combinations:
            optimal_variable_combination=list()
            smallest_value=sys.maxint
            for current_variable_combination in all_variable_combinations:
                current_node_interactions_value=find_total_combination_value(interaction_dictionary,current_iterator_combination,current_variable_combination)
                print 'node_interactions_value =' ,current_node_interactions_value
                integrated_value=current_node_interactions_value
                if leaf_indicator==0:
                    print 'THIS IS NOT A LEAF, SO IT HAS CHILDREN.....'
                    for current_child in children_of_current_node:
                        print 'current_child', current_child
                        provided_set=(set(current_iterator_combination) | set(current_variable_combination) )
                        print 'provided_set', provided_set
                        integrated_set=provided_set
                        print 'Integrated set: ', integrated_set
                        # Fold in any child table entry compatible with this assignment.
                        for dummytuple in children_dict[current_child]:
                            dummyset=set(dummytuple)
                            if dummyset.issubset(provided_set):
                                print 'THIS dummyset IS SUBSET : ', dummyset
                                integrated_set= ( set(children_dict[current_child][dummytuple][0]) | integrated_set)
                                print 'integrated_set : ', integrated_set
                                integrated_value+=children_dict[current_child][dummytuple][1]
                                print 'integrated_value', integrated_value
                else:
                    print 'THIS IS A LEAF, NO CHILDREN, NO RECURSIVE FUNCTIONS'
                    provided_set=(set(current_iterator_combination) | set(current_variable_combination) )
                    integrated_set=provided_set
                if integrated_value < smallest_value:
                    print 'SMALLEST VALUE HAS TO BE UPDATED'
                    smallest_value=integrated_value
                    print 'smallest_value= ', smallest_value
                    optimal_integrated_combination=integrated_set
                    print 'optimal_integrated_combination : ', optimal_integrated_combination
            output_dictionary[tuple(current_iterator_combination)]=[tuple(optimal_integrated_combination), smallest_value]
            print 'output_dictionary', output_dictionary
    else:
        # Root case: no shared vertices, a single minimisation over variables.
        print 'BBBBBBBBBBBBBBBBBBB'
        current_iterator_combination=[]
        optimal_variable_combination=list()
        smallest_value=sys.maxint
        for current_variable_combination in all_variable_combinations:
            current_node_interactions_value=find_total_combination_value(interaction_dictionary,current_iterator_combination,current_variable_combination)
            print 'node_interactions_value =' ,current_node_interactions_value
            integrated_value=current_node_interactions_value
            provided_set=set(current_variable_combination)
            print 'provided_set', provided_set
            integrated_set=provided_set
            print 'Integrated set: ', integrated_set
            if leaf_indicator==0:
                print 'THIS IS NOT A LEAF, SO IT HAS CHILDREN.....'
                for current_child in children_of_current_node:
                    print 'current_child', current_child
                    for dummytuple in children_dict[current_child]:
                        dummyset=set(dummytuple)
                        #print 'dummyset: ', dummyset
                        if dummyset.issubset(provided_set):
                            print 'THIS dummyset IS SUBSET : ', dummyset
                            print 'THE NEW INTEGRATED SET WILL BE THE UNION OF THE FOLLOWING SETS:', set(children_dict[current_child][dummytuple][0]), integrated_set
                            integrated_set= ( set(children_dict[current_child][dummytuple][0]) | integrated_set)
                            print 'integrated_set : ', integrated_set
                            integrated_value+=children_dict[current_child][dummytuple][1]
                            print 'integrated_value', integrated_value
            else:
                print 'THIS IS A LEAF, NO CHILDREN, NO RECURSIVE FUNCTIONS'
                provided_set=(set(current_iterator_combination) | set(current_variable_combination) )
                integrated_set=provided_set
            if integrated_value < smallest_value:
                print 'SMALLEST VALUE HAS TO BE UPDATED'
                smallest_value=integrated_value
                print 'smallest_value= ', smallest_value
                optimal_integrated_combination=integrated_set
                print 'optimal_integrated_combination : ', optimal_integrated_combination
        output_dictionary[tuple(optimal_integrated_combination)]=smallest_value
        print 'output_dictionary', output_dictionary
    return output_dictionary
######################################################################################################################################################
######################################################################################################################################################
def find_total_combination_value(interaction_dictionary, list1, list2):
    """Sum pairwise interaction energies over the concatenation of two lists.

    Looks up interaction_dictionary[(a, b)] for every index pair k <= m of
    the concatenated list (self-pairs (a, a) included); missing pairs
    contribute 0. Returns -1 (with a message) if the two lists share
    elements. Ported to Python 3 print(); dead commented-out debug lines
    removed.
    """
    # Overlapping lists are disallowed: each element may be assigned once.
    if len(set(list1) & set(list2)) > 0:
        print('There are common elements in the two lists... This is not permitted. Returning -1')
        return -1
    total_list = list1[:]
    total_list.extend(list2)
    number_of_elements = len(total_list)
    output = 0
    for k in range(number_of_elements):
        for m in range(k, number_of_elements):
            pair = (total_list[k], total_list[m])
            if pair in interaction_dictionary:
                output += interaction_dictionary[pair]
    return output
######################################################################################################################################################
######################################################################################################################################################
# Smoke test for the decomposition + DP pipeline: a hand-built 3-node
# decomposition tree over residues a/b/c/f, candidate rotamer values in
# dict1, and |k - m| pairwise interaction energies in dict2.
# NOTE(review): Python 2 print statements below; port before running on Python 3.
test_tree=nx.Graph()
node0=tuple(['a','b'])
node1=tuple(['a','c'])
node2=tuple(['b','f'])
test_tree.add_node(node0)
test_tree.add_node(node1)
test_tree.add_node(node2)
test_tree.add_edge(node0,node1)
test_tree.add_edge(node0,node2)
dict1={'g':[37,41], 'a':[3,5], 'b':[7,11], 'c':[13,17],'d':[19,23], 'f':[29,31] }
dict2=dict()
for k in range(45):
    for m in range(45):
        dict2[tuple([k,m])]=abs(k-m)
print test_tree.nodes()
print test_tree.edges()
[X, Y]=Dynamic_Programming_for_decomposed_trees(test_tree,dict1,dict2)
print X
print Y
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""--- Day 6: Probably a Fire Hazard ---
Because your neighbors keep defeating you in the holiday house decorating
contest year after year, you've decided to deploy one million lights in a
1000x1000 grid.
Furthermore, because you've been especially nice this year, Santa has mailed
you instructions on how to display the ideal lighting configuration.
Lights in your grid are numbered from 0 to 999 in each direction; the lights at
each corner are at 0,0, 0,999, 999,999, and 999,0. The instructions include
whether to turn on, turn off, or toggle various inclusive ranges given as
coordinate pairs. Each coordinate pair represents opposite corners of a
rectangle, inclusive; a coordinate pair like 0,0 through 2,2 therefore refers
to 9 lights in a 3x3 square. The lights all start turned off.
To defeat your neighbors this year, all you have to do is set up your lights by
doing the instructions Santa sent you in order.
For example:
- turn on 0,0 through 999,999 would turn on (or leave on) every light.
- toggle 0,0 through 999,0 would toggle the first line of 1000 lights, turning
off the ones that were on, and turning on the ones that were off.
- turn off 499,499 through 500,500 would turn off (or leave off) the middle
four lights.
After following the instructions, how many lights are lit?
--- Part Two ---
You just finish implementing your winning light pattern when you realize you
mistranslated Santa's message from Ancient Nordic Elvish.
The light grid you bought actually has individual brightness controls; each
light can have a brightness of zero or more. The lights all start at zero.
The phrase turn on actually means that you should increase the brightness of
those lights by 1.
The phrase turn off actually means that you should decrease the brightness of
those lights by 1, to a minimum of zero.
The phrase toggle actually means that you should increase the brightness of
those lights by 2.
What is the total brightness of all lights combined after following Santa's
instructions?
For example:
- turn on 0,0 through 0,0 would increase the total brightness by 1.
- toggle 0,0 through 999,999 would increase the total brightness by 2000000.
"""
from collections import Counter
import re
import sys
import click
# Part-one light states; note empty_grid() shadows OFF with a local value.
ON = 1
OFF = -1
def count_lit_lights(lights):
    """Count how many cells in the grid hold exactly 1 (a lit light)."""
    return sum(row.count(1) for row in lights)
def count_brightness(lights):
    """Total brightness: the sum of every cell in the grid."""
    return sum(map(sum, lights))
def empty_grid(x=1000, y=1000, new_commands=False):
    """Build a y-by-x grid of 'off' lights.

    Off is 0 under the part-two semantics (new_commands=True) and -1
    otherwise; a zero dimension yields the degenerate grid [[]].
    """
    if x == 0 or y == 0:
        return [[]]
    off_value = 0 if new_commands else -1
    return [[off_value for _ in range(x)] for _ in range(y)]
def modify_light(light, new_commands=False, modification="toggle"):
    """Return the new state of one light after applying a command.

    Old semantics: on -> 1, off -> -1, toggle flips the sign. New semantics
    (part two): on adds 1, off subtracts 1 clamped at 0, toggle adds 2.
    Unknown modifications yield None.
    """
    if modification == 'turn on':
        return light + 1 if new_commands else 1
    if modification == 'turn off':
        if new_commands:
            return light - 1 if light >= 1 else 0
        return -1
    if modification == 'toggle':
        return light + 2 if new_commands else light * -1
    return None
def parse_instruction(text):
    """Parse one instruction line into (modification, (x0, y0), (x1, y1))."""
    pattern = re.compile(
        r"(?P<mod>(turn\s(on|off))|(toggle))"   # modification verb
        r"\s(?P<x0>\d*),(?P<y0>\d*)"            # first corner
        r"\sthrough"
        r"\s(?P<x1>\d*),(?P<y1>\d*)"            # second corner
    )
    match = pattern.search(text)
    corner_a = (int(match.group('x0')), int(match.group('y0')))
    corner_b = (int(match.group('x1')), int(match.group('y1')))
    return (match.group('mod'), corner_a, corner_b)
def follow_instruction(instruction, lights, new_commands=False):
    """Apply one parsed instruction to the grid in place; returns the grid."""
    modification, (x0, y0), (x1, y1) = instruction
    # Both corner coordinates are inclusive.
    for y in range(y0, y1 + 1):
        row = lights[y]
        for x in range(x0, x1 + 1):
            row[x] = modify_light(row[x], new_commands, modification=modification)
    return lights
def calculate_solution_1(text):
    """Run every instruction under the old semantics and count the lit lights."""
    lights = empty_grid()
    for line in text.split('\n'):
        lights = follow_instruction(parse_instruction(line), lights)
    return count_lit_lights(lights)
def calculate_solution_2(text):
    """Run every instruction under the part-two semantics and sum the brightness."""
    lights = empty_grid(new_commands=True)
    for line in text.split('\n'):
        lights = follow_instruction(parse_instruction(line), lights, new_commands=True)
    return count_brightness(lights)
@click.command()
@click.option('--source_file', default='data/06.txt',
              help='source data file for problem')
def main(source_file):
    """Simple solution to adventofcode problem 6."""
    # CLI entry point: read the puzzle input and print both answers.
    data = ''
    with open(source_file) as source:
        data = source.read()
    print('Number of lights switched on is: {}'.format(
        calculate_solution_1(data)))
    print('Brightness of lights is: {}'.format(
        calculate_solution_2(data)))
if __name__ == "__main__":
    sys.exit(main())
|
# def getNombre(nombre):
# texto=f"el nombre es: {nombre}"
#     return texto
# def getApellidos(apellidos):
# texto=f"Los apellidos son: {apellidos}"
# return texto
# print(getNombre("jiren"),getApellidos("suarez"))
# def devuelveTodo(nombre, apellidos):
# texto=f"{getNombre(nombre)} \n{getApellidos(apellidos)}"
# return texto
# print(devuelveTodo("kakaroto","suarez"))
# funcion lambda //funcion anonima que no requiere renombrarla como def
def dime_el_ano(year):
    """Return the year label string (PEP 8 E731: def instead of a named lambda)."""
    # NOTE(review): the * 50 multiplier looks deliberate but odd — confirm intent.
    return f"el año es: {year*50}"
print(dime_el_ano(2021))
import sys
class Node:
    """Single node of a binary tree: a key plus left/right child links."""
    def __init__(self, key):
        # Children start empty; the tree wires them up on insert.
        self.data, self.left, self.right = key, None, None
class BSTTree:
"""
search, insert, delete operations in BST are O(log(h)), h-height of tree -->in avg case
if BST is either left or right skewed then these operations takes O(n) time
height of single node is zero
height of empty tree is -1
"""
    def __init__(self):
        # An empty tree: no root until the first insert.
        self.root = None
def insert(self, root, key):
"""
Idea is to insert key in BST is to follow BST property while inserting
In recursive approach:
insert(root, key):
check if root is None
then insert new node and assign its ref to this root
and return this root to caller
root = get_new_node(key)
return root
check if key is less than or equal to current root data:
then recursively call on left subtree
root.left = insert(root.left, key)
else:
call then recursively call on right subtree
root.right = insert(root.right, key)
finally return root
return root
:param root: root of BST
:param key: key/value to be inserted in BST
:return: root
"""
if root is None:
root = BSTTree.get_new_node(key)
return root
if key <= root.data:
root.left = self.insert(root.left, key)
else:
root.right = self.insert(root.right, key)
return root
def insert_iterative(self, key):
"""
TODO: check how to implement it
:param key:
:return:
"""
if self.root is None:
self.root = BSTTree.get_new_node(key)
return
curr = self.root
if key <= self.root.data:
if self.root.left is None:
self.root.left = BSTTree.get_new_node(key)
return
else:
if self.root.right is None:
self.root.right = BSTTree.get_new_node(key)
return
if key > curr.data:
while curr.right:
curr = curr.right
curr.right = BSTTree.get_new_node(key)
else:
while curr.left:
curr = curr.left
curr.left = BSTTree.get_new_node(key)
    @staticmethod
    def get_new_node(key):
        """Factory for a fresh leaf Node holding key."""
        return Node(key)
def inorder(self, root):
if root is None:
return
self.inorder(root.left)
print(root.data, end=' ')
self.inorder(root.right)
def preorder(self, root):
if root is None:
return
print(root.data, end=' ')
self.preorder(root.left)
self.preorder(root.right)
def iterative_pre(self):
"""
Idea is to use stack while traversing in pre-order
curr = self.root
keep traversing left subtree
while curr:
print(curr.data)
if it has right node
push it to stack
curr = curr.left
if curr is None and stack:
curr = stack.pop()
:return:
"""
if self.root is None:
return
curr = self.root
stack = []
while curr:
print(curr.data, end=' ')
if curr.right:
stack.append(curr.right)
curr = curr.left
if curr is None and stack:
curr = stack.pop()
def postorder(self, root):
if root is None:
return
self.postorder(root.left)
self.postorder(root.right)
print(root.data, end=' ')
def post_order_iterative(self):
from collections import defaultdict
if self.root is None:
return
node_to_its_occurrence_map = dict()
stack = list()
stack.append(self.root)
while stack:
temp = stack[-1]
if node_to_its_occurrence_map.get(temp, 0) == 0:
if temp.left:
node_to_its_occurrence_map[temp] = 0
stack.append(temp.left)
elif node_to_its_occurrence_map.get(temp) == 1:
if temp.right:
stack.append(temp.right)
elif node_to_its_occurrence_map.get(temp) == 2:
print(temp.data, end=' ')
else:
stack.pop()
node_to_its_occurrence_map[temp] = node_to_its_occurrence_map.get(temp, 0) + 1
def delete(self, root, key):
if root is None:
return
if root.data > key:
root.left = self.delete(root.left, key)
elif root.data < key:
root.right = self.delete(root.right, key)
else:
# found case
if root.left is None and root.right is None:
# case 1: node is leaf node
del root
root = None
elif root.left is None:
# case 2: node have a child
temp = root
root = root.right
del temp
elif root.right is None:
# case 2: node have a child
temp = root
root = root.left
del temp
else:
# case 3: node have two children
new_root_data = self.find_min(root.right)
root.data = new_root_data
root.right = self.delete(root.right, new_root_data)
return root
def find_min(self, root):
if root and root.left is None:
return root.data
if root is None:
return -1
data = self.find_min(root.left)
return data
def find_max(self, root):
if root and root.right is None:
return root.data
if root is None:
return -1
data = self.find_max(root.right)
return data
def search(self, root, key):
if root is None:
return False
elif root.data == key:
return True
elif root.data < key:
return self.search(root.right, key)
else:
return self.search(root.left, key)
def search_iterative(self, key):
if self.root is None:
return False
curr = self.root
while curr:
if curr.data == key:
return True
if curr.data < key:
curr = curr.right
else:
curr = curr.left
return False
def find_height_of_tree(self, root):
"""
Idea is to use recursion and get max of height of left-subtree and right-subtree
and add 1 to it
If root is None it return -1 ....assuming height of empty tree as -1
:param root:
:return:
"""
if root is None:
return -1
return max(self.find_height_of_tree(root.left), self.find_height_of_tree(root.right)) + 1
def level_order_traversal(self):
print('level order traversal using queue')
if self.is_empty():
print('BST is empty')
return
queue = list()
queue.append(self.root)
while queue:
curr = queue.pop(0)
print(curr.data, end=' ')
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
print('')
    def level_order_traversal_line_by_line(self, left_view_only=False):
        """
        Idea is to use for level order traversal using queue
        To print level line by line
        size = use queue size # this line is IMP, it would be wrong to iterate over direct queue and not its size
        iterate over queue size
        curr = dequeue
        print curr.data
        insert curr.left and curr.right node to queue if exists
        :param left_view_only: if True, print only the first (leftmost) node
            of each level, i.e. the tree's left view
        :return: None (prints to stdout, one level per line)
        """
        print('level_order_traversal_line_by_line using queue')
        if self.is_empty():
            print('BST is empty')
            return
        queue = list()
        queue.append(self.root)
        while queue:
            # Snapshot the level size BEFORE enqueuing children, so this
            # inner loop consumes exactly one level.
            size = len(queue)
            for i in range(size):
                curr = queue.pop(0)
                if left_view_only is True:
                    # print only first(left) node in each level
                    if i == 0:
                        print(curr.data, end=' ')
                else:
                    # print all nodes in level in a line
                    print(curr.data, end=' ')
                if curr.left:
                    queue.append(curr.left)
                if curr.right:
                    queue.append(curr.right)
            print('')
        print('')
    def spiral_level_order_traversal(self):
        """
        Idea is to use line by line level order traversal using queue and also use stack
        while queue is not empty
            iterate over curr size of queue
                If level is even then push left and then right nodes to stack
                and if level is odd then push right and and then left nodes to stack
            while stack is not empty
                pop and enqueue to queue
        :return: None (prints to stdout)
        """
        print('spiral_level_order_traversal')
        # TODO: please study and come back to me
        # NOTE(review): nodes are printed in the order they sit in the queue;
        # the stack only reverses how the NEXT level is enqueued — confirm the
        # output really alternates direction for all shapes of tree.
        if self.is_empty():
            return
        queue = list()
        stack = list()
        queue.append(self.root)
        level = 0  # parity decides the push order of children
        while queue:
            size = len(queue)
            for i in range(size):
                curr = queue.pop(0)
                print(curr.data, end=' ')
                if level % 2 == 1:
                    if curr.right:
                        stack.append(curr.right)
                    if curr.left:
                        stack.append(curr.left)
                else:
                    if curr.left:
                        stack.append(curr.left)
                    if curr.right:
                        stack.append(curr.right)
            # Drain the stack into the queue: this reverses the level order.
            while stack:
                queue.append(stack.pop())
            level += 1
        print('')
    def right_view(self):
        """
        Idea is to use same concept for level order traversal using queue
        To print right view of tree
        size = use queue size # this line is IMP, it would be wrong to iterate over direct queue and not its size
        iterate over queue size
        curr = dequeue
        if i == size - 1 # this is IMP, to consider last node of each level
            print curr.data
        insert curr.left and curr.right node to queue if exists
        :return: None (prints the rightmost node of each level)
        """
        print('right_view using queue')
        if self.is_empty():
            print('BST is empty')
            return
        queue = list()
        queue.append(self.root)
        while queue:
            size = len(queue)
            for i in range(size):
                curr = queue.pop(0)
                if i == size - 1:
                    # print only right node at each level
                    print(curr.data, end=' ')
                if curr.left:
                    queue.append(curr.left)
                if curr.right:
                    queue.append(curr.right)
            print('')
        print('')
    def bottom_view(self, root):
        """
        If we look at tree from bottom,
        it is obvious that you would be able to see only leaf nodes of that tree
        Basically idea is to use recursive approach to find all leaf nodes and print them

        NOTE(review): this prints ALL leaf nodes left-to-right.  The usual
        "bottom view" (last node per horizontal distance) can also include
        internal nodes — confirm which definition is wanted here.
        :param root: subtree root (or None)
        :return: None (prints to stdout)
        """
        if root is None:
            # if root is None return to immediate it latest caller
            return
        if not root.left and not root.right:
            # leaf node found, so print it
            print(root.data, end=' ')
        self.bottom_view(root.left)
        self.bottom_view(root.right)
    def is_empty(self):
        # True when the tree holds no nodes at all.
        return self.root is None
def get_size(self, root):
"""
TC- O(n) --simply traversal of all nodes in tree
SC- O(h)--proportional to height-->no. active fun calls in stack <= height of tree at any moment
:param root:
:return: int size of tree(total no. of nodes in tree)
"""
if root is None:
return 0
else:
return 1 + self.get_size(root.left) + self.get_size(root.right)
def get_size_using_queue(self):
"""
Idea is to use level order traversal using queue
Keep counter to count no. of nodes until queue is not empty
:return:
"""
if self.root is None:
return 0
queue = list()
queue.append(self.root)
counter = 0
while queue:
counter += 1
curr = queue.pop(0)
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
return counter
def get_max_in_binary_tree(self, root):
"""
Idea is to using recursion find max from root, left-subtree and right-subtree
This solution is for binary tree.....(for BST there is better solution)
TC- O(n)
SC- O(h)---at most h+1 fun calls in stack-----using level order traversal --O(w)--width of tree
:param root:
:return: max value from tree
"""
if root is None:
return -sys.maxsize
else:
return max(root.key, max(self.get_max_in_binary_tree(root.lef), self.get_max_in_binary_tree(root.right)))
def print_k_dist_nodes(self, root, k):
"""
Idea is to use recursion and find kth level nodes in left and right subtrees
Pass k as argument to fun call
if root is empty simply return
if k == 0 then that is kth level node root
print root.data
else:
recursively call left and right subtree with k-1
:param root:
:param k:
:return:
"""
if root is None:
return
if k == 0: # this should be after only above base case check else none.data would be tried
print(root.data, end=' ')
else:
self.print_k_dist_nodes(root.left, k - 1)
self.print_k_dist_nodes(root.right, k - 1)
def is_balanced_tree(self, root):
"""
TC--->O(n)/theta(n)
SC-->O(h)--->height of tree
Idea is to find height of each node recursively in left and right subtree
And check if there height diff is not more than 1
When height is obtained of left and right subtree of each node
it will decide is balanced or not by comparing height diff
:param root:
:return:
"""
if root is None:
return 0
left_height = self.is_balanced_tree(root.left)
if left_height == -1:
return -1
right_height = self.is_balanced_tree(root.right)
if right_height == -1:
return -1
if abs(left_height - right_height) > 1:
return -1
else:
return max(left_height, right_height) + 1
def get_max_width_of_tree(self):
"""
TC-->theta(n)-->just traversing all nodes and doing const. operation using queue
SC-->O(n)-->theta(w)---max. width would be in a queue at any time
Idea is to use level order traversal line by line using queue
max width would be the max. no. of nodes in any level in the tree
:return: int --> max_width in tree
"""
if self.root is None:
return 0
queue = list()
queue.append(self.root)
max_width = 0
while queue:
size = len(queue)
max_width = max(max_width, size)
for _ in range(size):
curr = queue.pop(0)
if curr.left:
queue.append(curr.left)
if curr.right:
queue.append(curr.right)
return max_width
    def bin_tree_to_double_linked_list(self, root):
        # TODO: unimplemented stub.  Presumably intended to convert the tree
        # into a doubly linked list (typically in in-order sequence) — confirm
        # the intended node order before implementing.
        pass
    def spiral_level_order_traversal_using_queue_and_stack(self):
        """
        Use line by line level order traversal using queue
        reverse = False
        while queue:
            size = len(queue)
            for _ in range(size):
                if level has to printed reverse
                    then push that curr node data to stack
                else
                    print(curr.data, end=' ')
                push left and right
            while stack:
                print(stack.pop(), end=' ')
            reverse = not reverse
            print('') # new line for line by line spiral traversal
        :return: None (prints one level per line, alternating direction)
        """
        print('spiral_level_order_traversal_using_queue_and_stack')
        if self.is_empty():
            return
        queue = list()
        stack = list()
        queue.append(self.root)
        reverse = False  # even levels print directly, odd levels via the stack
        while queue:
            size = len(queue)
            for i in range(size):
                curr = queue.pop(0)
                if reverse:
                    # Buffer the data; popping later reverses the order.
                    stack.append(curr.data)
                else:
                    print(curr.data, end=' ')
                if curr.left:
                    queue.append(curr.left)
                if curr.right:
                    queue.append(curr.right)
            if reverse:
                while stack:
                    print(stack.pop(), end=' ')
            reverse = not reverse
            print('')
    def spiral_improved(self):
        """
        Idea is to use two stacks
        First stack to print a level in left to right order
        second stack to print a level in right to left order

        The two stacks alternate: draining one level from stack1 fills
        stack2 with the next level's children (left then right), and vice
        versa with the push order flipped, which is what reverses the
        direction on every level.
        :return: None (prints one level per line)
        """
        if self.root is None:
            return
        stack1 = list()
        stack2 = list()
        stack1.append(self.root)
        while stack1 or stack2:
            while stack1:
                """
                push in stack2 left and right order
                """
                curr = stack1.pop()
                print(curr.data, end=' ')
                if curr.left:
                    stack2.append(curr.left)
                if curr.right:
                    stack2.append(curr.right)
            print('')
            while stack2:
                """
                reverse order push
                push in stack1 right and left order
                """
                curr = stack2.pop()
                print(curr.data, end=' ')
                if curr.right:
                    stack1.append(curr.right)
                if curr.left:
                    stack1.append(curr.left)
            print('')
if __name__ == '__main__':
    # Demo driver: build a BST from a fixed key list and exercise a few of
    # the traversals.  `bst.insert` is the recursive insert defined earlier
    # in this file (outside this chunk).
    bst = BSTTree()
    # input_array = [15, 10, 20, 8, 12, 17, 25]
    # input_array = [1, 2, 3, 4, 5, 6]
    # input_array = [10, 5, 20, 3, 7]
    # input_array = [100, 80, 300, 10, 90, 200, 700, 8, 9, 150]
    input_array = [150, 50, 200, 30, 90, 180, 300, 20, 40, 70, 100, 250, 500, 10]
    for val in input_array:
        bst.root = bst.insert(bst.root, val)
    print('inorder')
    bst.inorder(bst.root)
    print('')
    # print('preorder')
    # bst.preorder(bst.root)
    # print('')
    print('postorder')
    bst.postorder(bst.root)
    print('')
    bst.post_order_iterative()
    # print("MIN:{}".format(bst.find_min(bst.root)))
    # print("max:{}".format(bst.find_max(bst.root)))
    # print("search:{}".format(bst.search(bst.root, -1)))
    # print("search_itrative:{}".format(bst.search_iterative(-1)))
    # print(bst.find_height_of_tree(bst.root))
    # print('iterative pre')
    # bst.iterative_pre()
    # print('')
    # bst.root = bst.delete(bst.root, 12)
    # print('Inorder')
    # bst.inorder(bst.root)
    # print('')
    # bst.level_order_traversal()
    # bst.level_order_traversal_line_by_line()
    # bst.level_order_traversal_line_by_line(left_view_only=True)
    # bst.right_view()
    # print('bottom view using recursion')
    # bst.bottom_view(bst.root)
    # print('')
    # # bst.spiral_level_order_traversal(bst.root)
    # print(bst.get_size(bst.root))
    # print(bst.get_size_using_queue())
    # print('k dist nodes')
    # bst.print_k_dist_nodes(bst.root, 2)
    # print('')
    # bst.spiral_level_order_traversal()
    # bst.spiral_level_order_traversal_using_queue_and_stack()
|
def fibonacci_rec(n):
    """Return the n-th Fibonacci number (F0 = 0, F1 = 1).

    Preserves the original's quirky contract: negative or non-integer
    input yields the STRING 'None' (not the None object), and
    integer-valued floats (e.g. 6.0) are accepted.
    """
    if n - int(n) != 0 or n < 0:
        return 'None'
    if n == 0:
        return 0
    if n == 1:
        return 1
    return int(fibonacci_rec(n - 1) + fibonacci_rec(n - 2))
def fibonacci(n):
    """Print the Fibonacci numbers F0..Fn, one per line.

    The original `print fibonacci_rec(m)` is Python 2-only syntax (a
    SyntaxError under Python 3); the parenthesized single-argument form
    prints identically under both interpreters.
    """
    m = 0
    while m <= n:
        print(fibonacci_rec(m))
        m += 1
|
#Program to define a class Employee, to read and display an Employee record
class Employee:
    """An employee record with salary components derived from basic pay.

    Derived figures (recomputed by calcSalary whenever Basic changes):
    HRA = 10% of basic, DA = 73% of basic, gross = basic + HRA + DA,
    tax = 30% of gross, net = gross - tax.
    """
    def __init__(self, Eno=0, Ename='', Basic=0):
        self.Empno = Eno
        self.Empname = Ename
        self.Basic = Basic
        self.calcSalary()

    # NOTE: readEmployee used to be defined twice with identical bodies;
    # the second definition silently replaced the first, so removing the
    # duplicate changes nothing observable.
    def readEmployee(self):
        """Read employee number, name and basic salary from stdin, then
        recompute the derived salary fields."""
        self.Empno = int(input("Enter Employee Number:"))
        self.Empname = input("Enter Name:")
        self.Basic = int(input("Enter Basic Salary:"))
        self.calcSalary()

    def calcSalary(self):
        """Recompute HRA, DA, gross, tax and net from self.Basic."""
        self.hra = self.Basic * 0.10
        self.da = self.Basic * 0.73
        self.gross = self.Basic + self.hra + self.da
        self.tax = 0.3 * self.gross
        self.net = self.gross - self.tax

    def displayEmployee(self):
        """Print every field of the record, one per line."""
        print("Empno:" + str(self.Empno), "Name:" + str(self.Empname), "Basic Salary:" + str(self.Basic), "HRA:" + str(self.hra), "DA:" + str(self.da), "Gross Salary:" + str(self.gross), "Income Tax:" + str(self.tax), "Net Salary:" + str(self.net), sep='\n')
# Demo: display a pre-filled record, then read a second one interactively
# from stdin and display it.
e1=Employee(101,"Anil",10000)
e1.displayEmployee()
e2=Employee()
e2.readEmployee()
e2.displayEmployee()
|
from connection import *
import csv
from outage import *
from time import sleep
import time_helper as timeHelper
def outageType(LANconnected, WANconnected):
    """Classify where the failure is: if the LAN gateway is still reachable
    the ISP link is at fault, otherwise the LAN itself is down.

    WANconnected is unused (callers only invoke this when something is
    already down) but kept for the existing call signature.
    """
    if LANconnected:
        return 'ISP'
    return 'LAN'
def buildOutage(outageType):
    # Thin factory for an Outage record.  NOTE: the parameter name shadows
    # the outageType() function defined above; harmless here but confusing.
    return Outage(outageType)
def closeCurrentOutage(outage):
    """Finalize and log the open outage, if any.

    Always returns None so the caller can reset its tracking variable in
    one assignment (`outage = closeCurrentOutage(outage)`).
    """
    if outage is not None:  # identity check; `!= None` would invoke __ne__
        finalizeOutage(outage)
    return None
def finalizeOutage(outage):
    # Stamp the outage's end time / duration, then persist it — but only if
    # it lasted longer than one second, filtering out sub-second blips.
    outage.finalize()
    if outage.totalTime > 1:
        logEvent(outage)
def logEvent(outage):
    """Append one CSV row describing the finished outage to outage_log.csv.

    Uses `with` so the file handle is closed even if a write fails; the
    local was previously named `csv`, shadowing the csv module imported at
    the top of this file.
    """
    data = buildLogData(outage)
    with open('outage_log.csv', 'a') as log_file:
        log_file.write(','.join(data))
        log_file.write("\n")
def buildLogData(outage):
    # One CSV row: failure point, start, end, and total seconds (all as
    # strings; timestamps formatted by the time_helper module).
    return [
        outage.serviceType,
        timeHelper.csvTime(outage.startTime),
        timeHelper.csvTime(outage.endTime),
        str(outage.totalTime)
    ]
# --- main monitoring loop (Python 2 source: uses print statements) ---
outage = None
WAN = Connection('8.8.8.8', 53)  # Google public DNS as the WAN probe
LAN = Connection('192.168.1.1', 53)  # default gateway as the LAN probe
print 'Connection monitoring service started'
while True:
    try:
        if LAN.isActive() and WAN.isActive():
            # Both probes reachable: close and log any open outage.
            outage = closeCurrentOutage(outage)
        else:
            # Something is down: classify it, open an outage only once
            # (`outage or ...` keeps the original record's start time).
            failurePoint = outageType(LAN.isActive(), WAN.isActive())
            outage = outage or buildOutage(failurePoint)
        sleep(1)
    except KeyboardInterrupt:
        print 'Connection monitoring service stopped'
        exit(0)
|
from contextlib import closing
from multiprocessing import Process
import asyncio
import time
from aiopipe import aiopipe
import uvloop
async def main(loop):
    """Parent side of a pipe round-trip benchmark.

    Spawns a child process connected through two aiopipe pairs (tx -> child,
    ry <- child), streams 100k identical lines plus a final 'bye', waits for
    each echo, and prints the total elapsed seconds.
    """
    rx, tx = aiopipe()
    ry, ty = aiopipe()
    with tx.send() as tx:
        proc = Process(target=child, args=(tx, ry))
        proc.start()
        rx = await rx.open(loop)
    n = time.time()  # start the clock after the child is up
    with ty.send() as ty:
        # NOTE(review): rx.open() above is passed `loop` but ty.open() is not —
        # confirm which signature this aiopipe version expects.
        ty = await ty.open()
        for msg in [*('why benz why porsche why lens' for i in range(100000)), 'bye']:
            ty.write('{}\n'.format(msg).encode())
            msg = await rx.readline()
            msg = msg[:-1].decode()
            # print('main', msg)
    print(time.time() - n)
    proc.join()
def child(tx, ry):
    """Child-process entry point: echo every line received on `ry` back
    over `tx`, stopping after echoing the line 'bye'."""
    loop = asyncio.new_event_loop()  # fresh loop: we are in a new process
    tx = loop.run_until_complete(tx.open(loop))
    async def handler():
        r = await ry.open(loop)
        with closing(tx):
            msg = 'hi'  # sentinel so the loop body runs at least once
            while msg != 'bye':
                msg = await r.readline()
                msg = msg[:-1].decode()
                # print('child:', msg)
                tx.write('{}\n'.format(msg).encode())
    loop.run_until_complete(handler())
# Install uvloop for faster event-loop throughput, then run the benchmark.
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
main_loop = asyncio.get_event_loop()
main_loop.run_until_complete(main(main_loop))
|
"""
Author: Weixing Zhang
Date: 2018
Name: Spatial join filter
"""
# modules
import os.path
import timeit
import functions as fn
# file path (Python 2 script: uses print statements)
root = r'C:\Users\downloads'
polygon_A_path = os.path.join(root, '2017_CT_Mansfield_TestPolygon_1.shp')
polygon_B_path = os.path.join(root, '2017_CT_Mansfield_TestPolygon_2.shp')
# __________________read data__________________
# Each shapefile yields the geometry objects plus a coordinate array.
shp_A, shp_A_array = fn.read_shp(polygon_A_path)
shp_B, shp_B_array = fn.read_shp(polygon_B_path)
# __________________Sort MBR Filter__________________
# Stage 1: prune pairs whose minimum bounding rectangles cannot overlap.
filter_index_xy = fn.GPU_serial_sorted_MBR_filter(shp_A_array,shp_B_array)
# __________________Common MBR Filter__________________
# Stage 2: split survivors into "within" (W) and "intersect" (I) candidate
# lists and collect the candidate edges of each polygon.
W_list, I_list, plyn_A_poten_edges, plyn_B_poten_edges = fn.GPU_common_MBR_filter(shp_A_array,shp_B_array,
                                                                                 shp_A,shp_B,filter_index_xy)
# __________________W LIST_______________________
# (1) Refinement for W_list via point-in-polygon tests
W_intersect_result, Need_intersect_test = fn.GPU_PnPtest(shp_A_array,shp_B_array,
                                                         shp_A,shp_B,W_list)
# (2) W edges leftover for intersect test
W_Need_intersect_reult = fn.GPU_W_list_EI_test(shp_A,shp_B,Need_intersect_test)
# __________________I LIST_______________________
# Edge-intersection refinement for the I candidates.
I_intersect_reult = fn.GPU_I_list_EI_test(shp_A,shp_B,I_list,plyn_A_poten_edges,plyn_B_poten_edges)
# __________________RESULT__________________
print "FINAL RESULT:"
print W_intersect_result+W_Need_intersect_reult+I_intersect_reult
|
from zstacklib.utils import shell
from distutils.version import LooseVersion
import json
__QEMU_IMG_VERSION = None
class CheckResult(object):
    """Parsed result of `qemu-img check --out json`.

    Attribute names mirror the JSON keys emitted by qemu-img
    (image-end-offset, total-clusters, check-errors, allocated-clusters,
    filename, format).
    """
    def __init__(self, offset, t_clusters, check_errors, a_clusters, filename, format):
        # `check_errors` was previously misspelled `check_erorrs`; the only
        # call site (get_check_result below) passes positionally, so the
        # rename is safe for existing callers.
        self.image_end_offset = offset
        self.total_clusters = t_clusters
        self.check_errors = check_errors
        self.allocated_clusters = a_clusters
        self.filename = filename
        self.format = format
def subcmd(subcmd):
    """Build a 'qemu-img <subcmd> [options] ' command prefix.

    Detects the installed qemu-img version once and caches it in the module
    global __QEMU_IMG_VERSION.  From 2.10.0 on, read-style subcommands get
    --force-share so they can inspect images already opened by a running VM.
    (NOTE: the parameter shadows this function's own name; recursion is not
    used, so it is harmless.)
    """
    global __QEMU_IMG_VERSION
    if __QEMU_IMG_VERSION is None:
        # Extract just the version number from `qemu-img --version` output.
        command = "qemu-img --version | grep 'qemu-img version' | cut -d ' ' -f 3 | cut -d '(' -f 1"
        __QEMU_IMG_VERSION = shell.call(command).strip('\t\r\n ,')
    options = ''
    if LooseVersion(__QEMU_IMG_VERSION) >= LooseVersion('2.10.0'):
        if subcmd in ['info', 'check', 'compare', 'convert', 'rebase']:
            options += ' --force-share '
    return 'qemu-img %s %s ' % (subcmd, options)
def get_check_result(path):
    """Run `qemu-img check --out json` on `path` and wrap the parsed JSON
    in a CheckResult.  Missing JSON keys become None via dict.get."""
    check_cmd = "%s --out json %s" % (subcmd('check'), path)
    result = json.loads(shell.call(check_cmd))
    return CheckResult(result.get("image-end-offset"), result.get("total-clusters"),
                       result.get("check-errors"), result.get("allocated-clusters"),
                       result.get("filename"), result.get("format"))
|
import gc
import os
import time
import win32api, win32con, win32gui
import sys
import win32com.client
class DesktopWindow(object):
    """Wraps the Win32 desktop window to sample on-screen pixel colours."""
    def __init__(self, *args, **kwargs):
        # Grab the desktop window handle and its device context once;
        # both are reused for every pixel read.
        self.window_id = win32gui.GetDesktopWindow()
        self.window_dc = win32gui.GetWindowDC(self.window_id)
        pass
    def get_pixel_color(self, i_x, i_y):
        """Return the (red, green, blue) byte tuple of the pixel at (i_x, i_y).

        GetPixel returns a COLORREF laid out as 0x00BBGGRR, so the low byte
        is red and the high byte is blue.
        """
        long_colour = win32gui.GetPixel(self.window_dc, i_x, i_y)
        i_colour = int(long_colour)
        return (i_colour & 0xff, (i_colour >> 8) & 0xff,
                (i_colour >> 16) & 0xff)
"""Packaging tool for the Yeti python bindings and CLI utility."""
from setuptools import setup
from setuptools import find_packages

# Read README.md for the long description shown on PyPI.
# (The module docstring and the two setuptools imports were previously
# duplicated verbatim, and this note was a stray bare string literal
# rather than a comment; both cleaned up with no behavioural change.)
with open("README.md", "r", encoding="utf-8") as readme_fp:
    long_description = readme_fp.read()

setup(name='pyeti-python3',
      version="1.0",
      description='Revival version of pyeti, the API for Yeti Threat Intel Platform.',
      long_description=long_description,
      long_description_content_type='text/markdown',
      classifiers=[
          'License :: OSI Approved :: Apache Software License',
          'Development Status :: 5 - Production/Stable',
          'Programming Language :: Python :: 3',
      ],
      keywords='yeti threat intel api',
      url='https://github.com/yeti-platform/pyeti',
      author='Yeti core developers | packaged by Thomas Roccia @fr0gger_',
      license='Apache',
      packages=find_packages(),
      install_requires=[
          'requests',
          'tqdm',
      ],
      test_suite='nose.collector',
      tests_require=[
          'nose',
          'nose-cover3'
      ],
      entry_points={
          'console_scripts': ['yeticli=pyeti.scripts.cli:main'],
      },
      include_package_data=True,
      python_requires='>=3.6',
      zip_safe=False)
|
''' Basic table-printing function
Given a list of the headers and a list of lists of row values,
it will print a human-readable table.
Toggle extra_padding to true to add an extra row of the spacer
character between table entries in order to improve readability
'''
def print_table(headers, rows, extra_padding=False, spacer=' '):
    """Print a human-readable table and return the row template string.

    The original used Python 2 `print` statements, which are a SyntaxError
    under Python 3; the parenthesized single-argument form below prints
    identically under both interpreters.

    :param headers: list of column headers
    :param rows: list of row value lists (one value per column)
    :param extra_padding: if True, print a spacer line after every row
    :param spacer: character used for the optional spacer lines
    :return: the per-row format template (as the original did)
    """
    # Column width = the longest cell (or header) in that column.
    lengths = []
    for i, header in enumerate(headers):
        longest = len(str(header))
        for r in rows:
            if len(str(r[i])) > longest:
                longest = len(str(r[i]))
        lengths.append(longest)
    # Make the template for each row in the table: one ' {: <N} |' per column.
    template = ' {{: <{}}} |' * len(headers)
    template = template.format(*lengths)
    # Format and print the header, then a dashed separator.
    heading = template.format(*headers)
    print(heading)
    print('-' * len(heading))
    # Print out the rows.
    for row in rows:
        print(template.format(*row))
        if extra_padding:  # optional spacer row for readability
            print(spacer * len(heading))
    return template
|
from blocking.block import Block
from utils import functions as F
def extract_blocks(input_file, k_base):
    """Build the block list for every record read from `input_file`.

    :param input_file: path handed to F.read_file
    :param k_base: knowledge base forwarded to build_blocks
    :return: list with one list of Blocks per input record
    """
    # Stream records straight from the reader; the old code first copied
    # them into an intermediate list for no benefit.
    return [build_blocks(raw_terms, k_base) for raw_terms in F.read_file(input_file)]
def build_blocks(record, k_base):
    # Split a comma-separated record and wrap each segment in a Block that
    # keeps both the normalized and the raw text.  `k_base` is currently
    # unused here (the co-occurrence variant below is commented out) but
    # kept for the caller's signature.
    segments = record.split(",")
    blocks_list = []
    for b in segments:
        blocks_list.append(Block(F.normalize_str(b), b))
    return blocks_list
#
# def extract_blocks(input_file, k_base):
# print("Extracting blocks...")
# r_input = [r for r in F.read_file(input_file)]
# normalized_input = [F.normalize_str(v) for v in r_input]
# blocks = []
# for raw_terms, record in zip(r_input, normalized_input):
# blocks.append(build_blocks(record.split(), raw_terms.split(), k_base))
# return blocks
#
#
# def build_blocks(terms, raw_terms, k_base):
# '''Build a set of blocks for a string'''
# blocks_list = []
# blocks_list.append(Block(terms[0], raw_terms[0]))
# i = 0
# j = 1
# while j < len(terms):
# co_occur = False
# for entry in k_base.registers:
# if terms[j-1]+' '+terms[j] in entry:
# co_occur = True
# break
# if co_occur == False:
# blocks_list.append(Block('', ''))
# i += 1
# if blocks_list[i].value in '':
# blocks_list[i].value += terms[j]
# blocks_list[i].raw_value += raw_terms[j]
# else:
# blocks_list[i].value += ' ' + terms[j]
# blocks_list[i].raw_value += ' ' + raw_terms[j]
# j += 1
# return blocks_list
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from reclab.model_selection import RandomizedRecommenderSearchCV, \
RecommenderGridSearchCV, train_test_split, KFold
from reclab.model_selection._search import _CVWrapper
from reclab.collab import AlternatingLeastSquares, \
NMSAlternatingLeastSquares
from reclab.datasets import load_lastfm
from reclab._config import RECLAB_CACHE, set_blas_singlethread
from sklearn.externals import joblib
from scipy.stats import randint, uniform
import os
import shutil
import warnings
# set this to avoid the MKL BLAS warning
# set_blas_singlethread()
# Module-level fixtures shared by every test below: the last.fm ratings
# matrix, split once into train/test with a fixed seed.
lastfm = load_lastfm(cache=True, as_sparse=True)
train, test = train_test_split(lastfm.ratings, random_state=42)
class TestRandomizedSearch:
    """End-to-end tests for the recommender search estimators (grid,
    randomized with CV, and randomized with an explicit validation set)."""

    def _search_fit_assert(self, search, val=None):
        """Shared assertion helper: fit, score, recommend, round-trip
        through joblib, and clean up the reclab cache."""
        # Fit it
        search.fit(train, validation_set=val)
        # Show we can score it
        search.score(test)
        # Produce recommendations
        recs, scores = search.recommend_for_user(0, test, n=5,
                                                 return_scores=True)
        assert len(recs) == len(scores) == 5, (recs, scores)
        # Serialize it and show we can load and produce recommendations still
        pkl_loc = "search.pkl"
        try:
            joblib.dump(search, pkl_loc, compress=3)
            joblib.load(pkl_loc).recommend_for_user(
                0, test, n=5, return_scores=True)
        finally:
            os.unlink(pkl_loc)
            if os.path.exists(RECLAB_CACHE):
                shutil.rmtree(RECLAB_CACHE)
    def test_grid_cv_fit_recommend(self):
        """Exhaustive grid search over a deliberately tiny grid."""
        # Create the estimator
        clf = NMSAlternatingLeastSquares(random_state=42, use_cg=True,
                                         iterations=5, factors=15)
        # These are the hyper parameters we'll use. Don't use many for
        # the grid search since it will fit every combination...
        hyper = {
            'factors': [5, 6]
        }
        # Make our cv
        cv = KFold(n_splits=2, random_state=1, shuffle=True)
        search = RecommenderGridSearchCV(
            estimator=clf, cv=cv, param_grid=hyper,
            n_jobs=1, verbose=1)
        self._search_fit_assert(search)
    def test_random_cv_fit_recommend(self):
        """Test a simple fit"""
        # Create the estimator
        clf = AlternatingLeastSquares(random_state=42, use_cg=True,
                                      iterations=5, factors=15)
        # These are the hyper parameters we'll use
        hyper = {
            'factors': randint(5, 6),
            'regularization': uniform(0.01, 0.05)
        }
        # Make our cv
        cv = KFold(n_splits=2, random_state=1, shuffle=True)
        search = RandomizedRecommenderSearchCV(
            estimator=clf, cv=cv, random_state=42,
            param_distributions=hyper, n_jobs=1,
            n_iter=2, recommend_params={"filter_previously_rated": True},
            verbose=1, scoring='ndcg')
        # While we're fitting, assert we get a warning about the
        # "filter_previously_rated" key in the fit params...
        with warnings.catch_warnings(record=True) as w:
            self._search_fit_assert(search)  # should warn in fit
        # Verify...
        assert len(w)
        assert any(["filter_previously_rated" in str(warn.message)
                    for warn in w])
    def test_random_val_fit(self):
        """Test a simple fit"""
        # Create the estimator
        clf = AlternatingLeastSquares(random_state=42, use_cg=True,
                                      iterations=5, factors=10)
        # These are the hyper parameters we'll use
        hyper = {
            'factors': randint(5, 6),
            'regularization': uniform(0.01, 0.05)
        }
        # Create search with no CV and use validation set instead
        search = RandomizedRecommenderSearchCV(
            estimator=clf, cv=None, random_state=42,
            param_distributions=hyper, n_jobs=1,
            n_iter=2, verbose=1)
        self._search_fit_assert(search, val=test)
def test_cv_wrapper():
    """With cv=None the wrapper must yield exactly one (train, validation)
    split, passing both sets through by identity."""
    # Test that the CV wrapper produces exactly what we think it does...
    wrapper = _CVWrapper(cv=None, validation=test)
    split = wrapper.split(train)
    # The split should be a list of a single tuple
    assert isinstance(split, list), split
    assert len(split) == 1, split
    # The tuple element should be len 2
    tup = split[0]
    assert len(tup) == 2, tup
    assert tup[0] is train
    assert tup[1] is test
|
import requests, json
from codebeamer.mixins.project import ProjectMixin
class Codebeamer(ProjectMixin):
    """Thin JSON/REST client for the Codebeamer API.

    get/put/post share one private helper; each returns the decoded JSON
    body on the expected status code, or None (after printing a warning)
    on anything else — identical behaviour to the previous three
    copy-pasted method bodies.
    """
    def __init__(self, url, login, password):
        self.base_url = url
        self.auth = (login, password)

    def _request(self, method, uri, expected_status, **kwargs):
        """Issue one HTTP request; decode JSON on success, else warn and
        return None.  `method` is a requests function name ('get'/'put'/'post')."""
        url = self.base_url + uri
        res = getattr(requests, method)(url, auth=self.auth, verify=True, **kwargs)
        if res.status_code == expected_status:
            return json.loads(res.content)
        print(f"Warning : {method.upper()} error ({url})")
        return None

    def get(self, uri):
        return self._request('get', uri, 200)

    def put(self, uri, data):
        return self._request('put', uri, 200, json=data)

    def post(self, uri, data):
        # POST success is 201 Created, unlike GET/PUT's 200.
        return self._request('post', uri, 201, json=data)
|
""" Tests for the BaseGame class. """
import pytest
from matching import BaseGame
class DummyGame(BaseGame):
    """Minimal concrete subclass of BaseGame: every abstract hook raises
    NotImplementedError so the base-class behaviour can be exercised."""
    def solve(self):
        raise NotImplementedError()
    def check_stability(self):
        raise NotImplementedError()
    def check_validity(self):
        raise NotImplementedError()
def test_init():
    """ Test the default parameters makes a valid instance of BaseGame. """
    match = DummyGame()
    assert isinstance(match, BaseGame)
    # Both result attributes start unset until solve() runs.
    assert match.matching is None
    assert match.blocking_pairs is None
def test_no_solve():
    """ Verify BaseGame raises a NotImplementedError when calling the `solve`
    method. """
    with pytest.raises(NotImplementedError):
        match = DummyGame()
        match.solve()
def test_no_check_stability():
    """ Verify BaseGame raises a NotImplementedError when calling the
    `check_stability` method. """
    with pytest.raises(NotImplementedError):
        match = DummyGame()
        match.check_stability()
def test_no_check_validity():
    """ Verify BaseGame raises a NotImplementedError when calling the
    `check_validity` method. """
    with pytest.raises(NotImplementedError):
        match = DummyGame()
        match.check_validity()
|
from collections import OrderedDict
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import (
UserCreationForm,
AuthenticationForm,
UserChangeForm,
PasswordChangeForm
)
class LoginForm(AuthenticationForm):
    """Login form: stock Django authentication with material-style widgets
    and client-side validation messages on both fields."""
    username = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'data-msg': 'Please enter your username',
                'class': 'input-material',
            }
        )
    )
    password = forms.CharField(
        label='Password',
        widget=forms.PasswordInput(
            attrs={
                'data-msg': 'Please enter your password',
                'class': 'input-material',
            }
        )
    )
class RegistrationForm(UserCreationForm):
    """Sign-up form for the builtin User model.

    Extends UserCreationForm with required first/last name and email
    fields, all styled with material widgets; save() copies the extra
    cleaned fields onto the user before (optionally) persisting.
    """
    first_name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'data-msg': 'Please enter your first name',
                'class': 'input-material',
            }
        )
    )
    last_name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'data-msg': 'Please enter your last name',
                'class': 'input-material',
            }
        )
    )
    username = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'data-msg': 'Please enter your username',
                'class': 'input-material',
            }
        )
    )
    password1 = forms.CharField(
        label='Password',
        widget=forms.PasswordInput(
            attrs={
                'data-msg': 'Please enter your password',
                'class': 'input-material',
            }
        )
    )
    password2 = forms.CharField(
        label='Confirm Password',
        widget=forms.PasswordInput(
            attrs={
                'data-msg': 'Please enter your password',
                'class': 'input-material',
            }
        )
    )
    email = forms.EmailField(
        required=True,
        widget=forms.EmailInput(
            attrs={
                'data-msg': 'Please enter a valid email address',
                'class': 'input-material',
            }
        )
    )
    class Meta:
        model = User
        fields = (
            'username',
            'first_name',
            'last_name',
            'email',
            'password1',
            'password2',
        )
    def save(self, commit=True):
        """Copy the extra profile fields onto the user created by the
        parent form; persist only when commit=True."""
        user = super(RegistrationForm, self).save(commit=False)
        user.first_name = self.cleaned_data['first_name']
        user.last_name = self.cleaned_data['last_name']
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
        return user
class UpdateProfileForm(UserChangeForm):
    """Profile-edit form: the standard UserChangeForm restyled with
    bootstrap `form-control` widgets; email and names are required."""
    username = forms.CharField(
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    email = forms.EmailField(
        required=True,
        widget=forms.EmailInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    first_name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    last_name = forms.CharField(
        required=True,
        widget=forms.TextInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    class Meta:
        model = User
        fields = (
            'username',
            'email',
            'first_name',
            'last_name',
            'password',
        )
class ChangePasswordForm(PasswordChangeForm):
    """Password-change form: Django's PasswordChangeForm with bootstrap
    `form-control` styling and custom field labels."""
    old_password = forms.CharField(
        label='Current Password',
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    new_password1 = forms.CharField(
        label='New Password',
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
    new_password2 = forms.CharField(
        label='Confirm New Password',
        widget=forms.PasswordInput(
            attrs={
                'class': 'form-control',
            }
        )
    )
import asyncio
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.types import message
from aiogram.utils import executor
from main import Currency
#from config import TOKEN 'NEED TO MAKE YOUR OWN config FILE WITH TOKEN'
from sqliter import SQLighter
async def schedruled(result):
    # NOTE(review): `message` here is the aiogram.types.message MODULE
    # imported at the top of this file, not an incoming Message object, so
    # `message.from_user.id` raises AttributeError at runtime — the target
    # chat id needs to be supplied by the caller.  Confirm intent before use.
    await bot.send_message(message.from_user.id,result)
# Initialize the bot and its dispatcher.
# NOTE(review): TOKEN is referenced below but its import is commented out
# above — the module fails with NameError until a config providing TOKEN
# exists.
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
# Initialize the SQLite wrapper (the 'db.db' database file must exist).
db = SQLighter('db.db') # 'NEED TO MAKE DATA BASE'
# /start command: greet the user and point them at /help.
@dp.message_handler(commands=['start'])
async def process_start_command(message: types.Message):
    await message.reply("Привет!\nЭтот телеграм бот нужен для отслеживания цен на валюты, криптовалюты, акции и.т.д\nДля связи со мной в случаи сбоев напишите команду /help")
# /help command: reply with the maintainer's contact link.
@dp.message_handler(commands=['help'])
async def process_help_command(message: types.Message):
    await message.reply("ссылка на мой телеграм для связи: https://t.me/ndimqa")
# /subscribe: register the user, or re-enable an existing subscription.
@dp.message_handler(commands=['subscribe'])
async def subscribe(message: types.Message):
    if(not db.subscriber_exist(message.from_user.id)):
        # Unknown user: create the record (active by default)
        db.add_subscriber(message.from_user.id)
    else:
        # Known user: flip the subscription status back on
        db.update_subscription(message.from_user.id, True)
    await message.answer("Вы успешно подписаны.")
# /unsubscribe: disable the subscription (creating an inactive record
# for users we have never seen).
@dp.message_handler(commands=['unsubscribe'])
async def unsubscribe(message: types.message):
    if(not db.subscriber_exist(message.from_user.id)):
        # Unknown user: add them with an inactive subscription
        db.add_subscriber(message.from_user.id, False)
    else:
        # Known subscriber: switch the status off
        db.update_subscription(message.from_user.id, False)
    await message.answer("Вы успешно отписаны.")
# "Валюты" text message: offer inline buttons for the trackable currencies.
@dp.message_handler(text="Валюты")
async def cmd_random(message: types.Message):
    keyboard = types.InlineKeyboardMarkup()
    keyboard.add(types.InlineKeyboardButton(text="XRP", callback_data="XRP"))
    keyboard.add(types.InlineKeyboardButton(text="Etherium", callback_data="Etherium"))
    await message.answer("Нажмите на кнопку, чтобы бот начал отслеживание валюты", reply_markup=keyboard)
@dp.callback_query_handler(text="XRP")
async def send_answer_value(call: types.CallbackQuery):
    # Start tracking XRP and push every new result to the user.
    # NOTE(review): this loop never exits and never awaits between iterations
    # (check_currency appears synchronous), so it blocks the event loop —
    # no other handler can run once tracking starts. Consider a background
    # task with `await asyncio.sleep(...)` instead.
    await call.message.answer('запущено отслеживание XRP')
    currency = Currency()
    while True:
        if currency.get_result():
            try:
                answer = currency.get_answer()
                await schedruled(answer)
            except AttributeError:
                pass
        currency.check_currency()
if __name__ == '__main__':
    # Start long polling; blocks until the process is stopped.
    executor.start_polling(dp)
from logging import getLogger
from typing import AnyStr, Callable, Dict, List, Optional, Tuple, Union
from .api_resources.mutations import *
from .api_resources.payload import (
CreateEtchPacketPayload,
FillPDFPayload,
GeneratePDFPayload,
)
from .api_resources.requests import GraphqlRequest, PlainRequest, RestRequest
from .http import HTTPClient
logger = getLogger(__name__)
def _get_return(res: Dict, get_data: Callable[[Dict], Union[Dict, List]]):
"""Process response and get data from path if provided."""
_res = res
if "response" in res and "headers" in res:
_res = res["response"]
return {"response": get_data(_res), "headers": res["headers"]}
return get_data(_res)
class Anvil:
    """Main Anvil API class.

    Handles all GraphQL and REST queries.

    Usage:
        >> anvil = Anvil(api_key="my_key")
        >> payload = {}
        >> pdf_data = anvil.fill_pdf("the_template_id", payload)
    """

    def __init__(self, api_key=None, environment='dev'):
        # All requests share one HTTP client bound to the key/environment.
        self.client = HTTPClient(api_key=api_key, environment=environment)

    def query(self, query: str, variables: Optional[str] = None, **kwargs):
        """POST a raw GraphQL query string with optional variables."""
        gql = GraphqlRequest(client=self.client)
        return gql.post(query, variables=variables, **kwargs)

    def mutate(self, query: BaseQuery, variables: dict, **kwargs):
        """POST the mutation described by a BaseQuery with its variables."""
        gql = GraphqlRequest(client=self.client)
        return gql.post(query.get_mutation(), variables, **kwargs)

    def request_rest(self, options: Optional[dict] = None):
        """Return a low-level REST request helper bound to this client."""
        api = RestRequest(self.client, options=options)
        return api

    def fill_pdf(
        self, template_id: str, payload: Union[dict, AnyStr, FillPDFPayload], **kwargs
    ):
        """Fill an existing template with provided payload data.

        Use the casts graphql query to get a list of available templates you
        can use for this request.

        :param template_id: eid of an existing template/cast.
        :type template_id: str
        :param payload: payload in the form of a dict or JSON data
        :type payload: dict|str
        :raises ValueError: if the payload is not a dict/JSON string/FillPDFPayload
            or fails validation.
        """
        try:
            if isinstance(payload, dict):
                data = FillPDFPayload(**payload)
            elif isinstance(payload, str):
                data = FillPDFPayload.parse_raw(
                    payload, content_type="application/json"
                )
            elif isinstance(payload, FillPDFPayload):
                data = payload
            else:
                raise ValueError("`payload` must be a valid JSON string or a dict")
        except KeyError as e:
            logger.exception(e)
            raise ValueError(
                "`payload` validation failed. Please make sure all required "
                "fields are set. "
            ) from e
        api = RestRequest(client=self.client)
        return api.post(
            f"fill/{template_id}.pdf",
            data.dict(by_alias=True, exclude_none=True) if data else {},
            **kwargs,
        )

    def generate_pdf(self, payload: Union[AnyStr, Dict], **kwargs):
        """Generate a new PDF from a dict or JSON payload.

        :raises ValueError: if the payload is empty or not a dict/JSON string.
        """
        if not payload:
            raise ValueError("`payload` must be a valid JSON string or a dict")
        if isinstance(payload, dict):
            data = GeneratePDFPayload(**payload)
        elif isinstance(payload, str):
            data = GeneratePDFPayload.parse_raw(
                payload, content_type="application/json"
            )
        else:
            raise ValueError("`payload` must be a valid JSON string or a dict")
        # Any data errors would come from here..
        api = RestRequest(client=self.client)
        return api.post(
            "generate-pdf", data=data.dict(by_alias=True, exclude_none=True), **kwargs
        )

    def get_cast(self, eid: str, fields=None, **kwargs):
        """Fetch a single cast (template) by eid, returning `fields`."""
        if not fields:
            # Use default fields
            fields = ['eid', 'title', 'fieldInfo']
        res = self.query(
            f"""{{
              cast(eid: "{eid}") {{
                {" ".join(fields)}
              }}
            }}""",
            **kwargs,
        )

        def get_data(r):
            return r["data"]["cast"]

        return _get_return(res, get_data=get_data)

    def get_casts(
        self, fields=None, show_all=False, **kwargs
    ) -> Union[List, Tuple[List, Dict]]:
        """List casts across all of the user's organizations.

        Only templates are returned unless show_all=True.
        """
        if not fields:
            # Use default fields
            fields = ["eid", "title", "fieldInfo"]
        cast_args = "" if show_all else "(isTemplate: true)"
        res = self.query(
            f"""{{
              currentUser {{
                organizations {{
                  casts {cast_args} {{
                    {" ".join(fields)}
                  }}
                }}
              }}
            }}""",
            **kwargs,
        )

        def get_data(r):
            # Flatten the per-organization cast lists into one list.
            orgs = r["data"]["currentUser"]["organizations"]
            return [item for org in orgs for item in org["casts"]]

        return _get_return(res, get_data=get_data)

    def get_current_user(self, **kwargs):
        """Fetch the authenticated user with their organizations and casts."""
        res = self.query(
            """{
              currentUser {
                name
                email
                eid
                role
                organizations {
                  eid
                  name
                  slug
                  casts {
                    eid
                    name
                  }
                }
              }
            }
            """,
            **kwargs,
        )
        return _get_return(res, get_data=lambda r: r["data"]["currentUser"])

    def get_welds(self, **kwargs) -> Union[List, Tuple[List, Dict]]:
        """List welds across all of the user's organizations."""
        res = self.query(
            """{
              currentUser {
                organizations {
                  welds {
                    eid
                    slug
                    title
                  }
                }
              }
            }""",
            **kwargs,
        )

        def get_data(r):
            # Flatten the per-organization weld lists into one list.
            orgs = r["data"]["currentUser"]["organizations"]
            return [item for org in orgs for item in org["welds"]]

        return _get_return(res, get_data=get_data)

    def create_etch_packet(
        self,
        payload: Optional[
            Union[
                dict,
                CreateEtchPacketPayload,
                CreateEtchPacket,
                AnyStr,
            ]
        ] = None,
        json=None,
        **kwargs,
    ):
        """Create etch packet via a graphql mutation.

        Exactly one of `payload` or `json` must be provided; `json` takes
        precedence and is parsed into a CreateEtchPacketPayload.

        :raises TypeError: if neither `payload` nor `json` is given.
        :raises ValueError: if `payload` has an unsupported type.
        """
        # Create an etch packet payload instance excluding signers and files
        # (if any). We'll need to add those separately. below.
        if not any([payload, json]):
            raise TypeError('One of the arguments `payload` or `json` must exist')
        if json:
            payload = CreateEtchPacketPayload.parse_raw(
                json, content_type="application/json"
            )
        if isinstance(payload, dict):
            mutation = CreateEtchPacket.create_from_dict(payload)
        elif isinstance(payload, CreateEtchPacketPayload):
            mutation = CreateEtchPacket(payload=payload)
        elif isinstance(payload, CreateEtchPacket):
            mutation = payload
        else:
            raise ValueError(
                "`payload` must be a valid CreateEtchPacket instance or dict"
            )
        return self.mutate(
            mutation,
            variables=mutation.create_payload().dict(by_alias=True, exclude_none=True),
            **kwargs,
        )

    def generate_etch_signing_url(self, signer_eid: str, client_user_id: str, **kwargs):
        """Generate a signing URL for a given user."""
        mutation = GenerateEtchSigningURL(
            signer_eid=signer_eid,
            client_user_id=client_user_id,
        )
        payload = mutation.create_payload()
        return self.mutate(mutation, variables=payload.dict(by_alias=True), **kwargs)

    def download_documents(self, document_group_eid: str, **kwargs):
        """Retrieve all completed documents in zip form."""
        api = PlainRequest(client=self.client)
        return api.get(f"document-group/{document_group_eid}.zip", **kwargs)
|
# Read a string and move every '0' character to the end, preserving the
# relative order of the non-zero characters.
# NOTE(review): Python 2 syntax (raw_input, print statement) — will not run
# under Python 3.
lista=raw_input("gemise tin lista : ")
pinakas=[]
tlista=""
k=0  # count of '0' characters seen
for i in range(len(lista)):
    if (lista[i]!="0"):
        pinakas.append(lista[i])
    else:
        k+=1
# append the counted zeros at the end
for i in range(k):
    pinakas.append("0")
# join the characters back into a single string
for i in range(len(pinakas)):
    tlista+=pinakas[i]
print tlista
|
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BrowseFavoriteTests(StaticLiveServerTestCase):
    """ Tests browsing inside favorite app """

    # Pre-loaded DB state: a user account, its favorites, and substitutes.
    fixtures = [
        "account_fixture.json",
        "favorite_fixture.json",
        "substitutes_fixture.json"
    ]

    @classmethod
    def setUpClass(cls):
        # One Firefox session shared by the whole class (faster than per-test).
        super().setUpClass()
        cls.selenium = WebDriver()
        cls.selenium.implicitly_wait(10)

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super().tearDownClass()

    def test_favorite_navigation(self):
        """ tests for navigation in product """
        # NOTE(review): uses the find_element(s)_by_* helpers, which were
        # removed in Selenium 4 — this test requires Selenium 3.x.
        # favorites template
        #connect to account
        self.selenium.get(self.live_server_url)
        login_link = self.selenium.find_element_by_css_selector("#visitor_gui a")
        login_link.click()
        wait = WebDriverWait(self.selenium, 10)
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "login_side")
            )
        )
        # login form
        self.selenium.find_element_by_id("id_username").send_keys("grossebouffe")
        self.selenium.find_element_by_id("id_password").send_keys("azertyui")
        submit = self.selenium.find_element_by_css_selector("form button")
        submit.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "member_gui")
            )
        )
        account_links = self.selenium.find_elements_by_css_selector("#member_gui a")
        account_link = account_links[1]
        account_link.click()
        # favorites list template
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "favorites_page")
            )
        )
        title = self.selenium.find_element_by_tag_name("h1").text
        assert "MES PRODUITS" == title
        favorites = self.selenium.find_elements_by_class_name("card")
        assert len(favorites) == 2
        # favorite display with template substitutes_list
        favorite_links = self.selenium.find_elements_by_css_selector(".card a")
        first_favorite_link = favorite_links[1]
        first_favorite_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.CLASS_NAME, "save_prod")
            )
        )
        substitutes_saved = self.selenium.find_elements_by_class_name("save_prod")
        assert len(substitutes_saved) == 3
        # delete favorite (confirmed through a JS alert)
        save_check_buttons = self.selenium.find_elements_by_class_name("save_prod")
        save_check_buttons[0].click()
        popup = self.selenium.switch_to.alert
        popup.accept()
        # return to index
        search_button = self.selenium.find_element_by_css_selector("#topNavBar .col-md-2 a")
        search_button.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "search")
            )
        )
        # return favorites list template
        account_links = self.selenium.find_elements_by_css_selector("#member_gui a")
        account_link = account_links[1]
        account_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "favorites_page")
            )
        )
        # return favorite display with template substitutes_list
        favorite_links = self.selenium.find_elements_by_css_selector(".card a")
        first_favorite_link = favorite_links[1]
        first_favorite_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.CLASS_NAME, "save_prod")
            )
        )
        # one substitute fewer after the deletion above
        substitutes_saved = self.selenium.find_elements_by_class_name("save_prod")
        assert len(substitutes_saved) == 2
        # delete all substitutes for this product
        save_check_buttons = self.selenium.find_elements_by_class_name("save_prod")
        save_check_buttons[0].click()
        popup = self.selenium.switch_to.alert
        popup.accept()
        save_check_buttons[1].click()
        popup = self.selenium.switch_to.alert
        popup.accept()
        # return favorites list template
        account_links = self.selenium.find_elements_by_css_selector("#member_gui a")
        account_link = account_links[1]
        account_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "favorites_page")
            )
        )
        favorites = self.selenium.find_elements_by_class_name("card")
        assert len(favorites) == 1
        # return to index
        search_button = self.selenium.find_element_by_css_selector("#topNavBar .col-md-2 a")
        search_button.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "search")
            )
        )
        # index form submit (search)
        input_search = self.selenium.find_element_by_id("input_search")
        input_search.send_keys("nutella")
        self.selenium.find_element_by_id("submit").click()
        # wait the response
        wait.until(
            EC.presence_of_element_located(
                (By.CLASS_NAME, "save_prod")
            )
        )
        # save new favorite for the same product
        save_check_buttons = self.selenium.find_elements_by_class_name("save_prod")
        save_check_buttons[3].click()
        save_check_buttons[5].click()
        # return to favorites list
        account_links = self.selenium.find_elements_by_css_selector("#member_gui a")
        account_link = account_links[1]
        account_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.ID, "favorites_page")
            )
        )
        # return to favorite
        favorite_links = self.selenium.find_elements_by_css_selector(".card a")
        first_favorite_link = favorite_links[1]
        first_favorite_link.click()
        wait.until(
            EC.presence_of_element_located(
                (By.CLASS_NAME, "portfolio-box")
            )
        )
        substitutes_saved = self.selenium.find_elements_by_class_name("portfolio-box")
        assert len(substitutes_saved) == 2
|
from django.test import TestCase
from email_registration.models import Email
from email_registration.forms import EmailForm
from email_registration.views import confirm_submission
# Create your tests here.
class EmailRegistrationTests(TestCase):
    # FIX: renamed from `no_duplicates_allowed` — without the `test_` prefix
    # the Django/unittest runner never discovered or executed this test.
    # FIX: dropped `self.client = Client()` — `Client` was never imported
    # (NameError at runtime) and django.test.TestCase already provides
    # `self.client`.
    def test_no_duplicates_allowed(self):
        """Posting the same email twice must create only one Email row."""
        e = Email(email='duplicate1@gmail.com')
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/email_registration/confirm_submission/', {'email': e})
        self.assertEqual(response.status_code, 302)
        response = self.client.post('/email_registration/confirm_submission/', {'email': e})
        self.assertEqual(response.status_code, 302)
        email_list = Email.objects.all()
        self.assertEqual(len(email_list), 1)
|
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
def init_browser():
    """Launch a visible Chrome session driven by a local chromedriver."""
    # @NOTE: Replace "chromedriver" with your actual path to the chromedriver
    return Browser("chrome", executable_path="chromedriver", headless=False)
### NASA Mars News
def scrape_news():
    """Scrape the newest NASA Mars news item: title, teaser, and date."""
    browser = init_browser()
    news = {}
    browser.visit('https://mars.nasa.gov/news/')
    soup = BeautifulSoup(browser.html, "html.parser")
    # First slide in the list is the most recent story.
    latest = soup.find_all('li', class_='slide')[0]
    headline_link = latest.find('div', class_='content_title').find('a')
    news["title"] = headline_link.text.strip()
    news["article"] = latest.find('div', class_='article_teaser_body').text.strip()
    news["date"] = latest.find('div', class_="list_date").get_text()
    # Close the browser after scraping
    browser.quit()
    return news
### Mars Weather
def scrape_weather():
    """Scrape the latest Mars weather report tweet text."""
    browser = init_browser()
    browser.visit('https://twitter.com/marswxreport?lang=en')
    soup = BeautifulSoup(browser.html, "html.parser")
    # Tweets live in an ordered list; the first entry is the newest.
    tweets = soup.find('ol', id='stream-items-id').find_all('li')
    latest_text = tweets[0].find('p').text.strip()
    # Close the browser after scraping
    browser.quit()
    return {"weather": latest_text}
#Mars Facts
def scrape_facts():
    """Read the Mars facts table and return it rendered as an HTML table."""
    facts_df = pd.read_html('http://space-facts.com/mars/')[0]
    facts_df.columns = ['Mars Fact', 'Value']
    print(facts_df)
    rendered = facts_df.to_html(index=False, header=None, classes='table table-striped')
    return {"dataframe": rendered}
# JPL Mars Space Images - Featured Image
def scrape_image():
    """Scrape the absolute URL of JPL's current featured Mars image."""
    browser = init_browser()
    browser.visit('https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars')
    soup = BeautifulSoup(browser.html, "html.parser")
    slides = soup.find('ul', class_='articles').find_all('li', class_='slide')
    anchor = slides[0].find('a', class_='fancybox')
    relative_href = anchor.get('data-fancybox-href')
    # Close the browser after scraping
    browser.quit()
    return {"image": 'https://www.jpl.nasa.gov' + relative_href}
# Mars Hemisphere
def scrape_hemisphere():
    """Scrape Mars gallery image URLs from nasa.gov.

    Returns a dict with keys 'image0' and 'image2' (shape kept for
    backward compatibility with existing callers).
    """
    browser = init_browser()
    hemisphere_url = 'https://www.nasa.gov/mission_pages/mars/images/index.html'
    base_url = 'https://www.nasa.gov'
    browser.visit(hemisphere_url)
    soup = BeautifulSoup(browser.html, 'html.parser')
    parent_div = soup.find('div', id='gallery-list')
    items = parent_div.find_all('div', class_="ember-view")
    images = [base_url + item.find('img').get('src') for item in items]
    # FIX: the browser was never closed here — every other scraper in this
    # module quits it; without this, each call leaked a Chrome process.
    browser.quit()
    images_dict = {}
    # NOTE(review): the original reassigned 'image0'/'image2' three times
    # each, so only the final pair (indices 5 and 6) survived. That final
    # result is preserved here — confirm whether all six images were
    # actually intended to be returned.
    images_dict['image0'] = images[5]
    images_dict['image2'] = images[6]
    return images_dict
|
# from product.models import User
# import csv
# reader=csv.reader(open("arts.csv","r"));
# header=reader.next();
# line=reader.next();
# obj=User.objects.create();
# obj.validate_unique();
# for line in reader:
# User.objects.create(userid=line[3],username=line[4])
# #obj.userid=line[3];
# #obj.username=line[4];
# #obj.save();
# line=reader.next();
# from product.models import User
from TaSM_site.models import User
import csv
# Load the reviews CSV; the first row is the column header.
reader = csv.reader(open("arts.csv", "r"))
# FIX: reader.next() is Python-2-only; the next() builtin works on both.
header = next(reader)
user_dict = {}
list1 = []
list2 = []
set_userid = set([])
# Row dicts keyed by the CSV header names.
for row in reader:
    list1.append(dict(zip(header, row)))
header2 = ['userId', 'username']
# Keep only rows with a known user id, reduced to (userId, username).
for item in list1:
    if item['review/userId'] != 'unknown':
        list2.append(dict(zip(header2, [item['review/userId'], item['review/profileName']])))
set_userid = set([])
for item in list2:
    set_userid.add(item['userId'])
for item in set_userid:
    user_dict[item] = {}
# Last occurrence of a user id wins for the display name.
for item in list2:
    user_dict[item['userId']].update({'name': item['username']})
for item in user_dict:
    uid = item
    name = user_dict[item]['name']
    # Truncate to fit the model's field length.
    if len(name) > 200:
        name = name[:199]
    # FIX: the original created a single User() before the loop and re-saved
    # it each iteration — after the first save every subsequent save updated
    # the same row, so only one user ended up in the database. A fresh
    # instance per user inserts one row each. (The original pre-loop
    # obj.validate_unique() on an empty instance was dropped with it.)
    obj = User()
    obj.userid = uid
    obj.username = name
    obj.email = uid + "@gmail.com"
    obj.save()
|
# -*- encoding=utf8 -*-
__author__ = "lyh"
from airtest.core.api import *
from poco.drivers.android.uiautomation import AndroidUiautomationPoco
from poco.exceptions import PocoTargetTimeout
import sys
# Poco driver for the attached Android device; airtest input backend,
# per-action screenshots disabled for speed.
poco = AndroidUiautomationPoco(use_airtest_input=True, screenshot_each_action=False)
#--todo-- ad click-through --
#auto_setup(__file__)
class Ad():
    """Ad-display checks for the app, driven through poco/airtest UI automation.

    Detection works by looking for the literal on-screen label "广告" ("ad").
    """

    #1-- Count ads on the home feed; screenNum = number of screens to swipe
    def IndexFeedAd(self, screenNum):
        # Start from the home position and swipe down N screens (half-screen steps)
        adDisplayTimes = 0  # ad sightings -- may exceed the number of distinct ads
        #poco.swipe([0.5, 0.3], [0.5, 0.5], duration = 1)
        for index in range(screenNum * 2):
            print(index)
            poco.swipe([0.5, 0.8], [0.5, 0.4], duration = 1)
            sleep(1)
            indexAdTipExists = poco(text = "广告").exists()
            print(indexAdTipExists)
            if(indexAdTipExists):
                adDisplayTimes = adDisplayTimes + 1
        # Scroll back to the starting position
        for index in range(screenNum * 2):
            poco.swipe([0.5, 0.4], [0.5, 0.8], duration = 1)
        # if (adDisplayTimes == 0):
        #     return 0
        return adDisplayTimes

    #2-- Channel page ads: returns {channel_name: 1 if an ad was seen else 0}
    def ChannelAd(self):
        pindaoTab = poco(text = "频道")
        if(pindaoTab.exists()):
            pindaoTab.click()
        else:
            print('频道页tab未找到')
        sleep(3)
        # Channels to probe: TV series, movies, animation, kids
        pindaoList = {'电视剧' : 0, '电影' : 0, '动漫' : 0, '少儿' : 0}
        for index in pindaoList:
            print(index)
            sleep(3)
            if (poco(text = index).exists()):
                poco(text = index).click()
                sleep(2)
                if (poco(text = "广告").exists()):
                    pindaoList[index] = 1
            else:
                print(index,'tab不存在')
        # Back to the home tab
        poco(text = "首页").click()
        return pindaoList

    # Search for a long-form video by name and start playback.
    # Returns 1 if playback was started, 0 otherwise.
    def SearchToLongvideo(self, name):
        print('搜索进入长视频', name)
        # Locate and tap the search entry point
        searchButton = poco("android.widget.FrameLayout").offspring("android.webkit.WebView").offspring("android.widget.Image")
        searchButton.click()
        text(name)
        poco(text="搜索").wait_for_appearance()
        poco(text="搜索").click()
        # The result page shows a "play now" button
        poco(text="立即播放").wait_for_appearance()
        if(poco(text="立即播放").exists()):
            poco(text="立即播放").click()
            return 1
        return 0

    #3-- Long-video page: ad below the player (1 if seen, 0 otherwise)
    def LongVideoLowAd(self, name = ''):
        self.SearchToLongvideo(name)
        sleep(5)
        result = 1 if (poco(text = "广告").wait(2).exists()) else 0
        self.quit()
        return result

    #4-- Pre-roll ad before the video (non-member) --todo
    def LongVideoFrontAd(self, name = ''):
        self.SearchToLongvideo(name)
        sleep(8)
        # A "close" button implies a pre-roll ad is showing
        result = 1 if (poco(text = "关闭").wait(2).exists()) else 0
        self.quit()
        return result

    #5-- Search page and search-result page ads
    def SearchAd(self):
        searchAdres = {"searchpro" : False, "searchres" : False}
        # Locate and tap the search entry point
        searchButton = poco("android.widget.FrameLayout").offspring("android.webkit.WebView").offspring("android.widget.Image")
        searchButton.click()
        # Ad shown while typing the query
        searchAdres['searchpro'] = 1 if (poco(text = "广告").wait(2).exists()) else 0
        poco(text="搜索").wait_for_appearance()
        poco(text="搜索").click()
        # Ad shown on the result page
        searchAdres['searchres'] = 1 if (poco(text = "广告").wait(2).exists()) else 0
        self.quit()
        return searchAdres

    #6-- Short-video detail page -- ad labels include: download / view /
    #    enter / play now / browse / claim now / go to Kuaishou / details
    def ShortAd(self, screenNum):
        self.joinShortVideo()
        adDisplayTimes = 0  # half-screen swipe, pause, count -- may overcount
        for index in range(screenNum * 2):
            print(index)
            #indexAdTipExists = poco("视频播放器",type = "android.widget.RelativeLayout").wait(2).exists() # first screen usually shows an ad
            indexAdTipExists = poco(text = "广告").wait(2).exists() # first screen usually shows an ad
            print(indexAdTipExists)
            if(indexAdTipExists):
                adDisplayTimes = adDisplayTimes + 1
            poco.swipe([0.5, 0.8], [0.5, 0.3], duration = 1)
        # Scroll back to the top
        for index in range(screenNum * 2):
            poco.swipe([0.5, 0.3], [0.5, 0.8], duration = 1)
        # Return to the home tab
        self.quit()
        poco(text = "首页").click()
        # Number of ad sightings
        return adDisplayTimes

    # Enter the short-video detail page --todo
    def joinShortVideo(self):
        poco(text = "体育").click()
        #poco("android.webkit.WebView")[0].child("android.view.View").child("android.view.View")[0].click([0.9, 0.5])
        poco(text = "PP视频").click([0.5,2.5])

    # Tap the back button repeatedly until we're back on the home page
    def quit(self):
        while (poco("com.tencent.mm:id/dc").exists()):
            poco("com.tencent.mm:id/dc").click()

    def __del__(self):
        class_name = self.__class__.__name__
        print (class_name, "销毁")
import re
import time
import vim
from vimade import util
from vimade import global_state as GLOBALS
from vimade import highlighter
SIGN_CACHE = {}
PLACES = []
SIGN_IDS_UNUSED = []
def parseParts(line):
    """Parse one line of `:sign` output into a dict of key=value pairs.

    Tokens without an '=' are ignored; a token with more than one '='
    raises ValueError (unchanged from the original contract).
    """
    parsed = {}
    for token in re.split('[\s\t]+', line):
        pieces = token.split('=')
        if len(pieces) < 2:
            continue
        (key, value) = pieces
        parsed[key] = value
    return parsed
def get_signs(bufnr):
    """Return parsed sign placements for a buffer via ':sign place'.

    The first two lines of the command output are headers and are skipped;
    only entries that parsed to a 'name' key are kept.
    """
    raw_lines = util.eval_and_return('execute("silent sign place buffer='+str(bufnr)+'")').split('\n')[2:]
    entries = []
    for raw in raw_lines:
        entry = parseParts(raw)
        if 'name' in entry:
            entries.append(entry)
    return entries
def unfade_bufs(bufs):
    """Restore the original (unfaded) sign names in the given buffers.

    Every placed 'vimade_*' sign is re-placed under its real name, then all
    accumulated place commands are flushed through a temporary Vim function.
    """
    global PLACES
    global SIGN_IDS_UNUSED
    # One batched eval fetching every buffer's sign list.
    buf_signs = util.eval_and_return('[' + ','.join(['get(get(getbufinfo('+x.bufnr+'), 0, {}), \'signs\', [])' for x in bufs ]) + ']')
    # start = time.time()
    i = 0
    for signs in buf_signs:
        bufnr = bufs[i].bufnr
        i = i + 1
        for sign in signs:
            name = sign['name']
            is_vimade = sign['is_vimade'] = name.find('vimade_') != -1
            if is_vimade == True:
                sign['name'] = name.replace('vimade_', '')
                sign['bufnr'] = bufnr
                if int(GLOBALS.features['has_sign_priority']) and 'priority' in sign:
                    sign['priority_text'] = ' priority='+str(sign['priority'])
                else:
                    # FIX: 'priority_text' was only assigned when sign priority
                    # was supported, raising KeyError in the append below on
                    # Vims without that feature.
                    sign['priority_text'] = ''
                if int(GLOBALS.features['has_sign_group']) and 'group' in sign and sign['group'] != '':
                    sign['group_text'] = ' group=' + str(sign['group'])
                else:
                    sign['group_text'] = ' '
                PLACES.append('silent! sign place ' + str(sign['id']) + sign['group_text'] + ' line='+str(sign['lnum']) + ' name=' + sign['name'] + sign['priority_text'] + ' buffer=' + str(sign['bufnr']))
    if len(PLACES):
        # Wrap all place commands in a throwaway function to execute in one go.
        vim.command('function! VimadeSignTemp() \n' + '\n'.join(PLACES) + '\nendfunction')
        try:
            vim.command('call VimadeSignTemp()')
        except:
            # Best-effort: a stale sign id must not abort the unfade pass.
            pass
        PLACES = []
def fade_wins(wins, fade_bufs):
    """Swap visible signs in the given windows for faded 'vimade_' variants,
    defining any faded sign types not yet in SIGN_CACHE.
    """
    global SIGN_IDS_UNUSED
    start = time.time()
    # Collect, per buffer, the set of rows visible in any window.
    buf_map = {}
    for win in wins:
        if not win.buffer in buf_map:
            buf_map[win.buffer] = (win.buffer, {})
        vis = buf_map[win.buffer][1]
        for row in win.visible_rows:
            vis[row] = 1
    bufs = list(buf_map.values())
    if len(bufs) == 0:
        return
    # One batched Vim eval for all buffers' sign info on the visible rows.
    infos = util.eval_and_return('[' + ','.join(['vimade#GetSigns('+x[0]+','+ str(x[1]) + ')' for x in bufs ]) + ']' )
    changes = []
    requests = []
    request_names = []
    i = 0
    lines_by_bufs = {}
    for signs in infos:
        bufnr = bufs[i][0]
        i += 1
        if not bufnr in lines_by_bufs:
            lines_by_bufs[bufnr] = ({}, {})
        [lines, priorities] = lines_by_bufs[bufnr]
        # First pass: index already-faded signs by line and real name.
        for sign in signs:
            name = sign['name']
            is_vimade = sign['is_vimade'] = name.find('vimade_') != -1
            if is_vimade == True:
                real_name = sign['real_name'] = name.split('vimade_')[1]
                lnum = sign['lnum']
                if not 'priority' in sign:
                    sign['priority'] = ''
                if not lnum in lines:
                    lines[sign['lnum']] = {}
                lines[lnum][real_name] = sign['priority']
        # Second pass: decide which unfaded signs need a faded replacement.
        for sign in signs:
            if sign['is_vimade'] == False:
                sign['bufnr'] = bufnr
                lnum = sign['lnum']
                name = sign['name']
                if int(GLOBALS.features['has_sign_group']) and 'group' in sign and sign['group'] != '':
                    sign['group_text'] = ' group='+str(sign['group'])
                else:
                    sign['group_text'] = ' '
                if int(GLOBALS.features['has_sign_priority']):
                    if not 'priority' in sign:
                        priority = GLOBALS.signs_priority
                    else:
                        priority = int(sign['priority']) + GLOBALS.signs_priority
                    # Keep priorities unique per line so placements don't collide.
                    if not lnum in priorities:
                        priorities[lnum] = {}
                    if priority in priorities[lnum]:
                        priority -= 1
                    priorities[lnum][priority] = True
                    sign['priority'] = str(priority)
                    sign['priority_text'] = ' priority='+str(sign['priority'])
                else:
                    sign['priority'] = ''
                    sign['priority_text'] = ''
                # Skip when the faded twin is already placed at this priority.
                if lnum in lines and name in lines[lnum] and (lines[lnum][name] == sign['priority'] or lines[lnum][name] == False):
                    lines[lnum][name] = False
                else:
                    changes.append(sign)
                    if not lnum in lines:
                        lines[lnum] = {}
                    lines[lnum][name] = False
                    # Request the sign's definition once; cached by name.
                    if not name in SIGN_CACHE:
                        SIGN_CACHE[name] = True
                        request_names.append(name)
                        requests.append('execute("sign list ' + name + '")')
    ids = {}  # NOTE(review): never used below — confirm whether this is dead.
    if len(requests):
        # Fetch all new sign definitions in one eval.
        results = util.eval_and_return('[' + ','.join(requests) + ']')
        i = 0
        highlights = []
        cl_highlights = []
        for result in results:
            item = parseParts(result)
            results[i] = item
            if 'texthl' in item:
                highlights.append(item['texthl'])
            if 'linehl' in item:
                cl_highlights.append(item['linehl'])
            if 'numhl' in item:
                cl_highlights.append(item['numhl'])
            i += 1
        if len(highlights):
            highlights = highlighter.fade_names(highlights)
            cl_highlights = highlighter.fade_names(cl_highlights, False, True)
        i = 0
        j = 0
        k = 0
        # Define a faded 'vimade_<name>' sign for each fetched definition.
        for item in results:
            name = request_names[i]
            i += 1
            name = 'vimade_' + name
            # NOTE(review): this writes into the `sign` dict left over from
            # the loops above — looks like a leftover/bug; confirm intent.
            sign[name] = name
            definition = 'sign define ' + name
            linehl_id = texthl_id = icon = text = None
            if 'text' in item:
                definition += ' text=' + item['text']
            if 'icon' in item:
                definition += ' icon=' + item['icon']
            if 'texthl' in item:
                texthl_id = highlights[j][0]
                j += 1
            else:
                texthl_id = GLOBALS.normal_id
            if 'linehl' in item:
                linehl_id = cl_highlights[k][0]
                k += 1
                definition += ' linehl=' + linehl_id
            if 'numhl' in item:
                numhl_id = cl_highlights[k][0]
                k += 1
                definition += ' numhl=' + numhl_id
            definition += ' texthl=' + texthl_id
            vim.command(definition)
    if len(changes):
        place = []  # NOTE(review): unused; the module-level PLACES is used instead.
        for sign in changes:
            PLACES.append('silent! sign place ' + str(sign['id']) + sign['group_text'] + ' line='+str(sign['lnum']) + ' name=vimade_' + sign['name'] + sign['priority_text'] + ' buffer=' + str(sign['bufnr']))
|
# Default imports
import numpy as np
from greyatomlib.python_intermediate.q05_read_csv_data.build import read_ipl_data_csv
path = "data/ipl_matches_small.csv"
# Enter Code Here
def get_unique_teams_set():
    """Return the set of every team appearing in either team column (3 or 4)."""
    matches = read_ipl_data_csv(path, dtype='|S50')
    return set(matches[:, 3]) | set(matches[:, 4])
|
from setuptools import setup, find_packages
# PyPI trove classifiers describing project maturity, audience, and license.
classifiers = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3'
]
# NOTE(review): package_data is keyed by package name, but 'src' here is the
# source *directory* (per package_dir) — confirm the data files are bundled.
setup(
    name='osm2paths',
    version='0.0.11',
    description='An automatic generator of JSON vector road network graph car tracks from OSM',
    long_description=open('README.md').read(),
    url='https://github.com/MikeNezumi/osm2tracks',
    author='MikeFreeman',
    author_email='michaelsvoboda42@gmail.com',
    license='MIT',
    classifiers=classifiers,
    keywords='json, traffic, simulation, data graphs, osm',
    py_modules=['osm2paths'],
    package_dir={'':'src'},
    packages=find_packages(where='src'),
    package_data={'src':['data/*.txt']},
    include_package_data=True,
    install_requires=[
        'utm==0.7.0',
        'pyglet==1.5.15',
        'pythematics==4.0.0'
    ]
)
|
# initialize vars
p1 = []      # module-level hands (shadowed by locals of the same name in drawCards)
p2 = []
games = 0    # total hands scored
p1wins = 0   # hands won by player 1
# assign 5 cards each to player 1 and player 2
def drawCards(s):
    """Deal the ten space-separated cards in *s*: first five to player 1,
    the rest to player 2. Each card becomes a [value, suit] pair."""
    hand1 = []
    hand2 = []
    for card in s.split():
        target = hand1 if len(hand1) < 5 else hand2
        target.append([card[0], card[1]])
    return [hand1, hand2]
# lookup each card in value/suit enumeration dictionaries -> indeces
def dictLookup(cards):
    """Histogram a hand: counts per card value (2..A) and per suit (C,D,H,S)."""
    value_enum = {'2':0, '3':1, '4':2, '5':3, '6':4, '7':5, '8':6, '9':7, 'T':8, 'J':9, 'Q':10, 'K':11, 'A':12}
    suit_enum = {'C':0, 'D':1, 'H':2, 'S':3}
    value_counts = [0] * 13
    suit_counts = [0] * 4
    for card in cards:
        value_counts[value_enum[card[0]]] += 1
        suit_counts[suit_enum[card[1]]] += 1
    return [value_counts, suit_counts]
# Input: A player's values
# Output: [isStraight, special]
def hasStraight(values):
    """Detect a straight in a 13-slot value histogram.

    Returns [1, high_card_index] for a straight, [0, 0] otherwise.
    The ace-low wheel (A-2-3-4-5) is special-cased first with high card 5
    (index 3).
    """
    # check for ace
    if values[12] == 1:
        if values[0] == 1 and values[1] == 1 and values[2] == 1 and values[3] == 1:
            return [1, 3]
    # FIX: was range(len(values) - 5), which stopped at i == 7 and could
    # never see the ten-to-ace straight (indices 8..12) — so straight
    # flushes to the ace (royal flushes) were never detected downstream.
    for i in range(len(values) - 4):
        if values[i] == 1 and values[i+1] == 1 and values[i+2] == 1 and values[i+3] == 1 and values[i+4] == 1:
            return [1, i + 4]
    return [0, 0]
# Input: A player's hand [values, suits]
# Output: A player's hand's rank [rank, special, values]
def rankHand(hand):
    """Score a 5-card hand histogram into [rank, special, values].

    `special` is the index of the deciding card (pair/trips/quads value, or
    the high card / straight high card).
    """
    rank = 0
    special = 0
    # ranks = {0:highCard, 1:pair, 2:two, 3:three, 4:straight, 5:flush, 6:full,
    #          7:four, 8:straightFlush, 9:royalFlush}
    isFlush = False
    suits = hand[1]
    # All five cards in one suit -> flush.
    for suit in suits:
        if suit == 5:
            isFlush = True
            rank = 5
    isStraight = []
    hasPair = False
    hasThree = False
    hasFour = False
    values = hand[0]
    # Walk the value histogram, upgrading rank as pairs/trips/quads appear.
    for idx, val in enumerate(values):
        if val == 2:
            if hasPair == True:
                # second pair -> two pair; keep the higher pair's index
                rank = 2
                special = max(special, idx)
            else:
                hasPair = True
                rank = 1
                if hasThree:
                    # pair after trips -> full house
                    rank = 6
                else:
                    special = idx
        elif val == 3:
            hasThree = True
            special = idx
            rank = 3
            if hasPair:
                # trips after a pair -> full house
                rank = 6
        elif val == 4:
            hasFour = True
            rank = 7
            special = idx
        elif val == 1 and not hasPair and not hasThree and not hasFour:
            # plain high card tracking (only while no made hand exists)
            special = max(special, idx)
    if not hasPair and not hasThree and not hasFour:
        isStraight = hasStraight(values) # [isStraight, special]
        if isStraight[0]:
            special = isStraight[1]
            rank = 4
            if isFlush:
                rank = 8
                if special == 12:
                    # ace-high straight flush -> royal flush
                    rank = 9
    return [rank, special, values]
# Check if player 1 has a winning hand
def playerOneWins(hands):
    """Return 1 if player 1's hand beats player 2's, else 0.

    Compares rank, then the deciding ('special') card, then falls back to a
    full high-card tiebreak.
    """
    p1 = rankHand(hands[0])
    p2 = rankHand(hands[1])
    if p1[0] > p2[0]: # Player 1 higher rank
        return 1
    elif p1[0] == p2[0]: # Tied rank
        if p1[1] > p2[1]: # Player 1 higher special card
            return 1
        elif p1[1] == p2[1]:
            return playerOneWinsTiebreak(p1[2], p2[2])
        else:
            return 0
    # FIX: the original fell off the end here (player 2 higher rank),
    # implicitly returning None; made the losing result an explicit 0.
    return 0
# run tiebreaker if ranks and special card are the same
def playerOneWinsTiebreak(values_1, values_2):
    """Compare value histograms from highest card down; 1 if player 1 wins,
    0 on a loss or an exact tie."""
    # FIX: stop was 0, which skipped index 0 — the deuce count was never
    # compared, so a hand differing only in 2s tied incorrectly.
    for i in range(len(values_1) - 1, -1, -1):
        if values_1[i] > values_2[i]:
            return 1
        elif values_2[i] > values_1[i]:
            return 0
    return 0
# read file
with open('p54_poker.txt', 'r') as f:
    # determine winner for each line
    for line in f:
        # FIX: `print line` / `print "..."` are SyntaxErrors on Python 3;
        # the parenthesized single-argument form below behaves identically
        # on Python 2 as well.
        print(line)
        cards = drawCards(line)
        hands = [dictLookup(cards[0]), dictLookup(cards[1])]
        if playerOneWins(hands): # P1 wins
            p1wins += 1
        games += 1
# print result
print("Games = {}, Player 1 wins = {}".format(games, p1wins))
|
'''
author(s): xujing from Medcare
date: 2019-03-20
flask调用opencv并基于yolo-lite做目标检测。
解决了:
在html中嵌入opencv视频流
opencv putText的中文显示
darknet调用yolo-lite
多线程,多点同时访问
ajax异步更新echarts的json数据,实时绘制识别结果!
问题: yolo-lite在no-GPU下的识别FPS没有做到paper中说的那么高!
'''
from flask import Response
from flask import Flask
from flask import render_template
import os
import uuid
import threading
import argparse
from ctypes import *
import math
import random
import numpy as np
import configparser
import imutils
import cv2
from imutils.video import VideoStream
from PIL import Image,ImageDraw,ImageFont
import matplotlib.cm as mpcm
import datetime
import time
from pyecharts.charts import Bar
from pyecharts import options as opts
app = Flask(__name__)
# Most recent processed frame, shared between capture and HTTP responses.
outputFrame = None
temp_str = str(uuid.uuid1())
print(temp_str)
# Guards concurrent access to outputFrame from multiple clients.
lock = threading.Lock()
# Video source is chosen from config.ini; first section not set to 'no' wins.
config = configparser.ConfigParser()
config.read('config.ini')
if config['IPCapture']['IP'] != 'no':
    # vs = VideoStream(src= config['IPCapture']['IP']).start() # IP camera
    vs = cv2.VideoCapture(config['IPCapture']['IP']) # IP camera
elif config['USBCapture']['USB'] != 'no':
    # vs = VideoStream(src=0).start() # USB camera or capture card
    vs = cv2.VideoCapture(0) # USB camera or capture card
elif config['PiCamera']['PI'] != 'no':
    # vs = VideoStream(usePiCamera=1).start() # Raspberry Pi camera
    vs = cv2.VideoCapture(1) # Raspberry Pi camera
elif config['VideoPath']['PATH'] != 'no':
    # vs = VideoStream(src="test.mp4").start() # local video file
    vs = cv2.VideoCapture("test.mp4") # local video file
hasGPU = config['Device']['Device']
# Detection class labels, index-aligned with the model's outputs.
label_name = ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',
'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
probs = [0.0] * len(label_name)
# Give the camera time to warm up.
time.sleep(2.0)
# ---------------------------calling yolo-lite through darknet--------------------------------
def change_cv2_draw(image, strs, local, sizes, colour):
    """Draw text on a BGR image via PIL.

    Works around cv2.putText's inability to render non-ASCII (e.g. Chinese)
    glyphs by round-tripping through a PIL image.
    """
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pil_image = Image.fromarray(rgb)
    pen = ImageDraw.Draw(pil_image)
    font = ImageFont.truetype("./static/fonts/Microsoft-Yahei-UI-Light.ttc", sizes, encoding='utf-8')
    pen.text(local, strs, colour, font=font)
    return cv2.cvtColor(np.array(pil_image), cv2.COLOR_RGB2BGR)
def colors_subselect(colors, num_classes=20):
    """Pick ``num_classes`` evenly spaced entries from a colormap.

    Float channels (0..1) are scaled to 0..255 ints; integer channels are
    copied as-is. Returns a list of per-class [r, g, b] lists.
    """
    step = len(colors) // num_classes
    picked = []
    for idx in range(num_classes):
        entry = colors[idx * step]
        if isinstance(entry[0], float):
            picked.append([int(channel * 255) for channel in entry])
        else:
            picked.append(list(entry))
    return picked
# 20 evenly spaced plasma colors (defined here; not referenced by the code
# visible below) and a fixed palette indexed by label_name position when
# drawing detection boxes in performDetect.
colors = colors_subselect(mpcm.plasma.colors, num_classes=20)
colors_tableau = [(255, 152, 150),(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
                  (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
                  (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229),
                  (255, 152, 150),(148, 103, 189), (197, 176, 213)]
# 调用darknet需要的一些方法
def sample(probs):
    """Draw one random index, weighted by ``probs`` (normalized first)."""
    total = sum(probs)
    weights = [p / total for p in probs]
    r = random.uniform(0, 1)
    for idx, weight in enumerate(weights):
        r -= weight
        if r <= 0:
            return idx
    # Floating-point slack: fall back to the last index.
    return len(weights) - 1
def c_array(ctype, values):
    """Build a ctypes array of ``ctype`` initialized from ``values``."""
    buf = (ctype * len(values))()
    for i, v in enumerate(values):
        buf[i] = v
    return buf
class BOX(Structure):
    """darknet bounding box: center x/y plus width/height (floats)."""
    _fields_ = [("x", c_float),
                ("y", c_float),
                ("w", c_float),
                ("h", c_float)]
class DETECTION(Structure):
    """One raw detection as returned by get_network_boxes.

    ``prob`` points at one score per class; ``sort_class`` is used by the
    C-side NMS sort.
    """
    _fields_ = [("bbox", BOX),
                ("classes", c_int),
                ("prob", POINTER(c_float)),
                ("mask", POINTER(c_float)),
                ("objectness", c_float),
                ("sort_class", c_int),
                ("uc", POINTER(c_float))]
class IMAGE(Structure):
    """darknet image: width/height/channels plus a float pixel buffer."""
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
class METADATA(Structure):
    """Class count and class-name table loaded from the .data file."""
    _fields_ = [("classes", c_int),
                ("names", POINTER(c_char_p))]
# Load the darknet shared library: on Windows choose the GPU or CPU-only DLL
# based on the config value; elsewhere load the Linux shared object.
if os.name == "nt":
    cwd = os.path.dirname(__file__)
    # Ensure dependent DLLs next to this file are found.
    os.environ['PATH'] = cwd + ';' + os.environ['PATH']
    if hasGPU == "True":
        winGPUdll = os.path.join(cwd, "yolo_cpp_dll.dll") # GPU!
        lib = CDLL(winGPUdll, RTLD_GLOBAL)
    else:
        winNoGPUdll = os.path.join(cwd, "yolo_cpp_dll_no_gpu.dll")
        lib = CDLL(winNoGPUdll, RTLD_GLOBAL)
else:
    lib = CDLL("./libdarknet.so", RTLD_GLOBAL) # Linux
lib.network_width.argtypes = [c_void_p]
lib.network_width.restype = c_int
lib.network_height.argtypes = [c_void_p]
lib.network_height.restype = c_int
copy_image_from_bytes = lib.copy_image_from_bytes
copy_image_from_bytes.argtypes = [IMAGE,c_char_p]
def network_width(net):
    """Return the network's input width in pixels (C call)."""
    return lib.network_width(net)
def network_height(net):
    """Return the network's input height in pixels (C call)."""
    return lib.network_height(net)
# ctypes bindings (argtypes/restype) for the darknet C API used below.
predict = lib.network_predict_ptr
predict.argtypes = [c_void_p, POINTER(c_float)]
predict.restype = POINTER(c_float)
# BUGFIX: hasGPU is the raw config *string*, so `if hasGPU:` was true even
# when it was set to "False".  Compare against "True" exactly like the
# DLL-selection code above does.
if hasGPU == "True":
    set_gpu = lib.cuda_set_device
    set_gpu.argtypes = [c_int]
make_image = lib.make_image
make_image.argtypes = [c_int, c_int, c_int]
make_image.restype = IMAGE
get_network_boxes = lib.get_network_boxes
get_network_boxes.argtypes = [c_void_p, c_int, c_int, c_float, c_float, POINTER(c_int), c_int, POINTER(c_int), c_int]
get_network_boxes.restype = POINTER(DETECTION)
make_network_boxes = lib.make_network_boxes
make_network_boxes.argtypes = [c_void_p]
make_network_boxes.restype = POINTER(DETECTION)
free_detections = lib.free_detections
free_detections.argtypes = [POINTER(DETECTION), c_int]
free_ptrs = lib.free_ptrs
free_ptrs.argtypes = [POINTER(c_void_p), c_int]
network_predict = lib.network_predict_ptr
network_predict.argtypes = [c_void_p, POINTER(c_float)]
reset_rnn = lib.reset_rnn
reset_rnn.argtypes = [c_void_p]
load_net = lib.load_network
load_net.argtypes = [c_char_p, c_char_p, c_int]
load_net.restype = c_void_p
load_net_custom = lib.load_network_custom
load_net_custom.argtypes = [c_char_p, c_char_p, c_int, c_int]
load_net_custom.restype = c_void_p
do_nms_obj = lib.do_nms_obj
do_nms_obj.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
do_nms_sort = lib.do_nms_sort
do_nms_sort.argtypes = [POINTER(DETECTION), c_int, c_int, c_float]
free_image = lib.free_image
free_image.argtypes = [IMAGE]
letterbox_image = lib.letterbox_image
letterbox_image.argtypes = [IMAGE, c_int, c_int]
letterbox_image.restype = IMAGE
load_meta = lib.get_metadata
lib.get_metadata.argtypes = [c_char_p]
lib.get_metadata.restype = METADATA
load_image = lib.load_image_color
load_image.argtypes = [c_char_p, c_int, c_int]
load_image.restype = IMAGE
rgbgr_image = lib.rgbgr_image
rgbgr_image.argtypes = [IMAGE]
predict_image = lib.network_predict_image
predict_image.argtypes = [c_void_p, IMAGE]
predict_image.restype = POINTER(c_float)
predict_image_letterbox = lib.network_predict_image_letterbox
predict_image_letterbox.argtypes = [c_void_p, IMAGE]
predict_image_letterbox.restype = POINTER(c_float)
def array_to_image(arr):
    """Convert an HxWxC numpy image into a darknet IMAGE.

    Returns (IMAGE, backing_array).  The caller MUST keep the returned
    array referenced: the IMAGE only points into its buffer, and Python
    would otherwise free the memory.
    """
    # IDIOM: the function-local `import numpy as np` was redundant -- the
    # module already imports numpy at top level.
    arr = arr.transpose(2, 0, 1)
    c, h, w = arr.shape
    arr = np.ascontiguousarray(arr.flat, dtype=np.float32) / 255.0
    data = arr.ctypes.data_as(POINTER(c_float))
    im = IMAGE(w, h, c, data)
    return im, arr
def classify(net, meta, im):
    """Run the classifier on ``im`` and return (name, score) pairs,
    sorted by score descending. Names come from altNames when loaded,
    otherwise from the C metadata table."""
    scores = predict_image(net, im)
    names = meta.names if altNames is None else altNames
    res = [(names[i], scores[i]) for i in range(meta.classes)]
    res.sort(key=lambda pair: pair[1], reverse=True)
    return res
def detect(net, meta, image, thresh=.5, hier_thresh=.5, nms=.45, debug= False):
    """Load an image from disk, run detection on it, and free the C image.

    ``image`` is a bytes path accepted by darknet's load_image_color.
    """
    loaded = load_image(image, 0, 0)
    if debug:
        print("Loaded image")
    result = detect_image(net, meta, loaded, thresh, hier_thresh, nms, debug)
    free_image(loaded)
    if debug:
        print("freed image")
    return result
def detect_image(net, meta, im, thresh=.5, hier_thresh=.5, nms=.45, debug= False):
    """Run the network on an already-loaded darknet IMAGE.

    Returns a list of (name, score, (cx, cy, w, h)) tuples sorted by score
    descending; boxes use center coordinates.  Frees the C detection array
    before returning.
    """
    num = c_int(0)
    if debug: print("Assigned num")
    pnum = pointer(num)
    if debug: print("Assigned pnum")
    predict_image(net, im)
    letter_box = 0
    #predict_image_letterbox(net, im)
    #letter_box = 1
    if debug: print("did prediction")
    #dets = get_network_boxes(net, custom_image_bgr.shape[1], custom_image_bgr.shape[0], thresh, hier_thresh, None, 0, pnum, letter_box) # OpenCV
    dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, None, 0, pnum, letter_box)
    if debug: print("Got dets")
    # pnum was filled in by the C call with the number of candidate boxes.
    num = pnum[0]
    if debug: print("got zeroth index of pnum")
    if nms:
        do_nms_sort(dets, num, meta.classes, nms)
    if debug: print("did sort")
    res = []
    if debug: print("about to range")
    # Collect every (box, class) pair that survived NMS with prob > 0.
    for j in range(num):
        if debug: print("Ranging on "+str(j)+" of "+str(num))
        if debug: print("Classes: "+str(meta), meta.classes, meta.names)
        for i in range(meta.classes):
            if debug: print("Class-ranging on "+str(i)+" of "+str(meta.classes)+"= "+str(dets[j].prob[i]))
            if dets[j].prob[i] > 0:
                b = dets[j].bbox
                if altNames is None:
                    nameTag = meta.names[i]
                else:
                    nameTag = altNames[i]
                if debug:
                    print("Got bbox", b)
                    print(nameTag)
                    print(dets[j].prob[i])
                    print((b.x, b.y, b.w, b.h))
                res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h)))
    if debug: print("did range")
    res = sorted(res, key=lambda x: -x[1])
    if debug: print("did sort")
    # Release the C-side array; the tuples above hold Python copies.
    free_detections(dets, num)
    if debug: print("freed detections")
    return res
# Lazily-initialized darknet state, loaded on the first performDetect call.
netMain = None
metaMain = None
altNames = None
def performDetect(imagePath="test.jpg", thresh=0.5, configPath="./model/tiny-yolov2-trial13-noBatch.cfg", weightPath="./model/tiny-yolov2-trial13_noBatch.weights", metaPath="./model/voc.data", showImage=True, makeImageOnly=False, initOnly=False):
    """Run YOLO-lite on one image file and (optionally) draw the detections.

    The network and metadata are loaded lazily on the first call and cached
    in the module globals netMain / metaMain / altNames.

    Args:
        imagePath: path of the image to run detection on.
        thresh: detection threshold, must be strictly between 0 and 1.
        configPath / weightPath / metaPath: darknet model files.
        showImage: when True, draw boxes/labels onto the image.
        makeImageOnly: unused here; kept for interface compatibility.
        initOnly: load the network and return None immediately.

    Returns:
        (image, img_prob): the annotated BGR image (None when nothing was
        drawn) and the per-class confidence list aligned with label_name.

    Raises:
        ValueError: when any of the model/image paths does not exist.
    """
    global metaMain, netMain, altNames  # pylint: disable=W0603
    assert 0 < thresh < 1, "Threshold should be a float between zero and one (non-inclusive)"
    if not os.path.exists(configPath):
        raise ValueError("Invalid config path `"+os.path.abspath(configPath)+"`")
    if not os.path.exists(weightPath):
        raise ValueError("Invalid weight path `"+os.path.abspath(weightPath)+"`")
    if not os.path.exists(metaPath):
        raise ValueError("Invalid data file path `"+os.path.abspath(metaPath)+"`")
    if netMain is None:
        netMain = load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1)  # batch size = 1
    if metaMain is None:
        metaMain = load_meta(metaPath.encode("ascii"))
    if altNames is None:
        # In Python 3, the metafile default access craps out on Windows (but
        # not Linux); read the names file ourselves instead.
        try:
            with open(metaPath) as metaFH:
                metaContents = metaFH.read()
                import re
                match = re.search("names *= *(.*)$", metaContents, re.IGNORECASE | re.MULTILINE)
                if match:
                    result = match.group(1)
                else:
                    result = None
                try:
                    if os.path.exists(result):
                        with open(result) as namesFH:
                            namesList = namesFH.read().strip().split("\n")
                            altNames = [x.strip() for x in namesList]
                except TypeError:
                    pass
        except Exception:
            pass
    if initOnly:
        print("Initialized detector")
        return None
    if not os.path.exists(imagePath):
        raise ValueError("Invalid image path `"+os.path.abspath(imagePath)+"`")
    # Do the detection
    detections = detect(netMain, metaMain, imagePath.encode("ascii"), thresh)
    # BUGFIX: image/img_prob were only assigned inside the showImage branch,
    # so showImage=False (or an exception before their assignment) made the
    # final return raise NameError.  Define safe defaults first.
    image = None
    img_prob = [0.0] * len(label_name)
    if showImage:
        try:
            scale = 0.4
            text_thickness = 1
            thickness = 2
            image = cv2.imread(imagePath)
            print("*** "+str(len(detections))+" Results, color coded by confidence ***")
            imcaption = []
            for detection in detections:
                label = detection[0]
                confidence = detection[1]
                pstring = label+": "+str(np.rint(100 * confidence))+"%"
                img_prob[label_name.index(label)] = np.rint(100 * confidence)
                imcaption.append(pstring)
                print(pstring)
                bounds = detection[2]
                # darknet boxes are (center_x, center_y, width, height).
                yExtent = int(bounds[3])
                xEntent = int(bounds[2])
                xCoord = int(bounds[0] - bounds[2]/2)
                yCoord = int(bounds[1] - bounds[3]/2)
                color = colors_tableau[label_name.index(detection[0])]
                p1 = (xCoord, yCoord)
                p2 = (xCoord + xEntent, yCoord + yExtent)
                # Skip degenerate (sub-pixel) boxes.
                if (p2[0] - p1[0] < 1) or (p2[1] - p1[1] < 1):
                    continue
                cv2.rectangle(image, p1, p2, color, thickness)
                text_size, baseline = cv2.getTextSize(pstring, cv2.FONT_HERSHEY_SIMPLEX, scale, text_thickness)
                # Filled label background, then the (possibly CJK) caption.
                cv2.rectangle(image, (p1[0], p1[1] - thickness*10 - baseline), (p1[0] + 2*(text_size[0]-20), p1[1]), color, -1)
                image = change_cv2_draw(image, pstring, (p1[0], p1[1]-7*baseline), 20, (255, 255, 255))
        except Exception as e:
            print("Unable to show image: "+str(e))
    return image, img_prob
#--------------------------falsk调用OpenCV和YOLO-lite------------------------------------
# Index view: serves the page embedding the MJPEG stream and the bar chart.
@app.route("/", methods=['GET'])
def index():
    """Render the main page."""
    return render_template("index.html")
def detect_yolo_lite():
    '''Background worker: run YOLO-lite on every captured frame.

    Grabs frames from the shared capture device, writes each to a temp
    image (the detector loads from disk), runs performDetect, and
    publishes the annotated frame / per-class confidences via the
    module globals.
    '''
    global vs, outputFrame, lock, probs
    total = 0
    while True:
        ret,frame = vs.read()
        total += 1
        # frame = imutils.resize(frame, width=400)
        # if total/10 == 0:
        save_path = "./static/images/"+ temp_str + ".jpg"
        cv2.imwrite(save_path,frame)
        frame, probs = performDetect(imagePath=save_path)
        # print(frame)
        with lock:  # thread lock: keep readers from seeing a half-written frame
            outputFrame = frame
def generate():
    '''Generator yielding the latest annotated frame as one MJPEG part.
    '''
    global outputFrame, lock
    while True:
        with lock:
            # Busy-wait until the detector thread has produced a frame.
            if outputFrame is None:
                continue
            (flag,encodedImage) = cv2.imencode(".jpg",outputFrame)
            # Skip frames that failed to encode.
            if not flag:
                continue
        yield(b"--frame\r\n" b"Content-Type:image/jpeg\r\n\r\n"+bytearray(encodedImage)+b"\r\n")
# Frame display endpoint, consumed by the <img> tag on the index page.
@app.route("/video_feed")
def video_feed():
    """Stream annotated frames as multipart MJPEG."""
    return Response(generate(),mimetype='multipart/x-mixed-replace;boundary=frame')
# AJAX endpoint: the page polls this to refresh the echarts data.
@app.route("/get_bar")
def get_bar():
    """Return pyecharts bar options for the latest per-class confidences."""
    global probs
    bar = (
        Bar()
        .add_xaxis(label_name)
        .add_yaxis("Detection Probs",probs)
    )
    # print(bar.render_embed())
    # print(bar.dump_options())
    # return render_template("index.html",bar_data=bar.dump_options())
    # return bar.dump_options_with_quotes()
    return bar.dump_options()
if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--ip", type=str, required=True, help="IP")
    ap.add_argument("-o", "--port", type=int, required=True, help="port")
    args = vars(ap.parse_args())
    # Run the detector in a daemon thread so Flask can serve concurrently.
    t = threading.Thread(target=detect_yolo_lite)
    t.daemon = True
    t.start()
    app.run(host=args["ip"], port=args["port"], debug=True, threaded=True,
            use_reloader=False)
    # Release the video stream on shutdown.
    # BUGFIX: cv2.VideoCapture has release(), not stop(); stop() belongs to
    # the imutils VideoStream variants that are commented out above, so
    # vs.stop() raised AttributeError here.
    vs.release()
|
import numpy as np
import matplotlib.pyplot as plt

# Quantize x in [0, 1) through a normalized log curve into 256 levels,
# then plot the inverse histogram density of the interior levels.
x = np.arange(0., 1., 1e-6)
x_q = np.round(np.log(x - np.min(x) + 1) / np.max(np.log(x - np.min(x) + 1)) * 255)
# IDIOM: renamed from `bin`, which shadowed the builtin of the same name.
counts = np.bincount(x_q.astype('int'))[1:-1]  # drop edge bins 0 and 255
levels = np.arange(0, 256, 1)[1:-1]
plt.plot(levels, np.sum(counts) / counts)
plt.xlabel('levels')
plt.ylabel('density')
plt.show()
|
import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd
import cv2
import time
FINAL_FILE_NAME = 'data/training_data.npy'
# Frame dimensions used when reshaping the flattened samples.
WIDTH, HEIGHT = 320, 180
# Preview pass: show each captured frame; pause one second on frames whose
# label's second element is 1 ("shot"); 'q' quits the preview.
train_data = np.load(FINAL_FILE_NAME)
for data in train_data:
    img = data[0]
    choice = data[1]
    cv2.imshow('test',img)
    if (choice[1] == 1):
        print("SHOT")
        time.sleep(1)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
df = pd.DataFrame(train_data)
# NOTE(review): X and y are both built from train_data[0] -- y presumably
# should come from the labels.  Both are overwritten below anyway, so these
# two lines look like dead code; confirm before relying on them.
X = np.array(train_data[0])
y = np.array(train_data[0])
# Manual holdout split: everything but the last 100 samples for training.
train = train_data[:-100]
test = train_data[-100:]
X = np.array([i[0] for i in train]).reshape(-1, WIDTH, HEIGHT, 3)
y = [i[1] for i in train]
X_test = np.array([i[0] for i in test]).reshape(-1, WIDTH, HEIGHT, 3)
y_test = [i[1] for i in test]
print(y[:6])
# NOTE(review): train_test_split with a single array returns 2 values, so
# unpacking into 4 should raise ValueError -- presumably
# train_test_split(X, y, random_state=0) was intended; confirm.
X_train, X_test, y_train, y_test = train_test_split(df, random_state=0)
print("{}".format(y_train[:6]))
#!/usr/bin/env python3
import unittest
from client_query_manager.specific_queries import Specific_queries
class Specific_queries_testCase(unittest.TestCase):
    """Smoke tests for Specific_queries.

    Each test only calls a query method and checks it does not raise;
    no return values are asserted.
    """
    def setUp(self):
        # Fresh inquirer per test.
        self.inquirer = Specific_queries()
    def test_arrv_or_dep_delay_ratio_at_airport(self):
        self.inquirer.arrv_or_dep_delay_ratio_at_airport()
    def test_career_delay_proportion_in_day_or_hour(self):
        self.inquirer.career_delay_proportion_in_day_or_hour()
    def test_career_average_dist_from_airpport(self):
        self.inquirer.career_average_dist_from_airpport()
    def test_highest_accident_rate_on_street(self):
        self.inquirer.highest_accident_rate_on_street()
    def test_average_accident_severity_in_city(self):
        self.inquirer.average_accident_severity_in_city()
    def test_total_gun_violence_occ_on_particular_day(self):
        self.inquirer.total_gun_violence_occ_on_particular_day()
    def test_ratio_of_gun_violence_suspects_to_all_suspects(self):
        self.inquirer.ratio_of_gun_violence_suspects_to_all_suspects()
    def test_prop_delay_by_weather_vs_all_delays(self):
        self.inquirer.prop_delay_by_weather_vs_all_delays()
    def test_prop_acidents_due_to_weather_vs_all(self):
        self.inquirer.prop_acidents_due_to_weather_vs_all()
    def test_get_accident_incident_severity(self):
        self.inquirer.get_accident_incident_severity()
# Run the suite when executed directly.
if __name__=='__main__':
    unittest.main()
from django import forms
from .models import Article
from ckeditor_uploader.widgets import CKEditorUploadingWidget
class ArticleCreateForm(forms.ModelForm):
    """
    Model form for creating an Article.

    The content field uses a CKEditor widget with image-upload support;
    its label is user-facing Russian text ("Content").
    """
    content = forms.CharField(widget=CKEditorUploadingWidget, label='Контент')
    class Meta:
        model = Article
        # is_approve and author are excluded: set server-side, not by the submitter.
        exclude = ('is_approve', 'author')
|
from dataclasses import dataclass
from enum import Enum, auto
from datetime import datetime, timezone, timedelta
from typing import Optional
import io
import re
class Verbosity(Enum):
    """Verbosity level of a parsed Unreal Engine log line.
    """
    Log = auto()
    Warning = auto()
    Error = auto()
    Display = auto()
@dataclass
class Log:
    """One parsed log entry.

    verbosity and category may be None: some system-generated lines
    (e.g. after console-command changes, or the log-file-closed
    timestamp) carry neither a verbosity level nor a category.

    Attributes:
        date (datetime): time the log line was emitted
        verbosity (Verbosity): verbosity level of the line
        category (str): log category
        log (str): the line with the timestamp prefix removed
        log_body (str): the line with timestamp, verbosity and category removed
    """
    date: datetime
    verbosity: Optional[Verbosity]
    category: Optional[str]
    log: str
    log_body: str
class Parser:
    """Reads an Unreal Engine log file and yields parsed Log records
    (timestamp, category, verbosity, body) one entry at a time.
    """
    def __init__(self, file_object: io.TextIOBase):
        """Scan forward to the time-zone line and remember the offset.

        Before real logging starts, UE prints a line such as:
        LogICUInternationalization: ICU TimeZone Detection - Raw Offset: +9:00, Platform Override: ''
        and that offset is applied to every parsed timestamp.

        Args:
            file_object (io.TextIOBase): an opened Unreal Engine log file

        Raises:
            Exception: when no time-zone line is found (the file is
                probably not an Unreal Engine log)
        """
        self._log_timezone = None
        self._file_object = file_object
        line = self._file_object.readline()
        while line:
            if 'TimeZone Detection' in line:
                # NOTE(review): `match` is used unguarded -- a malformed
                # offset line would raise AttributeError here.  Also, for a
                # negative offset like "-5:30" the hours are negative but the
                # minutes stay positive, so timedelta(hours=-5, minutes=30)
                # is -4:30, not -5:30 -- confirm against real logs.
                match = Parser._extract_time_zone_pattern.match(line)
                offset = timedelta(hours=int(match.group(1)), minutes=int(match.group(2)))
                self._log_timezone = timezone(offset)
                break
            else:
                line = self._file_object.readline()
        if self._log_timezone is None:
            raise Exception('ログからタイムゾーンを検出できませんでした。UnrealEngineのログファイルでは無い可能性があります。')
    def read(self) -> Optional[Log]:
        """Read one log entry.

        Returns:
            Optional[Log]: the parsed entry, or None when no further
                entry can be read.
        """
        line = self._file_object.readline()
        if line == '':
            return None
        while line:
            if Parser._is_logstart(line):
                log = Parser._parse_log_start_line(line, self._log_timezone)
                if not log:
                    line = self._file_object.readline()
                    continue
                # Entries can span multiple lines: gather continuation lines
                # until the next line that starts a new entry.
                prev_pos = self._file_object.tell()
                line = self._file_object.readline()
                while line and not self._is_logstart(line):
                    prev_pos = self._file_object.tell()
                    log.log += '\n'
                    log.log += line.replace('\n', '')
                    log.log_body += '\n'
                    log.log_body += line.replace('\n', '')
                    line = self._file_object.readline()
                # Rewind the last readline so the next read() sees the entry
                # that terminated this one.  seek() returns the new position;
                # the value bound to `line` is unused since we return next.
                line = self._file_object.seek(prev_pos)
                return log
            else:
                line = self._file_object.readline()
        return None
    # Extracts the offset hours and minutes from the time-zone line, e.g.
    # LogICUInternationalization: ICU TimeZone Detection - Raw Offset: +9:00, Platform Override: ''
    # group1: signed offset hours
    # group2: offset minutes
    _extract_time_zone_pattern = re.compile(r'.+Raw Offset:\s*([^:]+):(\d+)')
    # Entry-start pattern: splits the timestamp prefix from the log text.
    # [2020.12.13-02.11.01:195][404]LogTemp: Error: LogTemp Verbosity:Error
    # splits into the timestamp part  [2020.12.13-02.11.01:195]
    # and the log text                LogTemp: Error: LogTemp Verbosity:Error
    # group1: year
    # group2: month
    # group3: day
    # group4: hour
    # group5: minute
    # group6: second
    # group7: milliseconds
    # group8: log
    _log_start_pattern = re.compile(r'^\[(\d+)\.(\d+)\.(\d+)-(\d+)\.(\d+)\.(\d+):(\d+)\]\[[\s\d]+\](.+)$')
    # Splits the category off the log text, e.g.
    # LogTemp: Error: LogTemp Verbosity:Error
    # -> category "LogTemp" and remainder "Error: LogTemp Verbosity:Error"
    # group1: log category
    # group2: remaining log text
    _split_log_category = re.compile(r'([^:]+):\s+(.+)')
    # Extracts the verbosity, e.g.
    # Error: LogTemp Verbosity:Error
    # -> verbosity "Error" and body "LogTemp Verbosity:Error"
    # UE only prints an explicit marker for:
    # Warning
    # Error
    # Display
    # Plain Log lines carry no marker, so a non-match means Verbosity.Log.
    # group1: verbosity
    # group2: log body
    _split_log_verbosity = re.compile(r'(Warning|Error|Display):\s+(.+)')
    @staticmethod
    def _is_logstart(line: str) -> bool:
        """Whether the line starts a new Unreal Engine log entry.

        Every UE entry begins with a timestamp; continuation lines of a
        multi-line entry do not, and belong to the preceding entry.
        Used to distinguish entry starts from continuation lines.

        Args:
            line (str): string to test

        Returns:
            bool: True when the line matches the entry-start format
        """
        match = Parser._log_start_pattern.match(line)
        return match is not None
    @staticmethod
    def _parse_log_start_line(line: str, log_timezone: timezone) -> Optional[Log]:
        """Parse an Unreal Engine entry-start line.

        Parsing can fail even for entry-start lines: system lines such as
        the log-file-closed timestamp do not follow the UE_LOG format.

        Args:
            line (str): the line to parse
            log_timezone (timezone): timezone applied to the timestamp

        Returns:
            Optional[Log]: the parsed entry, or None on failure.
        """
        match = Parser._log_start_pattern.match(line)
        time = datetime(
            year=int(match.group(1)),
            month=int(match.group(2)),
            day=int(match.group(3)),
            hour=int(match.group(4)),
            minute=int(match.group(5)),
            second=int(match.group(6)),
            microsecond=int(match.group(7))*1000,
            tzinfo=log_timezone
        )
        # Detect the log category.
        log_without_time = match.group(8)
        category_match = Parser._split_log_category.match(log_without_time)
        if category_match is None:
            return Log(time, None, None, log_without_time, log_without_time)
        else:
            category = category_match.group(1)
            log_without_category = category_match.group(2)
            # Detect the verbosity.
            verbosity_math = Parser._split_log_verbosity.match(log_without_category)
            if verbosity_math is not None:
                verbosity = Verbosity[verbosity_math.group(1)]
                log_body = verbosity_math.group(2)
            else:
                verbosity = Verbosity.Log
                log_body = log_without_category
            return Log(time, verbosity, category, log_without_time, log_body)
|
class Empleado:
    """Simple employee record.

    Class attributes (shared by every instance):
        monto_para_aumento: raise factor applied by aplicar_salario.
        numero_de_empleados: running count, bumped on every construction.
    """
    monto_para_aumento = 1.04
    numero_de_empleados = 0

    def __init__(self, nombre, apellido, salario):
        """Store the name parts and salary, derive the email, bump the counter."""
        self.nombre = nombre
        self.apellido = apellido
        self.email = nombre + "." + apellido + "@company.com"
        self.salario = salario
        Empleado.numero_de_empleados += 1

    def nombre_completo(self):
        """Return 'nombre apellido'."""
        return self.nombre + " " + self.apellido

    def obtnener_salario(self):
        """Return the current salary (method name keeps the original typo)."""
        return self.salario

    def aplicar_salario(self):
        """Apply the raise factor, truncating the result to an int."""
        self.salario = int(self.salario * self.monto_para_aumento)
# print(Empleado.numero_de_empleados)
developer = Empleado("Homero", "Valdovinos", 30000)
qa_tester = Empleado("roberto", "diaz", 10000)
# BUGFIX: the bare `print x` statements were Python 2 syntax and are a
# SyntaxError under Python 3; print(x) behaves the same on both.
print(developer)
print(qa_tester)
print(developer.__dict__)
|
"""Copyright (c) 2017 * Keith Cestaro
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
# Shared output file written by every iterate* helper; stays open for the
# whole process (never explicitly closed).
f1 = open("LOG.txt", "w+")
def listGenerate():
    """Prompt for a digit width (1-6) and kick off the letter generator.

    The table maps the width to the total count of sequences up to that
    length (26, 26+26^2=702, 26+...+26^3=18278, ...).
    """
    print("Please input the max value of digits you want the generator \
to create(1-6): ")
    userInput = int(input())
    print()
    maxRange = 0
    if userInput == 1:
        maxRange = 26
    elif userInput == 2:
        maxRange = 702
    elif userInput == 3:
        maxRange = 18278
    elif userInput == 4:
        maxRange = 475254
    elif userInput == 5:
        maxRange = 12356630
    elif userInput == 6:
        maxRange = 321272406
    else:
        print("Please input a valid/smaller range: ")
    loops = 1
    # NOTE(review): iterate6 is not visible in this part of the file --
    # presumably defined further down.  An invalid input leaves maxRange
    # at 0, so the call then does nothing.
    iterate6(loops, maxRange)
def iterate(loops, maxRange, lst, carry):
    """Emit single-letter suffixes a..z onto the shared LOG file.

    For loops = 1..26 this appends str(carry) + letter to ``lst`` and writes
    the joined (cumulative) contents of ``lst`` after every append, exactly
    as the original 26-branch if/elif chain did.  After 'z' the counter
    wraps to 0; if ``maxRange`` is still positive then, no branch matches
    and the loop spins -- preserved from the original.  ``lst`` is cleared
    before returning.

    IDIOM: the 26 duplicated branches are collapsed into one table-driven
    lookup; the externally observable behavior is unchanged.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    while maxRange > 0:
        if 1 <= loops <= 26:
            lst.append(str(carry) + letters[loops - 1])
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 1
            f1.write("".join(lst) + "\n")
    lst.clear()
def iterate2(loops, maxRange, lst, carry):
    """Second letter position: fan out to iterate() for each prefix letter.

    Each branch of the original 26-way chain invoked iterate(1, 26, ...)
    with the prefix extended by one letter and consumed 26 from maxRange;
    the trailing elif handled a bare 26-wide remainder.  Behavior and
    branch ordering are preserved; only the duplication is collapsed.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    while maxRange > 0:
        if 1 <= loops <= 26:
            iterate(1, 26, lst, str(carry) + letters[loops - 1])
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 26
        elif maxRange == 26:
            iterate(1, maxRange, [], "")
            maxRange -= 26
    lst.clear()
def iterate3(loops, maxRange, lst, carry):
    """Third letter position: fan out to iterate2() for each prefix letter.

    Collapses the original 26-way chain (each branch consumed 676 = 26^2
    from maxRange); the remainder branches for widths 26 and 702 are kept
    with their original ordering and (empty) carry arguments.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    while maxRange > 0:
        if 1 <= loops <= 26:
            iterate2(1, 676, lst, str(carry) + letters[loops - 1])
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 676
        elif maxRange == 26:
            iterate(1, maxRange, [], "")
            maxRange -= 26
        elif maxRange == 702:
            iterate2(1, maxRange, [], "")
            maxRange -= 702
    lst.clear()
def iterate4(loops, maxRange, lst, carry):
    """Fourth letter position: fan out to iterate3() for each prefix letter.

    Collapses the original 26-way chain (each branch consumed 17576 = 26^3
    from maxRange).  Unlike iterate3, the remainder branches here pass the
    carry through (the original used str(carry) + "") -- preserved.
    """
    letters = "abcdefghijklmnopqrstuvwxyz"
    while maxRange > 0:
        if 1 <= loops <= 26:
            iterate3(1, 17576, lst, str(carry) + letters[loops - 1])
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 17576
        elif maxRange == 26:
            iterate(1, maxRange, [], str(carry))
            maxRange -= 26
        elif maxRange == 702:
            iterate2(1, maxRange, [], str(carry))
            maxRange -= 702
        elif maxRange == 18278:
            iterate3(1, maxRange, [], str(carry))
            maxRange -= 18278
    lst.clear()
def iterate5(loops, maxRange, lst, carry):
    """Generate five-letter combinations by appending each letter a-z to
    *carry* and delegating to iterate4, or fall through to a shorter
    generator when maxRange equals one of the smaller run totals.

    Collapses the original 26-branch if/elif ladder: branch k appended
    chr(ord('a') + k - 1) and consumed a fixed budget of 456976 (26**4)
    combinations; the 'z' branch reset loops to 0 so the maxRange
    fall-through branches become reachable.
    """
    while maxRange > 0:
        if 1 <= loops <= 26:
            # 456976 == 26**4: combinations produced under each one-letter prefix.
            iterate4(1, 456976, lst, str(carry) + chr(ord("a") + loops - 1))
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 456976
        elif maxRange == 26:
            iterate(1, maxRange, [], str(carry))
            maxRange -= 26
        elif maxRange == 702:
            iterate2(1, maxRange, [], str(carry))
            maxRange -= 702
        elif maxRange == 18278:
            iterate3(1, maxRange, [], str(carry))
            maxRange -= 18278
        elif maxRange == 475254:
            iterate4(1, maxRange, [], str(carry))
            maxRange -= 475254
    lst.clear()
def iterate6(loops, maxRange):
    """Top-level dispatcher for six-letter generation: hand off to the
    generator whose total output count equals maxRange exactly (26, 702,
    18278, 475254 or 12356630 strings), otherwise walk the a-z prefixes
    one 26**5-sized chunk at a time via iterate5.

    Collapses the original 26-branch if/elif ladder. Branch order is
    preserved from the original: the exact-maxRange checks come first,
    then the per-letter loops branches; the 'z' branch reset loops to 0.
    """
    currentList = []
    while maxRange > 0:
        if maxRange == 26:
            iterate(1, maxRange, [], "")
            maxRange -= 26
        elif maxRange == 702:
            iterate2(1, maxRange, [], "")
            maxRange -= 702
        elif maxRange == 18278:
            iterate3(1, maxRange, [], "")
            maxRange -= 18278
        elif maxRange == 475254:
            iterate4(1, maxRange, [], "")
            maxRange -= 475254
        elif maxRange == 12356630:
            iterate5(1, maxRange, [], "")
            maxRange -= 12356630
        elif 1 <= loops <= 26:
            # 11881376 == 26**5: combinations produced under each one-letter prefix.
            iterate5(1, 11881376, currentList, chr(ord("a") + loops - 1))
            loops = 0 if loops == 26 else loops + 1
            maxRange -= 11881376
    currentList.clear()
listGenerate()
|
# Load the SMS Spam Collection corpus (tab-separated "label<TAB>message"
# rows), split it into parallel label/message lists, and combine them
# into a pandas DataFrame.
import nltk
# Read dataset
# 1) open()
# 2) pandas read_csv()
raw_data = open('../data/smsspamcollection/SMSSpamCollection').read()
print('\nraw_data[0:200]')
print(raw_data[0:200])
# Parse raw data into parsed array.
# Replacing tabs with newlines turns each "label\tmessage" row into two
# consecutive lines, so split('\n') yields labels and messages interleaved.
parsed_data = raw_data.replace('\t', '\n').split('\n')
print('\nparsed_data[0:10]')
print(parsed_data[0:10])
# assign parsed array into label list and msg list.
label_list = parsed_data[0::2] # start = 0, stop = end, step = 2 (even indices = labels)
print('\nlabel_list[0:5]')
print(label_list[0:5])
msg_list = parsed_data[1::2] # start = 1, stop = end, step = 2 (odd indices = messages)
print('\nmsg_list[0:5]')
print(msg_list[0:5])
# Now, combine the label list and message list into a pandas DataFrame.
import pandas as pd
print ('len(label_list):', len(label_list))
print ('len(msg_list):', len(msg_list))
# https://stackoverflow.com/questions/509211/understanding-slice-notation
# -1 is the last element.
print('label_list[-3:]', label_list[-3:]) # print last [-3], [-2], [-1]
# label_list is one element longer than msg_list — presumably the empty
# string split('\n') produces after the file's trailing newline — so the
# last label element is dropped. TODO confirm the input file ends with '\n'.
combined_df = pd.DataFrame ({
    'label': label_list[:-1], # stop the before last element (-1).
    'sms': msg_list
})
print('\ncombined_df.head():')
print(combined_df.head())
# Deduplicate an aligned FASTA file after normalizing ambiguous residues
# (J->L, B->N, Z->Q), then write the result in relaxed PHYLIP format.
# Usage: script.py <input_fasta_alignment> <output_phylip>
from Bio import Align, AlignIO, Seq
import sys

alignment = AlignIO.read(sys.argv[1], 'fasta')
seen_sequences = set()
unique_records = []
for record in alignment:
    # Map each ambiguity code to a concrete residue before comparing.
    normalized = str(record.seq).replace('J', 'L').replace('B', 'N').replace('Z', 'Q')
    if normalized not in seen_sequences:
        seen_sequences.add(normalized)
        record.seq = Seq.Seq(normalized)
        unique_records.append(record)
deduplicated = Align.MultipleSeqAlignment(unique_records)
# id_width=100 allows long record identifiers (relaxed PHYLIP).
with open(sys.argv[2], 'w') as out_handle:
    AlignIO.PhylipIO.PhylipWriter(out_handle).write_alignment(
        deduplicated, id_width=100
    )
|
def harm(n):
    """Return the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n.

    Iterative implementation: the original recursive version raised
    RecursionError for n near/above Python's recursion limit (~1000).
    Summation runs in increasing k order, matching the evaluation order
    of the original recursion bit-for-bit.

    Args:
        n: number of terms; must be >= 1.

    Raises:
        ValueError: if n < 1 (the original recursed forever / divided by
            zero for such inputs).
    """
    if n < 1:
        raise ValueError("n must be >= 1")
    return sum(1 / k for k in range(1, n + 1))


if __name__ == "__main__":
    print(harm(2))
    print(harm(100))
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
"""Function implementation"""
import logging
import sys
if sys.version_info.major < 3:
from fn_misp.lib import misp_2_helper as misp_helper
else:
from fn_misp.lib import misp_3_helper as misp_helper
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_misp.lib import common
PACKAGE= "fn_misp"
class FunctionComponent(ResilientComponent):
    """Resilient Circuits component implementing the 'misp_create_sighting'
    function: marks a value as sighted in a MISP instance."""
    def __init__(self, opts):
        """constructor provides access to the configuration options"""
        super(FunctionComponent, self).__init__(opts)
        self.opts = opts
        # Per-package config section (the [fn_misp] block of app.config).
        self.options = opts.get(PACKAGE, {})
    @handler("reload")
    def _reload(self, event, opts):
        """Configuration options have changed, save new values"""
        self.opts = opts
        self.options = opts.get(PACKAGE, {})
    @function("misp_create_sighting")
    def _misp_create_sighting_function(self, event, *args, **kwargs):
        """Create a MISP sighting for the 'misp_sighting' function input.

        Yields StatusMessage objects for workflow progress, then a
        FunctionResult {"success": True, "content": <sighting>} on
        success, or a FunctionError on any failure."""
        try:
            # Validates/extracts credentials from app.config; raises on bad config.
            API_KEY, URL, VERIFY_CERT = common.validate(self.options)
            # Get the function parameters:
            misp_sighting = kwargs.get("misp_sighting")  # text
            log = logging.getLogger(__name__)
            log.info("misp_sighting: %s", misp_sighting)
            yield StatusMessage("Setting up connection to MISP")
            proxies = common.get_proxies(self.opts, self.options)
            misp_client = misp_helper.get_misp_client(URL, API_KEY, VERIFY_CERT, proxies=proxies)
            yield StatusMessage(u"Marking {} as sighted".format(misp_sighting))
            sighting = misp_helper.create_misp_sighting(misp_client, misp_sighting)
            log.debug(sighting)
            yield StatusMessage("Sighting has been created")
            results = {
                "success": True,
                "content": sighting
            }
            # Produce a FunctionResult with the results
            yield FunctionResult(results)
        except Exception:
            # NOTE(review): the original exception details are swallowed here;
            # FunctionError() signals failure to the platform without context.
            yield FunctionError()
|
# Tabular Q-learning on the gym-sokoban 'Sokoban-small-v0' environment.
import gym
import griddly
import numpy as np
import gym_sokoban
import random
import time
env_name = 'Sokoban-small-v0'
env = gym.make(env_name)
ACTION_LOOKUP = env.unwrapped.get_action_lookup()
print("Created environment: {}".format(env_name))
# Q-table: 7000 states x 9 actions.
# NOTE(review): q_table is indexed as q_table[state, action] below, which
# assumes env.reset()/env.step() return an integer state index in
# [0, 7000) — verify for this environment.
q_table = np.zeros([7000,9])
# parameters (Q-learning hyperparameters)
alpha = 0.1     # learning rate
gamma = 0.7     # discount factor
epsilon = 0.15  # exploration probability (epsilon-greedy)
episode = 2000
# NOTE(review): duplicate of the ACTION_LOOKUP assignment above; redundant.
ACTION_LOOKUP = env.unwrapped.get_action_lookup()
count = 0
for i in range(1, episode):
    state = env.reset()
    # print(state)
    # break
    # epochs, reward = 0, 0
    done = False
    count += 1
    print("No. of episode",count)
    while not done:
        #if count > 900:
        #env.render(mode='human')
        # Epsilon-greedy: explore with probability epsilon, else exploit.
        if random.uniform(0, 1) < epsilon:
            action = env.action_space.sample() # Explore action space
        else:
            action = np.argmax(q_table[state])
            #action = q_table[state].index(q_val)
        next_state, reward, done, info = env.step(action)
        # Standard Q-learning update:
        # Q(s,a) <- (1-alpha)*Q(s,a) + alpha*(r + gamma * max_a' Q(s',a'))
        old_value = q_table[state, action]
        next_max = np.max(q_table[next_state])
        new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)
        q_table[state, action] = new_value
        state = next_state
    # Persist the Q-table after every episode (overwrites the same file).
    np.savetxt("Array.txt", q_table, fmt="%s")
print("Training finished.")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.