index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
4,600 | 7fd5e83d28e919e7b94cea290c6b4db3378938b6 | from fastapi import APIRouter, Depends, status, Response
from typing import List
import schemas, database
from sqlalchemy.orm import Session
import repository.blog as blog
from .oauth2 import get_current_user
# Router grouping all blog CRUD endpoints under /blog; every route requires
# an authenticated user via the get_current_user dependency.
# NOTE(review): the `id` path parameters shadow the builtin and are untyped
# (arrive as str) -- consider `id: int` for automatic validation.
router = APIRouter(
    prefix="/blog",
    tags=['Blog'])


@router.get('/', status_code=status.HTTP_200_OK, response_model=List[schemas.ShowBlog])
def all_blog(db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    # List every blog.
    return blog.all_blog(db)


@router.post('/', status_code=status.HTTP_201_CREATED)
def create(request:schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    # Create a new blog from the request payload.
    return blog.create(request, db)


@router.delete('/{id}', status_code=status.HTTP_200_OK)
def destroy(id, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    # Delete the blog with the given id.
    return blog.destroy(id, db)


@router.put('/{id}', status_code=status.HTTP_202_ACCEPTED)
def update(id, request: schemas.Blog, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    # Replace the blog with the given id with the request payload.
    return blog.update(id, request, db)


@router.get('/{id}', status_code=status.HTTP_200_OK, response_model=schemas.ShowBlog)
def show(id, response: Response, db: Session = Depends(database.get_db), current_user: schemas.User=Depends(get_current_user)):
    # Fetch a single blog; `response` lets the repository set a 404 status.
    return blog.show(id, response, db)
4,601 | f7d29dd1d990b3e07a7c07a559cf5658b6390e41 |
# Demo script: basic Python list operations.
# create a list
a = [2,3,4,5,6,7,8,9,10]
print(a)
# indexing (user-supplied index; out-of-range input raises IndexError)
b = int(input('Enter indexing value:'))
print('The result is:',a[b])
print(a[8])
print(a[-1])  # negative index counts from the end
# slicing
print(a[0:3])
print(a[0:])
# concatenation
b=[20,30]
print(a+b)
# Repetition
print(b*3)
# updating
print(a[2])
a[2]=100
print(a)
# membership
print(5 in a)
# comparison
c=[2,3,4]
# NOTE(review): `c` is defined but unused -- the comparisons below test
# a against b; possibly a == c was intended.
print(a==b)
print(a!=b)
# slice
a=[9,8,7,6,5,4]
print(a[0:3])
print(a[:4])
print(a[1:])
print(a[:])
print(a[2:2])    # empty slice
print(a[0:6:2])  # step 2
print(a[0:6:3])  # step 3
'''#a.apppend(element)
a=[1,2,3,4,5]
b=int(input('Enter number to append:'))
a.append(b)
print(a)
#insert(index,element)
a.insert(0,0)
print(a)
#a.extend(c)
c=[6,7,8,9]
a.extend(c)
print(a)
#one more'''
|
4,602 | 62fe29b0ac4dee8fec4908cf803dba9bd7e92fa5 | from tkinter import *
from get_train_set import *
from datetime import *
# Offset (in weeks) the user has navigated away from the current week.
counter = 0
# Current ISO week number.
week = int ( datetime.today().isocalendar() [1] )

def update (a,b):
    """Refresh the three day panels for the selected week.

    a -- base week number; b -- +1/-1 for next/previous week, or the
    string 'x' to reset the view back to the current week.
    """
    global counter
    if b == 'x':
        b = 0
        a = week
        counter = 0
    else:
        counter += b
        a += counter
    # train_set is indexed as [text0, img0, text1, img1, text2, img2].
    train_set = get_train_set(a)
    txtLbl1.configure (text = train_set[0])
    txtLbl2.configure (text = train_set[2])
    txtLbl3.configure (text = train_set[4])
    img1.configure (file = train_set[1])
    img2.configure (file = train_set[3])
    img3.configure (file = train_set[5])
    curr_week.configure (text = str(a) +'th week')
window = Tk()
window.geometry('435x250')
window.title ('Training schedule')
window.configure(background = 'White')
window.resizable (0,0)
day1 = Label (window, text = 'MON', font = ('Arial', 20), bg = 'White')
day2 = Label (window, text = 'WED',font = ('Arial', 20), bg = 'White')
day3 = Label (window, text = 'FRI', font = ('Arial', 20),bg = 'White')
img1 = PhotoImage (file = 'default.png', width = 100, height = 100)
img2 = PhotoImage (file = 'default.png', width = 100, height = 100)
img3 = PhotoImage (file = 'default.png', width = 100, height = 100)
imgLbl1 = Label (window, image = img1)
imgLbl2 = Label (window, image = img2)
imgLbl3 = Label (window, image = img3)
txtLbl1 = Label (window, font = ('Arial', 12), bg = 'White')
txtLbl2 = Label (window, font = ('Arial', 12), bg = 'White')
txtLbl3 = Label (window, font = ('Arial', 12), bg = 'White')
img_home = PhotoImage (file = 'home (3).gif')
btn_home = Button (window, image = img_home, relief = 'ridge', command = lambda: update(week,'x'))
img_next = PhotoImage (file = 'next.gif')
btn_next = Button (window, image = img_next, relief = 'ridge', command = lambda: update (week,1))
img_prev = PhotoImage (file = 'prev.gif')
btn_prev = Button (window, image = img_prev, relief = 'ridge', command = lambda: update (week,-1))
curr_week = Label (window, text = str(week) + 'th week', font = ('Arial', 15),bg = 'White')
day1.place (x = 30, y = 10)
day2.place (x = 180, y = 10)
day3.place (x = 345, y = 10)
imgLbl1.place (x = 10, y = 50)
imgLbl2.place (x = 160, y = 50)
imgLbl3.place (x = 320, y = 50)
txtLbl1.place (x = 35, y = 160)
txtLbl2.place (x = 200, y = 160)
txtLbl3.place (x = 345, y = 160)
can = Canvas (window, width = 435, height = 2, bg = '#4286f4')
can.create_line (0,0, 435, 0, width = 2, fill = '#4286f4')
can.place (x = 0, y = 185)
btn_home.place (x = 15, y = 200)
btn_prev.place (x = 335, y = 200)
btn_next.place (x = 381, y = 200)
curr_week.place (x = 160, y = 190)
update(week,0)
window.mainloop()
|
4,603 | 7474e60feff61c4ef15680ecc09d910e6e1d6322 | def IsPn(a):
temp = (24*a+1)**0.5+1
if temp % 6 == 0:
return True
else:
return False
def IsHn(a):
    """Return True when `a` is a hexagonal number H(n) = n(2n-1).

    `a` is hexagonal iff 8*a + 1 is a perfect square whose root r
    satisfies (r + 1) % 4 == 0.
    """
    disc = 8 * a + 1
    root = int(round(disc ** 0.5))
    # Exactness check first -- the original relied on float modulo of an
    # inexact square root.
    if root * root != disc:
        return False
    return (root + 1) % 4 == 0
def CalTn(a):
    """Return the a-th triangular number T(a) = a*(a+1)/2.

    Uses floor division so the result stays an exact int (a*(a+1) is
    always even); the original's `/` returned a float, which loses
    precision for large a.
    """
    return (a**2 + a) // 2
# Project Euler 45: find the first number after T(285) = 40755 that is
# simultaneously triangular, pentagonal and hexagonal.
i = 286
while 1:
    temp = CalTn(i)
    if IsHn(temp) and IsPn(temp):
        break
    i += 1
# Python 3 print function -- the original used the Python 2 print
# statement (`print i,temp`), a SyntaxError on Python 3.
print(i, temp)
4,604 | f56978d5738c2f8cb4ed5ce4f11d3aae6a9689b1 | import datetime
class Schedule:
    """A weekly availability grid: 7 days x half-hour blocks between
    `start` and `end`; array[day][block] is True when the slot is free."""

    def __init__(self, start, end, name, other): # Constructor
        self.start = self.str_convert(start) # Schedule start time (ex. 9:00)
        self.end = self.str_convert(end) # Schedule end time (ex. 22:00)
        self.name = name # Schedule name (ex. member name, final schedule, etc)
        self.other = other # Schedule exceptions/"other"
        self.array = self.create_array() # Schedule array (2D array of days of week (7) x half hour blocks)

    def str_convert(self, str_time):
        # Converts start/end time ('%H:%M' string) to a datetime.time;
        # non-strings are returned unchanged.
        if isinstance(str_time, str):
            str_time = datetime.datetime.strptime(str_time, '%H:%M')
            return datetime.time(str_time.hour, str_time.minute)
        return str_time

    def create_array(self):
        # Generate array from number of (30 minute) blocks
        num_blocks = self.calculate_num_blocks(self.start, self.end)
        return [[True for x in range(num_blocks)] for y in range(7)]

    @staticmethod
    def calculate_num_blocks(start, end):
        # Determining size of array: get difference
        total_hrs = end.hour - start.hour
        total_mins = end.minute - start.minute
        # Determining size of array: in 30 min blocks (rounded).
        # NOTE(review): total_mins can be negative (e.g. 9:30-22:00),
        # which shortens the grid by one block -- confirm intended.
        num_half_hr = int(total_mins/30)
        num_blocks = 2 * total_hrs + num_half_hr
        return num_blocks

    # def get_time
    def prep_visualize(self):
        """Print the banner and return (day letters, half-hour time labels)."""
        # Banner
        print("\n######### VISUALIZING WEEK: " + self.name + " #########")
        print(self.start, "-", self.end, "\n")
        num_blocks = self.calculate_num_blocks(self.start, self.end)
        days = ["S", "M", "T", "W", "R", "F", "S" ]
        times = []
        # Fill times column (from datetime obj)
        # Convert to datetime.datetime object, add timedelta, convert back - arbitrary datetime.date(1, 1, 1)
        dtdt = datetime.datetime.combine(datetime.date(1, 1, 1), self.start)
        for i in range(num_blocks):
            num_blocks_i = datetime.timedelta(minutes=30*i)
            combined = (dtdt + num_blocks_i).time()
            times.append(combined.strftime("%H:%M"))
        return days, times

    def visualize(self):
        """Print the week as a text grid: free slots blank, busy slots 'x'."""
        days, times = self.prep_visualize()
        # HEADER:
        print("#####", end=" ")
        for d in days: print("(" + d + ") ", end="")
        print("#####")
        # SCHEDULE: one row per half-hour block, one column per day.
        for t in range(len(times)):
            print(times[t], end=" ")
            for d in range(7):
                slot = self.array[d][t]
                if slot is True: slot = " "
                elif slot is False: slot = " x "
                print(slot, end=" ")
            print(times[t])
        print()

    def print_other(self):
        # One-line dump of the free-text "other"/exceptions field.
        print(self.name + "\t ", self.other.replace("\n", "; "))
class ExSchedule(Schedule):
    """Schedule variant tracking each member's availability per slot:
    exarray[day][block][member] is True when that member is free."""

    def __init__(self, start, end, num_members, list_membs):
        Schedule.__init__(self, start, end, "ExSched", None)
        self.num_members = num_members   # members tracked per slot
        self.list_membs = list_membs     # display string of member names
        self.exarray = self.create_exarray()

    def create_exarray(self):
        # 7 days x num_blocks x num_members, all initially available.
        num_blocks = Schedule.calculate_num_blocks(self.start, self.end)
        return [[[True for z in range(self.num_members)] for x in range(num_blocks)] for y in range(7)]

    def visualize(self):
        """Print the grid with one [-*...] cell per slot
        ('-' = member available, '*' = unavailable)."""
        days, times = Schedule.prep_visualize(self)
        print("Members: "+ self.list_membs[:-2])  # trim trailing separator
        # HEADER: center each day letter over its member-wide cell.
        print("##### ", end="")
        # print(days)
        # print(times)
        for d in days:
            num_spaces = len(self.exarray[0][1]) - 1
            left_half = int(num_spaces / 2)
            right_half = num_spaces - left_half
            print("(", end="")
            print(''.join([" " for x in range(left_half)]), end=d)
            print(''.join([" " for x in range(right_half)]), end=")")
        print(" #####")
        # SCHEDULE:
        for i in range(len(times)): # i: 0-26 (9:00) = m: 0-26 ([T,T,T])
            print(times[i], end=" ")
            for d in range(len(self.exarray)): # d: 0-6 (sun)
                array = self.exarray[d][i]
                print("[", end="")
                for memb_avail in array:
                    print("-", end="") if memb_avail is True else print("*", end="")
                print("]", end="")
            print(" ", end=times[i]+"\n")
|
4,605 | 3923aed29006b4290437f2b0e11667c702da3241 | # %%
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
import matplotlib.patches as mpatches
import time
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from imblearn.pipeline import make_pipeline as imbalanced_make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report, confusion_matrix, plot_confusion_matrix
from collections import Counter
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split, cross_val_score, GridSearchCV, cross_val_predict
from sklearn.preprocessing import RobustScaler
from scipy.stats import norm
import keras
from keras import backend as K
from keras.models import Sequential
from keras.layers import Activation, Dense
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.callbacks import ModelCheckpoint
import itertools
# %%
dataset = pd.read_csv('./dataset/creditcard.csv')
dataset.head()
# %%
dataset.describe()
# %%
robustScaler = RobustScaler()
dataset['scaled_amount'] = robustScaler.fit_transform(
dataset['Amount'].values.reshape(-1, 1))
dataset['scaled_time'] = robustScaler.fit_transform(
dataset['Time'].values.reshape(-1, 1))
# %%
dataset.drop(['Amount', 'Time'], axis=1, inplace=True)
dataset.head()
# %%
X = dataset.drop(['Class'], axis=1)
Y = dataset['Class']
# %%
# Stratified split keeps the fraud/normal class ratio in every fold.
# NOTE: passing random_state without shuffle=True is rejected by modern
# scikit-learn (random_state has no effect when shuffle=False), so shuffle
# explicitly to keep the split reproducible.
SKfold = StratifiedKFold(shuffle=True, random_state=42)
# Only the last fold's split survives the loop and is used below.
for train_index, test_index in SKfold.split(X, Y):
    og_X_train, og_X_test = X.iloc[train_index], X.iloc[test_index]
    og_Y_train, og_Y_test = Y.iloc[train_index], Y.iloc[test_index]
# %%
og_X_train = og_X_train.values
og_X_test = og_X_test.values
og_Y_train = og_Y_train.values
og_Y_test = og_Y_test.values
# %%
dataset = dataset.sample(frac=1, random_state=42)
fraud = dataset.loc[dataset['Class'] == 1]
normal = dataset.loc[dataset['Class'] == 0][:492]
nd_dataset = pd.concat([fraud, normal])
nd_dataset = nd_dataset.sample(frac=1, random_state=42)
nd_dataset.head()
# %%
nd_X = nd_dataset.drop("Class", axis=1)
nd_Y = nd_dataset["Class"]
# %%
nd_Xtrain, nd_Xtest, nd_Ytrain, nd_Ytest = train_test_split(
nd_X, nd_Y, random_state=42, test_size=0.2)
nd_Xtrain = nd_Xtrain.values
nd_Xtest = nd_Xtest.values
nd_Ytrain = nd_Ytrain.values
nd_Ytest = nd_Ytest.values
# %%
n_inputs = nd_Xtrain.shape[1]
undersample_model = Sequential([
Dense(n_inputs, input_shape=(n_inputs,), activation="relu"),
Dense(32, activation="relu"),
Dense(2, activation="softmax")
])
# %%
undersample_model.summary()
# %%
undersample_model.compile(
Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=["accuracy"])
modelcheckpoint = ModelCheckpoint(
"models/undersample_model.h5", save_best_only=True, monitor="val_acc")
undersample_model.fit(nd_Xtrain, nd_Ytrain, validation_split=0.2, epochs=20,
batch_size=25, shuffle=True, verbose=2, callbacks=[modelcheckpoint])
# %%
undersample_pred = undersample_model.predict(og_X_test, verbose=2)
# %%
undersample_pred_classes = undersample_model.predict_classes(
og_X_test, verbose=2)
# %%
confmat = confusion_matrix(og_Y_test, undersample_pred_classes)
print(confmat)
# %%
def plotTensorflowConfmat(confmat, classes):
    """Render a confusion matrix as a matplotlib heatmap with cell labels.

    confmat -- square matrix (e.g. from sklearn confusion_matrix)
    classes -- axis tick labels, one per class
    """
    plt.imshow(confmat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    plt.tight_layout()
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    # Write each count into its cell ('.2f' renders ints as e.g. '85294.00').
    for i, j in itertools.product(range(confmat.shape[0]), range(confmat.shape[1])):
        plt.text(j, i, format(confmat[i, j], '.2f'),
                 horizontalalignment='center', color='black')
# %%
classes = ["Normal", "Fraud"]
plotTensorflowConfmat(confmat, classes)
# %%
sm = SMOTE(sampling_strategy="minority", random_state=42)
sm_X_train, sm_Y_train = sm.fit_sample(og_X_train, og_Y_train)
# %%
sm_X_train.shape
# %%
n_inputs = sm_X_train.shape[1]
smote_model = Sequential([
Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),
Dense(32, activation='relu'),
Dense(2, activation='softmax')
])
# %%
smote_model.summary()
# %%
smote_model.compile(
Adam(lr=0.001), loss='sparse_categorical_crossentropy', metrics=['accuracy'])
modelcheckpoint = ModelCheckpoint(
'models/smote_model.h5', save_best_only=True, monitor='val_acc')
smote_model.fit(sm_X_train, sm_Y_train, validation_split=0.2, batch_size=25,
epochs=20, verbose=2, shuffle=True, callbacks=[modelcheckpoint])
# %%
smote_model.save('models/smote_model.h5')
# %%
smote_pred_classes = smote_model.predict_classes(og_X_test)
# %%
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
# %%
plotTensorflowConfmat(confmat, classes)
# %%
sm2 = SMOTE(sampling_strategy="minority", random_state=42)
# %%
sm2_X_train, sm2_Y_train = sm2.fit_sample(og_X_train, og_Y_train)
sm2_X_train = pd.DataFrame(sm2_X_train)
sm2_X_train.head()
# %%
sm2_Y_train = pd.DataFrame(sm2_Y_train, columns=["Class"])
sm2_Y_train.head()
# %%
smote_df = pd.concat([sm2_X_train, sm2_Y_train], axis=1)
smote_df.head()
# %%
smote_df = smote_df.sample(frac=1, random_state=42)
# %%
corr = smote_df.corr()
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size': 20})
plt.show()
# %%
corr["Class"].sort_values()
# %%
negative_corr = [13, 11, 9, 15]
positive_corr = [3, 10]
# %%
f, axes = plt.subplots(ncols=4, figsize=(20, 4))
f.suptitle("Negative Corr")
for i, feature in enumerate(negative_corr):
sns.boxplot(x="Class", y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
# %%
f, axes = plt.subplots(ncols=2, figsize=(20, 4))
f.suptitle("Positive Corr")
for i, feature in enumerate(positive_corr):
sns.boxplot(x="Class", y=feature, data=smote_df, ax=axes[i])
axes[i].set_title(feature)
# %%
for i, feature in enumerate(negative_corr):
fraud_dist = smote_df[feature].loc[smote_df["Class"] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75-q25
cutoff = iqr*1.5
upper_limit, lower_limit = q75+cutoff, q25-cutoff
outlier_list = [x for x in fraud_dist if x <
lower_limit or x > upper_limit]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f"outliers removed {len(outlier_list)}")
# %%
for i, feature in enumerate(positive_corr):
fraud_dist = smote_df[feature].loc[smote_df["Class"] == 1].values
q25, q75 = np.percentile(fraud_dist, 25), np.percentile(fraud_dist, 75)
iqr = q75-q25
cutoff = iqr*1.5
upper_limit, lower_limit = q75+cutoff, q25-cutoff
outlier_list = [x for x in fraud_dist if x <
lower_limit or x > upper_limit]
smote_df = smote_df.drop(smote_df[(smote_df[feature] > upper_limit) | (
smote_df[feature] < lower_limit)].index)
print(f"outliers removed {len(outlier_list)}")
# %%
smote_df.shape
# %%
smote_X_train = smote_df.drop(["Class"], axis=1)
smote_Y_train = smote_df["Class"]
# %%
n_inputs = smote_X_train.shape[1]
smote_model = Sequential([
Dense(n_inputs, input_shape=(n_inputs,), activation='relu'),
Dense(64, activation='relu'),
Dense(32, activation='relu'),
Dense(32, activation='relu'),
Dense(2, activation='softmax')
])
# %%
smote_model.summary()
# %%
smote_model.compile(
Adam(lr=0.001), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
modelcheckpoint = ModelCheckpoint(
"models/smote_outliers_removed.h5", save_best_only=True, monitor="val_acc")
smote_model.fit(smote_X_train, smote_Y_train, validation_split=0.2,
shuffle=True, batch_size=25, epochs=20, callbacks=[modelcheckpoint])
# %%
smote_model.save("models/smote_outliers_removed.h5")
# %%
smote_pred_classes = smote_model.predict_classes(og_X_test)
# %%
confmat = confusion_matrix(og_Y_test, smote_pred_classes)
print(confmat)
# %%
classes = ["normal", "fraud"]
plotTensorflowConfmat(confmat, classes)
# %%
|
4,606 | 97637e2114254b41ef6e777e60b3ddab1d4622e8 | from django.forms import ModelForm
from contactform.models import ContactRequest
class ContactRequestForm(ModelForm):
    """ModelForm for submitting a ContactRequest.

    Django raises ImproperlyConfigured when a ModelForm's Meta defines
    neither `fields` nor `exclude`, so expose all model fields explicitly
    to preserve the previously implicit behavior.
    """

    class Meta:
        model = ContactRequest
        fields = '__all__'
|
4,607 | cca543f461724c3aac8fef23ef648883962bd706 | from os import getenv
# UDP listener configuration, overridable via environment variables.
LISTEN_IP = getenv('LISTEN_IP', '0.0.0.0')
LISTEN_PORT = int(getenv('LISTEN_PORT', 51273))
LISTEN_ADDRESS = LISTEN_IP, LISTEN_PORT
# Optional payload echoed back to the sender as a delivery confirmation
# (stored as bytes; None/empty disables the echo).
CONFIRMATION = getenv('CONFIRMATION')
if CONFIRMATION:
    CONFIRMATION = CONFIRMATION.encode()
class UDPProtocol:
    """Asyncio datagram protocol that hands every packet to a consumer.

    When the module-level CONFIRMATION payload is configured, it is echoed
    back to the sender after each packet is consumed.
    """

    def __init__(self, consumer):
        self.consumer = consumer
        self.transport = None

    def connection_made(self, transport):
        # asyncio hands us the transport once the endpoint is bound.
        self.transport = transport

    def connection_lost(self, exc):
        # Nothing to release; the consumer owns all state.
        pass

    def datagram_received(self, packet, address):
        # WARNING: some kind of filtering should be there for the real app
        self.consumer.consume_packet(packet)
        if not CONFIRMATION:
            return
        self.transport.sendto(CONFIRMATION, address)

    def start(self):
        """Bind the UDP endpoint on the consumer's event loop."""
        event_loop = self.consumer.loop
        endpoint = event_loop.create_datagram_endpoint(
            lambda: self, LISTEN_ADDRESS, reuse_port=True)
        event_loop.run_until_complete(endpoint)

    def stop(self):
        self.transport.close()
|
4,608 | 0a7a95755924fd264169286cc5b5b7587d7ee8e4 | import turtle
import math
from tkinter import *
#活性边表节点:
class AetNode(object):
    """One edge record of the active-edge table (scanline fill).

    x  -- current x intersection of the edge with the scanline
    tx -- x increment per scanline (inverse slope)
    my -- y coordinate at which the edge ends
    """

    def __init__(self, x, tx, my):
        self.x, self.tx, self.my = x, tx, my

    def op(self):
        """Sort key: the edge's current x value."""
        return self.x
class AetList(object):
    """Bucket of new edges starting at scanline `y` (new-edge-table row)."""

    def __init__(self, y):
        self.y = y      # scanline this bucket belongs to
        self.numy = 0   # edges extending upward from vertices at y
        self.l = []     # AetNode entries starting at this scanline
def findRange(point):
    """Return (min_y, max_y) over a list of [x, y] vertices.

    Rewritten with the builtin min/max instead of the manual scan loop.
    """
    ys = [p[1] for p in point]
    return (min(ys), max(ys))
def printNewEegeList(newEdgeTable):
    # Debug helper: dump every new-edge-table bucket and its edges
    # ("新边表是" = "the new edge table is").
    # NOTE(review): original indentation was lost; the separator is printed
    # per bucket here -- confirm against the source.
    print("新边表是:")
    for i in newEdgeTable:
        print(i.y)
        for j in i.l:
            print((j.x,j.tx,j.my))
        print("__________________________________")
def createNewEdgeTable(point):
    """Build the new-edge table (NET) for a 6-vertex polygon.

    Returns (newEdgeList, Y) where newEdgeList is a list of AetList
    buckets (one per scanline that has starting edges) and Y is the list
    of distinct vertex y values.
    """
    miny,maxy=findRange(point)
    # Collect the distinct y values of the vertices.
    Y = []
    for i in point:
        Y.append(i[1])
    Y = set(Y)
    Y = list(Y)
    # Build the new-edge table, one scanline at a time.
    newEdgeList = []
    y=miny
    while y <=maxy:
        if y in Y:
            # Indices of all vertices lying on this scanline.
            print(y)
            templist=[]
            for i in range(0, 6):
                if point[i][1] == y:
                    templist.append(i)
            print(templist)
            print("一次创建新边表")
            lists = AetList(y)
            for temp in templist:
                # Neighboring vertices of vertex `temp` in the 6-gon.
                index1 = (temp + 7) % 6
                index2 = (temp + 5) % 6
                print(point[temp][0],point[temp][1])
                print(point[index1][0],point[index1][1])
                print(point[index2][0],point[index2][1])
                print("+++++++++++++++++++++")
                # lists = AetList(y)
                # Only edges going upward (toward larger y) are recorded;
                # tx is the inverse slope (0 for horizontal edges).
                if point[index1][1] > y:
                    lists.numy+=1
                    if point[index1][1] - point[temp][1]==0:
                        node = AetNode(point[temp][0],0,point[index1][1])
                    else:
                        node = AetNode(point[temp][0],
                                       ((point[index1][0] - point[temp][0]) / (point[index1][1] - point[temp][1])),
                                       point[index1][1])
                    lists.l.append(node)
                if point[index2][1] > y:
                    lists.numy+=1
                    if point[index2][1] - point[temp][1]==0:
                        node = AetNode(point[temp][0], 0, point[index2][1])
                    else:
                        node = AetNode(point[temp][0],
                                       ((point[index2][0] - point[temp][0]) / (point[index2][1] - point[temp][1])),
                                       point[index2][1])
                    lists.l.append(node)
            # Keep the bucket sorted by x; only store non-empty buckets.
            if len(lists.l)!=0:
                lists.l.sort(key=AetNode.op)
                if len(templist)>1:
                    lists.numy-=1
                newEdgeList.append(lists)
        y+=1
    printNewEegeList(newEdgeList)
    return (newEdgeList,Y)
def draw(x1,y1,x,y):
    # Draw one line segment from (x1, y1) to (x, y) with the turtle pen
    # (pen lifted for the move to the start point).
    turtle.penup()
    turtle.goto(x1,y1)
    turtle.pendown()
    turtle.goto(x,y)
def run():
    """Read the six polygon vertices from the entry widgets, draw the
    outline and scan-fill it with horizontal spans."""
    turtle.screensize(1920,1080)
    turtle.penup()
    turtle.hideturtle()
    point=[]
    # point=[[20,20],[50,10],[110,30],[110,80],[50,50],[20,70]]
    # point=[[-10,-10],[10,-10],[15,0],[10,10],[-10,10],[-15,0]]
    temp = [float(x11.get()), float(y1.get())]
    point.append(temp)
    temp = [float(x2.get()), float(y2.get())]
    point.append(temp)
    temp = [float(x3.get()), float(y3.get())]
    point.append(temp)
    temp = [float(x4.get()), float(y4.get())]
    point.append(temp)
    temp = [float(x5.get()), float(y5.get())]
    point.append(temp)
    temp = [float(x6.get()), float(y6.get())]
    point.append(temp)
    # NOTE(review): this line discards the user input collected above and
    # always fills the hard-coded demo polygon -- looks like leftover
    # debug code.
    point = [[20, 20], [50, 10], [110, 30], [110, 80], [50, 50], [20, 70]]
    # Draw the polygon outline.
    for i in point:
        turtle.goto(i[0],i[1])
        turtle.pendown()
    turtle.goto(point[0][0],point[0][1])
    # Build the new-edge table once, then sweep scanlines bottom to top.
    newEdgeTable,Y=createNewEdgeTable(point)
    miny,maxy=findRange(point)
    y=miny
    acativeList=[]
    while y<=maxy:
        # Merge edges that start at this scanline into the active table.
        ynum=0
        if y in Y:
            for i in newEdgeTable:
                if i.y==y:
                    for j in i.l:
                        acativeList.append(j)
                    ynum=i.numy
                    break
        acativeList.sort(key=AetNode.op)
        for j in acativeList:
            print((j.x,j.tx,j.my))
        print("****************")
        # Fill spans between alternating pairs of edge intersections,
        # with vertex crossings compensated via ynum.
        i=0
        flag=True
        while i<len(acativeList)-1:
            x1=acativeList[i].x
            temp=[acativeList[i+1].x,y]
            if temp in point and ynum>=1:
                ynum-=1
            else:
                i+=1
            if flag:
                draw(x1,y,temp[0],y)
            flag=not flag
        # Advance the active edge table: drop finished edges, step x by tx.
        newacativeList=[]
        for i in acativeList:
            if i.my>y:
                i.x+=i.tx
                newacativeList.append(i)
        acativeList=newacativeList
        y+=1
    turtle.mainloop()
tk=Tk()
tk.title("扫描填充算法:by 高谦")
Label(tk,text="输入顶点:").grid(row=0)
Label(tk,text="1:").grid(row=1)
Label(tk,text="2:").grid(row=2)
Label(tk,text="3:").grid(row=3)
Label(tk,text="4:").grid(row=4)
Label(tk,text="5:").grid(row=5)
Label(tk,text="6:").grid(row=6)
Label(tk,text="例:\n\n").grid(row=9)
Label(tk,text="(20,20),(50,10)\n(110,30),(110,80)\n(50,50),(20,70)").grid(row=9,column=1)
Label(tk,text="(-10,-10),(10,-10)\n(15,0),(10,10)\n(-10,10),(-15,0)").grid(row=9,column=2)
x11=Entry(tk)
x2=Entry(tk)
x3=Entry(tk)
x4=Entry(tk)
x5=Entry(tk)
x6=Entry(tk)
x11.grid(row=1,column=1)
x2.grid(row=2,column=1)
x3.grid(row=3,column=1)
x4.grid(row=4,column=1)
x5.grid(row=5,column=1)
x6.grid(row=6,column=1)
y1=Entry(tk)
y2=Entry(tk)
y3=Entry(tk)
y4=Entry(tk)
y5=Entry(tk)
y6=Entry(tk)
y1.grid(row=1,column=2,padx=5,pady=5)
y2.grid(row=2,column=2,padx=5,pady=5)
y3.grid(row=3,column=2,padx=5,pady=5)
y4.grid(row=4,column=2,padx=5,pady=5)
y5.grid(row=5,column=2,padx=5,pady=5)
y6.grid(row=6,column=2,padx=5,pady=5)
Button(tk,text="扫描填充",width=10,command=run).grid(row=7,column=1)
Button(tk,text="退出程序",width=10,command=tk.quit).grid(row=7,column=2)
tk.mainloop() |
4,609 | a8fb8ac3c102e460d44e533b1e6b3f8780b1145d | # Downloads images from http://www.verseoftheday.com/ and saves it into a DailyBibleVerse folder
import requests, os, bs4
# Save the daily verse image into <desktop>/DailyBibleVerse, picking the
# first unused verse<N>.jpg filename.
desktop = 'c:\\users\\patty\\desktop'  # modify location where you want to create the folder
os.chdir(desktop)
folder = os.path.join(desktop, 'DailyBibleVerse')
if not os.path.isdir(folder):
    os.makedirs(folder)

# Download the page and locate the verse image.
res = requests.get('http://www.verseoftheday.com/')
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, "html.parser")
#BibleCaption= soup.select('#featured .bilingual-left') #If you want verse use this
#text= BibleCaption[0].getText()
BibleElem = soup.select('#tv-image-wrapper img')  # searching the image
BibleUrl = BibleElem[0].get('src')

# First free sequential filename.
num = 1
while os.path.exists(os.path.join(folder, 'verse%s.jpg' % num)):
    num += 1

res = requests.get(BibleUrl)
# Fail loudly rather than silently writing an HTML error page to disk.
res.raise_for_status()
# `with` guarantees the file is closed even if a chunk write fails (the
# original leaked the handle on error).
with open(os.path.join(folder, 'verse' + str(num) + '.jpg'), 'wb') as image_file:
    for chunk in res.iter_content(100000):
        image_file.write(chunk)
|
4,610 | 53519c704ca9aff62140f187d4246208350fa9ba | # Generated by Django 2.1.2 on 2018-11-05 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: drops OptionVoting.totalVotes and adds
    # MCQOption.totalVotes (default 0). NOTE(review): there is no data
    # migration here, so existing vote counts are not carried over.

    dependencies = [
        ('PDPAPI', '0011_auto_20181105_1021'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='optionvoting',
            name='totalVotes',
        ),
        migrations.AddField(
            model_name='mcqoption',
            name='totalVotes',
            field=models.IntegerField(default=0),
        ),
    ]
|
4,611 | 63e5ead200fb2884d93f19e7d9b8dc76c7f4f0e3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Python version 3.8.5
#
# Author Maria Catharina van Veen
#
# Purpose To provide users with a tool to create
# or edit an html file.
#
# Tested OS This code was written and tested to
# work with Windows 10.
import os
from tkinter import *
import tkinter as tk
from tkinter.scrolledtext import ScrolledText
import WebPageGeneratorGui as gui
import WebPageGeneratorFunc as func
class MainWindow(Frame):
    """Top-level application frame for the Web Page Generator."""

    def __init__(self, root):
        Frame.__init__(self, root)
        self.root = root
        self.root.title("Web Page Generator")
        # Custom attribute (not a Tk option) -- presumably read by the GUI
        # module as well; verify against WebPageGeneratorGui.
        self.root.bgcolor = "#AA0000"
        self.root.config(bg = self.root.bgcolor)
        # Widget construction is delegated to the GUI module.
        gui.loadGUI(self)
if __name__ == "__main__":
root = Tk()
app = MainWindow(root)
root.mainloop()
|
4,612 | d625e6724a3fe077a6f80b6de6b1f5bb0b95d47d | import requests
from bs4 import BeautifulSoup
import json
# Fetch the Game of Thrones record (with embedded episodes) from the
# TVmaze API and pretty-print it to got_info.json.
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
url = 'http://api.tvmaze.com/singlesearch/shows?q=game+of+throne&embed=episodes'
response = requests.get(url, headers = headers)
# Surface HTTP errors instead of silently writing an error body.
response.raise_for_status()
content = json.loads(response.text)
# ensure_ascii=False keeps non-ASCII characters readable, so the file must
# be written as UTF-8 explicitly (the platform default may differ); `with`
# guarantees the handle is closed.
with open('got_info.json', 'w', encoding='utf-8') as f:
    json.dump(content, f, ensure_ascii=False, indent=2)
# print(cele_info.split(' '))
4,613 | 6c349b7b4d82b37ec1b1ff8e0d35a3557ed1af67 | def calculaEuclidiana(obj1,obj2):
soma = 0
for I in range(len(obj1)):
soma += (obj1[I] - obj2[I])**2
return soma ** 0.5
def calculaMinkowski(obj1, obj2, p):
    """Minkowski distance of order p between equal-length sequences.

    p = 2 -> Euclidean distance; p = 1 -> Manhattan distance.
    """
    total = sum(abs(obj1[k] - obj2[k]) ** p for k in range(len(obj1)))
    return total ** (1 / p)
def delta(obj1, obj2):
    """Indicator for a comparable attribute pair: 1 when both values are
    present, 0 when either is missing (None or the string 'None')."""
    missing = (None, 'None')
    return 0 if obj1 in missing or obj2 in missing else 1
def calculaMinkowskiNormalizada(obj1, obj2, p):
    """Minkowski distance of order p, normalized by the number of
    attribute pairs in which both values are present.

    Pairs with a missing value (None / 'None', per `delta`) are skipped
    entirely -- including them in the power sum would raise a TypeError
    on None arithmetic. Returns 0 when no comparable pair exists, which
    also avoids a ZeroDivisionError.
    """
    soma = 0
    somaDelta = 0
    for I in range(len(obj1)):
        if delta(obj1[I], obj2[I]):
            somaDelta += 1
            soma += (abs(obj1[I] - obj2[I])) ** p
    if somaDelta == 0:
        return 0
    return (soma ** (1 / p)) / somaDelta
# def calculaMahalanobis()
# def calculaMahalanobis()
# Sample objects: dicts keyed by attribute index; obj1[2] is missing
# (None) to exercise the normalized distance's missing-value handling.
obj1 = {}
obj1[0] = 2
obj1[1] = -1
obj1[2] = None
obj1[3] = 0
# print("len ",obj1[2])
obj2 = {}
obj2[0] = 7
obj2[1] = 0
obj2[2] = -4
obj2[3] = 8
# print("Result Euclidiana = ",calculaEuclidiana(obj1,obj2))
# print("Result Minkowski = ", calculaMinkowski(obj1,obj2,2))
# print("Result Minkowski normalizada = ", calculaMinkowskiNormalizada(obj1,obj2,2))
4,614 | d60a2100127db859162890204655d313cdc2a4a5 | # -*- coding: utf-8 -*-
import scrapy
import MySQLdb
import openpyxl
from scrapy.crawler import CrawlerProcess
import sys
class AllabolaSpider(scrapy.Spider):
    """Scrapes board-member listings from allabolag.se into a MySQL table
    and a Facebook custom-audience CSV.

    NOTE(review): the code mixes Python 2 and Python 3 idioms --
    `reload(sys)` / `sys.setdefaultencoding` only exist on Python 2,
    while the prints are py3-style. On Python 3, `gender` is bytes after
    .encode('utf-8'), so the `gender == 'male'` comparisons below would
    never match. Confirm the intended runtime.
    """
    name = 'allabola'
    allowed_domains = ['https://www.allabolag.se']
    start_urls = []
    #'https://www.allabolag.se/7696250484/befattningar'
    # Hard-coded DB credentials (consider moving to scrapy settings).
    host = '104.197.180.57'
    user = 'root'
    password = 'root'
    DB_name = "db_allabolag"
    # (Re)create the audience CSV with its header at class-definition time.
    f = open('Facebook_Auidance.csv', 'w')
    f.write('fn,ln,zip,ct,st,country,dob,doby,gen,age,uid')
    f.write('\n')
    f.close()
    # Connect once; `connection`/`cursor` become class attributes that the
    # parse() method accesses via self. Failures are only printed.
    try:
        connection = MySQLdb.connect(host, user, password,DB_name ,charset='utf8')
        cursor = connection.cursor()
    except Exception as e:
        print(str(e))
    # Create the target table; an "already exists" error is printed and ignored.
    try:
        strquery2 = "CREATE TABLE tbl_allabola""""(Id INT NOT NULL AUTO_INCREMENT,
Registration_no varchar(250) DEFAULT NULL,
First_name varchar(250) DEFAULT NULL,
Middle_name varchar(250) DEFAULT NULL,
Famaily_name varchar(250) DEFAULT NULL,
Gender longtext DEFAULT NULL,
Year longtext DEFAULT NULL,
Board_member longtext DEFAULT NULL,
PRIMARY KEY (`Id`))"""
        cursor.execute(strquery2)
    except Exception as e:
        print(str(e))

    def start_requests(self):
        # Queue one /befattningar page per business number read from the
        # spreadsheet; the postcode travels along in request meta.
        try:
            wb = openpyxl.load_workbook(
                '/home//Business_numbers.xlsx')
            ws = wb.get_active_sheet()
            row_count = ws.max_row
            for h in range(2,row_count):
                regi_number = ws.cell(row=h, column=2).value
                Post_Code = ws.cell(row=h, column=4).value
                main_link = 'https://www.allabolag.se/'+str(regi_number)+'/befattningar'
                yield scrapy.FormRequest(main_link,callback=self.parse,dont_filter=True,meta={'Post_Code':Post_Code})
        except Exception as e:
            print(e)

    def parse(self, response):
        """Extract every listed person (gender, name parts, birth year,
        role) and write each row to the CSV and the MySQL table."""
        Post_Code = response.meta['Post_Code']
        # Registration number is the URL path segment after '.se/'.
        Registration_no = response.url
        Registration_no = Registration_no.split('.se/')[1]
        Registration_no = Registration_no.split('/')[0]
        print(Registration_no)
        ALl_data = response.xpath('//*[@class="list--personnel accordion-body"]/li')
        for datas in ALl_data:
            # Gender is encoded in a CSS class suffix like '...--male'.
            gender = datas.xpath(".//div[1]/span[contains(@class,'male')]/@class").extract_first()
            gender = gender.split('--')[1]
            gender = gender.encode('utf-8')
            if gender == 'male':
                gender = 'm'
            elif gender == 'female':
                gender = 'f'
            # Name text has the form 'First [Middles] Last (f. YYYY)'.
            name = datas.xpath('.//div[2]/a/text()').extract_first()
            name = name.strip()
            name = name.split(' (f. ')
            year = name[1].replace(')','')
            # NOTE(review): if year were ever None, `age` would be
            # undefined at the CSV write below -- confirm year is always
            # present in the source markup.
            if year != None:
                age = str(2019 - int(year))
            fullname = name[0]
            # try:
            #     fullname = str(fullname)
            # except Exception as e:
            #     print e
            fullname = fullname.split(' ')
            firstname = ''
            middlename = ''
            familyname = ''
            # Split into first / middle(s) / family by token count.
            if len(fullname) == 3:
                firstname = fullname[0]
                middlename = fullname[1]
                familyname = fullname[2]
            elif len(fullname) == 2:
                firstname = fullname[0]
                middlename = fullname[1]
            elif len(fullname) > 3:
                firstname = fullname[0]
                familyname = fullname[-1]
                middlename = ''
                for k in range(1,len(fullname)-1):
                    if middlename == '':
                        middlename = fullname[k]
                    else:
                        middlename = middlename + ' ' + fullname[k]
            # Role/title text, e.g. board membership.
            type = datas.xpath('.//div[2]/text()').extract()[2]
            Board_member = type.replace('\n','').strip()
            # Append the audience CSV row (write errors close the file).
            if gender != '':
                f = open('Facebook_Auidance.csv', 'a')
                try:
                    f.write(firstname+','+familyname+','+Post_Code+','+''+','+''+','+'Sweden'+','+''+','+year+','+gender+','+age+','+'')
                except Exception as e:
                    f.close()
                try:
                    f.write('\n')
                    f.close()
                except Exception as e:
                    ''
            # Insert into MySQL; commit per row, errors only printed.
            if gender != '':
                try:
                    reload(sys)
                    sys.setdefaultencoding('utf8')
                    self.cursor.execute(
                        """INSERT INTO tbl_allabola(Registration_no,First_name,Middle_name,Famaily_name,Gender,Year,Board_member)VALUES (%s,%s,%s,%s,%s,%s,%s)""",
                        (Registration_no, firstname, middlename,familyname,gender,year,Board_member))
                    self.connection.commit()
                except Exception as e:
                    print(e)
# Run the spider immediately when this module is executed.
process = CrawlerProcess({'LOG_ENABLED': False})
process.crawl(AllabolaSpider)
try:
    process.start()
except Exception as e:
    # Keep the original best-effort behavior (don't crash on reactor
    # errors), but no longer swallow KeyboardInterrupt/SystemExit as the
    # bare `except:` did, and leave a trace of what went wrong.
    print(e)
|
4,615 | 66ae7f4ee01ca5516d8e3dc447eeb4709e2b6aec | import datetime
import time
def calculate(a):
    """Identity helper -- returns its argument unchanged."""
    return a


# Module-level scratch set (currently unused).
data = set()
class Bank:
    """A bank with a class-level running total of all customer money."""

    # Aggregate balance across every customer account.
    amount = 0

    def __init__(self):
        self.Bank_name = "State Bank of India"
        self.ifsc = 'SBI0N00012'

    def __repr__(self):
        return f'Bank Name: {self.Bank_name}, IFSC_Code : {self.ifsc} '
class CustomerDetails(Bank):
    """A customer account; opening amounts and deposits feed the
    bank-wide total (Bank.amount)."""

    # Fee charged for every balance enquiry after the third.
    check_amt = 18

    def __init__(self,name,identity,acc,op_amount):
        Bank.__init__(self)
        self.name = name            # account holder name
        self.identity = identity    # Aadhaar number
        self.acc = acc              # account number
        self.op_amount = op_amount  # current balance (opening amount)
        Bank.amount += self.op_amount  # opening deposit joins the bank total
        self.count = 0              # balance checks made so far

    def __repr__(self):
        return f'Name : {self.name}, Aaddhar_card : {self.identity}, Account No : {self.acc}, Amount : {self.op_amount}, Bank_Amount : {Bank.amount} '

    # stored = datetime.datetime.today()
    # def __repr__(self)
    def deposite(self,credit):
        """Add `credit` to this account and to the bank total; returns
        the new bank total.

        Bug fix: the original did `Bank.amount += self.op_amount` after
        increasing op_amount, double-counting the customer's existing
        balance on every deposit; only the newly deposited amount belongs
        in the increment.
        """
        self.credit = credit
        self.op_amount += self.credit
        Bank.amount += self.credit
        print(f'You\'ve added {self.credit} : Total Amount = {self.op_amount}')
        return (Bank.amount)

    def check_balance(self):
        """Report the balance; deducts check_amt after the third enquiry."""
        self.count += 1
        if self.count > 3:
            self.op_amount -= CustomerDetails.check_amt
            return f'{self.name} due to over checking {CustomerDetails.check_amt} Rs. has been deducted. your Balance : {self.op_amount} '
        else:
            return f'{self.name} your Balance : {self.op_amount}'
# Demo: create a customer, deposit, and trip the over-checking fee.
# cus1 = CustomerDetails('Lucky','755376288106','67001010115773',5000)
# print(cus1)
cus2 = CustomerDetails('Pawan','755376288078','37376989161',10000)
print(cus2)
cus2.deposite(20000)
print(cus2.check_balance())
print(cus2.check_balance())
print(cus2.check_balance())
# The fourth check exceeds the free limit and deducts the fee.
print(cus2.check_balance())
print(cus2)
# print(cus2.check_balance())
|
4,616 | d30129248f5245560ee0d3ee786e118427e169d7 | import numpy as np
# Question/stop words stripped from questions before keyword search.
er = ['why','who','how','where','which','what','when','was','were','did','do','does','is','are','many','much']
qst = []    # tokenized questions (stop words removed)
txt = None  # sentences of the source text, numpy array of shape (n, 1)
ans = None  # candidate answer words, numpy array of shape (m, 1)
fnd = []    # per-keyword lists of sentence indices containing the keyword
def chek_qst(qst, stopwords=None):
    """Remove question/stop words from the token list *qst* (in place).

    Bug fix: the original iterated over ``qst`` while calling
    ``qst.remove`` on it, which skips the element that follows every
    removal, so consecutive stop words survived.  Rebuilding the list via
    a filter removes every occurrence reliably.

    :param qst: list of lower-cased question tokens; filtered in place.
    :param stopwords: optional iterable of words to drop; defaults to the
        module-level ``er`` list (backward compatible).
    :return: the same (now filtered) list object, as before.
    """
    drop = set(er if stopwords is None else stopwords)
    qst[:] = [tok for tok in qst if tok not in drop]
    return qst
def search_word(qst):
    """For every keyword in *qst*, append to the global ``fnd`` list the
    indices of the rows of the global ``txt`` array whose text contains the
    keyword as a substring (one index-list per keyword that matched at all).
    """
    global txt
    for h in qst:
        temp = []
        for n, l in enumerate(txt):
            # l is a 1-element row of txt; substring match against each cell
            if [n for i, j in enumerate(l) if h in j] != []:
                temp.append(n)
        # temp = np.array(temp)
        if temp != []:
            fnd.append(temp)
def read():
    """Read one passage, five questions and a ';'-separated answer list from
    stdin into the module globals ``txt``, ``qst`` and ``ans``.
    """
    global txt
    global qst
    global ans
    # passage: split into sentences on '.', stored as an (n_sent, 1) array
    txt = np.array((input().lower()).split('.'))
    txt = txt.reshape(len(txt), 1)
    for i in range(5):
        qst.append((input().lower()).replace('?', '').split())
    # strip stop words from every question, then freeze as arrays
    split_quest()
    qst = np.array(qst)
    ans = np.array((input().lower()).split(';'))
    ans = ans.reshape(len(ans), 1)
def split_quest():
    """Run every stored question through chek_qst, replacing it in place."""
    for idx, tokens in enumerate(qst):
        qst[idx] = chek_qst(tokens)
def find_answer(fnd):
    """Pick the passage sentence that best matches the keyword hits in *fnd*
    and print either a matching candidate-answer token or the sentence itself.
    Reads the module globals ``txt`` and ``ans``.
    """
    flag = False
    answer = None
    global ans
    temp_min = []
    # Case 1: some keyword occurs in exactly one sentence -> print the first
    # candidate-answer token found in that sentence.
    for i in fnd:
        if len(i) == 1:
            answer = i[0]
            # print(str(txt[answer][0]))
            for i in ans:
                for j in i:
                    if j in txt[answer][0]:
                        # print('from first :: ',j,'\n',answer)
                        print(j)
                        flag = True
                    if flag:
                        break
                if flag:
                    break
    # Case 2: no unique hit -> take the keyword with the fewest candidate
    # sentences and print the candidate shared with the most other keywords.
    if not flag:
        for i in fnd:
            temp_min.append(len(i))
        temp_min = np.array(temp_min)
        temp_min = temp_min.argmin()
        p = []
        for i in fnd[temp_min]:
            count = 0
            for j, h in enumerate(fnd):
                if fnd[temp_min] != h:
                    if i in h:
                        count += 1
            p.append(count)
        p = np.array(p)
        # print('from second :: ',str(txt[fnd[temp_min][p.argmax()]][0]))
        print(str(txt[fnd[temp_min][p.argmax()]][0]))
# Driver: read the passage, five questions and candidate answers from stdin,
# then locate each question's keywords in the passage and print an answer.
read()
for i, qst_num in enumerate(qst):
    fnd = []  # rebound global; search_word appends per-keyword hit lists here
    search_word(qst_num)
    # print('\n',fnd)
    find_answer(fnd)
    # fnd = np.array(fnd).reshape(len(fnd))
    # print('questin #{}'.format(i+1),fnd,'\n')
    # print(str(txt[find_answer(fnd)][0]))
# print(ans)
# print('\n',qst)
# print('\n\n',[(i,j[0]) for i,j in enumerate(txt)])
# print('\n\n',[(i,j[0]) for i,j in enumerate(ans)])
'''Zebras are several species of African equids (horse family) united by their distinctive black and white stripes. Their stripes come in different patterns, unique to each individual. They are generally social animals that live in small harems to large herds. Unlike their closest relatives, horses and donkeys, zebras have never been truly domesticated. There are three species of zebras: the plains zebra, the Grévy's zebra and the mountain zebra. The plains zebra and the mountain zebra belong to the subgenus Hippotigris, but Grévy's zebra is the sole species of subgenus Dolichohippus. The latter resembles an ass, to which it is closely related, while the former two are more horse-like. All three belong to the genus Equus, along with other living equids. The unique stripes of zebras make them one of the animals most familiar to people. They occur in a variety of habitats, such as grasslands, savannas, woodlands, thorny scrublands, mountains, and coastal hills. However, various anthropogenic factors have had a severe impact on zebra populations, in particular hunting for skins and habitat destruction. Grévy's zebra and the mountain zebra are endangered. While plains zebras are much more plentiful, one subspecies, the quagga, became extinct in the late 19th century – though there is currently a plan, called the Quagga Project, that aims to breed zebras that are phenotypically similar to the quagga in a process called breeding back.
Which Zebras are endangered?
What is the aim of the Quagga Project?
Which animals are some of their closest relatives?
Which are the three species of zebras?
Which subgenus do the plains zebra and the mountain zebra belong to?
subgenus Hippotigris;the plains zebra, the Grévy's zebra and the mountain zebra;horses and donkeys;aims to breed zebras that are phenotypically similar to the quagga;Grévy's zebra and the mountain zebra
''' |
4,617 | 9cc672702d960088f0230cbd1694b295216d8b5a | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Especialidade(models.Model):
    """Lookup table of medical specialties, keyed by a unique name."""

    def __str__(self):
        return self.nome

    # 'nome' is already unique, so duplicates raise IntegrityError at the DB level.
    nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)
|
4,618 | 42743ee2a812d8fe6fc036ba97daaff5be35564d | from pytorch_lightning.callbacks import Callback
from evaluation.validator import Validator
class LSTMCallback(Callback):
    """Lightning callback that dumps the collected evaluation lines after testing."""

    def on_test_end(self, trainer, pl_module):
        """Write ``pl_module.evaluation_data`` to /evaluation.log, then validate it.

        Fix: the log file was opened but never closed (leaked handle and a
        potentially unflushed buffer); a ``with`` block now guarantees both
        close and flush even if a write fails.
        """
        # NOTE(review): '/evaluation.log' is an absolute root-level path —
        # confirm it is intentional and writable in the deployment environment.
        with open('/evaluation.log', 'w') as f:
            for ev in pl_module.evaluation_data:
                f.write(ev + '\n')
        Validator(pl_module.evaluation_data, ['speed'])
|
4,619 | dcef5f34a62939d992a109e991552e612bf5bad5 | import pandas as pd
import numpy as np
from datetime import timedelta
import scipy.optimize as optim
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from gen_utils.gen_io import read_run_params,log_msg
#############################################
# --- Load the run's HQ variant calls and reduce to one record per patient ---
params = read_run_params()
run = params["current_run"]
out_home = params["container"] + "output/"
out_dir = out_home + run + "/"
df = pd.read_csv(out_dir + "4_mcov_strain_variant_map_covid_pangolin_db_input_" + run + ".csv")
df = df[df.quality == "HQ"]
# --- Analysis window / variants of interest (Delta family) ---
tag = "B.1.617.Family"
voi = ["B.1.617.2", "AY.2", "AY.3"]
start_date = "4-15-2021"
end_date = "7-20-2021"
days_since = "4/15/2021"
days = 180
# voi="P.1"
# start_date = "1-1-2021"
# end_date = "6-20-2021"
# days_since="1/1/2021"
# days= 360
# --- Keep each patient once: earliest VOI sample wins, else earliest non-VOI ---
### take unique patients with variant
keep_mrns_variant = np.unique(df[df.variant.isin(voi)]["MRN"])
df_mrns = df[df.MRN.isin(keep_mrns_variant)]
df_mrns = df_mrns[df_mrns.variant.isin(voi)]  ### important step -- remove non-VOI rows
df_mrns.sort_values("COLLECTION_DT", inplace=True)
df_mrns.drop_duplicates("MRN", keep="first", inplace=True)
keep_mrns_not_variant = np.unique(df[~df.variant.isin(voi)]["MRN"])
df_mrns_not_variant = df[df.MRN.isin(keep_mrns_not_variant)]
df_mrns_not_variant = df_mrns_not_variant[~df_mrns_not_variant.variant.isin(voi)]
df_mrns_not_variant.sort_values("COLLECTION_DT", inplace=True)
df_mrns_not_variant.drop_duplicates("MRN", keep="first", inplace=True)
# NOTE(review): DataFrame.append is deprecated in modern pandas (use pd.concat).
df_2 = df_mrns.append(df_mrns_not_variant)
df_2.drop_duplicates("MRN", keep="first", inplace=True)
df = df_2
df = df[['MCoVNumber', 'COLLECTION_DT', 'variant']]
# --- Restrict to the analysis window ---
df.COLLECTION_DT = pd.to_datetime(df.COLLECTION_DT)
df.COLLECTION_DT = df.COLLECTION_DT.dt.date
# NOTE(review): comparing datetime.date values to pd.to_datetime Timestamps —
# confirm this comparison behaves as intended on the installed pandas version.
df = df[((df.COLLECTION_DT >= pd.to_datetime(start_date)) &
         (df.COLLECTION_DT < pd.to_datetime(end_date))
         )
        ]
df.sort_values("COLLECTION_DT", inplace=True)
df.variant.fillna(0, inplace=True)
# --- Daily counts: VOI samples vs all samples, and percent VOI per day ---
df.variant = [1 if x in voi else 0 for x in df.variant]
df_variant = df.groupby("COLLECTION_DT")["variant"].agg("sum").reset_index()
df_count = df.groupby("COLLECTION_DT")["variant"].agg("count").reset_index()
dates = pd.date_range(df.COLLECTION_DT.min(), (df.COLLECTION_DT.max() + timedelta(days=1)) - timedelta(days=1), freq='d')
df_data = pd.DataFrame(dates)
df_data.columns = ["dates"]
df_data["date_step"] = [x for x in range(1, df_data.shape[0] + 1, 1)]
df_data["total"] = df_count.variant
df_data["variant"] = df_variant.variant
df_data["variant_csum"] = np.cumsum(df_variant.variant.values)
df_data["variant_percent"] = [(x / y) * 100 for x, y in zip(df_data.variant, df_data.total)]
df_data.to_excel("final_Data_" + tag + "_log_growth_6_28_2021.xlsx", index=False)
def my_logistic(x, a, b, c):
    """Three-parameter logistic curve: c / (1 + a*exp(-b*x))."""
    denom = 1 + a * np.exp(-b * x)
    return c / denom
# --- Fit the logistic model to the daily percent-of-patients series ---
x = np.array(df_data.date_step)
# y = np.array(df_data.variant_csum)
y = np.array(df_data.variant_percent)
# Random positive initial guess; bounds keep a/b/c in a plausible range.
po = np.random.exponential(size=3)
bounds = (0, [1000., 2.0, 100.])
(a, b, c), cov = optim.curve_fit(my_logistic, x, y, bounds=bounds, p0=po)
# for i in range(1,20,1):
#     try:
#         # po = np.array([250.,0.10,99.])
#         po= np.random.exponential(size=3)
#         bounds = ([0.,0.1,0.],[1000.,float(i),100.])
#         (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
#         print(c)
#     except:
#         print("error for " + str(i))
# po = np.array([250.,0.10,99.])
# bounds = ([0.,0.1,99.],[1000.,1.0,100.])
# (a,b,c),cov = optim.curve_fit(my_logistic,x,y,bounds=bounds,p0=po)
# --- Quick-look plot of the observed points and the fitted curve ---
plt.scatter(x, y)
plt.plot(x, my_logistic(x, a, b, c))
xprime = np.array([x for x in range(1, 170, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime)
plt.savefig("log_fit_best_fit" + tag + ".png")
plt.close()
# --- Method 2: 95% CI on the growth rate via Student-t on the fit error (preferred) ---
from scipy.stats.distributions import t
pars, pcov = (a, b, c), cov
alpha = 0.05  # 95% confidence interval = 100*(1-alpha)
n = len(y)  # number of data points
p = len(pars)  # number of parameters
dof = max(0, n - p)  # number of degrees of freedom
# student-t value for the dof and confidence level
tval = t.ppf(1.0 - alpha / 2., dof)
val_dw = 0
val_up = 0
# NOTE(review): the loop variable `p` shadows the parameter count `p` above.
# i == 1 selects the growth-rate parameter b; its CI bounds are kept.
for i, p, var in zip(range(n), pars, np.diag(pcov)):
    sigma = var ** 0.5
    if i == 1:
        val_dw = p - sigma * tval
        val_up = p + sigma * tval
    print('p{0}: {1} [{2} {3}]'.format(i, p,
                                       p - sigma * tval,
                                       p + sigma * tval))
# --- Final figure: observations, prediction, and the CI band ---
plt.plot(x, y, 'bo', markersize=5, label='Observed')
xprime = np.array([x for x in range(1, days, 1)])
yprime = my_logistic(xprime, a, b, c)
plt.plot(xprime, yprime, label='Predicted')
xpred = np.array([x for x in range(1, days, 1)])
ypred_dw = my_logistic(xpred, pars[0], val_dw, pars[2])
ypred_up = my_logistic(xpred, pars[0], val_up, pars[2])
plt.fill_between(xpred, ypred_up, ypred_dw, color='k', alpha=0.1, label='95% CI')
plt.title("Logistic growth model [" + tag + "]", fontsize=18)
plt.xlabel("Days since " + days_since, fontsize=15)
plt.ylabel("Percent of patients ", fontsize=15)
plt.legend()
plt.savefig("log_pred_best_fit" + tag + ".png")
plt.close()
# Doubling time via the "rule of 70" for the fitted rate and its CI bounds.
gr = b; dt = 70 / (gr * 100); print(dt)
gr = val_up; dt = 70 / (gr * 100); print(dt)
gr = val_dw; dt = 70 / (gr * 100); print(dt)
|
4,620 | 8bce394c651931304f59bbca3e2f019212be9fc1 | import sys
sys.stdin = open("1868_input.txt")
dr = [-1, -1, -1, 0, 0, 1, 1, 1]
dc = [-1, 0, 1, -1, 1, -1, 0, 1]
def is_wall(r, c):
    """True when (r, c) lies outside the global n x n board."""
    return not (0 <= r < n and 0 <= c < n)
def find(r, c, cnt):
    """BFS flood-fill from the zero-neighbour cell (r, c).

    Marks reachable '.' cells visited, writes each popped cell's adjacent
    mine count into the global ``Map``, and returns ``cnt + 1`` (one click
    covers the whole region).

    Fix: the queue was a plain list drained with ``pop(0)``, which is O(n)
    per pop; ``collections.deque.popleft`` keeps the same FIFO order in O(1).
    """
    from collections import deque
    queue = deque()
    queue.append((r, c))
    visited[r][c] = 1
    while queue:
        tr, tc = queue.popleft()
        mine_cnt = 0
        # count mines in the 8 neighbours of the popped cell
        for i in range(8):
            nr = tr + dr[i]
            nc = tc + dc[i]
            if not is_wall(nr, nc):
                if Map[nr][nc] == '*':
                    mine_cnt += 1
        # only expand through cells that themselves touch no mine
        if not mine_cnt:
            for i in range(8):
                nr = tr + dr[i]
                nc = tc + dc[i]
                if not is_wall(nr, nc) and not visited[nr][nc] and Map[nr][nc] == '.':
                    queue.append((nr, nc))
                    visited[nr][nc] = 1
        Map[tr][tc] = mine_cnt
    return cnt + 1
# For each test case: flood-fill every zero-neighbour region (one click each),
# then add one click per remaining uncovered '.' cell.
for tc in range(int(input())):
    n = int(input())
    Map = []
    visited = [[0 for _ in range(n)] for _ in range(n)]
    for _ in range(n):
        tmp = []
        for i in input():
            tmp.append(i)
        Map.append(tmp)
    cnt = 0
    for i in range(n):
        for j in range(n):
            if Map[i][j] != '.':
                continue
            # count mines around (i, j); only mine-free cells seed a flood fill
            mine = 0
            for k in range(8):
                nr = i + dr[k]
                nc = j + dc[k]
                if not is_wall(nr, nc) and Map[nr][nc] == '*':
                    mine += 1
            if not mine:
                cnt = find(i, j, cnt)
    # every '.' cell not converted by a flood fill costs one extra click
    for i in range(n):
        for j in range(n):
            if Map[i][j] == '.':
                cnt += 1
    # (an abandoned quadrant-ordered traversal variant was removed here)
    print(f"#{tc + 1} {cnt}")
4,621 | d9f055301f050eea4281ce418974546c1245ac7e | strings = ['(())())', '(((()())()', '(()())((()))', '((()()(()))(((())))()', '()()()()(()()())()', '(()((())()(']
#print(string[0])
'''
for i in string:
testlist = []
for j in string[i]:
if j == ')':
if
'''
def isVPS(phrase):
    """Return 'YES' if *phrase* is a balanced parenthesis string, else 'NO'.

    Improvement: the original pushed identical '(' characters onto a list
    purely to count them; an integer depth counter gives the same result in
    O(1) memory while keeping the early 'NO' exit on a premature ')'.
    Any character other than '(' is treated as a closer, as before.
    """
    depth = 0
    for char in phrase:
        if char == '(':
            depth += 1
        else:
            if depth == 0:
                return 'NO'  # closing bracket with nothing open
            depth -= 1
    return 'YES' if depth == 0 else 'NO'
# Classify every sample phrase as a valid/invalid parenthesis string.
for string in strings:
    print(isVPS(string))
# print(isVPS(string[0]))
4,622 | 420beba5b6fd575ab9be0c907ae0698ba7be5220 |
from xai.brain.wordbase.verbs._hinder import _HINDER
#calss header
class _HINDERED(_HINDER, ):
    """Auto-generated word entry for the past-tense form of 'hinder'."""

    def __init__(self):
        super().__init__()
        self.name = "HINDERED"
        self.basic = "hinder"
        self.specie = 'verbs'
        self.jsondata = {}
4,623 | 0c37806f0a7c0976711edd685fd64d2616147cb6 | """
Data pre-processing
"""
import os
import corenlp
import numpy as np
import ujson as json
from tqdm import tqdm
from collections import Counter
from bilm import dump_token_embeddings
import sys
sys.path.append('../..')
from LIB.utils import save
def process(json_file, outpur_dir, exclude_titles=None, include_titles=None):
    """Tokenise and annotate SQuAD-style data with a local CoreNLP server and
    append per-paragraph token/POS/NER/index files under *outpur_dir*.

    :param json_file: original data in json format
    :param outpur_dir: the output directory of pre-processed data
    :param exclude_titles: article titles to exclude
    :param include_titles: article titles to include
    """
    para_file = "{}/paras".format(outpur_dir)
    question_file = "{}/questions".format(outpur_dir)
    sent_file = "{}/sents".format(outpur_dir)
    answer_file = "{}/answers".format(outpur_dir)
    print("Generating {} raw data...".format(json_file))
    max_sent, max_sent_len, max_que_len, max_ans_len = 0, 0, 0, 0
    # requires a CoreNLP server already listening on localhost:9099
    with open(json_file, "r") as fh, corenlp.CoreNLPClient(annotators="tokenize ssplit pos ner".split(),
                                                           endpoint="http://localhost:9099", timeout=50000) as client:
        source = json.load(fh)
        for article in tqdm(source["data"]):
            title = article["title"]
            if include_titles and title not in include_titles:
                continue
            if exclude_titles and title in exclude_titles:
                continue
            for para in article["paragraphs"]:
                paragraphs, questions, answers, sents, ids = [], [], [], [], []
                paragraphs_pos, questions_pos, answers_pos, sents_pos = [], [], [], []
                paragraphs_ner, questions_ner, answers_ner, sents_ner = [], [], [], []
                answers_index, sents_index = [], []
                # paragraph
                context = para["context"]
                if not context.strip():
                    continue
                ann_para = client.annotate(context)
                max_sent = max(max_sent, len(ann_para.sentence))
                max_sent_len = max(max_sent_len, max(map(lambda x: len(x.token), ann_para.sentence)))
                ann_para_tokens, paragraph_tokens, paragraph_pos, paragraph_ner = [], [], [], []
                for sent in ann_para.sentence:
                    for token in sent.token:
                        ann_para_tokens.append(token)
                        paragraph_tokens.append(token.word)
                        paragraph_pos.append(token.pos)
                        paragraph_ner.append(token.ner)
                # questions
                for qa in para["qas"]:
                    # question
                    ques = qa["question"]
                    id = qa["id"]
                    if not ques.strip():
                        continue
                    ann_que = client.annotate(ques)
                    max_que_len = max(max_que_len, len(ann_que.sentence[0].token))
                    question_tokens, question_pos, question_ner = [], [], []
                    for sent in ann_que.sentence:
                        for token in sent.token:
                            question_tokens.append(token.word)
                            question_pos.append(token.pos)
                            question_ner.append(token.ner)
                    # answer
                    all_answer_tokens, all_answer_pos, all_answer_ner, all_answer_index = [], [], [], []
                    all_sent_tokens, all_sent_pos, all_sent_ner, all_sent_index = [], [], [], []
                    for answer in qa["answers"]:
                        answer_text = answer["text"]
                        if not answer_text.strip():
                            continue
                        ann_ans = client.annotate(answer_text)
                        answer_tokens, answer_pos, answer_ner = [], [], []
                        for sent in ann_ans.sentence:
                            for token in sent.token:
                                answer_tokens.append(token.word)
                                answer_pos.append(token.pos)
                                answer_ner.append(token.ner)
                        all_answer_tokens.append(' '.join(answer_tokens))
                        all_answer_pos.append(' '.join(answer_pos))
                        all_answer_ner.append(' '.join(answer_ner))
                        answer_start = answer['answer_start']
                        answer_end = answer_start + len(answer_text)
                        # sentence: every paragraph sentence whose character
                        # span overlaps the answer span
                        sentence = []
                        for sent in ann_para.sentence:
                            if sent.characterOffsetBegin <= answer_start <= sent.characterOffsetEnd or \
                                    sent.characterOffsetBegin <= answer_end <= sent.characterOffsetEnd:
                                sentence.append(sent)
                        sentence = [token for sent in sentence for token in sent.token]
                        sentence_tokens = [token.word for token in sentence]
                        sentence_pos = [token.pos for token in sentence]
                        sentence_ner = [token.ner for token in sentence]
                        all_sent_tokens.append(' '.join(sentence_tokens))
                        all_sent_pos.append(' '.join(sentence_pos))
                        all_sent_ner.append(' '.join(sentence_ner))
                        # sentence index
                        y1_sent = sentence[0].tokenBeginIndex
                        y2_sent = sentence[-1].tokenBeginIndex
                        # answer index
                        y1_ans = None
                        for i, token in enumerate(sentence):
                            if token.beginChar - 1 <= answer_start <= token.endChar:
                                y1_ans = sentence[0].tokenBeginIndex + i
                        try:
                            assert y1_ans != None
                        except:
                            # answer start did not align with any token: skip this answer
                            continue
                        y2_ans = y1_ans + len(answer_tokens) - 1
                        all_answer_index.append("{},{}".format(y1_ans, y2_ans))
                        all_sent_index.append("{},{}".format(y1_sent, y2_sent))
                    paragraphs.append(' '.join(paragraph_tokens))
                    paragraphs_pos.append(' '.join(paragraph_pos))
                    paragraphs_ner.append(' '.join(paragraph_ner))
                    questions.append(' '.join(question_tokens))
                    questions_pos.append(' '.join(question_pos))
                    questions_ner.append(' '.join(question_ner))
                    answers.append('\t'.join(all_answer_tokens))
                    answers_pos.append('\t'.join(all_answer_pos))
                    answers_ner.append('\t'.join(all_answer_ner))
                    answers_index.append('\t'.join(all_answer_index))
                    sents.append('\t'.join(all_sent_tokens))
                    sents_pos.append('\t'.join(all_sent_pos))
                    sents_ner.append('\t'.join(all_sent_ner))
                    sents_index.append('\t'.join(all_sent_index))
                    ids.append(id)
                # save para (append mode: output accumulates across paragraphs/runs)
                with open("{}.tok".format(para_file), 'a') as f:
                    f.write('\n'.join(paragraphs) + '\n')
                with open("{}.pos".format(para_file), 'a') as f:
                    f.write('\n'.join(paragraphs_pos) + '\n')
                with open("{}.ner".format(para_file), 'a') as f:
                    f.write('\n'.join(paragraphs_ner) + '\n')
                with open("{}.id".format(para_file), 'a') as f:
                    f.write('\n'.join(ids) + '\n')
                # save question
                with open("{}.tok".format(question_file), 'a') as f:
                    f.write('\n'.join(questions) + '\n')
                with open("{}.pos".format(question_file), 'a') as f:
                    f.write('\n'.join(questions_pos) + '\n')
                with open("{}.ner".format(question_file), 'a') as f:
                    f.write('\n'.join(questions_ner) + '\n')
                # save answer
                with open("{}.tok".format(answer_file), 'a') as f:
                    f.write('\n'.join(answers) + '\n')
                with open("{}.pos".format(answer_file), 'a') as f:
                    f.write('\n'.join(answers_pos) + '\n')
                with open("{}.ner".format(answer_file), 'a') as f:
                    f.write('\n'.join(answers_ner) + '\n')
                with open("{}.index".format(answer_file), 'a') as f:
                    f.write("\n".join(answers_index) + '\n')
                # save sent
                with open("{}.tok".format(sent_file), 'a') as f:
                    f.write('\n'.join(sents) + '\n')
                with open("{}.pos".format(sent_file), 'a') as f:
                    f.write('\n'.join(sents_pos) + '\n')
                with open("{}.ner".format(sent_file), 'a') as f:
                    f.write('\n'.join(sents_ner) + '\n')
                with open("{}.index".format(sent_file), 'a') as f:
                    f.write("\n".join(sents_index) + '\n')
    # get BIO labels
    label(para_file, answer_file)
def label(para_file, answer_file):
    """Append one BIO tag line per paragraph to ``<para_file>.label``.

    The first answer span in each line of ``<answer_file>.index`` ("start,end",
    token indices) is tagged B at start, I inside, O elsewhere; a malformed
    or missing span yields an empty label line.  Returns the token count of
    the longest paragraph seen.
    """
    longest = 0
    with open("{}.tok".format(para_file), 'r') as fp, open("{}.label".format(para_file), 'a') as fl, \
            open("{}.index".format(answer_file), 'r') as fa:
        while True:
            para = fp.readline()
            if not para:
                break
            words = para.strip().split(' ')
            longest = max(longest, len(words))
            answer = fa.readline()
            labels = []
            try:
                start, end = map(int, answer.split('\t')[0].split(','))
                labels = ['B' if i == start else 'I' if start <= i <= end else 'O'
                          for i in range(len(words))]
            except:
                labels = []  # best-effort: emit an empty line for bad spans
            fl.write(' '.join(labels) + '\n')
    return longest
def get_data(train_json, dev_json, test_title_file, output_dir):
    """Pre-process SQuAD into train/dev/test splits under *output_dir*.

    The test split is carved out of the training file by article title
    (titles listed in *test_title_file*); those titles are excluded from the
    train split and included in the test split.

    Fix: the title file was opened via ``open(...).readlines()`` and never
    closed; a ``with`` block now releases the handle deterministically.
    """
    with open(test_title_file, 'r') as f:
        test_titles = set(line.strip() for line in f)
    process(train_json, "{}/train/".format(output_dir), exclude_titles=test_titles)
    process(dev_json, "{}/dev/".format(output_dir))
    process(train_json, "{}/test/".format(output_dir), include_titles=test_titles)
def get_word_embedding(counter, emb_file, emb_size, vocab_size, vec_size, vocab_file):
    """
    get word embedding matrix from glove

    Builds the vocabulary from *counter* (most frequent first, behind four
    special tokens), writes it to *vocab_file* (minus the <NULL> token), and
    initialises each row from GloVe when a case variant of the word exists
    there, otherwise leaving the random init.  Returns
    (embedding matrix, word->index dict, count of words missing from GloVe).
    """
    print("Generating word embedding...")
    # load word embeddings
    embedding_dict = {}
    with open(emb_file, "r", encoding="utf-8") as fh:
        for line in tqdm(fh, total=emb_size):
            array = line.split()
            # words may themselves contain spaces in GloVe; everything before
            # the final vec_size floats is the token
            word = "".join(array[0:-vec_size])
            vector = list(map(float, array[-vec_size:]))
            embedding_dict[word] = vector
    # PTB-style bracket tokens mapped back to literal brackets for lookup
    TRANSLATE = {
        "-lsb-": "[", "-rsb-": "]", "-lrb-": "(", "-rrb-": ")", "-lcb-": "{",
        "-rcb-": "}", "-LSB-": "[", "-RSB-": "]", "-LRB-": "(", "-RRB-": ")",
        "-LCB-": "{", "-RCB-": "}"
    }
    SPECIAL_TOKENS = ["<NULL>", "<UNK>", "<S>", "</S>"]
    words = list(map(lambda x: x[0], sorted(counter.items(), key=lambda x: x[1], reverse=True)))
    words = SPECIAL_TOKENS + words
    if vocab_size > 0:
        words = words[:vocab_size]
    with open(vocab_file, 'w') as f:
        f.write('\n'.join(words[1:]))
    embedding = np.random.normal(scale=0.1, size=(len(words), vec_size))
    word2idx_dict = {}
    unknown_count = 0
    for i, word in enumerate(words):
        word2idx_dict[word] = i
        if word in TRANSLATE:
            word = TRANSLATE[word]
        done = False
        # try the word plus simple case variants against GloVe
        for w in (word, word.lower(), word.upper(), word.capitalize()):
            if w in embedding_dict:
                embedding[i] = embedding_dict[w]
                done = True
                break
        if not done:
            unknown_count += 1
    return embedding, word2idx_dict, unknown_count
def get_tag_embedding(counter, data_type, vec_size):
    """Build a random-normal embedding table for pos/ner/label tags.

    Tags are ordered by descending frequency behind the <NULL>/<UNK>
    specials; returns (embedding matrix, tag->index dict).
    """
    print("Generating {} tag embedding...".format(data_type))
    ranked = sorted(counter.items(), key=lambda item: item[1], reverse=True)
    tags = ["<NULL>", "<UNK>"] + [tag for tag, _ in ranked]
    embedding = np.random.normal(scale=0.1, size=(len(tags), vec_size))
    word2idx_dict = {tag: idx for idx, tag in enumerate(tags)}
    return embedding, word2idx_dict
def get_vocab(config):
    """Scan the train+dev token/tag files, build every vocabulary and
    embedding matrix (words, chars, POS, NER, BIO labels), dump them to the
    paths named in *config*, and finally dump ELMo token embeddings.
    """
    print("Get the vocabulary...")
    word_counter, char_counter = Counter(), Counter()
    pos_counter, ner_counter, label_counter = Counter(), Counter(), Counter()
    files = [(config.train_para_file, config.train_question_file), (config.dev_para_file, config.dev_question_file)]
    for para_file, que_file in files:
        # the .tok/.pos/.ner/.label files are line-aligned; read them in lockstep
        with open("{}.tok".format(para_file), 'r') as fp, open("{}.tok".format(que_file), 'r') as fq, \
                open("{}.pos".format(para_file), 'r') as fpp, open("{}.pos".format(que_file), 'r') as fqp, \
                open("{}.ner".format(para_file), 'r') as fpn, open("{}.ner".format(que_file), 'r') as fqn, \
                open("{}.label".format(para_file), 'r') as fpl:
            while True:
                para, question = fp.readline(), fq.readline()
                pos, que_pos = fpp.readline(), fqp.readline()
                ner, que_ner = fpn.readline(), fqn.readline()
                label = fpl.readline()
                if not question or not para:
                    break
                if config.lower_word:
                    para = para.lower()
                    question = question.lower()
                para_tokens = para.strip().split(' ')
                que_tokens = question.strip().split(' ')
                pos_tags = pos.strip().split(' ')
                ner_tags = ner.strip().split(' ')
                que_pos_tags = que_pos.strip().split(' ')
                que_ner_tags = que_ner.strip().split(' ')
                labels = label.strip().split(' ')
                for token in para_tokens + que_tokens:
                    word_counter[token] += 1
                    for char in list(token):
                        char_counter[char] += 1
                for pos_tag in pos_tags + que_pos_tags:
                    pos_counter[pos_tag] += 1
                for ner_tag in ner_tags + que_ner_tags:
                    ner_counter[ner_tag] += 1
                for label in labels:
                    label_counter[label] += 1
    word_emb_mat, word2idx_dict, unk_num = get_word_embedding(word_counter, emb_file=config.glove_word_file,
                                                              emb_size=config.glove_word_size,
                                                              vocab_size=config.vocab_size_limit,
                                                              vec_size=config.glove_dim, vocab_file=config.vocab_file)
    char_emb_mat, char2idx_dict = get_tag_embedding(char_counter, "char", vec_size=config.char_dim)
    pos_emb_mat, pos2idx_dict = get_tag_embedding(pos_counter, "pos", vec_size=config.pos_dim)
    ner_emb_mat, ner2idx_dict = get_tag_embedding(ner_counter, "ner", vec_size=config.ner_dim)
    label_emb_mat, label2idx_dict = get_tag_embedding(label_counter, "label", vec_size=config.label_dim)
    print("{} out of {} are not in glove".format(unk_num, len(word2idx_dict)))
    print("{} chars".format(char_emb_mat.shape[0]))
    print("{} pos tags, {} ner tags, {} answer labels, {} chars".format(
        pos_emb_mat.shape[0], ner_emb_mat.shape[0], label_emb_mat.shape[0], char_emb_mat.shape[0]))
    save(config.word_emb_file, word_emb_mat, message="word embedding")
    save(config.char_emb_file, char_emb_mat, message="char embedding")
    save(config.pos_emb_file, pos_emb_mat, message="pos embedding")
    save(config.ner_emb_file, ner_emb_mat, message="ner embedding")
    save(config.label_emb_file, label_emb_mat, message="label embedding")
    save(config.word_dictionary, word2idx_dict, message="word dictionary")
    save(config.char_dictionary, char2idx_dict, message="char dictionary")
    save(config.pos_dictionary, pos2idx_dict, message="pos dictionary")
    save(config.ner_dictionary, ner2idx_dict, message="ner dictionary")
    save(config.label_dictionary, label2idx_dict, message="label dictionary")
    print("Dump elmo word embedding...")
    token_embedding_file = config.embedding_file
    dump_token_embeddings(
        config.vocab_file, config.elmo_options_file, config.elmo_weight_file, token_embedding_file
    )
if __name__ == '__main__':
    # Create the output directory tree.  Portable fix: the previous
    # os.system("mkdir ...") chain depended on a POSIX shell and failed
    # noisily on re-runs; os.makedirs creates parents and tolerates
    # already-existing directories.
    for split in ("train", "dev", "test"):
        os.makedirs("data/processed/{}".format(split), exist_ok=True)
    get_data("../../LIB/squad/train-v1.1.json", "../../LIB/squad/dev-v1.1.json",
             "../../LIB/squad/doclist-test.txt", "data/processed")
4,624 | 1cc14836808d70c1e53a9ca948a52776ebc89f4a | import dlib
import cv2
import imageio
import torch
from PIL import Image
from model import AgeGenderModel
from mix_model import MixModel
from torchvision.transforms import transforms
from tqdm import tqdm
from retinaface.pre_trained_models import get_model
# Preprocessing for the age/gender classifier.
# NOTE(review): the normalisation constants are the CIFAR-10 statistics —
# presumably matching the model's training pipeline; confirm.
transform = transforms.Compose([
    transforms.Resize((112, 112)),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465),
                         (0.2023, 0.1994, 0.2010)),
])
# Load model age gender
model = MixModel()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ckpt = torch.load("outputs_w_free/model_epoch_50.pth")
model.load_state_dict(ckpt['model_state_dict'])
model.eval()
model.to(device)
# NOTE(review): the face detector is pinned to 'cuda:1' while the age/gender
# model uses whatever `device` resolves to — confirm both devices exist.
model_face = get_model("resnet50_2020-07-20", max_size=512, device='cuda:1')
model_face.eval()
# load the detector (created but unused below — retinaface does the detection)
detector = dlib.get_frontal_face_detector()
FPS = 30
# read the video
out_video = imageio.get_writer("/home/cybercore/haimd/w_freeze_osaka.mp4", format='mp4', mode='I', fps=FPS)
video = imageio.get_reader("/home/cybercore/haimd/osaka.mp4")
for img in tqdm(video):
    if img is not None:
        # gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY)
        # faces = detector(gray)
        annotation = model_face.predict_jsons(img)
        # only the top detection of each frame is considered
        max_thresh = annotation[0]['score']
        bbox = annotation[0]['bbox']
        if max_thresh > 0.3:
            max_head_bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
            # for face in faces:
            # print(face)
            x1 = bbox[0]
            y1 = bbox[1]
            x2 = bbox[2]
            y2 = bbox[3]
            # pad the classifier crop by 20 px on each side
            x1_face = bbox[0] - 20
            y1_face = bbox[1] - 20
            x2_face = bbox[2] + 20
            y2_face = bbox[3] + 20
            if x1_face > 0 and y1_face > 0:
                img_face = img[y1_face:y2_face, x1_face:x2_face]
                imageio.imwrite('face.jpg', img_face)
                img_face = Image.fromarray(img_face)
                img_face = transform(img_face)
                img_face = torch.unsqueeze(img_face, 0)
                img_face = img_face.to(device)
                gen_pred, age_cls_pred, age_reg_pred = model(img_face)
                _, gen_preds = torch.max(gen_pred, 1)
                _, age_cls_pred = torch.max(age_cls_pred, 1)
                # draw gender-coloured box + "<M|F>:<age>" label on the frame
                if gen_preds.item() == 1:
                    text = f'M:{int(age_reg_pred.item()*100)}'
                    cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(255, 0, 0), thickness=4)
                    cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1, color=(255, 0, 0), thickness=2, lineType=cv2.LINE_AA)
                elif gen_preds.item() == 0:
                    text = f'F:{int(age_reg_pred.item()*100)}'
                    cv2.rectangle(img=img, pt1=(x1, y1), pt2=(x2, y2), color=(0, 0, 255), thickness=4)
                    cv2.putText(img, text, org=(x1, y1), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=1, color=(0, 0, 255), thickness=2, lineType=cv2.LINE_AA)
        out_video.append_data(img)
out_video.close()
print('Done')
4,625 | 11101273a02abec17fc884d5c1d5d182eb82ee0c | # -*- coding: utf-8 -*-
"""The main application module for duffy."""
from flask import Flask
from duffy import api_v1
from duffy.types import seamicro
from duffy.extensions import db, migrate, marshmallow
from duffy.config import ProdConfig,DevConfig
def create_app(config_object=DevConfig):
    """Application factory: build, configure and wire the duffy Flask app.

    :param config_object: configuration class loaded first; its values may be
        overridden by the settings file named in the DUFFY_SETTINGS env var.
    :return: the fully wired Flask application.
    """
    application = Flask(__name__.split('.')[0])
    application.config.from_object(config_object)
    # The env-specified settings file (if present) overrides the config object.
    application.config.from_envvar('DUFFY_SETTINGS', silent=True)
    for wire_up in (register_extensions, register_blueprints, register_errorhandlers):
        wire_up(application)
    return application
def register_extensions(app):
    """Bind the shared extension singletons (SQLAlchemy, Migrate, Marshmallow) to *app*."""
    db.init_app(app)
    # render_as_batch=True makes ALTER TABLE migrations work on SQLite.
    migrate.init_app(app, db, render_as_batch=True)
    marshmallow.init_app(app)
    return None
def register_blueprints(app):
    """Mount the API v1 and seamicro blueprints on *app*."""
    app.register_blueprint(api_v1.views.blueprint)
    app.register_blueprint(seamicro.views.blueprint)
    return None
def register_errorhandlers(app):
    """Placeholder: no custom error handlers are registered yet."""
    return None
|
4,626 | 81fce5314a7611de11648e412151112e29271871 | import random
from z3 import *
def combine(iter):
    """Fold the elements of *iter* together with ``+=`` (left-associative).

    Works for any type supporting ``+`` — ints, strings, z3 expressions.
    Raises IndexError on an empty iterable, like the original.
    """
    items = list(iter)
    total = items[0]
    for item in items[1:]:
        total += item
    return total
def co_prime(num1, num2):
    """Return True when num1 and num2 share no common divisor greater than 1.

    Rewritten to use math.gcd (O(log n)) instead of trial division over
    2..min(num1, num2).  The ``min(...) < 2`` clause preserves the original
    vacuous-True behaviour for arguments below 2 (e.g. 0, 1 or negatives),
    where the old loop body never executed.
    """
    import math
    return min(num1, num2) < 2 or math.gcd(num1, num2) == 1
def gcd(*nums):
    """Greatest common divisor of *nums* (sign ignored, zeros skipped), floored at 1.

    Rewritten as a math.gcd fold: the original scanned every candidate from
    min(|nums|) down to 2, which is O(min) per call; the fold is O(log).
    This also fixes the pathological all-zero case, where the old loop
    iterated from 1 << 32 downward and effectively returned 2**32 — we now
    return 1 (no divisor greater than 1 exists).
    """
    import math
    result = 0
    for num in nums:
        result = math.gcd(result, num)  # gcd(0, x) == |x|, so zeros are neutral
    return result if result > 1 else 1
class FormulaTemplate:
def __init__(self, vi ,w ,k, h, m ,timeout=3000000): ####加了w
self.k = k # amount of clause 多少个子句
self.h = h # number of inequality 第一类不等式数量上限
self.m = m # number of mode number 第二类不等式数量上限
self.w = w
self.vi = vi
n = len(vi)
self.n = n
self.aeij = [[Int('ae' + str(i) + str(j)) for j in range(n)] for i in range(h)]
self.bi = [Int('b' + str(i)) for i in range(h)]
self.amij = [[Int('am' + str(i) + str(j)) for j in range(n)] for i in range(m)]
self.ei = [Int('e' + str(i)) for i in range(m)] ##改成定值 , 写一个函数,从2开始一个个试????(还没实现)
self.ci = [Int('c' + str(i)) for i in range(m)]
self.heij = [[Bool('h_e' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hgeij = [[Bool('h_ge' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.hleij = [[Bool('h_le' + str(j) + str(i)) for i in range(h)] for j in range(k)]
self.tij = [[Bool('t' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.ntij = [[Bool('nt' + str(j) + str(i)) for i in range(m)] for j in range(k)]
self.s = Solver()
for i in range(h):
# 不等式系数ae_ij不能全部为0
self.s.add(Or(*[a > 0 for a in self.aeij[i]]))
for j in range(i + 1, h):
self.s.add(Or(*[self.aeij[i][w] != self.aeij[j][w] for w in range(n)]))
for i in range(m):
# 模等式的系数am_ij不能全部小于等于0
self.s.add(Or(*[am > 0 for am in self.amij[i]]))
# 模等式的系数am_ij不能大于模e
self.s.add(*[And(0 <= am, am < self.ei[i]) for am in self.amij[i]])
# for j in range(i + 1, m):
# self.s.add(Or(self.ei[i] != self.ei[j],
# *[self.amij[i][w] != self.amij[j][w] for w in range(n)]))
# 余数c_i必须小于模e
self.s.add(*[And(self.ei[i] > self.ci[i], self.ci[i] >= 0) for i in range(m)])
# 模必须大于等于2,并且小于一定范围
self.s.add(*[And(e <= 10 * m, e >= 2) for e in self.ei])
for i in range(k):
# 判断条件一定有一个是False,避免逻辑出现False
for j in range(i + 1, k):
all_true = [And(self.heij[i][w], self.hgeij[i][w], self.hleij[i][w]) for w in range(h)]
all_true.extend([And(self.tij[i][w], self.ntij[i][w]) for w in range(m)])
struct_const = [Or(self.heij[i][w] != self.heij[j][w],
self.hgeij[i][w] != self.hgeij[j][w],
self.hleij[i][w] != self.hleij[j][w]) for w in range(h)]
struct_const.extend([Or(self.tij[i][w] != self.tij[j][w],
self.ntij[i][w] != self.ntij[j][w]) for w in range(m)])
self.s.add(Or(*struct_const, *all_true))
self.s.set("timeout", timeout)
def add(self, example, label):
self.s.add(self.encoding(example, label))
def check(self):
check = self.s.check()
if check == sat:
self.solve_model()
return check
def W_size(m):
return m+2
def encoding(self, example, label):
Equ = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) != self.bi[i] for i in range(self.h)]
Ge = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) >= self.bi[i] for i in range(self.h)]
Le = [combine(example[j] * self.aeij[i][j] for j in range(self.n)) <= self.bi[i] for i in range(self.h)]
Me = [combine(example[j] * self.amij[i][j] for j in range(self.n)) % self.ei[i] == self.ci[i] for i in
range(self.m)]
Tk = []
for k in range(self.k):
clause = []
clause.extend([Implies(self.heij[k][h], Equ[h]) for h in range(self.h)])
clause.extend([Implies(self.hgeij[k][h], Ge[h]) for h in range(self.h)])
clause.extend([Implies(self.hleij[k][h], Le[h]) for h in range(self.h)])
clause.extend([Implies(self.tij[k][m], Me[m]) for m in range(self.m)])
clause.extend([Implies(self.ntij[k][m], Not(Me[m])) for m in range(self.m)])
Tk.append(And(*clause))
# print("Or(*Tk) , label=\n",Or(*Tk),label)
return Or(*Tk) == label
    def solve_model(self): # extract concrete values  #### added w
        """Read the satisfying model and materialize the template.

        Populates M/E/C (modular rows), A/B (linear rows) and the boolean
        selector matrices He/Hge/Hle/T/Nt, then normalizes coefficient rows
        by common divisors.

        NOTE(review): the normalization below uses '/=' (true division), so
        entries of M, C, A and B may become floats; only E is cast back to
        int at the end — confirm downstream code tolerates float coefficients.
        """
        print("w", self.w)
        #W_size = [2,3,4,5,6,7,8,9]
        model = self.s.model()
        self.M = [[model[self.amij[i][j]].as_long() if model[self.amij[i][j]] is not None else 0
                   for j in range(self.n)]
                  for i in range(self.m)]
        ## solving e with z3 (this part is to be changed)
        # self.E = [model[self.ei[i]].as_long() if model[self.ei[i]] is not None else 1 for i in range(self.m)]
        # print("E= \n",self.E)
        #### changed: every modulus is fixed to W_size(w) instead of being solved for
        for i in range(self.m):
            self.ei[i] = FormulaTemplate.W_size(self.w)
        self.E = [self.ei[i] for i in range(self.m)]
        print("E = \n",self.E)
        ####
        self.C = [model[self.ci[i]].as_long() if model[self.ci[i]] is not None else 0 for i in range(self.m)]
        self.A = [[model[self.aeij[i][j]].as_long() if model[self.aeij[i][j]] is not None else 0
                   for j in range(self.n)]
                  for i in range(self.h)]
        self.B = [model[self.bi[i]].as_long() if model[self.bi[i]] is not None else 0 for i in range(self.h)]
        self.He = [
            [bool(model[self.heij[i][j]]) if model[self.heij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.Hge = [
            [bool(model[self.hgeij[i][j]]) if model[self.hgeij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.Hle = [
            [bool(model[self.hleij[i][j]]) if model[self.hleij[i][j]] is not None else False
             for j in range(self.h)]
            for i in range(self.k)
        ]
        self.T = [
            [bool(model[self.tij[i][j]]) if model[self.tij[i][j]] is not None else False
             for j in range(self.m)]
            for i in range(self.k)
        ]
        self.Nt = [
            [bool(model[self.ntij[i][j]]) if model[self.ntij[i][j]] is not None else False
             for j in range(self.m)]
            for i in range(self.k)
        ]
        for i in range(self.m):
            flag = True # whether all nonzero coefficients in this row are equal
            pix = -1
            for am in self.M[i]:
                if pix == -1:
                    if am != 0:
                        pix = am
                elif am != 0 and am != pix:
                    flag = False
                    break
            if flag: # all coefficients are identical
                if self.C[i] == 0:
                    # (older normalization kept for reference)
                    # if co_prime(pix, self.E[i]):
                    #     for j in range(self.n):
                    #         if self.M[i][j] != 0:
                    #             self.M[i][j] = 1
                    # else:
                    #     div = gcd(pix, self.E[i])
                    #     self.E[i] /= div
                    #     for j in range(self.n):
                    #         self.M[i][j] /= div
                    if not co_prime(pix, self.E[i]):
                        self.E[i] /= gcd(pix, self.E[i])
                    for j in range(self.n):
                        self.M[i][j] = 1
                else:
                    div = gcd(pix, self.E[i], self.C[i])
                    self.E[i] /= div
                    self.C[i] /= div
                    pix /= div
                    for j in range(self.n):
                        self.M[i][j] /= div
                    div = gcd(int(pix), int(self.C[i]))
                    for j in range(self.n):
                        self.M[i][j] /= div
                    self.C[i] /= div
        for i in range(self.h):
            # Normalize each linear row by the gcd of its coefficients and bound.
            divisior = gcd(*self.A[i], self.B[i])
            self.B[i] /= divisior
            for j in range(self.n):
                self.A[i][j] /= divisior
        for i in range(len(self.E)):
            self.E[i] = int(self.E[i])
    def formula_model(self, *val): # build a formula model: substituting variables yields a symbolic formula, substituting numbers yields a value
        """Instantiate the solved template over *val* (defaults to self.vi).

        Returns the simplified z3 DNF: one conjunction per clause k, with
        the relation (</>/==/!=/<=/>=) and modular atoms chosen by the
        solved selector matrices.
        """
        if len(val) == 0:
            val = self.vi
        formu = []
        for k in range(self.k):
            clause = []
            for h in range(self.h):
                Coe = combine(self.A[h][j] * val[j] for j in range(self.n))
                status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True): # select the </>/== relation
                    clause.append(Coe <= self.B[h])
                elif status == (False, True, False):
                    clause.append(Coe >= self.B[h])
                elif status == (True, False, False):
                    clause.append(Coe != self.B[h])
                elif status == (False, True, True):
                    clause.append(Coe == self.B[h])
                elif status == (True, False, True):
                    clause.append(Coe < self.B[h])
                elif status == (True, True, False):
                    clause.append(Coe > self.B[h])
                elif status == (True, True, True):
                    clause.append(False)
            for m in range(self.m):
                status = (self.T[k][m], self.Nt[k][m])
                if status == (True, False): # select the modular constraint
                    clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] == self.C[m])
                elif status == (False, True):
                    clause.append(combine(self.M[m][j] * val[j] for j in range(self.n)) % self.E[m] != self.C[m])
                elif status == (True, True):
                    clause.append(False)
            formu.append(And(*clause))
        # print("simplify(Or(*formu))=\n",simplify(Or(*formu)))
        return simplify(Or(*formu))
    def refine_modu(self, coe, e, b, res, tmp, last=0):
        """Enumerate per-variable residue assignments compatible with
        (sum coe_i * x_i) mod e == b, appending each complete assignment
        to *res*.  *tmp* is the partial assignment; *last* is the residue
        contribution accumulated so far.
        """
        if len(coe) == 1:
            if coe[0] == 0:
                if last % e == b:
                    tmp.append(0)
                else:
                    return
            # NOTE(review): when coe[0] == 0 and last % e == b, the branch
            # above appends 0 AND this loop appends a second value, while only
            # one pop() follows — *tmp* is likely left corrupted for later
            # siblings.  It looks like the loop should be skipped in that
            # case; confirm the intended semantics.
            for i in range(e):
                if (i + last) % e == b:
                    tmp.append(i)
                    break
            res.append(list(tmp))
            tmp.pop()
        elif coe[0] == 0:
            # Zero coefficient: this variable's residue is irrelevant; pin it to 0.
            tmp.append(0)
            self.refine_modu(coe[1:], e, b, res, tmp, last)
            tmp.pop()
        else:
            # Try every residue for this position and recurse on the rest.
            for i in range(e):
                tmp.append(i)
                self.refine_modu(coe[1:], e, b, res, tmp, last + i)
                tmp.pop()
    def build_formula(self, coe, V, e, C):
        """Conjunction asserting (coe_i * v_i) mod e == C_i for every variable."""
        expr = And(*[(coe[i] * v) % e == C[i] for i, v in enumerate(V)])
        return simplify(expr)
    def refine_model(self):
        """Expand the solved template into nested lists of atomic formulas.

        Returns one clause list per DNF disjunct; each inner list holds the
        alternative atoms for a single template slot (e.g. '!=' is split
        into '<' and '>', '<=' into '<' and '==', and every modular atom is
        expanded into per-variable residue assignments via refine_modu).
        """
        formu_arr = []
        for k in range(self.k):
            clause = []
            for h in range(self.h):
                Coe = combine(self.A[h][j] * self.vi[j] for j in range(self.n))
                status = (self.He[k][h], self.Hge[k][h], self.Hle[k][h])
                if status == (False, False, True):
                    clause.append([Coe < self.B[h], Coe == self.B[h]])
                elif status == (False, True, False):
                    clause.append([Coe > self.B[h], Coe == self.B[h]])
                elif status == (True, False, False):
                    clause.append([Coe < self.B[h], Coe > self.B[h]])
                elif status == (False, True, True):
                    clause.append([Coe == self.B[h]])
                elif status == (True, False, True):
                    clause.append([Coe < self.B[h]])
                elif status == (True, True, False):
                    clause.append([Coe > self.B[h]])
                elif status == (True, True, True):
                    clause.append([False])
            for m in range(self.m):
                status = (self.T[k][m], self.Nt[k][m])
                # Com = combine(self.M[m][j] * self.vi[j] for j in range(self.n))
                if status == (True, False):
                    # clause.append([Com % self.E[m] == self.C[m]])
                    mod_res = []
                    self.refine_modu(self.M[m], self.E[m], self.C[m], mod_res, [])
                    for C in mod_res:
                        clause.append([self.build_formula(self.M[m], self.vi, self.E[m], C)])
                elif status == (False, True):
                    # Negated modular atom: any residue other than C[m] works.
                    mod_clause = []
                    for i in range(self.E[m]):
                        if i != self.C[m]:
                            # mod_clause.append(Com % self.E[m] == i)
                            mod_res = []
                            self.refine_modu(self.M[m], self.E[m], i, mod_res, [])
                            for C in mod_res:
                                mod_clause.append(self.build_formula(self.M[m], self.vi, self.E[m], C))
                    clause.append(mod_clause)
                elif status == (True, True):
                    clause.append([False])
            formu_arr.append(clause)
        return formu_arr
class EquTemplate:
    """Fit a linear expression  a·v + b  to sample points with z3."""

    def __init__(self, n):
        # One integer unknown per coefficient, plus the constant term.
        self.vi = [Int('v' + str(idx)) for idx in range(n)]
        self.b = Int('b')
        self.s = Solver()

    def add(self, vector):
        """Add one sample: vector[:-1] are inputs, vector[-1] is the target."""
        coeffs, target = vector[:-1], vector[-1]
        lhs = combine(coeffs[i] * var for i, var in enumerate(self.vi)) + self.b
        self.s.add(lhs == target)

    def check(self):
        """Run the solver; returns z3's sat/unsat verdict."""
        return self.s.check()

    def solve_model(self):
        """Extract the fitted coefficients and return the simplified expression."""
        model = self.s.model()
        coeff_vals = [model[var].as_long() if model[var] is not None else 0
                      for var in self.vi]
        const = model[self.b].as_long() if model[self.b] is not None else 0
        fitted = combine(coeff_vals[i] * var for i, var in enumerate(self.vi)) + const
        return simplify(fitted)
if __name__ == '__main__':
    # smt = FormulaTemplate([Int('v1'), Int('v2')], 4, 3, 2)
    # smt.add([1, 2], True)
    # smt.add([2, 3], False)
    # print(smt.s)
    # print(smt.check())
    #
    # arr = smt.refine_model()
    # for a in arr:
    #     print(a)
    #
    # formu = smt.formula_model()
    # print(formu)
    # print('-' * 50)
    # print(simplify(formu))
    # print('-' * 50)
    # Demo: fit a linear expression to three sample points (last entry is the target).
    smt = EquTemplate(2)
    smt.add([0, 1, 1])
    smt.add([1, 2, 1])
    smt.add([3, 6, 3])
    if smt.check() == sat:
        print(smt.solve_model()) # 1*v0 + 2*v1 + 1
    else:
        print(unsat)
|
4,627 | cf65966f5daf88bdefc7a8aa2ff80835cff0d0b6 | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import (
StackingClassifier,
RandomForestClassifier
)
import pandas as pd
from sklearn.metrics import f1_score
# feel free to import any sklearn model here
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
def load_data():
    """
    Helper function for loading in the data
    ------
    # of training samples: 419
    # of testing samples: 150
    ------
    Returns (train_X, train_y, test_X, test_y); labels are in {-1, +1}.
    """
    df = pd.read_csv("../../Data/breast_cancer_data/data.csv")
    cols = df.columns
    X = df[cols[2:-1]].to_numpy()
    y = df[cols[1]].to_numpy()
    # Map diagnosis to {-1, +1}: 'M' (malignant) -> 1, otherwise -> -1.
    # BUG FIX: np.int is a removed deprecated alias (NumPy >= 1.24); use int.
    y = (y == 'M').astype(int) * 2 - 1

    train_X = X[:-150]
    train_y = y[:-150]
    test_X = X[-150:]
    test_y = y[-150:]

    return train_X, train_y, test_X, test_y
def main():
    """Train a stacked classifier on the breast-cancer data and print test F1."""
    np.random.seed(0)
    train_X, train_y, test_X, test_y = load_data()

    # Stacking models:
    # Base estimators for the stack; their out-of-fold predictions feed the
    # final estimator.
    base_models = [
        ('rfc', RandomForestClassifier()),
        ('svm', SVC()),
        ('gnb', GaussianNB()),
        ('knc', KNeighborsClassifier()),
        ('dtc', DecisionTreeClassifier())
    ]
    # The default final_estimator is LogisticRegression
    sc = StackingClassifier(estimators=base_models)
    # fit the model on the training data
    sc.fit(train_X, train_y)
    # predict
    y_pred = sc.predict(test_X)

    # BUG FIX: f1_score's signature is (y_true, y_pred, ...); the arguments
    # were swapped, which skews the 'weighted' average (class weights are
    # taken from y_true's support).
    print(f"f1 score = {f1_score(test_y, y_pred, average = 'weighted')}")
if __name__ == '__main__':
main()
|
4,628 | f6838906c961a9ca7d91d2ab02fd2af72797b880 | from torch import nn
class MNIST3dModel(nn.Module):
    """Small 3D CNN for 10-way classification of volumetric inputs.

    Assumes a 16x16x16 spatial input so that, after two 2x max-pools, the
    flattened feature size is num_filters * 8 * 4**3 (4096 for the default
    num_filters=8) — TODO confirm the expected input size against callers.
    """

    def __init__(self, input_c=3, num_filters=8, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv3d(in_channels=input_c,
                               out_channels=num_filters,
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.conv2 = nn.Conv3d(in_channels=num_filters,
                               out_channels=num_filters * 2,
                               kernel_size=3,
                               stride=1,
                               padding=1)
        # BUG FIX: the batch-norm widths were hard-coded to 16 and 64, which
        # broke any num_filters != 8; derive them from num_filters instead
        # (identical to the old values at the default num_filters=8).
        self.batchnorm1 = nn.BatchNorm3d(num_filters * 2)
        self.conv3 = nn.Conv3d(in_channels=num_filters * 2,
                               out_channels=num_filters * 4,
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.conv4 = nn.Conv3d(in_channels=num_filters * 4,
                               out_channels=num_filters * 8,
                               kernel_size=3,
                               stride=1,
                               padding=1)
        self.batchnorm2 = nn.BatchNorm3d(num_filters * 8)
        self.pool = nn.MaxPool3d(2)
        self.dropout1 = nn.Dropout(0.25)
        self.relu = nn.ReLU()
        # 4**3 spatial cells remain after two 2x pools on a 16^3 input
        # (num_filters * 8 * 64 == 4096 at the defaults, as before).
        self.linear1 = nn.Linear(num_filters * 8 * 4 ** 3, 1024)
        self.dropout2 = nn.Dropout(0.5)
        self.linear2 = nn.Linear(1024, num_classes)

    def forward(self, x):
        """Two conv stages (conv-conv-relu-bn-pool), then the MLP head."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.relu(x)
        x = self.batchnorm1(x)
        x = self.pool(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = self.relu(x)
        x = self.batchnorm2(x)
        x = self.pool(x)
        x = self.dropout1(x)
        x = x.view(x.size()[0], -1)
        x = self.linear1(x)
        x = self.relu(x)
        x = self.dropout2(x)
        x = self.linear2(x)
        return x
4,629 | 1346bf78241b4be00f2da3c22731d2846f9d1ada | import shelve
from club import Club
# total budget of the clubs in a given country
# the club that has the most trophies
country = input('country: ')

FILENAME = "clubs"
with shelve.open(FILENAME) as clubs:
    # Case-insensitive country filter over all stored Club objects.
    clubs_by_country = list(filter(lambda s: s.country.lower() == country.lower(), clubs.values()))
    if len(clubs_by_country) == 0 :
        print("No clubs with such country")
        exit()
    # award/budget are stored as strings, hence the int() conversions.
    the_best_club = max(clubs_by_country, key=lambda s: int(s.award))
    clubs_budget = sum(int(club.budget) for club in clubs_by_country)
    print("The best club: ", the_best_club)
    print("Summary budget: ", clubs_budget)
4,630 | a29a904290cb733ac7b526a75e0c218b952e2266 | import mysql.connector
# Example connection config (kept for reference).
# config = {
# "user":"root",
# "password":"Sm13481353",
# "host":"3"
# }
# NOTE(review): database credentials are hard-coded in source — move them to
# environment variables or a config file kept out of version control.
mydb = mysql.connector.connect(
    user="seyed",
    password="Sm13481353",
    host="localhost",
    database="telegram_bot",
    auth_plugin="mysql_native_password"
)

mycursor = mydb.cursor()
# Insert is left disabled; this script only reads rows back.
query = "insert into question(update_id,chat_id) values (40,20)"
# mycursor.execute(query)
# mydb.commit()
mycursor.execute("select * from question")
users = mycursor.fetchall()
for user in users:
    print(user)
|
4,631 | 872b13a93c9aba55c143ee9891543f059c070a36 | # dg_kernel plots
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import csv
import sys
# Plot/layout constants.
NE_SIZE = 128
TITLE_SIZE = 35
TEXT_SIZE = 30
MARKER_SIZE = 10
LINE_WIDTH = 5
# Map a small integer index to a matplotlib named color.
colors = { idx:cname for idx, cname in enumerate(mcolors.cnames) }
# PAPI event name and callstack depth selecting which folding CSVs to read.
eventname = 'L1_DCM'
callstacklevel = 7
# Minimum normalized frequency for a source line to be plotted.
FREQ_THRESHOLD = 0.02
ROOT = '/global/homes/g/grnydawn/trepo/temp/cylcworkspace/extrae_HSW/cgroup/folding/02242017_1353/codeline'
# read histogram file
def read_histogram(histofile):
    """Parse a tab-separated folding CSV into {'Head': [...], rowname: [floats]}.

    Empty header cells are remembered and the matching columns are skipped
    in every data row; unparsable empty cells become 0.0.  (Python 2 code.)
    """
    histodict = {}
    with open(histofile, 'rb') as f:
        reader = csv.reader(f, delimiter='\t')
        try:
            exclude_item = []
            for i, row in enumerate(reader):
                if len(row)<1: continue
                if i==0:
                    # Header row: collect column names, remember blank columns.
                    name = []
                    for j, item in enumerate(row[1:]):
                        if len(item)<1:
                            exclude_item += [ j ]
                            continue
                        name += [ item ]
                    histodict['Head'] = name
                else:
                    numval = []
                    for j, item in enumerate(row[1:]):
                        if j in exclude_item: continue
                        try:
                            numval += [ float(item) ]
                        except Exception as e:
                            if len(item)<1:
                                numval += [ 0.0 ]
                            else:
                                print e
                    histodict[row[0]] = numval
        except csv.Error as e:
            sys.exit('file %s, line %d: %s' % (histofile, reader.line_num, e))
    return histodict
def draw_histogram(xname, yval, title, xlabel, ylabel, filename, xrange=None):
    """Bar chart of per-line frequencies, bars colored by source file name.

    *filename* gives one file name per bar; the legend shows each distinct
    file once (via tiny dummy bars).  (Python 2 / matplotlib code.)
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(title, fontsize=TITLE_SIZE)
    ax.set_xlabel(xlabel, fontsize=TEXT_SIZE)
    ax.set_ylabel(ylabel, fontsize=TEXT_SIZE)
    if xrange: XL = xrange
    else: XL = [0, len(xname)]
    ax.set_xticks(range(len(xname)))
    # NOTE(review): i%1==0 is always true, so every label is kept; the
    # modulus looks like a leftover knob for thinning tick labels.
    newname = []
    for i, xn in enumerate(xname):
        if i%1==0:
            newname += [ xn ]
        else:
            newname += [ "" ]
    ax.set_xticklabels(newname)
    xval = np.arange(len(xname))[XL[0]:XL[1]]
    yval = yval[XL[0]:XL[1]]
    YL = [0, max(yval)*1.5]
    ax.axis(XL + YL)
    gridlines = ax.get_xaxis().get_gridlines()
    for gl in gridlines:
        gl.set_visible(False)
    ax.grid(b=True, which='major', color='b', linestyle='-', linewidth=0.5)
    ax.grid(b=False, which='minor', color='#888888', linestyle='-',linewidth=0.5)
    ax.grid(True)
    for label in ax.xaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)
    #for label in ax.xaxis.get_ticklabels(): label.set_fontsize(20)
    for label in ax.yaxis.get_ticklabels(): label.set_fontsize(TEXT_SIZE)
    # One color per distinct file name.
    fnamelist = list(set(filename))
    clist = []
    for fname in filename:
        color = colors[fnamelist.index(fname)]
        clist += [ color ]
    width = (XL[1]-XL[0])/float(len(xval)*2)
    histo = ax.bar(xval-width/2, yval, width, color=clist)
    # Near-zero bars exist only to give the legend one handle per file.
    dummy_bars = []
    for i, fname in enumerate(fnamelist):
        dummy_bars += ax.bar([0], [1.E-16], width, color=colors[i])
    ax.legend(dummy_bars, fnamelist, loc=2)
    #plt.savefig("./dgomp.png")
    plt.show()
# Load the high-activity ("peak") histograms for both regions and compute each
# source line's average burst count normalized by the total over both regions.
peak1 = read_histogram('%s/%s_high_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))
peak2 = read_histogram('%s/%s_high_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))
peaks_avgsum = sum(peak1['Average']) + sum(peak2['Average'])
#print 'peaks_avgsum = ', peaks_avgsum
peaks_normavg = {}
for i, line in enumerate(peak1['Head']):
    if peaks_normavg.has_key(line):
        peaks_normavg[line] += peak1['Average'][i]
    else:
        peaks_normavg[line] = peak1['Average'][i]
for i, line in enumerate(peak2['Head']):
    if peaks_normavg.has_key(line):
        peaks_normavg[line] += peak2['Average'][i]
    else:
        peaks_normavg[line] = peak2['Average'][i]
#print 'peaks_normavg before = ', peaks_normavg.values()[:30]
for line in peaks_normavg.keys():
    peaks_normavg[line] = peaks_normavg[line]/peaks_avgsum
#print 'peaks_normavg after = ', peaks_normavg.values()[:30]
# Same aggregation for the low-activity ("non-peak") histograms.
nonpeak1 = read_histogram('%s/%s_low_linelevel%d_region0.csv'%(ROOT, eventname, callstacklevel))
nonpeak2 = read_histogram('%s/%s_low_linelevel%d_region1.csv'%(ROOT, eventname, callstacklevel))
nonpeaks_avgsum = sum(nonpeak1['Average']) + sum(nonpeak2['Average'])
nonpeaks_normavg = {}
for i, line in enumerate(nonpeak1['Head']):
    if nonpeaks_normavg.has_key(line):
        nonpeaks_normavg[line] += nonpeak1['Average'][i]
    else:
        nonpeaks_normavg[line] = nonpeak1['Average'][i]
for i, line in enumerate(nonpeak2['Head']):
    if nonpeaks_normavg.has_key(line):
        nonpeaks_normavg[line] += nonpeak2['Average'][i]
    else:
        nonpeaks_normavg[line] = nonpeak2['Average'][i]
#print 'nonpeaks_normavg before = ', nonpeaks_normavg.values()[:30]
for line in nonpeaks_normavg.keys():
    nonpeaks_normavg[line] = nonpeaks_normavg[line]/nonpeaks_avgsum
#print 'nonpeaks_normavg after = ', nonpeaks_normavg.values()[:30]
#import pdb; pdb.set_trace()
# Difference of normalized frequencies: positive = over-represented at peaks.
result = {}
for line, bursts in peaks_normavg.iteritems():
    result[line] = bursts
for line, bursts in nonpeaks_normavg.iteritems():
    if result.has_key(line):
        result[line] -= bursts
    else:
        result[line] = -1.0*bursts
# Keep only lines above the threshold; parse "<lineno> (<location>)" labels
# and recover the source file name (ending .c/.F/.f...) for bar coloring.
xlinenum = []
ybursts = []
filename = []
for line, bursts in result.iteritems():
    if bursts>FREQ_THRESHOLD:
        match = re.search(r'\s*(\d+)\s+\((.*)\)', line)
        if match:
            xlinenum += [ match.group(1) ]
            ybursts += [ float(bursts) ]
            matchfname = re.search(r'(\b\w+\.[cFf][\d]*\,)', match.group(2))
            if matchfname is None:
                fname = 'Unresolved'
            else:
                fname = matchfname.group(1)[:-1]
            filename += [ fname ]
# Python 2: zip() returns a list here, so the in-place sort works.
zipped = zip(xlinenum, ybursts, filename)
zipped.sort()
xlinenum, ybursts, filename = zip(*zipped)
#title = 'Frequent source lines in a region of interest'
title = 'Frequent source lines at high %s regions in callstack level %d'%(eventname, callstacklevel)
xlabel = 'Sampled function line number'
ylabel = 'Normalized frequency'
draw_histogram(xlinenum, np.array(ybursts), title, xlabel, ylabel, filename)
|
4,632 | a3ed47c285b26dca452fa192eb354a21a78b8424 | from math import sqrt, ceil
def encode_s(s):
    """Encode *s* by reading its space-stripped text in ceil(sqrt(n)) columns.

    Characters at positions col, col+step, col+2*step, ... form one output
    word; words are joined by single spaces.
    """
    stripped = s.replace(' ', '')
    step = ceil(sqrt(len(stripped)))
    return ' '.join(stripped[col::step] for col in range(step))
def decode_s(s):
    """Invert encode_s: read the space-separated column words row by row."""
    columns = s.split(' ')
    width = len(columns[0])
    return ''.join(col[row]
                   for row in range(width)
                   for col in columns
                   if row < len(col))
def TheRabbitsFoot(s, encode):
    """Dispatch to the encoder or the decoder depending on *encode*."""
    return encode_s(s) if encode else decode_s(s)
|
4,633 | b43ea8c32207bf43abc3b9b490688fde0706d876 | A = int(input())
B = int(input())
C = int(input())
number = A * B * C
num = str(number)
for i in range(10): # 9를 입력해서 첨에 틀림 !
count = 0
for j in range(len(num)):
if i == int(num[j]):
count += 1
else:
continue
print(count)
|
4,634 | a6cc0078fb37f9c63e119046193f521290c9fb21 | # Generated by Django 3.2.8 on 2021-10-20 08:25
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: sets human-readable verbose_name options for
    # several models; no schema changes are involved.

    dependencies = [
        ('app', '0006_auto_20211020_0817'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='currencies',
            options={'verbose_name': 'Currencie'},
        ),
        migrations.AlterModelOptions(
            name='proposals',
            options={'verbose_name': 'Proposal'},
        ),
        migrations.AlterModelOptions(
            name='transactions',
            options={'verbose_name': 'Transaction'},
        ),
        migrations.AlterModelOptions(
            name='tutorials',
            options={'verbose_name': 'Tutorial'},
        ),
        migrations.AlterModelOptions(
            name='userkyc',
            options={'verbose_name': 'KYC Document'},
        ),
    ]
|
4,635 | 7273592ab8fea10d9a3cde58690063690c74b746 | newList = []
noDuplicate = []
while True:
elem = input("Enter a letter : (type quit to quit) ")
if elem.lower() != "quit":
newList.append(elem)
else:
break
for item in newList:
if item not in noDuplicate:
noDuplicate.append(item)
print(noDuplicate) |
4,636 | 551e9c696eaad6c78f2eae66e50cca34c153d9dd | print "test"
print "moreing"
print " a nnnnn" |
4,637 | 1b3493322fa85c2fe26a7f308466c4a1c72d5b35 | import numpy as np
import scipy.sparse as sparse
from .world import World
from . import util
from . import fem
from . import linalg
def solveFine(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the fine-scale FEM problem on the world's fine mesh.

    MbFine/AbFine are mass/stiffness load contributions (default to zero when
    None); boundaryConditions==0 marks Dirichlet segments.
    Returns (uFineFull, AFine, MFine).
    """
    NWorldFine = world.NWorldCoarse*world.NCoarseElement
    NpFine = np.prod(NWorldFine+1)

    # Default load terms to zero when not supplied.
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)

    # Dirichlet (==0) boundary segments are fixed; all other dofs are free.
    boundaryMap = boundaryConditions==0
    fixedFine = util.boundarypIndexMap(NWorldFine, boundaryMap=boundaryMap)
    freeFine = np.setdiff1d(np.arange(NpFine), fixedFine)

    # Scalar vs. matrix-valued diffusion coefficient.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine

    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)

    bFine = MFine*MbFine + AFine*AbFine

    # Solve the reduced system over the free dofs and scatter back.
    AFineFree = AFine[freeFine][:,freeFine]
    bFineFree = bFine[freeFine]

    uFineFree = linalg.linSolve(AFineFree, bFineFree)
    uFineFull = np.zeros(NpFine)
    uFineFull[freeFine] = uFineFree
    # (removed a no-op self-assignment and an unused NWorldCoarse local)

    return uFineFull, AFine, MFine
def solveCoarse(world, aFine, MbFine, AbFine, boundaryConditions):
    """Solve the FEM problem projected onto the coarse mesh.

    Assembles on the fine mesh, restricts via the prolongation basis, solves
    the coarse system, and prolongs the solution back to the fine mesh.
    Returns (uCoarseFull, uFineFull).
    """
    NWorldCoarse = world.NWorldCoarse
    NWorldFine = world.NWorldCoarse*world.NCoarseElement
    NCoarseElement = world.NCoarseElement
    NpFine = np.prod(NWorldFine+1)
    NpCoarse = np.prod(NWorldCoarse+1)

    # Default load terms to zero when not supplied.
    if MbFine is None:
        MbFine = np.zeros(NpFine)
    if AbFine is None:
        AbFine = np.zeros(NpFine)

    # Dirichlet (==0) boundary segments are fixed on the coarse mesh.
    boundaryMap = boundaryConditions==0
    fixedCoarse = util.boundarypIndexMap(NWorldCoarse, boundaryMap=boundaryMap)
    freeCoarse = np.setdiff1d(np.arange(NpCoarse), fixedCoarse)

    # Scalar vs. matrix-valued diffusion coefficient.
    if aFine.ndim == 1:
        ALocFine = world.ALocFine
    else:
        ALocFine = world.ALocMatrixFine

    AFine = fem.assemblePatchMatrix(NWorldFine, ALocFine, aFine)
    MFine = fem.assemblePatchMatrix(NWorldFine, world.MLocFine)

    bFine = MFine*MbFine + AFine*AbFine

    # Galerkin projection onto the coarse space: A_c = P^T A P, b_c = P^T b.
    basis = fem.assembleProlongationMatrix(NWorldCoarse, NCoarseElement)
    bCoarse = basis.T*bFine
    ACoarse = basis.T*(AFine*basis)

    ACoarseFree = ACoarse[freeCoarse][:,freeCoarse]
    bCoarseFree = bCoarse[freeCoarse]

    uCoarseFree = linalg.linSolve(ACoarseFree, bCoarseFree)
    uCoarseFull = np.zeros(NpCoarse)
    uCoarseFull[freeCoarse] = uCoarseFree
    # (removed a no-op self-assignment)

    uFineFull = basis*uCoarseFull

    return uCoarseFull, uFineFull
|
4,638 | 012e4112970a07559f27fa2127cdffcc557a1566 | list_angle_list = RmList()
variable_flag = 0
variable_i = 0
def user_defined_shoot():
    """Aim the gimbal at the stored yaw/pitch and fire three single shots."""
    global variable_flag
    global variable_i
    global list_angle_list
    variable_i = 1
    for count in range(3):
        # Indices [1] and [2] are presumably the first stored yaw/pitch pair
        # (RmList looks 1-indexed — TODO confirm).  NOTE(review): variable_i
        # is advanced but never used as an index, so all three shots aim at
        # the same stored angle — confirm this is intended.
        gimbal_ctrl.angle_ctrl(list_angle_list[1], list_angle_list[2])
        gun_ctrl.fire_once()
        variable_i = variable_i + 2
        time.sleep(0.2)
def user_defined_storage_angle():
    """Append the current yaw and pitch to list_angle_list.

    The gun LED is lit for 5 seconds as visual feedback while recording.
    """
    global variable_flag
    global variable_i
    global list_angle_list
    led_ctrl.gun_led_on()
    list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.gimbal_axis_yaw))
    list_angle_list.append(gimbal_ctrl.get_axis_angle(rm_define.gimbal_axis_pitch))
    time.sleep(5)
    led_ctrl.gun_led_off()
def start():
    """Main routine: aim at two markers in turn, store their angles, then fire."""
    global variable_flag
    global variable_i
    global list_angle_list
    robot_ctrl.set_mode(rm_define.robot_mode_free)
    gimbal_ctrl.set_rotate_speed(180)
    vision_ctrl.enable_detection(rm_define.vision_detection_marker)
    # Aim at the red-heart marker, wait, and record the gimbal angles.
    vision_ctrl.detect_marker_and_aim(rm_define.marker_trans_red_heart)
    time.sleep(5)
    user_defined_storage_angle()
    # Aim at the number-three marker and record again, then shoot.
    vision_ctrl.detect_marker_and_aim(rm_define.marker_number_three)
    time.sleep(3)
    user_defined_storage_angle()
    user_defined_shoot()
|
4,639 | 0d3e1df1720812e8546b1f3509c83d1e6566e103 |
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib import messages
from datetime import datetime, timedelta
class DeadlineMiddleware:
    """Gate views around the tea-party survey/start deadlines.

    Superusers, the admin site and the ComingsoonData API bypass the gate;
    everyone else is redirected depending on whether the survey has closed
    and whether the tea party has started.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        return response

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Bypass: API data endpoint, admin site, and superusers.
        if view_func.__module__ == 'api.views' and view_func.__name__ == 'ComingsoonData':
            return None
        if view_func.__module__ == 'django.contrib.admin.sites' or request.user.is_superuser:
            return None
        else:
            survey = datetime(2019, 9, 16, 23, 50, 0, 0) # survey close time
            teatime = datetime(2019, 9, 17, 18, 30, 0, 0) # tea party start time
            if survey - datetime.now() <= timedelta(milliseconds=0): # survey closed
                if teatime - datetime.now() <= timedelta(milliseconds=0): # survey closed + party started
                    if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                        messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimestart')
                        return HttpResponseRedirect(reverse('index'))
                    else:
                        return None
                else: # survey closed + party not yet started
                    if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
                        return None
                    else: # survey form -> "submissions closed"; everything else -> "not started yet"
                        if view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                            messages.add_message(request, messages.INFO, '表單已結束提交', extra_tags='teatimeform')
                        else:
                            messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')
                        return HttpResponseRedirect(reverse('comingsoon'))
            else: # survey still open
                if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'comingsoon':
                    return None
                elif view_func.__module__ == 'enter.views' and view_func.__name__ == 'attend':
                    return None
                else:
                    if view_func.__module__ == 'joinclub.views' and view_func.__name__ == 'index':
                        return HttpResponseRedirect(reverse('comingsoon'))
                    else:
                        messages.add_message(request, messages.INFO, '茶會尚未開始', extra_tags='yetstart')
                        return HttpResponseRedirect(reverse('comingsoon'))
4,640 | 8417b63e2b7b16d3d58175022662c5b3e59e4aaf | import pytest
from ethereum.tools.tester import TransactionFailed
def test_cant_ever_init_twice(ethtester, root_chain):
    """init() must revert for any sender once the root chain is deployed."""
    ethtester.chain.mine()
    with pytest.raises(TransactionFailed):
        root_chain.init(sender=ethtester.k0)
    # A different sender must not be able to re-initialize either.
    with pytest.raises(TransactionFailed):
        root_chain.init(sender=ethtester.k1)
|
4,641 | b4d09b6d8ad5f0584f74adc0fd8116265bb6649b | from django.conf.urls import url
from . import views
# URL routes for the accounts app: auth flows plus demo attack/false-login views.
urlpatterns = [
    url(r'^$', views.index_view, name='accounts.index'),
    url(r'^login/$', views.login_view, name='accounts.login'),
    url(r'^logout/$', views.logout_view, name='accounts.logout'),
    url(r'^registro/$', views.registro_usuario_view, name='accounts.registro'),
    url(r'obrigado/(?P<username>[\w]+)/$', views.obrigado_view, name='accounts.obrigado'),
    url(r'^ataque/$', views.ataque_view, name='accounts.ataque'),
    url(r'^flpositivo/$', views.falsoLoginPositivo_view, name='accounts.flpositivo'),
    url(r'^flnegativo/$', views.falsoLoginNegativo_view, name='accounts.flnegativo'),
]
4,642 | dc05a441c21a67fbb3a1975b3fccb865a32731c8 | # Copyright (C) 2020 Francis Sun, all rights reserved.
"""A copyright utility"""
import datetime
import argparse
import os
import os.path
class Copyright:
    """Builds and prepends a language-appropriate copyright line to a file."""

    # Known file extensions, grouped by language family.
    _file_type = {
        'c/c++': ['h', 'c', 'cpp', 'cc'],
        'python': ['py'],
        'cmake': ['cmake'],
        'vim': ['vim'],
        'shell': ['sh']
    }
    _declaration = "Copyright (C) {0} {1}, all rights reserved."
    # extension -> formatter function; filled in by the loops below.
    _formaters = {}

    def __init__(self, file_path, author):
        self.file_path = file_path
        self.author = author
        # Files recognized by name rather than by extension.
        file_name = self.file_path.split(os.path.sep)[-1]
        if file_name == 'CMakeLists.txt':
            self.file_type = 'cmake'
        elif file_name == 'vimrc':
            self.file_type = 'vim'
        else:
            # NOTE(review): a name without a dot yields the whole file name here.
            self.file_type = self.file_path.split('.')[-1]
        self.declaration = Copyright._declaration.format(
            datetime.date.today().year, self.author)

    def _c_cpp_formater(self):
        return "/* " + self.declaration + " */"
    # Register the formatter for every extension in its language family
    # (these loops run at class-body evaluation time).
    for ft in _file_type['c/c++']:
        _formaters[ft] = _c_cpp_formater

    def _py_formater(self):
        return "# " + self.declaration
    for ft in _file_type['python']:
        _formaters[ft] = _py_formater

    def _cmake_formater(self):
        return "# " + self.declaration
    for ft in _file_type['cmake']:
        _formaters[ft] = _cmake_formater

    def _vim_formater(self):
        return "\" " + self.declaration
    for ft in _file_type['vim']:
        _formaters[ft] = _vim_formater

    def _shell_formater(self):
        return "# " + self.declaration
    for ft in _file_type['shell']:
        _formaters[ft] = _shell_formater

    def get_declaration(self):
        # NOTE(review): falls through to None for unknown file types, which
        # makes Write() raise TypeError below — confirm whether that's intended.
        if self.file_type in Copyright._formaters:
            return Copyright._formaters[self.file_type](self)

    tmp_filename_suffix = ".fjcu"

    def Write(self):
        """Prepend the declaration via a temp file, then atomically replace."""
        tmp_filename = self.file_path + Copyright.tmp_filename_suffix
        with open(tmp_filename, 'w') as tmp_f:
            origin_content = ""
            if os.path.isfile(self.file_path):
                with open(self.file_path, 'r') as origin_f:
                    origin_content = origin_f.read()
            tmp_f.write(self.get_declaration() + "\n" + origin_content)
        os.replace(tmp_filename, self.file_path)
if __name__ == "__main__":
    # CLI: python <script> <file_path> <author>
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('file_path')
    parser.add_argument('author')
    opt = parser.parse_args()
    cr = Copyright(opt.file_path, opt.author)
    cr.Write()
|
4,643 | 2b928dad60bfb0ba863e9039a5462faa885644f3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot
from pywikibot import pagegenerators
import re
from pywikibot import xmlreader
import datetime
import collections
from klasa import *
def fraz(data):
    """Publish a list of pl.wiktionary entries marked as idioms but missing
    from the matching per-language idiom index page.

    *data* is a YYYYMMDD dump-date string used both for page selection and
    for the human-readable timestamp in the report.
    """
    # DD.MM.YYYY form of the dump date for display.
    data_slownie = data[6:8] + '.' + data[4:6] + '.' + data[0:4]
    lista_stron = getListFromXML(data)
    site = pywikibot.Site('pl', 'wiktionary')
    outputPage = pywikibot.Page(site, 'Wikipedysta:AlkamidBot/listy/związki_frazeologiczne')
    logfile = 'log/fraz.txt'
    tempLangs = []
    notFound = []
    text = 'Hasła, które określone zostały jako związek frazeologiczny, lecz nie widnieją w indeksie związków frazeologicznych odpowiednim dla danego języka. Ostatnia aktualizacja: %s\n' % (data_slownie)
    phraseList = {}
    notFoundList = collections.defaultdict(list)
    LangsMediaWiki = getAllLanguages()

    # prepare a dictionary of phrase indexes. If an index page doesn't exist
    # assign a blank page to it
    for a in LangsMediaWiki:
        #print a.shortName
        indexPageName = 'Indeks:{0}_-_Związki_frazeologiczne'.format(a.upperName)
        try: phraseList[a.shortName] = pywikibot.Page(site, indexPageName).get()
        except pywikibot.NoPage:
            phraseList['%s' % a.shortName] = ''
        except pywikibot.IsRedirectPage:
            print('redirect')

    # Scan every dump page: for each language section flagged as an idiom,
    # record titles that are absent from that language's index page.
    for a in lista_stron:
        try: word = Haslo(a)
        except notFromMainNamespace:
            continue
        except sectionsNotFound:
            continue
        except WrongHeader:
            continue
        else:
            if word.type == 3:
                for lang in word.listLangs:
                    if lang.type != 2:
                        lang.pola()
                    try: lang.subSections['znaczenia'].text
                    except AttributeError:
                        pass
                    except KeyError:
                        with open(logfile, 'a+', encoding='utf-8') as lf:
                            lf.write('\n"znaczenia" not found; word: {0}; lang: {1}'.format(word.title, lang.lang))
                    else:
                        if lang.type != 2 and 'związek frazeologiczny' in lang.subSections['znaczenia'].text and '[[{0}]]'.format(word.title) not in phraseList[lang.lang]:
                            notFoundList['%s' % lang.lang].append(word.title)

    # Emit one wiki section per language that has missing entries.
    for a in LangsMediaWiki:
        if notFoundList['%s' % a.shortName]:
            text += '== [[Indeks:%s_-_Związki_frazeologiczne|%s]] ==' % (a.upperName, a.longName)
            for b in notFoundList['%s' % a.shortName]:
                text += '\n*[[%s]] <nowiki>| *[[%s]]</nowiki> →' % (b, b)
            text += '\n'

    with open('output/fraz.txt', encoding='utf-8', mode='w') as f:
        f.write(text)
    outputPage.text = text
    outputPage.save(comment="Aktualizacja listy", botflag=False)
|
4,644 | 4863581a1a557186ceee8d544d1a996082edcf2c | #####coding=utf-8
import re
import urllib.request
import sys
import redis
from urllib.error import URLError, HTTPError
import urllib.parse
# /redis/cluster/23:1417694197540
def con():
    """Smoke-test Redis connectivity by writing and reading back one key.

    NOTE(review): host and password are hard-coded in source — move them to
    configuration outside version control.
    """
    pool = redis.ConnectionPool(host='ap2.jd.local', port=5360, password='/redis/cluster/1:1803528818953446384')
    r = redis.StrictRedis(connection_pool=pool)
    r.set('foo', 'bar')
    print(r.get('foo'))
# def findUrl(html):
# reg = r'item.jd.com/(\w+)'
# imgre = re.compile(reg)
# imglist = re.findall(imgre, html)
# x = 0
# print(imglist)
# for imgurl in imglist:
# # imgurl = "http://kill.jd.com/" + imgurl
# # page = urllib.request.urlopen(imgurl)
# # response = page.read().decode('utf-8')
# # print(response)
# x += 1
# print(x)
#
#
# def getHtml(url):
# page = urllib.request.urlopen(url)
# html = page.read().decode('utf-8')
# return html
def findUrl(url):
    """Recursively crawl *url* and count occurrences of 'jd.com' in the subtree.

    NOTE(review): there is no visited-set or depth limit, so pages can be
    revisited and recursion can grow without bound — confirm intended scope.
    """
    html = getHtml(url)
    x = isJd(html)
    print(x)
    if (x != 0):
        # Extract absolute http/ftp/https links from the page body.
        reg = r"((http|ftp|https):\/\/[\w\-_]+(\.[\w\-_]+)+([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?)"
        imgre = re.compile(reg)
        imglist = re.findall(imgre, html)
        # print(imglist)
        for imgurl in imglist:
            toUrl = imgurl[0]
            print(toUrl)
            # Skip static assets and blacklisted hosts before recursing.
            if (isNotImg(toUrl) and not urlAborted(toUrl)):
                try:
                    x += findUrl(toUrl)
                except:
                    print("cannot add to x!")
    return x
def isJd(html):
    """Count matches of the pattern 'jd.com' in *html*.

    Note the '.' in the pattern matches any character, so e.g. 'jdXcom'
    also counts.
    """
    pattern = re.compile(r'jd.com')
    return len(pattern.findall(html))
def isNotImg(url):
    """Return True when *url* does not point to a static asset.

    BUG FIX: the old pattern ``.+\\.jpg|jpeg|gif|...`` let the alternation
    bind at the top level, so any URL merely *containing* "jpeg", "gif",
    "css", ... anywhere (e.g. ``http://gifts.com``) was misclassified as an
    asset.  The extension is now required after a dot at the end of the
    path, optionally followed by a query string.
    """
    reg = r'\.(jpg|jpeg|gif|png|bmp|ico|mpg|mp4|css|js)(\?.*)?$'
    return re.search(reg, url) is None
def urlAborted(url):
    """Return True when *url* contains any blacklisted keyword."""
    blacklist = ['hdpreload', 'hao123', 'facebook', 'weibo', 's9w', 'w3', 'jd', 'joybuy', 'kela']
    return any(keyword in url for keyword in blacklist)
def getHtml(url):
    """Fetch *url* and decode the body using the charset from Content-Type.

    NOTE(review): relies on module-level globals 'response' and 'html'; if
    urlopen fails, *response* may be unbound and the getcode() call below
    raises NameError, and on a decode error *html* may be stale/unbound —
    confirm the intended error handling.
    """
    global response, html
    try:
        request = urllib.request.Request(url) # open=urlopen response.getcode() header=response.info()
        request.add_header('Content-Type', 'text/html; charset=utf-8')
        request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0')
        response = urllib.request.urlopen(request, timeout=5)
    except HTTPError as e:
        print('Error code:', e.code)
    except URLError as e:
        print('Reason', e.reason)
    except:
        print('Error unknown')
    if (response.getcode() == 200):
        try:
            # Pull the charset out of the Content-Type header and decode with it.
            reg = r'charset=(.*)'
            hasReg = re.compile(reg)
            code = re.findall(hasReg, response.headers['Content-Type'])
            html = response.read().decode(code[0])
        except UnicodeDecodeError as e:
            print('Reason', e.reason)
    else:
        html = ""
    return html
# html = getHtml("http://www.baidu.com/baidu?wd=jd&tn=monline_dg&ie=utf-8")
# Entry point: crawl starting from a Baidu search results page (network I/O).
print(findUrl("http://www.baidu.com/baidu?wd=jd&tn=monline_dg&ie=utf-8"))
# print(findUrl("http://list.tmall.com/search_product.htm?q=jd.com&type=p&vmarket=&spm=875.7931836%2FB.a2227oh.d100&from=mallfp..pc_1_searchbutton"))
|
4,645 | 2a13fffa105a5dd546c30c892e59888eb6ead996 | def fonct(valeur, a= None):
if type(a) is list:
a.append(valeur)
# a+= valeur
elif type(a) is tuple:
a += tuple((valeur,))
elif type(a) is str:
a += str(valeur)
elif type(a) is set:
a.add(valeur)
else:
a+= valeur
return(a)
print(fonct(4, [1, 2, 3])) # [1, 2, 3, 4]
print(fonct(4, 'eg' )) # eg4
print(fonct(4, (1,2,3))) # (1, 2, 3, 4)
print(fonct(4, {1, 2, 3})) # {1, 2, 3, 4}  (comment fixed: a set prints with braces)
|
4,646 | 4fd4c9cf3bdb73a003ce860bf2ee0ccab01f0009 | # The error measures used in this project
#
# Rooth Mean Squared Error
# Mean Absolute Error
#
# ! Both calculated after descaling the output of the system first
import numpy as np
def RMSE(min_y, max_y, yhat, y):
    """Root mean squared error, computed in the original (descaled) units.

    Both prediction and target are mapped back to the original scale first
    to prevent scale bias.

    Fixed: the previous version returned the *mean squared* error despite
    its name — the square root was missing.
    """
    yhat = descale(yhat, min_y, max_y)
    y = descale(y, min_y, max_y)
    return np.sqrt(np.mean(np.power(np.subtract(yhat, y), 2)))
def MAE(min_y, max_y, yhat, y):
    """Mean absolute error, computed in the original (descaled) units."""
    yhat = descale(yhat, min_y, max_y)
    y = descale(y, min_y, max_y)
    return np.mean(np.absolute(np.subtract(yhat, y)))
def descale(scaled_y, min_y, max_y):
    """Map values from the scaled range back to the original scale.

    Inputs:
        scaled_y = scalar or array of scaled values
        min_y    = minimum value of the original data
        max_y    = maximum value of the original data  (docstring previously
                   said "minimum" here — typo fixed)
    Output: scaled_y * (max_y - min_y) + min_y, in the original scale.
    """
    diff = np.subtract(max_y, min_y)
    descaled_y = np.add(np.multiply(scaled_y, diff), min_y)
    return descaled_y
4,647 | cfa064611a4aa16638bd649c68d64872b9fac1ff | from math import sqrt
def prime_generator(n):
    """Return the list of primes strictly below n, in ascending order.

    Trial division by the primes found so far, stopping once p*p > candidate.

    Fixed: the original wrote ``for i in range(3, n): i += 2`` which does NOT
    step by 2 — it tested every value from 5 up to n + 1 (evens included,
    wastefully) and could return primes larger than n; it also returned
    [2, 3] even for n <= 3.
    """
    primes = [p for p in (2, 3) if p < n]
    for candidate in range(5, n, 2):
        is_prime = True
        for p in primes:
            if p * p > candidate:
                # No prime factor <= sqrt(candidate) exists: it is prime.
                break
            if candidate % p == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(candidate)
    return primes
|
4,648 | 3dc3bbd00f9c2d00093bf8669963d96f5019b2da | import requests
from multiprocessing import Process
from atomic_counter import AtomicCounter
class Downloader:
    """Download a file over HTTP by fetching byte ranges in parallel processes.

    The target file is pre-allocated in __init__, then each worker GETs one
    `Range:` slice and writes it at the correct offset.
    """
    def __init__(self, src_url, num_threads):
        try:
            header = requests.head(src_url).headers
            self.url = src_url
            self.file_size = int(header.get('content-length'))
            self.file_name = src_url.split('/')[-1]
            self.num_threads = num_threads
            self.chunk_size = self.file_size // self.num_threads
            # Pre-allocate the full file so workers can seek to their offsets.
            with open(self.file_name, 'wb') as f:
                f.write(b'\x00' * self.file_size)
        except requests.exceptions.ConnectionError:
            print('Connection error, please check your internet connection.')
    def _worker(self, download_range: tuple, counter: AtomicCounter):
        """Fetch one byte range, record progress, and write it in place."""
        start, end = download_range
        header = {'Range': 'bytes=' + str(start) + '-' + str(end)}
        r = requests.get(self.url, headers=header, stream=True, timeout=30)
        binary_content = r.content
        counter.increment_by_value(end - start + 1)
        print(counter.get_value() / self.file_size)
        # Fixed: open with 'r+b' to update in place. The previous 'wb' mode
        # truncated the whole file on every worker, destroying the ranges
        # already written by the other workers.
        with open(self.file_name, 'r+b') as f:
            f.seek(start)
            f.write(binary_content)
    def download(self) -> None:
        """Split the file into num_threads ranges and download each in its own process."""
        download_ranges = []
        for i in range(self.num_threads):
            start = i * self.chunk_size
            if i == self.num_threads - 1:
                # NOTE(review): byte ranges are 0-indexed, so ending at
                # file_size requests one byte past the end; most servers
                # clamp this, but file_size - 1 would be exact — confirm.
                end = self.file_size
            else:
                end = start + self.chunk_size - 1
            download_ranges.append((start, end))
        atomic_counter = AtomicCounter()
        process_pool = [Process(target=self._worker,
                                args=(download_ranges[i], atomic_counter))
                        for i in range(self.num_threads)]
        for p in process_pool:
            p.start()
        for p in process_pool:
            p.join()
if __name__ == "__main__":
    # Example usage: fetch a large archive with 4 parallel range workers.
    downloader = Downloader(
        'https://download-cf.jetbrains.com/idea/ideaIC-2019.3.3.tar.gz', 4)
    downloader.download()
|
4,649 | 6369c692e358c0dfd1193c6e961ecf9b521ea9ba | # Import the SDK
import json
import boto3
from botocore.exceptions import ClientError
import uuid
#dbclient = boto3.client('dynamodb')
dbresource = boto3.resource('dynamodb', region_name='eu-west-1')
rekclient = boto3.client('rekognition', 'eu-west-1')
collection_name = 'swiftarycelebrity'
ScannedFacestable = dbresource.Table('ScannedFaces')
#
# List every indexed face in the Rekognition collection and echo its fields.
#
response = rekclient.list_faces(CollectionId=collection_name)
Faces = response['Faces']
for Images in Faces:
    lv_FaceId = Images['FaceId']
    lv_ImageId = Images['ImageId']
    # Fixed: a trailing comma here previously made this a 1-tuple, and the
    # following lines referenced misspelled names (ExternalImageId,
    # lv_FirstName, lv_SurName), so the loop raised NameError on first use.
    lv_ExternalImageId = Images['ExternalImageId']
    # ExternalImageId is expected to look like "<firstname>_<surname>";
    # anything without an underscore would make lv_Names[1] fail — TODO confirm.
    lv_Names = lv_ExternalImageId.split("_")
    lv_Firstname = lv_Names[0]
    lv_Surname = lv_Names[1]
    print('FaceId %s' % lv_FaceId)
    print('ImageId %s' % lv_ImageId)
    print('ExternalImageId %s' % lv_ExternalImageId)
    print('Infor %s' % json.dumps(Images))
    print('FirstName %s' % lv_Firstname)
    print('SurName %s' % lv_Surname)
    # DynamoDB write kept disabled, as in the original.
    #response = ScannedFacestable.put_item(
    #    Item={
    #        'FaceId' : lv_FaceId,
    #        'ImageId' : lv_ImageId,
    #        'ExternalImageId' : lv_ExternalImageId,
    #        'Firstname' : lv_Firstname,
    #        'Surname' : lv_Surname ,
    #        'Info' : json.dumps(Images)
    #        }
    #)
    print("PutItem succeeded:")
|
4,650 | dff5a46c6f1eb715fe5e1eec87e42ceb295b0eae | from django.contrib import admin
from django.urls import path, include
from .views import hindex,galeria,mision_vision,direccion,registro,login,logout_vista,registro_insumo,admin_insumos
urlpatterns = [
path('',hindex,name='HINDEX'),
path('galeria/',galeria,name='GALE'),
path('mision/',mision_vision,name='MISION'),
path('direccion/',direccion,name='UBICACION'),
path('registro/',registro,name='FORMU'),
path('login/',login,name='LOGIN'),
path('logout_vista/',logout_vista,name='LOGOUT'),
path('registro_insumo/',registro_insumo,name='INSUMOS'),
path('admin_insumos/',admin_insumos,name='ADMIN'),
]
admin.site.site_header="Administración Lavado de Autos" |
4,651 | 18d7c486b9070a1c607ba2ba5876309246013182 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2021 Opensource ICT Solutions B.V.
# https://oicts.com
#
#version: 1.0.0
#date: 06-11-2021
import requests
import json
import sys
url = 'http://<URL>/zabbix/api_jsonrpc.php?'  # Zabbix JSON-RPC endpoint; replace <URL>
token = '<TOKEN>'  # API auth token; replace before running
headers = {'Content-Type': 'application/json'}
hostname = sys.argv[1]  # target host name (positional arg; IndexError if omitted)
def main():
    # Orchestrates the run: resolve host id -> collect item ids -> queue rechecks.
    hostid = hostid_get(token)
    itemid_array = itemid_get(hostid,token)
    update(itemid_array,token)
def hostid_get(token):
    """Resolve the globally configured hostname to its Zabbix host id.

    Raises IndexError if the API returns no matching host.
    """
    payload = {
        'jsonrpc': '2.0',
        'method': 'host.get',
        'params': {
            'output': ['hostid'],
            'filter': {'host': hostname},
        },
        'auth': token,
        'id': 1,
    }
    reply = requests.post(url, data=json.dumps(payload), headers=headers)
    return reply.json()["result"][0]["hostid"]
def itemid_get(hostid, token):
    """Return the ids (as strings) of every pollable item on the host.

    The 'type' filter selects the item types that support "check now".
    """
    payload = {
        'jsonrpc': '2.0',
        'method': 'item.get',
        'params': {
            'output': 'itemid',
            'filter': {
                'host': hostname,
                'type': ("0", "1", "3", "5", "8", "9", "10", "11", "12", "13", "14", "15", "16", "19", "20", "21"),
            },
        },
        'auth': token,
        'id': 1,
    }
    reply = requests.post(url, data=json.dumps(payload), headers=headers)
    return [str(item['itemid']) for item in reply.json()['result']]
def update(itemid_array, token):
    """Queue a "check now" task (type 6) for every item id and print the API reply."""
    tasks = [{'type': '6', 'request': {'itemid': itemid}} for itemid in itemid_array]
    payload = {
        'jsonrpc': '2.0',
        'method': 'task.create',
        'params': tasks,
        'auth': token,
        'id': 1,
    }
    reply = requests.post(url, data=json.dumps(payload), headers=headers)
    print(json.dumps(reply.json()))
if __name__ == '__main__':
    # Script entry point: refresh all pollable items on the host named in argv[1].
    main()
|
4,652 | e6aa28ae312ea5d7f0f818b7e86b0e76e2e57b48 | from rest_framework import serializers
from films.models import *
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Expose a user together with the ids of the films and theaters they own."""
    films = serializers.PrimaryKeyRelatedField(many=True, queryset=Film.objects.all())
    # Fixed: the queryset was Film.objects.all() (copy-paste from `films`),
    # which validated theater primary keys against the Film table.
    theaters = serializers.PrimaryKeyRelatedField(many=True, queryset=Theater.objects.all())
    class Meta:
        model = User
        fields = ('id', 'username', 'films', 'theaters')
class GenreSerializer(serializers.ModelSerializer):
    """Read serializer: nests the related films one level deep (depth=1)."""
    class Meta:
        model = Genre
        fields = ('id', 'name', 'film_set')
        depth = 1
class GenreWriteSerializer(serializers.ModelSerializer):
    """Write serializer: same fields as GenreSerializer but flat (no depth)."""
    class Meta:
        model = Genre
        fields = ('id', 'name', 'film_set')
class FilmSerializer(serializers.ModelSerializer):
    """Read serializer: owner shown as username, relations nested one level."""
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        model = Film
        fields = ('id', 'title', 'year_prod', 'genre', 'theater_set', 'owner')
        depth = 1
class FilmWriteSerializer(serializers.ModelSerializer):
    """Write serializer: genre accepted by primary key and may be null."""
    genre = serializers.PrimaryKeyRelatedField(queryset=Genre.objects.all(), allow_null=True)
    class Meta:
        model = Film
        fields = ('id', 'title', 'year_prod', 'genre', 'theater_set')
class TheaterSerializer(serializers.ModelSerializer):
    """Read serializer: owner shown as username, films nested one level."""
    owner = serializers.ReadOnlyField(source='owner.username')
    class Meta:
        model = Theater
        fields = ('id', 'name', 'city', 'films', 'owner')
        depth = 1
class TheaterWriteSerializer(serializers.ModelSerializer):
    """Write serializer: only the scalar theater fields are accepted."""
    class Meta:
        model = Theater
        fields = ('id', 'name', 'city')
|
4,653 | e2e2e746d0a8f6b01e6f54e930c7def2d48c2d62 | import json
import random
import uuid
from collections import OrderedDict
import docker
from .db_utils import DBUtils
from .models import DynamicDockerChallenge
class DockerUtils:
    """Helpers that start/stop per-user challenge containers via the Docker API."""
    @staticmethod
    def add_new_docker_container(user_id, challenge_id, flag, port):
        """Launch the challenge image for this user with the FLAG injected.

        Returns True on success; on any failure rolls back the DB record and
        any partially-created container and returns False.
        """
        configs = DBUtils.get_all_configs()
        dynamic_docker_challenge = DynamicDockerChallenge.query \
            .filter(DynamicDockerChallenge.id == challenge_id) \
            .first_or_404()
        client = docker.DockerClient(base_url=configs.get("docker_api_url"))
        in_port = dynamic_docker_challenge.redirect_port
        ports = {str(in_port): str(port)}
        uuid_code = str(uuid.uuid4())
        try:
            client.containers.run(image=dynamic_docker_challenge.docker_image, name=str(user_id) + '-' + uuid_code,
                                  environment={'FLAG': flag}, detach=True,
                                  mem_limit=dynamic_docker_challenge.memory_limit,
                                  nano_cpus=int(dynamic_docker_challenge.cpu_limit * 1e9), auto_remove=True, ports=ports)
            DBUtils.create_new_container(user_id, challenge_id, flag, uuid_code, port)
            return True
        except Exception:
            # Fixed: was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt. Best-effort rollback, then report failure.
            DBUtils.remove_current_container(user_id)
            DockerUtils.remove_current_docker_container(user_id)
            return False
    @staticmethod
    def remove_current_docker_container(user_id, is_retry=False):
        """Tear down the user's container(s) and any per-user network.

        Retries exactly once on failure (is_retry guards the recursion).
        """
        configs = DBUtils.get_all_configs()
        container = DBUtils.get_current_containers(user_id=user_id)
        auto_containers = configs.get("docker_auto_connect_containers", "").split(",")
        if container is None:
            return
        try:
            client = docker.DockerClient(base_url=configs.get("docker_api_url"))
            networks = client.networks.list(names=[str(user_id) + '-' + container.uuid])
            if len(networks) == 0:
                # NOTE(review): without a network the lookup is by *name*, with
                # one it is by *label* — confirm this asymmetry is intentional.
                containers = client.containers.list(filters={'name': str(user_id) + '-' + container.uuid})
                for c in containers:
                    c.remove(force=True)
            else:
                containers = client.containers.list(filters={'label': str(user_id) + '-' + container.uuid})
                for c in containers:
                    c.remove(force=True)
                for n in networks:
                    for ac in auto_containers:
                        n.disconnect(ac)
                    n.remove()
        except Exception:
            # Fixed: was a bare `except:`. One retry, then give up silently
            # (original best-effort semantics preserved).
            if not is_retry:
                DockerUtils.remove_current_docker_container(user_id, True)
|
4,654 | ad59c1f0038294144b1c63db5f048b0a6b5ebb89 | # coding: utf-8
'''
Programa : py02_variavel.py
Homepage : http://www
Autor : Helber Palheta <hpalheta@outlook.com>
Execução:
python py02_variavel.py
'''
# Variable `curso` (course name) and its assignment.
curso = "Introdução a Biopython!"
# Print the course name.
print("Nome do Curso: "+curso) |
4,655 | 17548459b83fe4dea29f20dc5f91196b2b86ea60 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 23 15:26:47 2015
@author: tomhope
"""
import cPickle as pickle
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import re
def tokenize_speeches(text):
    # Strip punctuation, brackets and digits, collapse whitespace, then
    # tokenize with NLTK.
    text = re.sub('[\[\]<>\'\+\=\/(.?\",&*!_#:;@$%|)0-9]'," ", text)
    text = ' '.join(text.split())
    text = word_tokenize(text)
    # Remove hyphens that sit next to a non-ASCII character (the lookbehind/
    # lookahead test \x00-\x9F). Python 2 idiom: encode to UTF-8 bytes for the
    # regex, decode back; assumes the tokens round-trip as UTF-8 — TODO confirm.
    # Tokens shorter than 2 characters are dropped at each pass.
    tokens = [re.sub(r'(?<![\x00-\x9F])[-]',"", t.encode("UTF-8")).decode("UTF-8") for t in text if len(t)>=2]
    tokens = [re.sub(r'[-](?![\x00-\x9F])',"", t.encode("UTF-8")).decode("UTF-8") for t in tokens if len(t)>=2]
    return tokens
# NOTE(review): Python 2 script (`print s` statements). `bibi_speeches` is not
# defined anywhere in this file — presumably loaded in an interactive session
# before these lines run; confirm before reuse.
ss = tokenize_speeches(bibi_speeches[0])
for s in ss[0:10]:
    print s
# Build a uni+bigram count matrix over all speeches with the custom tokenizer.
count_vect = CountVectorizer(ngram_range = (1,2), max_df = 0.75, min_df = 10,
tokenizer = tokenize_speeches)
X_train_counts = count_vect.fit_transform(bibi_speeches)
h = count_vect.get_feature_names()[0:20]
print h
ss = count_vect.get_feature_names()
# Spot-check a slice of the learned vocabulary.
for i in range(600,700):
    print ss[i]
4,656 | c6ef9154285dee3b21980801a101ad5e34a50cab |
import os
import yaml
import sys
import random
import shutil
import openpyxl
import yaml
import audioanalysis as aa
import numpy as np
import argparse
import logging
"""
manualtest.py
Script to create a listeneing test. The output, test
case directory and answer_key.yml file, can be
found in the root directory.
manual test creation
responsibilities:
1) directory of directories that each contain two files to compare(a,b) and a duplicated one (x)
example scenarios to test:
JITTER_BUFFER_INIT_X VS. JITTER_BUFFER_INIT_Y
dev version vs dev version
need to come up with more
2) an output yaml file labeled answer_key.yml that says which (a,b) is x
"""
# command line parse
help_string = ("\nPlease note that manual_test.py makes 3 assumptions about "
"these file paths. "
"\n1.Both scenarios contain the same amount of wav files."
"\n2.The wav files in both scenarios have a one to one "
"correspondence between each other. Each test case contains a "
"pair of files, one from each scenario. This pair is made by "
"matching files between scenarios with the same names 3."
"There are no more than 25 audio file pairs")
parser = argparse.ArgumentParser(description="Script to create a listening test. The output, test case directory and answer_key.yml file, can be found in the root directory."+help_string)
parser.add_argument("-o", dest="output_base_path", default= os.getcwd(),help="(optional)Absolute file path to locatin to save test directory and answer key (default: root directory)")
parser.add_argument("scenario_one", help="Absolute file path to location of first scenario. Required")
parser.add_argument("scenario_two", help="Absolute file path to location of second scenario. Required")
args=parser.parse_args()
# globals
output_base_path=args.output_base_path
root_directory = os.getcwd()
# first scenario
scenario_one = args.scenario_one
scenario_one_latency=0
scenario_one_correlation_coefficient=0
# second scenario
scenario_two = args.scenario_two
scenario_two_latency=0
scenario_two_correlation_coefficient=0
output_path=""
answer_key=[]
# Keyword-argument keys understood by Answer(**user_answers).
USER_ANSWER_KEY="user_answer"
USER_PREFERENCE_KEY="user_preference_weight"
USER_X_VALUE_KEY="user_X_value"
USER_CONFIDENCE_KEY="user_answer_confidence"
X_ANSWER_KEY="x_answer_alpha"
A_VALUE_KEY="A_value"
B_VALUE_KEY="B_value"
# Test-case directory/file naming.
TESTCASES_SUBDIR="testcases"
A_CASE_NAME="A_"
B_CASE_NAME="B_"
X_CASE_NAME="X_"
WNDWS_COPY_CMD="copy"
AUDIO_TYPE=".wav"
# Per-scenario latency data files ("SCNEARIO" typo is part of the name).
SCNEARIO_ONE_DATA_FILE="output_data.yml"
SCENARIO_ONE_DATA_FILE_KEY="Scenario One"
SCENARIO_TWO_DATA_FILE="output_data.yml"
SCENARIO_TWO_DATA_FILE_KEY="Scenario Two"
ANSWER_KEY_NAME="answer_key.yml"
USER_ANSWER_CASE_A="A"
USER_ANSWER_CASE_B="B"
ANSWER_KEY_SCENARIO_ONE="scenario one"
ANSWER_KEY_SCENARIO_TWO="scenario two"
ANSWER_KEY_QUESTION_KEY="Q_"
MAX_CASE_NUM=24
ADJUSTED_AUDIO_SUBDIR="adjusted_audio"
SCENARIO_ONE_SUBDIR="scenario_one"
SCENARIO_TWO_SUBDIR="scenario_two"
class Answer():
    """
    Wrapper for an A_B_X test directory and its grading attributes.

    user_answer               "A" or "B": which file sounded better
    user_preference_weight    1-5: how much better the preferred file was
                              (5 significant, 1 minimal)
    user_X_value              "A" or "B": which file the user believes X duplicates
    user_answer_confidence    1-5: how easy it was to distinguish A/B and pick X
    x_answer_alpha            ground truth: which file X actually duplicates
    A_value / B_value         which scenario ("scenario one"/"scenario two")
                              A and B each came from
    correct                   set by grade(): True iff user_X_value == x_answer_alpha

    Any attribute whose keyword was not supplied defaults to None.
    """
    def __init__(self, question_num, **user_answers):
        self.question_num = question_num
        self.correct = None
        # dict.get() replaces the original seven try/except KeyError blocks;
        # missing keys still default to None, exactly as before.
        self.user_answer = user_answers.get(USER_ANSWER_KEY)
        self.user_preference_weight = user_answers.get(USER_PREFERENCE_KEY)
        self.user_X_value = user_answers.get(USER_X_VALUE_KEY)
        self.user_answer_confidence = user_answers.get(USER_CONFIDENCE_KEY)
        self.x_answer_alpha = user_answers.get(X_ANSWER_KEY)
        self.A_value = user_answers.get(A_VALUE_KEY)
        self.B_value = user_answers.get(B_VALUE_KEY)
    def grade(self):
        """Populate self.correct by comparing the user's X guess to the answer."""
        self.correct = self.x_answer_alpha == self.user_X_value
def _collect_locations():
    """Validate both scenario paths exist, then hand back the three globals."""
    logging.info("Enter: _collect_locations")
    global scenario_one
    global scenario_two
    global output_base_path
    # Abort up front if either input directory is missing.
    for label, path in (("Scenario One", scenario_one), ("Scenario Two", scenario_two)):
        if not os.path.exists(path):
            print("{} file path does not exist. Exiting".format(label))
            sys.exit()
    print("Creating listening test...")
    logging.info("Exit: _collect_locations")
    return scenario_one, scenario_two, output_base_path
def _cleanup_scenarios(adjusted_file_path):
# Delete the adjusted audio created for this module
try:
shutil.rmtree(adjusted_file_path)
except:
print("The system could not delete the temporary audio files that "
"were created for this test. This directory can be removed "
"at {}".format(adjusted_file_path))
def _create_output_directory(output_base_path):
    # From the base path create a testcases subdirectory and return its full
    # path. Interactive: if the directory already exists the user is prompted
    # before it is removed. Also mutates the module-level `output_path`.
    logging.info("Enter: _create_output_directory")
    global output_path
    output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)
    if os.path.exists(output_path):
        try:
            input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path))
            shutil.rmtree(output_path)
        except PermissionError:
            # Another process still holds the directory open (Windows lock).
            print("There is a test directory located in the same location as the test directory location you specified")
            print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.")
            sys.exit()
        except KeyboardInterrupt:
            print("Exiting...")
            sys.exit()
    os.mkdir(output_path)
    logging.info("Exit: _create_output_directory")
    return output_path
def _create_answer_key(output_path):
    """Write answer_key.yml into *output_path*.

    Includes each scenario's output_data.yml (when present) followed by one
    Q_<n> entry per question in the global answer_key list.
    """
    logging.info("Enter: _create_answer_key")
    global answer_key
    global scenario_one
    global scenario_two
    scenario_one_latency_data = {}
    if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):
        with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data:
            # Fixed: yaml.load() without a Loader is deprecated and executes
            # arbitrary tags; safe_load is sufficient for this data file.
            scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY] = yaml.safe_load(output_data)
    scenario_two_latency_data = {}
    if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):
        with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data:
            scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY] = yaml.safe_load(output_data)
    with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml:
        yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False)
        yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False)
        for question in answer_key:
            yaml_dict = {}
            Key = str(ANSWER_KEY_QUESTION_KEY + str(question.question_num))
            yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha, A_VALUE_KEY: question.A_value, B_VALUE_KEY: question.B_value}
            yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)
    logging.info("Exit: _create_answer_key")
def _create_temp_dir(root_directory, scenario_one, scenario_two):
    """Copy both scenario directories into a scratch area so the audio can be
    modified without touching the originals.

    Returns (adjusted_file_path, scenario_one_temp, scenario_two_temp).
    """
    logging.info("Enter: _create_temp_dir")
    adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)
    scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)
    scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)
    try:
        os.mkdir(adjusted_file_path)
    except FileExistsError:
        print("To properly create ABX tests, the audio files are modified so audio begins play at the same time")
        print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.")
        input("This directory already exists. Press enter to remove and continue or CTRL-C to quit")
        shutil.rmtree(adjusted_file_path)
        os.mkdir(adjusted_file_path)
    shutil.copytree(scenario_one, scenario_one_temp)
    shutil.copytree(scenario_two, scenario_two_temp)
    logging.info("Exit: _create_temp_dir")
    # Fixed: previously returned scenario_one_temp TWICE, so callers never
    # received the path of the scenario-two copy.
    return adjusted_file_path, scenario_one_temp, scenario_two_temp
def create_A_B_X_cases(A_B_cases_zip_list, output_path):
    """
    Create the A_B_X testing directories and record the answers.

    A is drawn from scenario one or two with 50/50 probability and B from the
    other; X is then a duplicate of A or B with 50/50 probability.

    Parameters:
        A_B_cases_zip_list: list of [scenario_one_file, scenario_two_file] pairs
        output_path: absolute path of the testcase directory
    Returns:
        None (appends one Answer per case to the global answer_key)
    """
    logging.info("Enter: create_A_B_X_cases ")
    global scenario_one
    global scenario_two
    global answer_key
    # MRR: rather than silently warning past 25 cases, consider emitting
    # multiple ABX tests of up to 25 cases each.
    for case_num, case in enumerate(A_B_cases_zip_list):
        if case_num > MAX_CASE_NUM:
            warning = ("The amount of cases has exceeded 25. Please note that "
                       "the accompanying excel sheet only has 25 answer slots and that it will need to "
                       "be restructured")
            logging.info(warning)
            print(warning)
        test_case_path = os.path.join(output_path, str(case_num))
        try:
            os.mkdir(test_case_path)
        except FileExistsError:
            logging.debug("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path))
            print("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path))
            sys.exit()
        # Both draws kept in the original order so seeded runs reproduce.
        switch_A_B = random.randint(0, 1)  # 1: A comes from scenario two
        x_answer = random.randint(0, 1)
        if switch_A_B:
            a_src, b_src = case[1], case[0]
            A_value, B_value = ANSWER_KEY_SCENARIO_TWO, ANSWER_KEY_SCENARIO_ONE
        else:
            a_src, b_src = case[0], case[1]
            A_value, B_value = ANSWER_KEY_SCENARIO_ONE, ANSWER_KEY_SCENARIO_TWO
        # Fixed: shutil.copy replaces os.system("copy src dst"), which was
        # Windows-only and broke on paths containing spaces.
        shutil.copy(a_src, os.path.join(test_case_path, A_CASE_NAME + str(case_num) + AUDIO_TYPE))
        shutil.copy(b_src, os.path.join(test_case_path, B_CASE_NAME + str(case_num) + AUDIO_TYPE))
        # The original mapping is preserved exactly: X duplicates A when
        # x_answer == switch_A_B, otherwise it duplicates B.
        if x_answer == switch_A_B:
            x_answer_alpha = USER_ANSWER_CASE_A
            x_src = a_src
        else:
            x_answer_alpha = USER_ANSWER_CASE_B
            x_src = b_src
        shutil.copy(x_src, os.path.join(test_case_path, X_CASE_NAME + str(case_num) + AUDIO_TYPE))
        question_info = Answer(case_num, x_answer_alpha=x_answer_alpha, A_value=A_value, B_value=B_value)
        answer_key.append(question_info)
    logging.info("Exit: create_A_B_X_cases")
def create_manual_tests():
    """End-to-end driver: validate inputs, build the testcases directory,
    latency-align the audio copies, emit the ABX cases, and write the key."""
    logging.info("Enter: create_manual_tests")
    global root_directory
    scenario_one, scenario_two, output_base_path = _collect_locations()
    output_path = _create_output_directory(output_base_path)
    # Confirm another answer key does not already exist.
    if os.path.exists(os.path.join(output_path, ANSWER_KEY_NAME)):
        input("An answer_key.yml file already exists at - " + output_path + " - this file will be deleted. Press enter if this is okay of CNTRL-C to exit")
        os.remove(os.path.join(output_path, ANSWER_KEY_NAME))
    adjusted_file_path, scenario_one_temp, scenario_two_temp = _create_temp_dir(root_directory, scenario_one, scenario_two)
    print("Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. Press Enter to continue.")
    rate_log, correlation_sample_log, correlation_coefficient_log = aa.find_latency_values(scenario_one_temp, scenario_two_temp)
    # Negative value indicates that scenario one signal was delayed. Positive value indicates that scenario two signal was delayed
    file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)
    aa.adjust_files(correlation_sample_log, rate_log, file_zip)
    create_A_B_X_cases(file_zip, output_path)
    _cleanup_scenarios(adjusted_file_path)
    # Fixed: the answer key is now written into the testcases directory
    # (output_path); previously it was written to output_base_path while the
    # pre-existence check above looked in output_path.
    _create_answer_key(output_path)
    print("done")
    logging.info("Exit: create_manual_tests")
if __name__ =="__main__":
    # Configure file logging once, then build the complete ABX listening test.
    logging.basicConfig(filename="manualtest.log", level=logging.INFO, format="%(asctime)s %(levelname)s %(module)s line: %(lineno)d, %(message)s")
    logging.info("Enter: main")
    create_manual_tests()
    logging.info("Exit: main")
|
4,657 | be1638638c70cf761bf5d2f0eb474b44684dfa47 |
from __future__ import division
import torch
import torch.nn as nn
import math
def conv_bn(inp, oup, stride):
    """3x3 convolution (padding 1, no bias) -> BatchNorm -> ReLU."""
    conv = nn.Conv2d(inp, oup, 3, stride, 1, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(oup), nn.ReLU(inplace=True))
def conv_1x1_bn(inp, oup):
    """1x1 pointwise convolution (no bias) -> BatchNorm -> ReLU."""
    conv = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)
    return nn.Sequential(conv, nn.BatchNorm2d(oup), nn.ReLU(inplace=True))
class InvertedResidual(nn.Module):
    """MobileNetV2 bottleneck: optional 1x1 expansion -> 3x3 depthwise ->
    1x1 linear projection, with an identity shortcut when stride is 1 and
    input/output channels match."""
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]
        self.stride = stride
        hidden_dim = round(inp * expand_ratio)
        self.use_res_connect = self.stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pointwise expansion
            layers += [
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.ReLU(inplace=True),
            ]
        layers += [
            # depthwise
            nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.ReLU(inplace=True),
            # pointwise linear projection (no activation)
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ]
        self.conv = nn.Sequential(*layers)
    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out
class SmallMobileNetV2(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=1000): #, input_size=224
super(SmallMobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 8
last_channel = 64
interverted_residual_setting = [
# t, c, n, s
[1, 8, 1, 1],
[6, 12, 2, 2],
[6, 16, 2, 2],
[6, 24, 3, 2],
[6, 32, 3, 2],
[6, 48, 3, 2],
[6, 64, 2, 2],
[6, 80, 1, 1],
]
# building first layer
# assert input_size % 32 == 0
input_channel = int(input_channel * widen_factor)
self.last_channel = int(last_channel * widen_factor) if widen_factor > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in interverted_residual_setting:
output_channel = int(c * widen_factor)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building pts net
self.pts_net = nn.Sequential(
nn.Linear(4*self.last_channel, 256),
nn.PReLU(),
nn.Linear(256, 256),
nn.PReLU(),
nn.Linear(256, num_classes)
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
pts = self.pts_net(x)
return pts
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class SmallMobileNetV2Part(nn.Module):
def __init__(self, widen_factor=1.0, num_classes=68*2): #, input_size=224
super(SmallMobileNetV2Part, self).__init__()
self.block = InvertedResidual
self.input_channel = 8
self.last_channel = 64
self.interverted_residual_setting = [
# t, c, n, s
[1, 8, 1, 1],
[6, 12, 2, 2],
[6, 16, 2, 2],
[6, 24, 3, 2],
[6, 32, 3, 2],
[6, 48, 3, 2],
[6, 64, 2, 2],
[6, 80, 1, 1],
]
if num_classes==68*2:
part_dim = [22, 22, 18, 40]
elif num_classes==98*2:
part_dim = [36, 36, 18, 40]
elif num_classes==106*2:
part_dim = [38, 38, 30, 40]
# building first layer
# assert input_size % 32 == 0
# Spatial transformer localization-network
self.left_eye_net = self.make_backbone(widen_factor)
self.right_eye_net = self.make_backbone(widen_factor)
self.nose_net = self.make_backbone(widen_factor)
self.mouth_net = self.make_backbone(widen_factor)
# Regressor for the 3 * 2 affine matrix
self.left_eye_loc = self.make_pts_fc(part_dim[0])
self.right_eye_loc = self.make_pts_fc(part_dim[1])
self.nose_loc = self.make_pts_fc(part_dim[2])
self.mouth_loc = self.make_pts_fc(part_dim[3])
self._initialize_weights()
def make_backbone(self, widen_factor):
# building first layer
# assert input_size % 32 == 0
input_channel = int(self.input_channel * widen_factor)
last_channel = int(self.last_channel * widen_factor) if widen_factor > 1.0 else self.last_channel
features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
for t, c, n, s in self.interverted_residual_setting:
output_channel = int(c * widen_factor)
for i in range(n):
if i == 0:
features.append(self.block(input_channel, output_channel, s, expand_ratio=t))
else:
features.append(self.block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
# building last several layers
features.append(conv_1x1_bn(input_channel, last_channel))
# make it nn.Sequential
return nn.Sequential(*features)
def make_pts_fc(self,num_classes):
#pdb.set_trace()
pts_net = nn.Sequential(
nn.Linear(self.last_channel, 64),
nn.PReLU(),
nn.Linear(64, 64),
nn.PReLU(),
nn.Linear(64, num_classes)
)
return pts_net
def forward(self, x):
xs_1 = self.left_eye_net(x[0])
xs_1 = torch.flatten(xs_1, 1)
#pdb.set_trace()
out_1 = self.left_eye_loc(xs_1)
xs_2 = self.right_eye_net(x[1])
xs_2 = torch.flatten(xs_2, 1)
out_2 = self.right_eye_loc(xs_2)
xs_3 = self.nose_net(x[2])
xs_3 = torch.flatten(xs_3, 1)
out_3 = self.nose_loc(xs_3)
xs_4 = self.mouth_net(x[3])
xs_4 = torch.flatten(xs_4, 1)
out_4 = self.mouth_loc(xs_4)
return [out_1, out_2, out_3, out_4]
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
|
4,658 | aa2a268143856d8f33b1aaf24f4e28ffd95cab01 | __author__ = 'susperius'
"""
Abstract class used to implement own fuzzers
"""
class Fuzzer:
    """Abstract base class for fuzzer implementations.

    Concrete fuzzers must override every method below; each base
    implementation only raises NotImplementedError.
    """

    # Identifier(s) of the fuzzer implementation.
    NAME = []
    # Names of the configuration values — presumably the order expected by
    # from_list(); confirm against a concrete subclass.
    CONFIG_PARAMS = []

    @classmethod
    def from_list(cls, params):
        """Construct a fuzzer instance from a list of configuration values."""
        raise NotImplementedError("ABSTRACT METHOD")

    @property
    def prng_state(self):
        """Current state of the internal pseudo-random number generator."""
        raise NotImplementedError("ABSTRACT METHOD")

    def fuzz(self):
        """Produce the next fuzzed test case."""
        raise NotImplementedError("ABSTRACT METHOD")

    def set_state(self, state):
        """Restore a previously captured PRNG state (see ``prng_state``)."""
        raise NotImplementedError("ABSTRACT METHOD")

    def set_seed(self, seed):
        """Seed the PRNG so runs are reproducible."""
        raise NotImplementedError("ABSTRACT METHOD")

    def create_testcases(self, count, directory):
        """Write ``count`` generated test cases into ``directory``."""
        raise NotImplementedError("ABSTRACT METHOD")

    @property
    def file_type(self):
        """File type/extension of the test cases this fuzzer generates."""
        raise NotImplementedError("ABSTRACT METHOD")
4,659 | 7c2d57a8368eb8d1699364c60e98766e66f01569 |
from flask import Blueprint, current_app
# Blueprint exercising the three application-logger levels.
# NOTE(review): the variable name shadows the stdlib `logging` module within
# this file — keep that in mind if stdlib logging is ever imported here.
logging = Blueprint("logging", __name__)

@logging.route("/debug/")
def debug():
    """Emit a debug-level message via the application logger."""
    current_app.logger.debug("some debug message")
    return ""

@logging.route("/warning/")
def warning():
    """Emit a warning-level message via the application logger."""
    current_app.logger.warning("some warning message")
    return ""

@logging.route("/error/")
def error():
    """Emit an error-level message via the application logger."""
    current_app.logger.error("some error message")
    return ""
|
4,660 | 47c5fb03cb427d5c9f7703e1715e026b6f2c7a35 | #!/usr/bin/env python
# Single-point resolution computation with MCViNE's use_res_comps workflow.
# NOTE(review): this looks auto-generated — every input below is hard-coded
# for one detector pixel and one (Q, E) transfer point.
import mcvine.cli
from numpy import array
from mcvine_workflow.singlextal.resolution import use_res_comps as urc

# Pre-simulated incident-beam neutrons (Ei = 125 meV beam, 1e9 events).
beam_neutrons_path = '/SNS/users/p63/ORNL_public_research/MCViNE_Covmat_comparison/mcvine_resolution/beams/beam_125_1e9/out/neutrons'
# ARCS geometry: instrument name, two distances, and a detector offset.
instrument = urc.instrument('ARCS', '3.*meter', '13.6*meter', '-0.15*meter')
# Sample assembly description matching the target (hkl, E) point in the path.
samplexmlpath = '/SNS/users/p63/ORNL_public_research/learning_from_mcvine/res_sims/Ei_125/E86.7120348337_hkl-7.62228386234,3.53360635791,-3.42342194523/sample/sampleassembly.xml'
# Sample rotation angle psi — presumably radians; confirm with workflow docs.
psi = -0.011798841097534662
# Matrix mapping (h, k, l) to momentum transfer Q in instrument coordinates.
hkl2Q = array([[-0.64961065, 0.94207344, 0. ],
 [ 0.66614652, 0.4593441 , -0.80916512],
 [-0.66614652, -0.4593441 , -0.80916512]])
# Detector pixel position; note the axis reorder (y, z, x) passed to urc.pixel.
pp = array([-1.22433552, 2.73879582, 0.0612745 ])
pixel = urc.pixel('0.5*inch', 'meter/128', '10*atm', position=(pp[1], pp[2], pp[0]))
# Moderator-to-pixel time — presumably seconds; verify against the workflow.
t_m2p = 0.0038753067573975117
# Target momentum transfer, energy transfer, and hkl projection direction.
Q = array([ 9.58591698, -3.98508133, -0.08915738])
E = 86.712034833655451
hkl_projection = array([-0.6235806 , -0.08226367, 0.30709024])
urc.run(
    beam_neutrons_path, instrument, samplexmlpath, psi, hkl2Q, pixel, t_m2p,
    Q, E, hkl_projection, Nbuffer=100000)
4,661 | 635b75bc12718bccdfb9d04a54476c93fa4685ce | from django.db import models
import eav
from django.utils import timezone
class RiskType(models.Model):
    """A model class used for storing data about risk types.

    Dynamic per-type attributes are attached via django-eav (see the
    ``eav.register`` call below).
    """
    name = models.CharField(max_length=255)
    # Creation timestamp; uses default= (not auto_now_add) so callers may override it.
    created = models.DateTimeField(default=timezone.now)
    # Refreshed automatically on every save().
    modified = models.DateTimeField(auto_now=True)

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name


# Enable entity-attribute-value (dynamic) fields on RiskType.
eav.register(RiskType)
|
4,662 | 0a38cf6e0518a08895ed7155069aa2257c7b352e | import pandas as pd
import numpy as np
# Demo of pandas concatenation. Fixed for modern pandas:
# - `join_axes=` was removed in pandas 1.0 -> use .reindex() instead.
# - `DataFrame.append` was removed in pandas 2.0 -> use pd.concat instead.
df1 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'])
df2 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['a', 'b', 'c', 'd'])
df3 = pd.DataFrame(np.ones((3, 4)) * 2, columns=['a', 'b', 'c', 'd'])
# Vertical concatenation; ignore_index renumbers the rows 0..n-1.
res1 = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
print(res1)
df4 = pd.DataFrame(np.ones((3, 4)) * 0, columns=['a', 'b', 'c', 'd'], index=[1, 2, 3])
df5 = pd.DataFrame(np.ones((3, 4)) * 1, columns=['b', 'c', 'd', 'e'], index=[2, 3, 4])
# join='inner' keeps only the columns common to both frames.
res2 = pd.concat([df4, df5], join='inner', ignore_index=True)
print(res2)
# Default join='outer' keeps all columns, filling missing cells with NaN.
res3 = pd.concat([df4, df5], join='outer', ignore_index=True)
print(res3)
# Column-wise concatenation restricted to df1's row index
# (replacement for the removed join_axes=[df1.index]).
res4 = pd.concat([df1, df2], axis=1).reindex(df1.index)
print(res4)
# Stack several frames (replacement for df1.append([df2, df3], ...)).
res5 = pd.concat([df1, df2, df3], ignore_index=True)
print(res5)
# Append a single Series as one new row.
s1 = pd.Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
res6 = pd.concat([df1, s1.to_frame().T], ignore_index=True)
print(res6)
|
4,663 | b7c43f4242e38318c9e5423ea73e9d9d86759a53 | # -*- coding:utf-8 -*-
__author__ = 'yyp'
__date__ = '2018-5-26 3:42'
'''
Given a string, find the length of the longest substring without repeating characters.
Examples:
Given "abcabcbb", the answer is "abc", which the length is 3.
Given "bbbbb", the answer is "b", with the length of 1.
Given "pwwkew", the answer is "wke", with the length of 3. Note that the answer must be a substring, "pwke" is a subsequence and not a substring.
'''
class Solution:
    """
    Time: O(n)
    Space: O(min(n, alphabet size))
    """
    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest substring of ``s`` without
        repeating characters.

        :type s: str
        :rtype: int

        Fix: the original used a fixed 256-slot boolean table indexed by
        ord(char), which raised IndexError for any non-ASCII character.
        A set-based sliding window handles all of Unicode.
        """
        left = 0
        best = 0
        window = set()  # characters currently inside s[left:right+1]
        for right, ch in enumerate(s):
            # Shrink from the left until the duplicate of `ch` is evicted.
            while ch in window:
                window.discard(s[left])
                left += 1
            window.add(ch)
            best = max(best, right - left + 1)
        return best
|
4,664 | 49ffa225d433ef2263159ba2145da5ba2a95d1f2 | #求11+12+13+。。。+m
m = int(input('请输入一个数:'))
S = m
for x in range(11,m):
S = S+x
print('sum =',S) |
4,665 | 3c2a611fd001f145703853f5ecfe70d0e93844e4 | """
opsi-utils
Test utilities
"""
import os
import tempfile
from contextlib import contextmanager
from pathlib import Path
from typing import Generator
@contextmanager
def temp_context() -> Generator[Path, None, None]:
origin = Path().absolute()
try:
with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tempdir:
os.chdir(tempdir)
yield origin # return original path
finally:
os.chdir(origin)
|
4,666 | b4491b5522e85fec64164b602045b9bd3e58c5b8 | #给你一个字符串 croakOfFrogs,它表示不同青蛙发出的蛙鸣声(字符串 "croak" )的组合。由于同一时间可以有多只青蛙呱呱作响,所以 croakOfFrogs 中会混合多个 “croak” 。请你返回模拟字符串中所有蛙鸣所需不同青蛙的最少数目。
#注意:要想发出蛙鸣 "croak",青蛙必须 依序 输出 ‘c’, ’r’, ’o’, ’a’, ’k’ 这 5 个字母。如果没有输出全部五个字母,那么它就不会发出声音。
#如果字符串 croakOfFrogs 不是由若干有效的 "croak" 字符混合而成,请返回 -1 。
#来源:力扣(LeetCode)
#链接:https://leetcode-cn.com/problems/minimum-number-of-frogs-croaking
#著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
class Solution:
    def minNumberOfFrogs(self, croakOfFrogs: str) -> int:
        """Minimum number of frogs needed to produce the given croak mix.

        Scan left to right counting how many croaks have reached each letter.
        The number of frogs simultaneously mid-croak is count('c') - count('k');
        its maximum over the scan is the answer. The stream is invalid (-1) if
        any prefix violates c >= r >= o >= a >= k or the final counts differ.
        """
        counts = dict.fromkeys("croak", 0)
        busiest = 0
        for ch in croakOfFrogs:
            if ch in counts:
                counts[ch] += 1
            busiest = max(busiest, counts["c"] - counts["k"])
            if not (counts["c"] >= counts["r"] >= counts["o"] >= counts["a"] >= counts["k"]):
                # Some frog skipped a letter — counts can no longer all match.
                return -1
        totals = list(counts.values())
        return busiest if all(t == totals[0] for t in totals) else -1
|
4,667 | 60953878c377382f1c7f25ce284c9fa12b8eb25f | #coding: utf-8
import numpy as np
import cv2
leftgray = cv2.imread('../image/1.jpg')
rightgray = cv2.imread('../image/2.jpg')
hessian=500
surf=cv2.xfeatures2d.SURF_create(hessian) #将Hessian Threshold设置为400,阈值越大能检测的特征就越少
kp1,des1=surf.detectAndCompute(leftgray,None) #查找关键点和描述符
kp2,des2=surf.detectAndCompute(rightgray,None)
FLANN_INDEX_KDTREE=0 #建立FLANN匹配器的参数
indexParams=dict(algorithm=FLANN_INDEX_KDTREE,trees=5) #配置索引,密度树的数量为5
searchParams=dict(checks=50) #指定递归次数
#FlannBasedMatcher:是目前最快的特征匹配算法(最近邻搜索)
flann=cv2.FlannBasedMatcher(indexParams,searchParams) #建立匹配器
matches=flann.knnMatch(des1,des2,k=2) #得出匹配的关键点
a = cv2.drawMatchesKnn(leftgray, kp1, rightgray, kp2, matches,None, flags=2)
cv2.namedWindow("mathches",1)
cv2.imshow("mathches",a)
cv2.waitKey()
good=[]
#提取优秀的特征点
for m,n in matches:
if m.distance < 0.45*n.distance: #如果第一个邻近距离比第二个邻近距离的0.7倍小,则保留
good.append(m)
print(len(good))
result = cv2.drawMatches(leftgray, kp1, rightgray, kp2, good,None, flags=2)
cv2.namedWindow("result",1)
cv2.imshow("result",result)
cv2.waitKey()
src_pts = np.array([ kp1[m.queryIdx].pt for m in good]) #查询图像的特征描述子索引
dst_pts = np.array([ kp2[m.trainIdx].pt for m in good]) #训练(模板)图像的特征描述子索引
print(len(src_pts),len(dst_pts))
H=cv2.findHomography(src_pts,dst_pts) #生成变换矩阵
print('H:',H)
h,w=leftgray.shape[:2]
h1,w1=rightgray.shape[:2]
shft=np.array([[1.0,0,w],[0,1.0,0],[0,0,1.0]])
print('shft:',shft)
M=np.dot(shft,H[0]) #获取左边图像到右边图像的投影映射关系
print('M:',M)
dst_corners=cv2.warpPerspective(leftgray,M,(w*2 ,h))#透视变换,新图像可容纳完整的两幅图
cv2.namedWindow("tiledImg1" ,cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg1',dst_corners) #显示,第一幅图已在标准位置
cv2.waitKey()
dst_corners[0:h,w:w*2]=rightgray #将第二幅图放在右侧
#cv2.imwrite('tiled.jpg',dst_corners)
cv2.namedWindow("tiledImg" ,cv2.WINDOW_NORMAL)
cv2.imshow('tiledImg',dst_corners)
#cv2.imshow('leftgray',leftgray)
#cv2.imshow('rightgray',rightgray)
cv2.waitKey()
cv2.destroyAllWindows()
|
4,668 | 7f7bd2e9ec1932ccfd8aa900956ce85473ee8dbd | #!/usr/bin/env python
"""Diverse wiskundige structuren weergeven in LaTeX in Jupyter Notebook."""
__author__ = "Brian van der Bijl"
__copyright__ = "Copyright 2020, Hogeschool Utrecht"
from IPython.display import display, Math, Markdown
import re
def show_num(x):
    """Strip a trailing decimal point from a numeric string for LaTeX display
    (e.g. '1.' -> '1'; '1.5' is left untouched).

    Fix: the original substituted "\1" from a *non-raw* string — i.e. the
    literal control character \x01 — instead of removing the matched dot.
    """
    return re.sub(r"\.(?!\d)", "", x)
def latex_formula(form):
    """Simplify ``form`` and render it as LaTeX in the notebook.

    ``form`` is a project formula object exposing simplify()/to_latex().
    Also emits a collapsible block with the raw LaTeX source for copy/paste.
    """
    latex = form.simplify().to_latex(outer=True)
    if latex:
        display(Math(latex))
        display(Markdown("<details><pre>$" + latex + "$</pre></details>"))
def latex_bmatrix(M, label=None):  # Based on https://stackoverflow.com/questions/17129290/numpy-2d-and-1d-array-to-latex-bmatrix
    """Render a numpy array as a LaTeX bmatrix, optionally prefixed with
    ``label = ``, plus a collapsible raw-LaTeX source block."""
    if len(M.shape) > 2:
        raise ValueError('bmatrix can at most display two dimensions')
    rows = str(M).replace("[", "").replace("]", "").splitlines()
    pieces = [label + " = "] if label else [""]
    pieces.append(r"\begin{bmatrix}")
    for row in rows:
        pieces.append(" " + " & ".join(show_num(entry) for entry in row.split()) + r"\\")
    pieces.append(r"\end{bmatrix}")
    display(Math("\n".join(pieces)))
    display(Markdown("<details><pre>$" + " ".join(pieces) + "$</pre></details>"))
def latex_amatrix(M, labels=None):
    """Render an augmented matrix (A | b) as a LaTeX array with a divider bar.

    ``labels``, when given as a pair (matrix_name, vector_name), prefixes the
    output with (\\mathbf{A} | \\vec b) = .
    """
    if len(M.shape) > 2:
        raise ValueError('array can at most display two dimensions')
    rows = str(M).replace("[", "").replace("]", "").splitlines()
    if labels and len(labels) == 2:
        pieces = [r"(\mathbf{" + labels[0] + r"} | \vec " + labels[1] + ") = "]
    else:
        pieces = [""]
    pieces.append(r"\left[\begin{array}{ccc|c}")
    for row in rows:
        pieces.append(" " + " & ".join(show_num(entry) for entry in row.split()) + r"\\")
    pieces.append(r"\end{array}\right]")
    display(Math("\n".join(pieces)))
    display(Markdown("<details><pre>$" + " ".join(pieces) + "$</pre></details>"))
def latex_msquare(sq):
    """Render a 3x3 magic square as a fully-ruled LaTeX array."""
    if sq.shape != (3, 3):
        raise ValueError('Geen magisch vierkant')
    rows = str(sq).replace("[", "").replace("]", "").splitlines()
    pieces = [r"\begin{array}{|c|c|c|}\hline"]
    for row in rows:
        pieces.append(" " + " & ".join(show_num(entry) for entry in row.split()) + r"\\\hline")
    pieces.append(r"\end{array}")
    display(Math("\n".join(pieces)))
    display(Markdown("<details><pre>$" + " ".join(pieces) + "$</pre></details>"))
def latex_ratio(x):
    """Convert a number to LaTeX: integers become plain strings, everything
    else (floats, Fractions) becomes a \\frac with the sign pulled outside."""
    if isinstance(x, int):
        return str(x)
    numerator, denominator = x.as_integer_ratio()
    sign = "-" if numerator < 0 else ""
    return sign + r"\frac{" + str(abs(numerator)) + "}{" + str(denominator) + "}"
def latex_polynomial(poly):
    """Display a polynomial as LaTeX in the notebook.

    ``poly`` is a tuple ``(terms, label, var, primes)`` where ``terms`` maps
    exponent -> coefficient, ``label`` is the function name, ``var`` the
    variable name, and ``primes`` the number of derivative primes to show.

    Fixes: integer comparisons used ``is`` (implementation-defined for
    numbers, SyntaxWarning on CPython >= 3.8) — now ``==``; a negative
    constant term printed a doubled sign ("--3") because str(v) repeated
    the minus already emitted — now only the magnitude is appended.
    """
    terms, label, var, primes = poly

    def power(exp):
        """Render var**exp: bare var for exponent 1, empty string for 0."""
        if exp == 1:
            return var
        elif exp == 0:
            return ""
        else:
            return (var + r"^{" + latex_ratio(exp) + "}")

    # f(x) with the requested number of primes, e.g. f^{\prime\prime}(x).
    result = label + ("^{" + r"\prime"*primes + "}" if primes > 0 else "") + "(" + var + ") = "
    first = True  # no leading "+" before the first printed term
    for k, v in reversed(sorted(terms.items())):  # highest exponent first
        if v > 0 and not first:
            result += "+"
        elif v < 0:
            result += "-"  # sign emitted here; magnitude appended below
        if v != 0:
            first = False
        if k == 0:
            result += latex_ratio(abs(v))  # constant term, sign already printed
        elif abs(v) == 1:  # print x / -x rather than 1x / -1x
            result += str(power(k))
        elif v != 0:
            result += latex_ratio(abs(v)) + str(power(k))
    display(Math(result))
    display(Markdown("<details><pre>$" + result + "$</pre></details>"))
|
4,669 | a83988e936d9dee4838db61c8eb8ec108f5ecd3f | import numpy as np
# Load the saved test-set target labels and print one per line (quick sanity check).
labels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')
for lab in labels:
    print(lab)
4,670 | 511c555c88fb646b7b87678044b43a5a623a5ac7 |
from django.contrib import admin
from django.urls import path
from . import view
# URL routes for the marks views: the entry form at the root, and the
# rendered mark sheet at /MarkSheet (reversible via the 'MarkSheet' name).
urlpatterns = [
    path('', view.enterMarks),
    path('MarkSheet', view.getMarks, name='MarkSheet'),
]
|
4,671 | 844c630d3fe2dda833064556228b524608cfece9 | import numpy as np
import cv2
from camera import load_K, load_camera_dist, load_camera_ret
def undistort_img(img):
    """Return an undistorted copy of ``img`` using saved calibration data.

    Loads the intrinsic matrix and distortion coefficients produced by a
    previous calibration, refines the camera matrix for this image size,
    and applies the correction (per the OpenCV calibration docs).

    Fix: removed the unused ``ret = load_camera_ret()`` call — its result
    was never read (assumed to be a pure loader with no side effects;
    confirm against the camera module).
    """
    K = load_K()
    dist = load_camera_dist()
    h, w = img.shape[:2]
    # alpha=1 keeps all source pixels (possible black borders); the returned
    # roi could crop them but is intentionally unused, as in the original.
    new_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(K, dist, (w, h), 1, (w, h))
    return cv2.undistort(img, K, dist, None, new_camera_matrix)
|
4,672 | dd7c7fa6493a43988e1c8079797f6ff9b4d239dd | # -coding: UTF-8 -*-
# @Time : 2020/06/24 20:01
# @Author: Liangping_Chen
# @E-mail: chenliangping_2018@foxmail.com
import requests
def http_request(url, data, token=None, method='post'):
    """Send a JSON request to the lemonban API and return the parsed body.

    ``method`` selects GET when equal to 'get', otherwise POST; ``token``
    (already prefixed, e.g. "bearer ...") goes into the Authorization header.
    """
    headers = {'X-Lemonban-Media-Type': 'lemonban.v2',
               'Authorization': token}
    # Pick the requests function once instead of branching twice.
    send = requests.get if method == 'get' else requests.post
    response = send(url, json=data, headers=headers)
    return response.json()
if __name__ == '__main__':
    # Manual smoke test against the demo loan API: log in, then recharge.
    # NOTE(review): phone number, password and member_id are hard-coded test
    # credentials — move them to configuration before wider use.
    login_url='http://120.78.128.25:8766/futureloan/member/login'
    login_data={'mobile_phone':13665929730,'pwd':'12345678'}
    response=http_request(login_url,login_data)
    print('登录的结果是:{}'.format(response))
    # Recharge using the bearer token returned by the login call.
    token=response['data']['token_info']['token']
    rec_url='http://120.78.128.25:8766/futureloan/member/recharge'
    rec_data = {'member_id': 200170, 'amount': 123456}
    print(http_request(rec_url,rec_data,"bearer "+token))
4,673 | 5ef6b2ff89ee1667ddb01b1936557f1f11a49910 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from sqlalchemy import create_engine, MetaData, Table
class DoubanPipeline(object):
    """Scrapy pipeline that inserts scraped film items into a PostgreSQL table."""

    conn = None        # SQLAlchemy connection, opened per spider run
    film_table = None  # reflected 'film' table

    def open_spider(self, spider):
        """Connect to the database and reflect the target table."""
        engine = create_engine('postgresql+psycopg2://postgres:orchid@127.0.0.1:5432/postgres', echo=False)
        self.conn = engine.connect()
        metadata = MetaData(engine)
        self.film_table = Table('film', metadata, autoload=True)

    def process_item(self, item, spider):
        """Insert one item; database errors are swallowed (best-effort insert)."""
        ins = self.film_table.insert().values(item)
        try:
            self.conn.execute(ins)
        except Exception:
            # Fix: `except Exception, e:` is Python-2-only syntax and is a
            # SyntaxError on Python 3. Failures (e.g. duplicate rows) are
            # still deliberately ignored, but consider logging via
            # spider.logger so silent data loss is at least visible.
            pass
        return item

    def close_spider(self, spider):
        """Release the database connection when the spider finishes."""
        self.conn.close()
|
4,674 | a2c93fd632a637d47f05e0a4fda851b465d03a31 | import os
import sqlite3
import datetime
# Ensure the helper directory exists before any files are written to it.
# NOTE(review): 'C:\PyHelp' only works because \P is not a recognized escape
# sequence; a raw string r'C:\PyHelp' would be safer — confirm before changing.
directory = 'C:\PyHelp'
if not os.path.exists(directory):
    os.makedirs(directory)
rand_facts = '''• Exception is used as a base class for all exceptions. It's strongly recommended (but not yet required) that user exceptions are derived from this class too.
• SystemExit(Exception) is raised by the sys.exit function. If it propagates to the top level without being caught by a try-except clause, the interpreter is terminated without a traceback message.
• StandardError(Exception) is used as a base class for all standard exceptions (except SystemExit, that is).
• KeyboardInterrupt(StandardError) is raised when the user presses Control-C (or any other interrupt key). Note that this may cause strange errors if you use "catch all" try-except statements.
• ImportError(StandardError) is raised when Python fails to import a module.
• EnvironmentError is used as a base class for exceptions that can be caused by the interpreter's environment (that is, they're usually not caused by bugs in the program).
• IOError(EnvironmentError) is used to flag I/O-related errors.
• OSError(EnvironmentError) is used to flag errors by the os module.
• WindowsError(OSError) is used to flag Windows-specific errors from the os module.
• NameError(StandardError) is raised when Python fails to find a global or local name.
• UnboundLocalError(NameError) is raised if your program attempts to access a local variable before it has been assigned a value. This exception is only used in 2.0 and later; earlier versions raise a plain NameError exception instead.
• AttributeError(StandardError) is raised when Python fails to find (or assign to) an instance attribute, a method, a module function, or any other qualified name.
• SyntaxError(StandardError) is raised when the compiler stumbles upon a syntax error.
• IndentationError(SyntaxError) is raised for syntax errors caused by bad indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.
• TabError(IndentationError) is raised by the interpreter when the -tt option is used to check for inconsistent indentation. This exception is only used in 2.0 and later; earlier versions raise a plain SyntaxError exception instead.
• TypeError(StandardError) is raised when an operation cannot be applied to an object of the given type.
• AssertionError(StandardError) is raised when an assert statement fails (if the expression is false, that is).
• LookupError(StandardError) is used as a base class for exceptions raised when a sequence or dictionary type doesn't contain a given index or key.
• IndexError(LookupError) is raised by sequence objects when the given index doesn't exist.
• KeyError(LookupError) is raised by dictionary objects when the given key doesn't exist.
• ArithmeticError(StandardError) is used as a base class for math-related exceptions.
• OverflowError(ArithmeticError) is raised when an operations overflows (for example, when an integer is too large to fit in the given type).
• ZeroDivisionError(ArithmeticError) is raised when you try to divide a number by zero.
• FloatingPointError(ArithmeticError) is raised when a floating point operation fails.
• ValueError(StandardError) is raised if an argument has the right type, but an invalid value.
• UnicodeError(ValueError) is raised for type problems related to the Unicode string type. This is only used in 2.0 and later.
• RuntimeError(StandardError) is used for various run-time problems, including attempts to get outside the box when running in restricted mode, unexpected hardware problems, etc.
• NotImplementedError(RuntimeError) can be used to flag functions that hasn't been implemented yet, or methods that should be overridden.
• SystemError(StandardError) is raised if the interpreter messes up, and knows about it. The exception value contains a more detailed description (usually something cryptic, like
"eval_code2: NULL globals" or so). I cannot recall ever seeing this exception in over five years of full-time Python programming, but maybe that's just me.
• MemoryError(StandardError) is raised when the interpreter runs out of memory. Note that this only happens when the underlying memory allocation routines complain; you can often send your poor computer into a mindless swapping frenzy before that happens.
• NoneType The type of None.
• TypeType The type of type objects (such as returned by type()).
• IntType The type of integers (e.g. 1).
• LongType The type of long integers (e.g. 1L).
• FloatType The type of floating point numbers (e.g. 1.0).
• ComplexType The type of complex numbers (e.g. 1.0j).
• StringType The type of character strings (e.g. ’Spam’).
• UnicodeType The type of Unicode character strings (e.g. u’Spam’).
• TupleType The type of tuples (e.g. (1, 2, 3, ’Spam’)).
• ListType The type of lists (e.g. [0, 1, 2, 3]).
• DictType The type of dictionaries (e.g. {’Bacon’: 1, ’Ham’: 0}).
• DictionaryType An alternate name for DictType.
• FunctionType The type of user-defined functions and lambdas.
• LambdaType An alternate name for FunctionType.
• CodeType The type for code objects such as returned by compile().
• ClassType type of user-defined classes.
• InstanceType The type of instances of user-defined classes.
• MethodType The type of methods of user-defined class instances.
• UnboundMethod Type An alternate name for MethodType.
• BuiltinFunction Type The type of built-in functions like len() or sys.exit().
• BuiltinMethod TypeAn alternate name for BuiltinFunction.
• ModuleType The type of modules.
• FileType The type of open file objects such as sys.stdout.
• XRangeType The type of range objects returned by xrange().
• SliceType The type of objects returned by slice().
• EllipsisType The type of Ellipsis.
• TracebackType The type of traceback objects such as found in sys.exc traceback.
• FrameType The type of frame objects such as found in tb.tb frame if tb is a traceback object.
• BufferType The type of buffer objects created by the buffer() function.
• string.capitalize()Return a copy of the string with only its first character capitalized.
• string.center(width) Return centered in a string of length width. Padding is done using spaces.
• string.count(sub[, start[, end ]]) Return the number of occurrences of substring sub in string S[start:end]. Optional arguments start and end are interpreted as in slice notation.
• string.encode([encoding[,errors]]) Return an encoded version of the string. Default encoding is the current default string encoding. errors may be given to set a different error handling scheme. The default for errors is ’strict’, meaning that encoding errors raise a ValueError. Other possible values are ’ignore’ and ’replace’. .
• string.endswith(suffix[, start[, end ]]) Return true if the string ends with the specified suffix, otherwise return false. With optional start, test beginning at that position. With optional end, stop comparing at that position.
• string.expandtabs([tabsize ]) Return a copy of the string where all tab characters are expanded using spaces. If tabsize is not given, a tab size of 8 characters is assumed.
• string.find(sub[, start[, end ]]) Return the lowest index in the string where substring sub is found, such that sub is contained in the range [start, end). Optional arguments start and end are interpreted as in slice notation. Return -1 if sub is not found.
• string.index(sub[, start[, end ]]) Like find(), but raise ValueError when the substring is not found.
• string.isalnum() Return true if all characters in the string are alphanumeric and there is at least one character, false otherwise.
• string.isalpha() Return true if all characters in the string are alphabetic and there is at least one character, false otherwise.
• string.isdigit()Return true if there are only digit characters, false otherwise.
• string.islower() Return true if all cased characters in the string are lowercase and there is at least one cased character, false otherwise.
• string.isspace() Return true if there are only whitespace characters in the string and the string is not empty, false otherwise.
• string.istitle() Return true if the string is a titlecased string, i.e. uppercase characters may only follow uncased characters and lowercase characters only cased ones. Return false otherwise.
• string.isupper() Return true if all cased characters in the string are uppercase and there is at least one cased character, false otherwise.
• string.join(seq) Return a string which is the concatenation of the strings in the sequence seq. The separator between elements is the string providing this method.
• string.ljust(width) Return the string left justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s).
• string.lower() Return a copy of the string converted to lowercase.
• string.lstrip() Return a copy of the string with leading whitespace removed.
• string.replace(old, new[, maxsplit]) Return a copy of the string with all occurrences of substring old replaced by new. If the optional argument maxsplit is given, only the first maxsplit occurrences are replaced.
• string.rfind(sub [,start [,end ]]) Return the highest index in the string where substring sub is found, such that sub is contained within s[start,end]. Optional arguments start and end are interpreted as in slice notation. Return -1 on failure.
• string.rindex(sub[, start[, end ]]) Like rfind() but raises ValueError when the substring sub is not found.
• string.rjust(width) Return the string right justified in a string of length width. Padding is done using spaces. The original string is returned if width is less than len(s). • string.rstrip() Return a copy of the string with trailing whitespace removed.
• string.split([sep [,maxsplit]]) Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most maxsplit splits are done. If sep is not specified or None, any whitespace string is a separator.
• string.splitlines([keepends]) Return a list of the lines in the string, breaking at line boundaries. Line breaks are not included in the resulting list unless keepends is given and true.
• string.startswith(prefix[, start[, end ]]) Return true if string starts with the prefix, otherwise return false. With optional start, test string beginning at that position. With optional end, stop comparing string at that position.
• string.strip() Return a copy of the string with leading and trailing whitespace removed.
• string.swapcase() Return a copy of the string with uppercase characters converted to lowercase and vice versa.
• string.title() Return a titlecased version of, i.e. words start with uppercase characters, all remaining cased characters are lowercase.
• string.translate(table[, deletechars]) Return a copy of the string where all characters occurring in the optional argument deletechars are removed, and the remaining characters have been mapped through the given translation table, which must be a string of length 256.
• string.upper() Return a copy of the string converted to uppercase.
• file.close() Close the file. A closed file cannot be read or written anymore. Any operation which requires that the file be open will raise a ValueError after the file has been closed. Calling close() more than once is allowed.
• file.flush() Flush the internal buffer, like stdio’s fflush(). This may be a no-op on some file-like objects.
• file.isatty() Return true if the file is connected to a tty(-like) device, else false. Note: If a file-like object is not associated with a real file, this method should not be implemented.
• file.fileno() Return the integer “file descriptor” that is used by the underlying implementation to request I/O operations from the operating system. This can be useful for other, lower level interfaces that use file descriptors, e.g. module fcntl or os.read() and friends. Note: File-like objects which do not have a real file descriptor should not provide this method!
• file.read([size ]) Read at most size bytes from the file (less if the read hits EOF before obtaining size bytes). If the size argument is negative or omitted, read all data until EOF is reached. The bytes are returned as a string object. An empty string is returned when EOF is encountered immediately. (For certain files, like ttys, it makes sense to continue reading after an EOF is hit.) Note that this method may call the underlying C function fread() more than once in an effort to acquire as close to size bytes as possible.
• file.readline([size ]) Read one entire line from the file. A trailing newline character is kept in the string7 (but may be absent when ends with an incomplete line). If the size argument is present and non-negative, it is a maximum byte count and an incomplete line may be returned. An empty string is returned when EOF is hit immediately. Note: Unlike stdio’s fgets(), the returned string contains null characters (’\0’) if they occurred in the input.
• file.readlines([sizehint]) Read until EOF using readline() and return a list containing the lines thus read. If the optional sizehint argument is present, instead of reading up to EOF, whole lines totalling approximately sizehint bytes (possibly after rounding up to an internal buffer size) are read. Objects implementing a file-like interface may choose to ignore sizehint if it cannot be implemented, or cannot be implemented efficiently.
• file.xreadlines() Equivalent to xreadlines.xreadlines(file). (See the xreadlines module for more information.) .
• file.seek(offset[, whence ]) Set the file’s current position, like stdio’s fseek(). The whence argument is optional and defaults to 0 (absolute file positioning); other values are 1 (seek relative to the current position) and 2 (seek relative to the file’s end). There is no return value. Note that if the file is opened for appending (mode ’a’ or ’a+’), any seek() operations will be undone at the next write. If the file is only opened for writing in append mode (mode ’a’), this method is essentially a no-op, but it remains useful for files opened in append mode with reading enabled (mode ’a+’).
• file.tell() Return the file’s current position, like stdio’s ftell().
• file.truncate([size ]) Truncate the file’s size. If the optional size argument present, the file is truncated to (at most) that size. The size defaults to the current position. Availability of this function depends on the operating system version (for example, not all UNIX versions support this operation).
• file.write(str) Write a string to the file. There is no return value. Note: Due to buffering, the string may not actually show up in the file until the flush() or close() method is called.
• file.writelines(list) Write a list of strings to the file. There is no return value. (The name is intended to match readlines(); writelines() does not add line separators.) File objects also offer a number of other interesting attributes. These are not required for file-like objects, but should be implemented if they make sense for the particular object.
• file.closed Boolean indicating the current state of the file object. This is a read-only attribute; the close() method changes the value. It may not be available on all file-like objects.
• file.mode The I/O mode for the file. If the file was created using the open() built-in function, this will be the value of the mode parameter. This is a read-only attribute and may not be present on all file-like objects.
• file.name If the file object was created using open(), the name of the file. Otherwise, some string that indicates the source of the file object, of the form ‘<...>’. This is a read-only attribute and may not be present on all file-like objects.
• abs(x) Return the absolute value of a number. The argument may be a plain or long integer or a floating point number. If the argument is a complex number, its magnitude is returned.
• apply(function, args[, keywords]) The function argument must be a callable object (a user-defined or built-in function or method, or a class object) and the args argument must be a sequence (if it is not a tuple, the sequence is first converted to a tuple). The function is called with args as the argument list; the number of arguments is the the length of the tuple. (This is different from just calling func(args), since in that case there is always exactly one argument.) If the optional keywords argument is present, it must be a dictionary whose keys are strings. It specifies keyword arguments to be added to the end of the the argument list.
• buffer(object[, offset[, size ]]) The object argument must be an object that supports the buffer call interface (such as strings, arrays, and buffers). A new buffer object will be created which references the object argument. The buffer object will be a slice from the beginning of object (or from the specified offset). The slice will extend to the end of object (or will have a length given by the size argument).
• callable(object) Return true if the object argument appears callable, false if not. If this returns true, it is still possible that a call fails, but if it is false, calling object will never succeed. Note that classes are callable (calling a class returns a new instance); class instances are callable if they have a call () method.
• chr(i) Return a string of one character whose ASCII code is the integer i, e.g., chr(97) returns the string ’a’. This is the inverse of ord(). The argument must be in the range [0..255], inclusive; ValueError will be raised if i is outside that range.
• cmp(x, y) Compare the two objects x and y and return an integer according to the outcome. The return value is negative if x < y, zero if x == y and strictly positive if x > y.
• coerce(x, y) Return a tuple consisting of the two numeric arguments converted to a common type, using the same rules as used by arithmetic operations. compile(string, filename, kind) Compile the string into a code object. Code objects can be executed by an exec statement or evaluated by a call to eval(). The filename argument should give the file from which the code was read; pass e.g. ’<string>’ if it wasn’t read from a file. The kind argument specifies what kind of code must be compiled; it can be ’exec’ if string consists of a sequence of statements, ’eval’ if it consists of a single expression, or ’single’ if it consists of a single interactive statement (in the latter case, expression statements that evaluate to something else than None will printed).
• complex(real[, imag ]) Create a complex number with the value real + imag*j or convert a string or number to a complex number. Each argument may be any numeric type (including complex). If imag is omitted, it defaults to zero and the function serves as a numeric conversion function like int(), long() and float(); in this case it also accepts a string argument which should be a valid complex number.
• delattr(object, name) This is a relative of setattr(). The arguments are an object and a string. The string must be the name of one of the object’s attributes. The function deletes the named attribute, provided the object allows it. For example, delattr(x, ’foobar’) is equivalent to del x.foobar.
• dir([object]) Without arguments, return the list of names in the current local symbol table.
• divmod(a, b) Take two numbers as arguments and return a pair of numbers consisting of their quotient and remainder when using long division. With mixed operand types, the rules for binary arithmetic operators apply. For plain and long integers, the result is the same as (a / b, a % b). For floating point numbers the result is (q, a % b), where q is usually math.floor(a / b) but may be 1 less than that. In any case q * b + a % b is very close to a, if a % b is non-zero it has the same sign as b, and 0 <= abs(a % b) < abs(b).
• eval(expression[, globals[, locals]]) The arguments are a string and two optional dictionaries. The expression argument is parsed and evaluated as a Python expression (technically speaking, a condition list) using the globals and locals dictionaries as global and local name space. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where eval is called. The return value is the result of the evaluated expression. Syntax errors are reported as exceptions. Example:
>>> x = 1
>>> print eval(’x+1’)
2 This function can also be used to execute arbitrary code objects (e.g. created by compile()). In this case pass a code object instead of a string. The code object must have been compiled passing ’eval’ to the kind argument. Hints: dynamic execution of statements is supported by the exec statement. Execution of statements from a file is supported by the execfile() function. The globals() and locals() functions returns the current global and local dictionary, respectively, which may be useful to pass around for use by eval() or execfile().
• execfile(file[, globals[, locals]]) This function is similar to the exec statement, but parses a file instead of a string. It is different from the import statement in that it does not use the module administration — it reads the file unconditionally and does not create a new module.8 The arguments are a file name and two optional dictionaries. The file is parsed and evaluated as a sequence of Python statements (similarly to a module) using the globals and locals dictionaries as global and local names- pace. If the locals dictionary is omitted it defaults to the globals dictionary. If both dictionaries are omitted, the expression is executed in the environment where execfile() is called. The return value is None.
• filter(function, list) Construct a list from those elements of list for which function returns true. If list is a string or a tuple, the result also has that type; otherwise it is always a list. If function is None, the identity function is assumed, i.e. all elements of list that are false (zero or empty) are removed.
• float(x) Convert a string or a number to floating point. If the argument is a string, it must contain a possibly signed dec-imal or floating point number, possibly embedded in whitespace; this behaves identical to string.atof(x). Otherwise, the argument may be a plain or long integer or a floating point number, and a floating point number with the same value (within Python’s floating point precision) is returned.
• getattr(object, name[, default]) Return the value of the named attributed of object. name must be a string. If the string is the name of one of the object’s attributes, the result is the value of that attribute. For example, getattr(x, ’foobar’) is equivalent to x.foobar. If the named attribute does not exist, default is returned if provided, otherwise AttributeError is raised.
• globals() Return a dictionary representing the current global symbol table. This is always the dictionary of the current module (inside a function or method, this is the module where it is defined, not the module from which it is called).
• hasattr(object, name) The arguments are an object and a string. The result is 1 if the string is the name of one of the object’s attributes, 0 if not. (This is implemented by calling getattr(object, name) and seeing whether it raises an exception or not.)
• hash(object) Return the hash value of the object (if it has one). Hash values are integers. They are used to quickly compare dictionary keys during a dictionary lookup. Numeric values that compare equal have the same hash value (even if they are of different types, e.g. 1 and 1.0).
• hex(x) Convert an integer number (of any size) to a hexadecimal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, hex(-1) yields ’0xffffffff’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.
• id(object) Return the ‘identity’ of an object. This is an integer (or long integer) which is guaranteed to be unique and constant for this object during its lifetime. Two objects whose lifetimes are disjunct may have the same id() value. (Implementation note: this is the address of the object.)
• input([prompt]) Equivalent to eval(raw input(prompt)). Warning: This function is not safe from user errors! It expects a valid Python expression as input; if the input is not syntactically valid, a SyntaxError will be raised. Other exceptions may be raised if there is an error during evaluation. (On the other hand, sometimes this is exactly what you need when writing a quick script for expert use.) If the readline module was loaded, then input() will use it to provide elaborate line editing and history features. Consider using the raw input() function for general input from users.
• int(x[, radix ]) Convert a string or number to a plain integer. If the argument is a string, it must contain a possibly signed decimal number representable as a Python integer, possibly embedded in whitespace; this behaves identical to
• string.atoi(x[, radix ]). The radix parameter gives the base for the conversion and may be any integer in the range [2, 36], or zero. If radix is zero, the proper radix is guessed based on the contents of string; the interpretation is the same as for integer literals. If radix is specified and x is not a string, TypeError is raised. Otherwise, the argument may be a plain or long integer or a floating point number. Conversion of floating point numbers to integers is defined by the C semantics; normally the conversion truncates towards zero.9
• intern(string) Enter string in the table of “interned” strings and return the interned string – which is string itself or a copy. Interning strings is useful to gain a little performance on dictionary lookup – if the keys in a dictionary are interned, and the lookup key is interned, the key comparisons (after hashing) can be done by a pointer compare instead of a string compare. Normally, the names used in Python programs are automatically interned, and the dictionaries used to hold module, class or instance attributes have interned keys. Interned strings are immortal (i.e. never get garbage collected).
• isinstance(object, class) Return true if the object argument is an instance of the class argument, or of a (direct or indirect) subclass thereof. Also return true if class is a type object and object is an object of that type. If object is not a class instance or a object of the given type, the function always returns false. If class is neither a class object nor a type object, a TypeError exception is raised.
• issubclass(class1, class2) Return true if class1 is a subclass (direct or indirect) of class2. A class is considered a subclass of itself. If either argument is not a class object, a TypeError exception is raised.
• len (s) Return the length (the number of items) of an object. The argument may be a sequence (string, tuple or list) or a mapping (dictionary).
• list(sequence) Return a list whose items are the same and in the same order as sequence’s items. If sequence is already a list, a copy is made and returned, similar to sequence[:]. For instance, list(’abc’) returns [’a’, ’b’, ’c’] and list( (1, 2, 3) ) returns [1, 2, 3].
• locals() Return a dictionary representing the current local symbol table. Warning: The contents of this dictionary should not be modified; changes may not affect the values of local variables used by the interpreter.
• long(x[, radix ]) Convert a string or number to a long integer. If the argument is a string, it must contain a possibly signed number of arbitrary size, possibly embedded in whitespace; this behaves identical to string.atol(x). The radix argument is interpreted in the same way as for int(), and may only be given when x is a string. Otherwise, the argument may be a plain or long integer or a floating point number, and a long integer with the same value is returned. Conversion of floating point numbers to integers is defined by the C semantics; see the description of int().
• map(function, list, ...) Apply function to every item of list and return a list of the results. If additional list arguments are passed, function must take that many arguments and is applied to the items of all lists in parallel; if a list is shorter than another it is assumed to be extended with None items. If function is None, the identity function is assumed; if there are multiple list arguments, map() returns a list consisting of tuples containing the corresponding items from all lists (i.e. a kind of transpose operation). The list arguments may be any kind of sequence; the result is always a list.
• max(s[, args...]) With a single argument s, return the largest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the largest of the arguments.
• min(s[, args...]) With a single argument s, return the smallest item of a non-empty sequence (e.g., a string, tuple or list). With more than one argument, return the smallest of the arguments.
• oct(x) Convert an integer number (of any size) to an octal string. The result is a valid Python expression. Note: this always yields an unsigned literal, e.g. on a 32-bit machine, oct(-1) yields ’037777777777’. When evaluated on a machine with the same word size, this literal is evaluated as -1; at a different word size, it may turn up as a large positive number or raise an OverflowError exception.
• ord(c) Return the ASCII value of a string of one character or a Unicode character. E.g., ord(’a’) returns the integer 97, ord(u’ u2020’) returns 8224. This is the inverse of chr() for strings and of unichr() for Unicode characters.
• pow(x, y[, z]) Return x to the power y; if z is present, return x to the power y, modulo z (computed more efficiently than pow(x, y) % z). The arguments must have numeric types. With mixed operand types, the rules for binary arithmetic operators apply. The effective operand type is also the type of the result; if the result is not expressible in this type, the function raises an exception; e.g., pow(2, -1) or pow(2, 35000) is not allowed.
• range([start,] stop[, step ]) This is a versatile function to create lists containing arithmetic progressions. It is most often used in for loops. The arguments must be plain integers. If the step argument is omitted, it defaults to 1. If the start argument is omitted, it defaults to 0. The full form returns a list of plain integers [start, start + step, start + 2 * step, ...]. If step is positive, the last element is the largest start + i * step less than stop; if step is negative, the last element is the largest start + i * step greater than stop. step must not be zero (or else ValueError is raised).
• reduce(function, sequence[, initializer]) Apply function of two arguments cumulatively to the items of sequence, from left to right, so as to reduce the sequence to a single value. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). If the optional initializer is present, it is placed before the items of the sequence in the calculation, and serves as a default when the sequence is empty.
• reload(module) Re-parse and re-initialize an already imported module. The argument must be a module object, so it must have been successfully imported before. This is useful if you have edited the module source file using an external editor and want to try out the new version without leaving the Python interpreter. The return value is the module object (i.e. the same as the module argument).
• repr(object) Return a string containing a printable representation of an object. This is the same value yielded by conversions (reverse quotes). It is sometimes useful to be able to access this operation as an ordinary function. For many types, this function makes an attempt to return a string that would yield an object with the same value when passed to eval().
• round(x[, n ]) Return the floating point value x rounded to n digits after the decimal point. If n is omitted, it defaults to zero. The result is a floating point number. Values are rounded to the closest multiple of 10 to the power minus n; if two multiples are equally close, rounding is done away from 0 (so e.g. round(0.5) is 1.0 and round(- 0.5) is -1.0).
• setattr(object, name, value) This is the counterpart of getattr(). The arguments are an object, a string and an arbitrary value. The string may name an existing attribute or a new attribute. The function assigns the value to the attribute, provided the object allows it. For example, setattr(x, ’foobar’, 123) is equivalent to x.foobar = 123.
• slice([start,] stop[, step ]) Return a slice object representing the set of indices specified by range(start, stop, step). The start and step arguments default to None. Slice objects have read-only data attributes start, stop and step which merely return the argument values (or their default). They have no other explicit functionality; however they are used by Numerical Python and other third party extensions. Slice objects are also generated when extended indexing syntax is used, e.g. for ‘a[start:stop:step]’ or ‘a[start:stop, i]’.
• str(object) Return a string containing a nicely printable representation of an object. For strings, this returns the string itself. The difference with repr(object) is that str(object) does not always attempt to return a string that is acceptable to eval(); its goal is to return a printable string.
• tuple(sequence) Return a tuple whose items are the same and in the same order as sequence’s items. If sequence is already a tuple, it is returned unchanged. For instance, tuple(’abc’) returns (’a’, ’b’, ’c’) and tuple([1, 2, 3]) returns (1, 2, 3).
• type(object) Return the type of an object. The return value is a type object. The standard module types defines names for all built-in types. For instance:
>>> import types
>>> if type(x) == types.StringType: print "It’s a string" unichr(i)
Return the Unicode string of one character whose Unicode code is the integer i, e.g., unichr(97) returns the string u’a’. This is the inverse of ord() for Unicode strings. The argument must be in the range [0..65535], inclusive. ValueError is raised otherwise. .
• unicode(string[, encoding[, errors]]) Decodes string using the codec for encoding. Error handling is done according to errors. The default behavior is to decode UTF-8 in strict mode, meaning that encoding errors raise ValueError. See also the codecs module. .
• vars([object]) Without arguments, return a dictionary corresponding to the current local symbol table. With a module, class or class instance object as argument (or anything else that has a dict attribute), returns a dictionary corresponding to the object’s symbol table. The returned dictionary should not be modified: the effects on the corresponding symbol table are undefined.11
• xrange([start,] stop[, step ]) This function is very similar to range(), but returns an “xrange object” instead of a list. This is an opaque sequence type which yields the same values as the corresponding list, without actually storing them all si- multaneously. The advantage of xrange() over range() is minimal (since xrange() still has to create the values when asked for them) except when a very large range is used on a memory-starved machine (e.g. MS-DOS) or when all of the range’s elements are never used (e.g. when the loop is usually terminated with break).
• zip(seq1, ...) This function returns a list of tuples, where each tuple contains the i-th element from each of the argument sequences. At least one sequence is required, otherwise a TypeError is raised. The returned list is truncated in length to the length of the shortest argument sequence. When there are multiple argument sequences which are all of the same length, zip() is similar to map() with an initial argument of None. With a single sequence argument, it returns a list of 1-tuples.
'''
# Persist the built-in-functions reference text to disk, but only on the
# first run -- never clobber an existing copy.
op = 'C:\PyHelp\\randinfo.txt'
file_exists = os.path.isfile(op)
if not file_exists:
    # Bug fix: use a context manager so the handle is flushed and closed;
    # the original left the file object ``x`` open.
    with open(op, "w") as x:
        x.write(rand_facts)
|
4,675 | 2747b2563c83e11261a7113d69921c1affb20ac8 | class Balloon(object):
def __init__(self, color, size, shape):
self.color = color
self.size = size
self.shape = shape
self.inflated = False
self.working = True
def inflate(self):
if self.working:
self.inflated = True
else:
print "You exploded this balloon. Idiot."
def explode(self):
self.inflated = False
self.working = False
print "BANG!"
    def deflate(self):
        """Let the air out without destroying the balloon."""
        self.inflated = False
class BigBalloon(Balloon):
    """A Balloon whose size is always 'Big'."""
    def __init__(self, color, shape):
        # BUG FIX: super() must name *this* class, not the parent.
        # ``super(Balloon, self)`` resolves past Balloon (to object), so
        # Balloon.__init__ was never called and the 3-argument call failed.
        super(BigBalloon, self).__init__(color, 'Big', shape)
# NOTE(review): this demo code is broken as written --
#  * Balloon/BigBalloon define no paint() method, so the call below raises;
#  * ``bigBalloon`` and ``self`` are undefined at module level, and no
#    print_info() method exists on either class.
balloon = BigBalloon('green', 'round')
balloon.paint('red')
bigBalloon.print_info(self.color, self.size, self.shape) |
4,676 | 0dce4ea8ef21f2535194330b82ce5706ae694247 | class Order:
    """
    A single product order placed with the store.
    """
    def __init__(self, order_number, product_id, item_type, name, product_details, factory, quantity, holiday):
        """
        Construct a new order. Orders start out valid with empty invalid notes.
        :param order_number: str
        :param product_id: str
        :param item_type: str
        :param name: str
        :param product_details: str
        :param factory: Factory
        :param quantity: int
        :param holiday: str
        """
        self._order_number = order_number
        self._product_id = product_id
        self._item_type = item_type
        self._name = name
        self._product_details = product_details
        self._factory = factory
        self._quantity = quantity
        self._holiday = holiday
        self._is_valid = True
        self._invalid_notes = ""
    @property
    def quantity(self):
        """
        Return quantity of the order.
        :return: int
        """
        return self._quantity
    @property
    def order_num(self):
        """
        Return order num of the order.
        :return: str
        """
        return self._order_number
    @property
    def product_id(self):
        """
        Return product id of the order.
        :return: str
        """
        return self._product_id
    @property
    def item_type(self):
        """
        Return item type of the order.
        :return: str
        """
        return self._item_type
    @property
    def name(self):
        """
        Return item name of the order.
        :return: str
        """
        return self._name
    @property
    def product_details(self):
        """
        Return other details of the item of the order.
        :return: str
        """
        return self._product_details
    @property
    def factory(self):
        """
        Return the factory that can generate the item.
        :return: Factory
        """
        return self._factory
    @property
    def holiday(self):
        """
        Return the holiday that the item is for.
        :return: str
        """
        return self._holiday
    @property
    def invalid_notes(self):
        """
        Return the invalid notes if the item is invalid.
        :return: str
        """
        return self._invalid_notes
    @property
    def is_valid(self):
        """
        Return the valid status.
        :return: bool
        """
        return self._is_valid
    def is_invalid(self):
        """
        Mark this order as invalid (note: a setter despite the name).
        """
        self._is_valid = False
    def set_invalid_notes(self, error):
        """
        Set the invalid notes.
        :param error: str
        """
        self._invalid_notes = error
    def __str__(self):
        """
        Return a one-line human-readable summary of the order.
        """
        return f"Order Number: {self._order_number} " \
               f"Product ID: {self._product_id} " \
               f"Item: {self._item_type} " \
               f"Name: {self._name} " \
               f"Quantity: {self._quantity} " \
               f"Product details: {self._product_details} "
|
4,677 | 6e614d1235a98ef496956001eef46b4447f0bf9b | from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from appium.webdriver.common.touch_action import TouchAction
import time
import re
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import pymongo
def getSize():
    """Return the device screen size as a (width, height) tuple."""
    size = driver.get_window_size()
    return (size['width'], size['height'])
'''
解释:int start x-开始滑动的x坐标,
int start y -开始滑动的y坐标。
int end x -结束点x坐标,
int end y -结束点y坐标。
duration 滑动时间(默认5毫秒);
'''
def swipeUp(t):
    """Swipe vertically up the screen center over ``t`` milliseconds.

    The gesture starts at 75% of the screen height and ends at 25%,
    keeping x fixed at the horizontal midpoint.
    """
    width, height = getSize()
    mid_x = int(width * 0.5)      # fixed x coordinate
    start_y = int(height * 0.75)  # swipe start
    end_y = int(height * 0.25)    # swipe end
    driver.swipe(mid_x, start_y, mid_x, end_y, t)
def crawl():
    """Scroll the in-app order list forever, scraping each visible card into MongoDB.

    Relies on the module-level globals ``wait`` (WebDriverWait), ``driver``
    (Appium session, used via swipeUp) and ``collection`` (pymongo
    collection). Never returns; interrupt the process to stop.
    """
    # RecyclerView holding the order cards (layout-dependent XPath).
    list_xpath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.support.v4.view.ViewPager/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.view.ViewGroup/android.support.v7.widget.RecyclerView'
    while True:
        items = wait.until(EC.presence_of_all_elements_located(
            (By.XPATH, list_xpath)))
        swipeUp(1500)
        for item in items:
            try:
                nickname = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderCompany').get_attribute('text')
                content = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderStartTime').get_attribute('text')
                # The time field reads "<start> 至 <deadline>" ("至" = "to").
                start_time, deadline = content.split("至", 1)
                send = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailStartAdd').get_attribute('text')
                receive = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailEndAdd').get_attribute('text')
                # Renamed from ``type`` to avoid shadowing the builtin.
                goods_type = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailGoodsType1').get_attribute('text')
                raw_price = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_orderDetailFreight1').get_attribute('text')
                price = re.findall(r"\d+\.?\d*", raw_price)[0]
                raw_distance = item.find_element_by_id('com.kuaichengwuliu.driver:id/tv_search_goods_distance').get_attribute('text')
                # presumably the second number in the distance label is the
                # route distance -- TODO confirm against the app UI.
                distance = re.findall(r"\d+\.?\d*", raw_distance)[1]
                data = {'nickname': nickname, 'start_time': start_time, 'deadline': deadline, 'send': send,
                        'receive': receive, 'type': goods_type, 'price': price, 'distance': distance}
                print(data)
                # The original repeated every field in the filter; the filter
                # *is* the full document, so pass ``data`` directly (same
                # upsert-on-exact-duplicate semantics).
                collection.update_one(data, {'$set': data}, upsert=True)
            except Exception as e:
                # Bug fix: was ``except BaseException``, which also swallowed
                # KeyboardInterrupt/SystemExit and made the infinite loop
                # impossible to stop cleanly.
                print(e)
# MongoDB connection that crawl() writes scraped orders into.
client=pymongo.MongoClient("127.0.0.1",27017)
db=client.kc_data
collection=db.data_detail
# Appium desired capabilities for the target device / app under test.
desired_caps = {}
desired_caps['platformName'] ='Android'
desired_caps['deviceName']='f866d421'
desired_caps['appPackage']='com.kuaichengwuliu.driver'
desired_caps['appActivity']='.guide.GuideActivity'#'.guide.GuideActivity'
driver_server='http://localhost:4723/wd/hub'
desired_caps['autoAcceptAlerts']="true"
desired_caps['platformVersion'] = '6.0.1'
driver = webdriver.Remote(driver_server,desired_caps)
wait = WebDriverWait(driver, 300)
#WebDriverWait(driver, 20).until(lambda the_driver: the_driver.find_element_by_id("com.kuyu:id/tv_login").is_displayed())
#time.sleep(30)
WebDriverWait(driver, 7).until(lambda the_driver: driver.find_element_by_id("android:id/content").is_displayed())
# Scripted taps below presumably navigate from the splash screen to the
# order list; the coordinates are device-specific -- TODO confirm they
# match other screen resolutions.
TouchAction(driver).tap(x=545, y=181).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=161, y=706).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=534, y=1029).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=183, y=1029).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=528, y=701).release().perform()
time.sleep(1)
TouchAction(driver).tap(x=183, y=684).release().perform()
time.sleep(4)
TouchAction(driver).tap(x=161, y=306).release().perform()
time.sleep(4)
TouchAction(driver).tap(x=128, y=303).release().perform()
time.sleep(5)
crawl()
# NOTE: crawl() loops forever, so nothing below this line ever executes.
# Enter the username
#driver.find_element_by_id("com.kuyu:id/et_email").send_keys("******")
# Enter the password
#driver.find_element_by_id("com.kuyu:id/et_pwd").send_keys("******")
# Tap the login button
#driver.find_element_by_id("com.kuyu:id/tv_login").click()
# A wait is added here: the appearance of the given element is treated as a
# successful login (how to configure waits is explained later).
#WebDriverWait(driver, 20).until(
# lambda the_driver: the_driver.find_element_by_id("com.kuyu:id/include_study_iv_add").is_displayed())
print(u"登录成功")
#driver.quit()
#TouchAction(driver).press(x=297, y=1073).move_to(x=309, y=459).release().perform()
|
4,678 | 752affdfa1481b9a19a9b7dfe76f9d5d11c80073 | #
# Copyright John Reid 2009
#
"""
Code to handle bootstrap analyses.
"""
from itertools import cycle
import random
import bisect
def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):
    """Yield ``num_samples`` random subsets of ``test_universe``.

    The size of each successive sample is drawn by cycling through
    ``test_set_sizes``.
    """
    size_stream = cycle(test_set_sizes)
    for _count, sample_size in zip(range(num_samples), size_stream):
        yield random.sample(test_universe, sample_size)
def calculate_bootstrap_statistics(samples, statistic):
    """Apply ``statistic`` to every sample and return the values sorted ascending."""
    return sorted(statistic(sample) for sample in samples)
def bootstrap_p_value(bootstrap_stats, stat_value):
    """Return the empirical upper-tail p-value of ``stat_value``.

    ``bootstrap_stats`` must be sorted ascending; the result is the
    fraction of bootstrap statistics that are >= ``stat_value``.
    """
    num_below = bisect.bisect_left(bootstrap_stats, stat_value)
    return 1. - num_below / float(len(bootstrap_stats))
|
4,679 | 6707723b3d0b42271e49c08c639afc9103066dc7 | import numpy as np
import math
import datetime
def multi_strassen(A, B, check_ig=True, check_quad=True, check_pot=True, check_time=True):
    """Multiply two matrices using Strassen's algorithm on 2x2 blocks.

    :param A: numpy.ndarray, left operand of C = A * B (2-D)
    :param B: numpy.ndarray, right operand (2-D)
    :param check_ig: require A and B to have identical shapes
    :param check_quad: require A and B to be square
    :param check_pot: zero-pad each operand up to the next 2^n x 2^n size
    :param check_time: measure and return the elapsed wall-clock time
    :return: (C, tempo) where C is the product trimmed to the original
             output shape and tempo is a datetime.timedelta (or a
             placeholder string when check_time is False); (None, None)
             when validation fails.
    """
    def Strassen(matriz_1, matriz_2):
        # Strassen's seven-multiplication formula for one 2x2 block pair.
        if (matriz_1.shape[0] != 2) or (matriz_1.shape[1] != 2) or (matriz_2.shape[0] != 2) or (matriz_2.shape[1] != 2):
            print("As matrizes devem ser do tipo 2x2")
            return None
        M1 = (matriz_1[0, 0] + matriz_1[1, 1]) * (matriz_2[0, 0] + matriz_2[1, 1])
        M2 = (matriz_1[1, 0] + matriz_1[1, 1]) * matriz_2[0, 0]
        M3 = matriz_1[0, 0] * (matriz_2[0, 1] - matriz_2[1, 1])
        M4 = matriz_1[1, 1] * (matriz_2[1, 0] - matriz_2[0, 0])
        M5 = (matriz_1[0, 0] + matriz_1[0, 1]) * matriz_2[1, 1]
        M6 = (matriz_1[1, 0] - matriz_1[0, 0]) * (matriz_2[0, 0] + matriz_2[0, 1])
        M7 = (matriz_1[0, 1] - matriz_1[1, 1]) * (matriz_2[1, 0] + matriz_2[1, 1])
        Resultado = np.zeros([2, 2])
        Resultado[0, 0] = M1 + M4 - M5 + M7
        Resultado[0, 1] = M3 + M5
        Resultado[1, 0] = M2 + M4
        Resultado[1, 1] = M1 - M2 + M3 + M6
        return Resultado
    if check_time:
        inicio = datetime.datetime.now()
    # Part 1: validate before touching .shape[1], so 1-D input reaches the
    # dimensionality message instead of crashing with IndexError (bug fix:
    # the original allocated C and compared shape[1] first).
    if (len(A.shape) != 2) or (len(B.shape) != 2):
        print("Erro: As matrizes devem ser bidimensionais")
        return None, None
    if (A.shape[1] != B.shape[0]):
        print("Erro: Não é possível realizar a multiplicação C = A * B com as matrizes fornecidas")
        return None, None
    if check_ig:
        if (A.shape != B.shape):
            print("Erro: As matrizes devem possuir mesmas dimensões")
            return None, None
    if check_quad:
        if ((A.shape[0] - A.shape[1]) != 0) or ((B.shape[0] - B.shape[1]) != 0):
            print("Erro: As matrizes devem ser ambas quadradas")
            return None, None
    # Records the true output shape before any padding happens.
    C = np.zeros([A.shape[0], B.shape[1]])
    if check_pot:
        # Bug fix: the original wrote math.ceil(log2(n) != floor(log2(n))),
        # i.e. ceil() of a boolean, which only worked by accident; the
        # intent is ceil(log2(n)) != floor(log2(n)).
        if (math.ceil(math.log2(A.shape[0])) != math.floor(math.log2(A.shape[0]))) or (math.ceil(math.log2(A.shape[1])) != math.floor(math.log2(A.shape[1]))):
            print("A matriz A será modificada, acrescentando-se zeros para que torne-se uma matriz do tipo 2^n x 2^n")
            linhas = 2**math.ceil(math.log2(A.shape[0])) - A.shape[0]    # rows missing up to the next power of two
            colunas = 2**math.ceil(math.log2(A.shape[1])) - A.shape[1]   # columns missing up to the next power of two
            if linhas > colunas:
                matriz_auxiliar = np.zeros([linhas, A.shape[1]])
                A = np.vstack((A, matriz_auxiliar))
                matriz_auxiliar = np.zeros([A.shape[0], A.shape[0] - A.shape[1]])
                A = np.hstack((A, matriz_auxiliar))
            elif colunas >= linhas:
                matriz_auxiliar = np.zeros([A.shape[0], colunas])
                A = np.hstack((A, matriz_auxiliar))
                matriz_auxiliar = np.zeros([A.shape[1] - A.shape[0], A.shape[1]])
                A = np.vstack((A, matriz_auxiliar))
        if (math.ceil(math.log2(B.shape[0])) != math.floor(math.log2(B.shape[0]))) or (math.ceil(math.log2(B.shape[1])) != math.floor(math.log2(B.shape[1]))):
            print("A matriz B será modificada, acrescentando-se zeros para que torne-se uma matriz do tipo 2^n x 2^n")
            linhas = 2**math.ceil(math.log2(B.shape[0])) - B.shape[0]
            colunas = 2**math.ceil(math.log2(B.shape[1])) - B.shape[1]
            if linhas > colunas:
                matriz_auxiliar = np.zeros([linhas, B.shape[1]])
                B = np.vstack((B, matriz_auxiliar))
                matriz_auxiliar = np.zeros([B.shape[0], B.shape[0] - B.shape[1]])
                B = np.hstack((B, matriz_auxiliar))
            elif colunas >= linhas:
                matriz_auxiliar = np.zeros([B.shape[0], colunas])
                B = np.hstack((B, matriz_auxiliar))
                matriz_auxiliar = np.zeros([B.shape[1] - B.shape[0], B.shape[1]])
                B = np.vstack((B, matriz_auxiliar))
    # Block multiplication: each 2x2 output tile is the sum of Strassen
    # products along the shared dimension.
    D = np.zeros_like(A)
    for i in range(0, A.shape[0], 2):
        for j in range(0, B.shape[1], 2):
            soma = 0
            for k in range(0, A.shape[1], 2):
                soma = soma + Strassen(A[i:i + 2, k:k + 2], B[k:k + 2, j:j + 2])
            D[i:i + 2, j:j + 2] = soma
    # Trim the padded result back to the original output shape.
    C = D[0:C.shape[0], 0:C.shape[1]]
    print(C)
    if check_time:
        fim = datetime.datetime.now()
        tempo = fim - inicio
    else:
        tempo = "Tempo não calculado"  # typo fixed ("calcualdo")
    return C, tempo
|
4,680 | 68ea462f56ba029a7c977d9c8b94e6f913336fb7 | from django.db import models
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
# Swappable user-model reference, used as the FK target in the models below.
user = settings.AUTH_USER_MODEL
# Maximum comment length. NOTE(review): the name is misspelled
# ("commment_lenght") but is referenced below, so renaming it would have to
# touch the Comment model as well.
commment_lenght = settings.COMMENT_LENGTH
# Entity Comment
class Comment(TimeStampedModel):
    """
    Text comment posted by users
    """
    # Author of the comment ('comment_user' is the *reverse* accessor on User).
    user = models.ForeignKey(user, blank=False, null=False, related_name='comment_user')
    # Parent comment (optional) - i.e. a comment of a comment
    starting_comment = models.ForeignKey('Comment', blank=True, null=True, related_name='parent_comment')
    # Text content of a comment
    content = models.TextField(_('comment text'), max_length=commment_lenght, blank=False, null=False)
    class Meta:
        verbose_name = _('comment')
        verbose_name_plural = _('comments')
    def __unicode__(self):
        return self.content
    def get_content(self):
        "Returns the text content for the comment"
        return self.content
    def get_user_id(self):
        "Returns the id of the user who posted the comment"
        # BUG FIX: the field on this model is ``user``; ``comment_user`` is
        # the reverse accessor declared on the User model, not an attribute here.
        return self.user.pk
    def get_date(self):
        "Returns the timestamp associated to the comment"
        return self.created
    def get_parent_comment_id(self):
        "Returns the id of the parent comment"
        # BUG FIX: the field is ``starting_comment``; ``parent_comment`` is
        # its reverse accessor and does not exist on this instance.
        return self.starting_comment.pk
    def set_parent_comment(self, parent_comment):
        "Attaches this comment to its parent comment"
        # BUG FIX: ``self`` was missing from the original signature, so the
        # assignment below raised NameError.
        self.starting_comment = parent_comment
# Entity Cigarette
class Cigarette(models.Model):
    """
    A single cigarette smoked by a user, timestamped on creation.
    """
    # Smoker.  NOTE: related_name is the reverse accessor on the *user*
    # model (user.user_cigarettes.all()), not an attribute of Cigarette.
    user = models.ForeignKey(user, blank=False, null=False, related_name='user_cigarettes')
    # Date and time associated to the cigarette (set automatically on insert)
    cigarette_date = models.DateField(_('cigarette date'), auto_now_add=True)
    cigarette_time = models.TimeField(_('cigarette time'), auto_now_add=True)
    class Meta:
        verbose_name = _('cigarette')
        verbose_name_plural = _('cigarettes')
    def __unicode__(self):
        return u'%s' % ( self.pk)
    def get_cigarette_user_id(self):
        "Returns the id of the user who smoked the cigarette"
        # BUG FIX: the smoker is stored in self.user; 'cigarette_user' does
        # not exist on this model and raised AttributeError.
        return self.user.pk
    def get_date(self):
        "Returns the date associated to the cigarette"
        return self.cigarette_date
    def get_time(self):
        "Returns the time associated to the cigarette"
        return self.cigarette_time
|
4,681 | 66f3590381fe96c49a8926a806b4a845f0d7e25d | import inaccel.coral as inaccel
import numpy as np
import time
class StereoBM:
    """Stereo block matching on an InAccel FPGA accelerator.

    Holds the rectification parameters (camera matrices, distortion
    coefficients, inverse rectification matrices) and the block-matching
    state in accelerator-visible buffers, and submits requests to the
    'com.xilinx.vitis.vision.stereoBM' kernel.
    """

    def __init__(self, cameraMA_l=None, cameraMA_r=None, distC_l=None, distC_r=None, irA_l=None, irA_r=None, bm_state=None):
        def as_param(value, default, dtype):
            # Fall back to the factory-calibrated default when None is given.
            return np.array(default if value is None else value, dtype=dtype)

        # Buffers must be allocated through the inaccel allocator so the
        # accelerator can access them.
        with inaccel.allocator:
            self.cameraMA_l_fl = as_param(cameraMA_l, [933.173, 0.0, 663.451, 0.0, 933.173, 377.015, 0.0, 0.0, 1.0], np.float32)
            self.cameraMA_r_fl = as_param(cameraMA_r, [933.467, 0.0, 678.297, 0.0, 933.467, 359.623, 0.0, 0.0, 1.0], np.float32)
            self.distC_l_fl = as_param(distC_l, [-0.169398, 0.0227329, 0.0, 0.0, 0.0], np.float32)
            self.distC_r_fl = as_param(distC_r, [-0.170581, 0.0249444, 0.0, 0.0, 0.0], np.float32)
            self.irA_l_fl = as_param(irA_l, [0.0011976323, -0.0000000019, -0.8153011732, 0.0000000007, 0.0011976994,
                                             -0.4422348617, 0.0000126839, 0.0000001064, 0.9913820905], np.float32)
            self.irA_r_fl = as_param(irA_r, [0.0011976994, 0.0000000000, -0.8047567905, -0.0000000000, 0.0011976994,
                                             -0.4420566166, -0.0000000000, -0.0000001064, 1.0000392898], np.float32)
            self.bm_state_arr = as_param(bm_state, [0, 15, 31, 15, 0, 48, 20, 15, 16, 3, 0], np.int32)

    def runAsync(self, left_img, right_img):
        """Validate the image pair, stage buffers and submit the request
        without waiting for completion."""
        self.m_runStartTime = int(round(time.time() * 1000000))
        if left_img is None:
            raise RuntimeError('Invalid left image')
        if right_img is None:
            raise RuntimeError('Invalid right image')
        if left_img.shape[0] != right_img.shape[0] or left_img.shape[1] != right_img.shape[1]:
            raise RuntimeError('Image sizes differ')
        rows = np.int32(left_img.shape[0])
        cols = np.int32(left_img.shape[1])
        # Stage input images and the output disparity buffer.
        with inaccel.allocator:
            self.left_mat = np.array(left_img)
            self.right_mat = np.array(right_img)
            self.disp_mat = np.ndarray((rows, cols), dtype=np.uint16)
        # Build and submit the accelerator request (argument order matters).
        req = inaccel.request('com.xilinx.vitis.vision.stereoBM')
        for argument in (self.left_mat, self.right_mat, self.disp_mat,
                         self.cameraMA_l_fl, self.cameraMA_r_fl,
                         self.distC_l_fl, self.distC_r_fl,
                         self.irA_l_fl, self.irA_r_fl,
                         self.bm_state_arr, rows, cols):
            req.arg(argument)
        self.response = inaccel.submit(req)

    def wait(self):
        """Block until the submitted request finishes; return the disparity
        map scaled to uint8."""
        self.response.result()
        disp_mat_scaled = (self.disp_mat.view(np.ndarray) * (256.0 / 48.0) / (16.0)).astype(np.uint8)
        self.m_runEndTime = int(round(time.time() * 1000000))
        return disp_mat_scaled

    def run(self, left_img, right_img):
        """Synchronous convenience wrapper: submit, then wait."""
        self.runAsync(left_img, right_img)
        return self.wait()

    def lastruntime(self):
        """Microseconds elapsed between the last submit and completion."""
        return self.m_runEndTime - self.m_runStartTime
|
def flat_list(array):
    """Recursively flatten arbitrarily nested lists into one flat list.

    >>> flat_list([1, [2, 2, 2], 4])
    [1, 2, 2, 2, 4]
    """
    result = []
    for element in array:
        # isinstance is the idiomatic type test and also accepts list
        # subclasses, unlike the original 'type(element) == list'.
        if isinstance(element, list):
            result += flat_list(element)
        else:
            result.append(element)
    return result
# Demo calls.  The parenthesised print form works on both Python 2 and 3
# (the original bare 'print expr' statement is Python-2-only).
print(flat_list([1, [2, 2, 2], 4]))
print(flat_list([-1, [1, [-2], 1], -1]))
4,683 | 221a75d37fbb49e8508fc786ee8e6e90b19e12c0 | #!/usr/bin/python
# -*-coding:utf-8 -*-
import smtplib
import MySQLdb
import datetime
import types
def sendEmail(sender, passwd, host, port, receivers, date, mail):
    """Send *mail* as an HTML alert message over SMTP-over-SSL.

    Args:
        sender: from-address, also used as the SMTP login name.
        passwd: SMTP password.
        host, port: SMTP-SSL server endpoint.
        receivers: list of recipient addresses.
        date: date string embedded in the subject line.
        mail: HTML body of the message.
    """
    # BUG FIX: MIMEText and Header were used without ever being imported,
    # so every call raised NameError before any mail could be sent.
    from email.mime.text import MIMEText
    from email.header import Header
    message = MIMEText(mail, 'html', 'utf-8')
    message['From'] = Header("告警发送者<"+sender+">", 'utf-8')
    subject = str(date) + '服务器告警通知'
    message['Subject'] = Header(subject, 'utf-8')
    try:
        smtpObj = smtplib.SMTP_SSL(host, port)
        smtpObj.ehlo()
        smtpObj.login(sender, passwd)
        smtpObj.sendmail(sender, receivers, message.as_string())
        smtpObj.quit()
        # Parenthesised print works on both Python 2 and 3.
        print("邮件发送成功")
    except smtplib.SMTPException:
        print("Error: 无法发送邮件")
if __name__ == '__main__' :
    # SECURITY NOTE(review): real-looking SMTP credentials and recipient
    # addresses are hard-coded here; rotate them and load from environment
    # variables or a secrets store instead.
    sender = 'liucl@helianhealth.com'
    passwd = '@Chuck20110923'
    host = 'smtp.exmail.qq.com'
    port = 465
    receivers = ['547000225@qq.com','longof@126.com']
    # Yesterday's date formatted YYYYMMDD, used in the mail subject.
    daytime = (datetime.date.today() - datetime.timedelta(days=1) ). strftime('%Y%m%d')
    mail = '服务器问题警报!!!'
    sendEmail(sender,passwd,host,port,receivers,daytime,mail)
4,684 | af8a3fbce35685cd89dee72449a8be2a133b4a3f | from os import chdir
from os.path import dirname, realpath
import random
from flask import Flask, render_template, send_from_directory
# Flask application serving the course-browser pages defined below.
app = Flask(__name__)
# gets list of list of all classes
def get_data():
    """Read counts.tsv and return one list of tab-separated fields per
    course line."""
    with open('counts.tsv') as fd:
        return [row.split("\t") for row in fd.read().splitlines()]
# Gets list of list of all fall 2016 classes
def get_fall_2016():
    """Return only the course rows whose year is '2016' and term 'fall'."""
    return [course for course in get_data()
            if course[0] == '2016' and course[1] == 'fall']
# searches fall 2016 classes for a specific Core requirement and creates new list of courses that satisfy that core
def get_fall_2016_core(core):
    """Return the fall-2016 courses whose Core field (column 9, a
    ';'-separated list of requirement codes) contains *core*."""
    return [course for course in get_fall_2016()
            if core in course[9].split(";")]
@app.route('/')
def display_full_courses():
    """Render the landing page with every course in the data file."""
    return render_template('base.html', courses=get_data())
# All of the remaining functions display website with 3 (different) random classes that satisfy specified core requirement
def _three_random_courses(core):
    """Pick three fall-2016 courses satisfying *core*, trying to avoid
    duplicate titles.

    Mirrors the logic previously copy-pasted into every route below:
    pick a first course, then make up to 10 attempts each to draw a
    second and a third course whose title (field 5) differs from the
    ones already chosen.  After 10 failed attempts a duplicate is
    accepted, exactly as before.
    """
    courses = get_fall_2016_core(core)
    picks = [random.choice(courses)]
    while len(picks) < 3:
        for _ in range(10):
            candidate = random.choice(courses)
            if all(candidate[5] != chosen[5] for chosen in picks):
                break
        picks.append(candidate)
    return picks

# One route per Core requirement; each renders three (ideally distinct)
# random courses satisfying that requirement.  The per-route bodies were
# identical except for the code string, so they now share the helper above.
@app.route('/CPAF')
def display_random_CPAF():
    return render_template('courses.html', courses=_three_random_courses('CPAF'))
@app.route('/CPAS')
def display_random_CPAS():
    return render_template('courses.html', courses=_three_random_courses('CPAS'))
@app.route('/CPEU')
def display_random_CPEU():
    return render_template('courses.html', courses=_three_random_courses('CPEU'))
@app.route('/CPFA')
def display_random_CPFA():
    return render_template('courses.html', courses=_three_random_courses('CPFA'))
@app.route('/CPAP')
def display_random_CPAP():
    return render_template('courses.html', courses=_three_random_courses('CPAP'))
@app.route('/CFAP')
def display_random_CFAP():
    return render_template('courses.html', courses=_three_random_courses('CFAP'))
@app.route('/CPGC')
def display_random_CPGC():
    return render_template('courses.html', courses=_three_random_courses('CPGC'))
@app.route('/CPIC')
def display_random_CPIC():
    return render_template('courses.html', courses=_three_random_courses('CPIC'))
@app.route('/CPLS')
def display_random_CPLS():
    return render_template('courses.html', courses=_three_random_courses('CPLS'))
@app.route('/CPLA')
def display_random_CPLA():
    return render_template('courses.html', courses=_three_random_courses('CPLA'))
@app.route('/CPMS')
def display_random_CPMS():
    return render_template('courses.html', courses=_three_random_courses('CPMS'))
@app.route('/CPPE')
def display_random_CPPE():
    return render_template('courses.html', courses=_three_random_courses('CPPE'))
@app.route('/CPRF')
def display_random_CPRF():
    return render_template('courses.html', courses=_three_random_courses('CPRF'))
@app.route('/CPUS')
def display_random_CPUS():
    return render_template('courses.html', courses=_three_random_courses('CPUS'))
@app.route('/CPUD')
def display_random_CPUD():
    return render_template('courses.html', courses=_three_random_courses('CPUD'))
@app.route('/CUSP')
def display_random_CUSP():
    return render_template('courses.html', courses=_three_random_courses('CUSP'))
# The functions below lets you access files in the css, js, and images folders.
# You should not change them unless you know what you are doing.
@app.route('/images/<file>')
def get_image(file):
    # Serve a static image.  The parameter must keep the name 'file' to
    # match the route converter, even though it shadows a builtin name.
    return send_from_directory('images', file)
@app.route('/css/<file>')
def get_css(file):
    # Serve a static stylesheet from the css folder.
    return send_from_directory('css', file)
@app.route('/js/<file>')
def get_js(file):
    # Serve a static script from the js folder.
    return send_from_directory('js', file)
if __name__ == '__main__':
    # Development server only; debug=True must not be used in production.
    app.run(debug=True)
4,685 | 822fc2941099cb9d7791580678cfb2a89a987175 | import os, random, string
from django.conf import settings
from django.template.loader import render_to_string
from django.core.mail import send_mail
def generate_temp_password(length=7):
    """Return a random alphanumeric temporary password.

    Uses random.SystemRandom (OS entropy), which is appropriate for
    security-sensitive tokens.

    Args:
        length: number of characters to generate (default 7, matching the
            previous hard-coded value, so existing callers are unchanged).
    """
    chars = string.ascii_letters + string.digits
    rnd = random.SystemRandom()
    return ''.join(rnd.choice(chars) for _ in range(length))
def send_confirmation_email(user):
    """Create (if needed) a confirmation key for *user* and e-mail it.

    Renders both text and HTML confirmation templates and sends them to
    user.email.  Returns send_mail's return value (messages sent).
    """
    # Bug in simple_email_confirmation: refer to https://github.com/mfogel/django-simple-email-confirmation/issues/22
    try:
        confirmation_key = user.confirmation_key
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit
        # and KeyboardInterrupt.  Narrowed to ordinary exceptions while
        # the upstream issue above remains the workaround trigger.
        confirmation_key = user.add_unconfirmed_email(user.email)
    msg_txt = render_to_string('email/confirmation.txt', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})
    msg_html = render_to_string('email/confirmation.html', {'SITE_URL': settings.SITE_URL, 'user': user.email, 'key' : confirmation_key})
    return send_mail('Confirmation email',msg_txt,'daniyar.yeralin@gmail.com',[user.email],html_message=msg_html,)
4,686 | 4948fd2062bdbd32bfa32d2b0e24587f0872132d | from .exceptions import InvalidUsage
class HTTPMethodView:
    """Simple class-based view for sanic.

    Subclasses implement one method per supported HTTP verb (get, post,
    put, patch, delete); requests for unimplemented verbs raise
    InvalidUsage with a 405 status.

    Example:

        class DummyView(HTTPMethodView):
            def get(self, request, *args, **kwargs):
                return text('I am get method')

    Register with either app.add_route(DummyView.as_view(), '/') or
    app.route('/')(DummyView.as_view()).  Decorators listed in the
    'decorators' class attribute are applied to the generated view.
    """

    decorators = []

    def dispatch_request(self, request, *args, **kwargs):
        """Route the request to the handler named after its HTTP verb."""
        handler = getattr(self, request.method.lower(), None)
        if handler is None:
            # No method implemented for this verb -> 405.
            raise InvalidUsage(
                'Method {} not allowed for URL {}'.format(
                    request.method, request.url), status_code=405)
        return handler(request, *args, **kwargs)

    @classmethod
    def as_view(cls, *class_args, **class_kwargs):
        """Build a plain view function that instantiates this class per
        request and dispatches to it."""
        def view(*args, **kwargs):
            instance = view.view_class(*class_args, **class_kwargs)
            return instance.dispatch_request(*args, **kwargs)

        if cls.decorators:
            # Expose the module before decorating so decorators see it.
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        view.view_class = cls
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        return view
|
4,687 | 001198459b038186ab784b6a9bed755924784866 | from flask import Flask, render_template, redirect, request, session
app = Flask(__name__)
# SECURITY NOTE(review): hard-coded secret key -- session cookies are
# forgeable if this ships; load the key from configuration instead.
app.secret_key = 'ThisIsSecret' #this line is always needed when using the import 'session'
@app.route('/') #methods=['GET'] by default
def index():
    # Landing page.
    return render_template('index.html')
@app.route('/ninja')
def ninja():
    # Renders the ninja overview template.
    return render_template('ninja.html')
@app.route('/ninja/<username>') #great example of using a variable in an html and the image needed on that specific page
def show_user_profile(username):
    """Render blue.html personalised with the captured URL segment."""
    # BUG FIX: 'print username' is Python-2-only syntax; the parenthesised
    # form behaves identically on Python 2 and also works on Python 3.
    print(username)
    return render_template('blue.html', username=username)
@app.errorhandler(404) #modifies your 404 url not found page to whatever you have on your html file
def page_not_found(e):
    # Custom 404 page; the second tuple element sets the HTTP status code.
    return render_template('404notFound.html'), 404
# @app.route('/ninja/blue')
# def blue():
# return render_template('blue.html')
# @app.route('/ninja/red')
# def red():
# return render_template('red.html')
# @app.route('/ninja/purple')
# def purple():
# return render_template('purple.html')
# @app.route('/ninja/orange')
# def orange():
# return render_template('orange.html')
# Development server entry point (no __main__ guard: also runs on import).
app.run(debug=True)
|
4,688 | 0656aba517023c003e837d5ad04daeb364f7fda8 | import os
# CSRF protection toggle for the form layer.
CSRF_ENABLED = True
basedir = os.path.abspath(os.path.dirname(__file__))
# Heroku vs. Local Configs
if os.environ.get('HEROKU') is None:
    # Database path
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
    # NOTE(review): os.urandom yields a fresh key on every process start,
    # which invalidates CSRF tokens/sessions across restarts and workers.
    SECRET_KEY = os.urandom(24)
    # SECURITY NOTE(review): third-party API keys are committed below;
    # rotate them and load from the environment instead.
    # Pocket API
    CONSUMER_KEY = '23571-333bb5dbab872eee6686bf86'
    # News API Credentials
    TROVE_KEY = 'E767C55D-0941-4993-BB3A-1CB81FD2B9E9'
    NYTIMES_SEARCH_KEY = 'b2f1032fbec2cb261c1e153ab6b5a6b8:13:69075429'
else:
    # Database path
    SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
    # CSRF Key
    SECRET_KEY = os.environ['CSRF_SECRET_KEY']
    # Pocket API
    CONSUMER_KEY = os.environ['POCKET_KEY']
    # News API Credentials
    TROVE_KEY = os.environ['TROVE_KEY']
    NYTIMES_SEARCH_KEY = os.environ['NYTIMES_KEY']
# Path where we store the migration data files
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
|
4,689 | 21526dabe8456c599e4409228fa69ffd0d672c5b | """Helpers for FormatCBFMiniPilatus..."""
from __future__ import annotations
import calendar
import time
def get_pilatus_timestamp(timestamp_string):
    """Parse a Pilatus header timestamp into epoch seconds.

    Accepts an optional fractional-seconds suffix ("....25") and any of
    three date formats; the time is interpreted as UTC (calendar.timegm).

    Raises:
        RuntimeError: if no known format matches.
    """
    if "." in timestamp_string:
        timestamp, milliseconds = timestamp_string.split(".")
    else:
        timestamp = timestamp_string
        milliseconds = "000"
    # 'fmt' rather than 'format' to avoid shadowing the builtin.
    for fmt in ["%Y-%b-%dT%H:%M:%S", "%Y-%m-%dT%H:%M:%S", "%Y/%b/%d %H:%M:%S"]:
        try:
            struct_time = time.strptime(timestamp, fmt)
        except ValueError:
            # strptime signals a format mismatch with ValueError only; the
            # original blanket 'except Exception' also hid real errors.
            continue
        return calendar.timegm(struct_time) + float("0." + milliseconds)
    raise RuntimeError("timestamp %s not recognised" % timestamp)
|
4,690 | be1ddaf5b4a7fb203fea62d061b06afb45d6867d |
def most_frequent_char(lst):
char_dict = {}
for word in lst:
for char in word:
if char in char_dict:
char_dict[char] += 1
else:
char_dict[char] = 1
max_value = max(char_dict.values())
max_keys = []
for key, value in char_dict.items():
if value == max_value:
max_keys.append(key)
return sorted(max_keys)
|
4,691 | 9cf0174a8bd2bccbd8e5d0be1f0b031a1a23c9df | from Global import *
import ShuntingYard
from Thompson import *
def check_string(automaton, word):
    """Return True when *word* is accepted by *automaton* (reaches the
    final state 'f' from the closure of the start state 's')."""
    # BUG FIX: the original read 'automata', an undefined name that raised
    # NameError at runtime; the parameter is 'automaton'.
    current = automaton['s'].closure
    for symbol in word:
        current = state_list_delta(current, symbol)
    return automaton['f'] in current
def create_AFND(re):
    """Build a non-deterministic finite automaton (Thompson construction)
    from the regular expression string *re*.

    Returns a dict mapping state names to State objects; 's' is the start
    state and 'f' the accepting state.
    """
    deltas = []
    # Parse the regex: shunting-yard -> RPN -> syntax tree.
    initial_node = ShuntingYard.create_tree(ShuntingYard.to_rpn(re))
    s = State('s')
    f = State('f')
    automaton = {s.name: s, f.name: f}
    #automaton = {s.name: s}
    s.add_transition(initial_node, f);
    deltas.append((s,initial_node))
    # Expand composite (tree-node) transitions until only plain symbols remain.
    while len(deltas) > 0:
        (origin, simbol) = deltas.pop()
        if not origin in automaton.values():
            automaton.setdefault(origin.name, origin)
        if isinstance(simbol, ShuntingYard.Node):
            # NOTE(review): 'Thompson.generic' requires a module object named
            # Thompson in scope, but this file only does 'from Thompson
            # import *' -- confirm this name resolves at runtime.
            aux_deltas = Thompson.generic(origin, simbol)
            for t in aux_deltas:
                deltas.insert(0, t)
    # Epsilon-closures can only be computed once all states exist.
    for state_name in automaton:
        automaton[state_name].update_closure()
    return automaton
|
4,692 | 027a049ffced721f2cd697bc928bfdf718630623 | import os
from apps.app_base.app_utils.cryp_key import decrypt, get_secret_key
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = get_secret_key
DEBUG = True
ALLOWED_HOSTS = ['.localhost', '127.0.0.1', '[::1]']
# Application definition
INSTALLED_APPS = [
'corsheaders',
'django.contrib.sessions',
]
MIDDLEWARE = [
# CORS
'corsheaders.middleware.CorsMiddleware',
# Session
'django.contrib.sessions.middleware.SessionMiddleware',
# Cache
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'apps.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'apps.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'luck',
'USER': 'postgres',
'PASSWORD': decrypt(b'gAAAAABfesT5OW3keTFXv6sUP_4NWJfG6U_ZEInkmCvJGdVSNA74VPJeG3lZLky8ZWEsjLsdxe_k_vgVCSIVCoTx1hOQsTb1kw=='),
'HOST': '127.0.0.1',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CACHES = {
# Local Memory Cache https://docs.djangoproject.com/en/3.1/topics/cache/
"default": {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'local-memory-lru',
},
"redis": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0", # db0
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"CONNECTION_POOL_KWARGS": {"max_connections": 100}
}
}
}
# Use Redis for session
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "redis"
SESSION_COOKIE_AGE = 3600 * 24 # In seconds
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
CORS_ALLOW_CREDENTIALS = True
|
4,693 | 33ac328b2bf16380b50c58013bd0d4d888dc3952 | #!/usr/bin/env python
from anytree import Node, RenderTree
webtest = Node("WebappTest")
registration = Node("Registration", parent=webtest)
smsconfirm = Node("SMSconfirm", parent=registration)
login = Node("Login", parent=smsconfirm)
useruploadCV = Node("UserUploadCV", parent=login)
usermatchJD = Node("UserMatchJD", parent=useruploadCV)
bemember = Node("BeMember", parent=login)
addprj = Node("AddProject", parent=bemember)
memuploadCV = Node("MemberUploadCV", parent=addprj)
memupfollowupCV = Node("MemberFollowupCV", parent=memuploadCV)
previewCV = Node("PreviewCV", parent=memuploadCV)
addbid = Node("AddBidding", parent=addprj)
modbid = Node("ModifyBidding", parent=addbid)
addcus = Node("AddCustomer", parent=addbid)
addJD = Node("AddJD", parent=addcus)
JDmatchCV = Node("JDmatchCV", parent=addJD)
JDmatchCVMultiDB = Node("JDmatchCVMultiDB", parent=JDmatchCV)
previewMatchCV = Node("previewMatchCV", parent=JDmatchCVMultiDB)
CVraderChart = Node("CVraderChart", parent=JDmatchCVMultiDB)
from anytree.exporter import DotExporter
DotExporter(webtest).to_picture("webtest.png")
|
4,694 | 1ab5147ed8ce808de9667052b6d17f320d62484f | '''
手写识别系统
构建识别类
Recognize
调用getResult()函数即可
'''
import operator
from numpy import *
from PIL import Image
from os import listdir
from io import BytesIO
def classify(inX, dataSet, labels, k):
    """k-nearest-neighbour classification.

    Returns the majority label among the k rows of *dataSet* closest
    (Euclidean distance) to the input vector *inX*.
    """
    n_samples = dataSet.shape[0]
    # Euclidean distance from inX to every training row.
    deltas = tile(inX, (n_samples, 1)) - dataSet
    dists = (deltas ** 2).sum(axis=1) ** 0.5
    # Indices of the training rows ordered by increasing distance.
    nearest = dists.argsort()
    # Tally the labels of the k nearest neighbours.
    votes = {}
    for rank in range(k):
        label = labels[nearest[rank]]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(),
                    key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
# 将图片转化为行向量
def img2vector(filename):
    """Read a 32x32 text bitmap of '0'/'1' characters from *filename* and
    return it flattened into a (1, 1024) numpy row vector."""
    returnVect = zeros((1,1024))
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0,32*i+j] = int(lineStr[j])
    return returnVect
'''
如何让加载训练集值运行一次?
'''
# Module-level shared state.  NOTE(review): loadTrainingSet appends to
# hwLabels on every call while rebuilding its matrix from scratch, so
# repeated loads accumulate stale labels -- verify single-call usage.
hwLabels , trainingMat = [] , []
def loadTrainingSet(dir_trainingSet):
    """Load every training sample under *dir_trainingSet*.

    Each file is named '<digit>_<index>.txt' and holds a 32x32 text
    bitmap.  Returns (labels, trainingMat) where labels[i] is the digit
    for matrix row i.
    """
    print('把trainingDigits文件夹里的所有训练集导入')
    trainingFileList = listdir(dir_trainingSet)
    m = len(trainingFileList)
    # BUG FIX: collect labels in a *local* list.  The original appended to
    # the module-level 'hwLabels', so a second call accumulated stale
    # labels while the matrix was rebuilt fresh, misaligning the two.
    labels = []
    trainingMat = zeros((m,1024))  # one flattened 1024-vector per sample
    for i in range(m):
        fileNameStr = trainingFileList[i]
        # '<digit>_<index>.txt' -> leading digit is the class label.
        classNumStr = int(fileNameStr.split('.')[0].split('_')[0])
        labels.append(classNumStr)
        trainingMat[i,:] = img2vector(dir_trainingSet+'/%s' % fileNameStr)
    return labels, trainingMat
def getResult(filename,trainingDigits):
    '''
    Classify one handwritten-digit image with k-NN (k=3).

    filename       -- path to the test image file
    trainingDigits -- directory holding the 32x32 text training samples
    Returns the predicted digit.
    '''
    hwLabels , trainingMat = loadTrainingSet(trainingDigits)
    # Read the raw image bytes of the digit to classify.
    with open(filename, 'rb') as f:
        filePath = f.read()
    # filePath now holds raw bytes, e.g. \x7f\x12\xdf
    fileNameStr = changeImg2Text(filePath,filename)
    inputVect = img2vector(fileNameStr)
    classifierResult = classify(inputVect, trainingMat, hwLabels, 3)
    print( '预测手写数字识别为:',classifierResult)
    return classifierResult
    # The original demo wrote the result back over the input file, which
    # destroyed the image; kept commented out on purpose.
    #with open(filename, 'w') as f:
    #    f.write(str(classifierResult))
# 处理初始图形
def changeImg2Text(filePath,filename):
    """Threshold the raw image bytes into a 32x32 '0'/'1' text bitmap.

    filePath -- raw image bytes; filename -- original path, used to name
    the output .txt file.  Returns the generated .txt file name.
    """
    # Take the last '\\'-separated path component, swap extension to .txt.
    fileNameStr = filename.split('\\')[-1].split('.')[0] + '.txt'
    fr = open(fileNameStr, 'w')
    # Decode the raw bytes; Python 3 requires wrapping them in BytesIO.
    '''
    https://codedump.io/share/aztOtkSsnO2U/1/python-valueerror-embedded-null-byte-when-reading-png-file-from-bash-pipe
    '''
    im = Image.open(BytesIO(filePath))
    #print(im) # <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=206x376 at 0x8D99C50>
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
    # needs Image.LANCZOS here -- confirm the pinned Pillow version.
    im2 = im.resize((32, 32), Image.ANTIALIAS)
    img = array(im2)
    print( img.shape , Image.ANTIALIAS )
    m, n = img.shape[:2]
    for i in range(m):
        for j in range(n):
            R, G, B = img[i, j, :]
            # Per-pixel threshold: '1' = ink, '0' = background.
            '''
            这部分的颜色用 PhotoShop 取色器,调参。
            RGB的值选择 白色点 和 目标颜色点的中点的RGB
            '''
            #if R < 40 and G < 40 and B < 40: # thresholds for black/white input
            #if R < 245 and G < 153 and B < 120: # thresholds for the orange images in folder 0
            if R < 185 and G < 100 and B < 100: # thresholds for the grey images in folder 2
                fr.write('1')
            else:
                fr.write('0')
        fr.write('\n')
    fr.close()
    return fileNameStr
|
4,695 | 3302dc058032d9fe412bde6fd89699203526a72d | import random #import random module
guesses_taken = 0 #assign 0 to guesses_taken variable
print('Hello! What is your name?')# print Hello! What is your name? to console
myName = input()#take an input from user(name)
number = random.randint(1, 20)# make random number between 1 and 19 and save in number variable
print('Well, ' + myName + ', I am thinking of a number between 1 and 20.') #print the explanation
while guesses_taken < 6: #while loop looping until guesses_taken < 6
print('Take a guess.') # print the introduction
guess = input() # user input
guess = int(guess) #convert the input to integer
guesses_taken += 1 #guess opportunity reduce
if guess < number:#if guess less than number print Your guess is too low.
print('Your guess is too low.')
if guess > number:#if guess bigger than number print Your guess is too low.
print('Your guess is too high.')
if guess == number:#if guess equal to number break
break
if guess == number:#if guess equal to number, user guessed the number and print the underline
guesses_taken = str(guesses_taken)
print('Good job, ' + myName + '! You guessed my number in ' + guesses_taken + ' guesses!')
if guess != number:#if guess not equal to number user try till guess_take is 6 and print under
number = str(number)
print('Nope. The number I was thinking of was ' + number)
|
4,696 | d9f176262dcaf055414fbc43b476117250249b63 | class Solution:
def levelOrder(self, root):
if root is None:
return []
currentList = [root]
nextList = []
solution = []
while currentList:
thisLevel = [node.val for node in currentList]
solution.append(thisLevel)
for node in currentList:
if node.left is not None:
nextList.append(node.left)
if node.right is not None:
nextList.append(node.right)
currentList, nextList = nextList, currentList
del nextList[:]
return solution
|
4,697 | e550a2d46e46f0e07d960e7a214fbaa776bab0d5 | import tensorflow as tf
from rnn_cells import gru_cell, lstm_cell
from tensorflow.python.ops import rnn
def shape_list(x):
    """Return the shape of tensor *x*, preferring static dimensions and
    falling back to dynamic (graph) dimensions where the static size is
    unknown."""
    static = x.get_shape().as_list()
    dynamic = tf.shape(x)
    return [dim if dim is not None else dynamic[i]
            for i, dim in enumerate(static)]
def bi_dir_lstm(X, c_fw, h_fw, c_bw, h_bw, units, scope='bi_dir_lstm'):
    """Run a bidirectional LSTM over a list of per-timestep tensors.

    Args:
        X: list of input tensors, one per timestep.
        c_fw, h_fw: initial cell/hidden state of the forward LSTM.
        c_bw, h_bw: initial cell/hidden state of the backward LSTM.
        units: unused here; kept for signature compatibility (the cell
            size comes from the state tensors).
        scope: variable scope name.

    Returns:
        (X, c, h): per-timestep outputs stacked over time with the two
        directions concatenated on the feature axis, plus the concatenated
        final cell and hidden states.
    """
    with tf.variable_scope(scope) as sc:
        # forward pass, natural time order
        hs_fw = []
        for idx, x in enumerate(X):
            if idx > 0:
                sc.reuse_variables()  # share weights across timesteps
            h_fw, c_fw = lstm_cell(x, c_fw, h_fw, 'fw_lstm_cell')
            hs_fw.append(h_fw)
        # backward pass, reversed time order
        # (bug fix: `tf.reversed` does not exist -- use the Python builtin,
        # matching bi_dir_gru below)
        hs_bw = []
        for idx, x in enumerate(reversed(X)):
            if idx > 0:
                sc.reuse_variables()
            h_bw, c_bw = lstm_cell(x, c_bw, h_bw, 'bw_lstm_cell')
            hs_bw.append(h_bw)
        # stack along time and flip the backward outputs so both directions
        # are time-aligned (bug fix: tf.reverse with an axis list, not the
        # nonexistent tf.reversed)
        hs_fw = tf.stack(hs_fw)
        hs_bw = tf.reverse(tf.stack(hs_bw), [0])
        # concat outputs and final states on the feature axis
        X = tf.concat((hs_fw, hs_bw), 2)
        c = tf.concat((c_fw, c_bw), 1)
        h = tf.concat((h_fw, h_bw), 1)
        return X, c, h
def bi_dir_gru(X, h_fw, h_bw, units, scope='bi_dir_gru'):
    """Run a bidirectional GRU over a list of per-timestep tensors.

    Args:
        X: list of input tensors, one per timestep.
        h_fw, h_bw: initial hidden states of the forward/backward GRUs.
        units: unused here; kept for signature compatibility (the cell
            size comes from the state tensors).
        scope: variable scope name.

    Returns:
        (X, h): per-timestep outputs stacked over time with the two
        directions concatenated on the feature axis, plus the concatenated
        final hidden state.
    """
    with tf.variable_scope(scope) as sc:
        # forward pass, natural time order
        hs_fw = []
        for idx, x in enumerate(X):
            if idx > 0:
                sc.reuse_variables()  # share weights across timesteps
            h_fw = gru_cell(x, h_fw, 'fw_gru_cell')
            hs_fw.append(h_fw)
        # backward pass, reversed time order
        hs_bw = []
        for idx, x in enumerate(reversed(X)):
            if idx > 0:
                sc.reuse_variables()
            h_bw = gru_cell(x, h_bw, 'bw_gru_cell')
            hs_bw.append(h_bw)
        # stack along time and flip the backward outputs so both directions
        # are time-aligned (bug fix: tf.reverse with an axis list, not the
        # nonexistent tf.reversed)
        hs_fw = tf.stack(hs_fw)
        hs_bw = tf.reverse(tf.stack(hs_bw), [0])
        # concat outputs and final states on the feature axis
        X = tf.concat((hs_fw, hs_bw), 2)
        h = tf.concat((h_fw, h_bw), 1)
        return X, h
def stacked_lstm(X, cs, hs, units, depth, non_res_depth, scope='stacked_lstm'):
    """Run a deep (stacked) LSTM over a list of per-timestep tensors.

    Layers at index >= non_res_depth wrap the cell with a residual (skip)
    connection. `X`, `cs` and `hs` are mutated in place and returned:
    X[t] becomes the top-layer hidden state at timestep t, while cs/hs
    hold the latest per-layer cell/hidden states.

    NOTE(review): lstm_cell is called here as (x, c, h, units, scope=...)
    but as (x, c, h, scope) in bi_dir_lstm -- verify against rnn_cells.
    NOTE(review): `x` only advances through residual layers; every layer
    below non_res_depth receives the original timestep input -- confirm
    this is intended.
    """
    with tf.variable_scope(scope) as sc:
        for t, x in enumerate(X):
            if t > 0:
                sc.reuse_variables()  # share weights across timesteps
            # push the input up through the stack of LSTM cells
            for layer in range(depth):
                h, c = lstm_cell(x, cs[layer], hs[layer], units,
                                 scope="cell_%d" % layer)
                # residual connection once past the non-residual layers
                if layer >= non_res_depth:
                    x = h + x
                cs[layer] = c
                hs[layer] = h
            X[t] = h
        return X, cs, hs
def stacked_gru(X, hs, units, depth, non_res_depth, scope='stacked_gru'):
    """Run a deep (stacked) GRU over a list of per-timestep tensors.

    Layers at index >= non_res_depth wrap the cell with a residual (skip)
    connection. `X` and `hs` are mutated in place and returned: X[t]
    becomes the top-layer hidden state at timestep t and hs holds the
    latest per-layer hidden states.

    Bug fix: `gru_cell` returns a single hidden state (see its use in
    bi_dir_gru), but the old code unpacked it into `h, c`, which would
    raise at runtime; the phantom cell state is removed.

    NOTE(review): gru_cell is called here as (x, h, units, scope=...) but
    as (x, h, scope) in bi_dir_gru -- verify against rnn_cells.
    NOTE(review): `x` only advances through residual layers; every layer
    below non_res_depth receives the original timestep input -- confirm
    this is intended.
    """
    with tf.variable_scope(scope) as sc:
        for idx, x in enumerate(X):
            if idx > 0:
                sc.reuse_variables()  # share weights across timesteps
            # push the input up through the stack of GRU cells
            for i in range(depth):
                h = gru_cell(x, hs[i], units, scope="cell_%d" % i)
                # residual connection once past the non-residual layers
                if i >= non_res_depth:
                    x = h + x
                hs[i] = h
            X[idx] = h
        return X, hs
def _luong_attn(h, e_out_W, e_out):
    """Luong-style (dot-product) attention over encoder outputs.

    `h` is the decoder query; `e_out_W` the projected and `e_out` the raw
    encoder outputs (presumably (batch, time, units) -- inferred from the
    matmul usage; confirm against the caller). Returns the context vector.
    """
    query = tf.expand_dims(h, 1)
    scores = tf.squeeze(tf.matmul(query, e_out_W, transpose_b=True), [1])
    weights = tf.nn.softmax(scores)
    context = tf.matmul(tf.expand_dims(weights, 1), e_out)
    return tf.squeeze(context, [1])
def _bahdanau_attn(h, e_out_W, e_out):
    """Bahdanau-style (additive) attention scores over encoder outputs.

    Bug fixes: `tf.maxmul` -> `tf.matmul`; the free names `units` and
    `dtype` were undefined and are now derived from `h`; the projected
    query is expanded to rank 3 so it broadcasts against the
    (batch, time, units) keys.

    NOTE(review): unlike the other *_attn helpers this returns raw scores,
    not a context vector, and `e_out` is unused -- confirm intended.
    """
    units = h.get_shape().as_list()[-1]  # static feature size of the query
    dtype = h.dtype
    w_q_attn = tf.get_variable("w_q_attn", [units, units],
                               initializer=tf.random_normal_initializer(stddev=0.02))
    v = tf.get_variable("attn_v", [units], dtype=dtype)
    # project the query and add a time axis for broadcasting
    h = tf.expand_dims(tf.matmul(h, w_q_attn), 1)
    return tf.reduce_sum(v * tf.tanh(e_out_W + h), [2])
def _simple_norm(inp, axis=1):
    """Normalize `inp` so it sums to 1 along `axis`.

    Bug fix: the reduced dimension is re-expanded at `axis` (previously
    hard-coded to 1, which broke any call with axis != 1).
    """
    return inp / tf.expand_dims(tf.reduce_sum(inp, axis), axis)
def _temp_attn(h, e_out_W, e_out, score_sum, time):
    """Temporal attention: penalizes encoder positions attended to before.

    Scores are exponentiated and, after the first decoding step (time > 0),
    divided by the running sum of past scores, then normalized into weights
    for the context vector. Returns (context, updated score_sum).
    """
    query = tf.expand_dims(h, 1)
    raw = tf.squeeze(tf.matmul(query, e_out_W, transpose_b=True), [1])
    # temper by the accumulated scores except on the very first step
    tempered = tf.cond(time > 0,
                       lambda: tf.exp(raw) / (score_sum + 1e-12),
                       lambda: tf.exp(raw))
    weights = _simple_norm(tempered)
    context = tf.squeeze(tf.matmul(tf.expand_dims(weights, 1), e_out), [1])
    return context, score_sum + tempered
def _dec_attn(h, d_hsW, d_hs):
    """Intra-decoder attention over the decoder's own previous states.

    Same dot-product attention as _luong_attn, applied to the projected
    (d_hsW) and raw (d_hs) decoder history instead of encoder outputs.
    Returns the context vector for decoder state `h`.
    """
    query = tf.expand_dims(h, 1)
    scores = tf.squeeze(tf.matmul(query, d_hsW, transpose_b=True), [1])
    weights = tf.nn.softmax(scores)
    context = tf.matmul(tf.expand_dims(weights, 1), d_hs)
    return tf.squeeze(context, [1])
4,698 | df828344b81a40b7101adcc6759780ea84f2c6b4 | from os import read
from cryptography.fernet import Fernet
# --- Key handling --------------------------------------------------------
# A Fernet key is generated once (Fernet.generate_key()) and persisted to
# 'mykey.key'. Regenerating a key on every run would make previously
# encrypted files undecryptable, so the saved key is always reloaded.
with open('mykey.key','rb') as mykey:
    key = mykey.read()

# (Encryption phase, previously run once and left commented out: read
# 'Mailing Client/password.txt', encrypt it with Fernet(key), and write
# the result to 'encryptedpassword.txt'.)

# --- Decryption ----------------------------------------------------------
f = Fernet(key)

with open('encryptedpassword.txt','rb') as encrypted_password_file:
    ciphertext = encrypted_password_file.read()

plaintext = f.decrypt(ciphertext)

with open('decryptedpassword.txt','wb') as decrypted_password_file:
    decrypted_password_file.write(plaintext)
|
4,699 | 5d92c68e0fe7f37d4719fb9ca4274b29ff1cbb43 | #!/usr/bin/python
#The MIT License (MIT)
#
#Copyright (c) 2015 Stephen P. Smith
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import time, math
import RPi.GPIO as GPIO
#import numpy
class max31865(object):
    """Read temperature from a MAX31865 RTD converter over bit-banged SPI
    on a Raspberry Pi (any BCM pins can be used).

    Temperature is computed with the quadratic solution of the
    Callendar-Van Dusen equation (ignoring the 3rd/4th-degree terms),
    falling back to a straight-line approximation below 0 degC. Numpy
    could solve the full polynomial but slows the reading down, so that
    path is left commented out.
    """

    def __init__(self, csPin = 8, misoPin = 9, mosiPin = 10, clkPin = 11):
        """Store the SPI pin assignments (BCM numbering) and configure GPIO."""
        self.csPin = csPin
        self.misoPin = misoPin
        self.mosiPin = mosiPin
        self.clkPin = clkPin
        self.setupGPIO()

    def setupGPIO(self):
        """Configure the four SPI pins and drive them to idle levels
        (CS high = deselected; CLK/MOSI low = SPI mode idle)."""
        GPIO.setwarnings(False)
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(self.csPin, GPIO.OUT)
        GPIO.setup(self.misoPin, GPIO.IN)
        GPIO.setup(self.mosiPin, GPIO.OUT)
        GPIO.setup(self.clkPin, GPIO.OUT)
        GPIO.output(self.csPin, GPIO.HIGH)
        GPIO.output(self.clkPin, GPIO.LOW)
        GPIO.output(self.mosiPin, GPIO.LOW)

    def readTemp(self):
        """Trigger a one-shot conversion and return the temperature in degC.

        Raises:
            FaultError: if the status register reports a cable or
                over/under-voltage fault.
        """
        # Config register value 0xB2:
        # bit 7: Vbias -> 1 (ON)
        # bit 6: Conversion Mode -> 0 (MANUAL)
        # bit 5: 1-shot -> 1 (ON)
        # bit 4: 3-wire select -> 1 (3-wire config)
        # bits 3-2: fault detection cycle -> 0 (none)
        # bit 1: fault status clear -> 1 (clear any fault)
        # bit 0: 50/60 Hz filter select -> 0 (60Hz)
        # (0xD2 would select continuous auto conversion at 60Hz instead.)
        self.writeRegister(0, 0xB2)   # start a one-shot conversion
        time.sleep(.1)                # conversion completes in < 100ms

        out = self.readRegisters(0, 8)  # read all 8 registers in one burst

        conf_reg = out[0]
        [rtd_msb, rtd_lsb] = [out[1], out[2]]
        rtd_ADC_Code = (( rtd_msb << 8 ) | rtd_lsb ) >> 1  # 15-bit ADC code
        temp_C = self.calcPT100Temp(rtd_ADC_Code)

        [hft_msb, hft_lsb] = [out[3], out[4]]
        hft = (( hft_msb << 8 ) | hft_lsb ) >> 1  # high fault threshold

        [lft_msb, lft_lsb] = [out[5], out[6]]
        lft = (( lft_msb << 8 ) | lft_lsb ) >> 1  # low fault threshold

        status = out[7]
        # Status register bits (a 10 Mohm resistor on the breakout board
        # helps detect cable faults):
        # bit 7: RTD High Threshold / cable fault open
        # bit 6: RTD Low Threshold / cable fault short
        # bit 2: Overvoltage / undervoltage fault
        # (bits 5-3 must be explicitly requested; bits 1-0 are don't-care.)
        #
        # Bug fix: the original compared the masked value with 1, e.g.
        # `(status & 0x80) == 1`, which can never be true because masking
        # leaves the bit in its original position -- so the fault checks
        # never fired. Test the masked bits for truthiness instead.
        if status & 0x80:
            raise FaultError("High threshold limit (Cable fault/open)")
        if status & 0x40:
            raise FaultError("Low threshold limit (Cable fault/short)")
        if status & 0x04:
            raise FaultError("Overvoltage or Undervoltage Error")

        return temp_C

    def writeRegister(self, regNum, dataByte):
        """Write one data byte to register `regNum`."""
        GPIO.output(self.csPin, GPIO.LOW)
        addressByte = 0x80 | regNum  # 0x8x selects 'write register value'
        # address byte first, then the data byte
        self.sendByte(addressByte)
        self.sendByte(dataByte)
        GPIO.output(self.csPin, GPIO.HIGH)

    def readRegisters(self, regNumStart, numRegisters):
        """Read `numRegisters` consecutive bytes starting at `regNumStart`
        and return them as a list of ints."""
        out = []
        GPIO.output(self.csPin, GPIO.LOW)
        # 0x (high bit clear) selects 'read register value'
        self.sendByte(regNumStart)
        for byte in range(numRegisters):
            data = self.recvByte()
            out.append(data)
        GPIO.output(self.csPin, GPIO.HIGH)
        return out

    def sendByte(self, byte):
        """Shift one byte out MSB-first, clocking on the CLK pin."""
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            if (byte & 0x80):
                GPIO.output(self.mosiPin, GPIO.HIGH)
            else:
                GPIO.output(self.mosiPin, GPIO.LOW)
            byte <<= 1
            GPIO.output(self.clkPin, GPIO.LOW)

    def recvByte(self):
        """Shift one byte in MSB-first from the MISO pin."""
        byte = 0x00
        for bit in range(8):
            GPIO.output(self.clkPin, GPIO.HIGH)
            byte <<= 1
            if GPIO.input(self.misoPin):
                byte |= 0x1
            GPIO.output(self.clkPin, GPIO.LOW)
        return byte

    def calcPT100Temp(self, RTD_ADC_Code):
        """Convert a raw 15-bit RTD ADC code to degrees C.

        Uses the quadratic (c = 0) solution of the Callendar-Van Dusen
        equation for T >= 0 and the straight-line approximation below 0.
        """
        R_REF = 430.0  # reference resistor on the breakout board
        Res0 = 100.0   # PT100 resistance at 0 degC for the 430-ohm R_REF
        a = .00390830
        b = -.000000577500
        # c = -4.18301e-12 for -200 <= T <= 0 (degC); c = 0 for 0 <= T <= 850
        Res_RTD = (RTD_ADC_Code * R_REF) / 32768.0  # measured PT100 resistance
        # Callendar-Van Dusen with c = 0 reduces to a quadratic in T:
        #   (b*Res0)*T**2 + (a*Res0)*T + (Res0 - Res_RTD) = 0
        temp_C = -(a*Res0) + math.sqrt(a*a*Res0*Res0 - 4*(b*Res0)*(Res0 - Res_RTD))
        temp_C = temp_C / (2*(b*Res0))
        # straight-line approximation, used below for T < 0
        temp_C_line = (RTD_ADC_Code/32.0) - 256.0
        # numpy.roots([c*Res0, -c*Res0*100, b*Res0, a*Res0, Res0 - Res_RTD])
        # would solve the full quartic, but is much slower.
        if (temp_C < 0):
            # should never happen in this application; fall back to the
            # straight-line approximation for negative temperatures
            temp_C = temp_C_line
        return temp_C
class FaultError(Exception):
    """Raised by max31865.readTemp when the status register reports a
    cable fault or an over/under-voltage condition."""
    pass
if __name__ == "__main__":
    # Demo: poll the sensor at ~10 Hz until interrupted with Ctrl-C.
    csPin = 24
    misoPin = 21
    mosiPin = 17
    clkPin = 23
    # renamed from `max`: don't shadow the builtin of the same name
    sensor = max31865(csPin, misoPin, mosiPin, clkPin)
    try:
        while True:
            tempC = sensor.readTemp()
            print(tempC)
            time.sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        # Bug fix: cleanup was skipped if anything other than a
        # KeyboardInterrupt escaped (e.g. a FaultError); `finally`
        # guarantees the GPIO pins are always released.
        GPIO.cleanup()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.