text stringlengths 38 1.54M |
|---|
def calculateDays(climb, slip, height):
    """
    Return the number of days a snail needs to reach `height`,
    climbing `climb` each day and slipping back `slip` each night
    (assumes climb > slip).
    """
    FIRST_DAY = 1
    # BUG FIX: when the snail tops out on day one (height <= climb) the
    # modular formula below could return 0 (e.g. climb=5, slip=2, height=2).
    if height <= climb:
        return FIRST_DAY
    step = climb - slip            # net height gained per full day/night cycle
    remainHeight = height - climb  # distance left after the first day's climb
    if remainHeight % step == 0:
        return remainHeight // step + FIRST_DAY
    # partial cycle remaining -> one extra day
    return remainHeight // step + 1 + FIRST_DAY


if __name__ == '__main__':
    # Guarding the I/O lets the function be imported and tested directly.
    a, b, v = map(int, input().split())
    print(calculateDays(a, b, v))
|
def displayGrid(grid) :
    """
    Print each row of the board followed by a trailing blank line.
    """
    for row in grid['value'] :
        print("".join(row))
    print()
def loadEmptyGrid(grid) :
    """
    Reset `grid` to an empty 5x5 board: blank playable squares on the
    odd (1-based) rows/columns, with '-', '|' and '+' as the dividers.
    """
    board = []
    for r in range(1, 6) :
        line = []
        for c in range(1, 6) :
            if r % 2 == 0 and c % 2 == 0 :
                line.append("+")
            elif r % 2 == 0 :
                line.append("-")
            elif c % 2 == 0 :
                line.append("|")
            else :
                line.append(" ")
        board.append(line)
    grid['value'] = board
    # Mirror the original while-loop counters, which finish one past
    # the last row/column.
    grid['row'] = 6
    grid['column'] = 6
def getInput(player, grid, repeat, comp, comp2) :
    """
    Obtain one move for `player` (1 = X, 2 = O), either from a human via
    unpackInput or from computerTurn when the matching computer flag is
    set (comp2 drives player 1, comp drives player 2). `repeat` marks a
    retry after an occupied square was chosen.
    """
    grid['row'] = 0
    grid['column'] = 0
    if repeat :
        text = "That slot is already taken! Enter another slot: "
    else :
        text = "Enter the row and column in that order (no spaces): "
    if player == 1 :
        if comp2 :
            print("Player 1 (X): ")
            computerTurn(grid, player)
        else :
            if not repeat :
                print("Player 1 (X):")
            while grid['row'] not in [1, 2, 3] or grid['column'] not in [1, 2, 3] :
                unpackInput(grid, text)
                if grid['row'] not in [1, 2, 3] or grid['column'] not in [1, 2, 3] :
                    text = "That slot is out of bounds. Enter another: "
            if grid['value'][fit(grid['row'])][fit(grid['column'])] == ' ' :
                grid['value'][fit(grid['row'])][fit(grid['column'])] = 'X'
            else :
                # BUG FIX: the retry must forward comp2 too; the original
                # passed four arguments to this five-parameter function and
                # raised TypeError whenever a taken square was picked.
                getInput(player, grid, True, comp, comp2)
    elif player == 2 :
        if comp :
            print("Player 2 (O):")
            computerTurn(grid, player)
        else :
            if not repeat :
                print("Player 2 (O):")
            # BUG FIX: use `or` as in the player-1 branch; with `and` a move
            # was accepted as soon as either coordinate alone was valid.
            while grid['row'] not in [1, 2, 3] or grid['column'] not in [1, 2, 3] :
                unpackInput(grid, text)
                if grid['row'] not in [1, 2, 3] or grid['column'] not in [1, 2, 3] :
                    text = "That slot is out of bounds. Enter another: "
            if grid['value'][fit(grid['row'])][fit(grid['column'])] == ' ' :
                grid['value'][fit(grid['row'])][fit(grid['column'])] = 'O'
            else :
                getInput(player, grid, True, comp, comp2)
def unpackInput(grid, text) :
    """
    Read a two-character "rc" answer and store it as integer row/column
    on `grid`, re-prompting recursively on malformed input.

    Returns the (row, column) pair; the values are also left in
    grid['row'] / grid['column'].
    """
    try :
        # A two-character answer such as "12" unpacks into two characters;
        # any other length raises ValueError, as does a non-digit char.
        grid['row'], grid['column'] = input(text)
        grid['row'] = int(grid['row'])
        grid['column'] = int(grid['column'])
        return grid['row'], grid['column']
    except ValueError :
        # BUG FIX: propagate the retry's result; the original dropped it
        # and returned None whenever the first attempt was invalid.
        return unpackInput(grid, "Input is invalid. Enter it correctly: ")
def checkEnd(grid) :
    """
    Check whether a player has won or the game is tied.

    Returns [end, winner]: end is True when the game is over; winner is
    1 (X), 2 (O), or 0 for a tie / game still in progress.
    """
    end = False
    winner = 0
    # Rows: playable cells live at indices 0, 2, 4 of the display grid.
    for i in range(0, 5, 2) :
        temp = [grid['value'][i][x] for x in range(0, 5, 2)]
        if temp == ['X', 'X', 'X'] or temp == ['O', 'O', 'O'] :
            end = True
            if temp == ['X', 'X', 'X'] :
                winner = 1
            else :
                winner = 2
    # Columns.
    for x in range(0, 5, 2) :
        temp = [grid['value'][y][x] for y in range(0, 5, 2)]
        if temp == ['X', 'X', 'X'] or temp == ['O', 'O', 'O'] :
            end = True
            if temp == ['X', 'X', 'X'] :
                winner = 1
            else :
                winner = 2
    # Diagonals.
    a = [grid['value'][x][x] for x in range(0, 5, 2)]
    b = [grid['value'][4 - x][x] for x in range(0, 5, 2)]
    if a == ['X', 'X', 'X'] or a == ['O', 'O', 'O'] or b == ['X', 'X', 'X'] or b == ['O', 'O', 'O'] :
        end = True
        # BUG FIX: the original tested the stale `temp` from the column
        # loop here, so a diagonal win could credit the wrong player.
        if a == ['X', 'X', 'X'] or b == ['X', 'X', 'X'] :
            winner = 1
        else :
            winner = 2
    # Tie: no blank playable squares left.
    space = 0
    for i in grid['value'] :
        for z in i :
            if z == " " :
                space += 1
    if space == 0 :
        end = True
    return [end, winner]
def computerTurn(grid, player) :
    """
    Play one computer move with the priority win => defend => choice.
    """
    mark = ''
    if player == 1 :
        enter = 'X'
        mark = 'O'
    elif player == 2 :
        enter = 'O'
        mark = 'X'
    # BUG FIX: the first check must look for the computer's OWN
    # two-in-a-row (a winning move, per the documented priority); the
    # original ran the defensive check twice and never took its own win.
    done = checkFill(grid, enter, enter)
    if not done :
        # Defend: block the opponent's two-in-a-row.
        done = checkFill(grid, mark, enter)
    if not done :
        priority(grid, enter)
    return
def checkFill(grid, mark, enter) :
    """
    If any row, column or diagonal holds two `mark` pieces plus a blank,
    place `enter` in that blank and return True; otherwise return False.
    Works both offensively (mark is the computer's own piece) and
    defensively (mark is the opponent's).
    """
    done = False
    # Rows (direct 0) then columns (direct 1) of the 3x3 playable cells.
    temp = [[grid['value'][row][column] for column in range(0, 5, 2)] for row in range(0, 5, 2)]
    done = checkOpening(grid, temp, mark, 0, enter)
    if not done :
        temp = [[grid['value'][row][column] for row in range(0, 5, 2)] for column in range(0, 5, 2)]
        done = checkOpening(grid, temp, mark, 1, enter)
    if not done :
        # Main diagonal: cell i sits at board position (2i, 2i).
        count = 0
        empty = 0
        a = [grid['value'][x][x] for x in range(0, 5, 2)]
        for i in range(0, len(a)) :
            if a[i] == mark :
                count += 1
            else :
                empty = i
        if count == 2 and grid['value'][fit(empty + 1)][fit(empty + 1)] == ' ' :
            grid['value'][fit(empty + 1)][fit(empty + 1)] = enter
            done = True
    if not done :
        # Anti-diagonal: cell i sits at board position (4 - 2i, 2i).
        count = 0
        empty = 0
        b = [grid['value'][4 - x][x] for x in range(0, 5, 2)]
        for i in range(0, len(b)) :
            if b[i] == mark :
                count += 1
            else :
                empty = i
        # BUG FIX: the blank anti-diagonal cell is at (4 - 2*empty, 2*empty),
        # i.e. row fit(3 - empty); the original used fit(empty + 1) for both
        # coordinates, which addresses the MAIN diagonal and so checked and
        # filled the wrong square.
        if count == 2 and grid['value'][fit(3 - empty)][fit(empty + 1)] == ' ' :
            grid['value'][fit(3 - empty)][fit(empty + 1)] = enter
            done = True
    return done
def checkOpening(grid, temp, mark, direct, enter) :
    """
    Scan the pre-extracted 3-cell lines in `temp` for one holding two
    `mark` pieces and a blank, and fill that blank with `enter`.

    direct 0 means temp's outer index is a board row; 1 means it is a
    board column. Returns True when a piece was placed.
    """
    done = False
    for i in range(0, len(temp)) :
        count = 0
        empty = 0
        eMark = ''
        for z in range(0, len(temp[i])) :
            if temp[i][z] == mark :
                count += 1
            elif temp[i][z] != '|' :
                # Remember the last non-mark cell; the line is only
                # fillable when that cell turns out to be blank.
                empty = z
                eMark = temp[i][z]
        if count == 2 and eMark == ' ' :
            # fit() maps the 1-based board coordinate onto the 5-wide
            # display grid (1,2,3 -> 0,2,4).
            if direct == 0 :
                grid['value'][fit(i + 1)][fit(empty + 1)] = enter
            elif direct == 1 :
                grid['value'][fit(empty + 1)][fit(i + 1)] = enter
            done = True
            break
    return done
def priority(grid, enter) :
    """
    Place the computer's mark by positional preference: the centre
    square first, then the first free corner, otherwise a random free
    square. Earlier checks guarantee at least one free square exists.
    """
    if grid['value'][2][2] == ' ' :
        grid['value'][2][2] = enter
        return
    # Every empty playable square, expressed in 1-3 board coordinates.
    free = [[int(iFit(r)), int(iFit(c))]
            for r in range(0, 5, 2)
            for c in range(0, 5, 2)
            if grid['value'][r][c] == " "]
    for cell in free :
        if cell in [[1, 1], [1, 3], [3, 1], [3, 3]] :
            grid['value'][fit(cell[0])][fit(cell[1])] = enter
            return
    import random
    pick = free[random.randint(0, len(free) - 1)]
    grid['value'][fit(pick[0])][fit(pick[1])] = enter
def fit(a) :
    """
    Map a 1-based board coordinate onto its display-grid index
    (1, 2, 3 -> 0, 2, 4).
    """
    return (a - 1) * 2
def iFit(a) :
    """
    Inverse of fit: map a display-grid index back to the 1-based board
    coordinate (0, 2, 4 -> 1, 2, 3). Note it returns a float.
    """
    return a / 2 + 1
# --- Tic-tac-toe driver --------------------------------------------------
# grid['value'] holds the 5x5 display board; 'row'/'column' are scratch
# fields reused while reading a move.
grid = {'row': 0, 'column': 0, 'value': []}
player = 0
comp = False
loadEmptyGrid(grid)
choice = ""
text = "Play against player (p) or computer (c)? : "
# 'p' = two humans, 'c' = human vs computer, 's' = computer vs computer.
while choice not in ['p', 'c', 's'] :
    choice = input(text)
    if choice not in ['p', 'c', 's'] :
        text = "Invalid input, enter again: "
text = "Which player goes first? (1 or 2) "
while player not in [1, 2] :
    player = int(input(text))
    if player not in [1, 2] :
        text = "Invalid input, enter again: "
comp2 = False
# comp drives player 2's moves; comp2 additionally drives player 1's.
if choice == 'c' or choice == 's' :
    comp = True
if choice == 's' :
    comp2 = True
# Alternate turns until checkEnd reports a win or a tie.
while not checkEnd(grid)[0] :
    displayGrid(grid)
    getInput(player, grid, False, comp, comp2)
    if player == 1 :
        player = 2
    else :
        player = 1
displayGrid(grid)
print("\nGame Over!")
if checkEnd(grid)[1] == 1 :
    print("Player 1 wins!")
elif checkEnd(grid)[1] == 2 :
    print("Player 2 wins!")
else :
    print("Tie!")
|
from rest_framework import viewsets
from rest_framework.response import Response
from .models import Profile
from .serializers import ProfileSerializer
class ProfileViewSet(viewsets.GenericViewSet):
    '''
    API Endpoint to list current logged in user details
    '''
    def list(self, request, format=None):
        """Return the logged-in user's profile, or a 404 response when
        the request is anonymous or the user has no Profile row."""
        # Bail with 404 if no user is logged in
        if not request.user or not request.user.is_authenticated:
            return Response({"user": None}, status=404)
        try:
            profile = Profile.objects.get(user=request.user)
            return Response(ProfileSerializer(profile).data)
        except Profile.DoesNotExist:
            # Unlikely circumstance (e.g. admin user)
            # BUG FIX: DRF's Response takes `status=`, not `status_code=`;
            # the original raised TypeError on this path.
            return Response({
                "user": None,
                "reason": "User is logged in, but has no profile"
            }, status=404)
|
class DeviceStatus:
    """String constants describing the reported state of a device."""
    DISCONNECTED = "disconnected"
    UNKNOWN = "unknown"
    BUSY = "busy"
    HARDWARE_UNAVAILABLE = "hardware-unavailable"
    READY = "ready"
class RemoteStatus:
    """String constants describing the state of the remote connection."""
    NO_SOCKET = "no-socket"
    DISCONNECTED = "disconnected"
class Message:
    """String constants naming every message type exchanged between the
    device, the remote and the server."""
    # Connection lifecycle
    CONNECT = "connect"
    DISCONNECT = "disconnect"
    RECONNECT = "reconnect"
    HANDSHAKE_REMOTE = "handshake-remote"
    HANDSHAKE_DEVICE = "handshake-device"
    # Device discovery / selection
    DEVICE_LISTING = "device-listing"
    CHOOSE_DEVICE = "choose-device"
    DEVICE_CHOICE_INVALID = "device-choice-invalid"
    DEVICE_CHOICE_SUCCESSFUL = "device-choice-successful"
    DEVICE_STATUS = "device-status"
    DEVICE_REMOTE_CONNECT = "device-remote-connect"
    DEVICE_REMOTE_DISCONNECT = "device-remote-disconnect"
    # Device commands
    TRIGGER_PULSE = "trigger-pulse"
    UPDATE_PULSE = "update-pulse"
    SET_RECORD_DURATION = "set-record-duration"
    UPDATE_OVERRIDES = "update-overrides"
    RESTART_DEVICE = "restart-device"
    AUDIO = "audio"
    ASSIGN_PULSE = "assign-pulse"
    UPDATE_LABEL = "update-label"
import face_recognition
import imutils
import pickle
import time
import cv2
import os
# Haar cascade shipped with OpenCV; build the path portably.
# BUG FIX: the original used "\data\..." — "\d" is an invalid escape
# sequence and backslash paths break on non-Windows systems.
cascPathface = os.path.join(os.path.dirname(cv2.__file__),
                            "data", "haarcascade_frontalface_alt2.xml")
faceCascade = cv2.CascadeClassifier(cascPathface)
# Known-face encodings produced by the enrolment step.
with open(os.path.join('Simple_Face_Recognition', 'face_enc'), "rb") as enc_file:
    data = pickle.loads(enc_file.read())
image = cv2.imread(os.path.join('Simple_Face_Recognition', 'unknown_face', 'images.jpg'))
# face_recognition wants RGB; the Haar detector wants grayscale.
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray,
                                     scaleFactor=1.1,
                                     minNeighbors=3,
                                     minSize=(60, 60),
                                     flags=cv2.CASCADE_SCALE_IMAGE)
encodings = face_recognition.face_encodings(rgb)
names = []
for encoding in encodings:
    matches = face_recognition.compare_faces(data["encodings"], encoding)
    name = "Unknown"
    if True in matches:
        # Vote: the enrolled identity matched most often wins.
        matchedIdxs = [i for (i, b) in enumerate(matches) if b]
        counts = {}
        for i in matchedIdxs:
            name = data["names"][i]
            counts[name] = counts.get(name, 0) + 1
        name = max(counts, key=counts.get)
    names.append(name)
# NOTE(review): `faces` (Haar boxes) and `names` (encoding matches) are
# zipped positionally; their orders are not guaranteed to correspond.
for ((x, y, w, h), name) in zip(faces, names):
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.putText(image, name, (x, y), cv2.FONT_HERSHEY_SIMPLEX,
                0.75, (0, 255, 0), 2)
cv2.imshow("Frame", image)
# BUG FIX: the original had `break` here at module level, which is a
# SyntaxError ('break' outside loop); simply wait for a key and close.
cv2.waitKey(0)
cv2.destroyAllWindows()
|
from simulation import SimState
class SimulationView:
    """
    Wraps a SimState, feeding resources in each step and recording the
    prey/predator history alongside a running step index.
    """

    def __init__(self):
        self.simulation = SimState()
        self.prey = []
        self.predator = []
        self.linear = []
        self.K = 0
        self.collect()

    def supply(self, food_value, water_value):
        # NOTE(review): only the food resource (index 0) is topped up;
        # water_value is currently unused — confirm whether intended.
        self.simulation.resourceLevels[0] += food_value

    def collect(self):
        prey_now, predator_now = self.simulation.get()
        self.prey.append(prey_now)
        self.predator.append(predator_now)
        self.linear.append(self.K)
        self.K += 1

    def step(self, food_value, water_value):
        self.supply(food_value, water_value)
        self.simulation.step()
        self.collect()
|
import json as js
import csv as csv
import scipy as scipy
import numpy as np
import pdb
import string
# Set random state before keras imports
rs = 19683
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from keras.layers.embeddings import Embedding
from keras.optimizers import SGD
from keras.regularizers import l2
from sklearn import cross_validation, preprocessing, metrics
import theano
import matplotlib.pyplot as plt
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from foodio import getdata, writedata, LemmaTokenizer
def nnk(X, y_uniques, lr=0.1):
    """
    Build a 512-256-64 fully-connected softmax classifier.

    X           - training matrix; only X.shape[1] (feature count) is used.
    y_uniques   - iterable of class labels; its length sets the output width.
    lr          - SGD learning rate.
    Returns the compiled Sequential model.
    """
    model = Sequential()
    # The first layer must declare the expected input width.
    model.add(Dense(512, input_dim=X.shape[1], init='he_normal'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(256, init='he_normal', input_dim=512))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(64, init='he_normal', input_dim=256))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(len(y_uniques), init='he_normal', input_dim=64))
    model.add(Activation('softmax'))
    # BUG FIX: honour the lr parameter; the original hard-coded lr=0.1
    # so the argument was silently ignored.
    sgd = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(loss='binary_crossentropy', optimizer=sgd)
    return model
# Batch size = 100 seems to have stabilized it
#model.fit(X_train, y_train, nb_epoch=100, batch_size=1000)
# score = model.evaluate(X_test, y_test, batch_size=1000)
# preds = model.predict(X_test, batch_size=1000, verbose=1)
# print score
def lstmkeras(X, numfeats, y_uniques, lr=0.1) :
    """
    Build a bidirectional-LSTM classifier as a Keras Graph model:
    embedding -> forward + backward LSTM -> dropout -> softmax.

    X         - input sequence length.
    numfeats  - vocabulary size for the embedding.
    y_uniques - number of output classes.
    lr        - SGD learning rate.
    Returns the compiled Graph model.
    """
    graph = Graph()
    graph.add_input(name='input', input_shape=(X,), dtype=int)
    graph.add_node(Embedding(numfeats, 16, input_length=X),
                   name='embedding', input='input')
    graph.add_node(LSTM(20), name='forward', input='embedding')
    graph.add_node(LSTM(20, go_backwards=True), name='backward', input='embedding')
    graph.add_node(Dropout(0.5), name='dropout', inputs=['forward', 'backward'])
    graph.add_node(Dense(y_uniques, activation='softmax'), name='softmax', input='dropout')
    graph.add_output(name='output', input='softmax')
    optimizer = SGD(lr=lr, decay=1e-6, momentum=0.9, nesterov=True)
    graph.compile(optimizer, {'output': 'categorical_crossentropy'})
    return graph
def writetest(idx, Xpreds, fil='NN.512.256.64.csv') :
    """
    Write (id, prediction) pairs to `fil` as CSV, one pair per row.

    idx    - row identifiers (coerced to int).
    Xpreds - predictions, written verbatim alongside each id.
    """
    import csv
    csv.field_size_limit(1000000000)
    # BUG FIX: close the output file deterministically; the original
    # leaked the handle and relied on GC to flush buffered rows.
    # newline='' is the csv-module-correct way to open the file.
    with open(fil, 'w', newline='') as out:
        outwriter = csv.writer(out, delimiter=",")
        for row in range(len(Xpreds)) :
            outwriter.writerow([int(idx[row]), Xpreds[row]])
def iter_minibatches(traindata, chunksize=1000) :
    """
    Yield (X_chunk, y_chunk) minibatches covering `traindata` in order.

    BUG FIX: the original never advanced chunkstartmarker (an infinite
    loop) and never yielded, so it returned None instead of acting as a
    generator.
    """
    chunkstartmarker = 0
    while chunkstartmarker < len(traindata) :
        chunkrows = range(chunkstartmarker, chunkstartmarker + chunksize)
        # NOTE(review): getrows is not defined in this file — presumably a
        # data-access helper; confirm its import before using this function.
        X_chunk, y_chunk = getrows(chunkrows)
        yield X_chunk, y_chunk
        chunkstartmarker += chunksize
if __name__ == '__main__':
    print("Importing Data")
    #X, y, Xtest, unique_cuisines, test_indices = getdata() # import the data
    # Split into training and validation sets
    rs = 19683
    # X_train, X_test, y_train, y_test = \
    #     cross_validation.train_test_split(X, y, \
    #     test_size=0.4, random_state=rs)
    print("Training classifier")
    # Train the classifier and fit to training data
    # clf2 = nnk(X_train,unique_cuisines,lr=0.1)
    # NOTE(review): 39744 / 2884 / 20 appear to be sequence length,
    # vocabulary size and class count for this dataset — confirm against
    # the one-hot CSVs before reuse.
    clf2 = lstmkeras(39744, 2884, 20, lr=0.1)
    # f = clf2.fit(X_train, y_train, nb_epoch=30,
    #     batch_size=100, validation_split=0.15,
    #     show_accuracy=True, verbose=1)
    # We know rows in training matrix = 37994
    # Bring in 1000 at a time to train
    # also bring in corresponding y values
    nb_epochs = 30
    for e in range(nb_epochs) :
        print("epoch %d" % (e))
        print("------")
        print("------")
        # Out-of-core training: stream 100-row slices of the one-hot CSVs
        # in a shuffled order of starting offsets.
        for rowstart in np.random.permutation(np.linspace(0, 38000, 381)) :
            print("Current row = %d" % (rowstart))
            X_train = np.genfromtxt('one.hot.training.ingredients.csv',
                                    delimiter = ',',
                                    skip_header = int(rowstart),
                                    max_rows = 100)
            y_train = np.genfromtxt('one.hot.training.classes.csv',
                                    delimiter = ',',
                                    skip_header = int(rowstart),
                                    max_rows = 100)
            clf2.fit({'input': X_train, 'output': y_train},
                     batch_size=100,
                     nb_epoch=1)
    # print("Making predictions on validation set")
    # # Make predictions on validation data
    # predictions = clf2.predict(X_test, batch_size=100, verbose=1)
    # # Take max value in preds rows as classification
    # pred = np.zeros((len(X_test)))
    # yint = np.zeros((len(X_test)))
    # for row in np.arange(0,len(predictions)) :
    #     pred[row] = np.argmax(predictions[row])
    #     yint[row] = np.argmax(y_test[row])
    # print("Classifier Accuracy = %d"%(metrics.accuracy_score(yint,pred)))
    #####
    # now to test
    #####
    print("Testing classifier on Test data")
    print("Re-train with full training set")
    # clf2 = nnk(X,unique_cuisines,lr=0.1)
    # f = clf2.fit(X, y, nb_epoch=30, batch_size=25,
    #     validation_split=0.15, show_accuracy=True)
    # print("Make predictions on test set")
    # predictions = clf2.predict(Xtest, batch_size=25, verbose=1)
    Xtest = np.genfromtxt('one.hot.testing.ingredients.csv',
                          delimiter = ',')
    predictions = clf2.predict(Xtest, batch_size=100, verbose=1)
    # Take max value in preds rows as classification
    pred = np.zeros((len(Xtest)))
    for row in np.arange(0, len(predictions)) :
        pred[row] = np.argmax(predictions[row])
    # Class labels in the order the one-hot encoding used (sorted below).
    unique_cuisines = {'brazilian',
                       'british',
                       'cajun_creole',
                       'chinese',
                       'filipino',
                       'french',
                       'greek',
                       'indian',
                       'irish',
                       'italian',
                       'jamaican',
                       'japanese',
                       'korean',
                       'mexican',
                       'moroccan',
                       'russian',
                       'southern_us',
                       'spanish',
                       'thai',
                       'vietnamese'}
    unique_cuisines = sorted(list(unique_cuisines))
    newcuisines = []
    for row in np.arange(0, 20) :
        newcuisines.append(unique_cuisines[row])
    # Map each argmax index back to its cuisine name.
    predstr = []
    for row in np.arange(0, len(predictions)) :
        predstr.append(newcuisines[int(pred[row])])
    print("Storing predictions")
    # NOTE(review): test_indices is undefined as written — it was produced
    # by the commented-out getdata() call above; this line raises NameError
    # until that call is restored.
    writetest(test_indices, predstr, 'LSTM.100neurons.30epochs.csv')
"""
Routines related to sending a list of tiddlers out
to the web, including sending those tiddlers and
validating cache headers for list of tiddlers.
These are important because this is what sends
a TiddlyWiki out.
"""
from sha import sha
from tiddlyweb.serializer import Serializer
from tiddlyweb.web.util import \
get_serialize_type, http_date_from_timestamp, datetime_from_http_date
from tiddlyweb.web.http import HTTP404, HTTP304
def send_tiddlers(environ, start_response, bag):
    """
    Send the tiddlers in `bag` out over the web in a negotiated
    representation. Often, but not always, a wiki.
    """
    last_modified = None
    etag = None
    bags_tiddlers = bag.list_tiddlers()
    download = environ['tiddlyweb.query'].get('download', [None])[0]
    if not bags_tiddlers:
        raise HTTP404('No tiddlers in container')
    # May raise HTTP304 when the client's cached copy is still fresh.
    last_modified, etag = _validate_tiddler_list(environ, bags_tiddlers)
    serialize_type, mime_type = get_serialize_type(environ)
    serializer = Serializer(serialize_type, environ)
    response = [('Content-Type', mime_type), ('Cache-Control', 'no-cache')]
    if serialize_type == 'wiki' and download:
        response.append(('Content-Disposition',
                'attachment; filename="%s"' % download))
    if last_modified:
        response.append(last_modified)
    if etag:
        response.append(etag)
    output = serializer.list_tiddlers(bag)
    start_response("200 OK", response)
    return [output]
def _validate_tiddler_list(environ, tiddlers):
    """
    Build Last-Modified and Etag header tuples for `tiddlers`, raising
    HTTP304 when the client's conditional request headers show its
    cached copy is still current.

    Returns (last_modified, etag) header tuples.
    """
    last_modified_number = _last_modified_tiddler(tiddlers)
    last_modified_string = http_date_from_timestamp(last_modified_number)
    last_modified = ('Last-Modified', last_modified_string)
    # The etag combines a digest of the member titles with the newest
    # timestamp, so it changes when membership OR content changes.
    etag_string = '%s:%s' % (_sha_tiddler_titles(tiddlers), last_modified_number)
    etag = ('Etag', etag_string)
    incoming_etag = environ.get('HTTP_IF_NONE_MATCH', None)
    if incoming_etag:
        # If-None-Match takes precedence over If-Modified-Since.
        if incoming_etag == etag_string:
            raise HTTP304(incoming_etag)
    else:
        incoming_modified = environ.get('HTTP_IF_MODIFIED_SINCE', None)
        if incoming_modified and \
                (datetime_from_http_date(incoming_modified) >= \
                datetime_from_http_date(last_modified_string)):
            raise HTTP304('')
    return last_modified, etag
def _sha_tiddler_titles(tiddlers):
digest = sha()
for tiddler in tiddlers:
if tiddler.recipe:
container = tiddler.recipe
else:
container = tiddler.bag
digest.update(container.encode('utf-8') + tiddler.title.encode('utf-8'))
return digest.hexdigest()
def _last_modified_tiddler(tiddlers):
return str(max([int(tiddler.modified) for tiddler in tiddlers]))
|
import cv2
import face_recognition
import numpy as np
import threading
import time
def find_faces():
    """
    Background worker: repeatedly run face detection on the shared
    `frame` and publish the detected locations through the shared
    `last_filter` global, about once per second.
    """
    global frame
    global last_filter
    while True:
        # Snapshot the frame so detection works on a stable copy while
        # the main thread keeps overwriting `frame`.
        image = np.array(frame)
        last_filter = face_recognition.face_locations(image)
        # print(faces)
        # last_filter = gen_filters(image, faces)
        # Throttle: detection is slow, so only refresh once per second.
        time.sleep(1)
def apply_filter(last_filter, frame):
    """
    Mark two corner points of each detected face location by setting
    those pixels to [255, 0, 0] (blue in OpenCV's BGR order) in place,
    then return the frame.
    """
    print(frame.shape)
    for face in last_filter:
        # face_recognition locations are (top, right, bottom, left);
        # paint the (top, right) and (bottom, left) pixels.
        for coord in ((face[0], face[1]), (face[2], face[3])):
            frame[coord] = [255, 0, 0]
    return frame
if __name__ == '__main__':
    cv2.namedWindow("video")
    video = cv2.VideoCapture(0)
    ir = cv2.VideoCapture(2)
    last_filter = []
    # BUG FIX: find_faces loops forever, so the original's filters.join()
    # after the display loop hung the process on exit. Run the detection
    # worker as a daemon so it dies with the main thread, and drop join().
    filters = threading.Thread(target=find_faces, daemon=True)
    rval, frame = video.read()
    filters.start()
    while rval:
        rval, frame = video.read()
        img = apply_filter(last_filter, frame)
        cv2.imshow("video", img)
        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
    cv2.destroyWindow("video")
|
kuakua = [
"为了表示我对资本市场的信心,我打算现在就去几站路外的银行取几百万投资股市,现在唯一的问题就只剩等公交车了。 ";
"学校教学楼厕所放镜子你以为让你整理仪容!?你错了,是为了让你知道,人丑就要多读书。 ";
"如果你每天干的活明显多于别人,但自己很高兴还感觉得到器重,那么与其说你很有才干,不如说你的领导很会管人。 ";
"“那些出身好能力强的人努力是为了成功,因为人家有可能会成功。我们努力是为了啥呢?” ";
"其实找谁做女朋友都差不多,都是在不停地争吵。只不过一些人是和比较漂亮的女孩子在争吵。 ";
"加油,你是最胖的。 ";
"你全力做到的最好,可能还不如别人的随便搞搞。 ";
"你的个性,通常源于无知。 ";
"你只需看着别人精彩,老天对你另有安排。 ";
" 上帝给了你丑的外表跟低的智商,就是怕你不协调 ";
"谁说女追男隔层网,除非那男的本来就对你有好感,不然隔的基本都是铁丝网,还是带电的那种。 ";
"凡是说“钱不需要那么多,只要快乐不在乎钱多钱少”的人,都是有钱人。 ";
"你无法叫醒一个不回你消息的人,但是红包能。 ";
"女生的冷暖自知什么的,在长得好看面前弱爆了。 ";
"承认吧,你所有的宽容只是因为你懦弱。 ";
"知道为什么自古红颜多薄命吗?因为没有人在意丑的人活多久。 ";
"你以为他是不会表达,其实他就是不爱你。 ";
"在哪里跌倒,就在哪里滑稽。 ";
"大部分成功靠得既不是厚积薄发的努力,也不是戏剧化的机遇,而是早就定好的出身和天赋。 ";
"体重不过百,不是平胸就是矮,说的好像过百就是大好身材一样。 ";
"喜欢一个人就勇敢表白,也许你还能知道自己是几号备胎。 ";
"认识的人越多,越发现还是有钱人里好人多。 ";
"凡是说“钱不需要那么多,只要快乐不在乎钱多钱少”的人,都是有钱人。 ";
"考场上越紧张,想起来的歌词就越多。 ";
"哪怕抛掉出身的因素,我觉得你也未必干得过别人。 ";
"丑小鸭变为白天鹅不是自己有多努力,而是它的父母就是白天鹅。 ";
"女追男隔层纱,除非那男的本来就对你有好感,不然隔的基本都是铁丝网,还是带电的那种。 ";
"你怕你的孩子输在起跑线上,可是你的孩子在娘胎里的时候就离起跑线很远了,因为你就是你孩子的起跑线。 ";
"我去找心理医生看抑郁症,他听完我的陈述,说道:如果你说的都真的话,为什么你不去自杀呢?你这不是抑郁症。抑郁症是看事情悲观,本来好,却认为不行。你是本来就过得不如别人。 ";
"哪怕失败了九十九次,我也要努力凑个整。 ";
"我有个朋友,在几年前停止了抱怨和自怨自艾,开始努力改变自己。到今天,他的物质生活和精神状态都没有什么改善。 ";
"许多人因为自己没见识,没拥有过美好的生活,就把别人所描述的最习以为常的生活当成是在装B。 ";
"你只看到他们挥金如土,却看不到他们背后日进斗金 ";
"大部分成功靠得既不是厚积薄发的努力,也不是戏剧化的机遇,而是早就定好的出身和天赋。 ";
"年轻的男孩只希望自己的女朋友是美丽的。而随着他不断长大,阅历的不断积累,他终归会认识到:美丽并不是选择伴侣的全部,性感也很重要。 ";
"找到男朋友后一定要对他好一点,不要欺负他、伤害他、有负于他,毕竟他已经瞎了。 ";
"你以为有了钱就会像你想象中那样快乐吗?不,你错了。有钱人的快乐,你根本想象不到。 ";
"很多人不断地规划自己的人生,每天压力很大。其实不管你怎么过,都会后悔的。想想你这前几十年就明白了。 ";
"发现很多混得不好的人看得都很开。也不知道他们是因为看得透彻而不屑于世俗的成功,还是因为不成功而不得不看得开。错把放纵当潇洒,把颓废当自由,把逃避责任当做追求自我价值。不过是懒,怕吃苦,哪来那么多好听的理由。幸亏当事者迷,不然真的看清事实真相,很多人是承受不来的。 ";
"普通人一生的四个阶段:心比天高的无知快乐与希望、愧不如人后的奋斗与煎熬、毫无回报的愤懑与失望、坦然的平凡和颓废。你走到哪一步了? ";
"“就算全世界反对,我也要坚持自己的梦想”你谁呀,能轮到全世界反对? ";
"回首往事,我发现自己失去了很多宝贵的东西。但我并不难过,因为我知道,以后会失去的更多。 ";
"你每天熬夜的时候有没有想过,你喜欢的那个人早就已经睡了,而且梦里没有你,醒来也不会爱你。 ";
"失败并不可怕,可怕的是你还相信这句话。 ";
"为什么要怕鬼,害你的全是人 ";
"许多人都说建立自信很难,其实主要问题是没钱。 ";
"别总是把事情想的太糟糕,因为还有更糟糕的事情等着你。 ";
"只要你每天坚持自习,认真刻苦,态度端正,忍受孤独,最终的胜利肯定是属于那些考场上发挥好的人。 ";
"有些事不是努力就可以改变的,五十块的人民币设计的再好看,也没有一百块的招人喜欢。 ";
"小时候以为有钱人都很跋扈,心都是黑的。长大后才发现,很多有钱人都懂得很多,经历很丰富,做事儿很认真,为人很宽厚,理性,比穷人更好相处。 ";
"一场说走就走的旅行归来后,除了该做的事情被拖延的更久了,什么都没有改变。 ";
"“长得丑但身材好是一种什么体验?”“背看倾国倾城,正看忧国忧民。” ";
"有些人感慨:自己岁数不小了,还没有成熟起来。其实你们已经成熟起来了,你们成熟起来就这样。 ";
"有些事不是努力就可以改变的,五十块的人民币设计的再好看,也没有一百块的招人喜欢。 ";
"你永远不知道他在和你聊天的同时,隔着屏幕还在和多少女生聊天。 ";
"有多努力,最后就会有多少无力感,太多了,都要溢出来了。 ";
"别抱怨了,没有什么比现实生活更公正的了。 ";
"有多努力,最后就会有多少无力感,太多了,都要溢出来了。 ";
"年轻人穷不可怕,慢慢你就习惯了,你会穷很久的。 ";
"我问过很多女大学生这个问题:出国旅游和用名牌,真的这么重要吗?第一种回答是:不重要,但男朋友愿意而且能够给我买,很重要。第二种回答是:对啊,很重要啊! ";
"我发现不管怎么纠结和折腾,生活还是那个样子。 ";
"书山有路勤为径,怪你没有富贵命。 ";
"小时候以为有钱人都很跋扈,心都是黑的。长大后才发现,很多有钱人都懂得很多,经历很丰富,做事儿很认真,为人很宽厚,理性,比穷人更好相处。 ";
"一些人追求卓越是因为他们天性如此,且能力出众。很多其他人却也喜欢跟着凑热闹。 ";
"陪你聊几天你就喜欢他,谈恋爱还没几个月就想过一辈子,交个朋友稍微对你好点就想来往一生,难怪你的怨气那么重、悲伤那么多,这都是天真的代价。 ";
"女生有这样的梦想:某一天辛苦工作之后,在客户处认识一位优秀男人。他自信、Self-centered、成熟、幽默,又对人很caring。你们坠入爱河。他对你说:别工作了,我帮你开家咖啡馆吧!于是你的生活变了,每天在充满情调的咖啡馆里钻研咖啡甜点。你又开始相信爱情了。后来你发现男朋友开了十几家咖啡馆了。 ";
"一些人的痛苦在于:眼界打开了,本事却没跟上来。 ";
"你费尽力气想明白了一些事情,但对解决它们一点儿帮助也没有。 ";
"爱迪生会告诉你:天才是由百分之一的灵感加百分之九十九的汗水。但是他绝对不会告诉你他名下的大部分发明都不是来自于自己创造而是手下员工的专利。 ";
"女追男,隔层纱;男追女,隔房隔车隔她妈。 ";
"你只知道人家化妆比你好看,却不知道,她们卸了妆,不仅比你好看,皮肤还吹弹可破。 ";
"如果有人问我:那些艰难的岁月你是怎么熬过来的? 我想我只有一句话回答:我有一种强大的精神力量支撑着我,这种力量名字叫“想死又不敢”。 ";
"准备恶语伤人的时候我们便会加上一句“都是为了你好”。 ";
"真正努力过的人,就会明白天赋的重要。 ";
"假如今天生活欺骗了你,不要悲伤,不要哭泣,因为明天生活还会继续欺骗你。 ";
"快乐分享错了人,就成了显摆;难过分享错了人,就成了矫情,甚至一个笑话 ";
"善良没用,你得漂亮,还得有钱。 ";
"我追逐自己的梦想,别人说我幼稚可笑,但我坚持了下来。最后发现,原来还真是我以前幼稚可笑。 ";
"其实你没那么重要,也没那么多人在意你。 ";
"四个字形容人生重在参与 ";
"我们所咒骂的那些品德败坏的人,其实不过是直接或间接地损害了我们的利益。 ";
"要是有个地方能出卖自己的灵魂换取物质享受就好了。 ";
"有得到就有失去,有人得到了财富,但也失去了烦恼~~~~~~~~~ ";
"你只知道人家化妆比你好看,却不知道,她们卸了妆,不仅比你好看,皮肤还吹弹可破。 ";
"你若好看或有钱,自然就会发现社交的乐趣。 ";
"第一名和第二名通常是对手,但倒数第一名和倒数第二名通常是朋友。 ";
"你不会真的以为你高中时候没有早恋是因为老师和家长禁止吧?! ";
"死并不可怕,怕的是再也不能活了。 ";
"不经历风雨,怎么迎接暴风雨。暴风雨之后,不仅没看到彩虹,还感冒了 ";
"秋天是收获的季节。别人的收获是成功与快乐,你的收获是认识到并不是每个人都会成功与快乐。 ";
"我有一些漂亮又受过教育的姐妹,本来有机会嫁给官富二代,但很多被那些长得不错、有名校高学历、刚毕业在知名外企工作、加班到很晚还健身的男生所吸引。觉得人家上进又有潜力。五六年后,她们悔得肠子都青了。 ";
"我这张脸只要遮住两个地方就完美了,一处是右半边脸,一处是左半边脸。 ";
"如果不能有效的反击,所有的宽容都是懦弱。 ";
"你若好看或有钱,自然就会发现社交的乐趣。 ";
"女孩子自信一点才漂亮,但是漂亮的女孩子谁不自信啊! ";
"体重不过百,不是平胸就是矮,说的好像过百就是大好身材一样。 ";
"世上无难事,只要肯放弃。 ";
"充电五小时,通话两分钟~。 ";
"努力不一定成功,但是不努力一定很轻松。 ";
"有时候,别看我一脸平和,其实内心正揪着你的衣领,暴打你的狗头 ";
"长得丑就是病,不然整形医院怎么叫医院? ";
"青年靠什么混日子?头等青年靠出身,二等青年靠关系,三等青年靠天资,四等青年靠努力,五等青年耍文艺,六等青年打游戏,穷游,看美剧。 ";
"有些女生觉得说自己是吃货能显得可爱,其实并没有这样的效果。 ";
"没钱没事业的人,才有时间去提高自己的人生境界。 ";
"人丑就要多读书,反正你也没别的事做了。 ";
"我没见过一个煤矿工人靠挖煤又快又多当上了煤老板。 ";
"也没有日渐生情的耐心 ";
"难受的时候摸摸自己的胸,告诉自己是汉子,要坚强。 ";
"青年靠什么混日子?头等青年靠出身,二等青年靠关系,三等青年靠天资,四等青年靠努力,五等青年耍文艺,六等青年打游戏,穷游,看美剧。 ";
"如果你很忙,除了你真的很重要以外,更可能的原因是:你很弱,你没有什么更好的事情去做,你生活太差不得不努力来弥补,或者你装作很忙,让自己显得很重要。——史蒂夫乔布斯 ";
"自由从来不是什么理所当然的东西,而是一项需要极高成本的特权。 ";
"每次看到穷游日志的感受都是:那么穷就别TM出去浪了 ";
"善良没用,你得漂亮,还得有钱。 ";
"你听过的情话不都是为上床做铺垫吗? ";
"只要你每天坚持自习,认真刻苦,态度端正,忍受孤独,最终的胜利肯定是属于那些考场上发挥好的人。 ";
"从前车马很慢,书信很远,一生只够爱一个人,但是能纳很多妾。 ";
"书上说,不管是爱迪生还是林肯或者华盛顿,他们小时候不是砍树就是丧父。就算这些是真的,别人砍树和丧父之后取得了成功,你也可以一样吗? ";
"朋友,你听过相由薪生吗? ";
"有些年轻人,一毕业就到某些大型国企和机关中工作。每天没什么事儿,就是吃饭喝酒,福利好得不得了。人生还没有奋斗过就开始养老,自己的理想被丢在一旁,用民脂民膏来享受特权。对于这样的年轻人,我只想问你们四个字:哪投简历?b ";
"那些能够彼此帮忙托关系的友谊,比那些天天在一起就是扯淡吃喝旅游的友谊,要强得多。 ";
"所谓人生理想,就是支持你捱过每天的无聊与痛苦,最终没有实现的那个愿望。有了它,临死前是无穷的空虚和痛苦;没有它,这辈子都熬不下来。 ";
"只要我肯努力,没什么事情是我搞不砸的。 ";
"如果她有什么坏毛病,一定要纵着她 ";
"很多人不断地规划自己的人生,每天压力很大。其实,不管你怎么过,都会后悔的,想想你这前几十年就明白了。 ";
"以前觉得靠关系的人一定很无能,接触后发现人家样样比我强。 ";
"人还是要看内在美,比如说一张崭新的五十元人民币和一张破旧的一百块钱人民币,我还是选择后者。 ";
"假如今天生活欺骗了你,不要悲伤,不要哭泣,因为明天生活还会继续欺骗你。 ";
"不要看别人表面上一帆风顺,实际上他们背地里,也是一帆风顺。 ";
"喜欢一个人就去表白,万一成备胎了呢 ";
"有些人努力了一辈子,就是从社会的四流挤入了三流。 ";
"其实找谁做女朋友都差不多,都是在不停地争吵。只不过一些人是和比较漂亮的女孩子在争吵。 ";
"只有能力强会被当成纯技术人员;而光会社交拍马又会被认为没有真才实学;所以,要想在单位中脱颖而出,最重要的是有关系。 ";
"只要你每天坚持自习,认真刻苦,态度端正,忍受孤独,最终的胜利肯定是属于那些考场上发挥好的人。 ";
"生活会让你苦上一阵子,等你适应以后,再让你苦上一辈子。 ";
"我每天拼了命的努力就是为了向那些看不起我的人证明他们是对的。 ";
"人生就是一个起落落落落落落落落的过程。 ";
"一些女生在背后议论长得美家境好的女生,说对方生活轻浮,道德不高。她们说得有根有据,也很有道理,但听起来总是显得酸溜溜的。 ";
"对女人强吻表白一般会有两种结果,一种是啪,一种是啪啪啪。这就是屌丝和高富帅的差距。 ";
"只有拥有找男女朋友的能力和很多备胎,才能真正享受单身生活。 ";
"女追男隔层纱,除非那男的本来就对你有好感,不然隔的基本都是铁丝网,还是带电的那种。 ";
"我的梦想就是一手拿着相机,一手拉着你,四处旅行。每天日落时的歇脚处都是我们的家。然后在三十多岁的时候还在初级职位上拿着微薄的薪水,和刚毕业的年轻人一起被呼来喝去。 ";
"我们所咒骂的那些品德败坏的人,其实不过是直接或间接地损害了我们的利益。 ";
"没有钱包的充实,哪来内心的宁静。很多人都本末倒置了。 ";
"弱者都是这样,觉得生活中的无奈太多,自己又无力改变。 ";
"趁着年轻多出来走走看看,不然你都不会知道呆在家里有多爽。 ";
"很多时候别人对你好,并不是因为别人喜欢你,而是他喜欢对别人好。 ";
"好身材的因素很多。不是节食和锻炼就能拥有好身材的。 ";
"也没有日渐生情的耐心 ";
"准备恶语伤人的时候我们便会加上一句“都是为了你好”。 ";
"“老师您好,请问像我这样的,没有背景的北大毕业生应该如何定位自己?”“社会底层群众中受教育程度最高的一批人。” ";
"30.喜欢就去表白,不然你不会知道自己长得多丑。 ";
"找到男朋友后一定要对他好一点,不要欺负他、伤害他、有负于他,毕竟他已经瞎了。 ";
"生活会让你苦上一阵子,等你适应以后,再让你苦上一辈子。 ";
"比你优秀的人还在努力,你努力还有什么用。 ";
"你以为你两关系很好吗?不从别人的嘴巴里听到一些话,你永远不知道朋友怎么评价你。 ";
"你每天熬夜的时候有没有想过,你喜欢的那个人早就已经睡了,而且梦里没有你,醒来也不会爱你。 ";
"在我心里一直觉得中国现代文坛的绝代双骄是“刘墉”和“林清玄”。 ";
"没事,笑一笑,明天可能还不如今天呢 ";
"经过十年不断的努力和奋斗,我终于从一个懵懂无知的少年变成了一个懵懂无知的青年。 ";
"年轻的男孩只希望自己的女朋友是美丽的。而随着他不断长大,阅历的不断积累,他终归会认识到:美丽并不是选择伴侣的全部,性感也很重要。 ";
"喜欢的人一定要去告白啊,不被拒绝一下你还真当自己是仙女了。 ";
"学校教学楼厕所放镜子你以为让你整理仪容!?你错了,是为了让你知道,人丑就要多读书。 ";
"也没有日渐生情的耐心 ";
"悲剧分为两种,一种叫悲剧,一种叫没钱。 ";
"那些一直嚷着【老子做的是自己,关别人屁事,爱看不看】的人,你真的觉得是别人的问题吗?别天真了,要想活下去处的好,就该改变自己,否则活该被孤立死。 ";
"快乐分享错了人,就成了显摆;难过分享错了人,就成了矫情,甚至一个笑话 ";
"其实找谁做女朋友都差不多,都是在不停地争吵。只不过一些人是和比较漂亮的女孩子在争吵。 ";
"你注意过大街上的乞丐吗?衣衫褴褛,目光呆滞,有的匍匐在地,磕头作揖,乞求怜悯。但曾有人见过这样一个乞丐,他年老体衰,却尽量地挺着那原本弯曲的身子,目光炯炯有神,表明他是乞丐的唯一标志——就是那只放在地上的里面盛着零钱的碗。有人说,他物质贫穷.但心灵高贵。其实,他只是腿坐麻了。 ";
"比三观更重要的是五官。 ";
"如果一个人秒回了你,也只能说明这个人在玩手机而已。 ";
"00后的,我们都不小了,再多4年就合法了,找到适合的就不要因为一点小事分手了,不要像那些90后,那么老了还没有对象,知道吗? ";
"“那些出身好能力强的人努力是为了成功,因为人家有可能会成功。我们努力是为了啥呢?”“为了活下去。” ";
"性是复杂的。 不,人是「复杂」的,性是啪啪啪。 ";
"爱情开始时都差不多。但当两个人平淡到左手牵右手时,是加班挤地铁还房贷然后给他农村父母寄钱假期在屋里大眼瞪小眼,还是开小车朝九晚五住大房子周末采购装点自己的小家出国旅游,区别就非常大了。 ";
"一提到钱,大家就不是那么亲热了。 ";
"孜孜不倦熬夜,勤勤恳恳护肤。 ";
"我无辣不欢,有你,无辣也行。 ";
"有人出现在你的生命里,是为了告诉你,你真好骗。 ";
"心灵鸡汤中说‘上帝是公平的,他在给你关上了门的同时还为你打开了一扇窗。’可是上帝真的是公平的吗?事实分明是‘上帝在给你关上门的同时还夹住了你的脑袋。’ ";
"年轻的男孩只希望自己的女朋友是美丽的。而随着他不断长大,阅历的不断积累,他终归会认识到:美丽并不是选择伴侣的全部,性感也很重要。 ";
"有的人是来爱你的,有的人就是来给你上课的。 ";
"物以类聚人以穷分。有钱人终成眷属。 ";
"“假如我又丑又穷又矮又笨,你还会爱我吗?”“别他妈假如了,你本来就又丑又穷又矮又笨。” ";
"人作的程度不能超过自己的颜值。 ";
"天空飘来五个字:怎么都不行。 ";
"没事,笑一笑,明天可能还不如今天呢 ";
"认识不少人,他们都通过改变成功的定义获得了成功。 ";
"一个姑娘的介绍:思想上的女流氓,生活中的好姑娘,然而给我的感觉是:心思活络的丑逼。 ";
"有些人没有好的出身,聪明的脑子和善于与人打交道的能力,妄图通过单纯的努力获得成功,就和只靠面粉就想包出好吃的饺子一样可笑。 ";
"现在的人天天手机不离手,要回你消息早就回了。 ";
"尽量不要和比自己生活质量高的人起冲突,不论你是假装若无其事、努力争辩或者破口大骂,在别人看来,都是自己很可笑并且已经输了。 ";
"爱情就是,如果没有更好的选择了,我才陪你到天荒地老。 ";
"假如说你被骗了,别伤心~别难过~多被骗几次就习惯了。 ";
"坏女人爱男人的钱和权;好女人爱男人因有钱和有权儿产生的自信、宽大、精力充沛、乐观进取。还好,殊途同归。 ";
"有缘千里来相会,无缘倒也省话费 ";
"小时候的我们很快乐。因为小时候穷和丑没有现在那么明显 ";
"我的梦想就是一手拿着相机,一手拉着你,四处旅行。每天日落时的歇脚处都是我们的家。然后在三十多岁的时候还在初级职位上拿着微薄的薪水,和刚毕业的年轻人一起被呼来喝去。 ";
"人丑就要多读书,反正你也没别的事做了。 ";
"我问过很多女大学生这个问题:出国旅游和用名牌,真的这么重要吗?第一种回答是:不重要,但男朋友愿意而且能够给我买,很重要。第二种回答是:对啊,很重要啊! ";
"失败者面对生活比自己好的人,心中生出了怨恨。 ";
"优秀的女生在脆弱的时候(比如分手,工作不如意),会需要比平时更多的关爱,于是会和更多的人交流。如果她在恢复正常后,回到了自己原来的圈子,不再理你,请你不要奇怪。 ";
"等你生活中真有了生老病死这样的大事,才会知道自己以前半夜的忧伤都是狗屁。 ";
"人家说你看着小,其实并不是你娃娃脸,而是你穿得土。 ";
"错把放纵当潇洒,把颓废当自由,把逃避责任当做追求自我价值。不过是懒,怕吃苦,哪来那么多好听的理由。 ";
"自由从来不是什么理所当然的东西,而是一项需要极高成本的特权。 ";
"除了有钱人,世上还有两种人:其一是省吃俭用买奢侈品装逼,其二是省吃俭用也买不起奢侈品的。 ";
"人人都说我长的丑,其实我是美的不明显。 ";
"物以类聚人以穷分。有钱人终成眷属。 ";
"不要看别人表面上一帆风顺,实际上,他们肯地里,也是一帆风顺。 ";
"你只知道人家化妆比你好看,却不知她们卸了妆,不仅仅比你好看,皮肤还吹弹可破。 ";
"出身不好的限制也不光是平台问题。出身不好,说明你父母混的也不怎么样,也许你基因里就写着“不行”两个字。 ";
"人和人之间最大的区别就是逛完商场后下电梯,你按1楼,别人都按B。 ";
"你全力做到的最好,可能还不如别人的随便搞搞。 ";
"当你觉得自己又丑又穷,一无是处时,别绝望,因为至少你的判断还是对的。 ";
"每次看到穷游日志的感受都是:那么穷就别TM出去浪了。 ";
"认识的人越多,越发现还是有钱人里好人多。 ";
"早起的鸟儿有虫吃;早起的虫儿被鸟吃。 ";
"就算睡得晚,不会找你的人还是不会找你! ";
"你每天都那么努力,忍受了那么多的寂寞和痛苦。可我也没见你有多优秀。 ";
"有人问我如何在这个物质恒流的社会独善其身,我回答了一个字“穷”。 ";
"如果一个人每天工作完都坚持总结自己的不足,时间长了,就会变得越来越不自信。 ";
"是金子,早晚会发光。是石头,到哪里都不会发光。就算你是金子,早晚也会花光。 ";
"我每天拼了命的努力就是为了向那些看不起我的人证明他们是对的。 ";
"没钱没事业的人,才有时间去提高自己的人生境界。 ";
"奶奶曾经告诉我,“当你和跟你过日子的人吵架的时候,你们俩都要记得,应该是你们俩VS问题,而不是你VS他” ";
"因为没有一见钟情的皮囊 ";
"双十一晚上最大的区别就是,有的人是花钱买女朋友开心,有的人是花钱买女朋友开心。 ";
"喜欢就要说出来,不然你怎么能和女神做朋友呢 ";
"已经半夜了,我还在努力工作,却没能挣到多少钱,以后也未必有什么发展。我偶尔想起别人,别人也偶尔想起我,大概这就是生活。 ";
"对今天解决不了的事情,也不必着急。因为明天还是解决不了。 ";
"失恋的时候很多年轻人以为整个世界抛弃了你。别傻了世界根本就没需要过你。 ";
"如果你很忙,除了你这真的很重要之外,更可能的原因是:你很弱。 ";
"找到男朋友后一定要对他好一点,不要欺负他、伤害他、有负于他,毕竟他已经瞎了。 ";
"我要是有钱或者长得好看就好了。那样,即使我性格有点儿怪,也会有人愿意倾听我的想法和感受,跟我做朋友。 ";
"有些人不是赢在了起跑线上,而是直接生在了终点。 ";
"假如今天生活欺骗了你,不要悲伤,不要哭泣,因为明天生活还会继续欺骗你。 ";
"有时候觉得这个世界真是乱了套,说真话的人,像个小人,而说假话的人,一本正经的像个君子。 ";
"你身边没那么多朋友,只有两个原因,第一你太丑,第二你太穷。 ";
"不要抱怨你在十三亿人中还找不到一个对的人,选择题就四个,你也没对过。 ";
"好的容貌和很多钱,是进入上流社交活动的通行证。 ";
"你以为有了钱就会像你想象中那样快乐吗?不,你错了。有钱人的快乐,你根本想象不到。 ";
"体重不过百,不是平胸就是矮,说的好像过百就是大好身材一样。 ";
"做一天的好人并不难,难的是做一辈子有钱人。 ";
"用钱当然买不到快乐,只是有钱,别人会想办法让你快乐。 ";
"万事开头难,中间难,到最后都难。 ";
"女孩子自信一点才漂亮,但是漂亮的女孩子谁不自信啊! ";
"世界上几乎每个人都看电视,但没有一部电视剧呈现的是真实的人生。你要是每天花%的时间看电视剧,那你会发现自己对这个世界越来越失望。 ";
"没有钱包的充实,哪来内心的宁静。很多人都本末倒置了。 ";
"“老师您好,请问像我这样的,没有背景的北大毕业生应该如何定位自己?”“社会底层群众中受教育程度最高的一批人。” ";
"谅解还是惩罚都只是限制罢了,只是刻意的施加桎梏,要说原因…因为人是不知悔改的生物啊。不管多少次,不管愉悦还是痛苦,其本质都不会改变… ";
"“不去努力然后告诉自己我根本就不想要”的痛苦,比“拼命努力后失败”的痛苦,要小得多。 ";
"对今天解决不了的事情,也不必着急。因为明天还是解决不了。 ";
"所有的工作,做久了都会觉得无聊。差别在于,别人无聊还赚得比你多。 ";
"老板和领导很清楚哪些人能够得到晋升。对于那些不能的人,他们也会不断地鼓励和给她们希望。 ";
"岁月是把杀猪刀,是针对那些好看的人,它对长得丑的人一点办法都没有。 ";
"幸亏当事者迷,不然真的看清事实真相,很多人是承受不来的。 ";
"成语造句:很多年轻人想要从工作中找寻快乐和人生的意义,真是缘木求鱼。 ";
"最靠得住的是金钱,最靠不住的是人心。 ";
"当我们说别人太现实的时候,其实是我们的想法太幼稚。 ";
"善良没用,你得漂亮,还得有钱。 ";
"周星驰电影都很寂寞,大内密探008,他最后打败了大反派,对着他老婆,一脸期待她夸奖表扬的样子,这当儿,刘嘉玲演的老婆却自顾自的照镜子,半分眼色也没给他。 ";
"秋天是收获的季节,别人的收获是成功与快乐,你的收获是认识到并不是每个人都会成功与快乐。 ";
"许多人说选择北上广的原因,是发展机会好。但他们说的发展,实际上指的就是打工。 ";
"很多人不断地规划自己的人生,每天压力很大。其实不管你怎么过。都会后悔的。想想你这前几十年就明白了。 ";
"人作的程度不能超过自己的颜值。 ";
"你所有为人称道的美丽,都是PS的痕迹 ";
"年轻的时候多吃些苦,这样老了吃苦才习惯大多数事情,不是你想明白后才觉得无所谓,而是你已经决定这件事情对于你而言无所谓了才突然想明白的 ";
"爱情开始时都差不多。但当两个人平淡到左手牵右手时,是加班挤地铁还房贷然后给他农村父母寄钱假期在屋里大眼瞪小眼,还是开小车朝九晚五住大房子周末采购装点自己的小家出国旅游,区别就非常大了。 ";
"你努力后的成功,不能弥补你成功前的痛苦。 ";
"同样是出卖自己,有的人成了大姐,有的人成了小姐。 ";
"普通人结婚的另一半其实并不欣赏自己,只不过找不到更好的人。他们真正欣赏喜欢的,另有其人。 ";
"当然了,人也没必要活得那么虚荣,为什么非要在意别人的看法呢?更何况爱情本来就是盲目的,妈支持你! ";
"30.喜欢就去表白,不然你不会知道自己长得多丑。 ";
"有些人,常把“自己不行”说成“中国不行”。 ";
"如果你不喜欢自己的房子,你可以换个新的;如果你不喜欢自己的城市,你可以搬到别的城市;如果你不喜欢自己的国家,你甚至可以移民。 ";
"现在的人天天手机不离手,要回你消息早就回了。 ";
"优秀的女生在脆弱的时候(比如分手,工作不如意),会需要比平时更多的关爱,于是会和更多的人交流。如果她在恢复正常后,回到了自己原来的圈子,不再理你,请你不要奇怪。 ";
"普通人结婚的另一半其实并不欣赏自己,只不过找不到更好的人。他们真正欣赏喜欢的,另有其人。 ";
"年轻人嘛,现在没钱算什么,以后没钱的日子还多着呢。 ";
"万事开头难,然后中间难,最后结束难。 ";
"别抱怨了,没有什么比现实生活更公正的了。 ";
"回首往事,是不是发现自己失去了很多,遗憾吧!放心,你以后会失去得更多 ";
"一鼓作气,再而卒。 ";
"这个世界没有不公平,谁让你长得不好看又没钱。 ";
"是金子总会发光的,可你是块石头啊。 ";
"年轻人不努力,怎么知道自己真的不行? ";
"别减肥了,你丑不仅是因为胖。 ";
"弱者都是这样,觉得生活中的无奈太多,自己又无力改变。 ";
"不要年纪轻轻就觉得到了人生谷底,要相信自己还有很大的下降空间。 ";
"那些人人羡慕的精英,其实过得并不如你想象的那样好,但肯定比你强得多。 ";
"不努力的女生,会有买不完的地摊货,逛不完的菜市场。努力的女生,你就没时间逛地摊,因为你只能加班叫外卖、逛网店! ";
"有些年轻人。一毕业就到某些大型国企和机关中工作。每天没什么事儿就是吃饭喝酒。福利好得不得了人生还没有奋斗过就开始养老。自己的理想被丢在一旁。用民脂民膏来享受特权。对于这样的年轻人。我只想问你们四个字:哪投简历? ";
"又一天过去了。今天过得怎么样,梦想是不是更远了? ";
"从前车马很慢,书信很远,一生只够爱一个人,但是可以纳很多妾啊! ";
"世界那么大,不爱你的人那么多 ";
"人性是复杂的。 不,人是「复杂」的,性是啪啪啪。 ";
"努力了这么多年,但凡是有点天赋的,也该有点成功迹象了。 ";
"虽然我长得丑,但是买了漂亮衣服,我就可以丑的漂亮。 ";
"我有个朋友,在几年前停止了抱怨和自怨自艾,开始努力改变自己。到今天,他的物质生活和精神状态都没有什么改善。 ";
"路上看到两个长得不好看也看起来很穷的男女热吻。我说两个物质生活质量很差的人,彼此相伴。朋友立刻反驳我:物质生活质量不行就不会幸福吗?他们如此相爱,比那些有钱又美但离婚的名人强多了!我说我根本没说他们不幸福呀,你努力反驳的,可能是你自己内心的声音吧。 ";
"所有抱怨社会不公和制度的人翻译过来只有一句话:请给我金钱,女人和社会地位。 ";
"学术能力的缺乏并不意味着你就擅长混社会,说不定还不如在学校的表现。 ";
"好人成佛需要九九八十一难,可坏人只需要放下屠刀。 ";
"那年夏天,我抓住一只蝉。终于被我抓住了,瞎几把吵! ";
"失恋的时候,许多年轻人以为整个世界都抛弃了自己,别傻了,世界根本就没需要过你。 ";
"很多时候别人对你好,并不是因为别人喜欢你,而是他喜欢对别人好。 ";
"不要看别人表面上一帆风顺,实际上,他们背地里,也是一帆风顺。 ";
"成败是评判英雄的唯一标准。 ";
"26岁的他辞掉工作,和朋友建了支乐队,到各民办大学演出,反响平淡。30岁钱花完了,父亲得病要很多钱。“唱完这场就放弃,青春就到这里了。”甘肃七里河大学演唱会前,他留下眼泪说。一位女学生递上纸条:我喜欢你的歌,要坚持梦想。他把纸条攥紧决定坚持梦想。34岁,他欠了十几万的债,父亲也病逝了。 "
] |
#!/usr/bin/python3
# Simple interactive credential-check demo:
# ask for a username, then a password, and report success or failure.
# Credentials are validated by membership in the ulist/plist lists below,
# which generalizes the old hard-coded ulist[0]/plist[0] comparison to any
# number of users.

# Known usernames.
ulist = ["Captain Planet"]
# Known passwords.
# NOTE(review): stored in plain text — these should be hashed.
plist = ["THE POWER IS YOURS!!"]


def is_valid_username(name):
    """Return True if *name* is a registered username."""
    return name in ulist


def is_valid_password(password):
    """Return True if *password* is a registered password."""
    return password in plist


def main():
    """Run the interactive login flow: banner, username check, password check."""
    print("||||||||||||||||||||||||||||||")
    print("||||||||||||||||||||||||||||||")
    print("||||||||||||||||||||||||||||||")
    print("|||||Welcome Planeteers|||||||")
    print("||||||||||||||||||||||||||||||")
    print("||||||||||||||||||||||||||||||")
    print("||||||||||||||||||||||||||||||")
    # Username check.
    userInput = input("What is your Username?: ")
    if is_valid_username(userInput):
        print("Retinal Scan... Don't move...")
    else:
        print("Failed to verify credentials.")
        print("Stop Polluting our servers!")
        print("Connection Terminated...")
        return  # abort (replaces exit() so the module stays importable)
    # Password check.
    userPassword = input("Please verify your password: ")
    if is_valid_password(userPassword):
        print("|||||||||||Earth!|||||||||||")
        print("|||||||||||Fire!||||||||||||")
        print("|||||||||||Wind!||||||||||||")
        print("|||||||||||Water!|||||||||||")
        print("|||||||||||Heart!|||||||||||")
        print("||||||||||||||||||||||||||||")
        print("With your powers combined...")
        print(" I am Captain Planet! ")
        print("||||||||||||||||||||||||||||")
        print("||||||||||||||||||||||||||||")
    else:
        print("Failed to verify credentials...")
        print("Nice try, Looten Plunder, Captain Planet is coming for you!")
        print("GO PLANET!")


if __name__ == "__main__":
    main()
|
'''
Level: Medium Tag: [Matrix]
The island is partitioned into a grid of square cells.
You are given an m x n integer matrix heights where heights[r][c] represents the height
above sea level of the cell at coordinate (r, c).
The island receives a lot of rain,
and the rain water can flow to neighboring cells directly north, south, east, and west
if the neighboring cell's height is less than or equal to the current cell's height.
Water can flow from any cell adjacent to an ocean into the ocean.
Return a 2D list of grid coordinates result where result[i] = [ri, ci] denotes that rain water
can flow from cell (ri, ci) to both the Pacific and Atlantic oceans.
============= Older Description ===============
Given an m x n matrix of non-negative integers representing the height of each unit cell in a continent,
the "Pacific ocean" touches the left and top edges of the matrix and the "Atlantic ocean" touches
the right and bottom edges.
Water can only flow in four directions (up, down, left, or right)
from a cell to another one with height equal or lower.
Find the list of grid coordinates where water can flow to both the Pacific and Atlantic ocean.
Note:
The order of returned grid coordinates does not matter.
Both m and n are less than 150.
Example:
Given the following 5x5 matrix:
"../../../Material/waterflow-grid.jpg"
Pacific ~ ~ ~ ~ ~
~ 1 2 2 3 (5) *
~ 3 2 3 (4) (4) *
~ 2 4 (5) 3 1 *
~ (6) (7) 1 4 5 *
~ (5) 1 1 2 4 *
* * * * * Atlantic
Return:
[[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (positions with parentheses in above matrix).
Example 2:
Input: heights = [[2,1],[1,2]]
Output: [[0,0],[0,1],[1,0],[1,1]]
Constraints:
m == heights.length
n == heights[r].length
1 <= m, n <= 200
0 <= heights[r][c] <= 10^5
'''
'''
跟之前那道 Surrounded Regions 很类似, 都是换一个方向考虑问题
既然从每个点向中间扩散会 TLE, 那么我们就把所有边缘点当作起点开始遍历搜索, 然后标记能到达的点为 true
分别标记出 pacific 和 atlantic 能到达的点, 那么最终能返回的点就是二者均为 true 的点。
'''
import sys
class Solution(object):
    def pacificAtlantic(self, matrix):
        """
        Return coordinates [x, y] from which rain water can reach both the
        Pacific (top/left edges) and the Atlantic (bottom/right edges).

        :type matrix: List[List[int]]
        :rtype: List[List[int]]

        Strategy: instead of flowing outward from every cell (TLE), DFS
        inward from every ocean-border cell, moving only to cells of equal
        or greater height, and mark reachability per ocean.  The answer is
        the intersection of the two reachability grids.

        Fixes: replaced Python-2-only xrange/sys.maxint (which break under
        Python 3) with range/float('-inf'), and guarded the empty matrix.
        """
        if not matrix or not matrix[0]:
            return []
        row = len(matrix)
        col = len(matrix[0])
        ans = []
        pacific = [[False for _ in range(col)] for _ in range(row)]
        atlantic = [[False for _ in range(col)] for _ in range(row)]
        # Seed the DFS from the left (Pacific) and right (Atlantic) edges...
        for i in range(row):
            self.dfs(matrix, pacific, float('-inf'), i, 0)
            self.dfs(matrix, atlantic, float('-inf'), i, col - 1)
        # ...and from the top (Pacific) and bottom (Atlantic) edges.
        for j in range(col):
            self.dfs(matrix, pacific, float('-inf'), 0, j)
            self.dfs(matrix, atlantic, float('-inf'), row - 1, j)
        for x in range(row):
            for y in range(col):
                if pacific[x][y] and atlantic[x][y]:
                    ans.append([x, y])
        return ans

    def dfs(self, matrix, visited, pre, x, y):
        """Mark every cell reachable (flat or uphill) from (x, y) in *visited*."""
        if x >= len(matrix) or x < 0:
            return
        if y >= len(matrix[0]) or y < 0:
            return
        if visited[x][y]:
            return
        # Water can only flow downhill-or-flat toward the ocean, so walking
        # inland we may only step to cells at least as high as the previous.
        if matrix[x][y] < pre:
            return
        visited[x][y] = True
        self.dfs(matrix, visited, matrix[x][y], x + 1, y)
        self.dfs(matrix, visited, matrix[x][y], x - 1, y)
        self.dfs(matrix, visited, matrix[x][y], x, y + 1)
        self.dfs(matrix, visited, matrix[x][y], x, y - 1)
class MySolution(object):
    def pacificAtlantic(self, matrix):
        """
        Return coordinates [x, y] whose water can reach both oceans.

        :type matrix: List[List[int]]
        :rtype: List[List[int]]

        Uses two sets of (x, y) tuples seeded from the Pacific (top/left)
        and Atlantic (bottom/right) borders, flowing only to cells of equal
        or greater height; the answer is their intersection.

        Fixes: the result is converted back to List[List[int]] (it used to
        return a list of tuples, contradicting the declared rtype), the
        debug print was removed, and an empty matrix no longer raises.
        """
        if not matrix or not matrix[0]:
            return []
        row = len(matrix)
        col = len(matrix[0])
        pacific = set()
        atlantic = set()

        def dfs(x, y, visited, lower_area):
            # Stop at the border, on revisit, or when the uphill walk fails.
            if x < 0 or x >= row or y < 0 or y >= col:
                return
            if (x, y) in visited:
                return
            if matrix[x][y] < lower_area:
                return
            visited.add((x, y))
            dfs(x + 1, y, visited, matrix[x][y])
            dfs(x - 1, y, visited, matrix[x][y])
            dfs(x, y + 1, visited, matrix[x][y])
            dfs(x, y - 1, visited, matrix[x][y])

        # Heights are non-negative, so -1 is a safe "below everything" seed.
        for i in range(col):
            dfs(0, i, pacific, -1)          # Pacific along the top edge
            dfs(row - 1, i, atlantic, -1)   # Atlantic along the bottom edge
        for i in range(row):
            dfs(i, 0, pacific, -1)          # Pacific along the left edge
            dfs(i, col - 1, atlantic, -1)   # Atlantic along the right edge
        return [[x, y] for x, y in pacific & atlantic]
# Demo run on the 5x5 grid from the problem statement.
heights = [
    [1, 2, 2, 3, 5],
    [3, 2, 3, 4, 4],
    [2, 4, 5, 3, 1],
    [6, 7, 1, 4, 5],
    [5, 1, 1, 2, 4],
]
print(Solution().pacificAtlantic(heights))
# Expected: [0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]
print(MySolution().pacificAtlantic(heights))
#!/usr/bin/env python3
from __future__ import print_function
import psutil
import re
from pprint import pprint
import argparse
class Data(object):
    """Simple attribute bag: Data(a=1).a == 1."""

    def __init__(self, **kw):
        self.__dict__.update(kw)

    def __str__(self):
        # Bug fix: dict.iteritems() is Python 2 only; this file targets
        # Python 3 (see shebang), so use items() instead.
        return '<Data({0})>'.format(', '.join('{key}={val}'.format(key=key, val=repr(val))
                                              for key, val in self.__dict__.items()))
def parse_mapline(line):
    """Parse one /proc/<pid>/maps line into a Data record.

    Example input:
    7fbe721a4000-7fbe7235b000 r-xp 00000000 fd:01 1185241 /usr/lib64/libc-2.21.so (deleted)

    The field layout comes from the kernel (fs/proc/task_mmu.c,
    show_map_vma()): address range, permission flags, page offset,
    device major:minor, inode, then an optional path that may carry a
    ' (deleted)' suffix when the backing file was unlinked.
    """
    fields = line.split(None, 5)
    start_hex, _, end_hex = fields[0].partition('-')
    perms = fields[1]
    major_hex, _, minor_hex = fields[3].partition(':')
    # The path column is absent for anonymous mappings.
    path = fields[5].rstrip() if len(fields) > 5 else ''
    deleted = path.endswith(' (deleted)')
    if deleted:
        path = path[:-len(' (deleted)')]
    return Data(
        start=int(start_hex, 16),
        end=int(end_hex, 16),
        readable=perms[0] == 'r',
        writable=perms[1] == 'w',
        executable=perms[2] == 'x',
        mayshare=perms[3] == 's',
        pgoff=int(fields[2], 16),
        major=int(major_hex, 16),
        minor=int(minor_hex, 16),
        inode=int(fields[4], 10),
        path=path,
        deleted=deleted,
    )
def read_maps(pid):
    """Yield a parsed Data record for each line of /proc/<pid>/maps.

    Raises psutil.AccessDenied when the maps file cannot be read
    (e.g. insufficient permissions), so callers can handle it like
    other psutil access failures.
    """
    try:
        with open('/proc/{pid}/maps'.format(pid=pid), 'r') as f:
            for line in f:
                yield parse_mapline(line)
    except IOError:
        raise psutil.AccessDenied()
def handle_proc(proc, show_files=False):
    """Print a process that maps any executable, deleted file.

    The '[pid] name' header is printed once per matching process; with
    show_files=True each offending mapping's path is listed beneath it.
    """
    printed_name = False
    for m in read_maps(proc.pid):
        # Only executable mappings whose backing file was deleted matter:
        # they indicate code still running from a removed/updated binary.
        if m.executable and m.deleted:
            if not printed_name:
                printed_name = True
                print('[{0}] {1}'.format(proc.pid, proc.name()))
            if show_files:
                print(' ' + m.path)
def parse_args(argv=None):
    """Parse command-line options.

    argv: optional explicit argument list; defaults to sys.argv[1:]
    (argparse's behavior when given None), which keeps the function
    backward-compatible while making it unit-testable.
    """
    ap = argparse.ArgumentParser(description='Find processes executing deleted files')
    ap.add_argument('--show-files', '-f', action='store_true',
                    help='Show deleted file paths')
    return ap.parse_args(argv)
def main():
    """Scan every process and report those executing deleted files."""
    args = parse_args()
    print('Processes executing deleted files:')
    for proc in psutil.process_iter():
        try:
            handle_proc(proc, show_files=args.show_files)
        except psutil.AccessDenied:
            # Not fatal: just skip processes we are not allowed to inspect.
            continue
if __name__ == '__main__':
    main()
|
# Program to calculate a Fibonacci number using recursion.
num= int(input("Enter a number"))
def fib(n):
    # NOTE(review): despite its name, this computes the *factorial* of n
    # (n * fib(n-1) with base case 1), not a Fibonacci number, and it is
    # never called below. Rename to `factorial` or remove; kept as-is here
    # to avoid changing the interface.
    return 1 if n<=1 else n*fib(n-1)
def fibonacci(num):
    """Return the num-th Fibonacci number (F(0)=0, F(1)=1).

    For num <= 1 the input itself is returned, matching the original
    recursive base case (including negative inputs).  Implemented
    iteratively: the naive double recursion it replaces was O(2^n).
    """
    if num <= 1:
        return num
    prev, cur = 0, 1
    for _ in range(num - 1):
        prev, cur = cur, prev + cur
    return cur
# Print the Fibonacci number for the value read above.
print(fibonacci(num))
|
# Reads T test cases from stdin; each case line holds three integers and
# the derived value is printed as "Case #i: answer".
# Ported from Python 2 (raw_input / print statement / implicit integer
# division) to Python 3: input() is converted with int(), map() is
# materialized to a list so it can be indexed, and // makes the integer
# division explicit.
for i in range(1, 1 + int(input())):
    ca = list(map(int, input().split()))
    cz = ((ca[1] - 1) // ca[2]) + (ca[2]) + (ca[0] - 1) * (ca[1] // ca[2])
    print("Case #{}: {}".format(i, cz))
import os, django, time, random
from datetime import timedelta, date
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "beginning_django.settings")
django.setup()
from bookstore.models import Book, Author
from faker import Faker
import random
from time import sleep
# Bulk-load 50,000 fake Book rows (plus author links) into the bookstore app.
cnt = 1
fake = Faker()  # hoisted: one Faker instance is enough (was rebuilt 50k times)
for _ in range(1, 50001):
    title = fake.text()
    pages = random.randint(99, 699)
    price = '{:02.2f}'.format(random.uniform(0.99, 49.99))
    rating = '{:02.2f}'.format(random.uniform(1.0, 5.0))
    publisher = random.randint(1, 220)
    tempYear = random.randint(1950, 2020)
    tempMonth = random.randint(1, 12)
    tempDay = random.randint(1, 28)  # day capped at 28 so every month is valid
    published_date = date(tempYear, tempMonth, tempDay)
    if len(title) >= 300:
        # Keep titles inside the (presumed) 300-char column limit —
        # TODO confirm against the Book model definition.
        title = title[0:295] + '...'
    books = '{}\n'.format(cnt)
    books += 'Book Title: {}\n'.format(title)
    books += 'Pages: {}\n'.format(pages)
    books += 'Price: {}\n'.format(price)
    books += 'Rating: {}\n'.format(rating)
    books += 'Publisher: {}\n'.format(publisher)
    books += 'Published Date: {}\n'.format(published_date.strftime("%Y-%m-%d"))
    book = Book()
    book.book_title = title
    book.pages = pages
    book.price = price
    book.rating = rating
    book.publisher_id = publisher
    book.published_date = published_date
    # Every book gets one random author; periodically add extras so some
    # books end up with 3 / 4 / 5 / 7 authors.  Inner loop variables were
    # renamed from `i`, which shadowed the outer loop variable.
    author_id = [random.randint(1, 1572)]
    if cnt % 17 == 0:
        for _extra in range(1, 3):
            author_id.append(random.randint(1, 1572))
    if cnt % 29 == 0:
        for _extra in range(1, 4):
            author_id.append(random.randint(1, 1572))
    if cnt % 71 == 0:
        for _extra in range(1, 5):
            author_id.append(random.randint(1, 1572))
    if cnt % 97 == 0:
        for _extra in range(1, 7):
            author_id.append(random.randint(1, 1572))
    # Save first so the m2m authors relation can be set on a persisted row.
    book.save()
    tAuthors = Author.objects.filter(pk__in=author_id)
    book.authors.set(tAuthors)
    print(books)
    cnt += 1
"""authors URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('home/', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('home/', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import path, include
2. Add a URL to urlpatterns: path('blog/', include('blog.urls',))
"""
from django.urls import include, path
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
from rest_framework.documentation import include_docs_urls
from django.views.generic.base import RedirectView
# API documentation views (CoreAPI docs and a Swagger schema), both titled
# 'Authors Haven'.
coreapi_docs = include_docs_urls(title='Authors Haven')
schema_view = get_swagger_view(title='Authors Haven')
urlpatterns = [
    path('admin/', admin.site.urls),
    # Several apps share the 'api/' prefix; Django tries each include in
    # order until one of them resolves the URL.
    path('api/', include('authors.apps.authentication.urls', )),
    path('api/social/', include('authors.apps.social_auth.urls',)),
    path('api/', include('authors.apps.profiles.urls',)),
    path('swagger/', schema_view),
    path('docs/', coreapi_docs),
    # Site root redirects (temporary, 302) to the browsable docs.
    path('', RedirectView.as_view(url='docs/', permanent=False), name='index'),
    path('api/', include('authors.apps.articles.urls')),
    path('api/', include('authors.apps.notifications.urls'))
]
|
# -*- coding: utf-8 -*-
from symbol_table import SymbolTable
# Primitive types of the analyzed language (integer / float).
TYPES = ['inteiro', 'flutuante']
# Relational/logical operators whose operand types are checked.
OPERATIONS = ['=', '<>', '>', '<', '>=', '<=', '&&', '||']
# Module-level result flag: cleared when any semantic error is reported,
# and returned by analyzer().
success = True
class Analyzer():
    """Semantic analyzer: walks the parser's syntax tree, fills a scoped
    SymbolTable and prints [ERRO]/[AVISO] diagnostics (messages are kept
    in Portuguese, as shown to the user).

    Bug fix applied throughout: the original assigned `success = False`
    inside methods, which only created *local* variables — the module-level
    `success` flag returned by analyzer() was never updated.  Every method
    that reports an error now declares `global success`.
    """

    def __init__(self):
        # `global` so a new Analyzer actually resets the module-level flag.
        global success
        self.symboltable = SymbolTable()
        success = True

    def scan_tree(self, node):
        """Recursively verify *node* and its children, closing any scope the
        node opened and checking function return requirements."""
        global success
        currentStatus = self.verify_node(node)
        if(not currentStatus['goNextNode']):
            return
        for child in node.children:
            self.scan_tree(child)
        if(currentStatus['isNewContext']):
            self.symboltable.removeCurrentContext()
        if(currentStatus['isFunction']):
            line = self.symboltable.getGlobal()
            # A function with a declared return type must contain a return.
            if(line['type'] != '' and not self.symboltable.hasReturn()):
                success = False
                print('[ERRO] Função ' + line['name'] + ' deveria retornar ' + line['type'] + ' em ' + str(line['line']) + ':' + str(line['column']))

    def verify_node(self, node):
        """Dispatch on the node kind.

        Returns a dict telling scan_tree whether to recurse into children
        (goNextNode) and whether the node opened a new scope (isNewContext)
        or a function body (isFunction).
        """
        global success
        if(node.value == 'declaracao_variaveis'):
            # Variable declaration: first child is the type, the rest vars.
            for var in node.children[1:]:
                dimension = self.verify_variable(var)
                status = self.symboltable.insert({
                    'name': var.children[0].value,
                    'type': node.children[0].value,
                    'used': False,
                    'symbol_type': 'var',
                    'initialized': False,
                    'dimension': dimension,
                    'line': var.children[0].line,
                    'column': var.children[0].column,
                    'value': None
                })
                var.children[0].table_pointer = self.symboltable.searchFor(var.children[0].value)
                if(not status):
                    success = False  # duplicate declaration
            return {
                'goNextNode': False,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'lista_parametros'):
            # Function parameters enter the table already initialized.
            for param in node.children:
                self.symboltable.insert({
                    'name': param.children[1].value,
                    'type': param.children[0].value,
                    'used': False,
                    'symbol_type': 'params',
                    'initialized': True,
                    'dimension': int(len(param.children[2:]) / 2),
                    'line': param.children[0].line,
                    'column': param.children[0].column
                })
                line = self.symboltable.searchFor(param.children[1].value)
                param.children[1].table_pointer = line
            return {
                'goNextNode': False,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'atribuicao'):
            # Assignment: left side must exist; types must be compatible.
            var = node.children[0]
            expression = node.children[1]
            self.verify_variable(var)
            line = self.verify_tableline(var, initialized = True, used = False)
            var_type = 'inteiro'
            if(line):
                var_type = line['type']
            expression_type = self.verify_expression(expression)
            if(expression_type == 'wrong_type'):
                print('[AVISO] Atribuição de tipos distintos \'' + var.table_pointer['name'] + '\' ' + var_type + ' em ' + str(var.table_pointer['line']) + ':' + str(var.table_pointer['column']))
                success = False
            elif(var_type != expression_type and expression_type != None):
                print('[AVISO] Atribuição de tipos distintos \'' + var.table_pointer['name'] + '\' ' + var_type + ' e ' + expression_type + ' em ' + str(var.table_pointer['line']) + ':' + str(var.table_pointer['column']))
                success = False
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'corpo'):
            for child in node.children:
                if(child.value == 'expressao'):
                    self.verify_expression(child)
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'retorna'):
            # `retorna`: record the return and check it against the
            # enclosing function's declared type.
            self.symboltable.setReturn()
            expression_type = self.verify_expression(node.children[0])
            line = self.symboltable.getGlobal()
            if(line['type'] not in TYPES or expression_type not in TYPES):
                success = False
                print('[ERRO] Tipo de Retorno inválido em ' + str(node.line) + ':' + str(node.column))
            elif(line['type'] != expression_type):
                print('[AVISO] Conversão Implícita de tipos em ' + str(node.line) + ':' + str(node.column))
            return {
                'goNextNode': False,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'declaracao_funcao'):
            # Function declaration: 4 children when a return type is given,
            # 3 when it is omitted.
            params = []
            function_type = None
            if(len(node.children) == 4):
                function_type = node.children[0].value
                function_name = node.children[1].value
                params_list = node.children[2]
            else:
                function_name = node.children[0].value
                params_list = node.children[1]
            for param in params_list.children:
                params.append({
                    'type': param.children[0].value,
                    'vet': 0 if len(param.children) == 2 else int((len(param.children) - 2)/2)
                })
            status = self.symboltable.insert({
                'name': function_name,
                'type': function_type if function_type else '',
                'used': False,
                'symbol_type': 'function',
                'initialized': True,
                'dimension': 0,
                'params': params,
                'line': node.children[0].line,
                'column': node.children[0].column
            })
            line = self.symboltable.searchFor(function_name, used = False)
            if(len(node.children) == 4):
                node.children[1].table_pointer = line
            else:
                node.children[0].table_pointer = line
            if(not status):
                success = False  # duplicate function declaration
            self.symboltable.insertContex(function_name)
            return {
                'goNextNode': True,
                'isNewContext': True,
                'isFunction': True,
            }
        elif(node.value == 'repita' or node.value == 'se' or node.value == 'senão'):
            # Loop / conditional bodies open their own scope.
            self.symboltable.insertContex(node.value)
            if(node.value == 'repita'):
                for child in node.children:
                    # NOTE(review): compares against 'expression', while every
                    # other branch uses 'expressao' — this may never match;
                    # left unchanged pending confirmation of the node names.
                    if(child.value == 'expression'):
                        self.verify_expression(child)
            return {
                'goNextNode': True,
                'isNewContext': True,
                'isFunction': False,
            }
        elif(node.value == 'condicional'):
            for child in node.children:
                # NOTE(review): same 'expression' vs 'expressao' suspicion as
                # in the 'repita' branch above.
                if(child.value == 'expression'):
                    self.verify_expression(child)
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'leia'):
            # `leia` (read) initializes its target variable.
            var = node.children[0]
            var.children[0].table_pointer = self.verify_tableline(var, initialized = True)
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'chamada_funcao'):
            self.verify_function(node)
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }
        elif(node.value == 'escreva'):
            self.verify_expression(node.children[0])
            return {
                'goNextNode': False,
                'isNewContext': False,
                'isFunction': False,
            }
        else:
            return {
                'goNextNode': True,
                'isNewContext': False,
                'isFunction': False,
            }

    def verify_variable(self, node):
        """Check the index expressions of a (possibly subscripted) variable
        and return its dimensionality (number of index pairs)."""
        global success
        dimension = 0
        if(len(node.children) > 1):
            for child in node.children[1].children:
                if(child.value != '[' and child.value != ']'):
                    var_type = self.verify_expression(child)
                    var = self.verify_tableline(node, False)
                    # Array indices must be integers.
                    if(var_type and var_type != 'inteiro'):
                        success = False
                        print('[ERRO] Índice de array \'' + node.children[0].value + '\' não é inteiro, em ' + str(node.children[0].line) + ':' + str(node.children[0].column))
                    dimension += 1
        return dimension

    def verify_function(self, node):
        """Validate a function call: arity, argument types and the special
        rules around calling 'principal' (main)."""
        global success
        function = self.verify_tableline(node, False)
        node.table_pointer = function
        if(function):
            params = function['params']
            args = []
            for expression in node.children[-1].children:
                arg = {}
                expression_type = self.verify_expression(expression).split(' ')
                arg['type'] = expression_type[0]
                arg['vet'] = int(expression_type[1]) if len(expression_type) == 2 else 0
                args.append(arg)
            if(function['name'] == 'principal'):
                # Only 'principal' may call itself (recursion warning);
                # any other caller is an error.
                if(self.symboltable.getCurrentContex().scope == 'principal'):
                    print('[AVISO] Chamada recursiva para principal.')
                else:
                    success = False
                    print('[ERRO] Chamada para a função principal não permitida.')
            if(len(params) != len(args)):
                success = False
                print('[ERRO] Chamada à função \'' + function['name'] + '\' com número de parâmetros diferente que o declarado. Esperado ' + str(len(params)) + ', mas recebido ' + str(len(args)) + ', em ' + str(function['line']) + ':' + str(function['column']))
            elif(params != args):
                success = False
                print('[ERRO] Conversão Implícita em função \'' + function['name'] + '\' em ' + str(function['line']) + ':' + str(function['column']))

    def verify_tableline(self, node_type, isError = True, used = True, initialized = False):
        """Look the identifier up in the symbol table and wire the node's
        table_pointer.

        With isError=True a miss is reported and fails the analysis;
        callers that pass isError=False handle the miss themselves (the
        original also cleared a — local, ineffective — success flag in
        that case, which is deliberately not replicated).
        """
        global success
        aux = node_type.children[0].value
        line = self.symboltable.searchFor(aux, used = used, initialized = initialized)
        node_type.table_pointer = line
        if(not line):
            if(isError):
                success = False
                print('[ERRO] Chamada à ' + ('variável ' if(node_type.value == 'var') else 'função ') + aux + ' que não foi declarada em ' + str(node_type.children[0].line) + ':' + str(node_type.children[0].column))
        return line if line else None

    def verify_expression(self, node):
        """Type-check an expression subtree.

        Returns 'inteiro', 'flutuante', 'wrong_type', or None when the
        type cannot be determined.
        """
        if(node.value == 'expressao'):
            return self.verify_expression(node.children[0])
        if(node.value == 'expressao_unaria'):
            children = node.children
            if(len(children) == 1):
                expression_type = children[0].children[0]
            else:
                operation = children[0].value
                expression_type = children[1].children[0]
                if(operation == '!'):
                    # Logical negation over an arithmetic operand is invalid.
                    if(expression_type.value == 'expressao'):
                        self.verify_expression(expression_type)
                    return 'wrong_type'
            if(expression_type.value == 'numero'):
                number = expression_type.children[0].value
                return 'inteiro' if(type(number) is int) else 'flutuante'
            elif(expression_type.value == 'expressao'):
                return self.verify_expression(expression_type)
            else:
                line = self.verify_tableline(expression_type)
                if(line and (line['symbol_type'] == 'var' or line['symbol_type'] == 'params')):
                    dimension = line['dimension']
                    if(dimension != 0):
                        real_dimension = len(expression_type.children) - 1
                        if(dimension - real_dimension != 0):
                            return line['type']
                if(expression_type.value == 'chamada_funcao'):
                    self.verify_function(expression_type)
                return line['type'] if line else None
        elif(len(node.children) >= 2):
            # Binary expression: both operand types must be usable.
            type1 = self.verify_expression(node.children[0])
            type2 = self.verify_expression(node.children[1])
            if(node.value in OPERATIONS):
                if(not type1 or not type2 or (len(type1.split(' ')) == 2 or len(type2.split(' ')) == 2)):
                    print('[AVISO] Tipo Inválido em ' + str(node.line) + ':' + str(node.column))
                    return 'wrong_type'
            if(type1 == type2):
                return type1
            elif(type1 in TYPES and type2 in TYPES):
                # Mixed inteiro/flutuante promotes to flutuante.
                return 'flutuante'
        else:
            return self.verify_expression(node.children[0])
        return None

    def verify_principal(self):
        """Require a 'principal' function that is declared and never called."""
        global success
        line = self.symboltable.hasPrincipal()
        if(line and line['used']):
            print('[ERRO] Chamada para a função principal não permitida.')
            success = False
        elif(not line):
            print('[ERRO] Função principal não declarada.')
            success = False

    def verify_other_points(self):
        """Warn about declared-but-unused variables and functions."""
        for line in self.symboltable.getUninitializedLines():
            print('[AVISO] Variável \'' + line['name'] + '\' declarada, e não utilizada em ' + str(line['line']) + ':' + str(line['column']))
        for line in self.symboltable.getUnusedLines():
            if(line['name'] == 'principal'):
                continue
            print('[AVISO] Função \'' + line['name'] + '\' declarada, mas não utilizada em ' + str(line['line']) + ':' + str(line['column']))
def analyzer(tree):
    """Run the full semantic analysis over *tree*.

    Returns the module-level `success` flag: True when no semantic error
    was reported during the run.
    """
    # NOTE(review): the local name shadows the function itself; harmless
    # here since the function is not called recursively.
    analyzer = Analyzer()
    analyzer.scan_tree(tree)
    analyzer.verify_principal()
    analyzer.verify_other_points()
    return success
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
# >> SNS
# Error payloads for SNS-related endpoints.  Each constant pairs a
# machine-readable `code` with a lazily-translated human-readable `detail`.
NOT_SNS_REQUEST = {
    'code': 'ops.NotSNSRequests',
    'detail': _('This resource is forbidden for not SNS requests.'),
}
METHOD_NOT_ALLOWED = {
    'code': 'sns.MethodNotAllowed',
    'detail': _('This method is not allowed for SNS requests'),
}
INVALID_SNS_SIGNATURE = {
    'code': 'ops.InvalidSNSSignature',
    'detail': _('Invalid SNS Signature.'),
}
SNS_ENDPOINT_SUBSCRIBE_FAILED = {
    'code': 'ops.SNSEndpointSubscribeFailed',
    'detail': _('SNS endpoint subscribe failed.'),
}
SNS_ENDPOINT_SUBSCRIBE_CONFIRMED = {
    'code': 'ops.SNSEndpointSubscribeConfirmed',
    'detail': _('SNS endpoint subscribe confirmed.'),
}
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2019-07-03 08:03
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates only the admin display strings
    # (verbose_name / verbose_name_plural) of the UserInfo model; no
    # schema change. Generated files should not be hand-edited further.
    dependencies = [
        ('userinfo', '0002_bankcard'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='userinfo',
            options={'verbose_name': '用户列表', 'verbose_name_plural': '用户列表展示'},
        ),
    ]
|
class BD_roman:
    """In-memory store pairing names with their encoded vectors.

    The two lists are kept parallel: names[i] corresponds to encoded[i].
    """

    def __init__(self):
        self.names = []
        self.encoded = []

    def add(self, name, vector):
        """Append *name* and its *vector* at matching indices."""
        self.names.append(name)
        self.encoded.append(vector)

    def get_names(self):
        """All stored names, in insertion order."""
        return self.names

    def get_vectors(self):
        """All stored vectors, aligned with get_names()."""
        return self.encoded

    def get_data(self):
        """Both parallel lists, as a (names, vectors) pair."""
        return self.names, self.encoded
from application import app, db, login_required
from application.auth.models import User
from application.animals.models import Animal
from application.auth.forms import LoginForm, CreateUserForm, ChangePasswordForm, ChangeUsernameForm, EditUserInfoForm
from flask_login import login_user, logout_user, current_user
from flask import render_template, request, redirect, url_for, flash, jsonify
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy.sql import text
from sqlalchemy.exc import IntegrityError
@app.route("/auth/menu", methods=["GET"])
@login_required()
def auth_menu():
    """Render the account-management menu for the logged-in user."""
    return render_template("auth/menu.html")
@app.route("/auth/login", methods=["GET", "POST"])
def auth_login():
    """Log a user in: GET renders the form, POST validates credentials.

    The stored hash is of password + per-user salt, so verification
    concatenates the same way.
    """
    if request.method == "GET":
        return render_template("auth/loginform.html", form=LoginForm())
    form = LoginForm(request.form)
    if not form.validate():
        return render_template("auth/loginform.html", form=form)
    user = User.query.filter_by(username=form.username.data).first()
    # Same generic message for unknown user and wrong password
    # (avoids user enumeration).
    if user is None or not check_password_hash(user.password, form.password.data + user.salt):
        flash("Annettua käyttäjää ei ole olemassa, tai salasana oli väärä!", "error")
        return render_template("auth/loginform.html", form=form)
    login_user(user, remember=True)
    flash('Kirjautuminen onnistui!', "info")
    return redirect(url_for("index"))
@app.route("/auth/logout")
@login_required()
def auth_logout():
    """Log the current user out and return to the front page."""
    logout_user()
    return redirect(url_for("index"))
@app.route("/auth/create", methods=["GET", "POST"])
def auth_create():
    """Register a new user account (role 'USER')."""
    if request.method == "GET":
        return render_template("auth/createuserform.html", form=CreateUserForm())
    form = CreateUserForm(request.form)
    # Create the new user from the form
    if form.validate():
        new_user = User(form.username.data, form.name.data,
                        form.password.data, form.city.data, form.age.data, 'USER')
        try:
            db.session().add(new_user)
            db.session().commit()
        except IntegrityError:
            # A duplicate username violates the unique constraint.
            db.session().rollback()
            flash("Käyttäjänimi on jo käytössä!", "error")
            return render_template("auth/createuserform.html", form=CreateUserForm())
        flash('Rekisteröityminen onnistui!', "info")
        return redirect(url_for("auth_login"))
    return render_template("auth/createuserform.html", form=form)
@app.route("/auth/delete", methods=["GET", "POST"])
@login_required()
def auth_delete():
    """Delete the logged-in user's own account.

    GET renders a confirmation page; POST removes the user's votes and
    the account itself, then ends the session.
    """
    if request.method == "GET":
        return render_template("auth/deleteuserform.html")
    try:
        user = User.query.get(current_user.account_id)
        Animal.delete_users_votes(current_user.account_id)
        db.session.delete(user)
        db.session.commit()
        logout_user()
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any failure rolls the transaction back.
        db.session().rollback()
        flash("Virhe poistettaessa käyttäjää!, käyttäjää ei poistettu!", "error")
        return redirect(url_for("index"))
    flash("Käyttäjä poistettu", "info")
    return redirect(url_for("index"))
@app.route("/auth/delete/<account_id>", methods=["GET", "POST"])
@login_required(role="ADMIN")
def auth_delete_account(account_id):
    """Admin-only: delete the given account and its votes."""
    try:
        user = User.query.get(account_id)
        Animal.delete_users_votes(account_id)
        db.session.delete(user)
        db.session.commit()
        flash("Käyttäjä poistettu onnistuneesti!", "info")
        return redirect(url_for("index"))
    except Exception:
        # Narrowed from a bare `except:`.
        # NOTE(review): unlike auth_delete, no rollback happens here —
        # confirm whether the session should be rolled back on failure.
        flash("Poistaminen epäonnistui!", "error")
        return redirect(url_for("index"))
@app.route("/auth/changepw", methods=["GET", "POST"])
@login_required()
def auth_changepw():
    """Change the logged-in user's password.

    The old password is verified against the stored hash; the new one is
    hashed with a freshly generated salt and written via a raw UPDATE.
    """
    if request.method == "GET":
        return render_template("auth/changepasswordform.html", form=ChangePasswordForm())
    form = ChangePasswordForm(request.form)
    if form.validate():
        if check_password_hash(current_user.password, form.oldPassword.data + current_user.salt):
            try:
                salt = User.generate_salt()
                stmt = text("UPDATE account SET password = :new_pw, salt = :slt WHERE account_id = :cur_id").params(
                    new_pw=generate_password_hash(form.password.data + salt), slt=salt, cur_id=current_user.account_id)
                db.engine.execute(stmt)
                db.session().commit()
            except Exception:
                # Narrowed from a bare `except:`; roll back on any DB error.
                db.session().rollback()
                flash("Virhe salasanan vaihdossa! Salasanaa ei vaihdettu!", "error")
                return render_template("auth/changepasswordform.html", form=form)
            flash('Salasana vaihdettu!', "info")
            return redirect(url_for('auth_menu'))
        flash("Nykyinen salasana oli väärä!", "error")
        return render_template("auth/changepasswordform.html", form=form)
    return render_template("auth/changepasswordform.html", form=form)
@app.route("/auth/user_info")
@login_required()
def auth_user_info():
    """Show the logged-in user's profile information."""
    return render_template("auth/userinfo.html", user_info=User.find_current_user_information(current_user.account_id))
@app.route("/auth/user_info/edit/<account_id>", methods=["GET", "POST"])
@login_required()
def auth_user_info_edit(account_id):
    """Edit a user's profile info.

    Regular users may only edit their own account; admins may edit anyone.
    """
    if account_id != current_user.get_id():
        if current_user.urole != "ADMIN":
            flash("Sinulla ei ole käyttöoikeuksia kyseiseen toimintoon", "error")
            return redirect(url_for("index"))
    user = User.query.get(account_id)
    form = EditUserInfoForm()
    if request.method == "GET":
        # Pre-populate the form with the current values.
        form.name.data = user.name
        form.city.data = user.city
        form.age.data = user.age
        return render_template("auth/edituserinfo.html", form=form, account_id=account_id)
    form = EditUserInfoForm(request.form)
    if form.validate():
        try:
            user.name = form.name.data
            user.city = form.city.data
            user.age = form.age.data
            db.session().commit()
        except Exception:
            # Narrowed from a bare `except:`.
            db.session().rollback()
            flash("Muokatessa tapahtui virhe! Käyttäjätietoja ei muokattu!", "error")
            # Bug fix: the GET branch passes account_id to this template;
            # it was missing here and on the validation-failure render.
            return render_template("auth/edituserinfo.html", form=form, account_id=account_id)
        flash("Käyttäjätietoja muokattiin onnistuneesti!", "info")
        return redirect(url_for("auth_user_info"))
    return render_template("auth/edituserinfo.html", form=form, account_id=account_id)
@app.route("/auth/list")
@login_required(role="ADMIN")
def auth_list():
    """Admin-only: paginated listing of all accounts, 20 per page."""
    page = request.args.get('page', 1, type=int)
    return render_template("auth/list.html", users=User.query.order_by(User.account_id.asc()).paginate(page, 20, False))
@app.route("/auth/change_username", methods=["GET", "POST"])
@login_required()
def auth_change_username():
    """Change the logged-in user's username (must differ and be unique)."""
    if request.method == "GET":
        return render_template("auth/changeusername.html", form=ChangeUsernameForm())
    form = ChangeUsernameForm(request.form)
    if form.username.data == current_user.username:
        flash("Uusi käyttäjänimi on sama kuin nykyinen käyttäjänimi!", "error")
        return render_template("auth/changeusername.html", form=form)
    if form.validate():
        try:
            current_user.username = form.username.data
            db.session().commit()
        except Exception:
            # Narrowed from a bare `except:`; a unique-constraint violation
            # here means the requested name is already taken.
            db.session().rollback()
            flash("Käyttäjänimi on varattu!", "error")
            return render_template("auth/changeusername.html", form=form)
        flash("Käyttäjänimi vaihdettu onnistuneesti", "info")
        # NOTE(review): success renders the menu template directly instead
        # of redirecting (unlike other handlers) — confirm if intended.
        return render_template("auth/menu.html")
    return render_template("auth/changeusername.html", form=form)
|
#!/usr/bin/env python3
# looking for number of events within 1 second, 5 seconds, 10 seconds
import csv
import math
import pprint
import sys
import time
import itertools
import matplotlib.pyplot as plt
import numpy as np
try:
dw_file = sys.argv[1]
pw_file = sys.argv[2]
except:
print()
print('Usage: {} dw_fils.csv pw_file.csv'.format(sys.argv[0]))
print()
raise
# Threshold is time between events to define a cluster
# Size is minimum number of events in a cluster to be considered an outage
THRES_DW = 15*1000
CLUSTER_SIZE_DW = 3
THRES_PW = 300
CLUSTER_SIZE_PW = 3
print('Config:')
print('DW\tCluster sep {}, cluster size {}'.format(THRES_DW/1000, CLUSTER_SIZE_DW))
print('PW\tCluster sep {}, cluster size {}'.format(THRES_PW, CLUSTER_SIZE_PW))
print()
# Load DW Data
times_dw = []
with open(dw_file) as csvfile:
reader = csv.reader(csvfile)
headers = next(reader, None)
fucked_rows = 0
good_rows = 0
phones = {}
phone_power_state = {}
phone_del_cnt = {}
for row in reader:
# Not every row even has an event timestamp
try:
eventtime = int(row[headers.index('event_time')])
good_rows += 1
except:
fucked_rows += 1
continue
# Lie, use IMEI as it's always present, phone can be ''
phone_num = row[headers.index('phone_imei')]
evt_type = row[headers.index('type')]
# Assume phones start powered
if phone_num not in phone_power_state:
phone_power_state[phone_num] = True
if evt_type == 'plugged':
phone_power_state[phone_num] = True
if evt_type != 'unplugged':
continue
phone_power_state[phone_num] = False
# Keep track of the number of records we delete due to duplication
if phone_num not in phone_del_cnt:
phone_del_cnt[phone_num] = 0
# Filter out duplicate events from the same phone
if phone_num in phones:
FIVE_MIN = 1000*60*5
ONE_HOUR = 1000*60*60
if (eventtime - phones[phone_num]) < (FIVE_MIN):
# Keep sliding this forward so one phone can't keep recreating
# clusters
#phones[phone_num] = eventtime
phone_del_cnt[phone_num] += 1
continue
if (eventtime - phones[phone_num]) < 0:
# Somehow this phone went back in time? Skip.
# This shouldn't ever happen, but let's be paranoid
raise NotImplementedError
phones[phone_num] = eventtime
# This is now a unique enough outage event
times_dw.append(eventtime)
## Optional, bail out in a shorter window
#if eventtime > (times_dw[0] + 1000*60*60*24*7):
# break
times_dw.sort()
print('DW')
print('good rows: ', good_rows)
print('bad rows: ', fucked_rows)
print('times_dw size: ', len(times_dw))
# Print the number of deleted records per phone, sorted by how many per phone
#for phone,cnt in reversed(sorted(phone_del_cnt.items(), key=lambda kv: kv[1])):
# print('{}\t{}'.format(phone[-3:], cnt))
times_pw = []
with open(pw_file) as csvfile:
reader = csv.reader(csvfile)
headers = next(reader, None)
cores = {}
for row in reader:
core_id = row[headers.index('core_id')]
is_powered = True if row[headers.index('is_powered')] == 't' else False
# Handle new nodes, we get enough repeats that ignoring the first
# message is fine
if core_id not in cores:
cores[core_id] = is_powered
continue
# If the power state changes update
if cores[core_id] != is_powered:
cores[core_id] = is_powered
# If the power turned off, note an event
if is_powered == False:
time_str = row[headers.index('time')]
time_str = time_str[:-3].split('.')[0]
pattern = '%Y-%m-%d %H:%M:%S'
epoch = int(time.mktime(time.strptime(time_str, pattern)))
epoch = epoch - 18000
times_pw.append(epoch)
## Optional bail early
#if epoch > ((times_dw[0] + 1000*60*60*24*7)/1000):
# break
times_pw.sort()
print('PW')
print('PW events: ', len(times_pw))
print('-------')
print('Clustering....')
# Clustering
# https://stackoverflow.com/questions/15800895/finding-clusters-of-numbers-in-a-list
#
# This magic splits into events into clusters where clusters are identified by
# being runs of numbers at least THRES apart. The `res[j]` holds the whole list
# of clusters, while the `r2` is what we actually use and filters down to just
# the clusters that are at least CLUSTER_SIZE.
nd = [0] + list(np.where(np.diff(times_dw) > THRES_DW)[0] + 1) + [len(times_dw)]
a, b = itertools.tee(nd)
next(b, None)
res = {}
r2 = {}
for j, (f, b) in enumerate(zip(a, b)):
res[j] = times_dw[f:b]
if len(res[j]) >= CLUSTER_SIZE_DW:
cluster_times_dw = list(res[j])
r2[int(np.average(cluster_times_dw))] = cluster_times_dw
print('DW')
print('num clusters of any size', len(res))
print('num clusters of min size', len(r2))
cluster_times_dw = []
cnts_dw = []
for time,cluster in r2.items():
#print(time,cluster)
t = int(time/1000)
cluster_times_dw.append(t)
cnts_dw.append(len(cluster))
if len(cluster) > 10:
print('\t{}\t{}'.format(t, len(cluster)))
#print(cnts_dw)
fig, ax1 = plt.subplots()
ax1.scatter(cluster_times_dw,cnts_dw)
#plt.scatter(cluster_times_dw,cnts_dw)
#plt.show()
nd = [0] + list(np.where(np.diff(times_pw) > THRES_PW)[0] + 1) + [len(times_pw)]
a, b = itertools.tee(nd)
next(b, None)
res = {}
r2 = {}
for j, (f, b) in enumerate(zip(a, b)):
res[j] = times_pw[f:b]
if len(res[j]) >= CLUSTER_SIZE_PW:
cluster_times_pw = list(res[j])
r2[int(np.average(cluster_times_pw))] = cluster_times_pw
print('PW')
print('num clusters of any size', len(res))
print('num clusters of min size', len(r2))
cluster_times_pw = []
cnts_pw = []
for time,cluster in r2.items():
if time == -1:
continue
#print(time,cluster)
cluster_times_pw.append(time)
cnts_pw.append(len(cluster))
if len(cluster) > 20:
print('\t{}\t{}'.format(time, len(cluster)))
#print(cnts_pw)
ax2 = ax1.twinx()
ax2.scatter(cluster_times_pw,cnts_pw,c='orange')
#plt.scatter(cluster_times_pw,cnts_pw,c='orange')
for i,t in enumerate(cluster_times_pw):
if cnts_pw[i] >= 20:
plt.axvline(x=t,color='orange')
print(np.array(cluster_times_dw))
print(np.array(cluster_times_pw))
# https://stackoverflow.com/questions/2566412/find-nearest-value-in-numpy-array/2566508
def find_nearest(array, value):
    """Return the element of sorted ``array`` closest to ``value``.

    Uses a binary search (np.searchsorted) and then compares the two
    neighbouring candidates; on an exact tie the right-hand element wins.
    """
    idx = np.searchsorted(array, value, side="left")
    if idx == 0:
        return array[0]
    left_closer = idx == len(array) or math.fabs(value - array[idx - 1]) < math.fabs(value - array[idx])
    if left_closer:
        return array[idx - 1]
    return array[idx]
print('--------------')
print('PW with nearby DW?')
better60 = 0
better300 = 0
better600 = 0
for pw in cluster_times_pw:
dw = find_nearest(cluster_times_dw, pw)
diff = abs(pw-dw)
print('{}\t{}\t{}\t{}\t{}\t{}'.format(pw, dw, diff, diff <= 60, diff <= 300, diff <= 600))
if diff <= 60:
better60 += 1
if diff <= 300:
better300 += 1
if diff <= 600:
better600 += 1
print()
print('tot {}, bet60 {} {:.2f}%, bet300 {} {:.2f}%, bet600 {} {:.2f}%'.format(
len(cluster_times_pw),
better60, 100*better60/len(cluster_times_pw),
better300, 100*better300/len(cluster_times_pw),
better600, 100*better600/len(cluster_times_pw)
))
print('--------------')
print('DW with nearby PW?')
better60 = 0
better300 = 0
better600 = 0
for dw in cluster_times_dw:
pw = find_nearest(cluster_times_pw, dw)
diff = abs(pw-dw)
#print('{}\t{}\t{}\t{}\t{}\t{}'.format(pw, dw, diff, diff <= 60, diff <= 300, diff <= 600))
if diff <= 60:
better60 += 1
if diff <= 300:
better300 += 1
if diff <= 600:
better600 += 1
print()
print('tot {}, bet60 {} {:.2f}%, bet300 {} {:.2f}%, bet600 {} {:.2f}%'.format(
len(cluster_times_dw),
better60, 100*better60/len(cluster_times_dw),
better300, 100*better300/len(cluster_times_dw),
better600, 100*better600/len(cluster_times_dw)
))
plt.show()
|
# -*- coding:utf-8 -*-
'''
Created on 2016年7月14日
@author: zhaojiangang
'''
import random
import freetime.util.log as ftlog
from freetime.core import reactor
from poker.entity.game.rooms import roominfo
from poker.entity.game.rooms.erdayi_match_ctrl.config import MatchConfig
from poker.entity.game.rooms.erdayi_match_ctrl.const import MatchFinishReason
from poker.entity.game.rooms.erdayi_match_ctrl.interface import MatchStage, \
MatchFactory
from poker.entity.game.rooms.erdayi_match_ctrl.interfacetest import \
MatchStatusDaoMem, SigninRecordDaoTest, TableControllerTest, PlayerNotifierTest, \
MatchRewardsTest, MatchUserIFTest, SignerInfoLoaderTest
from poker.entity.game.rooms.erdayi_match_ctrl.match import MatchMaster, \
MatchAreaLocal, MatchInst
from poker.entity.game.rooms.erdayi_match_ctrl.models import Player, \
TableManager
from poker.entity.game.rooms.erdayi_match_ctrl.utils import Logger, \
HeartbeatAble
match_conf = {
"buyinchip": 0,
"controlServerCount": 1,
"controlTableCount": 0,
"dummyUserCount": 0,
"gameServerCount": 20,
"gameTableCount": 500,
"goodCard": 0,
"hasrobot": 0,
"ismatch": 1,
"matchConf": {
"desc": "开赛时间:21:30 \n报名费用:免费",
"fees": [],
"rank.rewards": [
{
"desc": "价值1499元的游戏耳机",
"ranking": {
"end": 1,
"start": 1
},
"rewards": [
{
"count": 1,
"itemId": "item:4151"
}
]
},
{
"desc": "10万金币",
"ranking": {
"end": 4,
"start": 2
},
"rewards": [
{
"count": 100000,
"itemId": "user:chip"
}
]
},
{
"desc": "1万金币",
"ranking": {
"end": 10,
"start": 5
},
"rewards": [
{
"count": 10000,
"itemId": "user:chip"
}
]
}
],
"stages": [
{
"animation.type": 0,
"card.count": 6,
"chip.base": 1000,
"chip.grow": 0.3,
"chip.grow.base": 200,
"chip.grow.incr": 100,
"chip.times": 60,
"chip.user": 12000,
"chip.user.2.rate": 0,
"chip.user.3.base": 0,
"lose.user.chip": 0.5,
"name": "海选赛",
"rise.user.count": 24,
"rise.user.refer": 30,
"seat.principles": 1,
"grouping.type": 2,
"grouping.user.count": 30,
"type": 1
},
{
"animation.type": 1,
"card.count": 2,
"chip.base": 100,
"chip.grow": 0.5,
"chip.times": 3600,
"chip.user": 3,
"chip.user.3.base": 300,
"name": "24强赛",
"rise.user.count": 12,
"seat.principles": 2,
"type": 2
},
{
"animation.type": 1,
"card.count": 2,
"chip.base": 100,
"chip.grow": 0.5,
"chip.times": 3600,
"chip.user": 2,
"chip.user.2.rate": 0.2,
"name": "12强赛",
"rise.user.count": 6,
"seat.principles": 2,
"type": 2
},
{
"animation.type": 3,
"card.count": 2,
"chip.base": 100,
"chip.grow": 0.5,
"chip.times": 3600,
"chip.user": 2,
"chip.user.2.rate": 0.2,
"name": "6强赛",
"rise.user.count": 3,
"seat.principles": 2,
"type": 2
},
{
"animation.type": 2,
"card.count": 2,
"chip.base": 100,
"chip.grow": 0.5,
"chip.times": 3600,
"chip.user": 2,
"chip.user.2.rate": 0.2,
"name": "决赛",
"rise.user.count": 1,
"seat.principles": 2,
"type": 2
}
],
"start": {
"fee.type": 0,
"maxplaytime": 7200,
"prepare.times": 0,
"signin.times": 2400,
"start.speed": 6,
"times": {
"days": {
"first": "",
"interval": "1d",
"count": 100,
},
"times_in_day": {
"first": "00:00",
"interval": 1,
"count": 2000
}
},
"type": 2,
"user.groupsize": 2000,
"user.maxsize": 2000,
"user.minsize": 3,
"user.next.group": 0
},
"table.seat.count": 3,
"tips": {
"infos": [
"积分相同时,按报名先后顺序确定名次。",
"积分低于淘汰分数线会被淘汰,称打立出局。",
"打立赛制有局数上限,打满局数会等待他人。",
"打立阶段,轮空时会记1局游戏。",
"定局赛制,指打固定局数后按积分排名。",
"每局会按照开局时的底分结算。",
"比赛流局时,可能会有积分惩罚。"
],
"interval": 5
}
},
"maxCoin": -1,
"maxCoinQS": -1,
"maxLevel": -1,
"minCoin": -1,
"minCoinQS": -1,
"name": "途游阿里赛",
"playDesc": "",
"playMode": "happy",
"robotUserCallUpTime": 10,
"robotUserMaxCount": 0,
"robotUserOpTime": [
5,
12
],
"roomFee": 45,
"roomMutil": 50,
"sendCoupon": 0,
"showCard": 0,
"tableConf": {
"autochange": 1,
"basebet": 1,
"basemulti": 1,
"cardNoteChip": 500,
"canchat": 0,
"coin2chip": 1,
"grab": 1,
"gslam": 128,
"lucky": 0,
"maxSeatN": 3,
"optime": 20,
"passtime": 5,
"rangpaiMultiType": 1,
"robottimes": 1,
"tbbox": 0,
"unticheat": 1
},
"typeName": "big_match",
"winDesc": ""
}
class MyRoom(object):
    """Minimal room stand-in for the match test harness.

    The match controllers only ever read ``roomId`` and ``gameId``.
    """

    def __init__(self, roomId):
        self.roomId = roomId
        # Fixed game id used throughout this test script.
        self.gameId = 6
class MyStage(MatchStage):
    """Test stage that logs lifecycle calls and finishes its group after 10 ticks."""

    def __init__(self, stageConf):
        super(MyStage, self).__init__(stageConf)
        self._count = 0  # number of processStage ticks seen so far
        self._logger = Logger()
        self._logger.add('stageIndex', self.stageIndex)

    def start(self):
        # Lifecycle hook: stage begins.
        self._logger.info('MatchStage.start')

    def kill(self, reason):
        # Lifecycle hook: stage aborted with the given reason.
        self._logger.info('MatchStage.kill',
                          'reason=', reason)

    def finish(self, reason):
        # Lifecycle hook: stage completed with the given reason.
        self._logger.info('MatchStage.finish',
                          'reason=', reason)

    def processStage(self):
        # Heartbeat hook: after 10 ticks, finish the whole group normally.
        self._logger.info('MatchStage.processStage',
                          'count=', self._count)
        self._count += 1
        if self._count >= 10:
            self.group.finishGroup(MatchFinishReason.FINISH)
class MatchFactoryTest(MatchFactory):
    """Factory used by the test harness to build stage and player objects."""

    def newStage(self, stageConf):
        '''
        Create a stage for the given stage config.
        '''
        return MyStage(stageConf)

    def newPlayer(self, signer):
        '''
        Create a Player for a signed-up user.
        '''
        player = Player(signer.userId)
        return player
def buildMatchMaster(roomId, matchId, matchConf):
    """Assemble a MatchMaster wired with in-memory test doubles."""
    room = MyRoom(roomId)
    # NOTE(review): tableManager is built but never attached to the master,
    # and addTables uses the hardcoded room id 60571 instead of roomId --
    # confirm whether the master needs a table manager at all.
    tableManager = TableManager(room, 3)
    tableManager.addTables(60571, 1, 100)
    master = MatchMaster(matchId, room, matchConf)
    master.matchStatusDao = MatchStatusDaoMem()
    master.matchFactory = MatchFactoryTest()
    return master
def buildMatchArea(roomId, matchId, matchConf, master):
    """Assemble a local match area (3-seat tables) backed entirely by test doubles."""
    room = MyRoom(roomId)
    # 100 tables, numbered from 1, owned by this area's room.
    tableManager = TableManager(room, 3)
    tableManager.addTables(roomId, 1, 100)
    area = MatchAreaLocal(matchId, master, room, matchConf)
    # Replace every external collaborator with its in-memory test fake.
    area.signinRecordDao = SigninRecordDaoTest()
    area.tableManager = tableManager
    area.tableController = TableControllerTest(area)
    area.playerNotifier = PlayerNotifierTest()
    area.matchRewards = MatchRewardsTest()
    area.matchUserIF = MatchUserIFTest()
    area.signerInfoLoader = SignerInfoLoaderTest()
    area.matchFactory = MatchFactoryTest()
    return area
CLIENT_IDS = [
'Android_3.372_tuyoo.weakChinaMobile.0-hall7.ydmm.happyxinchun',
'Winpc_3.70_360.360.0-hall8.360.texas',
'Android_3.72_tyOneKey,tyAccount,tyGuest.tuyoo.0-hall8.duokunew.day',
'Android_3.363_pps.pps,weakChinaMobile,woStore,aigame.0-hall6.pps.dj'
]
class MatchChecker(HeartbeatAble):
    """Heartbeat task that waits until the master and every area are ready,
    signs a batch of test users in to the match (once), then stops itself.
    """

    def __init__(self, master, areas, signerInfoLoader):
        # Heartbeat interval of 1 second.
        super(MatchChecker, self).__init__(1)
        self._master = master
        self._areaMap = {area.roomId: area for area in areas}
        self._userId = 1040000001
        self._signerCountPerArea = 30
        self.signerInfoLoader = signerInfoLoader
        self.userIds = [self._userId + i for i in xrange(len(self._areaMap) * self._signerCountPerArea)]
        for userId in self.userIds:
            clientId = CLIENT_IDS[random.randint(0, len(CLIENT_IDS) - 1)]
            # BUGFIX: this used `self.userInfoLoader.setUserAttrs(i + 1, ...)`.
            # `userInfoLoader` is never assigned anywhere (AttributeError at
            # runtime) and `i` is only a leaked comprehension variable, so the
            # same user was targeted on every iteration. Register the attrs
            # for each userId on the loader this checker actually holds.
            self.signerInfoLoader.setUserAttrs(userId, {'name': 'user%s' % (userId),
                                                        'sessionClientId': clientId,
                                                        'snsId': 'sns%s' % (userId)})

    def _doHeartbeat(self):
        isAllReady = self._isAllReady()
        ftlog.info('MatchChecker._doHeartbeat isAllReady=', isAllReady)
        if isAllReady:
            # Sign users in to the master: one contiguous slice of userIds per area.
            for i, area in enumerate(self._areaMap.values()):
                self._signinToMatch(area, self.userIds[
                    i * self._signerCountPerArea:i * self._signerCountPerArea + self._signerCountPerArea])
            self.stopHeart()

    def _isAllReady(self):
        # Ready when the master's instance controller has reached the
        # sign-in state and every area has a current instance.
        if (not self._master._instCtrl
                or self._master._instCtrl.state < MatchInst.ST_SIGNIN):
            return False
        for area in self._areaMap.values():
            if not area.curInst:
                return False
        return True

    def _signinToMatch(self, area, userIds):
        # Fee of 0: the test match is free to enter.
        for userId in userIds:
            area.curInst.signin(userId, 0)
def loadRoomInfo(gameId, roomId):
    """Stubbed persistence hook: pretend no stored room info exists."""
    return None
def saveRoomInfo(gameId, roomInfo):
    """Stubbed persistence hook: discard the room info (no DB in tests)."""
    return None
def removeRoomInfo(gameId, roomId):
    """Stubbed persistence hook: nothing to remove (no DB in tests)."""
    return None
if __name__ == '__main__':
    # In-process smoke test: wire one master and one area together with the
    # test doubles above and run the full match lifecycle.
    ftlog.initLog('groupmatch.log', './logs/')
    matchId = 6057
    masterRoomId = 60571
    areaRoomIds = [60571]
    # Monkey-patch room-info persistence with the no-op stubs (no DB here).
    roominfo.saveRoomInfo = saveRoomInfo
    roominfo.removeRoomInfo = removeRoomInfo
    roominfo.loadRoomInfo = loadRoomInfo
    matchConf = MatchConfig.parse(6, 60571, 6057, '满3人开赛', match_conf['matchConf'])
    signerInfoLoader = SignerInfoLoaderTest()
    areas = []
    master = buildMatchMaster(masterRoomId, matchId, matchConf)
    for areaRoomId in areaRoomIds:
        area = buildMatchArea(areaRoomId, matchId, matchConf, master)
        areas.append(area)
        master.addArea(area)
    # Start the heartbeats, then hand control to the reactor loop.
    master.startHeart()
    MatchChecker(master, areas, signerInfoLoader).startHeart()
    reactor.mainloop()
|
from o3seespy.base_model import OpenSeesObject
class Node(OpenSeesObject):
    op_base_type = "node"
    op_type = "node"

    def __init__(self, osi, x: float, y=None, z=None, vel=None, acc=None, mass: list=None,
                 x_mass=None, y_mass=None, z_mass=None, x_rot_mass=None, y_rot_mass=None, z_rot_mass=None, tag=None,
                 build=1):
        """
        An OpenSees node

        Parameters
        ----------
        osi : o3seespy.opensees_instance.OpenSeesInstance object
            An instance of OpenSees
        x : float
            x-coordinate
        y : float, optional
            y-coordinate (used when osi.ndm >= 2)
        z : float, optional
            z-coordinate (used when osi.ndm == 3)
        vel : iterable object, optional
            nodal velocities (x, y, z)
        acc : iterable object, optional
            nodal accelerations (x, y, z)
        mass : iterable object, optional
            nodal masses, one entry per DOF; alternative to the
            per-component ``*_mass`` keyword arguments
        x_mass, y_mass, z_mass : float, optional
            translational mass for the corresponding DOF
        x_rot_mass, y_rot_mass, z_rot_mass : float, optional
            rotational mass for the corresponding DOF
        tag : int, optional
            explicit node tag; when omitted the next tag from osi is used
        build : int
            if truthy, send the node to the OpenSees process immediately
        """
        self.x = float(x)
        if y is not None:
            self.y = float(y)
        if z is not None:
            self.z = float(z)
        self.vel = vel
        self.acc = acc
        if tag is None:
            # Auto-assign the next node tag tracked by the OpenSees instance.
            osi.n_node += 1
            self._tag = osi.n_node
        else:
            self._tag = int(tag)
        # Ordered mass components for this number of dimensions; the order
        # must match the DOF ordering OpenSees expects for ndm.
        if osi.ndm == 1:
            self._parameters = [self._tag, self.x]
            poss_mass = [x_mass]
        elif osi.ndm == 2:
            self._parameters = [self._tag, self.x, self.y]
            poss_mass = [x_mass, y_mass, z_rot_mass]
        elif osi.ndm == 3:
            self._parameters = [self._tag, self.x, self.y, self.z]
            poss_mass = [x_mass, y_mass, z_mass, x_rot_mass, y_rot_mass, z_rot_mass]
        else:
            raise NotImplementedError(f"Currently only supports 1-3D analyses, ndm={osi.ndm}")
        if mass is None:
            mass = []
            # Collect the leading run of per-component masses; a gap (a None
            # followed by a value) is ambiguous and therefore rejected.
            if poss_mass[0] is not None:
                none_found = False
                for mval in poss_mass:
                    if mval is None:
                        none_found = True
                    else:
                        if not none_found:
                            mass.append(float(mval))
                        else:
                            mstr = ','.join([str(x) for x in poss_mass])
                            raise ValueError(f'Cannot set mass, since None in mass=[{mstr}]')
        else:
            mass = [float(x) for x in mass]
        if len(mass):
            self.mass = mass
            self._parameters += ["-mass", *mass]
        if self.vel is not None:
            # NOTE(review): vel/acc are appended as a single nested element,
            # not unpacked like mass -- confirm to_process expects this form.
            self._parameters += ["-vel", self.vel]
        if self.acc is not None:
            self._parameters += ["-accel", self.acc]
        if build:
            self.to_process(osi)
def build_regular_node_mesh(osi, xs, ys, zs=None, active=None, tags=None):
    """
    Creates an array of nodes that are in a regular mesh

    The mesh has len(xs) nodes in the x-direction and len(ys) in the y-direction.
    If zs is not None then has len(zs) in the z-direction.

    Parameters
    ----------
    osi
        OpenSees instance; osi.ndm selects 2-D or 3-D node construction
    xs
        x-coordinates of the mesh columns
    ys
        y-coordinates of the mesh rows
    zs
        z-coordinates; a scalar or None is treated as a single z-plane
    active
        optional boolean mask; inactive positions get None instead of a Node
    tags: array_like
        array of node tags

    Returns
    -------
    np.array
        axis-0 = x-direction
        axis-1 = y-direction
        axis-2 = z  # not included if len(zs)=1 or zs=None
    """
    # axis-0 = x  # unless x or y are singular
    # axis-1 = y
    # axis-2 = z  # not included if len(zs)=1 or
    tag = None
    from numpy import array
    if not hasattr(zs, '__len__'):
        zs = [zs]  # normalise scalar/None z to a single plane
    sn = []
    for xx in range(len(xs)):
        sn.append([])
        for yy in range(len(ys)):
            if len(zs) == 1:
                # 2-D result: one node (or None) per (x, y) position.
                if tags is not None:
                    tag = tags[xx][yy]
                if active is None or active[xx][yy]:
                    if osi.ndm == 2:
                        pms = [osi, xs[xx], ys[yy]]
                    else:
                        pms = [osi, xs[xx], ys[yy], zs[0]]
                    sn[xx].append(Node(*pms, tag=tag))
                else:
                    sn[xx].append(None)
            else:
                # 3-D result: one node (or None) per (x, y, z) position.
                sn[xx].append([])
                for zz in range(len(zs)):
                    if tags is not None:
                        tag = tags[xx][yy][zz]
                    # Establish left and right nodes
                    if active is None or active[xx][yy][zz]:
                        sn[xx][yy].append(Node(osi, xs[xx], ys[yy], zs[zz], tag=tag))
                    else:
                        sn[xx][yy].append(None)
    # if len(zs) == 1:
    #     return sn[0]
    return array(sn)
def build_varied_y_node_mesh(osi, xs, ys, zs=None, active=None, tags=None):
    """
    Creates an array of nodes that are in vertical lines, but vary in height

    The mesh has len(xs)=len(ys) nodes in the x-direction and len(ys[0]) in the y-direction.
    If zs is not None then has len(zs) in the z-direction.

    Parameters
    ----------
    osi
        OpenSees instance; osi.ndm selects 2-D or 3-D node construction
    xs
        x-coordinates of the mesh columns
    ys
        y-coordinates, one row of heights per column (indexed ys[x][y])
    zs
        z-coordinates; a scalar or None is treated as a single z-plane
    active
        optional boolean mask; inactive positions get None instead of a Node
    tags: array_like, optional
        array of node tags (added for consistency with
        build_regular_node_mesh / build_varied_xy_node_mesh; default None
        keeps the previous auto-tagging behaviour)

    Returns
    -------
    np.array
        axis-0 = x-direction
        axis-1 = y-direction
        axis-2 = z  # not included if len(zs)=1 or zs=None
    """
    # axis-0 = x  # unless x or y are singular
    # axis-1 = y
    # axis-2 = z  # not included if len(zs)=1 or
    tag = None
    from numpy import array
    if not hasattr(zs, '__len__'):
        zs = [zs]  # normalise scalar/None z to a single plane
    sn = []
    for xx in range(len(xs)):
        sn.append([])
        for yy in range(len(ys[xx])):
            if len(zs) == 1:
                # 2-D result: one node (or None) per (x, y) position.
                if tags is not None:
                    tag = tags[xx][yy]
                if active is None or active[xx][yy]:
                    if osi.ndm == 2:
                        pms = [osi, xs[xx], ys[xx][yy]]
                    else:
                        pms = [osi, xs[xx], ys[xx][yy], zs[0]]
                    sn[xx].append(Node(*pms, tag=tag))
                else:
                    sn[xx].append(None)
            else:
                # 3-D result: one node (or None) per (x, y, z) position.
                sn[xx].append([])
                for zz in range(len(zs)):
                    if tags is not None:
                        tag = tags[xx][yy][zz]
                    # Establish left and right nodes
                    if active is None or active[xx][yy][zz]:
                        sn[xx][yy].append(Node(osi, xs[xx], ys[xx][yy], zs[zz], tag=tag))
                    else:
                        sn[xx][yy].append(None)
    # if len(zs) == 1:
    #     return sn[0]
    return array(sn)
def build_varied_xy_node_mesh(osi, xs, ys, zs=None, active=None, tags=None):
    """
    Creates an array of nodes that vary in both x and y but are still in a grid.

    The mesh has len(xs)=len(ys) nodes in the x-direction and len(ys[0]) in the y-direction.
    If zs is not None then has len(zs) in the z-direction.

    Parameters
    ----------
    osi
        OpenSees instance; osi.ndm selects 2-D or 3-D node construction
    xs
        x-coordinates, indexed xs[x][y] (both coordinates vary per position)
    ys
        y-coordinates, indexed ys[x][y]
    zs
        z-coordinates; a scalar or None is treated as a single z-plane
    active
        optional boolean mask; inactive positions get None instead of a Node
    tags
        optional array of node tags

    Returns
    -------
    np.array
        axis-0 = x-direction
        axis-1 = y-direction
        axis-2 = z  # not included if len(zs)=1 or zs=None
    """
    # axis-0 = x  # unless x or y are singular
    # axis-1 = y
    # axis-2 = z  # not included if len(zs)=1 or
    tag = None
    from numpy import array
    if not hasattr(zs, '__len__'):
        zs = [zs]  # normalise scalar/None z to a single plane
    sn = []
    for xx in range(len(xs)):
        sn.append([])
        for yy in range(len(ys[xx])):
            if len(zs) == 1:
                # 2-D result: one node (or None) per (x, y) position.
                if tags is not None:
                    tag = tags[xx][yy]
                if active is None or active[xx][yy]:
                    if osi.ndm == 2:
                        pms = [osi, xs[xx][yy], ys[xx][yy]]
                    else:
                        pms = [osi, xs[xx][yy], ys[xx][yy], zs[0]]
                    sn[xx].append(Node(*pms, tag=tag))
                else:
                    sn[xx].append(None)
            else:
                # 3-D result: one node (or None) per (x, y, z) position.
                sn[xx].append([])
                for zz in range(len(zs)):
                    if tags is not None:
                        tag = tags[xx][yy][zz]
                    # Establish left and right nodes
                    if active is None or active[xx][yy][zz]:
                        sn[xx][yy].append(Node(osi, xs[xx][yy], ys[xx][yy], zs[zz], tag=tag))
                    else:
                        sn[xx][yy].append(None)
    # if len(zs) == 1:
    #     return sn[0]
    return array(sn)
# UNUSED?
def duplicate_node(osi, node):
    """
    Re-declare an existing node (same tag) so it can be initialised in
    another processor when running in parallel mode.

    Note: the duplicate keeps the same node number as *node*.

    Parameters
    ----------
    osi
        OpenSees instance to declare the node in (osi.ndm selects 1/2/3-D)
    node : Node
        the node to copy; its tag, coordinates, and optional mass/vel/acc
        are forwarded verbatim

    Raises
    ------
    NotImplementedError
        if osi.ndm is not 1, 2 or 3
    """
    # Cleaned up: removed the unused `masses`, `none_found` and `pms` locals
    # and the needless `*[...]` unpacking from the original.
    if osi.ndm == 1:
        _parameters = [node.tag, node.x]
    elif osi.ndm == 2:
        _parameters = [node.tag, node.x, node.y]
    elif osi.ndm == 3:
        _parameters = [node.tag, node.x, node.y, node.z]
    else:
        raise NotImplementedError("Currently only supports 1-3D analyses")
    # Only forward the mass if the source node actually carries one.
    if hasattr(node, 'mass') and node.mass is not None:
        _parameters += ["-mass", *node.mass]
    if node.vel is not None:
        _parameters += ["-vel", node.vel]
    if node.acc is not None:
        _parameters += ["-accel", node.acc]
    osi.to_process('node', _parameters)
def repeat_node(osi, node, tag=None):
    """
    Copy a node to initialise in another processor in parallel mode.

    Note: unless *tag* is given, the copy gets a newly assigned node number.
    """
    ndm = osi.ndm
    if ndm not in (1, 2, 3):
        raise NotImplementedError("Currently only supports 1-3D analyses")
    coords = [node.x]
    if ndm >= 2:
        coords.append(node.y)
    if ndm == 3:
        coords.append(node.z)
    return Node(osi, *coords, tag=tag)
# def build_node_if_within_segment(osi, coords, segment):
# """
#
# Parameters
# ----------
# coords
# segment: array_like
# if osi.ndm = 1, segment is [[min_x], [max_x]]
# if osi.num = 2, segment is [[
#
# Returns
# -------
#
# """
# pass
def hash_coords(coords, nsf=8):
    """Build a hashable string key from a coordinate sequence.

    Each coordinate is rendered with *nsf* significant figures and the
    parts are joined with underscores, e.g. (1.0, 2.5) -> '1.0_2.5'.
    """
    parts = []
    for coord in coords:
        parts.append('{n:.{nsf}}'.format(n=float(coord), nsf=nsf))
    return '_'.join(parts)
def build_node_tag_hash_dict_from_mesh(ndm, xs, ys, zs=None, active=None, init_tag=0, nsf=8):
    """
    Build a dict mapping coordinate-hash strings to (tag, *coords) tuples
    for every active grid position, without constructing Node objects.

    Parameters
    ----------
    ndm : int
        number of model dimensions (2 or 3)
    xs : array_like
        x-coordinates; 1-D, or 2-D when x varies along y
    ys : array_like
        y-coordinates; 1-D, or 2-D when y varies along x
    zs : array_like or scalar, optional
        z-coordinates; a scalar or None is treated as a single z-plane
    active : ndarray, optional
        boolean mask of active positions; a 3-D mask is indexed by z as well
    init_tag : int
        first node tag to assign
    nsf : int
        significant figures used by hash_coords for the dict keys

    Returns
    -------
    (dict, int)
        the hash dict and the next unused tag
    """
    # axis-0 = x  # unless x or y are singular
    # axis-1 = y
    # axis-2 = z  # not included if len(zs)=1 or
    import numpy as np
    xs = np.array(xs)
    ys = np.array(ys)
    nx = xs.shape[0]
    # BUGFIX: was ys.shape[min(len(ys.shape), 1)], which raises IndexError
    # for 1-D ys; the last axis holds the y-count in both 1-D and 2-D cases.
    ny = ys.shape[-1]
    if not hasattr(zs, '__len__'):
        zs = [zs]
    # BUGFIX: was zs.shape[-1], which crashed whenever zs is a plain list
    # (including the default zs=None path); len() handles lists and arrays.
    nz = len(zs)
    # BUGFIX: active.shape crashed when active=None (the default), and the
    # fallback azi was sized by len(xs) instead of the z-count it indexes.
    if active is not None and len(np.shape(active)) == 3:
        azi = np.arange(nz)
    else:
        azi = [None] * nz
    # None index sentinels select the sole row/column when xs or ys is 1-D.
    if len(xs.shape) >= 2:
        yind4x = np.arange(ny)
    else:
        yind4x = [None] * ny
    if len(ys.shape) >= 2:
        xind4y = np.arange(nx)
    else:
        xind4y = [None] * nx
    sd = {}
    tag = init_tag
    for xx in range(nx):
        for yy in range(ny):
            for zz in range(nz):
                if active is None or active[xx, yy, azi[zz]]:
                    if ndm == 2:
                        pms = [xs[xx, yind4x[yy]], ys[xind4y[xx], yy]]
                    else:
                        pms = [xs[xx, yind4x[yy]], ys[xind4y[xx], yy], zs[zz]]
                    fstr = hash_coords(pms, nsf)
                    if fstr not in sd:
                        sd[fstr] = []
                    sd[fstr].append((tag, *pms))
                    tag += 1
    return sd, tag
|
# -*- coding: utf-8 -*-
"""
Created on Sat May 16 21:13:51 2020
@author: Sony
"""
import time
import numpy as np
import pandas as pd
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import sklearn.metrics
from sklearn import metrics
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier
# Load the filtered CIC/AWS botnet flow dataset (absolute local path).
datos = pd.read_csv('C://Users//Sony//Desktop//TESIS 2//CIC_AWS_Filtrado.csv')
df=pd.DataFrame(datos)
# Feature matrix: per-flow statistics; the class label lives in 'Output'.
X = datos[['Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
           'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
           'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts']]
y=datos['Output']
# 80/20 hold-out split (no random_state, so results differ between runs).
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
# NOTE(review): bare .shape expressions are REPL leftovers -- they have no
# effect when this file runs as a script.
X_train.shape
X_test.shape
y_train.shape
y_test.shape
# Train a linear SVM (one-vs-rest) and time the fit.
start = time.time()
model=sklearn.svm.LinearSVC(C=175, class_weight=None, dual=False, fit_intercept=True,
                            intercept_scaling=1, loss='squared_hinge', max_iter=12000,
                            multi_class='ovr', penalty='l2', random_state=None, tol=1e-10,
                            verbose=0)
model.fit(X_train,y_train)
end = time.time()
print ("SVC Linear", end - start)
# Evaluate on the hold-out set (user-facing messages are Spanish; unchanged).
predictions=model.predict(X_test)
print(classification_report(y_test,predictions))
print("PRECISIÓN PARA DETECTAR DIFERENTES MUESTRAS DE BOTNET ", metrics.precision_score(y_test, predictions,average=None, zero_division='warn')*100)
print("RECALL PARA DETECTAR DIFERENTES MUESTRAS DE BOTNET: ", metrics.recall_score(y_test, predictions, average=None, zero_division='warn')*100)
print ("EXACTITUD DEL MODELO: ", sklearn.metrics.accuracy_score(y_test, predictions, normalize=True)*100)
# Validate the model: 5-fold cross-validation accuracy on the training split.
results = cross_val_score(model, X_train, y_train,scoring='accuracy', cv=5)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100, results.std()*100))
print ('CROSS-VALIDATION SCORES:')
print(results)
# Predict one hand-crafted benign flow.
# NOTE(review): this rebinds X_test, discarding the evaluation split above.
X_test = pd.DataFrame(columns=('Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
                               'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
                               'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts','Output'))
X_test.loc[0] = (3389,17,119996043,40,56,1175,45187,0,0,29.375,806.9107143,386.3627403,0.800026381,0.333344325,0.466682056,1175,45187,56,40,0)
y_pred = model.predict(X_test.drop(['Output'], axis = 1))
print("Prediccion: " + str(y_pred))
# Predict one hand-crafted bot flow.
X_test = pd.DataFrame(columns=('Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
                               'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
                               'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts','Output'))
X_test.loc[0] = (8080,6,10869,3,4,326,129,0,0,108.6666667,32.25,41862.17683,644.0334897,276.0143527,368.019137,326,129,4,3,1)
y_pred = model.predict(X_test.drop(['Output'], axis = 1))
print("Prediccion: " + str(y_pred))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-05 13:45
from __future__ import unicode_literals
from django.db import migrations
def create_badges(apps, schema):
    """Data migration: seed one 'First X completed' badge per task category.

    The five records are identical except for the category slug/label, so
    they are created in a loop (the original repeated the create() call five
    times and misleadingly named the Badge model `category_model`). The
    resulting rows are byte-identical to the originals.
    """
    badge_model = apps.get_model('core.Badge')
    for slug, label in [
        ('mystery', 'Mystery'),
        ('geo', 'Geo'),
        ('selfie', 'Selfie'),
        ('team', 'Team'),
        ('stranger', 'Stranger'),
    ]:
        badge_model.objects.create(
            name="First {} completed".format(label),
            description="Complete 1 {} task".format(label),
            slug=slug,
            # Validator expression evaluated elsewhere against `user_calls`.
            validator="int(bool(user_calls.filter(task__category__slug='{}').count()))".format(slug),
        )
class Migration(migrations.Migration):
    # Data migration: seeds the initial Badge rows (see create_badges above).

    dependencies = [
        ('core', '0025_badge_slug'),
    ]

    operations = [
        # NOTE(review): no reverse_code is supplied, so this migration is
        # irreversible; pass migrations.RunPython.noop to allow rollback.
        migrations.RunPython(create_badges)
    ]
|
from django.conf.urls import patterns, include, url
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10 -- replace with a plain list of url() entries on upgrade.
# Every include is mounted at the root prefix; Django dispatches to the
# first pattern that matches, so include order matters here.
urlpatterns = patterns('',
    url(r'', include('travelpad.urls')),
    url(r'', include('travelpad.urls_profile')),
    url(r'', include('travelpad.urls_itineraries')),
    url(r'', include('travelpad.urls_invitation')),
    url(r'', include('travelpad.urls_itinerary')),
    url(r'', include('travelpad.urls_event')),
    url(r'', include('travelpad.urls_map')),
    url(r'', include('travelpad.urls_login')),
    url(r'', include('travelpad.urls_expense')),
)
|
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """
        Remove duplicates from the sorted list *nums* in place and return
        the de-duplicated length.

        The original implementation called nums.pop(i) inside the scan; each
        pop shifts the tail, making the whole pass O(n^2). This version
        compacts unique values forward and truncates once, which is O(n) and
        leaves *nums* in exactly the same final state (fully de-duplicated).
        """
        write = 0  # next index to place a unique value at
        for value in nums:
            # Keep the value if it differs from the last value we kept.
            if write == 0 or nums[write - 1] != value:
                nums[write] = value
                write += 1
        # Drop the leftover tail so nums matches the returned length.
        del nums[write:]
        return write
import common
import prolog
import config as cfg
from os.path import isfile
class Aleph:
    """Driver for the Aleph ILP system: writes its input files and shells
    out to Prolog for training and testing."""
    name = 'aleph'
    aleph_path = 'aleph/aleph'    # Prolog source of Aleph itself
    aleph_runner = 'aleph/runner' # helper script used at test time

    def __init__(self):
        pass

    def parse_train(self, datafile, outpath, game, target):
        # Emit one Aleph problem per sub-target: background (.b),
        # positive examples (.f) and negative examples (.n).
        for (subtarget, bk, pos, neg) in common.parse_target(datafile):
            prims = set(map(common.pred, bk))
            (p, a) = subtarget
            subtarget = '{}/{}'.format(p, a)  # predicate/arity notation
            prims = list(map(lambda x: '{}/{}'.format(x[0], x[1]), prims))
            fname = outpath + p
            with open(fname + '.b', 'w') as f:
                # Declare every background predicate as usable in hypotheses.
                for prim in prims:
                    f.write(':- determination({},{}).\n'.format(subtarget, prim))
                for atom in bk:
                    f.write('{}.\n'.format(atom))
            with open(fname + '.f', 'w') as f:
                for atom in pos:
                    f.write('{}.\n'.format(atom))
            with open(fname + '.n', 'w') as f:
                for atom in neg:
                    f.write('{}.\n'.format(atom))

    def parse_test(self, datafile, outpath, game, target):
        # Test data uses the shared format; delegate to the common writer.
        for (subtarget, bk, pos, neg) in common.parse_target(datafile):
            common.parse_test(outpath, subtarget, bk, pos, neg)

    def train(self, inpath, outfile, target):
        # Run Aleph under YAP: read the problem, induce rules, dump them.
        infile = inpath + target
        cmd = "set(verbose,0),read_all('{}'),induce_modes,induce,write_rules('{}'),halt.".format(infile, outfile)
        prolog.yap(cmd, [self.aleph_path], outfile=None, timeout=cfg.learning_timeout)

    def do_test(self, dataf, programf, outf):
        # Evaluate under SWI-Prolog; fall back to running without a learned
        # program when training produced no output file.
        if isfile(programf):
            prolog.swipl('do_test,halt.', [dataf, programf, self.aleph_runner], outf, timeout=None)
        else:
            prolog.swipl('do_test,halt.', [dataf, self.aleph_runner], outf, timeout=None)
from compmusic import dunya
# SECURITY NOTE(review): API token hardcoded in source -- move it to an
# environment variable or config file before sharing/committing this script.
dunya.set_token('ad57ef18f8c3a2f4962b7883ac6ed38b3578ba38')
# Fetch full detail for every Carnatic recording (one API call per
# recording, so this can take a long time) and dump the result to JSON.
a = dunya.carnatic.get_recordings(recording_detail=True)
import json
with open('carnatic.json', 'w') as fp:
    json.dump(a, fp)
|
# Scrape recent StockTwits messages for a set of tickers and save them to CSV.
# BUGFIX: every assignment was written "< -" (which R parses as a comparison
# against a negated value, i.e. a syntax/runtime error); fixed to "<-".
# NOTE(review): fromJSON() needs a JSON package (RJSONIO/rjson style, given
# the list handling below) loaded by the caller -- confirm which one;
# jsonlite's fromJSON would return a different structure.

msgs.df <- c()                        # empty container for collected rows
trending_stock <- c("SSNLF", "GILD")  # tickers to fetch

stock_twits <- function(msgs.df, stock)
{
  # One API URL per ticker
  url <- paste("https://api.stocktwits.com/api/2/streams/symbol/", stock, ".json", sep="")
  msgs <- c()
  for (i in c(1:length(url)))       # loop over every ticker
  {
    raw <- readLines(url[i], warn="F")
    resp <- fromJSON(raw)           # parse the JSON response
    for (j in c(1:length(resp$messages)))
    {
      msg <- unlist(resp$messages[j])
      # Keep message id, body, timestamp and the ticker symbol
      msgs <- rbind(msgs, c(msg["id"], msg["body"], msg["created_at"], stock[i]))
    }
  }
  # Append the new rows into the caller's msgs.df (deduplicated)
  eval.parent(substitute(msgs.df <- unique(rbind(msgs.df, msgs))))
}

stock_twits(msgs.df, trending_stock)
# NOTE(review): write.csv ignores col.names/append (it warns about them);
# switch to write.table if those options are actually needed.
write.csv(msgs.df, file="C:/Users/user1/stock-twits.csv",
          row.names = FALSE, col.names = FALSE, append = TRUE)
|
import os
from pathlib import Path
import unittest
import crowsetta
import pandas as pd
import vak.io.spect
import vak.files.spect
from vak.config.validators import VALID_AUDIO_FORMATS
HERE = Path(__file__).parent
TEST_DATA_DIR = HERE.joinpath('..', '..', 'test_data')
SETUP_SCRIPTS_DIR = HERE.joinpath('..', '..', 'setup_scripts')
class TestFindAudioFname(unittest.TestCase):
    """class to test find_audio_fname function"""

    def setUp(self):
        # ---- in .mat files -------------------------------
        self.spect_dir_mat = TEST_DATA_DIR.joinpath('mat', 'llb3', 'spect')
        self.spect_list_mat = list(self.spect_dir_mat.glob('*.mat'))
        self.spect_list_mat = [str(path) for path in self.spect_list_mat]

        # ---- in .npz files, made from .cbin audio files -------------------------------
        # glob may match several generated directories; use the first found
        self.spect_dir_npz = list(TEST_DATA_DIR.joinpath('prep/train').glob(
            'spectrograms_generated*')
        )
        self.spect_dir_npz = self.spect_dir_npz[0]
        self.spect_list_npz = list(self.spect_dir_npz.glob('*.spect.npz'))
        self.spect_list_npz = [str(path) for path in self.spect_list_npz]

    def test_with_mat(self):
        audio_fnames = [vak.files.spect.find_audio_fname(spect_path)
                        for spect_path in self.spect_list_mat]
        for mat_spect_path, audio_fname in zip(self.spect_list_mat, audio_fnames):
            # make sure we got back a filename that was actually in spect_path
            self.assertTrue(audio_fname in mat_spect_path)
            # make sure it's some valid audio format
            self.assertTrue(
                Path(audio_fname).suffix.replace('.', '') in VALID_AUDIO_FORMATS
            )

    def test_with_npz(self):
        audio_fnames = [vak.files.spect.find_audio_fname(spect_path)
                        for spect_path in self.spect_list_npz]
        for npz_spect_path, audio_fname in zip(self.spect_list_npz, audio_fnames):
            # make sure we got back a filename that was actually in spect_path
            self.assertTrue(audio_fname in npz_spect_path)
            # make sure it's some valid audio format
            self.assertTrue(
                Path(audio_fname).suffix.replace('.', '') in VALID_AUDIO_FORMATS
            )
class TestToDataframe(unittest.TestCase):
"""class to test vak.io.dataframe.from_files function"""
def setUp(self):
self.spect_dir = TEST_DATA_DIR.joinpath('mat', 'llb3', 'spect')
self.spect_files = self.spect_dir.glob('*.mat')
self.spect_files = sorted([str(path) for path in self.spect_files])
self.spect_format = 'mat'
self.annot_mat = TEST_DATA_DIR.joinpath('mat', 'llb3', 'llb3_annot_subset.mat')
self.annot_mat = str(self.annot_mat)
self.scribe = crowsetta.Transcriber(annot_format='yarden')
self.annot_list = self.scribe.from_file(self.annot_mat)
self.labelset_mat = {1, 2, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19}
def _check_df_returned_by_from_files(self, vak_df):
"""assertions that are shared across unit tests for vak.io.dataframe.from_files"""
self.assertTrue(
type(vak_df) == pd.DataFrame
)
spect_files_from_test_data = [os.path.basename(spect_path)
for spect_path in self.spect_files]
spect_files_from_df = [os.path.basename(spect_path)
for spect_path in vak_df['spect_path']]
self.assertTrue(
all([spect_file in spect_files_from_test_data
for spect_file in spect_files_from_df])
)
# if all assertTrues were True
return True
def test_spect_dir_annot(self):
# test that from_files works when we point it at directory + give it list of annotations
vak_df = vak.io.spect.to_dataframe(self.spect_format,
spect_dir=self.spect_dir,
labelset=self.labelset_mat,
annot_list=self.annot_list)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_dir_annot_no_labelset(self):
# test that from_files works when we point it at directory + give it list of annotations
# but do not give it a labelset to filter out files
vak_df = vak.io.spect.to_dataframe(self.spect_format,
spect_dir=self.spect_dir,
labelset=None,
annot_list=self.annot_list)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_dir_without_annot(self):
# make sure we can make a dataset from spectrogram files without annotations,
# e.g. if we're going to predict the annotations using the spectrograms
vak_df = vak.io.spect.to_dataframe(self.spect_format,
spect_dir=self.spect_dir,
annot_list=None)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_files_annot(self):
# test that from_files works when we give it list of spectrogram files and a list of annotations
vak_df = vak.io.spect.to_dataframe(self.spect_format,
spect_files=self.spect_files,
labelset=self.labelset_mat,
annot_list=self.annot_list)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_files_annot_no_labelset(self):
# test that from_files works when we give it list of spectrogram files and a list of annotations
# but do not give it a labelset to filter out files
vak_df = vak.io.spect.to_dataframe(self.spect_format,
spect_files=self.spect_files,
labelset=None,
annot_list=self.annot_list)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_annot_map(self):
# test that from_files works when we give it a dict that maps spectrogram files to annotations
# but do not give it a labelset to filter out files
spect_annot_map = dict(zip(self.spect_files, self.annot_list))
vak_df = vak.io.spect.to_dataframe(self.spect_format,
labelset=self.labelset_mat,
spect_annot_map=spect_annot_map)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_spect_annot_map_no_labelset(self):
# test that from_files works when we give it a dict that maps spectrogram files to annotations
# but do not give it a labelset to filter out files
spect_annot_map = dict(zip(self.spect_files, self.annot_list))
vak_df = vak.io.spect.to_dataframe(self.spect_format,
labelset=None,
spect_annot_map=spect_annot_map)
self.assertTrue(
self._check_df_returned_by_from_files(vak_df)
)
def test_bad_inputs_raise(self):
# must specify one of: spect dir, spect files, or spect files/annotations mapping
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(spect_format='npz',
spect_dir=None,
spect_files=None,
annot_list=self.annot_list,
spect_annot_map=None)
# invalid spect format
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(spect_format='npy',
spect_dir=self.spect_dir,
spect_files=self.spect_files,
annot_list=self.annot_list)
# can't specify both dir and list
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(self.spect_format,
spect_dir=self.spect_dir,
spect_files=self.spect_files,
annot_list=self.annot_list)
# can't specify both dir and spect_annot_map
spect_annot_map = dict(zip(self.spect_files, self.annot_list))
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(self.spect_format,
spect_dir=self.spect_dir,
spect_annot_map=spect_annot_map)
# can't specify both list and spect_annot_map
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(self.spect_format,
spect_files=self.spect_files,
spect_annot_map=spect_annot_map)
# can't specify both annotations list and spect_annot_map
with self.assertRaises(ValueError):
vak.io.spect.to_dataframe(self.spect_format,
spect_annot_map=spect_annot_map,
annot_list=self.annot_list)
# run the test suite when this module is executed directly
if __name__ == '__main__':
    unittest.main()
|
import csv
import tensorflow as tf
tf.enable_eager_execution()  # TF 1.x: evaluate ops immediately instead of building a graph
'''
This program takes in a csv file of EEG data output from chronoSync.py and creates a Tensorflow dataframe object from it
'''
# This function formats each line of CSV into features (8 tensors with numSamples number of EEG readings) and a class label for the button pressed
def _parse_csv_row(*vals):
    """Format one CSV row into (features, class_label).

    features: dict mapping electrode names to tensors of numSamples EEG readings.
    class_label: index of the pressed button (first column of the row).
    """
    # The label (keypress) is the first column of the row
    class_label = tf.argmax(vals[0], axis=0)
    # BUG FIX: the original looped over range(1, numElectrodes), which built
    # only numElectrodes - 1 names/tensors and silently dropped the last
    # electrode. Loop over all numElectrodes electrodes instead.
    featureNames = ['Electrode' + str(i + 1) + ' Samples'
                    for i in range(numElectrodes)]
    # Now for each individual electrode grab the samples and put them in a
    # tensor (electrode 1 starts at vals[2])
    electrodeTensors = []
    firstSampleOfElectrode = 2
    for _ in range(numElectrodes):
        lastSampleOfElectrode = firstSampleOfElectrode + numSamples
        electrodeTensors.append(tf.convert_to_tensor(vals[firstSampleOfElectrode:lastSampleOfElectrode]))
        # slide the window to grab the next electrode's values
        firstSampleOfElectrode = lastSampleOfElectrode
    # Pair each tensor of electrode samples with its name, as TF expects
    features = dict(zip(featureNames, electrodeTensors))
    return features, class_label
# Open the CSV to extract numSamples - tensorflow needs this before it can
# build the dataset (the number of columns depends on it)
numSamples = 0
numElectrodes = 8
with open('exampleEEGData.csv') as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    # only the first row is needed; the `with` block closes the file, so the
    # old explicit csv_file.close() after it was redundant and has been removed
    for row in csv_reader:
        numSamples = int(row[1])
        break
# Now that we have the number of samples, read the CSV as a tensorflow
# dataset object. Columns: label + numSamples column + 8*numSamples samples
defaults = [tf.float32] * (2 + (8 * numSamples))
dataset = tf.contrib.data.CsvDataset(['exampleEEGData.csv'], defaults)
dataset = dataset.map(_parse_csv_row)
print(dataset)
|
#!/usr/bin/env python
import os
import sys
import time
from boto.ec2.connection import EC2Connection
# change these as desired
#
# EC2 keypair to attach to instance on boot
# EC2 keypair to attach to the instance on boot; overridable via env var
KEY = 'james-mac'
if os.environ.has_key('EC2_SSH_KEY'):
    KEY = os.environ['EC2_SSH_KEY']
# probably don't want to change these:
# role tag to search for / apply, taken from the first CLI argument
ROLE = sys.argv[1]
ZONE = 'us-east-1b'
SEC_GROUP = ['default']
AMI = 'ami-eafa5883'
INST_TYPE = 'm1.small'
# cloud-init user-data: installs base OS packages on first boot
USER_DATA = """#!/bin/bash
set -e
# install basic OS packages
sleep 15
apt-get update
apt-get install -yf ntp git bzr mercurial curl build-essential
"""
#######################################################
# Create EC2 conn
print "Creating EC2 conn using env: AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY"
print "Will boot instance using keypair (set: EC2_SSH_KEY to override): %s" % KEY
ec2 = EC2Connection(os.environ['AWS_ACCESS_KEY_ID'],
os.environ['AWS_SECRET_ACCESS_KEY'])
# Describe instances
print "Looking for existing EC2 instance with role: %s" % ROLE
target_inst = None
for res in ec2.get_all_instances():
for inst in res.instances:
print " id=%s tags=%s" % (inst.id, str(inst.tags))
if inst.tags.has_key('role') and inst.tags['role'] == ROLE:
if inst.state == 'terminated':
print "Found terminated EC2 instance. id=%s" % inst.id
else:
print "Found existing EC2 instance: id=%s state=%s ip=%s" % (inst.id, inst.state, inst.ip_address)
target_inst = inst
# Boot instance if not found
if not target_inst:
print "Existing EC2 instance not found - booting"
resp = ec2.run_instances(AMI,
key_name=KEY,
instance_type=INST_TYPE,
security_groups=SEC_GROUP,
user_data=USER_DATA)
inst = resp.instances[0]
inst.add_tag("role", ROLE)
# Poll until instance available
print "Waiting for instance to become available"
while True:
instances = ec2.get_all_instances([inst.id])
if instances and len(instances) > 0:
if instances[0].instances[0].state=='running':
target_inst = instances[0].instances[0]
break
else:
time.sleep(5)
print "Instance booted: id=%s ip_addr=%s" % (target_inst.id, target_inst.ip_address)
print "Done!"
|
import serial
import subprocess
import os
# sentinel hex value sent over serial that means "shut the computer down"
enteroApagar = int("0xFE7887", 16)
ser = serial.Serial('/dev/ttyACM0', 9600)
while True:
    # each line coming from the serial device is a hex number
    lectura = int(ser.readline(), 16)
    if lectura == enteroApagar:
        # FIX: removed the unused `operative = os.name` local; the value was
        # assigned but never read
        if os.name == 'posix':
            subprocess.call(["sudo", "shutdown", "-h", "now"])  # ubuntu
        else:  # in case you just have linux and windows on your machine
            subprocess.call(["shutdown", "/s"])  # for windows
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-01 20:01
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds Scholarship.hours_schedule and
    # points Member.institution at member.InstitutionDepartment.

    dependencies = [
        ('member', '0002_auto_20170901_1946'),
    ]

    operations = [
        # new integer field defaulting to a 40-hour schedule
        migrations.AddField(
            model_name='scholarship',
            name='hours_schedule',
            field=models.IntegerField(default=40),
        ),
        # FK now cascades deletes from InstitutionDepartment to Member
        migrations.AlterField(
            model_name='member',
            name='institution',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='member.InstitutionDepartment'),
        ),
    ]
|
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from .agc_layer import AGCLayer as InputEncoding
from .selectscale_hc import SelectscaleHyperConv
from .selectframe_tc import SelectframeTemConv
from .utils import *
from graph.nturgbd import *
from graph.kinetics import *
class unit_tcn(nn.Module):
    """Temporal convolution unit: (kernel_size x 1) conv over time, then batch norm."""

    def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
        super(unit_tcn, self).__init__()
        # 'same' padding along the temporal axis
        pad = (kernel_size - 1) // 2
        self.conv = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=(kernel_size, 1),
            padding=(pad, 0),
            stride=(stride, 1),
        )
        self.bn = nn.BatchNorm2d(out_channels)
        self.drop = nn.Dropout(0.5, inplace=True)
        self.relu = nn.ReLU()
        conv_init(self.conv)
        bn_init(self.bn, 1)

    def forward(self, x):
        # NOTE: no activation/dropout here; drop and relu are defined but
        # unused in forward (matches the original behavior)
        return self.bn(self.conv(x))
class SelectSTHCBlock(nn.Module):
    '''Selective Spatial Temporal Hypergraph Convolution Block.'''

    def __init__(self, in_channels, out_channels, A, G_part, G_body, num_point, num_frame, stride=1, pool_channels=None, residual=True):
        super(SelectSTHCBlock, self).__init__()
        self.stride = stride
        self.gcn1 = SelectscaleHyperConv(in_channels, out_channels, A, G_part, G_body, num_point, num_frame)
        self.tcn1 = unit_tcn(out_channels, out_channels, stride=1)
        self.relu = nn.ReLU()
        if self.stride == 2:
            self.pool = SelectframeTemConv(*pool_channels)
        # pick the residual branch: zero, identity, or a 1x1 temporal conv
        # that matches the channel count
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels and stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=1)

    def forward(self, x, N):
        shortcut = self.residual(x)
        out = self.tcn1(self.gcn1(x))
        # NOTE: frame-selection pooling for stride-2 blocks is currently
        # disabled (the original kept the implementation commented out here)
        return self.relu(out + shortcut)
class Model(nn.Module):
    '''Selective-HCN Model.

    Input: (N, C, T, V, M) skeleton tensor; output: (N, num_class) logits.
    '''

    def __init__(self, num_class=120, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3):
        super(Model, self).__init__()
        if graph is None:
            raise ValueError()
        else:
            Graph = import_class(graph)
            self.graph = Graph(**graph_args)
        self.hypergraph = Hypergraph()
        A = self.graph.A
        G_part = self.hypergraph.G_part
        G_body = self.hypergraph.G_body
        self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
        self.l1 = InputEncoding(3, 64, A, residual=False)
        self.l2 = SelectSTHCBlock(64, 64, A, G_part, G_body, num_point, 300)
        # l3 and l7 are intentionally left out (kept commented out in the
        # original) to make the network shallower
        self.l4 = SelectSTHCBlock(64, 64, A, G_part, G_body, num_point, 300)
        self.l5 = SelectSTHCBlock(64, 128, A, G_part, G_body, num_point, 300, stride=2, pool_channels=[128, 300])
        self.l6 = SelectSTHCBlock(128, 128, A, G_part, G_body, num_point, 150)
        self.l8 = SelectSTHCBlock(128, 256, A, G_part, G_body, num_point, 150, stride=2, pool_channels=[256, 150])
        self.l9 = SelectSTHCBlock(256, 256, A, G_part, G_body, num_point, 75)
        # BUG FIX: l10 was constructed as SelectSTHCBlock(256, 256, A),
        # missing the required G_part/G_body/num_point/num_frame arguments,
        # which raised a TypeError at model construction.
        self.l10 = SelectSTHCBlock(256, 256, A, G_part, G_body, num_point, 75)
        self.fc = nn.Linear(256, num_class)
        nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
        bn_init(self.data_bn, 1)

    def forward(self, x):
        N, C, T, V, M = x.size()
        # fold person/joint/channel dims together for the 1d batch norm
        x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        x = self.data_bn(x)
        # reshape to (N*M, C, T, V) for the graph-conv blocks
        x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
        x = self.l1(x)
        x = self.l2(x, N)
        # BUG FIX: forward previously called self.l3 / self.l7, which are not
        # created in __init__ (they are commented out) and raised
        # AttributeError; the calls were removed to match the layer list.
        x = self.l4(x, N)
        x = self.l5(x, N)
        x = self.l6(x, N)
        x = self.l8(x, N)
        x = self.l9(x, N)
        # BUG FIX: self.l10 is a SelectSTHCBlock whose forward requires N
        x = self.l10(x, N)
        # N*M,C,T,V -> global average over time/joints, then over persons
        c_new = x.size(1)
        x = x.view(N, M, c_new, -1)
        x = x.mean(3).mean(1)
        return self.fc(x)
|
import subprocess
""" suppresssing errors by passing them to DEVNULL
"""
try:
completed = subprocess.run(
'echo to stdout; echo to stderr 1>&2; exit 1',
shell=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
except subprocess.CalledProcessError as err:
print('ERROR: ', err)
else:
print('returncode:', completed.returncode)
print(f"stdout is {completed.stdout}")
print(f"stderr is {completed.stderr}")
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from youtube_podcast_api.config import get_settings
# Parse .env file for settings
# settings = get_settings()
# SQLite database file located at the path configured via .env (see get_settings)
SQLALCHEMY_DATABASE_URL = f"sqlite:///{get_settings().db_path}"
engine = create_engine(
    # check_same_thread=False allows the connection to be used from a thread
    # other than the one that created it (required for SQLite under FastAPI)
    SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
)
# session factory bound to the engine; transactions are committed explicitly
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# base class for ORM model declarations
Base = declarative_base()
def get_db():
    """Dependency generator: yield a database session, always closing it afterwards."""
    session = SessionLocal()
    try:
        yield session
    finally:
        # guarantee the session is released even if the request errors out
        session.close()
|
def _jupyter_server_extension_paths():
return [{
"module": "nbextension_toc"
}]
# Jupyter Extension points
def _jupyter_nbextension_paths():
return [dict(
section="notebook",
src="static",
dest="nbextension_toc",
require="nbextension_toc/toc")]
def load_jupyter_server_extension(nbapp):
    """Entry point called by the notebook server; just log that we loaded."""
    nbapp.log.info("ToC nbextension enabled!")
import json
import os
import requests
import ssl
import sys
import urlparse
import webbrowser
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from multiprocessing import Process
from os.path import isdir, isfile
# self-signed certificate used by the local HTTPS redirect server
CERT_FILE_PATH = './server.pem'
BASE_FB_GRAPH_URL = "https://graph.facebook.com/v2.11"
if not isfile(CERT_FILE_PATH):
    raise Exception("At first you should create PEM file - \n\t"
                    "$ openssl req -new -x509 -keyout server.pem -out server.pem -days 365 -nodes")
# app id and secret are mandatory command-line arguments
if len(sys.argv) < 3:
    raise Exception("Script run example -\n\n$ %s <APP_ID> <APP_SECRET>" % os.path.basename(__file__))
app_id, app_secret = sys.argv[1:3]
# local HTTPS endpoint that Facebook redirects back to after login
http_port = 4443
redirect_url = "https://localhost:%d/" % http_port
# hub_home = expanduser("~") + "/.ads-hub"
hub_home = "./.auth"
if not isdir(hub_home):
    os.makedirs(hub_home)
# the long-lived token is stored here as JSON
token_file_path = hub_home + "/fb-token"
class FbAuthServer(BaseHTTPRequestHandler):
    # Handles the OAuth redirect from Facebook: exchanges the returned `code`
    # for a short-lived token, upgrades it to a long-lived token, writes it
    # to disk, then terminates the whole server process via os._exit.

    def _set_headers(self):
        # standard 200 / text-html headers for the success page
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()

    def do_GET(self):
        print self.path
        # only requests of the form /?key=value&... are interesting
        if len(self.path.split("?")) == 2:
            data_dict = dict(urlparse.parse_qsl(self.path.split("?")[1]))
            if "code" in data_dict:
                # step 1: exchange the auth code for a short-lived access token
                token_url = "%s/oauth/access_token?" \
                            "client_id=%s&client_secret=%s&code=%s&redirect_uri=%s" \
                            % (BASE_FB_GRAPH_URL, app_id, app_secret, data_dict["code"], redirect_url,)
                print token_url
                res = requests.get(token_url, allow_redirects=True)
                print res.text
                if res.status_code != 200:
                    print res.status_code
                    # hard-exit the server process on failure
                    os._exit(1)
                short_lived_token = json.loads(res.text)["access_token"]
                print "Short lived: " + short_lived_token
                # step 2: upgrade the short-lived token to a long-lived one
                token_url = "%s/oauth/access_token?" \
                            "grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s" \
                            % (BASE_FB_GRAPH_URL, app_id, app_secret, short_lived_token)
                res = requests.get(token_url, allow_redirects=True)
                data = json.loads(res.text)
                print "Long lived: " + data["access_token"]
                # step 3: persist token plus app credentials to the token file
                data["app_id"] = app_id
                data["app_secret"] = app_secret
                data_str = json.dumps(data)
                f = open(token_file_path, 'w+')
                f.write(data_str)
                f.close()
                # step 4: render a success page, then exit the server process
                self._set_headers()
                self.wfile.write(
                    "<html>"
                    + "<body>"
                    + " <h1>Store token!</h1>"
                    + " <h2>Success!!</h2>"
                    + data_str
                    + "</body>"
                    + "</html>")
                os._exit(0)
def run(server_class=HTTPServer, handler_class=FbAuthServer, port=4443):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
httpd.socket = ssl.wrap_socket(httpd.socket, certfile=CERT_FILE_PATH, server_side=True)
print 'Starting httpd...'
httpd.serve_forever()
# run the HTTPS redirect handler in a child process so this script can
# continue and open the browser
p = Process(target=run, args=(HTTPServer, FbAuthServer, http_port,))
p.start()
# permissions requested from the user
reading_scopes = (
    "ads_read",
    "ads_management",
)
# send the user to Facebook's OAuth dialog; it redirects back to our server
url = "https://www.facebook.com/v2.8/dialog/oauth?" \
      "client_id=%s&redirect_uri=%s&scope=%s" \
      % (app_id, redirect_url, ",".join(reading_scopes))
print url
webbrowser.open_new(url)
# block until the handler stores the token and exits its process
p.join()
|
# -*- coding: utf-8 -*-
import unittest
from chakert import Typograph
def highlight(txt):
    """Make invisible typography visible: NBSP becomes ␣, non-breaking hyphen becomes =."""
    marked = txt.replace(u'\u00a0', u'␣')
    return marked.replace(u'\N{NON-BREAKING HYPHEN}', u'=')
class BaseTests(unittest.TestCase):
    """Shared assertion helpers for typograph tests; subclasses must set `self.lang`."""

    def assertText(self, text, *args, **kwargs):
        """Typograph plain text and check its highlighted form is one of *args."""
        check_html = kwargs.pop('check_html', True)
        value = Typograph.typograph_text(text, self.lang, **kwargs)
        value_hl = highlight(value)
        if value_hl not in args:
            # debugging aid: show the actual result next to the expected variants
            print('\n'+value_hl)
            for arg in args:
                print(arg)
            print('\n')
        self.assertIn(value_hl, args)
        if check_html:
            # running the result through the HTML typographer must not change it
            value2 = Typograph.typograph_html(value, self.lang)
            if value != value2:
                print('\n'+highlight(value)+'\n'+highlight(value2))
            self.assertEqual(highlight(value), highlight(value2))

    def assertHtml(self, text, *args, **kwargs):
        """Typograph HTML and check its highlighted form is one of *args."""
        value = Typograph.typograph_html(text, self.lang, **kwargs)
        value_hl = highlight(value)
        if value_hl not in args:
            print('\n')
            print(value_hl)
            for arg in args:
                print(arg)
            print('\n')
        self.assertIn(value_hl, args)
        # a second pass over the output must be idempotent
        value2 = Typograph.typograph_html(value, self.lang, **kwargs)
        if value != value2:
            print('\n'+highlight(value)+'\n'+highlight(value2))
        self.assertEqual(highlight(value), highlight(value2))
|
import pandas
from shapely.geometry import Polygon
import shapefile
nex_shapename = '..\\shapes\\NEXRAD_pixels_tsala'
grid_shapename = '..\\shapes\\join_all2'
df = pandas.read_csv('NEXRAD.csv',nrows=1)
df_keys = list(df.keys())
#--build nexrad polygons - just duplicate multipart polys
print 'loading grid shapefile'
nex_shapes,nex_recs = shapefile.load_as_dict(nex_shapename)
nex_polys,nex_pixelnums = [],[]
print 'building nexrad polygons'
for shape,pnum in zip(nex_shapes,nex_recs['Pixel']):
if str(pnum) in df_keys:
if len(shape.parts) > 1:
points = shape.points
for i1,i2 in zip(shape.parts[:-1],shape.parts[1:]):
poly = Polygon(shape.points[i1:i2])
if not poly.is_valid:
raise Exception('invalid nexrad geometry'+str(pnum))
nex_polys.append(poly)
nex_pixelnums.append(pnum)
#raise Exception('multipart nexrad shape'+str(rec))
else:
poly = Polygon(shape.points)
if not poly.is_valid:
raise Exception('invalid nexrad geometry'+str(pnum))
nex_polys.append(poly)
nex_pixelnums.append(pnum)
#else:
# print 'skipping pixel:',pnum
print 'built polygons for',len(nex_polys),' nexrad pixels'
#--build grid polygons
print 'loading grid shapefile'
grid_shp = shapefile.Reader(grid_shapename)
grid_shapes = grid_shp.shapes()
grid_recs = grid_shp.records()
grid_polys = []
print 'building grid polygons'
for i,(shape,rec) in enumerate(zip(grid_shapes,grid_recs)):
    # progress indicator every 500 records ('\r' keeps it on one line)
    if i % 500 == 0:
        print 'record',i,'\r',
    poly = Polygon(shape.points)
    if not poly.is_valid:
        # NOTE(review): the message says 'nexrad' but this is a grid shape -
        # looks like a copy-paste leftover; confirm before changing
        raise Exception('invalid nexrad geometry'+str(rec))
    grid_polys.append(poly)
print '\nintersecting grid and nexrad polygons'
# writer with the same schema as the grid shapefile, plus two new text fields
# for the intersecting pixel numbers and their area fractions
wr = shapefile.writer_like(grid_shapename)
wr.field('nex_pix',fieldType='C',size=50)
wr.field('nex_frac',fieldType='C',size=50)
for i,(gshape,grec,gpoly) in enumerate(zip(grid_shapes,grid_recs,grid_polys)):
    print 'record',i,'\r',
    #--search for intersections
    #--sum up the total intersected area - for grid cells not completely covered
    pixs,areas = [],[]
    tot_area = 0.0
    for npoly,pix in zip(nex_polys,nex_pixelnums):
        if gpoly.intersects(npoly):
            ipoly = gpoly.intersection(npoly)
            area = ipoly.area
            tot_area += area
            #--incase this is a multipart nexrad shape and multiple parts intersect the same grid cell
            if pix in pixs:
                areas[pixs.index(pix)] += area
            else:
                areas.append(area)
                pixs.append(pix)
    if len(pixs) > 0:
        # serialize pixel numbers and area fractions as comma-delimited text
        pstr,fstr = '',''
        for p,a in zip(pixs,areas):
            pstr += '{0:6.0f},'.format(p)
            fstr += '{0:6.5f},'.format(a / tot_area)
        # drop the trailing commas
        pstr = pstr[:-1]
        fstr = fstr[:-1]
        grec.append(pstr)
        grec.append(fstr)
    else:
        # no intersecting pixels: store empty strings in the new fields
        grec.append('')
        grec.append('')
    wr.poly([gshape.points])
    wr.record(grec)
wr.save('..\\shapes\\tsala_grid_nexrad')
|
# Local tests took 1 hour to insert 5 million lines. Estimated 9 hours to insert all data as of 05/2017.
# Suggested to run on a weekend during off hours
import os
import re
import logging
import boto
import urllib.request
from sqlalchemy.dialects.postgresql import insert
from sqlalchemy.exc import IntegrityError
from dataactcore.logging import configure_logging
from dataactcore.config import CONFIG_BROKER
from dataactcore.interfaces.db import GlobalDB
from dataactcore.models.domainModels import Zips, StateCongressional
from dataactvalidator.health_check import create_app
logger = logging.getLogger(__name__)
# fixed-width record lengths (characters) for the USPS zip4 and city/state files
zip4_line_size = 182
citystate_line_size = 129
# stream files in 10 KB chunks
chunk_size = 1024 * 10
# update contents of state_congressional table based on zips we just inserted
def update_state_congr_table(sess):
    """Rebuild state_congressional from the distinct state/district pairs in zips."""
    logger.info("Loading zip codes complete, beginning update of state_congressional table")
    # wipe out the old contents first
    sess.query(StateCongressional).delete(synchronize_session=False)
    sess.commit()
    # pull every distinct (state, congressional district) pair from zips
    state_cd_pairs = sess.query(Zips.state_abbreviation, Zips.congressional_district_no).\
        distinct().order_by(Zips.state_abbreviation, Zips.congressional_district_no)
    new_rows = [StateCongressional(state_code=pair.state_abbreviation,
                                   congressional_district_no=pair.congressional_district_no)
                for pair in state_cd_pairs]
    sess.bulk_save_objects(new_rows)
    sess.commit()
# add data to the zips table
def add_to_table(data, sess):
    """Bulk-insert zip rows; on a duplicate-key failure, retry row by row."""
    try:
        sess.bulk_save_objects([Zips(**zip_data) for _, zip_data in data.items()])
        sess.commit()
    except IntegrityError:
        # batch insert hit a duplicate: roll back and insert individually,
        # skipping rows whose (zip5, zip_last4) key already exists
        sess.rollback()
        logger.error("Attempted to insert duplicate zip. Inserting each row in batch individually.")
        for i, (_, new_zip) in enumerate(data.items()):
            statement = insert(Zips).values(**new_zip).\
                on_conflict_do_nothing(index_elements=[Zips.zip5, Zips.zip_last4])
            sess.execute(statement)
            if i % 10000 == 0:
                logger.info("Inserting row %s of current batch", str(i))
        sess.commit()
def parse_zip4_file(f, sess):
    """Parse a fixed-width USPS zip4 file and insert its records into the zips table.

    f: file-like object (local file or S3/url stream); sess: DB session.
    Records are batched by zip5 so each batch can be inserted atomically.
    """
    logger.info("Starting file %s", str(f))
    # pull out the copyright data (the first fixed-width record)
    f.read(zip4_line_size)
    data_array = {}
    current_zip = ""
    curr_chunk = ""
    while True:
        # grab the next chunk
        next_chunk = f.read(chunk_size)
        # when streaming from S3 it reads in as bytes, we need to decode it as a utf-8 string
        if not type(next_chunk) == str:
            next_chunk = next_chunk.decode("utf-8")
        # add the new chunk of the file to the current chunk we're processing
        curr_chunk += next_chunk
        # if the current chunk is smaller than the line size, we're done
        if len(curr_chunk) < zip4_line_size:
            break
        # while we can still do more processing on the current chunk, process it per line
        while len(curr_chunk) >= zip4_line_size:
            # grab another line and get the data that's always the same
            curr_row = curr_chunk[:zip4_line_size]
            state = curr_row[157:159]
            # ignore state codes AA, AE, and AP because they're just for military routing
            if state not in ['AA', 'AE', 'AP']:
                # files are ordered by zip5: when it changes, that's the last record with that zip5
                # insert batches by zip5 to avoid conflicts
                zip5 = curr_row[1:6]
                if current_zip != zip5:
                    if len(data_array) > 0:
                        logger.info("Inserting {} records for {}".format(len(data_array), current_zip))
                        add_to_table(data_array, sess)
                        data_array.clear()
                    current_zip = zip5
                # zip of 96898 is a special case
                if zip5 == "96898":
                    congressional_district = "99"
                    state = "UM"
                    county = "450"
                else:
                    county = curr_row[159:162]
                    congressional_district = curr_row[162:164]
                    # certain states require specific CDs
                    if state in ["AK", "DE", "MT", "ND", "SD", "VT", "WY"]:
                        congressional_district = "00"
                    elif state in ["AS", "DC", "GU", "MP", "PR", "VI"]:
                        congressional_district = "98"
                    elif state in ["FM", "MH", "PW", "UM"]:
                        congressional_district = "99"
                try:
                    zip4_low = int(curr_row[140:144])
                    zip4_high = int(curr_row[144:148])
                    # if the zip4 low and zip4 high are the same, it's just one zip code and we can just add it
                    if zip4_low == zip4_high:
                        zip_string = str(zip4_low).zfill(4)
                        data_array[zip5 + zip_string] = {"zip5": zip5, "zip_last4": zip_string, "county_number": county,
                                                         "state_abbreviation": state,
                                                         "congressional_district_no": congressional_district}
                    # if the zip codes are different, we have to loop through and add each zip4
                    # as a different object/key
                    else:
                        i = zip4_low
                        while i <= zip4_high:
                            zip_string = str(i).zfill(4)
                            data_array[zip5 + zip_string] = {"zip5": zip5, "zip_last4": zip_string,
                                                             "state_abbreviation": state, "county_number": county,
                                                             "congressional_district_no": congressional_district}
                            i += 1
                # catch entries where zip code isn't an int (12ND for example, ND stands for "no delivery")
                except ValueError:
                    logger.error("Error parsing entry: %s", curr_row)
            # cut the current line out of the chunk we're processing
            curr_chunk = curr_chunk[zip4_line_size:]
    # add the final chunk of data to the DB
    if len(data_array) > 0:
        logger.info("Adding last {} records for current file".format(len(data_array)))
        add_to_table(data_array, sess)
        data_array.clear()
def parse_citystate_file(f, sess):
    """Parse the fixed-width USPS city/state file for 5-digit zips not seen in the zip4 files.

    f: file-like object; sess: DB session. Returns f.
    """
    logger.info("Starting file %s", str(f))
    # pull out the copyright data (the first fixed-width record)
    f.read(citystate_line_size)
    data_array = {}
    curr_chunk = ""
    while True:
        # grab the next chunk
        next_chunk = f.read(chunk_size)
        # when streaming from S3 it reads in as bytes, we need to decode it as a utf-8 string
        if not type(next_chunk) == str:
            next_chunk = next_chunk.decode("utf-8")
        # add the new chunk of the file to the current chunk we're processing
        curr_chunk += next_chunk
        # if the current chunk is smaller than the line size, we're done
        if len(curr_chunk) < citystate_line_size:
            break
        # while we can still do more processing on the current chunk, process it per line
        while len(curr_chunk) >= citystate_line_size:
            # grab another line and get the data if it's a "detail record"
            curr_row = curr_chunk[:citystate_line_size]
            if curr_row[0] == "D":
                state = curr_row[99:101]
                # ignore state codes AA, AE, and AP because they're just for military routing
                if state not in ['AA', 'AE', 'AP']:
                    zip5 = curr_row[1:6]
                    # zip of 96898 is a special case
                    if zip5 == "96898":
                        congressional_district = "99"
                        state = "UM"
                        county = "450"
                    else:
                        congressional_district = None
                        county = curr_row[101:104]
                        # certain states require specific CDs
                        if state in ["AK", "DE", "MT", "ND", "SD", "VT", "WY"]:
                            congressional_district = "00"
                        elif state in ["AS", "DC", "GU", "MP", "PR", "VI"]:
                            congressional_district = "98"
                        elif state in ["FM", "MH", "PW", "UM"]:
                            congressional_district = "99"
                    data_array[zip5] = {"zip5": zip5, "zip_last4": None, "state_abbreviation": state,
                                        "county_number": county, "congressional_district_no": congressional_district}
            # cut the current line out of the chunk we're processing
            curr_chunk = curr_chunk[citystate_line_size:]
    # remove all zip5s that already exist in the table
    for item in sess.query(Zips.zip5).distinct():
        if item.zip5 in data_array:
            del data_array[item.zip5]
    logger.info("Starting insert on zip5 data")
    add_to_table(data_array, sess)
    return f
def read_zips():
    """Reload the zips table from USPS files (S3 or local) and refresh state_congressional."""
    with create_app().app_context():
        sess = GlobalDB.db().session
        # delete old values in case something changed and one is now invalid
        sess.query(Zips).delete(synchronize_session=False)
        sess.commit()
        if CONFIG_BROKER["use_aws"]:
            s3connection = boto.s3.connect_to_region(CONFIG_BROKER['aws_region'])
            s3bucket = s3connection.lookup(CONFIG_BROKER['sf_133_bucket'])
            zip_folder = CONFIG_BROKER["zip_folder"] + "/"
            for key in s3bucket.list(prefix=zip_folder):
                if key.name != zip_folder:
                    zip_4_file_path = key.generate_url(expires_in=600)
                    parse_zip4_file(urllib.request.urlopen(zip_4_file_path), sess)
            # parse remaining 5 digit zips that weren't in the first file
            citystate_file = s3bucket.get_key("ctystate.txt").generate_url(expires_in=600)
            parse_citystate_file(urllib.request.urlopen(citystate_file), sess)
        else:
            base_path = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", CONFIG_BROKER["zip_folder"])
            # ignore hidden files (e.g. .DS_Store on mac); startswith replaces
            # the old re.match('^\.', f) call, which used an invalid escape
            file_list = [f for f in os.listdir(base_path) if not f.startswith('.')]
            for file in file_list:
                # BUG FIX: files were opened without ever being closed, leaking
                # file handles; a context manager releases each one promptly
                with open(os.path.join(base_path, file)) as zip4_file:
                    parse_zip4_file(zip4_file, sess)
            # parse remaining 5 digit zips that weren't in the first file
            citystate_path = os.path.join(CONFIG_BROKER["path"], "dataactvalidator", "config", "ctystate.txt")
            with open(citystate_path) as citystate_file:
                parse_citystate_file(citystate_file, sess)
        update_state_congr_table(sess)
        logger.info("Zipcode script complete")
# entry point: set up logging, then run the full zip-code load
if __name__ == '__main__':
    configure_logging()
    read_zips()
|
"""
Tests for views:
- home-page (project-collection)
- project-page
- result-page
"""
import os
from pathlib import Path
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase, override_settings
from django.urls import reverse
def get_relative_results_files(project_path):
    """
    Return a list of every file in the directory `project_path` and its
    subdirectories, expressed as paths relative to `project_path`.
    """
    collected = []
    for root, _, filenames in os.walk(project_path):
        rel_root = Path(root).relative_to(project_path)
        collected.extend(str(rel_root / name) for name in filenames)
    return collected
def get_collection_details(collection_id):
    """
    A collection is a directory containing project directories.

    Given a collection_id, return a dict with:
    - "path": the collection directory as a Path;
    - "project_ids": the names of the projects inside it;
    - "file_paths": per-project relative paths of all result files.
    """
    collection_path = Path(collection_id)
    project_ids = os.listdir(collection_path)
    file_paths = {}
    for project_id in project_ids:
        file_paths[project_id] = get_relative_results_files(collection_path / project_id)
    return {"path": collection_path, "project_ids": project_ids, "file_paths": file_paths}
class HomePageTest(TestCase):
    """
    The home-page for contented-based websites contains
    - a list of project-names.
    """

    def setUp(self):
        # two independent collections, to check behavior isn't tied to one dir
        self.project_collections = {
            collection_id: get_collection_details(collection_id)
            for collection_id in ["dummy_projects", "dummy_projects2"]
        }

    def test_uses_home_template(self):
        """
        WHEN: the user requests the home page (using `reverse("home")`)
        THEN: the home-page template is used
        """
        # idiom fix: iterate .values() directly, the keys were unused
        for details in self.project_collections.values():
            with self.settings(PROJECTS_DIR=details["path"]):
                response = self.client.get(reverse("home"))
                self.assertTemplateUsed(response, "home.html")

    def test_alternative_url_specification_for_homepage(self):
        """
        WHEN: the user requests the home page (using `"/"`)
        THEN: the home-page template is used
        """
        response = self.client.get("/")
        self.assertTemplateUsed(response, "home.html")

    def test_home_page_contains_list_of_projects(self):
        """
        Admin specifies a directory for presentation to the users; this
        directory comprises (possibly 0) a set of directories, one per
        data-analysis project. Each project should be accessible via the home
        page (ie, the name of the project should be visible)
        """
        for details in self.project_collections.values():
            with self.settings(PROJECTS_DIR=details["path"]):
                response = self.client.get(reverse("home"))
                response_text = response.content.decode("utf8")
                self.assertTrue(
                    all(
                        project_id in response_text
                        for project_id in details["project_ids"]
                    ),
                    """A project-name was present in the project-directory, but
                    absent from the home-page""",
                )

    def test_home_page_contains_hyperlinks_to_projects(self):
        """
        WHEN: the user opens the home-page
        THEN: a list of projects is visible and each one has an associated
        hyperlink
        """
        hyperlink_stub = """<a href="/projects/{proj}">{proj}</a>"""
        for details in self.project_collections.values():
            with self.settings(PROJECTS_DIR=details["path"]):
                response = self.client.get(reverse("home"))
                for project_id in details["project_ids"]:
                    self.assertContains(
                        response, hyperlink_stub.format(proj=project_id), html=True
                    )
@override_settings(
    PROJECTS_DIR=Path("dummy_projects"),
    RESTRICTED_PROJECTS=["my_other_project"],
)
class HomePageRestrictionsTest(TestCase):
    """
    The home page lists openly-accessible projects for everyone, but the
    restricted projects may only appear for logged-in users.
    """
    def setUp(self):
        self.collection_id = "dummy_projects"
        self.collection_details = get_collection_details(self.collection_id)
        self.open_projects = ["my_test_project"]
        self.restricted_projects = ["my_other_project"]
        self.link_template = """<a href="/projects/{proj}">{proj}</a>"""
        # A known user so the logged-in test below can authenticate.
        get_user_model().objects.create_user(
            username="testuser1", password="not-a-password"
        )
    def _home_page(self):
        """Fetch and return the rendered home page."""
        return self.client.get(reverse("home"))
    def test_unlogged_users_cannot_see_restricted_projects(self):
        """
        GIVEN: a user is not logged in
        WHEN: the user views the home page
        THEN: none of the restricted projects should be visible
        """
        home_response = self._home_page()
        for restricted_id in self.restricted_projects:
            self.assertNotContains(
                home_response,
                self.link_template.format(proj=restricted_id),
                html=True,
            )
    def test_unlogged_users_can_see_all_open_projects(self):
        """
        GIVEN: a user is not logged in
        WHEN: the user views the home page
        THEN: all of the non-restricted projects should be visible
        """
        home_response = self._home_page()
        for open_id in self.open_projects:
            self.assertContains(
                home_response, self.link_template.format(proj=open_id), html=True
            )
    def test_logged_users_can_see_all_projects(self):
        """
        GIVEN: a logged-in user
        WHEN: the user views the home page
        THEN: all available projects should be visible
        """
        self.client.login(username="testuser1", password="not-a-password")
        home_response = self._home_page()
        for project_id in self.collection_details["project_ids"]:
            self.assertContains(
                home_response,
                self.link_template.format(proj=project_id),
                html=True,
            )
class ProjectPageTest(TestCase):
    """
    The project-page is a webpage that contains
    - info about all files connected to a specific analysis project
    """
    def setUp(self):
        # Each entry maps a collection-id to the dict returned by
        # get_collection_details: {"path", "project_ids", "file_paths"}.
        self.project_collections = {
            collection_id: get_collection_details(collection_id)
            for collection_id in ["dummy_projects", "dummy_projects2"]
        }
        # A known user for the restricted-project tests below.
        get_user_model().objects.create_user(
            username="testuser1", password="not-a-password"
        )
    def test_uses_project_template(self):
        """
        WHEN: the user requests the webpage for a specific project-ID
        THEN: the project-page template should be used
        """
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                for project_id in details["project_ids"]:
                    response = self.client.get(f"/projects/{project_id}")
                    self.assertTemplateUsed(response, "project.html")
    def test_project_page_contains_project_id(self):
        """
        WHEN: the user requests a webpage for a specific project-ID
        THEN: the project-ID should appear in the title of the webpage
        """
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                for project_id in details["project_ids"]:
                    response = self.client.get(f"/projects/{project_id}")
                    response_text = response.content.decode("utf8")
                    # NOTE(review): this only checks the ID appears somewhere
                    # on the page, not specifically in the title.
                    self.assertIn(project_id, response_text)
    def test_project_page_contains_list_of_results(self):
        """
        GIVEN: a project name, and all the results files that are stored in the
        project's directory
        WHEN: the user opens that project's project-page
        THEN: each results-file should be mentioned in the text for the
        project-page (not tested: in a table)
        """
        # Helper closures keep the project/file double loop readable.
        def assert_project_page_text_contains_file(project_id, file_path):
            # GIVEN: a project name, and one of the results files that are
            # stored in the project's directory
            # WHEN: the user opens that project's project-page
            response = self.client.get(f"/projects/{project_id}")
            response_text = response.content.decode("utf8")
            # THEN: the results-file should be mentioned on the project-page
            self.assertIn(file_path, response_text)
        def assert_project_page_contains_list_of_all_results_files(details):
            for project_id, files in details["file_paths"].items():
                for file_path in files:
                    assert_project_page_text_contains_file(project_id, file_path)
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                assert_project_page_contains_list_of_all_results_files(details)
    def test_project_page_contains_hyperlinks_to_results(self):
        """
        GIVEN: a project name, and all the results files that are stored in the
        project's directory
        WHEN: the user opens that project's project-page
        THEN: there should be a hyperlink for each results-file from the
        project-page
        """
        def assert_project_page_contains_hyperlink_to_file(project_id, file_path):
            # html=True lets assertContains compare parsed markup, so
            # attribute ordering/whitespace differences do not matter.
            hyperlink_stub = """<a href="/projects/{proj}/{file}">{file}</a>"""
            response = self.client.get(f"/projects/{project_id}")
            self.assertContains(
                response,
                hyperlink_stub.format(proj=project_id, file=file_path),
                html=True,
            )
        def assert_project_page_contains_hyperlinks_to_all_results_files(details):
            for project_id, files in details["file_paths"].items():
                for file_path in files:
                    assert_project_page_contains_hyperlink_to_file(
                        project_id, file_path
                    )
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                assert_project_page_contains_hyperlinks_to_all_results_files(details)
    # TODO(review): re-enable once 404 handling for unknown projects exists.
    # def test_nonexisting_projects_throw_404(self):
    #     response = self.client.get(f"/projects/not-a-project")
    #
    #     self.assertEqual(response.status_code, 404, f"Non-existing project")
    @override_settings(
        PROJECTS_DIR=Path("dummy_projects"),
        RESTRICTED_PROJECTS=["my_other_project"],
    )
    def test_logged_in_users_can_open_restricted_projects(self):
        """
        GIVEN: a logged-in user and a restricted project
        WHEN: the user tries to open the URL for that project
        THEN: the project page opens without error
        """
        self.client.login(username="testuser1", password="not-a-password")
        url = reverse("project", args=["my_other_project"])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, f"Couldn't open {url}")
        self.assertTemplateUsed(response, "project.html")
    @override_settings(
        PROJECTS_DIR=Path("dummy_projects"),
        RESTRICTED_PROJECTS=["my_other_project"],
    )
    def test_unlogged_users_cannot_open_restricted_projects(self):
        """
        GIVEN: a user who has not logged in and a restricted project
        WHEN: the user tries to open the URL for the project
        THEN: the user is redirected to the login page
        """
        url = reverse("project", args=["my_other_project"])
        response = self.client.get(url)
        self.assertEqual(
            response.status_code,
            302,
            "Couldn't redirect to login when accessing a restricted project",
        )
        # Wanted to test that registration/login.html template is used when
        # redirecting unlogged users, but when I use assertTemplateUsed it
        # claims that no templates were used when rendering; I suspect this is
        # because the page redirects rather than renders the chosen page.
        self.assertEqual(response.url, settings.LOGIN_URL)
class ResultsPageTest(TestCase):
    """
    The results page just shows the contents of a results-file in the browser.
    If a user is not logged in then only files from non-restricted projects can
    be displayed.
    """
    def setUp(self):
        # Maps a collection-id to get_collection_details' dict:
        # {"path", "project_ids", "file_paths"}.
        self.project_collections = {
            collection_id: get_collection_details(collection_id)
            for collection_id in ["dummy_projects", "dummy_projects2"]
        }
        # A known user for the restricted-file tests below.
        get_user_model().objects.create_user(
            username="testuser1", password="not-a-password"
        )
    def test_results_page_opens(self):
        """
        WHEN: the user requests an existing file from a project
        THEN: the file should open in the browser
        """
        def assert_results_page_opens(project_id, file_name):
            url = f"/projects/{project_id}/{file_name}"
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200, f"Couldn't open {url}")
        def assert_all_results_pages_open(details):
            for project_id, files in details["file_paths"].items():
                for file_name in files:
                    assert_results_page_opens(project_id, file_name)
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                assert_all_results_pages_open(details)
    def test_results_page_matches_expected_content_type(self):
        """
        GIVEN: a file `<projects_dir>/<project_id>/<some_file>` exists
        WHEN: the user requests to see the URL corresponding to that file
        (/projects/<project_id>/<some_file>)
        THEN: the content-type for the response should make sense for the
        original file (if it's a pdf, html, txt ... file then the content-type
        should be application/pdf, text/html, text/plain)
        """
        # Take the default content-type to be text/plain
        content_types = {
            ".pdf": "application/pdf",
            ".html": "text/html",
            ".jpeg": "image/jpeg",
            ".png": "image/png",
            ".svg": "image/svg+xml",
        }
        def assert_results_page_has_correct_content_type(project_id, file_name):
            # The file extension determines the expected content-type.
            _, extension = os.path.splitext(file_name)
            url = f"/projects/{project_id}/{file_name}"
            response = self.client.get(url)
            self.assertEqual(
                response["content-type"], content_types.get(extension, "text/plain")
            )
        def assert_all_results_pages_have_correct_content_type(details):
            for project_id, files in details["file_paths"].items():
                for file_name in files:
                    assert_results_page_has_correct_content_type(project_id, file_name)
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                assert_all_results_pages_have_correct_content_type(details)
    def test_results_page_content_matches_file_content(self):
        """
        GIVEN: a file `<projects_dir>/<project_id>/<some_file>` exists
        WHEN: the user requests to see the URL corresponding to that file
        (/projects/<project_id>/<some_file>).
        THEN: the contents of the file should open in the browser and be
        identical to the original contents.
        """
        def is_binary(file_name):
            # Treated-as-binary extensions; everything else is compared as text.
            _, extension = os.path.splitext(file_name)
            binary_extensions = {".pdf", ".svg", ".png", ".jpeg"}
            return extension in binary_extensions
        def get_file_contents(file_path):
            # Read bytes for binary files, text otherwise, to match how the
            # response contents are extracted below.
            file_text = ""
            file_mode = "rb" if is_binary(file_path) else "r"
            with open(file_path, mode=file_mode) as file_object:
                file_text = file_object.read()
            return file_text
        def get_response_contents(response, binary):
            # Binary files come back as streaming responses here, so the
            # chunks of streaming_content are joined; text responses expose
            # a plain .content body instead.
            if binary:
                return b"".join(response.streaming_content)
            return response.content.decode("utf8")
        def assert_file_matches_browser_contents(path, project_id, file_name):
            file_path = path / project_id / file_name
            url = f"/projects/{project_id}/{file_name}"
            file_text = get_file_contents(file_path)
            response = self.client.get(url)
            response_contents = get_response_contents(
                response, binary=is_binary(file_path)
            )
            self.assertEqual(response_contents, file_text)
        def assert_all_files_match_their_browser_rendering(details):
            for project_id, files in details["file_paths"].items():
                for file_name in files:
                    assert_file_matches_browser_contents(
                        path=details["path"], project_id=project_id, file_name=file_name
                    )
        for _, details in self.project_collections.items():
            with self.settings(PROJECTS_DIR=details["path"]):
                assert_all_files_match_their_browser_rendering(details)
    @override_settings(
        PROJECTS_DIR=Path("dummy_projects"),
        RESTRICTED_PROJECTS=["my_other_project"],
    )
    def test_logged_in_users_can_open_restricted_files(self):
        """
        GIVEN: a logged-in user and a file within a restricted project
        WHEN: the user tries to open the URL for that file
        THEN: the results file opens without error
        """
        self.client.login(username="testuser1", password="not-a-password")
        url = reverse("results", args=["my_other_project", "README.md"])
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200, f"Couldn't open {url}")
    @override_settings(
        PROJECTS_DIR=Path("dummy_projects"),
        RESTRICTED_PROJECTS=["my_other_project"],
    )
    def test_unlogged_users_cannot_open_restricted_files(self):
        """
        GIVEN: a user who has not logged in and a file in a restricted project
        WHEN: the user tries to open the URL for the file
        THEN: the user is redirected to the login page
        """
        url = reverse("results", args=["my_other_project", "README.md"])
        response = self.client.get(url)
        self.assertEqual(
            response.status_code,
            302,
            "Couldn't redirect to login when accessing a restricted project",
        )
        self.assertEqual(response.url, settings.LOGIN_URL)
|
from django.test import SimpleTestCase
from django.test import TestCase
from django.urls import reverse ,resolve
from account.views import *
class TestUrls(SimpleTestCase):
    """Check that each named account URL resolves to the expected view.

    BUG FIX: ``assertEquals`` is a long-deprecated alias of ``assertEqual``
    and was removed from unittest in Python 3.12; use ``assertEqual``.
    """
    def test_register_urls_is_resolve(self):
        url = reverse('register')
        self.assertEqual(resolve(url).func, registerPage)
    def test_login_urls_is_resolve(self):
        url = reverse('login')
        self.assertEqual(resolve(url).func, loginPage)
    def test_logout_urls_is_resolve(self):
        url = reverse('logout')
        self.assertEqual(resolve(url).func, logoutUser)
    def test_home_urls_is_resolve(self):
        url = reverse('home')
        self.assertEqual(resolve(url).func, home)
    def test_user_page_urls_is_resolve(self):
        url = reverse('user-page')
        self.assertEqual(resolve(url).func, userPage)
# For every test case read x, y, n and print (x ** y) mod n using the
# efficient three-argument form of the built-in pow().
num_cases = int(input())
for _ in range(num_cases):
    base, exponent, modulus = map(int, input().split())
    print(pow(base, exponent, modulus))
|
"""Module that contains TextFormatter class"""
import re
import numpy as np
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from nltk.stem.lancaster import LancasterStemmer
from nltk.stem import SnowballStemmer
class TextFormatter(object):
    """Class responsible for formatting text columns from data."""
    def __init__(self, configs):
        # Formatting configuration; configs_['columns'] lists the columns
        # to process and the operations to apply to each one.
        self.configs_ = configs
    def remove_html_tags(self, text):
        """Remove all HTML tags from the text."""
        return BeautifulSoup(text, "lxml").get_text()
    def change_case(self, text, case_format):
        """Change the case of a text ('lower' or 'upper').

        Unknown formats leave the text unchanged and emit a warning.
        """
        if case_format == 'lower':
            return text.lower()
        elif case_format == 'upper':
            return text.upper()
        # BUG FIX: this was a Python 2 print *statement*, which is a syntax
        # error under Python 3; use the print() function instead.
        print('Invalid case format ' + case_format + ', keeping text case')
        return text
    def remove_stop_words(self, text, stop_word_config):
        """Remove all stop words (for the configured language) from text."""
        stop_words = set(stopwords.words(stop_word_config['language']))
        return ' '.join(word for word in text.split() if word not in stop_words)
    def lemmatize_text(self, text, lemmatizer_config):
        """Apply the WordNet lemmatizer to every word of the text."""
        lemmatizer = WordNetLemmatizer()
        return ' '.join(lemmatizer.lemmatize(word) for word in text.split())
    def stem_text(self, text, stemmer_config):
        """Apply the configured stemmer ('porter', 'lancaster' or 'snowball').

        Unknown algorithms leave the text unchanged.
        """
        if stemmer_config['algorithm'] == 'porter':
            stemmer = PorterStemmer()
        elif stemmer_config['algorithm'] == 'lancaster':
            stemmer = LancasterStemmer()
        elif stemmer_config['algorithm'] == 'snowball':
            stemmer = SnowballStemmer(stemmer_config['language'])
        else:
            return text
        return ' '.join(stemmer.stem(word) for word in text.split())
    def remove_non_letters(self, text):
        """Replace every character that is not an ASCII letter with a space."""
        return re.sub('[^a-zA-Z]', ' ', text)
    def perform_operations(self, text, configs):
        """Perform the configured formatting operations on a single text.

        Operations run in a fixed order: case, stop-words, lemmatization,
        stemming, HTML stripping, and finally non-letter removal.
        """
        if 'change_case' in configs:
            text = self.change_case(text, configs['change_case'])
        if 'remove_stop_words' in configs:
            text = self.remove_stop_words(text, configs['remove_stop_words'])
        if 'lemmatize_text' in configs:
            text = self.lemmatize_text(text, configs['lemmatize_text'])
        if 'stem_text' in configs:
            text = self.stem_text(text, configs['stem_text'])
        if 'remove_html_tags' in configs and configs['remove_html_tags']:
            text = self.remove_html_tags(text)
        if 'use_only_letters' in configs and configs['use_only_letters']:
            text = self.remove_non_letters(text)
        return text
    def format(self, engine):
        """Format the configured text columns of *engine* and return it."""
        for column_config in self.configs_['columns']:
            column_name = column_config['name']
            operations = column_config['operations']
            column_values = engine.get_column_as_array(column_name)
            # Vectorize so the per-text formatting runs over the whole column.
            format_row = np.vectorize(lambda x: self.perform_operations(x,
                                                                        operations))
            column_values = format_row(column_values)
            engine = engine.set_column(column_name, column_values)
        return engine
    def get_processed_columns(self):
        """Return the list of processed columns."""
        return [column_config['name']
                for column_config in self.configs_['columns']]
|
from databases.sql_db import db
class Vacancy(db.Model):
    """SQLAlchemy model for a single job vacancy."""
    __tablename__ = "vacancies"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255), nullable=False)
    firm = db.Column(db.String(128), nullable=False)
    description = db.Column(db.Text(), nullable=False)
    location_postcode = db.Column(db.Integer())
    location = db.Column(db.String(128))
    from_date = db.Column(db.Date())
    job_link = db.Column(db.String(128))
    job_type = db.Column(db.String(80))
    def __init__(self, title, firm, description,
                 location_postcode, location,
                 from_date, job_link, job_type):
        """Populate every column attribute from the constructor arguments."""
        for attr_name, value in (
            ('title', title), ('firm', firm), ('description', description),
            ('location_postcode', location_postcode), ('location', location),
            ('from_date', from_date), ('job_link', job_link),
            ('job_type', job_type),
        ):
            setattr(self, attr_name, value)
    def __repr__(self):
        return f'{self.title}'
    def dict(self):
        """Lightweight JSON-ready summary of the vacancy."""
        return {'id': self.id, 'title': self.title, 'location': self.location}
    @classmethod
    def find_by_id(cls, vacancy_id):
        """Return the vacancy with the given primary key, or None."""
        return cls.query.filter_by(id=vacancy_id).first()
    @classmethod
    def _create_keywords_conditions(cls, keywords):
        """ilike conditions matching each keyword in title/description/firm."""
        searched_columns = (cls.title, cls.description, cls.firm)
        return [
            column.ilike(f'%{keyword}%')
            for keyword in keywords
            for column in searched_columns
        ]
    @classmethod
    def _create_location_conditions(cls, locations):
        """ilike conditions matching any of the requested locations."""
        return [cls.location.ilike(f'%{location}%') for location in locations]
    @classmethod
    def search_vacancies(cls, page, quantity, keywords, locations):
        """Paginated search, newest first: AND over locations, OR over keywords."""
        results = cls.query
        if locations:
            results = results.filter(
                db.and_(*cls._create_location_conditions(locations))
            )
        if keywords:
            results = results.filter(
                db.or_(*cls._create_keywords_conditions(keywords))
            )
        ordered = results.order_by(cls.from_date.desc())
        return ordered.paginate(page, quantity, error_out=False)
    def save_to_db(self):
        """Insert (or update) this vacancy and commit."""
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        """Delete this vacancy and commit."""
        db.session.delete(self)
        db.session.commit()
|
'''
==========================================================================
XbarRTL_test.py
==========================================================================
Unit tests for XbarRTL.
Author : Yanghui Ou
Date : Apr 16, 2020
'''
import pytest
from pymtl3 import *
from pymtl3.stdlib.test_utils import mk_test_case_table
from pymtl3.stdlib.stream.SourceRTL import SourceRTL as TestSource
from pymtl3_net.ocnlib.ifcs.packets import mk_generic_pkt, mk_xbar_pkt
from pymtl3_net.ocnlib.utils import run_sim
from pymtl3_net.ocnlib.test.stream_sinks import NetSinkRTL as TestSink
from ..XbarRTL import XbarRTL
#-------------------------------------------------------------------------
# Constants
#-------------------------------------------------------------------------
# Packet-field widths shared by every test in this file.
opaque_nbits = 8
payload_nbits = 32
#-------------------------------------------------------------------------
# test case: sanity check
#-------------------------------------------------------------------------
def test_sanity():
  """Smoke test: elaborate a 2x2 xbar, reset it, and tick twice."""
  PktT = mk_generic_pkt( nrouters=4, opaque_nbits=8, vc=0, payload_nbits=32 )
  xbar = XbarRTL( PktT, 2, 2 )
  xbar.elaborate()
  xbar.apply( DefaultPassGroup() )
  xbar.sim_reset()
  for _ in range( 2 ):
    xbar.sim_tick()
#-------------------------------------------------------------------------
# arrange_src_sink_pkts
#-------------------------------------------------------------------------
def arrange_src_sink_pkts( num_inports, num_outports, pkt_lst ):
  """Bucket packets per input port (by src) and per output port (by dst)."""
  src_pkts  = [ [] for _ in range( num_inports  ) ]
  sink_pkts = [ [] for _ in range( num_outports ) ]
  for pkt in pkt_lst:
    src_pkts [ pkt.src.uint() ].append( pkt )
    sink_pkts[ pkt.dst.uint() ].append( pkt )
  return src_pkts, sink_pkts
#-------------------------------------------------------------------------
# TestHarness
#-------------------------------------------------------------------------
class TestHarness( Component ):
  # Wires test sources -> xbar under test -> test sinks for one configuration.
  def construct( s, num_inports, num_outports, pkts ):
    # Split the packet list into per-source stimuli and per-sink expectations.
    src_pkts, sink_pkts = \
      arrange_src_sink_pkts( num_inports, num_outports, pkts )
    PktT = mk_xbar_pkt( num_inports, num_outports, opaque_nbits, payload_nbits )
    s.src = [ TestSource( PktT, src_pkts[i] ) for i in range( num_inports ) ]
    s.dut = XbarRTL( PktT, num_inports, num_outports )
    s.sink = [ TestSink( PktT, sink_pkts[i] ) for i in range( num_outports ) ]
    # //= is pymtl3's port-connection operator.
    for i in range( num_inports ):
      s.src[i].send //= s.dut.recv[i]
    for i in range( num_outports ):
      s.dut.send[i] //= s.sink[i].recv
  def done( s ):
    # The test is finished only when every source has sent everything and
    # every sink has received everything it expected.
    src_done = True
    sink_done = True
    for m in s.src:
      src_done &= m.done()
    for m in s.sink:
      sink_done &= m.done()
    return src_done and sink_done
  def line_trace( s ):
    return s.dut.line_trace()
#-------------------------------------------------------------------------
# test case: basic
#-------------------------------------------------------------------------
def basic_pkts( n_in, n_out ):
  """Return a single test packet from input port 0 to output port 0."""
  PktT = mk_xbar_pkt( n_in, n_out, opaque_nbits, payload_nbits )
  return [ PktT( 0, 0, 0x01, 0xfaceb00c ) ]
#-------------------------------------------------------------------------
# test case table
#-------------------------------------------------------------------------
test_cases = [
( 'msg_func n_in n_out init intv' ),
[ 'basic1x2', basic_pkts, 1, 2, 0, 0 ],
[ 'basic2x1', basic_pkts, 2, 1, 0, 0 ],
[ 'basic2x2', basic_pkts, 2, 2, 0, 0 ],
]
test_case_table = mk_test_case_table( test_cases )
#-------------------------------------------------------------------------
# test driver
#-------------------------------------------------------------------------
@pytest.mark.parametrize( **test_case_table )
def test_sflit_xbar( test_params, cmdline_opts ):
  """Drive one parametrized test case through the xbar simulator."""
  packets = test_params.msg_func( test_params.n_in, test_params.n_out )
  harness = TestHarness( test_params.n_in, test_params.n_out, packets )
  harness.set_param( 'top.sink*.construct',
    initial_delay  = test_params.init,
    interval_delay = test_params.intv,
  )
  run_sim( harness, cmdline_opts )
|
# utf-8
# Palindrome check: read a phrase and report whether it reads the same
# backwards, ignoring case, spaces and periods.
# BUG FIX: the original initialised `se = []` only to immediately overwrite
# it with a string, and wrapped input() (already a str) in str().
frase = input('Digite a frase: ').lower()
# Drop separators so multi-word phrases compare symmetrically.
normalizada = frase.replace(' ', '').replace('.', '')
if normalizada == normalizada[::-1]:
    print('É um palíndromo')
else:
    print('Não é um palíndromo.')
import os
import numpy as np
from collections import Counter
from sklearn import svm
from sklearn.metrics import accuracy_score
import nltk
import pickle
import gzip
from sklearn.feature_extraction import DictVectorizer
# Load a previously saved Python object (e.g. an array, list or dict).
def load(file_name):
    """Unpickle and return the object stored in gzip file *file_name*."""
    # BUG FIX (robustness): use a context manager so the stream is closed
    # even if unpickling raises.
    with gzip.open(file_name, "rb") as stream:
        return pickle.load(stream)
# Save a Python object (arrays, dicts, ...) to disk to cache calculations.
def save(file_name, model):
    """Pickle *model* into gzip file *file_name*."""
    # BUG FIX (robustness): context manager guarantees the stream is
    # flushed and closed even on error.
    with gzip.open(file_name, "wb") as stream:
        pickle.dump(model, stream)
#data = load("Tatoeba/data")
#targets = load("Tatoeba/targets")
# Training and test splits of the WiLI-2018 language-identification corpus,
# previously pickled with save() above.
data = load("wili-2018/x_train")
targets = load("wili-2018/y_train")
test_data = load("wili-2018/x_test") #load the x_test data set
test_targets = load("wili-2018/y_test") #load their corresponding target names
def text_features(in_text='', n=2):
    """Convert a string to a character n-gram frequency featureset.

    To be used by predict_nltk.

    NOTE(review): the slice range runs to len(in_text), so the final n-1
    "grams" are shorter than n characters; confirm this is intended before
    changing it, since previously trained models depend on the exact
    feature set.
    """
    tokenz = [in_text[i:i + n] for i in range(len(in_text))]
    # collections.Counter (already imported at file top, previously unused)
    # counts identically to nltk.FreqDist here without the third-party
    # dependency.
    return dict(Counter(tokenz))
def get_feautres_matrices(data, n):
    """Return the character n-gram featureset of every text in *data*.

    (Name typo kept: callers elsewhere use this spelling.)
    """
    return [text_features(text, n) for text in data]
# NOTE: disabled one-off precomputation of the per-n feature matrices; kept
# as a string literal so it never executes.
'''for n in range(2, 16):
	train_features_matrices = get_feautres_matrices(data, n)
	test_features_matrices = get_feautres_matrices(test_data, n)
	save("train_features_matrices_wili-2018_"+ str(n) + "n", train_features_matrices)
	save("test_features_matrices_wili-2018_"+ str(n) + "n", test_features_matrices)
'''
def train_and_save_model(features_matrices, data_set_name = "SVM_Model_wili-2018_", n = 2):
    """Fit an RBF-kernel SVM on the feature matrices and persist it.

    Both the trained classifier and the fitted DictVectorizer are pickled
    to disk, keyed by the n-gram size. Uses the module-level `targets`.
    """
    vec = DictVectorizer()
    design_matrix = vec.fit_transform(features_matrices).toarray()
    classifier = svm.SVC(kernel="rbf", C=100, gamma=0.001)
    classifier.fit(design_matrix, targets)
    save(data_set_name + str(n) + "n", classifier)
    save("vec_" + str(n) + "n", vec)
    return classifier
def predict_nltk(model, vec, in_text='', n=2):
    """Classify the language of *in_text*.

    Transforms the text's n-gram featureset with the fitted vectorizer and
    returns the prediction of the fitted scikit-learn model.
    """
    featureset = text_features(in_text, n=n)
    design_row = vec.transform(featureset).toarray()
    return model.predict(design_row)
# Candidate language codes and their human-readable names (parallel lists).
langs = ['ara', 'arz', 'ary', 'arq', 'afb', 'eng']
langs_names = ['arabic', 'egyptian arabic', 'Moroccan Arabic', 'Algerian Arabic', 'Gulf Arabic', 'english']
print ("Training model.")
# Grid over gamma and n-gram size: fit an SVM per combination and record
# the test-set accuracy.
all_accuracies = {}
for g in [0.001, 0.01, 0.1, 1]:
    accuracies = []
    for n in range(2, 16):
        # Feature matrices were precomputed per n-gram size (see the
        # disabled block above) and are loaded from disk here.
        train_features_matrices = load("features_matrices/train_features_matrices_wili-2018_"+ str(n) + "n")
        test_features_matrices = load("features_matrices/test_features_matrices_wili-2018_"+ str(n) + "n")
        model = svm.SVC(kernel="rbf", C=100, gamma= g)
        vec = DictVectorizer()
        X = vec.fit_transform(train_features_matrices).toarray()
        model.fit(X, targets)
        # transform (not fit_transform) keeps the test data in the
        # training feature space.
        Y = vec.transform(test_features_matrices).toarray()
        # NOTE(review): "precited"/"accurcy" typos kept — code unchanged.
        precited_langs = model.predict(Y)
        acc = accuracy_score(test_targets, precited_langs)
        print('the accurcy of n = ', n, "is ", acc, " when gamma is ", g)
        accuracies.append(acc)
    all_accuracies[g] = accuracies
#train model
#for n in range(3, 16):
#    train_and_save_model("SVM_Model_wili-2018_", n)
#odel = load("SVM_Model_wili-2018_" + str(n) + "n")
#vec = load("vec_"+ str(n) + "n")
#predicted_labels = predict_nltk(model, vec, "ممكن الضمير الشخصى يجى فى الأول", n)
#print("the predicted label is ", predicted_labels)
#print("predicted labels")
#print(predicted_labels)
#print ("FINISHED classifying. accuracy score : ")
#print (accuracy_score(test_labels, predicted_labels))
|
from django.urls import path
from .views import *
from django.contrib.auth.views import PasswordResetView,PasswordResetDoneView,PasswordResetConfirmView,PasswordResetCompleteView
urlpatterns=[
    path('home/',Homeview,name='home'),
    path('register/',Registerview,name='register'),
    path('login/',Loginview,name='login'),
    path('logout/',Logoutview,name='logout'),
    path('changepassword/',Changepassview,name='changepass'),
    path('reset_password/',PasswordResetView.as_view(template_name='Auth/reset_password.html'),name='reset_password'),
    path('reset_password_sent/',PasswordResetDoneView.as_view(template_name='Auth/password_reset_sent.html'),name='password_reset_done'),
    # CONSISTENCY FIX: every other route here ends with a trailing slash;
    # the reset-confirm route did not. reverse()/emails keep working since
    # they are generated from this pattern.
    path('reset/<uidb64>/<token>/',PasswordResetConfirmView.as_view(template_name='Auth/password_reset_form.html'),name='password_reset_confirm'),
    path('reset_password_complete/',PasswordResetCompleteView.as_view(template_name='Auth/password_reset_done.html'),name='password_reset_complete'),
]
|
#!/usr/bin/env python3.4
import sys
def cut_blank_lines(script_path):
    """Return the contents of *script_path* with all blank lines removed.

    A line is blank when it contains only whitespace.
    """
    # BUG FIX (robustness): the file handle was never closed; use a
    # context manager. A comprehension replaces the manual append loop.
    with open(script_path, 'r') as python_file:
        kept_lines = [line for line in python_file if line.strip()]
    return ''.join(kept_lines)
if __name__ == '__main__':
    # takes a script and edits/overwrites it with minimified script
    python_script_path = sys.argv[1]
    # BUG FIX: the stripped text was computed but never written back, so
    # the file was left unmodified despite the stated intent above.
    minified = cut_blank_lines(python_script_path)
    with open(python_script_path, 'w') as script_file:
        script_file.write(minified)
|
# GQC environment setting
# Flask-NegLog Settings
# Minimum severity written to the log.
LOG_LEVEL = 'debug'
# Destination file for logged errors.
LOG_FILENAME = "/var/camel/error.log"
# Do not mirror log output to the console.
LOG_ENABLE_CONSOLE = False
|
#!/usr/bin/env python2.7
# -*- coding:utf-8 -*-
#
# Author :
# E-mail :
# Date : 2014/02/25
# Desc :
#
import tornado.web
import json,logging,types,time,urllib2
from tor_manager.util.config import Config
from tor_manager.util.httpclient import HttpClient
from tor_manager.util.httpresponse import Response as Resp, ResponseCode as RespCode
class MainHandler(tornado.web.RequestHandler):
    """Serves the index page and the host/user management pages."""
    def initialize(self):
        self.Resp = Resp()
    def get(self):
        # 'ori' selects which management page to render; default is index.
        ori = self.get_argument("ori", None)
        if ori is None:
            self.render("index.html", title = "a", name = "b")
            return
        if ori == "host":
            self.render("hostManage/addHost.html")
            return
        if ori == "user":
            self.render("userManage/addUser.html")
            #self.render("userManage/selectUser.html")
            return
    def post(self):
        # SECURITY FIX: the raw request body and the plaintext password
        # were previously written to the log; record only the username.
        username = self.get_argument("username", None)
        logging.info("User logging! username: %s", username)
        self.render("tor_main.html", username=username)
|
import numpy as np

# Action-failure probabilities for the two stochastic transitions.
x = 0.25
y = 0.25
gamma = .9  # discount factor

#Store Transition matrices
actions = 3
states = 4

# T[action, current state, next state] = transition probability.
T = np.zeros((actions, states, states))
T[0, 1, 1] = 1 - x
T[0, 1, 3] = x
T[0, 2, 0] = 1 - y
T[0, 2, 3] = y
T[0, 3, 0] = 1
T[1, 0, 1] = 1
T[2, 0, 2] = 1

# Immediate reward for occupying each state.
R = np.array([0, 0, 1, 10])

# Value vector and greedy policy vector (-1 = not yet decided).
V = np.zeros(states)
P = np.full(states, -1)

# Value Iteration: repeat the Bellman backup until the values converge.
while True:
    V_new = np.zeros(states)
    for cs in range(states):
        # Expected discounted value of each action taken from state cs.
        V_actions = T[:, cs, :] @ V
        # Greedy backup: state reward plus discounted best-action value.
        V_new[cs] = R[cs] + gamma * V_actions.max()
        P[cs] = int(np.argmax(V_actions))
    # BUG FIX: the original tested abs(sum(V - V_new)), where positive and
    # negative per-state differences can cancel and stop the iteration
    # before convergence. Use the maximum absolute change instead.
    if np.max(np.abs(V - V_new)) < 0.001:
        break
    V = V_new
print("V: "+str(V))
print("Pi: "+str(P))
# Copy the MapReduce output directory from HDFS to the local filesystem.
hadoop fs -get result
# Write the CSV header expected by the submission format.
echo 'id,hotel_cluster' > result/head
# Concatenate the header and all reducer part files into the final CSV.
cat result/head result/part-* > result.csv
|
from ..decorators import stere_performer, use_after, use_before
from ..field import Field
@stere_performer('click', consumes_arg=False)
class Link(Field):
    """Convenience Class on top of Field.
    Uses Splinter's click method.
    """
    @use_after
    @use_before
    def click(self):
        """Use Splinter's click method.

        Locates the element via ``self.find()`` and clicks it. The
        ``use_before``/``use_after`` decorators presumably run registered
        hooks around the click — see the decorators module to confirm.

        Example:
            >>> login = Link('id', 'loginLink')
            >>> login.click()
        """
        self.find().click()
|
from server.server_app import app
from flask_cors import CORS
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='0.0.0.0', help='Host name')
    # BUG FIX: without type=int, a port supplied on the command line is
    # passed to app.run() as a string; coerce it to an integer.
    parser.add_argument('-p', '--port', default=8888, type=int, help='Port number')
    args = parser.parse_args()
    # Enable cross-origin requests for the whole app, then serve.
    CORS(app)
    app.run(host=args.host, port=args.port)
|
# Google Code Jam "Counting Sheep": for each number N, find the first
# multiple of N after which all ten decimal digits have been seen, or
# report INSOMNIA when N is 0 (the digits never change).
# BUG FIX (robustness): both files were opened without ever being closed;
# use context managers.
with open('test.txt', 'r') as cases, open('ans.txt', 'w') as answers:
    for case_no in range(1, int(cases.readline()) + 1):
        num = int(cases.readline())
        if num == 0:
            answers.write('Case #%d: INSOMNIA\n' % (case_no))
            continue
        seen_digits = set()
        multiplier = 1
        while True:
            seen_digits |= set(str(num * multiplier))
            if len(seen_digits) == 10:
                break
            multiplier += 1
        answers.write('Case #%d: %d\n' % (case_no, num * multiplier))
|
import abc
class ServiceHashringException(Exception):
    """Base exception for service-hashring errors."""
    pass
class ServiceHashringNode(object):
    """Service hashring node class.

    Nodes order and compare by their ``token`` alone.
    """
    def __init__(self, token, service_info, data=None):
        """ServiceHashringNode constructor.
        Args:
            token: 128-bit integer token identifying the node's
                position on the hashring.
            service_info: ServiceInfo object identifying the service
                occupying the node.
            data: additional dict of data stored at the node.
        """
        self.token = token
        self.service_info = service_info
        self.data = data or {}
    def __cmp__(self, other):
        # Python 2 ordering support; Python 3 ignores __cmp__ and uses
        # the rich comparison methods below instead.
        if self.token < other.token:
            return -1
        elif self.token > other.token:
            return 1
        else:
            return 0
    # BUG FIX: __cmp__ is ignored by Python 3, so nodes could not be
    # sorted or compared there. Provide equivalent rich comparisons keyed
    # on token (the same semantics __cmp__ gives under Python 2).
    def __eq__(self, other):
        return self.token == other.token
    def __ne__(self, other):
        return self.token != other.token
    def __lt__(self, other):
        return self.token < other.token
    def __le__(self, other):
        return self.token <= other.token
    def __gt__(self, other):
        return self.token > other.token
    def __ge__(self, other):
        return self.token >= other.token
    def __hash__(self):
        # Hash must stay consistent with token-based equality.
        return self.token.__hash__()
    def __repr__(self):
        return "%s(%s, %r, %r)" % (
            self.__class__.__name__,
            self.token,
            self.service_info,
            self.data)
    def __str__(self):
        # Tokens render as 32 hex digits (128 bits).
        return "%s(%032x, %s)" % (
            self.__class__.__name__,
            self.token,
            self.service_info)
class ServiceHashringEvent(object):
    """Service hashring event."""
    CONNECTED_EVENT = "CONNECTED_EVENT"
    CHANGED_EVENT = "CHANGED_EVENT"
    DISCONNECTED_EVENT = "DISCONNECTED_EVENT"
    def __init__(self, event_type, previous_hashring=None, current_hashring=None,
                 added_nodes=None, removed_nodes=None):
        """ServiceHashringEvent constructor.
        Args:
            event_type: event type (ALL EVENTS)
            previous_hashring: list of ServiceHashringNode's before change (CHANGED_EVENT)
            current_hashring: list of ServiceHashringNode's after change (CHANGED_EVENT)
            added_nodes: list of added ServiceHashringNode's (CHANGED_EVENT)
            removed_nodes: list of removed ServiceHashringNode's (CHANGED_EVENT)
        """
        self.event_type = event_type
        self.previous_hashring = previous_hashring
        self.current_hashring = current_hashring
        self.added_nodes = added_nodes
        self.removed_nodes = removed_nodes
    def __repr__(self):
        # Bug fix: the format string had six placeholders ("%r %r" at the
        # end) for only five arguments, so repr() raised TypeError.
        return "%s(%r, %r, %r, %r)" % (
            self.event_type,
            self.previous_hashring,
            self.current_hashring,
            self.added_nodes,
            self.removed_nodes)
class ServiceHashring(object):
    """Consistent service hashring abstract base class.
    This class represents a consistent hashring where the
    positions are occupied by services. Each occupied position on
    the hashring is considered a node, and is represented by the
    ServiceHashringNode class. Each node is assigned a unique token
    which identifies its place on the hashring, and is used to determine
    which node is responsible for requests related to a
    specific piece of data.
    Note that it is possible, and advisable, that a single service
    occupy more than one position on the hashring. This will
    promote a more even load balancing, and also allows
    more powerful machines to occupy more positions to take on
    a greater portion of the load.
    This class is designed to be used by both services occupying
    positions on the hashring, and services which are simply
    observing the hashring.
    In order to route a service request, a hash of the governing data
    is computed. The hashring is then traversed in a clockwise direction
    to determine the appropriate node for the given data.
    The first node whose token is greater than the data's hash
    is responsible for processing the request or data.
    """
    __metaclass__ = abc.ABCMeta
    def __init__(self, service_name, service=None, positions=None, position_data=None):
        """ServiceHashring constructor.
        Args:
            service_name: service name, i.e. chatsvc
            service: optional Service object which is only required for services
                registering positions on the hashring.
            positions: optional list of positions to occupy on the
                hashring (nodes to create). Each position
                must be a 128-bit integer in integer or hex string format.
                If None, a randomly generated position will be used.
                Note that in the case of a position collision, a
                randomly generated position will also be used.
            position_data: Dict of additional key/values (string) to store with
                the hashring position node.
        """
        self.service_name = service_name
        self.service = service
        self.positions = positions
        self.position_data = position_data or {}
    @abc.abstractmethod
    def start(self):
        """Start watching the hashring and register positions if needed."""
        # Bug fix: the original body called self.hashring_watch.start(),
        # but no `hashring_watch` attribute is defined anywhere in this
        # class, so a subclass invoking the base implementation raised
        # AttributeError. Like the other abstract methods, the base
        # implementation is now a no-op for subclasses to override.
        return
    @abc.abstractmethod
    def stop(self):
        """Stop watching the hashring and remove positions if needed."""
        return
    @abc.abstractmethod
    def join(self, timeout):
        """Join the hashring."""
        return
    @abc.abstractmethod
    def add_observer(self, method):
        """Add a hashring observer method.
        The given method will be invoked with following arguments:
            hashring: ServiceHashring object
            event: ServiceHashringEvent object
        """
        return
    @abc.abstractmethod
    def remove_observer(self, method):
        """Remove a hashring observer method."""
        return
    @abc.abstractmethod
    def hashring(self):
        """Return hashring as ordered list of ServiceHashringNode's.
        Hashring is represented as an ordered list of ServiceHashringNode's.
        The list is ordered by hashring position (ServiceHashringNode.token).
        Returns:
            Ordered list of ServiceHashringNode's.
        """
        return
    @abc.abstractmethod
    def preference_list(self, data, merge_nodes=True):
        """Return a preference list of ServiceHashringNode's for the given data.
        Generates an ordered list of ServiceHashringNode's responsible for
        the data. The list is ordered by node preference, where the
        first node in the list is the most preferred node to process
        the data. Upon failure, lower preference nodes in the list
        should be tried.
        Note that each service (unique service_key) will only appear
        once in the preference list. For each service, the
        most preferred ServiceHashringNode will be returned.
        Removing duplicate service nodes makes the preference
        list easier to use for failure retries, and
        replication.
        Additionally, if the merge_nodes flag is True, each
        unique hostname will appear once in the preference
        list. The most preferred ServiceHashringNode per
        hostname will be returned. This is extremely
        useful for replication, since it's often a requirement
        that replication nodes be on different servers.
        Args:
            data: string to hash to find appropriate hashring position.
            merge_nodes: Optional flag indicating that each hostname
                should only appear once in the preference list. The
                most preferred ServiceHashringNode per hostname will
                be returned.
        Returns:
            Preference ordered list of ServiceHashringNode's responsible
            for the given data.
        """
        return
    def find_hashring_node(self, data):
        """Find the hashring node responsible for the given data.
        The selected hashring node is determined based on the hash
        of the user passed "data". The first node to the
        right of the data hash on the hash ring
        will be selected.
        Args:
            data: string to hash to find appropriate hashring position.
        Returns:
            ServiceHashringNode responsible for the given data.
        Raises:
            ServiceHashringException if no nodes are available.
        """
        return
|
#!/usr/bin/env python
import rospy
import time
import numpy as np
from std_msgs.msg import Float64MultiArray
from std_msgs.msg import Float64
from sensor_msgs.msg import Image
from sensor_msgs.msg import JointState
from geometry_msgs.msg import Transform, TransformStamped
from PIL import Image as pil_img
from cv_bridge import CvBridge
import cv2
import math
import tf
import feature_extract
import jacobian_function
import transform
# --- Shared visual-servoing state (mutated from the ROS callbacks) ---
jac_inverse = np.zeros((6,6))  # inverse arm Jacobian, refreshed in Joint_State_Callback
vel_ee = np.zeros(6)           # commanded end-effector twist
vel_ee[0] = 0.005              # small initial forward velocity
vel_cam = np.zeros(6)          # camera-frame twist from the IBVS law
# vel_cam[0] = 0.01
vel_joints = np.zeros(6)       # joint velocity command sent to the controllers
joint_states = np.zeros(6)     # latest measured joint positions
# Centroids of the four colour markers (filled by Img_RGB_Callback).
centre_red = (0,0)
centre_green = (0,0)
centre_yellow = (0,0)
centre_blue = (0,0)
rgb_img = np.zeros((480,640,3),np.uint8)
hsv_img = np.zeros((480,640,3),np.uint8)
Lc = np.zeros([8,6])           # stacked 2x6 interaction (image Jacobian) matrix
error = np.zeros(8)            # pixel error between current and desired features
curr_features = np.zeros((4,3))  # per-marker rows; presumably (u, v, depth) -- TODO confirm
des_features = np.array([[324, 303, 2.07349873], [271, 302, 2.09497476], [271, 249, 2.09497452], [324, 249, 2.07349825]])
# One velocity publisher per joint controller, shoulder to wrist.
pub1 = rospy.Publisher("/reachy/shoulder_pitch_velocity_controller/command", Float64, queue_size=10)
pub2 = rospy.Publisher("/reachy/shoulder_roll_velocity_controller/command", Float64, queue_size=10)
pub3 = rospy.Publisher("/reachy/arm_yaw_velocity_controller/command", Float64, queue_size=10)
pub4 = rospy.Publisher("/reachy/elbow_pitch_velocity_controller/command", Float64, queue_size=10)
pub5 = rospy.Publisher("/reachy/forearm_yaw_velocity_controller/command", Float64, queue_size=10)
pub6 = rospy.Publisher("/reachy/wrist_pitch_velocity_controller/command", Float64, queue_size=10)
def publish_joint_velocity(vel_joints):
    """Publish one velocity command to each of the six joint controllers,
    in shoulder-to-wrist order."""
    global pub1, pub2, pub3, pub4, pub5, pub6
    # print(vel_joints)
    joint_publishers = (pub1, pub2, pub3, pub4, pub5, pub6)
    for publisher, velocity in zip(joint_publishers, vel_joints):
        publisher.publish(velocity)
def update_interaction_matrix(curr_features):
    """Rebuild the 8x6 interaction matrix Lc from the four (u, v, depth)
    features, then trigger the camera-velocity update."""
    global Lc, error
    fl = 530  # focal length in pixels -- presumably from calibration, TODO confirm
    j=0
    uc = 320  # principal point (image centre), pixels
    vc = 240
    for i in range(4):
        u = curr_features[i,0]
        v = curr_features[i,1]
        z = curr_features[i,2]  # feature depth
        # z = 1
        _u = u-uc
        _v = v-vc
        # Standard 2x6 point-feature interaction matrix rows for (u, v).
        Lc[j:j+2,:] = np.array([[-fl/z, 0, _u/z, _u*_v/fl, -(fl*fl+_u*_u)/fl, _v], [0, -fl/z, _v/z, (fl*fl+_v*_v)/fl, -_u*_v/fl, -_u]])
        j=j+2
    update_cam_velocity(Lc, error)
def update_cam_velocity(Lc, error):
    """IBVS control law: map the feature error to a camera twist,
    re-express it in the end-effector/base frames, and update the joint
    velocity command."""
    global vel_cam, vel_ee, vel_joints, jac_inverse
    L_inverse = np.linalg.pinv(Lc)
    K = 0.5  # proportional visual-servoing gain
    # Classic IBVS law: v_cam = -K * pinv(L) * e
    vel_cam = -K * np.matmul(L_inverse, error)
    print(vel_cam)
    # Bug fix: `vel_ee = vel_cam` aliased the same array, so the axis
    # permutation below read entries it had already overwritten
    # (e.g. vel_ee[1] = -vel_cam[0] saw the value written by
    # vel_ee[0] = vel_cam[2]). A copy keeps the source intact.
    vel_ee = vel_cam.copy()
    # vel_ee = np.asarray([0, -0.005, 0, 0, 0, 0])
    # Axis permutation from camera frame to end-effector frame
    # (assumed camera mounting orientation -- TODO confirm).
    vel_ee[0] = vel_cam[2]
    vel_ee[3] = vel_cam[5]
    vel_ee[1] = -vel_cam[0]
    vel_ee[4] = -vel_cam[3]
    vel_ee[2] = vel_cam[1]
    vel_ee[5] = vel_cam[4]
    # V camera to V base conversion
    trans = transform.getTransformBaseWrist_hand(joint_states)
    rot = trans[:-1, :-1]  # 3x3 rotation part of the homogeneous transform
    # print(trans)
    rot = np.linalg.pinv(rot)
    vel_lin = vel_ee[:-3]  # linear components
    vel_ang = vel_ee[3:]   # angular components
    vel_ee = np.concatenate((np.matmul(rot, vel_lin), np.matmul(rot, vel_ang)), axis=None)
    # Joint Velocity Update
    vel_joints = np.matmul(jac_inverse, vel_ee)
    # print(jac_inverse)
    # print(vel_joints, vel_ee)
##################################### Callbacks ###############################################
def Img_RGB_Callback(rgb_data):
    """Image callback: extract the four colour-marker features, draw the
    desired feature positions, refresh the IBVS error and interaction
    matrix, then publish joint velocities (zero once converged)."""
    global rgb_img, hsv_img, centre_red, centre_blue, centre_green, centre_yellow
    global des_features, curr_features, Lc
    global joint_states, vel_ee, vel_joints
    global error
    bridge = CvBridge()
    rgb_img = bridge.imgmsg_to_cv2(rgb_data, desired_encoding='bgr8')
    hsv_img = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2HSV)
    # Marker centroids; each is presumably (u, v, depth) -- TODO confirm
    # against feature_extract.extract_features.
    centre_red = feature_extract.extract_features(hsv_img, "red")
    centre_blue = feature_extract.extract_features(hsv_img, "blue")
    centre_green = feature_extract.extract_features(hsv_img, "green")
    centre_yellow = feature_extract.extract_features(hsv_img, "yellow")
    curr_features[0][0] = centre_red[0]
    curr_features[0][1] = centre_red[1]
    curr_features[0][2] = centre_red[2]
    curr_features[1][0] = centre_blue[0]
    curr_features[1][1] = centre_blue[1]
    curr_features[1][2] = centre_blue[2]
    curr_features[2][0] = centre_green[0]
    curr_features[2][1] = centre_green[1]
    curr_features[2][2] = centre_green[2]
    curr_features[3][0] = centre_yellow[0]
    curr_features[3][1] = centre_yellow[1]
    curr_features[3][2] = centre_yellow[2]
    # Visualise the four desired feature locations.
    cv2.circle(rgb_img, (int(des_features[0][0]),int(des_features[0][1])), 5, (255, 0, 255), -1)
    cv2.circle(rgb_img, (int(des_features[1][0]),int(des_features[1][1])), 5, (255, 0, 255), -1)
    cv2.circle(rgb_img, (int(des_features[2][0]),int(des_features[2][1])), 5, (255, 0, 255), -1)
    cv2.circle(rgb_img, (int(des_features[3][0]),int(des_features[3][1])), 5, (255, 0, 255), -1)
    cv2.imshow("sample", rgb_img)
    cv2.waitKey(1)
    # Pixel-coordinate error only (depth column dropped), flattened 8x1.
    error = np.reshape((curr_features[:,:-1]-des_features[:,:-1]), (8,1))
    # print(curr_features)
    # print(np.linalg.norm(error, axis=None))
    update_interaction_matrix(curr_features)
    # Stop the arm once the feature error is small enough.
    if(np.linalg.norm(error, axis=None) < 30):
        publish_joint_velocity(np.zeros(6))
    else:
        publish_joint_velocity(vel_joints)
# def Image_Depth_Callback(depth_data):
# global curr_features, centre_yellow, centre_blue, centre_green, centre_red
# bridge = CvBridge()
# depth_img = bridge.imgmsg_to_cv2(depth_data, desired_encoding='passthrough')
# # cv2.circle(depth_img, (640,460), 5, (200, 100, 255), -1)
# # cv2.circle(depth_img, (int(des_features[0][0]),int(des_features[0][1])), 5, (255, 0, 255), -1)
# # cv2.circle(depth_img, (int(des_features[1][0]),int(des_features[1][1])), 5, (255, 0, 255), -1)
# # cv2.circle(depth_img, (int(des_features[2][0]),int(des_features[2][1])), 5, (255, 0, 255), -1)
# # cv2.circle(depth_img, (int(des_features[3][0]),int(des_features[3][1])), 5, (255, 0, 255), -1)
# curr_features[0][2] = depth_img[centre_red[0]][centre_red[1]]
# curr_features[1][2] = depth_img[centre_blue[0]][centre_blue[1]]
# curr_features[2][2] = depth_img[centre_green[0]][centre_green[1]]
# curr_features[3][2] = depth_img[centre_yellow[0]][centre_yellow[1]]
# # cv2.imshow("depth", depth_img)
# # cv2.waitKey(1)
# update_interaction_matrix(curr_features)
# publish_joint_velocity(vel_joints)
def Joint_State_Callback(data):
    """Joint-state callback: refresh the (inverse) Jacobian from the
    measured joint positions and republish the joint velocity command."""
    global joint_states, jac_inverse, vel_ee, vel_joints, error
    joint_states = np.asarray(data.position)
    # calc_jack presumably returns the inverse Jacobian for the six
    # joints -- TODO confirm against jacobian_function.
    jac_inverse = jacobian_function.calc_jack(joint_states[0], joint_states[1], joint_states[2], joint_states[3], joint_states[4], joint_states[5])
    # print(jac_inverse)
    vel_joints = np.matmul(jac_inverse, vel_ee)
    # Same convergence check as the image callback.
    if(np.linalg.norm(error, axis=None) < 30):
        publish_joint_velocity(np.zeros(6))
    else:
        publish_joint_velocity(vel_joints)
##################################### Callbacks ###############################################
def get_jacobian():
    # Subscribe to joint states; the callback keeps the Jacobian fresh.
    rospy.Subscriber("/reachy/joint_states", JointState, Joint_State_Callback)
def get_image():
    # Subscribe to the RGB camera stream (depth subscription is kept
    # commented out; depth comes from the feature extractor instead).
    rospy.Subscriber("/camera/rgb/image_raw", Image, Img_RGB_Callback)
    # rospy.Subscriber("/camera/depth/image_raw", Image, Image_Depth_Callback)
def main():
    """Initialise the IBVS node, register the subscribers, and spin."""
    global jac_inverse
    # `anonymous` expects a bool; the string "True" only worked by
    # truthiness.
    rospy.init_node("ibvs", anonymous=True)
    get_jacobian()
    get_image()
    rospy.spin()
if __name__=='__main__':
    main()
# -*- coding: utf-8 -*-
from openerp import api, fields, models
class StockMove(models.Model):
    # Extend stock.move with a many2many link to product brands.
    _inherit = 'stock.move'
    brand_id = fields.Many2many('product.brand', string="Brand")
StockMove()  # legacy OpenERP-7-style registration; presumably redundant under the v8 API -- TODO confirm it can be dropped
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
#
# Created by Jason Wu on 2015-08-29
#
# ------------------------------------------------------------------------
import random
class Particle(object):
    """A weighted particle for a 1-D particle filter.

    Attributes:
        pos: particle position (numeric).
        w: particle weight; the setter enforces the range [0.0, 1.0].
    """
    def __init__(self, pos, w=1):
        self._pos = pos
        self._w = w
    def __repr__(self):
        return "(x=%f, w=%f)" % (self._pos, self._w)
    @property
    def pos(self):
        """Particle position."""
        return self._pos
    @pos.setter
    def pos(self, value):
        # Accept ints as well as floats (the original rejected ints,
        # which only differ in representation). Error message grammar
        # fixed ("an float" -> "a float").
        if not isinstance(value, (int, float)):
            raise ValueError('pos must be a float!')
        if value < 0.0:
            # A particle pushed out of range is killed by zeroing its
            # weight instead of raising (deliberate: see commented raise).
            self._w = 0.0
            #raise ValueError('pos must between 0.0 ~ 1.0 !')
        self._pos = value
    @property
    def w(self):
        """Particle weight in [0.0, 1.0]."""
        return self._w
    @w.setter
    def w(self, value):
        if not isinstance(value, (int, float)):
            raise ValueError('weight must be a float!')
        if value < 0.0 or value > 1.0:
            raise ValueError('weight must between 0.0 ~ 1.0 !')
        self._w = value
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 18 13:46:37 2016
@author: Autex
"""
# NOTE(review): the original `import processinglib` left `face`, `np`,
# `conv2`, `deconv2` and `show_pics` unresolved, so every line below
# raised NameError. Importing the names explicitly fixes that -- assumes
# processinglib (re-)exports them; TODO confirm against its API.
from processinglib import face, np, conv2, deconv2, show_pics
# Blur the test image with a k x k box PSF, then restore it directly.
src = face(gray=True)
k = 3
psf = np.ones((k, k)) / k**2
direct_conv = conv2(src, psf)
direct_rest = deconv2(direct_conv, psf)
show_pics([src, direct_rest], ["Source", "Directly restored"])
import os
import sys
import struct
import platform
import fileinput
import subprocess
from setuptools import setup
from ext_targets import build_ext, StaticLibrary, Executable
# Compiler/linker flags shared by the static library and the executable.
COMPILE_FLAGS = ['-flto', '-std=gnu++11', '-g', '-Wall',
                 '-Werror', '-DRENDERER_PROCESS', '-static']
LINK_FLAGS = ['-flto', '-Wl,-rpath,.']
if len(sys.argv) > 1 and "--fast" in sys.argv:
    sys.argv.remove("--fast")
    # Fast mode disables optimization flags
    FAST = True
    print("FAST mode On")
else:
    FAST = False
# NOTE(review): FAST is set here but never read in this part of the
# script -- confirm the optimization flags were meant to depend on it.
# Fix "ImportError ... undefined symbol ..." caused by CEF's include/base/
# headers by adding the -flto flag (Issue #230). Unfortunately -flto
# prolongs compilation time significantly.
# More on the other flags: https://stackoverflow.com/questions/6687630/
COMPILE_FLAGS += ['-fdata-sections', '-ffunction-sections']
LINK_FLAGS += ['-Wl,--gc-sections']
# Architecture and OS postfixes
ARCH32 = (8 * struct.calcsize('P') == 32)
ARCH64 = (8 * struct.calcsize('P') == 64)
OS_POSTFIX = ("win" if platform.system() == "Windows" else
              "linux" if platform.system() == "Linux" else
              "mac" if platform.system() == "Darwin" else "unknown")
OS_POSTFIX2 = "unknown"
if OS_POSTFIX == "win":
    OS_POSTFIX2 = "win32" if ARCH32 else "win64"
elif OS_POSTFIX == "mac":
    OS_POSTFIX2 = "mac32" if ARCH32 else "mac64"
elif OS_POSTFIX == "linux":
    OS_POSTFIX2 = "linux32" if ARCH32 else "linux64"
# Directories
SUBPROCESS_DIR = os.path.abspath(os.path.dirname(__file__))
SRC_DIR = os.path.abspath(os.path.join(SUBPROCESS_DIR, ".."))
CEFPYTHON_DIR = os.path.abspath(os.path.join(SRC_DIR, ".."))
BUILD_DIR = os.path.abspath(os.path.join(CEFPYTHON_DIR, "build"))
CEFPYTHON_BINARY = os.path.abspath(os.path.join(BUILD_DIR,
                                                "cefpython_"+OS_POSTFIX2))
# CEF binaries: honour $CEF_BINARY when set and existing, else the
# conventional build/cef_<platform> directory.
CEF_BINARY = os.environ.get('CEF_BINARY')
if not CEF_BINARY or not os.path.exists(CEF_BINARY):
    CEF_BINARY = os.path.abspath(os.path.join(BUILD_DIR, "cef_"+OS_POSTFIX2))
# Upstream layout ships bin/ and lib/ subdirectories.
UPSTREAM_BUILD = (os.path.exists(os.path.join(CEF_BINARY, 'bin')) and
                  os.path.exists(os.path.join(CEF_BINARY, 'lib')))
# Python version string: "27" or "32".
PYTHON_VERSION = str(sys.version_info.major) + str(sys.version_info.minor)
# C++ sources for the libcefpython static library.
libcefpython_src = [
    'cefpython_app.cpp',
    'v8function_handler.cpp',
    'v8utils.cpp',
    'javascript_callback.cpp',
    'main_message_loop/main_message_loop.cpp',
    'main_message_loop/main_message_loop_std.cpp',
    'main_message_loop/main_message_loop_external_pump.cpp'
]
# Sources for the subprocess executable itself.
subprocess_src = [
    'main.cpp'
]
# Libraries to link against (GTK/glib are required by CEF on Linux).
libs = [
    'cefpython',
    'glib-2.0',
    'gtk-x11-2.0',
    'cef_dll_wrapper',
    'cef',
]
# Header search paths; the long GTK/glib list covers the multiarch
# locations used by different distributions.
include_dirs = [
    SRC_DIR,
    os.path.join(SRC_DIR, 'common'),
    os.path.join(CEF_BINARY),
    os.path.join(CEF_BINARY, 'include'),
    r'/usr/include/python2.7',
    r'/usr/include/gtk-2.0',
    r'/usr/include/gtk-unix-print-2.0',
    r'/usr/include/glib-2.0',
    r'/usr/include/cairo',
    r'/usr/include/pango-1.0',
    r'/usr/include/gdk-pixbuf-2.0',
    r'/usr/include/atk-1.0',
    r'/usr/lib/x86_64-linux-gnu/gtk-2.0/include',
    r'/usr/lib/x86_64-linux-gnu/gtk-unix-print-2.0',
    r'/usr/lib/x86_64-linux-gnu/glib-2.0/include',
    r'/usr/lib/i386-linux-gnu/gtk-2.0/include',
    r'/usr/lib/i386-linux-gnu/gtk-unix-print-2.0',
    r'/usr/lib/i386-linux-gnu/glib-2.0/include',
    r'/usr/lib64/gtk-2.0/include',
    r'/usr/lib64/gtk-unix-print-2.0',
    r'/usr/lib64/glib-2.0/include',
    r'/usr/lib/gtk-2.0/include',
    r'/usr/lib/gtk-2.0/gtk-unix-print-2.0',
    r'/usr/lib/glib-2.0/include',
]
# Pick library search paths for the two CEF binary layouts.
# Bug fix: the upstream branch also appended 'cef_dll_wrapper' to
# `libs`, but it is already listed there, so the append only produced a
# duplicate -l flag; it was removed.
if UPSTREAM_BUILD:
    library_dirs=[
        '.',
        os.path.join(CEF_BINARY,'bin'),
        os.path.join(CEF_BINARY,'lib'),
    ]
else:
    library_dirs=[
        '.',
        os.path.join(CEF_BINARY,'Release'),
        os.path.join(CEF_BINARY,'build','libcef_dll_wrapper'),
    ]
# Build libcef_wrapper
# Remove dependency on cefclient and cefsimple, we don't need
# them and the spotify minimal builds don't include them
# (the CMakeLists.txt lines are commented out in place).
for line in fileinput.input(os.path.join(CEF_BINARY,'CMakeLists.txt'), inplace=True):
    comment = ''
    if line.strip() in ('add_subdirectory(cefclient)',
                        'add_subdirectory(cefsimple)'):
        comment = '# '
    print('%s%s' % (comment, line), end='')
# Run cmake
wrapper_dir = os.path.join(CEF_BINARY,'build')
if not os.path.exists(wrapper_dir):
    os.makedirs(wrapper_dir)
subprocess.call(['cmake','-G','Ninja','-DCMAKE_BUILD_TYPE=Release','..'], cwd=wrapper_dir)
subprocess.call(['ninja','libcef_dll_wrapper'], cwd=wrapper_dir)
# Platform-specific extra sources/libraries.
if OS_POSTFIX.startswith('linux'):
    libs.insert(0, 'gobject-2.0')
    subprocess_src.append('print_handler_gtk.cpp')
    libcefpython_src.extend(['print_handler_gtk.cpp',
        'main_message_loop/main_message_loop_external_pump_linux.cpp'])
elif OS_POSTFIX.startswith('mac'):
    libcefpython_src.append('main_message_loop/main_message_loop_external_pump_mac.mm')
# Static library with the shared cefpython app code.
libcefpython = StaticLibrary(
    name='cefpython',
    sources=[os.path.join(SUBPROCESS_DIR, src) for src in libcefpython_src],
    include_dirs=include_dirs,
    extra_compile_args=COMPILE_FLAGS,
    extra_link_args=LINK_FLAGS,
)
# The renderer-process helper executable linked against it.
subprocess_exec = Executable(
    name="subprocess",
    sources=[os.path.join(SUBPROCESS_DIR, src) for src in subprocess_src],
    language='c++',
    include_dirs=include_dirs,
    library_dirs=library_dirs,
    libraries=libs,
    extra_compile_args=COMPILE_FLAGS,
    extra_link_args=LINK_FLAGS,
)
setup(
    name='subprocess_%s' % PYTHON_VERSION,
    cmdclass={'build_ext': build_ext},
    ext_modules=[libcefpython, subprocess_exec],
    setup_requires = ['setuptools_bin_targets>=1.2']
)
|
from PIL import Image, ImageDraw, ImageFont
from datetime import date
import pandas as pd
import numpy as np
import yagmail
import os
#Function to verify if the certificate exists in the search_path
def find_files(filename, search_path):
    """Return a list with one entry per directory under search_path that
    contains `filename` (empty list when it is absent anywhere)."""
    return [filename
            for _root, _dirs, files in os.walk(search_path)
            if filename in files]
#Function to attach the certificate file and send using
#gmail account (hackersvilla.xyz@gmail.com) to all participants
def mail_send(recv_mail, filename, name):
    """E-mail the certificate `filename` (stored under certs/) to one
    participant via the HackersVilla gmail account."""
    message_body = "Hi "+name+"! Here's your Certificate of Participation for Hack-n-Slash Webinar by HackersVilla :)"
    attachment_path = "certs/" + filename.strip()
    mailer = yagmail.SMTP("hackersvilla.xyz@gmail.com")
    mailer.send(
        to=recv_mail,
        subject="Knock Knock! Its HackersVilla..",
        contents=message_body,
        attachments=attachment_path,
    )
#The certificate date comes from the CSV itself (first row). The
#original also computed today's date with strftime but immediately
#overwrote it (and shadowed the imported `date` class), so that dead
#assignment was removed.
#Storing entire csv file into a dataframe
df = pd.read_csv('list.csv')
#Length of dataframe (for iterating in the loop)
entries = len(df)
#Storing the names, emails and date of participants as a numpy array
arr = np.array(df[['name','email','date']])
date = arr[0][2]
print("Found",entries,"participants")
print("Creating Certificates now.. Please Wait")
#Segment to create certificate
font1 = ImageFont.truetype('Herland.ttf',170)
font2 = ImageFont.truetype('Poppins-SemiBold.otf',34)
for index,j in df.iterrows():
    img = Image.open('certificate.jpg')
    draw = ImageDraw.Draw(img)
    W = 3650  # canvas width used to centre the name horizontally
    msg = '{}'.format(j['name'])
    w, h = draw.textsize(msg, font=font1)
    draw.text(((W-w)/2,900), msg, fill="white", font=font1)
    #draw.text(xy=(1700,900),text='{}'.format(j['name']),fill="white",font=font1)
    draw.text(xy=(2400,1385),text=date,fill="#00f0ff",font=font2)
    img.save('certs/{}.jpg'.format(j['name']))
print("Certificates have been created and are ready to mail")
print("Starting now \n\n")  # typo fix: was "Stariting"
#Segment to iterate over every row in the array, verify the cert file and send on the corresponding email of participant
for x in range(entries):
    name = arr[x][0]
    img = name + ".jpg"
    print("Name of Participant:", name)
    print("Searching for cert", img)
    if find_files(img,"./certs"):
        print("Certificate Found for", name)
        email = arr[x][1]
        print("Sending email on",email)
        mail_send(email,img,name)
        print("Certificate Sent\n\n")
    else:
        print("Certificate file not found for",name)
        print("Continuing further..\n\n")
print("Huff! I am done boss")
|
# connected-component analysis: binary or thresholded image
# The first pass
# step 1: check if we care about the central pixel p or not
# if p == 0:
# ignore
# else:
# proceed to step 2 and step 3
# step 2 and step 3
# north and west pixels, denoted as N and W
# if N and W are background pixels:
# create a new label
# elif N and/or W are not background pixels:
# proceed to step 4 and step 5
# step 4 and step 5
#   label of center pixel p = min(N, W)
# step 6
# union-find data structure
# step 7
# Continue to the next pixel and go repeat the process beginning with Step 1
# The second pass
# looping over the image once again, one pixel at a time
# if label in a set, they have common minimum label
from skimage.filters import threshold_local
from skimage import measure
import numpy as np
import cv2
# load image
plate = cv2.imread('license_plate.png')
# convert to HSV and apply adaptive thresholding on the V channel
V = cv2.split(cv2.cvtColor(plate, cv2.COLOR_BGR2HSV))[2]
thresh = cv2.adaptiveThreshold(V, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 17, 3)
cv2.imshow('License plate', plate)
cv2.imshow('Thresh', thresh)
cv2.waitKey(0)
# apply connected-component analysis
# connectivity=2 means 8-connectivity; the old `neighbors=8` keyword was
# deprecated in scikit-image 0.14 and removed in 0.16.
labels = measure.label(thresh, connectivity=2, background=0)
mask = np.zeros(thresh.shape, dtype='uint8')
print('[INFO] found {} blobs'.format(len(np.unique(labels))))
for (i, label) in enumerate(np.unique(labels)):
    if label == 0:
        print('[INFO] label: 0(background)')
        continue
    # Bug fix: report the actual component label; the original printed
    # the enumeration index `i` instead.
    print('[INFO] label: {}(foreground)'.format(label))
    labelMask = np.zeros(thresh.shape, dtype='uint8')
    labelMask[labels == label] = 255
    numPixels = cv2.countNonZero(labelMask)
    # ensure the blobs be the useful information (plausible glyph size)
    if numPixels > 300 and numPixels < 1500:
        mask = cv2.add(mask, labelMask)
        cv2.imshow('Label', labelMask)
        cv2.waitKey(0)
cv2.imshow('Large blobs', mask)
cv2.waitKey(0)
card_list = []
# 显示功能菜单
def menu():
    """Print the main menu of the card system."""
    for line in ('欢迎进入名片系统 V1.0',
                 '1. 新建名片',
                 '2. 显示所有名片',
                 '3. 查找名片',
                 '',
                 '0. 退出系统'):
        print(line)
# 新建名片
def new_card():
    """Prompt for name/phone/email and append the new card to the list."""
    print('【添加新名片】')
    print('请根据提示输入信息')
    # Dict literals evaluate in order, so the prompts appear
    # name -> phone -> email exactly as before.
    card = {
        'name': input('请输入姓名:'),
        'phone': input('请输入电话:'),
        'email': input('请输入邮箱:'),
    }
    card_list.append(card)
    print('新名片添加成功!')
    print(card_list)
# 显示所有名片
def all_cards():
    """Print every stored card as a simple tab-separated table."""
    if not card_list:
        print('没有任何名片!')
        return
    print('{}\t\t{}\t\t{}'.format('姓名', '电话', '邮箱'))
    print('-' * 50)
    for card in card_list:
        print('{}\t\t\t{}\t\t\t{}'.format(card['name'], card['phone'], card['email']))
    print('-' * 50)
# 查询名片
def search_card():
    """Ask the user for a name and print every matching card.

    Bug fix: the not-found branch started with a stray `pass` statement
    left over from a stub; it was dead code and has been removed.
    """
    print('【查找名片】')
    find_list = []
    # Nothing to search when the system holds no cards at all.
    if not card_list:
        print('系统中没有任何名片')
    else:
        find_name = input('请输入要查找的姓名:')
        for card in card_list:
            if card['name'] == find_name:
                find_list.append(card)
        if not find_list:
            # No match: tell the user which name was not found.
            print('没有找到【{}】的名片信息!'.format(find_name))
        else:
            # Matches found: print them as a table.
            print('{}\t\t{}\t\t{}'.format('姓名', '电话', '邮箱'))
            for card in find_list:
                print('{}\t\t\t{}\t\t\t{}'.format(card['name'], card['phone'], card['email']))
            print('-' * 50)
|
# -*- coding: UTF-8 -*-
import requests
import traceback
class KfReq:
    """Thin wrapper around `requests` that keeps a cookie jar and a
    browser-like user-agent across calls.

    NOTE(review): certificate verification is disabled (verify=False);
    confirm this is intentional before using against untrusted hosts.
    """
    # Give up after this many attempts; the original recursed on *any*
    # failure, retrying forever and growing the call stack.
    MAX_RETRIES = 3
    def __init__(self):
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
        }
        self.cookies = dict()
    def get(self, url, **kwargs):
        kwargs['method'] = 'GET'
        kwargs['url'] = url
        return self.request(**kwargs)
    def post(self, url, **kwargs):
        kwargs['method'] = 'POST'
        kwargs['url'] = url
        return self.request(**kwargs)
    def put(self, url, **kwargs):
        kwargs['method'] = 'PUT'
        kwargs['url'] = url
        return self.request(**kwargs)
    def delete(self, url, **kwargs):
        kwargs['method'] = 'DELETE'
        kwargs['url'] = url
        return self.request(**kwargs)
    def head(self, url, **kwargs):
        kwargs['method'] = 'HEAD'
        kwargs['url'] = url
        return self.request(**kwargs)
    def options(self, url, **kwargs):
        kwargs['method'] = 'OPTIONS'
        kwargs['url'] = url
        return self.request(**kwargs)
    def request(self, method, url, **kwargs):
        """Issue the request, persisting any response cookies.

        Retries up to MAX_RETRIES times on failure (printing each
        traceback), then re-raises the last error instead of recursing
        without bound as the original did.
        """
        for attempt in range(self.MAX_RETRIES):
            try:
                r = requests.request(method, url, headers=self.headers, cookies=self.cookies, verify=False, timeout=30, **kwargs)
                self.cookies.update(r.cookies.get_dict())
                return r.text
            except Exception:
                traceback.print_exc()
                if attempt == self.MAX_RETRIES - 1:
                    raise
|
'search for an item in a sorted matrix'
import bisect
NOT_FOUND = (False, (None, None))
def transpose(mat, r, s):
    'returns the transpose of a matrix'
    # r x s input becomes an s x r output; a nested comprehension
    # replaces the explicit row-building loops.
    return [[mat[x][y] for x in range(r)] for y in range(s)]
def getSortedMatrix(n, m):
    'creates and returns a sorted matrix'
    # Builds an n x m matrix with sorted rows and columns by first
    # constructing one cumulative "profile" row (assuming r <= s), then
    # deriving each matrix row as a shifted window into it; transposes
    # back at the end when n > m.
    r, s = n, m
    if n > m:
        r, s = m, n
    row = []
    last = 0
    # increments grow 1..r while the anti-diagonal is expanding
    for x in range(1, r+1):
        curr = last + x
        row.append(curr)
        last = curr
    # constant increments of r across the middle band
    for x in range(r+1, s+1):
        curr = last + r
        row.append(curr)
        last = curr
    # increments shrink r..1 while the anti-diagonal is contracting
    for y in range(r, 0, -1):
        curr = last + y
        row.append(curr)
        last = curr
    # row x of the matrix is the window row[x:x+s] shifted down by x
    mat = [[(y - x) for y in row[x:x+s]] for x in range(0, n)]
    if n > m:
        mat = transpose(mat, r, s)
    return mat
class MatDiagToList(object):
    'allows indexing the diag of a matrix like a list'
    def __init__(self, mat, sx, sy, nels):
        """Wrap the diagonal that starts at (sx, sy) and has nels elements."""
        self.mat = mat
        self.sx = sx
        self.sy = sy
        self.nels = nels
    def __getitem__(self, x):
        if x < 0:
            # Bug fix: the original wrote `x = self.nels = x`, which
            # clobbered nels with the negative index instead of wrapping
            # the index like a list does.
            x += self.nels
        if x >= self.nels:
            raise IndexError
        return self.mat[self.sx + x][self.sy + x]
    def __len__(self):
        return self.nels
class MatSeqToList(object):
    'allows indexing a row/col of a matrix like a list'
    def __init__(self, mat, sx, sy, ex, ey):
        """Wrap the row or column segment from (sx, sy) to (ex, ey);
        exactly one of the two coordinates varies."""
        self.mat = mat
        self.sx = sx
        self.sy = sy
        self.ex = ex
        self.ey = ey
        # number of elements along the single varying axis
        self.nels = max(ex - sx, ey - sy) + 1
        # True when the x (row) coordinate varies, i.e. a column slice
        self.dxy = True if ex > sx else False
    def __getitem__(self, offset):
        if offset < 0:
            # Bug fix: the original computed `self.nels - offset`, which
            # *adds* |offset| and always raised IndexError for negative
            # indices; wrap like a list instead.
            offset += self.nels
        if offset >= self.nels:
            raise IndexError
        if self.dxy:
            return self.mat[self.sx+offset][self.sy]
        return self.mat[self.sx][self.sy+offset]
    def __len__(self):
        return self.nels
def linBSearch(mat, t, sx, sy, ex, ey):
    'search for element in a linear part of the matrix'
    # Binary-search a single row (sx == ex) or column of the matrix by
    # viewing the segment through the MatSeqToList list adapter.
    mlen = max(ex - sx, ey - sy) + 1
    pos = bisect.bisect_left(MatSeqToList(mat, sx, sy, ex, ey), t)
    if pos >= mlen:
        return NOT_FOUND
    if sx == ex:
        # horizontal segment: position varies along y
        if mat[sx][sy + pos] == t:
            return True, (sx, sy + pos)
        return NOT_FOUND
    # vertical segment: position varies along x
    if mat[sx + pos][sy] == t:
        return True, (sx + pos, sy)
    return NOT_FOUND
def findInMatRecur(mat, t, sx, sy, ex, ey):
    'search for the element t in the submatrix'
    # Binary-search the main diagonal of the submatrix, then recurse into
    # the two off-diagonal quadrants that may still contain t.
    if sx > ex or sy > ey:
        return NOT_FOUND
    if ex - sx == 0 or ey - sy == 0:
        # degenerate submatrix (single row or column): plain binary search
        return linBSearch(mat, t, sx, sy, ex, ey)
    nels = min(ex - sx, ey - sy) + 1
    pos = bisect.bisect_left(MatDiagToList(mat, sx, sy, nels), t)
    if pos == 0 and mat[sx][sy] != t:
        # t is smaller than every element of this submatrix
        return NOT_FOUND
    cx, cy = sx + pos, sy + pos
    if pos < nels and mat[cx][cy] == t:
        return True, (cx, cy)
    # t lies strictly between diagonal elements: search the lower-left
    # and upper-right regions around (cx, cy).
    res, pxy = findInMatRecur(mat, t, cx, sy, ex, cy-1)
    if res:
        return res, pxy
    return findInMatRecur(mat, t, sx, cy, cx-1, ey)
def findInMatrix(mat, t):
    'search for element t in the matrix'
    # Delegate to the recursive search over the whole matrix extent.
    rows, cols = len(mat), len(mat[0])
    return findInMatRecur(mat, t, 0, 0, rows - 1, cols - 1)
def test_findInMatrix():
    'test for findInMatrix method'
    # Every element present in the matrix must be found at its position.
    m, n = 12, 5
    mat = getSortedMatrix(m, n)
    for x in range(len(mat)):
        for y in range(len(mat[0])):
            el = mat[x][y]
            res, (sx, sy) = findInMatrix(mat, el)
            assert res and x == sx and y == sy
    # Values outside the matrix range must report not-found.
    res, (sx, sy) = findInMatrix(mat, -20)
    assert (not res) and (sx is None) and (sy is None)
    res, (sx, sy) = findInMatrix(mat, m * n + 20)
    assert (not res) and (sx is None) and (sy is None)
    # Bug fix: `print 'Test Passed'` is Python 2 syntax and a
    # SyntaxError under Python 3; the call form works on both.
    print('Test Passed')
if __name__ == '__main__':
    test_findInMatrix()
|
class base1c():
    # Demo base class; `d` is accepted but unused.
    def x(s, d):
        print("base1->")
        print("<-base1")
class base2n():
    # Demo base class with a four-argument x(); super() call left disabled.
    def x(s, c1, c2, c3, c4):
        print("base2->")
        #super().x()
        print("<-base2")
class main (base2n, base1c):
    # Calls each base implementation explicitly (not via super()),
    # since the two bases have incompatible signatures.
    def x(s):
        print("main->")
        base2n.x(s,1,2,3,4)
        base1c.x(s,55)
        print("<-main")
main().x()
import data
import xlrd
from entities.Day import Day
from entities.Group import Group
from database.Database import Database
from entities.Lesson import Lesson
import requests
def pair_merged(sheet):
    """Copy each merged region's top-left cell into every cell the
    region spans, so merged values are readable from any cell."""
    for rbeg, rend, cbeg, cend in sheet.merged_cells:
        source = sheet.cell(rbeg, cbeg)
        for r in range(rbeg, rend):
            for c in range(cbeg, cend):
                sheet.put_cell(r, c, source.ctype, source.value, source.xf_index)
def filter(sheet):
    """Blank out every cell whose value is listed in data.without_cells.

    Bug fix: the body previously iterated/read/wrote the module-level
    global `sh` instead of the `sheet` argument, so it only worked by
    accident when called from the top-level loop.
    """
    for rx in range(sheet.nrows):
        for cx in range(sheet.ncols):
            cell = sheet.cell(rx, cx)
            if cell.value in data.without_cells:
                sheet.put_cell(rx, cx, cell.ctype, "", cell.xf_index)
def search_cell(sheet, values):
    """Return the (row, col) pair of every cell whose value is in
    `values`, scanning rows top-to-bottom and columns left-to-right."""
    return [(r, c)
            for r in range(sheet.nrows)
            for c in range(sheet.ncols)
            if sheet.cell(r, c).value in values]
def parse_table(sheet):
    """Build one Group per timetable column.
    Group names come from the header row found via
    data.GROUPS_IN_TABLE_TITLE; lessons are read down to the last row
    marked by data.LAST_ROW_TABLE_TITLE.
    """
    groups_row, groups_col = search_cell(sheet, data.GROUPS_IN_TABLE_TITLE)[0]
    last_row, last_col = search_cell(sheet, data.LAST_ROW_TABLE_TITLE)[-1]
    groups = [Group(sheet.cell(groups_row, col).value) for col in range(groups_col + 1, sheet.ncols)]
    i = 0
    for col in range(groups_col + 1, sheet.ncols):
        for row in range(groups_row + 1, last_row):
            # column 0 = day name, column 1 = time slot, `col` = lesson text
            groups[i].add_lesson(sheet.cell(row, 0).value, sheet.cell(row, 1).value, sheet.cell(row, col).value)
        i += 1
    return groups
# Download the timetable workbook, normalise each sheet (blank ignorable
# cells, expand merged regions), parse the groups and print them.
print("download:",data.URL[0])
response = requests.get(data.URL[0])
book = xlrd.open_workbook(file_contents=response.content, formatting_info=True)
for sh in book.sheets():
    filter(sh)       # blank out cells listed in data.without_cells
    pair_merged(sh)  # give every merged cell its region's value
    groups = parse_table(sh)
    for group in groups:
        print(group.name, "------------------------------------------------")
        for name, day in group.days.items():
            print(name, "---", day.name)
            for time, lesson in day.lessons.items():
                for l in lesson:
                    print(time, l)
db = Database()
db.save(groups) |
from .node import Node
class Stack:
    """Singly linked LIFO stack built from Node objects.

    Bug fixes versus the original:
    - the `iterable` constructor argument was silently ignored; its items
      are now pushed in order,
    - the mutable default argument `[]` was replaced with None,
    - __str__ returned None (a bare `pass`), making str(stack) raise.
    """
    def __init__(self, iterable=None):
        """Create a stack, pushing each item of `iterable` in order
        (the last item ends up on top)."""
        self.top = None
        self.len = 0
        for val in iterable or ():
            self.push(val)
    def __len__(self):
        return self.len
    def __str__(self):
        top_val = None if self.top is None else self.top.val
        return 'Stack(top={}, len={})'.format(top_val, self.len)
    def push(self, val):
        """Add one item to the top of the stack; return the new top node."""
        node = Node(val)
        self.len += 1
        node._next = self.top
        self.top = node
        return self.top
    def pop(self):
        """Remove and return the value of the top item.

        Raises:
            IndexError: when the stack is empty.
        """
        if self.top is None:
            raise IndexError('Stack is empty')
        self.len -= 1
        node = self.top
        self.top = self.top._next
        return node.val
    def peek(self):
        """Return the top Node without removing it (None when empty)."""
        return self.top
|
import random
def roll(number_of_throws):
    """Geeft een random getal tussen 1 en 6 voor het gegeven aantal keer.

    input:
        number_of_throws - int
    output:
        return getallenlijst - list of ints in 1..6; an EMPTY list (not
        None, as before) when number_of_throws is not positive.

    FIX: the original ended with ``while getallenlijst != []: return ...``
    -- a loop abused as an if -- and returned None for non-positive input.
    """
    if number_of_throws <= 0:
        print("Geef een positief getal")
        return []
    return [random.randint(1, 6) for _ in range(number_of_throws)]
def main():
    """Ask how many dice rolls to make and print the resulting list."""
    # int() raises ValueError on non-numeric input; no handling by design.
    number_of_throws = int(input("Hoe vaak wil je gooien? "))
    getallenlijst = roll(number_of_throws)
    print(getallenlijst)
main()
|
# -*- coding: utf-8 -*-
import scrapy
class SinopecsalesItem(scrapy.Item):
    """Scrapy item for one Sinopec fuel-card transaction.

    Only cardNo, amount, opeTime and traName are currently collected; the
    other fields seen in the sample payload below are kept commented out.
    """
    # Sample payload:
    #{"amount":"20000","balance":"262773","litre":"3160","oilName":"95号车用汽油(V)",
    # "price":"633","opeTime":"2016-07-27 09:10:37",
    # "reward":"200","nodeTag":"滨州石油第5加油站","traName":"加油"}
    # holders = scrapy.Field()
    cardNo = scrapy.Field()   # fuel-card number
    amount = scrapy.Field()   # transaction amount
    # balance = scrapy.Field()
    # litre = scrapy.Field()
    # oilName = scrapy.Field()
    # price = scrapy.Field()
    opeTime = scrapy.Field()  # operation timestamp
    # reward = scrapy.Field()
    # nodeTag = scrapy.Field()
    traName = scrapy.Field()  # transaction type name
|
import numpy as np
def strassen(A, B):
    """Multiply two square matrices with Strassen's algorithm.

    Parameters
    ----------
    A, B : np.ndarray
        2-D square arrays of the same shape whose side length is a power
        of two.

    Returns
    -------
    np.ndarray
        The matrix product A @ B.

    Raises
    ------
    Exception
        If the inputs are not ndarrays, not 2-D, not square / not the same
        shape, or the side length is not a power of two.

    BUG FIXES vs. the original:
      * the ndarray check was inverted (raised exactly when inputs WERE arrays)
      * the 2-D / squareness / power-of-two checks were ``if True`` / ``if A``
        stubs that always (or arbitrarily) raised
      * ``numpy.zeros`` referenced an unimported name (module is ``np``)
      * the result quadrants were never assigned (bare ``C[...]`` expressions)
    """
    if not (isinstance(A, np.ndarray) and isinstance(B, np.ndarray)):
        raise Exception('Inputs are not numpy ndarrays')
    if A.ndim != 2 or B.ndim != 2:
        raise Exception('Inputs are not bidimensional')
    if A.shape[0] != A.shape[1] or A.shape != B.shape:
        raise Exception('Matrices are not squared')
    n = len(A)
    if n == 0 or (n & (n - 1)) != 0:
        raise Exception('Matrices are not of n power of two')
    if n <= 2:
        # Base case: plain product (also covers n == 1, which previously
        # recursed forever).
        return A @ B
    h = n // 2
    A11, A12 = A[:h, :h], A[:h, h:]
    A21, A22 = A[h:, :h], A[h:, h:]
    B11, B12 = B[:h, :h], B[:h, h:]
    B21, B22 = B[h:, :h], B[h:, h:]
    # The seven Strassen products.
    M1 = strassen(A11 + A22, B11 + B22)
    M2 = strassen(A21 + A22, B11)
    M3 = strassen(A11, B12 - B22)
    M4 = strassen(A22, B21 - B11)
    M5 = strassen(A11 + A12, B22)
    M6 = strassen(A21 - A11, B11 + B12)
    M7 = strassen(A12 - A22, B21 + B22)
    C = np.zeros((n, n), dtype=np.result_type(A, B))
    C[:h, :h] = M1 + M4 - M5 + M7
    C[:h, h:] = M3 + M5
    C[h:, :h] = M2 + M4
    C[h:, h:] = M1 - M2 + M3 + M6
    return C
|
#coding=utf-8
# Python 2 script: read n numbers and count how many are positive,
# negative and zero.  NOTE: under Python 2, input() eval()s what is typed.
list1=[]  # positives
list2=[]  # negatives
list3=[]  # zeros
n=input('输入n的值:')
for i in range(1,n+1,1):
    x=input('输入一个数:')
    if x>0:
        list1=list1+[x]
    elif x<0:
        list2=list2+[x]
    else:
        list3=list3+[x]
print '这些数中正数的个数:',len(list1)
print '这些数中负数的个数:',len(list2)
print '这些数中零的个数:',len(list3)
|
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.callbacks import TensorBoard
# Generate dummy data
import numpy as np
x_train = np.random.random((1000, 20))
y_train = keras.utils.to_categorical(np.random.randint(10, size=(1000, 1)), num_classes=10)
x_test = np.random.random((100, 20))
y_test = keras.utils.to_categorical(np.random.randint(10, size=(100, 1)), num_classes=10)
model = Sequential()
# Dense(64) is a fully-connected layer with 64 hidden units.
# The first layer must declare the expected input shape:
# here, a 20-dimensional vector.
model.add(Dense(64, activation='relu', input_dim=20))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])
tbCallBack = TensorBoard(log_dir='../../logs', # log directory
                 histogram_freq=0,  # how often (in epochs) to compute histograms; 0 disables
#                  batch_size=32,     # amount of data used to compute histograms
                 write_graph=True,  # whether to store the network graph
                 write_grads=True, # whether to visualize gradient histograms
                 write_images=True,# whether to visualize weights as images
                 embeddings_freq=0,
                 embeddings_layer_names=None,
                 embeddings_metadata=None)
model.fit(x_train, y_train,
          epochs=20,
          batch_size=128,
          callbacks=[tbCallBack])
# Final test loss/accuracy (computed but not printed).
score = model.evaluate(x_test, y_test, batch_size=128)
#tensorboard --logdir ./logs
from keras.utils.vis_utils import plot_model
plot_model(model, to_file='model.png')
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model).create(prog='dot', format='svg'))
# Requires pydot-ng and graphviz; if errors occur, install from the command line:
#pip install pydot-ng & brew install graphviz
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-22 11:46
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: tourism-statistics tables for ac_site.

    NOTE(review): the AlterField defaults below are *frozen* datetime
    instances captured at generation time (a common auto-generation
    artifact of using datetime.now() instead of a callable in the model);
    do not hand-edit -- fix the model default and regenerate instead.
    """
    dependencies = [
        ('ac_site', '0014_auto_20170922_2028'),
    ]
    operations = [
        migrations.CreateModel(
            name='City',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('city_id', models.IntegerField(default=0)),
                ('city', models.CharField(max_length=30)),
            ],
        ),
        migrations.CreateModel(
            name='Consumption',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('csm_id', models.IntegerField(default=0)),
                ('prefecture_id', models.IntegerField(default=0)),
                ('num_of_answers', models.IntegerField(default=0)),
                ('consumption', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ForeignGuest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_of_foreign_id', models.IntegerField(default=0)),
                ('guest_2011', models.IntegerField(default=0)),
                ('guest_2012', models.IntegerField(default=0)),
                ('guest_2013', models.IntegerField(default=0)),
                ('guest_2014', models.IntegerField(default=0)),
                ('guest_2015', models.IntegerField(default=0)),
                ('guest_2016', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='ForeignGuestM',
            # One denormalized column per month from 2011-01 through 2016-05.
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('num_of_foreign_month_id', models.IntegerField(default=0)),
                ('guest_201101', models.IntegerField(default=0)),
                ('guest_201102', models.IntegerField(default=0)),
                ('guest_201103', models.IntegerField(default=0)),
                ('guest_201104', models.IntegerField(default=0)),
                ('guest_201105', models.IntegerField(default=0)),
                ('guest_201106', models.IntegerField(default=0)),
                ('guest_201107', models.IntegerField(default=0)),
                ('guest_201108', models.IntegerField(default=0)),
                ('guest_201109', models.IntegerField(default=0)),
                ('guest_201110', models.IntegerField(default=0)),
                ('guest_201111', models.IntegerField(default=0)),
                ('guest_201112', models.IntegerField(default=0)),
                ('guest_201201', models.IntegerField(default=0)),
                ('guest_201202', models.IntegerField(default=0)),
                ('guest_201203', models.IntegerField(default=0)),
                ('guest_201204', models.IntegerField(default=0)),
                ('guest_201205', models.IntegerField(default=0)),
                ('guest_201206', models.IntegerField(default=0)),
                ('guest_201207', models.IntegerField(default=0)),
                ('guest_201208', models.IntegerField(default=0)),
                ('guest_201209', models.IntegerField(default=0)),
                ('guest_201210', models.IntegerField(default=0)),
                ('guest_201211', models.IntegerField(default=0)),
                ('guest_201212', models.IntegerField(default=0)),
                ('guest_201301', models.IntegerField(default=0)),
                ('guest_201302', models.IntegerField(default=0)),
                ('guest_201303', models.IntegerField(default=0)),
                ('guest_201304', models.IntegerField(default=0)),
                ('guest_201305', models.IntegerField(default=0)),
                ('guest_201306', models.IntegerField(default=0)),
                ('guest_201307', models.IntegerField(default=0)),
                ('guest_201308', models.IntegerField(default=0)),
                ('guest_201309', models.IntegerField(default=0)),
                ('guest_201310', models.IntegerField(default=0)),
                ('guest_201311', models.IntegerField(default=0)),
                ('guest_201312', models.IntegerField(default=0)),
                ('guest_201401', models.IntegerField(default=0)),
                ('guest_201402', models.IntegerField(default=0)),
                ('guest_201403', models.IntegerField(default=0)),
                ('guest_201404', models.IntegerField(default=0)),
                ('guest_201405', models.IntegerField(default=0)),
                ('guest_201406', models.IntegerField(default=0)),
                ('guest_201407', models.IntegerField(default=0)),
                ('guest_201408', models.IntegerField(default=0)),
                ('guest_201409', models.IntegerField(default=0)),
                ('guest_201410', models.IntegerField(default=0)),
                ('guest_201411', models.IntegerField(default=0)),
                ('guest_201412', models.IntegerField(default=0)),
                ('guest_201501', models.IntegerField(default=0)),
                ('guest_201502', models.IntegerField(default=0)),
                ('guest_201503', models.IntegerField(default=0)),
                ('guest_201504', models.IntegerField(default=0)),
                ('guest_201505', models.IntegerField(default=0)),
                ('guest_201506', models.IntegerField(default=0)),
                ('guest_201507', models.IntegerField(default=0)),
                ('guest_201508', models.IntegerField(default=0)),
                ('guest_201509', models.IntegerField(default=0)),
                ('guest_201510', models.IntegerField(default=0)),
                ('guest_201511', models.IntegerField(default=0)),
                ('guest_201512', models.IntegerField(default=0)),
                ('guest_201601', models.IntegerField(default=0)),
                ('guest_201602', models.IntegerField(default=0)),
                ('guest_201603', models.IntegerField(default=0)),
                ('guest_201604', models.IntegerField(default=0)),
                ('guest_201605', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='HotelType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hotel_id', models.IntegerField(default=0)),
                ('prefecture_id', models.IntegerField(default=0)),
                ('hotel', models.IntegerField(default=0)),
                ('ryokan', models.IntegerField(default=0)),
                ('condominium', models.IntegerField(default=0)),
                ('dorm', models.IntegerField(default=0)),
                ('house', models.IntegerField(default=0)),
                ('youth_hostel', models.IntegerField(default=0)),
                ('other', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='PriceofLand',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priceofland_id', models.IntegerField(default=0)),
                ('prefecture_id', models.IntegerField(default=0)),
                ('city_id', models.IntegerField(default=0, null=True)),
                ('properties', models.IntegerField(default=0)),
                ('average_price', models.IntegerField(default=0)),
                ('upper_price', models.IntegerField(default=0)),
                ('lower_price', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='TourResource',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('scr_id', models.IntegerField(default=0)),
                ('prefecture_id', models.IntegerField(default=0)),
                ('city_id', models.IntegerField(default=0, null=True)),
                ('scr_type1', models.CharField(max_length=20)),
                ('scr_type2', models.CharField(max_length=20)),
                ('scr_name', models.CharField(max_length=50)),
                ('scr_rank', models.CharField(max_length=5)),
                ('scr_score', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='WebSite',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('web_id', models.IntegerField(default=0)),
                ('prefecture_id', models.IntegerField(default=0)),
                ('city_id', models.IntegerField(default=0, null=True)),
                ('website', models.IntegerField(default=0)),
            ],
        ),
        # Frozen generation-time timestamps (see class docstring).
        migrations.AlterField(
            model_name='listingtrend',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2017, 9, 22, 11, 46, 15, 437623)),
        ),
        migrations.AlterField(
            model_name='summaryarticlebreakdown',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2017, 9, 22, 11, 46, 15, 436207)),
        ),
        migrations.AlterField(
            model_name='summarysizebreakdown',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime(2017, 9, 22, 11, 46, 15, 435583)),
        ),
    ]
|
import base58
import os
from common.serializers.serialization import state_roots_serializer
from plenum.common.constants import DOMAIN_LEDGER_ID, ALIAS, BLS_KEY
from plenum.common.keygen_utils import init_bls_keys
from plenum.common.messages.node_messages import Commit, Prepare, PrePrepare
from plenum.common.util import get_utc_epoch, randomString, random_from_alphabet
from plenum.test.helper import sendRandomRequests, waitForSufficientRepliesForRequests
from plenum.test.node_catchup.helper import waitNodeDataEquality, ensureClientConnectedToNodesAndPoolLedgerSame
from plenum.test.pool_transactions.helper import updateNodeData, new_client
def generate_state_root():
    """Return a base58 encoding of 32 cryptographically random bytes."""
    raw = os.urandom(32)
    return base58.b58encode(raw)
def check_bls_multi_sig_after_send(looper, txnPoolNodeSet,
                                   client, wallet,
                                   saved_multi_sigs_count):
    """Send several single requests and verify how many nodes stored a BLS
    multi-signature per batch, and that nodes agree on the stored value.

    saved_multi_sigs_count: expected number of nodes holding a multi-sig
    for each state root.
    """
    # at least two because first request could have no
    # signature since state can be clear
    number_of_requests = 3
    # 1. send requests
    # Using loop to avoid 3pc batching
    state_roots = []
    for i in range(number_of_requests):
        reqs = sendRandomRequests(wallet, client, 1)
        waitForSufficientRepliesForRequests(looper, client, requests=reqs)
        waitNodeDataEquality(looper, txnPoolNodeSet[0], *txnPoolNodeSet[:-1])
        # Remember the committed state root after each request.
        state_roots.append(
            state_roots_serializer.serialize(
                bytes(txnPoolNodeSet[0].getState(DOMAIN_LEDGER_ID).committedHeadHash)))
    # 2. get all saved multi-sigs
    multi_sigs_for_batch = []
    for state_root in state_roots:
        multi_sigs = []
        for node in txnPoolNodeSet:
            multi_sig = node.bls_bft.bls_store.get(state_root)
            if multi_sig:
                multi_sigs.append(multi_sig)
        multi_sigs_for_batch.append(multi_sigs)
    # 3. check how many multi-sigs are saved
    for multi_sigs in multi_sigs_for_batch:
        assert len(multi_sigs) == saved_multi_sigs_count,\
            "{} != {}".format(len(multi_sigs), saved_multi_sigs_count)
    # 3. check that bls multi-sig is the same for all nodes we get PrePrepare for (that is for all expect the last one)
    for multi_sigs in multi_sigs_for_batch[:-1]:
        if multi_sigs:
            assert multi_sigs.count(multi_sigs[0]) == len(multi_sigs)
def process_commits_for_key(key, pre_prepare, bls_bfts):
    """Have every bls_bft produce a Commit for *key* and feed it to all
    bls_bfts (including the sender itself) for processing."""
    for sender in bls_bfts:
        signed_commit = create_commit_bls_sig(sender, key, pre_prepare)
        for receiver in bls_bfts:
            receiver.process_commit(signed_commit, sender.node_id)
def process_ordered(key, bls_bfts, pre_prepare, quorums):
    """Notify every bls_bft that the batch identified by *key* was ordered."""
    for handler in bls_bfts:
        handler.process_order(key, quorums, pre_prepare)
def calculate_multi_sig(creator, bls_bft_with_commits, quorums, pre_prepare):
    """Feed commits from *bls_bft_with_commits* to *creator* and return the
    aggregated multi-signature, or None when the quorum is not reached."""
    key = (0, 0)
    for sender in bls_bft_with_commits:
        signed_commit = create_commit_bls_sig(sender, key, pre_prepare)
        creator.process_commit(signed_commit, sender.node_id)
    if not creator._can_calculate_multi_sig(key, quorums):
        return None
    return creator._calculate_multi_sig(key, pre_prepare)
def create_pre_prepare_params(state_root,
                              ledger_id=DOMAIN_LEDGER_ID,
                              txn_root=None,
                              timestamp=None,
                              bls_multi_sig=None):
    """Assemble the positional field list for a PrePrepare message.

    Falsy timestamp / txn_root fall back to get_utc_epoch() and '1'*32
    respectively; a BLS multi-signature is appended only when provided.
    """
    fields = [
        0,
        0,
        0,
        timestamp or get_utc_epoch(),
        [('1' * 16, 1)],
        0,
        "random digest",
        ledger_id,
        state_root,
        txn_root or '1' * 32,
    ]
    if bls_multi_sig:
        fields.append(bls_multi_sig.as_list())
    return fields
def create_pre_prepare_no_bls(state_root):
    """Build a PrePrepare for *state_root* without a BLS multi-signature."""
    return PrePrepare(*create_pre_prepare_params(state_root=state_root))
def create_commit_params(view_no, pp_seq_no):
    """Positional field list for a Commit message (instance id is fixed 0)."""
    instance_id = 0
    return [instance_id, view_no, pp_seq_no]
def create_commit_no_bls_sig(req_key):
    """Build a Commit for *req_key* = (view_no, pp_seq_no), no BLS signature."""
    view_no, pp_seq_no = req_key
    return Commit(*create_commit_params(view_no, pp_seq_no))
def create_commit_with_bls_sig(req_key, bls_sig):
    """Build a Commit carrying the given (already computed) BLS signature."""
    view_no, pp_seq_no = req_key
    fields = create_commit_params(view_no, pp_seq_no) + [bls_sig]
    return Commit(*fields)
def create_commit_bls_sig(bls_bft, req_key, pre_prepare):
    """Build a Commit whose BLS signature is produced by *bls_bft*."""
    view_no, pp_seq_no = req_key
    fields = bls_bft.update_commit(create_commit_params(view_no, pp_seq_no),
                                   pre_prepare)
    return Commit(*fields)
def create_prepare_params(view_no, pp_seq_no, state_root):
    """Positional field list for a Prepare message (instance id fixed 0)."""
    fields = [0, view_no, pp_seq_no]
    fields.append(get_utc_epoch())
    fields.append("random digest")
    fields.append(state_root)
    fields.append('1' * 32)
    return fields
def create_prepare(req_key, state_root):
    """Build a Prepare for *req_key* = (view_no, pp_seq_no)."""
    view_no, pp_seq_no = req_key
    return Prepare(*create_prepare_params(view_no, pp_seq_no, state_root))
def change_bls_key(looper, txnPoolNodeSet,
                   node,
                   steward_client, steward_wallet,
                   add_wrong=False):
    """Rotate *node*'s BLS key via a NODE txn and return the new public key.

    When add_wrong is True, a random (invalid) key is written to the ledger
    instead of the freshly generated one, while the node keeps signing with
    the real key -- used to test mismatch handling.
    """
    new_blspk = init_bls_keys(node.keys_dir, node.name)
    key_in_txn = \
        new_blspk \
        if not add_wrong \
        else ''.join(random_from_alphabet(32, base58.alphabet))
    node_data = {
        ALIAS: node.name,
        BLS_KEY: key_in_txn
    }
    updateNodeData(looper, steward_client, steward_wallet, node, node_data)
    # Wait until the pool has converged on the updated node data.
    waitNodeDataEquality(looper, node, *txnPoolNodeSet[:-1])
    ensureClientConnectedToNodesAndPoolLedgerSame(looper, steward_client,
                                                  *txnPoolNodeSet)
    return new_blspk
def check_bls_key(blskey, node, nodes, add_wrong=False):
    '''
    Check that each node has the same and correct blskey for this node
    '''
    registered = {n.bls_bft.bls_key_register.get_key_by_name(node.name)
                  for n in nodes}
    # All nodes must agree on a single registered key for `node`.
    assert len(registered) == 1
    if not add_wrong:
        assert blskey == registered.pop()
        # The node itself must be able to sign with the correct key.
        assert node.bls_bft.can_sign_bls()
        assert blskey == node.bls_bft.bls_crypto_signer.pk
    else:
        # A wrong key was registered, so the node must refuse to sign.
        assert not node.bls_bft.can_sign_bls()
def check_update_bls_key(node_num, saved_multi_sigs_count,
                         looper, txnPoolNodeSet,
                         client_tdir,
                         poolTxnClientData,
                         stewards_and_wallets,
                         add_wrong=False):
    """End-to-end check of a BLS key rotation for the node at *node_num*:
    rotate the key, verify pool-wide visibility, then verify multi-sig
    creation still works (saved_multi_sigs_count per batch)."""
    # 1. Change BLS key for a specified NODE
    node = txnPoolNodeSet[node_num]
    steward_client, steward_wallet = stewards_and_wallets[node_num]
    new_blspk = change_bls_key(looper, txnPoolNodeSet, node,
                               steward_client, steward_wallet,
                               add_wrong)
    # 2. Check that all Nodes see the new BLS key value
    check_bls_key(new_blspk, node, txnPoolNodeSet, add_wrong)
    # 3. Check that we can send new requests and have correct multisigs
    client, wallet = new_client(looper,
                                poolTxnClientData,
                                txnPoolNodeSet, client_tdir)
    check_bls_multi_sig_after_send(looper, txnPoolNodeSet,
                                   client, wallet,
                                   saved_multi_sigs_count=saved_multi_sigs_count)
|
#!/usr/bin/env python
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler, test
class CORSRequestHandler(SimpleHTTPRequestHandler):
    """Static-file handler that adds a permissive CORS header to every response."""

    def end_headers(self):
        # Inject the header before the base class flushes the header buffer.
        self.send_header('Access-Control-Allow-Origin', '*')
        super().end_headers()
if __name__ == '__main__':
    # Serve the current directory on port 8631 with CORS enabled.
    test(CORSRequestHandler, HTTPServer, port=8631)
|
from pymongo import MongoClient
from flask import Flask, render_template
############
from os import environ as env
from os import path as path
from flask import Flask, jsonify, request, session, redirect, render_template, url_for, send_from_directory
from werkzeug.exceptions import HTTPException
from functools import wraps
from authlib.flask.client import OAuth
from six.moves.urllib.parse import urlencode
from dotenv import load_dotenv
#from bookstore.version import API_VERSION
import constants
import json
import jwt
import datetime
app = Flask(__name__)
# Load environment overrides from a .env file located next to this module.
dotenv_path = path.join(path.dirname(__file__), '.env')
load_dotenv(dotenv_path)
JWT_SECRET_KEY = env.get('JWT_SECRET_KEY')
if not JWT_SECRET_KEY:
    # Development fallback only -- never rely on this default in production.
    JWT_SECRET_KEY = "aquickfoxjumpedovertheriver"
class AuthError(Exception):
    """Raised when a request fails authentication/authorization.

    Attributes:
        error: payload (dict or string) describing the failure.
        status_code: HTTP status code the handler should return.
    """
    def __init__(self, error, status_code):
        self.status_code = status_code
        self.error = error
def fulfillment_token(auth):
    """Issue a short-lived (300 s) JWT for HTTP basic-auth credentials.

    `auth` is expected to behave like Flask's request.authorization
    (exposes .username / .password).  Only the hard-coded password
    'secret' is accepted; otherwise a 401 JSON error is returned.
    """
    if auth and auth.password == 'secret':
        user_name = auth.username
    else:
        return jsonify_error("Unauthorized", 401)
    try:
        token = jwt.encode({'user': user_name, 'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=300)}, JWT_SECRET_KEY)
        # NOTE(review): .decode() assumes PyJWT 1.x where encode() returns
        # bytes; PyJWT >= 2.0 returns str and this line would raise --
        # confirm the pinned PyJWT version.
        return jsonify({'token': token.decode()})
    except Exception as ex:
        print(ex)
        return jsonify_error("Unexpected error while creating the auth token.", 500)
def jsonify_error(message, staus_code):
    """Return a (json_body, status) tuple for an error response.

    NOTE(review): the parameter name is misspelled (`staus_code`); kept
    as-is in case any caller passes it as a keyword argument.
    """
    payload = {'status_code': staus_code, 'message': message}
    return (jsonify(payload), staus_code)
def token_required(f):
    """Decorator: reject the request unless a valid Bearer JWT is supplied."""
    @wraps(f)
    def decorated(*args, **kwargs):
        try:
            token = get_token_auth_header()
            jwt.decode(token, JWT_SECRET_KEY)
        except AuthError as ex:
            # Missing/malformed Authorization header.
            return jsonify_error(ex.error, ex.status_code)
        except Exception as ex:
            # Expired or otherwise invalid token.
            print(ex)
            return jsonify_error("Unauthorized", 403)
        return f(*args, **kwargs)
    return decorated
def get_token_auth_header():
    """Obtains the access token from the Authorization Header"""
    header = request.headers.get("Authorization", None)
    if not header:
        raise AuthError("Authorization header not found", 401)
    parts = header.split()
    # Expect exactly: "Bearer <token>"
    if parts[0].lower() != "bearer":
        raise AuthError("Authorization header must start with Bearer", 401)
    if len(parts) == 1:
        raise AuthError("Token not found", 401)
    if len(parts) > 2:
        raise AuthError("Authorization header must be Bearer token", 401)
    return parts[1]
def requires_auth(f):
    """Decorator: redirect to /login unless a profile is in the session."""
    @wraps(f)
    def decorated(*args, **kwargs):
        if constants.PROFILE_KEY in session:
            return f(*args, **kwargs)
        return redirect('/login')
    return decorated
def create_app(app_config=None):
    """Create and configure an instance of the Flask application.

    Reads Auth0 settings from the environment, registers the OAuth client
    on the module-level `app`, and wires up the auth-related routes.
    Returns None; the configured app is the module-level global.
    """
    FLASK_SECRET_KEY = env.get('FLASK_SECRET_KEY')
    if not FLASK_SECRET_KEY:
        FLASK_SECRET_KEY = "secretdev"
    global AUTH0_CALLBACK_URL, AUTH0_AUDIENCE, AUTH0_BASE_URL, AUTH0_CLIENT_SECRET, AUTH0_CLIENT_ID
    AUTH0_CALLBACK_URL = env.get(constants.AUTH0_CALLBACK_URL)
    AUTH0_CLIENT_ID = env.get(constants.AUTH0_CLIENT_ID)
    AUTH0_CLIENT_SECRET = env.get(constants.AUTH0_CLIENT_SECRET)
    AUTH0_DOMAIN = env.get(constants.AUTH0_DOMAIN)
    AUTH0_BASE_URL = 'https://' + AUTH0_DOMAIN
    AUTH0_AUDIENCE = env.get(constants.AUTH0_AUDIENCE)
    # BUG FIX: was `if AUTH0_AUDIENCE is '':` -- identity comparison with a
    # literal is unreliable, and env.get() returns None (not '') when the
    # variable is missing, so the fallback was never applied in that case.
    if not AUTH0_AUDIENCE:
        AUTH0_AUDIENCE = AUTH0_BASE_URL + '/userinfo'
    app.config.from_mapping(
        # a default secret that should be overridden by instance config
        SECRET_KEY=FLASK_SECRET_KEY,
    )
    oauth = OAuth(app)
    global auth0
    auth0 = oauth.register(
        'auth0',
        client_id=AUTH0_CLIENT_ID,
        client_secret=AUTH0_CLIENT_SECRET,
        api_base_url=AUTH0_BASE_URL,
        access_token_url=AUTH0_BASE_URL + '/oauth/token',
        authorize_url=AUTH0_BASE_URL + '/authorize',
        client_kwargs={
            'scope': 'openid profile',
        },
    )
    @app.route('/login')
    def login():
        # Hand off to Auth0's hosted login page.
        return auth0.authorize_redirect(redirect_uri=AUTH0_CALLBACK_URL, audience=AUTH0_AUDIENCE)
    @app.route('/callback')
    def callback_handling():
        # Exchange the code for tokens and cache the user profile in the session.
        auth0.authorize_access_token()
        resp = auth0.get('userinfo')
        userinfo = resp.json()
        session[constants.JWT_PAYLOAD] = userinfo
        session[constants.PROFILE_KEY] = {
            'user_id': userinfo['sub'],
            'name': userinfo['name'],
            'picture': userinfo['picture']
        }
        return redirect('/listings')
    @app.route('/logout')
    def logout():
        # Clear the local session, then log out of Auth0 as well.
        session.clear()
        params = {'returnTo': url_for('home', _external=True), 'client_id': AUTH0_CLIENT_ID}
        return redirect(auth0.api_base_url + '/logout?' + urlencode(params))
    @app.route('/')
    @app.route('/home')
    def home():
        if session and session[constants.PROFILE_KEY]:
            user_info_json = session[constants.JWT_PAYLOAD]
            # Auth0 subject looks like 'provider|id'; keep only the id part.
            user_id_string = user_info_json.get('sub', '0|0')
            user_id = user_id_string.split('|')[1]
            return render_template('home.html', userinfo=session[constants.PROFILE_KEY], userid=user_id)
        else:
            return render_template('home.html')
############
#app = Flask(__name__)
create_app()
# Local MongoDB holding the Airbnb-style sample collections.
client = MongoClient()
db = client['abnb']
reviews = db['reviews']
home_listings = db['listings']
calendar = db['calendar']
@app.route("/listings")
def listings():
    """Entry point: send the visitor to page 1 of the house listings."""
    first_page = url_for("houselistings", pagenumber=1)
    return redirect(first_page)
@app.route("/houselistings/<pagenumber>")
@requires_auth
def houselistings(pagenumber=1):
    """Render one 10-listing page of the listings collection.

    NOTE(review): this walks the collection from the start instead of using
    Mongo's skip()/limit(); fine for small data, slow for large collections.
    """
    rows = []
    start = int(pagenumber)*10 - 10
    count = 0
    for document in home_listings.find({}):
        if count>=start:
            temprow = []
            temprow.append(document["picture_url"])
            temprow.append(document["name"])
            temprow.append(document["price"])
            temprow.append(document["street"])
            temprow.append(document["id"])
            rows.append(temprow)
        # 11 rows (start..start+10) have been collected when this triggers.
        if count==start+10:
            break
        count+=1
    # Drops the first collected row, leaving 10 -- so the page effectively
    # shows documents start+1..start+10.  TODO confirm this off-by-one is
    # intended and not an accidental skip of the first document.
    rows.pop(0)
    user_id = session[constants.JWT_PAYLOAD].get('sub', '0|0').split("|")[1]
    return render_template("listings.html", rows=rows, userinfo=session[constants.PROFILE_KEY], userid=user_id, pagenumber=pagenumber)
@app.route("/listing/<listing_id>")
@requires_auth
def listing_details(listing_id):
    """Render the detail page for one listing, including a 30-day
    availability calendar and the first review."""
    home = home_listings.find({"id" : int(listing_id) })[0]
    data = {}
    # One slot per day; stays None for dates not present in the calendar.
    dates = [None]*30
    cal = []
    alldates = calendar.find({"listing_id" : int(listing_id)})
    for date in alldates:
        datestring = date["date"]
        year, month, day = datestring.split("-")
        # True only for June 2019 (5 < month < 7) -- presumably the intended
        # one-month window; confirm against the template's calendar header.
        if int(year)==2019 and 5 < int(month) < 7:
            if date["available"] == "t":
                dates[int(day)-1] = "active"
            else:
                dates[int(day)-1] = "notactive"
    data["image"] = home["picture_url"]
    data["name"] = home["name"][:45]
    data["host_image"] = home["host_thumbnail_url"]
    data["host_name"] = home["host_name"]
    # First review only; two separate queries fetch the same document.
    data["review"] = reviews.find({"listing_id" : int(listing_id) })[0]["comments"]
    data["review_name"] = reviews.find({"listing_id" : int(listing_id) })[0]["reviewer_name"]
    data["bathrooms"] = home["bathrooms"]
    data["bedrooms"] = home["bedrooms"]
    data["address"] = home["street"]
    data["price"] = home["price"]
    data["dates"] = dates
    return render_template("listing_details.html", userinfo=session[constants.PROFILE_KEY], userid=session[constants.JWT_PAYLOAD].get('sub', '0|0').split("|")[1], data=data)
if __name__ == '__main__':
    # Listen on all interfaces; debug=True must be disabled in production.
    app.run(host='0.0.0.0', debug=True)
from django.db import models
from rompas.models import Product, Subscription, Tokens
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
from django.utils.translation import gettext as _
class Order(models.Model):
    """A customer order; `paid` is derived from `paid_status` on save."""
    name = models.ForeignKey(User,
                             null=True,
                             on_delete=models.CASCADE,
                             verbose_name=_('Name'),
                             )
    created = models.DateTimeField(verbose_name='Create', auto_now_add=True)
    updated = models.DateTimeField(verbose_name='Update', auto_now=True)
    paid = models.BooleanField(verbose_name='Paid', default=False)
    paid_status = models.CharField(verbose_name='paid_status', max_length=50, blank=True)
    # Flags describing which kinds of items the order contains.
    product = models.BooleanField(default=False)
    subscription = models.BooleanField(default=False)
    tokens = models.BooleanField(default=False)
    class Meta:
        ordering = ('-created', )
        verbose_name = 'Order'
        verbose_name_plural = 'Orders'
    def __str__(self):
        return 'Order: {}'.format(self.id)
    def get_total_cost(self):
        # Sum of line totals over related OrderItem rows (related_name='items').
        total_cost = sum(item.get_cost() for item in self.items.all())
        return total_cost
    def save(self, *args, **kwargs):
        # Treat any of the gateway's success-like statuses as paid.
        if self.paid_status == 'sandbox' or self.paid_status == 'success' or self.paid_status == 'wait_accept':
            self.paid = True
        super(Order, self).save(*args, **kwargs)
class OrderItem(models.Model):
    """One line of an Order: exactly one of product / subscription / tokens
    is expected to be set (all three are nullable)."""
    order = models.ForeignKey(Order, related_name='items', on_delete=models.CASCADE,)
    product = models.ForeignKey(Product, related_name='order_items', on_delete=models.PROTECT, null=True, blank=True)
    subscription = models.ForeignKey(Subscription, related_name='order_subscription', on_delete=models.PROTECT,
                                     null=True, blank=True)
    tokens = models.ForeignKey(Tokens, related_name='order_tokens', on_delete=models.PROTECT, null=True, blank=True)
    price = models.DecimalField(verbose_name='Price', max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField(verbose_name='Amount', default=1)
    def __str__(self):
        return '{}'.format(self.id)
    def get_cost(self):
        # Line total = unit price * quantity.
        return self.price * self.quantity
|
import numpy as np
#
def password(m):
    """Monte-Carlo estimate of the chance that m random guesses over the
    26**4 password space contain a random target; prints the hit rate."""
    trials = 1000
    space = np.power(26, 4)
    hits = 0
    for _ in range(trials):
        target = np.random.randint(0, space)
        guesses = np.random.randint(0, space, m)
        if target in guesses:
            hits += 1
    print(hits / trials)
#
# Estimate the hit probability for m*k = 560,000 guesses.
m = 80000
k = 7
password(m*k)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Alpha Labs
if __name__ == '__main__':
    import pandas as pd
    import numpy as ny
    import matplotlib.pyplot as plt
    from Data_preprocessing_function import *
    # Read the input file
    filename = "./test.csv"
    df_1 = pd.read_csv(filename)
    #read(df_1)
    # Drop columns that are not useful for modelling
    df_1 = df_1.drop(['Name', 'Ticket', 'Cabin'], axis=1)
    #read(df_1)
    # Handle missing data
    df_1 = processing_NA(df_1)
    #read(df_1)
    # Encode string columns
    df_1 = processing_string(df_1)
    read(df_1)
    # Save the preprocessed data
    df_1.to_csv('./preprocessed_test_data.csv')
|
from setuptools import setup, find_packages
import versioneer
# Package metadata for tsdataformat; version/cmdclass come from versioneer.
setup(
    name='tsdataformat',
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author='Chris T. Berthiaume',
    author_email='chrisbee@uw.edu',
    license='MIT',
    description='A Python project to manage time series data',
    long_description=open('README.rst', 'r').read(),
    url='https://github.com/ctberthiaume/tsdataformat-python',
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    include_package_data=True,
    platforms='any',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3 :: Only'
    ],
    keywords = ['csv', 'command-line', 'time series', 'tsdata'],
    python_requires='>=3.7, <4',
    install_requires=[
        'ciso8601',
        'click',
        'pandas'
    ],
    zip_safe=True,
    entry_points={
        'console_scripts': [
            'tsdataformat=tsdataformat.cli:cli'
        ]
    }
)
|
class Scrap:
    """Stateless extractors for post fields from Selenium elements and
    BeautifulSoup trees."""
    def __init__(self):
        pass
    def _text_of(self, element, class_name):
        """Helper: text of the first child matching *class_name*."""
        return element.find_element_by_class_name(class_name).text
    def get_titles(self, p):
        """Text of the element's 'post-title' child."""
        return self._text_of(p, 'post-title')
    def get_dates(self, p):
        """Text of the element's 'post-date' child."""
        return self._text_of(p, 'post-date')
    def get_excerpts(self, p):
        """Text of the element's 'post-excerpt' child."""
        return self._text_of(p, 'post-excerpt')
    def get_url_image(self, bs):
        """`src` of the post-thumbnail <img> inside a BeautifulSoup tree."""
        return bs.find('img', class_='attachment-post-thumbnail').get('src')
return bs.find('img', class_='attachment-post-thumbnail').get('src') |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from Page_Explorer.Page_Explorer import PageExplorer
from selenium.webdriver.firefox.options import Options
import platform
import os
import time
from datetime import date
class EdreamsExplorer(PageExplorer):
    """PageExplorer specialised for www.edreams.com."""

    def __init__(self, start_url="http://www.edreams.com", driver_element=None, **kwargs):
        super().__init__(start_url=start_url, driver_element=driver_element, **kwargs)
        self.initial_page_setup()

    def initial_page_setup(self):
        """
        Dismiss the cookie-consent dialog so it does not block later clicks.
        (Sloppy approach: scans every <button> for the 'Understood' label;
        extensions may also use this hook to locate other page buttons.)
        """
        for button in self.driver.find_elements_by_tag_name('button'):
            if button.text == 'Understood':
                button.click()
|
#-*- coding:utf-8 -*-
import picamera
CAMERA_WIDTH = 640   # capture width in pixels
CAMERA_HEIGHT = 480  # capture height in pixels
camera = picamera.PiCamera()
camera.resolution = (CAMERA_WIDTH,CAMERA_HEIGHT)
def Capture():
    """Take one still image to image_.jpg; always returns 'hello'.

    NOTE(review): the fixed 'hello' return looks like a placeholder for a
    status/ack value expected by some caller -- confirm.
    """
    camera.capture('image_.jpg')
    return 'hello'
Capture()
__author__ = '''Kent (Jin-Chun Chiu)'''
import numpy as np
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
##################################
# [*] Path Interpretation #
##################################
# up-left up up-right #
# 7:14 8 9:15 #
# left 4 5 6:0 right #
# 1:11 2:10 3:13 #
# down-left down down-right #
##################################
class Path_DTW_Dictionary():
    """Match an observed direction-code path against gesture templates
    using fastdtw distance (see the module-header code layout).

    FIXES vs. the original:
      * ``dict.iteritems()`` is Python-2-only and crashes on Python 3;
        ``items()`` works on both and is used instead.
      * several template keys were duplicated ('Clockwise Circle....',
        'Horizontal Wave', 'Vertical Wave'), silently overwriting and
        losing templates; all rotations/phases now get distinct keys.
      * ``search`` and ``search2`` shared ~20 duplicated lines; the common
        matching logic now lives in ``_closest_match``.
    """
    def __init__(self):
        self.pathDictionary = {}
        # You could pre-define extra straight-line patterns here, e.g.:
        #self.pathDictionary['Slow Moving Down-Left' ] = [1,1,1,1,1,1,1,1]
        #self.pathDictionary['Slow Moving Down' ] = [2,2,2,2,2,2,2,2]
        #self.pathDictionary['Slow Moving Right' ] = [6,6,6,6,6,6,6,6]
        #self.pathDictionary['Slow Moving Up' ] = [8,8,8,8,8,8,8,8]
        self.pathDictionary['Keeping Stationary'] = [5, 5, 5, 5, 5, 5, 5, 5]
        # All 8 phase rotations of each circle template get distinct keys
        # (trailing dots), matching the original naming scheme.
        clockwise = [8, 15, 6, 13, 10, 11, 4, 14]
        counter_clockwise = [14, 4, 11, 10, 13, 6, 15, 8]
        for shift in range(8):
            dots = '.' * shift
            self.pathDictionary['Clockwise Circle' + dots] = \
                clockwise[shift:] + clockwise[:shift]
            self.pathDictionary['Conter Clockwise Circle' + dots] = \
                counter_clockwise[shift:] + counter_clockwise[:shift]
        # Both phases of each wave (previously the second assignment of the
        # same key overwrote the first).
        self.pathDictionary['Horizontal Wave'] = [4, 0, 4, 0]
        self.pathDictionary['Horizontal Wave.'] = [0, 4, 0, 4]
        self.pathDictionary['Vertical Wave'] = [10, 8, 10, 8]
        self.pathDictionary['Vertical Wave.'] = [8, 10, 8, 10]
    def _closest_match(self, value, remap):
        """Remap raw codes via *remap*, then return the name of the closest
        template by fastdtw distance, or 'None' when the best distance is
        not below the acceptance threshold (40)."""
        normalized = [remap.get(code, code) for code in value]
        names = []
        distances = []
        for path_name, template in self.pathDictionary.items():
            distance, _ = fastdtw(normalized, template, dist=euclidean)
            names.append(path_name)
            distances.append(distance)
        best = min(distances)
        print ('[*] Min_DTW_Distance : %s' % (str(best)))
        if best < 40:
            return names[distances.index(best)]
        return 'None'
    def search(self, value):
        # type(value) = list
        # Full remapping of edge codes onto the 10..15 aliases.
        return self._closest_match(value,
                                   {2: 10, 6: 3, 1: 11, 3: 13, 7: 14, 9: 15})
    def search2(self, value):
        # type(value) = list
        # Only 'down' (2) is remapped.
        return self._closest_match(value, {2: 10})
|
import boto3

# Terminate every EC2 instance visible to the default credentials/region.
ec2_client = boto3.client('ec2')
# Bug fixes: the API is describe_instances (plural) and the response key
# is 'Reservations' -- the original singular forms fail at runtime.
response = ec2_client.describe_instances()
reservations = response['Reservations']
instance_ids = []
for reservation in reservations:
    for instance in reservation["Instances"]:
        instance_ids.append(instance["InstanceId"])
# terminate_instances raises on an empty id list, so guard it
if instance_ids:
    ec2_client.terminate_instances(InstanceIds=instance_ids)
#!/usr/bin/python
import MySQLdb as mdb
import sys
print "Content-type: text\html\n"
print "{\"characters\":["
try:
con = mdb.connect('localhost', 'dlin', 'dlin2dlin', 'dlin');
with con:
cur=con.cursor(mdb.cursors.DictCursor)
cur.execute("select id, name, baseClass, altClass1, altClass2, altClass3 from FireEmblem")
rows=cur.fetchall()
count = len(rows)
for row in rows:
count -= 1
print "{\"name\":\"" + row["name"] + "\","
print "\"id\":\"" + str(row["id"]) + "\","
print "\"class\":\"" + row["baseClass"] + "\","
if row["altClass1"] is not None:
print "\"altClass1\":\"" + row["altClass1"] + "\","
if row["altClass2"] is not None:
print "\"altClass2\":\"" + row["altClass2"] + "\","
if row["altClass3"] is not None:
print "\"altClass3\":\"" + row["altClass3"] + "\","
print "\"thumb\":\"../images/"+row["name"]+".png\","
print "\"image\":\"../images/fullBody/"+row["name"]+".jpg\"}"
if count:
print ","
else:
break
except mdb.Error, e:
print "Error %d: %s" % (e.args[0],e.args[1])
sys.exit(1)
finally:
if con:
con.close()
print "]}"
|
import cv2
import node_settings
def test_draw_pint():
    """Smoke test: draw one small red circle on the world map and preview it."""
    source = r'world_rs_walker_AUG_2021.png'
    canvas = cv2.imread(source)
    # marker parameters: centre, radius, BGR colour, outline thickness
    centre = (90, 120)
    canvas = cv2.circle(canvas, centre, 2, (0, 0, 255), 0)
    # pop a preview window until any key is pressed
    cv2.imshow('Image', canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def world_graph_nodes():
    """Render every world-graph node as a small red diamond and save the map."""
    img = cv2.imread(r'world_rs_walker_AUG_2021.png', 1)
    # draw one marker per coordinate pair in the node table
    for item in node_settings.WorldGraph_Nodes:
        pt = (item[0], item[1])
        cv2.drawMarker(img, pt, (0, 0, 255), markerType=cv2.MARKER_DIAMOND,
                       markerSize=3, thickness=1, line_type=cv2.LINE_AA)
    cv2.imwrite('RES_WALKER.png', img)
def world_graph_nodes_names():
    """Render each world-graph node as a red dot plus its name label and
    save the annotated map to RES_WALKER_NAMES_AUG.png."""
    path = r'world_rs_walker_AUG_2021.png'
    img = cv2.imread(path, 1)
    df_Nodes = node_settings.WorldGraph_Nodes
    df_Names = node_settings.WorldGraph_Names
    # zip keeps nodes and names aligned without the fragile manual index
    for item, name in zip(df_Nodes, df_Names):
        pos = (item[0], item[1])
        cv2.circle(img, pos, radius=2, color=(0, 0, 255), thickness=0)
        cv2.putText(img, name, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
    cv2.imwrite('RES_WALKER_NAMES_AUG.png', img)
def world_graph_nodes_names_paths():
    """Render nodes (red dots), names (white text) and edges (green lines)
    onto the world map and save the annotated image."""
    path = r'world_rs_walker_AUG_2021.png'
    img = cv2.imread(path, 1)
    # node coordinates, display names, and per-node adjacency lists
    df_Nodes = node_settings.WorldGraph_Nodes
    df_Names = node_settings.WorldGraph_Names
    df_Paths = node_settings.WorldGraph_Paths
    # zip keeps the node/name/path triples aligned without a manual counter;
    # 'path_list' also no longer shadows the image-file 'path' variable
    for item, name, path_list in zip(df_Nodes, df_Names, df_Paths):
        pos = (item[0], item[1])
        cv2.circle(img, pos, radius=2, color=(0, 0, 255), thickness=-1)
        cv2.putText(img, name, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255))
        # draw an edge from this node to every node it can reach
        for neighbour in path_list:
            cv2.line(img, pos, (df_Nodes[neighbour][0], df_Nodes[neighbour][1]), (0, 255, 0), 1)
    cv2.imwrite('RES_WALKER_NAMES_PATHS_AUG_2021.png', img)
def test_array_format():
    """Sanity check of the nested-index lookup used by the path walker."""
    test_pos = [[4659, 2734], [4684, 2734], [4678, 2760], [4637, 2734]]
    test = [[1, 3, 5, 4, 6, 2], [0, 2, 6, 25]]
    # test[0][1] == 3, so this prints the fourth coordinate pair
    print(test_pos[test[0][1]])
# Ad-hoc entry point: regenerate the fully annotated map (nodes + names + paths).
# The other renderers are kept commented out for quick manual runs.
#test_array_format()
#world_graph_nodes()
#world_graph_nodes_names()
world_graph_nodes_names_paths()
#test_draw_pint()
#!/usr/bin/python
import time
def login(shana, event):
    """Register with the IRC server: send USER then NICK from the bot config."""
    conf = shana.conf
    shana.write(("USER", conf['user'], '+iw', conf['nick']), conf['name'])
    shana.write(["NICK"], conf['nick'])
login.name = 'login'
login.wake_on_letter = True
def pong(shana, event):
    """Answer a server PING with a matching PONG to keep the connection alive."""
    payload = event.group(0)
    shana.write(['PONG'], payload)
pong.name = 'pong'
pong.event = ['PING']
def echo(shana, event):
    """Repeat the command's argument back into the channel."""
    text = event.group(2)
    shana.say(text)
echo.name = 'echo'
echo.commands = ['echo']
def topic(shana, event):
    """Cache a channel topic (IRC numeric 332) into the bot's key/value store."""
    shana.send("module.bot.store", "PUT", {'name': "%s topic" % event.args[1], 'value': event.group(0)})
    # wait for the store's PUT acknowledgement; the reply itself is unused,
    # so the dead local binding 'l' was dropped
    shana.recv(subject=["PUT",])
topic.name = 'topic'
topic.event = ['332']
def change_topic(shana, event):
    """Replace one ' | '-separated section of a channel topic.

    Usage: topic <channel> <section-index> <new text>
    Non-admins may only edit sections 1 and 4 of #general.
    """
    channel, section, text = event.group(2).split(' ', 2)
    # bug fix: a non-numeric section previously crashed with ValueError
    try:
        slot = int(section)
    except ValueError:
        shana.say("No such slot")
        return
    if not event.admin and channel == "#general" and slot not in [1, 4]:
        shana.say("Denied: Check your privilege")
        return
    shana.send("module.bot.store", "GET", {'name': "%s topic" % channel})
    shana.log("%s topic" % channel, "NOTICE")
    l = shana.recv(subject=["GET",])
    sections = l.body['value'].split(" | ")
    # bug fix: also reject negative indexes, which silently edited from the end
    if slot < 0 or slot >= len(sections):
        shana.say("No such slot")
        return
    sections[slot] = text
    shana.write(["TOPIC", channel], " | ".join(sections))
change_topic.name = 'change_topic'
change_topic.commands = ['topic']
import os.path as _path;
import common_def as _common;
import functools as _func;
import data_ops as _data;
import os;
class CommitCheckReport(object):
    """One finding from a commit scan: the cache path involved, a severity,
    a human-readable description, the observed status flags, and the list
    of repair actions (functools.partial ops) to execute."""
    def __init__(self, cache_path, err_level=_common.LOGLEVEL_INFO, err_desc=None, origin_status=None, actions=None):
        self.cache_path = cache_path;
        self.err_level = err_level;
        self.err_desc = err_desc;
        self.origin_status = origin_status;
        # None default avoids a shared mutable list; idiomatic 'is not None'
        self.actions = actions if actions is not None else [];
    @property
    def origin_status_desc(self):
        """Human-readable rendering of the raw status flags."""
        return _common.get_status_desc(self.origin_status);
# check current file status in cache
# here is the possible list of statuses:
# * None -- exception happens;
# * null -- only happens when cache_path is a directory; when test_untracked==True it means the files/folders in it have multiple differing statuses, when test_untracked==False it means it is either null or untracted;
# * clean -- a link point to entry in data, with aligned path, no entry in git;
# * conflict -- file is not a link, but has corresponding doc entry in data. File may miss in cache;
# * dirty -- has corresponding entry in git, dirty is not detected in this method, and will treat independently;
# * renamed -- a link pointing to an entry in data, but with a path that is not aligned;
# * miss -- file not exists in cache, (and may also miss in data and git);
# * delay -- a non-link which manually taged in cache as an entry delay for sync
# * ready -- a non-link which manually taged in cache as an entry ready for sync
# * partial -- only happens when test_partial=True, means it is just a part of a doc entry in cache/git/doc
# * untracted -- a file is untracted iff it hasn't any of the above known statuses; a directory is untracted iff all its children are untracted. A folder can be determined as untracked only when test_untracked=True is specified and the recursive check is performed; if not it just returns status null
# here is a list of possible combination of status, which is checked by status_corrupted():
# * clean + miss + partial -- ancestor clean, but file miss
# * clean + partial -- partial clean, rest part of entry may not clean
# * conflict + miss -- maltag, will return None, means the file tagged as 'c' but miss in cache, caused by conflict then rename. The flags couldn't imply a case that the overriding file is part of doc-entry, because it won't lead to a miss.
# * conflict + miss + partial -- means (partial conflict) + miss, for the case the ancestor itself miss (means ancestor tagged as 'c' but miss in cache, maltag), we will return None as error, for miss co-existing with partial conflict, it only means the file miss both in cache and data.
# * conflict + partial -- partial conflict, means the file not overriding in cache, but the checking file belongs to a part of existing doc entry (we should check in data here). ! we consider it a complete conflict if there already exist a counterpart in data, even if it just a part of one existing doc entry. ! in terms of availability, it actually covers two kinds of situations: one is the file exists in cache only, another is the file exists in data only. to distinguish the two cases, you need to perform more detection, but most of the time you can judge it by context e.g. the siguation in commit_scan_check
# * renamed + miss + partial -- ancestor renamed, but file miss
# * renamed + partial -- partial renamed
# * delay + miss + partial -- ancestor delay, but file miss
# * delay + partial -- partial delay
# * ready + miss -- maltag, will return None, means the file tagged as 'r' but miss in cache
# * ready + miss + partial -- means (partial ready) + miss; for the case where the ancestor itself misses we consider it a maltag and will return None as error
# * ready + partial -- partial ready
# following combination is impossible (not expected to appear in return):
# * None + <any>
# * null + <any>
# * untracked + <any>
# * partial (alone) -- should with one of (clean | conflict | renamed | delay)
# * clean + (conflict | renamed | delay | ready)
# * conflict + (renamed | delay | ready) -- conflict with rename will finally cause conflict + miss
# * renamed + (delay | ready)
# * delay + ready
# * miss + (delay | renamed | clean) (without partial)
def status_check(cache_path, test_partial=True, test_untracked=True):
    """Classify cache_path into a STATEFLAG_* combination (see the status
    taxonomy documented in the comment block above).

    :param cache_path: path under REPO_CACHE_PATH to examine.
    :param test_partial: when True, walk all ancestors to detect partial states.
    :param test_untracked: when True, recursively decide 'untracted' for dirs.
    :return: (status_flags, determining_ancestor); (None, ...) on error.
    """
    cache_path = _path.normpath(cache_path);
    status_flags = _common.STATEFLAG_NULL;
    if not _common.isancestor(_common.REPO_CACHE_PATH, cache_path):
        _common.logerr('invalid path');
        return (None, None);
    if not _path.exists(cache_path):
        if _path.lexists(cache_path):
            _common.logerr('dead link');
            return (None, None);
        status_flags |= _common.STATEFLAG_MISS;
    link_ancestor=None;
    conflict_ancestor=None;
    delay_ancestor=None;
    ready_ancestor=None;
    ancestor = cache_path;
    # walk from cache_path up toward REPO_CACHE_PATH collecting markers
    while len(ancestor) > len(_common.REPO_CACHE_PATH):
        if _path.islink(ancestor):
            if link_ancestor:
                _common.logerr('multi-link redirection');
                return (None, None);
            link_ancestor = ancestor;
        if _common.gettag(ancestor) == 'c': # here we just rely on 'c' tag, we don't check it by test if corresponding data_path has tag 'e', which should be the work in aggregation check
            if conflict_ancestor:
                _common.logerr('multi-partial-conflict');
                return (None, None);
            conflict_ancestor = ancestor;
        if _common.gettag(ancestor) == 'd':
            if delay_ancestor:
                _common.logerr('multi-partial-delay');
                return (None, None);
            delay_ancestor = ancestor;
        if _common.gettag(ancestor) == 'r':
            if ready_ancestor:
                _common.logerr('multi-partial-ready');
                return (None, None);
            ready_ancestor = ancestor;
        if test_partial:
            ancestor = _path.dirname(ancestor); # continue with parent
        else:
            break; # only test cache_path alone and stop
    # at most one of link/conflict/delay ancestors may exist simultaneously
    if (not link_ancestor is None) + (not conflict_ancestor is None) + (not delay_ancestor is None) > 1:
        _common.logerr('multi-partial status');
        return (None, None);
    if conflict_ancestor:
        data_ancestor = get_data_path(conflict_ancestor);
        if _path.isdir(data_ancestor) and _common.gettag(data_ancestor) != 'e':
            _common.logerr('link is not a tracked doc entry in data');
        if not _path.exists(conflict_ancestor):
            _common.logerr('maltag: the file tagged as \'c\' not exists');
            return (None, conflict_ancestor);
        if not _path.exists(data_ancestor):
            # todo: more restrict detection
            _common.logerr('malconflict: data cache entry unmatched');
            return (None, conflict_ancestor);
        status_flags |= _common.STATEFLAG_CONFLICT;
        if conflict_ancestor != cache_path: # we should review the existing of file in data
            status_flags |= _common.STATEFLAG_PARTIAL;
            data_path = get_data_path(cache_path);
            if _path.exists(data_path): # we don't consider it is a miss if the data counterpart got hidden by conflict ancestor
                status_flags &= ~_common.STATEFLAG_MISS;
            if _path.exists(cache_path):
                status_flags &= ~_common.STATEFLAG_PARTIAL;
    if ready_ancestor:
        if not _path.exists(ready_ancestor):
            _common.logerr('maltag: the file tagged as \'r\' not exists');
            return (None, ready_ancestor);
        status_flags |= _common.STATEFLAG_READY;
        if ready_ancestor != cache_path: # we should review the existing of file in data
            status_flags |= _common.STATEFLAG_PARTIAL;
    if delay_ancestor:
        status_flags |= _common.STATEFLAG_DELAY;
        if delay_ancestor != cache_path:
            status_flags |= _common.STATEFLAG_PARTIAL;
    if link_ancestor:
        data_ancestor = _path.realpath(link_ancestor);
        if not _common.isancestor(_common.REPO_DATA_PATH, data_ancestor):
            _common.logerr('link outof scope');
            return (None, link_ancestor);
        if _path.isdir(data_ancestor) and _common.gettag(data_ancestor) != 'e':
            _common.logerr('link is not a tracked doc entry in data');
            return (None, link_ancestor);
        # aligned relative paths => clean, otherwise the entry was renamed
        if _path.relpath(link_ancestor, _common.REPO_CACHE_PATH) == _path.relpath(data_ancestor, _common.REPO_DATA_PATH):
            status_flags |= _common.STATEFLAG_CLEAN;
        else:
            status_flags |= _common.STATEFLAG_RENAMED;
        if link_ancestor != cache_path:
            status_flags |= _common.STATEFLAG_PARTIAL;
    if status_flags == _common.STATEFLAG_NULL:
        if _path.isfile(cache_path):
            status_flags |= _common.STATEFLAG_UNTRACTED;
        elif _path.isdir(cache_path) and test_untracked: # try to test untract recursively
            all_untracked = True;
            for subname in os.listdir(cache_path):
                subpath = _path.join(cache_path, subname);
                if not status_check(subpath, test_partial=False, test_untracked=True
                    )[0] == _common.STATEFLAG_UNTRACTED:
                    # disable test_partial because we have already tested all ancestors if we want
                    all_untracked = False;
                    break;
            if all_untracked:
                status_flags |= _common.STATEFLAG_UNTRACTED;
    assert not (status_flags & _common.STATEFLAG_DIRTY);
    return status_flags, link_ancestor or conflict_ancestor or delay_ancestor or ready_ancestor;
# check the invalid combination which is described in status_check()
def is_status_corrupted(status_flags):
    """Return True when the flags denote a corrupted/undecidable state."""
    # temporary policy: only the hard-error sentinel (None) counts as corrupted
    corrupted = status_flags is None;
    return corrupted;
def coverage_scan_check(cache_root, data_root):
    """Walk cache_root and determine which doc entries under data_root are
    covered by cache paths.

    :return: (miss_set, hit_map) -- data entries with no cache coverage, and
    a dict mapping data entry -> list of cache paths that reference it.
    """
    assert _common.isancestor(_common.REPO_CACHE_PATH, cache_root);
    assert _common.isancestor(_common.REPO_DATA_PATH, data_root);
    entry_list = _data.get_entries(data_root);
    miss_set = set(entry_list);
    hit_map = {}; # key: data entry, value: cache entry list
    def coverage_eliminate(cache_path):
        # status of this node alone -- no ancestor walk, no recursion
        status = status_check(cache_path, test_partial=False, test_untracked=False)[0];
        assert not status is None;
        assert not (_common.STATEFLAG_MISS & status);
        assert not (_common.STATEFLAG_PARTIAL & status);
        data_path = None;
        if status & _common.STATEFLAG_CLEAN or status & _common.STATEFLAG_RENAMED:
            data_path = _path.realpath(cache_path);
        else:
            assert not _path.islink(cache_path);
            if status & _common.STATEFLAG_CONFLICT:
                data_path = get_data_path(cache_path);
        if not data_path is None:
            miss_set.remove(data_path);
            if not data_path in hit_map:
                hit_map[data_path] = [];
            hit_map[data_path].append(cache_path);
        if status == _common.STATEFLAG_NULL:
            # undecided directory: recurse into its children
            for subname in os.listdir(cache_path):
                subpath = _path.join(cache_path, subname);
                coverage_eliminate(subpath);
        return;
    coverage_eliminate(cache_root);
    return miss_set, hit_map;
def malassociate_scan_check(cache_root, data_root):
    """Find data entries whose cache association is broken: either the entry
    has no coverage but its canonical cache path is occupied, or several
    cache paths reference the same data entry.

    :return: dict mapping data_path -> list of offending cache paths.
    """
    # caused by delete/rename from the path sometime, then agg/rename to the path
    miss_set, hit_map = coverage_scan_check(cache_root, data_root);
    malassoc_map = {};
    for data_path in miss_set:
        cache_path = _data.get_cache_path(data_path);
        if _path.exists(cache_path):
            if not data_path in malassoc_map:
                malassoc_map[data_path] = [];
            # bug fix: append to the per-entry list instead of overwriting
            # the freshly created list with a bare string
            malassoc_map[data_path].append(cache_path);
    for data_path, cache_list in hit_map.iteritems():
        if len(cache_list) > 1:
            if not data_path in malassoc_map:
                malassoc_map[data_path] = [];
            # bug fix: dict has no extend(); accumulate on the entry's list
            malassoc_map[data_path].extend(cache_list);
    return malassoc_map;
def malconflict_scan_check(cache_root):
    """Validate every path tagged 'c' under cache_root and return the list
    of paths whose conflict state is inconsistent (maltagged)."""
    malconflict_list = [];
    for cache_path in _common.get_tag_pathlist('c'):
        assert _common.isancestor(_common.REPO_CACHE_PATH, cache_path);
        if not _common.isancestor(cache_root, cache_path):
            continue;
        if not _path.exists(cache_path):
            # bug fix: logerr was referenced unqualified (NameError at runtime);
            # the module imports it via common_def as _common
            _common.logerr('malconflict: %s -- not exists' % cache_path);
            malconflict_list.append(cache_path);
            continue; # nothing more to check for a missing file
        data_path = get_data_path(cache_path);
        if not _path.exists(data_path):
            _common.logerr('malconflict: %s -- data counterpart not exists' % cache_path);
            malconflict_list.append(cache_path);
        if _path.isdir(data_path) and _common.gettag(data_path) != 'e':
            _common.logerr('malconflict: %s -- data counterpart not an entry' % cache_path);
            malconflict_list.append(cache_path);
        if _path.isfile(cache_path) != _path.isfile(data_path):
            _common.logerr('malconflict: %s -- unconsistent type' % cache_path);
            malconflict_list.append(cache_path);
    return malconflict_list;
# use to have a thorough status check, unlike commit_check with just stop on an determined status
def malstatus_scan_check(cache_root):
    """Walk the whole cache tree and collect every path whose status_check
    errors out (returns None)."""
    malstatus_list = [];
    for dirpath, dirnames, filenames in os.walk(cache_root):
        for name in dirnames + filenames:
            candidate = _path.join(dirpath, name);
            status = status_check(candidate, test_partial=True, test_untracked=False)[0];
            if status is None:
                malstatus_list.append(candidate);
    return malstatus_list;
def commit_scan_check(root_path):
    """Scan the cache under root_path and build a commit plan.

    :return: (commit_list, untracted_list, rename_map) -- one
    CommitCheckReport per examined path, the top-most untracked paths, and
    the pending data renames (old data path -> new data path).
    """
    commit_list=[];
    untracted_set=set();
    rename_map={};
    def commit_scan(cache_path):
        # classify this path, including partial states from tagged ancestors
        status, ancestor = status_check(cache_path, test_partial=True, test_untracked=False);
        if status is None:
            _common.logerr('corrupted cache status (None), need manually check');
            commit_list.append(CommitCheckReport(cache_path, err_level=_common.LOGLEVEL_ERR, err_desc='corrupted cache status (None)'));
        if is_status_corrupted(status):
            # NOTE(review): no early return here -- a None status falls
            # through to the assert below; confirm whether a return was intended
            _common.logerr('corrupted status, need manually check');
            commit_list.append(CommitCheckReport(cache_path, err_level=_common.LOGLEVEL_ERR, err_desc='corrupted cache status',
                origin_status=status));
        assert not (_common.STATEFLAG_MISS & status);
        diginto = False;
        untracted = False;
        if status == _common.STATEFLAG_NULL:
            untracted = True;
            diginto = True;
        if status & _common.STATEFLAG_UNTRACTED:
            assert status == _common.STATEFLAG_UNTRACTED;
            untracted = True;
            assert diginto == False;
            commit_list.append(CommitCheckReport(cache_path, err_desc='no action', origin_status=status));
        if status & _common.STATEFLAG_CLEAN:
            assert status == _common.STATEFLAG_CLEAN;
            assert untracted == False;
            assert diginto == False;
            commit_list.append(CommitCheckReport(cache_path, err_desc='no action', origin_status=status));
        if status & _common.STATEFLAG_CONFLICT:
            # todo: check doc entry
            assert untracted == False;
            assert not ancestor is None;
            data_path = get_data_path(cache_path);
            data_ancestor = get_data_path(ancestor);
            if _path.isfile(cache_path):
                assert diginto == False;
                if status & _common.STATEFLAG_PARTIAL:
                    # partial conflict: the file is new relative to the doc entry
                    commit_list.append(CommitCheckReport(cache_path, err_desc='new file', origin_status=status, actions=[
                        _func.partial(_common.addfile_op, src=cache_path, dest=data_path),
                        _func.partial(_common.rmfile_op, path=cache_path),
                        _func.partial(_common.untag_op, path=ancestor, origin_tag='c'),
                        _func.partial(_common.restorelink_op, src=data_ancestor, dest=ancestor),
                        ]));
                else:
                    # complete conflict: the cache file overrides the data copy
                    commit_list.append(CommitCheckReport(cache_path, origin_status=status, actions=[
                        _func.partial(_common.override_op, src=cache_path, dest=data_path),
                        _func.partial(_common.rmfile_op, path=cache_path),
                        _func.partial(_common.untag_op, path=ancestor, origin_tag='c'),
                        _func.partial(_common.restorelink_op, src=data_ancestor, dest=ancestor),
                        ]));
            else:
                diginto = True;
        if status & _common.STATEFLAG_RENAMED:
            # todo: check doc entry
            assert untracted == False;
            assert diginto == False;
            assert status == _common.STATEFLAG_RENAMED;
            data_path = _path.realpath(cache_path);
            assert _path.exists(data_path);
            new_data_path = get_data_path(cache_path);
            actions = [];
            assert not data_path in rename_map;
            rename_map[data_path] = new_data_path;
            if _path.isdir(data_path):
                # directory entries carry the 'e' tag, which must follow the move
                actions.extend([
                    _func.partial(_common.untag_op, path=data_path, origin_tag='e'),
                    _func.partial(_common.tagfile_op, path=new_data_path, tag='e'),
                    ]);
            commit_list.append(CommitCheckReport(cache_path, err_desc='override', origin_status=status, actions=actions + [
                _func.partial(_common.rmlink_op, link_path=cache_path),
                _func.partial(_common.mv_batch_op, src=data_path, dest=new_data_path),
                _func.partial(_common.restorelink_op, src=new_data_path, dest=cache_path),
                ]));
        if status & _common.STATEFLAG_DELAY:
            assert untracted == False;
            assert diginto == False;
            assert status == _common.STATEFLAG_DELAY;
            commit_list.append(CommitCheckReport(cache_path, err_desc='no action', origin_status=status));
        if status & _common.STATEFLAG_READY:
            assert untracted == False;
            assert diginto == False;
            assert status == _common.STATEFLAG_READY;
            data_path = get_data_path(cache_path);
            actions = [];
            if _path.isdir(cache_path):
                actions.append(
                    _func.partial(_common.tagfile_op, path=data_path, tag='e')
                    );
            commit_list.append(CommitCheckReport(cache_path, err_desc='new file', origin_status=status, actions=actions + [
                _func.partial(_common.addfile_op, src=cache_path, dest=data_path),
                _func.partial(_common.untag_op, path=cache_path, origin_tag='r'),
                _func.partial(_common.rmfile_op, path=cache_path),
                _func.partial(_common.restorelink_op, src=data_path, dest=cache_path),
                ]));
        if untracted:
            untracted_set.add(cache_path);
        else: # remove any ancestors in untracted_set
            untracted_ancestor = _path.dirname(cache_path);
            while untracted_ancestor in untracted_set:
                untracted_set.remove(untracted_ancestor);
                untracted_ancestor = _path.dirname(untracted_ancestor);
        if diginto:
            for subname in os.listdir(cache_path):
                subpath = _path.join(cache_path, subname);
                commit_scan(subpath);
    commit_scan(root_path);
    untracted_list = [];
    # keep only the top-most untracked paths
    for path in untracted_set:
        if not _path.dirname(path) in untracted_set:
            untracted_list.append(path);
    return commit_list, untracted_list, rename_map;
def commit_update(root_path, logonly=True):
    """Run the full commit pipeline for root_path: pre-checks, commit scan,
    op merging/deduplication, op execution (unless logonly), post-checks.

    :param logonly: when True, operations only log what they would do.
    :return: list of LogEntry records describing the run.
    """
    rawlog = [];
    assert _common.isancestor(_common.REPO_CACHE_PATH, root_path);
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'malassociate_scan_check',
        'root_path' : root_path,
        }));
    malassociate_list = malassociate_scan_check(root_path, _common.REPO_DATA_PATH);
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'malconflict_scan_check',
        'root_path' : root_path,
        }));
    malconflict_list = malconflict_scan_check(root_path);
    # abort early when the cache is in an inconsistent state
    if malassociate_list or malconflict_list:
        for path in malassociate_list:
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'malassociate_scan_check', {
                'path' : path,
                }));
        for path in malconflict_list:
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'malconflict_scan_check', {
                'path' : path,
                }));
        return rawlog;
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'commit_scan_check',
        'root_path' : root_path,
        }));
    commit_list, untracted_list, rename_map = commit_scan_check(root_path);
    # untracked entries must be resolved manually before committing
    if len(untracted_list):
        for path in untracted_list:
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'commit_scan_check', {
                'err_desc' : 'untracted entry',
                'path' : path,
                }));
        return rawlog;
    for src, dest in rename_map.iteritems():
        rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'log_rename', {
            'src' : src,
            'dest' : dest,
            }));
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'merge ops',
        'root_path' : root_path,
        }));
    # merge operation
    mv_batch_ops = {}; # key: src, dest
    addfile_ops = {}; # key: dest
    override_ops = {}; # key: dest
    untag_ops = {}; # key: path
    tagfile_ops = {}; # key: path
    rmlink_ops = {}; # key: path
    rmfile_ops = {}; # key: path
    restorelink_ops = {}; # key: dest
    for report in commit_list:
        if report.err_level == _common.LOGLEVEL_ERR:
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'commit_scan_check', {
                'cache_path' : report.cache_path,
                'err_desc' : report.err_desc,
                }));
            return rawlog;
        if report.err_level == _common.LOGLEVEL_WARN:
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_WARN, 'commit_scan_check', {
                'cache_path' : report.cache_path,
                'warn_desc' : report.err_desc,
                'origin_status' : report.origin_status_desc,
                'actions' : map(_common.get_bareopname, report.actions),
                }));
            # we won't stop on warning
        else:
            assert report.err_level == _common.LOGLEVEL_INFO;
            rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'commit_scan_check', {
                'cache_path' : report.cache_path,
                'info_desc' : report.err_desc, # may be None
                'origin_status' : report.origin_status_desc,
                'actions' : map(_common.get_bareopname, report.actions),
                }));
        # bucket every planned action by kind, de-duplicating by key
        for op in report.actions:
            if _common.get_bareop(op) == _common.mv_batch_op:
                src, dest = op.keywords['src'], op.keywords['dest'];
                key = src, dest;
                assert _common.isancestor(_common.REPO_DATA_PATH, src);
                assert _common.isancestor(_common.REPO_DATA_PATH, dest);
                if key in mv_batch_ops:
                    assert False;
                mv_batch_ops[key] = op;
                if len(set([src for src, dest in mv_batch_ops])) != len(mv_batch_ops):
                    rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'cache_commit_update', {
                        'err_desc' : 'mv confliction: multiple ops have same src',
                        }));
                    return rawlog;
                if len(set([dest for src, dest in mv_batch_ops])) != len(mv_batch_ops):
                    rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'cache_commit_update', {
                        'err_desc' : 'mv confliction: multiple ops have same dest',
                        }));
                    return rawlog;
            if _common.get_bareop(op) == _common.addfile_op:
                src, dest = op.keywords['src'], op.keywords['dest'];
                key = dest;
                assert _common.isancestor(_common.REPO_CACHE_PATH, src);
                assert _common.isancestor(_common.REPO_DATA_PATH, dest);
                if key in addfile_ops:
                    assert False;
                addfile_ops[key] = op;
            if _common.get_bareop(op) == _common.override_op:
                src, dest = op.keywords['src'], op.keywords['dest'];
                key = dest;
                assert _common.isancestor(_common.REPO_CACHE_PATH, src);
                assert _common.isancestor(_common.REPO_DATA_PATH, dest);
                if key in override_ops:
                    assert False;
                override_ops[key] = op;
            if _common.get_bareop(op) == _common.untag_op:
                path, origin_tag = op.keywords['path'], op.keywords['origin_tag'];
                key = path;
                if key in untag_ops:
                    rawlog.append(_common.LogEntry(_common.LOGLEVEL_WARN, 'cache_commit_update', {
                        'warn_desc' : 'duplicated untag',
                        'path' : path,
                        'origin_tag' : origin_tag,
                        }));
                untag_ops[key] = op;
            if _common.get_bareop(op) == _common.tagfile_op:
                path, tag = op.keywords['path'], op.keywords['tag'];
                key = path;
                if key in tagfile_ops:
                    assert False;
                tagfile_ops[key] = op;
            if _common.get_bareop(op) == _common.rmlink_op:
                key = link_path = op.keywords['link_path'];
                assert _common.isancestor(_common.REPO_CACHE_PATH, link_path);
                if key in rmlink_ops:
                    rawlog.append(_common.LogEntry(_common.LOGLEVEL_WARN, 'cache_commit_update', {
                        'warn_desc' : 'duplicated rmlink',
                        'link_path' : link_path,
                        }));
                rmlink_ops[key] = op;
            if _common.get_bareop(op) == _common.rmfile_op:
                key = path = op.keywords['path'];
                assert _common.isancestor(_common.REPO_CACHE_PATH, path);
                assert not key in rmfile_ops;
                rmfile_ops[key] = op;
            if _common.get_bareop(op) == _common.restorelink_op:
                src, dest = op.keywords['src'], op.keywords['dest'];
                assert _common.isancestor(_common.REPO_DATA_PATH, src);
                assert _common.isancestor(_common.REPO_CACHE_PATH, dest);
                key = dest;
                if key in restorelink_ops:
                    rawlog.append(_common.LogEntry(_common.LOGLEVEL_WARN, 'cache_commit_update', {
                        'warn_desc' : 'duplicated entry to restorelink',
                        'src' : src,
                        'dest' : dest,
                        }));
                restorelink_ops[key] = op;
    if not logonly:
        _common.incfile_init();
    # execute ops
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'execute ops',
        'root_path' : root_path,
        }));
    # moves run first, bracketed by mv_init/mv_commit; abort on first error
    rawlog.extend(_common.mv_init_op(logonly=logonly));
    if rawlog[-1].level == _common.LOGLEVEL_ERR:
        return rawlog;
    for op in mv_batch_ops.values():
        rawlog.extend(op(logonly=logonly));
        if rawlog[-1].level == _common.LOGLEVEL_ERR:
            return rawlog;
    rawlog.extend(_common.mv_commit_op(logonly=logonly));
    if rawlog[-1].level == _common.LOGLEVEL_ERR:
        return rawlog;
    for ops in [
        rmlink_ops, addfile_ops, override_ops,
        untag_ops, tagfile_ops,
        rmfile_ops, restorelink_ops,
        ]:
        for op in ops.values():
            rawlog.extend(op(logonly=logonly));
            if rawlog[-1].level == _common.LOGLEVEL_ERR:
                return rawlog;
    # post check
    rawlog.append(_common.LogEntry(_common.LOGLEVEL_INFO, 'cache_commit_update', {
        'step' : 'post check',
        'root_path' : root_path,
        }));
    if not logonly:
        # check after updating
        rawlog.extend(malassociate_scan_check(root_path, _common.REPO_DATA_PATH));
        rawlog.extend(malconflict_scan_check(root_path));
        _common.incfile_check();
    return rawlog;
def restore_miss_update(cache_root, data_root, logonly=True):
    """Recreate cache links for data entries that have no cache coverage.

    Aborts (returning the error log) when malassociations are detected.
    """
    rawlog = [];
    for bad_path in malassociate_scan_check(cache_root, data_root):
        rawlog.append(_common.LogEntry(_common.LOGLEVEL_ERR, 'malassociate_scan_check', {
            'path' : bad_path,
            }));
    if rawlog:
        return rawlog;
    miss_set, hit_map = coverage_scan_check(cache_root, data_root);
    for data_path in miss_set:
        cache_path = _data.get_cache_path(data_path);
        assert _common.isancestor(_common.REPO_DATA_PATH, data_path);
        assert _common.isancestor(_common.REPO_CACHE_PATH, cache_path);
        rawlog.extend(_common.restorelink_op(data_path, cache_path, logonly=logonly));
    return rawlog;
def clean_miss_update():
    """Placeholder -- not implemented yet."""
    # todo
    return;
def get_data_path(cache_path):
    """Map a path under REPO_CACHE_PATH to its counterpart under REPO_DATA_PATH."""
    assert _common.isancestor(_common.REPO_CACHE_PATH, cache_path);
    rel = _path.relpath(cache_path, _common.REPO_CACHE_PATH);
    return _path.normpath(_path.join(_common.REPO_DATA_PATH, rel));
|
class Solution:
    def backspaceCompare(self, S: str, T: str) -> bool:
        """Return True iff S and T render to the same text when '#'
        acts as a backspace key."""
        def render(text):
            out = []
            for ch in text:
                if ch != '#':
                    out.append(ch)
                elif out:
                    # backspace on an empty buffer is a no-op
                    out.pop()
            return out
        return render(S) == render(T)
import pandas as pd
import os
import sys
class CM:
    """Confusion matrix for binary classification problems."""

    def __init__(self, table: dict):
        """Build the matrix from a dict with keys 'tp', 'fn', 'tn', 'fp'."""
        self.table = table
        self.tp, self.fn = table['tp'], table['fn']
        self.tn, self.fp = table['tn'], table['fp']
        # per-class totals: n = actual negatives, p = actual positives
        self.n = self.tn + self.fp
        self.p = self.tp + self.fn

    def normalize(self):
        """Convert all counts to per-class rates, in place.

        Classes with zero members normalize to 0. Returns None.
        """
        neg, pos = self.n, self.p
        rates = {
            'tn': self.tn / neg if neg != 0 else 0,
            'fp': self.fp / neg if neg != 0 else 0,
            'tp': self.tp / pos if pos != 0 else 0,
            'fn': self.fn / pos if pos != 0 else 0
        }
        # re-derive every attribute from the normalized table
        self.__init__(rates)

    def __repr__(self):
        d = {'TP': self.tp, 'FN': self.fn, 'TN': self.tn, 'FP': self.fp}
        return str(pd.DataFrame.from_dict(d, orient='index').T)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.