text stringlengths 38 1.54M |
|---|
import requests
import json
import zipfile
import re
def startDownload(project_id):
    """Download a Scratch project and package it as a '<title>.sb2' archive.

    Fetches the project JSON from the Scratch CDN, collects every costume
    and sound asset referenced by the stage and its sprites, looks up the
    project title, and writes project.json plus all assets into the zip.
    """
    print('downloading project: ' + str(project_id))
    resp = requests.get(
        'https://cdn.projects.scratch.mit.edu/internalapi/project/'
        + str(project_id)
        + '/get/')
    project = resp.json()
    print('load json data success')
    costumesToDownload, soundsToDownload = [], []
    processSoundAndCostumes(project, costumesToDownload, soundsToDownload)
    # 'children' holds the sprites; each can carry its own costumes/sounds.
    if 'children' in project:
        for child in project['children']:
            processSoundAndCostumes(
                child, costumesToDownload, soundsToDownload)
    totalAssets = len(costumesToDownload) + len(soundsToDownload)
    print("Found %d assets" % totalAssets)
    print('Loading project title...')
    resp = requests.get('https://scratch.mit.edu/api/v1/project/' +
                        str(project_id) + '/?format=json')
    print('generate ZIP...')
    title_data = resp.json()
    zipfile_name = title_data['title'] + '.sb2'
    # Fix: use a context manager so the archive is closed (and flushed)
    # even if an asset download raises; the original leaked the handle.
    with zipfile.ZipFile(zipfile_name, 'w') as sb2:
        sb2.writestr('project.json', json.dumps(project).encode())
        downloadCostume(sb2, costumesToDownload, soundsToDownload)
def downloadCostume(sb2, costumesToDownload, soundsToDownload):
    """Fetch every costume and sound asset and store it in the open archive.

    Each asset is written as '<id><ext>' (e.g. '0.png', '1.wav'); the
    extension is taken from the md5 reference string.
    """
    # Fix: raw string — '\.' in a plain literal is an invalid escape
    # sequence (SyntaxWarning on modern Python). Compiled once, hoisted
    # out of both loops.
    ext_pattern = re.compile(r'\.[a-zA-Z0-9]+')
    complete, totalAssets = 0, len(costumesToDownload) + len(soundsToDownload)
    for costume in costumesToDownload:
        print('Loading asset ' + costume['costumeName'] + ' (' +
              str(complete) + '/' + str(totalAssets) + ')')
        resp = requests.get(
            'https://cdn.assets.scratch.mit.edu/internalapi/asset/' +
            costume['baseLayerMD5'] + '/get/')
        ext = ext_pattern.findall(costume['baseLayerMD5'])[0]
        sb2.writestr(str(costume['baseLayerID']) + ext, resp.content)
        complete += 1
    # Renamed the loop variable (was `costume`, shadowing the list above).
    for sound in soundsToDownload:
        print('Loading asset ' + sound['soundName'] + ' (' +
              str(complete) + '/' + str(totalAssets) + ')')
        resp = requests.get(
            'https://cdn.assets.scratch.mit.edu/internalapi/asset/' +
            sound['md5'] + '/get/')
        ext = ext_pattern.findall(sound['md5'])[0]
        sb2.writestr(str(sound['soundID']) + ext, resp.content)
        complete += 1
def processSoundAndCostumes(project, c, s):
    """Accumulate *project*'s costume and sound entries into c and s.

    Each costume dict gains a sequential 'baseLayerID' and each sound a
    sequential 'soundID' — its index within the accumulator list.
    """
    for entry in project.get('costumes', []):
        entry['baseLayerID'] = len(c)
        c.append(entry)
    for entry in project.get('sounds', []):
        entry['soundID'] = len(s)
        s.append(entry)
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 14 10:36:06 2016
@author: Luciano
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from PIL import Image
import scipy.signal
import tifffile
from skimage import io
def importData(cameraFrameFile, darkFrameFile):
    """Load the camera frame stack and the dark (background) frame.

    Returns (data, background), both as numpy arrays.
    """
    background = np.array(Image.open(darkFrameFile))
    data = np.array(io.imread(cameraFrameFile))
    return data, background
def getOffPattern(data, expectedValue):
    """Estimate the off-switching pattern parameters (stub).

    data are the camera frames; expected_value is the initial guess on the
    period of the pattern in pixels. Intended output vector order:
      - period in x-direction [pixels] (px)
      - offset in x-direction [pixels] (x0)
      - period in y-direction [pixels] (py)
      - offset in y-direction [pixels] (y0)
    so the off-switching pattern would be reconstructed as
    sin(pi * (x - x0) / px).^2 + sin(pi * (y - y0) / py).^2.

    Currently unimplemented: always returns 0.
    """
    return 0
# NOTE(review): everything below this comment down to the end of
# signalReconstruction is NOT valid Python. `objectDistances` has no
# signature or body, and signalReconstruction is largely untranslated
# MATLAB (`data(:, :, kf)`, `.*`, `%` comments, interpn/circshift/
# isnan-indexing). Kept verbatim so the intended algorithm is preserved;
# it must be ported (e.g. to numpy/scipy) before this module can even
# be imported.
def objectDistances
def signalReconstruction(data, pattern, objp, shiftp):
    """ Given the pattern period and offset as well as the output pixel length
    and the scanning pixel length it constructs the central and peripheral
    signal frames. Pattern is the periods and offsets of the on-switched
    regions"""
    # data parameters
    # NOTE(review): np.size(data) returns a scalar, not a shape — this
    # presumably means data.shape; confirm when porting.
    dx = np.size(data)[1]
    dy = np.size(data)[2]
    nframes = np.size(data)[0]
    nsteps = np.sqrt(nframes)
    # decode the pattern
    fx = pattern[0]
    x0 = pattern[1]
    fy = pattern[2]
    y0 = pattern[3]
    # object positions in image so that they are always in the scanned regions
    # NOTE(review): object_positions is not defined anywhere in this file.
    [xi, yi] = object_positions([fx, dx], [1, dy - fy], objp)
    # central loop: interpolate camera frame on shifting grids (scanning)
    # and extract central and peripheral signals
    central_signal = 0
    central_signal_weights = 0
    peripheral_signal = 0
    peripheral_signal_weights = 0
    # loop (attention, the scanning direction of our microscope is hardcoded,
    # first down, then right)
    for kx in np.arange(nsteps):
        shift_x = -kx * shiftp
        for ky in np.arange(nsteps):
            shift_y = ky * shiftp;
            # get frame number and frame
            kf = ky + 1 + nsteps * kx
            frame = data(:, :, kf)
            # adjust positions for this frame
            xj = xi + shift_x
            yj = yi + shift_y
            # interpolation
            est = interpn(frame, xj, yj, 'nearest');
            # result will be isnan for outside interpolation (should not happen)
            est(isnan(est)) = 0;
            est = max(est, 0); % no negative values (should only happen rarely)
            # compute distance to the center (minima of off switching pattern)
            [t2max, ~] = objectDistances(xj, yj, fx, x0, fy, y0)
            # compute weights (we add up currently 50nm around each position),
            # feel free to change this value for tradeoff of SNR and resolution
            W = 0.05 / 0.0975;
            wmax = power(2., -t2max / (W / 2)^2)
            # add up with weights
            central_signal = central_signal + wmax .* est
            central_signal_weights = central_signal_weights + wmax
            # subtraction of surrounding minima
            cx = round(fx / 2 / objp);
            cy = round(fy / 2 / objp);
            # left upper
            shifted = circshift(est, [-cx, -cy]);
            peripheral_signal = peripheral_signal + wmax .* shifted
            peripheral_signal_weights = peripheral_signal_weights + wmax
            # another
            shifted = circshift(est, [cx, -cy]);
            peripheral_signal = peripheral_signal + wmax .* shifted;
            peripheral_signal_weights = peripheral_signal_weights + wmax
            # another
            shifted = circshift(est, [-cx, cy]);
            peripheral_signal = peripheral_signal + wmax .* shifted;
            peripheral_signal_weights = peripheral_signal_weights + wmax
            # another
            shifted = circshift(est, [cx, cy]);
            peripheral_signal = peripheral_signal + wmax .* shifted;
            peripheral_signal_weights = peripheral_signal_weights + wmax
    # normalize by weights
    central_signal = central_signal ./ central_signal_weights;
    peripheral_signal = peripheral_signal ./ peripheral_signal_weights
    return central_signal, peripheral_signal
# Script entry: load the raw camera stack and the dark frame.
# NOTE(review): absolute, user-specific paths — parameterize before reuse.
data, background = importData(r'/Users/Luciano/Documents/LabNanofisica/rawstack.tif',
                              r'/Users/Luciano/Documents/LabNanofisica/darkframe.tif')
## some physical parameters of the setup
camera_pixel_length = 0.0975 # camera pixel length [µm] in sample space
scanning_period = 0.322 # scanning period [µm] in sample space
number_scanning_steps = 12 # number of scanning steps in one direction
# total number of camera frames is (number_scanning_steps)^2
pixel_length = 0.02 # pixel length [µm] of interpolated and combined frames
# derived parameters
shift_per_step = scanning_period / number_scanning_steps / camera_pixel_length;
# shift per scanning step [camera pixels]
pixel_length_per_camera = pixel_length / camera_pixel_length;
# length of pixel of combined frames in camera pixels
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-10 19:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration.

    Changes Track.number's verbose name to 'release number'; the column
    type (PositiveSmallIntegerField) is restated unchanged.
    """
    dependencies = [
        ('music_choreo_app', '0002_auto_20171210_1853'),
    ]
    operations = [
        migrations.AlterField(
            model_name='track',
            name='number',
            field=models.PositiveSmallIntegerField(verbose_name='release number'),
        ),
    ]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from pyoselm.core import OSELMClassifier, OSELMRegressor
from sklearn.datasets import load_digits, make_regression
from sklearn.metrics import confusion_matrix
import numpy as np
import time
import random
import sys
def make_batch_predictions(model, x, y, n_batch=50):
    """Fit *model* incrementally over consecutive non-overlapping batches.

    Each batch of n_batch samples is passed to model.fit() and scored;
    samples that do not fill a whole trailing batch are ignored.
    Returns (model, preds) where preds are the per-batch train predictions.
    """
    n_samples = len(y)
    # Fix: floor division — slice bounds must be ints; int(a / b) went
    # through an intermediate float for no benefit.
    n_batches = n_samples // n_batch
    print("Dataset has %i samples" % n_samples)
    print("Testing over %i non-overlapped batches with a max of %i samples..." % (n_batches, n_batch))
    scores, preds = [], []
    tic = time.time()
    for i in range(n_batches):
        x_batch = x[i*n_batch:(i+1)*n_batch]
        y_batch = y[i*n_batch:(i+1)*n_batch]
        model.fit(x_batch, y_batch)
        preds.extend(model.predict(x_batch))
        scores.append(model.score(x_batch, y_batch))
        print("Train score for batch %i: %s" % (i+1, str(scores[-1])))
    print("Train score - online: %s" % str(np.mean(scores)))
    print("Train score - offline: %s" % str(model.score(x, y)))
    toc = time.time()
    print("Total time: %.3f seconds" % (toc-tic))
    return model, preds
def make_sequential_predictions(model, x, y):
    """Fit *model* fully online: one boosting batch, then sample-by-sample.

    The boosting phase needs exactly model.n_hidden samples in the first
    batch; every remaining sample is then fed individually.
    Returns (model, preds) with one prediction per training sample.
    """
    n_samples = len(y)
    boost = model.n_hidden
    batches_x = [x[:boost]]
    batches_x += [[sample] for sample in x[boost:]]
    batches_y = [y[:boost]]
    batches_y += [[target] for target in y[boost:]]
    print("Testing over %i samples in a online way..." % n_samples)
    preds = []
    tic = time.time()
    for b_x, b_y in zip(batches_x, batches_y):
        model.fit(b_x, b_y)
        preds.extend(model.predict(b_x))
    print("Train score of total: %s" % str(model.score(x, y)))
    toc = time.time()
    print("Total time: %.3f seconds" % (toc-tic))
    return model, preds
def test_oselm_regression_batch(n_samples=400, n_hidden=25, activation_func='tanh', plot=True):
    """Demo: batch-mode OS-ELM regression on synthetic data, optionally plotted."""
    x, y = make_regression(n_samples=n_samples, n_targets=1, n_features=10)
    model = OSELMRegressor(n_hidden=n_hidden, activation_func=activation_func)
    model, y_pred = make_batch_predictions(model=model, x=x, y=y, n_batch=n_hidden)
    if plot is True:
        import matplotlib.pyplot as plt
        xs = range(len(y))
        plt.plot(xs, y, xs, y_pred)
        plt.show()
def test_oselm_regression_sequential(n_samples=2000, n_hidden=20, activation_func='tanh', plot=True):
    """Demo: fully-online OS-ELM regression on synthetic data, optionally plotted."""
    x, y = make_regression(n_samples=n_samples, n_targets=1, n_features=10)
    model = OSELMRegressor(n_hidden=n_hidden, activation_func=activation_func)
    model, y_pred = make_sequential_predictions(model=model, x=x, y=y)
    if plot is True:
        import matplotlib.pyplot as plt
        xs = range(len(y))
        plt.plot(xs, y, xs, y_pred)
        plt.show()
def test_oselm_classification_batch2(n_batches=10, n_hidden=100, activation_func='sigmoid'):
    """Demo: batch-mode softmax OS-ELM classification on the digits dataset."""
    x, y = load_digits(n_class=10, return_X_y=True)
    # Shuffle data. Fix: zip() returns a lazy iterator on Python 3 and
    # random.shuffle needs a mutable sequence — materialize it first.
    zip_x_y = list(zip(x, y))
    random.shuffle(zip_x_y)
    x, y = [x_y[0] for x_y in zip_x_y], [x_y[1] for x_y in zip_x_y]
    n_samples = len(y)
    print("Data have %i samples" % n_samples)
    # Fix: floor division — the float result of '/' is not a valid slice bound.
    n_batch = n_samples // n_batches  # batch size
    # NOTE(review): OSELMClassifierSoftmax is not imported at the top of this
    # file — confirm the intended import from pyoselm.
    oselmc = OSELMClassifierSoftmax(n_hidden=n_hidden, activation_func=activation_func)
    y_pred, scores = [], []
    for i in range(n_batches):
        x_batch = x[i*n_batch:(i+1)*n_batch]
        y_batch = y[i*n_batch:(i+1)*n_batch]
        oselmc.fit(x_batch, y_batch)
        y_pred.extend(oselmc.predict(x_batch))
        score_batch = oselmc.score(x_batch, y_batch)
        scores.append(score_batch)
        print("Train score for batch %i: %s" % (i+1, str(score_batch)))
    print("Train score - online: %s" % str(np.mean(scores)))
    print("Train score - offline: %s" % str(oselmc.score(x, y)))
    # Fix: y_pred only covers the complete batches, so truncate both to the
    # common length (y[:-1] raised on any length mismatch). Matches the
    # sibling test_oselm_classification_* functions.
    max_len = min(len(y), len(y_pred))
    print("Confusion matrix: \n %s" % str(confusion_matrix(y[:max_len], y_pred[:max_len])))
def test_oselm_classification_batch(n_hidden=100, activation_func='sigmoid'):
    """Demo: batch-mode OS-ELM classification on the digits dataset."""
    x, y = load_digits(n_class=10, return_X_y=True)
    # Shuffle data. Fix: zip() returns a lazy iterator on Python 3 and
    # random.shuffle needs a mutable sequence — materialize it first.
    zip_x_y = list(zip(x, y))
    random.shuffle(zip_x_y)
    x, y = [x_y[0] for x_y in zip_x_y], [x_y[1] for x_y in zip_x_y]
    oselmc = OSELMClassifier(n_hidden=n_hidden, activation_func=activation_func)
    oselmc, y_pred = make_batch_predictions(model=oselmc, x=x, y=y, n_batch=n_hidden)
    # y_pred covers only the complete batches; compare on the common prefix.
    max_len = min(len(y), len(y_pred))
    print("Confusion matrix: \n %s" % str(confusion_matrix(y[:max_len], y_pred[:max_len])))
def test_oselm_classification_sequential(n_hidden=100, activation_func='sigmoid'):
    """Demo: fully-online OS-ELM classification on the digits dataset."""
    x, y = load_digits(n_class=10, return_X_y=True)
    # Shuffle data. Fix: zip() returns a lazy iterator on Python 3 and
    # random.shuffle needs a mutable sequence — materialize it first.
    zip_x_y = list(zip(x, y))
    random.shuffle(zip_x_y)
    x, y = [x_y[0] for x_y in zip_x_y], [x_y[1] for x_y in zip_x_y]
    oselmc = OSELMClassifier(n_hidden=n_hidden, activation_func=activation_func)
    oselmc, y_pred = make_sequential_predictions(model=oselmc, x=x, y=y)
    # Compare on the common prefix in case prediction count differs.
    max_len = min(len(y), len(y_pred))
    print("Confusion matrix: \n %s" % str(confusion_matrix(y[:max_len], y_pred[:max_len])))
# Maps CLI mode names to zero-argument demo runners; the chosen entry is
# invoked from the __main__ guard below.
test_modes = {
    'classification_batch': lambda: test_oselm_classification_batch(n_hidden=100),
    'classification_sequential': lambda: test_oselm_classification_sequential(n_hidden=100),
    'regression_batch': lambda: test_oselm_regression_batch(n_hidden=50, n_samples=1000,
                                                            activation_func='tanh', plot=False),
    'regression_sequential': lambda: test_oselm_regression_sequential(n_hidden=50, n_samples=1000,
                                                                      activation_func='tanh',
                                                                      plot=False)
}
if __name__ == '__main__':
    # Default demo; an optional argv[1] selects a registered mode.
    # Unknown mode names silently fall back to the default.
    mode = 'regression_batch'
    if len(sys.argv) > 1:
        argmode = sys.argv[1]
        if argmode in test_modes:
            mode = argmode
    print("Executing test in mode=%s..." % mode)
    test_modes[mode]()
|
# Operator precedence for the infix-to-postfix converter: higher binds
# tighter. "X" is the multiplication token; parentheses get 0 so they
# never displace real operators by priority comparison.
priority = {
    "+" : 1,
    "-" : 1,
    "/" : 2,
    "X" : 2,
    "^" : 3,
    "(" : 0,
    ")" : 0,
}
def isFloat(number):
    """Return True if *number* can be converted to float, else False."""
    try:
        float(number)
        return True
    except (TypeError, ValueError):
        # Fix: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. float() only raises these two
        # for bad input.
        return False
def infixToPost(inExpr):
    """Convert a whitespace-separated infix expression to postfix (RPN).

    Operands are tokens that parse as floats; operators come from the
    module-level ``priority`` table. Shunting-yard with a plain list as
    the operator stack.
    """
    postExpr, opStack = [], []
    # Fix/idiom: use the END of the list as the stack top — append/pop()
    # are O(1), whereas the original insert(0)/pop(0) were O(n) per token.
    # The removed `global priority` was unnecessary (read-only access).
    for token in inExpr.strip().split():
        if isFloat(token):
            postExpr.append(token)
        elif token == ")":
            # Pop operators back to the matching "(", which is discarded.
            while opStack[-1] != "(":
                postExpr.append(opStack.pop())
            opStack.pop()
        elif token == "(":
            opStack.append("(")
        elif not opStack or priority[opStack[-1]] < priority[token]:
            opStack.append(token)
        else:
            # Equal/higher priority on the stack: emit one operator first.
            postExpr.append(opStack.pop())
            opStack.append(token)
    while opStack:
        postExpr.append(opStack.pop())
    return postExpr
def evaluate(postExpr):
    """Evaluate a whitespace-separated infix expression string.

    The string is converted to postfix first, then reduced with an
    operand stack. Contract kept from the original: whole-number results
    are returned as the raw float, everything else as a 2-decimal string.
    """
    postExpr = infixToPost(postExpr)
    tempStack = []
    for token in postExpr:
        if isFloat(token):
            tempStack.insert(0, float(token))
        else:
            if token == "-":
                temp = tempStack.pop(1) - tempStack.pop(0)
            elif token == "+":
                temp = tempStack.pop(1) + tempStack.pop(0)
            elif token == "X":
                temp = tempStack.pop(1) * tempStack.pop(0)
            elif token == "/":
                temp = tempStack.pop(1) / tempStack.pop(0)
            elif token == "^":
                # Fix: "^" is in the priority table and is emitted by
                # infixToPost, but was never handled here — `temp` was
                # stale or unbound when it appeared.
                temp = tempStack.pop(1) ** tempStack.pop(0)
            tempStack.insert(0, temp)
    if tempStack[0] - int(tempStack[0]) == 0:
        return tempStack[0]
    return str("{0:.2f}".format(tempStack[0]))
# expression = "( 6.2 + 6 / 3.1 ) X 2.8"
# postExpr = infixToPost(expression)
# infixToPost("2 + 8 / 2")
# postExpr = ['2', '4', '5', '/', '5', '3', '-', '5', '^', '4', '^', '*', '+']
# postExpr = ['2', '4', '5', '+', '5', '3', '-', '5', '4', '+']
#postExpr = ["10", "12", "+"]
# print(evaluate("6 X 8"))
|
# XPath locators for the login page, keyed by logical element name.
# NOTE(review): index-based absolute paths (div[1], div[2], ...) are
# brittle against layout changes; prefer id/name-based locators where the
# page provides them.
locators = {
    'title': '//*[@id="site-name"]/a',
    'username_title': '//*[@id="login-form"]/div[1]/label',
    'username_field': '//*[@id="id_username"]',
    'password_title': '//*[@id="login-form"]/div[2]/label',
    'password_field': '//*[@id="id_password"]',
    'login_button': '//*[@id="login-form"]/div[3]/input',
}
|
import PySide.QtGui as QtGui
import PySide.QtCore as QtCore
import Ui_MainWindow
import GameWorld
class MainWindow(QtGui.QMainWindow):
    """Main application window: hosts the GameWorld scene and advances it on a timer."""
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow.Ui_MainWindow()
        self.ui.setupUi(self)
        scene = GameWorld.GameWorld()
        scene.init()
        self.ui.graphicsView.setScene(scene)
        self.ui.graphicsView.show()
        # Tick every 50 ms; the id is kept so the timer can be cancelled.
        self.timer = self.startTimer(50)
    #
    def timerEvent(self, event):
        # Stop ticking once the scene reports no further progress.
        scene = self.ui.graphicsView.scene()
        if not scene.advance():
            self.killTimer(self.timer)
    #
    def closeEvent(self, event):
        # Cancel the timer before the window goes away.
        self.killTimer(self.timer)
        event.accept()
    #
    @QtCore.Slot()
    def on_actionQuitTriggered(self):
        # NOTE(review): Qt's auto-connection convention is
        # on_<objectName>_<signalName> (e.g. on_actionQuit_triggered);
        # with this spelling the slot may never be invoked unless it is
        # connected explicitly — confirm.
        self.killTimer(self.timer)
        self.close()
from torch import nn
import torchvision.models as models
from torchvision.models.resnet import Bottleneck
from torch.hub import load_state_dict_from_url
class MedNet(models.ResNet):
    """ Simple transfer learning for a medical image task instead of CIFAR """
    def __init__(self, num_classes):
        # ResNet-152 topology, then the matching pretrained weights.
        super(MedNet, self).__init__(block=Bottleneck, layers=[3, 8, 36, 3])
        state_dict = load_state_dict_from_url("https://download.pytorch.org/models/resnet152-b121ed2d.pth",
                                              progress=True)
        self.load_state_dict(state_dict)
        # Freeze the first six child modules; only deeper layers keep training.
        for position, child in enumerate(self.children(), start=1):
            if position < 7:
                for param in child.parameters():
                    param.requires_grad = False
        # instead of many classes, there are three classes, so a trainable, fully
        # connected layer is added
        in_features = self.fc.in_features
        self.fc = nn.Sequential(nn.Linear(in_features, num_classes, bias=True))
# -*- coding: utf-8 -*-
# by Part!zanes 2017
import re
import requests
from log import Log
from config import Config as cfg
# Helpdesk HTTP wrapper: posts "quick reply" answers to tickets and
# notifies a chat group about the outcome.
class hdapi(object):
    # Class-level setup runs once, at import time.
    cfg.initializeConfig()
    hdLog = Log("hdLog")
    hdUrl = cfg.getHdUrl()
    @staticmethod
    def postQuickReply(ticket_id, reply, status, openbot):
        """Post *reply* to ticket *ticket_id*, set its status; True on HTTP 200."""
        # Staff session key from a basic-auth protected bot endpoint.
        secretKey = requests.get(cfg.getHdBotToken(), auth=requests.auth.HTTPBasicAuth(cfg.getHdBotUsername(), cfg.getHdBotPassword())).text
        # NOTE(review): Response.text is never None, so this branch cannot
        # trigger; and even if it did, execution falls through instead of
        # returning — confirm the intended failure handling.
        if(secretKey is None):
            hdapi.hdLog.critical("[postQuickReply] Не удалось получить secretKey")
            openbot.sendMessageGroup("[postQuickReply] Не удалось получить secretKey")
        cookies = dict(staff_data=secretKey,supportadmin=cfg.getHdSession())
        # Fetch the ticket page to scrape its subject line.
        r = requests.get(hdapi.hdUrl + '/ticket_detail.php?ticket_id=%s' % (ticket_id), cookies=cookies)
        # Fallback subject used when scraping fails.
        subject = "Ответ на заявку {0}".format(ticket_id)
        try:
            # Layout-dependent regex against the ticket HTML; breaks if the
            # helpdesk markup changes.
            subject = re.search(u'Предмет\n\t\t</td>\n\t\t<td class=\"ticket-content-[a-z]{1,10}\" colspan=5>\n\n\t\t(.+?)\n\n\t\t</td>', r.text).group(1).replace('\r','').replace('\t','')
        except Exception as exc:
            hdapi.hdLog.critical("[postQuickReply] Не удалось получить тему для ответа")
        dataToPost = {
            'ticket_id': ticket_id,
            'subject': subject,
            'reply': reply,
            'act':'quickreply',
            'submit':'Послать ответ',
            'dotktaction': status,
            'canned':'0'
        }
        r = requests.post(hdapi.hdUrl + '/ticket_detail.php?ticket_id=%s' % (ticket_id), data = dataToPost, cookies=cookies)
        if(r.status_code == 200):
            hdapi.hdLog.info("[postQuickReply][%s] Ответ отправлен." % (ticket_id))
            openbot.sendMessageGroup("[%s] Ответ отправлен." % (ticket_id))
            return True
        else:
            hdapi.hdLog.critical("[postQuickReply][%s] Попытка ответа неудачна.Код ответа: %s" % (ticket_id, r.status_code))
            openbot.sendMessageGroup("[postQuickReply][%s] Попытка ответа неудачна.Код ответа: %s" % (ticket_id, r.status_code))
            return False
from tkinter import Canvas, BOTH
from automata import Automata
from canvasGrid import CanvasGrid
class AutomataCanvas(Canvas):
    """Tk canvas that lays out and draws an Automata's states and transitions."""
    def __init__(self, frame=None, data=None, automata=None, width=0, height=0):
        if frame is None:
            raise ValueError('ERROR: no frame')
        super().__init__(
            frame,
            width=width,
            height=height,
            borderwidth=0,
            highlightthickness=0
        )
        self.pack(fill=BOTH, expand=1)
        self.width = width
        self.height = height
        # Build the automata from raw data unless one was supplied.
        self.automata = automata if automata is not None else Automata(data)
        # NOTE: this shadows tkinter's grid() geometry method; kept because
        # existing callers may rely on the attribute.
        self.grid = CanvasGrid(width=width, height=height)
        self.drawAutomata()
    def drawAutomata(self):
        """Place every state in a free grid box, then draw transitions and states."""
        startingX = 10
        startingY = 50
        x = startingX
        y = startingY
        stateBuffer = 50  # padding around each state circle
        states = self.automata.getStates()
        transitions = self.automata.getTransitions()
        for state in states:
            state.setCanvas(self)
            size = state.getDiameter() + stateBuffer
            # Scan left-to-right, wrapping to a new row, until a box is free.
            while not self.grid.isBoxFree(x, y, size):
                x += size
                if x >= self.width:
                    x = startingX
                    y += size
            self.grid.occupyBox(x+(stateBuffer / 2), y+(stateBuffer / 2), state.getDiameter())
            state.setLocation(x+(stateBuffer / 2), y+(stateBuffer / 2))
        # Draw transitions first so state circles end up above the lines.
        for transition in transitions:
            transition.setCanvas(self)
            fromState = states[transition.getOriginState()]
            toState = states[transition.getNextState()]
            transition.draw(fromState, toState)
        for state in states:
            state.draw()
    def drawGrid(self):
        """Debug helper: overlay a 10px grid on a 500x500 area."""
        for x in range(51):
            self.create_line(x*10, 0, x*10, 500)
        for y in range(51):
            self.create_line(0, y*10, 500, y*10)
    def drawState(self, state, drawn):
        """Depth-first walk appending each reachable state id to *drawn* once."""
        if state.id not in drawn:
            drawn.append(state.id)
            for symbol in state.transitions:
                nextStateId = state.transitions[symbol]
                nextState = self.automata.states[nextStateId]
                self.drawState(nextState, drawn)
    def drawCircle(self):
        """Debug helper: draw a tagged circle with a lead-out line."""
        self.test = self.create_oval(0, 0, 39, 39, outline="#000", width=1, tags=("one", "two"))
        self.create_line(40, 20, 70, 20)
    def drawData(self, data):
        """Draw data.numCircles circles and a line between the first two points.

        Fix: this class *is* the canvas (it subclasses Canvas), but the
        original called self.canvas.create_line / self.canvas.pack, which
        raised AttributeError — no `canvas` attribute exists.
        """
        for _ in range(data.numCircles):
            self.drawCircle()
        self.create_line(data.points[0][0],
                         data.points[0][1],
                         data.points[1][0],
                         data.points[1][1])
        self.pack(fill=BOTH, expand=1)
import cv2
# One-off preprocessing: resize the first 4000 dog and cat images to
# 32x32 and save them under DataSet_CAT_DOG/.
for dogNUM in range(4000):
    # Reads dogs/dog.1 .. dogs/dog.4000 ...
    dogPic="dogs/dog."+str(dogNUM+1)+".jpg"
    # NOTE(review): cv2.imread returns None for a missing/unreadable file,
    # which makes the resize below raise — there is no guard here.
    img=cv2.imread(dogPic)
    imgResize=cv2.resize(img,(32,32))
    # ... but writes dog.0resized .. dog.3999resized: input and output
    # numbering are off by one. Confirm whether the shift is intentional.
    dogPic2="DataSet_CAT_DOG/dogs/dog."+str(dogNUM)+"resized.jpg"
    cv2.imwrite(dogPic2,imgResize)
for catNUM in range(4000):
    # Same pattern (and same off-by-one) for the cat images.
    catPic="cats/cat."+str(catNUM+1)+".jpg"
    img=cv2.imread(catPic)
    imgResize=cv2.resize(img,(32,32))
    catPic2="DataSet_CAT_DOG/cats/cat."+str(catNUM)+"resized.jpg"
    cv2.imwrite(catPic2,imgResize)
|
import numpy as np
import json
import tensorflow as tf
from config import Config
from utils import *
import sys
import os
class imdb_classifier(object):
    """Bi-directional GRU + attention sentiment classifier for IMDB (TF1 graph API).

    Lifecycle: build() constructs the graph, train() optimizes it,
    test() reports loss/accuracy on (subsets of) the train/test splits,
    predict() returns class ids for new tokenized sequences.
    """
    def __init__(self, config, session, x_train, y_train, x_test, y_test, train_length, test_lentgh):
        # The `test_lentgh` spelling is kept so existing keyword callers
        # do not break.
        self.config = config
        self.embedding_size = config.embedding_size
        self.batch_size = config.batch_size
        self.encoder_hidden_size = config.encoder_hidden_size
        self.vocab_size = config.vocab_size
        self.lr = config.lr
        self.sess = session
        self.epoch_num = config.epoch_num
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        self.train_length = train_length
        # Fix: the original read the undefined name `test_length` (the
        # parameter is spelled `test_lentgh`), raising NameError on init.
        self.test_length = test_lentgh
        self.test_lentgh = test_lentgh  # legacy alias, still read elsewhere
        self.max_length = config.max_length
        self.max_grad_norm = config.max_grad_norm
        self.save_per_epoch = config.save_per_epoch
        self.ckpt_path = config.ckpt_path
        self.keep_prob = config.keep_prob
        self.atn_hidden_size = config.atn_hidden_size
    def build(self):
        """Build placeholders, frozen embedding, bi-GRU encoder, attention and loss."""
        self.global_step = tf.Variable(0, name="global_step")
        self.batch_maxlen = tf.placeholder(dtype=tf.int32, name="batch_maxlen")
        self.output_keep_prob = tf.placeholder(dtype=tf.float32, name="output_keep_prob")
        self.encoder_input = tf.placeholder(shape=(None, None), dtype=tf.int32, name="encoder_input")
        self.encoder_input_length = tf.placeholder(shape=(None,), dtype=tf.int32, name="encoder_input_length")
        self.labels = tf.placeholder(shape=(None,), dtype=tf.int32, name="label")
        # Random, frozen (non-trainable) word embedding.
        self.embedding = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -5.0, 5.0),
                                     dtype=tf.float32,
                                     trainable=False,
                                     name="embedding")
        self.encoder_input_embedded = tf.nn.embedding_lookup(self.embedding,
                                                             self.encoder_input)
        # Fix: the dropped-out tensor was stored under the misspelled name
        # `encoder_input_embeded` and never read, so dropout was a no-op.
        self.encoder_input_embedded = tf.nn.dropout(self.encoder_input_embedded,
                                                    keep_prob=self.output_keep_prob)
        # bidirectional GRU with dropout
        encoder_fw = tf.contrib.rnn.GRUCell(self.encoder_hidden_size)
        encoder_bw = tf.contrib.rnn.GRUCell(self.encoder_hidden_size)
        self.encoder_fw = tf.contrib.rnn.DropoutWrapper(encoder_fw, output_keep_prob=self.output_keep_prob)
        self.encoder_bw = tf.contrib.rnn.DropoutWrapper(encoder_bw, output_keep_prob=self.output_keep_prob)
        # Since time_major == False, output shape should be [batch_size, max_time, ...]
        with tf.variable_scope("bi-GRU") as scope:
            ((self.encoder_fw_output, self.encoder_bw_output),
             (self.encoder_fw_state, self.encoder_bw_state)) = (
                tf.nn.bidirectional_dynamic_rnn(cell_fw=self.encoder_fw,
                                                cell_bw=self.encoder_bw,
                                                inputs=self.encoder_input_embedded,
                                                sequence_length=self.encoder_input_length,
                                                dtype=tf.float32)
            )
        self.encoder_output = tf.concat((self.encoder_fw_output, self.encoder_bw_output), 2)
        # [batch_size, max_time, 2 * encoder_hidden_size]
        self.encoder_state = tf.concat((self.encoder_fw_state, self.encoder_bw_state), 1)
        # Attention layer: 1x1 convolutions implement the scoring MLP.
        with tf.variable_scope("attention") as scope:
            self._atn_in = tf.expand_dims(self.encoder_output, axis=2)  # [batch_size, max_time, 1, 2 * encoder_hidden_size]
            self.atn_w = tf.Variable(
                tf.truncated_normal(shape=[1, 1, 2 * self.encoder_hidden_size, self.atn_hidden_size], stddev=0.1),
                name="atn_w")
            self.atn_b = tf.Variable(tf.zeros(shape=[self.atn_hidden_size]))
            self.atn_v = tf.Variable(
                tf.truncated_normal(shape=[1, 1, self.atn_hidden_size, 1], stddev=0.1),
                name="atn_b")
            self.atn_activations = tf.nn.tanh(
                tf.nn.conv2d(self._atn_in, self.atn_w, strides=[1, 1, 1, 1], padding='SAME') + self.atn_b)
            self.atn_scores = tf.nn.conv2d(self.atn_activations, self.atn_v, strides=[1, 1, 1, 1], padding='SAME')
            atn_probs = tf.nn.softmax(tf.squeeze(self.atn_scores, [2, 3]))
            # Probability-weighted sum of encoder outputs -> context vector.
            _atn_out = tf.matmul(tf.expand_dims(atn_probs, 1), self.encoder_output)
            self.atn_out = tf.squeeze(_atn_out, [1], name="atn_out")
        # Output layer: binary logits from the attention context.
        with tf.variable_scope("output") as scope:
            self.output_w = tf.Variable(
                tf.truncated_normal(shape=(self.encoder_hidden_size*2, 2), stddev=0.1), name="output_w")
            self.output_b = tf.Variable(tf.zeros(2), name="output_b")
            self.logits = tf.matmul(self.atn_out, self.output_w) + self.output_b
            self.prediction = tf.cast(tf.argmax(self.logits, 1), tf.int32)
            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(self.prediction, self.labels), tf.float32))
            self.loss = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits, labels=self.labels)
            )
        self.train_op = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss, global_step=self.global_step)
        self.saver = tf.train.Saver()
    def train(self, mode=None, restore=False):
        """Train for epoch_num epochs; checkpoint every save_per_epoch epochs.

        mode="continue" with restore=True resumes from self.ckpt_path.
        """
        print("Building the model...")
        # Fix: the graph was only built when mode != "continue", so resuming
        # crashed (self.saver did not exist). The graph is always built now;
        # a resume then restores the checkpoint over the fresh variables.
        self.build()
        self.sess.run(tf.global_variables_initializer())
        if mode == "continue" and restore:
            self.saver.restore(sess=self.sess, save_path=self.ckpt_path)
        print("Starting training...")
        print("%d steps per epoch." % (len(self.x_train) // self.batch_size))
        for epoch in range(self.epoch_num):
            loss_in_epoch = []
            acc_in_epoch = []
            for x_batch, y_batch, input_length in self.minibatches(
                    self.x_train, self.y_train, self.train_length, batch_size=self.batch_size, shuffle=True):
                # pad inputs
                x_batch, batch_maxlen = self.padding_sequence(x_batch, self.max_length)
                feed_dict = {
                    self.encoder_input: x_batch,
                    self.encoder_input_length: input_length,
                    self.labels: y_batch,
                    self.output_keep_prob: self.keep_prob,
                    self.batch_maxlen: batch_maxlen
                }
                # Fix: ran against the module-level `sess` instead of self.sess.
                _, loss, step, acc, pred = self.sess.run(
                    [self.train_op, self.loss, self.global_step, self.accuracy, self.prediction], feed_dict=feed_dict)
                loss_in_epoch.append(loss)
                acc_in_epoch.append(acc)
                sys.stdout.write("Epoch %d, Step: %d, Loss: %.4f, Acc: %.4f\r" % (epoch, step, loss, acc))
                sys.stdout.flush()
            sys.stdout.write("Epoch %d, Step: %d, Loss: %.4f, Acc: %.4f\r" %
                             (epoch, step, np.mean(loss_in_epoch), np.mean(acc_in_epoch)))
            sys.stdout.flush()
            print("")
            if (epoch + 1) % self.save_per_epoch == 0:
                self.saver.save(self.sess, "models/bi-lstm-imdb.ckpt")
                self.test(sub_size=5000, restore=False)
    def test(self, sub_size=None, restore=False):
        """Report loss/accuracy on (subsets of) the train and test splits."""
        if sub_size is None:
            train_sub_size = len(self.y_train)
            test_sub_size = len(self.y_test)
        else:
            train_sub_size = sub_size
            test_sub_size = sub_size
        # build and restore the model when called standalone
        if restore:
            self.build()
            self.sess.run(tf.global_variables_initializer())
            self.saver.restore(sess=self.sess, save_path=self.ckpt_path)
        acc_list = []
        loss_list = []
        for x_batch, y_batch, input_length in self.minibatches(
                self.x_train[:train_sub_size], self.y_train[:train_sub_size], self.train_length[:train_sub_size], self.batch_size, False):
            x_batch, _ = self.padding_sequence(x_batch, self.max_length)
            feed_dict = {
                self.encoder_input: x_batch,
                self.encoder_input_length: input_length,
                self.labels: y_batch,
                self.output_keep_prob: 1.0  # no dropout at eval time
            }
            loss, acc, pred = self.sess.run([self.loss, self.accuracy, self.prediction], feed_dict=feed_dict)
            acc_list.append(acc)
            loss_list.append(loss)
        print("Test finished on training set! Loss: %.4f, Acc: %.4f" % (np.mean(loss_list), np.mean(acc_list)))
        acc_list = []
        loss_list = []
        for x_batch, y_batch, input_length in self.minibatches(
                self.x_test[:test_sub_size], self.y_test[:test_sub_size], self.test_length[:test_sub_size], self.batch_size, False):
            x_batch, _ = self.padding_sequence(x_batch, self.max_length)
            feed_dict = {
                self.encoder_input: x_batch,
                self.encoder_input_length: input_length,
                self.labels: y_batch,
                self.output_keep_prob: 1.0
            }
            loss, acc = self.sess.run([self.loss, self.accuracy], feed_dict=feed_dict)
            acc_list.append(acc)
            loss_list.append(loss)
        print("Test finished on test set! Loss: %.4f, Acc: %.4f" % (np.mean(loss_list), np.mean(acc_list)))
    def predict(self, inputs, restore=False):
        """Return predicted class ids for already-tokenized sequences.

        Fix: the original measured lengths after padding (via a tuple, which
        raised), fed an undefined `input_length` name and called
        self.sess.run() with no fetches — it could never return predictions.
        """
        if restore:
            self.build()
            self.sess.run(tf.global_variables_initializer())
            self.saver.restore(sess=self.sess, save_path=self.ckpt_path)
        # Measure true lengths BEFORE padding masks them.
        inputs_length = np.array([min(len(seq), self.max_length) for seq in inputs])
        inputs, _ = self.padding_sequence(inputs, self.max_length)
        feed_dict = {
            self.encoder_input: inputs,
            self.encoder_input_length: inputs_length,
            self.output_keep_prob: 1.0
        }
        return self.sess.run(self.prediction, feed_dict=feed_dict)
    def minibatches(self, inputs=None, targets=None, input_len=None, batch_size=None, shuffle=True):
        """Yield (inputs, targets, lengths) batches; trailing partial batch is dropped.

        NOTE: with shuffle=True the excerpt is an index array, so the three
        sequences must support fancy indexing (numpy arrays) — plain lists
        only work with shuffle=False.
        """
        assert len(inputs) == len(targets)
        if shuffle:
            indices = np.arange(len(inputs))
            np.random.shuffle(indices)
        for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
            if shuffle:
                excerpt = indices[start_idx:start_idx + batch_size]
            else:
                excerpt = slice(start_idx, start_idx + batch_size)
            yield inputs[excerpt], targets[excerpt], input_len[excerpt]
    def padding_sequence(self, inputs, max_length=None):
        """Right-pad (and truncate) sequences with zeros into one int32 matrix.

        Returns (matrix, maxlen) where maxlen is the batch's longest
        sequence, capped at *max_length* when given.
        """
        batch_size = len(inputs)
        longest = np.max([len(i) for i in inputs])
        if max_length is not None and longest > max_length:
            maxlen = max_length
        else:
            maxlen = longest
        output = np.zeros([batch_size, maxlen], dtype=np.int32)
        for i, seq in enumerate(inputs):
            output[i, :len(seq[:maxlen])] = np.array(seq[:maxlen])
        return output, maxlen
if __name__ == "__main__":
config = Config(batch_size=32,
embedding_size=128,
encoder_hidden_size=64,
vocab_size=88584,
lr=0.0005,
epoch_num=50,
save_per_epoch=5,
max_length=128,
max_grad_norm=5,
keep_prob=0.2,
atn_hidden_size=16,
ckpt_path="models")
# mkdir models
if not os.path.exists("models"):
os.mkdir("models")
x_train, y_train, x_test, y_test, train_length, test_length, wid_dict, id2w = load_imdb_data()
SAMPLE_SIZE = 25000 # To debug, set to 25000 after debuging
sess = tf.Session()
classifier = imdb_classifier(config,
sess,
x_train[:SAMPLE_SIZE],
y_train[:SAMPLE_SIZE],
x_test[:SAMPLE_SIZE],
y_test[:SAMPLE_SIZE],
train_length,
test_length)
if len(sys.argv) == 1:
# train a new model
#tf.reset_default_graph()
classifier.train()
else:
if sys.argv[1] == "test":
classifier.test()
elif sys.argv[1] == "continue":
classifier.train(mode="continue", restore=True)
sess.close()
|
class Point:
    """A single timestamped pose sample: orientation plus position."""
    def __init__(self, orientation, position, timestamp):
        self.orientation = orientation  # presumably an Orientation quaternion — confirm callers
        self.position = position        # presumably a Position (x, y, z) — confirm callers
        self.timestamp = timestamp      # time units unspecified — confirm (epoch seconds?)
class Orientation:
    """Orientation with components (w, x, y, z) — quaternion-style layout."""
    def __init__(self, w, x, y, z):
        self.w = w
        self.x = x
        self.y = y
        self.z = z
class Position:
    """A 3-D position with components (x, y, z)."""
    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
|
def zero_matrix(matrix):
    """Zero out, in place, every row and column that contains a 0.

    Classic "zero matrix" problem: after the call, matrix[i][j] == 0
    whenever row i or column j originally contained a zero. Mutates
    *matrix* and returns None.
    """
    zero_cols = []
    for row in matrix:
        has_zero = False
        for col_index, value in enumerate(row):
            if value == 0:
                zero_cols.append(col_index)
                has_zero = True
        if has_zero:
            # Fix: the original did `for num in row: num = 0`, which only
            # rebinds the loop variable and leaves the row unchanged.
            # Slice assignment actually zeroes the row in place.
            row[:] = [0] * len(row)
    if zero_cols:
        for row in matrix:
            for index in zero_cols:
                row[index] = 0
|
from ast import Call
from hashlib import sha256
from random import randint
from shellshock.parse import Parseable, parse
class AssignType(Parseable):
    """Translate a Python assignment AST node into shell assignment syntax."""
    @classmethod
    def parse(cls, obj):
        """Emit shell for *obj*, registering the target as a known variable."""
        target = obj.targets[0].id
        cls._known_vars.add(target)
        # Assignments fed directly by ss.input() need the read-indirection
        # via a temp variable; plain values become NAME=VALUE.
        if isinstance(obj.value, Call) and obj.value.func.attr == 'input':
            return cls._read_input_into_var(target, obj)
        return "{}={}".format(target, parse(obj.value))
    @classmethod
    def _read_input_into_var(cls, var, obj):
        """Emit the two shell statements that read user input into *var*.

        obj.value is assumed to be a Call pointing at ss.input and
        obj.value.args[0] its prompt. The input lands in a temp variable
        whose name is derived from the sha256 of the prompt text, so the
        same prompt always maps to the same temp name (repeatability).
        """
        digest = sha256()
        digest.update(obj.value.args[0].s.encode())
        temp_var = "INPUT_VAR_{}".format(digest.hexdigest()[:8])
        return [
            parse(obj.value, assign_to_var=temp_var),
            "{new_var}=\"${temp_var}\"".format(new_var=var, temp_var=temp_var),
        ]
|
import asyncio
import logging
from abc import ABCMeta
from types import MappingProxyType
from typing import Any, Callable, Mapping, Optional
from . import decorators
from .abc import AbstractRoute, AbstractWebSocket
log = logging.getLogger("wsrpc")
# noinspection PyUnresolvedReferences
class RouteMeta(ABCMeta):
    """Metaclass that builds per-class ``__proxy__``/``__no_proxy__`` registries.

    ``__proxy__`` holds names of methods exposed to remote callers;
    ``__no_proxy__`` holds names explicitly masked. Registries are inherited
    from base classes, extended by decorator markers, then finalized by the
    class's own ``__is_method_allowed__``/``__is_method_masked__`` hooks.
    """

    def __new__(cls, clsname, superclasses, attributedict):
        # Seed the registries with everything inherited from the bases.
        attrs = {"__no_proxy__": set(), "__proxy__": set()}
        for superclass in superclasses:
            if not hasattr(superclass, "__proxy__"):
                continue
            attrs["__no_proxy__"].update(superclass.__no_proxy__)
            attrs["__proxy__"].update(superclass.__proxy__)
        for key, value in attributedict.items():
            # The registries themselves are rebuilt here, never copied verbatim.
            if key in ("__proxy__", "__no_proxy__"):
                continue
            # Unwrap decorator markers, recording the exposure decision and
            # storing the bare function on the class.
            if isinstance(value, decorators.NoProxyFunction):
                value = value.func
                attrs["__no_proxy__"].add(key)
            elif isinstance(value, decorators.ProxyFunction):
                value = value.func
                attrs["__proxy__"].add(key)
            attrs[key] = value
        instance = super(RouteMeta, cls).__new__(
            cls, clsname, superclasses, attrs,
        )
        # Let the freshly created class classify its remaining callables via
        # its own hooks (e.g. prefix- or underscore-based rules).
        for key, value in attrs.items():
            if not callable(value):
                continue
            if instance.__is_method_allowed__(key, value) is True:
                instance.__proxy__.add(key)
            elif instance.__is_method_masked__(key, value) is True:
                instance.__no_proxy__.add(key)
        # Freeze both registries so they cannot be mutated after class creation.
        for key in ("__no_proxy__", "__proxy__"):
            setattr(instance, key, frozenset(getattr(instance, key)))
        return instance
ProxyCollectionType = Mapping[str, Callable[..., Any]]
class RouteBase(AbstractRoute, metaclass=RouteMeta):
    """Common plumbing for RPC routes: holds the socket and a lazy event loop.

    Subclasses control remote exposure through the
    ``__is_method_allowed__``/``__is_method_masked__`` hooks, which the
    metaclass consults at class-creation time.
    """

    __proxy__: ProxyCollectionType = MappingProxyType({})
    __no_proxy__: ProxyCollectionType = MappingProxyType({})

    def __init__(self, socket: AbstractWebSocket):
        super().__init__(socket)
        self.__socket = socket
        self.__loop: Optional[asyncio.AbstractEventLoop] = None

    @property
    def socket(self) -> AbstractWebSocket:
        """The websocket this route is bound to."""
        return self.__socket

    @property
    def loop(self) -> asyncio.AbstractEventLoop:
        """Event loop for this route, resolved lazily on first access."""
        if not self.__loop:
            self.__loop = asyncio.get_event_loop()
        return self.__loop

    def _onclose(self):
        """Hook run when the socket closes; the base implementation is a no-op."""
        pass

    @classmethod
    def __is_method_allowed__(cls, name, func):
        # Base class expresses no opinion; subclasses return True to expose.
        return None

    @classmethod
    def __is_method_masked__(cls, name, func):
        # Base class expresses no opinion; subclasses return True to mask.
        return None
class Route(RouteBase):
    """Callable route that masks underscore-prefixed methods by default."""

    def _method_lookup(self, method):
        """Resolve *method* to a bound callable, enforcing the registries."""
        # Guard clauses: explicit mask wins, then unknown names are rejected.
        if method in self.__no_proxy__:
            raise NotImplementedError("Method masked")
        if method not in self.__proxy__:
            raise NotImplementedError("Method not implemented")
        return getattr(self, method)

    @classmethod
    def __is_method_masked__(cls, name, func):
        # Mask private (underscore-prefixed) names; otherwise no opinion (None).
        return True if name.startswith("_") else None

    def __call__(self, method):
        return self._method_lookup(method)
class AllowedRoute(Route):
    """Route exposing every method that is not underscore-prefixed."""

    @classmethod
    def __is_method_allowed__(cls, name, func):
        # Public names are callable remotely; private ones are not.
        return not name.startswith("_")
class PrefixRoute(Route):
    """Route exposing only methods whose names start with ``PREFIX``.

    Remote callers use the unprefixed name; lookup adds the prefix back.
    """

    PREFIX = "rpc_"

    @classmethod
    def __is_method_allowed__(cls, name, func):
        # Bug fix: originally this tested the literal "rpc_" instead of
        # cls.PREFIX, so subclasses overriding PREFIX still exposed the
        # wrong set of methods. Use the class attribute consistently.
        return name.startswith(cls.PREFIX)

    def _method_lookup(self, method):
        """Resolve the unprefixed remote name to the prefixed method."""
        return super()._method_lookup(self.PREFIX + method)
class WebSocketRoute(AllowedRoute):
    """Default route type for websocket handlers; public methods are exposed."""

    @classmethod
    def noproxy(cls, func):
        """Mark *func* as not remotely callable (see :func:`decorators.noproxy`)."""
        return decorators.noproxy(func)
# Public API of this module.
# NOTE(review): PrefixRoute is not listed here — looks like an omission, but
# it may be intentionally internal; confirm before adding it.
__all__ = (
    "RouteBase",
    "Route",
    "WebSocketRoute",
    "AllowedRoute",
    "decorators",
)
|
'''
This code is based on the Matryoshka [1] repository [2] and was modified accordingly:
[1] https://arxiv.org/abs/1804.10975
[2] https://bitbucket.org/visinf/projects-2018-matryoshka/src/master/
Copyright (c) 2018, Visual Inference Lab @TU Darmstadt
'''
import os
import json
from collections import OrderedDict
from ipdb import set_trace
def id_to_name(id, category_list):
    """Map a flat index to ``(category_name, index_within_category)``.

    *category_list* maps a category name to a ``(start, end)`` half-open
    index range. Returns None implicitly when *id* falls in no range.
    """
    for name, bounds in category_list.items():
        # Chained comparison replaces the original two-clause test.
        if bounds[0] <= id < bounds[1]:
            return (name, id - bounds[0])
def category_model_id_pair(dataset_portion=None):
    '''
    Load category, model names from a shapenet dataset.

    :param dataset_portion: two-element [start, end] fraction pair selecting a
        slice of each category's models (e.g. [0, 0.8]); defaults to the full
        dataset. (Bug fix: the original used a mutable default ``[]``, which
        is shared across calls and crashed with IndexError when actually used.)
    :returns: list of (category_id, model_id) tuples.
    '''
    if dataset_portion is None:
        dataset_portion = [0.0, 1.0]

    def model_names(model_path):
        """Return the sorted subdirectory names under *model_path*."""
        return sorted(
            name for name in os.listdir(model_path)
            if os.path.isdir(os.path.join(model_path, name))
        )

    category_name_pair = []  # (category_id, model_id) pairs
    # Close the file deterministically instead of json.load(open(...)).
    with open('ShapeNet.json') as f:
        cats = json.load(f)
    cats = OrderedDict(sorted(cats.items(), key=lambda x: x[0]))
    for k, cat in cats.items():  # load by categories
        model_path = os.path.join('/home/matryoshka/matryoshka/data/ShapeNetVox32', cat['id'])
        models = model_names(model_path)
        num_models = len(models)
        portioned_models = models[int(num_models * dataset_portion[0]):int(num_models *
                                                                           dataset_portion[1])]
        category_name_pair.extend((cat['id'], model_id) for model_id in portioned_models)
    return category_name_pair
# Write 3D-R2N2-style split files: 80% of each category's models go to the
# train split, the remaining 20% to the test split, one "<synset>/<model>"
# entry per line.
with open('3dr2n2-train.txt', 'w') as f:
    for synset, model in category_model_id_pair([0,0.8]):
        f.write('%s/%s\n' % (synset, model))
with open('3dr2n2-test.txt', 'w') as f:
    for synset, model in category_model_id_pair([0.8,1]):
        f.write('%s/%s\n' % (synset, model))
|
# Made with python3
# (C) @FayasNoushad
# Copyright permission under MIT License
# All rights reserved by FayasNoushad
# License -> https://github.com/FayasNoushad/Info-Bot/blob/main/LICENSE
import os
from pyrogram import Client, filters
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
FayasNoushad = Client(
"Info-Bot",
bot_token = os.environ["BOT_TOKEN"],
api_id = int(os.environ["API_ID"]),
api_hash = os.environ["API_HASH"]
)
START_TEXT = """
Hello {}, I am a user or chat information finder telegram bot.
- Send /info for your info
- Send /info reply to a forward message for chat or user info
Made by @FayasNoushad
"""
BUTTONS = InlineKeyboardMarkup(
[[
InlineKeyboardButton('⚙ Join Updates Channel ⚙', url='https://telegram.me/FayasNoushad')
]]
)
@FayasNoushad.on_message(filters.private & filters.command(["start"]))
async def start(bot, update):
    """Reply to /start in private chat with the greeting and channel button."""
    greeting = START_TEXT.format(update.from_user.mention)
    await update.reply_text(
        text=greeting,
        disable_web_page_preview=True,
        reply_markup=BUTTONS,
        quote=True,
    )
@FayasNoushad.on_message((filters.private | filters.group) & filters.command(["info", "information"]))
async def info(bot, update):
    """Reply with details about the requesting user, or about the user/chat a
    replied-to forwarded message came from.

    Resolution order: no reply -> the sender; reply to a forward -> the
    original user or chat; reply to a normal message -> its author.
    """
    # NOTE(review): the `or` below means this branch triggers unless BOTH
    # forward fields are set, which can never happen; possibly `and` was
    # intended — confirm against pyrogram Message semantics before changing.
    if (not update.reply_to_message) and ((not update.forward_from) or (not update.forward_from_chat)):
        info = user_info(update.from_user)
    elif update.reply_to_message and update.reply_to_message.forward_from:
        info = user_info(update.reply_to_message.forward_from)
    elif update.reply_to_message and update.reply_to_message.forward_from_chat:
        info = chat_info(update.reply_to_message.forward_from_chat)
    elif (update.reply_to_message and update.reply_to_message.from_user) and (not update.forward_from or not update.forward_from_chat):
        info = user_info(update.reply_to_message.from_user)
    else:
        return
    try:
        await update.reply_text(
            text=info,
            reply_markup=BUTTONS,
            disable_web_page_preview=True,
            quote=True
        )
    except Exception as error:
        # Bug fix: reply_text expects text; stringify the exception instead of
        # passing the Exception object itself.
        await update.reply_text(str(error))
def user_info(user):
    """Build a markdown summary of a Telegram user's details.

    Optional fields are appended only when present/true on *user*.

    Bug fixes vs. the original: "Is Restricted" now tests ``user.is_restricted``
    (it tested ``user.is_verified``, a copy-paste error), and the stray comma
    inside the Last Name code span is removed.
    """
    text = "--**User Details:**--\n"
    text += f"\n**First Name:** `{user.first_name}`"
    text += f"\n**Last Name:** `{user.last_name}`" if user.last_name else ""
    text += f"\n**User Id:** `{user.id}`"
    text += f"\n**Username:** @{user.username}" if user.username else ""
    text += f"\n**User Link:** {user.mention}" if user.username else ""
    text += f"\n**DC ID:** `{user.dc_id}`" if user.dc_id else ""
    text += f"\n**Is Deleted:** True" if user.is_deleted else ""
    text += f"\n**Is Bot:** True" if user.is_bot else ""
    text += f"\n**Is Verified:** True" if user.is_verified else ""
    text += f"\n**Is Restricted:** True" if user.is_restricted else ""
    text += f"\n**Is Scam:** True" if user.is_scam else ""
    text += f"\n**Is Fake:** True" if user.is_fake else ""
    text += f"\n**Is Support:** True" if user.is_support else ""
    text += f"\n**Language Code:** {user.language_code}" if user.language_code else ""
    text += f"\n**Status:** {user.status}" if user.status else ""
    text += f"\n\nMade by @FayasNoushad"
    return text
def chat_info(chat):
    """Build a markdown summary of a Telegram chat's details.

    Optional fields are appended only when present/true on *chat*.

    Bug fix vs. the original: "Is Restricted" now tests ``chat.is_restricted``
    (it tested ``chat.is_verified``, a copy-paste error).
    """
    text = "--**Chat Details**--\n"
    text += f"\n**Title:** `{chat.title}`"
    text += f"\n**Chat ID:** `{chat.id}`"
    text += f"\n**Username:** @{chat.username}" if chat.username else ""
    text += f"\n**Type:** `{chat.type}`"
    text += f"\n**DC ID:** `{chat.dc_id}`"
    text += f"\n**Is Verified:** True" if chat.is_verified else ""
    text += f"\n**Is Restricted:** True" if chat.is_restricted else ""
    text += f"\n**Is Creator:** True" if chat.is_creator else ""
    text += f"\n**Is Scam:** True" if chat.is_scam else ""
    text += f"\n**Is Fake:** True" if chat.is_fake else ""
    text += f"\n\nMade by @FayasNoushad"
    return text
FayasNoushad.run()
|
# Generated by Django 3.0.5 on 2020-05-04 03:38
import django.contrib.postgres.indexes
import django.contrib.postgres.search
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add profile rating and question search vector."""

    dependencies = [
        ('questions', '0004_auto_20200503_0713'),
    ]

    operations = [
        # Integer rating counter on user profiles, defaulting to zero.
        migrations.AddField(
            model_name='profile',
            name='rating',
            field=models.IntegerField(default=0),
        ),
        # Nullable Postgres full-text search vector on questions.
        migrations.AddField(
            model_name='question',
            name='search_vector',
            field=django.contrib.postgres.search.SearchVectorField(null=True),
        ),
        # GIN index so search_vector lookups stay fast.
        migrations.AddIndex(
            model_name='question',
            index=django.contrib.postgres.indexes.GinIndex(fields=['search_vector'], name='questions_q_search__34fc30_gin'),
        ),
    ]
|
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
from bisect import bisect
from collections import defaultdict
from typing import cast
from typing import Collection
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
import colorlog
import staticconf
from clusterman.aws.aws_resource_group import AWSResourceGroup
from clusterman.aws.markets import InstanceMarket
from clusterman.aws.util import RESOURCE_GROUPS
from clusterman.config import POOL_NAMESPACE
from clusterman.draining.queue import DrainingClient
from clusterman.exceptions import AllResourceGroupsAreStaleError
from clusterman.exceptions import PoolManagerError
from clusterman.exceptions import ResourceGroupError
from clusterman.interfaces.cluster_connector import AgentMetadata
from clusterman.interfaces.cluster_connector import AgentState
from clusterman.interfaces.cluster_connector import ClusterConnector
from clusterman.interfaces.resource_group import InstanceMetadata
from clusterman.interfaces.resource_group import ResourceGroup
from clusterman.monitoring_lib import get_monitoring_client
from clusterman.util import read_int_or_inf
# EC2 instance states considered live when enumerating pool nodes.
AWS_RUNNING_STATES = ('running',)
MIN_CAPACITY_PER_GROUP = 1
# Metric counter emitted when a resource group capacity change fails.
SFX_RESOURCE_GROUP_MODIFICATION_FAILED_NAME = 'clusterman.resource_group_modification_failed'
logger = colorlog.getLogger(__name__)
class ClusterNodeMetadata(NamedTuple):
    """Pairs scheduler-level agent info with cloud-level instance info for one node."""
    agent: AgentMetadata
    instance: InstanceMetadata
class PoolManager:
    """ Manages the capacity of a single cluster pool across its resource groups.

    Responsible for loading resource groups, scaling the pool's target capacity
    within configured limits, and choosing/terminating (or draining) nodes when
    capacity must shrink.

    Fix vs. the original: ``logger.warn`` (a deprecated alias) is replaced with
    ``logger.warning``, matching the rest of this class.
    """

    def __init__(
        self,
        cluster: str,
        pool: str,
        scheduler: str,
        fetch_state: bool = True,
    ) -> None:
        """ Create a PoolManager and (optionally) load live cluster state.

        :param cluster: name of the cluster this pool belongs to
        :param pool: name of the pool to manage
        :param scheduler: scheduler type the pool runs under
        :param fetch_state: if True, immediately call :meth:`reload_state`
        """
        self.cluster = cluster
        self.pool = pool
        self.scheduler = scheduler
        self.cluster_connector = ClusterConnector.load(self.cluster, self.pool, self.scheduler)
        self.pool_config = staticconf.NamespaceReaders(POOL_NAMESPACE.format(pool=self.pool, scheduler=self.scheduler))
        self.draining_enabled = self.pool_config.read_bool('draining_enabled', default=False)
        self.draining_client: Optional[DrainingClient] = DrainingClient(cluster) if self.draining_enabled else None
        self.min_capacity = self.pool_config.read_int('scaling_limits.min_capacity')
        self.max_capacity = self.pool_config.read_int('scaling_limits.max_capacity')
        self.max_tasks_to_kill = read_int_or_inf(self.pool_config, 'scaling_limits.max_tasks_to_kill')

        if fetch_state:
            self.reload_state()

    def reload_state(self) -> None:
        """ Fetch any state that may have changed behind our back, but which we do not want to change during an
        ``Autoscaler.run()``.
        """
        logger.info('Reloading cluster connector state')
        self.cluster_connector.reload_state()

        logger.info('Reloading resource groups')
        self._reload_resource_groups()

        logger.info('Recalculating non-orphan fulfilled capacity')
        self.non_orphan_fulfilled_capacity = self._calculate_non_orphan_fulfilled_capacity()

    def mark_stale(self, dry_run: bool) -> None:
        """ Mark every resource group in the pool as stale.

        :param dry_run: if True, log intended actions without modifying state
        """
        if dry_run:
            logger.warning('Running in "dry-run" mode; cluster state will not be modified')

        for group_id, group in self.resource_groups.items():
            logger.info(f'Marking {group_id} as stale!')
            try:
                group.mark_stale(dry_run)
            except NotImplementedError as e:
                # Not all resource group types support staleness; skip them.
                logger.warning(f'Skipping {group_id} because of error:')
                logger.warning(str(e))

    def modify_target_capacity(
        self,
        new_target_capacity: float,
        dry_run: bool = False,
        force: bool = False,
        prune: bool = True,
    ) -> float:
        """ Change the desired :attr:`target_capacity` of the resource groups belonging to this pool.

        Capacity changes are roughly evenly distributed across the resource groups to ensure that
        nodes are diversified in the cluster

        :param new_target_capacity: the desired target capacity for the cluster and pool
        :param dry_run: boolean indicating whether the cluster should actually be modified
        :param force: boolean indicating whether to override the scaling limits
        :param prune: if True, terminate excess fulfilled capacity after scaling
        :returns: the (set) new target capacity

        .. note:: It may take some time (up to a few minutes) for changes in the target capacity to be reflected in
           :attr:`fulfilled_capacity`.  Once the capacity has equilibrated, the fulfilled capacity and the target
           capacity may not exactly match, but the fulfilled capacity will never be under the target (for example, if
           there is no combination of nodes that evenly sum to the desired target capacity, the final fulfilled
           capacity will be slightly above the target capacity)
        """
        if dry_run:
            logger.warning('Running in "dry-run" mode; cluster state will not be modified')
        if not self.resource_groups:
            raise PoolManagerError('No resource groups available')

        orig_target_capacity = self.target_capacity
        new_target_capacity = self._constrain_target_capacity(new_target_capacity, force)
        res_group_targets = self._compute_new_resource_group_targets(new_target_capacity)
        for group_id, target in res_group_targets.items():
            try:
                self.resource_groups[group_id].modify_target_capacity(
                    target,
                    dry_run=dry_run,
                )
            except ResourceGroupError:
                # Record the failure metric but keep scaling the other groups.
                logger.critical(traceback.format_exc())
                rge_counter = get_monitoring_client().create_counter(
                    SFX_RESOURCE_GROUP_MODIFICATION_FAILED_NAME,
                    {'cluster': self.cluster, 'pool': self.pool},
                )
                rge_counter.count()
                continue

        if prune:
            self.prune_excess_fulfilled_capacity(new_target_capacity, res_group_targets, dry_run)
        logger.info(f'Target capacity for {self.pool} changed from {orig_target_capacity} to {new_target_capacity}')
        return new_target_capacity

    def prune_excess_fulfilled_capacity(
        self,
        new_target_capacity: float,
        group_targets: Optional[Mapping[str, float]] = None,
        dry_run: bool = False,
    ) -> None:
        """ Decrease the capacity in the cluster

        The number of tasks killed is limited by ``self.max_tasks_to_kill``, and the nodes are terminated in an
        order which (hopefully) reduces the impact on jobs running on the cluster.

        :param new_target_capacity: the target capacity the pool should shrink towards
        :param group_targets: a list of new resource group target_capacities; if None, use the existing
            target_capacities (this parameter is necessary in order for dry runs to work correctly)
        :param dry_run: if True, do not modify the state of the cluster, just log actions
        """

        marked_nodes_by_group = self._choose_nodes_to_prune(new_target_capacity, group_targets)

        if not dry_run:
            if self.draining_enabled:
                assert self.draining_client  # make mypy happy
                # Drain gracefully instead of terminating immediately.
                for group_id, node_metadatas in marked_nodes_by_group.items():
                    for node_metadata in node_metadatas:
                        self.draining_client.submit_instance_for_draining(
                            node_metadata.instance,
                            sender=cast(Type[AWSResourceGroup], self.resource_groups[group_id].__class__),
                        )
            else:
                for group_id, node_metadatas in marked_nodes_by_group.items():
                    self.resource_groups[group_id].terminate_instances_by_id([
                        node_metadata.instance.instance_id
                        for node_metadata in node_metadatas
                    ])

    def get_node_metadatas(self, state_filter: Optional[Collection[str]] = None) -> Sequence[ClusterNodeMetadata]:
        """ Get a list of metadata about the nodes currently in the pool

        :param state_filter: only return nodes matching a particular state ('running', 'cancelled', etc)
        :returns: a list of ClusterNodeMetadata objects
        """
        return [
            ClusterNodeMetadata(
                self.cluster_connector.get_agent_metadata(instance_metadata.ip_address),
                instance_metadata,
            )
            for group in self.resource_groups.values()
            for instance_metadata in group.get_instance_metadatas(state_filter)
        ]

    def _reload_resource_groups(self) -> None:
        """ Rebuild ``self.resource_groups`` from the pool configuration. """
        resource_groups: MutableMapping[str, ResourceGroup] = {}
        for resource_group_conf in self.pool_config.read_list('resource_groups'):
            # Each entry must be a single-key dict: {<group type>: <config>}.
            if not isinstance(resource_group_conf, dict) or len(resource_group_conf) != 1:
                logger.error(f'Malformed config: {resource_group_conf}')
                continue
            resource_group_type = list(resource_group_conf.keys())[0]
            resource_group_cls = RESOURCE_GROUPS.get(resource_group_type)
            if resource_group_cls is None:
                logger.error(f'Unknown resource group {resource_group_type}')
                continue

            resource_groups.update(resource_group_cls.load(
                cluster=self.cluster,
                pool=self.pool,
                config=list(resource_group_conf.values())[0],
            ))
        self.resource_groups = resource_groups
        logger.info(f'Loaded resource groups: {list(resource_groups)}')

    def _constrain_target_capacity(
        self,
        requested_target_capacity: float,
        force: bool = False,
    ) -> float:
        """ Signals can return arbitrary values, so make sure we don't add or remove too much capacity """

        max_weight_to_add = self.pool_config.read_int('scaling_limits.max_weight_to_add')
        max_weight_to_remove = self.pool_config.read_int('scaling_limits.max_weight_to_remove')

        requested_delta = requested_target_capacity - self.target_capacity
        if requested_delta > 0:
            delta = min(self.max_capacity - self.target_capacity, max_weight_to_add, requested_delta)
        elif requested_delta < 0:
            delta = max(self.min_capacity - self.target_capacity, -max_weight_to_remove, requested_delta)
        else:
            delta = 0

        constrained_target_capacity = self.target_capacity + delta
        if requested_delta != delta:
            if force:
                forced_target_capacity = self.target_capacity + requested_delta
                logger.warning(
                    f'Forcing target capacity to {forced_target_capacity} even though '
                    f'scaling limits would restrict to {constrained_target_capacity}.'
                )
                return forced_target_capacity
            else:
                logger.warning(
                    f'Requested target capacity {requested_target_capacity}; '
                    f'restricting to {constrained_target_capacity} due to scaling limits.'
                )
        return constrained_target_capacity

    def _choose_nodes_to_prune(
        self,
        new_target_capacity: float,
        group_targets: Optional[Mapping[str, float]],
    ) -> Mapping[str, List[ClusterNodeMetadata]]:
        """ Choose nodes to kill in order to decrease the capacity on the cluster.

        The number of tasks killed is limited by self.max_tasks_to_kill, and the nodes are terminated in an order
        which (hopefully) reduces the impact on jobs running on the cluster.

        :param new_target_capacity: The total new target capacity for the pool. Most of the time, this is equal to
            self.target_capacity, but in some situations (such as when all resource groups are stale),
            modify_target_capacity cannot make self.target_capacity equal new_target_capacity. We'd rather this method
            aim for the actual target value.
        :param group_targets: a list of new resource group target_capacities; if None, use the existing
            target_capacities (this parameter is necessary in order for dry runs to work correctly)
        :returns: a dict of resource group ids -> list of nodes to terminate
        """

        # If dry_run is True in modify_target_capacity, the resource group target_capacity values will not have changed,
        # so this function would not choose to terminate any nodes (see case #2 in the while loop below).  So
        # instead we take a list of new target capacities to use in this computation.
        #
        # We leave the option for group_targets to be None in the event that we want to call
        # prune_excess_fulfilled_capacity outside the context of a modify_target_capacity call
        if not group_targets:
            group_targets = {group_id: rg.target_capacity for group_id, rg in self.resource_groups.items()}

        curr_capacity = self.fulfilled_capacity
        # we use new_target_capacity instead of self.target_capacity here in case they are different (see docstring)
        if curr_capacity <= new_target_capacity:
            return {}

        prioritized_killable_nodes = self._get_prioritized_killable_nodes()
        logger.info('Killable instance IDs in kill order:\n{instance_ids}'.format(
            instance_ids=[node_metadata.instance.instance_id for node_metadata in prioritized_killable_nodes],
        ))
        if not prioritized_killable_nodes:
            return {}
        rem_group_capacities = {group_id: rg.fulfilled_capacity for group_id, rg in self.resource_groups.items()}

        # How much capacity is actually up and available in Mesos.
        remaining_non_orphan_capacity = self.non_orphan_fulfilled_capacity

        # Iterate through all of the idle agents and mark one at a time for removal until we reach our target capacity
        # or have reached our limit of tasks to kill.
        marked_nodes: Mapping[str, List[ClusterNodeMetadata]] = defaultdict(list)
        killed_task_count = 0
        for node_metadata in prioritized_killable_nodes:
            # Try to mark the node for removal; this could fail in a few different ways:
            #  1) The resource group the node belongs to can't be reduced further.
            #  2) Killing the node's tasks would take over the maximum number of tasks we are willing to kill.
            #  3) Killing the node would bring us under our target_capacity of non-orphaned nodes.
            # In each of the cases, the node has been removed from consideration and we jump to the next iteration.
            instance_id = node_metadata.instance.instance_id
            group_id = node_metadata.instance.group_id
            instance_weight = node_metadata.instance.weight

            new_group_capacity = rem_group_capacities[group_id] - instance_weight
            if new_group_capacity < group_targets[group_id]:  # case 1
                logger.info(
                    f'Resource group {group_id} is at target capacity; skipping {instance_id}'
                )
                continue

            if killed_task_count + node_metadata.agent.task_count > self.max_tasks_to_kill:  # case 2
                logger.info(
                    f'Killing instance {instance_id} with {node_metadata.agent.task_count} tasks would take us '
                    f'over our max_tasks_to_kill of {self.max_tasks_to_kill}. Skipping this instance.'
                )
                continue

            if node_metadata.agent.state != AgentState.ORPHANED:
                if (remaining_non_orphan_capacity - instance_weight < new_target_capacity):  # case 3
                    logger.info(
                        f'Killing instance {instance_id} with weight {instance_weight} would take us under '
                        f'our target_capacity for non-orphan boxes. Skipping this instance.'
                    )
                    continue

            logger.info(f'marking {instance_id} for termination')
            marked_nodes[group_id].append(node_metadata)
            rem_group_capacities[group_id] -= instance_weight
            curr_capacity -= instance_weight
            killed_task_count += node_metadata.agent.task_count

            if node_metadata.agent.state != AgentState.ORPHANED:
                remaining_non_orphan_capacity -= instance_weight

            if curr_capacity <= new_target_capacity:
                logger.info("Seems like we've picked enough nodes to kill; finishing")
                break

        return marked_nodes

    def _compute_new_resource_group_targets(self, new_target_capacity: float) -> Mapping[str, float]:
        """ Compute a balanced distribution of target capacities for the resource groups in the cluster

        :param new_target_capacity: the desired new target capacity that needs to be distributed
        :returns: A list of target_capacity values, sorted in order of resource groups
        """
        stale_groups = [group for group in self.resource_groups.values() if group.is_stale]
        non_stale_groups = [group for group in self.resource_groups.values() if not group.is_stale]

        # If we're scaling down the logic is identical but reversed, so we multiply everything by -1
        coeff = -1 if new_target_capacity < self.target_capacity else 1

        groups_to_change = sorted(
            non_stale_groups,
            key=lambda g: coeff * g.target_capacity,
        )

        targets_to_change = [coeff * g.target_capacity for g in groups_to_change]
        num_groups_to_change = len(groups_to_change)

        while True:
            # If any resource groups are currently above the new target "uniform" capacity, we need to recompute
            # the target while taking into account the over-supplied resource groups.  We never decrease the
            # capacity of a resource group here, so we just find the first index is above the desired target
            # and remove those from consideration.  We have to repeat this multiple times, as new resource
            # groups could be over the new "uniform" capacity after we've subtracted the overage value
            #
            # (For scaling down, apply the same logic for resource groups below the target "uniform" capacity
            # instead; i.e., groups will below the target capacity will not be increased)
            capacity_per_group, remainder = divmod(new_target_capacity, num_groups_to_change)
            pos = bisect(targets_to_change, coeff * capacity_per_group)
            residual = sum(targets_to_change[pos:num_groups_to_change])

            if residual == 0:
                for i in range(num_groups_to_change):
                    targets_to_change[i] = coeff * (capacity_per_group + (1 if i < remainder else 0))
                break

            new_target_capacity -= coeff * residual
            num_groups_to_change = pos

        targets: MutableMapping[str, float] = {}

        # For stale groups, we set target_capacity to 0. This is a noop on SpotFleetResourceGroup.
        for stale_group in stale_groups:
            targets[stale_group.id] = 0

        for group_to_change, new_target in zip(groups_to_change, targets_to_change):
            targets[group_to_change.id] = new_target / coeff

        return {group_id: targets[group_id] for group_id in self.resource_groups}

    def get_market_capacities(
        self,
        market_filter: Optional[Collection[InstanceMarket]] = None
    ) -> Mapping[InstanceMarket, float]:
        """ Return the total (fulfilled) capacities in the cluster across all resource groups

        :param market_filter: a set of :py:class:`.InstanceMarket` to filter by
        :returns: the total capacity in each of the specified markets
        """
        total_market_capacities: MutableMapping[InstanceMarket, float] = defaultdict(float)
        for group in self.resource_groups.values():
            for market, capacity in group.market_capacities.items():
                if not market_filter or market in market_filter:
                    total_market_capacities[market] += capacity

        return total_market_capacities

    def _get_prioritized_killable_nodes(self) -> List[ClusterNodeMetadata]:
        """Get a list of killable nodes in the cluster in the order in which they should be considered for
        termination.
        """
        killable_nodes = [
            metadata for metadata in self.get_node_metadatas(AWS_RUNNING_STATES)
            if self._is_node_killable(metadata)
        ]
        return self._prioritize_killable_nodes(killable_nodes)

    def _is_node_killable(self, node_metadata: ClusterNodeMetadata) -> bool:
        """ A node is killable if its state is known and its tasks fit in the kill budget. """
        if node_metadata.agent.state == AgentState.UNKNOWN:
            return False
        elif self.max_tasks_to_kill > node_metadata.agent.task_count:
            return True
        else:
            return node_metadata.agent.task_count == 0

    def _prioritize_killable_nodes(self, killable_nodes: List[ClusterNodeMetadata]) -> List[ClusterNodeMetadata]:
        """Returns killable_nodes sorted with most-killable things first."""
        def sort_key(node_metadata: ClusterNodeMetadata) -> Tuple[int, int, int, int, int]:
            # Prefer orphaned, then idle, then stale nodes; break ties by
            # fewest batch tasks, then fewest total tasks.
            return (
                0 if node_metadata.agent.state == AgentState.ORPHANED else 1,
                0 if node_metadata.agent.state == AgentState.IDLE else 1,
                0 if node_metadata.instance.is_stale else 1,
                node_metadata.agent.batch_task_count,
                node_metadata.agent.task_count,
            )
        return sorted(
            killable_nodes,
            key=sort_key,
        )

    def _calculate_non_orphan_fulfilled_capacity(self) -> float:
        """ Sum the weights of running nodes whose agent state is known and non-orphaned. """
        return sum(
            node_metadata.instance.weight
            for node_metadata in self.get_node_metadatas(AWS_RUNNING_STATES)
            if node_metadata.agent.state not in (AgentState.ORPHANED, AgentState.UNKNOWN)
        )

    @property
    def target_capacity(self) -> float:
        """ The target capacity is the *desired* weighted capacity for the given Mesos cluster pool.  There is no
        guarantee that the actual capacity will equal the target capacity.
        """
        non_stale_groups = [group for group in self.resource_groups.values() if not group.is_stale]
        if not non_stale_groups:
            raise AllResourceGroupsAreStaleError()
        return sum(group.target_capacity for group in non_stale_groups)

    @property
    def fulfilled_capacity(self) -> float:
        """ The fulfilled capacity is the *actual* weighted capacity for the given Mesos cluster pool at a particular
        point in time.  This may be equal to, above, or below the :attr:`target_capacity`, depending on the availability
        and state of AWS at the time.  In general, once the cluster has reached equilibrium, the fulfilled capacity will
        be greater than or equal to the target capacity.
        """
        return sum(group.fulfilled_capacity for group in self.resource_groups.values())
|
import sys, os
from collections import deque
from skimage.segmentation import slic
from skimage.morphology import remove_small_objects, disk, remove_small_holes, binary_dilation
from skimage.future.graph import rag_mean_color, cut_normalized
from multiprocess import Pool
sys.path.append(os.path.join(os.environ['REPO_DIR'], 'preprocess'))
import morphsnakes
sys.path.append(os.environ['REPO_DIR'] + '/utilities')
from utilities2015 import *
from metadata import *
from data_manager import *
from registration_utilities import find_contour_points
from annotation_utilities import contours_to_mask
from gui_utilities import *
from distributed_utilities import transfer_data_synced
from preprocess_utilities import DEFAULT_MINSIZE, DEFAULT_BORDER_DISSIMILARITY_PERCENTILE
# Intensity-normalization percentiles used when stretching image contrast.
VMAX_PERCENTILE = 99
VMIN_PERCENTILE = 1

# SLIC superpixel parameters.
SLIC_SIGMA = 2
SLIC_COMPACTNESS = 5
SLIC_N_SEGMENTS = 400 # 200 causes many superpixels to cross obvious boundaries. 400 is good, 20 per dimension.
SLIC_MAXITER = 100

SUPERPIXEL_SIMILARITY_SIGMA = 50. # higher value means greater affinity between superpixels.
SUPERPIXEL_MERGE_SIMILARITY_THRESH = .2
# threshold on affinity; edge whose affinity is above this value is not further split.
# if edge affinity is below this value, do further split.
# Higher value means more sensitive to slight intensity difference.
GRAPHCUT_NUM_CUTS = 20

BORDER_DISSIMILARITY_PERCENTILE = DEFAULT_BORDER_DISSIMILARITY_PERCENTILE
MIN_SIZE = DEFAULT_MINSIZE
FOREGROUND_DISSIMILARITY_THRESHOLD_MAX = 1.5
INIT_CONTOUR_COVERAGE_MAX = .5
INIT_CONTOUR_MINLEN = 50

# Morphological-snake (active contour) parameters.
MORPHSNAKE_SMOOTHING = 1
MORPHSNAKE_LAMBDA1 = 1 # importance of inside pixels
MORPHSNAKE_LAMBDA2 = 20 # importance of outside pixels
# Only relative lambda1/lambda2 matters, large = shrink, small = expand
MORPHSNAKE_MAXITER = 600
MORPHSNAKE_MINITER = 10
PIXEL_CHANGE_TERMINATE_CRITERIA = 3
AREA_CHANGE_RATIO_MAX = 1.2
AREA_CHANGE_RATIO_MIN = .1
def generate_submask_review_results(submasks_rootdir, filenames=None, which='user'):
    """Collect submask review decisions for each image under *submasks_rootdir*.

    :param submasks_rootdir: directory containing one subfolder per image
    :param filenames: images to load; defaults to every entry in the root dir
    :param which: 'user' or 'auto' review files
    :returns: dict mapping image filename -> list of boolean decisions
    """
    if filenames is None:
        filenames = os.listdir(submasks_rootdir)

    results = {}
    for img_fn in filenames:
        try:
            results[img_fn] = generate_submask_review_results_one_section(
                submasks_rootdir=submasks_rootdir, fn=img_fn, which=which)
        except Exception:
            # Best effort: a missing or unreadable review file yields no decisions.
            results[img_fn] = []
    return results
def generate_submask_review_results_one_section(submasks_rootdir, fn, which):
    """Load the review decisions (one bool per submask) for a single section.

    :param submasks_rootdir: root directory of per-image submask folders
    :param fn: image filename (also the subfolder name and file prefix)
    :param which: 'auto' for algorithm review, 'user' for human review
    :returns: list of booleans, one per submask
    :raises Exception: if *which* is neither 'auto' nor 'user'

    Fixes vs. the original: ``np.float`` was removed from numpy (use ``float``),
    and ``map()`` is lazy in Python 3, so decisions are materialized as a list.
    """
    if which == 'auto':
        review_fp = os.path.join(submasks_rootdir, fn, fn + "_submasksAlgReview.txt")
    elif which == 'user':
        review_fp = os.path.join(submasks_rootdir, fn, fn + "_submasksUserReview.txt")
    else:
        raise Exception("Argument which must be either auto or user.")
    # atleast_1d keeps single-decision files from collapsing to a 0-d scalar.
    decisions = [bool(v) for v in np.atleast_1d(np.loadtxt(review_fp, dtype=float))]
    return decisions
def load_masking_parameters(submasks_rootdir):
    """Load per-section masking parameters from <fn>_maskingParameters.txt files.

    Each parameter file contains whitespace-separated `name value` lines.

    Args:
        submasks_rootdir (str): root directory with one subdirectory per section.

    Returns:
        tuple of three dicts keyed by section name:
        (snake_lambda1, dissim_threshold, channel).
    """
    snake_lambda1_allFiles = {}
    dissim_threshold_allFiles = {}
    channel_allFiles = {}
    # Dispatch table: recognized parameter name -> (destination dict, parser).
    targets = {
        'snake_lambda1': (snake_lambda1_allFiles, int),
        'dissim_threshold': (dissim_threshold_allFiles, float),
        'channel': (channel_allFiles, int),
    }
    for fn in os.listdir(submasks_rootdir):
        params_fp = os.path.join(submasks_rootdir, fn, fn + '_maskingParameters.txt')
        with open(params_fp, 'r') as f:
            for line in f.readlines():
                param_name, val_str = line.split()
                if param_name in targets:
                    dest, parse = targets[param_name]
                    dest[fn] = parse(val_str)
    return snake_lambda1_allFiles, dissim_threshold_allFiles, channel_allFiles
def load_final_decisions(stack):
    """Load the final submask decisions for every section of a stack.

    The decision file holds one whitespace-separated row per section:
    <section index> <file name> <accepted version> <decision flags 0/1 ...>

    Args:
        stack (str): stack name, used to locate the decision file under
            THUMBNAIL_DATA_DIR.

    Returns:
        (accept_which_allFiles, submask_decisions_allFiles): both dicts keyed
        by section file name.
    """
    submask_decisions_allFiles = {}
    accept_which_allFiles = {}
    fp = os.path.join(THUMBNAIL_DATA_DIR, stack, stack + '_submasks_finalDecisions.txt')
    with open(fp, 'r') as f:
        for line in f.readlines():
            elements = line.split()
            # The leading section index is parsed (validating the row format)
            # but not otherwise used.
            sec = int(elements[0])
            fn = elements[1]
            accept_which_allFiles[fn] = int(elements[2])
            submask_decisions_allFiles[fn] = map(lambda x: bool(int(x)), elements[3:])
    return accept_which_allFiles, submask_decisions_allFiles
# def load_submask_decisions(fp):
# assert os.path.exists(fp), 'No review file'
# review_df = pandas.read_csv(fp, header=0, index_col=0)
# submask_decisions = {fn: [bool(dec) for submask_i, dec in decisions.iteritems()] for fn, decisions in review_df.iterrows()}
# return submask_decisions
def load_submasks(submasks_rootdir):
    """Load every submask image found under submasks_rootdir.

    Expects one subdirectory per section containing files named
    <fn>_submask_<i>.png (i up to two digits).

    Returns:
        dict: section file name -> list of boolean mask arrays.
    """
    submasks = defaultdict(dict)
    for fn in os.listdir(submasks_rootdir):
        submasks_dir = os.path.join(submasks_rootdir, fn)
        # Compile once per section; the pattern embeds the section name.
        pattern = re.compile(fn + '_submask_([0-9]{1,2}).png')
        for fp in os.listdir(submasks_dir):
            match_obj = pattern.match(fp)
            if match_obj is None:
                continue
            submask_ind = int(match_obj.groups()[0])
            submasks[fn][submask_ind] = imread(os.path.join(submasks_dir, fp)).astype(np.bool)
        # Flatten the per-index dict into a plain list of masks.
        submasks[fn] = [m for i, m in submasks[fn].iteritems()]
    # Freeze: further missing-key access should raise instead of inserting.
    submasks.default_factory = None
    return submasks
def auto_judge_submasks(submasks):
    """Heuristically pick the single best submask.

    Each submask receives two penalties: its rank by area (larger is better)
    and its rank by distance of the bounding-box center to the image center
    (closer is better).  The submask minimizing `area_penalty +
    0.2 * center_penalty` wins; exactly one decision is True.

    Args:
        submasks (list): boolean mask arrays, all with the same shape.

    Returns:
        list of bool: True only at the index of the chosen submask.

    Note: Python 2 only (print statements, tuple-parameter lambdas).
    """
    n = len(submasks)
    submask_areas = [np.count_nonzero(m) for m in submasks]
    rank1 = np.argsort(submask_areas)[::-1] # sort by area from large to small
    # (x, y) center of the image; shape is (rows, cols) = (height, width).
    image_center = np.r_[submasks[0].shape[1]/2, submasks[0].shape[0]/2]
    bbox_to_image_center_distance = []
    for m in submasks:
        xmin, xmax, ymin, ymax = bbox_2d(m)
        dist = np.sqrt(np.sum((image_center - ((xmin + xmax)/2, (ymin+ymax)/2))**2))
        bbox_to_image_center_distance.append(dist)
    rank2 = np.argsort(bbox_to_image_center_distance) # sort by center distance from small to large
    # Invert the permutations: r1[i] / r2[i] is the penalty (rank position)
    # of submask i under each criterion.
    r1 = np.asarray([r for r, i in sorted(enumerate(rank1), key=lambda (r,i): i)])
    r2 = np.asarray([r for r, i in sorted(enumerate(rank2), key=lambda (r,i): i)])
    print 'area', submask_areas
    print 'area penalty', r1
    print 'box_to_center_distance', bbox_to_image_center_distance
    print 'bbox center penalty', r2
    # Area dominates; center distance acts as a 0.2-weighted tie-breaker.
    rank = np.argsort(r1 + 0.2 * r2)
    best_mask_ind = rank[0]
    decisions = [False for _ in range(n)]
    decisions[best_mask_ind] = True
    return decisions
# def do_snake_worker(image, init_snake_contour_vertices, lambda1):
# submasks = snake(img=image, init_contours=[init_snake_contour_vertices], lambda1=lambda1)
# return submasks
#
# def snake_parallel(images_all_sections, init_snake_contours_all_sections, lambda1=1.):
#
# pool = Pool(NUM_CORES)
# submasks_all_sections = pool.map(lambda (img, init_cnt): do_snake_worker(img, init_cnt, lambda1),
# zip(images_all_sections, init_snake_contours_all_sections))
# pool.close()
# pool.join()
#
# return submasks_all_sections
def snake(img, init_submasks=None, init_contours=None, lambda1=MORPHSNAKE_LAMBDA1, return_masks=True, min_size=MIN_SIZE):
    """Refine submasks by evolving morphological active contours (MorphACWE).

    Exactly one of init_submasks / init_contours should be given: submasks
    are converted to contours, otherwise the given contours are used
    directly.  The image is cropped to the contours' joint bounding box plus
    a 50-px margin before evolution to limit computation.

    Args:
        img (2D np.ndarray): single-channel image.
        init_submasks (list): boolean masks to derive initial contours from.
        init_contours (list): integer (x, y) contour vertex arrays.
        lambda1: MorphACWE inside-pixel weight, relative to MORPHSNAKE_LAMBDA2
            (large = shrink, small = expand).
        return_masks (bool): if True return full-size boolean masks; otherwise
            return contours in original-image coordinates.
        min_size (int): discard final connected components smaller than this.

    Returns:
        list of masks (return_masks=True) or list of contour arrays.
    """
    # Find contours from mask.
    if init_contours is None:
        init_contours = []
        for submask in init_submasks:
            cnts = find_contour_points(submask.astype(np.int), sample_every=1)
            # find_contour_points returns {label: [contours]}; binary masks
            # keep their foreground contours under key 1.
            if 1 not in cnts or len(cnts[1]) == 0:
                continue
            for cnt in cnts[1]:
                if len(cnt) > INIT_CONTOUR_MINLEN:
                    init_contours.append(cnt)
    else:
        init_contours = [c.astype(np.int) for c in init_contours]
    # Crop to the contours' joint bounding box plus a margin.
    xmin, ymin = np.min([np.min(c, axis=0) for c in init_contours], axis=0)
    xmax, ymax = np.max([np.max(c, axis=0) for c in init_contours], axis=0)
    margin = 50
    crop_xmin = max(0, xmin - margin)
    crop_ymin = max(0, ymin - margin)
    crop_xmax = min(img.shape[1], xmax + margin)
    crop_ymax = min(img.shape[0], ymax + margin)
    cropped_img = img[crop_ymin:crop_ymax+1, crop_xmin:crop_xmax+1]
    # Shift contours into cropped-image coordinates.
    init_contours_on_cropped_img = [c-(crop_xmin, crop_ymin) for c in init_contours]
    sys.stderr.write('Extracted %d contours from mask.\n' % len(init_contours))
    # Create one initial levelset per contour.
    init_levelsets = []
    for cnt in init_contours_on_cropped_img:
        init_levelset = np.zeros_like(cropped_img, np.float)
        t = time.time()
        init_levelset[contours_to_mask([cnt], cropped_img.shape[:2])] = 1.
        sys.stderr.write('Contour to levelset: %.2f seconds\n' % (time.time() - t)) # 10s
        # Zero a 10-px frame so the levelset never touches the crop border.
        init_levelset[:10, :] = 0
        init_levelset[-10:, :] = 0
        init_levelset[:, :10] = 0
        init_levelset[:, -10:] = 0
        init_levelsets.append(init_levelset)
    #####################
    # Evolve morphsnake #
    #####################
    final_masks = []
    for levelset_ind, init_levelset in enumerate(init_levelsets):
        sys.stderr.write('\nContour %d\n' % levelset_ind)
        discard = False
        init_area = np.count_nonzero(init_levelset)
        t = time.time()
        msnake = morphsnakes.MorphACWE(cropped_img.astype(np.float), smoothing=int(MORPHSNAKE_SMOOTHING),
                                       lambda1=lambda1, lambda2=MORPHSNAKE_LAMBDA2)
        msnake.levelset = init_levelset.copy()
        # Two-slot queue so we compare against the levelset two steps back.
        dq = deque([None, None])
        for i in range(MORPHSNAKE_MAXITER):
            # At stable stage, the levelset (thus contour) will oscilate,
            # so instead of comparing to previous levelset, must compare to the one before the previous
            oneBefore_levelset = dq.popleft()
            # If less than PIXEL_CHANGE_TERMINATE_CRITERIA pixels changed, stop.
            if i > MORPHSNAKE_MINITER:
                if np.count_nonzero(msnake.levelset - oneBefore_levelset) < PIXEL_CHANGE_TERMINATE_CRITERIA:
                    break
            area = np.count_nonzero(msnake.levelset)
            if area < min_size:
                discard = True
                sys.stderr.write('Too small, stop iteration.\n')
                break
            # Nullify components that grew beyond AREA_CHANGE_RATIO_MAX of
            # the initial area; discard if the whole levelset shrank below
            # AREA_CHANGE_RATIO_MIN of it.
            labeled_mask = label(msnake.levelset.astype(np.bool))
            for l in np.unique(labeled_mask):
                if l != 0:
                    m = labeled_mask == l
                    if np.count_nonzero(m)/float(init_area) > AREA_CHANGE_RATIO_MAX:
                        msnake.levelset[m] = 0
                        sys.stderr.write('Area expands too much - nullified.\n')
            if np.count_nonzero(msnake.levelset)/float(init_area) < AREA_CHANGE_RATIO_MIN:
                discard = True
                sys.stderr.write('Area shrinks too much, stop iteration.\n')
                break
            dq.append(msnake.levelset)
            msnake.step()
        sys.stderr.write('Snake finished at iteration %d.\n' % i)
        sys.stderr.write('Snake: %.2f seconds\n' % (time.time()-t)) # 72s
        if discard:
            sys.stderr.write('Discarded.\n')
            continue
        else:
            # Handles the case that a single initial contour morphs into multiple contours
            labeled_mask = label(msnake.levelset.astype(np.bool))
            for l in np.unique(labeled_mask):
                if l != 0:
                    m = labeled_mask == l
                    if np.count_nonzero(m) > min_size:
                        final_masks.append(m)
                        sys.stderr.write('Final masks added.\n')
    if len(final_masks) == 0:
        sys.stderr.write('Snake return no valid submasks.\n')
    if return_masks:
        # Paste each cropped mask back into a full-size canvas.
        final_masks_uncropped = []
        for m in final_masks:
            uncropped_mask = np.zeros(img.shape[:2], np.bool)
            uncropped_mask[crop_ymin:crop_ymax+1, crop_xmin:crop_xmax+1] = m
            final_masks_uncropped.append(uncropped_mask)
        return final_masks_uncropped
    else:
        # Translate contours back to original-image coordinates.
        final_contours = []
        for m in final_masks:
            cnts = [cnt_on_cropped + (crop_xmin, crop_ymin) for cnt_on_cropped in find_contour_points(m)[1]]
            final_contours += cnts
        return final_contours
def get_submasks(ncut_labels, sp_dissims, dissim_thresh):
    """Generate mask for snake's initial contours.

    Superpixels whose dissimilarity to the border exceeds dissim_thresh are
    marked foreground; each connected component is dilated and returned as a
    separate submask.
    """
    # Mark foreground superpixels: those sufficiently dissimilar to the border.
    foreground = np.zeros_like(ncut_labels, np.bool)
    for sp_label, dissim in sp_dissims.iteritems():
        if dissim > dissim_thresh:
            foreground[ncut_labels == sp_label] = 1
    foreground = remove_small_objects(foreground, min_size=200)
    # Split the foreground into connected components and dilate each.
    labelmap, n_submasks = label(foreground, return_num=True)
    dilated_submasks = []
    for comp_ind in range(1, n_submasks+1):
        dilated = binary_dilation(labelmap == comp_ind, disk(10))
        dilated_submasks.append(remove_small_objects(dilated, min_size=MIN_SIZE))
    return dilated_submasks
def merge_overlapping_masks(submasks):
    """Merge submasks that overlap (directly or transitively) into one mask.

    Args:
        submasks (list): boolean mask arrays of identical shape.

    Returns:
        list: one combined (logical-OR) mask per group of mutually
        overlapping submasks.
    """
    n = len(submasks)
    # Pairwise overlap pixel counts; the upper triangle (incl. diagonal) is
    # enough to define the undirected overlap graph.
    overlap = np.zeros((n,n), np.int)
    for i in range(n):
        for j in range(i, n):
            overlap[i,j] = np.count_nonzero(np.logical_and(submasks[i], submasks[j]))
    import networkx as nx
    graph = nx.from_numpy_matrix(overlap)
    merged = []
    for nodes in nx.connected_components(graph):
        merged.append(np.any([submasks[i] for i in nodes], axis=0))
    return merged
def generate_submasks_viz(img, submasks, color=(255,0,0), linewidth=3):
    """Generate visualization of submasks.

    Draws the contour of every submask on top of the (grayscale) image.
    """
    overlay = gray2rgb(img)
    for submask_ind, submask in enumerate(submasks):
        cnts = find_contour_points(submask)
        # find_contour_points returns {label: [contours]}; binary masks keep
        # their foreground contours under key 1.
        if 1 not in cnts or len(cnts[1]) == 0:
            sys.stderr.write('Submask %d has no contour.\n' % submask_ind)
            continue
        for contour in cnts[1]:
            cv2.polylines(overlay, [contour.astype(np.int)], True, color, linewidth)
    return overlay
# plt.figure(figsize=(15,15));
# plt.imshow(viz);
# plt.show();
# def normalized_cut_superpixels(img, slic_labels):
# # Build affinity graph.
#
# t = time.time()
# sim_graph = rag_mean_color(img, slic_labels, mode='similarity', sigma=SUPERPIXEL_SIMILARITY_SIGMA)
# sys.stderr.write('Build affinity graph: %.2f seconds.\n' % (time.time() - t)) # 20 seconds
#
# edge_weights = np.array([a['weight'] for n, d in sim_graph.adjacency_iter() for a in d.itervalues()])
#
# # Recursively perform binary normalized cut.
# for _ in range(3):
# try:
#
# t = time.time()
# ncut_labels = cut_normalized(slic_labels, sim_graph, in_place=False,
# thresh=SUPERPIXEL_MERGE_SIMILARITY_THRESH,
# num_cuts=GRAPHCUT_NUM_CUTS)
#
# sys.stderr.write('Normalized Cut: %.2f seconds.\n' % (time.time() - t)) # 1.5s for SLIC_N_SEGMENTS=200 ~ O(SLIC_N_SEGMENTS**3)
# break
#
# except ArpackError as e:
# sys.stderr.write('ArpackError encountered.\n')
# continue
#
# # ncut_boundaries_viz = mark_boundaries(img, label_img=ncut_labels, background_label=-1, color=(1,0,0))
# return ncut_labels
def compute_sp_dissims_to_border(img, ncut_labels):
    """Compute each superpixel's dissimilarity to the border superpixels.

    Dissimilarity is the chi-square distance between normalized intensity
    histograms, aggregated over all border superpixels with a percentile.

    Returns:
        dict: superpixel label -> dissimilarity value.
    """
    bins = np.arange(0, 256, 5)

    def intensity_histo(sp_label):
        # Normalized intensity histogram of one superpixel.
        return np.histogram(img[ncut_labels == sp_label], bins=bins, density=True)[0].astype(np.float)

    # Superpixels touching any image edge are treated as background/border.
    background_labels = np.unique(np.concatenate([
        ncut_labels[:, 0], ncut_labels[:, -1], ncut_labels[0, :], ncut_labels[-1, :]]))
    border_histos = [intensity_histo(b) for b in background_labels]
    hist_distances = {}
    for l in np.unique(ncut_labels):
        h = intensity_histo(l)
        # A percentile is used instead of min:
        # min is too sensitive if there is a blob at the border.
        hist_distances[l] = np.percentile([chi2(h, th) for th in border_histos],
                                          BORDER_DISSIMILARITY_PERCENTILE)
    return hist_distances
def generate_dissim_viz(sp_dissims, ncut_labels):
    """Render a heatmap of superpixel dissimilarity-to-border values.

    Values are clipped at 2.0 and mapped through the jet colormap.

    Returns:
        uint8 RGB image (alpha channel discarded).
    """
    distancemap = np.zeros_like(ncut_labels, np.float)
    for sp_label, dissim in sp_dissims.iteritems():
        distancemap[ncut_labels == sp_label] = dissim
    heatmap = img_as_ubyte(plt.cm.jet(np.minimum(distancemap, 2.)))
    return heatmap[..., :3]  # discard alpha channel
def determine_dissim_threshold(sp_dissims, ncut_labels):
    """Pick a foreground/background dissimilarity threshold automatically.

    The cumulative distribution of superpixel dissimilarities is computed;
    its plateau points (sorted by second derivative) become threshold
    candidates.  Among candidates below FOREGROUND_DISSIMILARITY_THRESHOLD_MAX,
    the chosen one is the first whose resulting foreground covers a nonzero
    fraction of the image smaller than INIT_CONTOUR_COVERAGE_MAX.

    Args:
        sp_dissims (dict): superpixel label -> dissimilarity to border.
        ncut_labels (2D np.ndarray): superpixel label map.

    Returns:
        float: the chosen dissimilarity threshold.

    Note: Python 2 only (print statements, iteritems).
    """
    dissim_vals = np.asarray(sp_dissims.values())
    ticks = np.linspace(0, dissim_vals.max(), 100)
    dissim_cumdist = [np.count_nonzero(dissim_vals < th) / float(len(dissim_vals)) for th in ticks]
    # Strategy: Select the lowest threshold (most inclusive) while covers less than 50% of the image (avoid over-inclusiveness).
    grad = np.gradient(dissim_cumdist, 3)
    # Identify the leveling point of the cumulative distribution via its
    # second derivative.
    hessian = np.gradient(grad, 3)
    # Skip the first 10 ticks (near-zero thresholds), then order the rest so
    # plateau points come first.
    ticks_sorted = ticks[10:][hessian[10:].argsort()]
    ticks_sorted_reduced = ticks_sorted[ticks_sorted < FOREGROUND_DISSIMILARITY_THRESHOLD_MAX]
    # Fraction of the image covered by superpixels above each candidate.
    init_contour_coverages = np.asarray([np.sum([np.count_nonzero(ncut_labels == l)
                                        for l, d in sp_dissims.iteritems()
                                        if d > th]) / float(ncut_labels.size)
                                        for th in ticks_sorted_reduced])
    threshold_candidates = ticks_sorted_reduced[(init_contour_coverages < INIT_CONTOUR_COVERAGE_MAX) & \
                                                (init_contour_coverages > 0)]
    print threshold_candidates[:10]
    FOREGROUND_DISSIMILARITY_THRESHOLD = threshold_candidates[0]
    print 'FOREGROUND_DISSIMILARITY_THRESHOLD =', FOREGROUND_DISSIMILARITY_THRESHOLD
    return FOREGROUND_DISSIMILARITY_THRESHOLD
def contrast_stretch_image(img):
    """Stretch the contrast of a single-channel image.

    Args:
        img (2D np.ndarray): single-channel image.

    Returns:
        uint8 image rescaled so [vmin, vmax] maps to the full intensity
        range, with percentile cutoffs adapted to avoid pure black/white.
    """
    # Ignore 0 and 255 which are likely artificial background.
    interior = img[(img > 0) & (img < 255)]
    # Walk the upper percentile down (not below 80) until its value is < 255.
    vmax_perc = VMAX_PERCENTILE
    while vmax_perc > 80:
        vmax = np.percentile(interior, vmax_perc)
        if vmax < 255:
            break
        vmax_perc -= 1
    # Walk the lower percentile up (not above 20) until its value is > 0.
    vmin_perc = VMIN_PERCENTILE
    while vmin_perc < 20:
        vmin = np.percentile(interior, vmin_perc)
        if vmin > 0:
            break
        vmin_perc += 1
    sys.stderr.write('%d(%d percentile), %d(%d percentile)\n' % (vmin, vmin_perc, vmax, vmax_perc) )
    from skimage.exposure import rescale_intensity
    return img_as_ubyte(rescale_intensity(img, in_range=(vmin, vmax)))
# def contrast_stretch_and_slic_image(stack, sec):
#
# contrast_stretched_images_allChannels = contrast_stretch_image(stack, sec)
#
# # def do_slic(c):
# # t = time.time()
# # slic_labels_ = slic(images[c].astype(np.float), sigma=SLIC_SIGMA, compactness=SLIC_COMPACTNESS,
# # n_segments=SLIC_N_SEGMENTS, multichannel=False, max_iter=SLIC_MAXITER)
# # sys.stderr.write('SLIC: %.2f seconds.\n' % (time.time() - t)) # 10 seconds, iter=100, nseg=1000;
# # sp_max_std = np.percentile([images[c][slic_labels_ == l].std() for l in np.unique(slic_labels_)], 90)
# # return slic_labels_, sp_max_std
#
# # pool = Pool(3)
# # result_list = map(do_slic, range(3))
# # slic_labels_allChannel, sp_max_stds = zip(*result_list)
#
# slic_labels_allChannel = []
# sp_max_stds = []
# for c in range(3):
# t = time.time()
# slic_labels_ = slic(contrast_stretched_images_allChannels[c].astype(np.float), sigma=SLIC_SIGMA, compactness=SLIC_COMPACTNESS,
# n_segments=SLIC_N_SEGMENTS, multichannel=False, max_iter=SLIC_MAXITER)
# sys.stderr.write('SLIC: %.2f seconds.\n' % (time.time() - t)) # 10 seconds, iter=100, nseg=1000;
# slic_labels_allChannel.append(slic_labels_)
#
# sp_max_std = np.percentile([contrast_stretched_images_allChannels[c][slic_labels_ == l].std() for l in np.unique(slic_labels_)], 90)
# sys.stderr.write('sp_max_std = %.2f.\n' % sp_max_std)
# sp_max_stds.append(sp_max_std)
#
# best_channel_id = np.argmin(sp_max_stds)
# sys.stderr.write('Use channel %s.\n' % ['RED', 'GREEN', 'BLUE'][best_channel_id])
# slic_labelmap = slic_labels_allChannel[best_channel_id]
# contrast_stretched_image = images[best_channel_id]
#
# return contrast_stretched_image, slic_labelmap
|
#! /usr/bin/env python
import argparse
import json
import os
import cv2
from frontend import YOLO
from utils import draw_boxes
# Restrict CUDA to device 0; PCI_BUS_ID ordering makes the device numbering
# match the physical bus order (e.g. as reported by nvidia-smi).
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Command-line interface: config file, pretrained weights, input directory
# and a 0/1 flag controlling whether boxes are drawn onto image copies.
argparser = argparse.ArgumentParser(
    description='Infer the localization of nucleus using YOLO_v2 model')
argparser.add_argument(
    '-c',
    '--conf',
    help='path to configuration file')
argparser.add_argument(
    '-w',
    '--weights',
    help='path to pretrained weights')
argparser.add_argument(
    '-i',
    '--input',
    help='path to directory of nuclei images')
argparser.add_argument(
    '-d',
    '--draw',
    help='Flag to indicate if draw the predictions or not (0 or 1)')
def _main_(args):
    """Run YOLO v2 nucleus detection over a directory tree of images.

    Args:
        args: argparse namespace with conf (config JSON path), weights
            (pretrained weights path), input (root directory of per-nucleus
            image folders) and draw ('0'/'1' string flag).

    Side effects:
        Writes nuclei_detections.json with all detections; when draw is set,
        also writes a *_detected copy of each image with boxes drawn on it.
    """
    config_path = args.conf
    weights_path = args.weights
    nuclei_imgs_path = args.input
    draw_boxes_flg = bool(int(args.draw))
    with open(config_path) as config_buffer:
        config = json.load(config_buffer)
    ###############################
    #   Make the model
    ###############################
    yolo = YOLO(architecture=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])
    ###############################
    #   Load trained weights
    ###############################
    print(weights_path)
    yolo.load_weights(weights_path)
    ###############################
    #   Predict bounding boxes
    ###############################
    total_boxes = 0
    detections = {'image': []}
    for image_directory in sorted(os.listdir(nuclei_imgs_path)):
        if image_directory == '.DS_Store':
            continue
        image_dir = "{}/{}/{}".format(nuclei_imgs_path, image_directory, 'images')
        # List the directory once (the original called os.listdir up to three
        # times here) and skip macOS metadata entries.
        entries = [e for e in os.listdir(image_dir) if e != '.DS_Store']
        image_file_name = entries[0]
        image_full_path = "{}/{}".format(image_dir, image_file_name)
        image = cv2.imread(image_full_path)
        # BUG FIX: ndarray.shape is (rows, cols) = (height, width); the
        # original assigned these the other way round, so the JSON metadata
        # reported the width as 'height' and vice versa.  The box arithmetic
        # below is numerically unchanged: x is scaled by the width and y by
        # the height, as before.
        img_h = image.shape[0]
        img_w = image.shape[1]
        image_vars = {'path': "{}/{}/{}".format(image_directory, 'images', image_file_name),
                      'height': img_h, 'width': img_w, 'bbox': []}
        boxes = yolo.predict(image)
        for box in boxes:
            # Box coordinates are normalized center/size; convert to pixels.
            xmin = int((box.x - box.w / 2) * img_w)
            xmax = int((box.x + box.w / 2) * img_w)
            ymin = int((box.y - box.h / 2) * img_h)
            ymax = int((box.y + box.h / 2) * img_h)
            image_vars['bbox'].append(
                {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax, 'score': str(box.get_score())})
        detections['image'].append(image_vars)
        total_boxes += len(boxes)
        print("{} boxes are found on image with id {}".format(len(boxes), image_directory))
        if draw_boxes_flg:
            image = draw_boxes(image, boxes, config['model']['labels'])
            cv2.imwrite(image_full_path[:-4] + '_detected' + image_full_path[-4:], image)
    print("Total number of boxes detected: {}".format(total_boxes))
    with open('nuclei_detections.json', 'w') as outfile:
        json.dump(detections, outfile)
# Script entry point: parse CLI arguments and run inference.
if __name__ == '__main__':
    args = argparser.parse_args()
    _main_(args)
|
from flask import Blueprint, jsonify, request, abort, make_response
from services import userService
from auth import requiresAdmin
from utils.mail import sendMail
# All routes in this module are mounted under the /mail URL prefix.
mail = Blueprint("mail", __name__, url_prefix="/mail")
@mail.route("/<id>", methods=["POST"])
@requiresAdmin
def sendMailToId(id):
    """Send a mail to the single user identified by the URL id.

    Expects a JSON body with "subject" and "content"; echoes the payload
    back with status 200, or aborts with 400 for non-JSON requests.
    """
    if not request.is_json:
        return abort(400)
    form = request.get_json()
    user = userService.getUserById(id)
    sendMail(subjectText=form.get("subject"),
             contentText=form.get("content"),
             recipientsList=[user.email])
    return make_response(jsonify(form), 200)
@mail.route("/", methods=["POST"])
@requiresAdmin
def sendMailToList():
    """Send one mail to a list of users.

    Expects a JSON body with "ids" (list of user ids), "subject" and
    "content"; echoes the payload back with status 200, or aborts with 400
    for non-JSON requests.
    """
    if request.is_json:
        form = request.get_json()
        userids = form.get("ids")
        message = form.get("content")
        subject = form.get("subject")
        users = list(map(lambda userid: userService.getUserById(userid), userids))
        usersEmail = list(map(lambda user: user.email, users))
        # BUG FIX: usersEmail is already a list of addresses; wrapping it in
        # another list ([usersEmail]) passed a nested list to sendMail,
        # inconsistent with sendMailToId above which passes a flat list.
        sendMail(subjectText=subject, contentText=message, recipientsList=usersEmail)
        return make_response(jsonify(form), 200)
    return abort(400)
|
#!/usr/bin/env python
# coding=utf-8
"""
Copyright (C) 2019 * Ltd. All rights reserved.
Editor : PyCharm
File name : buttom.py
Author : Charles zhang
Created date: 2020/6/7 12:16
Description :
"""
import pygame.font
class Button:
    """A filled rectangle with a centered text label, drawn with pygame."""

    def __init__(self, setting, screen, msg):
        """Create the button centered on the screen and pre-render its label.

        Args:
            setting: game settings object (not used inside this class).
            screen: pygame display surface to draw on.
            msg: text shown on the button.
        """
        self.screen = screen
        self.screen_rect = screen.get_rect()
        # Fixed button geometry and colors.
        self.width, self.height = 200, 20
        self.button_color = (0, 255, 0)     # button fill color
        self.text_color = (200, 200, 200)   # label text color
        # SimHei font at 48 px.
        self.font = pygame.font.SysFont('SimHei', 48)
        # Button rect, centered on the screen.
        self.rect = pygame.Rect((0, 0, self.width, self.height))
        self.rect.center = self.screen_rect.center
        # The label only needs rendering once.
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Render msg to an image and center it on the button.

        font.render converts the text to an image; True enables
        antialiasing, the last two arguments are the text and background
        colors.
        """
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        """Fill the button rectangle, then blit the label on top of it."""
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
|
from django.contrib import admin
# Register your models here.
from .models import Run, Manufacturer, Shoe
class ManufacturerAdmin(admin.ModelAdmin):
    """Admin configuration for Manufacturer: name-only form, list and search."""
    fields = ("name",)
    list_display = ["name",]
    # BUG FIX: this attribute was misspelled `search_fileds`, which Django
    # silently ignores, so the admin search box never appeared for this model.
    search_fields = ["name",]
class ShoeAdmin(admin.ModelAdmin):
    """Admin configuration for Shoe: inline-editable purchase/rating/
    manufacturer columns, filterable by manufacturer, searchable by name
    and manufacturer."""
    fields = ("user","name","purchased","rating","manufacturer",)
    list_display = ["user","name","purchased","rating","manufacturer",]
    list_editable = ["purchased","rating","manufacturer",]
    list_filter = ["manufacturer",]
    search_fields = ["name","manufacturer",]
class RunAdmin(admin.ModelAdmin):
    """Admin configuration for Run: most columns inline-editable, filterable
    by shoe."""
    fields = ("user","rundate","distance","calories","shoe","notes",)
    list_display = ["user","rundate","distance","calories","shoe","notes",]
    list_editable = ["distance","calories","shoe","notes",]
    list_filter = ["shoe",]
# Wire each admin configuration to its model.
admin.site.register(Manufacturer, ManufacturerAdmin)
admin.site.register(Shoe, ShoeAdmin)
admin.site.register(Run, RunAdmin)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 4 16:32:03 2020
@author: DELL
"""
import pandas as pd
import numpy as np
# Load the reader information and rental-record tables.
data1 = pd.read_csv('ReaderInformation.csv');
data2 = pd.read_csv('ReaderRentRecode.csv');
# Inspect the data
print(data1.info())
print(data2.info())
# Join the two tables on the reader number column and save the result.
data3 = pd.merge(data1,data2,on='num')
data3.to_csv('combineData.csv',encoding='utf-8')
import traceback
import asyncio
from nats.aio.client import Client as NATS
#from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
# python3 implementation of NATS python client
async def nats(server, subject, msg, loop):
    """
    NATS client implemented via asyncio
    python3 implementation, see
    https://github.com/nats-io/nats.py

    Connects to `server` and publishes `msg` on `subject`; when `msg` is a
    list, each item is published separately.  Connection failures are printed
    and swallowed (the function simply returns).
    """
    nc = NATS()
    try:
        await nc.connect(server, loop=loop, max_reconnect_attempts=3)
    except Exception as exp:
        # Best-effort: report the failure and bail out without raising.
        print("failed to connect to server: error {}".format(str(exp)))
        traceback.print_exc()
        return
    payloads = msg if isinstance(msg, list) else [msg]
    for item in payloads:
        await nc.publish(subject, item)
    await nc.close()
|
def find_first_k_missing_positive(nums, k):
    """Return the first k positive integers missing from nums.

    Uses cyclic sort to place each value v (1..n) at index v-1 in place,
    then scans for mismatched slots; any further missing numbers are drawn
    from n+1, n+2, ... skipping values already present in the array.

    Note: nums is rearranged in place.

    Args:
        nums: list of integers (may contain duplicates / non-positives).
        k: how many missing positive integers to return.

    Returns:
        list of the k smallest missing positive integers, ascending.
    """
    n = len(nums)
    # Cyclic sort: move value v into slot v-1 when possible.
    pos = 0
    while pos < n:
        target = nums[pos] - 1
        if 0 <= target < n and nums[target] != nums[pos]:
            nums[pos], nums[target] = nums[target], nums[pos]
        else:
            pos += 1
    missing = []
    displaced = set()
    for idx, value in enumerate(nums):
        if len(missing) < k and value != idx + 1:
            missing.append(idx + 1)
            # Remember out-of-place values so they are not re-reported below.
            displaced.add(value)
    candidate = n + 1
    while len(missing) < k:
        if candidate not in displaced:
            missing.append(candidate)
        candidate += 1
    return missing
# Quick demo.  Renamed the variable from `input`, which shadowed the builtin.
nums = [2, 3, 4, 5, 6]
k = 4
print(find_first_k_missing_positive(nums, k))
class worldlist:
    """Keyword lists the bot reacts to, one list per trigger/response."""
    #====================================================================================#
    #====================== Words the bot reacts to (start) =============================#
    #====================================================================================#
    liste_Kaamelott = ['Kaamelott', 'kaamelott', 'perceval', "c'est pas faux", 'Perceval']
    liste_bot = ['BOT','bot','Bot','T6R27-H59P','BOT?','bot?','Bot?','BOT ?','bot ?','Bot ?']
    liste_non_rien = ['Non rien','non rien','NON rien']
    liste_perdu = ['42','perdu','Perdu','PERDU','perdu!','Perdu!','PERDU!']
    liste_stark = ['JARVIS','jarvis','Jarvis']
    liste_ping = ['PONG','Pong','pong']
    liste_pong = ['PING','Ping','ping']
    liste_bit = ['bit','BIT','Bit', 'bit ?', 'Bit ?','BIT ?','bit?','Bit?','BIT?']
    liste_chat = ['chat','Chat','CHAT']
    # BUG FIX: a missing comma made Python concatenate the last two literals
    # into the single element 'Hello there !!hello there !!'; every other
    # list here enumerates separate case variants, so split them.
    liste_generalkenobi = ['Hello there', 'hello there', 'Hello there !!', 'hello there !!']
    liste_obiwan = ['obi wan kenobi', 'Obi Wan Kenobi', 'Obi Wan', 'obi wan']
    #====================================================================================#
    #======================= Words the bot reacts to (end) ==============================#
    #====================================================================================#
#!/usr/bin/python
class Feature:
    """Plain record describing one detected feature."""

    def __init__(self, name, path, time, cost, label, location, box):
        """Store the feature's attributes verbatim.

        Args:
            name: feature identifier.
            path: path to the source file.
            time: time value associated with the feature.
            cost: cost value associated with the feature.
            label: class label.
            location: feature location.
            box: bounding-box location, stored as `box_location`.
        """
        self.name, self.path = name, path
        self.time, self.cost = time, cost
        self.label = label
        self.location = location
        self.box_location = box
|
from django.urls import path
from .views import *
# URL routes for the order app: class-based views for the order form,
# login/logout and listings; order_close is a plain function view.
urlpatterns = [
    path('', OrderView.as_view(), name='order'),
    path('login/', LoginLogoutView.as_view(), name='login'),
    path('orders/', Order_List.as_view(), name='list'),
    path('detail/<int:pk>', Order_Detail.as_view(), name='detail'),
    path('detail/<int:pk>/close', order_close, name='close'),
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 30 11:10:49 2019
@author: jason
@E-mail: jasoncoding13@gmail.com
@Github: jasoncoding13
"""
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.metrics import accuracy_score
from sklearn.metrics import normalized_mutual_info_score
from sklearn.model_selection import StratifiedKFold
from .preprocess import load_data, add_block_noise, add_salt_noise
from .utils import compute_image_shape, print_log
# Global matplotlib defaults for every plot produced by this module.
plt.rc('font', family='serif', size=26)
plt.rc('lines', markerfacecolor='none')
plt.rc('xtick', labelsize='x-small')
plt.rc('ytick', labelsize='x-small')
def compute_RRE(X, X_hat):
    """Compute relative reconstruction error.

    Args:
        X: array with shape of [feature size, sample size]
        X_hat: array with the same shape as X for reconstruction.

    Returns:
        float: ||X - X_hat|| / ||X|| (Frobenius norms).
    """
    residual = np.linalg.norm(X - X_hat)
    return residual / np.linalg.norm(X)
def compute_ACC_NMI(R, Y):
    """Compute accuracy and normalized mutual information.

    Args:
        R: array with shape of [number of components, sample size], the
            subspace learned from NMF.
        Y: array with shape of [sample size], the true label of raw data.

    Returns:
        (acc, nmi): floats, clustering accuracy and NMI.
    """
    # Cluster the samples in the learned subspace, one cluster per true class.
    cluster_labels = KMeans(n_clusters=len(set(Y))).fit(R.T).labels_
    Y_pred = np.zeros(Y.shape)
    for cluster in set(cluster_labels):
        members = (cluster_labels == cluster)
        # Map the whole cluster to its majority true label;
        # most_common(1) yields ([label, frequency]) so [0][0] is the label.
        Y_pred[members] = Counter(Y[members]).most_common(1)[0][0]
    acc = accuracy_score(Y, Y_pred)
    nmi = normalized_mutual_info_score(Y, Y_pred, average_method='arithmetic')
    return acc, nmi
def compute_metrics(X, X_hat, R, Y):
    """Compute RRE, ACC and NMI, log them, and return them as a dict.

    Returns:
        dict with keys 'RRE', 'ACC', 'NMI'.
    """
    acc, nmi = compute_ACC_NMI(R, Y)
    metrics = {
        'RRE': compute_RRE(X, X_hat),
        'ACC': acc,
        'NMI': nmi,
    }
    print_log('RRE: {RRE}, ACC: {ACC}, NMI:{NMI}'.format(**metrics))
    return metrics
def inspect_dictionary(D, data, reduce=None, n_cols=5):
    """Inspect the dictionary

    Plots every dictionary atom as an image, n_cols per row.

    Args:
        D: array with shape of [feature size, number of components]
        data: str, 'ORL' or 'EYB' or 'AR'.
        reduce: scale factor.
        n_cols: int, number of images shown in each row.
    """
    image_shape, reduce = compute_image_shape(data, reduce)
    n_atoms = D.shape[1]
    nrows = n_atoms // n_cols
    nrows += 1 if n_atoms % n_cols else 0
    for i in range(nrows):
        plt.figure(figsize=(16, 9))
        for j in range(n_cols):
            atom_ind = i * n_cols + j
            # BUG FIX: the last row may be partial; without this guard the
            # original indexed past D.shape[1] and raised IndexError whenever
            # n_atoms % n_cols != 0.
            if atom_ind >= n_atoms:
                break
            plt.subplot(1, n_cols, j + 1)
            plt.imshow(D[:, atom_ind].reshape(image_shape), cmap=plt.cm.gray)
            plt.axis('off')
        plt.show()
def reconstruct(X, X_noised, X_hat, data, reduce=None, ind=None, path=None):
    """Reconstruct the image

    Shows the original, noised and reconstructed version of one sample side
    by side, optionally saving the figure.

    Args:
        X: array with shape of [feature size, sample size], original images.
        X_noised: array like X, noised images.
        X_hat: array like X, reconstructed images.
        data: str, 'ORL' or 'EYB' or 'AR'.
        reduce: scale factor.
        ind: int, index of images to plot; chosen randomly when None.
        path: path to save plot.
    """
    image_shape, reduce = compute_image_shape(data, reduce)
    # BUG FIX: the original used `if not ind`, which also triggered for the
    # perfectly valid index 0 and silently replaced it with a random one.
    if ind is None:
        if data in ('ORL', 'EYB'):
            ind = np.random.randint(X.shape[1])
        elif data == 'AR':
            # AR: pick one of the noised poses (7..12) of a random subject.
            ind = np.random.randint(7, 13) + np.random.randint(200) * 13
    plt.figure(figsize=(16, 9))
    panels = [(131, X, 'Image(Original)'),
              (132, X_noised, 'Image(Noised)'),
              (133, X_hat, 'Image(Reconstructed)')]
    for subplot_ind, arr, title in panels:
        plt.subplot(subplot_ind)
        plt.imshow(arr[:, ind].reshape(image_shape), cmap=plt.cm.gray)
        plt.axis('off')
        plt.title(title)
    if path:
        plt.savefig(path)
        print_log(f'Save image at {path}')
    plt.show()
def experiment(model, data, noise, noise_param_lst, reduce=None, n_splits=5):
    """CV experiment

    Adds noise to the data, fits `model` on each CV split, saves the
    reconstruction plots, and writes a CSV of per-split metrics.

    Args:
        model: instance like `NMF(n_components=40)`.
        data: str, 'ORL' or 'EYB' or 'AR'.
        noise: 'block' or 'salt', type of noise to add.
        noise_param_lst: a list of parameters like [10, 12]
        reduce: scale factor.
        n_splits: int, number of folds for cross validation.

    Returns:
        pd.DataFrame: one row per noise parameter with mean/std and
        per-split RRE/ACC/NMI columns.

    Raises:
        ValueError: if noise is neither 'block' nor 'salt'.
    """
    X, Y = load_data(data=data, reduce=reduce)
    if noise == 'block':
        add_noise_fun = add_block_noise
    # BUG FIX: this branch compared against the misspelled 'sale', so
    # requesting 'salt' noise always raised the ValueError below.
    elif noise == 'salt':
        add_noise_fun = add_salt_noise
    else:
        raise ValueError("noise should be 'block' or 'salt'")
    # Columns: noise_param, 6 aggregate stats, then 3 metrics per split.
    _array = np.zeros([len(noise_param_lst), 7+n_splits*3])
    skf = StratifiedKFold(
        n_splits=n_splits, random_state=np.random.RandomState(13))
    module_path = os.path.dirname(__file__)
    for i, noise_param in enumerate(noise_param_lst):
        _row = [noise_param]
        X_noised = add_noise_fun(X, noise_param, data=data, reduce=reduce)
        _row_RRE = []
        _row_ACC = []
        _row_NMI = []
        for j, (train_index, _) in enumerate(skf.split(Y, Y)):
            D, R = model.fit(X_noised[:, train_index])
            X_hat = D.dot(R)
            reconstruct(
                X[:, train_index],
                X_noised[:, train_index],
                X_hat,
                data=data,
                reduce=reduce,
                path='{mp}/plots/{m}_{d}_{n}_{p}_{j}.png'.format(
                    mp=module_path,
                    m=model.__class__.__name__,
                    d=data,
                    n=noise,
                    p=noise_param,
                    j=j))
            dct_metrics = compute_metrics(
                X[:, train_index], X_hat, R, Y[train_index])
            _row_RRE.append(dct_metrics['RRE'])
            _row_ACC.append(dct_metrics['ACC'])
            _row_NMI.append(dct_metrics['NMI'])
        # Aggregate stats first, then the raw per-split values.
        _row += [np.mean(_row_RRE), np.std(_row_RRE),
                 np.mean(_row_ACC), np.std(_row_ACC),
                 np.mean(_row_NMI), np.std(_row_NMI)]
        _row += _row_RRE
        _row += _row_ACC
        _row += _row_NMI
        _array[i, :] = _row
    df_cv = pd.DataFrame(_array)
    df_cv.columns = (
        ['noise_param',
         'mean_RRE', 'std_RRE',
         'mean_ACC', 'std_ACC',
         'mean_NMI', 'std_NMI'] +
        [f'{i}_{m}'
         for i in range(n_splits)
         for m in ['RRE', 'ACC', 'NMI']])
    csv_path = '{mp}/results/{m}_{d}_{n}.csv'.format(
        mp=module_path,
        m=model.__class__.__name__,
        d=data,
        n=noise)
    df_cv.to_csv(path_or_buf=csv_path, index=False)
    print_log(f'Save cross validation result at {csv_path}')
    return df_cv
def plot_result(models, data, noise, metric, path=None):
    """Plot the CV result in line charts.

    Scans ``results/`` for CSVs matching `data`, `noise` and one of
    `models`, and plots the mean of `metric` against the noise parameter.

    Args:
        models: list of str, names of class.
        data: str, 'ORL' or 'EYB' or 'AR'.
        noise: str, 'block' or 'salt', type of noise to add.
        metric: str, 'RRE' or 'ACC' or 'NMI'.
        path: unused; kept for backward compatibility.
    """
    fig, ax = plt.subplots(figsize=(16, 9))
    module_path = os.path.dirname(__file__)
    # BUG FIX: xticklabels used to be referenced after the loop without
    # ever being defined when no result file matched (NameError).
    xticklabels = None
    for name in os.listdir(f'{module_path}/results'):
        lst_name = name.split('.')[0].split('_')
        # File names look like <Model>_<data>_<noise>.csv; the model
        # part may itself contain underscores.
        model = '_'.join(lst_name[0:-2])
        if data in name and noise in name and model in models:
            df_cv = pd.read_csv(f'{module_path}/results/{name}')
            ax.plot(df_cv[f'mean_{metric}'], label=model)
            xticklabels = df_cv['noise_param']
    ax.grid(True)
    ax.legend()
    ax.set_title(f'{metric} VS {noise} parameter of different NMFs')
    ax.set_xlabel(f'{noise} parameter')
    if xticklabels is not None:
        ax.set_xticks(range(len(xticklabels)))
        ax.set_xticklabels(xticklabels)
    ax.set_ylabel(metric)
    ax.set_yticklabels(['{:,.2%}'.format(x) for x in ax.get_yticks()])
    plt.show()
|
# coding:utf-8
import cv2 as cv
import os
import sys
__all__ = ['PictureUtil']
class PictureUtil(object):
    """Small helpers for cropping and displaying images with OpenCV."""

    def __init__(self):
        pass

    @staticmethod
    def get_picture_part(image, coordinate):
        """Crop *image* to the rectangle given by the 'x1'/'x2'/'y1'/'y2' keys."""
        rows = slice(coordinate['y1'], coordinate['y2'])
        cols = slice(coordinate['x1'], coordinate['x2'])
        return image[rows, cols]

    @staticmethod
    def show_pic(load_path, name):
        """Load the image at load_path + name and display it until a key press."""
        cv.imshow("Image", cv.imread(load_path + name))
        cv.waitKey(0)

    @staticmethod
    def show_current_crop(image, coordinate):
        """Display the crop described by *coordinate* until a key press."""
        cv.imshow("Image", PictureUtil.get_picture_part(image, coordinate))
        cv.waitKey(0)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
import os

app = Flask(__name__)
# SECURITY FIX: never hard-code the secret key in source. Read it from
# the environment; the old literal remains only as a dev fallback.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'secretkey')
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)        # ORM handle used by the models
bcrypt = Bcrypt(app)        # password hashing helper
LogMan = LoginManager(app)  # session/login management
LogMan.login_view = 'login'              # endpoint anonymous users are sent to
LogMan.login_message_category = 'info'   # flash-message category for that redirect
# Imported last (not at the top) to avoid a circular import:
# Base.routes itself imports `app` from this module.
from Base import routes
|
# encoding: UTF-8
from vnpy.trader import vtConstant
from .oandaGateway import OandaGateway
# Gateway registration constants consumed by the vn.py gateway loader.
gatewayClass = OandaGateway   # gateway implementation class
gatewayName = 'OANDA'         # internal gateway identifier
gatewayDisplayName = 'OANDA'  # name shown in the UI
gatewayType = vtConstant.GATEWAYTYPE_INTERNATIONAL  # market category
gatewayQryEnabled = False     # periodic query loop disabled for this gateway
def start():
    """Entry point: ask the player to pick the left or right door."""
    print("there are two doors ")
    print("select any door left or right")
    answer = input("<").lower()
    # BUG FIX: the left door used to be detected via "f" in the answer
    # (matching the f in "left"), so typing just "l" ended the game.
    if "l" in answer:
        bear_room()
    elif "r" in answer:
        monster_room()
    else:
        game_over("don't you know how type!!")
def bear_room():
    """The bear room: sneak past the honey-eating bear or disturb it."""
    print("your in bear room,now the bear is eating tasty honey and there is door beside the bear")
    print("select one 1)go silently 2)take off that honey")
    choice = input()
    if "1" in choice:
        diamond_room()
        return
    if "2" in choice:
        game_over("you got killed by bear")
        return
    game_over("dont you know how to type")
def monster_room():
    """The monster room: sneak behind the sleeping monster or attack it."""
    print("your in a monster room ,there is a monster sleeping infront of you ")
    print("you have two options 1)silently go through the door which is behind the monster")
    print("or 2)try to kill the monster")
    choice = input()
    if "1" in choice:
        diamond_room()
        return
    if "2" in choice:
        game_over("monster killed you")
        return
    game_over("you dont know how to type")
def diamond_room():
    """The diamond room: greed is fatal, slipping out quietly wins."""
    print("your in a diamond room full of diamonds now you have two options")
    print("choose one 1)pack all your diamonds in your bag 2)move silently to next door")
    choice = input()
    if "1" in choice:
        game_over("your dead cause all the diamonds are cursed stuff")
        return
    if "2" in choice:
        print("you win the game")
        return
    game_over("bitch")
def game_over(reason):
    """Show why the game ended, announce game over, then offer a restart."""
    print()  # blank line before the message (same output as "\n"+reason)
    print(reason)
    print("Game over!")
    play_again()
def play_again():
    """Ask whether to restart; any answer containing 'y' restarts the game."""
    print("do u wanna start again y or n")
    if "y" in input():
        start()
    else:
        exit()
# Launch the game only when run as a script, not when imported.
if __name__ == "__main__":
    start()
|
import os

# Compile the Java sources (with debug info) against the bundled Lucene jars.
s = 'javac -cp ".:lucene-6.6.0/*" -g src/*.java -d bin/'
os.system(s)
test = "HW1-Test"
# Run every bundled test case, diff our ranking output against the
# reference output, and stop at the first mismatch.
# NOTE(review): "[indexpath]" / "[yourcodedir]" look like placeholders
# that must be replaced with real paths before running — confirm.
for i in range(0, 30):
    testcase = i
    # Files created per test case: the generated .param file and the
    # trec_eval-style output our program writes.
    filetowrite = ['test.param', "my{1}-{0}.teIn".format(testcase, test)]
    with open(filetowrite[0], 'w') as f:
        f.write("indexPath=[indexpath]\n")
        f.write(
            "queryFilePath=[yourcodedir]/testcase/{1}-{0}.qry\n".format(testcase, test))
        f.write(
            "trecEvalOutputPath=[yourcodedir]/my{1}-{0}.teIn\n".format(testcase, test))
        # f.write("trecEvalOutputLength=100\n")
        # Copy the reference .param file's lines from the
        # trecEvalOutputLength line onward (everything before is skipped).
        with open("testcase/{1}-{0}.param".format(testcase, test)) as f2:
            r = f2.readlines()
            flag = 0
            for i in r:
                if "trecEvalOutputLength" in i:
                    f.write(i)
                    flag = 1
                else:
                    if flag == 1:
                        f.write(i)
    # Run the retrieval program from its bin directory.
    os.chdir("[yourcodedir]/bin")
    print(os.getcwd())
    code2 = 'java -classpath ".:[yourcodedir]/lucene-6.6.0/*" QryEval [yourcodedir]/test.param'
    os.system(code2)
    os.chdir("[yourcodedir]")
    wrong = False
    # Compare the document ids (third space-separated field) of our
    # output against the reference, line by line.
    with open("testcase/{1}-{0}.teIn".format(testcase, test)) as f1:
        r1 = f1.readlines()
        with open("my{1}-{0}.teIn".format(testcase, test)) as f2:
            r2 = f2.readlines()
            for i in range(len(r1)):
                id1 = r1[i].split(" ")[2]
                id2 = r2[i].split(" ")[2]
                if id1 != id2:
                    print("WRONG in TESTCASE {0} in line {1}".format(
                        testcase, i))
                    wrong = True
                    # Keep a copy of the reference output for inspection.
                    os.system(
                        "cp " + "testcase/{1}-{0}.teIn".format(testcase, test) + " . ")
                    break
    if wrong == False:
        print("RIGHT in testcast {0}".format(testcase))
        # Clean up the per-test intermediate files only on success.
        for file in filetowrite:
            os.system("rm {0}".format(file))
    if wrong == True:
        break
|
import tensorflow as tf
import os
import glob
import numpy as np
# Pin this process to GPU index 1 so other jobs can use the remaining GPUs.
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
def make_single_dataset(image_size=[256, 128], tfrecords_path="./mars/mars_validation_00000-of-00001.tfrecord", shuffle_buffer_size=2000, repeat=True, train=True):
    """
    Build a shuffled tf.data.Dataset of (image, label, format) tuples.
    Input:
        image_size: [height, width] the decoded images are resized to
        tfrecords_path: address to tfrecords file containing all image data
        shuffle_buffer_size: number of images to load into a memory for a shuffling operation.
        repeat (boolean): accepted for API compatibility (currently unused here)
        train (boolean): accepted for API compatibility (currently unused here)
    Returns:
        Dataset yielding (image, label, image_format)
    """
    image_size = tf.cast(image_size, tf.int32)

    def _parse_function(example_proto):
        # Schema of one serialized tf.Example record.
        features = {'image/class/label': tf.FixedLenFeature((), tf.int64, default_value=1),
                    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=""),
                    'image/height': tf.FixedLenFeature([], tf.int64),
                    'image/width': tf.FixedLenFeature([], tf.int64),
                    'image/format': tf.FixedLenFeature((), tf.string, default_value="")}
        parsed_features = tf.parse_single_example(example_proto, features)
        image_buffer = parsed_features['image/encoded']
        image = tf.image.decode_jpeg(image_buffer, channels=3)
        image = tf.cast(image, tf.float32)
        # Restore the original (height, width, 3) shape before resizing.
        S = tf.stack([tf.cast(parsed_features['image/height'], tf.int32),
                      tf.cast(parsed_features['image/width'], tf.int32), 3])
        image = tf.reshape(image, S)
        image = tf.image.convert_image_dtype(image, tf.float32)
        # BUG FIX: the resize target used to be hard-coded to [256, 128],
        # silently ignoring the image_size argument (default unchanged).
        image = tf.image.resize_images(image, image_size)
        return image, parsed_features['image/class/label'], parsed_features['image/format']

    filenames = [tfrecords_path]
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.map(_parse_function, num_parallel_calls=8)
    return dataset
def combine_dataset(batch_size, image_size, same_prob, diff_prob, repeat=True, train=True):
    """
    Zip two independently shuffled passes over the dataset into image pairs.
    Input:
        image size (int)
        batch_size (int)
        same_prob (float): probability of retaining images in same class
        diff_prob (float): probability of retaining images in different class
        train (boolean): train or validation
        repeat (boolean): repeat elements in dataset
    Return:
        zipped dataset
    """
    # Two passes over the same TFRecord file; the independent shuffles
    # make the left/right pairing effectively random.
    dataset_left = make_single_dataset(image_size, repeat=repeat, train=train)
    dataset_right = make_single_dataset(image_size, repeat=repeat, train=train)
    dataset = tf.data.Dataset.zip((dataset_left, dataset_right))
    if train:
        # Subsample pairs so same/different-class pairs are kept with the
        # requested probabilities.
        filter_func = create_filter_func(same_prob, diff_prob)
        dataset = dataset.filter(filter_func)
    if repeat:
        dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    dataset = dataset.prefetch(1)  # overlap preprocessing with training
    return dataset
def create_filter_func(same_prob, diff_prob):
    """Return a predicate that stochastically keeps (left, right) pairs.

    Pairs whose labels match are kept with probability `same_prob`,
    mismatched pairs with probability `diff_prob`.
    """
    def filter_func(left, right):
        # BUG FIX: the labels were unpacked with swapped names (left's
        # label was bound to right_label and vice versa). The symmetric
        # equality test made it harmless, but the naming was misleading.
        _, left_label, _ = left
        _, right_label, _ = right
        label_cond = tf.equal(right_label, left_label)
        different_labels = tf.fill(tf.shape(label_cond), diff_prob)
        same_labels = tf.fill(tf.shape(label_cond), same_prob)
        # Per-element keep probability, then a Bernoulli draw against it.
        weights = tf.where(label_cond, same_labels, different_labels)
        random_tensor = tf.random_uniform(shape=tf.shape(weights))
        return weights > random_tensor
    return filter_func
def model(input, reuse=False):
    """Four conv/pool/batchnorm/relu stages followed by a 4096-d sigmoid embedding.

    Args:
        input: image batch tensor (shape printed for debugging below).
        reuse: reuse the conv variable scopes (set for the second siamese branch).
    Returns:
        A (batch, 4096) embedding tensor with sigmoid activation.
    """
    print(np.shape(input))
    with tf.name_scope("model"):
        # Stage 1: 256 filters, 3x3, then 2x2 max-pool + BN + ReLU.
        with tf.variable_scope("conv1") as scope:
            net = tf.contrib.layers.conv2d(input, 256, [3, 3], activation_fn=None, padding='SAME',
                                           weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                           scope=scope, reuse=reuse)
            print(np.shape(net))
            net = tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='valid')
            net = tf.layers.batch_normalization(net, fused=True)
            net = tf.nn.relu(net)
            print(np.shape(net))
        # Stage 2: 128 filters.
        with tf.variable_scope("conv2") as scope:
            net = tf.contrib.layers.conv2d(net, 128, [3, 3], activation_fn=None, padding='SAME',
                                           weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                           scope=scope, reuse=reuse)
            print(np.shape(net))
            net = tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='valid')
            net = tf.layers.batch_normalization(net, fused=True)
            net = tf.nn.relu(net)
            print(np.shape(net))
        # Stage 3: 64 filters.
        with tf.variable_scope("conv3") as scope:
            net = tf.contrib.layers.conv2d(net, 64, [3, 3], activation_fn=None, padding='SAME',
                                           weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                           scope=scope, reuse=reuse)
            print(np.shape(net))
            net = tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='valid')
            net = tf.layers.batch_normalization(net, fused=True)
            net = tf.nn.relu(net)
            print(np.shape(net))
        # Stage 4: 32 filters.
        with tf.variable_scope("conv4") as scope:
            net = tf.contrib.layers.conv2d(net, 32, [3, 3], activation_fn=None, padding='SAME',
                                           weights_initializer=tf.contrib.layers.xavier_initializer_conv2d(),
                                           scope=scope, reuse=reuse)
            print(np.shape(net))
            net = tf.layers.max_pooling2d(net, pool_size=2, strides=2, padding='valid')
            net = tf.layers.batch_normalization(net, fused=True)
            net = tf.nn.relu(net)
            print(np.shape(net))
        # Flatten and project to the embedding space.
        net = tf.contrib.layers.flatten(net)
        print(np.shape(net))
        net = tf.layers.dense(net, 4096, activation=tf.sigmoid)
        print(np.shape(net))
        return net
def contrastive_loss(left_embed, right_embed, y, left_label, right_label, margin=0.2, use_loss=False):
    """Contrastive loss between the two embedding batches.

    Args:
        left_embed, right_embed: embedding tensors of the two branches.
        y: ignored — recomputed from the labels below (kept in the
            signature for backward compatibility).
        left_label, right_label: per-pair class labels.
        margin: minimum distance expected between dissimilar pairs.
        use_loss: if True, also register the loss with tf.losses.

    Returns:
        The scalar contrastive loss tensor.
    """
    conds = tf.equal(left_label, right_label)
    y = tf.to_float(conds)  # 1.0 for same-class pairs, 0.0 otherwise
    with tf.name_scope("contrastive_loss"):
        distance = tf.sqrt(tf.reduce_sum(tf.pow(left_embed - right_embed, 2), 1, keepdims=True))
        similarity = y * tf.square(distance)  # keep the similar label (1) close to each other
        dissimilarity = (1 - y) * tf.square(tf.maximum((margin - distance),
                                                       0))  # give penalty to dissimilar label if the distance is bigger than margin
        similarity_loss = tf.reduce_mean(dissimilarity + similarity) / 2
        if use_loss:
            tf.losses.add_loss(similarity_loss)
        # BUG FIX: the computed loss was discarded unless use_loss was
        # set; return it so callers can actually optimize it.
        return similarity_loss
def inference(left_input_image, right_input_image):
    """
    left_input_image: 3D tensor input
    right_input_image: 3D tensor input
    Returns (logits, left_features, right_features): one similarity logit
    per pair plus the two branch embeddings.
    """
    # Both branches share weights thanks to AUTO_REUSE on the scope.
    with tf.variable_scope('feature_generator', reuse=tf.AUTO_REUSE) as sc:
        # Inputs are scaled to [0, 1] and batch-normalized before the CNN.
        left_features = model(tf.layers.batch_normalization(tf.divide(left_input_image, 255.0)))
        right_features = model(tf.layers.batch_normalization(tf.divide(right_input_image, 255.0)))
        # L1 distance between embeddings feeds a 1-unit linear classifier.
        merged_features = tf.abs(tf.subtract(left_features, right_features))
        logits = tf.contrib.layers.fully_connected(merged_features, num_outputs=1, activation_fn=None)
        logits = tf.reshape(logits, [-1])
        return logits, left_features, right_features
def loss(logits, left_label, right_label):
    """Sigmoid cross-entropy between pair-similarity logits and labels.

    The target is 1 when the two labels match, 0 otherwise. The loss is
    registered with tf.losses and also returned.
    """
    label = tf.equal(left_label, right_label)
    label_float = tf.cast(label, tf.float64)
    logits = tf.cast(logits, tf.float64)
    cross_entropy_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=label_float))
    tf.losses.add_loss(cross_entropy_loss)
    # BUG FIX: previously returned None; return the tensor for callers.
    return cross_entropy_loss
|
#!/usr/bin/python
for prj in [ 1,2,3,4,5,6,7]:
print "drop database if exists net_stat_%02d; "%(prj)
print "create database net_stat_%02d CHARACTER SET 'utf8' COLLATE 'utf8_general_ci'; "%(prj)
print "use net_stat_%02d; "%(prj)
for i in range(4):
for j in range(10):
if i in [0,1]:
type_str="integer"
else:
type_str="bigint"
print """create table net_stat_ip%d_lv%d(
ip integer unsigned,
seq integer unsigned,
delay_v %s unsigned default 0,
delay_c int unsigned default 0,
lost_v %s unsigned default 0,
lost_c int unsigned default 0,
hop_v %s unsigned default 0,
hop_c int unsigned default 0,
primary key ( seq,ip )
)default charset utf8;
"""%(j, i, type_str, type_str, type_str)
for i in range(4):
for j in [110000,120000,130000,140000,150000,210000,220000,230000,310000,320000,330000,340000,350000,360000,370000,410000,420000,430000,440000,450000,460000,500000,510000,520000,530000,540000,610000,620000,630000,640000,650000,710000,810000,820000,830000]:
print """create table net_stat_prov_%06u_lv%d(
cityid integer unsigned,
comp varchar(64),
seq integer unsigned,
delay_v %s unsigned default 0,
delay_c int unsigned default 0,
lost_v %s unsigned default 0,
lost_c int unsigned default 0,
hop_v %s unsigned default 0,
hop_c int unsigned default 0,
primary key ( seq,cityid,comp)
)default charset utf8;
"""%(j,i, type_str, type_str, type_str)
|
#!/usr/bin/python3
#encoding=utf-8
import time
import random
import RPi.GPIO as GPIO
class MyGPIO():
    """Thin wrapper over RPi.GPIO for a single pin.

    A pin index of None turns every operation into a no-op, which lets
    callers declare optional pins. A class-level reference count calls
    GPIO.setmode for the first instance and GPIO.cleanup when the last
    one is garbage-collected.
    """
    init_count = 0   # number of live (non-None) MyGPIO instances
    mode = GPIO.BCM  # pin-numbering scheme shared by all instances
    def __init__(self, idx, init_with_output, init_with_high=False):
        self.idx = idx
        if idx is None:
            return
        if MyGPIO.init_count == 0:
            GPIO.setmode(MyGPIO.mode)
        MyGPIO.init_count += 1
        if init_with_output:  # only output pins can get an initial high/low level
            initial_status = GPIO.HIGH if init_with_high else GPIO.LOW
            GPIO.setup(idx, GPIO.OUT, initial=initial_status)
        else:
            GPIO.setup(idx, GPIO.IN)
    def __del__(self):
        # Release the GPIO library once the last real pin goes away.
        if self.idx is None:
            return
        MyGPIO.init_count -= 1
        if MyGPIO.init_count == 0:
            GPIO.cleanup()
    def high(self):
        # Drive the pin high (no-op for the None pin).
        if self.idx is not None:
            GPIO.output(self.idx, GPIO.HIGH)
    def low(self):
        # Drive the pin low (no-op for the None pin).
        if self.idx is not None:
            GPIO.output(self.idx, GPIO.LOW)
    def input(self):
        # Read the pin level; implicitly returns None for the None pin.
        if self.idx is not None:
            return GPIO.input(self.idx)
class MyPWM(MyGPIO):
    """PWM-driven output pin, started at 0% duty cycle."""
    def __init__(self, idx, frequency):
        super(MyPWM, self).__init__(idx, init_with_output=True, init_with_high=False)
        self.pwm = GPIO.PWM(idx, frequency)
        self.pwm.start(0)  # idle until a duty cycle is requested
    def change_duty_cycle(self, cycle):
        """Set the PWM duty cycle (0-100)."""
        self.pwm.ChangeDutyCycle(cycle)
    # BUG FIX (spelling): keep the old misspelled name as an alias so
    # existing callers (Wheel, DistanceManager) continue to work.
    chanage_duty_cycle = change_duty_cycle
class DistanceManager(object):
    """Servo-steered ultrasonic distance sensor with an optional RGB LED.

    The servo points the sensor at a requested angle; the trig/echo pins
    drive the ultrasonic measurement. All LED pins may be None (no-ops).
    """
    steer_frequency = 50  # 50HZ servo control signal
    def __init__(self, idx, idx_r=None, idx_g=None, idx_b=None, idx_trig=None, idx_ping=None):
        self.steer = MyPWM(idx, DistanceManager.steer_frequency)
        self.led_r = MyGPIO(idx_r, True)
        self.led_g = MyGPIO(idx_g, True)
        self.led_b = MyGPIO(idx_b, True)
        # Mechanical zero-offset (degrees) applied to every requested angle.
        self.delt = 12
        self.trig = MyGPIO(idx_trig, init_with_output=True, init_with_high=False)
        self.echo = MyGPIO(idx_ping, init_with_output=False)
        # Power-on self test: sweep to random angles while blinking a
        # random LED colour, then settle at 90 degrees with red on.
        leds = [self.led_r, self.led_g, self.led_b]
        times = 8
        while times > 0:
            self.turn_angle(random.randint(0, 180))
            for led in leds:
                led.low()
            leds[random.randint(0, 2)].high()
            time.sleep(0.2)
            times -= 1
        self.turn_angle(90)
        [led.low() for led in leds]
        self.led_r.high()
        time.sleep(0.2)
    def set_delt(self):
        # Placeholder for calibrating the angle offset.
        pass
    def angle2frequency(self, alpha):
        """Map an angle in degrees to the servo duty cycle (2.5 at 0°, +1 per 18°)."""
        alpha += self.delt
        if alpha < 0:
            alpha = 0
        alpha %= 180
        return round(alpha / 18. + 2.5)
    def calc_distance(self, alpha):
        """Point the sensor at *alpha* degrees and return the distance in cm."""
        self.turn_angle(alpha)
        time.sleep(0.2)  # let the servo reach the position
        # Short pulse on TRIG starts one ultrasonic measurement.
        self.trig.high()
        time.sleep(0.00001) #1us
        self.trig.low()
        #start recording: busy-wait for the echo pulse to begin...
        while self.echo.input() == 0:
            pass
        start=time.time()
        #end recording: ...and to end.
        while self.echo.input() == 1:
            pass
        end = time.time()
        # Round trip at the speed of sound (343 m/s), halved, in cm.
        return round((end - start) * 343 / 2 * 100, 2)
    def turn_angle(self, alpha):
        # Point the servo at the requested angle.
        self.steer.chanage_duty_cycle(self.angle2frequency(alpha))
class Wheel():
    """One motor channel: two direction pins plus a PWM speed-control pin."""
    def __init__(self, p_idx, n_idx, ctrl_idx=None):
        # Both direction pins start high, i.e. the motor is stopped.
        self.p = MyGPIO(p_idx, init_with_output=True, init_with_high=True)
        self.n = MyGPIO(n_idx, init_with_output=True, init_with_high=True)
        self.ctrl = MyPWM(ctrl_idx, frequency=50)
        self.speed = 0  # NOTE(review): stored but never read back — confirm
    def set_speed(self, speed):
        # Remember the requested speed; does not touch the hardware.
        self.speed = speed
    def stop(self):
        # Same level on both direction pins stops the motor.
        self.p.high()
        self.n.high()
    def forward(self, speed):  # forward direction
        self.p.high()
        self.n.low()
        self.ctrl.chanage_duty_cycle(speed)
    def reverse(self, speed):
        self.p.low()
        self.n.high()
        self.ctrl.chanage_duty_cycle(speed)
    def fire(self):
        # NOTE(review): busy-waits forever; looks unfinished — confirm intent.
        while True:
            pass
class Car(object):
    """Four-wheel-drive car with an optional servo-mounted distance sensor.

    The wheels' PWM control pins (27/22/17/4) are hard-coded here; the
    direction pin pairs come from the *_idxs arguments.
    """
    def __init__(self, w1_idxs, w2_idxs, w3_idxs, w4_idxs, steer_idx=None, idx_trig=None, idx_ping=None,
                 steer_led_r=None, steer_led_g=None, steer_led_b=None):
        self.w1 = Wheel(w1_idxs[0], w1_idxs[1], 27)
        self.w2 = Wheel(w2_idxs[0], w2_idxs[1], 22)
        self.w3 = Wheel(w3_idxs[0], w3_idxs[1], 17)
        self.w4 = Wheel(w4_idxs[0], w4_idxs[1], 4)
        # The distance sensor is optional; without it only manual driving works.
        if steer_idx is not None:
            self.distance_manager = DistanceManager(steer_idx, idx_trig=idx_trig, idx_ping=idx_ping, idx_r=steer_led_r, idx_g=steer_led_g, idx_b=steer_led_b)
    def forward(self, speed):
        # Drive all four wheels forward at the same duty cycle.
        self.w1.forward(speed)
        self.w2.forward(speed)
        self.w3.forward(speed)
        self.w4.forward(speed)
    def turn_left(self):
        # Spin in place: left-side wheels backward, right-side forward.
        speed = 90
        self.w1.reverse(speed)
        self.w3.reverse(speed)
        self.w2.forward(speed)
        self.w4.forward(speed)
    def turn_right(self):
        # Spin in place: left-side wheels forward, right-side backward.
        speed = 90
        self.w1.forward(speed)
        self.w3.forward(speed)
        self.w2.reverse(speed)
        self.w4.reverse(speed)
    def stop(self):
        self.w1.stop()
        self.w2.stop()
        self.w3.stop()
        self.w4.stop()
    def run(self):
        """Simple obstacle-avoidance loop: sweep, measure, drive or turn."""
        dist = self.distance_manager.calc_distance(145)
        print(dist)
        #return
        while True:
            min_dist = 0xFFFF  # sentinel larger than any real reading
            # Sweep angles, ending pointed straight ahead (90 degrees).
            angles = [10, 30, 50, 70, 110, 130, 150, 90]
            self.stop()
            for alpha in angles:
                dist = self.distance_manager.calc_distance(alpha)
                print("turn %d angle, dist=%d" % (alpha, dist))
                time.sleep(0.1)
                if dist < min_dist:
                    min_dist = dist
            print("min dist=%.2fcm" % (min_dist))
            if min_dist > 20:
                # Clear ahead: speed proportional to available space.
                if min_dist > 100: # 15cm
                    self.forward(90)
                elif min_dist > 50:
                    self.forward(60)
                else:
                    self.forward(30)
                time.sleep(0.8)
            else:
                # check left
                # NOTE(review): this left-side reading is never used; the
                # car always turns right here — confirm intent.
                dist = self.distance_manager.calc_distance(1)
                self.turn_right()
                time.sleep(0.5)
def main():
    """Wire up the car's GPIO pin assignments and start the autonomous loop."""
    w1 = [12, 16]
    w2 = [21, 20]
    w3 = [25, 24]
    w4 = [18, 23]
    steer_idx = 5
    steer_led_r = 13
    steer_led_g = 6
    idx_trig = 19
    idx_ping = 26
    # BUG FIX: idx_trig/idx_ping were defined but the call re-hard-coded
    # the literals 19 and 26, so editing the variables had no effect.
    car = Car(w1, w2, w3, w4, steer_idx, steer_led_r=steer_led_r,
              steer_led_g=steer_led_g, idx_trig=idx_trig, idx_ping=idx_ping)
    car.run()


if __name__ == "__main__":
    main()
|
#AIM:Compute EMIs for a loan using the numpy or scipy libraries.
import numpy as np
# assume annual interest of 7.5%
def calc_interest(interest, years, loan_value):
    """Print and return the fixed monthly payment (EMI) for an amortized loan.

    Args:
        interest: annual interest rate in percent (e.g. 7.5).
        years: loan term in years.
        loan_value: principal amount borrowed.

    Returns:
        The monthly payment amount.
    """
    annual_rate = interest / 100.0
    monthly_rate = annual_rate / 12
    number_month = years * 12
    # BUG FIX: np.pmt was removed in NumPy >= 1.20; compute the standard
    # annuity formula directly:  P*r / (1 - (1+r)^-n)
    if monthly_rate == 0:
        monthly_pay = loan_value / number_month  # interest-free edge case
    else:
        monthly_pay = loan_value * monthly_rate / (1 - (1 + monthly_rate) ** -number_month)
    # BUG FIX: the two messages used inconsistent currency symbols ($ vs ₹).
    sf1 = "Paying off a loan of ₹{:,} over {} years at"
    sf2 = "{}% interest, your monthly payment will be ₹{:,.2f}"
    print(sf1.format(loan_value, years))
    print(sf2.format(interest, monthly_pay))
    return monthly_pay
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# author: Carl time:2020/9/14
dic = {'python': 95, 'java': 99, 'c': 100}

# 1. How long is the dictionary?
print(len(dic))
# 2. Change the value stored under the key 'java' to 98.
dic["java"] = 98
# 3. Remove the key 'c'.
del dic["c"]
# 4. Add a new pair: key 'php' with value 90.
dic["php"] = 90
print(dic)
# 5. Collect all keys into a list.
list_key = list(dic.keys())
print(list_key)
# 6. Collect all values into a list.
list_value = list(dic.values())
print(list_value)
# 7. Is 'javascript' one of the keys?
print("javascript" in dic)
# 8. Sum of all values.
print(sum(dic.values()))
# 9. Largest value.
print(max(dic.values()))
# 10. Smallest value.
print(min(dic.values()))
# 11. Merge dic1 = {'php': 97} into dic.
dic1 = {'php': 97}
dic.update(dic1)
print(dic)
|
r"""
.. autofunction:: openpnm.models.phases.thermal_conductivity.water
.. autofunction:: openpnm.models.phases.thermal_conductivity.chung
.. autofunction:: openpnm.models.phases.thermal_conductivity.sato
"""
import numpy as np
import scipy as sp
def water(target, temperature='pore.temperature', salinity='pore.salinity'):
    r"""
    Calculates thermal conductivity of pure water or seawater at atmospheric
    pressure using the correlation given by Jamieson and Tudhope. Values at
    temperature higher the normal boiling temperature are calculated at the
    saturation pressure.
    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    temperature : string
        The dictionary key containing the temperature values. Temperature must
        be in Kelvin for this emperical equation to work
    salinity : string
        The dictionary key containing the salinity values. Salinity must be
        expressed in g of salt per kg of solution (ppt). If the key is
        missing on `target`, pure water (S = 0) is assumed.
    Returns
    -------
    value : NumPy ndarray
        Array containing thermal conductivity of water/seawater in [W/m.K]
    Notes
    -----
    T must be in K, and S in g of salt per kg of phase, or ppt (parts per
    thousand)
    VALIDITY: 273 < T < 453 K; 0 < S < 160 g/kg;
    ACCURACY: 3 %
    References
    ----------
    D. T. Jamieson, and J. S. Tudhope, Desalination, 8, 393-401, 1970.
    """
    T = target[temperature]
    if salinity in target.keys():
        S = target[salinity]
    else:
        S = 0
    T68 = 1.00024*T  # convert from T_90 to T_68
    SP = S/1.00472   # convert from S to S_P
    # BUG FIX: scipy.log10 (a deprecated NumPy alias) was removed from
    # modern SciPy releases; use numpy.log10 directly.
    k_sw = 0.001*(10**(np.log10(240+0.0002*SP) +
                       0.434*(2.3-(343.5+0.037*SP)/T68) *
                       ((1-T68/(647.3+0.03*SP)))**(1/3)))
    value = k_sw
    return value
def chung(target, Cv='pore.heat_capacity',
          acentric_factor='pore.acentric_factor',
          mol_weight='pore.molecular_weight',
          viscosity='pore.viscosity',
          temperature='pore.temperature',
          critical_temperature='pore.critical_temperature'):
    r"""
    Estimate low-pressure (<10 bar) gas thermal conductivity with the
    Chung et al. model from first principles at conditions of interest.

    Parameters
    ----------
    target : OpenPNM Object
        Object whose property arrays supply the inputs below; it also
        controls the length of the calculated array.
    Cv : string
        Dictionary key of the constant-volume heat capacity (J/(mol.K)).
    acentric_factor : string
        Dictionary key of the component's acentric factor.
    mol_weight : string
        Dictionary key of the molecular weight (kg/mol).
    viscosity : string
        Dictionary key of the viscosity values (Pa.s).
    temperature : string
        Dictionary key of the temperature values (K).
    critical_temperature : string
        Dictionary key of the critical temperature values (K).

    Returns
    -------
    value : NumPy ndarray
        Thermal conductivity values in [W/m.K].
    """
    R = 8.314  # universal gas constant, J/(mol.K)
    heat_capacity = target[Cv]
    omega = target[acentric_factor]
    molar_mass = target[mol_weight]
    mu = target[viscosity]
    reduced_T = target[temperature] / target[critical_temperature]
    # Dimensionless groups of the Chung correlation.
    z = 2.0 + 10.5 * reduced_T ** 2
    beta = 0.7862 - 0.7109 * omega + 1.3168 * omega ** 2
    alpha = heat_capacity / R - 3 / 2
    numerator = 0.215 + 0.28288 * alpha - 1.061 * beta + 0.26665 * z
    denominator = 0.6366 + beta * z + 1.061 * alpha * beta
    s = 1 + alpha * (numerator / denominator)
    return 3.75 * s * (mu) * R / (molar_mass)
def sato(target, mol_weight='pore.molecular_weight',
         boiling_temperature='pore.boiling_point',
         temperature='pore.temperature',
         critical_temperature='pore.critical_temperature'):
    r"""
    Estimate the thermal conductivity of a pure liquid with the Sato et
    al. model from first principles at conditions of interest.

    Parameters
    ----------
    target : OpenPNM Object
        Object whose property arrays supply the inputs below; it also
        controls the length of the calculated array.
    mol_weight : string
        Dictionary key of the molecular weight (kg/mol).
    boiling_temperature : string
        Dictionary key of the boiling temperature (K).
    temperature : string
        Dictionary key of the temperature values (K).
    critical_temperature : string
        Dictionary key of the critical temperature values (K).

    Returns
    -------
    value : NumPy ndarray
        Thermal conductivity values in [W/m.K].
    """
    reduced_T = target[temperature] / target[critical_temperature]
    reduced_Tb = target[boiling_temperature] / target[critical_temperature]
    molar_mass_g = target[mol_weight] * 1e3  # kg/mol -> g/mol
    prefactor = 1.11 / molar_mass_g ** 0.5
    numerator = 3 + 20 * (1 - reduced_T) ** (2 / 3)
    denominator = 3 + 20 * (1 - reduced_Tb) ** (2 / 3)
    return prefactor * numerator / denominator
|
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
# from sumy.summarizers.lsa import LsaSummarizer as Summarizer
# from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
#from sumy.summarizers.kl import KLSummarizer as Summarizer
from sumy.summarizers.text_rank import TextRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from utils.browse_files import get_filenames_recursively
# from utils.browse_files import get_filenames_recursively
#
#
# Directory holding the source documents, grouped by thematic/cluster.
input_path = "../resources/corpora/sample_texts/machine_reading/summarization/Corpus_RPM2/Corpus_RPM2_documents"
LANGUAGE = "french"
SENTENCES_COUNT = 7
# Summarize each (thematic, cluster) document group with TextRank and
# write the summary where the ROUGE evaluation expects it.
for thematic in range(1, 21):
    for cluster in range(1, 3):
        # NOTE(review): "\T" mixes a backslash into the otherwise
        # forward-slash path, so the prefix match below likely only
        # works on Windows — confirm on the target platform.
        file_start = input_path + "\T" + str(thematic).zfill(2) + "_C" + str(cluster)
        print(file_start)
        clusterContent = ""
        # load content from input file(s) belonging to this cluster
        filenames = get_filenames_recursively(input_path)
        files_content = {}
        for filename in filenames:
            if filename.startswith(file_start) and filename.endswith('.txt'):
                with open(filename, "r") as current_file:
                    clusterContent = clusterContent + current_file.read() + "\n\n\n"
        # print(clusterContent)
        # print("\n\n\n==================\n\n\n")
        parser = PlaintextParser.from_string(clusterContent, Tokenizer(LANGUAGE))
        stemmer = Stemmer(LANGUAGE)
        summarizer = Summarizer(stemmer)
        summarizer.stop_words = get_stop_words(LANGUAGE)
        # Concatenate the top-ranked sentences into the summary text.
        summary = ""
        for sentence in summarizer(parser.document, SENTENCES_COUNT):
            summary = summary + str(sentence) + "\n"
        summary_output_filepath = "../machine_reading/Rouge/test-summarization/system\T" + str(thematic).zfill(2) + "C" + str(cluster) + "_summy-textr.txt"
        with open(summary_output_filepath, "w") as text_file:
            text_file.write(summary)
|
"""
@author: shoo Wang
@contact: wangsuoo@foxmail.com
@file: demo02.py
@time: 2020/5/6 0006
"""
import requests as rq
from bs4 import BeautifulSoup as Bs
import pandas as pd
import numpy as np
# 获取数据,就是通过访问网页,把他的html源代码拿过来
def getData(resLoc):
    """Fetch the page at *resLoc* and return its HTML decoded as UTF-8."""
    response = rq.get(resLoc)
    response.encoding = 'utf-8'
    return response.text
# 最关键的部分: 数据处理,我们的目标是将文本格式的 html 网页转化为表格的形式;
def dataProcessing(html, num):
    """Turn the ranking page's HTML table into a pandas DataFrame.

    Args:
        html: page source containing the ranking <table>.
        num: number of <tr> rows to read (the header row included).
    Returns:
        DataFrame whose columns are the four fixed header labels plus
        the indicator names taken from the header's <option> drop-down.
    """
    soup = Bs(html, features='lxml')
    rows = soup.table.find_all('tr', limit=num, recursive=True)
    header_row, body_rows = rows[0], rows[1:]

    # Body: one list of raw cell contents per university row.
    universityList = []
    for row in body_rows:
        cells = [td.contents for td in row.find_all('td')]
        # The university-name cell wraps its text in a <div>; unwrap it.
        cells[1] = cells[1][0].contents
        universityList.append(cells)

    # Header: the first four <th> cells are plain labels...
    header_cells = [th.contents for th in header_row.find_all('th', limit=4)]
    # ...and the remaining indicator names live in the drop-down <option>s.
    for option in header_row.find_all('option', recursive=True):
        header_cells.append(option.contents)

    # Flatten the nested one-element lists into plain strings
    # (DataFrame needs a 2-D structure, not a 3-D one).
    columns = ["".join(cell) for cell in header_cells]
    table = [["".join(attr) for attr in university]
             for university in universityList]
    return pd.DataFrame(np.array(table), columns=columns)
# 显示所有列
# pd.set_option('display.max_columns', None)
# 显示所有行
# pd.set_option('display.max_rows', None)
# 设置value的显示长度为100,默认为50
# pd.set_option('max_colwidth', 100)
# print(pd_universityList)
# 负责保存数据到本地磁盘
def saveData(data):
    """Persist the ranking table to 'university.csv' and 'university.xlsx'."""
    output_stem = 'university'
    data.to_csv(output_stem + '.csv', index=False)
    data.to_excel(output_stem + '.xlsx', index=False)
def main(num):
    """Crawl the top *num* university rankings and save them to disk.

    The source site lists at most 550 universities.  BUG FIX: the original
    check used ``num >= 550``, which wrongly rejected exactly 550; use ``>``
    so the documented maximum is accepted.
    """
    if num > 550:
        print("数量不能大于550")
        return
    url = 'http://zuihaodaxue.com/zuihaodaxuepaiming2019.html'
    # Fetch the raw ranking page HTML.
    text = getData(url)
    # num + 1 — presumably offsets the header row; confirm against dataProcessing.
    universityList = dataProcessing(text, num + 1)
    saveData(universityList)
    print("文件保存成功!")
# Demo: crawl the top-10 universities, but only when run as a script
# (guard keeps the crawl from firing on import).
if __name__ == "__main__":
    main(10)
|
# -*- coding: utf-8 -*-
from django.contrib import admin
from django import forms
from nested_inline.admin import NestedStackedInline, NestedModelAdmin
from .models import ProgramInterface, ProgramArgument, ProgramArgumentField, Program, ReferenceDescriptor, \
ProgramVersion
from .utils import get_customer_available_content_types
class ProgramArgumentFieldInline(NestedStackedInline):
    """Inline editor for the fields of a program argument (innermost nesting level)."""
    model = ProgramArgumentField
    extra = 1  # show one blank extra form
    fk_name = 'program_argument'
    # variable_definition is hidden from the admin form.
    exclude = ('variable_definition',)
class ContentTypeHolderForm(forms.ModelForm):
    """ModelForm whose content_type field only offers customer-available types."""
    content_type = forms.ModelChoiceField(
        queryset=get_customer_available_content_types())
class ProgramArgumentInline(NestedStackedInline):
    """Inline editor for a program interface's arguments, with nested fields."""
    model = ProgramArgument
    form = ContentTypeHolderForm  # restricts content_type choices
    extra = 1
    fk_name = 'program_interface'
    inlines = [ProgramArgumentFieldInline]  # third nesting level
    # variable_definition is hidden from the admin form.
    exclude = ('variable_definition',)
class ProgramInline(NestedStackedInline):
    """Inline editor for programs attached to a program interface."""
    model = Program
    extra = 1
class ContentTypeFilter(admin.RelatedFieldListFilter):
    """Sidebar list filter that only offers customer-available content types."""

    def field_choices(self, field, request, model_admin):
        # Limit the filter's choice list; no blank ("All"-style) entry.
        return field.get_choices(
            include_blank=False,
            limit_choices_to={'id__in': get_customer_available_content_types()}
        )
class ProgramInterfaceAdmin(NestedModelAdmin):
    """Admin for ProgramInterface with nested argument and program editing."""
    model = ProgramInterface
    inlines = [ProgramArgumentInline, ProgramInline]
    list_filter = (
        ('arguments__content_type', ContentTypeFilter),
    )
class ProgramAdmin(admin.ModelAdmin):
    """Admin for Program, filterable by interface and argument content type."""
    model = Program
    list_filter = (
        'program_interface',
        ('program_interface__arguments__content_type', ContentTypeFilter),
    )
class ProgramVersionAdmin(admin.ModelAdmin):
    """Read-mostly admin for program versions."""
    model = ProgramVersion
    list_filter = (
        'program',
        'program__program_interface',
    )
    readonly_fields = ('program', 'entry_point')

    def has_add_permission(self, request):
        # Disable manual creation of versions through the admin.
        return False
class ReferenceDescriptorAdmin(admin.ModelAdmin):
    """Admin for ReferenceDescriptor; content_type choices are restricted by the form."""
    model = ReferenceDescriptor
    form = ContentTypeHolderForm
# Wire each business-logic model to its customized admin class.
admin.site.register(ProgramInterface, ProgramInterfaceAdmin)
admin.site.register(Program, ProgramAdmin)
admin.site.register(ProgramVersion, ProgramVersionAdmin)
admin.site.register(ReferenceDescriptor, ReferenceDescriptorAdmin)
# register all app models for debug purposes
# from django.apps import apps
# for model in apps.get_app_config('business_logic').get_models():
# try:
# admin.site.register(model)
# except admin.sites.AlreadyRegistered:
# pass
|
# Generated by Django 2.0.7 on 2018-08-12 12:52
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Give senseur.date_stocker a default of the current time."""

    dependencies = [
        ('cowapp', '0004_delete_monthlyweatherbycity'),
    ]

    operations = [
        migrations.AlterField(
            model_name='senseur',
            name='date_stocker',
            # Callable default: evaluated at each row insert, not at migration time.
            field=models.TimeField(default=django.utils.timezone.now),
        ),
    ]
|
from trawler.browsers.base import BrowserBase
class BrowseStackOverFlow(BrowserBase):
    """Browse search results on stackoverflow.com.

    Usage::

        stack = BrowseStackOverFlow(kw="invaana", max_page=1)
        stack.search()
        stack.data   # the collected results
    """

    def __init__(self, kw=None, max_page=1, method='selenium-chrome', driver=None):
        super(BrowseStackOverFlow, self).__init__(
            kw=kw, max_page=max_page, method=method, driver=driver)
        # Site endpoints and CSS selectors consumed by the generic crawler base.
        self._BASE_URL = 'https://stackoverflow.com'
        self._SEARCH_QS = '/search?q='
        self._SEARCH_URL = self._BASE_URL + self._SEARCH_QS + kw
        self._SEARCH_MAIN_CSS_SELECTOR = '.result-link a,.summary h3 a'
        self._SEARCH_KEYWORDS_CSS_SELECTOR = None
        self._SEARCH_NEXT_QS = '&page='
        self._SEARCH_NEXT_CSS_SELECTOR = '.pager.fl a[rel="next"]'
        self._DEFAULT_SCRAPE_METHOD = method
class BrowseStackOverFlowDocumentation(BrowserBase):
    """Browse the documentation topic pages for a tag on stackoverflow.com."""

    def __init__(self, kw=None, max_page=1, method='selenium-chrome', driver=None):
        super(BrowseStackOverFlowDocumentation, self).__init__(
            kw=kw, max_page=max_page, method=method, driver=driver)
        # Endpoints/selectors for the documentation section; the tag is part
        # of the path rather than a query-string parameter.
        self._BASE_URL = 'https://stackoverflow.com'
        self._SEARCH_QS = "/documentation/%s/topics/" % kw
        self._SEARCH_URL = self._BASE_URL + self._SEARCH_QS
        self._SEARCH_TERM = kw
        self._SEARCH_MAIN_CSS_SELECTOR = '.doc-topic-link'
        self._SEARCH_KEYWORDS_CSS_SELECTOR = None
        self._SEARCH_NEXT_QS = '&page='
        self._SEARCH_NEXT_CSS_SELECTOR = '.pager a[rel="next"]'
        self._DEFAULT_SCRAPE_METHOD = method
|
import random
import matplotlib.pyplot as plt
def normal_initialization(population_size, val_range):
    """Draw *population_size* uniform random ints from [val_range[0], val_range[1]]."""
    low, high = val_range[0], val_range[1]
    return [random.randint(low, high) for _ in range(population_size)]
def special_initialization(population_size, val_range, segments):
    """Initialize a population with values spread evenly across *segments*
    sub-ranges of [0, val_range[1]].

    BUG FIX: integer division dropped the remainder, so when
    ``population_size % segments != 0`` the returned population was short.
    The last segment now absorbs the remainder; behavior for divisible
    sizes (including all callers in this file) is unchanged.
    """
    population = []
    pop_segment = population_size // segments
    range_val = val_range[1] // segments
    min_val = 0
    max_val = range_val
    for segment in range(segments):
        count = pop_segment
        if segment == segments - 1:
            # Absorb the remainder so len(result) == population_size.
            count = population_size - pop_segment * (segments - 1)
        for _ in range(count):
            population.append(random.randint(min_val, max_val))
        min_val += range_val
        max_val += range_val
    return population
def main():
    """Plot a comparison of the spread of three initialization schemes."""
    population_size = 50
    val_range = [0, 1000000]
    # Re-seed before each run so all three schemes draw from the same
    # random stream and are directly comparable.
    random.seed(1000)
    population1 = normal_initialization(population_size, val_range)
    random.seed(1000)
    population2 = special_initialization(population_size, val_range, 5)
    random.seed(1000)
    population3 = special_initialization(population_size, val_range, 10)
    # Constant y-values: one horizontal band per initialization scheme.
    x1 = [0 for i in range(population_size)]
    x2 = [1 for i in range(population_size)]
    x3 = [2 for i in range(population_size)]
    plt.scatter(population1, x1)
    plt.scatter(population2, x2)
    plt.scatter(population3, x3)
    plt.ylabel("Initialization Process")
    plt.xlabel("Position Value")
    plt.title("Comparing Initialization Process")
    # NOTE(review): no file extension — matplotlib falls back to its default format.
    plt.savefig("Comparing Initialization Process",bbox_inches='tight')
    plt.show()

if __name__=="__main__":
    main()
def Fabs(a):
    """Return the absolute value of *a* (hand-rolled abs)."""
    return -a if a < 0 else a
def solve(listB, listG):
    """Greedy pairing: a boy and a girl pair when skills differ by at most 1.

    Expects both lists sorted ascending.  Matched entries are overwritten
    in place with a large sentinel so they cannot match again.
    """
    USED = 10000  # sentinel far outside any valid skill value
    pairs = 0
    for bi in range(len(listB)):
        for gi in range(len(listG)):
            if abs(listB[bi] - listG[gi]) <= 1:
                pairs += 1
                listG[gi] = USED
                listB[bi] = USED
    return pairs
# Read the input: a count line followed by the values, for boys then girls.
listB=[]
listG=[]
b=input()  # boys count line (unused — the list length is used instead)
listB=list(map(int, input().split()))
g=input()  # girls count line (unused)
listG=list(map(int, input().split()))
# solve() expects both lists sorted ascending.
listB=sorted(listB)
listG=sorted(listG)
print(solve(listB, listG))
__author__ = "Arnaud Girardin &Alexandre Laplante-Turpin& Antoine Delbast"
import csv
class Map:
    """Grid map loaded from a CSV file, plus a fixed set of spawn points."""

    def __init__(self, path):
        # map: list of rows, each a list of cell strings from the CSV.
        self.map = []
        # startingPoint: spawn (col, row) coordinates derived from the map size.
        self.startingPoint = []
        self.numRow = 0
        self.numCol = 0
        self.generateMapFromFile(path)

    def generateMapFromFile(self, path):
        """Read the CSV map at *path* and compute the spawn points."""
        with open(path, 'r') as csv_file:
            for row in csv.reader(csv_file):
                self.numCol = len(row)  # width of the last row read
                self.map.append(row)
                self.numRow += 1
        # NOTE(review): the offsets (4, 52, 9, ...) look tuned to one
        # specific map size — confirm they stay in bounds for other maps.
        spawn_points = [
            (4, 4),
            (self.numCol - 4, self.numRow - 4),
            (self.numCol - 4, int((self.numRow / 2) - 4)),
            (self.numCol - 4, 4),
            (int((self.numCol / 2) - 5), self.numRow - 4),
            (52, 52),
            (int(self.numCol / 2 - 4), 4),
            (4, self.numRow - 9),
            (15, int((self.numRow / 2) + 3)),
        ]
        self.startingPoint.extend(spawn_points)
|
#!/usr/bin/python
import sys
import socket
import os
def load_file(file):
    """Return the full text content of *file*.

    BUG FIX: the original opened the file without closing it; a context
    manager closes the handle even when reading raises.
    """
    with open(file, 'r') as f:
        return f.read()
def receive(client):
    """Read an HTTP request from *client* until the blank-line terminator.

    Stops when the header terminator (CRLF CRLF) arrives or the peer closes
    the connection.  BUG FIX: the original compared the decoded chunk to
    ``None``, but a closed socket yields an empty string, so the loop never
    terminated; test falsiness instead.
    """
    output = ''
    while True:
        msg = client.recv(2048).decode('utf-8')
        if not msg:  # peer closed the connection
            break
        output += msg
        if output[-4:] == '\r\n\r\n':
            break
    return output
def parse(req):
    """Extract the request path (without the leading '/') from an HTTP request line."""
    method, target, _rest = req.split(' ', 2)
    return target.split('/', 1)[1]
def response(req):
    """Build the HTTP/1.1 response string for request *req*.

    Serves only .html/.htm files: other extensions get 403, missing files
    get 404.  BUG FIX: the 200 response omitted the blank line that
    separates headers from the body, producing a malformed response.
    """
    path = parse(req)
    # SECURITY(review): *path* comes straight from the client and is not
    # sanitized — a '../' sequence can escape the serving directory.
    status_line = 'HTTP/1.1 '
    ext = path.split('.')[-1]
    if ext != 'html' and ext != 'htm':
        return status_line + '403 Forbidden\r\nConnection: Close\r\n\r\n'
    if not os.path.exists(path):
        return status_line + '404 Not Found\r\nConnection: Close\r\n\r\n'
    body = load_file(path)
    return (status_line + '200 OK\r\nContent-Length: ' + str(len(body)) +
            '\r\nConnection: close\r\nContent-Type: text/html; charset=UTF-8'
            '\r\n\r\n' + body)
def connection(port):
    """Accept connections forever on *port*, serving one request per client.

    Improvements over the original: SO_REUSEADDR so the server restarts
    without 'address in use' errors; sendall() because plain send() may
    write only part of the response; the client socket is closed even if
    request handling raises.
    """
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('', port))
    server.listen(5)
    print('The server is ready to receive')
    while True:
        clientsocket, address = server.accept()
        print(f'Connection from {address} has been established!')
        try:
            request = receive(clientsocket)
            resp = response(request)
            clientsocket.sendall(bytes(resp, 'utf-8'))
        finally:
            clientsocket.close()
if __name__ == '__main__':
    # Port comes from the command line: python server.py <port>
    connection(int(sys.argv[1]))
class Solution(object):
    """Convert a BST into a sorted, circular doubly-linked list, in place."""

    def treeToDoublyList(self, root):
        """Return the head (smallest node) of the circular list, or the
        original (empty) root when there is no tree.

        ``left`` serves as the predecessor pointer and ``right`` as the
        successor; after the in-order walk the head and tail are linked to
        close the circle.
        """
        if not root:
            return root
        head = tail = None

        def inorder(node):
            nonlocal head, tail
            if node is None:
                return
            inorder(node.left)
            if tail is None:
                head = node          # smallest element becomes the head
            else:
                tail.right = node    # append node after the current tail
                node.left = tail
            tail = node
            inorder(node.right)

        inorder(root)
        head.left = tail             # close the circle
        tail.right = head
        return head
|
# Generated by Django 3.1.7 on 2021-04-03 07:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop the HOD link from Department and give Staff a one-to-one department."""

    dependencies = [
        ('college', '0004_auto_20210403_0212'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='department',
            name='dep_hod',
        ),
        migrations.AddField(
            model_name='staff',
            # NOTE(review): 'deptartment_id' is misspelled; it must match the
            # model field name, so fixing it needs a follow-up migration.
            name='deptartment_id',
            field=models.OneToOneField(default='', on_delete=django.db.models.deletion.CASCADE, to='college.department'),
            preserve_default=False,  # default='' is only for backfilling existing rows
        ),
    ]
|
from django.shortcuts import render
from . import models
# Create your views here.
def articles_list(request):
    """Render every article ordered by date (oldest first)."""
    context = {'art': models.Article.objects.all().order_by('date')}
    return render(request, 'articles/articleslist.html', context)
|
import numpy as np
import pandas as pd
import pytest
from tti_explorer import Case, Contacts
from tti_explorer.scenario import get_monte_carlo_factors, run_scenario, STATS_KEYS, scale_results, results_table
from tti_explorer.strategies import registry, RETURN_KEYS
def test_get_monte_carlo_factors():
    """Smoke-test the Monte-Carlo scaling factors for a single simulated case."""
    monte_carlo_factor, r_monte_carlo_factor = get_monte_carlo_factors(1, 0.125, 0.125)
    assert monte_carlo_factor == 1.0
    assert r_monte_carlo_factor == 2.0
def test_run_scenario():
    """run_scenario should aggregate strategy outputs into mean/std columns."""
    case = Case(
        covid=True,
        symptomatic=True,
        under18=False,
        day_noticed_symptoms=1,
        inf_profile=[0.5, 0.5],
    )
    contacts = Contacts(1, np.ones((1,2), dtype=int), np.ones((1,2), dtype=int), np.ones((1,2), dtype=int))

    # Stand-in strategy that returns a fixed value for every RETURN_KEYS entry,
    # so the expected aggregates are known exactly.
    def mock_strategy(*args, **kwargs):
        return {
            RETURN_KEYS.base_r: 1 if case.covid else np.nan,
            RETURN_KEYS.reduced_r: 2 if case.covid else np.nan,
            RETURN_KEYS.man_trace: 3,
            RETURN_KEYS.app_trace: 4,
            RETURN_KEYS.tests: 5,
            RETURN_KEYS.quarantine: 6,
            RETURN_KEYS.covid: case.covid,
            RETURN_KEYS.symptomatic: case.symptomatic,
            RETURN_KEYS.tested: True,
            RETURN_KEYS.secondary_infections: 7,
            RETURN_KEYS.cases_prevented_social_distancing: 8,
            RETURN_KEYS.cases_prevented_symptom_isolating: 9,
            RETURN_KEYS.cases_prevented_contact_tracing: 10,
            RETURN_KEYS.fractional_r: 11,
        }

    scenario_output = run_scenario([(case, contacts)], mock_strategy, np.random.RandomState, {})

    assert STATS_KEYS.mean in scenario_output.columns
    assert STATS_KEYS.std in scenario_output.columns
    # With a single case, the mean equals the strategy's fixed value.
    assert scenario_output.loc[RETURN_KEYS.base_r][STATS_KEYS.mean] == 1
    assert scenario_output.loc[RETURN_KEYS.reduced_r][STATS_KEYS.mean] == 2
def test_scale_results():
    """scale_results should apply per-key scaling: R stds by the R Monte-Carlo
    factor, percentages by 100, per-person counts by nppl (stds also by the
    Monte-Carlo factor)."""
    mock_results = pd.DataFrame({
        STATS_KEYS.mean: [1, 1, 1],
        STATS_KEYS.std: [1, 1, 1],
    }, index=[RETURN_KEYS.base_r, RETURN_KEYS.secondary_infections, RETURN_KEYS.percent_primary_missed])
    monte_carlo_factor=2
    r_monte_carlo_factor=3
    nppl=10

    scaled_results = scale_results(mock_results, monte_carlo_factor, r_monte_carlo_factor, nppl)

    # R means are unscaled; only their stds get the R Monte-Carlo factor.
    assert scaled_results.loc[RETURN_KEYS.base_r][STATS_KEYS.mean] == \
        mock_results.loc[RETURN_KEYS.base_r][STATS_KEYS.mean]
    assert scaled_results.loc[RETURN_KEYS.base_r][STATS_KEYS.std] == \
        mock_results.loc[RETURN_KEYS.base_r][STATS_KEYS.std] * r_monte_carlo_factor
    # Percentages are converted from fractions to percent.
    assert scaled_results.loc[RETURN_KEYS.percent_primary_missed][STATS_KEYS.mean] == \
        mock_results.loc[RETURN_KEYS.percent_primary_missed][STATS_KEYS.mean] * 100
    assert scaled_results.loc[RETURN_KEYS.percent_primary_missed][STATS_KEYS.std] == \
        mock_results.loc[RETURN_KEYS.percent_primary_missed][STATS_KEYS.std] * 100
    # Counts scale with the population size.
    assert scaled_results.loc[RETURN_KEYS.secondary_infections][STATS_KEYS.mean] == \
        mock_results.loc[RETURN_KEYS.secondary_infections][STATS_KEYS.mean] * nppl
    assert scaled_results.loc[RETURN_KEYS.secondary_infections][STATS_KEYS.std] == \
        mock_results.loc[RETURN_KEYS.secondary_infections][STATS_KEYS.std] * nppl * monte_carlo_factor
|
import traceback
from muddery.common.utils.singleton import Singleton
from muddery.common.utils.password import hash_password, make_salt
from muddery.worldeditor.settings import SETTINGS
from muddery.server.database.worlddata_db import WorldDataDB
from muddery.worldeditor.database.worldeditor_db import WorldEditorDB
from muddery.worldeditor.dao.accounts import Accounts
from muddery.worldeditor.processer import Processor
class Server(Singleton):
    """
    The game editor server.
    """
    def __init__(self, *args, **kwargs):
        super(Server, self).__init__(*args, **kwargs)
        self.db_connected = False  # guards against reconnecting in connect_db()
        self.processor = None      # request dispatcher, created in init()

    def init(self):
        # Connect databases first; the processor serves requests against them.
        self.connect_db()
        self.processor = Processor()

    def connect_db(self):
        """
        Create the db connection.  Idempotent: does nothing when already
        connected; prints the traceback and re-raises on failure.
        """
        if self.db_connected:
            return
        try:
            # init world data
            WorldDataDB.inst().connect()
            WorldEditorDB.inst().connect()
        except Exception as e:
            traceback.print_exc()
            raise
        self.db_connected = True

    def check_admin(self):
        """
        Return True when no account exists yet, i.e. an administrator
        account still needs to be created.
        """
        return Accounts.inst().count() == 0

    def create_admin(self, username, password):
        """
        Create an administrator account.  The password is salted and hashed
        before storage; failures are printed, not raised.
        """
        # Add a default ADMIN account
        salt = make_salt()
        password = hash_password(password, salt)
        try:
            Accounts.inst().add(username, password, salt, "ADMIN")
        except Exception as e:
            print(e)

    async def handle_request(self, method, path, data, request, token=None):
        # Delegate the HTTP request to the processor and return its response.
        return await self.processor.process(method, path, data, request, token)
|
# Copyright (c) 2009-2014 Stefan Marr <http://www.stefan-marr.de/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from . import value_with_optional_details
import logging
class BenchmarkConfig(object):
    """One concrete benchmark configuration: name, command, gauge adapter,
    suite, VM and extra arguments.

    Instances register themselves with the data store on construction and
    collect the runs that execute them.
    """

    @classmethod
    def compile(cls, bench, suite, data_store):
        """Specialization of the configurations which get executed by using the
        suite definitions.
        """
        name, details = value_with_optional_details(bench, {})
        # The command defaults to the benchmark name.
        command = details.get('command', name)
        # TODO: remove in ReBench 1.0
        if 'performance_reader' in details:
            logging.warning("Found deprecated 'performance_reader' key in"
                            " configuration, please replace by 'gauge_adapter'"
                            " key.")
            details['gauge_adapter'] = details['performance_reader']
        # Fall back to the suite-wide adapter when none is set per benchmark.
        gauge_adapter = details.get('gauge_adapter',
                                    suite.gauge_adapter)
        extra_args = details.get('extra_args', None)
        codespeed_name = details.get('codespeed_name', None)
        warmup = int(details.get('warmup', 0))
        return BenchmarkConfig(name, command, gauge_adapter, suite,
                               suite.vm, extra_args, warmup, codespeed_name,
                               data_store)

    def __init__(self, name, command, gauge_adapter, suite, vm, extra_args,
                 warmup, codespeed_name, data_store):
        self._name = name
        self._command = command
        self._extra_args = extra_args
        self._codespeed_name = codespeed_name
        self._warmup = warmup
        self._gauge_adapter = gauge_adapter
        self._suite = suite
        self._vm = vm
        self._runs = set()  # the compiled runs, these might be shared
                            # with other benchmarks/suites
        # Side effect: every config announces itself to the data store.
        data_store.register_config(self)

    def add_run(self, run):
        self._runs.add(run)

    @property
    def name(self):
        """Benchmark name, used for reporting."""
        return self._name

    @property
    def command(self):
        """
        We distinguish between the benchmark name, used for reporting, and the
        command that is passed to the benchmark executor.
        If no command was specified in the config, the name is used instead.
        See the compile(.) method for details.
        :return: the command to be passed to the benchmark invocation
        """
        return self._command

    @property
    def codespeed_name(self):
        """Optional alternative name for Codespeed reporting."""
        return self._codespeed_name

    @property
    def extra_args(self):
        return self._extra_args

    @property
    def warmup_iterations(self):
        return self._warmup

    @property
    def gauge_adapter(self):
        return self._gauge_adapter

    @property
    def suite(self):
        return self._suite

    @property
    def vm(self):
        return self._vm

    @property
    def execute_exclusively(self):
        # Delegated to the VM: whether runs may share the machine.
        return self._vm.execute_exclusively

    def __str__(self):
        return "%s, vm:%s, suite:%s, args:'%s', warmup: %d" % (
            self._name, self._vm.name, self._suite.name, self._extra_args or '',
            self._warmup)

    def as_simple_string(self):
        """Compact one-line description, with or without extra args."""
        if self._extra_args:
            return "%s (%s, %s, %s, %d)" % (self._name, self._vm.name,
                                            self._suite.name, self._extra_args,
                                            self._warmup)
        else:
            return "%s (%s, %s, %d)" % (self._name, self._vm.name,
                                        self._suite.name, self._warmup)

    def as_str_list(self):
        # Serialized form; must stay in sync with from_str_list below.
        return [self._name, self._vm.name, self._suite.name,
                '' if self._extra_args is None else str(self._extra_args),
                str(self._warmup)]

    @classmethod
    def from_str_list(cls, data_store, str_list):
        """Inverse of as_str_list: resolve the config via the data store."""
        return data_store.get_config(str_list[0], str_list[1], str_list[2],
                                     None if str_list[3] == '' else str_list[3],
                                     int(str_list[4]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 28 15:03:04 2022
@author: Dartoon
"""
import numpy as np
import astropy.io.fits as pyfits
import matplotlib.pyplot as plt
import glob
import pickle
import copy
run_folder = 'stage3_all/' #!!!
filt = 'F356W'
# Collect unique (idx, filter) pairs from the fit-material pickle file names.
files = glob.glob(run_folder+'fit_material/data_process_idx0_*{0}*_*FOVpsf*.pkl'.format(filt))
files.sort()
collect_info = []
for i in range(len(files)):
    _file = files[i]
    idx_info = _file.split('idx')[1].split('_')[0]
    filt_info = _file.split('W_')[0].split('_')[-1] + 'W'
    this_info = [idx_info, filt_info]
    if this_info not in collect_info:
        collect_info.append(this_info)
# - [ ] F115W_psf6 is QSO (idx 2). [136, 137, 139]
# - [ ] F150W_psf7 is QSO (idx 2).
# - [ ] F277W_psf2 is QSO (idx 2).
print("After remove candidates")
# Load the PSF library for the current filter (first match only).
PSF_lib_files = glob.glob('stage3_all/'+'material/*'+filt[:-1]+'*_PSF_Library_idx{0}.pkl'.format(0))[0]
PSF_list, PSF_list_clean, PSF_RA_DEC_list, PSF_from_file_list = pickle.load(open(PSF_lib_files,'rb'))
#%%
if_printshow = False
for count in range(len(collect_info)):
    item = collect_info[count]
    fit_run_list = []
    # NOTE(review): this rebinds the module-level `filt` on every iteration.
    idx, filt= item
    fit_files = glob.glob(run_folder+'fit_material/fit_run_idx{0}_{1}_*FOVpsf*.pkl'.format(idx, filt))
    fit_files.sort()
    for i in range(len(fit_files)):
        fit_run_list.append(pickle.load(open(fit_files[i],'rb')))
    # Rank the fit runs by reduced chi-square, best first.
    chisqs = np.array([fit_run_list[i].reduced_Chisq for i in range(len(fit_run_list))])
    idx_counts = chisqs.argsort()
    if len(idx_counts)<8:
        print(idx, filt, len(idx_counts))
    # print("work on", count, 'idx', idx, filt, "Total PSF NO.", len(idx_counts))
    for i in range(5):
        # NOTE(review): indexes the PSF library with chi-square ranks — this
        # assumes fit_files and the PSF library share ordering; confirm.
        print(PSF_RA_DEC_list[idx_counts[i]])
|
# Vote tally: find the weekday with the most votes for live streams.
print("====Apuração de Votos - Melhor dia para Lives====")
segunda = int(input("Insira os votos da Segunda-feira: "))
terca = int(input("Insira os votos da Terça-feira: "))
quarta = int(input("Insira os votos da Quarta-feira: "))
quinta = int(input("Insira os votos da Quinta-feira: "))
sexta = int(input("Insira os votos da Sexta-feira: "))

# Running maximum, seeded with Monday; on a tie the earlier day wins.
maior_votos = segunda
maior_dia = "Segunda-Feira"
for votos, dia in ((terca, "Terça-Feira"), (quarta, "Quarta-Feira"),
                   (quinta, "Quinta-Feira"), (sexta, "Sexta-Feira")):
    if votos > maior_votos:
        maior_votos = votos
        maior_dia = dia

# BUG FIX: the closing banner was inside an `else` attached to the Friday
# comparison, so it was skipped whenever Friday had the most votes; it is
# now printed unconditionally.
print("====FIM DA APURAÇÃO====")
print("O dia mais votado foi {} com {} votos".format(maior_dia, maior_votos))
|
from abc import abstractmethod
from contextlib import suppress
from os import unlink
from pathlib import Path
from typing import Union
from google.cloud import bigquery
from google.cloud.bigquery.table import RowIterator, _EmptyRowIterator
from pandas import DataFrame, read_feather
class AbstractRepository:
    """Minimal repository interface: subclasses must implement get()."""

    @abstractmethod
    def get(self, *args):
        raise NotImplementedError
class DataFrameRepository(AbstractRepository):
    """Stores DataFrames as zstd-compressed Feather files under one directory."""

    def __init__(self, path: Path):
        self.path = path  # base directory for all stored frames

    def add(self, data_frame: DataFrame, filename: str) -> Path:
        """Write *data_frame* to *filename* and return its full path."""
        target = self._filepath(filename)
        DataFrame.to_feather(data_frame, path=target, compression="zstd")
        return target

    def get(self, filename: str) -> DataFrame:
        """Load the frame stored under *filename*."""
        return read_feather(self._filepath(filename))

    def remove(self, filename: str) -> None:
        """Delete *filename*; a missing file is silently ignored."""
        with suppress(FileNotFoundError):
            unlink(self._filepath(filename))

    def _filepath(self, filename: str) -> Union[Path, str]:
        return self.path / filename
class BigQueryRepository(AbstractRepository):
    """Read-only repository that executes SQL against BigQuery."""

    def __init__(self, client: bigquery.Client):
        self.client = client

    def get(self, sql_command: str) -> DataFrame:
        """Run *sql_command* and return the result as a DataFrame."""
        rows = self.client.query(sql_command).result()
        return rows.to_dataframe(progress_bar_type="tqdm_notebook")

    def get_rows(self, sql_command: str) -> Union[RowIterator, _EmptyRowIterator]:
        """Run *sql_command* and return the raw row iterator."""
        return self.client.query(sql_command).result()
|
import os
from operator import itemgetter, attrgetter
def month_to_second(month_str):
    """Return the 1-based month number for the English month name *month_str*.

    Returns None for unrecognized names, matching the original if-chain's
    fall-through.  NOTE(review): the name says 'second' but the function
    returns a month number; kept unchanged for interface compatibility.
    """
    months = {
        "January": 1, "February": 2, "March": 3, "April": 4,
        "May": 5, "June": 6, "July": 7, "August": 8,
        "September": 9, "October": 10, "November": 11, "December": 12,
    }
    return months.get(month_str)
class Date():
    """Timestamp parsed from a log file, plus the file it came from."""

    def __init__(self, year, month, day, hour, minute, filenum):
        # Fields are stored verbatim (they may be strings, as parsed).
        self.year, self.month, self.day = year, month, day
        self.hour, self.minute = hour, minute
        self.filenum = filenum

    def __repr__(self):
        fields = (self.year, self.month, self.day,
                  self.hour, self.minute, self.filenum)
        return repr(fields)
# Collect a Date entry from every "Date ..." line in every file under the
# data directory, sort chronologically, and write the file names to date.txt.
date_list = []
date_file = []

# BUG FIX: the original literal "C:\Users\..." is a SyntaxError under
# Python 3 (\U begins a unicode escape); use a raw string.
DATA_DIR = r"C:\Users\sandaon\Desktop\Python\hw_5\Test_data"

for path, folders, files in os.walk(DATA_DIR):
    for f in files:
        # BUG FIX: the original opened each file twice (leaking both
        # handles) and iterated one handle while reading from the other;
        # a single context-managed handle reads every line exactly once.
        with open(os.path.join(path, f)) as handle:
            for line in handle:
                if line.find("Date") == 0:
                    splited = line.split()
                    # BUG FIX: fields were kept as strings, so sorting was
                    # lexicographic ("12" < "6"); convert to ints so the
                    # chronological sort is correct.
                    day = int(splited[1])
                    month = month_to_second(splited[2])
                    year = int(splited[3])
                    hour = int(splited[5][0:2])
                    minute = int(splited[5][3:])
                    date_list.append(Date(year, month, day, hour, minute, f))

for entry in sorted(date_list,
                    key=attrgetter('year', 'month', 'day', 'hour', 'minute')):
    date_file.append(entry.filenum)

with open("date.txt", "w") as out:
    out.write("\n".join(date_file))
|
def calculateTotalPrice(articlePrice: int, n=9):
    """Return *articlePrice* plus tax at *n* percent (default 9).

    The variable name suggests the rate covers CGST+SGST combined —
    presumably an Indian GST context; confirm with the caller.
    """
    tax_rate = n / 100
    cgst_sgst = articlePrice * tax_rate
    return articlePrice + cgst_sgst

print(calculateTotalPrice(105))
|
#_*_coding:utf-8_*_
__author__ = 'Jorden Hai'
from sqlalchemy import create_engine,Table
from sqlalchemy.orm import sessionmaker
from conf import settings

# BUG FIX: both engine lines were commented out, so the sessionmaker call
# below raised NameError at import time.  Recreate the engine from settings;
# use echo=True instead to log the emitted SQL while debugging.
engine = create_engine(settings.DB_CONN)
# engine = create_engine(settings.DB_CONN, echo=True)

# sessionmaker returns a session *class*, not an instance; instantiate it
# to obtain the working session.
SessionCls = sessionmaker(bind=engine)
session = SessionCls()
#!/usr/bin/env python
#----------------------------------------------------------------------
# Description:
# Author: Carsten Richter <carsten.richter@esrf.fr>
# Created at: Sa 6. Mai 16:04:24 CEST 2017
# Computer: lid01gpu1.
# System: Linux 3.16.0-4-amd64 on x86_64
#----------------------------------------------------------------------
#
# App for reading out subsequent camera images from given Url
# - image feature matching
# - determine shifts / rotations
# - move center of rotation to point of interest
#
# Currently relies on the newest SILX version (0.5.0)
# Can work for python 3 and python 2
# A suitable python 3 environment is here:
#
# source /data/id01/inhouse/crichter/venv3.4/bin/activate
#----------------------------------------------------------------------
import os
os.environ.pop("http_proxy", None) # for ID01
os.environ.pop("https_proxy", None)
import sys
# Move dist-packages entries to the end of sys.path — presumably so
# user/virtualenv packages take precedence; confirm.
for p_i in range(len(sys.path)):
    if "dist-packages" in sys.path[p_i]:
        sys.path.append(sys.path.pop(p_i))
# "--cpu" on the command line selects the CPU device (GPU otherwise).
devicetype = "CPU" if "--cpu" in sys.argv else "GPU"
import time
import collections
import platform
PV = platform.python_version()
print("Python version %s"%PV)
import numpy as np
print("Using numpy %s"%np.__version__)
from scipy import linalg, ndimage
from PIL import Image
print("Using PIL.Image %s"%Image.VERSION)
from PyQt4 import QtGui as Q
from PyQt4 import QtCore
print("Using PyQt %s"%QtCore.QT_VERSION_STR)
# The embedded IPython console is only enabled under Python 3.
_use_console = True
_use_console = _use_console and PV.startswith("3.")
import silx
print("Using silx %s"%silx.version)
from silx.gui import plot
from silx.gui.plot import PlotActions
import silx.gui.icons
from silx.image import sift
if _use_console:
    from silx.gui import console
import id01lib
from id01lib import image
# Window icon shipped inside the id01lib package.
# NOTE(review): os.path.join() with a single argument is a no-op here.
iconpath = os.path.dirname(os.path.join(id01lib.__file__))
iconpath = os.path.join(iconpath, "media", "camview.png")
# Alternative camera/webcam URLs kept for testing:
#_default_url = "http://220.221.164.165:8000/jpg/image.jpg"
#_default_url = "http://skycam.mmto.arizona.edu/skycam/latest_image.png"
#_default_url = "http://jimstar11.com/DSICam/SkyEye.jpg"
#_default_url = "http://www.webcam.cannstatter-volksfest.de/2013/live/live.jpg"
_default_url = "http://vidid011.esrf.fr/jpg/1/image.jpg"
# Default motor mnemonics for the two calibration rows.
_default_motors = ["thx", "thy"]
# Status-bar hint text, keyed by widget/button name.
_hints = dict()
_hints["calibration"] = ('Move motor %i, take picture and press to get '
                         'calibration in pixel per motor step.')
_hints["Exposure"] = 'Press to acquire new picture.'
_hints["Get COR"] = ('Identifies features on both images, estimates '
                     'the affine transform between them and returns '
                     'Center Of Rotation.')
_hints["POI to COR"] = ('Move selected Point Of Interest into Center Of '
                        'Rotation')
_hints["Get Sharpness"] = ('Compute a measure for the sharpness of the image '
                           'in arbitrary units. Uses the latest image.')
_hints["AutoFocus"] = ('Use ROI sharpness and optimization of a motor position '
                       'to focus the image.')
_hints["selectPOI"] = 'Select Point of Interest (POI)'
_hints["Navg"] = 'Number of subsequent camera images to average'
_hints["enhance"] = 'Strech contrast of the camera image'
_hints["saveit"] = 'Save the new image to the current directory'
# Input validators for numeric QLineEdit widgets.
_valid = {int:Q.QIntValidator(),
          float:Q.QDoubleValidator()}
class CrosshairAction(PlotActions.CrosshairAction):
    """
    Overridden silx class
    """
    def _actionTriggered(self, checked=False):
        # Toggle between "select" mode (crosshair active) and the plot's
        # stored default mode.  NOTE(review): _defaultMode is a private
        # attribute set elsewhere on the plot — confirm it always exists
        # before the first uncheck.
        super(CrosshairAction, self)._actionTriggered(checked)
        if checked:
            self.plot.setInteractiveMode("select")
        else:
            self.plot.setInteractiveMode(**self.plot._defaultMode)
class ClearPointsAction(PlotActions.PlotAction):
    """Toolbar action that clears the SIFT keypoint overlay of its plot."""

    def __init__(self, plot, parent=None):
        super(ClearPointsAction, self).__init__(
            plot,
            icon='image-select-erase',
            text='Clear SIFT keypoints',
            tooltip='Clear keypoints found by SIFT',
            triggered=self.trigger,
            parent=parent)

    def trigger(self):
        # Passing None removes the keypoints curve (see CamPlot.update_keypoints).
        self.plot.update_keypoints(None)
class CamPlot(plot.PlotWindow):
    """Image plot with a selectable ROI rectangle and a POI marker."""

    roi = None  # ((x0, x1), (y0, y1)) integer pixel limits, or None
    poi = None  # (x, y) point of interest, or None

    def __init__(self, data=None, title=None, parent=None):
        super(CamPlot, self).__init__(parent=parent, resetzoom=True,
                                      autoScale=False,
                                      logScale=False, grid=False,
                                      curveStyle=False, colormap=True,
                                      aspectRatio=True, yInverted=False,
                                      copy=True, save=True, print_=True,
                                      control=False,
                                      roi=False, mask=False)
        self.setXAxisAutoScale(True)
        self.setYAxisAutoScale(True)
        self.setKeepDataAspectRatio(True)
        self.setYAxisInverted(True)  # camera images count rows downwards
        if data is not None:  # idiomatic form of `not data is None`
            self.addImage(data, resetzoom=True)
        self.setGraphTitle(title)
        clearpoints = ClearPointsAction(self)
        self.toolBar().addAction(clearpoints)
        self._clearpoints = clearpoints

    def update_roi(self, event):
        """Set the ROI from a draw event; an empty rectangle clears it."""
        xlim = np.clip(event["xdata"], 0, None)
        ylim = np.clip(event["ydata"], 0, None)
        xlim.sort()
        ylim.sort()
        if xlim[0] == xlim[1] or ylim[0] == ylim[1]:
            self.remove("roi")
            self.roi = None
            # BUG FIX: the original used `is 'draw'`, an identity comparison
            # with a string literal that relies on interning; compare by value.
            if self.getInteractiveMode()["mode"] == 'draw':
                self.parent().parent().echo("Empty ROI -> removed ROI.")
            return
        self.roi = xlim.astype(int), ylim.astype(int)
        # Closed rectangle outline for display.
        x = [xlim[i] for i in (0, 1, 1, 0, 0)]
        y = [ylim[i] for i in (0, 0, 1, 1, 0)]
        self.addCurve(x, y, resetzoom=False, legend="roi", color="r")

    def update_poi(self, event):
        """Place the POI marker at the clicked position."""
        self.poi = poi = event["x"], event["y"]
        m = self.addMarker(poi[0], poi[1], symbol="o",
                           legend="poi", color=(.3, 1., 1., 1.), text="POI")

    def get_roi_data(self):
        """Return the image data inside the ROI (whole image when no ROI is set)."""
        imdata = self.getImage().getData()
        if self.roi is not None:
            xlim, ylim = self.roi
            return imdata[ylim[0]:ylim[1], xlim[0]:xlim[1]]
        return imdata

    def update_keypoints(self, xy=None):
        """Show SIFT keypoints at *xy* = (xs, ys); None clears the overlay."""
        plotcfg = dict(legend="keypoints", color=(.3, 1., .3, .8), symbol=".",
                       resetzoom=False, linestyle=" ")
        if xy is None:
            self.remove(plotcfg['legend'])
        else:
            self.addCurve(xy[0], xy[1], **plotcfg)
class ControlWidget(Q.QWidget):
    """Control pane of the camview window: acquisition settings, action
    buttons, motor-calibration rows, an output log and (under Python 3)
    an embedded IPython console."""

    # Registry mapping widget names to their Qt objects, shared for lookups.
    Input = dict()

    def __init__(self, parent=None, **kw):
        super(ControlWidget, self).__init__(parent=parent, **kw)
        self.home()

    def home(self):
        """Build the whole control layout."""
        font = Q.QFont()
        font.setPointSize(9)
        self.setFont(font)
        layout = Q.QHBoxLayout(self)
        self.splitter = splitter = Q.QSplitter(QtCore.Qt.Horizontal)
        layout.addWidget(splitter)
        _reg = self.registerWidget
        self.form = form = Q.QFrame(self)
        form.setFrameShape(Q.QFrame.StyledPanel)
        form.layout = Q.QFormLayout(form)
        # Row 1: URL, averaging count, enhance/save toggles.
        hbox = Q.QHBoxLayout()
        url = _reg(Q.QLineEdit(_default_url), "url")
        enhance = Q.QCheckBox('enhance', self)
        enhance.setStatusTip(_hints['enhance'])
        enhance = _reg(enhance, "enhanced")
        saveit = Q.QCheckBox('save', self)
        saveit.setStatusTip(_hints['saveit'])
        saveit = _reg(saveit, "saveit")
        Navg = Q.QLineEdit("1")
        Navg.setValidator(_valid[int])
        Navg.setStatusTip(_hints['Navg'])
        Navg.setMaxLength(3)
        Navg.setFixedWidth(25)
        Navg = _reg(Navg, "Navg")
        hbox.addWidget(Q.QLabel("URL"))
        hbox.addWidget(url)
        hbox.addSpacing(5)
        hbox.addWidget(Q.QLabel("Navg"))
        hbox.addWidget(Navg)
        hbox.addSpacing(5)
        hbox.addWidget(enhance)
        hbox.addWidget(saveit)
        form.layout.addRow(hbox)
        # Row 2: action buttons ('&' marks the keyboard accelerator; the
        # stripped name doubles as the registry key and hint lookup).
        hbox = Q.QHBoxLayout()
        for k in ("E&xposure", "Get CO&R", "POI to COR", "Get Sharpness", "AutoFocus"):
            name = k.replace("&","")
            btn = _reg(Q.QPushButton(k, self), name)
            btn.setStatusTip(_hints[name])
            hbox.addWidget(btn)
        form.layout.addRow(hbox)
        # Calibration:
        for i in range(1,3):
            motor = _default_motors[i-1]
            hbox = Q.QHBoxLayout()
            MBtn = _reg(Q.QLineEdit(motor), "Mot%i"%i)
            MBtn.setMinimumWidth(40)
            hbox.addWidget(MBtn)
            hbox.addWidget(Q.QLabel("Step"))
            hbox.addWidget(_reg(Q.QLineEdit("0.05"), "Step%i"%i))
            CBtn = Q.QPushButton("Calib. Mot. #%i"%i, self)
            CBtn.setStatusTip(_hints["calibration"]%i)
            hbox.addWidget(_reg(CBtn, "Cal%i"%i))
            hbox.addWidget(_reg(Q.QLineEdit("#####"), "CalRes_%i"%i))
            form.layout.addRow(Q.QLabel("Motor #%i"%i), hbox)
        [self.Input["CalRes_%i"%i].setMinimumWidth(100) for i in (1,2)]
        # Read-only monospace output log.
        form.layout.addRow(_reg(Q.QTextEdit(""), "output"))
        self.Input["output"].setReadOnly(True)
        textFont = Q.QFont("Monospace", 9)
        textFont.setStyleHint(Q.QFont.Monospace)
        self.Input["output"].setCurrentFont(textFont)
        splitter.addWidget(form)
        if _use_console:
            banner = "Inspect/Modify `MainWindow` App instance."
            ipython = console.IPythonWidget(self)#, custom_banner=banner)
            #ipython.banner += banner
            # Expose the main window inside the console for interactive use.
            mainWindow = self.parent().parent()
            ipython.pushVariables({"MainWindow": mainWindow})
            #ipython.font_size =
            ipython.change_font_size(-2)
            self.console = ipython
            #ipython.clear()
            splitter.addWidget(ipython)
        else:
            splitter.addWidget(Q.QPushButton("Dummy"))
        splitter.setSizes([500,500])
        self.setLayout(layout)

    def registerWidget(self, QtObject, name):
        """Store *QtObject* under *name* in the Input registry and return it."""
        self.Input[name] = QtObject
        if isinstance(QtObject, Q.QLineEdit):
            #QtObject.setFixedWidth(length)
            pass
        else:
            QtObject.resize(QtObject.minimumSizeHint())
        return QtObject
class Window(Q.QMainWindow):
    """Main application window for camera view processing.

    Shows two camera plots ("Latest" / "Previous") above a control panel
    and implements exposure updates, motor calibration, center-of-rotation
    (COR) estimation, sharpness measurement and autofocus.
    """
    # NOTE(review): class-level mutables are shared by all instances; fine
    # for this single-window app, but would alias state across windows.
    _ignoreEvent = False
    _eventSource = None
    resultsCOR = dict()
    calibration = dict()

    def __init__(self):
        super(Window, self).__init__()
        self.setGeometry(200, 100, 1000, 750)
        self.setWindowTitle("Cam view processing")
        if iconpath is not None and os.path.isfile(iconpath):
            print("setting icon %s" % iconpath)
            self.setWindowIcon(Q.QIcon(iconpath))
        extractAction = Q.QAction("&Quit", self)
        extractAction.setShortcut("Ctrl+Q")
        extractAction.setStatusTip('Leave The App')
        extractAction.triggered.connect(self.close_application)
        self.statusBar()
        mainMenu = self.menuBar()
        fileMenu = mainMenu.addMenu('&File')
        fileMenu.addAction(extractAction)
        extractAction = Q.QAction('Quit', self)
        extractAction.triggered.connect(self.close_application)
        extractAction.setStatusTip('Leave The App')
        self.toolBar = self.addToolBar("Extraction")
        self.toolBar.addAction(extractAction)
        self.home()

    def home(self):
        """Build the central widget: the two plots, crosshair and controls."""
        cw = Q.QWidget(self)
        self.grid = g = Q.QGridLayout(cw)
        self.setCentralWidget(cw)
        data = np.random.random((512, 512))
        self.plotLeft = pleft = CamPlot(data, "Latest", cw)
        self.plotRight = pright = CamPlot(data, "Previous", cw)
        g.addWidget(pleft, 0, 0)
        g.addWidget(pright, 0, 1)
        crosshair = CrosshairAction(pleft, color="b")
        crosshair.setToolTip(_hints["selectPOI"])
        pleft.toolBar().addAction(crosshair)
        pleft.crosshair = crosshair
        pleft.setCallback(lambda event: self.handle_event(event, "l"))
        pright.setCallback(lambda event: self.handle_event(event, "r"))
        pleft.setInteractiveMode("draw", shape="rectangle",
                                 color=(1., 1., 0., 0.8))
        for p in (pleft, pright):
            p._defaultMode = p.getInteractiveMode()
        self.control = control = ControlWidget(cw)
        g.addWidget(control, 1, 0, 1, 2)
        # Connect control-panel buttons to their handlers:
        control.Input["Exposure"].clicked.connect(self.update_plots)
        control.Input["Get COR"].clicked.connect(self.get_center_of_rotation)
        control.Input["POI to COR"].clicked.connect(self.poi_to_cor)
        control.Input["Get Sharpness"].clicked.connect(self.calc_sharpness)
        control.Input["AutoFocus"].clicked.connect(self.autofocus)
        control.Input["Cal1"].clicked.connect(lambda: self.calibrate(1))
        control.Input["Cal2"].clicked.connect(lambda: self.calibrate(2))
        self.show()

    def handle_event(self, event, side):
        """Dispatch plot events; *side* is "l" or "r" for the source plot.

        Mirrors ROI drawing, zoom limits and POI clicks between the two
        plots, using _ignoreEvent/_eventSource to break feedback loops.
        """
        # Fixed: the original compared strings with `is`, which only works
        # by CPython interning and raises a SyntaxWarning on 3.8+.
        if event["event"] == "drawingFinished":
            for p in [self.plotLeft, self.plotRight]:
                p.update_roi(event)
        elif event["event"] == "limitsChanged" \
                and not self._eventSource == event["source"] \
                and not self._ignoreEvent:
            self._eventSource = event["source"]
            self._ignoreEvent = True
            if side == "l":
                self.plotRight.setLimits(*(event["xdata"] + event["ydata"]))
            elif side == "r":
                self.plotLeft.setLimits(*(event["xdata"] + event["ydata"]))
            self._ignoreEvent = False
        elif event["event"] == "mouseClicked" and side == "l":
            if event["button"] == "left" and \
                    self.plotLeft.getGraphCursor() is not None:
                for p in [self.plotLeft, self.plotRight]:
                    p.update_poi(event)

    def update_plots(self):
        """Fetch a new image, optionally enhance/save it, rotate the plots."""
        iso_time = time.strftime("%Y-%m-%dT%H:%M:%S", time.localtime())
        url = str(self.control.Input["url"].text())
        Navg = int(self.control.Input["Navg"].text())
        if Navg < 1:
            self.echo("Error: need Navg > 0")
            return
        # Fixed: `Navg is 1` -> `Navg == 1` (small-int identity is an
        # implementation detail).
        Ntext = "once" if Navg == 1 else "%i times" % Navg
        self.echo("Exposure - %s" % iso_time)
        self.echo("Fetching %s %s..." % (url, Ntext))
        try:
            img = image.url2array(url, Navg)
            self.echo("Image shape: (%i, %i)" % img.shape)
        except Exception as emsg:
            self.echo("...failed: %s" % emsg)
            return
        do_enhance = self.control.Input["enhanced"].checkState()
        if do_enhance:
            img = image.stretch_contrast(img)
        do_save = self.control.Input["saveit"].checkState()
        if do_save:
            try:
                impath = "CamView_%s.png" % iso_time
                im = Image.fromarray(img * 255. / img.max())
                im = im.convert("RGB")
                im.save(impath)
                self.echo("Saved image to: %s" % os.path.abspath(impath))
            except Exception as emsg:
                self.echo("Saving failed: %s" % emsg)
        # Shift the current image to "Previous", show the new one on the left.
        imLeft = self.plotLeft.getImage()
        imRight = self.plotRight.getImage()
        oldData = imLeft.getData()
        imRight.setData(oldData)
        imLeft.setData(img)
        self.plotLeft.resetZoom()

    def calibrate(self, motorNum):
        """Estimate pixel shift per unit move of motor #motorNum via SIFT."""
        motorName = self.control.Input["Mot%i" % motorNum].text()
        motorStep = float(self.control.Input["Step%i" % motorNum].text())
        imLeft = self.plotLeft.get_roi_data()
        imRight = self.plotRight.get_roi_data()
        sa = sift.LinearAlign(imLeft, devicetype=devicetype)
        res = sa.align(imRight, shift_only=True, return_all=True,
                       double_check=False, relative=False, orsa=False)
        if res is None or res["matrix"] is None or res["offset"] is None:
            self.echo("Warning: No matching keypoints found.")
            return
        self.plot_matchpoints(res)
        offset = -res["offset"][::-1]
        output = "Offset estimated for %s movement of %f: (%.2f, %.2f) px" \
                 % (motorName, motorStep, offset[0], offset[1])
        self.echo(output)
        dv_vs_dm = offset / motorStep
        self.control.Input["CalRes_%i" % motorNum].setText("%.2f, %.2f" % tuple(dv_vs_dm))
        self.calibration[motorNum] = dv_vs_dm

    def get_center_of_rotation(self):
        """Estimate the center of rotation from the two ROIs via SIFT."""
        imLeft = self.plotLeft.get_roi_data().astype(float)
        imRight = self.plotRight.get_roi_data().astype(float)
        if not imLeft.size or not imRight.size:
            self.echo("Error: ROI outside image data.")
            return
        roi = self.plotLeft.roi
        dx, dy = np.array(roi)[:, 0] if roi is not None else (0, 0)
        sigma = 1.6  # default value
        t0 = time.time()
        try:
            sa = sift.LinearAlign(imLeft, devicetype=devicetype, init_sigma=sigma)
            res = sa.align(imRight, shift_only=False, return_all=True,
                           double_check=False, relative=False, orsa=False)
        except Exception as emsg:
            self.echo("Error during alignment: %s" % emsg)
            return
        self.echo("Calculation time: %.2f ms" % ((time.time() - t0) * 1000))
        self.resultsCOR = dict(align=res)
        if res is None or res["matrix"] is None or res["offset"] is None:
            self.echo("Warning: No matching keypoints found.")
            return
        self.plot_matchpoints(res)
        numpoints = len(res["matching"])
        if numpoints < 18:
            self.echo("Too few matching keypoints found (%i)." % numpoints)
            return
        self.echo("Matching keypoints found: %i" % numpoints)
        matrix, offset = res["matrix"][::-1, ::-1], res["offset"][::-1]
        U, S, V = linalg.svd(matrix)
        R = U.dot(V)  # Rotation part
        self.resultsCOR.update(dict(U=U, S=S, V=V, R=R))
        relrot = abs(R[0, 0] - 1)
        if relrot < 1e-3:
            self.echo("Estimation of rotation failed. Too small? (%.3g)" % relrot)
            return
        angle = np.degrees(np.arctan2(R[0, 1], R[1, 1]))
        self.echo("Rotation of %.2f deg found." % angle)
        # The COR is the fixed point of the affine map:
        # solve (matrix - I) @ cor = -offset.
        cor = linalg.solve(matrix - np.eye(2), -offset).squeeze()
        cor[0] += dx
        cor[1] += dy
        self.resultsCOR["cor"] = cor
        self.echo("Center of rotation estimated at (%.2f, %.2f) px." % tuple(cor))
        plotcfg = dict(symbol="o", legend="cor",
                       color=(1., .3, .3, 1.), text="COR")
        self.plotLeft.addMarker(cor[0], cor[1], **plotcfg)
        self.plotRight.addMarker(cor[0], cor[1], **plotcfg)

    def calc_sharpness(self):
        """Report the contrast-based sharpness of the left plot's ROI."""
        imLeft = self.plotLeft.get_roi_data().astype(float)
        sharpness = image.contrast(imLeft)
        self.echo("Computed sharpness of the left image ROI: %f" % sharpness)

    def autofocus(self):
        """Open the autofocus dialog, apply the settings and run the scan."""
        url = str(self.control.Input["url"].text())
        Navg = int(self.control.Input["Navg"].text())
        do_enhance = bool(self.control.Input["enhanced"].checkState())
        roi = self.plotLeft.roi
        # Fixed idiom: `not roi is None` -> `roi is not None`.
        if roi is not None:
            roi = tuple(roi[1]) + tuple(roi[0])
        if not hasattr(self, "_AutoFocus"):
            self._AutoFocus = image.AutoFocus(url)
        af = self._AutoFocus
        af.url = url
        af.roi = roi
        ddefaults = collections.OrderedDict()
        ddefaults["motor"] = af.motor
        ddefaults["lower_limit"] = af._ll
        ddefaults["upper_limit"] = af._ul
        ddefaults["Navg"] = Navg
        ddefaults["contrast"] = image._models
        ddefaults["enhance"] = do_enhance
        dialog = AutoFocusDialog(self, defaults=ddefaults)
        dialog.exec_()
        # Convert each widget value back to the type of its default.
        results = dict.fromkeys(ddefaults)
        for field in ddefaults:
            result = dialog.Input[field]
            if isinstance(ddefaults[field], bool):
                result = bool(result.checkState())
            elif isinstance(ddefaults[field], list):
                result = str(result.currentText())
            else:
                result = type(ddefaults[field])(result.text())
            results[field] = result
        af.motor = results["motor"]
        af.limits = results["lower_limit"], results["upper_limit"]
        af.navg = results["Navg"]
        af.stretch = results["enhance"]
        af.contrast = results["contrast"]
        if not dialog.result():
            return
        self.echo("Starting autofocus...")
        try:
            fit = af.focus()
            self.echo("Done. Status: %s" % fit.message)
            self.echo("New Position: %s=%f" % (af.motor, fit.x.item()))
        except Exception as emsg:
            self.echo("Error: %s" % emsg)

    def plot_matchpoints(self, res):
        """Overlay the matched SIFT keypoints on both plots."""
        roi = self.plotLeft.roi
        dx, dy = np.array(roi)[:, 0] if roi is not None else (0, 0)
        xk1 = res["matching"].x[:, 0] + dx
        xk2 = res["matching"].x[:, 1] + dx
        yk1 = res["matching"].y[:, 0] + dy
        yk2 = res["matching"].y[:, 1] + dy
        self.plotLeft.update_keypoints((xk1, yk1))
        self.plotRight.update_keypoints((xk2, yk2))

    def poi_to_cor(self):
        """Print the motor moves that bring the POI onto the COR."""
        poi = self.plotLeft.poi
        cor = self.resultsCOR.get("cor", None)
        if poi is None:
            self.echo("Use crosshair to select point of interest first.")
            return
        if cor is None:
            self.echo("Error: No center of rotation found.")
            return
        diff = cor - poi
        self.echo("Distance: (%.2f, %.2f) px" % tuple(diff))
        calibration = []
        for i in (1, 2):
            # Fixed idiom: `not i in` -> `i not in`.
            if i not in self.calibration:
                self.echo("Motor %i not calibrated" % i)
                return
            calibration.append(self.calibration[i])
        # Invert the (pixels per motor unit) matrix to get motor moves.
        matrix = np.linalg.inv(np.array(calibration).T)
        dm1, dm2 = matrix.dot(diff)
        m1, m2 = [self.control.Input["Mot%i" % i].text() for i in (1, 2)]
        self.echo("Move to POI:")
        self.echo(" umvr %s %s" % (m1, dm1))
        self.echo(" umvr %s %s" % (m2, dm2))

    def echo(self, output):
        """Append a line to the control panel's output text box."""
        self.control.Input["output"].append(output)

    def close_application(self):
        """Ask for confirmation, then quit the application."""
        choice = Q.QMessageBox.question(self, 'Quit',
                                        "Do you really want to quit?",
                                        Q.QMessageBox.Yes | Q.QMessageBox.No)
        if choice == Q.QMessageBox.Yes:
            sys.exit()
class AutoFocusDialog(Q.QDialog):
    """Dialog that asks for autofocus parameters.

    Widgets are generated from the ``defaults`` mapping: bool -> checkbox,
    list -> combo box, anything else -> validated line edit.  The widgets
    remain reachable afterwards through ``self.Input[field]``.
    """
    # Kept for backward compatibility; shadowed by a per-instance dict in
    # __init__ so concurrent dialogs no longer share one registry.
    Input = dict()

    def __init__(self, parent=None, defaults=None):
        # Fixed: `defaults=dict()` was a shared mutable default argument.
        super(AutoFocusDialog, self).__init__(parent)
        self.resize(300, 200)
        self.defaults = {} if defaults is None else defaults
        self.Input = {}
        self.home()

    def home(self):
        """Build the form from ``self.defaults``."""
        font = Q.QFont()
        font.setPointSize(9)
        self.setFont(font)
        _reg = self.registerWidget
        layout = Q.QHBoxLayout(self)
        self.form = form = Q.QFrame(self)
        form.setFrameShape(Q.QFrame.StyledPanel)
        form.layout = Q.QFormLayout(form)
        defaults = self.defaults
        for i, field in enumerate(defaults):
            val = defaults.get(field, None)
            if isinstance(val, bool):
                qobj = Q.QCheckBox(field, self)
                # Fixed: setCheckState(True) maps to Qt.PartiallyChecked (1);
                # setChecked gives the intended fully-checked state.
                qobj.setChecked(val)
            elif isinstance(val, list):
                qobj = Q.QComboBox(self)
                qobj.addItems(val)
            else:
                qobj = Q.QLineEdit(str(val))
                for chktyp in _valid:
                    if isinstance(val, chktyp):
                        qobj.setValidator(_valid[chktyp])
            qobj = _reg(qobj, field)
            qlabel = Q.QLabel(field.capitalize())
            form.layout.addRow(qlabel, qobj)
        buttonBox = Q.QDialogButtonBox(Q.QDialogButtonBox.Cancel | Q.QDialogButtonBox.Ok)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        form.layout.addRow(buttonBox)
        layout.addWidget(form)
        self.setLayout(layout)

    def showEvent(self, event):
        """Center the dialog on the mouse cursor when it is shown."""
        geom = self.frameGeometry()
        geom.moveCenter(Q.QCursor.pos())
        self.setGeometry(geom)
        super(AutoFocusDialog, self).showEvent(event)

    def keyPressEvent(self, event):
        """Swallow Enter (no accidental accept); Escape hides the dialog."""
        if event.key() == QtCore.Qt.Key_Enter:
            pass
        elif event.key() == QtCore.Qt.Key_Escape:
            self.hide()
            event.accept()
        else:
            super(AutoFocusDialog, self).keyPressEvent(event)

    def registerWidget(self, QtObject, name):
        """Store the widget under *name* in ``self.Input`` and return it."""
        self.Input[name] = QtObject
        if not isinstance(QtObject, Q.QLineEdit):
            QtObject.resize(QtObject.minimumSizeHint())
        return QtObject
def run():
    """Create the Qt application and main window, then run the event loop."""
    app = Q.QApplication(sys.argv)
    main_window = Window()  # keep a reference alive for the app's lifetime
    sys.exit(app.exec_())
# Script entry point: start the Qt application.
if __name__=="__main__":
    run()
from os import path
def read_file(filename):
    """Return the lines of *filename*, resolved relative to this module.

    Parameters
    ----------
    filename : str
        Name of the file, looked up next to this script.

    Returns
    -------
    list[str]
        The file's lines, newline characters included.
    """
    file_path = path.join(path.dirname(__file__), filename)
    # `with` guarantees the handle is closed even if readlines() raises
    # (the original leaked the handle on error; also fixes the
    # `fie_content` typo).
    with open(file_path, "r") as f_obj:
        return f_obj.readlines()
def get_str_digits(string):
    """Return the integer formed by the digit characters of *string*.

    Non-digit characters are discarded; a string with no digits yields 0.
    """
    digits = ''.join(filter(str.isdigit, string))
    return int(digits) if digits else 0
def handle_schedule_line(line):
    """Parse a "name: v1 v2 ..." line into (name, sum of numeric values)."""
    name, rest = line.split(':')
    values = (get_str_digits(token)
              for token in rest.split(' ') if token.strip() != '')
    return name, sum(values)
# Module-level driver: parse schedule.txt (one "name: values" entry per
# line) into a {name: total} dict and print it.  Runs on import.
lines = read_file('schedule.txt')
result = dict([handle_schedule_line(line) for line in lines])
print(result)
|
#Programmers - 비밀지도
def solution(n, arr1, arr2):
    """Programmers "secret map" (비밀지도): merge two binary maps.

    Each element of arr1/arr2 encodes one row as an n-bit integer.  A cell
    is a wall ('#') if either map has a 1 there, otherwise a space.

    Improvements over the original: the two identical zero-padding loops
    are replaced by a single pass that ORs the rows *before* formatting
    (bitwise OR on ints instead of per-character int round trips), and the
    result grid is no longer sized by len(arr1) instead of n.

    Parameters
    ----------
    n : int
        Side length of the square map.
    arr1, arr2 : list[int]
        Row encodings of the two maps.

    Returns
    -------
    list[str]
        The decoded map, one '#'/' ' string per row.
    """
    answer = []
    for row1, row2 in zip(arr1, arr2):
        bits = format(row1 | row2, 'b').zfill(n)
        answer.append(bits.replace('1', '#').replace('0', ' '))
    return answer
|
import json
import requests
from pathlib import Path
# Load the Free Music Archive API key from a local JSON file.
with open('api_key.json') as f:
    API_KEY = json.load(f)['API_KEY']

# Fetch the featured-tracks listing.  Fixed: the API key must go in the
# query string (`params=`); the original passed it as a request *body*
# via `data=`, which a GET request's server-side handler ignores.
r = requests.get('https://freemusicarchive.org/featured.json',
                 params={'api_key': API_KEY})
tracks = r.json()['aTracks']

# Download the first 90 featured tracks, skipping files already on disk.
# (The original comment claimed 300, but the slice takes 90.)
for track in tracks[:90]:
    file_path = Path(track['track_file'])
    if file_path.exists():
        continue
    print(file_path)
    # exist_ok replaces the try/except FileExistsError dance.
    file_path.parent.mkdir(parents=True, exist_ok=True)
    r2 = requests.get(track['track_file_url'],
                      params={'api_key': API_KEY})
    file_path.write_bytes(r2.content)
|
from marsim import rescue_line
# Set up the simulator and a one-sensor robot.
rescue_line.init()
robot = rescue_line.Robot()
# robot = rescue_line.Robot(x=0.4, y=0.3, angle=0)
robot.addSensor(0.01, 0.14)  # add one sensor
rescue_line.start()  # run simulation (optional)
# Proportional line follower: steer by how far the gray reading deviates
# from mid-scale (128); gain is 1/30.
while True:
    s1 = robot.readSensors("gray")[0]  # get information from first sensor
    u = (s1 - 128) / 30  # use this information to calculate control coefficient
    robot.setSpeed(6 + u, 6 - u)  # set speed to motors
# NOTE(review): unreachable — the loop above never breaks, so stop() is
# dead code.
rescue_line.stop()  # stop simulation (optional)
from tkinter import END
from tkinter.filedialog import *
def decimalToRoman (self):
    """Convert the integer in the text area to a Roman numeral in place.

    Reads the whole text area as an int, clears it, and writes the Roman
    representation back.  Fixed: the original inserted each numeral at
    index "2.0" of an empty widget, relying on tkinter's index clamping
    for correct ordering; the result is now built as a string and
    inserted once at END.
    """
    self.root.title("Task 20")
    self.file = None
    string_textarea = self.textArea.get(1.0, END)
    self.textArea.delete(1.0, END)
    number = int(string_textarea)
    # Values and numerals in ascending order; iterated largest-first.
    num = [1, 4, 5, 9, 10, 40, 50, 90,
           100, 400, 500, 900, 1000]
    summ = ["I", "IV", "V", "IX", "X", "XL",
            "L", "XC", "C", "CD", "D", "CM", "M"]
    pieces = []
    for value, numeral in zip(reversed(num), reversed(summ)):
        count, number = divmod(number, value)
        pieces.append(numeral * count)
    self.textArea.insert(END, ''.join(pieces))
from django.contrib import admin
from models import *
from django.contrib.contenttypes import generic
class ImageInline(admin.StackedInline):
    """Stacked inline editor for SentenceImage rows on the Sentence form."""
    model = SentenceImage
class OrderImageInline(admin.TabularInline):
    """Tabular inline editor for OrderImage rows on the Order form."""
    model = OrderImage
class OrderAdmin(admin.ModelAdmin):
    """Admin for Order: filterable list plus inline image editing."""
    list_display = ('title', 'status', 'user', 'born', 'city')
    list_filter = ['born', 'user', 'city']
    search_fields = ['title']
    inlines = [OrderImageInline, ]
class SentenceAdmin(admin.ModelAdmin):
    """Admin for Sentence; mirrors OrderAdmin's list configuration."""
    list_display = ('title', 'status', 'user', 'born', 'city')
    list_filter = ['born', 'user', 'city']
    search_fields = ['title']
    inlines = [ImageInline, ]
class ImageCompanyInline(admin.StackedInline):
    """Stacked inline editor for CompanyImage rows on the Company form."""
    model = CompanyImage
class CompanyAdmin(admin.ModelAdmin):
    """Admin for Company with inline company images."""
    list_display = ('title', 'user', 'city')
    list_filter = ['user', 'city']
    search_fields = ['title']
    inlines = [ImageCompanyInline, ]
class SubcategoryAdmin(admin.ModelAdmin):
    """Admin for Subcategory; shows the parent category and slug."""
    list_display = ('title','parent','slug')
    search_fields = ['title']
    list_filter = ['parent']
class SubsubcategoryAdmin(admin.ModelAdmin):
    """Admin for the third category level; filterable by parent."""
    list_display = ('title','parent')
    search_fields = ['title']
    list_filter = ['parent']
class CategoryAdmin(admin.ModelAdmin):
    """Admin for top-level categories; my_order is editable in the list."""
    list_display = ('my_order','title')
    list_editable = ('my_order',)
# Register models with the admin site; models without a custom ModelAdmin
# use Django's default.
admin.site.register(Page)
admin.site.register(Order, OrderAdmin)
admin.site.register(Sentence, SentenceAdmin)
admin.site.register(Company, CompanyAdmin)
admin.site.register(Category,CategoryAdmin)
admin.site.register(Subcategory,SubcategoryAdmin)
admin.site.register(Subsubcategory, SubsubcategoryAdmin)
admin.site.register(BlockInfo)
admin.site.register(BlockonPage)
import os
from load_model import load_model_depth
import os
import time
import cv2
from my_utils import load_cv
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '10'
from keras.models import load_model
from layers import BilinearUpSampling2D
from utils import predict, display_images
import json
import numpy as np
# Load the monocular-depth model once; the polling loop below re-uses it.
depthModel = load_model_depth()
# NOTE(review): this removes '.DS_Store' whenever exactly ONE entry is
# present, even if that entry is a real image — confirm intent.
if len(os.listdir("received_images"))==1:
    os.remove('received_images/.DS_Store')
# Block until at least one image has arrived.
while len(os.listdir("received_images"))==0:
    print("It is empty")
    time.sleep(1)
whatToPush = {}
# Main loop: read detections from test.json, run depth estimation on
# received_images/testing.png, and publish per-detection depth estimates
# to push_indoor.json.
while True:
    with open('test_json_files/test.json') as f:
        data = json.load(f)
    image_paths = os.listdir("received_images")
    print(image_paths)
    time.sleep(1)
    inputs = load_cv("received_images/testing.png")
    if inputs is not None:
        outputs = predict(depthModel, inputs)
        print("Visualising")
        viz = display_images(outputs.copy(), inputs.copy())
        cv2.imshow("Depth Map",viz/255)
        cv2.waitKey(20)
        dodge = viz
        try:
            if bool(data['received_images/testing.png']) and dodge is not None:
                print("Going in")
                height, width = dodge.shape[:2]
                jojo = 1000000
                for cls in list(data['received_images/testing.png'][0].keys()):
                    # Bounding box of this detection: (x, y, w, h).
                    x = int(float(data['received_images/testing.png'][0][cls][0]))
                    y = int(float(data['received_images/testing.png'][0][cls][1]))
                    w = int(float(data['received_images/testing.png'][0][cls][2]))
                    h = int(float(data['received_images/testing.png'][0][cls][3]))
                    ul = (x, y)
                    lr = (x+w, y+h)
                    # Mean depth-map value of four horizontal bands, each
                    # mapped through an empirical quadratic calibration
                    # (0.017*v^2 - 3.55*v + 255).
                    dodge1 = np.mean(dodge[0:int(height/4), 0:int(width)].ravel())
                    dodge1 = 0.017*dodge1**2 - 3.55*dodge1 + 255
                    dodge2 = np.mean(dodge[int(height/4):2*int(height/4), 0:int(width)].ravel())
                    dodge2 = 0.017*dodge2**2 - 3.55*dodge2 + 255
                    dodge3 = np.mean(dodge[2*int(height/4):3*int(height/4), 0:int(width)].ravel())
                    dodge3 = 0.017*dodge3**2 - 3.55*dodge3 + 255
                    dodge4 = np.mean(dodge[3*int(height/4):4*int(height/4), 0:int(width)].ravel())
                    dodge4 = 0.017*dodge4**2 - 3.55*dodge4 + 255
                    # NOTE(review): numpy images index as [row, col] = [y, x];
                    # ul/lr are (x, y), so this crop looks transposed — verify.
                    dodgeno = dodge[ul[0]:lr[0], ul[1]:lr[1]]
                    d1, d2, d3, d4 = 0, 0, 0, 0
                    # NOTE(review): `x + w/2` is the *horizontal* box centre
                    # but is compared against height-based bands — verify.
                    if int(x+w/2) in range(0, int(height/4)):
                        impart = 1
                        dodge1 = np.mean(dodgeno.ravel())
                        dodge1 = 0.017*dodge1**2 - 3.55*dodge1 + 255
                        d1 = 0.625*dodge1
                    elif int(x+w/2) in range(int(height/4), 2*int(height/4)):
                        impart = 2
                        dodge2 = np.mean(dodgeno.ravel())
                        dodge2 = 0.017*dodge2**2 - 3.55*dodge2 + 255
                        d2 = 0.625*dodge2
                    elif int(x+w/2) in range(2*int(height/4), 3*int(height/4)):
                        impart = 3
                        dodge3 = np.mean(dodgeno.ravel())
                        dodge3 = 0.017*dodge3**2 - 3.55*dodge3 + 255
                        d3 = 0.625*dodge3
                    elif int(x+w/2) in range(3*int(height/4), 4*int(height/4)):
                        impart = 4
                        dodge4 = np.mean(dodgeno.ravel())
                        dodge4 = 0.017*dodge4**2 - 3.55*dodge4 + 255
                        d4 = 0.625*dodge4
                    #mappo = 0.017*jojo**2 - 3.55*jojo + 255
                    # Publish band depths + per-box estimates; the single
                    # 'testing.png' key is overwritten for each class.
                    whatToPush['testing.png'] = [dodge1, dodge2, dodge3, dodge4, d1, d2, d3, d4, cls, time.time()]
                    with open("test_json_files/push_indoor.json", "w") as outfile:
                        json.dump(whatToPush, outfile)
        except Exception as e:
            print(e)
import sqlalchemy as sa
import sqlparse
import argparse
def main(sql_file_name, connection_string, schema=None):
    """Execute every statement of a SQL file against a database.

    Parameters
    ----------
    sql_file_name : str
        Path to the file containing one or more SQL statements.
    connection_string : str
        SQLAlchemy connection URL.
    schema : str, optional
        If given, a ``set search_path=<schema>;`` prefix is prepended to
        each statement.  NOTE: the schema name is interpolated into SQL —
        only pass trusted values (here it comes from a CLI argument).
    """
    engine = sa.create_engine(connection_string)
    connection = engine.connect()
    try:
        with open(sql_file_name) as f:
            sql_txt = f.read()
        sql_statements = sqlparse.split(sql_txt)
        if schema is not None:
            pre_statement = "set search_path=%s;" % schema
        else:
            pre_statement = ""
        for sql_statement in sql_statements:
            print(sql_statement)
            sql_to_execute = pre_statement + sql_statement
            # Fixed: was connection.exeute(...) — AttributeError at runtime.
            connection.execute(sql_to_execute)
    finally:
        # Release the connection even when a statement fails.
        connection.close()
if __name__ == "__main__":
    # CLI: -f SQL file, -c connection string, -s optional schema.
    arg_parser_obj = argparse.ArgumentParser()
    arg_parser_obj.add_argument("-f", dest="file_name", default="execute_sql_files.py")
    arg_parser_obj.add_argument("-c", dest="connection_string")
    arg_parser_obj.add_argument("-s", dest="schema", default=None)
    arg_obj = arg_parser_obj.parse_args()
    # Fixed: was arg_obj.connection_sting — AttributeError at runtime.
    main(sql_file_name=arg_obj.file_name,
         connection_string=arg_obj.connection_string,
         schema=arg_obj.schema)
|
"""Pylint plugin for py.test"""
from __future__ import unicode_literals
from __future__ import absolute_import
from os.path import exists, join, dirname
from six.moves.configparser import ( # pylint: disable=import-error
ConfigParser,
NoSectionError,
NoOptionError
)
from pylint import lint
from pylint.config import PYLINTRC
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
import pytest
class ProgrammaticReporter(BaseReporter):
    """Reporter that replaces output with storage in list of dictionaries"""
    __implements__ = IReporter
    extension = 'prog'

    def __init__(self, output=None):
        BaseReporter.__init__(self, output)
        self.current_module = None
        # Collected pylint message objects, appended by handle_message().
        self.data = []

    def add_message(self, msg_id, location, msg):
        """Deprecated, but required"""
        raise NotImplementedError

    def handle_message(self, msg):
        """Get message and append to our data structure"""
        self.data.append(msg)

    def _display(self, layout):
        """launch layouts display"""
        # Intentionally a no-op: messages are consumed from self.data
        # instead of being rendered.
"""launch layouts display"""
def pytest_addoption(parser):
    """Register the pylint-related command-line options with pytest."""
    group = parser.getgroup("general")
    option_specs = (
        ("--pylint",
         dict(action="store_true", default=False,
              help="run pylint on all")),
        ("--pylint-rcfile",
         dict(default=None,
              help="Location of RC file if not pylintrc")),
        ("--pylint-error-types",
         dict(default="CRWEF",
              help="The types of pylint errors to consider failures by letter"
                   ", default is all of them (CRWEF).")),
    )
    for flag, kwargs in option_specs:
        group.addoption(flag, **kwargs)
def pytest_collect_file(path, parent):
    """Handle running pylint on files discovered"""
    config = parent.config
    # Only collect .py files, and only when --pylint was requested.
    if not config.option.pylint:
        return
    if path.ext != ".py":
        return
    # Find pylintrc to check ignore list
    pylintrc_file = config.option.pylint_rcfile or PYLINTRC
    if pylintrc_file and not exists(pylintrc_file):
        # The directory of pytest.ini got a chance
        pylintrc_file = join(dirname(str(config.inifile)), pylintrc_file)
    if not pylintrc_file or not exists(pylintrc_file):
        # No pylintrc, therefore no ignores, so return the item.
        return PyLintItem(path, parent)
    pylintrc = ConfigParser()
    pylintrc.read(pylintrc_file)
    ignore_list = []
    try:
        # Honour pylint's own MASTER/ignore setting (comma separated).
        ignore_string = pylintrc.get('MASTER', 'ignore')
        if len(ignore_string) > 0:
            ignore_list = ignore_string.split(',')
    except (NoSectionError, NoOptionError):
        pass
    msg_template = None
    try:
        # Optional custom message template from the REPORTS section.
        msg_template = pylintrc.get('REPORTS', 'msg-template')
    except (NoSectionError, NoOptionError):
        pass
    # Path of the file relative to the pytest root, without leading slash.
    rel_path = path.strpath.replace(parent.fspath.strpath, '', 1)[1:]
    if not any(basename in rel_path for basename in ignore_list):
        return PyLintItem(path, parent, msg_template, pylintrc_file)
class PyLintException(Exception):
    """Raised when a checked file produces pylint errors of a watched type."""
class PyLintItem(pytest.Item, pytest.File):
    """pylint test running class."""
    # pylint doesn't deal well with dynamic modules and there isn't an
    # astng plugin for pylint in pypi yet, so we'll have to disable
    # the checks.
    # pylint: disable=no-member,super-on-old-class

    def __init__(self, fspath, parent, msg_format=None, pylintrc_file=None):
        """Store the message template and rcfile used for this file's run."""
        super(PyLintItem, self).__init__(fspath, parent)
        self.add_marker("pylint")
        if msg_format is None:
            self._msg_format = '{C}:{line:3d},{column:2d}: {msg} ({symbol})'
        else:
            self._msg_format = msg_format
        self.pylintrc_file = pylintrc_file

    def runtest(self):
        """Setup and run pylint for the given test file."""
        reporter = ProgrammaticReporter()
        # Build argument list for pylint
        args_list = [str(self.fspath)]
        if self.pylintrc_file:
            args_list.append('--rcfile={0}'.format(
                self.pylintrc_file
            ))
        # exit=False keeps pylint from calling sys.exit() inside pytest.
        lint.Run(args_list, reporter=reporter, exit=False)
        reported_errors = []
        for error in reporter.data:
            # Keep only the message categories (C/R/W/E/F letters)
            # selected via --pylint-error-types.
            if error.C in self.config.option.pylint_error_types:
                reported_errors.append(
                    error.format(self._msg_format)
                )
        if reported_errors:
            raise PyLintException('\n'.join(reported_errors))

    def repr_failure(self, excinfo):
        """Handle any test failures by checking that they were ours."""
        if excinfo.errisinstance(PyLintException):
            return excinfo.value.args[0]
        return super(PyLintItem, self).repr_failure(excinfo)

    def reportinfo(self):
        """Generate our test report"""
        return self.fspath, None, "[pylint] {0}".format(self.name)
|
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import smtplib
import webbrowser as wb
import os
import pyautogui
import psutil
import pyjokes
from covid import Covid
from quotes import Quotes
import pywhatkit as kit
# Initialize the shared text-to-speech engine and select the second
# installed voice (index 1; which voice that is depends on the system).
en=pyttsx3.init()
#en.say("hello this is Jarvis")
voices = en.getProperty('voices')
en.setProperty('voice' ,voices[1].id)
def speak(audio):
    """Vocalize *audio* through the shared pyttsx3 engine (blocks until done)."""
    en.say(audio)
    en.runAndWait()
# Announce startup as soon as the module runs.
speak("hey boss this is friday and I am ai assistant")
def time():
    """Speak the current time on a 24-hour clock.

    NOTE: shadows the stdlib ``time`` module name; kept because the main
    loop calls it by this name.
    """
    # Fixed: the spoken prompt promises 24-hour format, but the original
    # used %I (12-hour clock); %H matches the claim.
    Time = datetime.datetime.now().strftime("%H:%M:%S")
    speak("time in 24 hrs format and is")
    speak(Time)
#time()
def quotes():
    """Print and speak one randomly selected quote."""
    picked = str(Quotes().random()[1])
    print(picked, "\n")
    speak(picked)
def date():
    """Speak the current day, month and year."""
    # Snapshot the clock once so day/month/year cannot disagree if the
    # call straddles midnight (the original called now() three times).
    now = datetime.datetime.now()
    speak("date is")
    speak(now.day)
    speak("month is")
    speak(now.month)
    speak("year is")
    speak(now.year)
#date()
def wishme():
    """Greet the user with a time-of-day appropriate message."""
    speak("welcome back boss!")
    hour = datetime.datetime.now().hour
    if 6 <= hour < 12:
        greeting = "good morning"
    elif 12 <= hour < 15:
        greeting = "good afternoon"
    elif 15 <= hour < 20:
        greeting = "good evening"
    else:
        # Covers 20-23 and the 0-5 early-morning hours alike.
        greeting = "good night"
    speak(greeting)
    speak("friday at your service tell meh how can i help you")
#wishme()
def takecommand():
    """Listen on the microphone and return the recognized speech.

    Returns the recognized text, or the string "none" when recognition
    fails for any reason.
    """
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        print("listening....")
        recognizer.pause_threshold = 1
        audio = recognizer.listen(source)
    try:
        print("recognizing..")
        query = recognizer.recognize_google(audio, language='en-in')
        print(query)
        return query
    except Exception as e:
        print(e)
        speak("say that again boss....")
        return "none"
def sendemail(to, content):
    """Send *content* as a plain email via Gmail SMTP with STARTTLS.

    Parameters
    ----------
    to : str
        Recipient address.
    content : str
        Message body.
    """
    server = smtplib.SMTP("smtp.gmail.com", 587)
    # Fixed: was server.eclo() — AttributeError; EHLO must precede STARTTLS.
    server.ehlo()
    server.starttls()
    # SECURITY: credentials are hard-coded in source; move them to an
    # environment variable or a credential store.
    server.login('sadiqshaik1211@gmail.com', 'Sadiqshaik@1234')
    server.sendmail('sadiqshaik1211@gmail.com', to, content)
    server.close()
def screenshot():
    """Grab the full screen and save it to the hard-coded desktop path."""
    img = pyautogui.screenshot()
    img.save("C:\\Users\\sadiq shaik\\Desktop\\car\\ss.png")
def cpu():
    """Speak the current CPU load and battery charge percentage."""
    load = str(psutil.cpu_percent())
    speak("cpu is at" + load)
    battery_info = psutil.sensors_battery()
    speak("the battery is at")
    speak(battery_info.percent)
def jokes():
    """Speak a random one-liner from pyjokes."""
    speak(pyjokes.get_joke())
def covid():
    """Ask which country the user wants and print its COVID statistics."""
    speak("which country boss ")
    country = takecommand().lower()
    tracker = Covid()
    stats = tracker.get_status_by_country_name(str(country))
    for field in stats:
        print(field, ":", stats[field])
def whatsapp():
    """Dictate a WhatsApp message and schedule it via pywhatkit."""
    speak("what you want to send boss")
    message = takecommand().lower()
    speak("tell the hour time")
    hour_text = takecommand()
    speak("tellthe minutes")
    minute_text = takecommand()
    kit.sendwhatmsg("+919629595614", str(message), int(hour_text), int(minute_text))
def name():
    """Speak the assistant's name."""
    speak("my name is friday")
def boss():
    """Speak the owner's name."""
    speak("sadiq shaik")
if __name__ == "__main__":
    # Greet, then dispatch voice commands forever (until "offline").
    wishme()
    while True:
        query = takecommand().lower()
        if 'time' in query:
            time()
        elif 'boss' in query:
            boss()
        elif 'date' in query:
            date()
        elif 'name' in query:
            name()
        elif 'quotes' in query:
            quotes()
        elif 'wikipedia' in query:
            speak("boss I am searching.....")
            query = query.replace("wikipedia", "")
            result = wikipedia.summary(query, sentences=2)
            print("friday:", result)
            speak(result)
        elif 'send mail' in query:
            try:
                speak("what should i write ")
                content = takecommand()
                to = 'sadiqshaik1211@gmail.com'
                sendemail(to, content)
                speak("email is sent successfully")
            except Exception as e:
                print(e)
                speak("I cant able to send due to bad network connection")
        elif 'search' in query:
            speak("what you want to search")
            chromepath = "C:/Program Files (x86)/Google/Chrome/Application/chrome.exe %s"
            search = takecommand().lower()
            wb.get(chromepath).open_new_tab(search + '.com')
        elif 'log out' in query:
            os.system("shutdown -l")
        elif 'restart' in query:
            os.system("shutdown /r /t 1")
        elif 'shutdown' in query:
            os.system("shutdown /s /t 1")
        elif 'note this one' in query:
            speak("what to remember boss")
            data = takecommand()
            speak("you said me to remember that" + data)
            # `with` closes the note file even if something raises.
            with open('data.txt', 'w') as remember:
                remember.write(data)
        elif 'do you remember anything' in query:
            # Fixed: the original never closed this file handle.
            with open('data.txt', 'r') as remember:
                speak("you said to remember that " + remember.read())
        elif 'play songs' in query:
            song_dir = 'C:\\Users\\sadiq shaik\\Desktop\\Musicfiles'
            songs = os.listdir(song_dir)
            os.startfile(os.path.join(song_dir, songs[1]))
        elif 'screenshot' in query:
            screenshot()
            speak("yeah screenshot is done and yeah it is great content")
        elif 'battery' in query:
            cpu()
            speak("percent")
        elif 'covid news' in query:
            covid()
        elif 'joke' in query:
            jokes()
            speak("it is really good right hahahaha")
        # Fixed: the original branch was `elif " send whatsapp message":` —
        # a constant truthy string that captured every query not matched
        # above and made the "offline" branch unreachable.
        elif "send whatsapp message" in query:
            whatsapp()
            speak("your msg is sending")
        elif "offline" in query:
            speak("yep done!")
            quit()
|
import asyncio
import logging
import unittest
from aioradius import RadiusService, RadiusAuthProtocol, RadiusAccountingProtocol, \
RadiusResponseError, \
packet
__author__ = 'aruisnov'
# Configure root logging once at import time so protocol/service loggers
# emit DEBUG-level console output during the tests.
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
class FakeTransport(object):
    """In-memory stand-in for an asyncio datagram transport.

    Captures whatever the protocol sends via ``sendto`` and lets a test
    block on ``wait_response`` until a response has been written.
    """

    def __init__(self, host, port, loop):
        self.local_addr = (host, port)
        self.remote_addr = (None, None)
        self.response_data = None
        # NOTE(review): the explicit loop= argument to asyncio.Event was
        # deprecated in 3.8 and removed in 3.10 — confirm target version.
        self.__wait_response_lock = asyncio.Event(loop=loop)

    @asyncio.coroutine
    def wait_response(self):
        """Block until sendto() has recorded a response."""
        return self.__wait_response_lock.wait()

    def get_extra_info(self, name):
        # Always answers with the local address, whatever *name* is asked
        # for — sufficient for these tests.
        return self.local_addr

    def sendto(self, data, remote_addr):
        """Record the outgoing datagram and release any waiter."""
        if self.__wait_response_lock is not None:
            self.__wait_response_lock.set()
        self.remote_addr = remote_addr
        self.response_data = data
# Reply-Message value that AlwaysReject attaches to every rejection.
REJECT_ALL_MESSAGE = 'REJECT_ALL'
class AlwaysReject(RadiusService):
    """Test service: rejects every auth request and refuses accounting."""

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.loop = asyncio.get_event_loop()
        # Last exception reported through register_exception(), for asserts.
        self.handle_exception = None

    def validate_nas(self, remote_addr):
        """Return the shared secret for localhost; reject any other NAS."""
        remote_host, remote_port = remote_addr
        if remote_host == 'localhost':
            return 'secret'
        else:
            raise RadiusResponseError("Receive data from unknown NAS '{}'".format(remote_host))

    def on_auth_packet(self, request_attributes):
        """Always answer ACCESS_REJECT with a fixed Reply-Message."""
        # NOTE(review): without a trailing comma the inner parentheses do
        # NOT build a tuple-of-tuples — the attributes value is the single
        # tuple ('Reply-Message', REJECT_ALL_MESSAGE).  Confirm that is
        # what the packet encoder expects.
        return (
            ('Reply-Message', REJECT_ALL_MESSAGE)
        ), packet.ACCESS_REJECT

    def on_acct_packet(self, request_attributes):
        """Accounting is unsupported — always raise."""
        raise RadiusResponseError('Accounting is not implemented')

    def register_exception(self, exc):
        """Log and remember the last exception the protocol reported."""
        self.logger.error(exc)
        self.handle_exception = exc
class SyncProtoTestCase(unittest.TestCase):
    """Protocol tests driven by a synchronous service (AlwaysReject).

    Each test feeds a datagram into the protocol directly and pumps the
    service's event loop until FakeTransport reports a response (or a
    timeout proves no response was sent).
    """

    def test_initialize_proto(self):
        """Loop/logger come from the service, explicit kwargs, or defaults."""
        # Test bad initialization
        with self.assertRaises(RuntimeError):
            proto = RadiusAuthProtocol(None)
        service = AlwaysReject()
        # Test init with RadiusService object only (object has loop and logger)
        proto = RadiusAuthProtocol(service)
        self.assertEqual(proto.loop, service.loop)
        self.assertEqual(proto.logger, service.logger)
        # Test init with RadiusService object, external event_loop and external logger
        other_loop = asyncio.new_event_loop()
        other_logger = logging.getLogger('other_logger')
        proto = RadiusAuthProtocol(service, loop=other_loop, logger=other_logger)
        self.assertEqual(proto.loop, other_loop)
        self.assertEqual(proto.logger, other_logger)
        self.assertNotEqual(proto.loop, service.loop)
        self.assertNotEqual(proto.logger, service.logger)
        # Test init with RadiusService object only (object not has loop and logger)
        delattr(service, 'loop')
        delattr(service, 'logger')
        proto = RadiusAuthProtocol(service)
        self.assertTrue(isinstance(proto.loop, asyncio.AbstractEventLoop))
        self.assertTrue(isinstance(proto.logger, logging.Logger))

    def test_auth_request(self):
        """A valid auth request yields a reject carrying Reply-Message."""
        service = AlwaysReject()
        proto = RadiusAuthProtocol(service)
        transport = FakeTransport('localhost', 1812, service.loop)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('User-Password', 'password'),
            ('NAS-Identifier', 'nas_id')
        )
        encoded_ = bytes(request_packet)
        proto.datagram_received(encoded_, ('localhost', 65000))
        # Pump the loop until the fake transport has captured a response.
        service.loop.run_until_complete(transport.wait_response())
        received_data = transport.response_data
        received_packet = packet.decode_response('secret', received_data, request=request_packet)
        # RADIUS responses must echo the request identifier.
        self.assertEqual(request_packet.identifier, received_packet.identifier)
        self.assertEqual(received_packet.attributes.get('Reply-Message'), REJECT_ALL_MESSAGE)
        proto.connection_lost(None)
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())

    def test_acc_request(self):
        """Accounting raises inside the service, so no response is sent."""
        service = AlwaysReject()
        proto = RadiusAccountingProtocol(service)
        transport = FakeTransport('localhost', 1813, service.loop)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('NAS-Identifier', 'nas-id'),
            ('Acct-Session-Id', 'session-id'),
            ('Acct-Status-Type', 'Start')
        )
        encoded_ = bytes(request_packet)
        proto.datagram_received(encoded_, ('localhost', 65000))
        # A timeout here is the expected outcome: nothing must be sent back.
        with self.assertRaises(asyncio.TimeoutError):
            service.loop.run_until_complete(
                asyncio.wait_for(transport.wait_response(), 1.5)
            )
        self.assertIsNone(transport.response_data)
        proto.connection_lost(None)
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())

    def test_bad_request(self):
        """A request missing required attributes fails to encode at all."""
        service = AlwaysReject()
        proto = RadiusAuthProtocol(service)
        transport = FakeTransport('localhost', 1812, service.loop)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('User-Password', 'password'),
        )
        # Encoding itself raises, presumably because required attributes are
        # missing — so the datagram is never delivered to the protocol.
        with self.assertRaises(packet.PacketError):
            encoded_ = bytes(request_packet)
        #proto.datagram_received(encoded_, ('localhost', 65000))
        with self.assertRaises(asyncio.TimeoutError):
            service.loop.run_until_complete(
                asyncio.wait_for(transport.wait_response(), 1.5)
            )
        self.assertIsNone(transport.response_data)
        self.assertIsNone(service.handle_exception)
        proto.connection_lost(None)
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())

    def test_bad_bytes(self):
        """Undecodable bytes produce no response and register a ValueError."""
        service = AlwaysReject()
        proto = RadiusAccountingProtocol(service)
        transport = FakeTransport('localhost', 1813, service.loop)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        # 64 zero bytes: not a decodable RADIUS packet.
        encoded_ = bytes(64)
        proto.datagram_received(encoded_, ('localhost', 65000))
        with self.assertRaises(asyncio.TimeoutError):
            service.loop.run_until_complete(
                asyncio.wait_for(transport.wait_response(), 1.5)
            )
        self.assertIsNone(transport.response_data)
        self.assertIsInstance(service.handle_exception, ValueError)
        proto.connection_lost(None)
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())

    def test_bad_nas(self):
        """Packets from a host rejected by validate_nas get no response."""
        service = AlwaysReject()
        proto = RadiusAccountingProtocol(service)
        transport = FakeTransport('localhost', 1812, service.loop)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('NAS-Identifier', 'nas-id'),
            ('Acct-Session-Id', 'session-id'),
            ('Acct-Status-Type', 'Start')
        )
        encoded_ = bytes(request_packet)
        # 127.0.0.2 is not 'localhost', so AlwaysReject.validate_nas raises.
        proto.datagram_received(encoded_, ('127.0.0.2', 65000))
        with self.assertRaises(asyncio.TimeoutError):
            service.loop.run_until_complete(
                asyncio.wait_for(transport.wait_response(), 1.5)
            )
        self.assertIsNone(transport.response_data)
        proto.connection_lost(None)
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())
class FakeTransportWithProto(FakeTransport):
    """Fake transport that tears down its protocol right after a send.

    Lets tests simply wait for the protocol to close instead of awaiting
    the transport's response event.
    """

    def __init__(self, host, port, loop, proto):
        # Keep a handle on the protocol so sendto() can close it.
        self.proto = proto
        super().__init__(host, port, loop)

    def sendto(self, data, remote_addr):
        # Record the datagram as usual, then immediately signal connection loss.
        super().sendto(data, remote_addr)
        self.proto.connection_lost(None)
class AsyncRadiusService(RadiusService):
    """RADIUS service whose handlers are coroutines, to exercise async dispatch.

    Each handler sleeps for a second, so the protocol must genuinely await it.
    """

    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        self.loop = asyncio.get_event_loop()
        self.handle_exception = None

    # async/await replaces the @asyncio.coroutine / yield-from style: the
    # decorator is deprecated since Python 3.8 and removed in 3.11.
    async def validate_nas(self, remote_addr):
        """Accept any NAS (after a delay) and return the shared secret."""
        await asyncio.sleep(1)
        return 'secret'

    async def on_auth_packet(self, request_attributes):
        """Accept every auth request, echoing a User-Name attribute."""
        await asyncio.sleep(1)
        return (
            ('User-Name', 'user'),
        ), packet.ACCESS_ACCEPT

    async def on_acct_packet(self, request_attributes):
        """Acknowledge accounting requests with no extra attributes."""
        await asyncio.sleep(1)
        return (), None

    def register_exception(self, exc):
        # Fail loudly: async tests must not swallow protocol errors.
        raise exc
class AsyncProtoTestCase(unittest.TestCase):
    """Protocol tests where every service handler is a coroutine.

    FakeTransportWithProto closes the protocol right after the response is
    sent, so waiting for the protocol to close is enough to synchronise.
    """

    def test_async_auth_request(self):
        """Auth request is accepted once the async handlers have run."""
        service = AsyncRadiusService()
        proto = RadiusAuthProtocol(service)
        transport = FakeTransportWithProto('localhost', 1812, service.loop, proto)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('User-Password', 'password'),
            ('NAS-Identifier', 'nas_id')
        )
        encoded_ = bytes(request_packet)
        proto.datagram_received(encoded_, ('localhost', 65000))
        # Blocks until the whole async request/response cycle has finished
        # (the fake transport closes the protocol after sendto).
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())
        received_data = transport.response_data
        received_packet = packet.decode_response('secret', received_data, request=request_packet)
        self.assertEqual(request_packet.identifier, received_packet.identifier)
        self.assertEqual(received_packet.attributes.get('User-Name'), 'user')

    def test_async_acc_request(self):
        """Accounting request completes and echoes the request identifier."""
        service = AsyncRadiusService()
        proto = RadiusAccountingProtocol(service)
        # NOTE(review): 1812 is the auth port; accounting conventionally uses
        # 1813. The fake transport does not care, but confirm intent.
        transport = FakeTransportWithProto('localhost', 1812, service.loop, proto)
        proto.connection_made(transport)
        self.assertIs(proto.transport, transport)
        request_class = proto.request_class
        request_packet = request_class('secret')
        request_packet.attributes.extend(
            ('User-Name', 'user'),
            ('NAS-Identifier', 'nas_id'),
            ('Acct-Session-Id', 'session-id'),
            ('Acct-Status-Type', 'Start')
        )
        encoded_ = bytes(request_packet)
        proto.datagram_received(encoded_, ('localhost', 65000))
        proto.wait_for_close()
        self.assertTrue(proto.is_closed())
        received_data = transport.response_data
        received_packet = packet.decode_response('secret', received_data, request=request_packet)
        self.assertEqual(request_packet.identifier, received_packet.identifier)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 24 19:22:30 2016
@author: colinh
"""
from sys import argv
import json
from stream.twitch_stream import *
from stream.config.universal_config import *
import socket, threading
import logging
import sys
logging.basicConfig()
class StreamServer:
    """TCP control server that spawns Twitch stream workers and multicasts
    their trending data over UDP.

    Clients connect over TCP and send ``stream <id>`` commands; the server
    replies with the UDP port on which that stream's data is broadcast.
    """

    def __init__(self, config):
        # self.config must be set before init_socket() runs.
        self.config = config
        self.init_socket()
        self.streams = {}   # stream id -> TwitchStream worker
        self.threads = {}   # stream id -> worker thread
        self.ports = {}     # stream id -> assigned UDP port
        self.nextPort = self.config['init_port']

    def init_socket(self):
        """Create the TCP listen socket and the UDP multicast socket."""
        if self.config['mode'] == 'multicast':
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind((self.config['host'], self.config['listen_port']))
            sock.listen(self.config['listeners'])
            self.listen_socket = sock
            multisock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
            multisock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, self.config['ttl'])
            self.multi_socket = multisock

    # --- stream control -------------------------------------------------
    def create_stream(self, stream):
        """Start a background thread that brings up *stream*."""
        self.threads[stream] = threading.Thread(target=self.add_stream, args=(stream,))
        self.threads[stream].start()

    def add_stream(self, stream):
        """Register a TwitchStream worker, assign it a port, and run it."""
        self.streams[stream] = TwitchStream(twitch_config, stream)
        self.ports[stream] = self.nextPort
        self.nextPort += 1
        self.streams[stream].run()

    def check_for_stream(self, data):
        """Return True when *data* is a ``stream ...`` command (else None)."""
        if data[:6] == 'stream':
            return True

    def get_stream(self, data):
        """Extract the stream id from a ``stream <id>`` command."""
        return data[7:]

    def listen_to_client(self, client_sock, client_address):
        """Serve one TCP client: answer ``stream <id>`` queries with the port."""
        config = self.config
        connected = True
        while connected:
            data = client_sock.recv(config['socket_buffer_size']).rstrip()
            if len(data) == 0:
                pp(('Connection lost by: ' + str(client_address)))
                connected = False
                continue  # nothing left to parse on a dead connection
            if config['debug']:
                pp(data)
            if self.check_for_stream(data):
                stream_id = self.get_stream(data)
                if stream_id in self.streams:
                    if config['debug']:
                        pp('Found stream!')
                else:
                    if config['debug']:
                        pp('Stream not found.')
                    self.create_stream(stream_id)
                    # FIX: wait on self.ports, not self.streams — add_stream
                    # fills streams first, so waiting on streams could read
                    # ports before the port is assigned. Sleep instead of a
                    # hot busy-wait.
                    while stream_id not in self.ports:
                        time.sleep(0.01)
                    if config['debug']:
                        pp('Stream created!')
                output = json.dumps(self.ports[stream_id])
                if config['debug']:
                    pp('Sending: ' + output)
                client_sock.sendall(output)

    def listen(self):
        """Accept TCP clients forever, one handler thread per connection."""
        sock = self.listen_socket
        pp('Now listening...')
        self.listening = True
        while self.listening:
            (client_sock, client_address) = sock.accept()
            pp(('Connection initiated by: ' + str(client_address)))
            client_sock.settimeout(60)
            threading.Thread(target=self.listen_to_client, args=(client_sock, client_address)).start()

    def multicast(self):
        """Broadcast each stream's trending data on its assigned UDP port."""
        multisock = self.multi_socket
        config = self.config
        pp('Now multicasting...')
        # FIX: the original assigned ``self.multicast = True``, which shadows
        # (and destroys) this very method on the instance; use a separate flag.
        self.multicasting = True
        while self.multicasting:
            for stream_key in self.streams.keys():
                stream_dict = json.dumps(self.streams[stream_key].get_trending())
                if config['debug']:
                    pp(stream_dict)
                multisock.sendto(stream_dict, (config['multicast_server'], self.ports[stream_key]))
            time.sleep(0.5)
if __name__ == '__main__':
    server = StreamServer(server_config)
    # FIX: Thread.start() returns None, so the original bound a useless value.
    # Keep references to the Thread objects themselves, then start them.
    listen_thread = threading.Thread(target=server.listen)
    multicast_thread = threading.Thread(target=server.multicast)
    listen_thread.start()
    multicast_thread.start()
class SharedData:
    """Demonstrates a class-level attribute shared by all instances."""
    spam = 42  # class attribute, at class level, shared by every instance
class MixedNames:
    """Shows an instance attribute shadowing a same-named class attribute."""
    data = 'spam' # class-level attribute, inherited by instances via name lookup
    def __init__(self, value):
        # self is the calling subject, i.e. the instance; a class can have
        # many instances, each with its own ``data``.
        self.data = value # instance attribute shadows the class-level ``data``
    def display(self):
        print(self.data, MixedNames.data) # these two ``data`` are different objects
if __name__ == "__main__":
x = SharedData()
y = SharedData()
print(x.spam, y.spam)
SharedData.spam = 98 # 对类进行修改,影响所有实例的值
print(x.spam, y.spam)
x.spam = 54 # 对实例进行修改,不影响其他实例的值
print(x.spam, y.spam)
z = MixedNames(1)
zz = MixedNames(2)
z.display() # 自动把实例方法对应到类函数
zz.display() # 调用方法1:通过实例调用
MixedNames.display(zz) # 调用方法2:通过类调用
|
import torch
import argparse
import os
import torch
import torch.optim
import numpy as np
import argparse
from torch.utils import data
from ssdn.network import NoiseNetwork
from ssdn.Discriminator import DiscriminatorLinear
from lossfunction_dual import *
from datasets.DenoisingDatasets import BenchmarkTrain, SIDD_VAL
import torch.nn.functional as F
import torch.nn as nn
import time
from skimage.metrics import peak_signal_noise_ratio
# Fix all RNG seeds so evaluation runs are reproducible.
np.random.seed(1234)
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
import cv2
from skimage.metrics import peak_signal_noise_ratio
# Deterministic cuDNN kernels (autotuning disabled) for reproducibility.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
import matplotlib.pyplot as plt
def test(config):
    """Evaluate the dual-output denoising network on the SIDD validation set.

    Prints running PSNR for: the raw noisy input, the directly restored
    output, the noise-subtracted output, and the average of both.
    """
    if config.gpu_id == -1:
        device = torch.device('cpu')
    else:
        # FIX: restrict visible GPUs BEFORE any CUDA context is created.
        os.environ['CUDA_VISIBLE_DEVICES'] = str(config.gpu_id)
        device = torch.device('cuda')
    batch_size = config.batch_size
    num_workers = config.num_workers
    # val_dataset = SIDD_VAL('../validation/RAW/')
    val_dataset = SIDD_VAL('/vinai/tampm2/SIDD')
    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=batch_size, shuffle=True,
        num_workers=num_workers, pin_memory=True)
    net = NoiseNetwork(out_channels=8).to(device)
    model_dis = DiscriminatorLinear(in_chn=4).to(device)
    if config.pretrain_model:
        print('Loading pretrained model.')
        checkpoint = torch.load(config.pretrain_model)
        net.load_state_dict(checkpoint['model_state_dict'])
        model_dis.load_state_dict(checkpoint['model_dis_state_dict'])
        print('Loaded pretrained model sucessfully.')
    # FIX: evaluation mode — freeze BatchNorm statistics / disable dropout.
    net.eval()
    model_dis.eval()
    with torch.no_grad():
        psnr_ori = []
        psnr = []
        psnr2 = []
        psnr_mean = []
        for ii, data in enumerate(val_loader):
            im_noisy = data[0].to(device)
            im_gt = data[1].to(device)
            # First 4 output channels: restored image; last 4: predicted noise.
            out = net(im_noisy)
            restored = torch.clamp(out[:, :4, :, :], 0, 1)
            noise = torch.clamp(out[:, 4:, :, :], -1, 1)
            restored_2 = torch.clamp(im_noisy - noise, 0, 1)
            # NHWC numpy arrays for skimage's PSNR.
            restored = np.transpose(restored.cpu().numpy(), [0, 2, 3, 1])
            restored_2 = np.transpose(restored_2.cpu().numpy(), [0, 2, 3, 1])
            im_gt_np = np.transpose(im_gt.cpu().numpy(), [0, 2, 3, 1])
            im_noisy_np = np.transpose(im_noisy.cpu().numpy(), [0, 2, 3, 1])
            restored_mean = (restored + restored_2) / 2
            # FIX: use the actual batch length — the final batch can be
            # smaller than config.batch_size (drop_last is False), and the
            # original's range(batch_size) would index out of bounds.
            n = im_gt_np.shape[0]
            psnr_ori.extend(
                [peak_signal_noise_ratio(im_noisy_np[i], im_gt_np[i], data_range=1) for i in range(n)])
            psnr.extend(
                [peak_signal_noise_ratio(restored[i], im_gt_np[i], data_range=1) for i in range(n)])
            psnr2.extend(
                [peak_signal_noise_ratio(restored_2[i], im_gt_np[i], data_range=1) for i in range(n)])
            psnr_mean.extend(
                [peak_signal_noise_ratio(restored_mean[i], im_gt_np[i], data_range=1) for i in range(n)])
            # Running means, printed once per batch.
            print('psnr_ori={:.4e} ,psnr={:.4e} , psnr2={:.4e} ,psnr_mean={:.4e} '.format(np.mean(psnr_ori),np.mean(psnr), np.mean(psnr2),np.mean(psnr_mean)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--gpu_id", type=int, default=2)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--pretrain_model', type=str, default='model_dual_34_gan/model_32.pth')
# parser.add_argument('--pretrain_model', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=16)
config = parser.parse_args()
test(config)
|
import FWCore.ParameterSet.Config as cms
# EDProducer that replays GTT board output captured in buffer text files.
l1tGTTFileReader = cms.EDProducer('GTTFileReader',
    files = cms.vstring("gttOutput_0.txt"), #, "gttOutput_1.txt"),
    format = cms.untracked.string("APx")  # buffer-file format flavour
)
|
#C:\Users\mzy\Desktop\机器学习\data\train
import tensorflow as tf
import random
import os
def image_deals1(train_file, size=(299, 299)):
    """Load a PNG image, randomly augment, resize, and normalise it.

    Args:
        train_file: path to the image file.
        size: target (height, width); defaults to 299x299 (the original
            hard-coded value — the old comments wrongly said JPEG/224).

    Returns:
        float32 tensor scaled to the range [-0.5, 0.5].
    """
    image_string = tf.io.read_file(train_file)         # raw file bytes
    image_decoded = tf.image.decode_png(image_string)  # decode PNG data
    image_decoded = randoc(image_decoded)              # random flip augmentation
    image_decoded = tf.image.resize(image_decoded, list(size))
    # image = tf.image.rgb_to_grayscale(image_decoded)
    image = tf.cast(image_decoded, dtype=tf.float32) / 255.0 - 0.5
    return image
def image_deals(train_file, size=(299, 299)):
    """Load a PNG image, randomly augment, resize, and normalise it.

    NOTE(review): duplicate of image_deals1 — consider consolidating.

    Args:
        train_file: path to the image file.
        size: target (height, width); defaults to 299x299 (the original
            hard-coded value — the old comments wrongly said JPEG/224).

    Returns:
        float32 tensor scaled to the range [-0.5, 0.5].
    """
    image_string = tf.io.read_file(train_file)         # raw file bytes
    image_decoded = tf.image.decode_png(image_string)  # decode PNG data
    image_decoded = randoc(image_decoded)              # random flip augmentation
    image_decoded = tf.image.resize(image_decoded, list(size))
    # image = tf.image.rgb_to_grayscale(image_decoded)
    image = tf.cast(image_decoded, dtype=tf.float32) / 255.0 - 0.5
    return image
def randoc(train_file):
    """Randomly flip the image: 1/10 chance horizontal, 1/10 vertical.

    Any other draw (8/10) returns the input unchanged.
    """
    draw = random.randint(1, 10)
    if draw == 1:
        return tf.image.random_flip_left_right(train_file)  # horizontal flip
    if draw == 2:
        return tf.image.random_flip_up_down(train_file)     # vertical flip
    return train_file
def train_test_get(train_test_inf):
    """Scan a directory of cat/dog images and build one-hot labels.

    Files whose names start with "cat" are labelled [0, 1]; everything else
    (dogs) is labelled [1, 0].

    Returns:
        dict with keys "list" (full file paths) and "filename" (labels),
        in matching order.
    """
    paths = []   # avoids shadowing the ``list`` builtin, and guards empty walks
    labels = []
    for root, dirs, files in os.walk(train_test_inf, topdown=False):
        paths = [root + "/" + name for name in files]
        labels = []
        for name in files:
            if name[0:3] == "cat":
                labels.append([0, 1])
            else:
                # BUG FIX: the original appended [0, 1] here too, giving every
                # sample the cat label; the commented-out tf.constant([1, 0])
                # shows dogs were meant to be [1, 0].
                labels.append([1, 0])
    result = {
        "list": paths,
        "filename": labels
    }
    print(len(paths))
    print(len(labels))
    return result
def dogandcat():
    """Build the training tf.data pipeline from the local cat/dog folder."""
    json_train = train_test_get("C:/Users/mzy/Desktop/机器学习/data/train1")
    list_file = json_train["list"]
    list_filename = json_train["filename"]
    print(list_file)
    # Eagerly decode every image; acceptable for a small toy dataset.
    image_list = [image_deals(i) for i in list_file]
    dataest = tf.data.Dataset.from_tensor_slices((image_list, list_filename))
    # FIX: batch before prefetch so whole batches are prepared ahead of time;
    # the original prefetched single elements, which hides less latency.
    dataest = dataest.shuffle(buffer_size=300).repeat(count=10).batch(10).prefetch(tf.data.experimental.AUTOTUNE)
    print(dataest)
    return dataest
#dogandcat()
def dogandcat1():
    """Build the test tf.data pipeline from the local cat/dog test folder."""
    json_train = train_test_get("C:/Users/mzy/Desktop/机器学习/data/test1")
    list_file = json_train["list"]
    list_filename = json_train["filename"]
    print(list_file)
    # Eagerly decode every image; acceptable for a small toy dataset.
    image_list = [image_deals(i) for i in list_file]
    dataest = tf.data.Dataset.from_tensor_slices((image_list, list_filename))
    # FIX: batch before prefetch so whole batches are prepared ahead of time;
    # the original prefetched single elements, which hides less latency.
    dataest = dataest.shuffle(buffer_size=300).repeat(count=10).batch(10).prefetch(tf.data.experimental.AUTOTUNE)
    return dataest
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-01 21:37
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the ``pokes`` field that migration 0002 added to login.User."""

    dependencies = [
        ('login', '0002_user_pokes'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='pokes',
        ),
    ]
|
import numpy as np
import pandas as pd
# NOTE(review): names= assumes austin_crime.csv has no header row; if it does,
# the header becomes a data row — confirm against the file.
data = pd.read_csv('austin_crime.csv', names = ["address","census_tract","clearance_date","clearance_status","council_district_code","description","district","latitude","location","location_description","longitude","primary_type","timestamp","unique_key","x_coordinate","y_coordinate","year","zipcode"])
# Drop all unused columns in a single call instead of ten chained drops.
data = data.drop(["address", "census_tract", "location", "location_description",
                  "primary_type", "unique_key", "x_coordinate", "y_coordinate",
                  "year", "zipcode"], axis=1)
# Semicolon-separated output for downstream tooling.
data.to_csv('austin_crimes01.csv', index=False, sep=';', encoding='utf-8')
|
"""Core appconfig"""
from django.apps import AppConfig
class CoreConfig(AppConfig):
    """Django application configuration for the ``core`` app."""
    name = 'core'
|
estados = {"roraima","acre","amapa","amazonas","para","rondonia","tocantins"}
a = raw_input()
if a in estados:
print("Regiao Norte")
else:
print("Outra regiao")
|
import random
import pygame
from pygame.locals import *
from sys import exit
print("Welcome to your new house!")
print("To open the door press the 0 key")
print("To have the sun, press the 1 key")
print("To close the door and make it night time, press the 2 key")
print("To open the windows, press the 3 key")
print("To grow a tree, press the 4 key")
print("To make it rain, press the 5 key")
def drawHouse(background):
    """Draw the whole house scene onto *background*: body, roof, round roof
    window, tree, door, cloud-like puffs and the two front windows."""
    pygame.draw.rect(background, (232, 193, 116), (250,250,500,525))  # house body
    pygame.draw.polygon(background, (235, 145, 110), ((250,250), (450,0), (640, 250)))  # roof
    pygame.draw.circle(background, (174, 241, 245), (450,150), 55)  # round roof window
    pygame.draw.rect(background, (138, 91, 37),(100, 380, 30, 170))  # tree trunk
    pygame.draw.line(background, (0, 0, 0),(450, 100),(450,205),(3))  # roof-window cross, vertical
    pygame.draw.line(background, (0,0,0),(400,150),(500,150),(3))  # roof-window cross, horizontal
    pygame.draw.polygon(background, (80, 199, 78), ((70,380), (150,380), (140, 150)))  # tree foliage
    pygame.draw.rect(background, (224, 160, 49),(400, 380, 60, 170))  # front door (closed)
    pygame.draw.circle(background, (135, 99, 27), (413,417), 5)  # door knob
    pygame.draw.circle(background, (255, 255, 255), (140, 100), 25)  # white puffs (cloud)
    pygame.draw.circle(background, (255, 255, 255), (160, 120), 25)
    pygame.draw.circle(background, (255, 255, 255), (180, 100), 25)
    pygame.draw.circle(background, (255, 255, 255), (190, 120), 25)
    pygame.draw.circle(background, (255, 255, 255), (210, 100), 25)
    pygame.draw.rect(background, (255, 255, 255),(280, 310, 70, 70))  # left window frame
    pygame.draw.rect(background, (235, 145, 110),(280, 310, 70, 58))  # left shutter (closed)
    pygame.draw.rect(background, (255, 255, 255),(520, 310, 70, 70))  # right window frame
    pygame.draw.rect(background, (235, 145, 110),(520, 310, 70, 58))  # right shutter (closed)
def rain(background):
    """Scatter 50 diagonal rain streaks at random positions."""
    for _ in range(50):
        start_x = random.randint(1, 639)
        start_y = random.randint(1, 479)
        # Each drop is a short 45-degree line; lines running past the surface
        # edge are clipped by pygame.
        pygame.draw.line(background, (132, 161, 224), (start_x, start_y), (start_x + 30, start_y + 30), (3))
def openDoor(background):
    """Paint the doorway white and redraw the door swung to the right."""
    pygame.draw.rect(background, (255, 255, 255),(400, 380, 60, 170))  # empty doorway
    pygame.draw.rect(background, (224, 160, 49),(460, 380, 60, 170))  # door panel, moved right
    pygame.draw.circle(background, (135, 99, 27), (505,420), 5)  # door knob
def make_sun(background):
    """Fill the sky with the day colour and draw the sun.

    Clears the whole surface, so the caller must redraw the house afterwards.
    """
    background.fill(SKY_COLOR)
    pygame.draw.circle(background, (240, 207, 24), (50,30), 35)  # yellow sun, top-left
def moon(background):
    """Fill the sky with the night colour and draw the moon.

    Clears the whole surface, so the caller must redraw the house afterwards.
    """
    background.fill(NIGHT_COLOR)
    pygame.draw.circle(background, (255, 255, 255), (50,30), 35)  # white moon, top-left
def window(background):
    """Open both windows by shrinking each shutter to a 20px strip at the top."""
    pygame.draw.rect(background, (255, 255, 255),(280, 310, 70, 70))  # left frame cleared
    pygame.draw.rect(background, (235, 145, 110),(280, 310, 70, 20))  # left shutter, raised
    pygame.draw.rect(background, (255, 255, 255),(520, 310, 70, 70))  # right frame cleared
    pygame.draw.rect(background, (235, 145, 110),(520, 310, 70, 20))  # right shutter, raised
def growtree(background):
    """Draw an extra tree just to the right of the original one."""
    pygame.draw.polygon(background, (110, 199, 78), ((140,380), (220,380), (210, 150)))  # foliage
    pygame.draw.rect(background, (138, 91, 37),(170, 380, 30, 170))  # trunk
#add functions here
#color VARS
SKY_COLOR = (170, 226, 240)  # daytime background colour
NIGHT_COLOR = (45, 87, 179)  # night-time background colour
pygame.init()
# 640x480 window; every helper draws straight onto this surface.
background=pygame.display.set_mode((640, 480))
background.fill(SKY_COLOR)
drawHouse(background)
pygame.display.update()
# Main event loop: flags are re-derived from this frame's key events, then
# each requested effect is drawn and flipped to the screen.
while True:
    f_openDoor = False
    f_make_sun = False
    f_moon = False
    f_window = False
    f_growtree = False
    f_rain = False
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            # BUG FIX: ``exit`` was a bare name (a no-op); it must be called.
            exit()
        elif event.type == pygame.KEYDOWN:
            # User presses ESCAPE-Key
            if event.key == pygame.K_ESCAPE:
                # BUG FIX: the original assigned to an unused ``mainloop``
                # variable, so ESC never actually quit; exit explicitly.
                pygame.quit()
                exit()
            if event.key == pygame.K_0:
                f_openDoor = True
            if event.key == pygame.K_1:
                f_make_sun = True
            if event.key == pygame.K_2:
                f_moon = True
            if event.key == pygame.K_3:
                f_window = True
            if event.key == pygame.K_4:
                f_growtree = True
            if event.key == pygame.K_5:
                f_rain = True
    if f_openDoor:
        openDoor(background)
        pygame.display.update()
    if f_make_sun:
        # Sky fill wipes the scene, so the house is redrawn on top.
        make_sun(background)
        drawHouse(background)
        pygame.display.update()
    if f_moon:
        moon(background)
        drawHouse(background)
        pygame.display.update()
    if f_window:
        window(background)
        pygame.display.update()
    if f_growtree:
        growtree(background)
        pygame.display.update()
    if f_rain:
        rain(background)
        pygame.display.update()
|
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import numpy as np
from matplotlib import animation
fig = plt.figure()
ax = p3.Axes3D(fig)  # 3D axes for the trajectory animation
N = 200  # number of time samples / animation frames
t = np.linspace(0, 10, N)  # simulation time grid, 0..10
def diff_func(s, t):
    """Right-hand side of the constrained-motion ODE system for odeint.

    State ``s`` is (x, v_x, y, v_y, z, v_z); returns the six derivatives.
    Reads the module-level gravitational constant ``g``.
    """
    x, v_x, y, v_y, z, v_z = s
    # Lagrange multiplier enforcing the surface constraint.
    lagrange = 3 * g * z / (z**2 + x**2)
    return (
        v_x,                # dx/dt
        lagrange * x,       # dv_x/dt: constraint force along x
        v_y,                # dy/dt
        0,                  # dv_y/dt: no force along y
        v_z,                # dz/dt
        - g + lagrange * z, # dv_z/dt: gravity plus constraint force
    )
# Initial conditions: start at x=1 on the surface, moving along y.
x0 = 1
y0 = 0
z0 = 0
v_x0 = 0
v_y0 = 1
v_z0 = 0
g = 9.8  # gravitational acceleration, read by diff_func
#k = 1
R = 1  # radius of the supporting surface
s0 = x0, v_x0, y0, v_y0, z0, v_z0
sol = odeint(diff_func, s0, t)
# Ball marker and trailing dashed path; animation_func updates both per frame.
# NOTE(review): the z argument here is sol[:, 0] rather than sol[:, 4]; the
# animation immediately overwrites it, but confirm this was intentional.
ball, = ax.plot(sol[:, 0], sol[:, 2], sol[:, 0], 'o', color='r')
line, = ax.plot(sol[:, 0], sol[:, 2], sol[:, 0], '--', color='r')
def animation_func(i):
    """Move the ball to frame *i* and extend its trail up to that frame."""
    ball.set_data(sol[i, 0], sol[i, 2])
    ball.set_3d_properties(sol[i, 4])  # mpl 3D: z is set separately from x/y
    line.set_data(sol[:i, 0], sol[:i, 2])
    line.set_3d_properties(sol[:i, 4])
# draw the supporting surface (half-cylinder profile swept along y)
theta = np.linspace(-np.pi, 0, N)
x = R * np.outer(np.ones(np.size(t)), np.cos(theta))
y = np.outer(t, np.ones(np.size(theta)))
z = R * np.outer(np.ones(np.size(t)), np.sin(theta))
ax.plot_surface(x, y, z, color='g')
ax.set_xlim3d([-5, 5])
ax.set_ylim3d([0, 5])
ax.set_zlim3d([-5, 5])
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
# One frame per time sample, 50 ms apart; saved as an animated GIF.
ani = animation.FuncAnimation(fig, animation_func, N, interval=50)
ani.save('ani3.gif')
from flask import json
from snakeeyes.blueprints.User.model import Employee
from lib.tests import assert_result_is_dictionary
DUMMY_ID = 1000
class TestModel():
    """Smoke tests for the Employee model's tree-query helpers.

    DUMMY_ID need not exist in the database; each call must still return
    the documented container type.
    """

    def test_getall(self,client):
        result = Employee.getall()
        assert_result_is_dictionary(result,dict)

    def test_gettree(self,client):
        result = Employee.gettree(DUMMY_ID)
        assert_result_is_dictionary(result,dict)

    def test_getsubtree_joiningdateflag_notset(self,client):
        # Flag False: subtree without joining-date information.
        result = Employee.getsubtree(DUMMY_ID,False)
        assert_result_is_dictionary(result,dict)

    def test_getsubtree_joiningdateflag_set(self,client):
        # Flag True: subtree including joining-date information.
        result = Employee.getsubtree(DUMMY_ID,True)
        assert_result_is_dictionary(result,dict)

    def test_getancestorpath(self,client):
        # Ancestor path is a list, unlike the dict-returning queries above.
        result = Employee.getancestorpath(DUMMY_ID)
        assert_result_is_dictionary(result,list)
|
# multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
import traceback
import numpy as np
from game import Agent
_DEBUG = False  # verbose per-step evaluation tracing
ghost_weight = 1  # penalty weight on ghost proximity in ReflexAgent
capsule_weight = 1  # capsule weight (not referenced in the visible code)
score_weight = 0.1  # raw-score weight (not referenced in the visible code)
scared_weight = 10  # scared-ghost weight (not referenced in the visible code)
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function.

    The evaluation rewards proximity to food, penalises proximity to
    ghosts, and adds the successor's game score.
    """

    def getAction(self, gameState):
        """
        getAction chooses among the best options according to the evaluation function.

        Just like in the previous project, getAction takes a GameState and returns
        some Directions.X for some X in the set {North, South, West, East, Stop}
        """
        # Collect legal moves and score each resulting successor state.
        legalMoves = gameState.getLegalActions()
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices)  # Pick randomly among the best
        if _DEBUG:
            # FIX: debug leftover — the original printed unconditionally.
            print(chosenIndex)
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState, action):
        """
        Score the successor reached by *action*; higher numbers are better.

        Returns 1/(closest food distance) minus a ghost-proximity penalty
        plus the successor state's score. newScaredTimes holds the number of
        moves each ghost will remain scared after a power pellet.
        """
        # Useful information you can extract from a GameState (pacman.py)
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        newFood = successorGameState.getFood()
        newGhostStates = successorGameState.getGhostStates()
        newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]

        food_list = newFood.asList()
        ghost_list = [state.getPosition() for state in newGhostStates]
        if _DEBUG:
            print('\nTIMESTEP')
            print('--------\n')
            print("successor State:\n {}".format(successorGameState))
            print("new pos: {}".format(newPos))
            print("new Food: {}".format(newFood))
            print("Food: {}".format(food_list))
            print("new ghost states: {}".format(newGhostStates))
            print("ghost position: {}".format(ghost_list))
            print("new scared times: {}".format(newScaredTimes))

        # Distance to the nearest food; 1 when none remains so the
        # reciprocal term stays finite.
        food_distances = [float(util.manhattanDistance(food, newPos)) for food in food_list]
        if len(food_distances) > 0:
            closest_food = min(food_distances)
        else:
            closest_food = 1

        ghost_distances = [util.manhattanDistance(ghost, newPos) for ghost in ghost_list]
        closest_ghost = min(ghost_distances)
        if closest_ghost == 0:
            closest_ghost = 0.1  # avoid division by zero when touching a ghost

        if _DEBUG:
            # FIX: these diagnostics are only consumed here, and np.mean of an
            # empty list warns — so compute them in the debug branch only.
            avg_food_distance = np.mean(food_distances) if food_distances else 0.0
            food_sum = sum([1.0 / float(dist) for dist in food_distances])
            avg_ghost_distance = np.mean(ghost_distances)
            print("Food Distances: {}".format(food_distances))
            print("AVG Distances: {}".format(avg_food_distance))
            print("food sum: {}".format(food_sum))
            print("Ghost Distances: {}".format(ghost_distances))
            print("AVG Distances: {}".format(avg_ghost_distance))
            print("score: {}".format(successorGameState.getScore()))

        new_score = 1.0 / closest_food - ghost_weight / closest_ghost + successorGameState.getScore()
        return new_score
def scoreEvaluationFunction(currentGameState):
    """
    This default evaluation function just returns the score of the state.
    The score is the same one displayed in the Pacman GUI.

    This evaluation function is meant for use with adversarial search agents
    (not reflex agents).
    """
    return currentGameState.getScore()
class MultiAgentSearchAgent(Agent):
    """
    This class provides some common elements to all of your
    multi-agent searchers.  Any methods defined here will be available
    to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.

    You *do not* need to make any changes here, but you can if you want to
    add functionality to all your adversarial search agents.  Please do not
    remove anything, however.

    Note: this is an abstract class: one that should not be instantiated.  It's
    only partially specified, and designed to be extended.  Agent (game.py)
    is another abstract class.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        self.index = 0 # Pacman is always agent index 0
        # Resolve the evaluation function by name at construction time.
        self.evaluationFunction = util.lookup(evalFn, globals())
        # Search depth arrives as a string from the command line.
        self.depth = int(depth)
class MinimaxAgent(MultiAgentSearchAgent):
    """
    Your minimax agent (question 2)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action from the current gameState using self.depth
        and self.evaluationFunction.

        Here are some method calls that might be useful when implementing minimax.

        gameState.getLegalActions(agentIndex):
        Returns a list of legal actions for an agent
        agentIndex=0 means Pacman, ghosts are >= 1

        gameState.generateSuccessor(agentIndex, action):
        Returns the successor game state after an agent takes an action

        gameState.getNumAgents():
        Returns the total number of agents in the game
        """
        "*** YOUR CODE HERE ***"
        # Root max node: evaluate each of Pacman's moves with the first
        # ghost (agent 1) to move next at depth 0.
        legalActions = gameState.getLegalActions()
        bestAction = Directions.STOP
        bestScore = -(float("inf"))
        for action in legalActions:
            nextState = gameState.generateSuccessor(0, action)
            nextScore = self.getValue(nextState, 0, 1)
            if nextScore > bestScore:
                bestAction = action
                bestScore = nextScore
        return bestAction

    def maxValue(self, gameState, currentDepth):
        """Best score Pacman (agent 0) can force from this state."""
        # -inf sentinel keeps max() defined even with no legal actions.
        values = [float("-inf")]
        for action in gameState.getLegalActions(0):
            values.append(self.getValue(gameState.generateSuccessor(0, action), currentDepth, 1))
        return max(values)

    def minValue(self, gameState, currentDepth, agentIndex):
        """Worst score the ghosts can force; chains through each ghost in turn."""
        # +inf sentinel keeps min() defined even with no legal actions.
        values = [float("inf")]
        for action in gameState.getLegalActions(agentIndex):
            lastGhostIndex = gameState.getNumAgents() - 1
            if agentIndex == lastGhostIndex:
                # Last ghost moved: one full ply done, back to Pacman one level deeper.
                values.append(self.getValue(gameState.generateSuccessor(agentIndex, action), currentDepth+1, 0))
            else:
                values.append(self.getValue(gameState.generateSuccessor(agentIndex, action), currentDepth, agentIndex+1))
        return min(values)

    def getValue(self, gameState, currentDepth, agentIndex):
        """Dispatch to max (Pacman), min (ghosts), or evaluate a leaf."""
        if gameState.isWin() or gameState.isLose() or currentDepth == self.depth:
            return self.evaluationFunction(gameState)
        elif agentIndex == 0:
            return self.maxValue(gameState,currentDepth)
        else:
            return self.minValue(gameState, currentDepth, agentIndex)
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning (question 3)
    """

    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        # Root max node: alpha tracks the best score found so far so deeper
        # min nodes can prune branches that cannot improve on it.
        legalActions = gameState.getLegalActions()
        bestAction = Directions.STOP
        bestScore = -(float("inf"))
        alpha = -(float("inf"))
        beta = float("inf")
        for action in legalActions:
            nextState = gameState.generateSuccessor(0, action)
            nextScore = self.getValue(nextState, 0, 1, alpha, beta)
            if nextScore > bestScore:
                bestAction = action
                bestScore = nextScore
            alpha = max(alpha,bestScore)
        return bestAction

    def maxValue(self, gameState, alpha, beta, currentDepth):
        """Max node with pruning: stop once v exceeds beta."""
        #values = [float("-inf")]
        v = -(float("inf"))
        for action in gameState.getLegalActions(0):
            v = max(v,self.getValue(gameState.generateSuccessor(0, action), currentDepth, 1, alpha, beta))
            if v > beta:
                # The min ancestor will never allow this branch; cut off.
                return v
            alpha = max(alpha,v)
        return v
        #return max(values)

    def minValue(self, gameState, alpha, beta, currentDepth, agentIndex):
        """Min node with pruning: stop once v drops below alpha."""
        #values = [float("inf")]
        v = float("inf")
        for action in gameState.getLegalActions(agentIndex):
            lastGhostIndex = gameState.getNumAgents() - 1
            if agentIndex == lastGhostIndex:
                # Last ghost moved: one full ply done, back to Pacman one level deeper.
                v = min(v,self.getValue(gameState.generateSuccessor(agentIndex, action), currentDepth + 1, 0, alpha, beta))
            else:
                v = min(v,self.getValue(gameState.generateSuccessor(agentIndex, action), currentDepth, agentIndex + 1, alpha, beta))
            if v < alpha:
                # The max ancestor will never allow this branch; cut off.
                return v
            beta = min(beta,v)
        return v
        #return min(values)

    def getValue(self, gameState, currentDepth, agentIndex, alpha, beta):
        """Dispatch to max (Pacman), min (ghosts), or evaluate a leaf."""
        if gameState.isWin() or gameState.isLose() or currentDepth == self.depth:
            return self.evaluationFunction(gameState)
        elif agentIndex == 0:
            return self.maxValue(gameState, alpha, beta, currentDepth)
        else:
            return self.minValue(gameState, alpha, beta, currentDepth, agentIndex)
'''
def maxValue(gameState, alpha, beta, depth):
if gameState.isWin() or gameState.isLose() or depth == 0:
return self.evaluationFunction(gameState)
v = -(float("inf"))
legalActions = gameState.getLegalActions(0) # maxValue will only be used for Pacman, always use index 0
for action in legalActions:
nextState = gameState.generateSuccessor(0, action)
v = max(v, minValue(nextState, alpha, beta, 1, depth))
if v > beta:
return v
alpha = max(alpha, v)
return v
def minValue(gameState, alpha, beta, agentIndex, depth):
numberGhosts = gameState.getNumAgents()-1 # Gets the number of ghosts, assuming number of agents is pacman + ghosts
if gameState.isWin() or gameState.isLose() or depth == 0:
return self.evaluationFunction(gameState)
v = float("inf")
legalActions = gameState.getLegalActions(agentIndex)
for action in legalActions:
nextState = gameState.generateSuccessor(agentIndex, action)
if agentIndex == numberGhosts:
v = min(v, maxValue(nextState, alpha, beta, depth - 1)) # after all ghosts, reduce depth by 1
if v < alpha:
return v
beta = min(beta,v)
else:
v = min(v, minValue(nextState, alpha, beta, agentIndex + 1, depth))
if v < alpha:
return v
beta = min(beta,v)
return v
legalActions = gameState.getLegalActions(0)
bestAction = Directions.STOP
bestScore = -(float("inf"))
alpha = -(float("inf"))
beta = float("inf")
for action in legalActions:
nextState = gameState.generateSuccessor(0,action)
nextScore = max(bestScore,minValue(nextState,alpha,beta,1,self.depth))
if nextScore > bestScore:
bestAction = action
if nextScore >= beta:
return bestAction
alpha = max(alpha,bestScore)
return bestAction
'''
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Your expectimax agent (question 4)
    """

    def getAction(self, gameState):
        """Return the expectimax action for Pacman.

        Maximizes over Pacman's moves of the expected value under
        uniformly-random ghost moves, searched to self.depth plies and
        scored at the leaves with self.evaluationFunction.
        """
        choice, best = Directions.STOP, -(float("inf"))
        for move in gameState.getLegalActions():
            score = self.getValue(gameState.generateSuccessor(0, move), 0, 1)
            # strict '>' keeps the earliest best move on ties
            if score > best:
                choice, best = move, score
        return choice

    def maxValue(self, gameState, currentDepth):
        # Pacman maximizes; -inf when there are no legal moves.
        best = float("-inf")
        for move in gameState.getLegalActions(0):
            best = max(best, self.getValue(gameState.generateSuccessor(0, move), currentDepth, 1))
        return best

    def minValue(self, gameState, currentDepth, agentIndex):
        # Ghosts are modeled as moving uniformly at random, so a chance
        # node's value is the mean of its children's values.
        last = gameState.getNumAgents() - 1
        child_values = []
        for move in gameState.getLegalActions(agentIndex):
            child = gameState.generateSuccessor(agentIndex, move)
            if agentIndex == last:
                # last ghost hands back to Pacman one ply deeper
                child_values.append(self.getValue(child, currentDepth + 1, 0))
            else:
                child_values.append(self.getValue(child, currentDepth, agentIndex + 1))
        return float(sum(child_values)) / float(len(child_values))

    def getValue(self, gameState, currentDepth, agentIndex):
        # Evaluate at terminal states or at the depth limit; otherwise
        # max-node for Pacman, expectation node for ghosts.
        if gameState.isWin() or gameState.isLose() or currentDepth == self.depth:
            return self.evaluationFunction(gameState)
        if agentIndex == 0:
            return self.maxValue(gameState, currentDepth)
        return self.minValue(gameState, currentDepth, agentIndex)
# Tunable weights for the evaluation terms. The original body referenced
# these as undefined names (ghost_weight, capsule_weight, ...), which raised
# NameError the first time the function ran. Values chosen conservatively:
# tune against the autograder/game results.
GHOST_WEIGHT = 2.0
CAPSULE_WEIGHT = 1.0
SCORE_WEIGHT = 1.0
SCARED_WEIGHT = 5.0

def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).
    DESCRIPTION: Combines the reciprocal distance to the closest food and
    capsule, a penalty proportional to the reciprocal distance to the
    closest ghost, the current game score, and a bonus while any ghost is
    scared. Weights are the module-level constants above.
    """
    "*** YOUR CODE HERE ***"
    pos = currentGameState.getPacmanPosition()
    food_list = currentGameState.getFood().asList()
    capsules = currentGameState.getCapsules()
    ghost_states = currentGameState.getGhostStates()
    scared_times = [ghost.scaredTimer for ghost in ghost_states]

    # Closest food pellet; guard both the empty board and a distance of 0
    # (the original divided by this value, risking ZeroDivisionError).
    food_distances = [float(util.manhattanDistance(food, pos)) for food in food_list]
    closest_food = min(food_distances) if food_distances else 1
    if closest_food == 0:
        closest_food = 0.1

    # Closest ghost; 0 means Pacman shares a square with a ghost, clamp to
    # avoid dividing by zero (as the original did). Default 1 if no ghosts.
    ghost_distances = [util.manhattanDistance(ghost.getPosition(), pos)
                       for ghost in ghost_states]
    closest_ghost = min(ghost_distances) if ghost_distances else 1
    if closest_ghost == 0:
        closest_ghost = 0.1

    # Closest capsule; a large sentinel makes the capsule term negligible
    # when none remain (matches the original's intent).
    capsule_distances = [util.manhattanDistance(capsule, pos) for capsule in capsules]
    closest_capsule = min(capsule_distances) if capsule_distances else 1000

    # Flat bonus while at least one ghost is scared (safe to hunt).
    scared_score = 1 if scared_times and max(scared_times) > 0 else 0

    return (1.0 / closest_food
            - GHOST_WEIGHT / closest_ghost
            + CAPSULE_WEIGHT / closest_capsule
            + SCORE_WEIGHT * currentGameState.getScore()
            + SCARED_WEIGHT * scared_score)

# Abbreviation
better = betterEvaluationFunction
|
from django.db import models
from musica.settings import MEDIA_ROOT
from django.contrib.auth.models import User
class perfil(models.Model):
    """User profile with an avatar stored under MEDIA_ROOT/<username>."""

    def get_ruta(self, filename):
        # Callback for ImageField.upload_to: Django calls it as
        # (instance, filename).
        # BUG FIX: 'user' was an unqualified name, raising NameError the
        # first time a file was uploaded; the field lives on the instance.
        ruta_completa = "%s%s" % (MEDIA_ROOT, self.user.username)
        return ruta_completa

    user = models.OneToOneField(User, null=False, blank=False)
    foto = models.ImageField(upload_to=get_ruta)
class mp3(models.Model):
    """Per-user MP3 upload stored under MEDIA_ROOT/<username>."""

    def get_ruta(self, filename):
        # Callback for FileField.upload_to: Django calls it as
        # (instance, filename).
        # BUG FIX: 'user' was an unqualified name, raising NameError the
        # first time a file was uploaded; the field lives on the instance.
        ruta_completa = "%s%s" % (MEDIA_ROOT, self.user.username)
        return ruta_completa

    user = models.OneToOneField(User, null=False, blank=False)
    mp3 = models.FileField(upload_to=get_ruta)
|
import gzip
import json
import re
fname = "jawiki-country.json.gz"
def get_UK_article():
    """Return the wiki text of the "イギリス" (UK) article from the dump.

    Scans the gzipped JSON-lines dump one record at a time; raises
    ValueError if the article is not present.
    """
    with gzip.open(fname, "rt") as dump:
        for raw_line in dump:
            article = json.loads(raw_line)
            if article["title"] == "イギリス":
                return article["text"]
    raise ValueError("Not Found Article.")
# Verbose (re.X) pattern capturing every whole line that contains a
# MediaWiki [[Category:...]] link; re.M makes ^/$ match at each line.
# (The comments inside the pattern string are part of the regex source
# and are ignored by re.X.)
pattern = re.compile(
    r"""
    ^ # 行頭
    ( # キャプチャ対象のグループ開始
    .* # 任意の文字0文字以上
    \[\[Category:
    .* # 任意の文字0文字以上
    \]\]
    .* # 任意の文字0文字以上
    ) #グループ終了
    $ # 行末
    """,
    re.M + re.X,
)
# Print every category line found in the UK article.
result = pattern.findall(get_UK_article())
for line in result:
    print(line)
|
import db, import_file
PayrollRecord = import_file.import_file('PayrollRecord')
def getPayrollRecords():
    """Return every payroll row from the DB as a list of PayrollRecord objects.

    Each row is expected to have 19 integer-convertible columns.
    """
    res = db.List("PayrollRecord")
    records = []
    for row in res:
        if row is not None:
            # BUG FIX: the result used to be assigned to the name
            # 'PayrollRecord', shadowing the imported module and raising
            # UnboundLocalError on the first iteration. Also dropped a stray
            # 'row = cur.fetchone()' -- 'cur' was never defined and the for
            # loop already advances the cursor.
            record = PayrollRecord.PayrollRecord(*(int(row[i]) for i in range(19)))
            records.append(record)
    return records
def getPayrollRecord(val):
    """Return the PayrollRecord whose ID equals `val`, or None if absent.

    BUG FIX: the original assigned the result to the name 'PayrollRecord'
    (shadowing the imported module -> UnboundLocalError) and returned after
    the loop, crashing when the query produced no rows.
    """
    res = db.SubList("PayrollRecord", "ID", val)
    for row in res:
        if row is not None:
            # Return the first matching row (ID lookups yield at most one).
            return PayrollRecord.PayrollRecord(*(int(row[i]) for i in range(19)))
    return None
|
#!/usr/bin/env python
# Requires urllib: pip3 install urllib3
from http.server import BaseHTTPRequestHandler, HTTPServer
import requests
# HTTPRequestHandler class
class testHTTPServer_RequestHandler(BaseHTTPRequestHandler):
    """Minimal handler: print any POST body to stdout, reply 200 with an empty page."""

    def do_POST(self):
        # Content-Length tells us exactly how many body bytes to read.
        length = int(self.headers['Content-Length'])
        print(self.rfile.read(length).decode('utf-8'))
        # Always acknowledge with 200 and an empty HTML body.
        self.send_response(200)
        self.send_header('Content-Type','text/html')
        self.end_headers()
        self.wfile.write(bytes("", "utf8"))
        return
def run():
    """Bind an HTTPServer to port 8080 and serve until interrupted."""
    print('Starting skjeng-speedbox-httpserver on port 8080')
    address = ('', 8080)  # all interfaces, port 8080
    server = HTTPServer(address, testHTTPServer_RequestHandler)
    print('Server is running, waiting for POST')
    server.serve_forever()

run()
|
from prac_07.date import Date
def main():
    """Show a fixed starting date, add a user-supplied number of days, show the result."""
    print("Current Date: ")
    current = Date(20, 9, 2017)
    print(str(current))
    days = int(input("Enter the number of days you wish to add: "))
    print("In {} days the date will be".format(days))
    current.add_days(days)  # mutates the Date in place
    print(str(current))

main()
from lxml import etree
import os
from os.path import join
path = 'Real_Masters_all'
callnumbers = {}
filecount = 0
for filename in os.listdir(path):
filecount += 1
eachfile = 0
for filename in os.listdir(path):
eachfile += 1
tree = etree.parse(join(path, filename))
callnumber = tree.xpath('//archdesc/did/unitid')
for r in callnumber:
print '\rChecking call numbers in file number ' + str(eachfile) + ' of ' + str(filecount) + ': ' + filename,
callno = r.text.encode('utf-8')
callnumbers.setdefault(callno, []).append(filename)
print '\nChecking for duplicate call numbers...'
for callno in callnumbers:
if len(callnumbers[callno]) > 1:
print callno + ': ' + str(callnumbers[callno])
'''
dupes = set([x for x in callnumbers if callnumbers.count(x) > 1])
print '\nYou have ' + str(len(dupes)) + ' duplicate call numbers'
for filename in os.listdir(path):
tree = etree.parse(join(path, filename))
callnumber = tree.xpath('//archdesc/did/unitid')
for cn in callnumber:
if cn.text in dupes:
print filename + ' has duplicate call number ' + cn.text
'''
|
def main():
    """Entry point: uncomment the exercise you want to run."""
    # problem1()
    # problem2()
    # BUG FIX: the body contained only comments, which is a SyntaxError in
    # Python (a def needs at least one statement).
    pass
def problem1():
    """Collect favorite Pokemon names until the user types 'quit' (any case),
    then print the collected list.

    BUG FIX: the original appended the initial empty string to the list
    before the first prompt, so the result always started with ''.
    """
    nameList = []
    while True:
        userInput = input("What are your favorite Pokeman? If you dont like Pokeman or just dont have anymore enter 'quit\n'")
        if userInput.lower() == "quit":
            break
        nameList.append(userInput)
    print(nameList)

# * Create a function that has a loop that quits with ```q```
# * Allow the User to enter names until ```q``` is entered
# * Add each name entered to a List
# * When the User enters ```q``` print the list of names
#
# ADDITIONAL REQUIREMENTS:
# * Your code should be able to process the quit command (q) the User enters regardless of case
def problem2():
    """Print a list of name/age dicts, then repeatedly prompt for a sort key
    ('age' or 'name', any case) and print the sorted list, until 'q'.

    BUG FIX: the original did not parse at all (stray fragments '(e)' and
    'returnllen(e)', a 'def getSortkey' with no colon or body) and its
    prompt string was assigned to userInput without ever calling input().
    Reconstructed from the requirement comments below.
    """
    myDictionaryList = [
        {
            "name": "Kelvin",
            "age": 30
        },
        {
            "name": "Bob",
            "age": 50
        },
        {
            "name": "Alex",
            "age": 21
        }
    ]
    # 1. Print the formatted list once up front.
    for element in myDictionaryList:
        print(element)
    # 2./3. Keep prompting for a sort criteria until the user quits.
    while True:
        userInput = input("Enter 'age' or 'name' to sort the list, or 'q' to quit\n").lower()
        if userInput == "q":
            break
        if userInput in ("age", "name"):
            # Sort in place by the chosen dictionary key, then reprint.
            myDictionaryList.sort(key=lambda entry: entry[userInput])
            for element in myDictionaryList:
                print(element)
        else:
            print("INVALID ENTRY. PLEASE TRY AGAIN")

# 1. Prints a formatted list of names and ages
# 2. Prompts the User for which property they want to use to sort the list (e.g. ```AGE``` or ```NAME```).
#    Print the formatted list of names and ages sorted by the specified sort criteria.
#
# 3. Continue prompting the User for the sort criteria and print a sorted list until the User enters ```q``` then exit.
#
# ADDITIONAL REQUIREMENTS:
# * Your code should be able to process the sort criteria the User enters regardless of case
# * Your code should be able to process the quit command (q) the User enters regardless of case
# * If the User enters something other than ```q``` or a valid sort criteria (e.g. ```AGE``` or ```NAME```)
#   your code should display ```INVALID ENTRY. PLEASE TRY AGAIN``` and continue the process.
# Script entry point.
if __name__ == '__main__':
    main()
from utils import *
from function2 import *
from function3 import *
def reduce_puzzle(values):
    """Repeatedly apply eliminate() and only_choice() until no progress.

    Input: a sudoku in dictionary form (box -> candidate string).
    Output: the reduced sudoku dictionary, or False if any box ends up
    with zero candidates (contradiction).
    """
    while True:
        # Number of solved boxes before this pass.
        solved_before = sum(1 for box in values if len(values[box]) == 1)
        values = eliminate(values)
        values = only_choice(values)
        # Number of solved boxes after this pass.
        solved_after = sum(1 for box in values if len(values[box]) == 1)
        # Contradiction: a box with no candidates left.
        if any(len(values[box]) == 0 for box in values):
            return False
        # No progress this pass -> stalled; hand back what we have.
        if solved_before == solved_after:
            return values
|
from .litmus_database import Litmus
from .color_space import CVC
def search_main(word):
    """Dispatch a search query.

    '#xxxxxx' -> hex color search, '$cell' -> cell search, anything else ->
    name search. Queries of two characters or fewer return no results.
    """
    if len(word) <= 2:  # too short to search
        return {}
    prefix, rest = word[0], word[1:]
    if prefix == '#':
        # hex code: '#' followed by six hex digits
        if is_hexa(rest):
            return search_by_hexa(word, radius=0.1)
        return {}
    if prefix == '$':
        if rest in Litmus.cell.keys():
            return search_by_cell(rest)
        return {}
    return search_by_name(word)
def search_info(my_id):
    """Build the detail view for one litmus entry.

    Combines identical/neighbor color matches, the color thesaurus
    (complementary/shade/tint) and the supernova list.
    """
    entry = Litmus.db[my_id]
    hexa = entry['hexa']
    cell = entry['cell']  # kept from the original (unused here)
    result = search_by_hexa(hexa, radius=0.1)
    # The entry always matches itself: relabel it inside its own results.
    for hit in result['identical']['list']:
        if hit['id'] == my_id:
            hit['case'] = 'self'
    result.update(get_thesaurus(my_id))
    result.update({'supernova':{'count':len(Litmus.supernova), 'list':Litmus.supernova}})
    return result
def search_by_hexa(hexa, radius):
    """Find DB colors identical to / near the given hex color.

    A candidate must first fall inside the per-channel `radius` box; its
    euclidean RGB distance then classifies it as identical (< 1e-4) or
    neighbor (< radius). Returns {} when nothing matches.
    """
    target = CVC.hexa_rgb(hexa)
    identical, neighbor = [], []
    for litmus in Litmus.db:
        candidate = litmus['rgb']
        delta = tuple(abs(candidate[i] - target[i]) for i in range(0, 3))
        if delta[0] < radius and delta[1] < radius and delta[2] < radius:
            distance = (delta[0]**2 + delta[1]**2 + delta[2]**2) ** 0.5
            if distance < 0.0001:
                identical.append({'id': litmus['id'], 'case':'identical', 'distance':distance,'litmus':litmus})
            elif distance < radius:
                neighbor.append({'id': litmus['id'], 'case':'neighbor', 'distance':distance,'litmus':litmus})
    if not (identical or neighbor):
        return {}
    # identical hits sort by name, neighbors by closeness
    identical.sort(key=lambda hit: hit['litmus']['name'])
    neighbor.sort(key=lambda hit: hit['distance'])
    return {'identical':{'count':len(identical), 'list':identical},
            'neighbor':{'count':len(neighbor), 'list':neighbor}}
def search_by_name(word):
    """Case-insensitive substring search over litmus names.

    Returns {'match': {...}} sorted by name, or {} if nothing matched.
    """
    hits = [{'id': litmus['id'], 'case':'match', 'litmus':litmus}
            for litmus in Litmus.db
            if word.lower() in litmus['name'].lower()]
    if not hits:
        return {}
    hits.sort(key=lambda hit: hit['litmus']['name'])
    return {'match':{'count':len(hits), 'list':hits}}
def get_thesaurus(litmus_id) :
    """Build complementary / shade / tint suggestions for one litmus color."""
    thesaurus = {}
    litmus = Litmus.db[litmus_id]
    rgb = litmus['rgb']
    room = litmus['cell']
    # The cell code appears to be one digit per channel (e.g. '345' ->
    # r='3', g='4', b='5') -- TODO confirm against color_space.CVC.
    r, g, b = room[0:1], room[1:2], room[2:3]
    # Overall lightness level derived from the three digits.
    level = int( (int(r) + int(g) + int(b)) / 2 )
    complementary = []
    # Additive (RGB) complement: invert each channel.
    additive = (1-rgb[0], 1-rgb[1], 1-rgb[2])
    new_id = find_nearest(additive, 0.1)
    new_litmus = Litmus.db[new_id]
    complementary.append({'id': new_id, 'case':'complementary', 'litmus':new_litmus })
    # Subtractive complement via CMYK, folding K back into the channels.
    CMYK = CVC.rgb_CMYK(rgb)
    subtractive = ( CMYK[0]*(1-CMYK[3]), CMYK[1]*(1-CMYK[3]), CMYK[2]*(1-CMYK[3]) )
    new_id = find_nearest(subtractive, 0.1)
    new_litmus = Litmus.db[new_id]
    complementary.append({'id': new_id, 'case':'complementary', 'litmus':new_litmus })
    thesaurus.update({'complementary':{'count':len(complementary), 'list':complementary}})
    # Shades: progressively darker variants (scale channels toward 0),
    # only for colors light enough (level > 2).
    shade = []
    if level > 2 :
        for i in range (1, level-1) :
            new_rgb = ( rgb[0]*i/level, rgb[1]*i/level, rgb[2]*i/level )
            new_id = find_nearest(new_rgb, 0.1)
            new_litmus = Litmus.db[new_id]
            shade.append({'id': new_id, 'case':'shade', 'litmus':new_litmus })
    if shade :
        thesaurus.update({'shade':{'count':len(shade), 'list':shade}})
    # Tints: progressively lighter variants (scale channels toward 1),
    # only for colors dark enough (level < 7).
    tint = []
    if level < 7 :
        for i in range (1, 12 - level) :
            new_rgb = ( 1-(1-rgb[0])*i/(12-level), 1-(1-rgb[1])*i/(12-level), 1-(1-rgb[2])*i/(12-level) )
            new_id = find_nearest(new_rgb, 0.1)
            new_litmus = Litmus.db[int(new_id)]
            tint.append({'id': new_id, 'case':'tint', 'litmus':new_litmus })
    if tint :
        thesaurus.update({'tint':{'count':len(tint), 'list':tint}})
    return thesaurus
def find_nearest(rgb, radius) :
    """Return the id of the DB color closest to `rgb`.

    Only candidates inside the per-channel `radius` box are considered;
    returns 0 when no candidate is closer than 1.0 (euclidean).
    """
    best_distance = 1.0
    best_id = 0
    for litmus in Litmus.db:
        candidate = litmus['rgb']
        delta = tuple(abs(candidate[i] - rgb[i]) for i in range(0, 3))
        if delta[0] < radius and delta[1] < radius and delta[2] < radius:
            distance = (delta[0]**2 + delta[1]**2 + delta[2]**2) ** 0.5
            if distance < best_distance :
                best_distance = distance
                best_id = litmus['id']
    return best_id
def is_hexa(tag):
    """Return '#' + tag when `tag` is exactly six hex digits, else False."""
    if len(tag) != 6:
        return False
    try:
        int(tag, 16)  # parses iff every character is a hex digit
    except ValueError:
        return False
    return '#'+ tag
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
通过关键字搜索 rarbg.is 发布的资源,并获取第一个搜索结果的磁力链接,
再打开 Mac 上的 Transmission 添加到下载列表中。
如果没有搜索结果,一段时间后再请求搜索,直到添加下载后退出执行。
如果需要验证浏览器,获取新的Cookies再请求
"""
__author__ = 'LGX95'
import logging
import os
import random
import re
import subprocess
import time
import urllib.parse
from datetime import datetime
from io import BytesIO
import requests
import pytesseract
from bs4 import BeautifulSoup
from PIL import Image
from requests import RequestException
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from headers import headers
# Log INFO and above to rarbg_dl.log, truncating the file on each run.
logging.basicConfig(
    level=logging.INFO,
    format = '%(asctime)s %(filename)s : %(levelname)s %(message)s',
    filename = 'rarbg_dl.log',
    filemode = 'w'
)
SCHEME = 'https'
HOST = 'rarbg.is'
home_path = 'torrents.php'
query = 'the big bang theory'  # default keyword; overwritten by user input in __main__
# Text shown on the site's browser-verification interstitial page.
# NOTE(review): the dots are unescaped and match any character --
# presumably intended as literal text; verify.
verify_pattern = re.compile(r'Please wait while we try to verify your browser...')
# NOTE(review): a trailing lazy '.*?' matches zero characters, so this is
# effectively just 'magnet:'; sufficient for bs4 href matching, but verify.
magnet_pattern = re.compile(r'magnet:.*?')
file = 'test.html'  # debug dump of the last results page (shadows a Py2 builtin name)
def get_cookie_string():
    """Obtain fresh cookies with Selenium and return them as one header string.

    Drives a headless browser through the site's verification flow
    (click-through page, then a captcha solved via pytesseract OCR) and
    flattens the resulting cookies into a 'name=value; ...' string.
    """
    url = urllib.parse.urlunparse([SCHEME, HOST, home_path, '', '', ''])
    # Create a new Selenium WebDriver (headless PhantomJS).
    driver = webdriver.PhantomJS(executable_path='/usr/local/bin/phantomjs')
    # driver = webdriver.Firefox()
    # driver = webdriver.Chrome()
    driver.get(url)
    # Create a WebDriverWait object (30 s timeout).
    wait = WebDriverWait(driver, 30)
    try:
        # Wait for the click-through interstitial page.
        click_here = wait.until(EC.presence_of_element_located((By.TAG_NAME, 'a')))
        # Pause a few seconds before clicking through.
        time.sleep(random.randint(2, 5))
        click_here.click()
        # Wait for the captcha form to appear.
        wait.until(EC.presence_of_element_located((By.TAG_NAME, 'form')))
        # Fetch the captcha image.
        img_elem = driver.find_element_by_xpath('//table[@width="100%"]/tbody/tr/td/img')
        img_src = img_elem.get_attribute('src')
        img_rsp = requests.get(img_src)
        im = Image.open(BytesIO(img_rsp.content))
        # im = Image.open(BytesIO(img_elem.screenshot_as_png))
        # OCR the captcha.
        solve_string = pytesseract.image_to_string(im)
        # NOTE: 'input' shadows the builtin here (local to this function).
        input = driver.find_element_by_id('solve_string')
        # Type the captcha answer and press Enter.
        input.send_keys(solve_string)
        input.send_keys(Keys.ENTER)
        # Wait for the site's result listing to load.
        wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '.lista2t')))
        time.sleep(random.randint(5, 10))
        # Collect the cookies.
        cookies = driver.get_cookies()
    finally:
        driver.quit()
    cookie_string = ''
    # Flatten the cookie objects into a single header string.
    for item in cookies:
        if item['name'] != '':
            cookie_string += (item['name'] + '=' + item['value'] + '; ')
        else:
            cookie_string += (item['value'] + '; ')
    return cookie_string.strip()
def reset_cookies():
    """Fetch a fresh cookie via Selenium and rewrite the Cookie entry in headers.py."""
    cookie_string = get_cookie_string()
    # Read the current module source...
    with open('headers.py', 'r') as f:
        source = f.read()
    # ...and write it back with the Cookie header replaced in place.
    with open('headers.py', 'w') as f:
        replacement = "'Cookie': '" + cookie_string + "'"
        f.write(re.sub(r"'Cookie': '.*?'", replacement, source))
def get_response(url, headers=headers, timeout=30):
    """GET `url`, transparently refreshing cookies on the verification page.

    Args:
        url: URL to request.
        headers: request headers (defaults include User-Agent and Cookie).
        timeout: request timeout in seconds.
    Returns:
        The Response on HTTP 200, or None on other statuses / request errors.
    """
    try:
        response = requests.get(url, headers=headers, timeout=timeout)
    except RequestException:
        return None
    if response.status_code != 200:
        return None
    if re.search(verify_pattern, response.text):
        # BUG FIX: the retry's result used to be discarded and the stale
        # verification page returned to the caller; propagate the retry.
        reset_cookies()
        # NOTE(review): the in-memory `headers` default is not reloaded
        # after headers.py is rewritten -- confirm the new cookie actually
        # reaches this retry.
        return get_response(url)
    return response
def search(query):
    """Search rarbg for `query` and return the response.

    URL-encodes the keyword, builds the torrents-page URL, and GETs it.
    """
    encoded = 'search=' + urllib.parse.quote(query)
    target = urllib.parse.urlunparse([SCHEME, HOST, home_path, '', encoded, ''])
    return get_response(target)
def get_soup(response):
    """Parse the response body into a BeautifulSoup document (lxml parser)."""
    document = BeautifulSoup(response.content, 'lxml')
    return document
def get_detail_url(soup):
    """Extract the first search result's detail-page URL.

    Args:
        soup: BeautifulSoup of a search-results page.
    Returns:
        The absolute detail URL, or None when there were no results.
    """
    # Table holding every result row.
    table = soup.find(attrs={'class': 'lista2t'})
    # First result row; absent when the search matched nothing.
    row = table.find('tr', attrs={'class': 'lista2'})
    if row is None:
        return None
    # Anchor linking to the torrent detail page.
    anchor = row.find('a', href=re.compile(r'/torrent/.*'))
    detail_path = anchor['href']
    print(anchor.text)
    return urllib.parse.urlunparse([SCHEME, HOST, detail_path, '', '', ''])
def get_magnet_link(url):
    """Fetch a torrent detail page and return its magnet link.

    Args:
        url: URL of the torrent detail page.
    Returns:
        The magnet link string.
    """
    # BUG FIX: the body used the global name 'detail_url' instead of the
    # 'url' parameter, so the function only worked by accident when called
    # from the __main__ loop.
    # Retry until the page is fetched successfully.
    while True:
        detail_response = get_response(url)
        if detail_response is not None:
            break
    detail_soup = get_soup(detail_response)
    # Anchor whose href matches the magnet-link pattern.
    magnet_a = detail_soup.find('a', href=magnet_pattern)
    return magnet_a['href']
def log(string):
    """Shorthand for logging.info."""
    logging.info(string)
if __name__ == '__main__':
    print(datetime.now(), 'Program start...')
    logging.info('Program start...')
    print('Input the keyword: ')
    query = input()
    while True:
        log('Start request...')
        result_response = search(query)
        # If the request failed, pause 5 seconds and retry.
        if result_response is None:
            # print('Retry...')
            time.sleep(5)
            continue
        result_soup = get_soup(result_response)
        # Dump the results page to a file for easier debugging.
        with open(file, 'w') as f:
            f.write(result_soup.prettify())
        detail_url = get_detail_url(result_soup)
        # No results yet: wait 30 minutes, then search again.
        if detail_url is None:
            log('No result, Waiting for retry...')
            time.sleep(1800)
            continue
        else: # Got the first result: show its detail-page URL.
            print('detail_url:', detail_url)
        # Extract the magnet link from the detail page.
        magnet_link = get_magnet_link(detail_url)
        print(magnet_link)
        # Open Transmission and add the magnet link to its download list.
        command = 'open -a /Applications/Transmission.app ' + '"' + magnet_link + '"'
        os.system(command)
        break
|
"""
Code shared by LocalCKAN, RemoteCKAN and TestCKAN
"""
import json
from ckanapi.errors import (CKANAPIError, NotAuthorized, NotFound,
ValidationError, SearchQueryError, SearchError, SearchIndexError,
ServerIncompatibleError)
class ActionShortcut(object):
    """
    ActionShortcut(foo).bar(baz=2) <=> foo.call_action('bar', {'baz':2})

    Exposed as the .action attribute of LocalCKAN and RemoteCKAN so that
    actions can be invoked as methods, e.g.::

        pkg = demo.action.package_show(id='adur_district_spending')

    instead of::

        pkg = demo.call_action('package_show', {'id':'adur_district_spending'})

    Keyword arguments that are file-like (have a 'read' attribute, or are
    requests-style upload tuples) are separated out and passed to
    call_action as file uploads.
    """
    def __init__(self, ckan):
        self._ckan = ckan

    def __getattr__(self, name):
        # Attribute access produces a closure that forwards to call_action.
        def action(**kwargs):
            uploads = {key: value for key, value in kwargs.items()
                       if is_file_like(value)}
            if uploads:
                plain = {key: value for key, value in kwargs.items()
                         if key not in uploads}
                return self._ckan.call_action(name,
                    data_dict=plain,
                    files=uploads)
            return self._ckan.call_action(name, data_dict=kwargs)
        return action
def is_file_like(v):
    """
    Return True if this object is file-like or is a tuple in a format
    that the requests library would accept for uploading.
    """
    # see http://docs.python-requests.org/en/latest/user/quickstart/#more-complicated-post-requests
    if hasattr(v, 'read'):
        return True
    return isinstance(v, tuple) and len(v) >= 2 and hasattr(v[1], 'read')
def prepare_action(action, data_dict=None, apikey=None, files=None):
    """
    Return action_url, data_json, http_headers
    """
    data_dict = data_dict or {}
    headers = {}
    if files:
        # Multipart upload: every parameter must be a flat string (the
        # request is sent as multipart/form-data, so no nesting); None
        # values are dropped, assuming missing behaves the same as None.
        converted = {}
        for key, value in data_dict.items():
            if value is None:
                continue
            if isinstance(value, (int, float)):
                value = str(value)
            converted[key.encode('utf-8')] = value.encode('utf-8')
        data_dict = converted
    else:
        data_dict = json.dumps(data_dict).encode('ascii')
        headers['Content-Type'] = 'application/json'
    if apikey:
        apikey = str(apikey)
        headers['X-CKAN-API-Key'] = apikey
        headers['Authorization'] = apikey
    return 'api/action/' + action, data_dict, headers
def reverse_apicontroller_action(url, status, response):
    """
    Make an API call look like a direct action call by reversing the
    exception -> HTTP response translation that ApiController.action does
    """
    err = {}
    try:
        parsed = json.loads(response)
        if parsed.get('success'):
            return parsed['result']
        if hasattr(parsed, 'get'):
            err = parsed.get('error', {})
    except (AttributeError, ValueError):
        err = {}
    if not isinstance(err, dict): # possibly a Socrata API.
        raise ServerIncompatibleError(repr([url, status, response]))
    etype = err.get('__type')
    emessage = err.get('message', '')
    if hasattr(emessage, 'split'):
        # strip the leading "SomeError: " prefix from the message
        emessage = emessage.split(': ', 1)[-1]
    # Map the error type string back onto the client-side exception.
    # (The messages are never eval'd, even where that would be more correct.)
    if etype == 'Validation Error':
        raise ValidationError(err)
    if etype == 'Authorization Error':
        raise NotAuthorized(err)
    if etype == 'Not Found Error':
        raise NotFound(emessage)
    if etype == 'Search Query Error':
        raise SearchQueryError(emessage)
    if etype == 'Search Error':
        raise SearchError(emessage)
    if etype == 'Search Index Error':
        raise SearchIndexError(emessage)
    # unrecognized error shape
    raise CKANAPIError(repr([url, status, response]))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.