text stringlengths 38 1.54M |
|---|
import os
from keras.layers import Dense,Input,LSTM,Bidirectional,Activation,Conv1D,GRU
from keras.callbacks import Callback
from keras.layers import Dropout,Embedding,GlobalMaxPooling1D, MaxPooling1D, Add, Flatten
from keras.preprocessing import text, sequence
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D
from keras import initializers, regularizers, constraints, optimizers, layers, callbacks
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
import numpy as np
import sys
# insert at 1, 0 is the script path (or '' in REPL)
sys.path.insert(1, '../')
from utils.Embedding import get_embeddings
from utils.data_prep import data_clean
# Paths to the raw privacy-policy text and its label spreadsheet
# (Google-Drive mount layout, as used from Colab).
path_to_data = "../../drive/My Drive/dataset/dataset/data.txt"
path_to_label = "../../drive/My Drive/dataset/dataset/labels.xlsx"
# Load and clean the dataset into a single DataFrame.
df = data_clean(path_to_data, path_to_label)
# df.head()
# Pre-trained 300-d GloVe vectors used to initialise the embedding layer.
path_to_embedding = "../../drive/My Drive/glove.6B/glove.6B.300d.txt"
embedding_name = "glove"
embeddings_index = get_embeddings(embedding_name, path_to_embedding)
# Drop rows whose policy text is empty.
df["len"] = df["Privacy_Policies"].apply(lambda x: len(x))
df.drop(df[df["len"] == 0].index, inplace=True)
# X: raw policy texts; y: multi-label target matrix (the remaining columns;
# downstream code assumes 9 label columns -- TODO confirm against labels.xlsx).
X, y = list(df["Privacy_Policies"]), df.drop(["Privacy_Policies", "Policy #", "len"], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
del X, y
class RocAucEvaluation(Callback):
    """Keras callback that prints the validation ROC-AUC every
    `interval` epochs.

    Parameters
    ----------
    validation_data: tuple
        (X_val, y_val) pair evaluated at the end of each interval.
    interval: int
        Evaluate every `interval`-th epoch.
    """

    def __init__(self, validation_data=(), interval=1):
        # BUG FIX: the original called super(Callback, self).__init__(),
        # which skips Callback's own initialiser entirely; name the
        # subclass (or use bare super()) so the MRO is walked correctly.
        super(RocAucEvaluation, self).__init__()
        self.interval = interval
        self.X_val, self.y_val = validation_data

    def on_epoch_end(self, epoch, logs=None):
        # Mutable-default fix: Keras passes `logs` positionally, so a
        # None default is backward-compatible.
        if epoch % self.interval == 0:
            y_pred = self.model.predict(self.X_val, verbose=0)
            score = roc_auc_score(self.y_val, y_pred)
            print("\n ROC-AUC - epoch: {:d} - score: {:.6f}".format(epoch + 1, score))
# Vocabulary cap, padded sequence length, and GloVe vector size.
max_features = 100000
maxlen = 8000
embed_size = 300
# Fit the tokenizer on training texts only (no leakage from the test split).
tokenizer = text.Tokenizer(num_words=max_features, lower=True)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
vocab_size = len(tokenizer.word_index) + 1
maxlen = 8000
# Pad/truncate every sequence to maxlen tokens (zeros appended at the end).
X_train = sequence.pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, padding='post', maxlen=maxlen)
word_index = tokenizer.word_index
# prepare embedding matrix
num_words = min(max_features, len(word_index) + 1)
embedding_matrix = np.zeros((num_words, embed_size))
for word, i in word_index.items():
    if i >= max_features:
        continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# BiGRU + Conv1D classifier over frozen GloVe embeddings.
sequence_input = Input(shape=(maxlen, ))
x = Embedding(len(embedding_matrix), embed_size, weights=[embedding_matrix], trainable=False)(sequence_input)
x = SpatialDropout1D(0.2)(x)
x = Bidirectional(GRU(128, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))(x)
x = Conv1D(64, kernel_size=3, padding="valid", kernel_initializer="glorot_uniform")(x)
# Concatenate average- and max-pooled features over the time axis.
avg_pool = GlobalAveragePooling1D()(x)
max_pool = GlobalMaxPooling1D()(x)
x = concatenate([avg_pool, max_pool])
# x = Dense(128, activation='relu')(x)
# x = Dropout(0.1)(x)
# 9 independent sigmoid outputs: one per label (multi-label setup), hence
# binary cross-entropy rather than categorical.
preds = Dense(9, activation="sigmoid")(x)
model = Model(sequence_input, preds)
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=1e-3), metrics=['accuracy'])
batch_size = 16
epochs = 4
# Hold out 10% of the training data for validation/checkpointing.
X_tra, X_val, y_tra, y_val = train_test_split(X_train, y_train, train_size=0.9, random_state=233)
filepath = "/content/drive/My Drive/dataset/weights_base.best.hdf5"
# Keep only the best weights by validation accuracy; stop after 5 stale epochs.
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=5)
ra_val = RocAucEvaluation(validation_data=(X_val, y_val), interval=1)
callbacks_list = [ra_val, checkpoint, early]
model.fit(X_tra, y_tra, batch_size=batch_size, epochs=epochs, validation_data=(X_val, y_val), callbacks=callbacks_list, verbose=1)
# Restore the best checkpoint before predicting on the held-out test set.
model.load_weights(filepath)
print('Predicting....')
y_pred = model.predict(X_test, batch_size=32, verbose=1)
from sklearn.metrics import roc_curve
from sklearn.metrics import auc
# y_pred_keras = keras_model.predict(X_test).ravel()
# Compute a ROC curve and AUC separately for each of the 9 labels.
fpl = []
tpl = []
aucl = []
for i in range(9):
    fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test[:, i], y_pred[:, i])
    auc_keras = auc(fpr_keras, tpr_keras)
    fpl.append(fpr_keras)
    tpl.append(tpr_keras)
    aucl.append(auc_keras)
# fpr_keras = sum(fpl) #/len(fpl)
# tpr_keras = sum(tpl)/len(tpl)
# auc_keras = sum(aucl)/len(aucl)
# Plot each class's curve (all drawn into the same figure 1; show() is
# called once per class, so curves accumulate across iterations).
for i in range(9):
    fpr_keras, tpr_keras, auc_keras = fpl[i], tpl[i], aucl[i]
    from matplotlib import pyplot as plt
    plt.figure(1)
    plt.plot([0, 1], [0, 1], 'k--')
    plt.plot(fpr_keras, tpr_keras, label='Keras (area = {:.3f})'.format(auc_keras))
    # plt.plot(fpr_rf, tpr_rf, label='RF (area = {:.3f})'.format(auc_rf))
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve')
    plt.legend(loc='best')
    plt.show()
import numpy as np
# NOTE(review): scipy.interp is deprecated; numpy.interp is the modern
# equivalent -- confirm scipy version before upgrading.
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn.metrics import roc_curve, auc
n_classes = 9
lw = 2
y_score = y_pred
# Per-class ROC curves keyed by class index.
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Micro-average: pool every (label, score) decision across all classes.
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# Macro-average: interpolate each class's TPR onto a shared FPR grid, then
# average the interpolated curves.
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Draw micro/macro averages plus the individual class curves.
plt.figure(1)
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue', "red", "green", "yellow", "orange", "purple", "brown"])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw, alpha=0.3,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
from mrjob.job import MRJob
import re
class MRImageCount(MRJob):
    """MapReduce job: count GET requests for image files (png/jpg/gif)
    found in web-server log lines."""

    def mapper(self, _, line):
        # Pull the "GET ... HTTP" portion of the log line, if any.
        request = re.search(r"GET[A-Za-z_\/. 0-9]*HTTP", line)
        if request is None:
            return
        request_text = request.group().lower()
        for extension in {"png", "jpg", "gif"}:
            if extension in request_text:
                yield (extension, 1)

    def reducer(self, ext, occurances):
        # Total the per-line counts for each extension.
        yield (ext, sum(occurances))
if __name__ == '__main__':
    # mrjob entry point: parses CLI args and runs the job (inline or on Hadoop).
    MRImageCount.run()
#!/usr/bin/env python
# coding:utf-8
# @Time :11/24/18 10:45
import json
import sys
import time
import click
sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from lxml import html
from tqdm import tqdm
import requests
from common.logger import AppLogger
from configs.mongo_config import LocalMongoConfig
from common.mongo import MongDb
from www_douban_com.handler import DouBanInfoHandler
# Shared application logger writing to douban.log.
logger = AppLogger('douban.log').get_logger()
# Discussion-list URL templates for the rental groups to crawl; '{}' is the
# pagination offset (the crawler steps it by 25 posts per page).
init_urls = [
    "https://www.douban.com/group/nanshanzufang/discussion?start={}",
    "https://www.douban.com/group/498004/discussion?start={}",
    "https://www.douban.com/group/106955/discussion?start={}",
    "https://www.douban.com/group/szsh/discussion?start={}",
    "https://www.douban.com/group/551176/discussion?start={}",
    "https://www.douban.com/group/SZhouse/discussion?start={}"
]
class DoubanCrawl(object):
    """Crawl the discussion lists of several Douban rental groups and store
    each post (title, author, comment count, date, extracted fields) into
    MongoDB, de-duplicated on the post's detail URL.

    Parameters
    ----------
    page: int
        Number of list pages to fetch per group (25 posts per page).
    log: logging.Logger
        Logger used for progress and error reporting.
    """

    __START_URL = "https://www.douban.com/group/luohuzufang/discussion?start={}"
    __HOST = "www.douban.com"

    def __init__(self, page, log):
        self.__page = page
        self.log = log
        self.log.info("获得 {} 页之后的数据...".format(self.__page))
        self.mongo = MongDb(LocalMongoConfig.HOST,
                            LocalMongoConfig.PORT,
                            LocalMongoConfig.DB,
                            LocalMongoConfig.USER,
                            LocalMongoConfig.PASSWD,
                            log=self.log)
        self.table = "douban"
        # Typo fix: was __init_reqeust (private helper, internal rename only).
        self.request = self.__init_request()
        self.douban_handler = DouBanInfoHandler()

    def __init_request(self):
        """Build a requests.Session carrying browser-like headers."""
        headers = {
            "Host": self.__HOST,
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "DNT": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self.request = requests.Session()
        self.request.headers = headers
        return self.request

    def __get_page_data(self, page_num=0, start_url=None):
        """Fetch one discussion-list page and batch-insert its posts.

        Returns -1 when the HTTP request fails; None on success.
        """
        url = start_url.format(page_num) if start_url else self.__START_URL.format(page_num)
        resp = self.request.get(url)
        if resp is None:
            self.log.error("请求列表页出错...")
            return -1
        html_resp = html.fromstring(resp.text)
        # Iterate over every post row of the list table.
        discussion_extract = html_resp.xpath('//div[@class="article"]//tr[@class=""]')
        item_list = []
        for per_discussion in discussion_extract:
            title = per_discussion.xpath('./td[@class="title"]/a/@title')[0]
            detail_url = per_discussion.xpath('./td[@class="title"]/a/@href')[0]
            author = per_discussion.xpath('./td[2]/a/text()')[0]
            author_url = per_discussion.xpath('./td[2]/a/@href')[0]
            comment_count_raw = per_discussion.xpath('./td[3]/text()')
            comment_count = comment_count_raw[0] if comment_count_raw else 0
            comment_date_raw = str(per_discussion.xpath('./td[4]/text()')[0])
            # List pages show "MM-DD" for the current year; prefix the year so
            # stored dates are fully qualified.
            comment_date = "2018-" + comment_date_raw if not comment_date_raw.startswith("20") else comment_date_raw
            # Structured fields (rent, district, ...) extracted from the title.
            extract_info = self.douban_handler.clean_data(title)
            item = {
                "title": title,
                "detail_url": detail_url,
                "author": author,
                "author_url": author_url,
                "comment_count": comment_count,
                "comment_date": comment_date,
                # BUG FIX: the original stored time.strftime("%"), i.e. the
                # literal "%"; record a real ingestion timestamp instead.
                "_in_time": time.strftime("%Y-%m-%d %H:%M:%S")
            }
            new_item = {**extract_info, **item}
            # print(new_item)
            item_list.append(new_item)
        self.mongo.insert_batch_data(self.table, item_list, key="detail_url")

    def start(self, *args, **kwargs):
        """Crawl up to self.__page pages for every group in init_urls."""
        for url in init_urls:
            self.log.info("当前采集小组的链接是:{}".format(url))
            for i in tqdm(range(0, self.__page + 1)):
                self.log.info("当前即将采集第 {} 页".format(i))
                grab_list_page_status = self.__get_page_data(i * 25, url)
                if grab_list_page_status == -1:
                    self.log.info("当前采集列表页出错, 当前页面是第 {} 页".format(i))
                    continue
                self.log.info("当前页面采集完成: page = {}".format(i))
        self.log.info("成功退出采集程序...")
@click.command()
@click.option('--page',
              default=20,
              type=int,
              help=u'采集总页数')
def main(page):
    """CLI entry point: crawl the configured Douban groups for *page* pages."""
    try:
        crawler = DoubanCrawl(page, logger)
        crawler.start()
    except Exception as err:
        # Log and swallow so the process exits cleanly with a stack trace.
        logger.error("采集异常退出: ")
        logger.exception(err)


if __name__ == '__main__':
    main()
|
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import cv2, time
from os import system, name
import json
from PyQt5 import QtWidgets
from PyQt5 import QtGui
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QWidget, QInputDialog, QAction
from PyQt5.QtGui import QImage
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import QTimer, QThread, pyqtSignal, pyqtSlot,Qt
from PyQt5.QtWidgets import QMessageBox
from tools.UI_Main import *
import sys
class Main(QtWidgets.QMainWindow):
    """Main window: grabs webcam frames on a timer, detects object contours
    and overlays their real-world dimensions (mm or inches) on the feed.

    Calibration assumes the leftmost detected object is a reference of known
    width (24.257 mm / 0.955 in) -- TODO confirm reference object size.
    """

    # class constructor
    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Measurement unit taken from the UI combo box ("mm" or "in").
        self.uni = self.ui.unit_select.currentText()
        self.setWindowFlag(Qt.FramelessWindowHint)
        self.quit = QAction("Quit", self)
        # Webcam capture configured for 1280x720 frames.
        self.video = cv2.VideoCapture(0)
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        self.display = None
        # Re-run the measurement pipeline every 10 ms.
        self.timer = QTimer()
        self.timer.timeout.connect(self.get_measurements)
        self.timer.start(10)
        self.ui.toolButton.clicked.connect(self.close_app)
        self.ui.unit_select.currentTextChanged.connect(self.change_units)

    def close_app(self):
        # Stop the frame timer (best effort) before closing the window.
        try:
            self.timer.stop()
        except:
            pass
        self.close()

    def closeEvent(self, event):  # Close main window
        reply = QMessageBox.question(
            self, 'Close application', 'Are you sure you want to close the window?', QMessageBox.Yes | QMessageBox.No)
        if reply == QMessageBox.Yes:
            # Release the camera before shutting everything down.
            self.video.release()
            print("Released")
            event.accept()
        else:
            # User cancelled: restart the timer (at 50 ms now) and keep running.
            self.timer.start(50)
            event.ignore()

    def change_units(self):
        # Keep the unit in sync with the UI selection.
        self.uni = self.ui.unit_select.currentText()

    def midpoint(self, ptA, ptB):
        # Midpoint of two (x, y) points.
        return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

    def get_measurements(self):
        """Grab a frame, find contours, and draw measured dimensions."""
        check, frame = self.video.read()
        # cv2.waitKey(0)
        # self.video.release()
        # Convert to grayscale and blur to reduce noise/artefacts.
        image = frame  # cv2.imread(frame)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (7, 7), 0)
        # Edge map, then dilate/erode to close gaps in object outlines.
        edged = cv2.Canny(gray, 50, 100)
        edged = cv2.dilate(edged, None, iterations=10)
        edged = cv2.erode(edged, None, iterations=10)
        cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[0]  # if imutils.is_cv2() else cnts[1]
        # Sort contours left-to-right: the leftmost object sets the scale.
        (cnts, _) = contours.sort_contours(cnts)
        pixelsPerMetric = None
        for c in cnts:
            # Ignore tiny contours (noise).
            if cv2.contourArea(c) < 100:
                continue
            orig = image.copy()
            # Minimum-area rotated bounding box, ordered tl/tr/br/bl.
            box = cv2.minAreaRect(c)
            box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
            box = np.array(box, dtype="int")
            box = perspective.order_points(box)
            cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
            for (x, y) in box:
                cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
            # Midpoints of the box edges give the measurement endpoints.
            (tl, tr, br, bl) = box
            (tltrX, tltrY) = self.midpoint(tl, tr)
            (blbrX, blbrY) = self.midpoint(bl, br)
            (tlblX, tlblY) = self.midpoint(tl, bl)
            (trbrX, trbrY) = self.midpoint(tr, br)
            cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
            cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
            cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), (255, 0, 255), 2)
            cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), (255, 0, 255), 2)
            # Pixel lengths of the two box axes.
            dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
            dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
            # Calibrate pixels-per-unit from the first (leftmost) object.
            if pixelsPerMetric is None:
                if self.uni == "mm":
                    pixelsPerMetric = dB / 24.257
                if self.uni == "in":
                    pixelsPerMetric = dB / .955
            dimA = dA / pixelsPerMetric
            dimB = dB / pixelsPerMetric
            if self.uni == 'mm':
                cv2.putText(orig, "{:.1f}mm".format(dimB), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
                cv2.putText(orig, "{:.1f}mm".format(dimA), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
            elif self.uni == "in":
                cv2.putText(orig, "{:.1f}in".format(dimB), (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
                cv2.putText(orig, "{:.1f}in".format(dimA), (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, 0.65, (255, 255, 255), 2)
            # self.display = orig
            # Scale the annotated frame to the display widget and show it.
            self.display = cv2.resize(orig, (self.ui.video_display.width(), self.ui.video_display.height()), interpolation=cv2.INTER_LINEAR)
            height, width, channel = self.display.shape
            step = channel * width
            # create QImage from image
            qImg = QImage(self.display.data, width, height, step, QImage.Format_BGR888)
            # show image in img_label
            self.ui.video_display.setPixmap(QPixmap.fromImage(qImg))
def main_window():  # Run application
    """Create the QApplication, show the main window maximized, and run
    the Qt event loop until exit."""
    app = QApplication(sys.argv)
    window = Main()
    window.showMaximized()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main_window()
#!/usr/bin/python3
"""
Island Permiterer Module
"""
def island_perimeter(grid):
    """Return the perimeter of the island described in *grid*.

    grid is a list of lists of ints where 1 is land and 0 is water.
    Cells beyond the grid edges count as water.

    BUG FIX: the original indexed grid[x][y - 1] (wraps to the LAST
    column when y == 0) and grid[x][y + 1] / grid[x + 1][y] (IndexError
    on the last column/row); all four neighbour checks are now bounded.
    """
    rows = len(grid)
    perimeter = 0
    for x in range(rows):
        cols = len(grid[x])
        for y in range(cols):
            if grid[x][y] != 1:
                continue
            sides = 4
            # Subtract one side for every land neighbour.
            if y > 0 and grid[x][y - 1] == 1:
                sides -= 1
            if y + 1 < cols and grid[x][y + 1] == 1:
                sides -= 1
            if x > 0 and grid[x - 1][y] == 1:
                sides -= 1
            if x + 1 < rows and grid[x + 1][y] == 1:
                sides -= 1
            perimeter += sides
    return perimeter
|
from django import forms
from .models import Product
class ProductForm(forms.ModelForm):
    """ModelForm for creating/editing Product instances.

    The 'sections' choice field is not declared statically; it is attached
    at runtime through sec() so its choices can be built from a dynamic
    list supplied by the caller.
    """
    # sections = forms.CharField(max_length=20)

    class Meta:
        model = Product
        fields = ['name',
                  'price',
                  'image',
                  'type',
                  'discount',
                  'manufacturer',
                  'number_Of_Copy',
                  'par_Code',
                  ]

    def sec(self, sec_list):
        # Build a <select> whose options mirror sec_list and register it on
        # THIS form instance only (class-level fields are unaffected).
        self.sections = forms.CharField(widget=forms.Select(choices=[(sec, sec) for sec in sec_list]), max_length=20)
        self.fields['sections'] = self.sections
|
from typing import List
from fastapi import APIRouter
from todoberry.models import Item, ItemsList
from todoberry.services import ItemService, ListService
items_router = APIRouter()
lists_router = APIRouter()
@items_router.get("/", response_model=List[Item])
def get_items(list_id: str):
    """Return every item belonging to list *list_id*.

    BUG FIX: response_model was Item although the service returns a
    collection; a single-Item model fails response validation on a list.
    """
    return ItemService.get_items(list_id)
@items_router.get("/{item_id}", response_model=Item)
def get_item(item_id: str):
    """Fetch a single item by its id."""
    item = ItemService.get_item(item_id)
    return item
@items_router.post("/", response_model=Item)
def create_item(list_id: str, item: Item):
    """Create *item* inside the list identified by *list_id*."""
    created = ItemService.create_item(list_id, item)
    return created
@items_router.put("/{item_id}", response_model=Item)
def update_item(item_id: str, item: Item):
    """Replace the stored item *item_id* with the supplied payload."""
    updated = ItemService.update_item(item_id, item)
    return updated
@items_router.delete("/{item_id}")
def delete_item(item_id: str):
    """Delete the item *item_id*; returns no body.

    BUG FIX: the route declared response_model=Item but the handler
    returns None, which fails response validation; dropped the model to
    match delete_list.
    """
    ItemService.delete_item(item_id)
@lists_router.get("/", response_model=List[ItemsList])
def get_lists():
    """Return every stored list."""
    all_lists = ListService.get_lists()
    return all_lists
@lists_router.get("/{list_id}", response_model=ItemsList)
def get_list(list_id: str):
    """Fetch a single list by its id."""
    found = ListService.get_list(list_id)
    return found
@lists_router.post("/", response_model=ItemsList)
def create_list(list_: ItemsList):
    """Create a new list from the supplied payload."""
    created = ListService.create_list(list_)
    return created
@lists_router.put("/{list_id}", response_model=ItemsList)
def update_list(list_id: str, list_: ItemsList):
    """Replace the stored list *list_id* with the supplied payload."""
    updated = ListService.update_list(list_id, list_)
    return updated
@lists_router.delete("/{list_id}")
def delete_list(list_id: str):
    """Delete the list *list_id*; returns no body."""
    ListService.delete_list(list_id)
|
'''
Created on Apr 23, 2019
@author: facebook
'''
def f(list):
    """Print the distinct values of *list* once each, in first-seen order.

    NOTE: the parameter name shadows the builtin `list`; kept for
    interface compatibility.  BUG FIX: the original body used the
    Python-2 statement `print i,`, which is a syntax error under
    Python 3; rewritten with the print() function.
    """
    seen = []
    for item in list:
        if item not in seen:
            seen.append(item)
    # Space-separated, like the original py2 trailing-comma prints.
    print(*seen)
# Demo: print the distinct colours from a sample list.
# NOTE: this assignment shadows the builtin `list`.
list = ['pink','pink','blue']
print("the unique values from list is")
f(list)
'''
x = input("Please enter a sentence: ")
print("The words in that sentence are: ", x.split())
unique = set(x)
print("Here are the unique words in that sentence: ",unique)'''
|
ABOUT = 'Time tracker application'

# (command, description) pairs shown in the in-app help text.
_cmd = [
    ('lap or l', 'make new lap'),
    ('end or e', 'exit'),
    ('anything else', 'lap title'),
]

# One help line per command: each cell left-padded to 13 characters and the
# two columns joined with ' - '.
COMMANDS: str = '\n'.join(
    ' - '.join(f"{cell:<13}" for cell in row) for row in _cmd
)
class TYPES:
    """Human-readable labels for the kinds of tracked time."""
    WORK = 'work session'
    RELAX = 'chill'
|
#pymysql安装:pip install pymysql
#在命令行启动mysql(service mysql start),在python文件中使用pymysql连接
import pymysql
# Connection settings for the local MySQL server.
db_info = {
    'host': 'localhost',
    'port': 3306,
    'user': 'root',
    'password': '123',
    'db': 'text'
}
# Statements executed in order.  NOTE(review): 'select VERSION' is probably
# meant to be 'select VERSION()' -- confirm before relying on its result.
sqls = ['select 1', 'select VERSION']
# Module-level accumulator: first row of each statement's result set.
result = []
class ConnDB:
    """Open a MySQL connection, run a list of SQL statements and append the
    first row of each result set to the module-level ``result`` list.

    Parameters
    ----------
    db_info: dict
        Keys 'host', 'port', 'user', 'password', 'db'.
    sqls: list[str]
        Statements executed in order by run().
    """

    def __init__(self, db_info, sqls):
        self.host = db_info['host']
        self.port = db_info['port']
        self.user = db_info['user']
        self.password = db_info['password']
        self.db = db_info['db']
        self.sqls = sqls

    def run(self):
        """Execute every statement on a fresh connection.

        BUG FIX: the original called cur.commit() -- cursors have no
        commit(), so every run raised AttributeError and rolled back;
        commit is a connection method.  The connection is now always
        closed via finally, and only real execution errors roll back.
        """
        conn = pymysql.connect(
            host=self.host,
            port=self.port,
            user=self.user,
            password=self.password,
            db=self.db,
            charset='utf8')
        try:
            cur = conn.cursor()
            try:
                for command in self.sqls:
                    cur.execute(command)            # run the statement
                    result.append(cur.fetchone())   # keep the first row
                conn.commit()
            except Exception:
                # Preserve the original best-effort behaviour: roll back
                # and swallow the error rather than crash the caller.
                conn.rollback()
            finally:
                cur.close()
        finally:
            conn.close()
if __name__ == '__main__':
    # Run the configured statements and show the collected first rows.
    conn = ConnDB(db_info, sqls)
    conn.run()
    print(result)
|
from django.conf.urls import url
from django.urls import path
from django.views.generic.base import RedirectView
from . import views
from django.contrib.staticfiles.storage import staticfiles_storage
# Routes: home page, per-slug download view, and a favicon redirect that
# points the browser at the collected static asset.
urlpatterns = [
    path('', views.HomeView.as_view(), name="home"),
    path('<slug:slug>', views.download, name='download'),
    path('favicon.ico', RedirectView.as_view(url=staticfiles_storage.url("favicon.ico")))
]
|
from numpy import*
# Read a Python literal such as "[1, 2, 3]" and convert it to an array.
# SECURITY NOTE(review): eval() on raw user input executes arbitrary code;
# unsafe for anything but trusted interactive use.
x=array(eval(input("numeros:")))
soma=0
d=0
# Sum the elements to compute the mean.
for i in range(size(x)):
    soma+=x[i]
media=soma/size(x)
from math import*
# Accumulate squared deviations from the mean.
for i in range(size(x)):
    d=d+(x[i]-media)**2
# Sample standard deviation (divide by n - 1), rounded to 3 decimals.
k=size(x)-1
j=sqrt(d/k)
print(round(j,3))
# code to calculate the PDF for the p value calculation of the SM using 144 bins
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import cevns
import cevns_accelerators as acc
import coherent_CsI_real as csir
from scipy.interpolate import interp1d
from scipy.optimize import minimize
# setting up COHERENT
pot = 0.08*(1.76e23/308.1)/(24.0*60.0*60.0)  # proton on target
R = 1930.0  # distance to detector in cm
A_Cs = 133  # atomic numbers of Cs, I and their binding energies
Z_Cs = 55
bindE_Cs = 1118.532104
Q = 0.0878
Y = 13.348
A_I = 127
Z_I = 53
bindE_I = 1072.580322
mass = 14.6  # detector mass -- units not stated here, TODO confirm (kg?)
# NOTE: `time` shadows the stdlib module name; exposure -- presumably days.
time = 308.1
# Mass fractions of Cs and I in the CsI target.
f_Cs = (A_Cs)/(A_Cs+A_I)
f_I = (A_I)/(A_Cs+A_I)
# calculate SM signal (all new-physics parameters zero)
sig = csir.signalcomp(0., 0., 0., 0., 0.)
# load data
dat = acc.load_file("datafiles_in/Co_data.dat")
# load neutrons
neut = acc.load_file("datafiles_in/total_neutron.dat")
n = acc.load_neutron(neut)
# load bkg (anticoincidence on/off and coincidence-off samples)
ACon = np.loadtxt("datafiles_in/full_AC_on.dat")
ACoff = np.loadtxt("datafiles_in/full_AC_off.dat")
COoff = np.loadtxt("datafiles_in/full_CO_off.dat")
bkg1dsdata = acc.bkgreal_2ds(ACoff, ACon, COoff)
def randompred(sig, n, bkg):
    """Build one randomised signal+neutron+background prediction.

    The nuisance parameters alpha/beta are drawn from Gaussian priors with
    widths 0.28 and 0.25, matching the pull terms used in chi2_bins.
    """
    alpha = np.random.normal(0, 0.28)
    beta = np.random.normal(0, 0.25)
    sig_n = csir.add_lists_2(sig, n, alpha, beta)
    smeared = acc.smearing_fct(sig_n)
    # BUG FIX: apply the detector efficiency to the *smeared* spectrum, as
    # chi2_bins does; the original passed the unsmeared sig_n and silently
    # discarded `smeared`.
    pred = acc.data_eff(smeared)
    # add bkg
    lista = csir.add_lists_1(pred, bkg, 0)
    return lista
def randomdata(lista):
    """Poisson-fluctuate the predicted counts bin by bin.

    For each row of *lista* (an (N<=144, 3) array) whose count column is
    non-negative, copy columns 0-1 and draw a Poisson sample of column 2
    into a fresh (144, 3) array.  The returned `prob` reflects only the
    LAST row processed -- 0 if it was non-negative, 1e4 otherwise -- a
    quirk deliberately preserved from the original implementation.
    """
    fluctuated = np.zeros((144, 3))
    for row in range(len(lista)):
        if lista[row, 2] >= 0:
            fluctuated[row, 0] = lista[row, 0]
            fluctuated[row, 1] = lista[row, 1]
            fluctuated[row, 2] = np.random.poisson(lista[row, 2])
            prob = 0
        else:
            prob = 1e4
    return prob, fluctuated
def chi2_bins(n, ac, meas, alpha, beta, gamma):
    """Poisson log-likelihood chi2 between the nuisance-shifted SM
    prediction and the measured bins, plus Gaussian pull terms for the
    nuisance parameters alpha, beta and gamma."""
    chi = 0
    # SM signal with all new-physics parameters set to zero.
    sig = csir.signalcomp(0.0, 0.0, 0.0, 0.0, 0.0)
    sig_n = csir.add_lists_2(sig, n, alpha, beta)
    smeared = acc.smearing_fct(sig_n)
    pred = acc.data_eff(smeared)
    # add bkg
    lista = csir.add_lists_1(pred, ac, gamma)
    preddev = lista
    for i in range(len(preddev)):
        numevents = preddev[i, 2]
        numobs = meas[i, 2]
        # Poisson likelihood-ratio term; the log term vanishes for empty bins.
        if numobs == 0:
            add = numevents - numobs
        else:
            add = numevents - numobs + numobs*np.log(numobs/numevents)
        chi += add
    # Pull terms with prior widths 0.28, 0.25 and 0.171.
    return 2*chi+(alpha/0.28)**2+(beta/0.25)**2+(gamma/0.171)**2
def mini_bins(n, meas, bkgdata):
    """Minimise chi2_bins over the nuisance parameters (alpha, beta, gamma)."""
    def objective(params):
        return chi2_bins(n, bkgdata, meas, params[0], params[1], params[2])
    return minimize(objective, (0.0, 0.0, 0.0), method='SLSQP', tol=1e-5, options={"maxiter": 1e3})
# number of tests
nn = 1500
fileout = np.zeros((nn, 1))
# Throw nn pseudo-experiments: fluctuate the SM prediction, then minimise
# the chi2 over the nuisance parameters to build the test-statistic PDF.
for i in range(nn):
    random = randompred(sig, n, bkg1dsdata)
    pp, outt = randomdata(random)
    # pp >= 1e4 flags an unphysical (negative) predicted bin -- skip those.
    if pp < 10:
        res = mini_bins(n, outt, bkg1dsdata)
        fileout[i, 0] = res.fun
np.savetxt("datafiles_out/result_pval_sm_12t12e.txt", fileout)
|
#!/usr/bin/env python
import random
def create_random_number():
    """Ask for a length and return a secret number as a string of that
    many distinct digits, re-prompting (recursively) on invalid input.

    BUG FIXES vs the original: int() was called OUTSIDE the try, so a
    non-numeric answer crashed with an uncaught ValueError (and the
    TypeError branch was dead); the first sample's result was bound to an
    unused tuple (trailing comma); and the recursive retries were not
    returned, so control fell through to a second sample() call that
    re-raised.  Syntax kept Python-2 compatible (raw_input, paren print).
    """
    try:
        lgth = int(raw_input("How many digits long should the number I think of be? "))
    except ValueError:
        # int() failed: the answer was not a number.
        print("That's not a number.")
        return create_random_number()
    try:
        # sample() raises ValueError when lgth > 10 (only 10 distinct digits).
        numbers = random.sample(range(10), lgth)
    except ValueError:
        print("Can't be more than 10 digits long.")
        return create_random_number()
    a = ''
    for digit in numbers:
        a += str(digit)
    return a
def create_clue_string(guess, secret):
    """Return 'Correct!' for an exact match, 'Bagels' when no digit of
    *guess* appears in *secret*, otherwise a sorted, space-joined list of
    'Fermi' (right digit, right place) / 'Pico' (right digit, wrong place)."""
    if guess == secret:
        return "Correct!"
    hints = []
    for position, digit in enumerate(guess):
        if digit == secret[position]:
            hints.append('Fermi')
        elif digit in secret:
            hints.append('Pico')
    if not hints:
        return 'Bagels'
    hints.sort()
    return ' '.join(hints)
def is_only_digits(n):
    """Return True when *n* is a non-empty string consisting solely of
    the ASCII digits 0-9."""
    if not n:
        return False
    return all(ch in '0123456789' for ch in n)
def ask_to_play_again():
    """Prompt until the user gives a yes/no answer; empty input means yes.

    Simplified the redundant `if recurse(): return True else: return
    False` tail into a direct return, and parenthesised print for
    Python-3-compatible syntax (raw_input kept, as the script is py2).
    """
    answer = raw_input("Play again? (Yes/no) ")
    if str.lower(answer) in ('y', 'yes') or answer == '':
        return True
    if str.lower(answer) in ('n', 'no'):
        return False
    # Invalid response: tell the user and ask again.
    print("Not a valid response.")
    return ask_to_play_again()
# Main game loop: generate a secret number, collect guesses until the
# player gets it, then offer another round.  (Python 2: raw_input/print.)
while True:
    answer = create_random_number()
    guesses = 1
    while True:
        guess = ''
        # Re-prompt until the guess has the right length and is all digits.
        while len(guess) != len(str(answer)) or not is_only_digits(guess):
            guess = raw_input("Guess #" + str(guesses) + ": ")
        clue = create_clue_string(guess,answer)
        print " " + clue
        guesses += 1
        if guess == answer:
            break
    if not ask_to_play_again():
        break
|
# Licensed under an MIT style license -- see LICENSE.md
import plotly.graph_objects as go
import plotly
__author__ = ["Charlie Hoy <charlie.hoy@ligo.org>"]
def write_to_html(fig, filename):
    """Write a plotly.graph.objects.go.Figure to a html file

    Parameters
    ----------
    fig: plotly.graph.objects.go.Figure object
        figure containing the plot that you wish to save to html
    filename: str
        name of the file that you wish to write the figure to
    """
    # Render the figure as a standalone <div> (plotly.js loaded via CDN).
    plot_div = plotly.offline.plot(fig, include_plotlyjs=False, output_type='div')
    header = (
        "<script src='https://cdn.plot.ly/plotly-latest.min.js'></script>\n"
        "<script src='https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.1/"
        "MathJax.js?config=TeX-MML-AM_SVG'></script>"
    )
    with open(filename, "w") as html_file:
        html_file.write(header + plot_div)
def histogram2d(
    x, y, xlabel='x', ylabel='y', contour=False, contour_color='Blues',
    marker_color='rgba(248,148,6,1)', dimensions=None,
    write_to_html_file="interactive_2d_histogram.html", showgrid=False,
    showlegend=False
):
    """Build an interactive 2d histogram plot

    Parameters
    ----------
    x: np.ndarray
        An array containing the x coordinates of the points to be histogrammed
    y: np.ndarray
        An array containing the y coordinates of the points to be histogrammed
    xlabel: str
        The label for the x coordinates
    ylabel: str
        The label for the y coordinates
    contour: Bool
        Whether or not to show contours on the scatter plot
    contour_color: str
        Name of the matplotlib palette to use for contour colors
    marker_color: str
        Color to use for the markers
    dimensions: dict, optional
        A dictionary giving the width and height of the figure.
        Defaults to {'width': 900, 'height': 900}.
    write_to_html_file: str
        Name of the html file you wish to write the figure to
    showgrid: Bool
        Whether or not to show a grid on the plot
    showlegend: Bool
        Whether or not to add a legend to the plot
    """
    # BUG FIX: avoid a mutable dict as a default argument; resolve it here.
    if dimensions is None:
        dimensions = {'width': 900, 'height': 900}
    fig = go.Figure()
    if contour:
        fig.add_trace(
            go.Histogram2dContour(
                x=x, y=y, colorscale=contour_color, reversescale=True,
                xaxis='x', yaxis='y', histnorm="probability density"
            )
        )
    # Central scatter of the raw points.
    fig.add_trace(
        go.Scatter(
            x=x, y=y, xaxis='x', yaxis='y', mode='markers',
            marker=dict(color=marker_color, size=3)
        )
    )
    # Marginal 1d histograms on the secondary axes.
    fig.add_trace(
        go.Histogram(
            y=y, xaxis='x2', marker=dict(color=marker_color),
            histnorm="probability density"
        )
    )
    fig.add_trace(
        go.Histogram(
            x=x, yaxis='y2', marker=dict(color=marker_color),
            histnorm="probability density"
        )
    )
    # Main axes occupy 85% of the canvas; marginals take the outer 15%.
    fig.update_layout(
        autosize=False,
        xaxis=dict(
            zeroline=False, domain=[0, 0.85], showgrid=showgrid
        ),
        yaxis=dict(
            zeroline=False, domain=[0, 0.85], showgrid=showgrid
        ),
        xaxis2=dict(
            zeroline=False, domain=[0.85, 1], showgrid=showgrid
        ),
        yaxis2=dict(
            zeroline=False, domain=[0.85, 1], showgrid=showgrid
        ),
        height=dimensions["height"],
        width=dimensions["width"],
        bargap=0,
        hovermode='closest',
        showlegend=showlegend,
        font=dict(
            size=10
        ),
        xaxis_title=xlabel,
        yaxis_title=ylabel,
    )
    if write_to_html_file is not None:
        write_to_html(fig, write_to_html_file)
        return
    return fig
def ridgeline(
    data, labels, xlabel='x', palette='colorblind', colors=None, width=3,
    write_to_html_file="interactive_ridgeline.html", showlegend=False,
    dimensions=None
):
    """Build an interactive ridgeline plot

    Parameters
    ----------
    data: list, np.ndarray
        The samples you wish to produce a ridgline plot for. This should be a 2
        dimensional array where the zeroth axis is the list of samples and
        the next axis is are the dimensions of the space
    labels: list
        List of labels corresponding to each set of samples
    xlabel: str
        The label for the x coordinates
    palette: str
        Name of the seaborn colorpalette to use for the different posterior
        distributions
    colors: list
        List of colors to use for the different posterior distributions
    width: float
        Width of the violin plots
    write_to_html_file: str
        Name of the html file you wish to write the figure to
    showlegend: Bool
        Whether or not to add a legend to the plot
    dimensions: dict, optional
        A dictionary giving the width and height of the figure.
        Defaults to {'width': 1100, 'height': 700}.
    """
    # BUG FIX: avoid a mutable dict as a default argument; resolve it here.
    if dimensions is None:
        dimensions = {'width': 1100, 'height': 700}
    fig = go.Figure()
    if colors is None:
        # Seaborn is only needed when the caller did not supply colors.
        import seaborn
        colors = seaborn.color_palette(
            palette=palette, n_colors=len(data)
        ).as_hex()
    # One horizontal half-violin per sample set.
    for dd, label, color in zip(data, labels, colors):
        fig.add_trace(go.Violin(x=dd, line_color=color, name=label))
    fig.update_traces(
        orientation='h', side='positive', width=width, points=False
    )
    fig.update_layout(
        xaxis_showgrid=False, xaxis_zeroline=False, xaxis_title=xlabel,
        width=dimensions["width"], height=dimensions["height"],
        font=dict(size=18), showlegend=showlegend
    )
    if write_to_html_file is not None:
        write_to_html(fig, write_to_html_file)
        return
    return fig
def corner(
    data, labels, dimensions=None, show_diagonal=False,
    colors=None,
    show_upper_half=False, write_to_html_file="interactive_corner.html"
):
    """Build an interactive corner plot

    Parameters
    ----------
    data: list, np.ndarray
        The samples you wish to produce a corner plot for. This should be a 2
        dimensional array where the zeroth axis is the list of samples and
        the next axis is are the dimensions of the space
    labels: list, np.ndarray
        A list of names for each dimension
    dimensions: dict, optional
        A dictionary giving the width and height of the figure.
        Defaults to {'width': 900, 'height': 900}.
    show_diagonal: Bool
        Whether or not to show the diagonal scatter plots
    colors: dict, optional
        A dictionary of colors for the individual samples. The dictionary should
        have keys 'selected' and 'not_selected' to indicate the colors to be
        used when the markers are selected and not selected respectively.
        Defaults to {'selected': 'rgba(248,148,6,1)',
        'not_selected': 'rgba(0,0,0,1)'}.
    show_upper_half: Bool
        Whether or not to show the upper half of scatter plots
    write_to_html_file: str
        Name of the html file you wish to write the figure to
    """
    # BUG FIX: avoid mutable dicts as default arguments; resolve them here.
    if dimensions is None:
        dimensions = {'width': 900, 'height': 900}
    if colors is None:
        colors = {'selected': 'rgba(248,148,6,1)', 'not_selected': 'rgba(0,0,0,1)'}
    # One Splom dimension per labelled parameter.
    data_structure = [
        dict(label=label, values=value) for label, value in zip(
            labels, data
        )
    ]
    fig = go.Figure(
        data=go.Splom(
            dimensions=data_structure,
            marker=dict(
                color=colors["not_selected"], showscale=False,
                line_color='white', line_width=0.5,
                size=3
            ),
            selected=dict(marker=dict(color=colors["selected"])),
            diagonal_visible=show_diagonal,
            showupperhalf=show_upper_half,
        )
    )
    fig.update_layout(
        dragmode='select',
        width=dimensions["width"],
        height=dimensions["height"],
        hovermode='closest',
        font=dict(
            size=10
        )
    )
    if write_to_html_file is not None:
        write_to_html(fig, write_to_html_file)
        return
    return fig
|
import time
import sys
import datetime
from poloniex import poloniex
def main():
    """Triangular-arbitrage loop over a BTC/pair1/pair2 triangle on Poloniex.

    Each polling period it pulls the three order books; when a full round
    trip (after three 0.25% taker fees) would multiply the balance by more
    than 1, it walks the funds around the triangle one leg per period.
    Runs forever; stop with Ctrl-C.
    """
    # initializations / customizables
    period = .5  # how often the loop runs, in seconds
    # pairtrio = btc, xmr, zec
    # follow naming standard pair1, pair2, pair1pair2
    BTCpair1 = "USDT_BTC"
    BTCpair2 = "USDT_STR"
    pair1pair2 = "BTC_STR"
    tradePlaced = False  # False only if back at btc; otherwise a leg is in flight
    direction = False    # "pair1 to pair2" or "pair2 to pair1" while looping
    stage = False        # which stage of the triangle we currently hold
    dataDate = ""
    orderNumber = ""
    # connecting to polo api
    connect = poloniex('key here', 'secret here')  # plug in API key and secret for account
    # trading
    while True:
        # Pull the three order books and unpack best ask/bid prices + volumes.
        BTCpair1info = connect.api_query("returnOrderBook", {"currencyPair":BTCpair1})
        BTCpair2info = connect.api_query("returnOrderBook", {"currencyPair":BTCpair2})
        pair1pair2info = connect.api_query("returnOrderBook", {"currencyPair":pair1pair2})
        BTCpair1lowestAsk = float(BTCpair1info["asks"][0][0])
        BTCpair1lowestAskVol = float(BTCpair1info["asks"][0][1])
        BTCpair1highestBid = float(BTCpair1info["bids"][0][0])
        BTCpair1highestBidVol = float(BTCpair1info["bids"][0][1])
        BTCpair2lowestAsk = float(BTCpair2info["asks"][0][0])
        BTCpair2lowestAskVol = float(BTCpair2info["asks"][0][1])
        BTCpair2highestBid = float(BTCpair2info["bids"][0][0])
        BTCpair2highestBidVol = float(BTCpair2info["bids"][0][1])
        pair1pair2lowestAsk = float(pair1pair2info["asks"][0][0])
        pair1pair2lowestAskVol = float(pair1pair2info["asks"][0][1])
        pair1pair2highestBid = float(pair1pair2info["bids"][0][0])
        pair1pair2highestBidVol = float(pair1pair2info["bids"][0][1])
        dataDate = datetime.datetime.now()
        # if not in a positional loop: look for a profitable entry
        if (not tradePlaced):
            # Round-trip multipliers; each leg pays a 0.25% fee (x0.9975).
            # Once a loop is initiated, go through the whole loop with whatever
            # was put into the first transaction, no matter what.
            print ("pair1 to pair2: %s" % ((BTCpair2highestBid/(BTCpair1lowestAsk * pair1pair2lowestAsk)) * (.9975)*(.9975)*(.9975)))
            print ("pair2 to pair1: %s" % (((BTCpair1highestBid * pair1pair2highestBid)/BTCpair2lowestAsk) * (.9975)*(.9975)*(.9975)))
            if ((BTCpair2highestBid/(BTCpair1lowestAsk * pair1pair2lowestAsk)) * (.9975)*(.9975)*(.9975)) > 1:
                print("Buying BTCpair1")
                # convert all order sizes to BTCpair1 so the triangle is volume-limited correctly
                ordersize = min(BTCpair1lowestAskVol, (pair1pair2lowestAskVol*pair1pair2lowestAsk)*(1/.9975), BTCpair2highestBidVol*pair1pair2lowestAsk*(1/.9975)*(1/.9975))
                orderNumber = connect.buy(BTCpair1, BTCpair1lowestAsk, ordersize, immediateOrCancel=1)
                tradePlaced = True
                direction = "pair1 to pair2"
                stage = "pair1"
            elif (((BTCpair1highestBid * pair1pair2highestBid)/BTCpair2lowestAsk) * (.9975)*(.9975)*(.9975)) > 1:
                print("Buying BTCpair2")
                # NOTE(review): this leg uses pair1pair2lowestAsk for sizing —
                # confirm whether pair1pair2highestBid was intended here.
                ordersize = min(BTCpair2lowestAskVol, pair1pair2highestBidVol *(1/.9975), BTCpair1highestBidVol/pair1pair2lowestAsk*(1/.9975)*(1/.9975))
                orderNumber = connect.buy(BTCpair2, BTCpair2lowestAsk, ordersize, immediateOrCancel=1)
                tradePlaced = True
                direction = "pair2 to pair1"
                stage = "pair2"
        # in trade loop: advance one leg per period
        elif direction == "pair1 to pair2":
            if stage == "pair1":
                print("Buying BTCpair2")
                # NOTE(review): "resultingTrades" is indexed as a dict here —
                # confirm against the Poloniex response shape (often a list).
                ordersize = float(orderNumber["resultingTrades"]["amount"])
                orderNumber = connect.buy(pair1pair2, pair1pair2lowestAsk*1.1, ordersize, immediateOrCancel=1)
                stage = "pair2"
            elif stage == "pair2":
                print("Buying BTC")
                ordersize = float(orderNumber["resultingTrades"]["amount"])
                orderNumber = connect.sell(BTCpair2, BTCpair2highestBid/1.1, ordersize, immediateOrCancel=1)
                # end loop after finishing trade
                stage = False
                tradePlaced = False
                direction = False
        elif direction == "pair2 to pair1":
            if stage == "pair2":
                print("Buying BTCpair1")
                ordersize = float(orderNumber["resultingTrades"]["amount"])
                orderNumber = connect.sell(pair1pair2, pair1pair2lowestAsk/1.1, ordersize, immediateOrCancel=1)
                stage = "pair1"
            elif stage == "pair1":
                print("Buying BTC")
                ordersize = float(orderNumber["resultingTrades"]["amount"])
                orderNumber = connect.sell(BTCpair1, BTCpair1highestBid/1.1, ordersize, immediateOrCancel=1)
                # end loop after finishing trade
                stage = False
                tradePlaced = False
                direction = False
        # feedback to user
        print("%s Period: %ss" % (dataDate,period))
        # BUG FIX: int(period) truncated 0.5 to 0, so the loop never slept
        # and hammered the API; sleep accepts a float directly.
        time.sleep(period)
# runs main when python trader.py is run
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import cv2 as cv
import numpy as np
"""
图像噪声:
图像噪声在模拟信号传输或数字信号传输过程中,图像发生了丢失或收到干扰而产生,亦或是成像设备如摄像头、照相机等的损
坏、环境本身导致的成像质量不稳定,反映到图像上就是图像的亮度与颜色呈现某种程度的不一致性。常见噪声有以下几种:
椒盐噪声: 随机分布在图像中的黑白像素点(使用中值滤波对该类噪声进行处理)
高斯噪声: 成像设备收到物理/光/电等各种型号干扰产生的高斯分布噪声
均匀分布噪声: 由于某些规律性的错误导致
"""
# 随机为照片添加椒盐噪声
# Randomly overlay salt-and-pepper noise on a photo
def add_salt_pepper_noise(image):
    """Overlay random salt (white) and pepper (black) pixels on *image*.

    NOTE: mutates *image* in place and also returns it; pass a copy if
    the original must be preserved.
    """
    h, w = image.shape[:2]  # h, w, channel
    nums = 100000
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    rows = np.random.randint(0, h, nums, dtype=int)
    cols = np.random.randint(0, w, nums, dtype=int)
    for i in range(nums):
        # alternate between salt (white) and pepper (black)
        if 1 == i % 2:
            image[rows[i], cols[i]] = (255, 255, 255)
        else:
            image[rows[i], cols[i]] = (0, 0, 0)
    return image
# 随机为图像添加高斯噪声
# Randomly overlay Gaussian noise on an image
def add_gaussian_noise(image):
    """Return *image* plus Gaussian noise (mean 15, sigma 45 per channel)."""
    mean = (15, 15, 15)
    sigma = (45, 45, 45)
    noise = np.zeros(image.shape, image.dtype)
    # fill the buffer with normally-distributed values, then saturate-add
    cv.randn(noise, mean, sigma)
    return cv.add(image, noise)
def main():
    """Demo: show the source image, then salt-pepper and Gaussian noisy versions."""
    src = cv.imread("../../pic/IMG_20191204_151110.jpg")
    cv.imshow("src", src)
    # NOTE(review): add_salt_pepper_noise mutates src in place, so the
    # Gaussian window below starts from the already-noisy image — confirm
    # whether src.copy() was intended.
    cv.imshow("salt pepper noise", add_salt_pepper_noise(src))
    cv.imshow("gaussian noise", add_gaussian_noise(src))
    cv.waitKey(0)
    cv.destroyAllWindows()
if "__main__" == __name__:
    main()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-25 09:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.10): adds College.brief and relaxes
    College.model_pic to nullable with a placeholder default.

    NOTE(review): the b'...' bytes defaults are Python 2 artifacts of the
    generator; do not hand-edit an already-applied migration.
    """

    dependencies = [
        ('frontendapp', '0007_college_model_pic'),
    ]

    operations = [
        migrations.AddField(
            model_name='college',
            name='brief',
            field=models.CharField(default=b'We are working on the brief.', max_length=150),
        ),
        migrations.AlterField(
            model_name='college',
            name='model_pic',
            field=models.ImageField(default=b'media/not_available.png', null=True, upload_to=b'media/'),
        ),
    ]
|
"""Configuration file for Trainer
@author: vasudevgupta
"""
class TrainerConfig(object):
    """Registry of every attribute name a Trainer instance recognises.

    Kept as a single flat list so configuration dictionaries can be
    validated against it.
    """

    default_attrs = [
        # run control
        'fast_dev_run', 'start_epoch', 'epochs', 'load_dir', 'assert_consumed',
        # checkpointing
        'checkpoint', 'save_only_final_ckpts', 'save_every_ckpt',
        'saved_ckpts_dir', 'max_ckpt_to_keep',
        # experiment tracking
        'project_name', 'config', 'dir', 'sync_tensorboard', 'save_code',
        'lit_logger', 'keep_checkpoint_every_n_hours', 'callbacks',
        # precision / distribution
        'policy_name', 'enable_precision_training', 'enable_distributed_training',
    ]
|
from zeus.util.MsgUtil import MsgUtil
# 微信服务器推送消息是xml的,根据利用ElementTree来解析出的不同xml内容返回不同的回复信息,就实现了基本的自动回复功能了,也可以按照需求用其他的XML解析方法
import xml.etree.ElementTree as et
import logging
logger = logging.getLogger("django.request")
class BusinessController:
    """Builds auto-replies for messages pushed by the WeChat server.

    WeChat pushes XML; we parse it with ElementTree and answer with a
    canned text reply that depends on the message type.
    """

    # Canned acknowledgement per non-event message type.
    _TYPE_REPLIES = {
        'text': "文本已收到,谢谢",
        'image': "图片已收到,谢谢",
        'voice': "语音已收到,谢谢",
        'video': "视频已收到,谢谢",
        'shortvideo': "小视频已收到,谢谢",
        'location': "位置已收到,谢谢",
        'link': "链接已收到,谢谢",
    }

    def auto_reply(self, body):
        """Parse the pushed XML *body* and return the reply payload."""
        xml_data = et.fromstring(body)
        msg_type = xml_data.find('MsgType').text
        ToUserName = xml_data.find('ToUserName').text
        FromUserName = xml_data.find('FromUserName').text
        CreateTime = xml_data.find('CreateTime').text
        # MsgId = xml_data.find('MsgId').text
        reply_msg = MsgUtil(FromUserName, ToUserName)
        if msg_type == 'event':  # event-triggered message
            event = xml_data.find('Event').text
            eventKey = xml_data.find('EventKey').text
            if event == 'subscribe':  # new follower
                return reply_msg.send_text("终于等到你了!欢迎关注我们,未来我们一起成长!!!")
            if event == 'unsubscribe':  # unfollow: nothing special to do
                pass
        # Unknown types (and unhandled events) fall back to the greeting.
        content = self._TYPE_REPLIES.get(msg_type, "您好,欢迎来到Python学习!")
        return reply_msg.send_text(content)
|
import pydata_google_auth
class BigQueryConnection:
    """Obtains user OAuth credentials for BigQuery access.

    NOTE: instantiating this class triggers an interactive OAuth flow;
    with auth_local_webserver=False the user pastes a code in the console.
    """
    def __init__(self):
        # cloud-platform covers BigQuery; drive is needed for
        # Drive-backed external tables.
        self.SCOPES = [
            'https://www.googleapis.com/auth/cloud-platform',
            'https://www.googleapis.com/auth/drive',
        ]
        self.credentials = pydata_google_auth.get_user_credentials(self.SCOPES,auth_local_webserver=False,)
    def get_credentials(self):
        # Accessor kept for API compatibility; .credentials is also public.
        return self.credentials
from .community import DisseminationCommunity
from dispersy.logger import get_logger
from dispersy.script import ScriptBase
from dispersy.tool.lencoder import bz2log
from dispersy.tool.scenarioscript import ScenarioScript
logger = get_logger(__name__)
# Master member identity shared by every peer in the dissemination overlay.
# Python 2 idiom: str.decode("HEX") turns the hex dump into raw key bytes.
MASTER_MEMBER_PUBLIC_KEY = "3081a7301006072a8648ce3d020106052b810400270381920004068097a9d88022d0581ce8f9064a575f99d7907a2f1d4c865884e7445a3494484ae6f89e7c69e99697c605d066b16aeb99cb7f7f4be8b24870f88f987d52cd6279909d382cd8626606ec2944526e5bb64b936709d2c7e43b820db5e45697ea98805bcd0708a8eb2fd4377a2d8bafc5a844950e2d7bafd3072416e52b3d25710d377176ec6e1046fd99f6a9b41f816a6f".decode("HEX")
class ScenarioScript(ScenarioScript):
    """Scenario driver for the dissemination experiment (Python 2 / Dispersy).

    Deliberately rebinds the imported ScenarioScript name with this
    experiment-specific subclass.
    """
    def __init__(self, *args, **kargs):
        super(ScenarioScript, self).__init__(*args, **kargs)
        # Extra keyword arguments handed to the community constructor,
        # filled in by scenario_set_karg calls from the scenario file.
        self._community_kargs = {}
    @property
    def my_member_security(self):
        # return u"NID_sect233k1"
        # NID_secp224r1 is approximately 2.5 times faster than NID_sect233k1
        return u"NID_secp224r1"
    @property
    def master_member_public_key(self):
        return MASTER_MEMBER_PUBLIC_KEY
    @property
    def community_class(self):
        return DisseminationCommunity
    @property
    def community_kargs(self):
        return self._community_kargs
    def log(self, _message, **kargs):
        # import sys
        # print >> sys.stderr, _message, kargs
        # All experiment events go to the bz2-compressed "log" file.
        bz2log("log", _message, **kargs)
    def scenario_set_karg(self, key, value):
        # Scenario command: stash a kwarg for the community constructor.
        self.community_kargs[key] = value
    def scenario_create_one(self, *message):
        # Scenario command: publish a single text message, if online.
        community = self.has_community()
        if community:
            community.create_text(" ".join(message).decode("UTF-8"))
        else:
            logger.error("Unable to scenario_create_one (not online)")
    def scenario_create_many(self, count, *message):
        # Scenario command: publish *count* copies of the text, if online.
        community = self.has_community()
        if community:
            community.create_text(" ".join(message).decode("UTF-8"), int(count))
        else:
            logger.error("Unable to scenario_create_many (not online)")
    def scenario_create_start(self, delay, *message):
        # Scenario command: start publishing the text every *delay* seconds.
        delay = float(delay)
        message = " ".join(message).decode("UTF-8")
        self._dispersy.callback.register(self._create_periodically, (delay, message), id_="scenario-periodically")
    def scenario_create_stop(self):
        # Scenario command: cancel the periodic publisher started above.
        self._dispersy.callback.unregister("scenario-periodically")
    def _create_periodically(self, delay, message):
        # Generator task for the dispersy callback loop: yielding the delay
        # reschedules the next iteration.
        while True:
            community = self.has_community()
            if community:
                community.create_text(message)
            else:
                logger.error("Unable to _create_periodically (not online)")
            community = None
            yield delay
    def scenario_dissemination_success_condition(self, message_name, minimum, maximum="DEF"):
        """Log whether between *minimum* and *maximum* (``"DEF"`` = unbounded)
        messages named *message_name* made it into the local sync database."""
        try:
            meta_message_id, = self._dispersy.database.execute(u"SELECT id FROM meta_message WHERE name = ?",
                                                               (unicode(message_name),)).next()
            count, = self._dispersy.database.execute(u"SELECT COUNT(*) FROM sync WHERE meta_message = ?",
                                                     (meta_message_id,)).next()
        except:
            # NOTE(review): bare except also hides programming errors; it is
            # presumably only meant to catch StopIteration when the message
            # name is unknown — consider narrowing.
            count = 0
        minimum = int(minimum)
        maximum = -1 if maximum == "DEF" else int(maximum)
        if maximum == -1:
            # there is no maximum
            # NOTE(review): this branch logs "success_condition" while the
            # other logs "success-condition" — confirm which key downstream
            # tooling expects.
            self.log("success_condition",
                     type="dissemination",
                     success=minimum <= count,
                     description="%d %s messages in the database (minimum %d)" % (count, message_name, minimum))
        else:
            self.log("success-condition",
                     type="dissemination",
                     success=minimum <= count <= maximum,
                     description="%d %s messages in the database (minimum %d, maximum %d)" % (count, message_name, minimum, maximum))
            # NOTE(review): sanity check runs after the result was already
            # logged, and only in the bounded branch — confirm placement.
            assert minimum <= maximum
class FillDatabaseScript(ScriptBase):
    """Stand-alone script (Python 2) that pre-fills the local sync database
    with MAX text messages without forwarding them to other peers."""
    @property
    def enable_wait_for_wan_address(self):
        # No networking is needed just to fill the local database.
        return False
    def run(self):
        self.add_testcase(self.fill)
    def fill(self):
        # Generator testcase: yields back to the callback loop periodically
        # so filling 3000 messages does not starve other tasks.
        community = DisseminationCommunity.join_community(self._dispersy, self._dispersy.get_member(MASTER_MEMBER_PUBLIC_KEY), self._dispersy.get_new_member(u"low"))
        community.auto_load = False
        MAX = 3000
        # Single transaction around the whole fill for speed.
        with self._dispersy.database:
            for i in xrange(MAX):
                community.create_text(u"Hello World! #%d" % i, forward=False)
                # Progress line roughly every 1% of the work.
                if i % max(1, (MAX / 100)) == 0:
                    print "progress...", i, "/", MAX, "~", round(1.0 * i / MAX, 2)
                yield 0.0
        print self._dispersy.database.file_path
|
import tkinter as tk
from tkinter import ttk
from tkinter.ttk import Progressbar
from tkinter import messagebox
def init_window():
    """Build and run the calculator window: two number entries, an operator
    combo box, a progress bar, an optional history of saved operations."""
    Lista_de_op=[]  # accumulated "a op b = result" strings for the history view
    window=tk.Tk()
    window.title('Mi aplicacion')
    window.geometry('400x250')
    window.config(bg='SteelBlue1')
    label=tk.Label(window,text='Calculadora',font=('Arial bold',18),bg='SteelBlue1',fg='white')
    label.grid(column=0,row=0)
    entrada1=tk.Entry(window,width=10)
    entrada2=tk.Entry(window,width=10)
    entrada1.grid(column=1,row=1)
    entrada2.grid(column=1,row=2)
    label_entrada1=tk.Label(window,text='Ingrese primer numero',font=('Arial bold',12),bg='SteelBlue1',fg='white')
    label_entrada1.grid(column=0,row=1)
    label_entrada2=tk.Label(window,text='Ingrese el segundo numero',font=('Arial bold',12),bg='SteelBlue1',fg='white')
    label_entrada2.grid(column=0,row=2)
    label_operador=tk.Label(window,text='Escoja un operador',font=('Arial bold',12),bg='SteelBlue1',fg='white')
    label_operador.grid(column=0,row=3)
    combo_operadores=ttk.Combobox(window)
    # selector values
    combo_operadores['values']=['+','-','x','/','pow']
    # default option
    combo_operadores.current(0)
    combo_operadores.grid(column=1,row=3)
    label_resultado=tk.Label(window,text='Resultado:',font=('Arial bold',15),bg='SteelBlue1',fg='white')
    label_resultado.grid(column=0,row=5)
    # progress-bar widget (filled to 200 after each successful calculation)
    style=ttk.Style()
    style.theme_use('default')
    style.configure('steelBlue2.Horizontal.TProgressbar',background='steelBlue2')
    bar=Progressbar(window,length=200,style='steelBlue2.Horizontal.TProgressbar')
    bar['value']=0
    bar.grid(column=1,row=6)
    # checkbutton widget: when ticked, calculations are appended to the history
    chk_state= tk.BooleanVar()
    chk_state.set(False)
    # NOTE(review): the documented Checkbutton option is 'variable';
    # 'var' appears to rely on Tk option abbreviation — confirm it binds.
    boton_c= tk.Checkbutton(window, text='Guardar operación', var=chk_state,bg='SteelBlue1')
    boton_c.grid(column=0,row=4)
    def calculadora(num1,num2,operador):
        # Apply the chosen operator to two floats; 'pow' is the fallback.
        if operador=='+':
            resultado=num1+num2
        elif operador=='-':
            resultado=num1-num2
        elif operador=='x':
            resultado=num1*num2
        elif operador=='/':
            resultado=round(num1/num2,2)
        else:
            resultado=num1**num2
        return resultado
    def click_calcular(label,num1,num2,operador,state,Lista_de_op):
        # Validate the raw entry strings, compute, display, and optionally
        # record the operation in the history list.
        if num1=='' and num2=='':
            messagebox.showerror('ERROR','Faltan los datos para hacer la operación') # error-dialog widget
        elif num1=='' or num2=='':
            messagebox.showerror('ERROR','Falta un dato para hacer la operación') # error-dialog widget
        # NOTE(review): only the literal string '0' is caught; inputs like
        # '0.0' or '00' slip through and raise ZeroDivisionError — confirm.
        elif operador=='/' and num2=='0':
            messagebox.showerror('ERROR','No puedes dividir por 0') # error-dialog widget
        else:
            valor1=float(num1)
            valor2=float(num2)
            res=calculadora(valor1,valor2,operador)
            bar['value']=200
            label.configure(text='Resultado: '+str(res))
            if state==True:
                # store 'pow' as '^' for a compact history entry
                if operador=='pow':
                    operador='^'
                cadena=str(num1)+(operador)+str(num2)+'='+str(res)
                Lista_de_op.append(cadena)
    def historial(lista_de_op):
        # Open a second window listing every saved operation.
        window_2=tk.Tk()
        window_2.title('Historial')
        window_2.geometry('400x250')
        window_2.config(bg='lightgreen')
        for i in range(len(lista_de_op)):
            label=tk.Label(window_2,text=lista_de_op[i],font=('Arial bold',14),bg='lightgreen',fg='black')
            label.grid(column=0,row=i)
    # "Calcular" button
    boton=tk.Button(window,
                    command=lambda: click_calcular(
                        label_resultado,
                        entrada1.get(),
                        entrada2.get(),
                        combo_operadores.get(),
                        chk_state.get(),Lista_de_op),
                    text='Calcular',
                    bg="steelBlue2",
                    fg="white")
    boton.grid(column=1,row=4)
    # "Mostrar historial" button
    boton_2=tk.Button(window,text='Mostrar historial',bg='steelBlue2',fg='white',command=lambda: historial(Lista_de_op))
    boton_2.grid(column=0,row=6)
    window.mainloop()
def main():
    """Entry point: build and run the calculator window."""
    init_window()


# FIX: the module previously launched the GUI unconditionally at import
# time; guard it so the file can be imported (e.g. for testing) safely.
if __name__ == '__main__':
    main()
|
import split_set_data
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
from sklearn.externals import joblib
def metric_evaluation_model(classifier, yTest, xTest):
    '''
    Evaluate a pickled classifier on the test set and plot its ROC curve.

    classifier -- path to a joblib-pickled model exposing predict_proba
    yTest, xTest -- test labels and features
    '''
    # Load pretrained model and return predicted values.
    loaded_model = joblib.load(classifier)
    predicted = loaded_model.predict_proba(xTest)
    # Calculates ROC curve.
    # NOTE(review): yTest[:,:] is a full 2-D slice while roc_curve expects
    # 1-D labels — presumably yTest is a single-column array; confirm.
    fpr, tpr, _ = roc_curve(yTest[:,:], predicted[:,1])
    roc_auc = auc(fpr, tpr)
    # Plot ROC curve against the chance diagonal.
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange',
             lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
def main():
    '''
    Load the held-out split and plot the ROC curve for each saved model.
    '''
    # Get split data.
    yTrain, xTrain, yTest, xTest = split_set_data.get_raw_data()
    # Represents the metric evaluation (ROC curve) of each model.
    metric_evaluation_model('SVM_C_1e+16_gamma_1e-11.pkl', yTest, xTest)
    metric_evaluation_model('ANN_nlayer_1190.pkl', yTest, xTest)
if __name__=='__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipaySocialAntiepTaskFinishModel(object):
    """Request model for the Alipay social antiep task-finish API.

    Refactored from the generated per-field boilerplate: a single field
    list now drives initialisation, the property pairs, and both dict
    conversions, so adding a field is a one-line change. Behaviour matches
    the generated code, including the truthiness check that omits falsy
    values from to_alipay_dict().
    """

    # Ordered business fields; each is stored privately as '_<name>' and
    # exposed through a plain get/set property installed below.
    _FIELD_NAMES = (
        'condition_for_variable_award',
        'finish_business_info',
        'request_mode',
        'request_type',
        'scene_code',
        'source',
        'task_token',
        'task_type',
        'unique_id',
        'zone_id',
    )

    def __init__(self):
        for name in self._FIELD_NAMES:
            setattr(self, '_' + name, None)

    def to_alipay_dict(self):
        """Serialize the set (truthy) fields to a plain dict, recursing into
        values that themselves expose to_alipay_dict()."""
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            # preserve generated-code semantics: falsy values are omitted
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    value = value.to_alipay_dict()
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for an empty/None input."""
        if not d:
            return None
        o = AlipaySocialAntiepTaskFinishModel()
        for name in AlipaySocialAntiepTaskFinishModel._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o


def _install_field_properties(cls):
    """Attach a pass-through property pair for every business field."""
    for field_name in cls._FIELD_NAMES:
        def _getter(self, _attr='_' + field_name):
            return getattr(self, _attr)

        def _setter(self, value, _attr='_' + field_name):
            setattr(self, _attr, value)

        setattr(cls, field_name, property(_getter, _setter))


_install_field_properties(AlipaySocialAntiepTaskFinishModel)
|
# Generated by Django 3.0.1 on 2019-12-20 01:01
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0.1): drops the City.type_place field.
    Do not hand-edit an already-applied migration."""

    dependencies = [
        ('index', '0019_city_type_place'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='city',
            name='type_place',
        ),
    ]
|
from flask import render_template, flash, redirect, session, url_for, request, g
from flask.ext.login import login_user, logout_user, current_user, login_required
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, models, lm, oid
from forms import LoginForm, NewTaskForm, TrackDurationForm
from models import User, Task, Worklog, ROLE_USER, ROLE_ADMIN
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from cStringIO import StringIO
@lm.user_loader
def load_user(id):
    # Flask-Login callback: rehydrate the signed-in user from the session id.
    return User.query.get(int(id))
@app.route('/')
@app.route('/home')
def home():
    # Public landing page; no login required.
    return render_template('home.html')
@app.before_request
def before_request():
    # Expose the current user on flask.g for every request handler.
    g.user = current_user
@app.route('/login', methods = ['GET', 'POST'])
@oid.loginhandler
def login():
    """Show the OpenID login form; on submit, hand off to the provider."""
    # Already signed in -> straight to the dashboard.
    if g.user is not None and g.user.is_authenticated():
        return redirect(url_for('smore_station'))
    form = LoginForm()
    if form.validate_on_submit():
        # Stash the checkbox for after_login, then start the OpenID dance.
        session['remember_me'] = form.remember_me.data
        return oid.try_login(form.openid.data, ask_for = ['nickname','email'])
    return render_template('login.html', title = 'Sign In', form = form, providers = app.config['OPENID_PROVIDERS'])
@oid.after_login
def after_login(resp):
    """OpenID callback: create the user on first login, then sign them in."""
    # Provider must supply an email; without one we cannot identify the user.
    if resp.email is None or resp.email == "":
        flash('Invalid login. Please try again.')
        return redirect(url_for('login'))
    user = User.query.filter_by(email = resp.email).first()
    if user is None:
        # First login: derive a nickname (fall back to the email local part).
        nickname = resp.nickname
        if nickname is None or nickname == "":
            nickname = resp.email.split('@')[0]
        user = User(nickname = nickname, email = resp.email, role = ROLE_USER)
        db.session.add(user)
        db.session.commit()
    # Recover the remember-me choice stashed by login().
    remember_me = False
    if 'remember_me' in session:
        remember_me = session['remember_me']
        session.pop('remember_me', None)
    login_user(user, remember = remember_me)
    return redirect(request.args.get('next') or url_for('smore_station'))
@app.route('/smore_station', methods = ['GET','POST'])
@login_required
def smore_station():
    """Task dashboard: add tasks, log work durations, list everything.

    Handles both forms posted from the page; each successful POST
    redirects back here (POST/redirect/GET pattern).
    """
    form = NewTaskForm()
    if form.validate_on_submit():
        task = Task(task_name = form.task.data, timestamp = datetime.utcnow(), duration = 0, user_id = g.user.id)
        db.session.add(task)
        db.session.commit()
        flash('Task added!')
        return redirect(url_for('smore_station'))
    duration_form = TrackDurationForm()
    if duration_form.validate_on_submit():
        active_task = duration_form.active_task.data
        duration = duration_form.duration.data
        worklog = Worklog(task_id = active_task, timestamp = datetime.utcnow(), duration = duration)
        db.session.add(worklog)
        db.session.commit()
        flash('Database updated!')
        return redirect(url_for('smore_station'))
    # NOTE(review): lists every task in the system, not only the current
    # user's — confirm whether filtering by g.user.id is intended.
    tasks = Task.query.all()
    worklogs = {}
    for task in tasks:
        worklogs[task] = Worklog.query.filter_by(task_id = task.id).all()
    # BUG FIX: the template was passed the undefined local name `user`
    # (NameError at render time); the signed-in user lives on flask.g.
    return render_template('smore_station.html', user = g.user, form = form, duration_form = duration_form, tasks=tasks, worklogs=worklogs)
@app.route('/user/<nickname>')
@login_required
def user(nickname):
    """Profile page: the user's task list plus a demo bar chart rendered
    to an inline base64 PNG (Python 2 — uses cStringIO and str.encode('base64'))."""
    user = User.query.filter_by(nickname = nickname).first()
    # get the tasks associated with the user
    #haven't tested yet task_list = Task.query.filter_by(user_id = user)
    # NOTE(review): prefer `user is None` over `== None` (PEP 8).
    if user == None:
        flash('User ' + nickname + ' not found.')
        return redirect(url_for('home'))
    tasks = Task.query.all()
    # HTML snippet the base64 PNG is spliced into.
    html = '''
    <img src="data:image/png;base64,{}" />
    '''
    # NOTE(review): the chart below is hard-coded demo data, not the
    # user's actual worklogs — confirm whether that is intentional.
    N = 5
    smore = (20, 35, 30, 35, 27)
    ind = np.arange(N)  # the x locations for the groups
    width = 0.35       # the width of the bars
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    rects1 = ax.bar(ind, smore, width, color='r')
    sqlal = (25, 32, 55, 20, 25)
    rects2 = ax.bar(ind+width, sqlal, width, color='y')
    # axis labels, title, ticks and legend
    ax.set_ylabel('Duration')
    ax.set_title('Time spent working')
    ax.set_xticks(ind+width)
    ax.set_xticklabels( ('11/21', '11/22', '11/23', '11/24', '11/25') )
    ax.legend( (rects1[0], rects2[0]), ('Smore', 'SQLAlchemy') )
    def autolabel(rects):
        # attach the bar height as a text label above each bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
                    ha='center', va='bottom')
    autolabel(rects1)
    autolabel(rects2)
    # Encode image to png in base64 (py2-only str.encode('base64')).
    io = StringIO()
    fig.savefig(io, format='png')
    data = io.getvalue().encode('base64')
    graph = html.format(data)
    return render_template('user.html', user = user, tasks = tasks, graph = graph)
@app.route('/logout')
def logout():
    # Clear the Flask-Login session and return to the landing page.
    logout_user()
    return redirect(url_for('home'))
if __name__ == '__main__':
    app.run(debug=True)
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gobject
import gtk
import sys
import os
sys.path.insert(1,"/usr/local/lib64/python2.7/site-packages")
import gwy
import re
import numpy as np
from GwyData import GwyData
# Class for navigation
class Infor():
    """GTK2 side panel showing scan parameters (bias, current, width/height)
    of a loaded SPM image, with a user-editable scale ratio (Python 2)."""
    def __init__(self,parent):
        self.parent = parent
        # Definition of the instance variables.
        self.c = None       # container (currently unused; see initialize)
        self.param = None   # parameter dict of the displayed scan
        # NOTE(review): table created 4x2 but rows up to index 8 are attached
        # below — presumably relies on gtk.Table auto-resizing; confirm.
        self.table_info = gtk.Table(4,2,True)
        # Static caption labels for each parameter row.
        self.label_volt_pre = gtk.Label("V: ")
        self.label_current_pre = gtk.Label("I: ")
        self.label_w_pre = gtk.Label("W: ")
        self.label_h_pre = gtk.Label("H: ")
        self.label_w_pre_real = gtk.Label("W_real: ")
        self.label_h_pre_real = gtk.Label("H_real: ")
        # Value labels, filled in by initialize()/update().
        self.label_volt = gtk.Label(None)
        self.label_current = gtk.Label(None)
        self.label_w = gtk.Label(None)
        self.label_h = gtk.Label(None)
        self.label_w_real = gtk.Label(None)
        self.label_h_real = gtk.Label(None)
        # Each caption gets a frame for a visible border.
        self.frame_volt = gtk.Frame()
        self.frame_volt.add(self.label_volt_pre)
        self.frame_current = gtk.Frame()
        self.frame_current.add(self.label_current_pre)
        self.frame_w = gtk.Frame()
        self.frame_w.add(self.label_w_pre)
        self.frame_h = gtk.Frame()
        self.frame_h.add(self.label_h_pre)
        self.frame_w_real = gtk.Frame()
        self.frame_w_real.add(self.label_w_pre_real)
        self.frame_h_real = gtk.Frame()
        self.frame_h_real.add(self.label_h_pre_real)
        # Ratio entry: multiplies width/height into "real" dimensions.
        self.label_ratio_pre = gtk.Label("Ratio:")
        self.entry_ratio = gtk.Entry(max=0)
        self.entry_ratio.set_text("1")
        # Classification combo for tagging the scan.
        self.label_types =gtk.Label("Type: ")
        self.combobox_types =gtk.combo_box_new_text()
        types = ["Unknown","Metal","Molecules","Useless"]
        for item in types:
            self.combobox_types.append_text(item)
        self.combobox_types.set_active(0)
        # Lay out caption/value pairs row by row.
        self.table_info.attach(self.frame_volt,0,1,0,1)
        self.table_info.attach(self.label_volt,1,2,0,1)
        self.table_info.attach(self.frame_current,0,1,1,2)
        self.table_info.attach(self.label_current,1,2,1,2)
        self.table_info.attach(self.frame_w,0,1,2,3)
        self.table_info.attach(self.label_w,1,2,2,3)
        self.table_info.attach(self.frame_h,0,1,3,4)
        self.table_info.attach(self.label_h,1,2,3,4)
        self.table_info.attach(self.frame_w_real,0,1,4,5)
        self.table_info.attach(self.label_w_real,1,2,4,5)
        self.table_info.attach(self.frame_h_real,0,1,5,6)
        self.table_info.attach(self.label_h_real,1,2,5,6)
        self.table_info.attach(self.label_ratio_pre,0,1,6,7)
        self.table_info.attach(self.entry_ratio,1,2,6,7)
        self.table_info.attach(self.label_types,0,1,7,8)
        self.table_info.attach(self.combobox_types,1,2,7,8)
        # Pressing Enter in the ratio entry recomputes the real dimensions.
        self.entry_ratio.connect('activate',self.update,None)
    def initialize(self,widget,data):
        """Populate the value labels from a scan parameter dict *data*."""
        #self.c = container
        self.param = data
        self.label_volt.set_text(str(self.param['bias'])+' '+ self.param['bu'])
        self.label_current.set_text(str(self.param['current']) +' '+ self.param['cu'])
        self.label_w.set_text("{0:.1f}".format(self.param['width']) +' '+ self.param['xyu']+'['+str(self.param['w_dim'])+']')
        self.label_h.set_text("{0:.1f}".format(self.param['height']) +' '+ self.param['xyu']+'['+str(self.param['h_dim'])+']')
        self.label_w_real.set_text("{0:.1f}".format(self.param['width_real']) +' '+ self.param['xyu'])
        self.label_h_real.set_text("{0:.1f}".format(self.param['height_real']) +' '+ self.param['xyu'])
    def update(self,widget,data):
        """Recompute real width/height from the current ratio entry value."""
        ratio = float(self.entry_ratio.get_text())
        print ratio, type(ratio)
        self.param['ratio'] = ratio
        self.param['width_real'] = self.param['width']*ratio
        self.param['height_real'] = self.param['height']*ratio
        self.label_w_real.set_text("{0:.1f}".format(self.param['width_real']) +' '+ self.param['xyu'])
        self.label_h_real.set_text("{0:.1f}".format(self.param['height_real']) +' '+ self.param['xyu'])
def main():
    # Hand control to the GTK main loop (returns after gtk.main_quit()).
    gtk.main()
    return 0
if __name__ == "__main__":
    # Demo harness: load a sample .sxm scan and show its info panel.
    window = gtk.Window()
    data = GwyData()
    data.load_data('/home/jorghyq/Project/Gwyddion-Utils/A151201.000102-01691.sxm')
    inf = Infor(window)
    inf.initialize(data.c,data.param)
    print "hi"
    window.add(inf.table_info)
    window.show_all()
    window.present()
    main()
|
'''
2309번
일곱 난쟁이
'''
def pick_seven(heights):
    """Return the 7 of the 9 heights summing to exactly 100, sorted ascending.

    BOJ 2309 guarantees at least one valid pair can be removed; the first
    pair found (in index order) is the one removed, matching the original
    loop's behaviour.
    """
    total = sum(heights)
    for i in range(9):
        for j in range(i + 1, 9):
            # removing heights[i] and heights[j] leaves a sum of 100
            if heights[i] + heights[j] == total - 100:
                rest = [h for k, h in enumerate(heights) if k != i and k != j]
                rest.sort()
                return rest
    return None  # unreachable for valid problem input


def main():
    """Read 9 heights from stdin and print the chosen 7, one per line."""
    heights = [int(input()) for _ in range(9)]
    for h in pick_seven(heights):
        print(h)


# FIX: the original read stdin at module level, making the file impossible
# to import or test; guard the I/O behind __main__.
if __name__ == "__main__":
    main()
#!/usr/bin/env python3
from lib import digits, prime
def all_ccprimes(s):
    """True iff every ordered pair of distinct members of *s* concatenates
    (digit-wise, left then right) to a prime number."""
    return all(
        prime(int("".join(digits(left) + digits(right))))
        for left in s
        for right in s
        if left != right
    )
from lib import sieve
# Project Euler 60: find five primes where every pairwise concatenation is
# also prime, and minimise their sum. Brute force with incremental pruning:
# a candidate tuple is only extended if it already passes all_ccprimes.
c = sieve(10**4)
primes = []
for i in range(len(c)):
    if c[i]:
        primes.append(i)
print("Sieve done")
# Upper bound for the best sum found so far.
min_sum = len(c)**5
for i1 in range(0,len(primes)):
    for i2 in range(i1+1,len(primes)):
        if all_ccprimes((primes[i1],primes[i2])):
            #print(primes[i1],primes[i2])
            for i3 in range(i2+1,len(primes)):
                if all_ccprimes((primes[i1],primes[i2],primes[i3])):
                    #print(primes[i1],primes[i2],primes[i3])
                    for i4 in range(i3+1,len(primes)):
                        if all_ccprimes((primes[i1],primes[i2],primes[i3],primes[i4])):
                            #print(primes[i1],primes[i2],primes[i3],primes[i4])
                            for i5 in range(i4+1,len(primes)):
                                if all_ccprimes((primes[i1],primes[i2],primes[i3],primes[i4],primes[i5])):
                                    print(primes[i1],primes[i2],primes[i3],primes[i4],primes[i5])
                                    if primes[i1] + primes[i2] + primes[i3] + primes[i4] + primes[i5] < min_sum:
                                        min_sum = primes[i1] + primes[i2] + primes[i3] + primes[i4] + primes[i5]
                                        min_set = (primes[i1],primes[i2],primes[i3],primes[i4],primes[i5])
# NOTE(review): min_set is unbound (NameError) if no quintuple is ever
# found below 10**4 — consider initialising it to None.
print(min_set, min_sum)
from factorial import factorial
import os
if __name__=='__main__':
    # 'cls' clears the console on Windows only — NOTE(review): on POSIX
    # this prints a shell error; consider a 'clear' fallback.
    os.system('cls')
    num=int(input('INGRESE UN NUMERO: '))
    factorial(num)
    # Keep the console window open until the user presses Enter.
    input()
|
# Generated by Django 2.2.3 on 2019-07-30 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2.3): unmanaged RADIUS tables get Chinese
    verbose names, User gains in_office and several fields gain verbose
    names / constraints. Do not hand-edit an already-applied migration."""

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='radacct',
            options={'managed': False, 'verbose_name_plural': 'VPN计费表'},
        ),
        migrations.AlterModelOptions(
            name='radgroupreply',
            options={'managed': False, 'verbose_name': 'VPN组', 'verbose_name_plural': 'VPN组授权表'},
        ),
        migrations.AlterModelOptions(
            name='radpostauth',
            options={'managed': False, 'verbose_name_plural': 'VPN登录记录表'},
        ),
        migrations.AlterModelOptions(
            name='radusergroup',
            options={'managed': False, 'verbose_name': 'VPN用户名', 'verbose_name_plural': 'VPN账户&组对应表'},
        ),
        migrations.AddField(
            model_name='user',
            name='in_office',
            field=models.BooleanField(default=True, verbose_name='是否在职'),
        ),
        migrations.AlterField(
            model_name='user',
            name='c_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='创建时间'),
        ),
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(max_length=254, unique=True, verbose_name='邮件地址'),
        ),
        migrations.AlterField(
            model_name='user',
            name='has_confirmed',
            field=models.BooleanField(default=False, verbose_name='是否邮件确认'),
        ),
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(max_length=64, unique=True, verbose_name='用户名'),
        ),
        migrations.AlterField(
            model_name='user',
            name='password',
            field=models.CharField(max_length=64, verbose_name='密码'),
        ),
    ]
|
import numpy as np
from Externals import calculate_gamma_new
from Externals import calculate_T
from single_estimate import single_worker
def DC_estimate(x, n, m, k_n, k_rho, tau):
    """Divide-and-conquer tail estimate: average the per-machine moment
    statistics, then compute a corrected gamma for each value in *tau*.

    Returns the pooled first moment M1_1 and the array of corrected gammas.
    """
    moment_arrays = calculate_M(x, m, n, k_n, k_rho)
    # pool each of the five per-machine statistics by simple averaging
    M1_1, M1_2, M2_1, M2_2, M2_3 = (np.mean(arr) for arr in moment_arrays)
    gamma_new = np.zeros(len(tau))
    for idx, t in enumerate(tau):
        T = calculate_T(M2_1, M2_2, M2_3, t)
        rho = -3 * np.abs((T - 1) / (T - 3))
        gamma_new[idx] = calculate_gamma_new(M1_1, M1_2, rho, k_n)
    return M1_1, gamma_new
def calculate_M(x, m, n, k_n, k_rho):
    """Split *x* into *m* consecutive chunks of length *n* and collect the
    five per-machine moment statistics returned by single_worker."""
    # rows = machines, columns = the five statistics
    per_machine = np.array([
        single_worker(x[i * n:i * n + n].copy(), k_n, k_rho)
        for i in range(m)
    ])
    return (per_machine[:, 0], per_machine[:, 1], per_machine[:, 2],
            per_machine[:, 3], per_machine[:, 4])
if __name__ == "__main__":
    # Demo run on simulated heavy-tailed (inverse Weibull) data.
    from scipy.stats import invweibull
    N = 100000
    x = invweibull.rvs(1, size=N)
    # NOTE(review): `type` shadows the builtin and is unused here.
    n, m, k_n, tau, type = 10000, 10, 5000, [1], 'intermediate'
    k_rho = int(np.power(n, 0.91))
    res = DC_estimate(x, n, m, k_n, k_rho, tau)
    print(res)
|
import sys
sys.path.append("..")
from logs.logutil_file import fastapi_logger
def check_log():
    """Trigger a ZeroDivisionError and log it, to verify the logger works."""
    try:
        1 / 0
    except ZeroDivisionError:
        # logger.exception records the full traceback automatically.
        fastapi_logger.exception(1)
if __name__ == "__main__":
    # Manual smoke test: run directly to confirm exception logging output.
    check_log()
|
# ------------------------------------------------------------------
#
# Tests for utility module
#
# ------------------------------------------------------------------
import sys
py_path = '../../tools/'
sys.path.insert(0, py_path)
py_path = '../../src/mobility/'
sys.path.insert(0, py_path)
import utils as ut
from colors import *
import abm_utils as aut
#
# Supporting functions
#
def distance_test(loc1, loc2, exp_val):
    ''' Tests utilities for computing distances
            from GIS.

    Args:
        loc1, loc2: dicts with 'lat' and 'lon' keys.
        exp_val: expected distance.

    Returns:
        bool: True when the computed distance matches exp_val within
        a fixed tolerance.
    '''
    tol = 1e-5
    val = aut.compute_distance(loc1, loc2)
    # Return the comparison result directly instead of an if/else that
    # re-derives True/False from a boolean.
    return bool(ut.float_equality(val, exp_val, tol))
#
# Tests
#
# --- Distances
# Test 1: short distance (~137 km), extra dict keys must be ignored
place_1 = {'lat': -43, 'lon': 172}
place_2 = {'lat': -44, 'lon': 171, 'candy': 'ZivotinjskoCarstvo'}
exp_val = 137.365669065197
ut.test_pass(distance_test(place_1, place_2, exp_val), 'Short distance computation')
# Test 2: long, intercontinental distance
place_2 = {'lat': 20, 'lon': -108, 'choco': 'NajlepseZelje'}
exp_val = 10734.8931427602
ut.test_pass(distance_test(place_1, place_2, exp_val), 'Long distance computation')
|
from flask import Flask
from flask import request
import requests
import json
import hmac
import hashlib
import sys
import argparse
# 追加
import random
# Constants
CALLBACK_TOKEN = 'yuya4_callback_token'  # webhook verification token
PAGE_TOKEN = 'hage'  # Messenger page access token (placeholder)
APP_ID = 'hoge'  # Facebook app id (placeholder)
APP_SECRET = b'fuga' # must be bytes — hmac.new rejects str keys
# Command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument("-p", help="set port number", type=int)
args = parser.parse_args()
# Handle an incoming message event.
def recieveMessage(entry, message):
    """Reply to a received Messenger message.

    - '画像 <keyword>': search for an image and send it.
    - text containing stress-related words: send encouragement + pet image.
    - any other text: send a random sympathetic reply.
    - an image attachment: send a random compliment.
    - anything else: reply 'not supported'.
    """
    # Log the raw event payload for debugging.
    print(message)
    msg = message['message']
    sender_id = message['sender']['id']
    if 'text' in msg:
        m = msg['text']
        parts = m.split(' ')
        # Require a keyword after '画像'; the original indexed parts[1]
        # unconditionally and raised IndexError for a bare '画像'.
        if parts[0] == '画像' and len(parts) > 1:
            keyword = parts[1]
            sendMessage(sender_id, 'ソイヤッ!!')
            image_url = getImage(keyword)
            sendImage(sender_id, image_url)
            return
        elif 'いらいら' in m or '疲' in m or '死' in m or 'つらい' in m:
            heals = ['猫', '犬']
            image_url = getImage(random.choice(heals))
            sendMessage(sender_id, '元気出して!')
            sendImage(sender_id, image_url)
            return
        else:
            mes_res = ['確かに', 'わかる', 'うんうん', '私もそう思う']
            sendMessage(sender_id, random.choice(mes_res))
            return
    # Use .get() so events with neither text nor attachments don't raise
    # KeyError/IndexError; the original indexed ['attachments'][0] blindly.
    elif msg.get('attachments') and msg['attachments'][0].get('type') == 'image':
        image_res = ['可愛い💓', 'ナイスセンス!', 'いい写真!']
        sendMessage(sender_id, random.choice(image_res))
        return
    else:
        # Unsupported message type.
        sendMessage(sender_id, 'not supported')
# Send a plain text message via the Messenger Send API.
def sendMessage(recipientId, message):
    """POST a text message to the given recipient.

    Args:
        recipientId: Messenger user id of the recipient.
        message: text to send.
    """
    endpoint = 'https://graph.facebook.com/v2.6/me/messages?access_token=' + PAGE_TOKEN
    payload = {
        'recipient': {'id': recipientId},
        'message': {'text': message},
    }
    requests.post(endpoint,
                  headers={'Content-Type': 'application/json'},
                  data=json.dumps(payload))
def sendImage(recipientId, image_url):
    '''
    Send an image attachment via the Messenger Send API.

    Args:
        recipientId: string
            unique id of a recipient
        image_url: string
            url of the image to send
    '''
    endpoint = 'https://graph.facebook.com/v2.6/me/messages?access_token=' + PAGE_TOKEN
    payload = {
        'recipient': {'id': recipientId},
        'message': {
            'attachment': {
                'type': 'image',
                'payload': {'url': image_url},
            }
        },
    }
    requests.post(endpoint,
                  headers={'Content-Type': 'application/json'},
                  data=json.dumps(payload))
def getImage(text):
    '''
    Search photozou for public photos matching a query and return one URL.

    Args:
        text: string
            search query
    Return:
        an image url, or '' when the search returned no photos
    '''
    url = 'https://api.photozou.jp/rest/search_public.json'
    params = {
        'keyword': text,
        'limit': 10
    }
    res = requests.get(url, params=params).json()
    photos = res.get('info', {}).get('photo', [])
    if not photos:
        return ''
    # Pick a random hit within the actual result count: the API may return
    # fewer than `limit` photos, so the original hard-coded randint(0, 9)
    # could raise IndexError.
    return random.choice(photos)['image_url']
# Flask application for receiving Messenger webhooks.
app = Flask(__name__)
# Webhook endpoint: handles both verification (GET) and events (POST).
@app.route("/", methods=['GET', 'POST'])
def main():
    """Verify the Facebook callback handshake, validate the payload
    signature, then dispatch each message event to recieveMessage."""
    # Callback verification handshake from Facebook.
    if ('hub.verify_token' in request.args) and ('hub.challenge' in request.args):
        if request.args['hub.verify_token'] != '' and request.args['hub.challenge'] != '':
            if request.args['hub.verify_token'] == CALLBACK_TOKEN:
                print('verify_token OK')
                return request.args['hub.challenge']
        print('verify_token NG')
        return ''
    # Signature verification: reject payloads not signed with APP_SECRET.
    if 'X-Hub-Signature' not in request.headers:
        print('missing signature')
        return ''
    hubSignature = request.headers['X-Hub-Signature'].split('=')[1]
    signature = hmac.new(APP_SECRET, request.data, hashlib.sha1).hexdigest()
    # compare_digest avoids a timing side channel on signature comparison.
    if not hmac.compare_digest(signature, hubSignature):
        print('X-Hub-Signature NG')
        return ''
    # Dispatch every message event in the batch.
    for entry in request.json['entry']:
        for message in entry['messaging']:
            if 'message' in message:
                recieveMessage(entry, message)
    return ''
# Start the server (default port 3000; override with -p).
if __name__ == "__main__":
    app.run(host='0.0.0.0', port=args.p if args.p else 3000, threaded=True, debug=True)
|
import qrcode
# Generate a QR code with explicit parameters.
def make_code(text):
    """Render `text` as a QR code and display it.

    version controls the symbol size/density, error_correction the
    redundancy level, box_size the pixels per module, and border the
    quiet-zone width in modules (4 is the spec minimum).
    """
    qr = qrcode.QRCode(
        version=5,
        error_correction=qrcode.constants.ERROR_CORRECT_L,
        box_size=8,
        border=4,
    )
    qr.add_data(text)
    # fit=True lets the library grow the version if the data overflows.
    qr.make(fit=True)
    qr.make_image().show()
def make_code_easy(text):
    """Render `text` as a QR code with default settings, save and show it.

    The original passed a bare directory path to save(); without a file
    extension PIL cannot infer the output format and save() fails, so
    save to an explicit .png file instead.
    """
    image = qrcode.make(text)
    save_path = r"C:\python_work\qrcode.png"
    image.save(save_path)
    image.show()
    print("image already saved: " + save_path)
if __name__ == '__main__':
    # Prompt for text (prompt is in Chinese: "enter what you want to say")
    # and render it as a QR code.
    text = input("请输入你想说的话:")
    make_code(text)
#https://www.jb51.net/article/127033.htm
#https://blog.csdn.net/henni_719/article/details/54580732?locationNum=3&fps=1
# ~ qr = qrcode.QRCode(
# ~ version=2,
# ~ error_correction=qrcode.constants.ERROR_CORRECT_H,
# ~ box_size=10,
# ~ border=1
# ~ )
# ~ qr.add_data("http://jb51.net/")
# ~ qr.make(fit=True)
# ~ img = qr.make_image()
# ~ img = img.convert("RGBA")
# ~ icon = Image.open("favicon.png")
# ~ img_w, img_h = img.size
# ~ factor = 4
# ~ size_w = int(img_w / factor)
# ~ size_h = int(img_h / factor)
# ~ icon_w, icon_h = icon.size
# ~ if icon_w > size_w:
# ~ icon_w = size_w
# ~ if icon_h > size_h:
# ~ icon_h = size_h
# ~ icon = icon.resize((icon_w, icon_h), Image.ANTIALIAS)
# ~ w = int((img_w - icon_w) / 2)
# ~ h = int((img_h - icon_h) / 2)
# ~ img.paste(icon, (w, h), icon)
# ~ img.save("dhqme_qrcode.png")
|
from unittest import TestCase
import math
__author__ = 'serhiyk'
def russian_rec(a, b):
    """Multiply a*b recursively by the Russian-peasant method.

    Uses floor division (//) so intermediate values stay exact ints: with
    the original true division (a / 2) the operands become floats and lose
    precision once products exceed 2**53, making large results wrong.
    """
    if a == 0:
        return 0
    if a % 2 == 0:
        return 2 * russian_rec(a // 2, b)
    return b + 2 * russian_rec((a - 1) // 2, b)
def func(a):
    """Theoretical step bound: 3*floor(log2(a)) + 4."""
    exponent = math.floor(math.log(a, 2))
    return 3 * exponent + 4
class Tester(TestCase):
    """Unit tests for russian_rec and the theoretical bound func.

    Fixed: the original used Python 2 `print` statements, which are
    syntax errors under Python 3.
    """

    def test_1_and_1(self):
        self.assertEqual(1, russian_rec(1, 1))

    def test_500000000_and_200000000(self):
        self.assertEqual(100000000000000000, russian_rec(500000000, 200000000))

    def test_single(self):
        self.assertEqual(16, func(20))
        self.assertEqual(10, func(4))

    def test_my_numbers(self):
        # Expected values for small a; the a == 0 arm is unreachable
        # because range starts at 1.
        theory = [(a, 1 if a == 0 else 4 if a < 2 else 7 if a < 4 else 10) for a in range(1, 6)]
        print(theory)
        for (a, t) in theory:
            print(a, t)
            self.assertEqual(t, func(a))
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#import sys
#reload(sys)
#sys.setdefaultencoding('utf-8')
#from nltk.corpus import brown
from gensim.models import Word2Vec
import os
import torch
class PretrainedWordVectors():
    """Build a word-index -> embedding matrix from a (cached) Word2Vec model.

    Trains a Word2Vec model on `corpus` (or loads a previously saved one)
    and copies each known word's vector into a tensor row indexed by
    word2idx. Rows for out-of-vocabulary words stay zero.
    """
    def __init__(self,corpus,dataname,word2idx,kind='word2vec',dim=300):
        # Cache directory per dataset so retraining is skipped on reruns.
        path = './wordvectors/pretrain_pycache_/'+str(dataname)+'/'
        filename = 'word2vec'+str(dim)+'d.txt'
        # Embedding matrix, one row per word; defaults to zero vectors.
        self.word2vec = torch.zeros(len(word2idx.keys()),dim)
        if os.path.exists(path+filename):
            model = Word2Vec.load(path+filename)
        else:
            if not os.path.exists(path):
                os.makedirs(path)
            model = Word2Vec(corpus,workers=4,min_count=1,vector_size=dim)
            # corpus: iterable of tokenized sentences supplied by the dataloader.
            model.save(path+filename)
        # Copy each word's trained vector into its row.
        for word in word2idx:
            try:
                vector = torch.tensor(model.wv.word_vec(word))
            except Exception:
                # Word not present in the trained model; leave its row zero.
                print(word)
                continue
            self.word2vec[word2idx[word]]=vector
    def get_wordVectors(self):
        # Accessor for the assembled embedding matrix.
        return self.word2vec
|
#change all letters from a to z
#all to upper case
#remove all spaces
def translate(userinput):
    """Uppercase the input, strip spaces, replace 'A' with 'Z', then print
    and return the result.

    Returning the transformed string (in addition to printing it) makes
    the function testable and reusable; existing callers that ignore the
    return value are unaffected.
    """
    userinput = userinput.upper()
    userinput = userinput.replace(" ", "")
    # Replacement runs after upper(), so both 'a' and 'A' become 'Z'.
    userinput = userinput.replace("A", "Z")
    print(userinput)
    return userinput
# Prompt the user and translate their input (result is printed).
usertext= str(input("Please put whatever word you want to convert"))
translate(usertext)
|
from django.shortcuts import render
from rest_framework import generics
from .serializers import CategorySerializer,BookSerializer
from .models import Category,Book
# Create your views here.
class ListBook(generics.ListCreateAPIView):
    """List all books or create a new one."""
    queryset = Book.objects.all()
    serializer_class = BookSerializer
class DetailBook(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single book."""
    queryset = Book.objects.all()
    serializer_class = BookSerializer
class ListCategory(generics.ListCreateAPIView):
    """List all categories or create a new one."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
class DetailCategory(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single category."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
r"""Utility functions."""
import json
import math
import os
from urllib.parse import urlparse
import urllib.request
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import torch
import torch.nn.functional as F
# Machine epsilon for float64/float32; used to guard divisions by ~zero.
EPSILON_DOUBLE = torch.tensor(2.220446049250313e-16, dtype=torch.float64)
EPSILON_SINGLE = torch.tensor(1.19209290E-07, dtype=torch.float32)
# NOTE(review): despite the DOUBLE name this is created as float32 —
# confirm whether dtype=torch.float64 was intended.
SQRT_TWO_DOUBLE = torch.tensor(math.sqrt(2), dtype=torch.float32)
SQRT_TWO_SINGLE = SQRT_TWO_DOUBLE.to(torch.float32)
# Built-in defaults, overridden by an optional .torchrayrc JSON file.
_DEFAULT_CONFIG = {
    'mongo': {
        'server': 'mongod',
        'hostname': 'localhost',
        'port': 27017,
        'database': './data/db'
    },
    'benchmark': {
        'voc_dir': './data/datasets/voc',
        'coco_dir': './data/datasets/coco',
        'coco_anno_dir': './data/datasets/coco/annotations',
        'imagenet_dir': './data/datasets/imagenet',
        'models_dir': './data/models',
        'experiments_dir': './data'
    }
}

# Set after the first lookup so later calls return the cached config.
_config_read = False


def get_config():
    """Read the TorchRay config file.

    Read the config file from the current directory or the user's home
    directory and return the configuration.

    Returns:
        dict: configuration.
    """
    global _config_read
    config = _DEFAULT_CONFIG
    if _config_read:
        return config

    def _merge(source, delta):
        # Recursively overlay `delta` onto `source`; non-dict leaves are
        # replaced outright.
        if not isinstance(source, dict):
            return delta
        assert isinstance(delta, dict)
        for key in source.keys():
            if key in delta:
                source[key] = _merge(source[key], delta[key])
        for key in delta.keys():
            # Catch name errors in config file.
            assert key in source
        return source

    for curr_dir in os.curdir, os.path.expanduser('~'):
        rc_path = os.path.join(curr_dir, '.torchrayrc')
        if os.path.exists(rc_path):
            with open(rc_path, 'r') as rc_file:
                _merge(config, json.load(rc_file))
            break
    _config_read = True
    return config
def get_device(gpu=0):
    r"""Get the :class:`torch.device` to use; specify device with :attr:`gpu`.

    Args:
        gpu (int, optional): Index of the GPU device; specify ``None`` to
            force CPU. Default: ``0``.

    Returns:
        :class:`torch.device`: device to use.
    """
    if gpu is None or not torch.cuda.is_available():
        return torch.device('cpu')
    return torch.device(f'cuda:{gpu}')
def xmkdir(path):
    r"""Create a directory path recursively.

    The function creates :attr:`path` if the directory does not exist;
    ``None`` is silently ignored.

    Args:
        path (str): path to create.
    """
    if path is None or os.path.exists(path):
        return
    try:
        os.makedirs(path)
    except FileExistsError:
        # Race condition in multi-processing.
        pass
def is_url(obj):
    r"""Check if an object is an URL.

    Args:
        obj (object): object to test.

    Returns:
        bool: ``True`` if :attr:`obj` is an URL string; otherwise ``False``.
    """
    try:
        parts = urlparse(obj)
    except Exception:
        return False
    # Require scheme, host and path to all be present.
    return bool(parts.scheme and parts.netloc and parts.path)
def tensor_to_im(tensor):
    r"""Reshape a tensor as a grayscale image stack.

    Reshapes :attr:`tensor` of size :math:`N\times K\times H\times W` to
    have shape :math:`(NK)\times 1\times H\times W`.

    Args:
        tensor (:class:`torch.Tensor`): tensor to rearrange.

    Returns:
        :class:`torch.Tensor`: Reshaped tensor.
    """
    spatial = tensor.shape[2:]
    # Fold batch and channel dims together, adding a singleton channel.
    return tensor.reshape(-1, 1, *spatial)
def pil_to_tensor(pil_image):
    r"""Convert a PIL image to a tensor.

    Args:
        pil_image (:class:`PIL.Image`): PIL image.

    Returns:
        :class:`torch.Tensor`: the image as a :math:`3\times H\times W`
        tensor in the [0, 1] range.
    """
    array = np.array(pil_image)
    if array.ndim == 2:
        # Promote grayscale to a single-channel image.
        array = array[:, :, None]
    # HWC -> CHW, scaled from [0, 255] to [0, 1].
    return torch.tensor(array, dtype=torch.float32).permute(2, 0, 1) / 255
def im_to_numpy(tensor):
    r"""Convert a tensor image to a NumPy image.

    Converts the :math:`K\times H\times W` tensor :attr:`tensor` to a
    corresponding :math:`H\times W\times K` NumPy array.

    Args:
        tensor (:class:`torch.Tensor`): input tensor.

    Returns:
        :class:`numpy.ndarray`: NumPy array.
    """
    # Broadcast to 3 channels, move channels last, then detach to NumPy.
    hwc = tensor.expand(3, *tensor.shape[1:]).permute(1, 2, 0)
    return hwc.detach().cpu().numpy()
def imread(file, as_pil=False, resize=None, to_rgb=False):
    r"""
    Read an image as a tensor.

    The function reads the image :attr:`file` as a PyTorch tensor.
    `file` can also be an URL.

    To reshape the image use the option :attr:`resize`, passing the desired
    shape ``(W, H)`` as tuple. Passing an integer sets the shortest side to
    that length while preserving the aspect ratio.

    Args:
        file (str): Path or URL to the image.
        resize (float, int, tuple or list): Resize the image to this size.
        as_pil (bool): If ``True``, returns the PIL image instead of converting
            to a tensor.
        to_rgb (optional, bool): If `True`, convert the PIL image to RGB.
            Default: ``False``.

    Returns:
        :class:`torch.Tensor`:
            The image read as a :math:`3\times H\times W` tensor in
            the [0, 1] range.
    """
    if is_url(file):
        # Browser-like headers: some servers reject Python's default agent.
        hdr = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 '
                          '(KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.'
                          '11',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*'
                      '/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'
        }
        req = urllib.request.Request(file, headers=hdr)
        file = urllib.request.urlopen(req)
    img = Image.open(file)
    if to_rgb:
        img = img.convert('RGB')
    if resize is not None:
        if not isinstance(resize, (tuple, list)):
            # Scalar: scale the shortest side to `resize`, keep aspect ratio.
            scale = float(resize) / float(min(img.size[0], img.size[1]))
            resize = [round(scale * h) for h in img.size]
        if resize != img.size:
            # Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
            # same filter under its current name (available since Pillow 2.7).
            img = img.resize(resize, Image.LANCZOS)
    if as_pil:
        return img
    return pil_to_tensor(img)
def imsc(img, *args, quiet=False, lim=None, interpolation='lanczos', **kwargs):
    r"""Rescale and display an image.

    Scales :attr:`img` to the [0, 1] range. The image is assumed to have
    shape :math:`3\times H\times W` (RGB) or :math:`1\times H\times W`
    (grayscale).

    Args:
        img (:class:`torch.Tensor` or :class:`PIL.Image`): image.
        quiet (bool, optional): if False, do not display image.
            Default: ``False``.
        lim (list, optional): maximum and minimum intensity value for
            rescaling. Default: ``None``.
        interpolation (str, optional): The interpolation mode to use with
            :func:`matplotlib.pyplot.imshow` (e.g. ``'lanczos'`` or
            ``'nearest'``). Default: ``'lanczos'``.

    Returns:
        :class:`torch.Tensor`: Rescaled image.
    """
    if isinstance(img, Image.Image):
        img = pil_to_tensor(img)
    handle = None
    with torch.no_grad():
        if not lim:
            # Default to the image's own min/max.
            lim = [img.min(), img.max()]
        # The subtraction also makes a copy, so mul_ below is safe.
        img = img - lim[0]
        img.mul_(1 / (lim[1] - lim[0]))
        img = torch.clamp(img, min=0, max=1)
        if not quiet:
            # CHW tensor -> HWC numpy bitmap for matplotlib.
            bitmap = img.expand(3, *img.shape[1:]).permute(1, 2, 0).cpu().numpy()
            handle = plt.imshow(bitmap, *args, interpolation=interpolation, **kwargs)
            plt.gca().axis('off')
    return img, handle
def resample(source, target_size, transform):
    r"""Spatially resample a tensor.

    The function resamples the :attr:`source` tensor generating a target
    tensor of size :attr:`target_size`. Resampling uses the transform
    :attr:`transform`, specified as a :math:`2\times 2` matrix in the form

    .. math::
        \begin{bmatrix}
        s_u & t_u\\
        s_v & t_v
        \end{bmatrix}

    where :math:`s_u` is the scaling factor along the horizontal spatial
    direction, :math:`t_u` the horizontal offset, and :math:`s_v, t_v` the
    corresponding quantities for the vertical direction.

    Internally, the function uses :func:`torch.nn.functional.grid_sample`
    with bilinear interpolation and zero padding. The transformation defines
    the forward mapping, so that a pixel :math:`(u,v)` in the source tensor
    is mapped to pixel :math:`u' = s_u u + t_u, v' = s_v v + t_v`.

    Pixels are unit squares, so a :math:`H\times W` tensor maps to the
    rectangle :math:`[0, W) \times [0, H)`, and element :math:`x_{ncij}`
    maps to a unit square whose center is :math:`(u,v) = (i + 1/2, j+1/2)`.

    Args:
        source (:class:`torch.Tensor`): :math:`N\times C\times H\times W`
            tensor.
        target_size (tuple of int): target size.
        transform (:class:`torch.Tensor`): :math:`2\times 2` transformation
            tensor.

    Returns:
        :class:`torch.Tensor`: resampled tensor.
    """
    dtype = source.dtype
    dev = source.device
    height_, width_ = target_size
    # Target pixel centers: (i + 1/2) in target coordinates.
    ur_ = torch.arange(width_, dtype=dtype, device=dev) + 0.5
    vr_ = torch.arange(height_, dtype=dtype, device=dev) + 0.5
    height, weight = source.shape[2:]
    # Map target centers to source coordinates and normalize to [-1, 1],
    # the coordinate convention grid_sample expects.
    ur = 2 * ((ur_ + transform[0, 1]) / transform[0, 0]) / weight - 1
    vr = 2 * ((vr_ + transform[1, 1]) / transform[1, 0]) / height - 1
    v, u = torch.meshgrid(vr, ur)
    v = v.unsqueeze(2)
    u = u.unsqueeze(2)
    # grid[j, i] holds the (u, v) sampling location for output pixel (j, i);
    # replicate it across the batch dimension.
    grid = torch.cat((u, v), dim=2)
    grid = grid.unsqueeze(0).expand(len(source), -1, -1, -1)
    return torch.nn.functional.grid_sample(source, grid)
def imsmooth(tensor,
             sigma,
             stride=1,
             padding=0,
             padding_mode='constant',
             padding_value=0):
    r"""Apply a 2D Gaussian filter to a tensor.

    The 2D filter is implemented by separating the 2D convolution into two
    1D convolutions, first along the vertical direction and then along the
    horizontal one. Each 1D Gaussian kernel is given by:

    .. math::
        f_i \propto \exp\left(-\frac{1}{2} \frac{i^2}{\sigma^2} \right),
        ~~~ i \in \{-W,\dots,W\},
        ~~~ W = \lceil 4\sigma \rceil.

    This kernel is normalized to sum to one exactly. The function then calls
    `torch.nn.functional.conv2d` to perform the actual convolution, passing
    the various padding parameters and the stride to it.

    Args:
        tensor (:class:`torch.Tensor`): :math:`N\times C\times H\times W`
            image tensor.
        sigma (float): standard deviation of the Gaussian kernel.
        stride (int, optional): subsampling factor. Default: ``1``.
        padding (int, optional): extra padding. Default: ``0``.
        padding_mode (str, optional): ``'constant'``, ``'reflect'`` or
            ``'replicate'``. Default: ``'constant'``.
        padding_value (float, optional): constant value for the `constant`
            padding mode. Default: ``0``.

    Returns:
        :class:`torch.Tensor`: :math:`N\times C\times H\times W` tensor with
            the smoothed images.
    """
    assert sigma >= 0
    # Kernel support radius W = ceil(4*sigma).
    width = math.ceil(4 * sigma)
    # EPSILON_SINGLE keeps the division finite when sigma == 0.
    filt = (torch.arange(-width,
                         width + 1,
                         dtype=torch.float32,
                         device=tensor.device) /
            (SQRT_TWO_SINGLE * sigma + EPSILON_SINGLE))
    filt = torch.exp(-filt * filt)
    # Normalize the kernel to sum exactly to one.
    filt /= torch.sum(filt)
    num_channels = tensor.shape[1]
    width = width + padding
    if padding_mode == 'constant' and padding_value == 0:
        # Zero constant padding can be delegated to conv2d itself.
        other_padding = width
        x = tensor
    else:
        # pad: (before, after) pairs starting from last dimension backward
        x = F.pad(tensor,
                  (width, width, width, width),
                  mode=padding_mode,
                  value=padding_value)
        # Padding is already applied; conv2d must not add more.
        other_padding = 0
        padding = 0
    # Vertical 1D pass (kernel shaped 1x1xKx1), depthwise via groups.
    x = F.conv2d(x,
                 filt.reshape((1, 1, -1, 1)).expand(num_channels, -1, -1, -1),
                 padding=(other_padding, padding),
                 stride=(stride, 1),
                 groups=num_channels)
    # Horizontal 1D pass (kernel shaped 1x1x1xK).
    x = F.conv2d(x,
                 filt.reshape((1, 1, 1, -1)).expand(num_channels, -1, -1, -1),
                 padding=(padding, other_padding),
                 stride=(1, stride),
                 groups=num_channels)
    return x
def imarraysc(tiles,
              spacing=0,
              quiet=False,
              lim=None,
              interpolation='lanczos'):
    r"""Display or arrange an image or tensor batch as a mosaic.

    The function displays the tensor `tiles` as a set of tiles. `tiles` has
    shape :math:`K\times C\times H\times W` and the generated mosaic
    is a *new* tensor with shape :math:`C\times (MH) \times (NW)` where
    :math:`MN \geq K`.

    Missing tiles are filled with zeros.

    The range of each tile is individually scaled to the range [0, 1].

    Args:
        tiles (:class:`torch.Tensor`): tensor to display or rearrange.
        spacing (int, optional): thickness of the border (infilled
            with zeros) around each tile. Default: ``0``.
        quiet (bool, optional): If False, do not display the mosaic.
            Default: ``False``.
        lim (list, optional): maximum and minimum intensity value for
            rescaling. Default: ``None``.
        interpolation (str, optional): interpolation to use with
            :func:`matplotlib.pyplot.imshow`. Default: ``'lanczos'``.

    Returns:
        class:`torch.Tensor`: The rearranged tensor.
    """
    num = tiles.shape[0]
    # Near-square grid with num_cols >= num_rows.
    num_cols = math.ceil(math.sqrt(num))
    num_rows = (num + num_cols - 1) // num_cols
    num_channels = tiles.shape[1]
    height = tiles.shape[2]
    width = tiles.shape[3]
    # Output canvas; cells left unfilled (when num < rows*cols) stay zero.
    mosaic = torch.zeros(num_channels,
                         height * num_rows + spacing * (num_rows - 1),
                         width * num_cols + spacing * (num_cols - 1))
    for t in range(num):
        # Tile t goes to grid cell (row v, column u), filled row-major.
        u = t % num_cols
        v = t // num_cols
        # imsc rescales each tile to [0, 1] independently (quiet=True
        # suppresses display of the individual tile).
        mosaic[0:num_channels,
               v*(height+spacing):v*(height+spacing)+height,
               u*(width+spacing):u*(width+spacing)+width] = imsc(tiles[t],
                                                                 quiet=True,
                                                                 lim=lim)[0]
    return imsc(mosaic, quiet=quiet, interpolation=interpolation)
|
from flask import Flask
# Flask application instance for the demo routes below.
app = Flask(__name__)
@app.route('/')
def hello_world():
    # Root route: static greeting.
    return 'Hello World!'
@app.route('/dojo')
def dojo():
    # Static page for /dojo.
    return 'Dojo!'
@app.route('/say/<name>')
def say_route(name):
    """Greet the capitalized path segment, e.g. /say/bob -> 'Hi Bob!'."""
    greeting = "Hi" + " " + name.capitalize() + "!"
    return greeting
@app.route('/repeat/<int:value>/<name>')
def repeat_names(value, name):
    """Repeat `name` `value` times, each preceded by a space."""
    unit = " " + str(name)
    return unit * int(value)
if __name__=="__main__":
    # Run the development server with live reload.
    app.run(debug=True)
|
import csv
import io
from collections import defaultdict
from datetime import datetime, timedelta
import jinja2
from flask import current_app
from notifications_utils.clients.zendesk.zendesk_client import (
NotifySupportTicket,
NotifySupportTicketAttachment,
NotifySupportTicketComment,
NotifySupportTicketStatus,
NotifyTicketType,
)
from notifications_utils.timezones import convert_utc_to_bst
from redis.exceptions import LockError
from sqlalchemy import and_, between
from sqlalchemy.exc import SQLAlchemyError
from app import db, dvla_client, notify_celery, statsd_client, zendesk_client
from app.aws import s3
from app.celery.broadcast_message_tasks import trigger_link_test
from app.celery.letters_pdf_tasks import get_pdf_for_templated_letter
from app.celery.tasks import (
get_recipient_csv_and_template_and_sender_id,
process_incomplete_jobs,
process_job,
process_row,
)
from app.clients.letter.dvla import DvlaRetryableException
from app.config import QueueNames, TaskNames
from app.constants import (
EMAIL_TYPE,
JOB_STATUS_ERROR,
JOB_STATUS_IN_PROGRESS,
JOB_STATUS_PENDING,
SMS_TYPE,
)
from app.cronitor import cronitor
from app.dao.annual_billing_dao import set_default_free_allowance_for_service
from app.dao.date_util import get_current_financial_year_start_year
from app.dao.inbound_numbers_dao import dao_get_available_inbound_numbers
from app.dao.invited_org_user_dao import (
delete_org_invitations_created_more_than_two_days_ago,
)
from app.dao.invited_user_dao import (
delete_invitations_created_more_than_two_days_ago,
)
from app.dao.jobs_dao import (
dao_set_scheduled_jobs_to_pending,
dao_update_job,
find_jobs_with_missing_rows,
find_missing_row_for_job,
)
from app.dao.notifications_dao import (
dao_old_letters_with_created_status,
dao_precompiled_letters_still_pending_virus_check,
get_ratio_of_messages_delivered_slowly_per_provider,
is_delivery_slow_for_providers,
letters_missing_from_sending_bucket,
notifications_not_yet_sent,
)
from app.dao.provider_details_dao import (
dao_adjust_provider_priority_back_to_resting_points,
dao_reduce_sms_provider_priority,
)
from app.dao.services_dao import (
dao_find_services_sending_to_tv_numbers,
dao_find_services_with_high_failure_rates,
)
from app.dao.users_dao import delete_codes_older_created_more_than_a_day_ago
from app.letters.utils import generate_letter_pdf_filename
from app.models import (
AnnualBilling,
BroadcastMessage,
BroadcastStatusType,
EmailBranding,
Event,
Job,
Organisation,
Service,
User,
)
from app.notifications.process_notifications import send_notification_to_queue
from app.utils import get_london_midnight_in_utc
@notify_celery.task(name="run-scheduled-jobs")
@cronitor("run-scheduled-jobs")
def run_scheduled_jobs():
    """Move due scheduled jobs to pending and queue them for processing."""
    try:
        pending_jobs = dao_set_scheduled_jobs_to_pending()
        for job in pending_jobs:
            process_job.apply_async([str(job.id)], queue=QueueNames.JOBS)
            current_app.logger.info("Job ID %s added to process job queue", job.id)
    except SQLAlchemyError:
        current_app.logger.exception("Failed to run scheduled jobs")
        raise
@notify_celery.task(name="delete-verify-codes")
def delete_verify_codes():
    """Delete verify codes older than a day, logging timing and count."""
    try:
        started_at = datetime.utcnow()
        deleted_count = delete_codes_older_created_more_than_a_day_ago()
        current_app.logger.info(
            "Delete job started %s finished %s deleted %s verify codes", started_at, datetime.utcnow(), deleted_count
        )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to delete verify codes")
        raise
@notify_celery.task(name="delete-invitations")
def delete_invitations():
    """Delete stale user and organisation invitations (older than two days)."""
    try:
        started_at = datetime.utcnow()
        # Sum deletions across both user and organisation invitations.
        deleted_count = delete_invitations_created_more_than_two_days_ago()
        deleted_count += delete_org_invitations_created_more_than_two_days_ago()
        current_app.logger.info(
            "Delete job started %s finished %s deleted %s invitations", started_at, datetime.utcnow(), deleted_count
        )
    except SQLAlchemyError:
        current_app.logger.exception("Failed to delete invitations")
        raise
@notify_celery.task(name="switch-current-sms-provider-on-slow-delivery")
def switch_current_sms_provider_on_slow_delivery():
    """
    Reduce provider's priority if at least 15% of notifications took more than 5 minutes to be delivered
    in the last ten minutes. If both providers are slow, don't do anything. If we changed the providers in the
    last ten minutes, then don't update them again either.
    """
    # Maps provider name -> bool (is this provider delivering slowly?).
    slow_delivery_notifications = is_delivery_slow_for_providers(
        created_within_minutes=15,
        delivered_within_minutes=5,
        threshold=0.15,
    )
    # only adjust if some values are true and some are false - ie, don't adjust if all providers are fast or
    # all providers are slow
    if len(set(slow_delivery_notifications.values())) != 1:
        for provider_name, is_slow in slow_delivery_notifications.items():
            if is_slow:
                current_app.logger.warning("Slow delivery notifications detected for provider %s", provider_name)
                # time_threshold stops us flapping: no change if the provider
                # priorities were already adjusted in the last 10 minutes.
                dao_reduce_sms_provider_priority(provider_name, time_threshold=timedelta(minutes=10))
@notify_celery.task(name="generate-sms-delivery-stats")
def generate_sms_delivery_stats():
    """Emit statsd gauges for the ratio of slowly-delivered SMS per provider."""
    for delivery_interval in (1, 5, 10):
        ratios_by_provider = get_ratio_of_messages_delivered_slowly_per_provider(
            created_within_minutes=15, delivered_within_minutes=delivery_interval
        )
        for provider, ratio in ratios_by_provider.items():
            statsd_client.gauge(f"slow-delivery.{provider}.delivered-within-minutes.{delivery_interval}.ratio", ratio)
@notify_celery.task(name="tend-providers-back-to-middle")
def tend_providers_back_to_middle():
    """Nudge SMS provider priorities back toward their resting values."""
    dao_adjust_provider_priority_back_to_resting_points()
@notify_celery.task(name="check-job-status")
def check_job_status():
    """Detect jobs stuck for ~30 minutes and requeue their processing.

    Selects jobs that are either 'in progress' with processing_started
    between 30 and 35 minutes ago, or 'pending' with scheduled_for between
    30 and 35 minutes ago. Any match is marked 'error' and handed to
    process_incomplete_jobs, which re-processes the missing CSV rows.
    """
    thirty_minutes_ago = datetime.utcnow() - timedelta(minutes=30)
    thirty_five_minutes_ago = datetime.utcnow() - timedelta(minutes=35)
    # Jobs that started processing 30-35 minutes ago but never finished.
    incomplete_in_progress_jobs = Job.query.filter(
        Job.job_status == JOB_STATUS_IN_PROGRESS,
        between(Job.processing_started, thirty_five_minutes_ago, thirty_minutes_ago),
    )
    # Scheduled jobs that should have started 30-35 minutes ago but didn't.
    incomplete_pending_jobs = Job.query.filter(
        Job.job_status == JOB_STATUS_PENDING,
        Job.scheduled_for.isnot(None),
        between(Job.scheduled_for, thirty_five_minutes_ago, thirty_minutes_ago),
    )
    jobs_not_complete_after_30_minutes = (
        incomplete_in_progress_jobs.union(incomplete_pending_jobs)
        .order_by(Job.processing_started, Job.scheduled_for)
        .all()
    )
    # temporarily mark them as ERROR so that they don't get picked up by future check_job_status tasks
    # if they haven't been re-processed in time.
    job_ids = []
    for job in jobs_not_complete_after_30_minutes:
        job.job_status = JOB_STATUS_ERROR
        dao_update_job(job)
        job_ids.append(str(job.id))
    if job_ids:
        current_app.logger.info("Job(s) %s have not completed.", job_ids)
        process_incomplete_jobs.apply_async([job_ids], queue=QueueNames.JOBS)
@notify_celery.task(name="replay-created-notifications")
def replay_created_notifications():
    """Requeue notifications and letters stuck in the 'created' status."""
    # If the notification has not been sent after 1 hour, then try to resend.
    resend_created_notifications_older_than = 60 * 60
    for notification_type in (EMAIL_TYPE, SMS_TYPE):
        notifications_to_resend = notifications_not_yet_sent(resend_created_notifications_older_than, notification_type)
        if len(notifications_to_resend) > 0:
            current_app.logger.info(
                (
                    "Sending %(num)s %(type)s notifications to the delivery queue because the "
                    "notification status was created."
                ),
                dict(num=len(notifications_to_resend), type=notification_type),
            )
        for n in notifications_to_resend:
            send_notification_to_queue(notification=n)
    # If a letter has not been sent after an hour, regenerate its PDF task.
    letters = letters_missing_from_sending_bucket(resend_created_notifications_older_than)
    if len(letters) > 0:
        current_app.logger.info(
            (
                "%(num)s letters were created over an hour ago, "
                "but do not have an updated_at timestamp or billable units.\n"
                "Creating app.celery.letters_pdf_tasks.create_letters tasks to upload letter to S3 "
                "and update notifications for the following notification ids:\n%(ids)s"
            ),
            dict(num=len(letters), ids=[x.id for x in letters]),
        )
        for letter in letters:
            get_pdf_for_templated_letter.apply_async([str(letter.id)], queue=QueueNames.CREATE_LETTERS_PDF)
@notify_celery.task(name="check-if-letters-still-pending-virus-check")
def check_if_letters_still_pending_virus_check():
    """Re-scan or escalate precompiled letters stuck in pending-virus-check.

    Letters found in the scan bucket are sent for another virus scan;
    letters that cannot be found are escalated via Zendesk and an error log.
    """
    letters = []
    for letter in dao_precompiled_letters_still_pending_virus_check():
        # find letter in the scan bucket
        filename = generate_letter_pdf_filename(
            letter.reference, letter.created_at, ignore_folder=True, postage=letter.postage
        )
        if s3.file_exists(current_app.config["S3_BUCKET_LETTERS_SCAN"], filename):
            current_app.logger.warning(
                "Letter id %s got stuck in pending-virus-check. Sending off for scan again.", letter.id
            )
            notify_celery.send_task(
                name=TaskNames.SCAN_FILE,
                kwargs={"filename": filename},
                queue=QueueNames.ANTIVIRUS,
            )
        else:
            # Not in the scan bucket: needs manual investigation.
            letters.append(letter)
    if len(letters) > 0:
        letter_ids = [(str(letter.id), letter.reference) for letter in letters]
        msg = f"""{len(letters)} precompiled letters have been pending-virus-check for over 90 minutes.
We couldn't find them in the scan bucket. We'll need to find out where the files are and kick them off
again or move them to technical failure.
Notifications: {sorted(letter_ids)}"""
        if current_app.should_send_zendesk_alerts:
            ticket = NotifySupportTicket(
                subject=f"[{current_app.config['NOTIFY_ENVIRONMENT']}] Letters still pending virus check",
                message=msg,
                ticket_type=NotifySupportTicket.TYPE_INCIDENT,
                notify_ticket_type=NotifyTicketType.TECHNICAL,
                ticket_categories=["notify_letters"],
            )
            zendesk_client.send_ticket_to_zendesk(ticket)
        current_app.logger.error(
            "Letters still pending virus check",
            extra=dict(number_of_letters=len(letters), notification_ids=sorted(letter_ids)),
        )
@notify_celery.task(name="check-if-letters-still-in-created")
def check_if_letters_still_in_created():
    """Alert if letters created before yesterday's 17:30 cutoff never progressed.

    Raises a Zendesk incident ticket (when alerts are enabled) and always logs
    an error so the on-call engineer can follow the runbook.
    """
    letters = dao_old_letters_with_created_status()

    if len(letters) > 0:
        msg = (
            f"{len(letters)} letters were created before 17.30 yesterday and still have 'created' status. "
            "Follow runbook to resolve: "
            "https://github.com/alphagov/notifications-manuals/wiki/Support-Runbook"
            "#deal-with-letters-still-in-created."
        )

        if current_app.should_send_zendesk_alerts:
            ticket = NotifySupportTicket(
                subject=f"[{current_app.config['NOTIFY_ENVIRONMENT']}] Letters still in 'created' status",
                message=msg,
                ticket_type=NotifySupportTicket.TYPE_INCIDENT,
                notify_ticket_type=NotifyTicketType.TECHNICAL,
                ticket_categories=["notify_letters"],
            )
            zendesk_client.send_ticket_to_zendesk(ticket)

        current_app.logger.error(
            "%s letters created before 17:30 yesterday still have 'created' status",
            len(letters),
        )
@notify_celery.task(name="check-for-missing-rows-in-completed-jobs")
def check_for_missing_rows_in_completed_jobs():
    """Re-process CSV rows that completed jobs never turned into notifications.

    For each job flagged with missing rows, re-reads the recipient CSV and
    pushes each missing row back through process_row.
    """
    jobs = find_jobs_with_missing_rows()
    for job in jobs:
        recipient_csv, template, sender_id = get_recipient_csv_and_template_and_sender_id(job)
        missing_rows = find_missing_row_for_job(job.id, job.notification_count)
        for row_to_process in missing_rows:
            # recipient_csv is indexable by row number; re-fetch the lost row
            row = recipient_csv[row_to_process.missing_row]
            current_app.logger.info("Processing missing row: %s for job: %s", row_to_process.missing_row, job.id)
            process_row(row, template, job, job.service, sender_id=sender_id)
@notify_celery.task(name="check-for-services-with-high-failure-rates-or-sending-to-tv-numbers")
def check_for_services_with_high_failure_rates_or_sending_to_tv_numbers():
    """Flag services with high SMS permanent-failure rates or heavy TV-number traffic.

    Looks back over the last 24 hours, logs an error for whichever condition
    fired, and raises a single Zendesk incident ticket when alerts are enabled.

    NOTE(review): the `elif` means that when both conditions are true only the
    high-failure-rate services are reported/logged -- confirm that TV-number
    offenders are intentionally dropped in that case.
    """
    start_date = datetime.utcnow() - timedelta(days=1)
    end_date = datetime.utcnow()
    message = ""

    services_with_failures = dao_find_services_with_high_failure_rates(start_date=start_date, end_date=end_date)
    services_sending_to_tv_numbers = dao_find_services_sending_to_tv_numbers(start_date=start_date, end_date=end_date)

    if services_with_failures:
        message += "{} service(s) have had high permanent-failure rates for sms messages in last 24 hours:\n".format(
            len(services_with_failures)
        )
        for service in services_with_failures:
            service_dashboard = "{}/services/{}".format(
                current_app.config["ADMIN_BASE_URL"],
                str(service.service_id),
            )
            message += "service: {} failure rate: {},\n".format(service_dashboard, service.permanent_failure_rate)
        current_app.logger.error(
            "%s services have had a high permanent-failure rate for text messages in the last 24 hours.",
            len(services_with_failures),
            extra=dict(service_ids=[service.service_id for service in services_with_failures]),
        )
    elif services_sending_to_tv_numbers:
        message += "{} service(s) have sent over 500 sms messages to tv numbers in last 24 hours:\n".format(
            len(services_sending_to_tv_numbers)
        )
        for service in services_sending_to_tv_numbers:
            service_dashboard = "{}/services/{}".format(
                current_app.config["ADMIN_BASE_URL"],
                str(service.service_id),
            )
            message += "service: {} count of sms to tv numbers: {},\n".format(
                service_dashboard, service.notification_count
            )
        current_app.logger.error(
            "%s services have sent over 500 text messages to tv numbers in the last 24 hours.",
            len(services_sending_to_tv_numbers),
            extra=dict(
                service_ids_and_number_sent={
                    service.service_id: service.notification_count for service in services_sending_to_tv_numbers
                }
            ),
        )

    if services_with_failures or services_sending_to_tv_numbers:
        if current_app.should_send_zendesk_alerts:
            message += (
                "\nYou can find instructions for this ticket in our manual:\n"
                "https://github.com/alphagov/notifications-manuals/wiki/Support-Runbook#deal-with-services-with-high-failure-rates-or-sending-sms-to-tv-numbers"  # noqa
            )  # noqa
            ticket = NotifySupportTicket(
                subject=f"[{current_app.config['NOTIFY_ENVIRONMENT']}] High failure rates for sms spotted for services",
                message=message,
                ticket_type=NotifySupportTicket.TYPE_INCIDENT,
                notify_ticket_type=NotifyTicketType.TECHNICAL,
            )
            zendesk_client.send_ticket_to_zendesk(ticket)
@notify_celery.task(name="trigger-link-tests")
def trigger_link_tests():
    """Queue a CBC link test for every enabled cell-broadcast provider."""
    if current_app.config["CBC_PROXY_ENABLED"]:
        for cbc_name in current_app.config["ENABLED_CBCS"]:
            trigger_link_test.apply_async(kwargs={"provider": cbc_name}, queue=QueueNames.BROADCASTS)
@notify_celery.task(name="auto-expire-broadcast-messages")
def auto_expire_broadcast_messages():
    """Mark broadcasts past their finish time as completed and republish alerts.

    NOTE(review): this uses naive datetime.now() while sibling tasks use
    utcnow(); if finishes_at is stored in UTC this comparison is off by the
    server's UTC offset -- confirm.
    """
    expired_broadcasts = BroadcastMessage.query.filter(
        BroadcastMessage.finishes_at <= datetime.now(),
        BroadcastMessage.status == BroadcastStatusType.BROADCASTING,
    ).all()

    for broadcast in expired_broadcasts:
        broadcast.status = BroadcastStatusType.COMPLETED
    db.session.commit()

    if expired_broadcasts:
        # tell the govuk-alerts publisher to regenerate its pages
        notify_celery.send_task(name=TaskNames.PUBLISH_GOVUK_ALERTS, queue=QueueNames.GOVUK_ALERTS)
@notify_celery.task(name="remove-yesterdays-planned-tests-on-govuk-alerts")
def remove_yesterdays_planned_tests_on_govuk_alerts():
    """Trigger a GOV.UK alerts republish so yesterday's planned tests drop off the site."""
    notify_celery.send_task(name=TaskNames.PUBLISH_GOVUK_ALERTS, queue=QueueNames.GOVUK_ALERTS)
@notify_celery.task(name="delete-old-records-from-events-table")
@cronitor("delete-old-records-from-events-table")
def delete_old_records_from_events_table():
    """Delete Event rows older than one year (52 weeks) and log the count."""
    delete_events_before = datetime.utcnow() - timedelta(weeks=52)
    event_query = Event.query.filter(Event.created_at < delete_events_before)
    deleted_count = event_query.delete()

    current_app.logger.info("Deleted %s historical events from before %s.", deleted_count, delete_events_before)
    db.session.commit()
@notify_celery.task(name="zendesk-new-email-branding-report")
def zendesk_new_email_branding_report():
    """Raise a daily Zendesk task listing email brands created the previous weekday.

    Brands created by platform admins are excluded; weekend runs roll back to
    Friday. Renders an HTML report from a Jinja template and files it as a
    Zendesk task (when alerts are enabled).
    """
    # make sure we convert to BST as in summer this'll run at 23:30 UTC
    previous_weekday = convert_utc_to_bst(datetime.utcnow()).date() - timedelta(days=1)

    # If yesterday is a Saturday or Sunday, adjust back to the Friday
    if previous_weekday.isoweekday() in {6, 7}:
        previous_weekday -= timedelta(days=(previous_weekday.isoweekday() - 5))

    previous_weekday_midnight = get_london_midnight_in_utc(previous_weekday)

    new_email_brands = (
        EmailBranding.query.join(Organisation, isouter=True)
        .join(User, User.id == EmailBranding.created_by, isouter=True)
        .filter(
            EmailBranding.created_at >= previous_weekday_midnight,
            User.platform_admin.is_(False),
        )
        .order_by(EmailBranding.created_at)
        .all()
    )

    current_app.logger.info("%s new email brands to review since %s.", len(new_email_brands), previous_weekday)
    if not new_email_brands:
        return

    # group brands by the organisation(s) they belong to, if any
    brands_by_organisation = defaultdict(list)
    brands_with_no_organisation = []
    for new_brand in new_email_brands:
        if not new_brand.organisations:
            brands_with_no_organisation.append(new_brand)
        else:
            for organisation in new_brand.organisations:
                brands_by_organisation[organisation].append(new_brand)

    with open("templates/tasks/scheduled_tasks/new_email_brandings.html") as template_file:
        template = jinja2.Template(template_file.read())

    message = template.render(
        domain=current_app.config["ADMIN_BASE_URL"],
        yesterday=previous_weekday.strftime("%A %-d %B %Y"),
        brands_by_organisation=brands_by_organisation,
        brands_with_no_organisation=brands_with_no_organisation,
    )

    if current_app.should_send_zendesk_alerts:
        ticket = NotifySupportTicket(
            subject="Review new email brandings",
            message=message,
            ticket_type=NotifySupportTicket.TYPE_TASK,
            notify_ticket_type=NotifyTicketType.NON_TECHNICAL,
            ticket_categories=["notify_no_ticket_category"],
            message_as_html=True,
        )
        zendesk_client.send_ticket_to_zendesk(ticket)
@notify_celery.task(name="check-for-low-available-inbound-sms-numbers")
@cronitor("check-for-low-available-inbound-sms-numbers")
def check_for_low_available_inbound_sms_numbers():
    """Raise a Zendesk task when the pool of unassigned inbound SMS numbers runs low.

    Skips entirely in environments where Zendesk alerts are disabled; only
    files a ticket when the available count drops to or below the configured
    LOW_INBOUND_SMS_NUMBER_THRESHOLD.
    """
    if not current_app.should_send_zendesk_alerts:
        # bug fix: log message previously read "Skipping report run on in %s"
        current_app.logger.info("Skipping report run in %s", current_app.config["NOTIFY_ENVIRONMENT"])
        return

    num_available_inbound_numbers = len(dao_get_available_inbound_numbers())
    current_app.logger.info("There are %s available inbound SMS numbers.", num_available_inbound_numbers)
    if num_available_inbound_numbers > current_app.config["LOW_INBOUND_SMS_NUMBER_THRESHOLD"]:
        return

    message = (
        f"There are only {num_available_inbound_numbers} inbound SMS numbers currently available for services.\n\n"
        "Request more from our provider (MMG) and load them into the database.\n\n"
        "Follow the guidance here: "
        "https://github.com/alphagov/notifications-manuals/wiki/Support-Runbook#add-new-inbound-sms-numbers"
    )

    ticket = NotifySupportTicket(
        subject="Request more inbound SMS numbers",
        message=message,
        ticket_type=NotifySupportTicket.TYPE_TASK,
        notify_ticket_type=NotifyTicketType.TECHNICAL,
        ticket_categories=["notify_no_ticket_category"],
    )
    zendesk_client.send_ticket_to_zendesk(ticket)
@notify_celery.task(name="weekly-dwp-report")
def weekly_dwp_report():
    """Run configured SQL reports and attach the CSV results to a Zendesk ticket.

    Configuration comes from ZENDESK_REPORTING["weekly-dwp-report"] and must
    provide a mapping of csv filename -> SQL query plus a ticket_id. The
    ticket is set back to pending with a due date ~1 week out.
    """
    report_config = current_app.config["ZENDESK_REPORTING"].get("weekly-dwp-report")

    if not current_app.should_send_zendesk_alerts:
        current_app.logger.info("Skipping DWP report run in %s", current_app.config["NOTIFY_ENVIRONMENT"])
        return

    if (
        not report_config
        or not isinstance(report_config, dict)
        or not report_config.get("query")
        or not report_config.get("ticket_id")
    ):
        current_app.logger.info("Skipping DWP report run - invalid configuration.")
        return

    attachments = []
    for csv_name, query in report_config["query"].items():
        # run each configured query and serialise the result set as a CSV
        result = db.session.execute(query)
        headers = result.keys()
        rows = result.fetchall()

        csv_data = io.StringIO()
        csv_writer = csv.DictWriter(csv_data, fieldnames=headers, dialect="excel")
        csv_writer.writeheader()
        for row in rows:
            csv_writer.writerow(row._asdict())
        csv_data.seek(0)  # rewind so Zendesk reads from the start

        attachments.append(NotifySupportTicketAttachment(filename=csv_name, filedata=csv_data, content_type="text/csv"))

    zendesk_client.update_ticket(
        report_config["ticket_id"],
        status=NotifySupportTicketStatus.PENDING,
        comment=NotifySupportTicketComment(
            body="Please find attached your weekly report.",
            attachments=attachments,
        ),
        due_at=convert_utc_to_bst(datetime.utcnow() + timedelta(days=7, hours=3, minutes=10)),
    )
@notify_celery.task(bind=True, name="change-dvla-password", max_retries=3, default_retry_delay=60)
def change_dvla_password(self):
    """Rotate the DVLA password, retrying (up to 3 times, 60s apart) on transient errors."""
    try:
        dvla_client.change_password()
    except LockError:
        # some other task is currently changing the password. let that process handle it and quietly exit
        current_app.logger.info("change-dvla-password lock held by other process, doing nothing")
    except DvlaRetryableException:
        current_app.logger.info("change-dvla-password DvlaRetryableException - retrying")
        self.retry()
@notify_celery.task(bind=True, name="change-dvla-api-key", max_retries=3, default_retry_delay=60)
def change_dvla_api_key(self):
    """Rotate the DVLA API key, retrying (up to 3 times, 60s apart) on transient errors."""
    try:
        dvla_client.change_api_key()
    except LockError:
        # some other task is currently changing the api key. let that process handle it and quietly exit
        current_app.logger.info("change-dvla-api-key lock held by other process, doing nothing")
    except DvlaRetryableException:
        current_app.logger.info("change-dvla-api-key DvlaRetryableException - retrying")
        self.retry()
def populate_annual_billing(year, missing_services_only):
    """
    Add or update annual billing with free allowance defaults for all active services.

    The default free allowances are stored in the DB in a table called `default_annual_allowance`.

    If missing_services_only is true then only add rows for services that do not have annual billing for that year yet.
    This is useful to prevent overriding any services that have a free allowance that is not the default.

    If missing_services_only is false then add or update annual billing for all active services.
    This is useful to ensure all services start the new year with the correct annual billing.
    """
    if missing_services_only:
        # active services with no AnnualBilling row for `year`
        # (outer join against that year's rows, then keep the NULL matches)
        active_services = (
            Service.query.filter(Service.active)
            .outerjoin(
                AnnualBilling, and_(Service.id == AnnualBilling.service_id, AnnualBilling.financial_year_start == year)
            )
            .filter(AnnualBilling.id == None)  # noqa
            .all()
        )
    else:
        active_services = Service.query.filter(Service.active).all()
    for service in active_services:
        set_default_free_allowance_for_service(service, year)
@notify_celery.task(name="run-populate-annual-billing")
@cronitor("run-populate-annual-billing")
def run_populate_annual_billing():
    """Ensure every active service has an annual billing row for the current financial year.

    Bug fix: this task previously called *itself* (infinite recursion /
    RecursionError) instead of delegating to populate_annual_billing.
    """
    year = get_current_financial_year_start_year()
    populate_annual_billing(year=year, missing_services_only=True)
|
# Generated by Django 2.1.5 on 2019-01-24 06:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop BlogComments.lastname and add nullable check_in/check_out date fields."""

    dependencies = [
        ('pousada', '0005_auto_20190124_0510'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='blogcomments',
            name='lastname',
        ),
        migrations.AddField(
            model_name='blogcomments',
            name='check_in',
            field=models.DateField(null=True),
        ),
        migrations.AddField(
            model_name='blogcomments',
            name='check_out',
            field=models.DateField(null=True),
        ),
    ]
|
from django import forms
from .models import Assignment
from .models import Course
from .models import Announcement
from .models import Document
class AssignmentForm(forms.ModelForm):
    """ModelForm for creating/editing an Assignment."""
    class Meta:
        model = Assignment
        fields = (
            'assignment_name', 'a_description', 'due_date',
            'release_date', 'assignment_points', 'course_id')
class CourseForm(forms.ModelForm):
    """ModelForm for creating/editing a Course."""
    class Meta:
        model = Course
        fields = (
            'course_name', 'course_description', 'course_id')
class AnnouncementForm(forms.ModelForm):
    """ModelForm for posting an Announcement to a course."""
    class Meta:
        model = Announcement
        fields = ('course_id', 'announcement_name', 'description')
class DocumentForm(forms.ModelForm):
    """ModelForm for uploading a Document with a description."""
    class Meta:
        model = Document
        fields = ('description', 'document', )
|
import subprocess, time

# Launch the z0b0t server batch file, give it time to initialise, then start
# the client executable. NOTE(review): the fixed 15s sleeps are fragile --
# confirm they are long enough on slow machines. Windows-only (.bat/.exe,
# relative backslash paths).
subprocess.call([r'.\z0b0t.bat'])
time.sleep(15)
subprocess.call([r'.\z0b0tClient.exe'])
time.sleep(15)
|
from django.http import HttpResponse
from django.shortcuts import render,redirect
from django.contrib.auth.models import User
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
from shop.models import logindata
def homepage(req):
    """Render the shop landing page."""
    return render(req, "shop/hp.html")
def signup(req):
    """Create a new user account from the signup form.

    POST: creates a Django User plus a `logindata` row, flashes a success
    message and redirects home. Otherwise renders the login page.

    NOTE(review): the raw password is stored in *plaintext* in `logindata`
    even though the User row already holds a salted hash -- this is a serious
    security problem; confirm whether the logindata table is really needed.
    """
    if req.method == 'POST':
        FName = req.POST['FName']
        Email = req.POST['Email']
        Password = req.POST['Password']
        username = req.POST['Username']
        myuser = User.objects.create_user(username, Email, Password)
        myuser.first_name = FName
        myuser.save()
        # duplicate credential store (plaintext password -- see NOTE above)
        ld = logindata(username=username, password=Password)
        ld.save()
        messages.success(req, "Now you are a member")
        return redirect('/')
    return render(req, "shop/login.html")
def signin(req):
    """Authenticate a user from the login form.

    POST: attempt authentication; on success log the user in and redirect
    home, otherwise flash an error and re-show the form. GET: render the
    login page.
    """
    if req.method == 'POST':
        Login_username = req.POST['loginUsername']
        Login_password = req.POST['LoginPassword']
        user = authenticate(username=Login_username, password=Login_password)
        if user is not None:
            login(req, user)
            # bug fix: the success message was previously flashed with
            # messages.error, so it rendered as an error banner
            messages.success(req, "You are logged in")
            return redirect('/')
        else:
            messages.error(req, 'Incorrect Username/Password')
    # bug fix: GET requests previously fell off the end of the function and
    # returned None, which makes Django raise a 500 error
    return render(req, 'shop/login.html')
def logout1(req):
    """Log the current user out, flash a confirmation and redirect home."""
    logout(req)
    messages.success(req, "You are logged out")
    return redirect('/')
import pandas as pd
import matplotlib.pyplot as plt

# Simple (single-feature) linear regression on the 50_Startups dataset.
dataset = pd.read_csv('50_Startups.csv')
# NOTE(review): only the first column is used as the feature (iloc[:, :1]) --
# presumably R&D spend given the plot labels; confirm against the CSV's
# column order. The target is the last column (Profit).
x = dataset.iloc[:,:1]
y = dataset.iloc[:,-1]

from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.2,random_state = 0)

from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit( x_train, y_train)
y_pred = reg.predict(x_test)

# training-set scatter with the fitted regression line
plt.scatter(x_train,y_train,color='red')
plt.plot(x_train,reg.predict(x_train),color='blue')
plt.title('Rnd vs Profit')
plt.xlabel('Rnd')
plt.ylabel("Profit")
plt.show()

# test-set scatter against the same fitted line
plt.scatter(x_test,y_test,color='black')
plt.plot(x_train,reg.predict(x_train),color='blue')
plt.title('Rnd vs Profit')
plt.xlabel('Rnd')
plt.ylabel("Profit")
plt.show()
|
'''
Consider a square of surface area A. Inside of it is an inscribed dartboard.
Now consider we throw some darts onto the square. The ones which hit the dartboard are N;
the ones which hit the outside of the square are M. The total number of darts thrown is N+M.
The fraction of darts which hit the dartboard is f=N/(N+M). For a high enough number
of darts, f*A is approximately the area of the dartboard.
The first part of this challenge is writing a program which finds the estimated area of a circle of any
given radius using this approach. The program ought to give us results for 3 numbers of darts thrown:
10^3, 10^5 and 10^6.
We will input our own radius and the program will do the calculations.
'''
'''
First part
import math
import random
def estimate(radius,total_throws):
center=(radius,radius)
in_circle=0
for i in range(total_throws):
x=random.uniform(0,2*radius)
y=random.uniform(0,2*radius)
p=(x,y)
d=math.sqrt((p[0]-center[0])**2+(p[1]-center[1])**2)
if d<=radius:
in_circle+=1
area_of_square=(2*radius)**2
return (in_circle/total_throws)*area_of_square
if __name__=='__main__':
radius=float(input('Please enter the desired radius: '))
area_of_circle=math.pi*radius**2
for points in [10**3, 10**5, 10**6]:
print('Area: {0}, Estimated ({1} throws): {2}'.format(area_of_circle,points,estimate(radius,points)))
'''
'''
The area of the square is 4*r**2 and the area of the circle inscribed is math.pi*r**2.
If we divide the area of the circle by the area of the square, we get math.pi/4.
The function f from above is an approximation of math.pi/4 and thus:
4*(N/(N+M)) ought to be math.pi.
Adapt the program from above so it calculates pi for the given numbers of throws and any radius.
'''
import math
import random
def calculate(radius, total_throws):
    """Estimate pi by Monte Carlo: darts on a square with an inscribed circle.

    The fraction of uniformly random points that land inside the circle
    approximates pi/4, so four times that fraction is returned.
    """
    cx, cy = radius, radius  # circle centre inside the [0, 2r] x [0, 2r] square
    hits = 0
    for _ in range(total_throws):
        px = random.uniform(0, 2 * radius)
        py = random.uniform(0, 2 * radius)
        if math.sqrt((px - cx) ** 2 + (py - cy) ** 2) <= radius:
            hits += 1
    return (hits / total_throws) * 4
if __name__ == '__main__':
    radius = 1
    # Bug fix: the challenge statement asks for 10**3, 10**5 and 10**6 throws;
    # the previous 10**10 would take hours and was almost certainly a typo.
    for points in [10**3, 10**5, 10**6]:
        print('Pi: {0} Calculated Value for {1} throws: {2}'.format(math.pi, points, calculate(radius, points)))
|
# coding: utf-8
# In your favourite language --
# Let's challenge!!
import numpy as np  # NOTE(review): imported but never used

# Read board size H x W and number of players N from the first input line.
H, W, N = input().rstrip().split(' ')
# H rows of W integers form the board.
table = [[0 for i in range(int(W))] for j in range(int(H))]
for h in range(int(H)):
    table[h] = list(map(int, input().rstrip().split(' ')))
# L lines of moves follow; each move holds four 1-based coordinates
# (r1, c1, r2, c2) naming a pair of cells.
L = input()
memo = [[0 for i in range(4)] for j in range(int(L))]
for l in range(int(L)):
    memo[l] = list(map(int, input().rstrip().split(' ')))
member = [0 for i in range(int(N))]  # per-player score
turn = 0
l = 0
# Players take turns: the current player keeps scoring 2 points for every
# consecutive move whose two cells hold equal values; a mismatch passes the
# turn to the next player. The l >= L-1 guards stop at the final move.
while l < int(L) - 1:
    while table[memo[l][0] - 1][memo[l][1] - 1] == table[memo[l][2] - 1][memo[l][3] - 1]:
        member[turn] = member[turn] + 2
        if l >= int(L) - 1:
            break
        else:
            l = l + 1
    if turn < int(N) - 1:
        turn = turn + 1
    else:
        turn = 0
    if l >= int(L) - 1:
        break
    else:
        l = l + 1
# Output each player's final score, one per line.
for n in range(int(N)):
    print(member[n])
from django.db import models
class UserInfo(models.Model):
    """Site user record with credentials and status flags.

    NOTE(review): upwd appears to hold the password (max_length=40) --
    confirm it is hashed before storage, not saved in plaintext.
    """
    uname = models.CharField(max_length=20)
    upwd = models.CharField(max_length=40)
    uemail = models.CharField(max_length=30)
    isValid = models.BooleanField(default=True)
    isActive = models.BooleanField(default=False)
class UserAddressInfo(models.Model):
    """A delivery address belonging to a UserInfo.

    NOTE(review): ForeignKey without on_delete only works on Django < 2.0
    (where it defaults to CASCADE); on Django >= 2.0 this raises a TypeError
    -- confirm the project's Django version.
    """
    uname = models.CharField(max_length=20)
    uaddress = models.CharField(max_length=100)
    uphone = models.CharField(max_length=11)
    user = models.ForeignKey('UserInfo')
|
"""
Triplets
Given an array containing N integers, and a number S
denoting a target sum.
Find all distinct integers that can add up to form target
sum. The numbers in each triplet should be ordered in
ascending order, and triplets should be ordered too.
Return empty array if no such triplet exists.
"""
"""
Machine generated alternative text:
Input
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15]
target = 18
Output
[[1,2,15],
[3,7,8],
[4,6,8],
[5,6,7]
]
"""
# Brute force: examine every combination of three *distinct* indices, O(n^3).
# Bug fix: the original only skipped the case i == j == k, so it printed
# triplets that reuse a single element (e.g. (5, 5, 8)) and every permutation
# of each genuine hit. Iterating i < j < k yields each combination once.
array = [1, 2, 3, 4, 5, 6, 7, 8, 9, 15]
target = 18
for i in range(len(array)):
    for j in range(i + 1, len(array)):
        for k in range(j + 1, len(array)):
            if array[i] + array[j] + array[k] == target:
                print((array[i], array[j], array[k]))
print("O(n^3) approach")
#Optimized approach
array.sort()
print(array)
# learning of sorting
"""
array.sort(reverse=True)
print(array)
# take second element for sort
def takeSecond(elem):
return elem[1]
# random list
random = [(2, 2), (3, 4), (4, 1), (1, 3)]
# sort list with key
random.sort(key=takeSecond)
# print list
print('Sorted list:', random)
"""
"""
Maintain a list on how to solve the problem with 2 lines description
"""
"""
1,2,3,4,5(i),6,7(k)
n-2 because we have loop till last but one element, as last element is held by k
prateek is using <=n-3
important problem asked in most companies.
n-3 more understading is required
we need to have atleast 4 elements to start with , other wise it will only 3 elements and we could easily compute the result.No need for algorithm.
"""
array = [13, 1, 43, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15]
sum = 18
array.sort()
# (length - 3) because we need at min of 4 elements else we will have 3 elements and can directly compute the sum.
# The for loop range it prints till the limit specified
#1(i),2(j),3,4(k)
rangeLimit = len(array) - 3
""" print(array)
print("length"+str(len(array)))
for k in range(rangeLimit):
print(array[k]) """
# Two-pointer sweep over the sorted array, O(n^2): anchor array[k], then move
# i (left) rightwards and j (right) leftwards until they cross, printing every
# triplet that sums to `sum`.
for k in range(rangeLimit):
    i = k + 1
    j = len(array) - 1
    while (i < j):
        current_sum = array[k]
        current_sum += array[i]
        current_sum += array[j]
        if (current_sum == sum):
            print((array[k], array[i], array[j]))
            # advance both pointers past the matched pair
            i = i + 1
            j = j - 1
        if (current_sum < sum):
            i = i + 1
        if (current_sum > sum):
            j = j - 1
|
# Read the bracketed string to expand.
S = input()
def f(s):
    """Recursively expand bracket groups.

    Each '( ... )' group becomes the expansion of its contents followed by
    that expansion reversed; text outside brackets is kept verbatim.
    """
    open_at = s.find('(')
    if open_at == -1:
        # no bracket group left: plain text
        return s
    out = s[:open_at]
    rest = s[open_at + 1:]
    # scan for the matching close paren, tracking nesting depth
    depth = 1
    close_at = open_at
    for close_at in range(len(rest)):
        ch = rest[close_at]
        if ch == '(':
            depth += 1
        elif ch == ')':
            depth -= 1
        if depth == 0:
            break
    inner = f(rest[:close_at])
    out += inner + inner[::-1]
    out += f(rest[close_at + 1:])
    return out
# Print the fully expanded string.
print(f(S))
|
#!/usr/bin/python
# Filename: subject.py
class Subject:
    """Observer-pattern subject: keeps a list of observers and notifies them.

    Bug fixes: __init__ was misspelled ``__init___`` (three trailing
    underscores), so ``self.observers`` was never created and every method
    raised AttributeError; Notify also referenced a bare ``observers`` name
    instead of ``self.observers`` (NameError).
    """

    def __init__(self):
        self.observers = []

    def Attach(self, observer):
        """Register an observer; it must expose an Update() method."""
        self.observers.append(observer)

    def Dettach(self, observer):
        """Unregister a previously attached observer."""
        self.observers.remove(observer)

    def Notify(self):
        """Call Update() on every attached observer, in attach order."""
        for o in self.observers:
            o.Update()
|
from django.conf import settings
def globals(request):
    """Template context processor exposing SERVER_URL to all templates.

    NOTE(review): the name shadows the ``globals`` builtin inside this module;
    renaming it would require updating the TEMPLATES context_processors entry
    in settings, so it is left as-is.
    """
    return {
        'server_url': settings.SERVER_URL
    }
from gpiozero import Motor
# Speed presets as PWM duty cycles (0.0-1.0).
# NOTE(review): `max` shadows the builtin of the same name in this module.
max = 1
medium = 0.7
class Astroby:
    """
    all configuration and interface for Astroby
    L - Left, R - Right, F - Front, R - Rear
    LW - LeftWheels, RW - RightWheels

    Bug fix: every movement method unconditionally overwrote its ``pwmvalue``
    argument with ``self.speed``, so callers could never request a custom
    speed. The argument is now honoured, defaulting to the current speed
    when omitted -- existing no-argument calls behave exactly as before.
    """

    def __init__(self):
        # wheels and their GPIOpins (forward pin, backward pin)
        self.LF = Motor(2, 3, pwm=True)
        self.LR = Motor(17, 27, pwm=True)
        self.RF = Motor(14, 15, pwm=True)
        self.RR = Motor(23, 24, pwm=True)
        # initial speed
        self.speed = max

    # def __del__(self):
    #     import RPi.GPIO as GPIO
    #     GPIO.cleanup()

    def _effective_pwm(self, pwmvalue):
        # Fall back to the rover's current speed when no explicit value given.
        return self.speed if pwmvalue is None else pwmvalue

    def LW_forward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.LF.forward(pwmvalue)
        self.LR.forward(pwmvalue)

    def LW_backward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.LF.backward(pwmvalue)
        self.LR.backward(pwmvalue)

    def LW_stop(self):
        self.LF.stop()
        self.LR.stop()

    def RW_forward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.RF.forward(pwmvalue)
        self.RR.forward(pwmvalue)

    def RW_backward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.RF.backward(pwmvalue)
        self.RR.backward(pwmvalue)

    def RW_stop(self):
        self.RF.stop()
        self.RR.stop()

    # control for the wheels together
    def forward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.LW_forward(pwmvalue)
        self.RW_forward(pwmvalue)

    def backward(self, pwmvalue=None):
        pwmvalue = self._effective_pwm(pwmvalue)
        self.LW_backward(pwmvalue)
        self.RW_backward(pwmvalue)

    def left(self, pwmvalue=None):
        # spin left: left wheels backward, right wheels forward
        pwmvalue = self._effective_pwm(pwmvalue)
        self.LW_backward(pwmvalue)
        self.RW_forward(pwmvalue)

    def right(self, pwmvalue=None):
        # spin right: right wheels backward, left wheels forward
        pwmvalue = self._effective_pwm(pwmvalue)
        self.RW_backward(pwmvalue)
        self.LW_forward(pwmvalue)

    def stop(self):
        self.LW_stop()
        self.RW_stop()

    def toggle_speed(self):
        # toggle current speed of astroby
        self.speed = medium if self.speed == max else max
|
from django.db import models
from login.models import User
from django.forms import ModelForm
# Create your models here.
class Address(models.Model):
    """A delivery address belonging to a user."""
    province = models.CharField(max_length=30)
    city = models.CharField(max_length=30)
    county = models.CharField(max_length=30)
    street = models.CharField(max_length=400)
    consignee = models.CharField(max_length=30)        # recipient name
    consignee_tel = models.CharField(max_length=30)    # recipient phone number
    is_default = models.BooleanField(default=False)    # user's default address
    postcode = models.CharField(max_length=6)
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        db_table = 'Address'

    def __str__(self):
        return self.street
class Shop(models.Model):
    """A merchant's shop, owned by a user."""
    shopname = models.CharField(max_length=30)
    least_price = models.IntegerField()                # minimum order price
    deliver_fee = models.IntegerField()
    review_score = models.DecimalField(max_digits=2, decimal_places=1)
    shop_img = models.ImageField(upload_to='uploads/')
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)

    class Meta:
        db_table = 'Shop'

    def __str__(self):
        return self.shopname
class Merchandise(models.Model):
    """An item sold by a shop."""
    title = models.CharField(max_length=30)
    price = models.IntegerField()
    image = models.ImageField(upload_to='goods/')
    shop_id = models.ForeignKey(Shop, on_delete=models.CASCADE)

    class Meta:
        db_table = 'Merchandise'

    def __str__(self):
        return self.title
class SalesNum(models.Model):
    """Monthly sales count for a merchandise item."""
    month = models.IntegerField()
    sales_num = models.IntegerField()
    merchan_id = models.ForeignKey(Merchandise, on_delete=models.CASCADE)

    class Meta:
        db_table = 'SalesNum'

    def __str__(self):
        # Bug fix: __str__ must return a str; returning the related
        # Merchandise instance itself raised TypeError whenever this model
        # was rendered (admin, shell, templates).
        return str(self.merchan_id)
class Orders(models.Model):
    """A customer's order placed with one shop, delivered to one address."""
    order_num = models.CharField(max_length=20)
    order_time = models.DateTimeField()
    total_price = models.IntegerField()
    status = models.IntegerField()  # 0 not confirmed 1 confirm 2 completed
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, default=None)
    shop_id = models.ForeignKey(Shop, on_delete=models.CASCADE, default=None)
    address_id = models.ForeignKey(Address, on_delete=models.CASCADE, default=None)

    class Meta:
        db_table = "Orders"

    def __str__(self):
        return self.order_num
class OrderDetail(models.Model):
    """A line item: quantity of one merchandise within one order."""
    merchan_num = models.IntegerField()  # quantity ordered
    merchan_id = models.ForeignKey(Merchandise, on_delete=models.CASCADE)
    order_id = models.ForeignKey(Orders, on_delete=models.CASCADE)

    class Meta:
        db_table = 'OrderDetail'

    def __str__(self):
        return str(self.merchan_id) + str(self.order_id)
import FWCore.ParameterSet.Config as cms
# CMSSW job: read a PoolSource file, run the SiPixelFedFillerWordEventNumber
# producer, and write the result to NewFEDFWs.root.
process = cms.Process("Demo")

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:/afs/cern.ch/user/f/florez/CMSSW_2_0_4/src/FedFillerWords/Data/mysimple.root')
)

# Bug fix: this identical MessageLogger service was configured three times in
# a row; the redundant copies are removed.
process.MessageLogger = cms.Service("MessageLogger",
    cerr = cms.untracked.PSet(
        enable = cms.untracked.bool(False)
    ),
    files = cms.untracked.PSet(
        output = cms.untracked.PSet(
        )
    ),
    noLineBreaks = cms.untracked.bool(True),
    threshold = cms.untracked.string('WARNING')
)

process.filler = cms.EDProducer("SiPixelFedFillerWordEventNumber",
    InputLabel = cms.untracked.string('source'),
    InputInstance = cms.untracked.string(''),
    SaveFillerWords = cms.bool(False)
)

# Bug fix: PoolOutputModule is an OutputModule, not an EDProducer.
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string("NewFEDFWs.root")
)

# Bug fix: both the Path and the EndPath were assigned to `process.producer`,
# so the EndPath silently replaced the Path and the filler never ran.
process.producer = cms.Path(process.filler)
process.outpath = cms.EndPath(process.out)
|
import torch
import torch.nn as nn
class Policy(nn.Module):
    """Actor-critic network sharing a single hidden layer.

    The actor head emits the mean of a 1-D action distribution; the critic
    head emits the scalar state value. Action/reward buffers live on the
    module for use by the surrounding training loop.
    """

    def __init__(self):
        super(Policy, self).__init__()
        # shared trunk: 30-dim observation -> 128 hidden units
        self.affine1 = nn.Linear(30, 128)
        # actor's layer
        self.action_mean = nn.Linear(128, 1)
        self.action_log_var = nn.Linear(128, 1)
        # critic's layer
        self.value = nn.Linear(128, 1)
        self.relu = nn.ReLU()
        # action & reward buffer
        self.saved_actions = []
        self.rewards = []

    def forward(self, x):
        """Run both heads; return (action_mean, state_value)."""
        hidden = self.relu(self.affine1(x))
        # actor: mean of the action distribution for state s_t
        mean = self.action_mean(hidden)
        # action_log_var = self.action_log_var(hidden)  # kept for future use
        # critic: estimated value of being in state s_t
        state_value = self.value(hidden)
        # tuple of 2 values: action distribution mean, value of s_t
        return mean, state_value  # action_log_var, state_value
|
import os
import time
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import *
from tensorflow.keras.models import load_model
from .datagen import ImgDataGenerator
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
# -----------------------------------------------------------------------------------------------------------
# =========================================== MODEL CALLBACKS ===============================================
# -----------------------------------------------------------------------------------------------------------
def reducelronplateau():
    """Build a ReduceLROnPlateau callback (LR x0.05 after 5 stagnant epochs).

    NOTE(review): monitors the *training* metric 'categorical_accuracy', and
    min_lr=0.001 is high enough that most reductions are clamped immediately
    -- confirm both are intended.
    """
    reduce_lr_on_plateau = ReduceLROnPlateau(
        monitor='categorical_accuracy', factor=0.05,
        patience=5, verbose=1, mode='auto',
        min_delta=0.0001, cooldown=0, min_lr=0.001)
    return reduce_lr_on_plateau
def modelcheckpoint(model_checkpoint_dir, runtime_name):
    """Build a ModelCheckpoint that saves only the best weights to dir/runtime_name.

    NOTE(review): monitors 'val_acc'; newer tf.keras names this metric
    'val_accuracy' -- verify against the metrics the model is compiled with,
    and note that fit() must receive validation data for it to exist at all.
    """
    model_checkpoint = ModelCheckpoint(
        filepath=os.path.join(model_checkpoint_dir, runtime_name),
        save_weights_only=True, save_best_only=True,
        monitor='val_acc', mode='max')
    return model_checkpoint
def earlystopping():
    """Build an EarlyStopping callback on val_loss.

    NOTE(review): patience=0 stops on the first non-improving epoch, and this
    helper is not included in get_model_callbacks() below -- confirm whether
    it is still meant to be used.
    """
    early_stopping = EarlyStopping(
        monitor='val_loss', min_delta=0, patience=0,
        verbose=0, mode='auto', baseline=None,
        restore_best_weights=False)
    return early_stopping
def get_model_callbacks(model_checkpoint_dir, runtime_name):
    """Return the callback list used during training (LR plateau + checkpoint).

    NOTE(review): earlystopping() exists above but is deliberately(?) omitted
    here -- confirm.
    """
    callbacks = [
        reducelronplateau(),
        modelcheckpoint(model_checkpoint_dir, runtime_name),
    ]
    return callbacks
# -----------------------------------------------------------------------------------------------------------
# ============================================= MODEL OPTIMIZERS ============================================
# -----------------------------------------------------------------------------------------------------------
def get_mixed_precision_opt(optimizer):
    """Wrap an optimizer with automatic mixed-precision graph rewriting.

    NOTE(review): tf.train.experimental.enable_mixed_precision_graph_rewrite
    was removed in TF 2.x (replaced by tf.keras.mixed_precision) -- confirm
    the pinned TensorFlow version still provides it.
    """
    return tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
# =================================== MODEL COMPILING ===================================
class GenericTrainLoop:
    """End-to-end training pipeline: compile, fit, plot, save, then evaluate.

    Bug fix: export_model_stats hard-coded the plot's x-axis to 10 epochs
    (np.arange(1, 11)); it now derives the axis from the recorded history so
    any epoch count plots correctly.
    """

    def __init__(self, datagen_params, model, optimizer, enable_mixed_precision, loss, metrics,
                 model_checkpoint_dir, use_callbacks, epochs, steps_per_epoch,
                 plot_dir, runtime_name, saved_models_dir, model_name):
        self.datagen_params = datagen_params
        self.model = model  # Tensorflow Model
        self.optimizer = optimizer  # Keras Optimizer
        self.enable_mixed_precision = enable_mixed_precision  # Boolean - set true if NVIDIA gpu contains RT cores
        self.loss = loss  # Keras/custom loss function
        self.metrics = metrics
        self.model_checkpoint_dir = model_checkpoint_dir  # Directory to save model checkpoints
        self.use_callbacks = use_callbacks  # Whether to use callbacks
        self.epochs = epochs
        self.steps_per_epoch = steps_per_epoch
        self.plot_dir = plot_dir  # Directory to save model progress (accuracy/loss vs epochs) plots
        self.runtime_name = runtime_name  # *IMPORTANT* will be used as the name for "model_progress_time"
        self.model_name = model_name  # *IMPORTANT* will be used as the name for the model's hdf5 save file
        self.saved_models_dir = saved_models_dir  # Directory to save models after training

    def compile_model(self):
        """Compile self.model, optionally wrapping the optimizer for mixed precision."""
        if self.enable_mixed_precision:
            optimizer = get_mixed_precision_opt(self.optimizer)
        else:
            optimizer = self.optimizer
        model = self.model
        model.compile(optimizer=optimizer, loss=self.loss, metrics=self.metrics)
        return model

    def import_generators(self):
        """Build the data generators; returns [train_generator, test_generator]."""
        datagenerator = ImgDataGenerator(**self.datagen_params)
        train_generator, test_generator = datagenerator.return_datagens()
        return [train_generator, test_generator]

    def train_model(self, compiled_model, train_generator):
        """Fit the model; returns (History, wall-clock training time in seconds)."""
        callbacks = get_model_callbacks(self.model_checkpoint_dir, self.runtime_name)
        start = time.time()
        if self.use_callbacks:
            history = compiled_model.fit(
                train_generator,
                epochs=self.epochs,
                steps_per_epoch=self.steps_per_epoch,
                callbacks=callbacks
            )
        else:
            history = compiled_model.fit(
                train_generator,
                epochs=self.epochs,
                steps_per_epoch=self.steps_per_epoch
            )
        stop = time.time()
        total_training_time = stop - start
        return history, total_training_time

    # ================================= TRAINING ANALYSIS ===================================
    def export_model_stats(self, model_history, total_training_time):
        """Write a loss/accuracy-vs-epoch plot to plot_dir/runtime_name.jpg."""
        plot_title = f"""Visualizing Model Progress
Total Training Time: {total_training_time} seconds"""
        history = model_history.history
        # Bug fix: was np.arange(1, 11) -- correct only when epochs == 10.
        epoch_axis = np.arange(1, len(history['loss']) + 1)
        fig = make_subplots(rows=1, cols=2,
                            subplot_titles=['Loss', 'Accuracy'])
        fig.add_trace(go.Scatter(x=epoch_axis, y=history['loss'],
                                 mode='lines+markers', name='Loss'),
                      row=1, col=1)
        # NOTE(review): assumes an 'accuracy' metric was compiled -- confirm.
        fig.add_trace(go.Scatter(x=epoch_axis, y=history['accuracy'],
                                 mode='lines+markers', name='Accuracy'),
                      row=1, col=2)
        fig.update_xaxes(title_text='Epochs', row=1, col=1)
        fig.update_xaxes(title_text='Epochs', row=1, col=2)
        fig.update_layout(title=plot_title)
        fig.write_image(f'{os.path.join(self.plot_dir, self.runtime_name)}.jpg')

    def save_model(self):
        """Persist the full model as saved_models_dir/model_name.hdf5."""
        self.model.save(f"{self.saved_models_dir}/{self.model_name}.hdf5")

    def load_saved_model(self):
        """Reload the previously saved hdf5 model from disk."""
        model = load_model(f"{self.saved_models_dir}/{self.model_name}.hdf5")
        return model

    # =================================== MODEL EVALUATION ===================================
    def evaluate_model(self, test_generator):
        """Evaluate the saved model over 10 generator steps; returns scores."""
        saved_model = self.load_saved_model()
        scores = saved_model.evaluate(test_generator, steps=10)
        return scores

    def print_model_scores(self, model_scores):
        """Pretty-print [loss, accuracy] evaluation scores."""
        print(f"""
        Evaluation Loss: {model_scores[0]}
        Evaluation Accuracy: {model_scores[1]}
        """)

    def run_loop(self):
        """Run the whole pipeline and return the evaluation scores."""
        compiled_model = self.compile_model()
        train_generator, test_generator = self.import_generators()
        history, total_training_time = self.train_model(compiled_model, train_generator)
        self.export_model_stats(history, total_training_time)
        self.save_model()
        model_scores = self.evaluate_model(test_generator)
        self.print_model_scores(model_scores)
        return model_scores
|
from setuptools import find_packages, setup
# Minimal packaging manifest for the `filetools` helper library.
setup(name="filetools",
      version="0.1",
      description="Helper class to manage files",
      author="Jonathan Grizou",
      author_email='jonathan.grizou@gmail.com',
      platforms=["linux"],
      url="https://github.com/jgrizou/filetools",
      # Auto-discover every importable package under the project root.
      packages=find_packages(),
      )
|
from django.urls import path

from . import views

# URL routes for this app. Fix: the import pair was duplicated verbatim.
urlpatterns = [
    # Route the app root to the ClosureApproval view.
    path('', views.ClosureApproval, name='ClosureApproval'),
]
def greatestInt(n):
    """Return the greatest integer less than or equal to ``n`` (the floor).

    ``n // 1`` floors toward negative infinity, so negative inputs round
    down (e.g. -2.5 -> -3). Defined with ``def`` instead of a lambda bound
    to a name (PEP 8 E731).
    """
    return int(n // 1)
def recursion():
    """Interactively read numbers and print their greatest integer.

    Despite the historical name, this is implemented iteratively so an
    arbitrary number of rounds cannot exhaust the call stack (the original
    called itself once per round and would eventually hit RecursionError).
    Behavior is otherwise unchanged: bad numeric input aborts the program;
    entering 0 at the prompt repeats, anything else (or non-int) stops.
    """
    while True:
        try:
            num = float(input("Enter a number : "))
            print(f"Greatest Integer of {num} : {greatestInt(num)}")
        except Exception:
            # Preserves the original behaviour: any bad input exits the program.
            print("You are a fricking fool, get lost")
            exit()
        try:
            ans = int(input("Wanna do that again?, if yes press 0 : "))
        except ValueError:
            break
        if ans != 0:
            break

recursion()
from django.contrib import admin
from lamp.models import Lamp,ModelLamp
# Expose the Lamp catalogue models in the Django admin site.
admin.site.register(Lamp)
admin.site.register(ModelLamp)
# Register your models here.
|
from sys import argv
# Unpack CLI arguments: argv[0] is the script name, argv[1] the file to read.
# Raises ValueError if the caller passes any other number of arguments.
script, file_path = argv
def read_file(file_name):
    """Print the full contents of ``file_name`` to stdout.

    Uses a context manager so the file handle is closed even if reading
    raises (the original leaked the handle on error).
    NOTE(review): no explicit encoding is given, so the platform default
    applies — confirm the input files match the runtime locale.
    """
    with open(file_name) as file_obj:
        print(file_obj.read())
# Announce the target (the literal reads "Reading file:" in Chinese) and dump it.
print(f"读取文件: {file_path}")
read_file(file_path)
from django.contrib.gis.geos import Point
from django.core import mail
from django.test import TestCase
from djapp.models import HostOrganization
from .factories import CoachFactory, HostOrganizationFactory, MatchingFactory
from djapp.biz.matching import Matcher
def valid_host_data(**overrides):
    """Return a host payload that passes matching validation, with overrides applied."""
    payload = {
        'type': HostOrganization.Type.COMMUNE,
        'zip_code': '75015',
        'start_date': '2020-11-20',
        'email_confirmed': '2020-01-01',
        'validated': '2020-01-02',
    }
    payload.update(overrides)
    return payload
def valid_coach_data(**overrides):
    """Return a coach payload that passes matching validation, with overrides applied."""
    payload = {
        'situation_graduated': True,
        'zip_code': '75015',
        'max_distance': 5,
        'start_date': '2020-11-15',
        'email_confirmed': '2020-01-01',
    }
    payload.update(overrides)
    return payload
class MatchingTestCase(TestCase):
    """Integration tests for Matcher: eligibility filtering, ordering,
    MAX_MATCHINGS capping, notification e-mails and geo-distance cutoffs.

    NOTE: the creation order of factory objects is behavior-significant —
    the assertions below compare *ordered* lists, so keep statement order.
    """

    def test_coach_matching(self):
        """A coach is matched only to eligible hosts, in the expected order."""
        coach = CoachFactory(
            situation_graduated=True,
            zip_code='75013',
            max_distance=5,
            start_date='2020-11-15',
            email_confirmed='2020-01-01',
        )
        # Eligible hosts — expected in the result, in this order.
        host1 = HostOrganizationFactory(**valid_host_data())
        host2 = HostOrganizationFactory(**valid_host_data(zip_code='75011'))
        host3 = HostOrganizationFactory(**valid_host_data(zip_code='94200'))
        # Too far
        HostOrganizationFactory(name='Too far 1', **valid_host_data(zip_code='33000'))
        HostOrganizationFactory(name='Too far 2', **valid_host_data(zip_code='92330'))
        HostOrganizationFactory(name='Too far 3', **valid_host_data(zip_code='94700'))
        # Start date does not work
        HostOrganizationFactory(name='Bad Start Date', **valid_host_data(start_date='2020-11-01'))
        # Email not confirmed
        HostOrganizationFactory(name='Email Not Confirmed', **valid_host_data(email_confirmed=None))
        # Not validated
        HostOrganizationFactory(name='Not Validated', **valid_host_data(validated=None))
        # Blocked
        HostOrganizationFactory(name='Blocked', **valid_host_data(blocked='2020-01-01'))
        # Unsubscribed
        HostOrganizationFactory(name='Unsubscribed', **valid_host_data(unsubscribed='2020-01-01'))
        # Too many matchings
        host_busy = HostOrganizationFactory(name='Too many matchings', **valid_host_data())
        for i in range(Matcher.MAX_MATCHINGS):
            MatchingFactory(host=host_busy)
        matcher = Matcher()
        matchings = matcher.get_matchings_for_coach(coach, limit=10000)
        self.assertEqual(
            [host1, host2, host3],
            [host for _, host in matchings],
        )

    def test_host_matching(self):
        """A host is matched only to eligible coaches, in the expected order."""
        host = HostOrganizationFactory(
            type=HostOrganization.Type.COMMUNE,
            zip_code='75013',
            start_date='2020-11-20',
            email_confirmed='2020-01-01',
            validated='2020-01-02',
        )
        # Eligible coaches — expected in the result, in this order.
        coach1 = CoachFactory(**valid_coach_data())
        coach2 = CoachFactory(**valid_coach_data(situation_graduated=False, has_experience=True, zip_code='75013'))
        coach3 = CoachFactory(**valid_coach_data(situation_graduated=False, situation_learning=True, zip_code='94200'))
        # Too far
        CoachFactory(**valid_coach_data(zip_code='33000'))
        CoachFactory(**valid_coach_data(zip_code='92330'))
        CoachFactory(**valid_coach_data(zip_code='94700'))
        # Bad situation
        CoachFactory(**valid_coach_data(situation_graduated=False, situation_job=True))
        # Start Date does not work
        CoachFactory(**valid_coach_data(start_date='2020-11-30'))
        # Email not confirmed
        CoachFactory(**valid_coach_data(email_confirmed=None))
        # Blocked
        CoachFactory(**valid_coach_data(blocked='2020-01-01'))
        # Unsubscribed
        CoachFactory(**valid_coach_data(first_name='Unsubscribed', unsubscribed='2020-01-01'))
        # Too many matchings
        coach_busy = CoachFactory(**valid_coach_data())
        for i in range(Matcher.MAX_MATCHINGS):
            MatchingFactory(coach=coach_busy)
        # Run test
        matcher = Matcher()
        matchings = matcher.get_matchings_for_host(host, limit=10000)
        self.assertEqual(
            [coach1, coach2, coach3],
            [coach for coach, _ in matchings],
        )

    def test_max_matchings_coach(self):
        """An existing matching counts against the coach's MAX_MATCHINGS quota."""
        coach = CoachFactory(
            situation_graduated=True,
            zip_code='75013',
            max_distance=5,
            start_date='2020-11-15',
            email_confirmed='2020-01-01',
        )
        # One matching already exists, so only MAX_MATCHINGS-1 more are allowed.
        MatchingFactory(coach=coach)
        for _ in range(Matcher.MAX_MATCHINGS-1):
            HostOrganizationFactory(**valid_host_data())
        HostOrganizationFactory(**valid_host_data())
        HostOrganizationFactory(**valid_host_data())
        HostOrganizationFactory(**valid_host_data())
        matcher = Matcher()
        matchings = matcher.get_matchings_for_coach(coach)
        self.assertEqual(Matcher.MAX_MATCHINGS-1, len(matchings))

    def test_max_matchings_host(self):
        """An existing matching counts against the host's MAX_MATCHINGS quota."""
        host = HostOrganizationFactory(
            type=HostOrganization.Type.COMMUNE,
            zip_code='75013',
            start_date='2020-11-20',
            email_confirmed='2020-01-01',
            validated='2020-01-02',
        )
        # One matching already exists, so only MAX_MATCHINGS-1 more are allowed.
        MatchingFactory(host=host)
        for _ in range(Matcher.MAX_MATCHINGS-1):
            CoachFactory(**valid_coach_data())
        CoachFactory(**valid_coach_data())
        CoachFactory(**valid_coach_data())
        CoachFactory(**valid_coach_data())
        matcher = Matcher()
        matchings = matcher.get_matchings_for_host(host)
        self.assertEqual(Matcher.MAX_MATCHINGS-1, len(matchings))

    def test_run_process_for_host(self):
        """Running the process sends one e-mail to each side of the match."""
        host = HostOrganizationFactory(
            name='Amazing Organization',
            type=HostOrganization.Type.COMMUNE, zip_code='33000', start_date='2020-11-15',
            email_confirmed='2020-11-01', validated='2020-11-02',
        )
        coach = CoachFactory(
            first_name='John', last_name='Doe',
            situation_graduated=True, zip_code='33000', max_distance=5, start_date='2020-11-15',
            email_confirmed='2020-01-01',
        )
        matcher = Matcher()
        matcher.run_process_for_host(host)
        # Expect exactly two outgoing mails: coach notification, then host notification.
        self.assertEqual(len(mail.outbox), 2)
        self.assertEqual(mail.outbox[0].subject, 'Amazing Organization est prête à vous accueillir')
        self.assertEqual(mail.outbox[0].to, [coach.email])
        self.assertEqual(mail.outbox[1].subject, 'John Doe est disponible pour le poste de conseiller numérique')
        self.assertEqual(mail.outbox[1].to, [host.contact_email])

    def test_quimper_does_not_match_with_brest(self):
        """Distance filtering uses real locations, not zip codes alone."""
        coach = CoachFactory(
            situation_graduated=True,
            geo_name='Brest',
            zip_code='29200',
            commune_code='29019',
            departement_code='29',
            region_code='53',
            location=Point([-4.5058, 48.4059]),
            start_date='2020-11-15',
            email_confirmed='2020-01-01',
        )
        host_brest = HostOrganizationFactory(
            type=HostOrganization.Type.COMMUNE,
            geo_name='Brest',
            zip_code='29200',
            commune_code='29019',
            departement_code='29',
            region_code='53',
            location=Point([-4.5058, 48.4059]),
            start_date='2020-11-20',
            email_confirmed='2020-11-01',
            validated='2020-11-02',
        )
        # Quimper host deliberately carries a Paris zip code ('75015') but a
        # Quimper location: it must still be excluded by distance.
        host_quimper = HostOrganizationFactory(
            type=HostOrganization.Type.COMMUNE,
            geo_name='Quimper',
            zip_code='75015',
            commune_code='29232',
            departement_code='29',
            region_code='53',
            location=Point([-4.0916, 47.9914]),
            start_date='2020-11-20',
        )
        # Run test
        matcher = Matcher()
        matchings = matcher.get_matchings_for_coach(coach)
        self.assertEqual(
            [host_brest],
            [host for _, host in matchings],
        )
|
""" border_app.py
Credit: Rodrigo Sparrapan
sparodrigo82@gmail.com
based on https://github.com/fsoubelet/ColorFrame """
import cv2
import glob
count = 1
path = "s_Top-10/Top_10_a_Olho/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/a__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Entrada/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/b__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Fila da Entrada/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/c__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_iPorta malas/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/d__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Sem Carro/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/e__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Transito da Entrada/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/f__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Vol Criancas/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/g__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_Vol Trabalhando/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/h__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
path = "s_Top-10/Top_10_xFood/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/i__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_xRetrovisores/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/j__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
count = 1
path = "s_Top-10/Top_10_zEvangelistas/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/k__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
ount = 1
path = "s_Top-10/Top_10_zOracoes/*.jpg"
filenames = glob.glob(path)
for filename in filenames:
image = cv2.imread(filename)
gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = cv2.resize(image, (6000, 40000))
#image = cv2.imread(image)
#gray_img = cv2.resize(gray_img, (6000, 4000))
cv2.imwrite("drop-out/l__rebels_color_" +str(count) +".jpg", image)
cv2.imwrite("drop-out/z__rebels_gray_" +str(count) +".jpg", gray_img)
count += 1
from colorframe import BorderCreator
border_api = BorderCreator(commandline_path="drop-out/", vertical_border=33, horizontal_border=33, color="white")
border_api.execute_target()
|
# coding=utf-8
"""Small helper for producing human-readable timestamps.

Fixes the unfilled template docstring left by the original author.
"""
from datetime import datetime


def make_timestamp():
    """Return the current local time formatted as ``DD/MM/YYYY HH:MM:SS``."""
    return datetime.now().strftime("%d/%m/%Y %H:%M:%S")
|
# Compute the MDC (Portuguese for GCD) of two integers by successive division,
# printing each factorization step like a worked classroom example.
n1 = int(input("primeiro número: "))
n2 = int(input("segundo número: "))
mdc = 1
divisor = 2
# NOTE(review): n1/n2 become floats after the first division (true division);
# float modulo is exact for small inputs but may misbehave for very large ones.
while divisor <= n1 or divisor <= n2:
    # Case 1: divisor divides both numbers — it is a common factor.
    if n1>1 and n2>1 and n1 % divisor == 0 and n2 % divisor ==0:
        print(int(n1),int(n2),'|',divisor)
        n1 = n1/divisor
        n2 = n2/divisor
        mdc = mdc * divisor
    # NOTE(review): by Python precedence this reads
    # (n1>1 and n2>1 and n1 % divisor == 0) or (n2 % divisor == 0) —
    # the author likely intended parentheses around the two modulo tests; confirm.
    elif n1>1 and n2>1 and n1 % divisor == 0 or n2 % divisor ==0:
        # Case 2: divisor divides only one of them — divide that factor out
        # completely, then stop scanning (the break exits the inner loop).
        while divisor <= n1 or divisor <= n2:
            if n1 % divisor == 0:
                print(int(n1),int(n2),'|',divisor)
                n1 = n1/divisor
                mdc = mdc * divisor
            elif n2 % divisor == 0:
                print(int(n1),int(n2),'|',divisor)
                n2 = n2/divisor
                mdc = mdc * divisor
            break
    else:
        # Case 3: divisor divides neither — try the next candidate.
        divisor += 1
print('1,1')
print('mdc: ',mdc)
|
# -*- coding=UTF-8 -*-
import getpass
class FakeLogin(object):
    # NOTE: Python 2 code (print statements); will not run under Python 3.
    def __init__(self):
        # Hard-coded demo identity; the loop starts immediately on construction.
        self.name = 'king'
        self.password = 'haha'
        self.banner = 'Hello'
        self.run()
    def run(self):
        '''Mimic a Linux terminal login prompt (original docstring was in Chinese).'''
        while True:
            print 'Login:king'
            pw = getpass.getpass('Password:')
            # Only the magic value '88888888' exits; everything else re-prompts
            # with a hint about the length ("password should be shorter than 12" /
            # "longer than 6" / "so close" — the printed strings are Chinese).
            if pw == '88888888':
                print 'exit'
                exit()
            else:
                if len(pw) > 12:
                    print '密码长度应小于12'
                    continue
                elif len(pw) < 6:
                    print '密码长度应大于6'
                    continue
                else:
                    print '可惜'
                    continue
import tensorflow as tf
def get_cost(logits, labels, weights, regularizer, cfg):
    """Weighted log-loss over flattened predictions, optionally L2-regularized.

    Args:
        logits: predicted probabilities (any shape).
        labels: ground-truth labels, same total size as ``logits``.
        weights: per-element loss weights, same total size.
        regularizer: unused here; kept for interface compatibility.
        cfg: config; ``cfg.get('regularizer', True)`` toggles the L2 term,
            which is scaled by ``cfg.regularizer_scale``.

    Returns:
        Scalar loss tensor.
    """
    with tf.variable_scope('cost', reuse=tf.AUTO_REUSE):
        pred_flat = tf.reshape(logits, [-1])
        label_flat = tf.reshape(labels, [-1])
        weight_flat = tf.reshape(weights, [-1])
        loss = tf.losses.log_loss(label_flat, pred_flat, weights=weight_flat)
        if cfg.get('regularizer', True):
            # L2 penalty over every trainable variable (weights and biases alike).
            l2_term = tf.add_n(
                [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()]
            )
            loss += l2_term * cfg.regularizer_scale
        return loss
|
from .models import Course, Student
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
# NOTE(security): test-account credentials were previously recorded here in
# plain text; removed. Keep credentials out of source control.
@login_required
def available_courses(request):
    """List the courses the logged-in student has not yet enrolled in."""
    user = request.user
    taken = Course.objects.filter(student__user_id=user.id)
    available = Course.objects.exclude(id__in=taken)
    return render(request, 'createSchedule/available_courses.html', {'courses': available})
@login_required
def local_users(request):
    """Show every registered student on the profile page."""
    all_students = Student.objects.all()
    return render(request, 'createSchedule/profile.html', {'students': all_students})
@login_required
def main_page(request):
    """Render the landing page for authenticated users."""
    return render(request, 'createSchedule/main_page.html')
@login_required
def schedule(request):
    """Show the logged-in student's current course schedule."""
    user = request.user
    enrolled = Course.objects.filter(student__user_id=user.id)
    return render(request, 'createSchedule/schedule.html', {'courses': enrolled})
|
from enum import Enum
class Tax(Enum):
    """Consumption-tax rates, stored as integer percent values."""

    zero = 0
    eight = 8
    ten = 10

    @property
    def name(self):
        """Display label for the rate (deliberately shadows ``Enum.name``)."""
        labels = {0: 'なし', 8: '8%'}
        return labels.get(self._value_, '10%')

    @property
    def rate(self):
        """Tax rate as a fraction (e.g. 0.08 for 8%)."""
        return self._value_ / 100

    # Derive the tax portion from a tax-inclusive amount.
    @property
    def rate_if_exclude_tax(self):
        """Fraction of a tax-inclusive price that is tax: v / (100 + v)."""
        return self._value_ / (100 + self._value_)

    @staticmethod
    def get_type_for_select():
        """Choices for a select widget: a blank option followed by all rates."""
        return [('', '')] + [(str(tax.value), tax.name) for tax in Tax]

    @staticmethod
    def parse(value):
        """Map an int (or numeric string) to a Tax member; None if unknown."""
        if isinstance(value, str):
            try:
                value = int(value)
            except ValueError:
                pass
        return next((tax for tax in Tax if tax.value == value), None)

    def __str__(self):
        return str(self._value_)
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from std_msgs.msg import Float32MultiArray
class ImuTeleop:
    # Skeleton node mapping IMU tilt (roll/pitch) to robot velocity commands.
    # Several pieces are still TODO; calculate_control currently publishes
    # zero velocities.
    def __init__(self):
        # desired velocity for publish
        self.cmd_vel = Twist()
        # variables for tilt control (scales, deadbands and offsets come from
        # private ROS parameters, with the defaults shown)
        self.roll_to_linear_scale = rospy.get_param("~roll_to_linear_scale", -1.0)
        self.pitch_to_angular_scale = rospy.get_param("~pitch_to_angular_scale", -2.0)
        self.pitch_deadband = rospy.get_param("~pitch_deadband", 0.2)
        self.roll_deadband = rospy.get_param("~roll_deadband", 0.2)
        self.pitch_offset = rospy.get_param("~pitch_offset", 0.15)
        self.roll_offset = rospy.get_param("~roll_offset", 0.1)
        # TODO: subscribers to IMU data
        self.human_input_ort_sub = None
        # publisher to robot velocity
        self.robot_vel_pub = rospy.Publisher("/cmd_vel",
                                             Twist, queue_size=1)
    # TODO: define callback functions
    # utility functions
    def send_vel_cmd(self, vx, omg):
        """Publish a Twist with linear.x = vx and angular.z = omg."""
        # set and send desired velocity
        self.cmd_vel.linear.x = vx
        self.cmd_vel.angular.z = omg
        self.robot_vel_pub.publish(self.cmd_vel)
    # convert euler angles from IMU data
    def euler_from_imu(self):
        """Return (roll, pitch, yaw); placeholder that currently yields zeros."""
        roll, pitch, yaw = (0.0, 0.0, 0.0)
        # TODO: convert imu data into euler angles
        return roll, pitch, yaw
    # main teleoperation function
    def calculate_control(self):
        """Derive and publish a velocity command from the current IMU angles."""
        roll, pitch, yaw = self.euler_from_imu()
        # TODO: calculate desired velocities based on pitch and roll angles
        vx, om = (0.0, 0.0)
        # NOTE(review): pitch is compared against roll_deadband and roll against
        # pitch_deadband below — this looks swapped; confirm which pairing is
        # intended before filling in the TODO branches.
        if pitch > self.roll_deadband:
            pass
        elif pitch < -self.roll_deadband:
            pass
        if roll > self.pitch_deadband:
            pass
        elif roll < -self.pitch_deadband:
            pass
        self.send_vel_cmd(vx, om)
if __name__ == "__main__":
# initialize node
rospy.init_node("teleop")
# create a teleop object
teleop = ImuTeleop()
# loop rate 50 Hz
rate = rospy.Rate(50)
while not rospy.is_shutdown():
teleop.calculate_control()
rate.sleep()
|
'''
Finn은 요즘 수학공부에 빠져 있습니다.
수학 공부를 하던 Finn은 자연수 n을 연속한 자연수들로 표현 하는 방법이 여러개라는 사실을 알게 되었습니다.
예를들어 15는 다음과 같이 4가지로 표현 할 수 있습니다.
1 + 2 + 3 + 4 + 5 = 15
4 + 5 + 6 = 15
7 + 8 = 15
15 = 15
자연수 n이 매개변수로 주어질 때, 연속된 자연수들로 n을 표현하는 방법의 수를 return하는 solution를 완성해주세요.
1 = 1
2 = x
3 = 1+2 1로 시작하는 첫수... 2개짜리.. 1개건너뛰고 나옴 n개자리는 n(1+n)/n번째 부터 나온다. 그리고 n-1번째 단위로 하나씩 나오게 됨.
4 = x
5 = 2+3
6 = 1+2+3 1로 시작하는 첫수... 3개짜리.. 2개건너뛰고 나옴
7 = 3+4
8 = x
9 = 2+3+4, 4+5
10 = 1+2+3+4 1로 시작하는 첫수... 4개짜리.. 3개건너뛰고 나옴
11 = 5+6
12 = 3+4+5
13 = 6+7
14 = 2+3+4+5
15 = 7+8, 4+5+6 , 1+2+3+4+5 1로 시작하는 첫수... 5개짜리.. 4개건너뛰고 나옴.
16 = x
17 = 8+9
18 = 5+6+7, 3+4+5+6
n개자리는 n(1+n)/n번째 부터 나온다. 그리고 n-1번째 단위로 하나씩 나오게 됨.
만약 15로 지정했다하면.
n(1+n)/2이 15이하인 n을 먼저 계산하고. 15에선ㄴ 2개짜리, 3개짜리, 4개짜리, 5인 n이 산출되게 된다.
2개짜리는 15 - n(1+n)이 2로 나누어 떨어지면 +1
3개짜리는 15 - n(1+n)이 3으로 나누어 떨어지면 +1 ... 나머지도 이렇게 간다.
'''
'''
pseudo code
def solution(n):
for i in range(n):
i(1+i)/2 <= n 인 i를 구하고
n - i(1+i)/2 가 i로 나누어 떨어지면 총합을 1 추가한다.
return answer
'''
# def solution(n):
# total = 0
# for i in range(2, n+1): #i개 짜리가 있는지 검사합니다.
# print("i: {}".format(i))
# sum = (i*(1+i))//2
# print("sum: {}".format(sum))
# if sum <= n:
# print("sum이 n보다 작으니까, {}개 짜리가 있을 수 있습니다.".format(i))
# if (n - sum) % i == 0:
# print("{}의 연속된 자연수 합 리스트에 있습니다.".format(n))
# total += 1
# print("total: {}".format(total))
# else:
# break
#
# return total+1
#
#
# print(solution(15))
def solution(n):
    """Count the ways to write ``n`` as a sum of consecutive positive integers.

    The single-term representation ``n = n`` is included. A run of ``i``
    consecutive integers starting at ``a >= 1`` sums to ``i*a + i*(i-1)/2``,
    which exists iff ``n - i*(i+1)/2 >= 0`` and that remainder divides by ``i``.

    Fixes vs. original: no longer shadows the builtin ``sum``; the nested
    ``if`` is flattened into a guard clause.
    """
    total = 0
    for length in range(2, n + 1):
        # Smallest possible sum of `length` consecutive positives: 1+2+...+length.
        smallest = length * (length + 1) // 2
        if smallest > n:
            break  # longer runs can only be larger — nothing more to find
        if (n - smallest) % length == 0:
            total += 1
    # +1 accounts for the trivial representation n = n.
    return total + 1


print(solution(15))
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui/thumbnail.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Thumbnail(object):
    # Qt Designer-generated form: a single auto-resizing thumbnail label in a
    # grid layout. Do not hand-edit logic here — regenerate from thumbnail.ui.
    def setupUi(self, Thumbnail):
        """Build the widget tree on the given top-level widget."""
        Thumbnail.setObjectName("Thumbnail")
        Thumbnail.resize(400, 263)
        self.gridLayout = QtWidgets.QGridLayout(Thumbnail)
        self.gridLayout.setObjectName("gridLayout")
        # QResizeLabel is a promoted widget imported at the bottom of the module
        # (standard Qt Designer pattern).
        self.thumbnail = QResizeLabel(Thumbnail)
        self.thumbnail.setText("")
        self.thumbnail.setAlignment(QtCore.Qt.AlignCenter)
        self.thumbnail.setObjectName("thumbnail")
        self.gridLayout.addWidget(self.thumbnail, 0, 0, 1, 1)
        self.retranslateUi(Thumbnail)
        QtCore.QMetaObject.connectSlotsByName(Thumbnail)
    def retranslateUi(self, Thumbnail):
        """Apply translated UI strings (called once from setupUi)."""
        _translate = QtCore.QCoreApplication.translate
        Thumbnail.setWindowTitle(_translate("Thumbnail", "Form"))
from geomaker.ui.widgets import QResizeLabel
|
# -*- coding: utf-8 -*-
"""thrift文件处理的通用工具"""
__author__ = ["zhaolingzhi@xxxxxxxx.cn", 'PangBo <pangbo@xxxxxxxx.cn>']
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
class FileInOut(object):
    """Read/write Thrift objects from/to a file via TBinaryProtocol.

    Files whose name ends in ``.bz2`` are transparently (de)compressed.
    """

    def open(self, filepath, mode):
        """Open ``filepath`` for Thrift binary reading or writing.

        Args:
            filepath: path of the file; a ``.bz2`` suffix enables bzip2.
            mode: file mode passed to ``open``/``BZ2File``.
        """
        if filepath.endswith('.bz2'):
            import bz2
            f = bz2.BZ2File(filepath, mode)
        else:
            f = open(filepath, mode)
        # BUGFIX: single-underscore names replace the original ``__trans`` /
        # ``__prot``. Name mangling stored those as ``_FileInOut__trans``, so
        # close()'s hasattr(self, '__trans') was never true and buffers were
        # never flushed; worse, FastFileInOut's ``__trans`` mangled to
        # ``_FastFileInOut__trans`` and was invisible to the inherited
        # read/write/close entirely.
        self._trans = TTransport.TFileObjectTransport(f)
        self._prot = TBinaryProtocol.TBinaryProtocol(self._trans)

    def read(self, t):
        """Read one Thrift object into ``t``; True on success, False on EOF."""
        try:
            t.read(self._prot)
            return True
        except EOFError:
            # Python 3 compatible form (original used py2-only ``except E, e``).
            return False

    def write(self, t):
        """Serialize one Thrift object ``t`` to the open transport."""
        t.write(self._prot)

    def close(self):
        """Flush and close the transport, then drop protocol references."""
        if hasattr(self, '_trans'):
            self._trans.flush()
            self._trans.close()
            del self._trans
        if hasattr(self, '_prot'):
            del self._prot

    def getRecordcount(self, t):
        """Tally records by sdkType until EOF.

        sdkType 30 counts as appwall, 31 as dbox, anything else as event.

        Returns:
            (event_count, appwall_count, dbox_count)
        """
        event_count = 0
        appwall_count = 0
        dbox_count = 0
        while True:
            if not self.read(t):
                break
            if int(t.sdkType) == 30:
                appwall_count += 1
            elif int(t.sdkType) == 31:
                dbox_count += 1
            else:
                event_count += 1
        return event_count, appwall_count, dbox_count


class FastFileInOut(FileInOut):
    """Buffered/accelerated variant; faster, may lose data on a crash."""

    def open(self, p, mode):
        """Open ``p`` with a 256 KiB buffered transport and accelerated protocol."""
        if p.endswith('.bz2'):
            import bz2
            f = bz2.BZ2File(p, mode)
        else:
            f = open(p, mode)
        # Use the same single-underscore attributes as the base class so the
        # inherited read/write/close operate on this transport (see the
        # name-mangling note in FileInOut.open).
        trans = TTransport.TFileObjectTransport(f)
        self._trans = TTransport.TBufferedTransport(trans, 256 * 1024)
        self._prot = TBinaryProtocol.TBinaryProtocolAccelerated(self._trans)
|
import collections


class Solution:
    """LeetCode 242: valid anagram."""

    def isAnagram(self, s: str, t: str) -> bool:
        """Return True if ``t`` is an anagram of ``s``.

        Two strings are anagrams iff their character multisets are equal; the
        length check is just a cheap fast path. Overall O(len(s) + len(t)).

        The original tested ``s_counter - t_counter`` against an empty
        Counter, which is one-sided (subtraction drops non-positive counts)
        and only worked because lengths were already known to match; direct
        Counter equality states the intent explicitly.
        """
        if len(s) != len(t):
            return False
        return collections.Counter(s) == collections.Counter(t)
|
# PYTHON 3
def main():
    """Read a name from stdin and print a greeting; returns 0 on success."""
    name = input()
    print("Hello " + name)
    return 0


if __name__ == '__main__':
    main()
# PYTHON 2.7
# def main():
# # YOUR CODE GOES HERE
# # Please take input and print output to standard input/output (stdin/stdout)
# # E.g. 'input()/raw_input()' for input & 'print' for output
# s = raw_input()
# # print '{0} {1}'.format('Hello',s)
# print 'Hello '+s
# return 0
# if __name__ == '__main__':
# main()
|
"""
This method takes an array of items, a page number, and page size and returns a smaller array
>>> pagination([1,2,3,4,5,6,7,8,9,10,11,12], 1, 6)
[1, 2, 3, 4, 5, 6]
>>> pagination([1,2,3,4,5,6,7,8,9,10,11,12], 2, 6)
[7, 8, 9, 10, 11, 12]
>>> pagination([1,2,3,4,5,6,7,8,9,10,11,12], 4, 6)
[7, 8, 9, 10, 11, 12]
>>> pagination([1,2,3,4,5,6,7,8,9,10,11,12], 0, 3)
[1, 2, 3]
>>> pagination([1,2,3,4,5,6,7,8,9,10,11,12], 3, 3)
[7, 8, 9]
"""
import math
def pagination(arr, page_number, page_size):
    """Return the ``page_number``-th page (1-based) of ``arr``.

    Out-of-range page numbers are clamped: values above the last page yield
    the last page, values below 1 yield the first page. An empty ``arr``
    always yields an empty list.
    """
    last_page = math.ceil(len(arr) / page_size)
    page = max(min(page_number, last_page), 1)
    start = (page - 1) * page_size
    return arr[start:start + page_size]
Python 2.7.5 (default, May 15 2013, 22:43:36) [MSC v.1500 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import SpellCheckFinal
>>> import doctest
>>> doctest.testmod(SpellCheckFinal, verbose = True)
Trying:
spellCheck("test1.txt")
Expecting:
{'exercsie': 1, 'finised': 1}
ok
Trying:
spellCheck("test2.txt")
Expecting:
{'bechause': 1, 'c++': 1}
ok
Trying:
spellCheck("test3.txt")
Expecting:
{'lissard': 1, 'chamelon': 1, 'gerbal': 2, 'hampster': 3, 'tortise': 1}
ok
Trying:
type(spellCheck("test1.txt"))
Expecting:
<type 'dict'>
ok
1 items had no tests:
SpellCheckFinal
1 items passed all tests:
4 tests in SpellCheckFinal.spellCheck
4 tests in 2 items.
4 passed and 0 failed.
Test passed.
TestResults(failed=0, attempted=4)
>>> |
import torch
from torch import nn
from .feature_extractor import EEG_FeatureExtractor
class Relative_Positioning(nn.Module):
    """Self-supervised relative-positioning head over an EEG feature extractor.

    Embeds two EEG windows with a shared extractor and scores the absolute
    embedding difference |h1 - h2| with a single linear unit (the stored
    SoftMarginLoss is available for the training loop).
    """

    def __init__(self, C, T, k=50, m=13, dropout_prob=0.5, embedding_dim=100, n_spatial_filters=8):
        super().__init__()
        # NOTE(review): .cuda() hard-codes GPU placement; consider moving the
        # whole module with .to(device) at the call site instead.
        self.feature_extractor = EEG_FeatureExtractor(
            C, T, k, m, dropout_prob, embedding_dim, n_spatial_filters
        ).cuda()
        self.linear = nn.Linear(embedding_dim, 1)
        self.loss_fn = nn.SoftMarginLoss()

    def forward(self, x):
        # x holds a pair of windows per sample along dim 1 (assumed
        # (batch, 2, ...) — confirm against the data loader).
        window_a = x[:, 0].unsqueeze(dim=1)
        window_b = x[:, 1].unsqueeze(dim=1)
        emb_a = self.feature_extractor(window_a)
        emb_b = self.feature_extractor(window_b)
        score = self.linear(torch.abs(emb_a - emb_b))
        return score
"""Set estimation (solving the attribution problem)"""
__author__ = 'thor'
from numpy import *
import pandas as pd
import itertools
import ut as ms
import ut.daf.ch
import ut.pmath.poset
import ut.pcoll.order_conserving
from ut.util.uiter import powerset
# Default column-name configuration shared by SetEst and its factory methods.
default = dict()
default['success'] = 'success'  # column counting successful trials
default['trial'] = 'trial'  # column counting total trials
default['rate'] = 'rate'  # derived column: success / trial
default['set_elements_name'] = 'element' # category of the elements used in the sets
default[
    'set_column'
] = 'set' # name of column that contains the sets (which will index the data)
class SetEst(object):
def __init__(self, d, **kwargs):
# process inputs
kwargs = dict(default, **kwargs)
self.success = kwargs['success']
self.trial = kwargs['trial']
self.rate = kwargs['rate']
self.set_elements_name = kwargs['set_elements_name']
# get and order data
self.d = d
# compute other columns and attributes
self.index_type = type(self.d.index.values[0])
self.set_elements = list(
unique(list(itertools.chain.from_iterable(self.d.index.values)))
)
self.n_elements = len(self.set_elements)
self.add_bitmap()
self.add_stats()
# order columns
self.d = self.d[self.non_element_columns() + self.set_elements]
# init poset
self._poset = None # will be computes if and when used
# vector to map bitmaps to ints
self.hash_base = array([2 ** i for i in range(self.n_elements)])
self.hash_base_matrix = matrix(self.hash_base).T
self.hash_to_value = dict()
def bitmap_to_hash(self, bitmap):
return array((bitmap * self.hash_base_matrix))
# def mk_hash_to_value(self, val_col=None):
# val_col = val_col or self.success
def add_bitmap(self):
ddd = ms.pmath.poset.family_of_sets_to_bitmap(self.d.index.values)[
self.set_elements
]
self.d = pd.concat([self.d, ddd], axis=1)
# self.d[self.set_elements] = ddd
def poset(self, num_of_elements=None):
num_of_elements = num_of_elements or self.n_elements
if self._poset is None:
self._poset = ms.pmath.poset.set_containment_matrix(self.bitmap_matrix())
return self._poset
def add_stats(self):
self.d.loc[:, 'n_members'] = list(map(len, self.d.index.values))
if self.trial in self.d.columns:
self.d.loc[:, self.rate] = self.d[self.success] / array(
list(map(float, self.d[self.trial]))
)
def rm_bitmap(self):
self.d = self.dd()
def bitmap_matrix(self):
return self.d[self.set_elements].as_matrix()
def non_element_columns(self):
return ms.pcoll.order_conserving.setdiff(
list(self.d.columns), self.set_elements
)
def sort_d(self, columns=None, **kwargs):
self.d = self.d.sort(columns=columns, **kwargs)
def dd(self):
return self.d[self.non_element_columns()]
def get_rate(self, df):
return df[self.success] / df[self.trial]
def change_type_of_d_index(self, change_to_type=tuple):
index_name = self.d.index.name or 'index'
self.d = self.d.reset_index(drop=False)
self.d.loc[:, index_name] = self.d.loc[:, index_name].apply(change_to_type)
self.d = self.d.set_index(index_name)
self.index_type = type(
self.d.index.values[0]
) # couldn't I use change_to_type here?
def subset_summed_d(self, cols=None):
cols = cols or [self.success, self.trial]
if isinstance(cols, str):
cols = [cols]
t = self.d[cols].copy()
return pd.DataFrame([sum(t[lidx]) for lidx in self.poset()], index=self.d.index)
def get_stats_of_subsets_of(self, idx):
if not isinstance(idx, int):
idx = type(idx)(unique(idx))
idx = array([x == idx for x in self.d.index.values])
t = self.dd()[self.poset()[idx, :].T]
else:
t = self.dd().iloc[self.poset()[idx, :]]
return t.sort(['n_members', self.trial, self.success], ascending=False)
@staticmethod
def _process_input_for_factories(df, kwargs):
kwargs['success'] = kwargs.get(
'success',
list(
set(kwargs.get('set_column', default['set_column'])).difference(
df.columns
)
)[0],
)
df = df.copy()
df[kwargs['set_column']] = list(
map(tuple, list(map(unique, df[kwargs['set_column']])))
)
return df, dict(default, **kwargs)
@staticmethod
def _mk_data_from_set_success_df(df, **kwargs):
df = (
df[[kwargs['set_column'], kwargs['success']]]
.groupby(kwargs['set_column'])
.agg(['sum', 'count'])[kwargs['success']]
)
df = ms.daf.ch.ch_col_names(
df,
new_names=[kwargs['success'], kwargs['trial']],
old_names=['sum', 'count'],
)
df = df.sort([kwargs['success'], kwargs['trial']], ascending=False)
return df
@staticmethod
def from_set_success_df(df, **kwargs):
df, kwargs = SetEst._process_input_for_factories(df, kwargs)
df = SetEst._mk_data_from_set_success_df(df, **kwargs)
return SetEst(d=df, **kwargs)
@staticmethod
def mk_subset_summed_from_set_success_df(df, **kwargs):
    """Factory: like ``from_set_success_df``, but with success/trial columns
    replaced by their sums over subsets, and derived stats recomputed."""
    se = SetEst.from_set_success_df(df, **kwargs)
    se.d[[se.success, se.trial]] = se.subset_summed_d()
    se.add_stats()
    return se
@staticmethod
def mk_subset_summed_closure_from_set_success_df(df, **kwargs):
    """Factory: subset-summed SetEst over the full powerset closure.

    Combinations of observed elements that never occurred are added as
    zero-success/zero-trial rows, so every member of the powerset is
    represented before subset sums and stats are computed.
    """
    df, kwargs = SetEst._process_input_for_factories(df, kwargs)
    df = SetEst._mk_data_from_set_success_df(df, **kwargs)
    # make missing combos data and append to existing data
    existing_combos = df.index.values
    set_elements = list(
        unique(list(itertools.chain.from_iterable(existing_combos)))
    )
    missing_combos = set(powerset(set_elements)).difference(existing_combos)
    missing_combo_data = pd.DataFrame(
        {kwargs['success']: 0, kwargs['trial']: 0}, index=missing_combos
    )
    # append to existing data
    df = pd.concat([df, missing_combo_data], axis=0)
    # make a SetEst from the set_success df
    se = SetEst(df, **kwargs)
    se.d[[se.success, se.trial]] = se.subset_summed_d()
    se.add_stats()
    return se
class Shapley(SetEst):
    """SetEst that computes Shapley values for set elements.

    The per-subset value comes from ``val_col`` (default: the success
    column). An element's Shapley value is its marginal contribution,
    averaged within each subset size and then across sizes.

    NOTE(review): ``mk_subset_summed_closure_from_set_success_df`` is a
    near-verbatim copy of the SetEst factory, differing only in the class
    instantiated and one extra map rebuild — candidate for a shared
    classmethod.
    """
    def __init__(self, d, **kwargs):
        super(Shapley, self).__init__(d, **kwargs)
        # column holding each subset's value; defaults to success counts
        self.val_col = kwargs.get('val_col', self.success)
        self.subset_val_map = None
        self.compute_subset_val_map()
    def compute_subset_val_map(self):
        # membership bit-tuple -> subset value, for O(1) lookups
        self.subset_val_map = {
            tuple(k): v for k, v in zip(self.bitmap_matrix(), self.d[self.val_col])
        }
    def get_subset_val(self, subset):
        # subsets never observed are valued 0
        return self.subset_val_map.get(tuple(subset), 0)
    def compute_shapley_values(self):
        """Return ``{element: shapley value}`` for every set element."""
        return {
            element: self._compute_single_shapley_value(element)
            for element in self.set_elements
        }
    def _compute_single_shapley_value(self, element):
        # mean marginal contribution within each subset size, then a plain
        # mean across sizes
        t = self._mk_marginal_values_for_element(element)
        tt = t[['subset_sizes', 'success']].groupby('subset_sizes').mean()
        return mean(tt['success'])
    def _mk_marginal_values_for_element(self, element):
        """Marginal value of ``element`` for every subset containing it.

        Result columns: 'success' holds value(subset) minus value(subset
        without the element); 'subset_sizes' the subset cardinality.
        """
        # get location lidx
        element_col_lidx = array([x == element for x in self.set_elements])
        element_row_lidx = array(self.d[element] > 0)
        # sum up all values for subsets containing element
        t = self.d[[self.val_col]][element_row_lidx]
        # get the the values of the subsets obtained by removing this element from the sets it's in
        subsets_intersecting_with_element = self.bitmap_matrix()[element_row_lidx, :]
        t['subset_sizes'] = sum(subsets_intersecting_with_element, axis=1)
        # zero the element's column so lookups address the subset without it
        subsets_intersecting_with_element[:, element_col_lidx] = 0
        t['success'] = t[self.val_col] - array(
            list(map(self.get_subset_val, subsets_intersecting_with_element))
        )
        return t
    def _compute_single_shapley_value_experimental(self, element):
        # variant: divide each size-group's summed contribution by the
        # subset size instead of taking a plain mean
        def group_stats_fun(g):
            return g['success'].sum() / float(g['subset_sizes'].iloc[0])
        t = self._mk_marginal_values_for_element(element)
        tt = (
            t[['subset_sizes', 'success']]
            .groupby('subset_sizes')
            .apply(group_stats_fun)
        )
        return mean(tt)
        # tt = t[['subset_sizes', 'success']].groupby('subset_sizes').mean()
        # return mean(tt['success'])
    @staticmethod
    def mk_subset_summed_closure_from_set_success_df(df, **kwargs):
        """Factory: Shapley over the powerset closure (zero-filled combos),
        with subset-summed stats and a rebuilt subset-value map."""
        df, kwargs = Shapley._process_input_for_factories(df, kwargs)
        df = Shapley._mk_data_from_set_success_df(df, **kwargs)
        # make missing combos data and append to existing data
        existing_combos = df.index.values
        set_elements = list(
            unique(list(itertools.chain.from_iterable(existing_combos)))
        )
        missing_combos = set(powerset(set_elements)).difference(existing_combos)
        missing_combo_data = pd.DataFrame(
            {kwargs['success']: 0, kwargs['trial']: 0}, index=missing_combos
        )
        # append to existing data
        df = pd.concat([df, missing_combo_data], axis=0)
        # make a SetEst from the set_success df
        se = Shapley(df, **kwargs)
        se.d[[se.success, se.trial]] = se.subset_summed_d()
        # the subset sums changed the values the map was built from
        se.compute_subset_val_map()
        se.add_stats()
        return se
class WithOrWithout(SetEst):
    """SetEst comparing statistics of sets with vs. without each element."""

    def __init__(self, d, **kwargs):
        super(WithOrWithout, self).__init__(d, **kwargs)

    def with_and_without_stats_for_element(self, element, extra_group_vars=None):
        """Success/trial sums and rate, grouped by presence of ``element``.

        ``extra_group_vars`` may list additional grouping columns.
        (Bug fix: default is now None rather than a shared mutable list.)
        """
        grvars = (extra_group_vars or []) + [element]
        dd = self.d[[self.success, self.trial] + grvars].groupby(grvars).sum()
        dd['rate'] = dd[self.success] / dd[self.trial]
        return dd

    def with_and_without_stats(self):
        """Stack per-element with/without stats into one indexed frame.

        Bug fix: the original called the undefined method
        ``with_and_without_element_stats``; the defined method is
        ``with_and_without_stats_for_element``.
        """
        dd = pd.DataFrame()
        for c in self.set_elements:
            t = self.with_and_without_stats_for_element(c)
            t['element'] = c
            t['element_present'] = t.index.values
            t = t.reset_index(drop=True)
            dd = pd.concat([dd, t], axis=0)
        return dd.set_index(['element', 'element_present'])

    def with_and_without_rate_lift(self):
        """Per element: success rate with the element divided by the rate
        without it, sorted descending (legacy ``DataFrame.sort`` API,
        consistent with the rest of this module)."""
        dd = dict()
        for c in self.set_elements:
            t = self.with_and_without_stats_for_element(c)
            dd[c] = t[t.index == 1]['rate'].iloc[0] / t[t.index == 0]['rate'].iloc[0]
        dd = pd.DataFrame({'element': list(dd.keys()), 'rate_lift': list(dd.values())})
        dd = dd.sort('rate_lift', ascending=False).set_index('element')
        return dd
|
# --- interactive BMI calculator: gather user data ---
name = input("What's your name ? > ")
sex = input("What's your sex ? (male/female) > ")
bb = int(input("Input weight (Kg) > "))    # body weight in kilograms
tb1 = int(input("Input Hight (Cm) > "))    # height in centimetres
tb2 = tb1 / 100                            # height in metres
bmi = bb / (tb2 * tb2)                     # body mass index = kg / m^2
def male(bmi_value=None):
    """Classify a male BMI into a weight-category string.

    Generalized: ``bmi_value`` defaults to the module-level ``bmi`` computed
    from user input, so existing no-argument ``male()`` calls behave exactly
    as before. Redundant lower-bound checks in the elif chain were removed.
    """
    b = bmi if bmi_value is None else bmi_value
    if b < 17:
        cc = "Thin"
    elif b < 23:
        cc = "Normal"
    elif b < 27:
        cc = "Fat"
    else:
        cc = "Obesitas"
    return cc
def female(bmi_value=None):
    """Classify a female BMI into a weight-category string.

    Bug fix: the original declared a spurious ``self`` parameter on this
    plain function, so the intended no-argument call ``female()`` would
    raise TypeError. The BMI may now be passed explicitly; it defaults to
    the module-level ``bmi`` computed from user input.
    """
    b = bmi if bmi_value is None else bmi_value
    if b < 18:
        cc = "Thin"
    elif b < 25:
        cc = "Normal"
    elif b < 27:
        cc = "Fat"
    else:
        cc = "Obesitas"
    return cc
def kesimpulan(cc):
    """Print the BMI report for the user.

    Bug fix: the original printed the undefined name ``hasil_bmi`` (a
    NameError at runtime); the classification parameter ``cc`` is what was
    meant. A duplicated weight line (copy-paste leftover) was also removed.
    Reads the module-level ``name``, ``bb``, ``tb2`` and ``bmi`` values.
    """
    print("Hei ", name, " !")
    print("Your weight is= ", bb, " Kg")
    print("Your height is= ", tb2, " M")
    print("Your Body Mass Index is= ", bmi)
    print("With these results, we conclude that you are ", cc, " people.")
# dispatch on the reported sex; any other answer exits silently
if sex == "male":
    result = male()
    kesimpulan(result)
elif sex == "female":
    # bug fix: this branch previously computed the category with male().
    # The BMI is passed explicitly, which keeps this call valid even with
    # the historical one-parameter female() signature (where the argument
    # was bound to an unused parameter).
    result = female(bmi)
    kesimpulan(result)
else:
    exit()
|
import wan_optimizer
import utils
import tcp_packet
class WanOptimizer(wan_optimizer.BaseWanOptimizer):
    """ WAN Optimizer that divides data into variable-sized
    blocks based on the contents of the file.

    This WAN optimizer should implement part 2 of project 4.

    (Python 2 module — note the bare ``print`` statement in sender().)
    """
    # The string of bits to compare the lower order 13 bits of hash to
    GLOBAL_MATCH_BITSTRING = '0111011001010'

    def __init__(self):
        wan_optimizer.BaseWanOptimizer.__init__(self)
        # Add any code that you like here (but do not add any constructor arguments).
        # destination address -> bytes buffered but not yet emitted as a block
        self.block_buffers = {}
        # block hash -> raw block payload previously seen (dedup cache)
        self.hash_to_payload = {}
        return

    def sender(self, packet, port):
        """Scan this destination's buffer for content-defined block
        boundaries and emit each completed block — or just its hash when
        the block was seen before (is_raw_data=False on the hash packet).

        A boundary follows any 48-byte window whose hash ends in
        GLOBAL_MATCH_BITSTRING. Scanning starts where the newly appended
        payload begins — presumably to avoid rehashing old windows; TODO
        confirm the ``i == 1 -> 48`` reset handles short first payloads.
        """
        string = self.block_buffers[packet.dest]
        i = len(string) - len(packet.payload) + 1
        if i == 1:
            i = 48
        while i < (len(string) + 1):
            window = string[(i-48):i]
            h = utils.get_hash(window)
            if utils.get_last_n_bits(h, 13) == WanOptimizer.GLOBAL_MATCH_BITSTRING:
                payload = self.block_buffers[packet.dest][:i]
                payload_hash = utils.get_hash(payload)
                if payload_hash in self.hash_to_payload:
                    # cached block: send only its hash across the WAN
                    hash_packet = tcp_packet.Packet(packet.src, packet.dest, False, packet.is_fin, payload_hash)
                    self.send(hash_packet, port)
                else:
                    # leftover debug output for one specific block hash
                    if payload_hash == '\xbc\x17j\xea\x9a7ERJ\xe9\xd8\x01Jl\xb1\xb8\xa1\x88>}':
                        print payload
                    self.hash_to_payload[payload_hash] = payload
                    self.payload_sender(payload, packet, port)
                # drop the emitted block from the buffer
                # NOTE(review): from here on ``i`` keeps old-``string``
                # coordinates while block_buffers[dest] was shortened, so a
                # later ``[:i]`` slice looks misaligned — verify.
                self.block_buffers[packet.dest] = self.block_buffers[packet.dest][i:]
                i += 48
            else:
                i += 1
        if packet.is_fin:
            # flush whatever remains as the final block
            # NOTE(review): if the tail's hash is already cached nothing is
            # sent at all on fin (no hash packet either) — verify intent.
            payload = self.block_buffers[packet.dest]
            payload_hash = utils.get_hash(payload)
            if payload_hash not in self.hash_to_payload:
                self.hash_to_payload[payload_hash] = payload
                self.payload_sender(payload, packet, port)

    def payload_sender(self, payload, packet, port):
        """Send ``payload`` as raw-data packets, splitting at MAX_PACKET_SIZE;
        only the last fragment carries the original packet's fin flag."""
        while len(payload) > utils.MAX_PACKET_SIZE:
            packet_payload = payload[:utils.MAX_PACKET_SIZE]
            new_packet = tcp_packet.Packet(packet.src, packet.dest, True, False, packet_payload)
            self.send(new_packet, port)
            payload = payload[utils.MAX_PACKET_SIZE:]
        new_packet = tcp_packet.Packet(packet.src, packet.dest, True, packet.is_fin, payload)
        self.send(new_packet, port)

    def receive(self, packet):
        """ Handles receiving a packet.

        Right now, this function simply forwards packets to clients (if a packet
        is destined to one of the directly connected clients), or otherwise sends
        packets across the WAN. You should change this function to implement the
        functionality described in part 2. You are welcome to implement private
        helper fuctions that you call here. You should *not* be calling any functions
        or directly accessing any variables in the other middlebox on the other side of
        the WAN; this WAN optimizer should operate based only on its own local state
        and packets that have been received.
        """
        if packet.dest in self.address_to_port:
            # The packet is destined to one of the clients connected to this middlebox;
            # send the packet there.
            if not packet.is_raw_data:
                # hash packet: expand it from the local dedup cache
                payload = self.hash_to_payload[packet.payload]
                self.payload_sender(payload, packet, self.address_to_port[packet.dest])
            else:
                # raw data: buffer it, then re-chunk toward the client
                if packet.dest not in self.block_buffers:
                    self.block_buffers[packet.dest] = packet.payload
                else:
                    self.block_buffers[packet.dest] += packet.payload
                self.sender(packet, self.address_to_port[packet.dest])
        else:
            # The packet must be destined to a host connected to the other middlebox
            # so send it across the WAN.
            if packet.dest not in self.block_buffers:
                self.block_buffers[packet.dest] = packet.payload
            else:
                self.block_buffers[packet.dest] += packet.payload
            self.sender(packet, self.wan_port)
|
# path: core/
# filename: content.py
# description: WSGI application content tree
'''
# make python2 strings and dictionaries behave like python3
from __future__ import unicode_literals
try:
from builtins import dict, str
except ImportError:
from __builtin__ import dict, str
Copyright 2017 Mark Madere
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
''' external imports
'''
import os.path
import sys
import imp
import traceback
import urllib
import datetime
from decimal import Decimal
''' internal imports
'''
#import functions
''' classes
'''
class Content(list):
    """A node in the WSGI application content tree.

    A Content is a list of child Content objects built recursively from a
    configuration dict (the 'content block'). Construction runs any
    configured pre-processors, builds the subtree, then instantiates this
    node's element object. (Python 2 module: relies on ``unicode`` and
    ``basestring``.)
    """
    def __init__(self,parent,conf):
        # super list class - init with empty list
        super(Content, self).__init__()
        # vars
        self.attributes = conf # attributes, the 'content block'
        self.parent = parent # parent object
        #self.attributes['parent'] = self.parent.attributes # this adds support for {{parent[:parent[:parent]etc.]:attribute}}
        # a parent without a 'top' is the root; it becomes its own top
        if 'top' not in dir(self.parent):
            self.parent.top = self.parent
        self.top = self.parent.top # top object
        self.view = self.top #migrate to this instead of top
        self.elementObj = None # placeholder for elementObj
        self.data = None  # loaded/parsed data (see load_data)
        self.marker_map = self.top.marker_map  # shared {{marker}} -> expression map
        # main
        ''' this is a recursive class.
            working root to branch above this point
        '''
        # perform preprocessing
        self.process(self.attributes.get('process',{}))
        # build a tree of sub Content objects (a list of lists)
        self.tree(conf) # recursive function
        ''' an end node has been reached
            working branch to root below this point
        '''
        # init elementObj
        self.init_element()
        return None
def process(self,conf):
    """Run the configured pre-processors for this content block.

    ``conf`` is a processor dict or a list of them. Each item names a
    processor class by dotted path ('type'); the class is loaded, run, and
    its boolean result selects the optional 'true'/'false' branch, which
    may be nested processors (recursion) or sub-content (tree()). Returns
    True when all processors completed, False on the first failure.
    """
    # a single processor dict is treated as a one-item list
    if isinstance(conf,dict):
        conf = [conf]
    if not isinstance(conf,list):
        print('process conf is unparseable')
        return False
    for item in conf:
        # log
        if item.get('log'):
            print(self.fnr(item['log']))
        #vars
        processor_type = item.get('type')
        if not processor_type:
            return False
        ''' Dynamically load processors
        '''
        # split the dotted path into module path and class name
        m = ".".join(processor_type.split('.')[:-1])
        c = ".".join(processor_type.split('.')[-1:])
        _class = self.loadmodule(m,c)
        if not _class:
            return False
        # instanciate element object
        self.processorObj = _class(self,item)
        # run processor
        try:
            output = self.processorObj.run()
            # debug
            #print('Processor Output: - %s' %str(output))
            if output == True:
                # convert bool True to string
                if item.get(True):
                    item['true'] = item[True]
                if item.get('true'):
                    # sub content
                    if isinstance(item['true'], dict) and item['true'].get('content'):
                        self.tree({'content': item['true']})
                        continue
                    # sub process
                    self.process(item['true']) #recurse
                else:
                    continue
            elif output == False:
                # convert bool False to string
                if item.get( False ):
                    item['false'] = item[False]
                if item.get('false'):
                    if isinstance(item['false'], dict) and item['false'].get('content'):
                        self.tree({'content': item['false']})
                        return False
                    self.process(item['false']) #recurse
                else:
                    #return False
                    # are we in a list? how to handle differntly
                    continue
            # if process returns something other than false make it false
            else:
                return False
        except: traceback.print_exc()
    return True
def loadmodule(self,m,c):
    """Import module path ``m`` and return its class ``c``, or False.

    Looks for a site-local "<m>.py" first (Python 2 ``imp.load_source``),
    then a core/ module loaded via ``__import__``. All failures are
    printed and reported as False rather than raised.
    """
    # debug
    #print(m)
    #print(c)
    # does the module exist in site directory
    if os.path.isfile("%s.py" %m.replace('.','/')):
        print('found a local module - importing')
        try:
            _module = imp.load_source(m,"%s.py" %m.replace('.','/'))
        except:
            traceback.print_exc()
            return False
    # does the module exist in core directory
    elif os.path.isfile("core/%s.py" %m.replace('.','/')):
        try:
            #_module = imp.load_source(m,"core/%s.py" %m.replace('.','/'))
            __import__(m)
            _module = sys.modules[m] #load module
        except:
            traceback.print_exc()
            return False
    else:
        print("Could not find the the module '%s'." %m)
        return False
    # load class
    try:
        _class = getattr(_module,c)
    except AttributeError:
        print("Could not find the class '%s' in the module '%s'." %(c,m))
        return False
    except:
        traceback.print_exc()
        return False
    return _class
def tree(self,conf):
    ''' Recursivly create Content objects

    Each entry under conf['content'] becomes a child Content appended to
    this node (Content subclasses list). A bare string conf is eval()'d
    (Python 2 ``basestring``) — NOTE(review): eval on configuration text
    is a code-execution risk if configs are untrusted.
    '''
    # Vars
    if isinstance(conf,basestring) :
        print("Warn: classes.content.tree() performing eval()")
        conf = eval(conf)
    if conf.get('content'):
        # convert dict to list
        if isinstance(conf['content'],dict):
            conf['content'] = [conf['content']]
        # recurse
        for item in conf['content']:
            try:
                self.append( Content(self,item) )
                '''
                except MyError:
                    item = {errorcontent}
                    self.append(Content(self,item))
                '''
            except: traceback.print_exc()
    return None
def init_element(self):
    """Instantiate this node's Element object from attributes['type'].

    On failure, the block's attributes are replaced with an inline HTML
    error report (rendered later through the marker system) and
    init_element() is retried once with that error content. Relies on
    Python 2's ``e.message``.
    """
    # Dynamically import an Element object
    element_type = self.attributes.get('type','classes.element.Element') # type/class of element or default
    m = ".".join(element_type.split('.')[:-1]) # module name and path
    c = ".".join(element_type.split('.')[-1:]) # class name
    _class = self.loadmodule(m,c)
    if not _class:
        return False
    # instanciate Element object
    #self.elementObj = _class(self)
    # making an exception for calling processor instead of element
    try:
        # instanciate Element object
        self.elementObj = _class(self)
    except Exception as e:
        # There was an error
        # copy the content block (conf) for showing in error message
        import copy
        orginal_conf = copy.deepcopy(self.attributes)
        self.attributes = {}
        self.attributes['noindent'] = True
        self.attributes['value'] = '''
<style>
.error {
border: 2pt solid #ea8a8a;
padding-left: 10px;
padding-right: 10px;
border-radius: 10px;
background: #ffefef;
}
</style>
<pre class="error">
<h3>Content Error</h3>
There is an error in this content block:
{{yaml(e:code).html_escape()}}
Type: {{e:type}}
Message: {{e:message}}
{{e:suggestion}}
Stacktrace:
{{yaml(e:stack)}}
</pre>
'''
        self.attributes['e'] = {}
        self.attributes['e']['message'] = e.message
        self.attributes['e']['type'] = e.__repr__()
        self.attributes['e']['code'] = orginal_conf
        # the (possibly huge) subtree is not shown in the error report
        self.attributes['e']['code'].pop('content', None)
        if e.message == "__init__() takes exactly 3 arguments (2 given)":
            self.attributes['e']['suggestion'] = '''
Suggestion: This error usually means that a processor was called as an element.
Try this content block instead:
{{yaml(e:suggestedcode).html_escape()}}
'''
            self.attributes['e']['suggestedcode'] = {"process": orginal_conf}
        #self.attributes['e']['stack'] = traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback,10)
        # retry once, now rendering the error block instead
        return self.init_element()
    return None
def render(self):
    ''' Render Element objects in tree

    Children render first (depth-first); this node's element output is
    prepended, wrapped with the optional 'wrap' attribute (split on
    'wrap_delimiter'), and passed through the fnr() marker engine
    according to the nomarkup/singlemarkup/noindent attributes.
    '''
    #vars
    output = ''
    # render child elements
    for item in self:
        if isinstance(item, type(self)):
            output += item.render() #recurse
    # log entry if requested
    if 'log' in self.attributes:
        print(self.fnr(self.attributes['log']))
    # render this element
    output = unicode(self.elementObj.render())+output
    # split wrap
    wrap_delimiter = self.attributes.get('wrap_delimiter','|')
    wrap = self.fnr(self.attributes.get('wrap',wrap_delimiter)).split(wrap_delimiter,1)
    if len(wrap) == 1:
        wrap.append('')
    # Helpers for API formating
    if 'nomarkup' in self.attributes:
        #print('nomarkup')
        return wrap[0]+output+wrap[1]
    if 'singlemarkup' in self.attributes:
        #print('singlemarkup')
        return self.fnr(wrap[0]+output+wrap[1],1)
    if 'noindent' in self.attributes or 'noindent' in self.top.attributes:
        #print('noindent')
        # double markup
        return self.fnr(wrap[0]+output+wrap[1])
    # markup with indent
    return self.fnr(self.indent("%s\n%s\n%s" %(wrap[0],output,wrap[1])))
''' merge with render
'''
def indent(self, input, prefix=" "):
    """Prefix every line of *input*, except at the tree root.

    Content directly under the top node is returned untouched; deeper
    nodes get each line prefixed, with trailing newlines stripped from
    the result.
    """
    # the root's immediate content is not indented
    if self.top == self.parent:
        return input
    shifted = [prefix + line + "\n" for line in input.split('\n')]
    return "".join(shifted).rstrip("\n")
''' Regenerate This the core of the framework
'''
def fnr(self,template,limit=10,**kwargs):
    """find-and-replace: expand {{marker}} expressions in ``template``.

    Repeatedly scans ``template`` for {{...}} markers (nesting is not
    supported), parses each marker into a stack of attribute lookups and
    ``method()`` calls, resolves the stack against ``self.marker_map``
    and this node's attributes, and substitutes the results. Passes
    repeat until a pass changes nothing or ``limit`` passes have run.

    A template consisting of exactly one bare marker (no calls) returns
    the resolved object itself ('raw' mode). kwargs flags: 'binary'
    stringifies results with str(); 'raw' forces returning the first
    resolved object. Python 2: relies on ``unicode``.

    NOTE(review): resolution eval()s marker_map entries — only safe when
    marker_map and templates are trusted.
    """
    #debug
    #print(self.marker_map)
    binary = False
    if 'binary' in kwargs:
        print('binary')
        binary = True
    raw = False
    if 'raw' in kwargs:
        print('raw')
        raw = True
    #debug
    #print(type(template))
    count = 0
    while True:
        # copy the template. this copy will be destroyed searching for markers
        template_copy = template
        template_original = template # second copy for caparision later
        ''' find all the markers
            This does not support nested markers!
        '''
        # does this template include any markers?
        try:
            start = unicode(template_copy).index('{{')
            end = unicode(template_copy).index('}}',start)
        # no markers found, return input
        except ValueError:
            return template
        # markers found
        # find markers
        markers = []
        while True:
            # find the next marker's endpoints
            try:
                start = template_copy.index('{{')
                end = template_copy.index('}}',start)
            except ValueError:
                # all markers found
                break
            # add the marker to list
            markers.append(template_copy[start+2:end])
            # truncate template
            template_copy = template_copy[end+2:]
        # debug
        #print('markers: %s' %str(markers))
        ''' Is this marker for a data object?
            If there is only one marker
            and the marker is the only item in the template
            and there are no functions
            then it is a request for a data object
        '''
        if len(markers) == 1 and "(" not in markers[0] and ")" not in markers[0] and "{{%s}}"%markers[0] == template:
            raw = True
        ''' Parse each marker into a stack.
        '''
        for marker in markers:
            # vars
            quote_state = False
            nested_state = False
            quote_chr = None
            quote_characters = ["'",'"']
            escape_characters = ["\\"]
            value = []
            stack = []
            markup_value = None
            # parse this marker character by character
            for i in range(0,len(marker)):
                # debug
                #print("".join(value))
                # skip escape characters
                if marker[i] in escape_characters:
                    continue
                # record charater after escpae character
                if i > 0 and marker[i-1] in escape_characters:
                    value.append(marker[i])
                    continue
                # start quote
                if not quote_state and marker[i] in quote_characters:
                    quote_state = True
                    quote_chr = marker[i]
                    # record litteral character ('|' later tags string literals)
                    value.append("|")
                    continue
                # end quote
                if quote_state and marker[i] == quote_chr:
                    quote_state = False
                    quote_chr = None
                    continue
                # data inside of methods: '(' with a non-empty argument
                if i < len(marker)-1 and not quote_state and marker[i] == "(" and marker[i+1] != ")":
                    if value:
                        stack.append(''.join(value))
                        value = []
                    nested_state = True
                    continue
                # data inside of methods: closing ')' — the argument is
                # evaluated first, so it is inserted at the stack front
                if nested_state and i < len(marker) and not quote_state and marker[i] == ")":
                    if value:
                        stack.insert(0, ''.join(value))
                        value = []
                        # syntax errrs may cause '()()' in item
                        stack[1] += "()"
                    nested_state = False
                    continue
                # seperators
                if not quote_state and marker[i] == ".":
                    if value:
                        stack.append(''.join(value))
                        value = []
                    continue
                # otherwise
                value.append(marker[i])
                # debug
                #print(marker[i])
            # reached the end of the marker
            if value:
                stack.append(''.join(value))
            ''' Perform markup on input using markers
            '''
            for item in stack:
                # debug
                #print(item)
                ''' begin - i am not sure if this really needed any more
                '''
                # string literals
                if item.startswith("|"):
                    markup_value = item.lstrip("|")
                    continue
                ''' end
                '''
                # methods: pipe the running markup_value through the mapped callable
                if item.endswith("()"):
                    # search for methods in marker_map
                    if item.rstrip("()") not in self.marker_map:
                        print("WARN - '%s' is not a valid marker methods" %item)
                        markup_value = item
                        continue
                    markup_value = eval(self.marker_map[item.rstrip("()")])(self,markup_value)
                    continue
                # attributes
                if ":" in item:
                    items = item.split(":")
                    object = items[0]
                    #print(self.attributes)
                    # is this a marker for a local attribute?
                    if object in self.attributes:
                        # yes prepend this: to marker
                        items.insert(1,object)
                        object = "this"
                    # is there a type for this marker
                    if object not in self.marker_map:
                        #debug
                        #print("WARN - '%s' is not a valid fnr attribute in %s" %(object,item))
                        #print(template)
                        break
                    keys = self.colon_seperated_to_brackets(":".join(items[1:]))
                    # debug
                    #print(self.marker_map[object])
                    #print(keys)
                    #print(markup_value)
                    try:
                        markup_value = eval(self.marker_map[object]+keys)
                    except KeyError:
                        #debug
                        #print('KeyError in Content.fnr()')
                        pass
                    except TypeError:
                        # debug
                        #print('TypeError in Content.fnr()')
                        pass
                    except: traceback.print_exc()
                    continue
                # if this attribute is for the local scope (this)
                if item in eval(self.marker_map['this']):
                    markup_value = eval(self.marker_map['this'])[item]
                    continue
                # attribute object (or raw)
                if item in self.marker_map:
                    markup_value = eval(self.marker_map[item])
                    continue
            # end of loop
            # replace marker with markup_value (empty string/0/[] count as results)
            if markup_value or markup_value == '' or markup_value == 0 or markup_value == []:
                if binary:
                    # the markup_value is a binary object and should be considered to be a string
                    template = template.replace("{{%s}}" %marker, str(markup_value))
                elif raw:
                    #print("raw")
                    return markup_value
                else:
                    if not isinstance(markup_value,unicode):
                        try:
                            markup_value = unicode(markup_value)
                        except UnicodeDecodeError:
                            markup_value = unicode(markup_value.decode('UTF-8'))
                    template = template.replace(u"{{%s}}" %marker, markup_value)
            '''debug - warning: lots of output, but this is useful if you need to see
               markups at this granular level.
            '''
            #print(marker,markup_value.__repr__())
        ''' Can we make some sort of check here to see if there are markers that can still be replaced?
        '''
        # fixed point reached: nothing was substituted this pass
        if template == template_original:
            return template
        # increment count
        count += 1
        if limit == count:
            return template
    # iterate
    # (unreachable: the while-loop above only exits via return)
    return None
# data handling
def load_data(self,conf):
    ''' This method will load data into the current element or processor
        and optionally can be used to store the data for access by other
        elements and processors in the content tree.

        directives are attributes of a Data Object as defined in element and
        processor configurations

        data: # name given in config as specified by the element or processor
            # attributes
            value:  # A pointer to data in marker syntax
            format: # the format of the value: csv, dict, int, json, list, python, string, xml, yaml (defaut=string)
            store:  # (optional) name to store data as in self.top
            entry:  # (optional) point in opject to load or store
            # csv attributes
            reader: list or dict (deafult=dict)
            kwargs:
                # will accpet any keyword arg for csv function
                delimter: # an optional delimer: ie: ";", "\t"

        NOTE(review): the 'dict'/'int'/'list'/'python' formats and the
        'entry'/'store' directives run eval()/exec() on config-derived
        strings — only safe with trusted configurations.
    '''
    # debug
    #print('clases.content.Content.load_data')
    # conf check
    #debug
    #print(conf)
    # value
    ''' Value can be any python object including None or an empty object
    '''
    if 'value' not in conf:
        print("error value not in configuration")
        return False
    #''' This needs to be fixed to allow empty objects
    #'''
    #if not conf.get('value'):
    #    print("error value not given")
    #    return False
    # format
    conf.setdefault('format','string')
    # store
    conf.setdefault('store',False)
    # data
    data = conf['value']
    if isinstance(data,basestring) and 'nomarkup' not in conf:
        # markup data
        data = self.fnr(data)
    # debug
    #print(data)
    # format data
    # CSV
    if conf['format'] == 'csv':
        #print('format is csv')
        import csv
        reader = conf.get('reader','dict')
        kwargs = conf.get('kwargs',{})
        if reader == 'list':
            try:
                tmp_data = csv.reader(data.split('\n'),**kwargs)
                self.data = []
                for item in tmp_data:
                    if item:
                        self.data.append(item)
            except: traceback.print_exc()
        else:
            try:
                tmp_data = csv.DictReader(data.split('\n'),**kwargs)
                self.data = []
                for item in tmp_data:
                    # clean up dictionary keys
                    tmp_dict = {}
                    for key in item:
                        tmp_dict[key.strip()] = item[key].strip()
                    self.data.append(tmp_dict)
            except: traceback.print_exc()
    # dict
    if conf['format'] == 'dict':
        #print('format is dict')
        if isinstance(data, dict):
            self.data = data
        else:
            try:
                self.data = eval(data)
                if not isinstance(self.data,dict):
                    print('warning data not a dictionary')
            except: traceback.print_exc()
    # int
    if conf['format'] == 'int':
        #print('format is int')
        if isinstance(data, int):
            self.data = data
        else:
            try:
                self.data = eval(data)
                if not isinstance(self.data,int):
                    print('warning data not a int')
            except: traceback.print_exc()
    # json
    if conf['format'] == 'json':
        #print('format is json')
        import json
        try:
            self.data = json.loads(data, parse_float=Decimal, strict=False)
        except: traceback.print_exc()
    # list
    if conf['format'] == 'list':
        #print('format is list')
        #self.data = eval(data)
        #print('here')
        if isinstance(data, list):
            self.data = data
        else:
            try:
                self.data = eval(data)
                if not isinstance(self.data,list):
                    print('warning - data not a list')
            except NameError:
                # Item not in top?
                pass
            except SyntaxError:
                # missing data obj?
                pass
            except: traceback.print_exc()
    # python
    if conf['format'] == 'python':
        #print('format is python')
        try:
            self.data = eval(data)
            print('python data is of the type %s' %type(self.data))
        except: traceback.print_exc()
    # raw
    if conf['format'] == 'raw':
        #print('format is raw')
        self.data = data
    # string
    if conf['format'] == 'string':
        #print('format is string')
        self.data = unicode(data)
    # yaml  (sic: this branch actually handles xml)
    if conf['format'] == 'xml':
        #print('format is xml')
        import xmltodict
        try:
            self.data = xmltodict.parse(data)
        except: traceback.print_exc()
    # yaml
    if conf['format'] == 'yaml':
        #print('format is yaml')
        import yaml
        try:
            self.data = yaml.load(data)
        except: traceback.print_exc()
    # default (string)
    if not self.data:
        #print('format is default')
        self.data = unicode(data)
    # entry point: descend into the loaded object via a {{a:b:0}} path
    if conf.get('entry'):
        if conf['entry'].startswith('{{') and conf['entry'].endswith('}}'):
            entry = self.colon_seperated_to_brackets(conf['entry'].lstrip('{{').rstrip('}}'))
            exec('self.data = self.data%s' %entry)
    # store
    if conf.get('store'):
        conf['store2'] = conf['store']
        # is this store request for top (global) or this (local)
        if conf['store2'].startswith('this'):
            conf['store2'] = conf['store2'].replace("this","attributes")
            # covert colons to brackets
            brackets = self.colon_seperated_to_brackets(":".join(conf['store2'].split(":")[1:]))
            conf['store2'] = "attributes%s" %brackets
            if 'merge' not in conf:
                exec('self.%s = self.data' %conf['store2'])
                return True
            # figure out if attribute already exists
            try:
                exec('x = self.%s' %conf['store2'])
            except KeyError:
                # It does not exist treat as normal
                exec('self.%s = self.data' %conf['store2'])
                return True
            # now that we know it exists try to merge
            if eval('isinstance(self.%s, dict)' %conf['store2']):
                # update dict
                exec('self.%s.update(self.data)' %conf['store2'])
            if eval('isinstance(self.%s, list)' %conf['store2']):
                if conf['merge'] == 'append':
                    # append item to list
                    exec('self.%s.append(self.data)' %conf['store2'])
                else:
                    # extend list with items
                    exec('self.%s.extend(self.data)' %conf['store2'])
            if eval('isinstance(self.%s, basestring)' %conf['store2']):
                # append item to string
                exec('self.%s += self.data' %conf['store2'])
            return True
        # top (global) storage
        # does conf['store'] have a prefix?
        if not ":" in conf['store2']:
            conf['store2'] = "top:%s" %conf['store']
        # convert colons to dots
        conf['store2'] = conf['store2'].replace(":",".")
        if 'merge' in conf and conf['store'] in self.top.marker_map:
            if eval('isinstance(self.%s, dict)' %conf['store2']):
                # merge with top item
                exec('self.%s.update(self.data)' %conf['store2'])
                # debug
                #print('updated top.%s with self.data' %conf['store'])
            if eval('isinstance(self.%s, list)' %conf['store2']):
                if conf['merge'] == 'append':
                    # merge with top item
                    exec('self.%s.append(self.data)' %conf['store2'])
                else:
                    # merge with top item
                    exec('self.%s.extend(self.data)' %conf['store2'])
                    # debug
                    #print('extended top.%s with self.data' %conf['store'])
            if eval('isinstance(self.%s, basestring)' %conf['store2']):
                # merge with top item
                exec('self.%s += self.data' %conf['store2'])
                # debug
                #print('concatonated top.%s and self.data' %conf['store'])
        else:
            # add to top
            #print('self.%s = self.data' %conf['store2'])
            exec('self.%s = self.data' %conf['store2'])
            # add to top marker_map
            self.top.marker_map.update({conf['store']: 'self.%s' %conf['store2']})
            #print('stored self.data as top.%s' %conf['store'])
    #print(self.data)
    return True
def colon_seperated_to_brackets(self,input):
    """Convert a colon path ("a:b:0") to subscript syntax ("['a']['b'][0]").

    Escaped colons (backslash-colon) survive as literal colons inside a
    segment; all-digit segments (optionally negative) become integer
    indexes; quotes inside segments are escaped. Empty input is returned
    unchanged. (Python 2: uses ``unicode``.)
    """
    if input != "":
        input = unicode(input)
        # format input into a bracketed format
        # replace escaped colons with a placeholder so split() skips them
        input = input.replace("\:","_-_-_-_-_-_")
        # split segments on colons
        segments = input.split(":")
        output = ""
        for segment in segments:
            # debug
            #print(segment)
            # backwards compatibility
            if segment.startswith('|'):
                segment = segment.lstrip('|')
            # record digits as numbers
            if segment.lstrip('-').isdigit():
                # add segment to entry
                output += "[%s]" %segment
                continue
            # reinstate colons in segments
            segment = segment.replace("_-_-_-_-_-_",":")
            # escape quotes
            segment = segment.replace("'",r"\'").replace('"',r'\"')
            # add segment to entry
            output += "['%s']" %segment
        return output
    return input
|
from csv import reader, DictReader
from sys import argv
def main():
    """CS50 'DNA': identify a person from STR repeat counts.

    Usage: dna.py <File: STR counts CSV> <File: DNA sequence>

    Reads the database header to learn which STRs to count, computes the
    longest run of consecutive repeats of each STR in the sequence, then
    prints the name of the first person whose counts all match, or
    "no match".

    Bug fixes versus the original:
    - the 'skip ahead after counting a run' logic used ``continue`` inside
      an inner ``while`` and ``i += STRlen`` inside a ``for`` loop; neither
      advances a Python for-loop index, so runs were re-scanned and the
      backward-looking comparison miscounted;
    - the placeholder initialization ``STRs[element] = 69`` is gone; counts
      are computed directly.
    """
    if len(argv) != 3:
        print("Usage: dna.py <File:STR count> <File:DNA Seq>")
        exit()

    # read in the DNA sequence as one string
    with open(argv[2]) as dnafile:
        dnaseq = dnafile.read()

    # learn the STR names from the database header (everything after 'name')
    with open(argv[1]) as countfile:
        header = next(reader(countfile))
    str_names = header[1:]

    # longest consecutive-repeat count for each STR in the sequence
    strs = {key: _longest_run(dnaseq, key) for key in str_names}

    # compare against each person's profile; first full match wins
    with open(argv[1], newline='') as peoplefile:
        for person in DictReader(peoplefile):
            if all(strs[name] == int(person[name]) for name in strs):
                print(person['name'])
                exit()
    print("no match")


def _longest_run(seq, key):
    """Longest run of consecutive, back-to-back repeats of ``key`` in ``seq``."""
    klen = len(key)
    best = 0
    i = 0
    n = len(seq)
    while i < n:
        run = 0
        j = i
        # count repeats starting exactly at position i
        while seq[j:j + klen] == key:
            run += 1
            j += klen
        if run > best:
            best = run
        # hop past a counted run (any run starting inside it is shorter)
        i += 1 if run == 0 else klen * run
    return best


if __name__ == "__main__":
    main()
|
# Python 2 script: export character-model embeddings for a translation vocab.
import cPickle
from embeddingsModel import CharModel

# input artefacts: trained pylearn2 model, char/word vocabularies
model_path = '../pkls/full_vocabrnn_2tanh.pkl'
chars_path = '/data/lisatmp3/devincol/data/translation_char_vocab.en.pkl'
vocab_path = '/data/lisatmp3/chokyun/mt/vocab.30k/bitexts.selected/vocab.en.pkl'
words_path = '/data/lisatmp3/devincol/data/translation_vocab_aschar.en.pkl'

print "Loading Data"
with open(vocab_path) as f:
    vocab = cPickle.load(f)
    ivocab = {v:k for k,v in vocab.iteritems()}  # index -> word
with open(model_path) as f:
    pylearn2_model = cPickle.load(f)
with open(words_path) as f:
    words = cPickle.load(f)
with open(chars_path) as f:
    char_dict = cPickle.load(f)

inv_dict = {v:k for k,v in char_dict.items()}  # char index -> char
# index 0 is remapped to the last vocabulary entry and used as 'unknown'
inv_dict[0] = inv_dict[len(inv_dict.keys())-1]
unknown = inv_dict[0]

print "Building Model"
# fprop through layer 0 only, or through layers 0+1 (projection variant, unused)
fpropNoProjLayer = pylearn2_model.layers[0].fprop
fpropProjLayer = lambda state_below: pylearn2_model.layers[1].fprop(pylearn2_model.layers[0].fprop(state_below))
model = CharModel(pylearn2_model, char_dict, fprop=fpropNoProjLayer)
model.genEmbeddings(ivocab)

# unused nearest-neighbour helper sketch, kept for reference:
# def closest(vec, n):
#     words = []
#     dists = [(cosine(vec,rnn_embeddings[i]), i) for i in range(len(rnn_embeddings))]
#     for k in range(n):
#         index = min(dists)[1]
#         dists[index] = (float("inf"),index)
#         words.append(index)
#     return words

# dump the computed embeddings for downstream use
with open("/data/lisatmp3/devincol/embeddings/deepoutput_rnnSkipgram300.pkl", 'w') as f:
    cPickle.dump(model.embeddings, f)
|
from django import forms
from .models import Valuta, ValutaValue
class ConverterForm(forms.Form):
    """Currency converter input: source/target currency and the amount to convert."""
    #from_valuta = forms.CharField(max_length=3, widget=forms.TextInput(attrs={'class': 'special'}))
    #to_valuta = forms.CharField(max_length=3)
    #from_value = forms.FloatField()
    # dropdowns populated from all Valuta rows; UIkit CSS classes for styling
    valuta_from = forms.ModelChoiceField(Valuta.objects.all(), widget=forms.Select(attrs={'class': 'uk-select'}))
    valuta_to = forms.ModelChoiceField(Valuta.objects.all(), widget=forms.Select(attrs={'class': 'uk-select'}))
    value_from = forms.FloatField(widget=forms.NumberInput(attrs={'class': 'uk-input'}))
|
#!/usr/bin/python3
from networkScan import networkScanner, get_ip_network
from iotIdent import iotIdentifier
from portScan import portScanner
from vulnScan import vulnerabilityScanner, vuln_flag
from arpSpoof import arpSpoofer, arp_flag
from snortController import snortController, snort_flag
from counter import counterTraffic, counter_flag, write_data_to_file, resetData
from alertUser import sendEmail, sendSMS
from datetime import datetime
import time
import json
import threading
import subprocess, signal
import os
import sys
# Global Variable
kill_flag = False
def threadForVulnScan(keepRunning):
    """Run the vulnerability scanner, either once or in a ~12 h loop.

    With keepRunning=True the scan repeats until the global kill_flag is
    raised; the half-day pause is sliced into 5 s naps so shutdown stays
    responsive.
    """
    global kill_flag
    if not keepRunning:
        print("Scanning for vulnerabilities only once...")
        vulnerabilityScanner()
        print("Vulnerabilities scanning completed!")
        return
    while True:
        print("Scanning for vulnerabilities infinitely...")
        vulnerabilityScanner()
        print("Vulnerabilities scanning completed!")
        # 8640 * 5 s == 12 hours between scans
        for _ in range(8640):
            if kill_flag:
                break
            time.sleep(5)
        if kill_flag:
            break
def snortSendEmail():
    """Every 60 s, archive new snort alert logs and notify the user.

    Moves each file found under snortDir/tmp/<sensor>/ to a timestamped
    .log file under snortDir/<sensor>/, then sends one email (with the
    last archived file attached) and one SMS.

    Fixed: the original referenced `filename` outside the file loop, which
    raised NameError on a pass with no new snort output, and it sent an
    alert email/SMS every minute even when nothing was detected. Alerts
    now fire only when at least one log file was actually archived.
    """
    global kill_flag
    tmpDir = "snortDir/tmp/"
    persistentDir = "snortDir/"
    while not kill_flag:
        date = datetime.now().strftime("%Y-%m-%d_%I:%M:%S%p")
        moved = []  # log files archived during this pass
        for directory in os.listdir(tmpDir):
            for file in os.listdir(tmpDir + directory):
                # create the per-sensor folder in the persistent area on demand
                if not directory in os.listdir(persistentDir):
                    os.mkdir(persistentDir + directory)
                filename = persistentDir + directory + '/' + file + '_' + date + ".log"
                os.replace(tmpDir + directory + '/' + file, filename)
                moved.append(filename)
        if moved:
            # send email with the most recently archived log attached
            print("Sending snort email...")
            subj = "[Home Security] Alert: Possible attack or intrusion in your network"
            msg = "Dear user,\n\nThis email is reporting an anomaly that was detected in your network.\nIt seems like someone is attacking your IoT devices or exploiting them to have access to your network.\nWe strongly recommend you to read carefully the attached logging file.\n\nBest regards,\nHomeSecurity Team"
            sendEmail(subj, msg, moved[-1])
            print("Email sent!")
            print("Sending snort SMS...")
            sms = "\nWe have detected an anomaly in your network! It seems like someone is attacking your IoT devices or exploiting them to have access to your network. We strongly recommend you to read carefully the logging file sent by email.\nBest regards,\nHomeSecurity Team"
            sendSMS(sms)
            print("SMS sent!")
        time.sleep(60)
def counterSendEmail():
    """Periodically archive the traffic-counter JSON and email/SMS it to the user.

    NOTE(review): the email subject says "Weekly update" but the loop sleeps
    only 5 minutes (60*5 s) — looks like a shortened interval for testing;
    confirm the intended period.
    """
    global kill_flag
    tmpFile = "counters/tmp/counter.json"
    counterDir = "counters/"
    while not kill_flag:
        time.sleep(60*5)
        date = datetime.now().strftime("%Y-%m-%d_%I:%M:%S%p")
        # archive the counter snapshot under a timestamped name, then reset it
        # NOTE(review): os.replace raises FileNotFoundError if the counter
        # thread has not produced tmpFile yet — verify startup ordering.
        filename = counterDir + date + ".json"
        os.replace(tmpFile, filename)
        resetData()
        #send email
        print("Sending counter email...")
        subj = "[Home Security] Report: Weekly update of your IoT devices' traffic statistics"
        msg = "Dear user,\n\nThe weekly report of your IoT devices' traffic is in the attached file.\n\nBest regards,\nHomeSecurity Team"
        sendEmail(subj, msg, filename)
        print("Email sent!")
        print("Sending snort SMS...")
        sms = "\nWe have sent you the weekly report of your IoT devices' traffic by email.\nBest regards,\nHomeSecurity Team"
        sendSMS(sms)
        print("SMS sent!")
"""
def mainthread():# Não sei o nome desta Thread
oldData = networkScanner()
iotIdentifier()
time.sleep(15)
while True:
print("Scanning the network for devices...")
newData = networkScanner()
#print(newData)
if oldData['devices'] != newData['devices']:
print("Change detected!")
time.sleep(10)
newData = networkScanner()
print(newData)
print("Identifying IoT devices...")
iotIdentifier()
time.sleep(10)
print("Scanning ports...")
if portScanner():
print("Launching thread...")
tempThread = threading.Thread(target=threadForVulnScan, args=[False])
tempThread.start()
print("Thread successfully launched!")
print("Finished! Starting again...\n")
oldData = newData
time.sleep(20)
"""
def main():
    """Entry point: take a baseline network scan, launch the monitoring
    threads, then rescan in a loop until interrupted; finally perform an
    ordered shutdown of every worker."""
    global kill_flag
    print("Starting program...")
    oldData = networkScanner(1)
    iotIdentifier()
    print("Launching threads...")
    # all workers are daemon threads so a hard exit cannot be blocked by them
    vulnScanThread = threading.Thread(target=threadForVulnScan, args=[True], daemon=True)
    vulnScanThread.start()
    arpThread = threading.Thread(target=arpSpoofer, args=[], daemon=True)
    arpThread.start()
    time.sleep(10)
    net = get_ip_network()
    snortThread = threading.Thread(target=snortController, args=(net ,"eth0"), daemon=True)
    snortThread.start()
    time.sleep(10)
    counterThread = threading.Thread(target=counterTraffic, args=["eth0"],daemon=True)
    counterThread.start()
    snortEmailThread = threading.Thread(target=snortSendEmail, args=[],daemon=True)
    snortEmailThread.start()
    counterEmailThread = threading.Thread(target=counterSendEmail, args=[],daemon=True)
    counterEmailThread.start()
    print("Threads successfully launched!")
    try:
        while True:
            print("Scanning the network for devices...")
            newData = networkScanner(3)
            #print(newData)
            if oldData['devices'] != newData['devices']:
                # double-check: rescan after a short wait to filter out transient changes
                print("Change detected! (1)")
                time.sleep(10)
                newData = networkScanner(3)
                #print(newData)
                if oldData['devices'] != newData['devices']:
                    print("Change detected! (2)")
                    print("Identifying IoT devices...")
                    iotIdentifier()
                    time.sleep(10)
                    print("Scanning ports...")
                    if portScanner():
                        # open ports found: run a one-shot vulnerability scan
                        print("Launching thread...")
                        tempThread = threading.Thread(target=threadForVulnScan, args=[False])
                        tempThread.start()
                        print("Thread successfully launched!")
            print("Finished! Starting again...\n")
            oldData = newData
            time.sleep(10)
            write_data_to_file()
    except:
        # NOTE(review): bare except — doubles as the Ctrl-C handler but also
        # swallows real errors silently; consider `except KeyboardInterrupt`.
        pass
    print("\nExiting the program...")
    kill_flag = True
    # signal each worker's stop flag, then join it before moving on
    vuln_flag()
    print("Ending vuln thread...")
    vulnScanThread.join()
    print("Vuln thread terminated...")
    snort_flag()
    print("Ending snort thread...")
    snortThread.join()
    print("Snort thread terminated...")
    arp_flag()
    print("Ending ettercap thread...")
    arpThread.join()
    print("Ettercap thread terminated...")
    counter_flag()
    print("Ending counter thread...")
    counterThread.join()
    print("Killing remaining processes...")
    # sweep the process table for leftover scanner/sniffer processes
    p = subprocess.Popen(['ps', '-A'], universal_newlines=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    p.wait()
    for line in p.stdout:
        if 'ettercap' in line or 'snort' in line or 'nmap' in line or 'searchsploit' in line:
            pid = int(line.split(None, 1)[0])
            print("Killing ", pid, "...")
            os.kill(pid, signal.SIGKILL)
            print(pid, " killed!")
    time.sleep(3)
    print("Program successfully terminated!\n")

if __name__ == "__main__":
    main()
    #sys.exit(0)
|
# -*- coding: utf-8 -*-
# pip install aoc-tools-dannyboywoop
from aoc_tools import Advent_Timer
def readfile(filename):
    """Read *filename* and return its lines as a list of ints."""
    with open(filename, 'r') as stream:
        numbers = []
        for raw_line in stream:
            numbers.append(int(raw_line.strip()))
        return numbers
class jump_program:
    """Jump-offset interpreter (Advent of Code 2017, day 5).

    Starts at position 0 in a list of jump offsets. Each step it mutates
    the current offset via offset_func (applied to the offset's old value)
    and then jumps by the old offset, counting total steps until the
    position leaves the list.
    """

    def __init__(self, code_in, offset_func_in=lambda x: 1):
        self.code = code_in          # mutable list of jump offsets
        self.current_pos = 0         # instruction pointer
        self.total_steps = 0         # number of jumps executed
        self.offset_func = offset_func_in  # how each visited offset changes

    def jump(self):
        """Execute one jump: mutate the current offset, move by its old value."""
        here = self.current_pos
        offset = self.code[here]
        self.code[here] = offset + self.offset_func(offset)
        self.current_pos = here + offset
        self.total_steps += 1

    def jump_till_exit(self):
        """Keep jumping until the position falls outside the code."""
        size = len(self.code)
        while 0 <= self.current_pos < size:
            self.jump()
def part1(filename):
    """Solve part 1: count jumps until exit, every visited offset grows by 1.

    Improvement: the original only printed the answer; it now also returns
    it (backward compatible — existing callers ignore the return value).
    """
    computer = jump_program(readfile(filename))
    computer.jump_till_exit()
    print("Solution: {}.".format(computer.total_steps))
    return computer.total_steps
def part2(filename):
    """Solve part 2: offsets >= 3 shrink by 1 after a jump, others grow by 1.

    Improvement: the original only printed the answer; it now also returns
    it (backward compatible — existing callers ignore the return value).
    """
    computer = jump_program(readfile(filename), lambda x: 1 if x < 3 else -1)
    computer.jump_till_exit()
    print("Solution: {}.".format(computer.total_steps))
    return computer.total_steps
if __name__ == "__main__":
    # run both parts against the day-5 input, timing each with Advent_Timer
    timer = Advent_Timer()
    print("Part 1:")
    part1("../data/day5.dat")
    timer.checkpoint_hit()
    print("\nPart 2:")
    part2("../data/day5.dat")
    timer.checkpoint_hit()
    timer.end_hit()
|
# import torch
# A = torch.eye(10,10)
# A[:,3] = float('nan')
# print(A)
# B = torch.isnan(A)
# print(B)
# # out, cnt = torch.unique(B, axis=0,return_counts=True)
# # print(out)
# # print(cnt)
# print(torch.__version__)
# C = torch.count_nonzero(C)
# print(C)
# import numpy as np
# A = np.eye(3,3)
# A[:,0] = float('nan')
# print(A)
# B = np.isnan(A)
# print(B)
# print(np.count_nonzero(B == False, axis=1))
import torch

# Demo: fill a 2x3 matrix with values i*2+j, then normalize each row to sum
# to 1. Rows summing to 0 are divided by a sentinel 1 to avoid NaNs.
A = torch.eye(2, 3)
for flat_index in range(6):
    A[flat_index // 3, flat_index % 3] = flat_index
print(A)

# per-row totals, kept as a column vector so division broadcasts row-wise
rowsum = A.sum(dim=1, keepdim=True)
print(rowsum)

rowsum[rowsum == 0] = 1  # guard against division by zero
A = A / rowsum
print(A)
from featurize_jupyterlab.core import Loss
import torch.nn.functional as F
class NllLoss(Loss):
    """Simple wrap of PyTorch's nll_loss.

    Calling an instance returns the F.nll_loss function itself (not a loss
    value); the owning framework is expected to invoke the returned callable
    with (input, target).
    """
    def __call__(self):
        return F.nll_loss
|
# Enumerate XOR combinations of small ints and print their binary forms
# together with the popcount (number of set bits) of i ^ j.
for i in range(5):
    for j in range(5):
        for k in range(5):
            # bug fix: str.count needs a string — bin(x).count(1) raised
            # TypeError on the very first iteration; count('1') is correct.
            print(i, j, i ^ j, bin(i ^ j), bin(i ^ j).count('1'))
            print(j, k, j ^ k, bin(j ^ k))
|
#!/Users/munraito/opt/anaconda3/envs/bdt-python-course/bin/python
import re
import logging
import logging.config
import json
from argparse import ArgumentParser
from lxml import etree
import yaml
APPLICATION_NAME = 'stackoverflow_analytics'
DEFAULT_QUESTION_PATH = 'stackoverflow_posts_sample.xml'
DEFAULT_STOPWORDS_PATH = 'stop_words_en.txt'
DEFAULT_QUERIES_PATH = 'sample_queries.csv'
DEFAULT_LOGGING_CONFIG_PATH = 'logging.conf.yml'
logger = logging.getLogger(APPLICATION_NAME)
class StackOverflowAnalyzer:
    """Class for analyzing XMLs from StackOverflow.

    Loads a stop-word list and a questions XML dump, then answers
    "top-N words by summed score over a year range" queries.

    Fixes over the original: file handles are closed via context managers,
    the two bare ``except:`` clauses are narrowed to the expected errors,
    and stop-word membership tests use a set instead of an O(n) list scan.
    """

    def __init__(self, stopwords_filepath: str, questions_filepath: str):
        """Initialize stopwords and questions storage, then parse both files."""
        self.stop_words = []
        self.questions = []
        self.parse_stopwords(stopwords_filepath)
        self.parse_questions(questions_filepath)

    def parse_stopwords(self, filepath: str) -> None:
        """Read one stop word per line (KOI8-R encoded) into self.stop_words."""
        with open(filepath, encoding='koi8-r') as stream:
            for line in stream:
                self.stop_words.append(line[:-1])  # drop the trailing newline

    def parse_questions(self, filepath: str) -> None:
        """Parse question rows (PostTypeId == '1') into self.questions.

        Each question dict holds its creation year, score and title words
        (lower-cased, de-duplicated, stop words removed). Malformed rows
        are skipped.
        """
        stop_words = set(self.stop_words)  # O(1) membership per word
        with open(filepath, encoding='utf-8') as stream:
            for line in stream:
                try:
                    xml = etree.fromstring(line)
                    if xml.get('PostTypeId') == '1':
                        question = {'year': int(xml.get('CreationDate')[:4]),
                                    'score': int(xml.get('Score'))}
                        all_words = set(re.findall(r'\w+', xml.get('Title').lower()))
                        question['words'] = [word for word in all_words if word not in stop_words]
                        self.questions.append(question)
                except (etree.XMLSyntaxError, AttributeError, TypeError, ValueError):
                    # unparsable row or missing attribute: skip it
                    continue
        logger.info('process XML dataset, ready to serve queries')

    def get_words_from_questions(self, start_year: int, end_year: int) -> dict:
        """Return {word: summed score} over questions within [start_year, end_year]."""
        words = {}
        for question in self.questions:
            if start_year <= question['year'] <= end_year:
                for word in question['words']:
                    words[word] = words.get(word, 0) + question['score']
        return words

    def query(self, filepath: str) -> list:
        """Answer "start,end,top_n" CSV query lines; return a list of JSON strings."""
        answers = []
        with open(filepath) as stream:
            for line in stream:
                try:
                    start_year, end_year, top_n = list(map(int, line.split(',')))
                except ValueError:
                    # unparsable query line: skip it
                    continue
                logger.debug('got query "%d,%d,%d"', start_year, end_year, top_n)
                words = self.get_words_from_questions(start_year, end_year)
                # rank by score descending, then alphabetically for ties
                top_w = sorted(words.items(), key=lambda x: (-x[1], x[0]))[:top_n]
                if len(top_w) < top_n:
                    logger.warning(
                        'not enough data to answer, found %d words out of %d for period "%d,%d"',
                        len(top_w), top_n, start_year, end_year)
                answers.append(json.dumps({'start': start_year, 'end': end_year, 'top': top_w}))
        logger.info('finish processing queries')
        return answers
def setup_logging():
    """Configure the logging subsystem from the default YAML config file."""
    with open(DEFAULT_LOGGING_CONFIG_PATH) as handle:
        config = yaml.safe_load(handle)
    logging.config.dictConfig(config)
def setup_parser(parser: ArgumentParser):
    """Register the CLI arguments (questions, stop-words and queries files)."""
    arguments = (
        ("--questions", "questions_file", DEFAULT_QUESTION_PATH),
        ("--stop-words", "stopwords_file", DEFAULT_STOPWORDS_PATH),
        ("--queries", "queries_file", DEFAULT_QUERIES_PATH),
    )
    for flag, dest, default in arguments:
        parser.add_argument(flag, dest=dest, default=default)
def main():
    """main function: configure logging, parse CLI args, run all queries
    and print one JSON answer per line."""
    setup_logging()
    parser = ArgumentParser(
        prog='stackoverflow_analytics',
        description="tool to analyze xml from stackoverflow",
    )
    setup_parser(parser)
    args = parser.parse_args()
    analyzer = StackOverflowAnalyzer(args.stopwords_file, args.questions_file)
    for ans in analyzer.query(args.queries_file):
        print(ans)

if __name__ == '__main__':
    main()
|
from .puzzleboard import WordSolution
def test_wordsolution_can_construct():
    """WordSolution should be constructible from a plain word string."""
    ws = WordSolution('WORD')
    assert ws is not None
def test_wordsolution_is_iterable():
    """A fresh WordSolution yields (key, value) pairs matching its default state."""
    d = {'word': 'WORD', 'placed': False, 'origin': {'x': 0, 'y': 0}, 'direction': None, 'points': []}
    ws = WordSolution('WORD')
    # can leverage iterability with the dict protocol
    assert d == dict(ws)
|
import os, sys
import numpy as np
import pickle
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torch.distributions as torchD
import torch, seaborn as sns
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
from utils import *
"""
Copy the network structures here for easier import during testing and plotting
"""
class MLP_Encoder(nn.Module):
    """Deterministic encoder: flatten the input, one 400-unit ReLU layer,
    then project to the latent dimension.

    Expected kwargs: input_shape (e.g. [1, 28, 28]) and latent_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_Encoder, self).__init__()
        flat_dim = int(np.prod(kwargs["input_shape"]))
        self.model = nn.Sequential(
            nn.Linear(flat_dim, 400),
            nn.ReLU(),
            nn.Linear(400, kwargs["latent_dim"]),
        )

    def forward(self, x):
        """Return the latent code for a batch of inputs."""
        flattened = torch.flatten(x, start_dim=1)
        return self.model(flattened)
class MLP_Decoder(nn.Module):
    """Deterministic decoder: latent vector -> 400-unit ReLU layer ->
    sigmoid pixels reshaped to the original input shape.

    Expected kwargs: input_shape and latent_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_Decoder, self).__init__()
        out_dim = int(np.prod(kwargs["input_shape"]))
        self.model = nn.Sequential(
            nn.Linear(kwargs["latent_dim"], 400),
            nn.ReLU(),
            nn.Linear(400, out_dim),
            nn.Sigmoid(),  # push the pixels in range (0,1)
        )
        self.output_shape = kwargs["input_shape"]

    def forward(self, latent):
        """Decode a batch of latent vectors into tensors of self.output_shape."""
        flat = self.model(latent)
        return flat.view([-1] + self.output_shape)
class MLP_AE(nn.Module):
    """Deterministic autoencoder pairing MLP_Encoder with MLP_Decoder.

    Expected kwargs: input_shape (e.g. [1, 28, 28]) and latent_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_AE, self).__init__()
        self.encoder = MLP_Encoder(**kwargs)
        self.decoder = MLP_Decoder(**kwargs)

    def forward(self, x):
        """Return (latent code, reconstruction) for a batch."""
        code = self.encoder(x)
        return code, self.decoder(code)

    def sample_latent_embedding(self, latent, sd=1, N_samples=1):
        """
        AE returns scalar value and we use that as mean and predefined default value for standard deviation (sd)
        """
        return torchD.Normal(latent, sd).sample((N_samples,))
class MLP_V_Encoder(nn.Module):
    """VAE encoder trunk: flatten the input and apply one ReLU layer.

    Expected kwargs: input_shape and enc_dim. The posterior parameters
    (mean/logvar) are produced by the owning module, not here.
    """

    def __init__(self, **kwargs):
        super(MLP_V_Encoder, self).__init__()
        features = int(np.prod(kwargs["input_shape"]))
        self.model = nn.Sequential(
            nn.Linear(in_features=features, out_features=kwargs["enc_dim"]),
            nn.ReLU(),
        )

    def forward(self, x):
        """Return the hidden activation for a flattened batch."""
        return self.model(torch.flatten(x, start_dim=1))
class MLP_V_Decoder(nn.Module):
    """VAE decoder: latent -> enc_dim ReLU layer -> sigmoid pixels.

    Expected kwargs: input_shape, enc_dim and latent_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_V_Decoder, self).__init__()
        hidden = kwargs["enc_dim"]
        pixels = int(np.prod(kwargs["input_shape"]))
        self.model = nn.Sequential(
            nn.Linear(in_features=kwargs["latent_dim"], out_features=hidden),
            nn.ReLU(),
            nn.Linear(in_features=hidden, out_features=pixels),
            nn.Sigmoid(),  # push the pixels in range (0,1)
        )
        self.output_shape = kwargs["input_shape"]

    def forward(self, latent):
        """Decode a batch of latent vectors to tensors shaped self.output_shape."""
        flat = self.model(latent)
        return flat.view([-1] + self.output_shape)
class MLP_VAE(nn.Module):
    """
    MLP variational autoencoder (expected kwargs: input_shape, enc_dim, latent_dim).
    TODO: check whether to use sum or mean for the probability part
    """
    def __init__(self, **kwargs):
        # e.g. kwargs["input_shape"] = [1,28,28], kwargs["latent_dim"] = 4
        super(MLP_VAE, self).__init__()
        self.encoder = MLP_V_Encoder(**kwargs)
        self.decoder = MLP_V_Decoder(**kwargs)
        # distribution layers: map the encoder output to the parameters of
        # the Gaussian posterior q(z|x) = N(mean, exp(logvar))
        self.enc_dim = kwargs["enc_dim"]
        self.latent_dim = kwargs["latent_dim"]
        self.enc_to_mean = nn.Linear(self.enc_dim, self.latent_dim)
        self.enc_to_logvar = nn.Linear(self.enc_dim, self.latent_dim)

    def encode(self, x):
        """Return (mean, logvar) of the approximate posterior q(z|x)."""
        enc_out = self.encoder(x)
        mean = self.enc_to_mean(enc_out)
        logvar = self.enc_to_logvar(enc_out)
        return mean, logvar

    def decode(self, latent):
        """Decode a latent vector into a reconstruction."""
        return self.decoder(latent)

    def pxz_likelihood(self, x, x_bar, scale=1., dist_type="Gaussian"):
        """
        compute the likelihood p(x|z) based on predefined distribution, given a latent vector z
        default scale = 1, can be broadcasted to the shape of x_bar
        """
        if dist_type == "Gaussian":
            dist = torch.distributions.Normal(loc=x_bar, scale=scale)
        else:
            raise NotImplementedError("unknown distribution for p(x|z) {}".format(dist_type))
        log_pxz = dist.log_prob(x)
        # summed over ALL dims including batch; per-sample variant kept for reference
        return log_pxz.sum()  # log_pxz.sum((1,2,3))

    def kl_divergence(self, mean, logvar):
        """
        Monte Carlo way to solve KL divergence
        (single-sample estimate of KL(q(z|x) || N(0, I)))
        """
        pz = torchD.Normal(torch.zeros_like(mean), scale=1)
        std = torch.exp(0.5*logvar)
        qzx = torchD.Normal(loc=mean, scale=std)
        z = qzx.rsample()  # reparameterized sampling, shape [32,2]
        # clamp the log prob to avoid -inf
        # NOTE(review): max=0 also caps densities > 1 (possible for small std),
        # which biases the estimate — confirm this is intended.
        qzx_lp = qzx.log_prob(z).clamp(min=-1e10, max=0.)
        pz_lp = pz.log_prob(z).clamp(min=-1e10, max=0.)
        kl = qzx_lp - pz_lp
        # fail fast on numerical blow-ups rather than training on NaNs
        if torch.isnan(qzx_lp).any():
            raise ValueError("nan in qzx_lp")
        if torch.isnan(pz_lp).any():
            raise ValueError("nan in pz_lp")
        if torch.isnan(kl.mean()).any():
            raise ValueError("nan in kl")
        return kl.sum()

    def reparameterize(self, mean, logvar):
        # assume Gaussian for p(epsilon)
        sd = torch.exp(0.5*logvar)
        # use randn_like to sample N(0,1) of the same size as std/mean
        # default only sample once, otherwise should try sample multiple times take mean
        eps = torch.randn_like(sd)
        return mean + sd * eps

    def sample_latent_embedding(self, mean, logvar, method="reparameterize"):
        """
        Write a sampling function to make function name consistent
        """
        if method=="reparameterize":
            return self.reparameterize(mean, logvar)
        else:
            raise NotImplementedError("Unrecognized method for sampling latent embedding {}".format(method))

    def forward(self, x, if_plot_pq=False):
        """Encode x, sample z via reparameterization, decode; optionally plot q vs p."""
        latent_mean, latent_logvar = self.encode(x)
        latent = self.reparameterize(latent_mean, latent_logvar)
        x_bar = self.decoder(latent)
        if if_plot_pq:
            # plot_p_q comes from utils (star import at file top)
            plot_p_q(latent_mean, latent_logvar)
        return latent, x_bar, latent_mean, latent_logvar
class MLP_CV_Encoder(nn.Module):
    """Conditional VAE encoder: flatten x, append the label y as one extra
    feature, then apply a single ReLU layer.

    Expected kwargs: input_shape and enc_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_CV_Encoder, self).__init__()
        in_dim = int(np.prod(kwargs["input_shape"])) + 1  # +1 for the label
        self.model = nn.Sequential(
            nn.Linear(in_features=in_dim, out_features=kwargs["enc_dim"]),
            nn.ReLU(),
        )

    def forward(self, x, y):
        """Encode a batch of (x, y); y is concatenated as one extra column."""
        label_col = torch.unsqueeze(y, 1)
        flat = torch.flatten(x, start_dim=1)
        return self.model(torch.cat([flat, label_col], dim=1))
class MLP_CV_Decoder(nn.Module):
    """Conditional VAE decoder: (latent, label) -> ReLU layer -> sigmoid pixels.

    Expected kwargs: input_shape, enc_dim and latent_dim.
    """

    def __init__(self, **kwargs):
        super(MLP_CV_Decoder, self).__init__()
        pixels = int(np.prod(kwargs["input_shape"]))
        self.model = nn.Sequential(
            nn.Linear(in_features=kwargs["latent_dim"] + 1, out_features=kwargs["enc_dim"]),
            nn.ReLU(),
            nn.Linear(in_features=kwargs["enc_dim"], out_features=pixels),
            nn.Sigmoid(),  # push the pixels in range (0,1)
        )
        self.output_shape = kwargs["input_shape"]

    def forward(self, latent, y):
        """Decode a batch of latent vectors conditioned on labels y."""
        conditioned = torch.cat([latent, torch.unsqueeze(y, 1)], dim=1)
        flat = self.model(conditioned)
        return flat.view([-1] + self.output_shape)
class MLP_CVAE(nn.Module):
    """
    Conditional MLP VAE: mirrors MLP_VAE but both encoder and decoder are
    additionally conditioned on a scalar label y.
    TODO: check whether to use sum or mean for the probability part
    """
    def __init__(self, **kwargs):
        # e.g.
        # kwargs["input_shape"] = [1,28,28]
        # kwargs["latent_dim"] = 4
        super(MLP_CVAE, self).__init__()
        self.encoder = MLP_CV_Encoder(**kwargs)
        self.decoder = MLP_CV_Decoder(**kwargs)
        # distribution layers: map the encoder output to q(z|x,y) parameters
        self.enc_dim = kwargs["enc_dim"]
        self.latent_dim = kwargs["latent_dim"]
        self.enc_to_mean = nn.Linear(self.enc_dim, self.latent_dim)
        self.enc_to_logvar = nn.Linear(self.enc_dim, self.latent_dim)

    def encode(self, x, y):
        """Return (mean, logvar) of the approximate posterior q(z|x, y)."""
        enc_out = self.encoder(x, y)
        mean = self.enc_to_mean(enc_out)
        logvar = self.enc_to_logvar(enc_out)
        return mean, logvar

    def decode(self, latent, y):
        """Decode a latent vector conditioned on label y."""
        return self.decoder(latent, y)

    def pxz_likelihood(self, x, x_bar, scale=1., dist_type="Gaussian"):
        """
        compute the likelihood p(x|z) based on predefined distribution, given a latent vector z
        default scale = 1, can be broadcasted to the shape of x_bar
        """
        if dist_type == "Gaussian":
            dist = torch.distributions.Normal(loc=x_bar, scale=scale)
        else:
            raise NotImplementedError("unknown distribution for p(x|z) {}".format(dist_type))
        log_pxz = dist.log_prob(x)
        # summed over ALL dims including batch; per-sample variant kept for reference
        return log_pxz.sum()  # log_pxz.sum((1,2,3))

    def kl_divergence(self, mean, logvar):
        """
        Monte Carlo way to solve KL divergence
        (single-sample estimate of KL(q(z|x,y) || N(0, I)))
        """
        pz = torchD.Normal(torch.zeros_like(mean), scale=1)
        std = torch.exp(0.5*logvar)
        qzx = torchD.Normal(loc=mean, scale=std)
        z = qzx.rsample()  # reparameterized sampling, shape [32,2]
        # clamp the log prob to avoid -inf
        # NOTE(review): max=0 also caps densities > 1 (possible for small std),
        # which biases the estimate — confirm this is intended.
        qzx_lp = qzx.log_prob(z).clamp(min=-1e10, max=0.)
        pz_lp = pz.log_prob(z).clamp(min=-1e10, max=0.)
        kl = qzx_lp - pz_lp
        # fail fast on numerical blow-ups rather than training on NaNs
        if torch.isnan(qzx_lp).any():
            raise ValueError("nan in qzx_lp")
        if torch.isnan(pz_lp).any():
            raise ValueError("nan in pz_lp")
        if torch.isnan(kl.mean()).any():
            raise ValueError("nan in kl")
        return kl.sum()

    def reparameterize(self, mean, logvar):
        # assume Gaussian for p(epsilon)
        sd = torch.exp(0.5*logvar)
        # use randn_like to sample N(0,1) of the same size as std/mean
        # default only sample once, otherwise should try sample multiple times take mean
        eps = torch.randn_like(sd)
        return mean + sd * eps

    def sample_latent_embedding(self, mean, logvar, method="reparameterize"):
        """
        Write a sampling function to make function name consistent
        """
        if method=="reparameterize":
            return self.reparameterize(mean, logvar)
        else:
            raise NotImplementedError("Unrecognized method for sampling latent embedding {}".format(method))

    def forward(self, x, y, if_plot_pq=False):
        """Encode (x, y), sample z via reparameterization, decode conditioned on y."""
        latent_mean, latent_logvar = self.encode(x, y)
        latent = self.reparameterize(latent_mean, latent_logvar)
        x_bar = self.decoder(latent, y)
        if if_plot_pq:
            # plot_p_q comes from utils (star import at file top)
            plot_p_q(latent_mean, latent_logvar)
        return latent, x_bar, latent_mean, latent_logvar
|
from flask import current_app as app
from flask import Blueprint, url_for
from collections import OrderedDict
import OSS.models as models
import pprint
import json
import ast
api = Blueprint('api', __name__)
@api.route('/', methods=['GET'])
def index():
    """API root: plain-text banner describing the service."""
    return 'This is the API for Open Source Shakespeare (OSS).'
@api.route('/play/<string:name>/characters', methods=['GET'])
def get_play_characters(name):
    """Return the characters of a play as JSON (charname, description rows).

    Fixed: dropped the unused `play = get_play(name)` lookup, which issued an
    extra database query without affecting the response.
    """
    characters = get_characters(name)
    return json.dumps(characters)
@api.route('/play/raw/<string:name>', methods=['GET'])
def get_play_raw(name):
    """Return the play's plain text as a JSON list of lines (no speaker names)."""
    play = get_play(name)
    chapters = get_chapters(play)
    paragraphs = get_paragraphs(play, chapters)
    text = list(map(clean_text, iter(paragraphs)))
    # keep only the line text, discarding the (character, line) pairing
    plaintext = [line for (character, line) in text]
    return json.dumps(plaintext)
@api.route('/play/<string:name>', methods=['GET'])
def get_play_with_characters(name):
    """Return the full play as JSON (character, line) pairs in reading order."""
    play = get_play(name)
    chapters = get_chapters(play)
    paragraphs = get_paragraphs(play, chapters)
    text = list(map(clean_text, iter(paragraphs)))
    return json.dumps(text)
@api.route('/play/modern/<string:name>', methods=['GET'])
def get_play_modern(name):
    """Return the modern-English version of a play as JSON (speaker, line) pairs."""
    return json.dumps(get_modern_play(name))
def get_play(name):
    """Look up a Works row by its workid; returns None when absent."""
    return models.Works.query.filter_by(workid=name).first()
def get_chapters(play):
    """Return the play's chapters ordered by (section, chapter)."""
    return (models.Chapters.query
            .filter_by(workid=play.workid)
            .order_by(models.Chapters.section, models.Chapters.chapter)
            .all())
def get_modern_play(name):
    """Load the modern-English text for *name* from OSS/static/modern/<name>.txt.

    The file holds a Python-literal list of (speaker, line) pairs, parsed with
    ast.literal_eval (safe — evaluates literals only, no code execution).
    Returns a list of (speaker, line) tuples.

    Fixed: removed leftover debug prints that dumped the whole play to stdout
    on every request.
    """
    with open('OSS/static/modern/{}.txt'.format(name), 'r') as f:
        lines = ast.literal_eval(f.read())
    return [(item[0], item[1]) for item in lines]
def get_paragraphs(play, chapters):
    """Collect all paragraphs of *play*, chapter by chapter, in reading order."""
    collected = []
    for chapter in chapters:
        rows = (models.Paragraphs.query
                .filter_by(workid=play.workid,
                           section=chapter.section,
                           chapter=chapter.chapter)
                .order_by(models.Paragraphs.section,
                          models.Paragraphs.chapter,
                          models.Paragraphs.paragraphnum)
                .all())
        collected.extend(rows)
    return collected
def get_characters(play):
    """Return (charname, description) rows for characters appearing in *play*.

    *play* is matched as a substring of the Characters.works column via LIKE.

    Fixed: removed a leftover debug print of the full result set.
    """
    search = '%{}%'.format(play)
    query = models.Characters.query
    query = query.with_entities(models.Characters.charname,
                                models.Characters.description)
    query = query.filter(models.Characters.works.like(search))
    return query.all()
def clean_text(paragraph, modern=False):
    """Resolve a paragraph row to a (character_name, line) pair.

    Looks up the speaking character by charid and strips layout markers
    from the chosen text variant (moderntext when modern=True, else
    plaintext).
    """
    character_query = models.Characters.query
    character_query = character_query.filter_by(charid=paragraph.charid)
    character_name = character_query.first().charname
    line = paragraph.moderntext if modern else paragraph.plaintext
    if line:
        # drop hard newlines and [p] paragraph markers from the stored text
        line = line.replace('\n', '')
        line = line.replace('[p]', ' ')
    return (character_name, line)
|
import scraperwiki
import lxml.html
# Scrape page 0 of the catinabib.it digital catalogue and, for every item,
# fetch its detail page and derive its DublinCore/MAG export URLs.
# NOTE(review): appears to be a legacy ScraperWiki (Python 2) script —
# lxml.html.tostring returns bytes on Python 3; verify target runtime.
p = 0
base_url = "http://www.catinabib.it/"
catalog_url = "?q=digitalib/res_in_coll/247&page="
info_url = "/?q=node/"
img_url = "http://www.catinabib.it/files/"
dublincore_url = "http://www.catinabib.it/?q=digitalib/generate_dc/"
mag_url = "http://www.catinabib.it/?q=digitalib/generate_mag/"

html = scraperwiki.scrape(base_url + catalog_url + str(p))
root = lxml.html.fromstring(html)
# last catalogue page number, taken from the pager's "last" link
last_page = root.cssselect("li.pager-last a")[0].attrib['href'].split("page=")[1]
tables = root.cssselect("div.node")
for table in tables:
    # node id extracted from the item's detail link
    internal_id = table.cssselect("h2 a")[0].attrib['href'].replace(info_url, "")
    infohtml = lxml.html.fromstring(scraperwiki.scrape(base_url + info_url + str(internal_id)))
    fields = infohtml.cssselect("fieldset")
    raccolta = fields[0].cssselect("div")[0].text_content().replace("Raccolta: ", "")
    foto = fields[1].cssselect("img")[0].attrib['src'].replace('/?q=digitalib/reduced/', '')
    # parse "<b>field</b>: value" rows out of the metadata fieldset
    for f in fields[2]:
        v = lxml.html.tostring(f)
        if (v[0:3] != '<br') and (v[0:3] != '<di'):
            campo = v.split(": ")[0].replace("<b>", "").replace("</b>", "")
            valore = v.split(": ")[1]
    dublincore = dublincore_url + internal_id
    # bug fix: the original concatenated the builtin `id` function
    # (mag_url + id -> TypeError) instead of the scraped identifier
    mag = mag_url + internal_id
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.