text stringlengths 38 1.54M |
|---|
import pandas as pd
import numpy as np
import seaborn as sns
sns.set(style="white")
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.preprocessing import StandardScaler, normalize
from sklearn.model_selection import train_test_split, KFold,RepeatedStratifiedKFold,RandomizedSearchCV,cross_val_score
from sklearn.metrics import(accuracy_score, roc_auc_score, f1_score, plot_confusion_matrix, precision_recall_curve, roc_curve,
recall_score, confusion_matrix, precision_score, classification_report)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.svm import SVC
#global variable
# fixed seed so all train/val/test splits are reproducible
random_state=42
# NOTE: IPython magic -- this file is a notebook export and will not run as a
# plain Python script
%matplotlib inline
#load data
# drop identifier-like columns that carry no predictive signal
df=pd.read_csv('./data/churn.csv').drop(columns=['RowNumber','Surname', 'CustomerId', 'Geography'], axis=1)
df.head(3)
#check for Nan
df.isnull().sum()
#data preprocessing
#convert categorical values to numerical
# binary-encode gender: 'Female' -> 0, anything else -> 1
df['Gender']=df['Gender'].apply(lambda x: 0 if x=='Female' else 1)
#split data on test, val and train subset
y = df["Exited"]
X = df.drop(["Exited"], axis = 1)
# 25% held out as test, then 25% of the remainder as validation
X_train, X_test, y_train, y_test=train_test_split(X,y, test_size=0.25, random_state=random_state)
X_train, X_val, y_train, y_val=train_test_split(X_train, y_train, test_size=0.25, random_state=random_state)
#scale data
# fit the scaler on the training fold only to avoid data leakage
scaler = StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_val=scaler.transform(X_val)
X_test=scaler.transform(X_test)
# count samples for each class
from collections import Counter
counter = Counter(y_train)
# estimate scale_pos_weight value
# negatives-to-positives ratio (class imbalance estimate)
estimate = counter[0] / counter[1]
print('Estimate: %.3f' % estimate)
#instantiate models
log_clf = LogisticRegression()
rnd_clf = RandomForestClassifier()
svm_clf = SVC(probability=True)  # probability=True enables predict_proba, required for soft voting
# soft voting averages the three models' predicted class probabilities
voting_clf = VotingClassifier(estimators=[('lr', log_clf), ('rf', rnd_clf), ('svc', svm_clf)],voting='soft')
#compute accuracy for each model
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_val)
    print(clf.__class__.__name__, accuracy_score(y_val, y_pred))
#evaluate models on roc_auc score:
# 10-fold stratified CV repeated 3 times for a more stable ROC-AUC estimate
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
for clf in (log_clf, rnd_clf, svm_clf, voting_clf):
    scores = cross_val_score(clf, X_train, y_train, scoring='roc_auc', cv=cv, n_jobs=-1)
    print(clf.__class__.__name__, "Mean ROC AUC: %.5f " % (scores.mean()))
#hyperparameters tuning
# define grid
param_grid= {'max_iter':[10,50,100,300], 'C':[0.001,0.01,0.5], 'solver':['newton-cg','lbfgs','liblinear','sag','saga']}
# NOTE(review): random_state=67 is inconsistent with the module-level random_state=42
model =LogisticRegression(random_state=67)
# define evaluation procedure
from sklearn.model_selection import GridSearchCV
# define grid search for logistic regressoin
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
grid_result = grid.fit(X_train, y_train)
# report the best configuration
# NOTE(review): tuned parameters are only printed; they are never fed back
# into the log_clf/svm_clf/rnd_clf instances used for the later plots
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
# define grid search for SVM classifier
# max_iter=-1 means "no iteration limit" for SVC
param_grid= {'max_iter':[10,50,100,-1], 'C':[0.05,0.01,1.0], 'gamma':['auto'],'kernel':['linear','poly','rbf','sigmoid']}
model =SVC(random_state=67, probability=True)
# define evaluation procedure
from sklearn.model_selection import GridSearchCV  # redundant re-import (kept as-is)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# define grid search
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
grid_result = grid.fit(X_train, y_train)
# report the best configuration
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
# define grid search for random forest classsifier
param_grid= {'n_estimators':[100,300,500],'max_depth':[1,3,5,7], 'max_leaf_nodes':[5,15]}
model =RandomForestClassifier(random_state=random_state)
# define evaluation procedure
from sklearn.model_selection import GridSearchCV  # redundant re-import (kept as-is)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# define grid search
grid = GridSearchCV(estimator=model, param_grid=param_grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
grid_result = grid.fit(X_train, y_train)
# report the best configuration
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
#plot roc curve on validation subset to evaluate model perfomance
from sklearn.metrics import roc_curve  # already imported at the top (kept as-is)
# roc curve for models (the un-tuned classifiers fitted earlier)
fpr1, tpr1, thresh1 = roc_curve(y_val, log_clf.predict_proba(X_val)[:,1], pos_label=1)
fpr2, tpr2, thresh2 = roc_curve(y_val, rnd_clf.predict_proba(X_val)[:,1], pos_label=1)
fpr3, tpr3, thresh3 = roc_curve(y_val, svm_clf.predict_proba(X_val)[:,1], pos_label=1)
#roc curve for tpr = fpr
# constant scores yield the chance diagonal, used as the baseline curve
random_probs = [0 for i in range(len(y_val))]
p_fpr, p_tpr, _ = roc_curve(y_val, random_probs, pos_label=1)
# matplotlib
import matplotlib.pyplot as plt  # redundant re-import (kept as-is)
plt.style.use('seaborn')  # NOTE(review): this style name was removed in matplotlib >= 3.8
# plot roc curves
plt.figure(1)
plt.plot(fpr1, tpr1, linestyle='--',color='orange', label='Logistic')
plt.plot(fpr2, tpr2, linestyle='--',color='green', label='RandomForest')
plt.plot(fpr3, tpr3, linestyle='--',color='red', label='SVC')
plt.plot(p_fpr, p_tpr, linestyle='--', color='black')
# title
plt.title('ROC curve')
# x label
plt.xlabel('False Positive Rate')
# y label
plt.ylabel('True Positive rate')
plt.legend(loc='best')
# save before show() so the figure is written out before it is cleared
plt.savefig('AllModelsROC',dpi=200)
plt.show();
# zoomed view of the low-FPR / high-TPR corner
plt.figure(2)
plt.xlim(0, 0.4)
plt.ylim(0.4, 1)
plt.plot(fpr1, tpr1, linestyle='--',color='orange', label='Logistic')
plt.plot(fpr2, tpr2, linestyle='--',color='green', label='RandomForest')
plt.plot(fpr3, tpr3, linestyle='--',color='red', label='SVC')
plt.plot(p_fpr, p_tpr, linestyle='--', color='black')
# title
plt.title('ROC curve')
# x label
plt.xlabel('False Positive Rate')
# y label
plt.ylabel('True Positive rate')
plt.legend(loc='best')
#plt.savefig('ROCzoom',dpi=200)
plt.show();
# --- Confusion matrix for the Random Forest on the validation set ---
plt.style.use('classic')
disp = plot_confusion_matrix(rnd_clf, X_val, y_val, cmap=plt.cm.Blues)
disp.ax_.set_title("Confusion matrix, without normalization")
# bug fixes: savefig must come BEFORE plt.show() (show clears the current
# figure), and the keyword is dpi, not dsi (dsi raised a TypeError)
plt.savefig('ConfusionMatrixBefore', dpi=200)
plt.show()
# --- Precision / recall as a function of the decision threshold ---
y_hat = rnd_clf.predict(X_val)
rf_proba = rnd_clf.predict_proba(X_val)
precisions, recalls, thresholds = precision_recall_curve(y_val, rf_proba[:, 1])
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g-", label="Recall")
plt.xlabel("Threshold")
plt.legend(loc="upper left")
plt.ylim([0, 1])
# labels are imbalanced (~1:4), so fix an explicit decision threshold
threshold = 0.5
y_val_60 = (rf_proba[:, 1] > threshold)
# Confusion matrix of the thresholded predictions against the TRUE labels.
# Bug fix: the original passed the thresholded predictions as the ground-truth
# argument of plot_confusion_matrix, which compared them against
# clf.predict(X) instead of against y_val.
from sklearn.metrics import ConfusionMatrixDisplay
plt.style.use('classic')
cm_val = confusion_matrix(y_val, y_val_60)
disp = ConfusionMatrixDisplay(confusion_matrix=cm_val)
disp.plot(cmap=plt.cm.Blues)
disp.ax_.set_title("Confusion matrix, without normalization")
plt.show()
# model and threshold are chosen -> final evaluation on the held-out test set
y_hat = rnd_clf.predict(X_test)
rf_proba = rnd_clf.predict_proba(X_test)
threshold = 0.5
y_test_60 = (rf_proba[:, 1] > threshold)
cm_test = confusion_matrix(y_test, y_test_60)
disp = ConfusionMatrixDisplay(confusion_matrix=cm_test)
disp.plot(cmap=plt.cm.Blues)
disp.ax_.set_title("Confusion matrix, without normalization")
plt.show()
#######
|
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from utils import plot
###############################################################################
# plot mean aligned boolean traces
###############################################################################
# Plot the mean transcription-spot-presence trace over time, one line per
# strain group -> figures/output/FigS3_alignedBooltraces.svg
aligned_traces = pd.read_csv('./data/aligned_traces_tidy.csv')
# add repeat number for color palette
# (maps strain -> CTD repeat number; presumably defined in utils.plot -- confirm)
aligned_traces['ctdr'] = aligned_traces.strain.map(plot.CTDr_dict)
fig, ax = plt.subplots(figsize=(10,8))
# seaborn aggregates replicate traces into a mean line per 'ctdr' hue level
sns.lineplot(x='time', y='spot', hue='ctdr', data=aligned_traces, ax=ax, palette=plot.colors_ctd)
ax.set(xlabel='Time (min)', ylabel='Mean Transcription\nSpot Presence', xticks=np.arange(0, 50, 10))
plt.tight_layout()
plt.savefig('./figures/output/FigS3_alignedBooltraces.svg')
|
# Maximize the number of 1s obtainable by flipping (0 <-> 1) at most one
# contiguous subarray: result = sum(arr) + max over windows of
# (#zeros - #ones) inside the flipped window (0 when flipping nothing helps).
arr = [1, 0, 0, 1, 0, 0, 1]
sum1 = sum(arr)      # ones already present
diff = 0             # best (zeros - ones) of any window; 0 == flip nothing
# Examine every window arr[j:i].  Bug fix: i must reach len(arr) so windows
# that include the LAST element are considered (the original stopped at
# len(arr) - 1 and e.g. got [1, 0] wrong).
for i in range(1, len(arr) + 1):
    for j in range(0, i):
        window = arr[j:i]
        ct0 = window.count(0)
        ct1 = len(window) - ct0
        if ct0 - ct1 > diff:
            diff = ct0 - ct1
# Bug fix: compute the result unconditionally so it is defined even when no
# window improves the count (the original assigned it only inside the if,
# raising NameError for an all-ones array).
result = sum1 + diff
print(result)
|
from flask import Flask,redirect, url_for,render_template, request,session,flash,request, jsonify
from flask_wtf import FlaskForm
from wtforms import StringField , PasswordField,SubmitField
from flask_sqlalchemy import SQLAlchemy
from wtforms.validators import DataRequired,Length, Email , EqualTo, ValidationError
from flask_login import LoginManager, UserMixin, login_user,current_user,logout_user,login_required
from wtforms.widgets import TextArea
from datetime import datetime
import pusher
from flask import Flask, render_template, request
app = Flask(__name__)
# SECURITY(review): hard-coded secret key and SQLite path; load from
# environment/config in production
app.config['SECRET_KEY'] = 'test'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
login_manager = LoginManager(app)
# Pusher client used to broadcast group-chat messages in real time
# (credential values here are placeholders)
pusher_client = pusher.Pusher(
    app_id='ID',
    key='KEY',
    secret="SECRET",
    cluster='CLS',
    ssl=True
)
class User(db.Model , UserMixin):
    """Account record.  SECURITY(review): password is stored in plaintext."""
    id = db.Column(db.Integer,primary_key= True)
    username = db.Column(db.String(15), unique= True, nullable = False)
    email = db.Column(db.String(120), unique= True, nullable = False)
    password = db.Column(db.String(60), nullable = False)
class Usermessagesallchat(db.Model,UserMixin):
    """Direct message between two users, addressed by username strings (no FK)."""
    id = db.Column(db.Integer,primary_key= True)
    usernameTO = db.Column(db.String(70))
    usernameFROM = db.Column(db.String(70))
    messages = db.Column(db.String(700))
    date_posted = db.Column(db.DateTime, default = datetime.utcnow )
@login_manager.user_loader
def load_user(user_id):
    # flask-login callback: rebuild the User from the id stored in the session
    return (User.query.get(int(user_id)))
class RegistrationForm(FlaskForm):
    """Sign-up form; Confirm_password must match password."""
    username = StringField('UserName' , validators = [DataRequired() , Length(min = 3, max = 14)])
    email = StringField('Email' , validators = [DataRequired() , Email()])
    password = PasswordField('Password' , validators = [DataRequired()])
    Confirm_password = PasswordField('Confirm Password' , validators = [DataRequired() , EqualTo('password')])
    submit = SubmitField('Sign Up!')
class LoginForm(FlaskForm):
    """E-mail + password log-in form."""
    email = StringField('Email' , validators = [DataRequired() , Email()])
    password = PasswordField('Password' , validators = [DataRequired()])
    submit = SubmitField('Log in.')
class MessageForm(FlaskForm):
    """Direct-message form: recipient username + message body (textarea)."""
    usernameTO = StringField('Username' , validators = [DataRequired() ])
    message = StringField('Write-Up' ,validators = [DataRequired()], widget = TextArea())
    submit = SubmitField('Send')
class Message(db.Model,UserMixin):
    """Group-chat message row; sender stored by username string."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(50))
    message = db.Column(db.String(500))
# --- simple render-only page routes ---
@app.route("/")
def index():
    """Public landing page."""
    return render_template("landingpage.html")
@app.route("/about")
def about():
    """Public about page."""
    return render_template("about.html")
@app.route("/home")
@login_required
def mainhome():
    """Authenticated home page."""
    return render_template("mainhomepage.html")
@app.route("/colorburster")
@login_required
def colorburster():
    return render_template("playwithcolors.html")
@app.route("/inbox")
@login_required
def inbox():
    """List direct messages addressed to the current user, newest first."""
    usermsgs=Usermessagesallchat.query.filter_by(usernameTO = current_user.username).all()
    usermsgs.reverse()
    return render_template("inbox.html",inbox = usermsgs)
@app.route("/towerblock")
@login_required
def towerblock():
    return render_template("tower.html")
@app.route("/cube")
@login_required
def cube():
    return render_template("cube.html")
@app.route("/register", methods = ['GET' , "POST"] )
def register():
    """Sign-up view: create a User when username and e-mail are both unused."""
    form = RegistrationForm()
    if (form.validate_on_submit()):
        ##hashing passwords
        # SECURITY(review): despite the comment above, no hashing happens --
        # the raw password is stored.  Hash (e.g. werkzeug.security) before
        # persisting.
        hashed_pwd = form.password.data
        user = User(username = form.username.data ,email = form.email.data, password = hashed_pwd)
        u = form.username.data
        e = form.email.data
        # uniqueness check happens after the object is built; the unsaved
        # object is simply discarded if a duplicate exists
        user1 = User.query.filter_by(username = u ).first()
        user2 = User.query.filter_by(email = e).first()
        if (user1 or user2):
            return ("<h1> Credentials already taken! </h1>")
        else:
            db.session.add(user)
            db.session.commit()
            flash('Your account has been created, Login!' , 'success')
            return redirect(url_for('login'))
    else:
        return render_template('register.html' ,type ='Register', title = 'Register' , form = form )
@app.route("/message" , methods = ['GET','POST'])
@login_required
def message_():
    """Direct-message view: validate the recipient exists, then store the message."""
    userlist=[]
    out=[]
    p=False  # recipient-exists flag
    form = MessageForm()
    # query returns 1-tuples; flatten them into a plain list of usernames
    userlist = User.query.with_entities(User.username).all()
    for t in userlist:
        for x in t:
            out.append(x)
    if (form.validate_on_submit()):
        form.usernameTO.data = form.usernameTO.data.strip()
        print(form.usernameTO.data)
        # disallow sending a message to yourself
        if(form.usernameTO.data==current_user.username):
            return (" <h2> Please select an appropriate username of the username </h2>")
        # linear scan for the recipient username
        for i in range(len(out)):
            if(out[i]==form.usernameTO.data):
                p=True
                break
        if(p):
            usermsg = Usermessagesallchat(usernameTO = form.usernameTO.data, usernameFROM = current_user.username, messages = form.message.data)
            db.session.add(usermsg)
            db.session.commit()
            # clear the fields so the confirmation page shows an empty form
            form.usernameTO.data=None
            form.message.data=None
            return render_template("returnMessage.html")
        else:
            return("Please enter a valid username")
    return render_template("message_chats.html",form=form,userlist=out)
@app.route("/login" , methods = ['GET' , "POST"])
def login():
    """Log-in view.

    SECURITY(review): compares the submitted password against the stored
    value in plaintext, and redirects to an unvalidated ?next= URL
    (open-redirect risk).
    """
    if (current_user.is_authenticated):
        return redirect(url_for('mainhome'))
    form = LoginForm()
    if (form.validate_on_submit() ):
        user = User.query.filter_by(email = form.email.data).first()
        if(user and (user.password == form.password.data)):
            login_user(user , remember = False)
            # honor the page the user was originally trying to reach
            next_page = request.args.get('next')
            return redirect(next_page) if (next_page) else (redirect(url_for('mainhome')))
        else:
            return ("<br> <h1>You have entered wrong credentials for Logging in.</h1><br> <p>Go back and Log in Again! </p>")
    else:
        return render_template('login.html' , type = 'Log in', title = 'Log in',form = form )
# flask-login: endpoint that @login_required redirects anonymous users to,
# and the flash-message category used for that redirect notice
login_manager.login_view = 'login'
login_manager.login_message_category = 'info'
@app.route("/loggedout" )
def logout():
    """End the current session and return to the login page."""
    logout_user()
    return redirect(url_for('login'))
@app.route("/userprofile")
@login_required
def userprofile():
    """Show the logged-in user's profile.

    SECURITY(review): the (plaintext) password is passed to the template.
    """
    username= current_user.username
    email= current_user.email
    password= current_user.password
    return render_template("profile.html",username=username,email=email,Password=password)
@app.route('/chat')
@login_required
def index_():
    """Group-chat page: render the full stored message history.

    Bug fix: decorator order swapped.  @app.route must be the OUTERMOST
    decorator so the view Flask registers is the login_required-wrapped one;
    with the original order the unprotected function was registered and the
    page was reachable without logging in.
    """
    messages = Message.query.all()
    return render_template('index.html', messages=messages)
@app.route('/chat_', methods=['POST'])
@login_required
def message():
    """Persist a group-chat message and broadcast it over Pusher.

    Bug fix: decorator order swapped (see index_): @app.route outermost so
    the registered view is actually login-protected.
    """
    try:
        username = current_user.username
        message = request.form.get('message')
        new_message = Message(username=username, message=message)
        db.session.add(new_message)
        db.session.commit()
        pusher_client.trigger('chat-channel', 'new-message', {'username' : username, 'message': message})
        return jsonify({'result' : 'success'})
    except Exception:
        # narrowed from a bare `except:` (which also swallowed SystemExit /
        # KeyboardInterrupt); kept broad so the endpoint degrades to a JSON
        # error instead of a 500
        return jsonify({'result' : 'failure'})
@app.errorhandler(404)
def not_found(e):
    """Custom page for unknown URLs."""
    return render_template("error404.html")
@app.errorhandler(500)
def notatall_found(e):
    """Custom page for unhandled server errors."""
    return render_template("error500.html")
if __name__ == '__main__':
    # development server only; disable debug mode in production
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI definition for the Passner password-manager main window.

    NOTE(review): this follows pyuic5 generated-code conventions; if it was
    in fact generated, edit the .ui file and regenerate instead of editing
    this module by hand.
    """
    def setupUi(self, MainWindow):
        # fixed-size window (setFixedSize overrides the earlier resize)
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(390, 333)
        MainWindow.setFixedSize(500, 333)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # header label above the grid
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(20, 10, 191, 21))
        self.label.setObjectName("label")
        # container widget that hosts the grid layout
        self.widget = QtWidgets.QWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(20, 30, 450, 291))
        self.widget.setObjectName("widget")
        self.gridLayout = QtWidgets.QGridLayout(self.widget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        # grid row 2: save-changes button (text set in retranslateUi)
        self.editBtn = QtWidgets.QPushButton(self.widget)
        self.editBtn.setObjectName("editBtn")
        self.gridLayout.addWidget(self.editBtn, 2, 2, 1, 1)
        # grid row 0: master-key input + connect button
        self.connectBtn = QtWidgets.QPushButton(self.widget)
        self.connectBtn.setObjectName("connectBtn")
        self.gridLayout.addWidget(self.connectBtn, 0, 2, 1, 1)
        self.keyInput = QtWidgets.QLineEdit(self.widget)
        self.keyInput.setEchoMode(QtWidgets.QLineEdit.Password)  # mask the master key
        self.keyInput.setObjectName("keyInput")
        self.gridLayout.addWidget(self.keyInput, 0, 0, 1, 2)
        self.addBtn = QtWidgets.QPushButton(self.widget)
        self.addBtn.setObjectName("addBtn")
        self.gridLayout.addWidget(self.addBtn, 2, 0, 1, 1)
        self.deleteBtn = QtWidgets.QPushButton(self.widget)
        self.deleteBtn.setObjectName("deleteBtn")
        self.gridLayout.addWidget(self.deleteBtn, 2, 1, 1, 1)
        # grid row 1: 4-column table for stored entries (headers set below)
        self.tableWidget = QtWidgets.QTableWidget(self.widget)
        self.tableWidget.setMinimumSize(QtCore.QSize(0, 192))
        self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 192))
        self.tableWidget.setObjectName("tableWidget")
        self.tableWidget.setColumnCount(4)
        self.tableWidget.setRowCount(0)
        self.tableWidget.verticalHeader().setVisible(False)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(0, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(1, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(2, item)
        item = QtWidgets.QTableWidgetItem()
        self.tableWidget.setHorizontalHeaderItem(3, item)
        self.gridLayout.addWidget(self.tableWidget, 1, 0, 1, 3)
        # kept as a list for the bulk setAutoDefault call in retranslateUi
        self.btns = [self.connectBtn, self.addBtn, self.editBtn, self.deleteBtn]
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # assign all user-visible (Spanish) strings and the table headers
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Passner 0.0.1"))
        self.label.setText(_translate("MainWindow", "Ingresa tu clave maestra para acceder"))
        self.editBtn.setText(_translate("MainWindow", "Guardar cambios"))
        #self.editBtn.setEnabled(False)
        self.connectBtn.setText(_translate("MainWindow", "Conectar"))
        self.addBtn.setText(_translate("MainWindow", "Agregar"))
        self.deleteBtn.setText(_translate("MainWindow", "Eliminar"))
        for btn in self.btns: btn.setAutoDefault(False)
        item = self.tableWidget.horizontalHeaderItem(0)
        item.setText(_translate("MainWindow", "Número"))
        item = self.tableWidget.horizontalHeaderItem(1)
        item.setText(_translate("MainWindow", "Usuario"))
        item = self.tableWidget.horizontalHeaderItem(2)
        item.setText(_translate("MainWindow", "Contraseña"))
        item = self.tableWidget.horizontalHeaderItem(3)
        item.setText(_translate("MainWindow", "Información"))
|
# File: Queens.py
# Description: All queen positions without them attacking each other
# Student Name: Pranjal Jain
# Student UT EID: pj5775
# Partner's Name: Maurya Atluri
# Partner's UT EID: ma57744
# Course Name: CS 313E
# Unique Number: 50300
# Date Created: 3/13/20
# Date Last Modified: 3/13/20
# module-level state: `num` counts printed solutions (it starts at 1 so the
# `num = num - 1` at the bottom of the file yields the true count); `user`
# holds the board size entered in main()
num = 1
user = None
class Queens (object):
    """n-queens solver that prints every solution via recursive backtracking."""
    # initialize the board
    def __init__ (self, n = 8):
        self.board = []
        self.n = n
        for i in range (self.n):
            row = []
            for j in range (self.n):
                row.append ('*')  # '*' marks an empty square
            self.board.append (row)
    # print the board
    def print_board (self):
        # also bumps the global solution counter -- called once per solution
        global num
        num += 1
        for i in range (self.n):
            for j in range (self.n):
                print (self.board[i][j], end = ' ')
            print ()
    # check if no queen captures another
    def is_valid (self, row, col):
        # row / column attack check
        for i in range (self.n):
            if (self.board[row][i] == 'Q' or self.board[i][col] == 'Q'):
                return False
        # diagonal attack check: equal row/col offsets mean a shared diagonal
        for i in range (self.n):
            for j in range (self.n):
                row_diff = abs (row - i)
                col_diff = abs (col - j)
                if (row_diff == col_diff) and (self.board[i][j] == 'Q'):
                    return False
        return True
    # do a recursive backtracking solution
    def recursive_solve (self, col):
        # try a queen in every row of `col`; prints each complete solution
        # and returns True if at least one solution exists from this state
        if (col == self.n):
            self.print_board()
            print()
            return True
        else:
            repeat = False
            for i in range (self.n):
                if (self.is_valid(i, col)):
                    self.board[i][col] = 'Q'
                    if (self.recursive_solve (col + 1)) :
                        repeat = True
                    self.board[i][col] = '*'  # backtrack
            return repeat
    # if the problem has a solution print the board
    def solve (self):
        # NOTE(review): never called by main(); starting recursive_solve at a
        # column i > 0 skips columns, so this method looks vestigial -- confirm
        # before reusing it.
        for i in range (self.n):
            if (self.recursive_solve(i)):
                self.print_board()
def main():
    """Prompt for a board size in [1, 8] and print every n-queens solution."""
    global user
    user = int(input('Enter the size of board: '))
    # re-prompt until the size is in range (non-numeric input raises an
    # unhandled ValueError)
    while user > 8 or user < 1:
        user = int(input('Enter the size of board: '))
    print()
    # create a regular chess board
    game = Queens (user)
    #place queens on board
    if (game.recursive_solve(0) == False):
        print('No possible solutions.')
        return
    return
main()
# print_board() starts counting at num == 1, so subtract the offset here.
# NOTE(review): these lines also run after 'No possible solutions.', printing
# 'There are 0 solutions ...' in addition.
num = num-1
print('There are', num, 'solutions for a ' + str(user)+'x'+ str(user)+ ' board.')
|
from peewee import *

# Configure the MySQL connection, then open it.
# Bug fix: MySQLDatabase.connect() is an *instance* method that takes no
# connection arguments; the original called it on the class with positional
# credentials, which raises a TypeError.  Credentials belong in the
# constructor (the first argument is the database name).
db = MySQLDatabase('houseinfo', host='localhost', user='root',
                   password='123456', charset='utf8')
db.connect()
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 6 11:15:31 2021
@author: he
"""
import pandas as pd
# Drop the stray 'Unnamed: 0' index column (left by a previous to_csv that
# was saved without index=False), re-save, and reload to verify.
df = pd.read_csv('news.csv')
df.columns  # no-op outside a notebook (expression result is discarded)
data = df.drop('Unnamed: 0', axis=1)
data.to_csv('final_news_data.csv', index=False)
new = pd.read_csv('final_news_data.csv')
# coding: utf-8
import sys
import requests
GITHUB_URL = 'https://api.github.com/repos'
def _get_bar(num, total):
return ('+' * num) + (' ' * (total - num))
class Repo(object):
    """GitHub repository stats, fetched from the public REST API at construction.

    Raises:
        Exception: when the repository lookup does not return HTTP 200.
    """
    def __init__(self, full_name):
        self.full_name = full_name  # "owner/name" slug
        self.name = None
        self.stars = None
        self.forks = None
        self.subscribers = None
        self.pushed_at = None
        self.created_at = None
        self.updated_at = None
        self._update_attrs(full_name)  # performs the network call
    def _update_attrs(self, full_name):
        # one unauthenticated GET per repo; subject to GitHub rate limits
        url = '{}/{}'.format(GITHUB_URL, full_name)
        response = requests.get(url)
        if response.status_code != 200:
            raise Exception('Repository not found: {}'.format(full_name))
        _json = response.json()
        self.name = _json['name']
        self.stars = _json['stargazers_count']
        self.forks = _json['forks_count']
        self.subscribers = _json['subscribers_count']
        self.pushed_at = _json['pushed_at']
        self.created_at = _json['created_at']
        self.updated_at = _json['updated_at']
    def __repr__(self):
        return '<Repo: {}>'.format(self.full_name)
class Report(object):
    """Compare repositories field-by-field and score one point per field won.

    ``fields`` maps an attribute name to the function that selects the *best*
    value for that attribute across all repos: ``min`` for ``created_at``
    (older is better), ``max`` for everything else.
    """
    fields = {
        'created_at': min,
        'forks': max,
        'pushed_at': max,
        'stars': max,
        'subscribers': max,
        'updated_at': max,
    }
    def __init__(self, repos):
        self.repos = repos
        # per-repo score dict; ties award a point to every repo that ties
        self.score = {repo.full_name: {'total': 0} for repo in self.repos}
        # widest repo name, used to align the printed report
        self.col_width = max(len(repo.full_name) for repo in self.repos)
    def get_best_value(self, func, field):
        """Return the best value of *field* across all repos, judged by *func*."""
        return func([getattr(repo, field) for repo in self.repos])
    def test_score(self):
        """Award one point per field to every repo holding that field's best value."""
        for field in self.fields:
            # Bug fix: use the comparator declared in ``fields`` instead of a
            # hard-coded ``max`` -- created_at must be judged with ``min``,
            # otherwise the youngest repo wins the "oldest repo" point.
            best = self.get_best_value(self.fields[field], field)
            for repo in self.repos:
                if getattr(repo, field) == best:
                    self.score[repo.full_name][field] = 1
                    self.score[repo.full_name]['total'] += 1
    def show_result(self):
        """Write one aligned line per repo: name, '+' score bar, total."""
        for repo in self.repos:
            sys.stdout.write('{}: [{}] {}\n'.format(
                repo.full_name.ljust(self.col_width),
                _get_bar(self.score[repo.full_name]['total'], len(self.fields)),
                self.score[repo.full_name]['total']
            ))
|
'''
Created on 2013-04-19
@author: Ian
'''
from django.contrib import admin
from forum.models import Comment
# expose forum comments in the Django admin site
admin.site.register(Comment)
|
from unittest import TestCase, main
from core.qm.qm import QM
class TestQM(TestCase):
    """Unit tests for the Quine-McCluskey minimizer (core.qm.qm.QM)."""
    # NOTE(review): unittest only auto-invokes a fixture named ``setUp``
    # (capital U), so this ``setup`` never runs and its self.fail() never
    # fires.  Confirm intent before renaming -- renaming it to setUp as-is
    # would make every test in the class fail.
    def setup(self):
        self.fail()
    def test_to_binary(self):
        #test if the strings are the actual binary representations
        #for each of the strings
        minterms = [1,2,3,4,5,6,15]
        qm = QM(minterms)
        expected_conversion = ['0001','0010','0011','0100','0101','0110','1111']
        conversion = qm.to_binary(minterms)
        self.assertEqual(conversion,expected_conversion)
        #check if the number of bits in each binary string is the same for
        #all
        for term in conversion:
            self.assertEqual(len(term),4)
        #check if the same applies to the don't cares
    # the remaining tests are unwritten placeholders that fail on purpose
    def test_combine(self):
        self.fail()
    def test_combine_groups(self):
        self.fail()
    def test_combine_generation(self):
        self.fail()
    def test_group_minterms(self):
        self.fail()
    def test_pis(self):
        self.fail()
    def test_can_cover(self):
        self.fail()
    def test_epis(self):
        self.fail()
    def test_other_pis(self):
        self.fail()
|
# Generated by Django 3.2.5 on 2021-08-04 21:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the user's CV and profile image optional (blank/null allowed)."""
    dependencies = [
        ('account', '0015_auto_20210804_1933'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='cv',
            field=models.FileField(blank=True, null=True, upload_to='media/cv'),
        ),
        migrations.AlterField(
            model_name='user',
            name='profile_image',
            field=models.ImageField(blank=True, null=True, upload_to='media/images'),
        ),
    ]
|
# ----------------------------------------------------------
# this code is for plotting potentials whose with rank 1
# ----------------------------------------------------------
def plot_pot_pi_plus(pot_list, figpath):
    """Scatter-plot rank-1 candidate potentials across all 152 contact types.

    The x axis is 38 groups x 4 RNA bases (ACGU): 20 hydrogen-bond amino-acid
    groups, then 9 pi-pair and 9 pi-stack groups.

    Args:
        pot_list: iterable of 152-long sequences; each is drawn as one
            scatter series.
        figpath: output path for the saved figure (500 dpi, tight bbox).
    """
    # ----------------------------------------------------------
    # import
    # ----------------------------------------------------------
    import numpy as np
    import matplotlib.pyplot as plt
    # ----------------------------------------------------------
    # constants
    # ----------------------------------------------------------
    aminos = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO',
              'SER', 'THR', 'TRP', 'TYR', 'VAL']
    baces = ['A', 'C', 'G', 'U']
    aminos_pi = ["ARG", "TRP", "ASN", "HIS", "GLU", "GLN", "TYR", "PHE", "ASP"]
    pi_type = ["pair", "stack"]
    separator = ' |'
    # ----------------------------------------------------------
    # tick labels: the ILE/GLU entries carry section titles, and entries
    # equal to aminos[-1]/aminos[3] carry the section-separator bars
    # ----------------------------------------------------------
    pair_list = []
    for amino in aminos:
        if amino == 'ILE':
            # typo fix: label read 'Hyddrogen Bond'
            pair_list.append(f'ACGU\n{amino}\n\nHydrogen Bond')
        elif amino == aminos[-1]:
            pair_list.append(f'ACGU\n{amino}\n{separator}\n{separator}')
        else:
            pair_list.append(f'ACGU\n{amino}')
    for amino in aminos_pi:
        if amino == 'GLU':
            pair_list.append(f'ACGU\n{amino}\n\nPi Pair')
        elif amino == aminos[3]:
            pair_list.append(f'ACGU\n{amino}\n{separator}\n{separator}')
        else:
            pair_list.append(f'ACGU\n{amino}\n')
    for amino in aminos_pi:
        if amino == 'GLU':
            pair_list.append(f'ACGU\n{amino}\n\nPi Stack')
        elif amino == aminos[3]:
            pair_list.append(f'ACGU\n{amino}\n\n')
        else:
            pair_list.append(f'ACGU\n{amino}\n')
    print(pair_list)
    print(pot_list[0])
    # ----------------------------------------------------------
    # main
    # ----------------------------------------------------------
    fig = plt.figure(1, figsize=[27, 5])
    ax = fig.add_subplot(1, 1, 1)
    ax.tick_params(length=0)
    minor_ticks = np.arange(1.5, 152.5, 4)
    plt.xticks(minor_ticks, pair_list, size=10)
    for i in range(len(pot_list)):
        plt.scatter(list(range(0, 152)), pot_list[i])
    plt.hlines(y=0, xmin=-0.5, xmax=152, lw=0.5)
    plt.xlim([0, 150])
    major_ticks = np.arange(-0.5, 155.5, 4)
    # white tick color + black labelcolor hides the tick marks but keeps text
    plt.tick_params(labelsize=11.5, colors='white', labelcolor='black')
    ax.set_xticks(major_ticks, minor=True)
    ax.set_xticks(minor_ticks)
    # NOTE(review): the `b=` keyword was removed in matplotlib >= 3.6; switch
    # to `visible=True` once the minimum matplotlib version allows it
    plt.grid(b=True, which='minor', ls='--')
    plt.savefig(figpath, bbox_inches='tight', dpi=500)
    # bug fix: plt.show() takes no bbox_inches kwarg (TypeError on modern
    # matplotlib); the tight bbox is already applied by savefig above
    plt.show()
def plot_pot_pi_plus2(pot_list, figpath, best_pot):  # Eval 4
    """Same plot as plot_pot_pi_plus, with *best_pot* overlaid as black stars.

    Args:
        pot_list: iterable of 152-long sequences; each is one scatter series.
        figpath: output path for the saved figure (500 dpi, tight bbox).
        best_pot: 152-long sequence highlighted on top of the other series.
    """
    # ----------------------------------------------------------
    # import
    # ----------------------------------------------------------
    import numpy as np
    import matplotlib.pyplot as plt
    # ----------------------------------------------------------
    # constants
    # ----------------------------------------------------------
    aminos = ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLU', 'GLN', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET', 'PHE', 'PRO',
              'SER', 'THR', 'TRP', 'TYR', 'VAL']
    baces = ['A', 'C', 'G', 'U']
    aminos_pi = ["ARG", "TRP", "ASN", "HIS", "GLU", "GLN", "TYR", "PHE", "ASP"]
    pi_type = ["pair", "stack"]
    separator = ' |'
    # ----------------------------------------------------------
    # tick labels (see plot_pot_pi_plus for the label-layout scheme)
    # ----------------------------------------------------------
    pair_list = []
    for amino in aminos:
        if amino == 'ILE':
            # typo fix: label read 'Hyddrogen Bond'
            pair_list.append(f'ACGU\n{amino}\n\nHydrogen Bond')
        elif amino == aminos[-1]:
            pair_list.append(f'ACGU\n{amino}\n{separator}\n{separator}')
        else:
            pair_list.append(f'ACGU\n{amino}')
    for amino in aminos_pi:
        if amino == 'GLU':
            pair_list.append(f'ACGU\n{amino}\n\nPi Pair')
        elif amino == aminos[3]:
            pair_list.append(f'ACGU\n{amino}\n{separator}\n{separator}')
        else:
            pair_list.append(f'ACGU\n{amino}\n')
    for amino in aminos_pi:
        if amino == 'GLU':
            pair_list.append(f'ACGU\n{amino}\n\nPi Stack')
        elif amino == aminos[3]:
            pair_list.append(f'ACGU\n{amino}\n\n')
        else:
            pair_list.append(f'ACGU\n{amino}\n')
    print(pair_list)
    # ----------------------------------------------------------
    # main
    # ----------------------------------------------------------
    fig = plt.figure(1, figsize=[27, 5])
    ax = fig.add_subplot(1, 1, 1)
    ax.tick_params(length=0)
    minor_ticks = np.arange(1.5, 152.5, 4)
    plt.xticks(minor_ticks, pair_list, size=10)
    for i in range(len(pot_list)):
        plt.scatter(list(range(0, 152)), pot_list[i])
    # highlight the selected/best potential on top of the candidates
    plt.scatter(list(range(0, 152)), best_pot, s=120, color='black', marker="*")
    plt.hlines(y=0, xmin=-0.5, xmax=152, lw=0.5)
    plt.xlim([0, 150])
    major_ticks = np.arange(-0.5, 155.5, 4)
    plt.tick_params(labelsize=13.5, colors='white', labelcolor='black')
    ax.set_xticks(major_ticks, minor=True)
    ax.set_xticks(minor_ticks)
    # NOTE(review): the `b=` keyword was removed in matplotlib >= 3.6; switch
    # to `visible=True` once the minimum matplotlib version allows it
    plt.grid(b=True, which='minor', ls='--')
    plt.savefig(figpath, bbox_inches='tight', dpi=500)
    # bug fix: plt.show() takes no bbox_inches kwarg (TypeError on modern
    # matplotlib); the tight bbox is already applied by savefig above
    plt.show()
|
from unittest import TestCase
from webauthn.helpers.tpm.parse_cert_info import parse_cert_info
from webauthn.helpers.tpm.structs import TPM_ALG, TPM_ST
class TestWebAuthnTPMParseCertInfo(TestCase):
    """Tests parse_cert_info against a captured TPMS_ATTEST blob."""
    def test_properly_parses_cert_info_bytes(self) -> None:
        # raw attestation structure captured from a real TPM response
        cert_info = b'\xffTCG\x80\x17\x00"\x00\x0bW"f{J5_9"\x15\tL\x01\xd5e\xbcr\xc6\xc9\x03\xbc#\xb5m\xee\xb5yI+j\xe6\xce\x00\x14`\x0bD(A\x99\xf3\xd3\x12I[\x04\x1f\xf4\xe7\xfb)\xc8\x02\x8f\x00\x00\x00\x00\x1a0)Z\x16\xb0}\xb1R\'s\xf8\x01\x97g1K\xfaf`T\x00"\x00\x0b\xe7\x1c"\x90\x07\xdeA\xe1w\xe0\xb3F\xe1\x07\x02\x8c\x16b\xe1\r\x9e\xb8\xae\xe7\xa95\xac\xf6\x1a\xedx\x89\x00"\x00\x0b\x7f\xe8\x84\xdaC\xa7\xc5?\xcept,\xa9\nA\x99\x93\xbc\x1f\x15\xcbs\x7f\xe0\x1a\x96u\xca\xe4\x8f\x86\x81'
        output = parse_cert_info(cert_info)
        # fixed TPM_GENERATED magic prefix
        assert output.magic == b"\xffTCG"
        assert output.type == TPM_ST.ATTEST_CERTIFY
        assert (
            output.qualified_signer
            == b'\x00\x0bW"f{J5_9"\x15\tL\x01\xd5e\xbcr\xc6\xc9\x03\xbc#\xb5m\xee\xb5yI+j\xe6\xce'
        )
        assert (
            output.extra_data
            == b"`\x0bD(A\x99\xf3\xd3\x12I[\x04\x1f\xf4\xe7\xfb)\xc8\x02\x8f"
        )
        assert output.firmware_version == b"\x97g1K\xfaf`T"
        # Attested
        assert output.attested.name_alg == TPM_ALG.SHA256
        assert output.attested.name_alg_bytes == b"\x00\x0b"
        assert (
            output.attested.name
            == b'\x00\x0b\xe7\x1c"\x90\x07\xdeA\xe1w\xe0\xb3F\xe1\x07\x02\x8c\x16b\xe1\r\x9e\xb8\xae\xe7\xa95\xac\xf6\x1a\xedx\x89'
        )
        assert (
            output.attested.qualified_name
            == b"\x00\x0b\x7f\xe8\x84\xdaC\xa7\xc5?\xcept,\xa9\nA\x99\x93\xbc\x1f\x15\xcbs\x7f\xe0\x1a\x96u\xca\xe4\x8f\x86\x81"
        )
        # Clock Info
        assert output.clock_info.clock == b"\x00\x00\x00\x00\x1a0)Z"
        assert output.clock_info.reset_count == 380665265
        assert output.clock_info.restart_count == 1378317304
        assert output.clock_info.safe is True
|
import os
from flask import Flask
from flask import render_template
from flask import request
app = Flask(__name__)

#Create our index or root / route
@app.route("/")
def home():
    """Landing page."""
    return render_template('home.html')

@app.route("/index", methods=["GET", "POST"])
def index():
    """Render a hard-coded demo list of notes."""
    notes = [{ "name":"First Note Ever",
               "author":"Abhiram",
               "content":"This text is coming from the content field"
             },
             { "name":"Finish this Blog",
               "author":"Abhiram",
               "content":"Show the template control structures"
             },
             {"name":"Deploy this app to OpenShift",
              "author":"Abhiram",
              "content":"When finished coding this app deploy it to OpenShift"
             }]
    return render_template("index.html",notes=notes)

@app.route('/about')
def about():
    """Static about page."""
    return render_template('about.html')

if __name__ == "__main__":
    # bug fix: debug expects a boolean -- the original passed the string
    # "True", which only enabled debug mode via truthiness
    app.run(debug=True)
#!/usr/bin/env python
# coding: utf-8
# In[6]:
# Notebook export: provision tools, then download the Conceptual 12M dataset
# and feed it to aria2c for parallel image downloads.
get_ipython().system('apt-get -y install git wget aria2')
get_ipython().run_line_magic('cd', '/workspace/')
# In[9]:
get_ipython().system('wget --no-clobber https://storage.googleapis.com/conceptual_12m/cc12m.tsv')
# In[5]:
# NOTE(review): duplicate download of the same TSV, this time without
# --no-clobber -- likely a leftover cell; one of the two wget calls is redundant
get_ipython().system('wget https://storage.googleapis.com/conceptual_12m/cc12m.tsv')
# In[21]:
get_ipython().run_line_magic('cd', '/workspace')
from tqdm import tqdm  # NOTE(review): imported but never used below
# Convert the TSV (url<TAB>caption per line) into an aria2c input file:
# each entry is the URL followed by an indented "out=<index>.jpg" option.
with open("/workspace/cc12m.tsv", 'r', encoding='utf-8') as reader, open("/workspace/cc12m_dl.txt", 'w', encoding='utf-8') as writer:
    for i, line in enumerate(reader):
        url = line.split("\t")[0]
        writer.write(url + "\n\tout=" + str(i) + ".jpg\n")
# In[22]:
get_ipython().run_line_magic('mkdir', 'cc12m_images')
get_ipython().run_line_magic('cp', 'cc12m_dl.txt cc12m_images')
get_ipython().run_line_magic('cd', '/workspace/cc12m_images')
# In[ ]:
"cc12m_dl.txt"
# download with 16 parallel aria2c jobs, quiet mode
get_ipython().system('aria2c -i cc12m_dl.txt -j 16 -q --deferred-input true')
|
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import range_count
class RangeCount(AggregationPrimitive):
    """Count observed values within the interval [min, max).
    Args:
        min (float) : The inclusive lower bound of the range.
        max (float) : The exclusive upper bound of the range.
    Docstring source:
    https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.range_count
    """
    name = "range_count"
    input_types = [Numeric]
    return_type = Numeric
    stack_on_self = False

    def __init__(self, min, max):
        # Bounds keep the tsfresh parameter names (they shadow the builtins,
        # but renaming would break keyword callers).
        self.min = min
        self.max = max

    def get_function(self):
        # Close over the configured bounds and delegate to tsfresh.
        def range_count_wrapper(values):
            return range_count(values, min=self.min, max=self.max)
        return range_count_wrapper
|
"""Script for running Pokemon Simulation."""
from threading import Thread
from queue import Queue
from time import time
from agent.basic_pokemon_agent import PokemonAgent
from agent.basic_planning_pokemon_agent import BasicPlanningPokemonAgent
from battle_engine.pokemon_engine import PokemonEngine
from file_manager.log_writer import LogWriter
from file_manager.team_reader import TeamReader
from simulation.base_type_logging_simulation import BaseLoggingSimulation
from simulation.base_simulation import load_config
from stats.calc import calculate_avg_elo
class PokemonSimulation(BaseLoggingSimulation):
    """Class for Pokemon Simulation."""
    def __init__(self, **kwargs):
        """
        Initialize this simulation.
        Args:
            config (str): Filename for the population configs.
            data_delay (int): Number of matches between gathering type data.
            multithread (bool): Whether or not to run this simulation multithreaded.
        """
        # kwargs is forwarded to the base class, augmented with the game
        # engine and a log-file prefix.
        pkmn_kwargs = kwargs
        pkmn_kwargs["game"] = PokemonEngine()
        pkmn_kwargs["prefix"] = "PKMN"
        self.config = load_config(kwargs["config"])
        self.type_log_writer = None  # created later by init_type_log_writer()
        self.data_delay = kwargs["data_delay"]
        self.multithread = kwargs.get("multithread", False)
        super().__init__(pkmn_kwargs)
    def add_agents(self):
        """Add the agents to this model."""
        for conf in self.config:
            # One team file per config entry; the first parsed team is used
            # for every agent spawned from that entry.
            conf_tr = TeamReader(prefix=conf["team_file"])
            conf_tr.process_files()
            conf_team = conf_tr.teams[0]
            # Spawn this entry's share of the total player population.
            for _ in range(int(self.num_players * conf["proportion"])):
                pkmn_agent = None
                if conf["agent_class"] == "basic":
                    pkmn_agent = PokemonAgent(
                        team=conf_team
                    )
                    pkmn_agent.type = conf["agent_type"]
                elif conf["agent_class"] == "basicplanning":
                    pkmn_agent = BasicPlanningPokemonAgent(
                        tier=conf["agent_tier"],
                        team=conf_team
                    )
                    pkmn_agent.type = conf["agent_type"]
                else:
                    raise RuntimeError("Invalid agent_class: {}".format(conf["agent_class"]))
                self.ladder.add_player(pkmn_agent)
    def init_type_log_writer(self):
        """Initialize Type Average Elo LogWriter."""
        # One log column per configured agent type.
        header = []
        for conf in self.config:
            header.append(conf["agent_type"])
        self.type_log_writer = LogWriter(header, prefix="PKMNTypes", directory=self.directory)
    def run(self):
        """Run this simulation (sequentially, or on four worker threads)."""
        if not self.multithread:
            # Fall back to the sequential base-class loop.
            super().run()
            return
        # Work queue holds one token per battle to run; the results queues
        # are drained only after every battle has completed.
        battle_queue = Queue()
        battle_results_queue = Queue()
        type_results_queue = Queue()
        for num in range(self.num_games):
            battle_queue.put(num)
        start_time = time()
        # Threads to run the battles
        for _ in range(4):
            battle_thread = Thread(target=battle, args=(self,
                                                        battle_queue,
                                                        battle_results_queue,
                                                        type_results_queue,
                                                        start_time))
            battle_thread.start()
        battle_queue.join()
        # All battles done: flush per-battle logs, then per-type Elo lines.
        while not battle_results_queue.empty():
            output, player1, player2 = battle_results_queue.get()
            self.write_player_log(output, player1, player2)
            battle_results_queue.task_done()
        while not type_results_queue.empty():
            data_line = type_results_queue.get()
            self.type_log_writer.write_line(data_line)
            type_results_queue.task_done()
def battle(main_sim, battle_queue, output_queue, type_queue, start_time):
    """
    Code for a single battle thread to run.
    Args:
        main_sim (BaseSimulation): Simulation that is spawning this thread.
        battle_queue (Queue): Queue with placeholders to count number of
            battles remaining.
        output_queue (Queue): Queue to hold the results of the battles.
        type_queue (Queue): Queue to hold the rating data broken down by agent type.
        start_time (time): Time object to hold simulation starting time.
    """
    while not battle_queue.empty():
        battle_queue.get()
        results = None
        # Retry until the ladder yields a result; contention on the shared
        # ladder can raise RuntimeError, which is printed and retried.
        while results is None:
            try:
                results = main_sim.ladder.run_game()
            except RuntimeError as rte:
                print(rte, main_sim.ladder.thread_lock.locked())
        output_queue.put(results)
        # Every data_delay battles, snapshot the average Elo per agent type.
        if battle_queue.qsize() % main_sim.data_delay == 0:
            type_queue.put(calculate_avg_elo(main_sim.ladder))
        main_sim.print_progress_bar(main_sim.num_games - battle_queue.qsize(), start_time)
        battle_queue.task_done()
|
from superhero_api import SuperHeroAPI, API_KEY
class SuperHeroApp():
    """Tiny console app that compares two superheroes via SuperHeroAPI."""

    def __init__(self):
        self._s = SuperHeroAPI(API=API_KEY)
        self._status = True  # main-loop flag; set False to stop run()
        self._prelude_text = '''Ну привет странник. че забрел? либо иди отседа либо используй команды.
help - список команд
compare name name - сравнить по силе двух челиков
exit - иди отседа'''

    def _change_status(self):
        # Bug fix: this was named _set_status while the dispatcher called
        # _change_status(), so 'exit' raised AttributeError.
        self._status = False

    def run(self):
        """Prompt loop: read a line, tokenize it, dispatch it, until exit."""
        print(self._prelude_text)
        while self._status:
            _input = input('Пиши команды либо go отсуда, help для команд: ')
            command = self._parse_command(_input)
            self._command_dispatcher(command)

    def _parse_command(self, _input):
        """Return the lower-cased whitespace tokens of the input line."""
        # Bug fix: removed a leftover debug print of a '-'-split of the input.
        return _input.strip().lower().split()

    def _command_dispatcher(self, command):
        """Route a tokenized command to its handler."""
        if len(command) <= 1:
            if not command or command[0] == 'exit':
                self._change_status()
            elif command[0] == 'help':
                print(self._prelude_text)
        else:
            action, *arguments = command
            if action == 'compare':
                self._compare_heroes(arguments)

    def _compare_heroes(self, heroes):
        """Fight simulation: each hero loses HP equal to the other's power."""
        hero_one = self._s.get_hero_stats(heroes[0])
        hero_two = self._s.get_hero_stats(heroes[1])
        power_one, hp_one = int(hero_one['power']), int(hero_one['durability'])
        power_two, hp_two = int(hero_two['power']), int(hero_two['durability'])
        if hp_two - power_one > hp_one - power_two:
            print(f'{heroes[1].title()} победил! Осталось здоровья: {hp_two - power_one}')
        else:
            # Bug fix: the losing branch printed the OTHER hero's remaining HP.
            print(f'{heroes[0].title()} победил! Осталось здоровья: {hp_one - power_two}')
if __name__ == '__main__':
    # Entry point: build the console app and start its command loop.
    SuperHeroApp().run()
|
def describe_city(city, country):
    """Print a one-line sentence locating *city* in *country*."""
    print(f"{city} is in {country}")


describe_city("Karachi", "Pakistan")
describe_city("Bonn", "Germany")
describe_city("Moscow", "Russia")
def extract_menu_day_elements(day, parent_element):
    """Return the tab container div holding the menu plan for weekday *day*."""
    tab_id = f'menu-plan-tab{day}'
    return parent_element.find('div', {'id': tab_id})
def extract_menu_item(parent_element):
    """Return every element carrying the 'menu-item' CSS class."""
    selector = {'class': 'menu-item'}
    return parent_element.findAll('div', selector)
def extract_menu_title(parent_element):
    """Return the menu title text with soft hyphens (U+00AD) removed."""
    heading = parent_element.find('h2', {'class': 'menu-title'})
    return heading.get_text().replace(u'\xAD', u'')
def extract_menu_description(parent_element):
    """Return the menu description text with soft hyphens (U+00AD) removed."""
    paragraph = parent_element.find('p', {'class': 'menu-description'})
    return paragraph.get_text().replace(u'\xAD', u'')
def extract_weekdays_elements(parent_element):
    """Return the navigation container listing the weekdays."""
    selector = {'class': 'day-nav'}
    return parent_element.find('div', selector)
def extract_single_day(day, parent_element):
    """Return the label element selecting the tab for weekday *day*."""
    label_for = f'mp-tab{day}'
    return parent_element.find('label', {'for': label_for})
def extract_weekday(parent_element):
    """Return the first text node of the weekday <span>."""
    day_span = parent_element.find('span', {'class': 'day'})
    return day_span.contents[0]
def extract_date(parent_element):
    """Return the first text node of the date <span>."""
    date_span = parent_element.find('span', {'class': 'date'})
    return date_span.contents[0]
|
import FWCore.ParameterSet.Config as cms
# HcalTimingAnalyzer configuration: input collection labels plus per-subdetector
# hit-energy thresholds and histogram binning for rechit timing/energy plots.
myan = cms.EDAnalyzer('HcalTimingAnalyzer',
    eventDataPset = cms.untracked.PSet(
        simHitLabel = cms.untracked.InputTag(""), # change it if you want 'em
        hbheDigiLabel = cms.untracked.InputTag(""), # change it if you want 'em
        hbheRechitLabel = cms.untracked.InputTag("hbhereco"),
        hfRechitLabel = cms.untracked.InputTag("hfreco"),
        hoRechitLabel = cms.untracked.InputTag("horeco"),
        metLabel = cms.untracked.InputTag("metNoHF"),
        twrLabel = cms.untracked.InputTag("towerMaker")
    ),
    # Empty vectors mean "no specific towers/events selected".
    tgtTwrId = cms.vint32(),
    eventNumbers = cms.vint32(),
    # Minimum hit energies (GeV) per subdetector.
    minHitGeVHB = cms.double(0.5),
    minHitGeVHE = cms.double(0.6), # = avg (0.5 single width, 0.7 double)
    minHitGeVHO = cms.double(0.5),
    minHitGeVHF1 = cms.double(1.2),
    minHitGeVHF2 = cms.double(1.8),
    # Histogram axis ranges/binning (energy in GeV, time in ns).
    hcalRecHitEscaleMinGeV = cms.double(-10.5),
    hcalRecHitEscaleMaxGeV = cms.double(500.5),
    ecalRecHitTscaleNbins = cms.int32(401),
    ecalRecHitTscaleMinNs = cms.double(-100.5),
    ecalRecHitTscaleMaxNs = cms.double(100.5),
    hcalRecHitTscaleNbins = cms.int32(401),
    hcalRecHitTscaleMinNs = cms.double(-90.5),
    hcalRecHitTscaleMaxNs = cms.double(110.5),
)
|
import pandas as pd
import numpy as np
from quant.stock.stock import Stock
from quant.stock.date import Date
import statsmodels.api as sm
def PriceDelay(beg_date, end_date):
    """Price-delay factor.

    Regression 1: stock returns on contemporaneous (equal-weighted) market
    returns over a LongTerm-day window. Regression 2: stock returns on
    contemporaneous AND N-day-lagged market returns over the same window.
    The factor is R2(reg 1) / R2(reg 2); larger values mean the stock's
    price reflects past market information less (more delay).

    Fix: the removed pandas ``.ix`` indexer (dropped in pandas 1.0) is
    replaced with label-based ``.loc`` throughout.
    """
    # param
    #################################################################################
    LongTerm = 40
    HalfTerm = int(LongTerm/2)
    N = 5
    factor_name = "PriceDelay"
    # read data
    #################################################################################
    pct = Stock().get_factor_h5("Pct_chg", None, "primary_mfc").T
    # calculate data daily
    #################################################################################
    date_series = Date().get_trade_date_series(beg_date, end_date)
    date_series = list(set(date_series) & set(pct.index))
    date_series.sort()
    res = pd.DataFrame([], index=pct.index, columns=pct.columns)
    for i in range(0, len(date_series)):
        current_date = date_series[i]
        data_beg_date_1 = Date().get_trade_date_offset(current_date, -(LongTerm-1))
        pct_before_1 = pct.loc[data_beg_date_1:current_date, :]
        market_pct_1 = pct_before_1.mean(axis=1)
        data_beg_date_2 = Date().get_trade_date_offset(current_date, -(LongTerm+N-1))
        current_date_2 = Date().get_trade_date_offset(current_date, -N)
        pct_before_2 = pct.loc[data_beg_date_2:current_date_2, :]
        market_pct_2 = pct_before_2.mean(axis=1)
        if len(pct_before_2) == len(pct_before_1) and len(pct_before_1) == LongTerm:
            # Re-index the lagged market series onto the current window's dates
            # so it lines up row-for-row in the regression frame.
            market_pct_2 = pd.DataFrame(market_pct_2.values, index=market_pct_1.index)
            print('Calculating factor %s at date %s' % (factor_name, current_date))
            for i_code in range(len(pct_before_1.columns)):
                code = pct_before_1.columns[i_code]
                reg_data = pd.concat([pct_before_1[code], market_pct_1, market_pct_2], axis=1)
                reg_data.columns = [code, 'Market_Pct_1', 'Market_Pct_2']
                reg_data = reg_data.dropna()
                # Require at least half a window of valid observations.
                if len(reg_data) > HalfTerm:
                    y = reg_data[code].values
                    x = reg_data['Market_Pct_1'].values
                    x = sm.add_constant(x)
                    model = sm.OLS(y, x).fit()
                    r1 = model.rsquared
                    x = reg_data[['Market_Pct_1', 'Market_Pct_2']].values
                    x = sm.add_constant(x)
                    model = sm.OLS(y, x).fit()
                    r2 = model.rsquared
                    res.loc[current_date, code] = r1 / r2
        else:
            print('Calculating factor %s at date %s is null' % (factor_name, current_date))
    res = res.dropna(how='all').T
    # save data
    #############################################################################
    Stock().write_factor_h5(res, factor_name, "alpha_dfc")
    return res
#############################################################################
if __name__ == '__main__':
    from datetime import datetime

    # Compute the factor from 2016-01-01 up to today and show the result.
    start = '20160101'
    stop = datetime.today()
    result = PriceDelay(start, stop)
    print(result)
|
# univariate cnn example
from numpy import array
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
import pandas as pd
import numpy as np
import sklearn.metrics as skm
import matplotlib.pyplot as plt
# split a univariate sequence into samples
# def split_sequence(sequence, n_steps):
# X, y = list(), list()
# for i in range(len(sequence)):
# # find the end of this pattern
# end_ix = i + n_steps
# # check if we are beyond the sequence
# if end_ix > len(sequence)-1:
# break
# # gather input and output parts of the pattern
# seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
# X.append(seq_x)
# y.append(seq_y)
# return array(X), array(y)
def split_sequences(data, n_steps):
    """Build supervised samples from 10-minute rows.

    Each sample is n_steps hourly means of the feature columns (all but the
    last); the target is the last column's value at the end of the window.
    Returns (X, y) as numpy arrays; also prints X's shape.
    """
    data = data.values
    X, y = list(), list()
    for i in range(len(data)):
        # One sample spans n_steps hours = n_steps*6 ten-minute rows.
        end_ix = i + n_steps*6
        if end_ix > len(data):
            break
        # NOTE(review): assumes 12 feature columns. This buffer is replaced
        # on the first inner iteration (index == i); it only leaks into X if
        # the inner break fires first, which the outer bound check appears
        # to prevent — confirm before relying on it.
        Kx = np.empty((1, 12))
        for index in np.arange(i, i+(n_steps*6), step=6, dtype=int):
            eachhour = index + 6
            if eachhour > len(data) or i+(n_steps*6) > len(data):
                break
            # Hourly mean of the feature columns for this 6-row slice.
            a = data[index: eachhour, : -1]
            hourlymean_x = np.mean(a, axis=0)
            # Target value: last column at the final row of this hour.
            hourlymean_y = data[eachhour-1, -1]
            hourlymean_x = hourlymean_x.reshape((1, hourlymean_x.shape[0]))
            if index != i:
                Kx = np.append(Kx, hourlymean_x, axis=0)
            else:
                Kx = hourlymean_x
        # y keeps only the LAST hour's target of the window.
        X.append(Kx)
        y.append(hourlymean_y)
    print(np.array(X).shape)
    return np.array(X), np.array(y)
def temporal_horizon(df, pd_steps, target):
    """Append a 'Target_<target>' column: *target* shifted pd_steps hours
    into the future.

    Rows are assumed to be 10-minute readings (TODO confirm), so the shift
    is pd_steps*6 rows; the trailing rows that have no future value are
    dropped. Prints the new column's name and returns the trimmed frame.
    """
    pd_steps = pd_steps * 6
    target_values = df[[target]]
    # Drop the first pd_steps rows so row k holds the value from k+pd_steps.
    target_values = target_values.drop(
        target_values.index[0: pd_steps], axis=0)
    target_values.index = np.arange(0, len(target_values[target]))
    # Trim the matching number of rows off the end of the features.
    df = df.drop(
        df.index[len(df.index)-pd_steps: len(df.index)], axis=0)
    df['Target_'+target] = target_values
    print('Target_'+target)
    return df
path = 'Sondes_data/train/train_data/'
file = 'leavon_wo_2019-07-01-2020-01-15.csv'
# Load the training sonde data.
dataset = pd.read_csv(path+file)
# Number of hourly time steps per sample.
n_steps = 3
# Build the 12-hour-ahead dissolved-oxygen target, keep only that pair.
df = temporal_horizon(dataset, 12, 'dissolved_oxygen')
df = df[['dissolved_oxygen', 'Target_dissolved_oxygen']]
# split into samples
X, y = split_sequences(df, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = X.shape[2]
X = X.reshape((X.shape[0], X.shape[1], n_features))
# Define a small 1-D CNN regressor.
model = Sequential()
model.add(Conv1D(64, 2, activation='relu', input_shape=(n_steps, n_features)))
model.add(MaxPooling1D())
model.add(Flatten())
model.add(Dense(50, activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X, y, epochs=1000, verbose=0)
# Demonstrate prediction on the held-out test file.
testpath = 'Sondes_data/test/test_data/leavon_2019-07-01-2020-01-15.csv'
# Bug fix: the evaluation below previously re-read the *training* file
# (path+file), so the reported r2 was a training score; read testpath.
dataset = pd.read_csv(testpath)
n_steps = 3
df = temporal_horizon(dataset, 12, 'dissolved_oxygen')
df = df[['dissolved_oxygen', 'Target_dissolved_oxygen']]
# split into samples
X, y = split_sequences(df, n_steps)
# reshape from [samples, timesteps] into [samples, timesteps, features]
n_features = X.shape[2]
X = X.reshape((X.shape[0], X.shape[1], n_features))
yhat = model.predict(X, verbose=0)
print(skm.r2_score(y, yhat))
# Scatter actual vs. predicted values over the sample index.
plt.scatter(np.arange(len(y)),
            y, s=1)
plt.scatter(np.arange(len(yhat)),
            yhat, s=1)
plt.legend(['actual', 'predictions'],
           loc='upper right')
plt.show()
|
# class in the poppins thingamajig
from poplib import *
class poppinsMary:
    """Holds the POP3 connection settings used by poppins.

    NOTE: the credentials are hard-coded placeholders; replace them with
    real values loaded from the environment, not from source control.
    """
    def __init__(self):
        # Bug fix: these were plain locals, so instances never exposed
        # popIp/popAddr/popPass and poppins() crashed with AttributeError.
        self.popIp = 'aaa.bbb.ccc.ddd'
        self.popAddr = 'email@address.com'
        self.popPass = 'EmailPassword'
class poppins:
    """Connects to the POP3 server with poppinsMary's settings and counts
    the messages waiting in the mailbox (Python 2 syntax)."""
    def __init__(self):
        x=poppinsMary()
        self.popSrvr = POP3(x.popIp) # ip works better than url
        print self.popSrvr.getwelcome() # todo: convert print to debug
        print self.popSrvr.user(x.popAddr) # todo: chng print to dbg
        print self.popSrvr.pass_(x.popPass) # todo: s/print/debug.validate(
        # list()[1] is the list of 'msgnum octets' lines; its length is the
        # number of messages in the mailbox.
        messagesInfo = self.popSrvr.list()[1]
        self.numMessages = len(messagesInfo)
|
from django.db import models
class Company(models.Model):
    """A crawled company and its headline attributes."""
    # NOTE(review): auto_created is an internal Django flag; on a concrete
    # primary-key field an AutoField is the usual choice — confirm intent.
    id = models.IntegerField(primary_key=True, auto_created=True)
    ticker_symbol = models.CharField(max_length=6, unique=True)
    name = models.CharField(max_length=150)
    url = models.CharField(max_length=150)
    business = models.CharField(max_length=150)
    listing_bourse = models.CharField(max_length=10)
    revenue = models.BigIntegerField(null=True)
    # auto_now=True refreshes this on every save, recording the crawl time.
    crawled_at = models.DateTimeField(null=False, auto_created=True, auto_now=True)
class CompanyInfo(models.Model):
    """Free-form key/value parameters per company."""
    ticker_symbol = models.CharField(max_length=6)
    param_name = models.CharField(max_length=50)
    param_value = models.CharField(max_length=600)
    class Meta:
        # At most one value per parameter name per company.
        unique_together = ('ticker_symbol', 'param_name',)
|
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution(object):
    """LeetCode 143 'Reorder List': L0->L1->...->Ln becomes
    L0->Ln->L1->Ln-1->... The list is rebuilt from a value stack and
    spliced back onto the original head node."""
    def __init__(self):
        # head/tail of the rebuilt (reordered) chain.
        self.head = None
        self.tail = None
    def addNode(self, node):
        # Append to the rebuilt chain; assumes self.tail is already set.
        self.tail.next = node
        self.tail = node
    '''
    def getElement(self, node):
        result = []
        curNode = node
        while curNode is not None:
            result.append(curNode.val)
            curNode = curNode.next
        print(result)
    '''
    def reorderList(self, head):
        """Reorder the list in the interleaved front/back pattern in place."""
        self.__init__()
        if head is None or head.next is None:
            return
        # Collect all values so both ends can be indexed.
        stack = []
        curNode = head
        while curNode is not None:
            stack.append(curNode.val)
            curNode = curNode.next
        index = 0
        self.head = ListNode(stack[0])
        self.tail = self.head
        # Alternate taking from the back (len-1-index) and the front (index),
        # stopping at the middle; the parity checks handle even/odd lengths.
        while index <= (len(stack)+1)//2 - 1:
            self.addNode(ListNode(stack[len(stack)-1-index]))
            if len(stack) % 2 == 0:
                if index == (len(stack)+1)//2 - 1:
                    break
            index+=1
            self.addNode(ListNode(stack[index]))
            if len(stack) % 2 == 1:
                if index == (len(stack)+1)//2 - 1:
                    break
        # Splice the rebuilt chain in after the original head node.
        head.next = self.head.next
        #self.getElement(head)
|
from flask import render_template, flash, redirect, request, url_for, send_from_directory,session
from app import app
from .forms import Setting
from .forms import Add
from werkzeug import secure_filename
import os
import time
import random
import MySQLdb
import hashlib
import models_admin
from random import randint
#from splinter import Browser
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_EXTENSIONS']
@app.route('/')
@app.route('/index')
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/lihatadmin', methods=['GET', 'POST'])
def lihatadmin():
    """Render the admin list page with all admin rows."""
    # Fix: the local was named `list` (shadowing the builtin) and was
    # pre-assigned an empty list that was immediately overwritten.
    admins = models_admin.view()
    return render_template('lihatadmin.html', posts=admins)
@app.route('/setting', methods=['GET', 'POST'])
def setting():
    """Render the settings page with the add-admin form and admin rows."""
    form = Add()
    # Fix: the local was named `list` (shadowing the builtin) with a dead
    # empty-list pre-assignment.
    admins = models_admin.view()
    return render_template('setting.html', posts=admins, form=form)
@app.route('/addadmin', methods=['GET', 'POST'])
def addadmin():
    """Create a new admin from the submitted form (with photo upload).

    On GET / invalid form the page is rendered with the current admin list.
    NOTE(review): the local `list` shadows the builtin — consider renaming.
    """
    form = Add()
    list = []
    if form.validate_on_submit():
        username = str(request.form.get('username'))
        password = str(request.form.get('password'))
        ja = str(request.form.get('ja'))
        jk = str(request.form.get('jk'))
        file = request.files['file']
        if file and allowed_file(file.filename):
            uniq = (time.strftime("%d%m%Y%H%M%S")+str(randint(0,10000))) # unique name from timestamp + random suffix
            filename = secure_filename(file.filename) # sanitized original upload name
            ekstension = filename.rsplit('.', 1)[1] # extension
            filename = uniq + "." + ekstension # final stored file name
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            if(models_admin.add(username,password,ja,jk,filename)):
                flash('Data Admin Berhasil Ditambah!')
                return redirect('/addadmin')
        else:
            flash('Jenis Photo Hanya Boleh JPG, JPEG, Dan PNG!')
            return redirect('/addadmin')
    else:
        list = models_admin.view()
        # The two triple-quoted blocks below are commented-out legacy code
        # kept as inert string literals; consider deleting them.
        '''for row in list:
        id_admin = list[0]
        user = list[1]
        pswd = list[2]
        status = list[3]
        jk = list[4]
        photo = list[5]
        listData.append(
        {
        'id_admin':id_admin,
        'user':user,
        'pswd':pswd,
        'status':status,
        'jk':jk,
        'photo':photo
        }
        )'''
        '''db = MySQLdb.connect("127.0.0.1","root","","latihanwebpython1" )
        cursor = db.cursor()
        try:
        cursor.execute("select * from admin order by id_admin desc") #menjalankan perintah sql
        results = cursor.fetchall()
        for row in results:
        id_admin = str(row[0])
        user = row[1]
        pswd = row[2]
        status = row[3]
        jk = row[4]
        photo = row[5]
        listData.append(
        {
        'id_admin':id_admin,
        'user':user,
        'pswd':pswd,
        'status':status,
        'jk':jk,
        'photo':photo
        }
        )
        except:
        print "Error: unable to fecth data"
        db.close()'''
    return render_template('addadmin.html',form=form,posts=list)
@app.route('/hapus/<id>/<photo>', methods=['GET', 'POST'])
def hapus(id, photo):
    """Delete admin *id* and their photo file, then return to the list page."""
    os.remove(os.path.join(app.config['UPLOAD_FOLDER'], photo))
    models_admin.delete(id)
    # Fix: a render_template() call that followed this return was
    # unreachable and has been removed.
    return redirect('/lihatadmin')
@app.route('/login', methods=['GET', 'POST'])
def login(): #panggil procedure login
'''form = Login()
if form.validate_on_submit():'''
username = str(request.form.get('username'))
password = str(request.form.get('password'))
h = hashlib.md5(password.encode())
db = MySQLdb.connect("127.0.0.1","root","","latihanwebpython1" )
cursor = db.cursor()
try:
cursor.execute("select * from admin where user='"+username+"' and pswd='"+h.hexdigest()+"'") #menjalankan perintah sql
results = cursor.fetchall()
for row in results:
id_admin = str(row[0])
nama = row[1]
pswd = row[2]
status = row[3]
jk = row[4]
photo = row[5]
session['id_admin'] = id_admin
session['nama'] = nama
session['status'] = status
session['jk'] = jk
session['photo'] = photo
session['state'] = 1
if nama == username and pswd == password:
return redirect('/index')
else:
return redirect('/login')
except:
print "Error: unable to fecth data"
db.close()
'''else:
return redirect('/index')'''
return render_template('login.html')
@app.route('/logout')
def logout():
    """Clear the session, mark the state as logged out, and go to login."""
    session.clear()
    session['state'] = 0
    # Commented-out splinter experiment kept as an inert string literal.
    '''with Browser() as browser:
    # Visit URL
    url = "login"
    browser.visit(url)
    browser.back()'''
    return redirect('login')
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#--------------------------------------
# Clase que describe el comportamiento del sistema de ILUMINACION de HydroPonic System
#
# Authors : [Matias Deambrosi, Juan Jose Conejero Serna]
# Date : 21/10/2015
#
# Contact: [md9@alu.ua.es, jjcs2@alu.ua.es]
#
#--------------------------------------
import HPS_Options as op
import HPS_Funciones as fn
import HPS_ActuadorDiscreto as actd
import threading
class HPS_SistemaIluminacion(threading.Thread):
    # Constructor: receives the outdoor/indoor light sensors and the
    # lighting actuator this thread switches on/off.
    def __init__(self, sensorLuzSolar, sensorLuzInterior, actuadorIluminacion):
        threading.Thread.__init__(self)
        self._sensorLuzSolar=sensorLuzSolar
        self._sensorLuzInterior=sensorLuzInterior
        self._actuadorIluminacion=actuadorIluminacion
        # Tracks whether the artificial lighting is currently on.
        self._iluminacionActiva=False;
    def CompruebaLuces(self):
        # Sanity check: if the lights are supposedly on but the indoor
        # sensor still reads below threshold, log it and reset the flag.
        if self._iluminacionActiva:
            if self._sensorLuzInterior.get("datoDigital") <op.umbral_VALOR_MIN_LUZINTERIOR:
                fn.Escribir("La iluminación no está encendida correctamente", True)
                self._iluminacionActiva=False
    # Main loop: when sunlight drops below the threshold and the lights are
    # off, switch them on (and verify); otherwise switch them off.
    def run(self):
        while True:
            # Is there enough sunlight outside?
            if self._sensorLuzSolar.get("datoDigital") < op.umbral_VALOR_MIN_LUZEXTERIOR:
                if not self._iluminacionActiva:
                    self._actuadorIluminacion.Activar()
                    self._iluminacionActiva=True
                    fn.Escribir("\033[91m"+"Activando iluminacion"+"\033[0m")
                self.CompruebaLuces()
            else:
                if self._iluminacionActiva:
                    self._actuadorIluminacion.Desactivar()
                    self._iluminacionActiva=False
                    fn.Escribir("Desctivando iluminacion")
            op.time.sleep(op.REFRESCO_PROCESOS)
from inspect import isfunction, isclass
class PollenException(Exception):
    """Base class for all Pollen container errors."""
    pass
class OverwriteServiceError(PollenException):
    """Raised when registering a name that is already in use."""
    pass
class UndefinedServiceNameError(PollenException):
    """Raised when resolving a name that was never registered."""
    pass
class ConfigurableServiceError(PollenException):
    """Raised when a configurable (multi-argument) service is marked shared."""
    pass
class Pollen(object):
    """Minimal service container.

    Register plain values (stored as ready instances) or callables (lazily
    invoked factories). Shared factories are invoked once and the instance
    is cached; configurable factories (more than one parameter) cannot be
    shared. Callable/class names are registered under their fully qualified
    'module.name' string.
    """

    def __init__(self):
        self.__services = dict()
        self.__instances = dict()

    def register(self, name, definition=None, shared=False):
        """Register *definition* under *name*; usable as a decorator when
        *definition* is omitted."""
        name = self.__get_fqn(name)
        if definition is None:
            # Decorator form: @container.register('name')
            def wrap(target):
                self.register(name or target, target, shared)
                return target
            return wrap
        if self.has(name):
            raise OverwriteServiceError('Name "%s" is already in use' % name)
        if not self.__is_callable(definition):
            # Plain value: store it directly as a ready instance.
            self.__instances[name] = definition
            return
        if shared and self.__is_configurable(definition):
            raise ConfigurableServiceError('Configurable services can not be shared')
        self.__services[name] = (definition, shared)

    def get(self, name, **kwargs):
        """Resolve *name*, invoking its factory (with the container and any
        kwargs) unless a ready instance exists."""
        name = self.__get_fqn(name)
        try:
            return self.__instances[name]
        except KeyError:
            pass
        try:
            definition, shared = self.__services[name]
        except KeyError:
            raise UndefinedServiceNameError('Unable to resolve "%s"' % name)
        instance = definition(self, **kwargs)
        if shared:
            self.__instances[name] = instance
        return instance

    def has(self, name):
        """Return True when *name* resolves to an instance or a factory."""
        name = self.__get_fqn(name)
        return name in self.__instances or name in self.__services

    def __get_fqn(self, name):
        # Functions/classes are keyed by 'module.qualname'; everything else
        # is stringified.
        if isclass(name) or isfunction(name):
            return '{}.{}'.format(name.__module__, name.__name__)
        return str(name)

    def __is_callable(self, definition):
        return hasattr(definition, '__call__') or callable(definition)

    def __is_configurable(self, definition):
        # Configurable = takes anything beyond the container argument.
        return len(self.__get_arguments(definition)) > 1

    def __get_arguments(self, definition):
        # Inspect the function itself, or __call__ for callable objects.
        func = definition if isfunction(definition) else definition.__call__
        try:
            from inspect import signature
        except ImportError:
            # Python 2 fallback.
            from inspect import getargspec
            spec = getargspec(func)
            return filter(lambda arg: arg != 'self', spec.args)
        return [param for param in signature(func).parameters]
|
"""
Trace the execution of the code in Example 5.2 on paper showing
the contents of the run-time stack just before the function call returns.
"""
# val
# Main activation record
# y
# x |
import pymysql
import matplotlib.pyplot as plt
# Configure a CJK-capable font so the Chinese labels render correctly.
plt.rcParams['font.family'] = ['sans-serif']
plt.rcParams['font.sans-serif'] = ['SimHei']
conn = pymysql.connect(
    host="localhost",
    user="root",
    port=3306,
    db="bilibili"
)
cur = conn.cursor()
cur.execute("SELECT sex FROM bilibili_user;")
sex = cur.fetchall()
# Flatten the 1-tuple rows into a plain list of sex strings.
data = [row[0] for row in sex]
label = ["男", "女", "保密"]
# Occurrence count for each category, in label order.
percentage = [data.count(category) for category in label]
plt.pie(percentage, labels=label)
plt.title("哔哩哔哩用户性别分析")
plt.show()
|
import matplotlib.pyplot as plt
import numpy as np
def filter_stat_data(x, y):
    """Keep only the (x, y) pairs whose x (ADC reading) exceeds 12000.

    Bug fix: the original loop ran ``range(0, len(x) - 1)`` and silently
    dropped the final sample; every element is now examined.

    Returns:
        (new_x, new_y): the filtered values as two parallel lists.
    """
    new_x = []
    new_y = []
    for adc_value, force_value in zip(x, y):
        if adc_value > 12000:
            new_x.append(adc_value)
            new_y.append(force_value)
    return new_x, new_y
def fit_data(x, y):
    """Degree-1 least-squares fit; returns (slope, intercept)."""
    # NOTE(review): the arguments are passed to polyfit as (y, x), i.e. this
    # fits x as a function of y — swapped relative to the parameter names.
    # Earlier code in this file calls np.polyfit(adc, force, 1) directly with
    # the opposite orientation; confirm which direction is intended.
    return np.polyfit(y, x, 1)
def find_error(a, b, x, y):
    """Absolute residuals |a*x + b - y| of the linear fit at each point."""
    predicted = np.multiply(a, x) + b
    return np.abs(np.subtract(predicted, y))
def remove_outliers(err, x, y):
    """Drop points whose error z-score magnitude reaches 2.

    Returns:
        (new_x, new_y): the surviving samples as two parallel lists.
    """
    mean_err = np.mean(err)
    std_err = np.std(err)
    z_scores = (err - mean_err) / std_err
    kept_x = []
    kept_y = []
    for z_val, x_val, y_val in zip(z_scores, x, y):
        if abs(z_val) < 2:
            kept_x.append(x_val)
            kept_y.append(y_val)
    return kept_x, kept_y
def plot_data(a, b, y, x):
    """Plot the fitted line a*x + b (blue) against the samples (red dots).

    NOTE(review): callers pass (slope, intercept, adc, force), so the axis
    labels 'ADC'/'force' may be swapped relative to the parameter names —
    verify before reuse.
    """
    y_fit = np.multiply(a, x) + b
    plt.plot(x, y_fit, 'b')
    plt.plot(x, y, 'ro')
    plt.xlabel('ADC')
    plt.ylabel('force')
    plt.show()
# Per-position fit parameters, accumulated across calibration files.
arr_a_x = []
arr_b_x = []
arr_a_y = []
arr_b_y = []
if __name__ == '__main__':
    positions = np.loadtxt('calibration_data/positions.txt')
    for i in range(1, 3):
        # 1st and 2nd column data from file are x-direction data
        # 3rd and 4th column are y-direction data
        data_x = np.loadtxt('calibration_data/{}.txt'.format(i), usecols=(0, 1))
        data_y = np.loadtxt('calibration_data/{}.txt'.format(i), usecols=(2, 3))
        force_x = data_x[:, 0]
        adc_x = data_x[:, 1]
        force_y = data_y[:, 0]
        adc_y = data_y[:, 1]
        # Drop low-ADC samples (<= 12000) before fitting.
        new_adc_x, new_force_x = filter_stat_data(adc_x, force_x)
        new_adc_y, new_force_y = filter_stat_data(adc_y, force_y)
        # Find parameters for linear equation from data fitting
        est_a_x, est_b_x = np.polyfit(new_adc_x, new_force_x, 1)
        est_a_y, est_b_y = np.polyfit(new_adc_y, new_force_y, 1)
        # find difference between fitted line and data
        err_x = find_error(est_a_x, est_b_x, new_adc_x, new_force_x)
        err_y = find_error(est_a_y, est_b_y, new_adc_y, new_force_y)
        # Remove samples whose residual z-score magnitude reaches 2.
        adc_x, force_x = remove_outliers(err_x, new_adc_x, new_force_x)
        adc_y, force_y = remove_outliers(err_y, new_adc_y, new_force_y)
        # Find parameters after outliers removal
        a_x, b_x = fit_data(adc_x, force_x)
        a_y, b_y = fit_data(adc_y, force_y)
        plot_data(a_x, b_x, adc_x, force_x)
        plot_data(a_y, b_y, adc_y, force_y)
        # write data in array
        arr_a_x.append(a_x)
        arr_b_x.append(b_x)
        arr_a_y.append(a_y)
        arr_b_y.append(b_y)
    # Fit the slope coefficients against the probe positions.
    a_x_final = fit_data(positions, arr_a_x)
    a_y_final = fit_data(positions, arr_a_y)
    # NOTE(review): np.array's second positional argument is the dtype, so
    # passing a_x_final there is almost certainly wrong — this was probably
    # meant to be np.array([a_x_final, a_y_final]) (a_y_final is otherwise
    # only printed). Confirm before changing, as it alters the saved file.
    calib_equation = np.array(a_x_final, a_x_final)
    print 'a_x = ', a_x_final
    print 'a_y = ', a_y_final
    print calib_equation
    np.savez('calibration_equation.npz', transform=calib_equation)
    file_x = open('data_analysis_results_x.txt', 'w')
    # loop through each item in the list and write it to the output file
    for (pos, a, b) \
            in zip(positions, arr_a_x, arr_b_x):
        file_x.write('{} {} {} \n'.format(str(pos), str(a), str(b)))
    file_x.write('\n \n')
    file_x.close()
    file_y = open('data_analysis_results_y.txt', 'w')
    # loop through each item in the list and write it to the output file
    for (pos, a, b) \
            in zip(positions, arr_a_y, arr_b_y):
        file_y.write('{} {} {} \n'.format(str(pos), str(a), str(b)))
    file_y.write('\n \n')
    file_y.close()
|
import argparse
from common_crawl.finder.product_finder import ProductFinder
from common_crawl.save.json_save_products import JsonSaveProducts
from product.extractors import tesco_extractor
from common_crawl.utils import search_domain
import time
# Scans common crawl information for products and saves them.
def main(domain, cc_index_list, output_path):
    """Scan the given Common Crawl indexes for *domain* product pages and
    save the extracted products as JSON at *output_path*."""
    # Find all index records for the target domain.
    record_list = search_domain(domain, cc_index_list)
    # Background writer thread that persists extracted products.
    savethread = JsonSaveProducts(output_path).start()
    # Split the records in half and extract products on two worker threads.
    first_mid = record_list[0: int(len(record_list) / 2)]
    end_mid = record_list[int(len(record_list) / 2): int(len(record_list))]
    product_finder_1 = ProductFinder(first_mid, tesco_extractor.extract_product).start(savethread)
    product_finder_2 = ProductFinder(end_mid, tesco_extractor.extract_product).start(savethread)
    # Idle the main thread until the workers report done.
    # NOTE(review): with `and`, this loop exits as soon as EITHER finder
    # reports finished; if the intent is to wait for both, this should be
    # `or`. Confirm check_status() semantics before changing.
    while product_finder_1.check_status() is not True and product_finder_2.check_status() is not True:
        time.sleep(1)
    # Let the save thread drain its queue before shutting everything down.
    while savethread.alive():
        time.sleep(1)
    # Stop Threads
    product_finder_1.stop()
    product_finder_2.stop()
    savethread.stop()
if __name__ == '__main__':
    # parse the command line arguments
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-d", "--domain", required=True, help="The domain to target e.g. youtube.com")
    parser.add_argument("--index_list", nargs='+', required=True,
                        help="The index list e.g. 2017-39. Check http://index.commoncrawl.org/")
    # NOTE(review): the help text for --output_path is empty; describe the
    # destination JSON path.
    parser.add_argument("--output_path", required=True, help="")
    args = parser.parse_args()
    main(args.domain, args.index_list, args.output_path)
|
import numpy as np
import pandas as pd
# documentation
# output: clean data set, remove missing values and convert categorical values to binary, extract sensitive features
# for each data set 'name.csv' we create a function clean_name
# clean name takes parameter num_sens, which is the number of sensitive attributes to include
# clean_name returns pandas data frames X, X_prime, where:
# X is the full data set of X values
# X_prime is only the sensitive columns of X
# y are the binary outcomes
def l2_norm(col):
    """Euclidean (L2) norm of a vector."""
    return np.sqrt(np.sum(np.square(col)))
def center(X):
    """Subtract each column's mean (in place); returns the same frame."""
    for column in X.columns:
        column_mean = np.mean(X.loc[:, column])
        X.loc[:, column] = X.loc[:, column] - column_mean
    return X
def standardize(X):
    """Divide each column by its (population) standard deviation, in place."""
    for column in X.columns:
        column_std = np.sqrt(np.var(X.loc[:, column]))
        X.loc[:, column] = X.loc[:, column] / column_std
    return X
def max_row_norm(X):
    """Largest Euclidean row norm over all rows of the frame."""
    row_norms = [l2_norm(row) for _, row in X.iterrows()]
    return np.max(row_norms)
def normalize_rows(X):
    """Scale the whole frame so its largest row norm becomes 1."""
    return X / max_row_norm(X)
def add_intercept(X):
    """Add all 1's column to predictor matrix (integer ones, in place)."""
    X['intercept'] = [1 for _ in range(X.shape[0])]
    return X
def one_hot_code(df1, sens_dict):
    """One-hot-encode the string columns of *df1*, keeping *sens_dict* in sync.

    Binary string columns are mapped in place to 0/1 (first sorted value -> 1);
    columns with more than two distinct values are expanded into one indicator
    column per value, named '<col>.<i>', and sens_dict entries follow suit.

    Fix: ``df1.drop(c, 1)`` used the positional axis argument, which was
    removed in pandas 2.0; the keyword form ``drop(columns=c)`` is used.

    Returns:
        (df1, sens_dict): transformed frame and updated sensitivity dict.
    """
    # Snapshot the columns: df1 is reassigned inside the loop.
    cols = list(df1.columns)
    for c in cols:
        if isinstance(df1[c][0], str):
            column = df1[c]
            df1 = df1.drop(columns=c)
            unique_values = list(sorted(set(column)))
            n = len(unique_values)
            if n > 2:
                # Multi-valued: one indicator column per distinct value.
                for i in range(n):
                    col_name = '{}.{}'.format(c, i)
                    df1[col_name] = [1 if el == unique_values[i] else 0 for el in column]
                    sens_dict[col_name] = sens_dict[c]
                del sens_dict[c]
            else:
                # Binary: re-add the column as a 0/1 indicator of the first value.
                df1[c] = [1 if el == unique_values[0] else 0 for el in column]
    return df1, sens_dict
def clean_mnist(d =16, scale_and_center=True, intercept=False, normalize=True, samprate = 1):
    """Load and preprocess the MNIST test split with feature dimension *d*.

    :param d: feature dimension encoded in the data directory name
    :param scale_and_center: center columns and divide by their std dev
    :param intercept: append a constant 'intercept' column
    :param normalize: scale rows so the max row L2 norm is 1
    :param samprate: fraction of rows to subsample (1 keeps everything)
    :return: (X data frame, y Series with labels mapped to {-1, +1})
    """
    X = pd.read_csv('dataset/mnist_data/n=10000_d={}/X_test.csv'.format(d))
    # Subsample rows, remembering which indices survived so the labels
    # can be aligned below.
    X = X.sample(frac=samprate)
    sampled_indices = X.index
    X = X.reset_index(drop = True)
    y = pd.read_csv('dataset/mnist_data/n=10000_d={}/y_test.csv'.format(d))
    y = y.iloc[sampled_indices,:]
    # Map 0/1 labels to -1/+1 (assumes the first column holds binary
    # labels — TODO confirm the CSV schema).
    y = pd.Series(2*y.iloc[:,0] - 1)
    y = y.reset_index(drop=True)
    if scale_and_center:
        X = center(X)
        X = standardize(X)
    if intercept:
        X = add_intercept(X)
    if normalize:
        X = normalize_rows(X)
    return X, y
# center data frame columns for visual purposes
def clean_communities(scale_and_center=True, intercept=True, normalize=True):
    """Clean communities & crime data set.

    Missing values are filled with 0, the violent-crime rate is binarized
    at its 70th percentile, categorical columns are one-hot coded, and the
    usual scaling/intercept/normalization steps are applied.

    :return: (X data frame of the first 122 columns, y label Series)
    """
    # Data Cleaning and Import
    df = pd.read_csv('dataset/communities.csv')
    df = df.fillna(0)
    y = df['ViolentCrimesPerPop']
    q_y = np.percentile(y, 70)
    # convert y's to binary predictions on whether the neighborhood is
    # especially violent
    # NOTE(review): np.sign yields 0 for values exactly at the percentile,
    # so labels are in {-1, 0, 1} — confirm downstream code tolerates 0.
    y = [np.sign(s - q_y) for s in y]
    # hot code categorical variables; the protected-attribute file marks
    # which columns are sensitive (1) vs not (0)
    sens_df = pd.read_csv('dataset/communities_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    print('sensitive features: {}'.format(sens_cols))
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    df, _ = one_hot_code(df, sens_dict)
    # Keep only the first 122 columns (hard-coded feature count for this
    # data set; drops the target and any trailing columns).
    X = df.iloc[:, 0:122]
    if scale_and_center:
        X = center(X)
        X = standardize(X)
    if intercept:
        X = add_intercept(X)
    if normalize:
        X = normalize_rows(X)
    return X, pd.Series(y)
# num_sens in 1:17
def clean_lawschool(scale_and_center=True, intercept=True, normalize=True):
    """Clean law school data set.

    Binarizes gender, maps the bar exam outcome to {-1, +1}, one-hot codes
    the race column (race1..race8), and applies the usual
    scaling/intercept/normalization steps.

    :return: (X data frame, y label Series in {-1, +1})
    """
    # Data Cleaning and Import
    df = pd.read_csv('dataset/lawschool.csv')
    df = df.dropna()
    # convert categorical column variables to 0,1
    df['gender'] = df['gender'].map({'female': 1, 'male': 0})
    # remove y from df; keyword form replaces df.drop('bar1', 1) — the
    # positional axis argument was removed in pandas 2.0
    df_y = df['bar1']
    df = df.drop(columns='bar1')
    y = [2*int(a == 'P')-1 for a in df_y]
    y = pd.Series(y)
    sens_df = pd.read_csv('dataset/lawschool_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    # one hot coding of race variable
    for i in range(1, 9):
        col_name = 'race{}'.format(i)
        if 'race' in sens_cols:
            sens_dict[col_name] = 1
        else:
            sens_dict[col_name] = 0
        # builtin int replaces np.int, which was removed from NumPy (>=1.24)
        race_code = [int(r == i) for r in df['race']]
        df[col_name] = race_code
    sens_dict['race'] = 0
    df = df.drop(columns='race')
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    print('there are {} sensitive features including derivative features'.format(len(sens_names)))
    # NOTE(review): x_prime is computed but never returned — confirm whether
    # callers need the sensitive-column view.
    x_prime = df[sens_names]
    df.index = range(len(df))
    x_prime.index = range(len(x_prime))
    X = df
    if scale_and_center:
        X = center(X)
        X = standardize(X)
    if intercept:
        X = add_intercept(X)
    if normalize:
        X = normalize_rows(X)
    return X, pd.Series(y)
def clean_synthetic(num_sens):
    """Clean synthetic data set: all features sensitive, y is the last column.

    :param num_sens: how many leading columns to treat as sensitive
    :return: (features, sensitive feature columns, y Series)
    """
    df = pd.read_csv('dataset/synthetic.csv').dropna()
    last = df.shape[1] - 1
    y = df.iloc[:, last]
    features = df.iloc[:, 0:last]
    sensitive = features.iloc[:, 0:num_sens]
    return features, sensitive, y
def clean_adult_full(scale_and_center=True, intercept=True, normalize=True, samprate = 1.0):
    """Clean the full Adult (census income) data set.

    Optionally subsamples rows (deterministic, random_state=0), maps income
    to {-1, +1}, one-hot codes categoricals, and applies the usual
    scaling/intercept/normalization steps.

    :return: (X data frame, y label Series in {-1, +1})
    """
    df = pd.read_csv('dataset/adult_full.csv') #full adult data
    df = df.sample(frac=samprate, random_state=0).reset_index(drop=True) #subsample
    df = df.dropna()
    # binarize and remove y value; keyword form replaces df.drop('income', 1)
    # — the positional axis argument was removed in pandas 2.0
    df['income'] = df['income'].map({'<=50K': -1, '>50K': 1})
    y = df['income']
    df = df.drop(columns='income')
    # hot code categorical variables
    sens_df = pd.read_csv('dataset/adult_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    X, sens_dict = one_hot_code(df, sens_dict)
    if scale_and_center:
        X = center(X)
        X = standardize(X)
    if intercept:
        X = add_intercept(X)
    if normalize:
        X = normalize_rows(X)
    return X, pd.Series(y)
def clean_adult(scale_and_center=True, intercept=True, normalize=True):
    """Clean the (standard) Adult data set.

    Note the income labels in this CSV carry a leading space
    (' <=50K' / ' >50K'), unlike adult_full.csv.

    :return: (X data frame, y label Series in {-1, +1})
    """
    df = pd.read_csv('dataset/adult.csv')
    df = df.dropna()
    # binarize and remove y value; keyword form replaces df.drop('income', 1)
    # — the positional axis argument was removed in pandas 2.0
    df['income'] = df['income'].map({' <=50K': -1, ' >50K': 1})
    y = df['income']
    df = df.drop(columns='income')
    # hot code categorical variables
    sens_df = pd.read_csv('dataset/adult_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    X, sens_dict = one_hot_code(df, sens_dict)
    if scale_and_center:
        X = center(X)
        X = standardize(X)
    if intercept:
        X = add_intercept(X)
    if normalize:
        X = normalize_rows(X)
    return X, pd.Series(y)
def clean_adultshort():
    """Clean the short Adult data set.

    Maps income to {0, 1} (unlike the +/-1 convention of the other adult
    cleaners), one-hot codes categoricals and centers both the full and
    sensitive-column views.

    :return: (x centered data frame, x_prime centered sensitive columns, y)
    """
    df = pd.read_csv('dataset/adultshort.csv')
    df = df.dropna()
    # binarize and remove y value; keyword form replaces df.drop('income', 1)
    # — the positional axis argument was removed in pandas 2.0
    df['income'] = df['income'].map({' <=50K': 0, ' >50K': 1})
    y = df['income']
    df = df.drop(columns='income')
    # hot code categorical variables
    sens_df = pd.read_csv('dataset/adultshort_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    print('sensitive features: {}'.format(sens_cols))
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    df, sens_dict = one_hot_code(df, sens_dict)
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    print('there are {} sensitive features including derivative features'.format(len(sens_names)))
    x_prime = df[sens_names]
    x = center(df)
    x_prime = center(x_prime)
    return x, x_prime, y
# currently 6 sensitive attributes
def clean_student():
    """Clean the student performance data set (student-mat.csv).

    Binarizes the final grade G3 (pass = G3 >= 11), drops all grade
    columns, one-hot codes categoricals and extracts sensitive columns.

    :return: (X data frame, x_prime sensitive columns, y label Series)
    """
    df = pd.read_csv('dataset/student-mat.csv', sep=';')
    df = df.dropna()
    grades = df['G3']
    # pass/fail threshold at grade 11; loop variable renamed so it no
    # longer shadows the resulting list
    y = [0 if g < 11 else 1 for g in grades]
    # keyword form replaces df.drop([...], 1) — the positional axis
    # argument was removed in pandas 2.0
    df = df.drop(columns=['G3', 'G2', 'G1'])
    sens_df = pd.read_csv('dataset/student_protected.csv')
    sens_cols = [str(c) for c in sens_df.columns if sens_df[c][0] == 1]
    print('sensitive features: {}'.format(sens_cols))
    sens_dict = {c: 1 if c in sens_cols else 0 for c in df.columns}
    df, sens_dict = one_hot_code(df, sens_dict)
    sens_names = [key for key in sens_dict.keys() if sens_dict[key] == 1]
    print('there are {} sensitive features including derivative features'.format(len(sens_names)))
    x_prime = df[sens_names]
    return df, x_prime, pd.Series(y)
|
from django.conf.urls import include, url

from yasana import urls as yasana_urls
from account import urls as account_urls
from api import urls as api_urls
from partials import urls as partial_urls

# Root URL configuration.  A plain list replaces the patterns('' , ...)
# wrapper, which was deprecated in Django 1.8 and removed in Django 1.10;
# list-style urlpatterns works on all versions since 1.8.
urlpatterns = [
    url(r'^', include(yasana_urls, namespace='yasana')),
    url(r'^account/', include(account_urls, namespace='account')),
    url(r'^api/', include(api_urls, namespace='api')),
    url(r'^partials/', include(partial_urls, namespace='partials')),
]
|
import unittest
def fun(x):
    """Return the constant string "test"; the argument *x* is ignored."""
    result = "test"
    return result
class MyTest(unittest.TestCase):
    """Sanity check that fun() returns its constant value."""
    def test(self):
        expected = "test"
        self.assertEqual(fun(1), expected)
|
import torch
import dgl
from model import GraphTransformerNet
import torch.nn.functional as F
import torch.nn as nn
from data import GraphDataset, collate
from torch.utils.data import DataLoader
import datetime
# device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Force CPU execution; uncomment the line above to prefer a CUDA device.
device = torch.device('cpu')
def main(train_dataloader, validate_dataloader, model, optimizer, loss_fn, epochs, batch_size):
    """Train *model* for *epochs* passes over *train_dataloader*.

    NOTE(review): appears superseded by train_loop/validate_loop below
    (validate_dataloader is unused here) — confirm whether this entry
    point is still called anywhere.
    """
    model = model.to(device)
    model.train()
    size = len(train_dataloader.dataset)
    for epoch in range(epochs):
        print(f"Epoch {epoch + 1}\n-------------------------------")
        for batch_id, (graphs, labels) in enumerate(train_dataloader):
            # Skip batches containing NaNs in node/edge features or labels.
            if torch.any(torch.isnan(graphs.ndata['h'])) \
                    or torch.any(torch.isnan(graphs.edata['h'])) \
                    or torch.any(torch.isnan(labels)):
                continue
            graphs = graphs.to(device)
            labels = labels.to(device)
            pred = model(graphs, graphs.ndata['h'], graphs.edata['h'])
            loss = loss_fn(pred, labels)
            # Standard backward/step cycle.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if batch_id % 10 == 0:
                # Rebinds 'loss' to a float for printing only.
                loss, current = loss.item(), batch_id * batch_size
                print(f"loss: {loss:>7f} [{current:>5d}/{size:5d}]")
    # Persist final weights after all epochs.
    torch.save(model.state_dict(), 'model_weight.pth')
def train_loop(dataloader, model, loss_fn, optimizer, epoch, epochs):
    """Run one training epoch and return the mean batch loss.

    NaN-containing batches are skipped but still count in the
    num_batches denominator — the returned mean is slightly biased low
    when batches are skipped (NOTE(review): confirm acceptable).
    """
    model.train()
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    train_loss = 0
    for batch_id, (graphs, labels) in enumerate(dataloader):
        # Skip batches with NaNs in node/edge features or labels.
        if torch.any(torch.isnan(graphs.ndata['h'])) \
                or torch.any(torch.isnan(graphs.edata['h'])) \
                or torch.any(torch.isnan(labels)):
            continue
        graphs = graphs.to(device)
        labels = labels.to(device)
        pred = model(graphs, graphs.ndata['h'], graphs.edata['h'])
        loss = loss_fn(pred, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Accumulate detached scalar loss for the epoch average.
        train_loss += loss.detach().item()
        if batch_id % 5 == 0:
            # Rebinds 'loss' to a float for printing only.
            loss, current = loss.item(), batch_id * graphs.batch_size
            print(f"loss: {loss:>7f} [Epoch {epoch + 1:>3d}/{epochs:<3d}] [{current:>5d}/{size:<5d}]")
    return train_loss / num_batches
def validate_loop(dataloader, model, loss_fn, epoch, epochs):
    """Evaluate *model* on *dataloader* and return the mean batch loss.

    Runs under torch.no_grad(); NaN-containing batches are skipped but
    still count in the num_batches denominator.
    """
    model.eval()
    num_batches = len(dataloader)
    val_loss = 0
    with torch.no_grad():
        for graphs, labels in dataloader:
            # Skip batches with NaNs in node/edge features or labels.
            if torch.any(torch.isnan(graphs.ndata['h'])) \
                    or torch.any(torch.isnan(graphs.edata['h'])) \
                    or torch.any(torch.isnan(labels)):
                continue
            graphs = graphs.to(device)
            labels = labels.to(device)
            pred = model(graphs, graphs.ndata['h'], graphs.edata['h'])
            val_loss += loss_fn(pred, labels).item()
    val_loss /= num_batches
    print(f"val_loss: {val_loss:>8f} [Epoch {epoch + 1:>3d}/{epochs:<3d}] \n")
    return val_loss
if __name__ == '__main__':
    # Build the dataset and an 80/20 train/validation split.
    dataset = GraphDataset('traindata')
    train_size = int(0.8 * len(dataset))
    train_set, validate_set = torch.utils.data.random_split(dataset, [train_size, len(dataset) - train_size])
    print("train: {}\nvalida: {}".format(len(train_set), len(validate_set)))
    batch_size = 4
    train_dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True, collate_fn=collate)
    validate_dataloader = DataLoader(validate_set, batch_size=batch_size, shuffle=True, collate_fn=collate)
    model = GraphTransformerNet(46, 12, 128, 38, 6).to(device)
    learning_rate = 0.001
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=1, verbose=True)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.9)
    criterion = nn.CrossEntropyLoss()
    epochs = 2
    dt = datetime.datetime.now().strftime('%m%d_%H%M')
    # Context manager guarantees the loss log is flushed and closed even if
    # training raises (the original handle was opened and never closed).
    with open("loss_{}.txt".format(dt), 'w') as log_loss:
        log_loss.write('train_loss\tval_loss\n')
        for epoch in range(epochs):
            train_loss = train_loop(train_dataloader, model, criterion, optimizer, epoch, epochs)
            val_loss = validate_loop(validate_dataloader, model, criterion, epoch, epochs)
            scheduler.step()
            log_loss.write("{:<7.5f}\t{:<7.5f}\n".format(train_loss, val_loss))
    # NOTE(review): assumes a 'models/' directory already exists — confirm.
    torch.save(model.state_dict(), 'models/model_{}.pth'.format(dt))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import unittest
import json
class PasswordWithJsonTestCase(unittest.TestCase):
data_file_path = './user_data.json'
def setUp(self):
print('set up')
self.test_data = json.loads(open(self.data_file_path).read())
@unittest.skip('test_weak_password')
def test_weak_password(self):
for data in self.test_data:
passwd = data['password']
self.assertTrue(len(passwd) >= 6)
msg = "user %s has a weak password" % (data['name'])
self.assertTrue(passwd != 'password', msg)
self.assertTrue(passwd != 'password123', msg)
def test_dummy(self):
pass
if __name__ == '__main__':
    # Run this module's test cases via the standard unittest CLI runner.
    unittest.main()
|
from main.models import Contact
from django.shortcuts import render,redirect
from datetime import datetime
from main.models import Contact,Product,Order
from django.contrib import messages
from django.contrib.auth.models import User
from django.contrib.auth import authenticate,login
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def cartItems(cart):
    """Resolve a list of product ids into their Product model instances."""
    return [Product.objects.get(id=item_id) for item_id in cart]
def priceCart(cart):
    """Return the total price of all products whose ids are in *cart*."""
    return sum(item.price for item in cartItems(cart))
def index(request):
    """Store front page: list all products and handle add-to-cart POSTs."""
    # Lazily initialise the session cart on first visit.
    if 'cart' not in request.session:
        request.session['cart']=[]
    cart=request.session['cart']
    # Expire the session when the browser closes; presumably this also
    # marks the session modified so the in-place append below is persisted
    # — TODO confirm against Django's session docs.
    request.session.set_expiry(0)
    store_items=Product.objects.all()
    context={'store_items':store_items,'cart_size':len(cart)}
    if request.method=="POST":
        # The POST body carries the id of the product being added.
        cart.append(int(request.POST['obj_id']))
        messages.success(request, 'Item added to Cart!')
        return redirect('home')
    return render(request,"index.html",context)
def cart(request):
    """Render the cart page with its items and total price.

    Falls back to an empty cart when the session has none yet — the
    original raised KeyError for first-time visitors landing here
    directly.
    """
    items = request.session.get('cart', [])
    request.session.set_expiry(0)  # expire session when the browser closes
    ctx = {'cart': items, 'cart_size': len(items), 'cart_items': cartItems(items), 'total_price': priceCart(items)}
    return render(request, "cart.html", ctx)
def removefromcart(request):
    """Remove one occurrence of a product id from the session cart."""
    request.session.set_expiry(0)
    obj_to_remove = (request.POST['obj_id'])
    # Strip slashes from the posted value (apparently it arrives as a URL
    # fragment like '12/') — TODO confirm against the template.
    s=obj_to_remove.replace('/','')
    # NOTE(review): .index() raises ValueError when the id is not in the
    # cart — confirm the template cannot submit stale ids.
    obj_index = request.session['cart'].index(int(s))
    request.session['cart'].pop(obj_index)
    return redirect('cart')
def about(request):
    """Render the static About page."""
    template_name = "about.html"
    return render(request, template_name)
def contact(request):
    """Contact page: persist submitted messages as Contact rows."""
    if request.method == "POST":
        name=request.POST.get('name')
        email=request.POST.get('email')
        phone=request.POST.get('phone')
        desc=request.POST.get('desc')
        # Timestamp the message with today's date.
        contact=Contact(name=name,email=email,phone=phone,desc=desc,date=datetime.today())
        contact.save()
        messages.success(request, 'Your message has been sent!')
    return render(request,"contact.html")
def checkout(request):
    """Render the checkout page for the current session cart.

    Uses an empty list when no cart exists yet, so visiting checkout
    directly no longer raises KeyError.
    """
    items = request.session.get('cart', [])
    request.session.set_expiry(0)  # expire session when the browser closes
    ctx = {'cart': items, 'cart_size': len(items), 'cart_items': cartItems(items), 'total_price': priceCart(items)}
    return render(request, "checkout.html", ctx)
def signup(request):
    """Register a new user with Django's built-in UserCreationForm."""
    form=UserCreationForm()
    if request.method=="POST":
        # Re-bind the form with the submitted data for validation.
        form=UserCreationForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Congratulations you are Registered. Login Again !')
            return redirect("/cart/checkout")
    # Invalid submissions fall through and re-render with form errors.
    context={'form':form}
    return render(request,"signup.html",context)
def loginUser(request):
    """Authenticate a user and send them back to checkout.

    Re-renders the login page with a warning on bad credentials.
    """
    if request.method=="POST":
        username=request.POST.get('username')
        password=request.POST.get('password')
        # check credentials; local renamed from 'User', which shadowed the
        # imported django.contrib.auth.models.User model
        user = authenticate(request, username=username, password=password)
        if user is not None:
            # A backend authenticated the credentials
            login(request, user)
            return redirect('/cart/checkout/login')
        else:
            # No backend authenticated the credentials
            messages.warning(request, 'Invalid Credentials!!')
            return render(request,"login.html")
    return render(request,"login.html")
def genItemsList(cart):
    """Build a comma-prefixed string of item names for an Order record.

    NOTE: the result deliberately starts with a leading comma (",a,b"),
    matching the format already stored in existing orders.
    """
    names = [item.name for item in cartItems(cart)]
    return "".join("," + name for name in names)
def placeOrder(request):
    """Create an Order row from the session cart, then clear the cart."""
    cart = request.session['cart']
    request.session.set_expiry(0)
    # NOTE(review): ctx is built but never passed to render below — confirm
    # whether place_order.html needs it.
    ctx = {'cart':cart, 'cart_size':len(cart), 'cart_items':cartItems(cart), 'total_price': priceCart(cart)}
    if request.method=="POST":
        order = Order()
        order.items = genItemsList(cart)
        order.first_name = request.POST.get('first_name')
        order.last_name = request.POST.get('last_name')
        order.address = request.POST.get('address')
        order.city = request.POST.get('city')
        order.payment_data = request.POST.get('payment_data')
        order.payment_method = request.POST.get('payment')
        order.save()
        # Empty the cart once the order is stored.
        request.session['cart'] = []
    return render(request, "place_order.html")
def search_product(request):
    """Search products by name (case-insensitive substring match)."""
    if request.method != "POST":
        return render(request, "search_product.html")
    searched = request.POST['searched']
    products = Product.objects.filter(name__icontains=searched)
    context = {'searched': searched, 'products': products}
    return render(request, "search_product.html", context)
|
# Public API of this package: re-export the upload strategy classes.
__all__ = ["UploadStrategy", "FileUploadStrategy", "UserInputUploadStrategy"]
from rest_api.views.strategies.upload import UploadStrategy, FileUploadStrategy, UserInputUploadStrategy
|
#!/usr/bin/env python
# encoding:UTF-8
import unittest
from app.student.homework.object_page.homework_page import Homework
from app.student.login.object_page.home_page import HomePage
from app.student.login.object_page.login_page import LoginPage
from app.student.login.test_data.login_failed_toast import VALID_LOGIN_TOAST
from app.student.vanclass.object_page.vanclass_page import VanclassPage
from app.student.vanclass.test_data.vanclass_data import GetVariable as gv
from app.student.vanclass.object_page.vanclass_detail_page import VanclassDetailPage
from conf.decorator import setup, teardown, testcase, teststeps
from utils.reset_phone_findtoast import verify_find
from utils.toast_find import Toast
class Vanclass(unittest.TestCase):
    """UI tests for the class (vanclass) section of the student app."""
    @classmethod
    @setup
    def setUp(cls):
        """Launch the app and build the page objects."""
        cls.login = LoginPage()
        cls.home = HomePage()
        cls.detail = VanclassDetailPage()
        cls.van = VanclassPage()
        cls.homework = Homework()
    @classmethod
    @teardown
    def tearDown(cls):
        pass
    @testcase
    def test_quit_vanclass(self):
        self.login.app_status()  # check the app's current state
        if self.home.wait_check_page():  # page checkpoint
            self.home.click_test_vanclass()  # class tab
            if self.van.wait_check_page():  # page checkpoint
                van = self.van.vanclass_name()  # class names
                for i in range(len(van)):
                    if van[i].text == gv.DEL_VANCLASS:
                        van[i].click()  # open the class detail page
                        break
                self.quit_tips_operate()  # quit-class prompt dialog
                if self.detail.wait_check_page(gv.DEL_VANCLASS):  # page checkpoint
                    self.home.back_up_button()  # back to the class list
                    if self.van.wait_check_page():  # class-list page checkpoint
                        self.home.click_tab_hw()  # back to the home screen
        else:
            Toast().find_toast(VALID_LOGIN_TOAST.login_failed())
            print("未进入主界面")
    @teststeps
    def quit_tips_operate(self):
        """Exercise the quit-class dialog: cancel once, then confirm."""
        if self.van.wait_check_vanclass_page(gv.DEL_VANCLASS):  # page checkpoint
            self.van.quit_vanclass()  # quit-class button
            if self.detail.wait_check_tips_page():  # tips dialog checkpoint
                self.detail.tips_title()
                self.detail.tips_content()
                self.detail.cancel_button().click()  # cancel button
                print('取消 退出')
                print('------------------------------')
                if self.van.wait_check_vanclass_page(gv.DEL_VANCLASS):  # page checkpoint
                    self.van.quit_vanclass()  # quit-class button
                    self.detail.tips_operate()  # confirmation dialog
                    print('确定 退出')
                    self.phone_judge()  # verify phone number
    @teststeps
    def phone_judge(self):
        """Verify the phone number with an SMS code to finish quitting."""
        if self.van.wait_check_quit_page():
            self.van.phone_name()  # hint text
            value = verify_find(gv.PHONE, 'quitClass')  # fetch the verification code
            self.van.code_input().send_keys(value)  # enter the verification code
            self.van.quit_button()  # quit-class button
|
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
# for armchair junctiong
# declare parameters
# dimension default: energy is eV, length is 10^-10
# Physical constants and model parameters.
# NOTE(review): comment says MeV/c**2 but 0.511 MeV is divided by c**2 in
# (m/s)**2, so the working unit is eV*s**2/m**2 — confirm unit convention.
me = 0.51099895000*(10**6) / (299792458**2) # electron mass, MeV/c**2
hbar = 6.582119569 * 10**(-16)  # reduced Planck constant [eV*s]
K0 = 2.35*10**(-9)
vy = 5.6*10**5
m = 1.42*me
mx = 0.15*me
my = 1.18*me
v0 = vy # hbar*K0 / mx
eg1 = 0.3 # bandgap energy
eg2 = 0.3
potential = 1
energy = 0.5
D = 200*10**(-9) # potential depth D. set 200nm.
V0 = 25*10**(-3) # 25meV
E0 = 10*10**(-3) # 10meV
kxMax = np.sqrt((energy - eg1/2)*2*m/(hbar**2))
kstep = 10000
# kx = np.linspace(kxMax, -1, kstep) # np.linspace(0, 1, kstep)
# ky = (1/(hbar*vy)) * np.sqrt(energy**2 -
# (eg1/2 + ((hbar*kx)**2)/(2*m))**2) # np.linspace(0, 1, kstep)
#qx = kx
# qy = -(1/(hbar*vy))*np.sqrt((energy-potential)**2 -
# (eg2/2 + (((hbar*kx)**2)/(2*m)))**2) # np.linspace(0, 1, kstep)
inciAngle = []
results = []
lamb = 1 # upper band is +1
lambdot = -1 # lower band is -1
# NOTE(review): vx/vy are re-bound to 1 here, discarding vy = 5.6e5 set
# above — confirm this is intentional.
vx = 1
vy = 1
# vy/vx = sqrt(mx/my)
gamma = np.sqrt(mx/my)  # mass anisotropy ratio
alpha = np.arctan(1/gamma) # tilt of the potential barrier; should be a variable, fixed here for convenience
# phiV = np.arctan((gamma**2)*ky/kx) # kx, ky are not varied directly; only phiV is swept below
phiStep = 1000
# Sweep the incidence angle phiV over (-pi/2, pi/2).
phiV = np.linspace(-np.pi/2, np.pi/2, phiStep)
kx = (2*E0*gamma/(lamb*hbar*v0))*np.sqrt(1/(gamma**2 + (np.tan(phiV))**2))
ky = (1/gamma)*np.sqrt(((2*E0)/(lamb*hbar*v0))**2-kx**2)
phiK = np.arctan(ky/kx)
k = np.sqrt(kx**2 + ky**2)
phiKdot = phiK - alpha
phiKRdot = np.arctan((kx*np.sin(alpha) + ky*np.cos(alpha)) /
                     (-kx*np.cos(alpha) + ky*np.sin(alpha)))
kxdot = k*np.cos(phiKdot)
kydot = k*np.sin(phiKdot)
phiS = np.arctan((1/gamma)*np.tan(phiV))
# phiS and phiSR differ when alpha != 0; for now tested only with alpha = 0.
phiSR = np.arctan((gamma)*np.tan(phiKRdot))
# thetaV: theta is the angle (phi) inside the potential barrier.
qy = ky
qx = np.sqrt((2*(V0-E0)/(hbar*v0))**2 - (gamma*qy)**2)
qyr = qy
qxr = -qx
q = np.sqrt(qx**2 + qy**2)
thetaK = np.arctan(qy/qx)
thetaKdot = thetaK - alpha
qxdot = q*np.cos(thetaKdot)
qydot = q*np.sin(thetaKdot)
qxrdot = qxr*np.cos(alpha) + qyr*np.sin(alpha)
thetaKRdot = np.arctan((qx*np.sin(alpha) + qy*np.cos(alpha)) /
                       (-qx*np.cos(alpha) + qy*np.sin(alpha)))
thetaV = np.arctan((gamma**2)*qy/qx) # express qy and qx in terms of kx, ky
thetaS = np.arctan((1/gamma)*np.tan(thetaV))
thetaSR = np.arctan((gamma)*np.tan(thetaKRdot))
# Transmission amplitude denominator A and amplitude t across the barrier.
A = np.exp(-1j*qxdot*D)*(np.exp(1j*thetaS + 1j*thetaSR) + np.exp(1j*phiS + 1j*phiSR) -
                         lamb*lambdot*np.exp(1j*thetaSR + 1j*phiSR) - lamb*lambdot*np.exp(1j*thetaS + 1j*phiS)) - np.exp(-1j*qxrdot*D)*(np.exp(1j*thetaS+1j*thetaSR) + np.exp(1j*phiS+1j*phiSR) - lamb*lambdot*np.exp(1j*thetaSR+1j*phiS) - lamb*lambdot*np.exp(1j*thetaS + 1j*phiSR))
t = (lamb*lambdot*np.exp(-1j*kxdot*D)*(np.exp(1j*thetaSR) -
                                       np.exp(1j*thetaS))*(np.exp(1j*phiS)-np.exp(1j*phiSR)))/A
T = np.absolute(t)**2 # np.absolute(t)**2 # t * np.conjugate(t)
# kx, ky shift somewhat with alpha; fix V and D and plot T against phiV.
#thetaK = np.angle(complex((eg1/2 + hbar**2 * kx**2), (hbar * vy * ky)))
#thetaQ = np.angle(complex((eg2/2 + hbar**2 * qx**2), (hbar * vy * qy)))
# transmission = - ((2*np.sin(thetaK) * np.sin(thetaQ)) /
# (1+np.cos(thetaK + thetaQ)))
# Polar plot of transmission probability versus incidence angle.
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(phiV, T)
plt.show()
# for b in range(0, kstep):
#
# thetaK = np.angle(
# complex(((eg2-eg1)/2 + ((hbar * kx[b])**2)/(2*m)), (hbar * vy * ky[b])))
# # print(thetaK)
# inciAngle.append(np.angle(
# complex(((eg2-eg1)/2 + ((hbar * kx[b])**2)/(2*m)), (hbar * vy * ky[b])), deg=True))
# thetaQ = np.angle(
# complex(((eg2-eg1)/2 + ((hbar * qx[b])**2)/(2*m)), (hbar * vy * qy[b])))
# if b == 0:
# print(kx[b])
# print(np.angle(
# complex((eg1/2 + hbar**2 * kx[b]**2), (hbar * vy * ky[b])), deg=True))
# transmission = - ((2*np.sin(thetaK) * np.sin(thetaQ)) /
# (1+np.cos(thetaK + thetaQ)))
# results.append(copy(transmission))
# print(results)
# plot
#plt.plot(inciAngle, results)
# plt.show()
|
# A simple lambda function that check if a port on a host is open and pushes a cloudwatch metric
import json
import boto3
import socket
def lambda_handler(event, context):
    """Check whether event['ip']:event['port'] accepts TCP connections and
    publish the result (0/1) as the FSX/FsxServerStatus CloudWatch metric.

    :param event: dict with 'ip', 'port' and 'host' keys
    :param context: Lambda context (unused)
    :return: dict with statusCode 200 and the JSON-encoded CloudWatch response
    """
    # Check if port is open or not (try to connect to a port on a server - timeout set to 1 sec)
    ip = event['ip']
    port = event['port']
    try:
        # Context manager guarantees the socket is closed even when
        # connect_ex raises (the original leaked the fd on that path).
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)  # fail fast: 1 second connect timeout
            result = sock.connect_ex((ip, port))
        portOpen = 1 if result == 0 else 0
    except OSError:
        # Narrowed from a bare 'except:' so programming errors (KeyError,
        # KeyboardInterrupt, ...) still surface; socket errors are OSError.
        portOpen = 0
    print("portOpen = " + str(portOpen))
    # You can force value for testing purpose
    # HACK: this overrides the measured status — remove for production use.
    portOpen = 1
    print("forcing value to: " + str(portOpen))
    # Add value to a cloudwatch metric
    cloudwatch = boto3.client('cloudwatch')
    response = cloudwatch.put_metric_data(
        MetricData = [
            {
                'MetricName': 'FsxServerStatus',
                'Dimensions': [
                    {
                        'Name': 'FSX_SERVER_TYPE',
                        'Value': 'Primary'
                    },
                    {
                        'Name': 'FSX_SERVER_HOST',
                        'Value': event['host']
                    }
                ],
                'Unit': 'None',
                'Value': portOpen
            },
        ],
        Namespace='FSX'
    )
    return {
        'statusCode': 200,
        'body': json.dumps(response)
    }
|
""" kalman_filter_dependent_fusion
Kalman-filter tracking with track-to-track fusion that takes the dependence
(cross-covariance) between the two trackers into account, following
Bar-Shalom's formulas.
"""
import numpy as np
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.models.transition.linear import CombinedLinearGaussianTransitionModel, ConstantVelocity
from stonesoup.predictor.kalman import KalmanPredictor
from stonesoup.types.state import GaussianState
from stonesoup.updater.kalman import KalmanUpdater
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.track import Track
from data_fusion import track_to_track_fusion
from trackers.calc_cross_cov_estimate_error import calc_cross_cov_estimate_error
class kalman_filter_dependent_fusion:
    """
    Runs two Kalman filters (radar and AIS measurements) over the same
    target and fuses their posteriors while accounting for the
    cross-covariance of their estimation errors (Bar-Shalom's dependent
    track-to-track fusion).
    """
    def __init__(self, measurements_radar, measurements_ais, start_time, prior: GaussianState,
                 sigma_process_radar=0.01, sigma_process_ais=0.01, sigma_meas_radar=3, sigma_meas_ais=1):
        """
        :param measurements_radar: sequence of radar Detection objects
        :param measurements_ais: sequence of AIS Detection objects
        :param start_time: scenario start timestamp
        :param prior: shared Gaussian prior for both trackers
        :param sigma_process_radar: process noise for the radar tracker
        :param sigma_process_ais: process noise for the AIS tracker
        :param sigma_meas_radar: radar measurement noise variance
        :param sigma_meas_ais: AIS measurement noise variance
        """
        self.start_time = start_time
        self.measurements_radar = measurements_radar
        self.measurements_ais = measurements_ais
        # same transition models (radar uses same as original)
        self.transition_model_radar = CombinedLinearGaussianTransitionModel(
            [ConstantVelocity(sigma_process_radar), ConstantVelocity(sigma_process_radar)])
        self.transition_model_ais = CombinedLinearGaussianTransitionModel(
            [ConstantVelocity(sigma_process_ais), ConstantVelocity(sigma_process_ais)])
        # same measurement models as used when generating the measurements
        # Specify measurement model for radar
        self.measurement_model_radar = LinearGaussian(
            ndim_state=4,  # number of state dimensions
            mapping=(0, 2),  # mapping measurement vector index to state index
            noise_covar=np.array([[sigma_meas_radar, 0],  # covariance matrix for Gaussian PDF
                                  [0, sigma_meas_radar]])
        )
        # Specify measurement model for AIS
        self.measurement_model_ais = LinearGaussian(
            ndim_state=4,
            mapping=(0, 2),
            noise_covar=np.array([[sigma_meas_ais, 0],
                                  [0, sigma_meas_ais]])
        )
        # specify predictors
        self.predictor_radar = KalmanPredictor(self.transition_model_radar)
        self.predictor_ais = KalmanPredictor(self.transition_model_ais)
        # specify updaters
        self.updater_radar = KalmanUpdater(self.measurement_model_radar)
        self.updater_ais = KalmanUpdater(self.measurement_model_ais)
        # create prior, both trackers use the same starting point
        self.prior_radar = prior
        self.prior_ais = prior
    def track(self):
        """
        Run both trackers over their measurement streams, then fuse the
        aligned posteriors using the dependent-tracks formulas.

        Assumes the radar and AIS measurement lists are synchronized, i.e.
        index i of both lists shares a timestamp.

        :return: (tracks_fused list of GaussianState, tracks_ais Track,
                  tracks_radar Track)
        """
        # create list for storing kalman gains
        kf_gains_radar = []
        kf_gains_ais = []
        # create list for storing transition_noise_covar
        transition_covars_radar = []
        transition_covars_ais = []
        # create list for storing tranisition matrixes
        transition_matrixes_radar = []
        transition_matrixes_ais = []
        # create list for storing tracks
        tracks_radar = Track()
        tracks_ais = Track()
        # track: run the radar filter, recording the Kalman gain and
        # transition model terms needed later for the cross-covariance
        for measurement in self.measurements_radar:
            prediction = self.predictor_radar.predict(self.prior_radar, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            # calculate the kalman gain
            hypothesis.measurement_prediction = self.updater_radar.predict_measurement(hypothesis.prediction,
                                                                                       measurement_model=self.measurement_model_radar)
            post_cov, kalman_gain = self.updater_radar._posterior_covariance(hypothesis)
            kf_gains_radar.append(kalman_gain)
            # get the transition model covar NOTE; same for AIS and radar. Name change not a bug
            predict_over_interval = measurement.timestamp - self.prior_radar.timestamp
            transition_covars_radar.append(self.transition_model_radar.covar(time_interval=predict_over_interval))
            transition_matrixes_radar.append(self.transition_model_radar.matrix(time_interval=predict_over_interval))
            # update
            post = self.updater_radar.update(hypothesis)
            tracks_radar.append(post)
            self.prior_radar = post
        # same filtering pass for the AIS measurements
        for measurement in self.measurements_ais:
            prediction = self.predictor_ais.predict(self.prior_ais, timestamp=measurement.timestamp)
            hypothesis = SingleHypothesis(prediction, measurement)
            # calculate the kalman gain
            hypothesis.measurement_prediction = self.updater_ais.predict_measurement(hypothesis.prediction,
                                                                                     measurement_model=self.measurement_model_ais)
            post_cov, kalman_gain = self.updater_ais._posterior_covariance(hypothesis)
            kf_gains_ais.append(kalman_gain)
            # get the transition model covar
            predict_over_interval = measurement.timestamp - self.prior_ais.timestamp
            transition_covars_ais.append(self.transition_model_ais.covar(time_interval=predict_over_interval))
            transition_matrixes_ais.append(self.transition_model_ais.matrix(time_interval=predict_over_interval))
            # update
            post = self.updater_ais.update(hypothesis)
            tracks_ais.append(post)
            self.prior_ais = post
        # FOR NOW: run track_to_track_association here, todo change pipeline flow
        # FOR NOW: run the association only when both have a new posterior (so each time the AIS has a posterior)
        # todo handle fusion when one track predicts and the other updates. (or both predicts) (Can't be done with the theory
        #  described in the article)
        # Cross-covariances are built recursively from a zero initial value.
        cross_cov_ij = [np.zeros([4, 4])]
        cross_cov_ji = [np.zeros([4, 4])]
        # TODO change flow to assume that the indexes decide whether its from the same iterations
        # use indexes to loop through tracks, kf_gains etc
        tracks_fused = []
        # tracks_fused.append(tracks_radar[0])
        for i in range(1, len(tracks_radar)):
            # we assume that the indexes correlates with the timestamps. I.e. that the lists are 'synchronized'
            # check to make sure
            if tracks_ais[i].timestamp == tracks_radar[i].timestamp:
                # calculate the cross-covariance estimation error
                cross_cov_ij.append(calc_cross_cov_estimate_error(
                    self.measurement_model_radar.matrix(), self.measurement_model_ais.matrix(), kf_gains_radar[i],
                    kf_gains_ais[i],
                    transition_matrixes_radar[i], transition_covars_ais[i], cross_cov_ij[i - 1]
                ))
                cross_cov_ji.append(calc_cross_cov_estimate_error(
                    self.measurement_model_ais.matrix(), self.measurement_model_radar.matrix(), kf_gains_ais[i],
                    kf_gains_radar[i],
                    transition_matrixes_ais[i], transition_covars_radar[i], cross_cov_ji[i - 1]
                ))
                # test for track association
                # same_target = track_to_track_association.test_association_dependent_tracks(tracks_radar[i],
                #                                                                            tracks_ais[i],
                #                                                                            cross_cov_ij[i],
                #                                                                            cross_cov_ji[i], 0.01)
                same_target = True  # ignore test for track association for now
                if same_target:
                    fused_posterior, fused_covar = track_to_track_fusion.fuse_dependent_tracks(tracks_radar[i],
                                                                                               tracks_ais[i],
                                                                                               cross_cov_ij[i],
                                                                                               cross_cov_ji[i])
                    estimate = GaussianState(fused_posterior, fused_covar, timestamp=tracks_ais[i].timestamp)
                    tracks_fused.append(estimate)
        return tracks_fused, tracks_ais, tracks_radar
# # plot
# fig = plt.figure(figsize=(10, 6))
# ax = fig.add_subplot(1, 1, 1)
# ax.set_xlabel("$x$")
# ax.set_ylabel("$y$")
# ax.axis('equal')
# ax.plot([state.state_vector[0] for state in ground_truth],
# [state.state_vector[2] for state in ground_truth],
# linestyle="--",
# label='Ground truth')
# ax.scatter([state.state_vector[0] for state in measurements_radar],
# [state.state_vector[1] for state in measurements_radar],
# color='b',
# label='Measurements Radar')
# ax.scatter([state.state_vector[0] for state in measurements_ais],
# [state.state_vector[1] for state in measurements_ais],
# color='r',
# label='Measurements AIS')
#
# # add ellipses to the posteriors
# for state in tracks_radar:
# w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='b')
# ax.add_artist(ellipse)
#
# for state in tracks_ais:
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='r')
# ax.add_patch(ellipse)
#
# for track_fused in tracks_fused:
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused[1] @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(track_fused[0][0], track_fused[0][2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.5,
# color='green')
# ax.add_patch(ellipse)
#
# # add ellipses to add legend todo do this less ugly
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='r',
# alpha=0.2,
# label='Posterior AIS')
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='b',
# alpha=0.2,
# label='Posterior Radar')
# ax.add_patch(ellipse)
# ellipse = Ellipse(xy=(0, 0),
# width=0,
# height=0,
# color='green',
# alpha=0.5,
# label='Posterior Fused')
# ax.add_patch(ellipse)
#
# ax.legend()
# ax.set_title("Kalman filter tracking and fusion accounting for the dependence")
# fig.show()
# # fig.savefig("../results/scenario2/KF_tracking_and_fusion_accounting_for_dependence.svg")
#
# # plot estimate for estimate
# # plot
# fig_2 = plt.figure(figsize=(10, 6))
# ax = fig_2.add_subplot(1, 1, 1)
# ax.set_xlabel("$x$")
# ax.set_ylabel("$y$")
# ax.axis('equal')
# ax.plot([state.state_vector[0] for state in ground_truth],
# [state.state_vector[2] for state in ground_truth],
# linestyle="--",
# label='Ground truth')
# # ax.scatter([state.state_vector[0] for state in measurements_radar],
# # [state.state_vector[1] for state in measurements_radar],
# # color='b',
# # label='Measurements Radar')
# # ax.scatter([state.state_vector[0] for state in measurements_ais],
# # [state.state_vector[1] for state in measurements_ais],
# # color='r',
# # label='Measurements AIS')
#
# for i in range(0, len(tracks_fused)):
# # plot measurements
# ax.scatter([measurements_radar[i + 1].state_vector[0]],
# [measurements_radar[i + 1].state_vector[1]],
# color='b',
# label='Measurements Radar')
# ax.scatter([measurements_ais[i + 1].state_vector[0]],
# [measurements_ais[i + 1].state_vector[1]],
# color='r',
# label='Measurements AIS')
#
# # plot one and one estimate
# state_radar = tracks_radar[i + 1]
# w, v = np.linalg.eig(measurement_model_radar.matrix() @ state_radar.covar @ measurement_model_radar.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state_radar.state_vector[0], state_radar.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='b')
# ax.add_artist(ellipse)
#
# state_ais = tracks_ais[i + 1]
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_ais.covar @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state_ais.state_vector[0], state_ais.state_vector[2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.2,
# color='r')
# ax.add_patch(ellipse)
#
# state_fused = tracks_fused[i]
# w, v = np.linalg.eig(measurement_model_ais.matrix() @ state_fused[1] @ measurement_model_ais.matrix().T)
# max_ind = np.argmax(w)
# min_ind = np.argmin(w)
# orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# ellipse = Ellipse(xy=(state_fused[0][0], state_fused[0][2]),
# width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# angle=np.rad2deg(orient),
# alpha=0.5,
# color='green')
# ax.add_patch(ellipse)
#
# fig_2.show()
# input("Press Enter to continue...")
#
# #
# # # add ellipses to the posteriors
# # for state in tracks_radar:
# # w, v = np.linalg.eig(measurement_model_radar.matrix() @ state.covar @ measurement_model_radar.matrix().T)
# # max_ind = np.argmax(w)
# # min_ind = np.argmin(w)
# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# # ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# # angle=np.rad2deg(orient),
# # alpha=0.2,
# # color='b')
# # ax.add_artist(ellipse)
# #
# # for state in tracks_ais:
# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ state.covar @ measurement_model_ais.matrix().T)
# # max_ind = np.argmax(w)
# # min_ind = np.argmin(w)
# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# # ellipse = Ellipse(xy=(state.state_vector[0], state.state_vector[2]),
# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# # angle=np.rad2deg(orient),
# # alpha=0.2,
# # color='r')
# # ax.add_patch(ellipse)
# #
# # for track_fused in tracks_fused:
# # w, v = np.linalg.eig(measurement_model_ais.matrix() @ track_fused[1] @ measurement_model_ais.matrix().T)
# # max_ind = np.argmax(w)
# # min_ind = np.argmin(w)
# # orient = np.arctan2(v[1, max_ind], v[0, max_ind])
# # ellipse = Ellipse(xy=(track_fused[0][0], track_fused[0][2]),
# # width=2 * np.sqrt(w[max_ind]), height=2 * np.sqrt(w[min_ind]),
# # angle=np.rad2deg(orient),
# # alpha=0.5,
# # color='green')
# # ax.add_patch(ellipse)
#
# fig_2.show()
|
"""Construct a profile with two hosts for testing owamp
Instructions:
Wait for the profile to start. . .
"""
# Boiler plate
import geni.portal as portal
import geni.rspec.pg as rspec
request = portal.context.makeRequestRSpec()
# Get nodes
host = request.RawPC("host")
target = request.RawPC("target")
# Force hardware type for consistency
host.hardware_type = "m510"
target.hardware_type = "m510"
link1 = request.Link(members = [host, target])
# Set scripts from repo
# node1.addService(rspec.Execute(shell="sh", command="/local/repository/initDocker.sh"))
# Boiler plate
portal.context.printRequestRSpec()
|
# a=[1,2,8,7,6,3,4,5]
# i=0
# b=[]
# while i<len(a):
# if i%2==0:
# pass
# else:
# b.append(a[i])
# i=i+1
# i=0
# while i <len(b):
# j=i
# var=0
# while j<len(b):
# if b[j]>b[i]:
# var=b[i]
# b[j]=b[i]
# b[j]=var
# i=i+1
# x=0
# while x<len(a):
# if a[x]%2==0:
# c=a.index (a[x])
# b.insert(c,a[x])
# print(b)
# Collect the odd values of s in ascending order, then re-insert each even
# value at the index it occupied in s, and print the result.
#
# Improvements over the original: the no-op `pass` branch and the dead
# `i = 0` resets are gone, and the hand-rolled O(n^2) swap sort is replaced
# by list.sort().
s = [2, 9, 1, 8, 5, 11, 3, 4, 7]
b = [value for value in s if value % 2 != 0]  # odd values only
b.sort()  # ascending, same result as the original pairwise swap sort
for idx, value in enumerate(s):
    if value % 2 == 0:
        # s.index(value) matches the original behavior (first occurrence).
        b.insert(s.index(value), value)
print(b)
|
# KEEP THIS SECRET
# OAuth 1.0a-style credentials (consumer key/secret + access token pair) --
# presumably for the Twitter API given the naming; fill in locally and never
# commit real values to version control.
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
|
from bs4 import BeautifulSoup
import urllib.request
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "app.settings.dev")
from django.db import models
from person.models import Person
# class Person:
# def __init__(self, fullname, firstname, lastname, location):
# self.fullname = fullname
# self.firstname = firstname
# self.lastname = lastname
# self.location = location
# Fetch the institutional person feed and print each person's eppn (or "nope"
# when the record lacks one).  Only people with a non-empty display location
# are considered.
#
# Bug fix: a stray ')' and `print(created)` were left active after the
# update_or_create call was commented out, causing a SyntaxError/NameError.
# They are now commented out together with the call they belonged to.
with urllib.request.urlopen('https://nodefeeds.seas.harvard.edu/app/api/person/list/all') as f:
    data = f.read().decode('utf-8')
soup = BeautifulSoup(data, 'xml')
people = soup.find_all('person')
for person in people:
    if len(person.location.fordisp) > 0:
        if person.eppn:
            print(person.eppn)
        else:
            print("nope")
        # obj, created = Person.objects.update_or_create(
        #     eppn=person.eppn,
        #     defaults={
        #         'firstname': person.givenname.text,
        #         'lastname': person.lastname.text,
        #         'fullname': person.gecos.text,
        #         'location': person.location.fordisp.text,
        #     }
        # )
        # print(created)
|
"""
A MNIST classifier using batch normalization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tempfile
import tensorflow as tf
import tensorflow.contrib.layers as layers
from tensorflow.examples.tutorials.mnist import input_data
FLAGS = None
def dense(x, size, scope):
    """Fully-connected layer with no activation (used as the logits layer)."""
    with tf.variable_scope(scope):
        return layers.fully_connected(x, size, activation_fn=None,
                                      scope='fc')
def dense_batch_relu(x, size, is_training, scope):
    """Fully-connected layer followed by batch normalization and ReLU.

    `is_training` switches batch_norm between batch statistics (training)
    and the stored moving averages (inference).
    """
    with tf.variable_scope(scope):
        fc = layers.fully_connected(x, size, activation_fn=None,
                                    scope='fc')
        bn = layers.batch_norm(fc, center=True, scale=True,
                               is_training=is_training, scope='bn')
        return tf.nn.relu(bn, name='relu')
def build_model(x, is_training):
    """Two 256-unit batch-norm ReLU layers followed by a 10-way logits layer."""
    h1 = dense_batch_relu(x, 256, is_training,'layer1')
    h2 = dense_batch_relu(h1, 256, is_training, 'layer2')
    logits = dense(h2, 10, 'logits')
    return logits
def main(_):
    """Train the batch-norm MLP on MNIST; report test loss/accuracy every 100 steps."""
    print('Loading dataset')
    mnist = input_data.read_data_sets(FLAGS.data_dir)
    print('%d train images' % mnist.train.num_examples)
    print('%d test images' % mnist.test.num_examples)
    batch_size = 128
    max_steps = 10000
    # Placeholders: flattened 28x28 images, integer class labels, and the
    # train/eval switch consumed by batch_norm.
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.int64, [None])
    is_training = tf.placeholder(tf.bool, name='is_training')
    logits = build_model(x, is_training)
    with tf.name_scope('accuracy'):
        correct_prediction = tf.cast(tf.equal(tf.argmax(logits, 1), y), tf.float32)
        accuracy = tf.reduce_mean(correct_prediction)
    with tf.name_scope('loss'):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(labels=y, logits=logits)
        loss = tf.reduce_mean(cross_entropy)
    # The batch_norm layer keeps track of input statistics in variables `moving_mean`
    # and `moving_variance`. These variables must be updated before every train step.
    # Unfortunately, the update_ops are not parents of train_op in the computational
    # graph. Therefore we must define the dependency explicitly. By default the
    # update_ops are placed in tf.GraphKeys.UPDATE_OPS.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)
        #train_op = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for i in range(max_steps):
            x_train, y_train = mnist.train.next_batch(batch_size)
            feed_dict = { x: x_train, y: y_train, is_training: True }
            sess.run(train_op, feed_dict=feed_dict)
            # Evaluate on the full test set every 100 steps (skip step 0).
            if i % 100 == 0 and i > 0:
                feed_dict = { x: mnist.test.images, y: mnist.test.labels, is_training: False }
                eval_loss, eval_acc = sess.run([loss, accuracy], feed_dict=feed_dict)
                print('step %d: eval_loss=%f eval_acc=%f ' % (i, eval_loss, eval_acc))
if __name__ == '__main__':
    # Parse --data_dir; remaining args are handed through to tf.app.run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_dir', type=str,
                        default='/tmp/tensorflow/mnist/input_data',
                        help='Directory for storing input data')
    FLAGS, unparsed = parser.parse_known_args()
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
def main():
    """Read an integer from stdin and print whether it is prime."""
    num = int(input("Digite o número: "))
    result = verifica_numero_primo(num)
    print(result)
def verifica_numero_primo(num):
    """Classify num as prime or not, for num in the supported range 0..100.

    Returns "é primo", "Não é primo", or "Número incompatível" when num is
    outside 0..100.

    Bug fix: the original test (`num % num == 0 and num % 1 == 0 and
    not num % 2 == 0`) merely checked that num was odd -- so 9, 15, 21...
    were reported as prime and num == 0 crashed with ZeroDivisionError.
    This version does a real trial-division primality check.
    """
    if num < 0 or num > 100:
        return "Número incompatível"
    if num < 2:
        # 0 and 1 are not prime by definition.
        return "Não é primo"
    for divisor in range(2, int(num ** 0.5) + 1):
        if num % divisor == 0:
            return "Não é primo"
    return "é primo"
# Script entry point: runs immediately when the file is executed/imported.
main()
|
from numbers import Number
import math
class RegularConvexPolygon:
    '''A Regular Strictly Convex Polygon defined by edge count and circumradius.'''
    def __init__(self, edges:int, radius:Number)->None:
        '''Polygon initializer; validation happens in the property setters.'''
        self.edges = edges
        self.radius = radius
    def __validate_edges(self, edges:int)->None:
        '''validates datatype and value of edges'''
        if type(edges) != int:
            raise TypeError(f"'edges' must be int not {edges.__class__.__name__} ")
        if edges < 3:
            raise ValueError(f"'edges' should be greater than or equal to 3")
    def __validate_radius(self, radius:Number)->None:
        '''validates datatype and value of radius'''
        # Bug fix: the original tested isinstance(5, Number), which is always
        # True, so non-numeric radii slipped through to the comparison below.
        if not isinstance(radius, Number):
            raise TypeError(f"'radius' must be a number(int/float/decimal) not {radius.__class__.__name__} ")
        if radius < 0:
            # Bug fix: the original message wrongly talked about 'edges'.
            raise ValueError(f"'radius' should be greater than or equal to 0")
    def __repr__(self)->str:
        '''RegularPoly representation'''
        return (f'A Regular Strictly Convex Polygon of {self.edges} edges and radius {self.radius}')
    def __str__(self)->str:
        '''RegularPoly string representation'''
        return (f'Regular Strictly Convex Polygon: edges = {self.edges}, radius = {self.radius}')
    @property
    def edges(self)->int:
        '''edges of polygon'''
        return self.__edges
    @edges.setter
    def edges(self, edges:int)->None:
        '''set edges of polygon (validated)'''
        self.__validate_edges(edges)
        self.__edges = edges
    @property
    def radius(self)->Number:
        '''circumradius of polygon'''
        return self.__radius
    @radius.setter
    def radius(self, radius:Number)->None:
        '''set circumradius of polygon (validated)'''
        self.__validate_radius(radius)
        self.__radius = radius
    @property
    def interior_angle(self)->float:
        '''interior angle in degrees: (n - 2) * 180 / n'''
        return (self.__edges - 2)*(180/self.__edges)
    @property
    def edge_length(self)->float:
        '''side length: 2 * R * sin(pi / n)'''
        return (2 * self.__radius) * math.sin(math.pi/self.__edges)
    @property
    def apothem(self)->float:
        '''apothem (inradius): R * cos(pi / n)'''
        return self.__radius * math.cos(math.pi/self.__edges)
    @property
    def area(self)->float:
        '''area: n * apothem * side / 2'''
        return (self.__edges * self.apothem*self.edge_length) / 2
    @property
    def perimeter(self)->float:
        '''perimeter: n * side'''
        return self.__edges * self.edge_length
    def __validate_polygon(self, other):
        '''validates data type of other object'''
        # Bug fix: the original message was missing the f-string prefix, so the
        # placeholder was printed literally.
        if not isinstance(other, RegularConvexPolygon):
            raise TypeError(f"RegularConvexPolygon cannot be compared with {other.__class__.__name__}")
    def __eq__(self, other:'RegularConvexPolygon')->bool:
        '''two polygons are equal when both edge count and radius match'''
        self.__validate_polygon(other)
        return (self.__edges==other.__edges and self.__radius == other.__radius)
    def __gt__(self, other:'RegularConvexPolygon')->bool:
        '''ordering is by number of edges only'''
        self.__validate_polygon(other)
        return self.__edges > other.__edges
    def __ge__(self, other:'RegularConvexPolygon')->bool:
        '''ordering is by number of edges only'''
        self.__validate_polygon(other)
        return self.__edges >= other.__edges
    def __lt__(self, other:'RegularConvexPolygon')->bool:
        '''ordering is by number of edges only'''
        self.__validate_polygon(other)
        return self.__edges < other.__edges
    def __le__(self, other:'RegularConvexPolygon')->bool:
        '''ordering is by number of edges only'''
        self.__validate_polygon(other)
        return self.__edges <= other.__edges
|
import cv2
WINDOW_ID = '0x2800018'  # X window id confirmed beforehand (e.g. via xwininfo)
# Capture an X11 window through a GStreamer pipeline (ximagesrc -> appsink).
video = cv2.VideoCapture(f'ximagesrc xid={WINDOW_ID} ! videoconvert ! appsink')
# Show frames until ESC (keycode 27) is pressed or the capture fails.
while cv2.waitKey(1) != 27:
    ok, img = video.read()
    if not ok:
        print("Error brank")
        break
    cv2.imshow('test', img)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
from sklearn.ensemble import RandomForestRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelBinarizer, MinMaxScaler, Imputer
from basic.bupt_2017_11_28.type_deco import prt
import joblib
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from basic.bupt_2017_11_28.type_deco import prt
import seaborn as sns
from basic.bupt_2017_12_01.hou_prepro import find_num_attr, find_text_attr, find_related_num_attr, load_obj
from basic.bupt_2017_12_04.Property import Property
'''
User:waiting
Date:2017-12-13
Time:9:46
'''
# Load the trained random-forest model and the hold-out test set, predict
# SalePrice for each row, and write a Kaggle-style submission CSV.
ppt = Property(r'G:\idea_wirkspace\learnp\basic\bupt_2017_12_04\global.properties')
ran_fore_reg = joblib.load(ppt['model_path'])  # previously trained regressor
df = pd.read_csv(ppt['te'])
df.pop('Id')  # Id is an identifier, not a feature
prt(load_obj(ppt['rel_attr_path']))
# Keep only the numeric attributes that were selected during training.
related_num_df = df[load_obj(ppt['rel_attr_path'])]
scaler = MinMaxScaler()
# NOTE(review): the imputer/scaler pipeline is re-fit on the test data here;
# normally the transformers fitted on the training set should be reused --
# confirm against the training script.
pipeline = Pipeline([('imputer',Imputer(strategy='median')),('scaler',scaler)])
ans = ran_fore_reg.predict(pipeline.fit_transform(related_num_df))
prt(ans)
with open(ppt['res_path'],'w') as f:
    f.write('Id,SalePrice\n')
    for i in range(len(ans)):
        # Test-set Ids start at 1461 (row i maps to Id 1461 + i).
        f.write("{},{}\n".format(1461+i,ans[i]))
|
def display_menu():
    """Print the top-level menu of the library management system."""
    entries = (
        "-" * 30,
        " 图书馆管理系统 v8.8 ",
        "1.登录/注册",
        "2.新书上架",
        "3.热书推荐",
        "4.退出系统",
        "-" * 30,
    )
    for entry in entries:
        print(entry)
def menu1():
    """Print the administrator entry menu."""
    entries = (
        "-" * 30,
        " 管理员入口 ",
        "1、读者管理",
        "2、图书管理",
        "3、退出系统",
        "-" * 30,
    )
    for entry in entries:
        print(entry)
def A():
    """Print the reader-management submenu."""
    entries = (
        "-" * 30,
        " 读者管理 ",
        "1.添加读者信息",
        "2.删除读者信息",
        "3.修改读者信息",
        "4.查询单个读者信息",
        "5.查询所有读者信息",
        "6.退出系统",
        "-" * 30,
    )
    for entry in entries:
        print(entry)
def B():
    """Print the book-management submenu."""
    print("-" * 30)
    print("          图书管理          ")
    print("1.删除图书")# delete
    print("2.添加图书")# add
    print("3.查询图书信息")
    print("4.查询所有图书信息")
    # NOTE(review): the exit entry is numbered 6 while option 5 is skipped
    # (menu2 numbers the same entry 5) -- confirm which number the dispatch
    # code expects before renumbering.
    print("6.退出系统")
    print("-" * 30)
def menu2():
    """Print the reader entry menu."""
    entries = (
        "-" * 30,
        " 读者入口 ",
        "1.借书登记",
        "2.还书登记",
        "3.查询图书信息",
        "4.查询所有图书信息",
        "5.退出系统",
        "-" * 30,
    )
    for entry in entries:
        print(entry)
def get_choice():
    """Prompt for and return the user's menu selection as a raw string."""
    return input("请输入选择的序号:")
from sqlalchemy import Column, Integer, String, Date, ForeignKey, Float, Boolean, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Product(Base):
    """A purchasable product row.

    Column attribute names keep their original (mixed-case) spelling to
    preserve the existing 'products' table schema.
    """
    __tablename__ = 'products'
    Product_id = Column(Integer, primary_key=True)
    name = Column(String)
    price = Column(Float)
    Picture_link = Column(String)
    Description = Column(String)

    def __repr__(self):
        # Bug fix: the method was named `__repr` (missing trailing
        # underscores), so Python never used it as the repr hook.
        return str(self.Product_id) + self.name + str(self.price) + self.Description
class cart(Base):
    # NOTE(review): the class name is lowercase and __tablename__ is
    # 'ProductID', which reads like a column name rather than a table name --
    # confirm the intended schema before relying on this model.
    __tablename__='ProductID'
    Product_id = Column(Integer, primary_key=True)
|
# Copyright 2014 Baidu, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides string converting tools and compatibility on py2 vs py3
"""
import functools
import itertools
import operator
import sys
import types
# Exactly one of these flags is true; it selects which set of implementations
# below is defined at import time.
PY2 = sys.version_info[0]==2
PY3 = sys.version_info[0]==3
if PY3:
    # py3 aliases for the type names that were renamed from py2.
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    def convert_to_bytes(idata):
        """
        convert source type idata to bytes string
        :type idata: any valid python type
        :param idata: source data
        :return : bytes string
        """
        # unicode
        if isinstance(idata, str):
            return idata.encode(encoding='utf-8')
        # Ascii
        elif isinstance(idata, bytes):
            return idata
        # int,dict,list
        else:
            return str(idata).encode(encoding='utf-8')
    def convert_to_string(idata):
        """
        convert source data to str string on py3
        :type idata:any valid python type
        :param idata:source data
        :return :unicode string on py3
        """
        return convert_to_unicode(idata)
    def convert_to_unicode(idata):
        """
        convert source type idata to unicode string
        :type idata: any valid python type
        :param idata: source data
        :return : unicode string
        """
        # Ascii
        if isinstance(idata, bytes):
            return idata.decode(encoding='utf-8')
        # unicode
        elif isinstance(idata, str):
            return idata
        # int,dict,list
        else:
            return str(idata)
else: # py2
    # py2 aliases; basestring/long/unicode/types.ClassType only exist on py2
    # but are merely referenced here, so this branch still parses under py3.
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    def convert_to_bytes(idata):
        """
        convert source type idata to bytes string
        :type idata: any valid python type
        :param idata: source data
        :return : bytes string
        """
        if isinstance(idata, unicode):
            return idata.encode(encoding='utf-8')
        elif isinstance(idata, str):
            return idata
        # int ,long, dict, list
        else:
            return str(idata)
    def convert_to_string(idata):
        """
        convert source data to str string on py2
        :type idata:any valid python type
        :param idata:source data
        :return :bytes string on py2
        """
        return convert_to_bytes(idata)
    def convert_to_unicode(idata):
        """
        convert source type idata to unicode string
        :type idata: any valid python type
        :param idata: source data
        :return : unicode string
        """
        if isinstance(idata, str): #Ascii
            return idata.decode(encoding='utf-8')
        elif isinstance(idata, unicode):
            return idata
        else:
            return str(idata).decode(encoding='utf-8')
|
import tensorflow as tf
class GRUCellWithBahdanauAttention(tf.contrib.rnn.GRUCell):
    """GRU cell intended to add Bahdanau (additive) attention -- unimplemented."""
    def call(self, inputs, state):
        # NOTE(review): this early return makes the cell a pass-through and
        # leaves the scaffold below unreachable; remove it once the attention
        # mechanism is actually implemented.
        return inputs, state
        ### YOUR CODE HERE
        ### Make sure all variable definitions are within the scope!
        raise NotImplementedError("Need to implement the GRU cell \
            with Bahdanau-style attention.")
        ### END YOUR CODE
class GRUCellWithLuongAttention(tf.contrib.rnn.GRUCell):
    """GRU cell intended to add Luong (multiplicative) attention -- unimplemented."""
    def call(self, inputs, state):
        # NOTE(review): early return bypasses the unimplemented scaffold below,
        # making this cell a plain pass-through; the raise is unreachable.
        return inputs, state
        ### YOUR CODE HERE
        ### Make sure all variable definitions are within the scope!
        raise NotImplementedError("Need to implement the GRU cell \
            with Luong-style attention.")
        ### END YOUR CODE
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from Public.BasePage import BasePage
from Public.maxim_monkey import Maxim
from Public.Decorator import *
from uiautomator2 import UiObjectNotFoundError
class login_page(BasePage):
    """Page object for the ECOPROP login screen (uiautomator2-driven)."""
    @teststep
    def wait_page(self):
        """Return True once the login page (text 'ECOPROP') appears within 2s."""
        try:
            if self.d(text='ECOPROP').wait(timeout=2):
                return True
            else:
                raise Exception('Not in LoginPage')
        except Exception:
            # Normalize any lookup failure to the same error message.
            raise Exception('Not in LoginPage')
    @teststep
    def input_username(self, text):
        """Type `text` into the first EditText (username field)."""
        log.i('输入用户名:%s'% text)
        self.d(className="android.widget.EditText", instance=0) \
            .set_text(text)
    @teststep
    def input_password(self, text):
        """Type `text` into the second EditText (password field)."""
        log.i('输入密码:%s'% text)
        self.d(className="android.widget.EditText", instance=1) \
            .set_text(text)
    @teststep
    def click_login_btn(self):
        """Tap the SIGN IN button."""
        log.i('点击登录按钮')
        self.d(text=u"SIGN IN").click()
# def login(username, password):
# page = LoginPage()
# page.input_username(username)
# page.inputpassword(password)
# page.login_click()
|
'''
Hello, my name is Ethan Dewri. This program will scramble text into a two-rail cipher and give encrypted and decrypted
messages.
Sources:
https://www.youtube.com/watch?v=qOlJwi9mu2Q
https://www.youtube.com/watch?v=uaCumJi4Iuw
'''
# Ask the user for the operation and the message to process.
choice = input("You shall have the ability to encrypt or decrypt anything thou shall please. Type either 'encrypt' or 'decrypt': ")
plainText = input ("type your message here(make sure you leave no unnessary spaces): ")
def Scramble2Text(plainText):
    """Encrypt plainText with a two-rail cipher.

    Characters at odd indices form the first half of the output and
    characters at even indices the second half.

    Fixes: the original used `charCount % 2 is 0`, an identity comparison
    that only works because CPython caches small ints; the manual
    character-counting loop is replaced with equivalent stride slicing.
    """
    evenChars = plainText[0::2]  # indices 0, 2, 4, ...
    oddChars = plainText[1::2]   # indices 1, 3, 5, ...
    cipherText = oddChars + evenChars  # odd rail first, then even rail
    return cipherText
def decryptMessage(cipherText):
    """Invert the two-rail cipher: interleave the two halves back together."""
    # The first half of the ciphertext holds the odd-index rail, the second
    # half the even rail; integer division puts the extra character of an
    # odd-length message into the even rail.
    half = len(cipherText) // 2
    oddChars, evenChars = cipherText[:half], cipherText[half:]
    pieces = []
    for even_ch, odd_ch in zip(evenChars, oddChars):
        pieces.append(even_ch)
        pieces.append(odd_ch)
    if len(evenChars) > len(oddChars):
        # Odd-length message: the last even-rail character has no partner.
        pieces.append(evenChars[-1])
    return ''.join(pieces)
# Bug fix: the original unconditionally ran `plain = Scramble2Text(message)`
# here, but `message` was never defined, so the script crashed with a
# NameError before honouring the user's choice.  Those debug lines are
# removed; only the requested operation is performed.
if (choice == "encrypt"):
    print(Scramble2Text(plainText))
if (choice == "decrypt"):
    print(decryptMessage(plainText))
|
from ham.util import radio_types
from ham.util.data_column import DataColumn
class DmrId:
    """One DMR ID entry: sequence number, radio id, and display name,
    with per-radio-format column aliases."""

    @classmethod
    def create_empty(cls):
        """Build a DmrId whose columns are all blank."""
        return DmrId({'number': '', 'radio_id': '', 'name': ''})

    def __init__(self, cols):
        self.number = DataColumn(fmt_name='number', fmt_val=cols['number'], shape=int)
        self.number.set_alias(radio_types.D878, 'No.')
        self.radio_id = DataColumn(fmt_name='radio_id', fmt_val=cols['radio_id'], shape=int)
        self.radio_id.set_alias(radio_types.D878, 'Radio ID')
        self.name = DataColumn(fmt_name='name', fmt_val=cols['name'], shape=str)
        self.name.set_alias(radio_types.D878, 'Name')

    def _columns(self):
        """The data columns in their canonical output order."""
        return (self.number, self.radio_id, self.name)

    def headers(self, style):
        """Return the header row for the given radio style."""
        return {
            radio_types.DEFAULT: self._headers_default,
            radio_types.D878: self._headers_d878,
        }[style]()

    def output(self, style):
        """Return the data row for the given radio style."""
        return {
            radio_types.DEFAULT: self._output_default,
            radio_types.D878: self._output_d878,
        }[style]()

    def _headers_default(self):
        return [f"{col.get_alias(radio_types.DEFAULT)}" for col in self._columns()]

    def _output_default(self):
        return [f"{col.fmt_val()}" for col in self._columns()]

    def _headers_d878(self):
        return [f"{col.get_alias(radio_types.D878)}" for col in self._columns()]

    def _output_d878(self):
        return [f"{col.fmt_val()}" for col in self._columns()]
|
class Solution(object):
    def checkPossibility(self, nums):
        """
        Return True if nums can be made non-decreasing by modifying at most
        one element (LeetCode 665).

        :type nums: List[int]
        :rtype: bool

        Fixes over the original: lists with fewer than two elements no longer
        index nums[-2] (the empty list crashed with IndexError); the trailing
        if/else is collapsed to a boolean return.  The algorithm itself is
        unchanged.
        """
        n = len(nums)
        if n < 2:
            # Nothing can be out of order.
            return True
        changes = 0
        for idx in range(n - 2):
            if nums[idx] > nums[idx + 1]:
                # Prefer lowering nums[idx+1]'s successor conflict: change
                # idx+1 when the left value still fits under nums[idx+2],
                # otherwise the left value itself must change.
                tochange = idx + 1 if nums[idx] <= nums[idx + 2] else idx
                # If the neighbours around the changed position still
                # conflict, one modification cannot fix the array.
                if tochange != 0 and nums[tochange - 1] > nums[tochange + 1]:
                    return False
                changes += 1
        # The final pair is only examined here (the loop stops at n-3); a
        # violation there is always fixable by raising the last element.
        if nums[-2] > nums[-1]:
            changes += 1
        return changes <= 1
|
# Generated by Django 3.2.4 on 2021-07-03 17:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares category.name and several
    product fields with verbose_name (and db_index on the name fields);
    no data is changed."""
    dependencies = [
        ('nduggaapp', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='category',
            name='name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='product',
            name='available',
            field=models.BooleanField(default=True, verbose_name='available'),
        ),
        migrations.AlterField(
            model_name='product',
            name='description',
            field=models.TextField(blank=True, verbose_name='description'),
        ),
        migrations.AlterField(
            model_name='product',
            name='name',
            field=models.CharField(db_index=True, max_length=200, verbose_name='name'),
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.DecimalField(decimal_places=2, max_digits=10, verbose_name='price'),
        ),
    ]
|
#!/usr/bin/env python2.5
#######################################################################
#
# Copyright (c) Stoke, Inc.
# All Rights Reserved.
#
# This code is confidential and proprietary to Stoke, Inc. and may only
# be used under a license from Stoke.
#
#######################################################################
"""
DESCRIPTION : This Script contains following PreArp APIs which has
been used in thePreArp Testcases
TEST PLAN : PreArp Test plan V4.6
AUTHOR : Rajshekar; email : Rajshekar@stoke.com
REVIEWER :
DEPENDENCIES : Linux.py,device.py
"""
import sys, os
mydir = os.path.dirname(__file__)
qa_lib_dir = mydir
if qa_lib_dir not in sys.path:
sys.path.insert(1,qa_lib_dir)
import pexpect
import time
import string
import sys
import re
from logging import getLogger
# grab the root logger.
log = getLogger()
def enable_prearp(self,context):
    """
    Description : API used to enable PREARP
    Note        : By default PreArp is enabled on SSX but you have to
                  execute this command if it is disabled.
    Arguments   : context : specify context name otherwise it will take default context name i.e local
    Example     : self.ssx.enable_prearp(context="123")
    ReturnValue : On success return value is True else False
    """
    self.cmd("end")
    self.cmd("config")
    self.cmd("context %s"%context)
    out = self.cmd("no ip arp noprearp")
    # NOTE(review): `or "unexpected" not in out` makes this condition true for
    # nearly any device output; the intent was presumably `and` -- confirm the
    # actual CLI error strings before changing.
    if "pre-arping is already on" in out or "unexpected" not in out :
        return True
    else :
        return False
def disable_prearp(self,context):
    """
    Description : API used to disable PREARP
    Arguments   : context : specify context name otherwise it will take default context name i.e local
    Example     : self.ssx.disable_prearp(context="123")
    ReturnValue : On success return value is True else False
    """
    self.cmd("end")
    self.cmd("config")
    self.cmd("context %s"%context)
    out = self.cmd("ip arp noprearp")
    # Success is inferred from the absence of an "Error" string in the output.
    if "Error" not in out :
        return True
    else :
        return False
def verify_prearp_disabled(self,context="local") :
    """
    Description : API used to verify the PREARP disabled when executed the command
    Note        : Here we are verifying that particular command is there in
                  "show config" output or not
    Arguments   : context : specify context name otherwise it will take default context name i.e local
    Example     : self.ssx.prearp_disabled(context="123")
    ReturnValue : On success return value is 1 else 0
    """
    out = self.cmd("sh conf cont %s | begin \"ip arp nopre\""%context)
    # 1 when the "ip arp noprearp" line is present (prearp disabled), else 0.
    if "ip arp noprearp" not in out or "Error" in out:
        return 0
    else :
        return 1
|
import numpy as np
import matplotlib.pyplot as plt
# Quick visual check of a 2-D trajectory: plot column 0 against column 1 of
# the whitespace-separated file test-positions.txt.
positions = np.loadtxt("test-positions.txt")
plt.plot(positions[:, 0], positions[:, 1])
plt.show()
|
import boto3
from decimal import Decimal
import numpy as np
from boto3.dynamodb.conditions import Key, Attr
def get_table_metadata(table):
    """Summarize some metadata about the chosen DynamoDB table as a dict."""
    return dict(
        num_items=table.item_count,
        primary_key_name=table.key_schema[0],
        status=table.table_status,
        bytes_size=table.table_size_bytes,
        global_secondary_indices=table.global_secondary_indexes,
    )
def scan_table(table, filtering_exp=None):
    """Scan a table, optionally with a filter expression.

    filtering_exp must look like Key(<item>)<comparison operator>(<value>),
    e.g. ``scan_table(table, Key('runtime').lt(9))['Items']``.  Filters can
    be chained with & (and), | (or), ~ (not):
    ``scan_table(table, Key('runtime').lt(9) & Key('runtime').lt(9))``.
    """
    if filtering_exp is None:
        return table.scan()
    return table.scan(FilterExpression=filtering_exp)
def query_table(table, filtering_exp=None):
    """Query a table; very similar to scan(), but query requires a key
    condition expression."""
    if filtering_exp is None:
        return table.query()
    return table.query(KeyConditionExpression=filtering_exp)
def read_table_item(table, primary_key, pk_value, secondary_key=None, sk_value=None):
    """Return the item identified by its primary (and optional sort) key."""
    key = {primary_key: pk_value}
    if secondary_key is not None:
        key[secondary_key] = sk_value
    return table.get_item(Key=key)
def add_Item(table, item):
    """Put `item` into the table; the item dict must include the key
    attributes.  Returns the put_item response."""
    return table.put_item(Item=item)
def update_Item(table, keys, itm, value):
    """Set attribute `itm` of the item identified by `keys` to `value`."""
    # Attribute name/value placeholders avoid clashes with reserved words.
    table.update_item(
        Key=keys,
        UpdateExpression="SET #attr = :Val",
        ExpressionAttributeNames={'#attr': itm},
        ExpressionAttributeValues={':Val': value},
    )
def update_SubAttribute(table, keys, attr, sub_attr, value):
    """Set `attr.sub_attr` (one level deep) of the item identified by `keys`."""
    names = {'#itm': attr, '#sub_itm': sub_attr}
    values = {':Val': value}
    table.update_item(
        Key=keys,
        UpdateExpression="SET #itm.#sub_itm = :Val",
        ExpressionAttributeNames=names,
        ExpressionAttributeValues=values,
    )
def delete_Item(table, primary_key, pk_value, secondary_key=None, sk_value=None):
    """Delete the item identified by its primary (and optional sort) key."""
    key = {primary_key: pk_value}
    if secondary_key is not None:
        key[secondary_key] = sk_value
    table.delete_item(Key=key)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 14:15:38 2016
@author: 3201955
"""
import random
import numpy as np
import math
import matplotlib.pyplot as plt
# Number of Monte Carlo samples used by the demo plots below.
N = 1000
#random points in triangle ABC
def monteCarlo_triangle(A, B, C, N):
    """Draw N points uniformly inside triangle ABC; returns a 2 x N array."""
    # Square-root trick: with r1, r2 ~ U(0,1), the barycentric weights
    # (1 - sqrt(r1), sqrt(r1)(1 - r2), r2 sqrt(r1)) are uniform over the
    # triangle.  The two uniform draws happen in the same order as before.
    r1 = np.random.uniform(0, 1, N)
    r2 = np.random.uniform(0, 1, N)
    w_a = (1 - np.sqrt(r1))
    w_b = (np.sqrt(r1) * (1 - r2))
    w_c = (r2 * np.sqrt(r1))
    x = w_a * A[0] + w_b * B[0] + w_c * C[0]
    y = w_a * A[1] + w_b * B[1] + w_c * C[1]
    return np.array([x, y])
def monteCarlo_quadrangle(A, B, C, D, N):
    """Draw 2*N points over quadrangle ABCD by splitting it into the
    triangles ABC and ADC (N samples each) and concatenating the results."""
    first = monteCarlo_triangle(A, B, C, N)
    second = monteCarlo_triangle(A, D, C, N)
    return np.append(first, second, axis=1)
# Corner points of the demo quadrangle and a scatter of the sampled cloud.
A=np.array([1,0])
B=np.array([2,0])
C=np.array([2,0.9])
D=np.array([1,0.9])
Point=monteCarlo_quadrangle(A,B,C,D,N)
plt.figure()
plt.scatter(A[0], A[1])
plt.scatter(B[0], B[1])
plt.scatter(C[0], C[1])
# Bug fix: D was plotted as (D[0], D[0]) instead of (D[0], D[1]).
plt.scatter(D[0], D[1])
plt.scatter(Point[0],Point[1])
plt.show()
#case regular f(x,y) = exp(-|x-y|²)
def monteCarlo_Reg(x, y, n):
    """Relative 95% confidence half-width of the Monte Carlo estimate of
    E[exp(-|x-y|^2)] over the n sample pairs (x_i, y_i)."""
    values = np.exp(-np.abs(x - y) ** 2)
    f_moy = sum(values) / n
    f_moy2 = sum(values ** 2)
    Var = np.abs(f_moy2 - n * f_moy ** 2) / (n - 1)
    # 1.96 = two-sided 95% normal quantile; normalized by the mean.
    return np.sqrt(Var / n) * 1.96 / f_moy
#1/|x-y|
def monteCarlo_Sing(x,y,n):
f_moy=sum(1/np.abs(x-y))/n
f_moy2=sum(1/(x-y)**2)
Var=np.abs(f_moy2-n*f_moy**2)/(n-1)
err = np.sqrt(Var/n)*1.96/f_moy
return err
# Convergence study: average the error estimators over 10 repetitions for
# increasing sample counts and plot log(err) vs log(N) against the expected
# -1/2 Monte Carlo slope.
# Fixes: `figure`, `size` and `log` were bare pylab names (undefined without
# `from pylab import *`), the `print` used Python 2 statement syntax, and the
# singular-case titles had an unterminated mathtext `$`.
err1 = np.array([])
err2 = np.array([])
plt.figure()
for n in range(100, 1000, 10):
    err1_ = 0
    err2_ = 0
    for k in range(10):
        Point = monteCarlo_quadrangle(A, B, C, D, n)
        err1_ = err1_ + monteCarlo_Reg(Point[0], Point[1], n)
        err2_ = err2_ + monteCarlo_Sing(Point[0], Point[1], n)
    err1_ = err1_ / 10
    err2_ = err2_ / 10
    err1 = np.append(err1, err1_)
    err2 = np.append(err2, err2_)
print(np.size(err2), np.size(err1))
ns = np.array(range(100, 1000, 10))
plt.plot(np.log(ns), np.log(err1), "b")
plt.plot(np.log(ns), -0.5 * np.log(ns) + 0.07, "r")
plt.title("Case 2D quadrangle regular $exp(-|x-y|^{2})$")
plt.xlabel("log(N)")
plt.ylabel("log(err)")
plt.show()
plt.figure()
plt.plot(np.log(ns), np.log(err2), "b")
plt.plot(np.log(ns), -0.5 * np.log(ns) + 0.1, "r")
plt.title("Case 2D quadrangle singular $1/|x-y|$")
plt.xlabel("log(N)")
plt.ylabel("log(err)")
plt.show()
# Same singular case without averaging, over a finer range of N.
err3 = np.array([])
plt.figure()
for n in range(500, 1500):
    Point = monteCarlo_quadrangle(A, B, C, D, n)
    err3_ = monteCarlo_Sing(Point[0], Point[1], n)
    err3 = np.append(err3, err3_)
plt.figure()
ns3 = np.array(range(500, 1500))
plt.plot(np.log(ns3), np.log(err3), "b")
plt.plot(np.log(ns3), -0.5 * np.log(ns3) + 0.1, "r")
plt.title("Case 2D quadrangle singular $1/|x-y|$")
plt.xlabel("log(N)")
plt.ylabel("log(err)")
plt.show()
import os
import platform
import subprocess
import re
import psutil
from time import sleep
from flask import render_template, abort, request, send_file, flash, redirect, url_for
from arm.ui import app, db
from arm.models.models import Job, Config
from arm.config.config import cfg
from arm.ui.utils import convert_log, get_info, call_omdb_api, clean_for_filename
from arm.ui.forms import TitleSearchForm, ChangeParamsForm, CustomTitleForm
from pathlib import Path
@app.route('/logreader')
def logreader():
    """Stream or download a log file.

    Query args: 'logfile' (name under cfg['LOGPATH']) and 'mode' —
    'armcat' streams only lines containing "ARM:", 'full' repeatedly
    re-sends the whole file, 'download' sends a converted copy.
    """
    logpath = cfg['LOGPATH']
    mode = request.args['mode']
    logfile = request.args['logfile']
    # Assemble full path
    fullpath = os.path.join(logpath, logfile)
    # Render the error page when the logfile does NOT exist
    # (the previous "# file exists" comment here was inverted).
    my_file = Path(fullpath)
    if not my_file.is_file():
        return render_template('error.html')
    if mode == "armcat":
        # Stream only ARM-tagged lines, polling the file once per second.
        def generate():
            f = open(fullpath)  # NOTE(review): handle is never closed; lives until the client disconnects
            while True:
                new = f.readline()
                if new:
                    if "ARM:" in new:
                        yield new
                else:
                    sleep(1)
    elif mode == "full":
        # Re-read and re-send the whole file once per second.
        def generate():
            with open(fullpath) as f:
                while True:
                    yield f.read()
                    sleep(1)
    elif mode == "download":
        clogfile = convert_log(logfile)
        return send_file(clogfile, as_attachment=True)
    else:
        # NOTE(review): exit() on an unknown mode kills the worker process —
        # an abort(400) would be gentler; left unchanged (doc-only edit).
        exit()
    return app.response_class(generate(), mimetype='text/plain')
@app.route('/activerips')
def rips():
    """Render the page listing all jobs whose status is 'active'."""
    return render_template('activerips.html', jobs=Job.query.filter_by(status="active"))
@app.route('/history')
def history():
    """Render the full job history; empty when the DB file is missing."""
    jobs = {}
    if os.path.isfile(cfg['DBFILE']):
        jobs = Job.query.filter_by()
    return render_template('history.html', jobs=jobs)
@app.route('/jobdetail', methods=['GET', 'POST'])
def jobdetail():
    """Show one job and all of its tracks.

    NOTE(review): a missing/unknown job_id makes Job.query.get return None,
    so jobs.tracks would raise — presumably job_id is always valid; verify.
    """
    job_id = request.args.get('job_id')
    jobs = Job.query.get(job_id)
    tracks = jobs.tracks.all()
    return render_template('jobdetail.html', jobs=jobs, tracks=tracks)
@app.route('/titlesearch', methods=['GET', 'POST'])
def submitrip():
    """Show the title-search form; on submit, redirect to the results list."""
    job_id = request.args.get('job_id')
    job = Job.query.get(job_id)
    form = TitleSearchForm(obj=job)
    if form.validate_on_submit():
        # Copy the submitted title/year onto the job, then hand off to
        # list_titles which performs the actual OMDb lookup.
        form.populate_obj(job)
        flash('Search for {}, year={}'.format(form.title.data, form.year.data), category='success')
        return redirect(url_for('list_titles', title=form.title.data, year=form.year.data, job_id=job_id))
    return render_template('titlesearch.html', title='Update Title', form=form)
@app.route('/changeparams', methods=['GET', 'POST'])
def changeparams():
    """Edit a job's ripping parameters (lengths, rip method, main feature, disc type)."""
    config_id = request.args.get('config_id')
    config = Config.query.get(config_id)
    job = Job.query.get(config_id)
    form = ChangeParamsForm(obj=config)
    if form.validate_on_submit():
        config.MINLENGTH = format(form.MINLENGTH.data)
        config.MAXLENGTH = format(form.MAXLENGTH.data)
        config.RIPMETHOD = format(form.RIPMETHOD.data)
        # Bug fix: bool(format(x)) was True for ANY non-empty string, including
        # "False" and "0", so the flag could never be switched off. Compare the
        # string value instead (must be 1 for True, 0 for False).
        config.MAINFEATURE = str(form.MAINFEATURE.data).lower() in ('true', '1')
        app.logger.debug(f"main={config.MAINFEATURE}")
        job.disctype = format(form.DISCTYPE.data)
        db.session.commit()
        db.session.refresh(job)
        db.session.refresh(config)
        flash('Parameters changed. Rip Method={}, Main Feature={}, Minimum Length={}, '
              'Maximum Length={}, Disctype={}'.format(
                  config.RIPMETHOD, config.MAINFEATURE, config.MINLENGTH, config.MAXLENGTH,
                  job.disctype))
        return redirect(url_for('home'))
    return render_template('changeparams.html', title='Change Parameters', form=form)
@app.route('/customTitle', methods=['GET', 'POST'])
def customtitle():
    """Let the user set a job's title and year manually."""
    job_id = request.args.get('job_id')
    job = Job.query.get(job_id)
    form = CustomTitleForm(obj=job)
    if form.validate_on_submit():
        form.populate_obj(job)
        job.title = format(form.title.data)
        job.year = format(form.year.data)
        db.session.commit()
        # Bug fix: previously flashed the form *field objects* (their repr),
        # not the submitted values.
        flash('custom title changed. Title={}, Year={}, '.format(form.title.data, form.year.data))
        return redirect(url_for('home'))
    return render_template('customTitle.html', title='Change Title', form=form)
@app.route('/list_titles')
def list_titles():
    """Search OMDb for a title/year and render the candidate list.

    NOTE(review): request.args.get('title') returns None when the arg is
    missing, so .strip() would raise — callers always pass both; verify.
    """
    title = request.args.get('title').strip()
    year = request.args.get('year').strip()
    job_id = request.args.get('job_id')
    dvd_info = call_omdb_api(title, year)
    return render_template('list_titles.html', results=dvd_info, job_id=job_id)
@app.route('/gettitle', methods=['GET', 'POST'])
def gettitle():
    """Fetch full OMDb details for one imdbID and render them."""
    imdbID = request.args.get('imdbID')
    job_id = request.args.get('job_id')
    dvd_info = call_omdb_api(None, None, imdbID, "full")
    return render_template('showtitle.html', results=dvd_info, job_id=job_id)
@app.route('/updatetitle', methods=['GET', 'POST'])
def updatetitle():
    """Persist the user-chosen title metadata onto the job and go home.

    Both the plain and *_manual variants of each field are set, and
    hasnicetitle is flagged so downstream naming uses the clean title.
    """
    new_title = request.args.get('title')
    new_year = request.args.get('year')
    video_type = request.args.get('type')
    imdbID = request.args.get('imdbID')
    poster_url = request.args.get('poster')
    job_id = request.args.get('job_id')
    print("New imdbID=" + imdbID)
    job = Job.query.get(job_id)
    # Titles are sanitized for use in file names.
    job.title = clean_for_filename(new_title)
    job.title_manual = clean_for_filename(new_title)
    job.year = new_year
    job.year_manual = new_year
    job.video_type_manual = video_type
    job.video_type = video_type
    job.imdb_id_manual = imdbID
    job.imdb_id = imdbID
    job.poster_url_manual = poster_url
    job.poster_url = poster_url
    job.hasnicetitle = True
    db.session.commit()
    flash('Title: {} ({}) was updated to {} ({})'.format(job.title_auto, job.year_auto, new_title, new_year),
          category='success')
    return redirect(url_for('home'))
@app.route('/logs')
def logs():
    """Render the log viewer page for the requested logfile and mode."""
    args = request.args
    return render_template('logview.html', file=args['logfile'], mode=args['mode'])
@app.route('/listlogs', defaults={'path': ''})
def listlogs(path):
    """List all files under cfg['LOGPATH'] (plus optional sub-path)."""
    basepath = cfg['LOGPATH']
    fullpath = os.path.join(basepath, path)
    # Deal with bad data
    if not os.path.exists(fullpath):
        return abort(404)
    # Get all files in directory
    files = get_info(fullpath)
    return render_template('logfiles.html', files=files)
@app.route('/')
@app.route('/index.html')
def home():
    """Render the dashboard: disk space, RAM, CPU info and non-finished jobs."""
    # Hard drive space, converted bytes -> GiB and rounded.
    freegb = psutil.disk_usage(cfg['ARMPATH']).free
    freegb = round(freegb / 1073741824, 1)
    mfreegb = psutil.disk_usage(cfg['MEDIA_DIR']).free
    mfreegb = round(mfreegb / 1073741824, 1)
    # RAM: parse /proc/meminfo (values in KiB). Use a context manager so the
    # file handle is closed — the old bare open() leaked it on every request.
    with open('/proc/meminfo') as f:
        meminfo = dict((i.split()[0].rstrip(':'), int(i.split()[1])) for i in f.readlines())
    mem_kib = meminfo['MemTotal']  # e.g. 3921852
    mem_gib = round(mem_kib / (1024.0 * 1024.0), 2)
    # Renamed from memused_* — MemFree is the FREE memory; "used" is derived below.
    memfree_kib = meminfo['MemFree']
    memfree_gib = round(memfree_kib / (1024.0 * 1024.0), 2)
    memused_gibs = round(mem_gib - memfree_gib, 2)
    # get our cpu info
    ourcpu = get_processor_name()
    if os.path.isfile(cfg['DBFILE']):
        # All jobs that are still in flight (neither failed nor succeeded).
        jobs = db.session.query(Job).filter(Job.status.notin_(['fail', 'success'])).all()
    else:
        jobs = {}
    return render_template('index.html', freegb=freegb, mfreegb=mfreegb, jobs=jobs, cpu=ourcpu, ram=mem_gib,
                           ramused=memused_gibs, ramfree=memfree_gib, ramdump=meminfo)
def get_processor_name():
    """Collect and return some CPU info, ideally "{name} @ {speed} GHz".

    Returns bytes on macOS (sysctl output is not decoded), a string on
    Windows/Linux, or None when no CPU entry could be parsed.
    """
    if platform.system() == "Windows":
        return platform.processor()
    elif platform.system() == "Darwin":
        return subprocess.check_output(['/usr/sbin/sysctl', "-n", "machdep.cpu.brand_string"]).strip()
    elif platform.system() == "Linux":
        command = "cat /proc/cpuinfo"
        # str() of the raw bytes keeps literal backslash escapes in the text,
        # which is why the regexes below match '\\n' / '\\t' (two characters
        # each) instead of real newlines and tabs.
        fulldump = str(subprocess.check_output(command, shell=True).strip())
        # Intel CPUs embed the clock in "model name" (e.g. "... @ 3.20GHz").
        speeds = re.search(r"\\nmodel name\\t:.*?GHz\\n", fulldump)
        if speeds:
            # We have intel CPU
            speeds = str(speeds.group())
            speeds = speeds.replace('\\n', ' ')
            speeds = speeds.replace('\\t', ' ')
            speeds = speeds.replace('model name :', '')
            return speeds
        # AMD CPUs: model name and a separate "cpu MHz" reading.
        amd_name_full = re.search(r"model name\\t: (.*?)\\n", fulldump)
        if amd_name_full:
            amd_name = amd_name_full.group(1)
            amd_mhz = re.search(r"cpu MHz(?:\\t)*: ([.0-9]*)\\n", fulldump)  # noqa: W605
            if amd_mhz:
                # MHz -> GHz rounded to 2 decimals
                amd_ghz = round(float(amd_mhz.group(1)) / 1000, 2)
                return str(amd_name) + " @ " + str(amd_ghz) + " GHz"
    return None  # We didnt find our cpu
|
"""
Write a program that walks through a folder tree and searches for files with a
certain file extension. Copy these files from whatever location they are in to a new folder.
"""
import shutil, os
def filesCopy(folderLocation, extension, destination):
    """Walk folderLocation recursively and copy every file whose name ends
    with *extension* into *destination*."""
    root = os.path.abspath(folderLocation)
    for dirpath, _subdirs, names in os.walk(root):
        for name in names:
            if name.endswith(extension):
                shutil.copy(os.path.join(dirpath, name), destination)
#__main__
# Interactive driver: ask for source, extension and destination, then copy
# all matching files into a new "Extension_Copy" folder under destination.
folderLocation = input("Enter the location from where you want to copy\n")
extension = input("Enter the extension of the files\n")
destination = input("Enter the place where you want to copy the files\n")
print("The files are copied to a new folder named - "+ "Extension_Copy" )
destination = destination + "/" + "Extension_Copy" #naming the destination folder
try:
    os.makedirs(destination) #making the directory
except FileExistsError:
    # Reuse the folder if it already exists.
    print("Folder already Exists")
filesCopy(folderLocation, extension,destination)
|
from django.db import models
class Usuario(models.Model):
    """User record with a display name (nombre) and contact e-mail (correo)."""
    # basic identity fields
    nombre= models.CharField(max_length=20)
    correo= models.CharField(max_length=30)
    def __str__(self):
        # Shown as "nombre - correo" (e.g. in the Django admin).
        return"%s - %s"%(
            self.nombre,
            self.correo)
# Create your models here.
|
import os
import h5py
import numpy as np
import argparse
import json
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.applications.resnet_v2 import preprocess_input
# Breed names (looks like the Oxford-IIIT Pet classes — verify); the index of
# a name in this list is its numeric class label.
label_list = ['Abyssinian','Bengal','Birman','Bombay','British_Shorthair','Egyptian_Mau','Maine_Coon','Persian','Ragdoll','Russian_Blue','Siamese','Sphynx','american_bulldog','american_pit_bull_terrier','basset_hound','beagle','boxer','chihuahua','english_cocker_spaniel','english_setter','german_shorthaired','great_pyrenees','havanese','japanese_chin','keeshond','leonberger','miniature_pinscher','newfoundland','pomeranian','pug','saint_bernard','samoyed','scottish_terrier','shiba_inu','staffordshire_bull_terrier','wheaten_terrier','yorkshire_terrier']
# CLI: image folder to index, output embedding file, and JSON metadata file.
ap = argparse.ArgumentParser()
ap.add_argument("-database", required = True,
                help = "Path to database which contains images to be indexed")
ap.add_argument("-embedding", required = True,
                help = "Name of output embedding")
ap.add_argument("-json", required = True,
                help = "Name of json")
args = vars(ap.parse_args())
import re
def get_first_digit_pos(s):
    """Return the index of the first decimal digit in *s*.

    Raises AttributeError when *s* contains no digit (re.search returns
    None), matching the original behavior.
    """
    # Raw string avoids the invalid-escape-sequence warning of '\d'.
    first_digit = re.search(r'\d', s)
    return first_digit.start()
def get_imlist(path):
    """Return full paths of all '.jpg' files directly inside *path*."""
    jpgs = []
    for entry in os.listdir(path):
        if entry.endswith('.jpg'):
            jpgs.append(os.path.join(path, entry))
    return jpgs
from numpy import linalg
def extract_from_img(img_path):
    """Return an L2-normalized feature vector for one image file.

    Loads the image at 224x224, applies ResNetV2 preprocessing and runs the
    global `extract_model` created under __main__.
    """
    img = image.load_img(img_path, target_size=(224, 224))
    img = image.img_to_array(img)
    # Add the batch dimension expected by predict().
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    feat = extract_model.predict(img)
    # L2-normalize the feature vector.
    norm_feat = feat[0] / linalg.norm(feat[0])
    return norm_feat
if __name__ == "__main__":
    # Index every .jpg in the database folder: extract features, derive the
    # class label from the filename prefix, then save names/labels to JSON
    # and the feature matrix to an .npz file.
    db = args["database"]
    embedding_file = args["embedding"]
    json_file = args["json"]
    img_list = get_imlist(db)
    print("--------------------------------------------------")
    print(" feature extraction starts")
    print("--------------------------------------------------")
    feats = []
    names = []
    labels = []
    # Fine-tuned ResNet50; features are taken from its 'dense' layer.
    model = load_model('/home/cyhong021/saved_model/resnet50/model_epoch100_loss0.05_acc1.00.h5')
    layer_name = 'dense'
    extract_model = Model(inputs=model.input,
                          outputs=model.get_layer(layer_name).output)
    for i, img_path in enumerate(img_list):
        norm_feat = extract_from_img(img_path)
        img_name = os.path.split(img_path)[1]
        feats.append(norm_feat)
        names.append(img_name)
        # Filename is "<breed>_<number>.jpg": strip "_<number>..." to get the
        # breed name and map it to its index in label_list.
        labels.append(label_list.index(img_name[:get_first_digit_pos(img_name) - 1]))
        if i%300 == 0:
            print("extracting feature from image No. %d , %d images in total" %((i+1), len(img_list)))
    with open(json_file, 'w') as outfile:
        json.dump({'name': names, 'label':labels}, outfile)
    feats = np.array(feats)
    print(feats.shape)
    np.savez(embedding_file, ans=feats)
    print("--------------------------------------------------")
    print(" writing feature extraction results ...")
    print("--------------------------------------------------")
|
#!usr/bin/python
#coding:utf-8
class panda():
    "Toy panda class (Python 2) demonstrating class-level counters."
    # Class-wide totals shared by all instances.
    pandacount=0
    foodcount = 0
    def __init__(self,name,bamboo=0):
        self.name = name
        self.bamboo = bamboo
        # Update the shared counters on every construction.
        panda.pandacount += 1
        panda.foodcount = panda.foodcount + bamboo
    def climd(self):
        # Prints "<name> lies down..." (Chinese runtime string kept as-is).
        print "%s 趴趴……" % self.name
    def food(self):
        # Prints how many bamboo this panda ate.
        print "吃了 %d 个……" % self.bamboo
def main():
    """Demo: create two pandas and inspect class vs. instance attributes."""
    papa1 = panda("缓缓",20)
    papa2 = panda("源源",19)
    papa1.climd()
    # 'age' is set on the INSTANCES only, not on the class.
    papa1.age = 3
    papa2.age = 10
    print "现在有%d个熊猫,他们吃了%d个竹子" %(panda.pandacount,panda.foodcount)
    print hasattr(panda,"food")
    # False: 'age' exists on instances, not on the panda class itself.
    print hasattr(panda,'age')
    print hasattr(panda,'mv')
if __name__ == '__main__':
    main()
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import BCELoss

from bigdl.nn.criterion import *
from bigdl.nn.layer import *
from bigdl.optim.optimizer import Adam
from pyspark.sql.types import *
from zoo import init_nncontext
from zoo.common.nncontext import *
from zoo.pipeline.api.net.torch_net import TorchNet, TorchIdentityCriterion
from zoo.pipeline.nnframes import *
# create training data as Spark DataFrame
def get_df(sqlContext):
    """Build a tiny 4-row (features, label) DataFrame for the demo.

    NOTE(review): parallelizes via the module-global SparkContext `sc`
    created under __main__, not via the passed sqlContext — verify.
    """
    data = sc.parallelize([
        ((2.0, 1.0), 1.0),
        ((1.0, 2.0), 0.0),
        ((2.0, 1.0), 1.0),
        ((1.0, 2.0), 0.0)])
    # features: non-nullable array of doubles; label: non-nullable double.
    schema = StructType([
        StructField("features", ArrayType(DoubleType(), False), False),
        StructField("label", DoubleType(), False)])
    df = sqlContext.createDataFrame(data, schema)
    return df
# define model with Pytorch
class SimpleTorchModel(nn.Module):
    """Tiny 2-4-8-1 MLP with a sigmoid output for binary classification."""
    def __init__(self):
        super(SimpleTorchModel, self).__init__()
        self.dense1 = nn.Linear(2, 4)
        self.dense2 = nn.Linear(4, 8)
        self.dense3 = nn.Linear(8, 1)

    def forward(self, x):
        """Map a (batch, 2) input to a (batch, 1) value in (0, 1)."""
        x = self.dense1(x)
        x = self.dense2(x)
        # torch.sigmoid replaces F.sigmoid, which is deprecated (and removed
        # in newer torch releases).
        x = torch.sigmoid(self.dense3(x))
        return x
if __name__ == '__main__':
    # Single-core local Spark session for the toy example.
    sparkConf = init_spark_conf().setAppName("testNNClassifer").setMaster('local[1]')
    sc = init_nncontext(sparkConf)
    sqlContext = SQLContext(sc)
    df = get_df(sqlContext)
    # Wrap the PyTorch model plus BCE loss so BigDL/Zoo can train it.
    torch_model = SimpleTorchModel()
    becloss = BCELoss()
    model = TorchNet.from_pytorch(module=torch_model,
                                  input_shape=[1, 2],
                                  lossFunc=becloss.forward,
                                  pred_shape=[1, 1], label_shape=[1, 1])
    # The loss lives inside the TorchNet, hence the identity criterion here.
    classifier = NNEstimator(model, TorchIdentityCriterion(), SeqToTensor([2])) \
        .setBatchSize(2) \
        .setOptimMethod(Adam()) \
        .setLearningRate(0.1) \
        .setMaxEpoch(20)
    nnClassifierModel = classifier.fit(df)
    print("After training: ")
    res = nnClassifierModel.transform(df)
    res.show(10, False)
|
from tritium.apps.subscriptions.views import SubscriptionViewSet, TransactionViewSet
from tritium.apps.users.views import UserViewSet, APICreditViewSet, APIKeyViewSet
from rest_framework.routers import DefaultRouter
from tritium.apps.networks.views import NetworkViewSet
# One DRF router registers every viewset; urlpatterns is served from it.
router = DefaultRouter()
router.register(r'subscriptions', SubscriptionViewSet, basename='subscription')
router.register(r'transactions', TransactionViewSet, basename='transaction')
router.register(r'users', UserViewSet, basename='user')
router.register(r'api_credits', APICreditViewSet, basename='api_credit')
router.register(r'api_keys', APIKeyViewSet, basename='api_key')
router.register(r'networks', NetworkViewSet, basename='network')
urlpatterns = router.urls
# Created by Gustavo A. Diaz Galeas
#
# A sample program for an assignment for the students enrolled in the AP
# Computer Science course at Colonial High School in Orlando, Florida.
#
# Purpose: To implement a program that prints out a customer's receipt upon
# order placement. A worker inputs the necessary data, it is processed, then
# all of the information will be displayed upon the screen.
#
# Initialization
# Fixed menu pricing (Python 2 script).
pizza_base_price = 9.99
price_per_topping = 1.05
tax_rate = 0.1
prompt_welcome = "Welcome to the Big Pizza Pie Pizzeria!"
# Welcome prompts
print prompt_welcome
print
# Inputs and declaration of further variables
customer_name = raw_input('What is the name of the customer?\n')
order_number = input('What is the order number?\n')
num_toppings = input('How many toppings are on the pizza?\n')
# Calculating pricing for the subtotal and total of the pizza
subtotal = pizza_base_price + (num_toppings * price_per_topping)
tax = subtotal * tax_rate
total = subtotal + tax
#Printing the receipt
print
print
print customer_name + " Order #" + str(order_number)
print "----------------------------"
print "Base price of pizza: $" + str(pizza_base_price)
print "Price per topping: $" + str(price_per_topping)
print "Number of toppings: " + str(num_toppings)
print "Subtotal: $" + str(subtotal)
print "Tax: $%0.2f" % (tax)
print "Total: $%0.2f" % (total)
|
def isEven(n):
    """Return True when *n* is even."""
    # The comparison already yields the bool; no if/else needed.
    return n % 2 == 0
def isDivisibleBy3(n):
    """Return True when *n* is divisible by 3."""
    return n % 3 == 0
# Classify 1..20: '<=' when divisible by both 2 and 3, '<' when only even,
# '=' when only divisible by 3; other numbers print nothing.
# The old code relied on `x and y == False` operator precedence; the
# elif chain below expresses the same decisions directly.
for i in range(1, 20 + 1):
    even = isEven(i)
    by3 = isDivisibleBy3(i)
    if by3 and even:
        print(i, '<=')
    elif even:
        print(i, '<')
    elif by3:
        print(i, '=')
|
# numpy can build a 'dictionary encoding' of an array (a) in one call:
lut, ndx = np.unique(a, return_inverse=True)
# lut holds the unique values; ndx maps every element of (a) to its lut index.
# Depending on number of distinct values, index array can be compressed to file by typecasting:
if len(lut) < 256:
    ndx = ndx.astype(np.uint8)  # byte is enough for < 256 distinct values
b = np.memmap(outFileName, dtype=ndx.dtype, mode='w+', shape=ndx.shape)
b[:] = ndx[:]
# to recreate the original fully specified array (a) after reading from disk:
a = np.array(lut)[ndx]
# If your task is to constantly access a particular column for all rows, this column-based approach
# beats row-based RDBMSs by miles and miles and miles.
# BTW 'dictionary encoding' effectively also happens in a classic RDBMS when joining a 'fact table'
# to a 'dimension table' (everybody calls it LUT for 'lookup table' in my experience).
|
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
# Exploratory plots for the abalone dataset; all figures are written under
# ./output/description/. Column 0 is Sex, the last column is Rings.
df = pd.read_csv(r"data/abalone.csv")
group = df.groupby('Sex')
keys = ['M', 'F', 'I']
'''
for all data, "rings" is dependent, scatter
'''
fig1 = plt.figure(figsize=(12, 16))
for i in range(1, 8):
    ax = fig1.add_subplot(3, 3, i)
    sns.scatterplot(x=df.iloc[:, i], y=df.iloc[:, -1], hue=df.iloc[:, 0], alpha=.9, s=7, ax=ax)
plt.tight_layout(pad=6, w_pad=2, h_pad=6)
plt.savefig(r'./output/description/scatter_rings.png')
'''
for group data, group by sex, "rings" is dependent, scatter
'''
for key, dff in group:
    fig2 = plt.figure(figsize=(12, 16))
    for i in range(1, 8):
        ax = fig2.add_subplot(3, 3, i)
        sns.scatterplot(x=dff.iloc[:, i], y=dff.iloc[:, -1], alpha=0.9, s=7, ax=ax)
    plt.tight_layout(pad=6, w_pad=2, h_pad=6)
    plt.savefig(r'./output/description/scatter_rings_' + key + '.png')
'''
for all data, "sex" is dependent, scatter, box, hist
'''
fig3_1 = plt.figure(figsize=(12, 16))
ax3_1 = fig3_1.add_subplot(111)
sns.scatterplot(x=df.iloc[:, 1], y=df.iloc[:, 2], hue=df.iloc[:, 0], alpha=1, s=7, ax=ax3_1)
ax3_1.set_title('Length & Diam')
plt.savefig(r'./output/description/scatter_length_diam.png')
fig3_2 = plt.figure(figsize=(18, 8))
for i in range(3):
    ax3_2 = fig3_2.add_subplot(1, 3, i+1)
    g = group.get_group(keys[i])
    sns.scatterplot(x=g.iloc[:, 1], y=g.iloc[:, 2], alpha=1, s=7, ax=ax3_2)
    ax3_2.set_title(keys[i])
plt.tight_layout(pad=6, w_pad=6)
plt.savefig(r'./output/description/scatter_length_diam_sex.png')
# Box plots of every numeric column, split by sex.
fig4 = plt.figure(figsize=(12, 12))
for i in range(1, 9):
    ax4 = fig4.add_subplot(3, 3, i)
    sns.boxplot(x=df.iloc[:, 0], y=df.iloc[:, i], linewidth=0.5, width=0.6, fliersize=0.4)
    ax4.set_xlabel('')
plt.tight_layout(pad=6, w_pad=4, h_pad=3)
plt.savefig(r'./output/description/box.png')
# Per-sex histograms (with KDE) of every numeric column.
for key in keys:
    fig5 = plt.figure(figsize=(15, 15))
    g = group.get_group(key)
    for i in range(1, 9):
        ax5 = fig5.add_subplot(3, 3, i)
        sns.histplot(g.iloc[:, i], kde=True, ax=ax5)
        ax5.set_ylabel('')
    plt.tight_layout(pad=6, w_pad=3, h_pad=6)
    plt.savefig(r'./output/description/hist_' + key + '.png')
# Full pair plot of all columns with small markers/fonts.
plt.rcParams["font.size"] = 6
plt.rcParams["axes.labelsize"] = 6
g = sns.PairGrid(df)
g.map(sns.scatterplot, s=0.8)
g.tight_layout(pad=6)
g.savefig(r'./output/description/linear_corr_all.png')
|
from django.http import HttpResponse
def ola(request):
    """Return a fixed HTML snippet for the project's test landing page."""
    return HttpResponse("<h1 style='color:Red'>Projeto 1</h1></br><h3>Coisas para colocar.</h3></br><h3>Mais coisas para colocar.</h3>")
|
# To rearrange a sorted array in the max min form.
# for e.g. given_array = [1,2,3,4,5,6,7,8,9]
# answer_array = [9,1,8,2,7,3,6,4,5]
def Convert_to_max_min(arr, n):
    """Rearrange a sorted array *arr* of length *n* into max-min order.

    E.g. [1..9] -> [9, 1, 8, 2, 7, 3, 6, 4, 5].  Returns a new list;
    *arr* is not modified.
    """
    result = []
    lo, hi = 0, n - 1
    pick_high = True  # alternate: largest remaining, then smallest remaining
    while lo <= hi:
        if pick_high:
            result.append(arr[hi])
            hi -= 1
        else:
            result.append(arr[lo])
            lo += 1
        pick_high = not pick_high
    return result
# Code to run the program: demo on the 1..9 example from the header comment.
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]
n = len(arr)
print("Given Array")
print(arr)
print("Answer Array")
print(Convert_to_max_min(arr, n))
# Console "directory" simulator: file names live in an in-memory list; menu
# options create, delete, search and list them. ANSI codes color the output.
dirName = input("| Enter Directory Name : ")
print('|\n|\033[92m Directory Created\033[0m')
fileName = []
responseCode = 1
# Loop until the user picks 5 (Exit) or an out-of-range option.
while(responseCode < 5 and responseCode > 0):
    print('|\n| Operations\n|\n| 1.Create File\n| 2.Delete File\n| 3.Search in Directory\n| 4.View All Files\n| 5.Exit\n|')
    responseCode = int(input('| Enter Response : '))
    if(responseCode == 1):
        fileId = input("| Enter File Name : ")
        # list.index raises ValueError when absent — absent means we may create.
        try:
            fileName.index(fileId)
            print('|\n|\033[93m File Name Already Exits. Try Again\033[0m')
        except ValueError:
            fileName.append(fileId)
            print('|\n|\033[92m File Created\033[0m')
    elif (responseCode == 2):
        fileId = input("|\n| Enter Name File to be Deleted : ")
        try:
            idValue = fileName.index(fileId)
            fileName.pop(idValue)
            print('|\n|\033[91m File Deleted\033[0m')
        except ValueError:
            print('|\n|\033[93m File Name Not Found. Try Again\033[0m')
    elif (responseCode == 3):
        fileId = input("|\n| Enter Name File to Search : ")
        try:
            idValue = fileName.index(fileId)
            print('|\n|\033[92m File Found As {}/{}\033[0m'.format(dirName,fileName[idValue]))
        except ValueError:
            print('|\n|\033[93m File Name Not Found. Try Again\033[0m')
    elif (responseCode == 4):
        print("|\n|\033[92m Files : \033[0m")
        for files in fileName:
            print('|\033[92m {}/{}\033[0m'.format(dirName, files))
print("|\n| Thank You. Bye")
|
from django_tables2 import Table, Column
from .models import Fights
from django.utils.safestring import mark_safe
from units.models import Unit
from types import MethodType
from math import log
def col_style():
    """Return a fresh attrs dict centering a column's header and data cells."""
    centered = {"style": "text-align:center;"}
    return {'td': dict(centered), 'th': dict(centered)}
def sign(x):
    """Return 1, -1 or 0 matching the sign of *x*."""
    # bool arithmetic: (x > 0) - (x < 0) is the classic branch-free signum.
    return (x > 0) - (x < 0)
def color4cell(x):
    """Map a positive ratio *x* to an (r, g, b) tuple.

    x > 1 shades yellow toward red, x < 1 toward green; intensity grows
    with sqrt(|log x|) and saturates at x = 1000 (or 1/1000).
    """
    # Inlined the signum helper so the scaling is self-contained.
    raw = log(x)
    scaled = ((raw > 0) - (raw < 0)) * abs(raw) ** .5
    limit = abs(log(1000)) ** .5  # color_fn(1000): log is positive, sign +1
    if scaled > 0:
        return 255, int((1 - scaled / limit) * 255), 0
    return int((1 + scaled / limit) * 255), 255, 0
class FightsTable(Table):
    """django-tables2 table of unit-vs-unit fight results.

    Columns are built dynamically in __init__: fixed unit/cost/value columns
    plus one column per 'vs_N' field on the Fights model. Each vs-cell is
    background-colored by its win ratio and links to a live combat simulation.
    """
    class Meta:
        attrs = {'class': 'paleblue'}

    def __init__(self, *args, **kwargs):
        self.base_columns['id'] = Column()
        self.base_columns['unit'] = Column(order_by='unit.name')
        self.base_columns['gold_cost'] = Column(
            attrs=col_style(), verbose_name='Gold cost')
        self.base_columns['tot_growth'] = Column(
            attrs=col_style(), verbose_name='Total growth')
        self.base_columns['ai_value'] = Column(
            attrs=col_style(), order_by='-ai_value', verbose_name='AI Value')
        self.base_columns['value'] = Column(
            attrs=col_style(), order_by='-value', verbose_name='Value')
        # Border visually separates the fixed columns from the vs-columns.
        self.base_columns['value'].attrs['td'][
            'style'] += "border-right: solid 1px #e2e2e2;"
        self.base_columns['value'].attrs['th'][
            'style'] += "border-right: solid 1px #e2e2e2;"

        def new_render(field_name):
            # Build a render function for one vs-column: color the cell by the
            # ratio and link to a simulated fight with counts scaled by it.
            def fn(self, value, record):
                pk1 = record['id']
                pk2 = int(field_name[2:])  # opponent pk from 'vs_<pk>'
                count1 = 10000
                count2 = int(count1 / record[field_name])
                # set bg color (replace any previous background-color suffix)
                r, g, b = color4cell(value)
                idx = self.attrs['td']['style'].find("background-color:")
                color = "background-color: rgba(%i, %i, %i, .5);" % (r, g, b)
                if idx > -1:
                    self.attrs['td']['style'] = self.attrs['td']['style'][:idx]
                self.attrs['td']['style'] += color
                return mark_safe(
                    '<a title=\'Try it!\' href=\'combat/?unit1=%s&unit2=%s&'
                    'count1=%s&count2=%s&num_fights=1000\'>%.3f</a>'
                    % (pk1, pk2, count1, count2, value,))
            return fn

        def render_value(_, value, record):
            # Bold the aggregate value column.
            return mark_safe('<b>%i</b>' % value)
        # Renderers are bound onto the Column objects via MethodType.
        self.base_columns['value'].render = MethodType(
            render_value, self.base_columns['value'])
        # One column per opponent field on Fights (skipping id and unit).
        for vs_field in Fights._meta.fields[2:]:
            self.base_columns[vs_field.name] = Column(
                vs_field.verbose_name, attrs=col_style())
            col = self.base_columns[vs_field.name]
            col.render = MethodType(new_render(vs_field.name), col)
        super(FightsTable, self).__init__(*args, **kwargs)
|
import moodleGradeHandler as mgh
if __name__ == "__main__":
    # Collect Moodle credentials and the exam URL interactively.
    username = input('Please Enter Username\n')
    password = input('Please Enter Password\n')
    course_url = input('Please Enter Exam URL\n'
                       'For Example: https://moodle2.bgu.ac.il/moodle/mod/quiz/view.php?id=1853627\n')
    # Resolve course module -> API token -> quiz id -> this user's results.
    course_module = mgh.parseCourseURL(course_url)
    token = mgh.getToken(username,password)
    quiz_id = mgh.getQuizId(token,course_module)
    userid = mgh.getUserId(token)
    exam_results = mgh.getExamResult(token,userid,quiz_id)
    print(exam_results)
import vodka
import vodka.plugins
# vodka.plugin.register('my_plugin')
# class MyPlugin(vodka.plugins.PluginBase):
# pass
|
class Solution:
    """Pascal's triangle generator."""

    def generate(self, numRows: int) -> List[List[int]]:
        """Return the first *numRows* rows of Pascal's triangle."""
        triangle = []
        for size in range(1, numRows + 1):
            row = [1] * size
            prev = triangle[-1] if triangle else []
            # Interior entries are the sums of the two parents above.
            for k in range(1, size - 1):
                row[k] = prev[k - 1] + prev[k]
            triangle.append(row)
        return triangle
|
"""
Dictionary
* try except playground
📚 Resources:
https://www.youtube.com/watch?v=rfscVS0vtbw&t=1s&ab_channel=freeCodeCamp.org
"""
# Prompt for a number and report parse failures instead of crashing.
try:
    number = int(input('Enter a number: '))
    print(number)
except ZeroDivisionError:
    # NOTE(review): nothing in the try block divides, so this branch is
    # unreachable — looks left over from the tutorial it follows.
    print('Divided by zero')
except ValueError:
    # int() raises ValueError on non-numeric input.
    print('Invalid input')
import requests
# Probe a remote video: stream=True defers the body, so only the response
# headers are fetched and printed.
url = 'https://v7.wuso.tv/wp-content/uploads/2018/03/asdysb0320007.mp4'
# Presumably the host checks Referer/User-Agent before serving — verify.
headers = {'Referer': 'https://wuso.me/',
           'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1'}
req = requests.get(url, headers=headers, stream=True, )
print(req.headers)
|
from .exceptions import *
import random
# Complete with your own, just for fun :)
# Default word pool for start_new_game(); empty until filled in.
LIST_OF_WORDS = []
def _get_random_word(list_of_words):
# Exception check
if not list_of_words:
raise InvalidListOfWordsException()
# Using a random integer as an indexer to select a word
rand_indexer = random.randint(0, (len(list_of_words) - 1))
return list_of_words[rand_indexer]
def _mask_word(word):
# Exception check
if not word:
raise InvalidWordException()
# Returning a string of "*" equal to the length of the word
masked_word = "*" * len(word)
return masked_word
def _uncover_word(answer_word, masked_word, character):
# Exception checks.
if len(answer_word) != len(masked_word):
raise InvalidWordException()
if not answer_word or not masked_word:
raise InvalidWordException()
if len(character) != 1:
raise InvalidGuessedLetterException()
# Blank string for the unmasked word
unmasked_word = ""
"""
Using .index() to hold the spot of the iterated character
and then using a for loop and if statements to
either unmask it or replace it with "*" to keep the rest masked.
"""
for char in answer_word:
indexer = answer_word.index(char)
if char.lower() == character.lower():
unmasked_word += char.lower()
continue
if masked_word[indexer] != "*":
unmasked_word += masked_word[indexer]
continue
unmasked_word += "*"
return unmasked_word
def guess_letter(game, letter):
    """Apply one guessed *letter* to the *game* dict and return it.

    Mutates masked_word, previous_guesses and remaining_misses. Raises
    GameWonException when the word is fully revealed, GameLostException
    when the misses run out, and GameFinishedException in two edge cases
    (win/loss where the final letter is not/is in the answer).
    """
    # NOTE(review): `finished` is always False here, so this check can never
    # raise — looks like a placeholder for a real game-over flag.
    finished = False
    if finished:
        raise GameFinishedException()
    # Reveal the guessed letter and record the guess (lowercased).
    game["masked_word"] = _uncover_word(game["answer_word"], game["masked_word"], letter)
    game["previous_guesses"].append(letter.lower())
    # Winning: the masked word now fully matches the answer.
    if game["masked_word"] == game["answer_word"]:
        if letter not in game["answer_word"]:
            raise GameFinishedException()
        raise GameWonException()
    # A wrong guess (case-insensitive) costs one remaining miss.
    if letter.lower() not in game["answer_word"].lower():
        game["remaining_misses"] -= 1
    # Out of misses: the game is lost.
    if game["remaining_misses"] == 0:
        if letter in game["answer_word"]:
            raise GameFinishedException()
        raise GameLostException()
    return game
def start_new_game(list_of_words=None, number_of_guesses=5):
    """Create a fresh game dict with a random word and its masked form."""
    words = LIST_OF_WORDS if list_of_words is None else list_of_words
    answer = _get_random_word(words)
    return {
        'answer_word': answer,
        'masked_word': _mask_word(answer),
        'previous_guesses': [],
        'remaining_misses': number_of_guesses,
    }
|
# -*- coding:utf-8 -*-
import web
from jinja2 import Template
import os
import sys
reload(sys)
#使用utf8编码
sys.setdefaultencoding('utf-8')
# 导入MySQL驱动:
import pymysql
import json
import urllib2
import re
# web.py URL routing: alternating (regex pattern, handler class name) pairs.
urls = (
    '/search/(.*)', 'search',
    '/mh/([\d,/]*)','result',
    '/test','index',
    '/mh/(\d*)/(.*)','comic'
)
class result:
    """Fetch a comic page from 733dm.net and render it via qss.html (Python 2)."""
    def GET(self,name):
        url='http://www.733dm.net/mh/'+name
        html=urllib2.urlopen(url).read()
        # Slice the comic content block out of the gbk-encoded page.
        data=re.findall(r'<div id="mhContent">([\s\S]*)<div class="tagWarp">',html.decode('gbk'))[0]
        # Undo unicode escapes so jinja2 can write Chinese text directly.
        y=json.dumps(data).decode("unicode-escape")
        def index():
            # Render the local qss.html template with the scraped content.
            f = open('qss.html')
            result = f.read()
            template = Template(result)
            data = template.render(comic=y)
            template.render()
            return data.decode('utf8')
        return index()
class search:
    """Proxy a title search to 733dm.net, rewriting CSS links to absolute URLs."""
    def GET(self,name):
        # The remote site expects gbk-encoded, URL-quoted keywords.
        rawurl = name.decode('utf8').encode('gbk')
        url = urllib2.quote(rawurl)
        # NOTE(review): req_header is built but never passed to the request.
        req_header={
            'searchget':'1',
            'show':'title',
            'keyboard':url}
        # 733 search API endpoint
        req = urllib2.Request('http://www.733dm.net/e/search/?searchget=1&show=title&keyboard='+url)
        resp = urllib2.urlopen(req,None,timeout=30)
        return resp.read().replace('/skin/dh/dhb.css','http://www.733dm.net/skin/dh/dhb.css').replace('/skin/dh/i.css','http://www.733dm.net/skin/dh/i.css')
class comic:
    """Placeholder handler: echoes the captured title/chapter pair back."""

    def GET(self, title, chap):
        # web.py receives both URL captures; return them as a tuple.
        return (title, chap)
if __name__ == "__main__":
    # Map the URL patterns above onto their handler classes and serve.
    app = web.application(urls, globals())
    app.run()
#%%
import wrf as w
import xarray as xr
from netCDF4 import Dataset
import pandas as pd
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cmaps
import os
import sys
sys.path.append('/home/zzhzhao/code/python/wrf-test-10')
from zMap import set_grid, add_NamCo
import warnings
warnings.filterwarnings("ignore")
def load_wrfdata(data_dir):
    """Read the wrfout files in `data_dir` and return interval precipitation.

    Returns (prec, lats, lons, time): prec is the per-output-interval
    precipitation (first difference of accumulated RAINC+RAINNC along
    Time), lats/lons are the 2-D coordinate arrays and time is the full
    time index of the accumulated field.
    """
    # Filename filter keeps files whose 10th character is '2' - presumably
    # selecting domain d02 ('wrfout_d02...') - TODO confirm naming scheme.
    wrf_files = [f for f in os.listdir(data_dir) if f[9] == '2']
    wrflist = [Dataset(os.path.join(data_dir, wrf_file)) for wrf_file in wrf_files]
    # Convective + grid-scale accumulated rain, concatenated over all times.
    rainc = w.getvar(wrflist, 'RAINC', timeidx=w.ALL_TIMES, method='cat')
    rainnc = w.getvar(wrflist, 'RAINNC', timeidx=w.ALL_TIMES, method='cat')
    total_rain = rainc + rainnc
    # Accumulated totals -> per-interval amounts.
    prec = total_rain.diff('Time', 1)  #.sel(Time=pd.date_range('2017-06-01 3:00:00', '2017-06-8 00:00:00', freq='3H'))
    # prec = total_rain.isel(Time=-1)
    lats, lons = w.latlon_coords(prec)
    time = total_rain.Time.to_index()
    return prec, lats, lons, time
#%%
if __name__ == '__main__':
    # Two WRF runs: control and a lake-removed sensitivity experiment.
    data_dir1 = '/home/zzhzhao/Model/wrfout/test-10'
    data_dir2 = '/home/zzhzhao/Model/wrfout/test-10-removelake'
    prec1, lats, lons, time = load_wrfdata(data_dir1)
    prec2, lats, lons, time = load_wrfdata(data_dir2)
    lat_range = (28, 34)
    lon_range = (86, 94)
    ### CMFD reference precipitation dataset
    file_path = '/home/zzhzhao/code/python/wrf-test-10/data/prec_CMFD_201706.nc'
    # Factor 3 scaling - presumably converts a per-hour rate to a 3-hourly
    # accumulation to match the WRF intervals; TODO confirm CMFD units.
    cmfd = xr.open_dataset(file_path)['prec'].sel(lat=slice(lat_range[0], lat_range[1]), lon=slice(lon_range[0], lon_range[1])) * 3
    lat, lon = cmfd.lat, cmfd.lon
    ### accumulated precipitation
    # prec_sum = prec.sel(Time=second_period).sum(dim='Time')
    cmfd_sum = cmfd.sum(dim='time')
    prec1_sum = prec1.sum(dim='Time')
    prec2_sum = prec2.sum(dim='Time')
#%%
    ### accumulated precipitation maps (2x2: WRF, WRF-nolake, CMFD, diff)
    proj = ccrs.PlateCarree()
    # crange = np.arange(0, 200+10, 10)
    labels = ['WRF', 'WRF-nolake', 'CMFD', 'Difference']
    fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(9, 11), subplot_kw={'projection': proj})
    fig.subplots_adjust(hspace=0.2, wspace=0.15)
    # Shared gridlines plus the Nam Co lake outline on every panel.
    for i in range(2):
        for j in range(2):
            set_grid(axes[i, j], lat=[30, 31.5], lon=[90, 91.5], span=.5)
            add_NamCo(axes[i, j])
    for j, prec_sum in enumerate([prec1_sum, prec2_sum]):
        c = axes[0][j].pcolor(lons, lats, prec_sum, vmin=0, vmax=250, cmap=cmaps.WhiteBlueGreenYellowRed, transform=proj)
        axes[0][j].set_title(labels[j], fontsize=14, weight='bold')
    axes[1][0].pcolor(lon, lat, cmfd_sum, vmin=0, vmax=250, cmap=cmaps.WhiteBlueGreenYellowRed, transform=proj)
    axes[1][0].set_title(labels[2], fontsize=14, weight='bold')
    # Control minus no-lake difference field on a diverging scale.
    c2 = axes[1][1].pcolor(lons, lats, prec1_sum - prec2_sum, vmin=-60, vmax=60, cmap='RdBu', transform=proj)
    axes[1][1].set_title(labels[3], fontsize=14, weight='bold')
    cb = fig.colorbar(c, ax=axes, orientation='horizontal', pad=0.05, shrink=0.9, aspect=35)
    cb.set_label('Precipitation / mm', fontsize=14)
    from mpl_toolkits.axes_grid1.inset_locator import inset_axes
    # Separate vertical colorbar for the difference panel, inset to its right.
    axins = inset_axes(axes[1][1],
                       width="5%",  # width = 10% of parent_bbox width
                       height="100%",  # height : 50%
                       loc=6,
                       bbox_to_anchor=(1.05, 0., 1, 1),
                       bbox_transform=axes[1][1].transAxes,
                       borderpad=0,
                       )
    cb2 = fig.colorbar(c2, cax=axins)  #, orientation='vertical', shrink=0.6, aspect=25)
    # cb2.set_label('Precipitation / mm', fontsize=14)
    # axes[0][1].set_visible(False)
    fig.savefig('/home/zzhzhao/code/python/wrf-test-10/fig/prec.jpg', dpi=300)
|
class Node:
    """A single singly-linked-list element: a payload plus a successor link."""

    def __init__(self, data):
        self.next = None   # successor node; None until linked into a list
        self.data = data   # payload carried by this node
class Linklist:
    """Singly linked list with head insertion and odd/even segregation."""

    def __init__(self):
        self.head = None

    def insert(self, data):
        """Push a new node holding `data` onto the front of the list."""
        newNode = Node(data)
        newNode.next = self.head
        self.head = newNode

    def print(self):
        """Print each node's payload on its own line, head first."""
        temp = self.head
        while temp:
            print(temp.data)
            temp = temp.next

    def segrate(self):
        """Rearrange the list so all odd values precede all even values.

        Walks the list once, pushing each node onto either an `odd` or an
        `even` sublist (head insertion, so relative order within each group
        is reversed - same as the original), then splices the even sublist
        after the odd one.

        Bug fix: the original dereferenced `odd.next` unconditionally and
        raised AttributeError when the list was empty or contained no odd
        values; those cases are now handled explicitly.
        """
        main_link = self.head
        odd = None
        even = None
        while main_link:
            prev = main_link.next
            if main_link.data % 2 == 0:
                main_link.next = even
                even = main_link
            else:
                main_link.next = odd
                odd = main_link
            main_link = prev
        if odd is None:
            # No odd values: the even sublist (possibly empty) is the list.
            self.head = even
            return
        # Find the tail of the odd sublist and append the even sublist.
        tail = odd
        while tail.next is not None:
            tail = tail.next
        tail.next = even
        self.head = odd
if __name__ == "__main__":
    # Build the list by head insertion, then move odd values ahead of evens.
    linklist = Linklist()
    for value in (17, 15, 8, 12, 10, 15, 4, 1, 7, 6):
        linklist.insert(value)
    linklist.segrate()
    linklist.print()
|
from .Config import Config
from .Package import Package
class Manager():
    """Drives packaging of every configured source, tracking a status flag."""

    STATUS_PROCESSING = 'PROCESSING'
    STATUS_READY = 'READY'

    def __init__(self):
        # Idle until process() is invoked.
        self.status = self.STATUS_READY

    def process(self):
        """Zip and send a Package for each source listed in the config."""
        self.status = self.STATUS_PROCESSING
        sources = Config().get('sources')
        for entry in sources:
            pkg = Package(entry)
            pkg.zip()
            pkg.send()
        self.status = self.STATUS_READY
import psycopg2
import sys
sys.path.append('/home/proj/price_keeper')
from price_keeper import TokenInfoDB
import json
# Build a token -> decimals lookup from the price_keeper DB and dump it as JSON.
db = TokenInfoDB()
# Each row t is assumed to be a (token identifier, decimal places) pair
# -- TODO confirm against TokenInfoDB.get_all_decimal().
decimal_map = {t[0]: t[1] for t in db.get_all_decimal()}
with open('utils/decimal/token_decimal.json', 'w') as f:
    f.write(json.dumps(decimal_map))
|
import datetime
import pytz
from smartweb_service.database import db
from smartweb_service.database.user_api_relation import UserApiRelation
def validate_api_key(api_key):
    """Return True when `api_key` matches a stored UserApiRelation row."""
    match = UserApiRelation.query.filter(UserApiRelation.api_key == api_key).first()
    return match is not None
def getTime():
    """Current America/New_York time as 'YYYY-MM-DD HH:MM:SS +zzzz'."""
    eastern = pytz.timezone('America/New_York')
    now = datetime.datetime.now(eastern)
    return now.strftime("%Y-%m-%d %H:%M:%S %z")
|
from Cell import Cell
from Constants import Constants
from Config import Config
from CubeCoord import CubeCoord
import random
class Board:
    """Hex board generated as concentric rings around the origin, with
    optionally punched point-symmetric holes."""

    def __init__(self, seed):
        self.map = {} # coord -> Cell
        self.index = 0  # next sequential cell id
        if seed:
            random.seed(seed)  # deterministic board for a given seed
        self.generate()

    def generateCell(self, coord, richness):
        """Place a Cell with the next sequential id at `coord`."""
        self.map[coord] = Cell(self.index, richness)
        self.index += 1

    def generate(self):
        """Build the full map: lush centre, rings of decreasing richness
        toward the rim, then knock out random cells as holes."""
        # build forest floor
        center = CubeCoord(0, 0, 0)
        self.generateCell(center, Constants.RICHNESS_LUSH)
        coord = center.neighbor(0)
        # Walk each ring: six sides, `distance` steps along each side.
        for distance in range(1, Config.MAP_RING_COUNT+1):
            for orientation in range(6):
                for _ in range(distance):
                    # Outermost ring is poor, next ring in is ok, rest lush.
                    if distance == Config.MAP_RING_COUNT:
                        self.generateCell(coord, Constants.RICHNESS_POOR)
                    elif distance == Config.MAP_RING_COUNT -1:
                        self.generateCell(coord, Constants.RICHNESS_OK)
                    else:
                        self.generateCell(coord, Constants.RICHNESS_LUSH)
                    coord = coord.neighbor((orientation + 2) % 6)
            # Step outward to start the next ring.
            coord = coord.neighbor(0)
        # add random holes
        coordList = list(self.map.keys())
        wantedEmptyCells = random.randint(1, Config.MAX_EMPTY_CELLS) if Config.HOLES_ENABLED else 0
        actualEmptyCells = 0
        # NOTE(review): the bound is wantedEmptyCells-1 and most passes add 2
        # (a cell and its mirror), so the final count can differ from the
        # wanted count - confirm this off-by-one is intentional.
        while actualEmptyCells < wantedEmptyCells-1:
            randCord = random.choice(coordList)
            if self.map[randCord].richness == Constants.RICHNESS_NULL:
                continue  # already a hole; pick again
            self.map[randCord].richness = Constants.RICHNESS_NULL
            # Mirror the hole through the origin to keep the map symmetric.
            self.map[randCord.getOpposite()].richness = Constants.RICHNESS_NULL
            # `is center` identity works because coordList holds the same
            # key objects that were inserted above.
            actualEmptyCells += 1 if randCord is center else 2
    """
    coordinates = list(self.board.keys)
    wantedEmpty = 5
    while (count with null richness) < wantedEmptyCells
        set richness to 0 for [
            a random coordinate with some richness,
            it's opposite
        ]
    """
#python Wavelet.py
import numpy as np
from PyLets import MatPlotWavelets as mwl
from PyLets import Others as ot
from PyLets import SignalsAnalyses as sia
from PyLets import Wavelets as wale
# Time axis and dilation (scale) axis for the continuous wavelet transform.
t = np.linspace(0, 10,1000)
dilat = np.linspace(0, 0.5,1000)
# Sampled mother wavelet and a two-tone test signal.
WaveletSinal = wale.HermitianWavelet1(t,0.25,5)
sinal = wale.SinalBasico(t,1,1) +wale.SinalBasico(t,2,1)
Wavelet= wale.HermitianWavelet1
# Plot the wavelet and the signal, then compute and plot the transform.
mwl.WaveLetMatPlot(t,WaveletSinal)
mwl.WaveLetMatPlot(t,sinal)
TransWaveMatrix = sia.TransWaveletCont(t,dilat,sinal,Wavelet)
print(TransWaveMatrix)
mwl.TransWaveletContGraph(TransWaveMatrix)
# TODO notes (translated from Portuguese):
# aliasing
# Nyquist
# Hermitian operator
# test the functions
|
#!/usr/local/bin/python
#-*- coding: UTF-8 -*-
#生成产品页面
##################################################
import string_data #变量保存
import Cmysql #数据库操作文件
import sfile #文件操作
import os
import re
import time
import random
def sj(length=8):  # generate a random alphanumeric token
    """Return `length` random characters drawn from [0-9a-zA-Z].

    Backward compatible: with no argument it returns 8 characters, as
    before. The original wrapped this in a bare try/except that could
    silently return None; nothing here can raise, so the guard is gone.

    :param length: number of characters to produce (default 8)
    """
    #seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!@#$%^&*()_+=-"
    seed = "1234567890abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return ''.join(random.choice(seed) for _ in range(length))
def index_data(): # build one product article page
    """Render the product-page template and write it to a random .html file.

    Every {placeholder} token in the template is looked up in the Sindex
    table and substituted in place before the page is written to disk.
    NOTE(review): the bare except hides all failures (template, DB, disk)
    - the function silently produces nothing on error.
    """
    try:
        # Destination: <site root>\chanpin\<8 random chars>.html
        index=string_data.path+string_data.url_Route+'\\chanpin\\'+sj()+'.html'
        Adata=string_data.url_chanpen_data
        # regex out the {placeholder} tokens that need filling in
        p = re.compile(r'{.*?}')
        sarr = p.findall(Adata)
        for every in sarr:
            data1=mysql_index(string_data.url_Sindex,every) # look up replacement content
            #print every+"------"+data1
            if data1!="":
                Adata=index_replace(Adata,every,data1) # swap placeholder for its content
            #time.sleep(0.5)
        sfile.TXT_file(index,Adata) # write the (Chinese) text out to the file
        print Adata
    except:
        pass
def index_replace(strdata, name, data):  # substitute `name` with `data` in `strdata`
    """Return `strdata` with every occurrence of `name` replaced by `data`.

    Preserves the caller-visible contract of the original (None on
    failure), but narrows the bare `except:` - which also swallowed
    KeyboardInterrupt/SystemExit - to the errors str.replace can actually
    raise for bad argument types.
    """
    try:
        return strdata.replace(name, data)
    except (AttributeError, TypeError):
        # Non-string input: same silent-None fallback as before.
        return None
def mysql_index(index, data):  # fetch replacement content from the Sindex table
    """Look up the `data` column for a (Sindex, name) pair.

    Returns the last matching row's value; if the query yields no rows the
    original `data` argument is returned unchanged, and "" is returned on
    any database error (both behaviours match the original).

    Security fix: the original interpolated both arguments directly into
    the SQL string; they are now passed as bound parameters (DB-API 2.0
    `%s` placeholders) so quoting in template tokens cannot break or
    inject into the query.
    """
    try:
        sql_data = "select data from Sindex where Sindex=%s and name=%s"
        index_cursor = Cmysql.sql.conn.cursor()
        n = index_cursor.execute(sql_data, (index, data))
        index_cursor.scroll(0)
        for row in index_cursor.fetchall():
            data = row[0]
        index_cursor.close()
        return data
    except Exception:
        # Same "" fallback as before, but no longer swallows SystemExit etc.
        return ""
|
"""Copyright (c) 2018 Great Ormond Street Hospital for Children NHS Foundation
Trust & Birmingham Women's and Children's NHS Foundation Trust
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import json
import hashlib
import labkey as lk
from datetime import datetime
from django.utils.dateparse import parse_date
import time
from ..models import *
from ..api_utils.poll_api import PollAPI
from ..vep_utils.run_vep_batch import CaseVariant, CaseCNV, CaseSTR
from ..config import load_config
import re
import copy
import pprint
from tqdm import tqdm
from protocols.reports_6_0_0 import InterpretedGenome, InterpretationRequestRD, CancerInterpretationRequest, ClinicalReport
import csv
class Case(object):
"""
Entity object which represents a case and it's associated details.
Cases are instantiated by MultipleCaseAdder with basic init attributes,
then details will be added if the case information needs to be added to or
updated in the DB.
Attributes:
json (dict): upon initialisation of the Case, this is the full json
response from PollAPI. This is edited during the database update
process to keep relevant information together in the datastructure.
raw_json (dict): a deepcopy of the json, which remains unmodified, for
the purpose of saving the json to disk once a Case has been added
or updated in the database.
json_case_data (dict): a sub-dict within the json which refers to the
interpretation_request_data.
json_request_data (dict): a sub-dict of json_case_data which holds the
tiered variant information. case_data and request_data are set as
attributes during init mostly to avoid long dict accessors in the
code itself.
request_id (str): the XXXX-X CIP-ID of the Interpretation Request.
json_hash (str): the MD5 hash of the json file, which has been sorted
to maintain consistency of values (which would change the hash).
proband (dict): sub-dict of json which holds the information about the
proband only.
proband_sample (str): the sample ID used for sequencing of the proband,
obtained from the JSON.
family_members (list): list of sub-dicts of the json which hold
information about all relatives of the proband's relative
tools_and_versions (dict): k-v pair of tools used by GeL/CIP (key) and
the version that was used (value)
status (str): the status of the case according the CIP-API, choice of
waiting_payload, interpretation_generated, dispatched, blocked,
files_copied, transfer_ready, transfer_complete,
rejected_wrong_format, gel_qc_failed, gel_qc_passed, sent_to_gmcs,
report_generated, or report_sent.
panel_manager (multiple_case_adder.PanelManager): instance of the class
PanelManager, which is used to hold new panels polled from PanelApp
during a cycle of the Cases' overseeing MultipleCaseAdder to avoid
polling PanelApp for the same new panel twice, before it can be
detected by check_found_in_db()
variant_manager (multiple_case_adder.VariantManager): instance of the
class VariantManager, which checks all variants from VEP 37 which
can conflict with VEP 38, then ensures the same variant (ie at the
same genomic position) has identical information between b37/b38
cases.
gene_manager (multiple_case_adder.GeneManager): instance of the control
class GeneManager, which ensures that GeneNames is not polled twice
for the same gene in one run of the MCA, ie. before a new gene is
added the database and hence won't be found by check_found_in_db
panels (dict): the "analysisPanels" section of the JSON. This only
applies for rare disease cases; cancer cases do not have panels so
this attribute is None in those cases.
skip_demographics (bool): whether (T) or not (F) we should poll LabKey
for demographic information for database entries. If not, Proband,
Relative, and Clinician will have "unknown" set as their values for
all LabKey-sourced fields.
pullt3 (bool): whether (T) or not (F) Tier 3 variants should be pulled
out of the JSON and added to the database. This is mostly to save
time, since in the case of !pullt3, there will be a huge reduction
in number of variants passed to VEP/variant annotater.
attribute_managers (dict): k-v pairing of each database model being
updated for each case, and the CaseAttributeManager for that model
for this case (e.g. {gel2mdt.models.Proband: CaseAttributeManager}.
attribute_managers are not created at first, so this starts as an
empty dictionary. When MCA determines the case needs to be added or
updated in the database, then the MCA will create (in the correct
order) CaseAttributeManagers for each model type for each case.
"""
def __init__(self, case_json, panel_manager, variant_manager, gene_manager, skip_demographics=False, pullt3=True):
    """
    Initialise a Case with the json, then pull out relevant sections.

    The JSON is deepcopied for raw_json, then the relevant sections are
    extracted by dictionary accessors or standalone functions (in the case
    of proband, family_members, tools_and_versions). A SHA512 hash is also
    calculated for the JSON, to later check if the JSON used for a case
    has changed in the CIP API.
    """
    self.json = case_json
    # raw json created to dump at the end; json attr is modified
    self.pullt3 = pullt3
    self.raw_json = copy.deepcopy(case_json)
    self.json_case_data = self.json["interpretation_request_data"]
    self.json_request_data = self.json_case_data["json_request"]
    # CIP-ID in "<request id>-<version>" form.
    self.request_id = str(
        self.json["interpretation_request_id"]) \
        + "-" + str(self.json["version"])
    # Parse the payload with the GeL protocol model matching the sample type.
    if self.json["sample_type"] == 'raredisease':
        self.ir_obj = InterpretationRequestRD.fromJsonDict(self.json_request_data)
    elif self.json['sample_type'] == 'cancer':
        self.ir_obj = CancerInterpretationRequest.fromJsonDict(self.json_request_data)
    self.json_hash = self.hash_json()
    self.proband = self.get_proband_json()
    if self.json["sample_type"] == 'raredisease':
        self.proband_sample = self.proband.samples[0].sampleId
    elif self.json['sample_type'] == 'cancer':
        self.proband_sample = self.proband.matchedSamples[0].tumourSampleId
    self.family_members = self.get_family_members()
    self.tools_and_versions = {'genome_build': self.json["assembly"]}
    self.status = self.get_status_json()
    self.panel_manager = panel_manager
    self.variant_manager = variant_manager
    self.gene_manager = gene_manager
    self.panels = self.get_panels_json()
    self.ig_objs = []  # List of interpreted genome objects
    self.clinical_report_objs = []  # ClinicalReport objects
    self.variants, self.svs, self.strs = self.get_case_variants()
    self.transcripts = []  # set by MCM with a call to vep_utils
    self.demographics = None
    self.clinicians = None
    self.diagnosis = None
    # initialise a dict to contain the AttributeManagers for this case,
    # which will be set by the MCA as they are required (otherwise there
    # are missing dependencies)
    self.skip_demographics = skip_demographics
    self.attribute_managers = {}
    # Bug fix: the GMC dump file was previously opened and never closed
    # (a file-handle leak when many Cases are built in one run); a context
    # manager now closes it as soon as the lookup dict has been built.
    with open('genie_dumps/190416_genie_gmc_dump.csv', encoding='utf-8-sig') as gmc_file:
        gmc_raw_dict = csv.DictReader(gmc_file)
        # participant ID -> GMC organisation code
        self.gmc_dict = {row['ParticipantID']: row['OrgCode'] for row in gmc_raw_dict}
def hash_json(self):
"""
Hash the given json for this Case, sorting the keys to ensure
that order is preserved, or else different order -> different
hash.
"""
hash_buffer = json.dumps(self.json, sort_keys=True).encode('utf-8')
hash_hex = hashlib.sha512(hash_buffer)
hash_digest = hash_hex.hexdigest()
return hash_digest
def get_proband_json(self):
"""
Get the proband from the list of partcipants in the JSON.
"""
proband_json = None
if self.json["sample_type"]=='raredisease':
proband_json = [member for member in self.ir_obj.pedigree.members if member.isProband][0]
elif self.json["sample_type"]=='cancer':
proband_json = self.ir_obj.cancerParticipant
return proband_json
def get_family_members(self):
'''
Gets the family member details from the JSON.
:return: A list of dictionaries containing family member details (gel ID, relationship and affection status)
'''
family_members = []
if self.json["sample_type"] == 'raredisease':
for member in self.ir_obj.pedigree.members:
if not member.isProband:
if member.additionalInformation:
relation = member.additionalInformation.get('relation_to_proband', 'unknown')
else:
relation = 'unknown'
family_member = {'gel_id': member.participantId,
'relation_to_proband': relation,
'affection_status': True if member.disorderList else False,
'sequenced': True if member.samples else False,
'sex': member.sex,
}
family_members.append(family_member)
return family_members
def get_tools_and_versions(self):
'''
Gets the genome build from the JSON. Details of other tools (VEP, Polyphen/SIFT) to be pulled from config file?
:return: A dictionary of tools and versions used for the case
'''
if self.json['sample_type'] == 'raredisease':
if self.json_request_data['genomeAssemblyVersion'].startswith('GRCh37'):
genome_build = 'GRCh37'
elif self.json_request_data['genomeAssemblyVersion'].startswith('GRCh38'):
genome_build = 'GRCh38'
elif self.json['sample_type'] == 'cancer':
if self.json["assembly"].startswith('GRCh37'):
genome_build = 'GRCh37'
elif self.json["assembly"].startswith('GRCh38'):
genome_build = 'GRCh38'
else:
raise Exception(f'{self.request_id} has unknown genome build')
return {'genome_build': genome_build}
def get_status_json(self):
"""
JSON has a list of statuses. Extract only the latest.
"""
status_jsons = self.json["status"]
return status_jsons[-1] # assuming GeL will always work upwards..
def get_panels_json(self):
"""
Get the list of panels from the json
"""
analysis_panels = []
if self.json["sample_type"] == 'raredisease':
analysis_panels = self.ir_obj.pedigree.analysisPanels
return analysis_panels
def parse_ig_variants(self, ig_obj, genome_build, variant_object_count, case_variant_list):
    """
    Walk an interpreted genome's small variants, tag each with its best
    (lowest-numbered) tier, and collect the "interesting" ones as
    CaseVariants for later VEP annotation.

    Each kept variant gains a `.case_variant` attribute pointing at its
    CaseVariant; skipped ones get `.case_variant = False`.

    :param ig_obj: InterpretedGenome protocol object (modified in place)
    :param genome_build: assembly string stamped on each CaseVariant
    :param variant_object_count: running counter across the whole case
    :param case_variant_list: accumulator list, extended in place
    :return: (ig_obj, case_variant_list, variant_object_count)
    """
    for variant in ig_obj.variants:
        # Sort out tiers first: rare disease report events use `tier`,
        # cancer uses `domain`; the last character holds the tier number.
        variant_min_tier = None
        tier = None
        for report_event in variant.reportEvents:
            if self.json['sample_type'] == 'raredisease':
                if report_event.tier:
                    tier = int(report_event.tier[-1])
            elif self.json['sample_type'] == 'cancer':
                if report_event.domain:
                    tier = int(report_event.domain[-1])
            if variant_min_tier is None:
                variant_min_tier = tier
            elif tier < variant_min_tier:
                variant_min_tier = tier
        variant.max_tier = variant_min_tier
        interesting_variant = False
        if ig_obj.interpretationService == 'Exomiser':
            for report_event in variant.reportEvents:
                # NOTE(review): this assigns False for high-scoring events,
                # so no Exomiser variant is ever marked interesting; a
                # score threshold would normally set True - confirm intent.
                if report_event.score >= 0.95:
                    interesting_variant = False
        elif ig_obj.interpretationService == 'genomics_england_tiering':
            # Tier 3 variants are only pulled when pullt3 is set.
            if not self.pullt3:
                if variant.max_tier < 3:
                    interesting_variant = True
            else:
                interesting_variant = True
        else:
            interesting_variant = True # CIP variants all get pulled
        if interesting_variant:
            variant_object_count += 1
            case_variant = CaseVariant(
                chromosome=variant.variantCoordinates.chromosome,
                position=variant.variantCoordinates.position,
                ref=variant.variantCoordinates.reference,
                alt=variant.variantCoordinates.alternate,
                case_id=self.request_id,
                variant_count=str(variant_object_count),
                genome_build=genome_build
            )
            case_variant_list.append(case_variant)
            variant.case_variant = case_variant
        else:
            variant.case_variant = False
    return ig_obj, case_variant_list, variant_object_count
def parse_ig_svs(self, ig_obj, genome_build, case_sv_list):
    """
    Collect tiered structural variants from an interpreted genome as
    CaseCNV objects; untiered ones get `.case_variant = False`.

    :return: (ig_obj, case_sv_list)
    """
    # Skip SV parsing entirely for old gel-tiering versions: the dotted
    # version string is compared numerically with the dots stripped -
    # presumably versions at/below 1.0.0.0 predate SV tiering; confirm.
    if int(ig_obj.softwareVersions['gel-tiering'].replace('.', '')) <= 1000:
        for variant in ig_obj.structuralVariants:
            variant.case_variant = False
        return ig_obj, case_sv_list
    for variant in ig_obj.structuralVariants:
        interesting_variant = False
        # Any tiered report event makes the SV interesting; the last
        # tiered event seen sets max_tier.
        for report_event in variant.reportEvents:
            if report_event.tier:
                variant.max_tier = report_event.tier
                interesting_variant = True
        if interesting_variant:
            case_variant = CaseCNV(
                chromosome=variant.coordinates.chromosome,
                sv_start=variant.coordinates.start,
                sv_end=variant.coordinates.end,
                sv_type=variant.variantType,
                case_id=self.request_id,
                genome_build=genome_build
            )
            case_sv_list.append(case_variant)
            variant.case_variant = case_variant
        else:
            variant.case_variant = False
    return ig_obj, case_sv_list
def parse_ig_strs(self, ig_obj, genome_build, case_str_list):
    """
    Collect tiered short tandem repeats from an interpreted genome as
    CaseSTR objects, attaching per-allele copy numbers for the proband
    and (where identified) each parent. Untiered STRs get
    `.case_variant = False`.

    :return: (ig_obj, case_str_list)
    """
    # Same old-version gate as parse_ig_svs (dots stripped, compared
    # numerically) - presumably these versions predate STR tiering.
    if int(ig_obj.softwareVersions['gel-tiering'].replace('.', '')) <= 1000:
        for variant in ig_obj.shortTandemRepeats:
            variant.case_variant = False
        return ig_obj, case_str_list
    participant_id = self.json["proband"]
    mother_id = None
    father_id = None
    # Identify the parents so their calls can be matched below.
    family_members = self.family_members
    for family_member in family_members:
        if family_member["relation_to_proband"] == "Father":
            father_id = family_member["gel_id"]
        elif family_member["relation_to_proband"] == "Mother":
            mother_id = family_member["gel_id"]
    for variant in ig_obj.shortTandemRepeats:
        # Copy numbers default to None until a matching call is found.
        variant.proband_copies_a = None
        variant.proband_copies_b = None
        variant.maternal_copies_a = None
        variant.maternal_copies_b = None
        variant.paternal_copies_a = None
        variant.paternal_copies_b = None
        interesting_variant = False
        for report_event in variant.reportEvents:
            if report_event.tier:
                variant.max_tier = report_event.tier
                interesting_variant = True
                variant.mode_of_inheritance = report_event.modeOfInheritance
                variant.segregation_pattern = report_event.segregationPattern
        # Record both alleles' copy numbers per family member.
        for variant_call in variant.variantCalls:
            if variant_call.participantId == participant_id:
                variant.proband_copies_a = variant_call.numberOfCopies[0].numberOfCopies
                variant.proband_copies_b = variant_call.numberOfCopies[1].numberOfCopies
            if variant_call.participantId == mother_id:
                variant.maternal_copies_a = variant_call.numberOfCopies[0].numberOfCopies
                variant.maternal_copies_b = variant_call.numberOfCopies[1].numberOfCopies
            if variant_call.participantId == father_id:
                variant.paternal_copies_a = variant_call.numberOfCopies[0].numberOfCopies
                variant.paternal_copies_b = variant_call.numberOfCopies[1].numberOfCopies
        if interesting_variant:
            case_variant = CaseSTR(
                chromosome=variant.coordinates.chromosome,
                str_start=variant.coordinates.start,
                str_end=variant.coordinates.end,
                repeated_sequence=variant.shortTandemRepeatReferenceData.repeatedSequence,
                normal_threshold=variant.shortTandemRepeatReferenceData.normal_number_of_repeats_threshold,
                pathogenic_threshold=variant.shortTandemRepeatReferenceData.pathogenic_number_of_repeats_threshold,
                case_id=self.request_id,
                genome_build=genome_build
            )
            case_str_list.append(case_variant)
            variant.case_variant = case_variant
        else:
            variant.case_variant = False
    return ig_obj, case_str_list
def get_case_variants(self):
    """
    Create CaseVariant objects for each variant listed in the json,
    then return a list of all CaseVariants for construction of
    CaseTranscripts using VEP.

    Also populates self.ig_objs and self.clinical_report_objs as a side
    effect, and returns SV and STR accumulators alongside the small
    variants.

    :return: (case_variant_list, case_sv_list, case_str_list)
    """
    case_variant_list = []
    case_sv_list = []
    case_str_list = []
    # go through each variant in the json
    variant_object_count = 0
    genome_build = self.json['assembly']
    # Small variants, SVs and STRs from every interpreted genome.
    for ig in self.json['interpreted_genome']:
        ig_obj = InterpretedGenome.fromJsonDict(ig['interpreted_genome_data'])
        if ig_obj.variants:
            ig_obj, case_variant_list, variant_object_count = self.parse_ig_variants(ig_obj,
                                                                                    genome_build,
                                                                                    variant_object_count,
                                                                                    case_variant_list)
        if ig_obj.structuralVariants:
            ig_obj, case_sv_list = self.parse_ig_svs(ig_obj,
                                                     genome_build,
                                                     case_sv_list)
        if ig_obj.shortTandemRepeats:
            ig_obj, case_str_list = self.parse_ig_strs(ig_obj,
                                                       genome_build,
                                                       case_str_list)
        self.ig_objs.append(ig_obj)
    # Clinical report variants are always pulled (no "interesting" filter).
    for clinical_report in self.json['clinical_report']:
        cr_obj = ClinicalReport.fromJsonDict(clinical_report['clinical_report_data'])
        if cr_obj.variants:
            for variant in cr_obj.variants:
                # Same min-tier logic as parse_ig_variants: rare disease
                # uses `tier`, cancer uses `domain`.
                variant_min_tier = None
                tier = None
                for report_event in variant.reportEvents:
                    if self.json['sample_type'] == 'raredisease':
                        if report_event.tier:
                            tier = int(report_event.tier[-1])
                    elif self.json['sample_type'] == 'cancer':
                        if report_event.domain:
                            tier = int(report_event.domain[-1])
                    if variant_min_tier is None:
                        variant_min_tier = tier
                    elif tier < variant_min_tier:
                        variant_min_tier = tier
                variant.max_tier = variant_min_tier
                variant_object_count += 1
                case_variant = CaseVariant(
                    chromosome=variant.variantCoordinates.chromosome,
                    position=variant.variantCoordinates.position,
                    ref=variant.variantCoordinates.reference,
                    alt=variant.variantCoordinates.alternate,
                    case_id=self.request_id,
                    variant_count=str(variant_object_count),
                    genome_build=genome_build
                )
                case_variant_list.append(case_variant)
                variant.case_variant = case_variant
        self.clinical_report_objs.append(cr_obj)
    return case_variant_list, case_sv_list, case_str_list
class CaseAttributeManager(object):
"""
Handler for managing each different type of case attribute.
Holds get/refresh functions to be called by MCA, as well as pointing to
CaseModels and ManyCaseModels for access by MCA.bulk_create_new().
"""
def __init__(self, case, model_type, model_objects, many=False):
    """
    Initialise with CaseModel or ManyCaseModel, dependent on many param.

    :param case: parent Case, used to reach related table entries
    :param model_type: the database model class this manager handles
    :param model_objects: pre-fetched DB objects for that model
    :param many: True when the attribute maps to a ManyCaseModel
    """
    self.case = case # for accessing related table entries
    self.model_type = model_type
    self.model_objects = model_objects
    self.many = many
    # Must run last: the get_* factories read the attributes set above.
    self.case_model = self.get_case_model()
def get_case_model(self):
    """
    Build and return the case model for this manager's model_type.

    Dispatches to the matching get_* factory via a lookup table. The
    original was a 28-branch if/elif chain that silently fell through
    for unknown model types and crashed with UnboundLocalError on the
    final return; an explicit ValueError is raised instead.

    :raises ValueError: if no factory is registered for model_type.
    """
    dispatch = {
        Clinician: self.get_clinician,
        Proband: self.get_proband,
        Family: self.get_family,
        Relative: self.get_relatives,
        Phenotype: self.get_phenotypes,
        FamilyPhenotype: self.get_family_phenotypes,
        InterpretationReportFamily: self.get_ir_family,
        Panel: self.get_panels,
        PanelVersion: self.get_panel_versions,
        InterpretationReportFamilyPanel: self.get_ir_family_panel,
        Gene: self.get_genes,
        PanelVersionGene: self.get_panel_version_genes,
        Transcript: self.get_transcripts,
        GELInterpretationReport: self.get_ir,
        Variant: self.get_variants,
        TranscriptVariant: self.get_transcript_variants,
        PVFlag: self.get_pv_flags,
        ProbandVariant: self.get_proband_variants,
        ProbandTranscriptVariant: self.get_proband_transcript_variants,
        ReportEvent: self.get_report_events,
        ToolOrAssemblyVersion: self.get_tool_and_assembly_versions,
        SVRegion: self.get_sv_regions,
        SV: self.get_svs,
        ProbandSV: self.get_proband_svs,
        ProbandSVGene: self.get_proband_sv_genes,
        STRVariant: self.get_str_variants,
        ProbandSTR: self.get_proband_strs,
        ProbandSTRGene: self.get_proband_str_genes,
    }
    try:
        factory = dispatch[self.model_type]
    except KeyError:
        raise ValueError(
            f"No case model factory registered for {self.model_type!r}")
    return factory()
def get_clinician(self):
    """
    Create a case model to handle adding/getting the clinician for case.

    Name comes from the pre-fetched LabKey clinician rows (when
    demographics are enabled); hospital comes from the GMC dump, falling
    back to the request's workspace, then 'Unknown'.
    """
    # family ID used to search for clinician details in labkey
    family_id = None
    clinician_details = {"name": "unknown", "hospital": "unknown"}
    if self.case.json['sample_type'] == 'raredisease':
        family_id = self.case.json["family_id"]
        search_term = 'family_id'
    elif self.case.json['sample_type'] == 'cancer':
        family_id = self.case.json["proband"]
        search_term = 'participant_identifiers_id'
    # load in site specific details from config file
    if not self.case.skip_demographics:
        # self.case.clinicians is assumed to be an iterable of LabKey
        # row dicts - TODO confirm schema against the LabKey query.
        for row in self.case.clinicians:
            try:
                if row[search_term] == family_id:
                    if row.get('consultant_details_full_name_of_responsible_consultant'):
                        clinician_details['name'] = row.get(
                            'consultant_details_full_name_of_responsible_consultant')
            except IndexError as e:
                pass
    # Hospital preference order: GMC dump, then workspace, then 'Unknown'.
    if self.case.json["proband"] in self.case.gmc_dict:
        clinician_details['hospital'] = self.case.gmc_dict[self.case.json['proband']]
    elif self.case.ir_obj.workspace:
        clinician_details['hospital'] = self.case.ir_obj.workspace[0]
    else:
        clinician_details['hospital'] = 'Unknown'
    # Ensure the GMC row exists before the clinician references it.
    GMC.objects.get_or_create(name=clinician_details['hospital'])
    clinician = CaseModel(Clinician, {
        "name": clinician_details['name'],
        "email": "unknown", # clinician email not on labkey
        "hospital": clinician_details['hospital'],
        "added_by_user": False
    }, self.model_objects)
    return clinician
def get_paricipant_demographics(self, participant_id):
    '''
    Calls labkey to retrieve participant demographics.

    NOTE: the method name's "paricipant" typo is kept because callers
    elsewhere use this exact name.

    :param participant_id: GEL participant ID
    :return: dict containing participant demographics; fields default to
        'unknown' (and a placeholder 2011/01/01 date of birth, which must
        stay parseable as a date) when demographics are skipped or absent.
    '''
    # load in site specific details from config file
    participant_demographics = {
        "surname": 'unknown',
        "forename": 'unknown',
        "date_of_birth": '2011/01/01', # unknown but needs to be in date format
        "nhs_num": 'unknown',
    }
    if not self.case.skip_demographics:
        # self.case.demographics is assumed to be an iterable of LabKey
        # row dicts - TODO confirm schema against the LabKey query.
        for row in self.case.demographics:
            try:
                if row['participant_id'] == participant_id:
                    participant_demographics["surname"] = row.get(
                        'surname')
                    participant_demographics["forename"] = row.get(
                        'forenames')
                    # Keep only the date part of 'YYYY/MM/DD hh:mm:ss'.
                    participant_demographics["date_of_birth"] = row.get(
                        'date_of_birth').split(' ')[0]
                    # Rare disease rows label identifier types; cancer rows
                    # carry the NHS number directly.
                    if self.case.json['sample_type'] == 'raredisease':
                        if row.get('person_identifier_type').upper() == "NHSNUMBER":
                            participant_demographics["nhs_num"] = row.get(
                                'person_identifier')
                    elif self.case.json['sample_type'] == 'cancer':
                        participant_demographics["nhs_num"] = row.get(
                            'person_identifier')
            except IndexError as e:
                pass
    return participant_demographics
def get_proband(self):
    """
    Create a case model to handle adding/getting the proband for case.

    Demographics come from LabKey (via get_paricipant_demographics);
    recruiting disease comes from the proband payload for cancer cases
    or the LabKey diagnosis table for rare disease cases.
    """
    participant_id = self.case.json["proband"]
    demographics = self.get_paricipant_demographics(participant_id)
    family = self.case.attribute_managers[Family].case_model
    clinician = self.case.attribute_managers[Clinician].case_model
    recruiting_disease = None
    disease_subtype = None
    disease_group = None
    try:
        if self.case.json['sample_type'] == 'cancer':
            # cancer cases carry the diagnosis directly on the proband payload
            recruiting_disease = self.case.proband.primaryDiagnosisDisease[0]
            disease_subtype = self.case.proband.primaryDiagnosisSubDisease[0]
    except (TypeError, KeyError):
        # diagnosis fields may be missing or None; keep the defaults
        pass
    if not self.case.skip_demographics:
        # set up LabKey to get recruited disease
        if self.case.json['sample_type'] == 'raredisease':
            for row in self.case.diagnosis:
                try:
                    if row['participant_identifiers_id'] == participant_id:
                        disease_group = row.get('gel_disease_information_disease_group', None)
                        recruiting_disease = row.get('gel_disease_information_specific_disease', None)
                        disease_subtype = row.get('gel_disease_information_disease_subgroup', None)
                except IndexError:
                    pass
    proband = CaseModel(Proband, {
        "gel_id": participant_id,
        "family": family.entry,
        "nhs_number": demographics['nhs_num'],
        "forename": demographics["forename"],
        "surname": demographics["surname"],
        # demographics dates are 'YYYY/MM/DD' strings (placeholder when unknown)
        "date_of_birth": datetime.strptime(demographics["date_of_birth"], "%Y/%m/%d").date(),
        "sex": self.case.proband.sex,
        "recruiting_disease": recruiting_disease,
        'disease_group': disease_group,
        'disease_subtype': disease_subtype,
        "gmc": clinician.entry.hospital
    }, self.model_objects)
    return proband
def get_relatives(self):
    """
    Create a ManyCaseModel of Relative entries, one per family member
    with a real GEL id, pulling each relative's demographics from LabKey.
    """
    family_members = self.case.family_members
    relative_list = []
    for family_member in family_members:
        demographics = self.get_paricipant_demographics(family_member['gel_id'])
        proband = self.case.attribute_managers[Proband].case_model
        # skip placeholder members whose id is the literal string 'None'
        if family_member['gel_id'] != 'None':
            relative = {
                "gel_id": family_member['gel_id'],
                "relation_to_proband": family_member["relation_to_proband"],
                "affected_status": family_member["affection_status"],
                "sequenced": family_member["sequenced"],
                "proband": proband.entry,
                "nhs_number": demographics["nhs_num"],
                "forename": demographics["forename"],
                "surname":demographics["surname"],
                "date_of_birth": demographics["date_of_birth"],
                "sex": family_member["sex"],
            }
            relative_list.append(relative)
    relatives = ManyCaseModel(Relative, [{
        "gel_id": relative['gel_id'],
        "relation_to_proband": relative["relation_to_proband"],
        "affected_status": relative["affected_status"],
        "proband": relative['proband'],
        "sequenced": relative['sequenced'],
        "nhs_number": relative["nhs_number"],
        "forename": relative["forename"],
        "surname": relative["surname"],
        # demographics dates are 'YYYY/MM/DD' strings (placeholder when unknown)
        "date_of_birth": datetime.strptime(relative["date_of_birth"], "%Y/%m/%d").date(),
        "sex": relative["sex"],
    } for relative in relative_list], self.model_objects)
    return relatives
def get_family(self):
    """
    Create case model to handle adding/getting family for this case.

    Also derives trio_sequenced (both parents sequenced) and, for trios,
    scans tiered and clinical-report variants for any de novo variant.
    """
    family_members = self.case.family_members
    # default mother/father stubs so the 'sequenced' check below is safe
    # when a parent is absent from the family members list
    self.case.mother = {}
    self.case.father = {}
    self.case.mother['sequenced'] = False
    self.case.father['sequenced'] = False
    for family_member in family_members:
        if family_member["relation_to_proband"] == "Father":
            self.case.father = family_member
        elif family_member["relation_to_proband"] == "Mother":
            self.case.mother = family_member
    self.case.trio_sequenced = False
    if self.case.mother["sequenced"] and self.case.father["sequenced"]:
        # participant has a mother and father recorded
        self.case.trio_sequenced = True
    self.case.has_de_novo = False
    if self.case.trio_sequenced:
        # determine if any de_novo variants present
        variants_to_check = []
        # standard tiered variants
        for ig in self.case.json['interpreted_genome']:
            ig_obj = InterpretedGenome.fromJsonDict(ig['interpreted_genome_data'])
            if ig_obj.variants:
                for variant in ig_obj.variants:
                    # annotate each variant with parental zygosities so
                    # determine_variant_inheritance can classify it
                    variant.maternal_zygosity = 'unknown'
                    variant.paternal_zygosity = 'unknown'
                    for call in variant.variantCalls:
                        if call.participantId == self.case.mother["gel_id"]:
                            variant.maternal_zygosity = call.zygosity
                        elif call.participantId == self.case.father["gel_id"]:
                            variant.paternal_zygosity = call.zygosity
                    variants_to_check.append(variant)
        # CIP candidate variants from clinical reports
        for clinical_report in self.case.json['clinical_report']:
            cr_obj = ClinicalReport.fromJsonDict(clinical_report['clinical_report_data'])
            if cr_obj.variants:
                for variant in cr_obj.variants:
                    variant.maternal_zygosity = 'unknown'
                    variant.paternal_zygosity = 'unknown'
                    for call in variant.variantCalls:
                        if call.participantId == self.case.mother["gel_id"]:
                            variant.maternal_zygosity = call.zygosity
                        elif call.participantId == self.case.father["gel_id"]:
                            variant.paternal_zygosity = call.zygosity
                    variants_to_check.append(variant)
        for variant in variants_to_check:
            inheritance = self.determine_variant_inheritance(variant)
            if inheritance == "de_novo":
                self.case.has_de_novo = True
                # found a de novo, can stop here
                break
    clinician = self.case.attribute_managers[Clinician].case_model
    # cancer cases have no family id; their proband id stands in for it
    if self.case.json['sample_type'] == 'raredisease':
        family_id = self.case.json["family_id"]
    elif self.case.json['sample_type'] == 'cancer':
        family_id = self.case.json["proband"]
    family = CaseModel(Family, {
        "clinician": clinician.entry,
        "gel_family_id": family_id,
        "trio_sequenced": self.case.trio_sequenced,
        "has_de_novo": self.case.has_de_novo
    }, self.model_objects)
    return family
def get_phenotypes(self):
    """
    Build a ManyCaseModel of HPO phenotypes recorded as present on the
    proband; non-raredisease cases get an empty model.
    """
    phenotype_records = []
    if self.case.json['sample_type'] == 'raredisease':
        for hpo_term in self.case.proband.hpoTermList:
            # only terms explicitly marked as present are stored
            if hpo_term.termPresence == 'yes':
                phenotype_records.append({"hpo_terms": hpo_term.term,
                                          "description": "unknown"})
    return ManyCaseModel(Phenotype, phenotype_records, self.model_objects)
def get_family_phenotyes(self):
    """
    Placeholder for the family/phenotype link table (TODO: implement);
    currently returns a single null pairing.
    """
    null_link = {"family": None, "phenotype": None}
    return ManyCaseModel(FamilyPhenotype, [null_link], self.model_objects)
def get_panelapp_api_response(self, panel, panel_file):
    """
    Fetch a panel's details from PanelApp and cache the JSON to disk.

    :param panel: panel object exposing panelName and panelVersion
    :param panel_file: path to write the cached JSON response to
    :return: the decoded PanelApp JSON response
    """
    panelapp_poll = PollAPI(
        "panelapp", f"get_panel/{panel.panelName}/?version={panel.panelVersion}")
    # Fetch once and reuse: the previous version called get_json_response()
    # twice (once to cache, once to return), doing the work twice.
    panel_app_response = panelapp_poll.get_json_response()
    with open(panel_file, 'w') as f:
        json.dump(panel_app_response, f)
    return panel_app_response
def get_panels(self):
    """
    Poll panelApp to fetch information about a panel, then create a
    ManyCaseModel with this information.

    Resolution order per panel: in-run PanelManager cache, then the
    on-disk JSON cache, then a live PanelApp fetch.
    """
    config_dict = load_config.LoadConfig().load()
    panelapp_storage = config_dict['panelapp_storage']
    if self.case.panels:
        for panel in self.case.panels:
            # reuse a response another case already fetched during this run
            polled = self.case.panel_manager.fetch_panel_response(
                panelapp_id=panel.panelName,
                panel_version=panel.panelVersion
            )
            if polled:
                panel.panelapp_results = polled.results
            if not polled:
                panel_file = os.path.join(panelapp_storage, f'{panel.panelName}_{panel.panelVersion}.json')
                if os.path.isfile(panel_file):
                    try:
                        # close the cache file promptly; json.load(open(...))
                        # previously leaked the handle
                        with open(panel_file) as cached:
                            panelapp_response = json.load(cached)
                    except (OSError, ValueError):
                        # unreadable or corrupt cache (json.JSONDecodeError is
                        # a ValueError): refetch. The previous bare 'except:'
                        # swallowed even KeyboardInterrupt/SystemExit.
                        panelapp_response = self.get_panelapp_api_response(panel, panel_file)
                else:
                    panelapp_response = self.get_panelapp_api_response(panel, panel_file)
                # inform the PanelManager that a new panel has been added
                polled = self.case.panel_manager.add_panel_response(
                    panelapp_id=panel.panelName,
                    panel_version=panel.panelVersion,
                    panelapp_response=panelapp_response["result"]
                )
                panel.panelapp_results = polled.results
        for panel in self.case.panels:
            panel.panel_name_results = self.case.panel_manager.fetch_panel_names(
                panelapp_id=panel.panelName
            )
        panels = ManyCaseModel(Panel, [{
            "panelapp_id": panel.panelName,
            "panel_name": panel.panel_name_results["SpecificDiseaseName"],
            "disease_group": panel.panel_name_results["DiseaseGroup"],
            "disease_subgroup": panel.panel_name_results["DiseaseSubGroup"]
        } for panel in self.case.panels], self.model_objects)
    else:
        panels = ManyCaseModel(Panel, [], self.model_objects)
    return panels
def get_panel_versions(self):
    """
    Attach each case panel's saved Panel model, then build a
    ManyCaseModel of PanelVersion entries (one version number per panel).
    """
    # Panel entries created earlier by the Panel attribute manager.
    panel_entries = [
        model.entry
        for model in self.case.attribute_managers[Panel].case_model.case_models]
    if not self.case.panels:
        return ManyCaseModel(PanelVersion, [], self.model_objects)
    for case_panel in self.case.panels:
        # hook each JSON panel up with its database model via panelapp id
        for entry in panel_entries:
            if case_panel.panelName == entry.panelapp_id:
                case_panel.model = entry
    return ManyCaseModel(PanelVersion, [{
        "version_number": case_panel.panelapp_results["version"],
        "panel": case_panel.model
    } for case_panel in self.case.panels], self.model_objects)
def get_genes(self):
    """
    Create gene objects from the genes from panelapp.

    Gene candidates are gathered from panel results, structural variant
    and STR report events, and VEP transcripts, then resolved to HGNC
    ids (via cache or the genenames API) before building the MCM.
    """
    panels = self.case.panels
    # get the list of genes from the panelapp_result
    gene_list = []
    if panels:
        for panel in panels:
            genes = panel.panelapp_results["Genes"]
            gene_list += genes
        # normalise EnsembleGeneIds to a single string id or None
        for gene in gene_list:
            # Alot of pilot cases just have E for this
            if not gene["EnsembleGeneIds"] or gene["EnsembleGeneIds"] == 'E':
                gene["EnsembleGeneIds"] = None
            else:
                if type(gene['EnsembleGeneIds']) is str:
                    gene['EnsembleGeneIds'] = gene['EnsembleGeneIds']
                else:
                    # list-valued field: take the first id
                    gene['EnsembleGeneIds'] = gene['EnsembleGeneIds'][0]
    # add genes referenced by SV and STR report events
    for ig_obj in self.case.ig_objs:
        if ig_obj.structuralVariants:
            for variant in ig_obj.structuralVariants:
                for report_event in variant.reportEvents:
                    for gene in report_event.genomicEntities:
                        if gene.type == 'gene':
                            if gene.ensemblId != "NO_GENE_ASSOCIATED":
                                gene_list.append({'EnsembleGeneIds': gene.ensemblId,
                                                  'GeneSymbol': gene.geneSymbol})
        if ig_obj.shortTandemRepeats:
            for variant in ig_obj.shortTandemRepeats:
                for report_event in variant.reportEvents:
                    for gene in report_event.genomicEntities:
                        if gene.type == 'gene':
                            if gene.ensemblId != "NO_GENE_ASSOCIATED":
                                gene_list.append({'EnsembleGeneIds': gene.ensemblId,
                                                  'GeneSymbol': gene.geneSymbol})
    # warm the gene manager's cache of previously searched genes
    self.case.gene_manager.load_genes()
    # add genes seen on VEP transcripts (these already carry an HGNC id)
    for transcript in self.case.transcripts:
        if transcript.gene_ensembl_id and transcript.gene_hgnc_id:
            gene_list.append({
                'EnsembleGeneIds': transcript.gene_ensembl_id,
                'GeneSymbol': transcript.gene_hgnc_name,
                'HGNC_ID': str(transcript.gene_hgnc_id),
            })
            self.case.gene_manager.add_searched(transcript.gene_ensembl_id, str(transcript.gene_hgnc_id))
    # resolve every candidate to an HGNC id (cache first, then genenames API)
    for gene in tqdm(gene_list, desc=self.case.request_id):
        # reset then re-resolve; cached lookups repopulate transcript genes
        gene['HGNC_ID'] = None
        if gene['EnsembleGeneIds']:
            polled = self.case.gene_manager.fetch_searched(gene['EnsembleGeneIds'])
            if polled == 'Not_found':
                gene['HGNC_ID'] = None
            elif not polled:
                genename_poll = PollAPI(
                    "genenames", "search/{gene}/".format(
                        gene=gene["EnsembleGeneIds"])
                )
                genename_response = genename_poll.get_json_response()
                if genename_response['response']['docs']:
                    # hgnc_id field looks like 'HGNC:1234'; keep the number
                    hgnc_id = genename_response['response']['docs'][0]['hgnc_id'].split(':')
                    gene['HGNC_ID'] = str(hgnc_id[1])
                    self.case.gene_manager.add_searched(gene["EnsembleGeneIds"], str(hgnc_id[1]))
                else:
                    self.case.gene_manager.add_searched(gene["EnsembleGeneIds"], 'Not_found')
            else:
                gene['HGNC_ID'] = polled
    # keep only genes with a resolved HGNC id, deduplicated by the manager
    cleaned_gene_list = []
    for gene in gene_list:
        if gene['HGNC_ID']:
            self.case.gene_manager.add_gene(gene)
            new_gene = self.case.gene_manager.fetch_gene(gene)
            cleaned_gene_list.append(new_gene)
    # persist the searched-gene cache for later cases
    self.case.gene_manager.write_genes()
    genes = ManyCaseModel(Gene, [{
        "ensembl_id": gene["EnsembleGeneIds"],  # TODO: which ID to use?
        "hgnc_name": gene["GeneSymbol"],
        "hgnc_id": gene['HGNC_ID']
    } for gene in cleaned_gene_list if gene["HGNC_ID"]], self.model_objects)
    return genes
def get_panel_version_genes(self):
    """
    Placeholder for the PanelVersion-to-Gene M2M relationship (TODO:
    implement); currently returns a single null pairing.
    """
    null_row = {"panel_version": None, "gene": None}
    return ManyCaseModel(PanelVersionGenes, [null_row], self.model_objects)
def get_transcripts(self):
    """
    Create a ManyCaseModel for transcripts based on information returned
    from VEP.

    Also attaches each case transcript's Gene model and decides whether
    the transcript is 'selected' (canonical, or matching a stored
    PreferredTranscript for rare disease cases).
    """
    tool_models = [
        case_model.entry
        for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
    genome_assembly = None
    for tool in tool_models:
        if tool.tool_name == 'genome_build':
            genome_assembly = tool
    genes = self.case.attribute_managers[Gene].case_model.case_models
    case_transcripts = self.case.transcripts
    # for each transcript, add an FK to the gene with matching ensg ID
    for transcript in case_transcripts:
        # convert canonical to bools:
        transcript.canonical = transcript.transcript_canonical == "YES"
        if self.case.json['sample_type'] == 'cancer':
            # cancer: canonical transcripts are the selected ones
            transcript.selected = transcript.transcript_canonical == "YES"
        if not transcript.gene_hgnc_id:
            # if the transcript has no recognised gene associated
            continue  # don't bother checking genes
        transcript.gene_model = None
        for gene in genes:
            if gene.entry.hgnc_id == transcript.gene_hgnc_id:
                transcript.gene_model = gene.entry
                if self.case.json['sample_type'] == 'raredisease':
                    # prefer a curator-chosen transcript when one is stored
                    preferred_transcript = PreferredTranscript.objects.filter(gene=gene.entry,
                                                                              genome_assembly=genome_assembly)
                    if preferred_transcript:
                        preferred_transcript = preferred_transcript.first()
                        if preferred_transcript.transcript.name == transcript.transcript_name:
                            transcript.selected = True
                    else:
                        # NOTE(review): else-branch pairing reconstructed from
                        # flattened indentation -- confirm it belongs to the
                        # 'if preferred_transcript' check (fall back to
                        # canonical when no preference is stored)
                        transcript.selected = transcript.transcript_canonical == "YES"
    transcripts = ManyCaseModel(Transcript, [{
        "gene": transcript.gene_model,
        "name": transcript.transcript_name,
        "canonical_transcript": transcript.canonical,
        "strand": transcript.transcript_strand,
        'genome_assembly': genome_assembly
        # add all transcripts except those without associated genes
    } for transcript in case_transcripts if transcript.gene_model], self.model_objects)
    return transcripts
def get_ir_family(self):
    """
    Build the CaseModel for a new InterpretationReportFamily. Unlike
    most entities this row is always newly added; it cannot already
    exist for this request id.
    """
    participant_family = self.case.attribute_managers[Family].case_model.entry
    return CaseModel(InterpretationReportFamily, {
        "participant_family": participant_family,
        "cip": self.case.json["cip"],
        "ir_family_id": self.case.request_id,
        "priority": self.case.json["case_priority"]
    }, self.model_objects)
def get_ir_family_panel(self):
    """
    Through table linking panels to IRF when no variants have been
    reported within a particular panel for a case.

    Builds per-panel coverage stats plus a readable list of genes whose
    15x coverage for the proband sample falls below 95%.
    """
    # get the string names of all genes which fall below 95% 15x coverage
    genes_failing_coverage = []
    for panel in self.case.attribute_managers[PanelVersion].case_model.case_models:
        if "entry" in vars(panel):
            if self.case.ir_obj.genePanelsCoverage:
                panel_coverage = self.case.ir_obj.genePanelsCoverage.get(panel.entry.panel.panelapp_id, {})
                for gene, coverage_dict in panel_coverage.items():
                    if float(coverage_dict["_".join((self.case.proband_sample, "gte15x"))]) < 0.95:
                        genes_failing_coverage.append(gene)
    # deduplicate and sort for a stable, readable report string
    genes_failing_coverage = sorted(set(genes_failing_coverage))
    if 'SUMMARY' in genes_failing_coverage:
        # 'SUMMARY' is the panel-level aggregate, not a gene
        genes_failing_coverage.remove('SUMMARY')
    # 'GENE1, GENE2.' -- join replaces the previous quadratic '+=' build
    str_genes_failing_coverage = ', '.join(genes_failing_coverage) + '.'
    ir_family = self.case.attribute_managers[InterpretationReportFamily].case_model
    if self.case.ir_obj.genePanelsCoverage:
        ir_family_panels = ManyCaseModel(InterpretationReportFamilyPanel, [{
            "ir_family": ir_family.entry,
            "panel": panel.entry,
            "custom": False,
            "average_coverage": self.case.ir_obj.genePanelsCoverage[panel.entry.panel.panelapp_id]["SUMMARY"].get("_".join((self.case.proband_sample, "avg")), None),
            "proportion_above_15x": self.case.ir_obj.genePanelsCoverage[panel.entry.panel.panelapp_id]["SUMMARY"].get("_".join((self.case.proband_sample, "gte15x")), None),
            "genes_failing_coverage": str_genes_failing_coverage
        } for panel in self.case.attribute_managers[PanelVersion].case_model.case_models
            if "entry" in vars(panel) and
            panel.entry.panel.panelapp_id in self.case.ir_obj.genePanelsCoverage and
            'SUMMARY' in self.case.ir_obj.genePanelsCoverage[panel.entry.panel.panelapp_id]],
            self.model_objects)
    else:
        ir_family_panels = ManyCaseModel(InterpretationReportFamilyPanel, [], self.model_objects)
    return ir_family_panels
def get_ir(self):
    """
    Get json information about an Interpretation Report and create a
    CaseModel from it.

    Derives the case's max tier from processed proband variants, plus
    cancer-only fields (tumour content, germline variant presence).
    """
    case_attribute_managers = self.case.attribute_managers
    irf_manager = case_attribute_managers[InterpretationReportFamily]
    ir_family = irf_manager.case_model
    # find assembly
    tool_models = [
        case_model.entry
        for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
    genome_assembly = None
    for tool in tool_models:
        if tool.tool_name == 'genome_build':
            genome_assembly = tool
    # find the max tier (lower number = higher priority; 3 is the floor)
    self.case.processed_variants = self.process_proband_variants()
    self.case.max_tier = 3
    for variant in self.case.processed_variants:
        if variant['max_tier']:
            if variant['max_tier'] < self.case.max_tier:
                self.case.max_tier = variant['max_tier']
    tumour_content = None
    if self.case.json['sample_type'] == 'cancer':
        tumour_content = self.case.proband.tumourSamples[0].tumourContent
    # cancer only: flag if any interpreted-genome variant is germline
    has_germline_variant = False
    if self.case.json['sample_type'] == 'cancer':
        for ig_obj in self.case.ig_objs:
            if ig_obj.variants:
                alleleorigins = [variant.variantAttributes.alleleOrigins[0] for variant in ig_obj.variants]
                if "germline_variant" in alleleorigins:
                    has_germline_variant = True
    ir = CaseModel(GELInterpretationReport, {
        "ir_family": ir_family.entry,
        "polled_at_datetime": timezone.now(),
        # hash of the raw JSON, used to detect updated cases
        "sha_hash": self.case.json_hash,
        "status": self.case.status["status"],
        "updated": timezone.make_aware(
            datetime.strptime(
                self.case.status["created_at"][:19],
                '%Y-%m-%dT%H:%M:%S'
            )),
        "user": self.case.status["user"],
        "max_tier": self.case.max_tier,
        "assembly": genome_assembly,
        'sample_type': self.case.json['sample_type'],
        "sample_id": self.case.proband_sample,
        'tumour_content': tumour_content,
        "has_germline_variant": has_germline_variant,
        "case_status": 'N',  # initialised to not started? (N)
    }, self.model_objects)
    return ir
def get_variants(self):
    """
    Get the variant information (genetic position) for the variants in
    this case and return a matching ManyCaseModel with model_type =
    Variant.
    """
    tool_models = [
        case_model.entry
        for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
    genome_assembly = None
    for tool in tool_models:
        if tool.tool_name == 'genome_build':
            genome_assembly = tool

    def position_dict(variant):
        """Build one position dict; caches the rsID on the variant."""
        # record a dbSNP id only when it looks like a real rsID
        variant.dbsnp = None
        db_snp_id = variant.variantAttributes.variantIdentifiers.dbSnpId
        # raw string fixes the invalid '\d' escape in the previous pattern
        if db_snp_id and re.match(r'rs\d+', db_snp_id):
            variant.dbsnp = db_snp_id
        return {
            "genome_assembly": genome_assembly,
            "alternate": variant.case_variant.alt,
            "chromosome": variant.case_variant.chromosome,
            "db_snp_id": variant.dbsnp,
            "reference": variant.case_variant.ref,
            "position": variant.case_variant.position,
        }

    variants_list = []
    # tiered variants from every interpreted genome
    for ig_obj in self.case.ig_objs:
        if ig_obj.variants:
            for variant in ig_obj.variants:
                if variant.case_variant:
                    variants_list.append(position_dict(variant))
    # CIP candidate variants from every clinical report
    for clinical_report in self.case.clinical_report_objs:
        if clinical_report.variants:
            for variant in clinical_report.variants:
                variants_list.append(position_dict(variant))
    # register with the run-wide variant manager so identical variants
    # resolve to one canonical dict
    for variant in variants_list:
        self.case.variant_manager.add_variant(variant)
    cleaned_variant_list = []
    for variant in variants_list:
        cleaned_variant_list.append(self.case.variant_manager.fetch_variant(variant))
    # set and return the MCM
    variants = ManyCaseModel(Variant, [{
        "genome_assembly": genome_assembly,
        "alternate": variant["alternate"],
        "chromosome": variant["chromosome"],
        "db_snp_id": variant["db_snp_id"],
        "reference": variant["reference"],
        "position": variant["position"],
    } for variant in cleaned_variant_list], self.model_objects)
    return variants
def get_transcript_variants(self):
    """
    Get all variant transcripts. This is essentialy a 'through' table
    for the M2M relationship between Variant and Transcript, but with
    extra information (HGVS, SIFT, PolyPhen, max allele frequency).
    """
    # find the genome build tool entry to disambiguate transcripts
    tool_models = [
        case_model.entry
        for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
    genome_assembly = None
    for tool in tool_models:
        if tool.tool_name == 'genome_build':
            genome_assembly = tool
    case_attribute_managers = self.case.attribute_managers
    transcript_manager = case_attribute_managers[Transcript].case_model
    transcript_entries = [transcript.entry
                          for transcript in transcript_manager.case_models]
    variant_manager = case_attribute_managers[Variant].case_model
    variant_entries = [variant.entry
                       for variant in variant_manager.case_models]
    # for each CaseTranscript (which contains necessary info):
    for case_transcript in self.case.transcripts:
        # locate the CaseVariant this transcript annotates via the
        # (case_id, variant_count) pair recorded during VEP processing
        case_id = case_transcript.case_id
        variant_id = case_transcript.variant_count
        # initialise explicitly: previously a failed match left
        # case_variant unbound (or stale from the prior iteration)
        case_variant = None
        for variant in self.case.variants:
            if (
                case_id == variant.case_id and
                variant_id == variant.variant_count
            ):
                case_variant = variant
                break
        # add the corresponding Variant entry by coordinate match
        if case_variant:
            for variant_entry in variant_entries:
                if (
                    variant_entry.chromosome == case_variant.chromosome and
                    variant_entry.position == case_variant.position and
                    variant_entry.reference == case_variant.ref and
                    variant_entry.alternate == case_variant.alt
                ):
                    case_transcript.variant_entry = variant_entry
        # add the corresponding Transcript entry by name + assembly.
        # Default to None first: the previous 'found' flag was only
        # assigned inside the loop, raising NameError when
        # transcript_entries was empty.
        case_transcript.transcript_entry = None
        transcript_name = case_transcript.transcript_name
        for transcript_entry in transcript_entries:
            if transcript_entry.name == transcript_name and transcript_entry.genome_assembly == genome_assembly:
                case_transcript.transcript_entry = transcript_entry
                break
    # use the updated CaseTranscript instances to create an MCM;
    # we don't make entries for tx with no Gene (transcript_entry None)
    transcript_variants = ManyCaseModel(TranscriptVariant, [{
        "transcript": transcript.transcript_entry,
        "variant": transcript.variant_entry,
        "af_max": transcript.transcript_variant_af_max,
        "hgvs_c": transcript.transcript_variant_hgvs_c,
        "hgvs_p": transcript.transcript_variant_hgvs_p,
        "hgvs_g": transcript.transcript_variant_hgvs_g,
        "sift": transcript.variant_sift,
        "polyphen": transcript.variant_polyphen,
    } for transcript in self.case.transcripts
        if transcript.transcript_entry], self.model_objects)
    return transcript_variants
def process_proband_variants(self):
    """
    Take all proband variants from this case and process them for
    get_proband_variants.

    Collects variants from interpreted genomes and clinical reports,
    links each to its saved Variant entry, records proband/maternal/
    paternal zygosities and somatic status, then deduplicates by
    variant entry.
    """
    proband_manager = self.case.attribute_managers[Proband]
    variant_entries = [
        variant.entry
        for variant
        in self.case.attribute_managers[Variant].case_model.case_models
    ]
    raw_proband_variants = []
    processed_proband_variants = []
    # tiered variants with a recognised position
    for ig_obj in self.case.ig_objs:
        if ig_obj.variants:
            for ig_variant in ig_obj.variants:
                if ig_variant.case_variant:
                    raw_proband_variants.append(ig_variant)
    # clinical report (CIP candidate) variants
    for clinical_report in self.case.clinical_report_objs:
        if clinical_report.variants:
            for variant in clinical_report.variants:
                raw_proband_variants.append(variant)
    for ig_variant in raw_proband_variants:
        # some json_variants won't have an entry (T3), so:
        ig_variant.somatic = False
        ig_variant.variant_entry = None
        # for those that do, fetch from list of entries:
        for variant in variant_entries:
            if (
                ig_variant.variantCoordinates.chromosome == variant.chromosome and
                ig_variant.variantCoordinates.position == variant.position and
                ig_variant.variantCoordinates.reference == variant.reference and
                ig_variant.variantCoordinates.alternate == variant.alternate
            ):
                # variant in json matches variant entry
                ig_variant.variant_entry = variant
        # zygosities default to 'unknown'; 'na' calls are ignored
        ig_variant.zygosity = 'unknown'
        ig_variant.maternal_zygosity = 'unknown'
        ig_variant.paternal_zygosity = 'unknown'
        for call in ig_variant.variantCalls:
            if call.participantId == proband_manager.case_model.entry.gel_id:
                if call.zygosity != 'na':
                    ig_variant.zygosity = call.zygosity
            elif call.participantId == self.case.mother.get("gel_id", None):
                if call.zygosity != 'na':
                    ig_variant.maternal_zygosity = call.zygosity
            elif call.participantId == self.case.father.get("gel_id", None):
                if call.zygosity != 'na':
                    ig_variant.paternal_zygosity = call.zygosity
            if call:
                # allele origin marks the variant somatic regardless of caller
                if ig_variant.variantAttributes.alleleOrigins[0] == 'somatic_variant':
                    ig_variant.somatic = True
    for ig_variant in raw_proband_variants:
        proband_variant = {
            "max_tier": ig_variant.max_tier,
            "variant": ig_variant.variant_entry,
            "zygosity": ig_variant.zygosity,
            "maternal_zygosity": ig_variant.maternal_zygosity,
            "paternal_zygosity": ig_variant.paternal_zygosity,
            "somatic": ig_variant.somatic,
            'variant_obj': ig_variant
        }
        processed_proband_variants.append(proband_variant)
    # remove duplicate variants (same Variant entry reported twice)
    uniq_proband_variants = []
    seen_variants = []
    for variant in processed_proband_variants:
        if variant['variant'] not in seen_variants:
            uniq_proband_variants.append(variant)
            seen_variants.append(variant['variant'])
    return uniq_proband_variants
def get_proband_variants(self):
    """
    Turn the case's processed proband variants into a ManyCaseModel of
    ProbandVariant entries tied to this case's interpretation report.
    """
    report_entry = self.case.attribute_managers[GELInterpretationReport].case_model.entry
    variant_rows = []
    for processed in self.case.processed_variants:
        variant_rows.append({
            "interpretation_report": report_entry,
            "max_tier": processed['max_tier'],
            "variant": processed["variant"],
            "zygosity": processed["zygosity"],
            "maternal_zygosity": processed['maternal_zygosity'],
            "paternal_zygosity": processed['paternal_zygosity'],
            # classify de_novo/inherited/unknown from parental zygosities
            "inheritance": self.determine_variant_inheritance(processed['variant_obj']),
            "somatic": processed["somatic"]
        })
    return ManyCaseModel(ProbandVariant, variant_rows, self.model_objects)
def determine_variant_inheritance(self, variant):
    """
    Classify a variant as 'de_novo', 'inherited' or 'unknown' from the
    maternal/paternal zygosities recorded on it.
    """
    maternal = variant.maternal_zygosity
    paternal = variant.paternal_zygosity
    if maternal == 'reference_homozygous' and paternal == 'reference_homozygous':
        # --/-- cross: neither parent carries the allele, so de novo
        return 'de_novo'
    if "heterozygous" in maternal or "heterozygous" in paternal:
        # +-/?? cross: at least one parent carries a copy
        return 'inherited'
    if "alternate" in maternal or "alternate" in paternal:
        # ++/?? cross
        return 'inherited'
    # zygosities unknown/na: cannot determine
    return 'unknown'
def get_pv_flags(self):
    """
    Build PVFlag rows labelling each reported variant with the service
    that reported it (interpretation service name for interpreted
    genomes, 'Clinical Report' for clinical report variants).
    """
    proband_variants = [proband_variant.entry for proband_variant
                        in self.case.attribute_managers[ProbandVariant].case_model.case_models]
    pv_flags = []
    for interpreted_genome in self.case.ig_objs:
        if interpreted_genome.variants:
            for variant in interpreted_genome.variants:
                if variant.case_variant:
                    # attach the matching ProbandVariant entry; first match wins
                    for proband_variant in proband_variants:
                        if proband_variant.variant == variant.variant_entry:
                            variant.proband_variant = proband_variant
                            variant.company = interpreted_genome.interpretationService
                            pv_flags.append(variant)
                            break
    for clinical_report in self.case.clinical_report_objs:
        if clinical_report.variants:
            for variant in clinical_report.variants:
                for proband_variant in proband_variants:
                    if proband_variant.variant == variant.variant_entry:
                        variant.proband_variant = proband_variant
                        variant.company = 'Clinical Report'
                        pv_flags.append(variant)
                        break
    pv_flags = ManyCaseModel(PVFlag, [{
        "proband_variant": variant.proband_variant,
        'flag_name': variant.company
    } for variant in pv_flags], self.model_objects)
    return pv_flags
def get_proband_transcript_variants(self):
    """
    Get the ProbandTranscriptVariants associated with this case and
    return a MCM containing them.
    """
    # associat a proband_variant with a transcript
    proband_variants = [proband_variant.entry for proband_variant
                        in self.case.attribute_managers[ProbandVariant].case_model.case_models]
    for transcript in self.case.transcripts:
        for proband_variant in proband_variants:
            if proband_variant.variant == transcript.variant_entry:
                transcript.proband_variant_entry = proband_variant
    if self.case.json['sample_type'] == 'cancer':
        # cancer: mark a transcript selected when a report event's
        # genomic entity names that exact transcript
        for transcript in self.case.transcripts:
            if transcript.transcript_entry:
                for ig_obj in self.case.ig_objs:
                    if ig_obj.variants:
                        for variant in ig_obj.variants:
                            if variant.case_variant:
                                if variant.variant_entry == transcript.variant_entry:
                                    for reportevent in variant.reportEvents:
                                        for en in reportevent.genomicEntities:
                                            # NOTE(review): else-pairing reconstructed
                                            # from flattened indentation -- confirm
                                            # 'selected' toggles per genomic entity
                                            # (last match wins) as written here
                                            if transcript.transcript_name == en.ensemblId:
                                                transcript.selected = True
                                            else:
                                                transcript.selected = False
    proband_transcript_variants = ManyCaseModel(ProbandTranscriptVariant, [{
        "transcript": transcript.transcript_entry,
        "proband_variant": transcript.proband_variant_entry,
        # default is true if assoc. tx is canonical
        "selected": transcript.selected,
        "effect": transcript.proband_transcript_variant_effect
    } for transcript
        in self.case.transcripts
        if transcript.transcript_entry
        and transcript.proband_variant_entry], self.model_objects)
    return proband_transcript_variants
def get_tool_and_assembly_versions(self):
    """
    Build a ManyCaseModel of ToolOrAssemblyVersion entries, one per
    tool/version pair recorded on the case.
    """
    version_rows = []
    for tool_name, version_number in self.case.tools_and_versions.items():
        version_rows.append({"tool_name": tool_name,
                             "version_number": version_number})
    return ManyCaseModel(ToolOrAssemblyVersion, version_rows, self.model_objects)
def get_sv_regions(self):
    """
    Gather one region dict (chromosome, start, end, assembly) per tiered
    structural variant and return them as a ManyCaseModel of SVRegion
    entries.
    """
    # resolve the genome build entry saved for this case
    genome_assembly = None
    tool_entries = [
        case_model.entry
        for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
    for tool_entry in tool_entries:
        if tool_entry.tool_name == 'genome_build':
            genome_assembly = tool_entry
    region_rows = []
    for ig_obj in self.case.ig_objs:
        if not ig_obj.structuralVariants:
            continue
        for sv in ig_obj.structuralVariants:
            if sv.case_variant:
                region_rows.append({
                    "genome_assembly": genome_assembly,
                    "chromosome": sv.case_variant.chromosome,
                    "sv_start": sv.case_variant.sv_start,
                    "sv_end": sv.case_variant.sv_end,
                })
    return ManyCaseModel(SVRegion, region_rows, self.model_objects)
def get_svs(self):
    '''
    Loop again over the SVs and match up the SVRegions,
    Then loop again and add them to a MVM list
    :return:
    '''
    sv_region_entries = [sv_region.entry for sv_region in
                         self.case.attribute_managers[SVRegion].case_model.case_models]
    # first pass: attach the saved SVRegion entry to each variant.
    # NOTE(review): sv_region1_entry is only set when a region matches;
    # the second pass reads it unconditionally -- confirm every tiered
    # SV always has a corresponding SVRegion entry.
    for ig_obj in self.case.ig_objs:
        if ig_obj.structuralVariants:
            for variant in ig_obj.structuralVariants:
                if variant.case_variant:
                    for sv_region_entry in sv_region_entries:
                        if (variant.case_variant.chromosome == sv_region_entry.chromosome and
                                int(variant.case_variant.sv_start) == sv_region_entry.sv_start and
                                int(variant.case_variant.sv_end) == sv_region_entry.sv_end):
                            # single-region SV; region2 is unused here
                            variant.sv_region1_entry = sv_region_entry
                            variant.sv_region2_entry = None
                            break  # Stop, found what you need
    # second pass: build the SV rows from the attached regions
    sv_list = []
    for ig_obj in self.case.ig_objs:
        if ig_obj.structuralVariants:
            for variant in ig_obj.structuralVariants:
                if variant.case_variant:
                    if variant.sv_region1_entry:
                        sv_list.append({
                            'sv_region1': variant.sv_region1_entry,
                            'sv_region2': variant.sv_region2_entry,
                            'variant_type': variant.variantType
                        })
    svs = ManyCaseModel(SV, sv_list, self.model_objects)
    return svs
def get_proband_svs(self):
'''
Loop over CNVs to get SV entry, then loop again to get SampleSVs
:return:
'''
sv_entries = [sv.entry for sv in
self.case.attribute_managers[SV].case_model.case_models]
ir_manager = self.case.attribute_managers[GELInterpretationReport]
for ig_obj in self.case.ig_objs:
if ig_obj.structuralVariants:
for variant in ig_obj.structuralVariants:
if variant.case_variant:
variant.cnv_af = None
variant.cnv_auc = None
for sv_entry in sv_entries:
if (variant.sv_region1_entry == sv_entry.sv_region1 and
variant.sv_region2_entry == sv_entry.sv_region2 and
variant.variantType == sv_entry.variant_type):
if variant.variantAttributes.alleleFrequencies:
for allele_freq in variant.variantAttributes.alleleFrequencies:
if allele_freq.population == 'CNV_AF':
variant.cnv_af = allele_freq.alternateFrequency
elif allele_freq.population == 'CNV_AUC':
variant.cnv_auc = allele_freq.alternateFrequency
variant.sv_entry = sv_entry
break
proband_sv_list = []
for ig_obj in self.case.ig_objs:
if ig_obj.structuralVariants:
for variant in ig_obj.structuralVariants:
if variant.case_variant:
if variant.sv_entry:
proband_sv_list.append({
'interpretation_report': ir_manager.case_model.entry,
'sv': variant.sv_entry,
'max_tier': variant.max_tier,
'cnv_af': variant.cnv_af,
'cnv_auc': variant.cnv_auc
})
sample_svs = ManyCaseModel(ProbandSV, proband_sv_list, self.model_objects)
return sample_svs
def get_proband_sv_genes(self):
gene_entries = [gene.entry for gene in
self.case.attribute_managers[Gene].case_model.case_models]
proband_sv_entries = [gene.entry for gene in
self.case.attribute_managers[ProbandSV].case_model.case_models]
ir_manager = self.case.attribute_managers[GELInterpretationReport]
for ig_obj in self.case.ig_objs:
if ig_obj.structuralVariants:
for variant in ig_obj.structuralVariants:
if variant.case_variant:
for proband_sv_entry in proband_sv_entries:
if (variant.sv_entry == proband_sv_entry.sv and
ir_manager.case_model.entry == proband_sv_entry.interpretation_report):
variant.proband_sv_entry = proband_sv_entry
break
for ig_obj in self.case.ig_objs:
if ig_obj.structuralVariants:
for variant in ig_obj.structuralVariants:
if variant.case_variant:
variant.genes = {}
for report_event in variant.reportEvents:
interesting_gene = False
if report_event.tier == 'TIERA':
interesting_gene = True
for gene in report_event.genomicEntities:
if gene.type == 'gene':
gene.gene_entry = None
for gene_entry in gene_entries:
if gene_entry.ensembl_id == gene.ensemblId:
if gene_entry not in variant.genes:
variant.genes[gene_entry] = interesting_gene
if interesting_gene:
variant.genes[gene_entry] = interesting_gene
break
proband_sv_gene_list = []
for ig_obj in self.case.ig_objs:
if ig_obj.structuralVariants:
for variant in ig_obj.structuralVariants:
if variant.case_variant:
for gene_key in variant.genes:
proband_sv_gene_list.append({
'gene': gene_key,
'proband_sv': variant.proband_sv_entry,
'selected': variant.genes[gene_key]
})
proband_sv_genes = ManyCaseModel(ProbandSVGene, proband_sv_gene_list, self.model_objects)
return proband_sv_genes
def get_str_variants(self):
'''
Loop over all STRs and lump in all the STR Variants
:return:
'''
genome_assembly = None
str_variant_list = []
tool_models = [
case_model.entry
for case_model in self.case.attribute_managers[ToolOrAssemblyVersion].case_model.case_models]
for tool in tool_models:
if tool.tool_name == 'genome_build':
genome_assembly = tool
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
tiered_variant = {
"genome_assembly": genome_assembly,
"chromosome": variant.case_variant.chromosome,
"str_start": variant.case_variant.str_start,
"str_end": variant.case_variant.str_end,
"repeated_sequence" : variant.case_variant.repeated_sequence,
"normal_threshold": variant.case_variant.normal_threshold,
"pathogenic_threshold": variant.case_variant.pathogenic_threshold,
}
str_variant_list.append(tiered_variant)
str_variants = ManyCaseModel(STRVariant, str_variant_list, self.model_objects)
return str_variants
def get_proband_strs(self):
'''
Loop over STRs to get ProbandSTRs
:return:
'''
str_variant_entries = [str_variant.entry for str_variant in
self.case.attribute_managers[STRVariant].case_model.case_models]
ir_manager = self.case.attribute_managers[GELInterpretationReport]
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
for str_variant_entry in str_variant_entries:
if (variant.case_variant.chromosome == str_variant_entry.chromosome and
int(variant.case_variant.str_start) == str_variant_entry.str_start and
int(variant.case_variant.str_end) == str_variant_entry.str_end and
variant.case_variant.repeated_sequence == str_variant_entry.repeated_sequence and
int(variant.case_variant.normal_threshold) == str_variant_entry.normal_threshold and
int(variant.case_variant.pathogenic_threshold ) == str_variant_entry.pathogenic_threshold):
variant.str_variant_entry = str_variant_entry
break
proband_str_list = []
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
if variant.str_variant_entry:
proband_str_list.append({
'interpretation_report': ir_manager.case_model.entry,
'str_variant': variant.str_variant_entry,
'max_tier': variant.max_tier,
'proband_copies_a': variant.proband_copies_a,
'proband_copies_b': variant.proband_copies_b,
'maternal_copies_a': variant.maternal_copies_a,
'maternal_copies_b': variant.maternal_copies_b,
'paternal_copies_a': variant.paternal_copies_a,
'paternal_copies_b': variant.paternal_copies_b,
'mode_of_inheritance': variant.mode_of_inheritance,
'segregation_pattern': variant.segregation_pattern
})
proband_strs = ManyCaseModel(ProbandSTR, proband_str_list, self.model_objects)
return proband_strs
def get_proband_str_genes(self):
gene_entries = [gene.entry for gene in
self.case.attribute_managers[Gene].case_model.case_models]
proband_str_entries = [proband_str.entry for proband_str in
self.case.attribute_managers[ProbandSTR].case_model.case_models]
ir_manager = self.case.attribute_managers[GELInterpretationReport]
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
for proband_str_entry in proband_str_entries:
if (variant.str_variant_entry == proband_str_entry.str_variant and
ir_manager.case_model.entry == proband_str_entry.interpretation_report):
variant.proband_str_entry = proband_str_entry
break
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
variant.genes = {}
for report_event in variant.reportEvents:
for gene in report_event.genomicEntities:
if gene.type == 'gene':
gene.gene_entry = None
for gene_entry in gene_entries:
if gene_entry.ensembl_id == gene.ensemblId:
if gene_entry not in variant.genes:
variant.genes[gene_entry] = True
break
proband_str_gene_list = []
for ig_obj in self.case.ig_objs:
if ig_obj.shortTandemRepeats:
for variant in ig_obj.shortTandemRepeats:
if variant.case_variant:
for gene_key in variant.genes:
proband_str_gene_list.append({
'gene': gene_key,
'proband_str': variant.proband_str_entry,
'selected': variant.genes[gene_key]
})
proband_str_genes = ManyCaseModel(ProbandSTRGene, proband_str_gene_list, self.model_objects)
return proband_str_genes
class CaseModel(object):
    """
    A handler for an instance of a model that belongs to a case. Holds an
    instance of a model (pre-creation or post-creation) and whether it
    requires creation in the database.
    """
    def __init__(self, model_type, model_attributes, model_objects):
        """
        :param model_type: Django model class that this handler wraps.
        :param model_attributes: dict of field name -> value identifying the
            record. Copied, so the escaping below never mutates the caller's
            dict.
        :param model_objects: pre-fetched model objects, forwarded to
            check_found_in_db.
        """
        self.model_type = model_type
        self.model_attributes = model_attributes.copy()
        self.escaped_model_attributes = model_attributes.copy()
        self.string_escape_model_attributes()  # prevent sql injection
        self.model_objects = model_objects
        # The matching DB record, or False when it still needs creating.
        self.entry = self.check_found_in_db(self.model_objects)

    def string_escape_model_attributes(self):
        # string escape ' characters in model attributes for use with raw sql
        for k, v in self.escaped_model_attributes.items():
            if isinstance(v, str):
                # psql takes '' when ' is used in a string to avoid early
                # termination of the literal
                self.escaped_model_attributes[k] = v.replace("'", "''")

    @staticmethod
    def _select_from(table_name):
        """Return the engine-appropriate ``SELECT * FROM <table>`` prefix.

        MySQL accepts the bare identifier; other engines (PostgreSQL) need
        double quotes to preserve the CamelCase table names.
        """
        if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql':
            return 'SELECT * FROM {}'.format(table_name)
        return 'SELECT * FROM "{}"'.format(table_name)

    def set_sql_cmd(self):
        """Build the raw SQL used to fetch the record matching this model.

        String values were pre-escaped by string_escape_model_attributes;
        foreign keys are compared by numeric id and therefore left unquoted.

        :return: complete SELECT statement as a string
        :raises ValueError: if self.model_type has no lookup defined here
            (previously this fell through to an unbound-variable NameError)
        """
        attrs = self.escaped_model_attributes
        mysql = settings.DATABASES['default']['ENGINE'] == 'django.db.backends.mysql'
        if self.model_type == Clinician:
            table = self._select_from('Clinician')
            if mysql:
                # BINARY forces a case-sensitive comparison on MySQL.
                cmd = (
                    " WHERE BINARY name = '{name}'"
                    " AND BINARY hospital = '{hospital}'"
                    " AND BINARY email = '{email}'"
                ).format(
                    name=attrs["name"],
                    hospital=attrs["hospital"],
                    email=attrs["email"]
                )
            else:
                cmd = (
                    " WHERE name = '{name}'"
                    " AND hospital = '{hospital}'"
                    " AND email = '{email}'"
                ).format(
                    name=attrs["name"],
                    hospital=attrs["hospital"],
                    email=attrs["email"]
                )
        elif self.model_type == Proband:
            table = self._select_from('Proband')
            cmd = " WHERE gel_id = '{}'".format(attrs["gel_id"])
        elif self.model_type == Family:
            table = self._select_from('Family')
            cmd = " WHERE gel_family_id = '{}'".format(attrs["gel_family_id"])
        elif self.model_type == Relative:
            table = self._select_from('Relative')
            cmd = " WHERE gel_id = '{}' AND proband_id = {}".format(
                attrs["gel_id"], attrs["proband"].id)
        elif self.model_type == Phenotype:
            table = self._select_from('Phenotype')
            cmd = " WHERE hpo_terms = '{}'".format(attrs["hpo_terms"])
        elif self.model_type == InterpretationReportFamily:
            table = self._select_from('InterpretationReportFamily')
            cmd = " WHERE ir_family_id = '{}'".format(attrs["ir_family_id"])
        elif self.model_type == Panel:
            table = self._select_from('Panel')
            cmd = " WHERE panelapp_id = '{}'".format(attrs["panelapp_id"])
        elif self.model_type == PanelVersion:
            table = self._select_from('PanelVersion')
            cmd = " WHERE panel_id = '{}' AND version_number = '{}'".format(
                attrs["panel"].id, attrs["version_number"])
        elif self.model_type == InterpretationReportFamilyPanel:
            # Through-table uses Django's default lowercase table name.
            table = self._select_from('gel2mdt_interpretationreportfamilypanel')
            # no quotes because both values are FK ids
            cmd = " WHERE ir_family_id = {} AND panel_id = {}".format(
                attrs["ir_family"].id, attrs["panel"].id)
        elif self.model_type == Gene:
            table = self._select_from('Gene')
            cmd = " WHERE hgnc_id = '{}'".format(attrs["hgnc_id"])
        elif self.model_type == Transcript:
            table = self._select_from('Transcript')
            cmd = " WHERE name = '{}' AND genome_assembly_id = {}".format(
                attrs["name"], attrs["genome_assembly"].id)
        elif self.model_type == GELInterpretationReport:
            table = self._select_from('GELInterpretationReport')
            cmd = " WHERE sha_hash = '{}'".format(attrs["sha_hash"])
        elif self.model_type == Variant:
            table = self._select_from('Variant')
            cmd = (
                " WHERE chromosome = '{chromosome}'"
                " AND position = {position}"
                " AND reference = '{reference}'"
                " AND alternate = '{alternate}'"
                " AND genome_assembly_id = {genome_assembly_id}"
            ).format(
                chromosome=attrs["chromosome"],
                position=attrs["position"],
                reference=attrs["reference"],
                alternate=attrs["alternate"],
                genome_assembly_id=attrs["genome_assembly"].id
            )
        elif self.model_type == TranscriptVariant:
            table = self._select_from('TranscriptVariant')
            cmd = " WHERE transcript_id = {} AND variant_id = {}".format(
                attrs["transcript"].id, attrs["variant"].id)
        elif self.model_type == ProbandVariant:
            table = self._select_from('ProbandVariant')
            cmd = " WHERE variant_id = {} AND interpretation_report_id = {}".format(
                attrs["variant"].id, attrs["interpretation_report"].id)
        elif self.model_type == PVFlag:
            table = self._select_from('PVFlag')
            cmd = " WHERE proband_variant_id = {} AND flag_name = '{}'".format(
                attrs["proband_variant"].id, attrs["flag_name"])
        elif self.model_type == ProbandTranscriptVariant:
            table = self._select_from('ProbandTranscriptVariant')
            cmd = " WHERE transcript_id = {} AND proband_variant_id = {}".format(
                attrs["transcript"].id, attrs["proband_variant"].id)
        elif self.model_type == ReportEvent:
            table = self._select_from('ReportEvent')
            cmd = " WHERE re_id = '{}' AND proband_variant_id = {}".format(
                attrs["re_id"], attrs["proband_variant"].id)
        elif self.model_type == ToolOrAssemblyVersion:
            table = self._select_from('ToolOrAssemblyVersion')
            cmd = " WHERE tool_name = '{}' AND version_number = '{}'".format(
                attrs["tool_name"], attrs["version_number"])
        elif self.model_type == SVRegion:
            table = self._select_from('SVRegion')
            cmd = (
                f" WHERE chromosome = '{attrs['chromosome']}'"
                f" AND sv_start = {attrs['sv_start']}"
                f" AND sv_end = '{attrs['sv_end']}'"
                f" AND genome_assembly_id = {attrs['genome_assembly'].id}"
            )
        elif self.model_type == SV:
            table = self._select_from('SV')
            if attrs['sv_region2'] is None:
                cmd = (
                    f" WHERE sv_region1_id = {attrs['sv_region1'].id}"
                    f" AND variant_type = '{attrs['variant_type']}'"
                )
            else:
                # BUGFIX: the original emitted a second WHERE clause for
                # sv_region2 (invalid SQL); it must be joined with AND.
                cmd = (
                    f" WHERE sv_region1_id = {attrs['sv_region1'].id}"
                    f" AND sv_region2_id = {attrs['sv_region2'].id}"
                    f" AND variant_type = '{attrs['variant_type']}'"
                )
        elif self.model_type == ProbandSV:
            table = self._select_from('ProbandSV')
            cmd = (
                f" WHERE interpretation_report_id = {attrs['interpretation_report'].id}"
                f" AND sv_id = {attrs['sv'].id}"
            )
        elif self.model_type == ProbandSVGene:
            table = self._select_from('ProbandSVGene')
            cmd = (
                f" WHERE proband_sv_id = {attrs['proband_sv'].id}"
                f" AND gene_id = '{attrs['gene'].id}'"
            )
        elif self.model_type == STRVariant:
            table = self._select_from('STRVariant')
            cmd = (
                f" WHERE chromosome = '{attrs['chromosome']}'"
                f" AND str_start = '{attrs['str_start']}'"
                f" AND str_end = '{attrs['str_end']}'"
                f" AND repeated_sequence = '{attrs['repeated_sequence']}'"
                f" AND normal_threshold = '{attrs['normal_threshold']}'"
                f" AND pathogenic_threshold = '{attrs['pathogenic_threshold']}'"
                f" AND genome_assembly_id = {attrs['genome_assembly'].id}"
            )
        elif self.model_type == ProbandSTR:
            table = self._select_from('ProbandSTR')
            cmd = (
                f" WHERE interpretation_report_id = {attrs['interpretation_report'].id}"
                f" AND str_variant_id = {attrs['str_variant'].id}"
            )
        elif self.model_type == ProbandSTRGene:
            table = self._select_from('ProbandSTRGene')
            cmd = (
                f" WHERE proband_str_id = {attrs['proband_str'].id}"
                f" AND gene_id = '{attrs['gene'].id}'"
            )
        else:
            raise ValueError(
                "No raw SQL lookup defined for model type {}".format(self.model_type))
        return table + cmd

    def check_found_in_db(self, queryset):
        """
        Query the database for a record of this model type matching the
        attributes. Returns the model instance when exactly one row matches,
        False when no row matches, and raises ValueError when several do.

        ``queryset`` is currently unused; kept for interface stability.
        """
        sql_cmd = self.set_sql_cmd()
        matches = list(self.model_type.objects.raw(sql_cmd))
        if len(matches) == 1:
            entry = matches[0]
        elif len(matches) == 0:
            entry = False
        else:
            print(matches)
            raise ValueError("Multiple entries found for same object.")
        # also set self.entry here as this may be called outside __init__
        self.entry = entry
        return entry
class ManyCaseModel(object):
    """
    Class to deal with situations where we need to extend on CaseModel to allow
    for ManyToMany field population, as the bulk update must be handled using
    'through' tables.
    """
    def __init__(self, model_type, model_attributes_list, model_objects):
        # Wrap each attribute dict in its own CaseModel; ``entries`` mirrors
        # ``case_models`` order, holding each model's DB entry (or False).
        self.model_type = model_type
        self.model_attributes_list = model_attributes_list
        self.model_objects = model_objects
        self.case_models = [
            CaseModel(model_type, attributes, model_objects)
            for attributes in model_attributes_list
        ]
        self.entries = self.get_entry_list()

    def get_entry_list(self):
        """Return the database entry (or False) for each wrapped CaseModel."""
        return [case_model.entry for case_model in self.case_models]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
def run_forever_like_Forrest():
    """Scrape the NSE option-chain table once into a timestamped CSV, then sleep.

    Opens a Chrome session, reads every data row of the ``#octable`` table
    (skipping the trailing totals row), writes the selected columns out and
    sleeps 900 seconds so the module-level loop polls every 15 minutes.
    """
    moment = time.strftime("%Y-%b-%d__%H_%M_%S", time.localtime())
    headers = ("LATER OI (CALL),CHANGE IN OI (CALL),LTP (CALL),NET CHANGE (CALL),"
               "STRIKE PRICE,NET CHANGE (PUT),LTP (PUT),CHANGE IN OI (PUT),"
               "LATER OI (PUT),\n")
    # <td> indices of the columns of interest, in output order.
    columns = (1, 2, 5, 6, 11, 16, 17, 20, 21)
    chrome_path = r"D:\Work\Yash\Webscraper 2.0\chromedriver.exe"
    driver = webdriver.Chrome(chrome_path)
    try:
        driver.get("https://www.nseindia.com/live_market/dynaContent/live_watch/option_chain/optionKeys.jsp")
        t_id = driver.find_element(By.ID, 'octable')
        # was find_element_by_tag_name, which is removed in Selenium 4 --
        # use the same By API as the rest of the function
        tbody = t_id.find_element(By.TAG_NAME, 'tbody')
        rows = tbody.find_elements(By.TAG_NAME, "tr")
        # 'with' guarantees the CSV is closed even if a row lookup fails.
        with open('output ' + moment + '.csv', 'w') as f:
            f.write(headers)
            for row in rows[:-1]:  # last row is the totals row - skip it
                cells = row.find_elements(By.TAG_NAME, "td")  # one DOM query per row
                f.write(",".join(cells[i].text.replace(",", "") for i in columns) + ",\n")
    finally:
        driver.quit()  # never leak the browser process
    time.sleep(900)
# Poll the option chain indefinitely; each call scrapes once and then sleeps
# 900 seconds, so the effective sampling interval is 15 minutes.
while True:
    run_forever_like_Forrest()
|
import argparse
import json
import math
import os
import shutil
import socket
from copy import deepcopy
from datetime import datetime
from pprint import pformat
from typing import Callable, Dict, Generator
import numpy
import psutil
import tensorflow as tf
import yaml
from tensorflow.python.keras.callbacks import LambdaCallback, ModelCheckpoint
from fabnn.callbacks import TensorBoard
from fabnn.data_storage import Data, Data3D, DataPlanar
from fabnn.data_storage.data import MODE_TRAIN, make_batch, make_batch_swap_axes
from fabnn.dataset_utils import classify_dataset_class
from fabnn.models import models_collection
from fabnn.utils import (
ensure_dir,
get_git_revision_short_hash,
human_size,
setup_console_logger,
update_timings_dict,
)
# Default to the first GPU unless the caller already pinned devices.
if "CUDA_VISIBLE_DEVICES" not in os.environ:
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# config = tf.compat.v1.ConfigProto()
# config.gpu_options.allow_growth = True  # dynamically grow the memory used on the GPU
# sess = tf.compat.v1.Session(config=config)
# tf.compat.v1.keras.backend.set_session(sess) # set this TensorFlow session as the default session for Keras # noqa
# Cumulative per-phase timing stats; filled in by train()'s process_timings
# callback via update_timings_dict after every epoch.
TIMINGS_TRAIN = dict()
TIMINGS_VALIDATE = dict()
logger = setup_console_logger(__name__)
def make_batches_gen(
    data: Data,
    data_name: str,
    batch_size: int,
    batch_build_length: int = None,
    initial_data_batch_start_index: int = 0,
    initial_data_shuffle_random_seed: int = 0,
    make_batch_function: Callable = make_batch,
    reshuffle: bool = False,
) -> Generator:
    """Endlessly yield batches from *data*, optionally reshuffling each round.

    The two ``initial_*`` arguments let a resumed training run fast-forward
    to the dataset offset and shuffle round where the previous run stopped.
    """
    offset = initial_data_batch_start_index
    shuffle_round = initial_data_shuffle_random_seed
    if shuffle_round != 0:
        # Resuming mid-training: regenerate patches for the current round.
        data.generate_patches(seed_=shuffle_round)
    dataset_length = len(data)
    # Round the build length up to at least one batch and then to a whole
    # number of batches.
    build_length = batch_size if batch_build_length is None else max(batch_build_length, batch_size)
    build_length = int(round(build_length / batch_size) * batch_size)
    while True:
        yield from make_batch_function(data, offset, batch_size, build_length)
        offset += build_length
        if offset >= dataset_length:
            offset = 0
            shuffle_round += 1
            logger.info("{}: Full round over the dataset complete.".format(data_name))
            if reshuffle:
                data.generate_patches(seed_=shuffle_round)
                dataset_length = len(data)
def train(
    batch_build_length: int,
    batch_size: int,
    metadata: dict,
    model_config: Dict,
    output_dir: str,
    test_data: Data,
    train_data: Data,
    initial_epoch: int = 0,
    initial_weights: str = "",
    tensorboard_verbose: bool = False,
    train_epochs: int = 10,
    train_steps_per_epoch_limit: int = None,
    validation_freq: int = 1,
    verbose_logging: bool = False,
):
    """Train (or resume training of) a model and checkpoint it into ``output_dir``.

    Either builds a fresh model from ``model_config`` (persisting model.json
    and model_metadata.json next to the weights) or, when ``initial_epoch`` is
    greater than 0, resumes from the saved ``model_checkpoint.h5``.
    ``initial_weights`` alternatively warm-starts a fresh model from an
    existing weights file; the two resume mechanisms are mutually exclusive.

    :param batch_build_length: patches assembled per generator build step
        (rounded to a multiple of ``batch_size`` inside make_batches_gen)
    :param batch_size: patches per training batch
    :param metadata: run metadata dict; model params are added and dumped to
        model_metadata.json on fresh runs
    :param model_config: architecture config (model_arch_name, patch_size,
        stencil_channels, scale_levels, ...)
    :param output_dir: directory receiving weights, checkpoints and tf_logs
    :param test_data: validation dataset
    :param train_data: training dataset
    :param initial_epoch: epoch to resume from (loads model_checkpoint.h5)
    :param initial_weights: path of weights to load into a fresh model
    :param tensorboard_verbose: enable histogram/gradient/image logging
    :param train_epochs: epoch number to train up to
    :param train_steps_per_epoch_limit: optional cap on batches per epoch
    :param validation_freq: run validation every N epochs
    :param verbose_logging: unused here; accepted for caller compatibility
    :raises ValueError: if both initial_epoch and initial_weights are set
    :raises RuntimeError: when resuming but no checkpoint file exists
    """
    if initial_epoch > 0 and initial_weights:
        raise ValueError("Only one of: initial_epoch or initial_weights - can be set at once")
    model_arch_name = model_config["model_arch_name"]
    weights_best_filename = os.path.join(output_dir, "model_weights.h5")
    checkpoint_filename = os.path.join(output_dir, "model_checkpoint.h5")
    logdir = os.path.join(output_dir, "tf_logs")
    if initial_epoch > 0:
        # Resume: restore the full model (architecture + optimizer state).
        if os.path.exists(checkpoint_filename):
            logger.info("Loading model: {}".format(checkpoint_filename))
            model = tf.keras.models.load_model(checkpoint_filename)
        else:
            raise RuntimeError(
                "{} not found, you should start from scratch".format(checkpoint_filename)
            )
    else: # initial_epoch == 0
        # Fresh run: build the model from the configured architecture.
        patch_size = model_config["patch_size"]
        # Input tensor shape: (x, y, z, channels).
        feature_shape = (
            patch_size[0],
            patch_size[1],
            patch_size[2],
            len(model_config["stencil_channels"]),
        )
        model_make_function = models_collection[model_arch_name]
        model_params = deepcopy(model_config)
        model_params.update(
            {
                "feature_shape": feature_shape,
                "voxel_size": tuple(train_data.voxel_size),
            }
        )
        model = model_make_function(params=model_params)
        model.compile(optimizer="adam", loss="mse", metrics=["mse"])
        ensure_dir(output_dir)
        # Persist the architecture and run metadata alongside the weights.
        with open(os.path.join(output_dir, "model.json"), "w") as json_file:
            json_file.write(model.to_json())
        metadata["model_params"] = model_params
        with open(os.path.join(output_dir, "model_metadata.json"), "w") as json_file:
            json.dump(metadata, json_file, indent=4, sort_keys=True)
        if initial_weights:
            logger.info("Loading weights: {}".format(initial_weights))
            model.load_weights(initial_weights)
        # Starting from scratch: drop TensorBoard logs from any previous run.
        if os.path.exists(logdir):
            shutil.rmtree(logdir)
    # Best-validation weights are tracked separately from the rolling
    # full-model checkpoint used for resuming.
    # NOTE(review): `period=` is deprecated in newer Keras in favour of
    # `save_freq` -- confirm against the pinned TF version before upgrading.
    checkpoint_best_callback = ModelCheckpoint(
        filepath=weights_best_filename,
        monitor="val_loss",
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        mode="auto",
        period=1,
    )
    checkpoint_all_callback = ModelCheckpoint(
        filepath=checkpoint_filename,
        verbose=1,
        save_best_only=False,
        save_weights_only=False,
        mode="auto",
        period=1,
    )
    tensorboard_callback = TensorBoard(
        log_dir=logdir,
        histogram_freq=25 if tensorboard_verbose else 0,
        write_grads=tensorboard_verbose,
        write_images=tensorboard_verbose,
        profile_batch=0,
    )
    def process_timings(epoch, logs):
        # Pull per-epoch timing stats out of both datasets, fold them into the
        # module-level accumulators and log this epoch's numbers.
        this_epoch_train_timings = train_data.get_and_clean_timings()
        this_epoch_test_timings = test_data.get_and_clean_timings()
        update_timings_dict(TIMINGS_TRAIN, this_epoch_train_timings)
        update_timings_dict(TIMINGS_VALIDATE, this_epoch_test_timings)
        logger.info("Epoch {}".format(epoch))
        logger.info("Data.train timings: {}".format(pformat(this_epoch_train_timings)))
        logger.info("Data.test timings: {}".format(pformat(this_epoch_test_timings)))
    process_timings_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: print(), on_epoch_end=process_timings
    )
    # These architectures expect axis-swapped batches -- see make_batch_swap_axes.
    make_batch_function = (
        make_batch_swap_axes
        if model_arch_name in ("planar_first", "first_baseline")
        else make_batch
    )
    train_steps_per_epoch = math.ceil(len(train_data) / batch_size)
    if train_steps_per_epoch_limit is not None:
        train_steps_per_epoch = min(train_steps_per_epoch, train_steps_per_epoch_limit)
    # When resuming, fast-forward the generator to where the previous run
    # stopped: the in-dataset offset and the number of completed shuffle rounds.
    initial_train_data_batch_start_index = (
        initial_epoch * train_steps_per_epoch * batch_size
    ) % len(train_data)
    initial_data_shuffle_random_seed = (
        initial_epoch * train_steps_per_epoch * batch_size
    ) // len(train_data)
    callbacks = [
        checkpoint_best_callback,
        checkpoint_all_callback,
        tensorboard_callback,
        process_timings_callback,
    ]
    # NOTE(review): fit_generator is deprecated in TF2 (model.fit accepts
    # generators) -- kept as-is for compatibility with the pinned TF version.
    model.fit_generator(
        generator=make_batches_gen(
            data=train_data,
            data_name="train_data",
            batch_size=batch_size,
            batch_build_length=batch_build_length,
            initial_data_batch_start_index=initial_train_data_batch_start_index,
            initial_data_shuffle_random_seed=initial_data_shuffle_random_seed,
            make_batch_function=make_batch_function,
            reshuffle=True,
        ),
        steps_per_epoch=train_steps_per_epoch,
        validation_data=make_batches_gen(
            data=test_data,
            data_name="validation_data",
            batch_size=batch_size,
            make_batch_function=make_batch_function,
            reshuffle=False,
        ),
        validation_freq=validation_freq,
        validation_steps=math.ceil(len(test_data) / batch_size),
        epochs=train_epochs,
        verbose=2,
        callbacks=callbacks,
        initial_epoch=initial_epoch,
    )
def get_args():
    """Build and parse the command-line arguments for a training run."""
    parser = argparse.ArgumentParser(description="Run an full optimization on a given task file")
    # Required I/O paths.
    parser.add_argument("-d", "--dataset", dest="dataset", type=str, required=True)
    parser.add_argument("-c", "--config", dest="model_config", type=str, required=True)
    parser.add_argument("-o", "--output", dest="output_model_directory", type=str, required=True)
    # Optional integer knobs with help text: (flags, dest, default, help).
    int_options = (
        (("-t", "--train-patches"), "train_patches", None,
         "Number of patches to take from the train data"),
        (("-e", "--train-epoch-limit"), "train_steps_per_epoch_limit", None,
         "Maximum number of batches per epoch in the training"),
        (("-b", "--batch-size"), "batch_size", 2000,
         "Number of patches in the batch"),
        (("-bb", "--batch-build-size"), "batch_build_length", 2000,
         "Number of patches in the batch"),
        (("--validation-patches",), "validation_patches", None,
         "Number of patches to take from the validation data"),
    )
    for flags, dest, default, help_text in int_options:
        parser.add_argument(*flags, dest=dest, type=int, default=default, help=help_text)
    # Remaining tuning / resume options.
    parser.add_argument("--memory-limit", dest="memory_limit", type=float, default=None)
    parser.add_argument("--memory-volumes", dest="memory_volumes", type=int, default=None)
    parser.add_argument("--rotations", dest="rotations", type=int, default=None)
    parser.add_argument("--initial-epoch", dest="initial_epoch", type=int, default=0)
    parser.add_argument("--initial-weights", dest="initial_weights", type=str, default=None)
    parser.add_argument("--epochs", dest="train_epochs", type=int, default=2000)
    parser.add_argument("--validation-freq", dest="validation_freq", type=int, default=1)
    # Verbosity switches.
    parser.add_argument(
        "-tt", "--tensorboard-verbose",
        help="Additional outputs to tensorboard", action="store_true",
    )
    parser.add_argument(
        "-v", "--verbose",
        help="More debug prints and logging messages", action="store_true",
    )
    return parser.parse_args()
def main():
    """Entry point: load the model config and dataset description, build the
    training and validation data sources, and run training to completion
    (or until interrupted)."""
    args = get_args()
    # Load model hyper-parameters from the JSON config file.
    with open(args.model_config, "r") as json_file:
        model_config = json.load(json_file)
    # Backward compatibility: older configs stored stencil_channels as a
    # count; expand it into the corresponding prefix of channel names.
    if type(model_config["stencil_channels"]) == int:
        model_config["stencil_channels"] = ["scattering", "absorption", "mask"][
            : model_config["stencil_channels"]
        ]
    # Relative paths inside the dataset YAML are resolved against its directory.
    dataset_base_dir = os.path.abspath(os.path.split(args.dataset)[0])
    with open(args.dataset, "r") as f:
        dataset = yaml.full_load(f)
    train_data_items = dataset["items"]
    validate_data_items = dataset["items_validate"]
    materials_file = dataset["materials_file"]
    if not os.path.isabs(materials_file):
        materials_file = os.path.join(dataset_base_dir, materials_file)
    # Choose the data loader class per item set; classify_dataset_class
    # presumably distinguishes planar from volumetric data — defined elsewhere.
    train_data_class = (
        DataPlanar if classify_dataset_class(dataset_base_dir, train_data_items) else Data3D
    )
    validate_data_class = (
        DataPlanar if classify_dataset_class(dataset_base_dir, validate_data_items) else Data3D
    )
    # Optional cap on the sliding-window cache, given as a fraction of total RAM.
    if args.memory_limit:
        sliding_window_memory_limit = psutil.virtual_memory().total * args.memory_limit
        logger.info(
            "Sliding window memory limit is set to {}".format(
                human_size(sliding_window_memory_limit)
            )
        )
    else:
        sliding_window_memory_limit = None
    # Training data source: shuffled, optionally rotation-augmented and
    # limited in patch count / memory footprint.
    train_data = train_data_class(
        alignment_z_centered=model_config.get("alignment_z_centered", train_data_class == Data3D),
        data_items=train_data_items,
        dataset_base_dir=dataset_base_dir,
        find_inner_material_voxels=model_config["find_inner_material_voxels"],
        limit_patches_number=args.train_patches,
        materials_file=materials_file,
        mode=MODE_TRAIN,
        patch_size=model_config["patch_size"],
        rotations=args.rotations,
        sat_object_class_name="TreeSAT",
        scale_levels=model_config["scale_levels"],
        shuffle_patches=True,
        sliding_window_length=args.memory_volumes,
        sliding_window_memory_limit=sliding_window_memory_limit,
        stencil_channels=model_config["stencil_channels"],
        verbose_logging=args.verbose,
    )
    # Validation data source: no rotation augmentation, single-volume window.
    # NOTE(review): the alignment default below reuses train_data_class, not
    # validate_data_class — looks like a copy-paste; confirm intent.
    test_data = validate_data_class(
        alignment_z_centered=model_config.get("alignment_z_centered", train_data_class == Data3D),
        data_items=validate_data_items,
        dataset_base_dir=dataset_base_dir,
        find_inner_material_voxels=model_config["find_inner_material_voxels"],
        limit_patches_number=args.validation_patches,
        materials_file=materials_file,
        mode=MODE_TRAIN,
        patch_size=model_config["patch_size"],
        rotations=1,
        sat_object_class_name="TreeSAT",
        scale_levels=model_config["scale_levels"],
        shuffle_patches=True,  # should be enabled if we limit the number of validation patches
        sliding_window_length=1,
        stencil_channels=model_config["stencil_channels"],
        verbose_logging=args.verbose,
    )
    logger.info("Training on {} patches".format(len(train_data)))
    logger.info("Validation will be performed on {} patches".format(len(test_data)))
    # The output directory's final path component doubles as the model name.
    model_name = os.path.split(args.output_model_directory)[1]
    batch_build_length = args.batch_build_length
    batch_size = args.batch_size
    train_steps_per_epoch_limit = args.train_steps_per_epoch_limit
    # Run metadata recorded alongside the model for reproducibility.
    # NOTE(review): key naming mixes dashes and underscores — kept as-is since
    # downstream consumers may rely on the exact keys.
    metadata = {
        "batch_build_length": batch_build_length,
        "batch_size": batch_size,
        "dataset_path": args.dataset,
        "git_commit": get_git_revision_short_hash(),
        "hostname": socket.gethostname(),
        "memory-volumes": args.memory_volumes,
        "model_name": model_name,
        "rotations": args.rotations,
        "train_data_items": dataset["items"],
        "train_start_time": datetime.now().strftime("%Y-%m-%d-%H:%M:%S"),
        "train_steps_per_epoch_limit": train_steps_per_epoch_limit,
        "training_patches_number": len(train_data),
        "validate_data_items": dataset["items_validate"],
        "validation_patches_number": len(test_data),
        # Average number of volumes per sliding-window load group.
        "sliding-window-volumes-average": round(
            numpy.array(
                [len(volume_idxs) for data_idxs, volume_idxs in train_data.load_indexes.items()]
            ).mean(),
            2,
        ),
    }
    try:
        train(
            batch_build_length=batch_build_length,
            batch_size=batch_size,
            metadata=metadata,
            model_config=model_config,
            output_dir=args.output_model_directory,
            test_data=test_data,
            train_data=train_data,
            # the arguments below are optional
            initial_epoch=args.initial_epoch,
            initial_weights=args.initial_weights,
            tensorboard_verbose=args.tensorboard_verbose,
            train_epochs=args.train_epochs,
            train_steps_per_epoch_limit=train_steps_per_epoch_limit,
            validation_freq=args.validation_freq,
            verbose_logging=args.verbose,
        )
    except KeyboardInterrupt:
        # Dump accumulated timing stats before propagating Ctrl-C.
        logger.info("Data.train timings: {}".format(pformat(TIMINGS_TRAIN)))
        logger.info("Data.test timings: {}".format(pformat(TIMINGS_VALIDATE)))
        raise
# Script entry point: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.