text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'WindowUI.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the 3D-transform demo main window.

    Do not edit by hand: regenerate from 'WindowUI.ui' with pyuic5 instead —
    any manual change is lost on the next regeneration (see file header).
    """

    def setupUi(self, MainWindow):
        """Create and position every widget on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1184, 827)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # 3D viewport (pyqtgraph promoted widget; imported at module bottom).
        self.widget = GLViewWidget(self.centralwidget)
        self.widget.setGeometry(QtCore.QRect(0, 0, 921, 781))
        self.widget.setObjectName("widget")
        # Translate controls: header label plus x/y/z line edits.
        self.label = QtWidgets.QLabel(self.centralwidget)
        self.label.setGeometry(QtCore.QRect(950, 20, 47, 13))
        self.label.setObjectName("label")
        self.translateX = QtWidgets.QLineEdit(self.centralwidget)
        self.translateX.setGeometry(QtCore.QRect(950, 50, 113, 20))
        self.translateX.setObjectName("translateX")
        self.translateY = QtWidgets.QLineEdit(self.centralwidget)
        self.translateY.setGeometry(QtCore.QRect(950, 80, 113, 20))
        self.translateY.setObjectName("translateY")
        self.translateZ = QtWidgets.QLineEdit(self.centralwidget)
        self.translateZ.setGeometry(QtCore.QRect(950, 110, 113, 20))
        self.translateZ.setObjectName("translateZ")
        self.label_2 = QtWidgets.QLabel(self.centralwidget)
        self.label_2.setGeometry(QtCore.QRect(930, 50, 16, 16))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.centralwidget)
        self.label_3.setGeometry(QtCore.QRect(930, 80, 16, 16))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.centralwidget)
        self.label_4.setGeometry(QtCore.QRect(930, 110, 16, 16))
        self.label_4.setObjectName("label_4")
        # Rotate controls: header label plus x/y/z line edits (degrees).
        self.label_5 = QtWidgets.QLabel(self.centralwidget)
        self.label_5.setGeometry(QtCore.QRect(950, 150, 47, 13))
        self.label_5.setObjectName("label_5")
        self.rotateZ = QtWidgets.QLineEdit(self.centralwidget)
        self.rotateZ.setGeometry(QtCore.QRect(950, 240, 113, 20))
        self.rotateZ.setObjectName("rotateZ")
        self.label_6 = QtWidgets.QLabel(self.centralwidget)
        self.label_6.setGeometry(QtCore.QRect(930, 210, 16, 16))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.centralwidget)
        self.label_7.setGeometry(QtCore.QRect(930, 240, 16, 16))
        self.label_7.setObjectName("label_7")
        self.rotateX = QtWidgets.QLineEdit(self.centralwidget)
        self.rotateX.setGeometry(QtCore.QRect(950, 180, 113, 20))
        self.rotateX.setObjectName("rotateX")
        self.label_8 = QtWidgets.QLabel(self.centralwidget)
        self.label_8.setGeometry(QtCore.QRect(930, 180, 16, 16))
        self.label_8.setObjectName("label_8")
        self.rotateY = QtWidgets.QLineEdit(self.centralwidget)
        self.rotateY.setGeometry(QtCore.QRect(950, 210, 113, 20))
        self.rotateY.setObjectName("rotateY")
        # Scale controls: per-axis line edits plus an all-axis field below.
        self.label_9 = QtWidgets.QLabel(self.centralwidget)
        self.label_9.setGeometry(QtCore.QRect(930, 310, 16, 16))
        self.label_9.setObjectName("label_9")
        self.scaleZ = QtWidgets.QLineEdit(self.centralwidget)
        self.scaleZ.setGeometry(QtCore.QRect(950, 370, 113, 20))
        self.scaleZ.setObjectName("scaleZ")
        self.scaleX = QtWidgets.QLineEdit(self.centralwidget)
        self.scaleX.setGeometry(QtCore.QRect(950, 310, 113, 20))
        self.scaleX.setObjectName("scaleX")
        self.scaleY = QtWidgets.QLineEdit(self.centralwidget)
        self.scaleY.setGeometry(QtCore.QRect(950, 340, 113, 20))
        self.scaleY.setObjectName("scaleY")
        self.label_10 = QtWidgets.QLabel(self.centralwidget)
        self.label_10.setGeometry(QtCore.QRect(950, 280, 47, 13))
        self.label_10.setObjectName("label_10")
        self.label_11 = QtWidgets.QLabel(self.centralwidget)
        self.label_11.setGeometry(QtCore.QRect(930, 370, 16, 16))
        self.label_11.setObjectName("label_11")
        self.label_12 = QtWidgets.QLabel(self.centralwidget)
        self.label_12.setGeometry(QtCore.QRect(930, 340, 16, 16))
        self.label_12.setObjectName("label_12")
        self.label_13 = QtWidgets.QLabel(self.centralwidget)
        self.label_13.setGeometry(QtCore.QRect(1070, 180, 47, 13))
        self.label_13.setObjectName("label_13")
        self.label_14 = QtWidgets.QLabel(self.centralwidget)
        self.label_14.setGeometry(QtCore.QRect(1070, 210, 47, 13))
        self.label_14.setObjectName("label_14")
        self.label_15 = QtWidgets.QLabel(self.centralwidget)
        self.label_15.setGeometry(QtCore.QRect(1070, 240, 47, 13))
        self.label_15.setObjectName("label_15")
        # Action buttons and the legend label.
        self.pushButton = QtWidgets.QPushButton(self.centralwidget)
        self.pushButton.setGeometry(QtCore.QRect(970, 440, 181, 23))
        self.pushButton.setObjectName("pushButton")
        self.label_16 = QtWidgets.QLabel(self.centralwidget)
        self.label_16.setGeometry(QtCore.QRect(950, 640, 201, 141))
        self.label_16.setObjectName("label_16")
        self.scaleAll = QtWidgets.QLineEdit(self.centralwidget)
        self.scaleAll.setGeometry(QtCore.QRect(970, 400, 113, 20))
        self.scaleAll.setObjectName("scaleAll")
        self.label_17 = QtWidgets.QLabel(self.centralwidget)
        self.label_17.setGeometry(QtCore.QRect(930, 400, 41, 16))
        self.label_17.setObjectName("label_17")
        self.resetButton = QtWidgets.QPushButton(self.centralwidget)
        self.resetButton.setGeometry(QtCore.QRect(970, 470, 181, 23))
        self.resetButton.setObjectName("resetButton")
        self.projectionButton = QtWidgets.QPushButton(self.centralwidget)
        self.projectionButton.setGeometry(QtCore.QRect(970, 500, 181, 23))
        self.projectionButton.setObjectName("projectionButton")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1184, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Apply all user-visible (translatable) strings to the widgets."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.label.setText(_translate("MainWindow", "Translate"))
        self.label_2.setText(_translate("MainWindow", "x"))
        self.label_3.setText(_translate("MainWindow", "y"))
        self.label_4.setText(_translate("MainWindow", "z"))
        self.label_5.setText(_translate("MainWindow", "Rotate"))
        self.label_6.setText(_translate("MainWindow", "y"))
        self.label_7.setText(_translate("MainWindow", "z"))
        self.label_8.setText(_translate("MainWindow", "x"))
        self.label_9.setText(_translate("MainWindow", "x"))
        self.label_10.setText(_translate("MainWindow", "Scale"))
        self.label_11.setText(_translate("MainWindow", "z"))
        self.label_12.setText(_translate("MainWindow", "y"))
        self.label_13.setText(_translate("MainWindow", "degrees"))
        self.label_14.setText(_translate("MainWindow", "degrees"))
        self.label_15.setText(_translate("MainWindow", "degrees"))
        self.pushButton.setText(_translate("MainWindow", "Perform transformations"))
        self.label_16.setText(_translate("MainWindow", "<html><head/><body><p>Legend:</p><p>X Axis - blue line</p><p>Y Axis - yellow line</p><p>Z Axis - green line</p><p>Grid cell size: 10</p></body></html>"))
        self.label_17.setText(_translate("MainWindow", "all axis"))
        self.resetButton.setText(_translate("MainWindow", "Reset"))
        self.projectionButton.setText(_translate("MainWindow", "Toggle projections"))
# Deferred import placed after the class, exactly as pyuic emits for
# promoted widgets; the name is resolved by the time setupUi() runs.
from pyqtgraph.opengl import GLViewWidget

if __name__ == "__main__":
    # Manual smoke test: build the window and run the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
from django.shortcuts import render, redirect
# Create your views here.
from test_view.forms import ContactForm
from django.views.generic.edit import FormView
class ContactView(FormView):
    """Contact-form page: renders ContactForm, emails it on a valid POST,
    then redirects to the thanks page."""
    template_name = 'test_view/contact.html'
    form_class = ContactForm
    # success_url = 'test_view/posted.html'
    success_url = 'test_view/thanks'

    def form_valid(self, form):
        # This method is called when valid form data has been POSTed.
        # It should return an HttpResponse (the redirect to success_url).
        form.send_email()
        return super().form_valid(form)
from django.http import HttpResponse
def thanks(request):
    """Plain thank-you page shown after a successful form submission."""
    message = "谢谢使用."
    return HttpResponse(message)
# from django.views import View
# class AuthorCreate(View):
# """处理GET请求"""
# def get(self, request):
# # articles = ContactForm.objects.all()
# # context = {'articles': articles}
# template_name = 'test_view/create_author.html'
# # form_class = ContactForm
# # context = {'author_name': author_name}
# # return render(request, 'article/list.html', context)
# return render(request, template_name, form_class)
#
# def post(self, request):
# from test_view.models import Author
#
# # articles = ContactForm.objects.all()
# # context = {'articles': articles}
#
# model = Author
# fields = ['name']
#
# # return render(request, 'test_view/posted.html', context)
# return redirect("test_view:thanks")
from .forms import CreateAuthorForm
def author_create(request):
    """Create an Author from a submitted form.

    GET renders an empty CreateAuthorForm; POST validates the submission,
    saves a new author, and redirects to the thanks page. Invalid POST data
    yields a plain error response.
    """
    if request.method == "POST":
        # Bind the submitted data to a form instance.
        create_author_form = CreateAuthorForm(data=request.POST)
        if create_author_form.is_valid():
            # Build the model instance without hitting the database yet,
            # then persist it explicitly (kept as two steps so extra fields
            # could be set on new_author before saving).
            new_author = create_author_form.save(commit=False)
            new_author.save()
            # Back to the thanks page once the author is stored.
            return redirect("test_view:thanks")
        else:
            # Invalid form data: report the error to the user.
            return HttpResponse("表单内容有误,请重新填写。")
    else:
        # GET: render an unbound form.
        create_author_form = CreateAuthorForm()
        context = {'name': create_author_form}
        return render(request, 'test_view/create.html', context)
def author_list(request):
    """Render the list of all AuthorPost records."""
    from .models import AuthorPost
    context = {'authors': AuthorPost.objects.all()}
    return render(request, 'test_view/list.html', context)
# from django.views.generic.edit import CreateView
# from test_view.models import Author
#
# #
# class AuthorCreate(CreateView):
# model = Author
# fields = ['name']
#
# success_url = '../thanks'
# # success_url = 'test_view/thanks' |
from nltk.tokenize import sent_tokenize, word_tokenize
import nltk
from gutenberg.cleanup import strip_headers
import re
import heapq
# ref: https://stackabuse.com/text-summarization-with-nltk-in-python/
class Summarization(object):
    """Frequency-based extractive summarizer for Project Gutenberg texts.

    Pipeline (ref: https://stackabuse.com/text-summarization-with-nltk-in-python/):
    strip Gutenberg headers, clean the text, tokenize, weight words by
    normalized frequency, score sentences, and keep the top-scoring ones.
    """

    # Class-level default; overwritten per instance by summarise().
    textSummary = ""

    def __init__(self, text):
        # Keep an untouched copy for display and a working copy to mutate.
        self.original_striped_text = strip_headers(text).strip()
        self.text = strip_headers(text).strip()
        self.sentences = []
        self.word_frequencies = {}
        self.sentence_scores = {}
        self.preprocesstext()

    def preprocesstext(self):
        """Run the whole pipeline: clean, tokenize, weight, score, summarize."""
        # Keep only letters and sentence-ending punctuation, then collapse
        # runs of whitespace into single spaces.
        self.text = re.sub('[^a-zA-Z.?!]', ' ', self.text)
        self.text = re.sub(r'\s+', ' ', self.text)
        self.text = str(self.text)
        self.tokenizeSentences()
        self.frequencyWeighted()
        self.calculateWorkFrequence()
        self.calculateSentenceScores()
        self.summarise()

    def tokenizeSentences(self):
        """Split the cleaned text into sentences.

        NOTE: special characters were stripped beforehand in preprocesstext,
        which can affect sentence boundaries.
        """
        self.sentences = sent_tokenize(self.text)

    def frequencyWeighted(self):
        """Count occurrences of every non-stopword token in the text."""
        stopwords = nltk.corpus.stopwords.words('english')
        for token in word_tokenize(self.text):
            if token in stopwords:
                continue
            self.word_frequencies[token] = self.word_frequencies.get(token, 0) + 1

    def calculateWorkFrequence(self):
        """Normalize word counts to [0, 1] by the most frequent word."""
        peak = max(self.word_frequencies.values())
        for token in self.word_frequencies:
            self.word_frequencies[token] = self.word_frequencies[token] / peak

    def calculateSentenceScores(self):
        """Score sentences shorter than 50 words by summing word weights."""
        for sentence in self.sentences:
            for token in word_tokenize(sentence.lower()):
                if token not in self.word_frequencies:
                    continue
                if len(sentence.split(' ')) >= 50:
                    continue
                previous = self.sentence_scores.get(sentence, 0)
                self.sentence_scores[sentence] = previous + self.word_frequencies[token]

    def summarise(self):
        """Concatenate the 20 highest-scoring sentences into the summary."""
        top_sentences = heapq.nlargest(20, self.sentence_scores,
                                       key=self.sentence_scores.get)
        self.textSummary = ' '.join(top_sentences)

    def getTextSummarization(self):
        """Return the computed summary as a string."""
        return str(self.textSummary)

    def getOringalText(self):
        """Return the header-stripped original text (legacy typo in name kept
        for compatibility with existing callers)."""
        return str(self.original_striped_text)
print('This program is all about for loop itration')
# Iterating a string yields its characters one at a time.
for a in '12345':
    print(a)
for b in "hana afsal":
    print(b)
# NOTE(review): c and d are never used anywhere below — confirm they can go.
c=10
d=11
# Nested loops: every (x, y) pairing from the two lists.
for x in [1, 2, 3, 4]:
    for y in [2, 3, 6, 7]:
        # for getting coordinates
        print(f"({x+1}, {y+10})") # print formatted string
# Iterating a list of lists yields each inner list whole.
for z in [[1, 2, 3], [8, 7, 6], [1, 5, 4, 2, 7]]:
    print(z)
|
import pandas
import numpy as np
import random
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import PorterStemmer
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from scipy.sparse import csr_matrix
#load in the data (SMS spam corpus; latin-1 because the CSV is not UTF-8)
data = pandas.read_csv('spam.csv',encoding = "latin-1")
#keep only the useful columns: v1 = label (ham/spam), v2 = message text
data = data.loc[:,'v1':'v2']
#get size of the DataFrame
rows,cols = data.shape
#get stopwords (as a set for O(1) membership tests)
stop_words = set(stopwords.words('english'))
#initiate porter stemmer
ps = PorterStemmer()
#set up translator to remove punctuation in one str.translate pass
translator = str.maketrans('', '', string.punctuation)
#loop through and tokenize the strings, remove stopwords, and perform stemming
for i in range(0, rows):
    sms = data.iloc[i, 1]
    #tokenize the string while removing punctuation
    words = word_tokenize(sms.translate(translator))
    #drop stopwords and stem each remaining token
    filtered_words = [ps.stem(w) for w in words if w not in stop_words]
    #rejoin the list into a single normalized string
    w = " ".join(filtered_words)
    #DataFrame.set_value(..., takeable=True) was deprecated in 0.21 and
    #removed in pandas 1.0; .iat is the supported positional scalar setter
    data.iat[i, 1] = w
#convert to numpy arrays
features = np.array(data.loc[:,'v2'])
labels = np.array(data.loc[:,'v1'])
l = len(labels)
spam_index = [] #list containing the index for each spam sms
ham_index = [] #list containing the index for each non spam sms
#encode labels in place (ham -> 0, spam -> 1) and remember each class's rows
for i in range(0,l):
    if labels[i] == 'ham':
        labels[i] = 0
        ham_index.append(i)
    elif labels[i] == 'spam':
        labels[i] = 1
        spam_index.append(i)
    else:
        print('UNIDENTIFIED LABEL AT INDEX: '+str(i))
#count the occurrence of each word
CV = CountVectorizer()
TF = TfidfTransformer()
#fit and transform the features
features_count = CV.fit_transform(features) #contains a count of each word in each sms
TF.fit(features_count)
features_tfidf = TF.transform(features_count) #TfIdf representation
#convert to dense arrays in order to separate into test and training data and perform feature selection
features_count = features_count.toarray()
features_tfidf = features_tfidf.toarray()
#separate into training and testing data
#num = 5% of all rows; the test set takes num hams + num spams (10% total, balanced)
num = round(rows/20)
spam_num = len(spam_index)
ham_num = len(ham_index)
#select a balanced random sample of rows for testing
random_ham_index = random.sample(range(0,ham_num),num)
random_spam_index = random.sample(range(0,spam_num),num)
hams = [ham_index[i] for i in random_ham_index]
spams = [spam_index[i] for i in random_spam_index]
random_nums = hams+spams #the test data will have an equal number of spam and non-spam messages
features_count_test = features_count[random_nums,:]
features_tfidf_test = features_tfidf[random_nums,:]
labels_test = labels[random_nums]
#remove those rows to form the training sets
features_count_train = np.delete(features_count,random_nums,0)
features_tfidf_train = np.delete(features_tfidf,random_nums,0)
labels_train = np.delete(labels,random_nums,0)
#Use Univariate feature selection to select the k best features
#one selector per (representation, k) pair so each fitted selector can
#later transform the matching test matrix with the same feature mask
SK_count_10 = SelectKBest(chi2, k=10)
SK_count_50 = SelectKBest(chi2, k=50)
SK_count_100 = SelectKBest(chi2, k=100)
SK_count_200 = SelectKBest(chi2, k=200)
SK_tfidf_10 = SelectKBest(chi2, k=10)
SK_tfidf_50 = SelectKBest(chi2, k=50)
SK_tfidf_100 = SelectKBest(chi2, k=100)
SK_tfidf_200 = SelectKBest(chi2, k=200)
#fit and transform the training features
features_count_train_k10 = SK_count_10.fit_transform(features_count_train,list(labels_train)) #need labels as list as it fixes the unkown label type error
features_count_train_k50 = SK_count_50.fit_transform(features_count_train,list(labels_train))
features_count_train_k100 = SK_count_100.fit_transform(features_count_train,list(labels_train))
features_count_train_k200 = SK_count_200.fit_transform(features_count_train,list(labels_train))
features_tfidf_train_k10 = SK_tfidf_10.fit_transform(features_tfidf_train,list(labels_train))
features_tfidf_train_k50 = SK_tfidf_50.fit_transform(features_tfidf_train,list(labels_train))
features_tfidf_train_k100 = SK_tfidf_100.fit_transform(features_tfidf_train,list(labels_train))
features_tfidf_train_k200 = SK_tfidf_200.fit_transform(features_tfidf_train,list(labels_train))
#transform the testing features with the selectors fitted on training data
features_count_test_k10 = SK_count_10.transform(features_count_test)
features_count_test_k50 = SK_count_50.transform(features_count_test)
features_count_test_k100 = SK_count_100.transform(features_count_test)
features_count_test_k200 = SK_count_200.transform(features_count_test)
features_tfidf_test_k10 = SK_tfidf_10.transform(features_tfidf_test)
features_tfidf_test_k50 = SK_tfidf_50.transform(features_tfidf_test)
features_tfidf_test_k100 = SK_tfidf_100.transform(features_tfidf_test)
features_tfidf_test_k200 = SK_tfidf_200.transform(features_tfidf_test)
#save the arrays to files for the downstream training/evaluation scripts
np.save('features_count_test.npy',features_count_test)
np.save('features_tfidf_test.npy',features_tfidf_test)
np.save('features_count_train.npy',features_count_train)
np.save('features_tfidf_train.npy',features_tfidf_train)
np.save('labels_test.npy',labels_test)
np.save('labels_train.npy',labels_train)
np.save('features_count_train_k10.npy',features_count_train_k10)
np.save('features_count_train_k50.npy',features_count_train_k50)
np.save('features_count_train_k100.npy',features_count_train_k100)
np.save('features_count_train_k200.npy',features_count_train_k200)
np.save('features_tfidf_train_k10.npy',features_tfidf_train_k10)
np.save('features_tfidf_train_k50.npy',features_tfidf_train_k50)
np.save('features_tfidf_train_k100.npy',features_tfidf_train_k100)
np.save('features_tfidf_train_k200.npy',features_tfidf_train_k200)
np.save('features_count_test_k10.npy',features_count_test_k10)
np.save('features_count_test_k50.npy',features_count_test_k50)
np.save('features_count_test_k100.npy',features_count_test_k100)
np.save('features_count_test_k200.npy',features_count_test_k200)
np.save('features_tfidf_test_k10.npy',features_tfidf_test_k10)
np.save('features_tfidf_test_k50.npy',features_tfidf_test_k50)
np.save('features_tfidf_test_k100.npy',features_tfidf_test_k100)
np.save('features_tfidf_test_k200.npy',features_tfidf_test_k200)
|
X = input()  # the string to validate against the o/k/u/ch alphabet
def check(s):
    """Return True iff s can be fully segmented into 'o', 'k', 'u', 'ch'.

    Rewritten iteratively: the original recursed once per consumed token
    (s[1:] / s[2:]), which both copies the string repeatedly and hits the
    recursion limit on long inputs. Behavior is otherwise identical.
    """
    i = 0
    n = len(s)
    while i < n:
        if s[i] in "oku":
            i += 1          # single-letter token
        elif s[i:i + 2] == "ch":
            i += 2          # two-letter token
        else:
            return False    # no token matches at position i
    return True             # consumed the whole string ('' is vacuously True)
# Report whether the input consists solely of the tokens o, k, u, ch.
if check(X):
    print("YES")
else:
    print("NO")
# -*- coding:utf-8 -*-
from helper import singleton
@singleton
class Configer:
    """Singleton configuration holder (singleton-ness provided by
    helper.singleton)."""

    # Default config value; replaced via make_redis_config().
    a = 0

    def make_redis_config(self, config):
        """Store the given redis config on the instance.

        Bug fix: the original assigned to a *local* variable `a` and then
        fell through to `pass`, silently discarding the config.
        """
        self.a = config
if __name__ == '__main__':
    # Demonstrate the singleton: mutating one handle is visible via the other.
    c1 = Configer()
    c1.a = 10
    # print() with a single argument behaves identically on Python 2 and 3;
    # the original `print c1.a` statement is a SyntaxError on Python 3.
    print(c1.a)
    c2 = Configer()
    print(c2.a)
|
#Django code:
import json
def save_data(request):
    """Parse a JSON POST body and acknowledge receipt.

    Bug fix: the original constructed HttpResponseServerError/HttpResponse
    without `return`, so Django received None from every code path.
    """
    if request.method == 'POST':
        json_data = json.loads(request.body)  # request.raw_post_data w/ Django < 1.4
        try:
            # Accessing the key is the validation; the value itself is unused here.
            data = json_data['data']
        except KeyError:
            return HttpResponseServerError("Malformed data!")
        return HttpResponse("Got json data")
def save_events_json(request):
    """Log the raw body of an AJAX POST and reply OK.

    Fixes: Python 2 `print` statement -> print(); and the non-AJAX /
    non-POST paths previously returned None, which Django rejects with
    a ValueError — they now fall through to the same OK response.
    """
    if request.is_ajax():
        if request.method == 'POST':
            print('Raw Data: "%s"' % request.body)
    return HttpResponse("OK")
|
#Imports the class information from the cashRegister class
import CashRegister

# Single-letter menu commands; user input is lowercased before comparison.
ADD_MONEY = "a"
REMOVE_MONEY = "r"
TRANSFER_MONEY = "t"
LOCK_REGISTER = "l"
UNLOCK_REGISTER = "u"
DISPLAY_STATE = "s"
CLOSE_STORE = "c"
OPTION_LIST = [ADD_MONEY,REMOVE_MONEY,TRANSFER_MONEY,LOCK_REGISTER,UNLOCK_REGISTER,DISPLAY_STATE,CLOSE_STORE]
# Register numbers as the user types them...
REG_1 = 1
REG_2 = 2
# ...and the corresponding 0-based indices into the register list.
LIST_REG_1 = 0
LIST_REG_2 = 1
#name: menuOptions
#input: none
#output: displays the corresponding meaning behind every menu option
def menuOptions():
    """Print a one-line description of every menu command."""
    descriptions = (
        "Menu Options:",
        "A - Add money",
        "R - Remove money",
        "T - Transfer money",
        "L - Lock register",
        "U - Unlock register",
        "S - Display register state",
        "C - Close the store and quit",
    )
    for line in descriptions:
        print(line)
#Name: mainMenu
#input: myRegList; a list of registers
#Output: runs the program in an apropriate manner for the user
def mainMenu(myRegList):
    """Interactive dispatch loop: run menu commands until the store closes."""
    #gets an initial menu choice from the user,
    #ensuring both upper and lowercase entries work
    menuOptions()
    choice = input("Enter option (A, R, T, L, U, S, C): ")
    choice = choice.lower()
    #goes through each option as long as the user does not close the store
    while(choice != CLOSE_STORE):
        #ensures valid input from the user
        while(choice not in OPTION_LIST):
            print("\nInvalid menu option. Please try again. \n")
            choice = input("Enter option (A, R, T, L, U, S, C): ")
            choice = choice.lower()
        #adds money to the register desired
        if(choice == ADD_MONEY):
            addMoney(myRegList)
        #removes money from register desired
        elif(choice == REMOVE_MONEY):
            removeMoney(myRegList)
        #moves money from one register to the other
        elif(choice == TRANSFER_MONEY):
            transferMoney(myRegList)
        #locks a register
        elif(choice == LOCK_REGISTER):
            lockRegister(myRegList)
        #unlocks a register
        elif (choice == UNLOCK_REGISTER):
            unlockRegister(myRegList)
        #displays the state of the register
        elif(choice == DISPLAY_STATE):
            displayRegState(myRegList)
        #gets a new choice from the user if they didn't close the store,
        #ensuring both upper and lowercase entries work
        if(choice != CLOSE_STORE):
            menuOptions()
            choice = input("Enter option (A, R, T, L, U, S, C): ")
            choice = choice.lower()
    #closes the store; gets the total money out of each register
    print("Store Closing")
    totalCash = 0;
    for x in range(len(myRegList)):
        totalCash += myRegList[x].calcTotal()
    print("Total amount removed: $", totalCash , "\n")
    #removes the money from each register and locks them for the night
    for y in range(len(myRegList)):
        myRegList[y].close()
        print("Register ", y + 1 )
        myRegList[y].displayState()
        print()
    print("Thank you for using this application! Have a good evening")
#name: getRegChoice()
#input: none
#output: a number; the corresponding list index for the chosen register
def getRegChoice():
    """Prompt until the user enters register 1 or 2; return its list index."""
    choice = int(input("Register number (1 or 2): "))
    # Keep re-prompting until a valid register number is supplied.
    while choice not in (REG_1, REG_2):
        print(choice, " is not a valid register number \n")
        choice = int(input("Register number (1 or 2): "))
    # Map the 1-based register number onto the 0-based list index.
    return LIST_REG_1 if choice == REG_1 else LIST_REG_2
#name: displayRegState()
#input: myRegList; a list of registers
#output: the state of the register is displayed to the user
def displayRegState(myRegList):
    """Show the state of whichever register the user selects."""
    reg_index = getRegChoice()
    myRegList[reg_index].displayState()
#name: addMoney()
#input: myRegList; a list of registers
#output: changes the value of the register the user asked for
def addMoney(myRegList):
    """Add money to whichever register the user selects."""
    reg_index = getRegChoice()
    myRegList[reg_index].addMoney()
#name: removeMoney()
#input: myRegList; a list of registers
#output: changes the value of the register the user asked for
def removeMoney(myRegList):
    """Remove money from whichever register the user selects."""
    reg_index = getRegChoice()
    myRegList[reg_index].removeMoney()
#name: transferMoney()
#input: myRegList
#output: moves money from one register to another
def transferMoney(myRegList):
    """Move money from the user-chosen register into the other one."""
    print("Enter register to transfer money from.")
    source = getRegChoice()
    # With exactly two registers, the destination is whichever one
    # was not chosen as the source.
    destination = LIST_REG_2 if source == LIST_REG_1 else LIST_REG_1
    myRegList[source].transferMoney(myRegList[destination])
#name: lockRegister()
#input: myRegList; a list of registers
#output: changes the locked status of a register
def lockRegister(myRegList):
    """Lock whichever register the user selects."""
    reg_index = getRegChoice()
    myRegList[reg_index].lock()
#name: unlockRegister()
#input: myRegList; a list of registers
#output: changes the locked status of a register
def unlockRegister(myRegList):
    """Unlock whichever register the user selects."""
    reg_index = getRegChoice()
    myRegList[reg_index].unlock()
#name: initRegList()
#input: none
#output: initializes the list of registers
def initRegList():
    """Prompt for starting bill counts and build two identical registers."""
    print("Good Morning! Please initialize your registers:")
    ones = int(input("Number of 1s: "))
    fives = int(input("Number of 5s: "))
    tens = int(input("Number of 10s: "))
    twenties = int(input("Number of 20s: "))
    # Both registers open the day stocked with the same denominations.
    return [
        CashRegister.cashRegister(ones, fives, tens, twenties),
        CashRegister.cashRegister(ones, fives, tens, twenties),
    ]
#name: main()
#input: none
#output: initializes the list of registers and
#proceeds to run the program for the user to change them as they please
if __name__ == "__main__":
    #initializes the two registers from user-supplied bill counts
    myRegisterList = initRegList()
    #runs the interactive menu loop on the registers until the store closes
    mainMenu(myRegisterList)
|
import numpy as np
import pyccl as ccl
import pytest

# Maximum allowed relative deviation per emulator flavour.
BEMULIN_TOLERANCE = 1e-3  # linear power spectrum emulator
BEMUNL_TOLERANCE = 5e-3   # nonlinear power spectrum emulator
BEMBAR_TOLERANCE = 1e-3   # baryonic boost emulator
def test_baccoemu_linear_As_sigma8():
    """Linear emulator: sigma8- and A_s-specified cosmologies must agree."""
    emu = ccl.BaccoemuLinear()
    shared = dict(
        Omega_c=0.27, Omega_b=0.05, h=0.67, n_s=0.96, Neff=3.046,
        mass_split='normal', m_nu=0.1, Omega_g=0, Omega_k=0, w0=-1, wa=0)
    # Same cosmology twice, parameterized by sigma8 and by A_s respectively.
    cosmo_sigma8 = ccl.Cosmology(sigma8=0.83, **shared)
    cosmo_As = ccl.Cosmology(A_s=2.2194e-09, **shared)
    _, pk_sigma8 = emu.get_pk_at_a(cosmo_sigma8, 1.0)
    _, pk_As = emu.get_pk_at_a(cosmo_As, 1.0)
    rel_diff = np.abs(pk_sigma8 / pk_As - 1)
    assert np.allclose(rel_diff, 0, atol=BEMULIN_TOLERANCE, rtol=0)
def test_baccoemu_nonlinear_As_sigma8():
    """Nonlinear emulator: sigma8- and A_s-specified cosmologies must agree."""
    emu = ccl.BaccoemuNonlinear()
    shared = dict(
        Omega_c=0.27, Omega_b=0.05, h=0.67, n_s=0.96, Neff=3.046,
        mass_split='normal', m_nu=0.1, Omega_g=0, Omega_k=0, w0=-1, wa=0)
    # Same cosmology twice, parameterized by sigma8 and by A_s respectively.
    cosmo_sigma8 = ccl.Cosmology(sigma8=0.83, **shared)
    cosmo_As = ccl.Cosmology(A_s=2.2194e-09, **shared)
    _, pk_sigma8 = emu.get_pk_at_a(cosmo_sigma8, 1.0)
    _, pk_As = emu.get_pk_at_a(cosmo_As, 1.0)
    rel_diff = np.abs(pk_sigma8 / pk_As - 1)
    assert np.allclose(rel_diff, 0, atol=BEMUNL_TOLERANCE, rtol=0)
def test_baccoemu_baryons_boost():
    """Boost factor matches the ratio of baryonified to gravity-only P(k)."""
    baryons = ccl.BaccoemuBaryons()
    nl_emu = ccl.BaccoemuNonlinear()
    cosmo = ccl.Cosmology(
        Omega_c=0.27, Omega_b=0.05, h=0.67, sigma8=0.83, n_s=0.96,
        Neff=3.046, mass_split='normal', m_nu=0.1, Omega_g=0, Omega_k=0,
        w0=-1, wa=0, matter_power_spectrum=nl_emu)
    wavenumbers = np.logspace(-2, 0.5, 100)
    # Direct boost evaluation...
    direct_boost = baryons.boost_factor(cosmo, wavenumbers, 1)
    # ...versus the ratio of the baryonified to the gravity-only spectrum.
    pk_gravity = cosmo.get_nonlin_power()
    pk_baryon = baryons.include_baryonic_effects(cosmo, pk_gravity)
    ratio = pk_baryon(wavenumbers, 1) / pk_gravity(wavenumbers, 1)
    rel_diff = np.abs(ratio / direct_boost - 1)
    assert np.allclose(rel_diff, 0, atol=BEMBAR_TOLERANCE, rtol=0)
def test_baccoemu_baryons_changepars():
    """update_parameters must propagate into the stored bcm_params dict."""
    baryons = ccl.BaccoemuBaryons()
    baryons.update_parameters(log10_M_c=12.7, log10_eta=-0.4)
    assert baryons.bcm_params['M_c'] == 12.7
    assert baryons.bcm_params['eta'] == -0.4
def test_baccoemu_baryons_a_range():
    """Scale factors below the emulator's a_min must raise ValueError."""
    baryons = ccl.BaccoemuBaryons()
    cosmo = ccl.CosmologyVanillaLCDM()
    too_early_a = baryons.a_min * 0.9
    with pytest.raises(ValueError):
        baryons.boost_factor(cosmo, 1e-1, too_early_a)
def test_baccoemu_baryons_As_sigma8():
    """Baryon boost agrees between sigma8- and A_s-specified cosmologies."""
    baryons = ccl.BaccoemuBaryons()
    shared = dict(
        Omega_c=0.27, Omega_b=0.05, h=0.67, n_s=0.96, Neff=3.046,
        mass_split='normal', m_nu=0.1, Omega_g=0, Omega_k=0, w0=-1, wa=0)
    # Same cosmology twice, parameterized by sigma8 and by A_s respectively.
    cosmo_sigma8 = ccl.Cosmology(sigma8=0.83, **shared)
    cosmo_As = ccl.Cosmology(A_s=2.2194e-09, **shared)
    wavenumbers = np.logspace(-2, 0.5, 100)
    boost_sigma8 = baryons.boost_factor(cosmo_sigma8, wavenumbers, 1)
    boost_As = baryons.boost_factor(cosmo_As, wavenumbers, 1)
    rel_diff = np.abs(boost_sigma8 / boost_As - 1)
    assert np.allclose(rel_diff, 0, atol=BEMUNL_TOLERANCE, rtol=0)
|
import os
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader
class Data:
    """Build ImageNet-style train/val DataLoaders from args.data_dir.

    Exposes `loader_train` and `loader_test`. Expects args to provide:
    gpus, student_model, data_dir, train_batch_size, eval_batch_size.
    """

    def __init__(self, args):
        # Pin host memory only when training on GPU (speeds up H2D copies).
        pin_memory = args.gpus is not None

        # Inception variants expect 299x299 inputs; everything else 224.
        scale_size = 299 if args.student_model.startswith('inception') else 224

        traindir = os.path.join(args.data_dir, 'train')
        valdir = os.path.join(args.data_dir, 'val')
        # Standard ImageNet channel statistics.
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])

        trainset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.RandomHorizontalFlip(),
                # Upscale to the model's expected input after the 224 crop.
                transforms.Resize(scale_size),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1),
                transforms.ToTensor(),
                normalize,
            ]))

        # NOTE(review): an unused CIFAR-style `transform_test` Compose was
        # built here in the original; it was dead code and has been removed.

        self.loader_train = DataLoader(
            trainset, batch_size=args.train_batch_size, shuffle=True,
            num_workers=2, pin_memory=pin_memory
        )
        self.loader_test = DataLoader(
            datasets.ImageFolder(valdir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.Resize(scale_size),
                transforms.ToTensor(),
                normalize,
            ])),
            batch_size=args.eval_batch_size, shuffle=False,
            # Consistency fix: the val loader previously hard-coded
            # pin_memory=True even on CPU-only runs.
            num_workers=2, pin_memory=pin_memory)
path('persona/new', views.new_persona, name='create_persona'),
# NOTE(review): orphan model-field fragment — the enclosing models.Model
# class definition is not visible in this chunk.
nombre = models.CharField(max_length=30)
apellido = models.CharField(max_length=30)
# NOTE(review): IntegerField does not accept max_length (Django raises a
# system-check error); use CharField or validators for bounded numbers.
tipodocumento = models.IntegerField(max_length=1)
documento = models.IntegerField(max_length=15)
residencia = models.CharField(max_length=100)
# NOTE(review): missing parentheses — this assigns the DateField *class*
# itself, not a field instance; should be models.DateField().
fechanacimiento = models.DateField
email = models.CharField(max_length=50)
telefono = models.IntegerField(max_length=15)
usuario = models.CharField(max_length=30)
# NOTE(review): plaintext password column — prefer Django's auth framework
# with hashed passwords.
password = models.CharField(max_length=30)
#!/usr/bin/python
# Google Code Jam solver (Python 2): builds the lexicographically largest
# word by prepending/appending each input character in turn.
f = open('A-large.in', 'r')
o = open('output', 'w')
T = f.readline()  # first line: number of test cases
S = ""    # current input word (keeps its trailing newline)
out = ""  # word built so far for the current case
def insertRight(letter):
    """Append letter to the global working string `out`."""
    global out
    out = out + letter
def insertLeft(letter):
    """Prepend letter to the global working string `out`.

    Bug fix: the original began with `for x in range(len(S), 0):` — an
    always-empty range (missing the -1 step) — whose body assigned into a
    str by index, which would raise TypeError if it ever executed. The
    prepend below (`letter + out[0:]` == `letter + out`) is all the
    function ever actually did.
    """
    global out
    out = letter + out
# Process each test case. Python 2 only: f.next() and xrange have no
# direct Python 3 equivalents here.
for x in range(1, int(T)+1):
    S = f.next()
    out = ""
    for i in xrange(0, len(S)):
        if (i > 0):
            # Greedy rule: prepend when the new char is >= the current
            # first char (keeps the word lexicographically largest),
            # otherwise append.
            if (S[i] >= out [0]):
                insertLeft(S[i])
            else:
                insertRight(S[i])
        else:
            out += S[i]
    # S retains its trailing newline, so each written case ends its line.
    o.write("Case #" + str(x) + ": " + out)
|
#Finding an Observation's Nearest Neighbors
#load libraries
from sklearn import datasets
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
#load data
iris = datasets.load_iris()
features = iris.data
target = iris.target
#create standardScaler
standardscaler = StandardScaler()
#Standardize features (zero mean, unit variance) before distance queries
features_standardized = standardscaler.fit_transform(features)
#two nearest neighbors (default Minkowski metric)
nearest_neighbors = NearestNeighbors(n_neighbors=2).fit(features_standardized)
#create an observation
# NOTE(review): this query point is in the *standardized* feature space —
# it is not scaled through standardscaler first; confirm intended.
new_observation = [1, 1, 1, 1]
#find distance and indices of the observation's nearest neighbors
distances, indices = nearest_neighbors.kneighbors([new_observation])
# View distances
print("Distance Minkowski: ", distances)
#view the nearest neighbors
print(features_standardized[indices])
# Find two nearest neighbors based on euclidean distance
nearestneighbors_euclidean = NearestNeighbors(n_neighbors=2, metric='euclidean').fit(features_standardized)
distances, indices = nearestneighbors_euclidean.kneighbors([new_observation])
# View distances
print("Distance Euclidean: n_neighbors=2", distances)
# Find each observation's three nearest neighbors
# based on euclidean distance (including itself)
nearestneighbors_euclidean = NearestNeighbors(n_neighbors=3, metric="euclidean").fit(features_standardized)
distances, indices = nearestneighbors_euclidean.kneighbors([new_observation])
# View distances
print("Distance Euclidean: n_neighbors=3", distances)
# List of lists indicating each observation's 3 nearest neighbors
# (including itself; kneighbors_graph reuses the fitted n_neighbors=3)
nearest_neighbors_with_self = nearestneighbors_euclidean.kneighbors_graph( features_standardized).toarray()
# Remove 1's marking an observation is a nearest neighbor to itself
for i, x in enumerate(nearest_neighbors_with_self):
    x[i] = 0
# View first observation's two nearest neighbors
print("nearest neighbors with self: ", nearest_neighbors_with_self[0])
|
from mrjob.job import MRJob
class MRSpentByCustomers(MRJob):
    """MapReduce job: total amount spent per customer.

    Input lines are CSV triples "custid,itemid,amount".
    """

    def mapper(self, _, line):
        # Emit (customer id, amount) for every purchase record.
        (custid,itemid,amount) = line.split(',')
        yield custid,float(amount)

    def reducer(self,custid,amounts):
        # Sum every amount seen for one customer.
        yield custid, sum(amounts)

if __name__ == '__main__':
    MRSpentByCustomers.run()
from utilities.config import db_config, finnhub_config, MAX_TRY
from utilities.postgres_utils import cursor, connection
from utilities.finnhub_utils import finnhub_connection
from utilities.stringio_utils import buildStringIO
from logging_conf import MyLogger
from datetime import timezone, date, timedelta, datetime
from typing import List
import pandas as pd
logger = MyLogger.logger
def gen_splits(date_from: datetime, date_to: datetime):
    """Yield stock-split data from Finnhub for every symbol in data_input/symbol_list.txt.

    Each yielded dict has 'symbol', 'data' (the raw API response, or None
    after MAX_TRY failed attempts) and 't' (perf_counter timestamp taken
    before the first attempt; callers use it for rate limiting).

    Note: the annotations previously said ``str``, but the body calls
    ``.strftime`` on both bounds, so they must be datetimes.
    """
    import time
    with open('data_input/symbol_list.txt') as f:
        symbols = [line.strip() for line in f]
    with finnhub_connection(finnhub_config['api']) as finnhub_client:
        for symbol in symbols:
            res = {}
            stock_data = None
            trials = MAX_TRY
            t1 = time.perf_counter()
            # Retry up to MAX_TRY times; each failure is only logged at info
            # level, the final failure is logged as an error below.
            while trials:
                try:
                    stock_data = finnhub_client.stock_splits(
                        symbol,
                        _from=date_from.strftime('%Y-%m-%d'),
                        to=date_to.strftime('%Y-%m-%d')
                    )
                    break
                except Exception:
                    trials -= 1
                    logger.info(f'Fail to read split data for {symbol}')
            if not trials:
                logger.error(f'Cannot get valid data of {symbol} after {MAX_TRY} tries.')
            if stock_data:
                res['symbol'] = symbol
                res['data'] = stock_data
                res['t'] = t1
            else:
                res['symbol'] = symbol
                res['data'] = None
                res['t'] = t1
            yield res
def gen_daily(date_from: datetime, date_to: datetime, symbols: List[str]):
    """Yield daily candle data from Finnhub for each requested symbol.

    Yields dicts with 'symbol', 'data' (a DataFrame of candles, or None when
    the API failed MAX_TRY times or reported a non-'ok' status) and 't'
    (perf_counter timestamp; callers use it for rate limiting).
    """
    import time
    with finnhub_connection(finnhub_config['api']) as finnhub_client:
        for symbol in symbols:
            # t1 = time.perf_counter()
            res = {}
            stock_data = None
            trials = MAX_TRY
            t1 = time.perf_counter()
            while trials:
                try:
                    stock_data = finnhub_client.stock_candles(
                        symbol, 'D',
                        int(date_from.timestamp()),
                        int(date_to.timestamp()))
                    break
                except Exception:
                    trials -= 1
                    logger.info(f'{symbol} fail to get data this time')
            if not trials:
                logger.error(f'Cannot get valid data of {symbol} after {MAX_TRY} tries.')
            if stock_data and stock_data['s'] == 'ok':
                df = pd.DataFrame(stock_data)
                # Epoch seconds -> pandas timestamps; volumes to int64.
                df['t'] = pd.to_datetime(df['t'], unit='s')
                df['v'] = df['v'].astype('int64')
                df.drop(columns='s', inplace=True)
                # Assumes the remaining columns are c, h, l, o, t, v in that
                # order — TODO confirm against the Finnhub payload.
                df.columns = ['close', 'high', 'low', 'open', 'time', 'volume']
                df['symbol'] = symbol
                res['symbol'] = symbol
                res['data'] = df
                res['t'] = t1
            else:
                res['symbol'] = symbol
                res['data'] = None
                res['t'] = t1
            yield res
def load_daily_data(date_from: datetime, date_to: datetime, symbols: List[str]):
    """Bulk-load daily candles from gen_daily() into the 'daily' table via COPY.

    Throttles to the API rate limit: after every 59 symbols, if they took
    less than a minute, sleeps until roughly a minute has passed.
    """
    import time
    count = 0
    seconds = 0
    with cursor(
        db_config['host'],
        db_config['port'],
        db_config['user'],
        db_config['password'],
        db_config['dbname']) as cur:
        conn = cur.connection
        for stock_data in gen_daily(date_from, date_to, symbols):
            t1 = stock_data['t']
            symbol = stock_data['symbol']
            length = len(stock_data['data']) if stock_data['data'] is not None else 0
            if stock_data['data'] is not None:
                with buildStringIO() as temp_file:
                    # NOTE(review): pandas renamed line_terminator to
                    # lineterminator in 1.5 — confirm the pinned version.
                    stock_data['data'].to_csv(temp_file, header=False, index=False, line_terminator='\n')
                    temp_file.seek(0)
                    # COPY is much faster than row-by-row INSERTs.
                    cur.copy_from(temp_file, 'daily', sep=',', columns=['close', 'high', 'low', 'open', 'time', 'volume', 'symbol'])
                    conn.commit()
                t2 = time.perf_counter()
                count += 1
            else:
                t2 = time.perf_counter()
                count += 1
                logger.error(f'{symbol} has no data')
            seconds += t2-t1
            if count == 59:
                if seconds < 60:
                    logger.info(f'sleep {62-seconds} seconds')
                    time.sleep(62 - seconds)
                count = 0
                seconds = 0
            logger.info(f'It takes {t2 - t1} seconds to handle {symbol}, total records count is {length}, {count} stocks in 1 minute')
def load_splits(date_from: datetime, date_to: datetime):
    """Insert stock-split rows gathered by gen_splits() into the splits table.

    Applies the same 59-symbols-per-minute throttling as load_daily_data.
    """
    import time
    count = 0
    seconds = 0
    with cursor(
        db_config['host'],
        db_config['port'],
        db_config['user'],
        db_config['password'],
        db_config['dbname']) as cur:
        # conn = cur.connection
        for stock_data in gen_splits(date_from, date_to):
            t1 = stock_data['t']
            symbol = stock_data['symbol']
            if stock_data['data'] is not None:
                # logger.info(str(stock_data['data']))
                # NOTE(review): values are interpolated straight into the SQL
                # text — safe only while the API payload is trusted; prefer
                # executemany with parameter placeholders.
                data = ',\n'.join(
                    ["('{}', '{}', {}, {})".format(stock["symbol"], stock["date"], stock["fromFactor"], stock["toFactor"])
                    for stock in stock_data['data']]
                )
                insert_query = f"""
                INSERT INTO splits
                (symbol, split_date, fromfactor, tofactor)
                VALUES
                {data};
                """
                # NOTE(review): no explicit commit here (unlike
                # load_daily_data) — confirm the cursor context manager
                # commits on exit.
                cur.execute(insert_query)
                t2 = time.perf_counter()
                count += 1
            else:
                t2 = time.perf_counter()
                count += 1
                logger.error(f'{symbol} has no data')
            seconds += t2-t1
            if count == 59:
                if seconds < 60:
                    logger.info(f'sleep {62-seconds} seconds')
                    time.sleep(62 - seconds)
                count = 0
                seconds = 0
            logger.warning(f'It takes {t2 - t1} seconds to handle {symbol}, {count} stocks in 1 minute')
#coding:utf-8
from flask import jsonify ,request
from . import api
import json
import os
from random import randint
@api.route('/eatwhat/', methods = ['GET'])
def eatwhat():
    """Return a random dining suggestion as JSON: {"location": <name>}."""
    items = ['东一', '东二', '学子', '桂香园', '博雅园', '外卖']
    # Derive the upper bound from the list so adding/removing an option can
    # never bias or break the draw (was hard-coded randint(0, 5)).
    item = items[randint(0, len(items) - 1)]
    return jsonify({
        "location": item
    })
|
def solution(K, A):
    """Count how many disjoint consecutive groups of A sum to at least K.

    Scans A once, accumulating values; whenever the running total reaches K
    a group is closed and the total resets to zero.
    """
    groups = 0
    running = 0
    for length in A:
        running += length
        if running >= K:
            running = 0
            groups += 1
    return groups
|
class Solution(object):
    def addBinary(self, a, b):
        """Add two binary strings and return their sum in binary.

        :type a: str
        :type b: str
        :rtype: str

        Example: a = "11", b = "1" -> "100".
        """
        # Make `a` the longer operand and left-pad `b` with zeros to match.
        if len(a) < len(b):
            a, b = b, a
        b = b.rjust(len(a), '0')
        digits = []
        carry = 0
        # Walk both strings from the least-significant bit, tracking carry.
        for x, y in zip(reversed(a), reversed(b)):
            carry, bit = divmod(int(x) + int(y) + carry, 2)
            digits.append(str(bit))
        if carry:
            digits.append('1')  # a final carry adds one leading digit
        return ''.join(reversed(digits))
|
from typing import List
class Solution:
    """LeetCode 130 "Surrounded Regions" via union-find.

    Every 'O' cell is a node; boundary 'O's are unioned with a dummy node.
    After unioning adjacent 'O's, any 'O' not connected to the dummy is
    enclosed and flipped to 'X'.
    """

    def solve(self, board: List[List[str]]) -> None:
        """
        Do not return anything, modify board in-place instead.
        """
        if not board: return
        rows = len(board)
        cols = len(board[0])
        dummy = rows * cols  # sentinel node meaning "touches the border"
        p = {dummy: dummy}
        # Register every 'O' cell as its own union-find root.
        for row in range(rows):
            for col in range(cols):
                if board[row][col] == "O":
                    p[row * cols + col] = row * cols + col
        directions = [[-1, 0], [1, 0], [0, -1], [0, 1]]
        for row in range(rows):
            for col in range(cols):
                if board[row][col] == "O":
                    if self.isBoundary(board, row, col):
                        self.union(p, row * cols + col, dummy)
                    else:
                        # Idiom fix: iterate the directions directly — the
                        # index produced by enumerate() was never used.
                        for dirs in directions:
                            newRow, newCol = row + dirs[0], col + dirs[1]
                            if self.isValid(board, newRow, newCol):
                                self.union(p, row * cols + col, newRow * cols + newCol)
        for row in range(rows):
            for col in range(cols):
                if board[row][col] == "O":
                    # Only cells disconnected from the border dummy are
                    # captured (the redundant 'O' -> 'O' write was removed).
                    if self.parent(p, row * cols + col) != self.parent(p, dummy):
                        board[row][col] = "X"

    def isBoundary(self, board, row, col):
        """Return True if (row, col) lies on the outer edge of the board."""
        if row == 0 or row == len(board) - 1:
            return True
        if col == 0 or col == len(board[0]) - 1:
            return True
        return False

    def isValid(self, grid, row, col):
        """Return True if (row, col) is inside the grid and holds an 'O'."""
        if row < 0 or row >= len(grid):
            return False
        if col < 0 or col >= len(grid[0]):
            return False
        if grid[row][col] == "X":
            return False
        return True

    def union(self, p, i, j):
        """Merge the sets containing i and j."""
        p1 = self.parent(p, i)
        p2 = self.parent(p, j)
        p[p1] = p2

    def parent(self, p, i):
        """Find i's root, compressing the path along the way."""
        root = i
        while p[root] != root:
            root = p[root]
        while p[i] != i:
            x = i
            i = p[i]
            p[x] = root
        return root
|
class OldPhone:
    """A basic phone with brand accessors and a call action."""

    __brand = ''  # class-level default; setBrand shadows it per instance

    def setBrand(self, brand):
        """Record this phone's brand."""
        self.__brand = brand

    def getBrand(self):
        """Return the recorded brand (empty string if never set)."""
        return self.__brand

    def call(self, name):
        """Announce an outgoing call to *name*."""
        print("正在给{}打电话...".format(name))
class NewPhone(OldPhone):
    """A smartphone: voice-dials first, then delegates the call to OldPhone."""

    def call(self, name):
        """Announce voice dialing, then place the call via the parent class."""
        print("语音拨号中...")
        super().call(name)

    def phoneIntro(self):
        """Print a short promo line mentioning this phone's brand."""
        print("品牌为:{}的手机很好用...".format(self.getBrand()))
if __name__ == "__main__":
    # Demo: identical output when executed as a script, but the side effects
    # no longer fire when the module is merely imported.
    n = NewPhone()
    n.setBrand('vivo')
    n.phoneIntro()
    n.call('lily')
|
def get_node_pool_oauth_scopes(oauth_scopes):
    """Expand short scope names into full Google OAuth scope URLs.

    E.g. "compute" -> "https://www.googleapis.com/auth/compute".
    """
    # Idiom: a comprehension replaces the manual append loop.
    return ["https://www.googleapis.com/auth/" + scope for scope in oauth_scopes]
def generate_node_pools(cluster_properties):
    """Build the GKE node-pool configuration list from deployment properties.

    Reads "nodePools" from cluster_properties; machineType defaults to
    "f1-micro" and imageType to "COS" when absent or falsy.
    """
    pools = []
    for pool in cluster_properties.get("nodePools"):
        cfg = pool.get("config")
        pools.append({
            "name": pool.get("name"),
            "config": {
                "machineType": cfg.get("machineType") or "f1-micro",
                "oauthScopes": get_node_pool_oauth_scopes(cfg.get("oauthScopes")),
                "imageType": cfg.get("imageType") or "COS",
            },
            "initialNodeCount": pool.get("initialNodeCount"),
            "locations": pool.get("locations"),
        })
    return pools
def GenerateConfig(context):
    """Deployment Manager entry point: return the k8s cluster resource list."""
    # Bug fix: context.properties['cluster'] raised KeyError when the key was
    # absent, so the `or` fallback to the deployment name could never fire.
    cluster_properties = context.properties.get('cluster') or context.env["name"]
    node_pools = generate_node_pools(cluster_properties)
    resources = [
        {
            "name": "k8s-cluster",
            "type": "container.v1.cluster",
            "properties": {
                "zone": context.properties["zone"],
                "cluster": {
                    "name": cluster_properties.get("name"),
                    "nodePools": node_pools
                }
            }
        }
    ]
    return {"resources": resources}
|
#!/usr/bin/env python
from setuptools import setup
from cron_conf import __version__
# Package metadata / entry point for the cron_conf tool.
setup(
    name='cron_conf',
    version=__version__,  # single-sourced from cron_conf.__version__
    description='Cronf conf',
    author='Sogeti AB',
    author_email='supportwebb@sogeti.se',
    classifiers=['License :: Other/Proprietary License'],
    url='https://github.com/elinfs/bobcat-validator-ul',
    packages=[
        'cron_conf',
    ],
    # Ship the YAML schemas alongside the code.
    package_data={'cron_conf': [
        'schema/*.yaml'
    ]},
    install_requires=[
        'azure-storage-blob',
        'setuptools',
        'pyyaml',
        'jsonschema'
    ],
    data_files=[
    ],
    # Installs a `cron_conf` console command mapping to cron_conf.main:main.
    entry_points={
        "console_scripts": [
            "cron_conf = cron_conf.main:main"
        ]
    }
)
|
from django.db import models
from model_utils.models import TimeStampedModel
from api.models import Fleet, User, Machinery, Site
from api.models.tools import Tool
class FleetHistory(TimeStampedModel):
    """Lifecycle event (assignment / breakdown / fix) recorded for a fleet."""
    history_type_choices = (
        ('assignment', 'Assignment'),
        ('broken_down', 'Broken Down'),
        ('fixed', 'Fixed'),
    )
    fleet = models.ForeignKey(to=Fleet, on_delete=models.CASCADE)
    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    history_type = models.CharField(max_length=150, choices=history_type_choices)
    # Repair duration; nullable because it only applies to 'fixed' events.
    time_to_fix = models.IntegerField(blank=True, null=True)
    site = models.ForeignKey(to=Site, null=True, blank=True, on_delete=models.CASCADE)
class MachineHistory(TimeStampedModel):
    """Lifecycle event (assignment / breakdown / fix) recorded for a machine."""
    history_type_choices = (
        ('assignment', 'Assignment'),
        ('broken_down', 'Broken Down'),
        ('fixed', 'Fixed'),
    )
    machine = models.ForeignKey(to=Machinery, on_delete=models.CASCADE)
    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    history_type = models.CharField(max_length=150, choices=history_type_choices)
    # Repair duration; nullable because it only applies to 'fixed' events.
    time_to_fix = models.IntegerField(blank=True, null=True)
    site = models.ForeignKey(to=Site, null=True, blank=True, on_delete=models.CASCADE)
class ToolHistory(TimeStampedModel):
    """Lifecycle event (assignment / breakdown / fix) recorded for a tool."""
    history_type_choices = (
        ('assignment', 'Assignment'),
        ('broken_down', 'Broken Down'),
        ('fixed', 'Fixed'),
    )
    tool = models.ForeignKey(to=Tool, on_delete=models.CASCADE)
    user = models.ForeignKey(to=User, on_delete=models.CASCADE)
    history_type = models.CharField(max_length=150, choices=history_type_choices)
    # Repair duration; nullable because it only applies to 'fixed' events.
    time_to_fix = models.IntegerField(blank=True, null=True)
    site = models.ForeignKey(to=Site, null=True, blank=True, on_delete=models.CASCADE)
|
# ------------------------------------------------------------
# Copyright (c) 2017-present, SeetaTech, Co.,Ltd.
#
# Licensed under the BSD 2-Clause License.
# You should have received a copy of the BSD 2-Clause License
# along with the software. If not, See,
#
# <https://opensource.org/licenses/BSD-2-Clause>
#
# ------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from dragon.vm.torch.ops.modules.base import BaseModule
class Reduce(BaseModule):
    """Module wrapper for the backend 'Reduce' op (default operation: SUM)."""

    def __init__(self, key, ctx, **kwargs):
        super(Reduce, self).__init__(key, ctx, **kwargs)
        self.operation = kwargs.get('operation', 'SUM')
        self.axis = kwargs.get('axis', -1)
        self.keep_dims = kwargs.get('keep_dims', True)
        self.register_arguments()
        self.register_op()

    def register_arguments(self):
        """No Arguments for reduce op.

        Mutable ``axis`` and ``keep_dims`` is non-trivial for backend,
        we simply hash them in the persistent key.
        """
        pass

    def register_op(self):
        # Static op descriptor; all parameters are baked into the arguments.
        self.op_meta = {
            'op_type': 'Reduce',
            'n_inputs': 1, 'n_outputs': 1,
            'arguments': {
                'operation': self.operation,
                'axis': self.axis,
                'keep_dims': self.keep_dims
            }
        }

    def forward(self, x, y):
        """Run the reduction on x, writing into y when given."""
        inputs = [x]; self.unify_devices(inputs)
        # Reuse the caller-provided output tensor, otherwise allocate one
        # with the input's dtype.
        outputs = [y] if y else [self.register_output(x.dtype)]
        return self.run(inputs, outputs)
class ArgReduce(BaseModule):
    """Module wrapper for the backend 'ArgReduce' op (ARGMAX/ARGMIN/MAX/MIN)."""

    def __init__(self, key, ctx, **kwargs):
        super(ArgReduce, self).__init__(key, ctx, **kwargs)
        self.operation = kwargs.get('operation', 'ARGMAX')
        self.axis = kwargs.get('axis', -1)
        self.keep_dims = kwargs.get('keep_dims', True)
        self.top_k = kwargs.get('top_k', 1)
        self.register_arguments()
        self.register_op()

    def register_arguments(self):
        """No Arguments for reduce op.

        Mutable ``axis`` and ``keep_dims`` is non-trivial for backend,
        we simply hash them in the persistent key.
        """
        pass

    def register_op(self):
        self.op_meta = {
            'op_type': 'ArgReduce',
            'n_inputs': 1, 'n_outputs': 1,
            'arguments': {
                # The backend expects the ARG-prefixed name; MAX/MIN are
                # normalized to ARGMAX/ARGMIN here.
                'operation': self.operation if 'ARG' in self.operation \
                    else 'ARG' + self.operation,
                'axis': self.axis,
                'keep_dims': self.keep_dims,
                'top_k': self.top_k,
            }
        }

    def forward(self, x, y):
        """Run the arg-reduction; returns indices, values, or both (see below)."""
        inputs = [x]; self.unify_devices(inputs)
        if 'ARG' in self.operation:
            # Return indices only
            outputs = [y] if y else [self.register_output(dtype='int64')]
            return self.run(inputs, outputs)
        else:
            if y:
                if not isinstance(y, (tuple, list)):
                    raise TypeError('Excepted outputs as a tuple or list, got {}.'.format(type(y)))
                if len(y) != 2:
                    raise ValueError('Excepted 2 outputs, got {}.'.format(len(y)))
                # Caller passes (values, indices); the op emits (indices, values).
                outputs = [y[1], y[0]]
            else: outputs = [self.register_output('int64'), self.register_output(x.dtype)]
            returns = self.run(inputs, outputs)
            # Return values only
            if self.axis == -1: return returns[1]
            # Return values and indices
            return returns[1], returns[0]
# Generated by Django 3.1.2 on 2020-10-26 13:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: CoAuthorship, Paper and PotentialCoAuthor tables."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Realized co-authorship links between two author ids on a paper.
        migrations.CreateModel(
            name='CoAuthorship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(verbose_name='date published')),
                ('first_author_id', models.IntegerField()),
                ('second_author_id', models.IntegerField()),
                ('paper_id', models.IntegerField(default=0)),
            ],
        ),
        migrations.CreateModel(
            name='Paper',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('pii', models.IntegerField()),
                ('title', models.TextField(max_length=200)),
                ('abstract', models.CharField(max_length=200)),
                ('date', models.DateTimeField(verbose_name='date published')),
                ('url', models.CharField(max_length=200)),
                ('journal_id', models.IntegerField()),
            ],
        ),
        # Same shape as CoAuthorship, but for suggested (not yet real) pairs.
        migrations.CreateModel(
            name='PotentialCoAuthor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(verbose_name='date published')),
                ('first_author_id', models.IntegerField()),
                ('second_author_id', models.IntegerField()),
                ('paper_id', models.IntegerField(default=0)),
            ],
        ),
    ]
|
#!/usr/bin/env python3
from git import Repo
import re
import DingTalk
import sys
import os
def getLastCommit(path):
    """Return the last-built commit hash recorded in an Xcode log/plist file.

    Scans the file for "DVTSourceControlLocationRevisionKey = <sha>;" and
    returns the first captured sha, or None when the file is missing or the
    key is not present.
    """
    if not os.path.exists(path):
        return None
    # `with` guarantees the handle is closed (the original leaked it).
    with open(path, 'r') as f:
        content = f.read()
    result = re.findall(r"DVTSourceControlLocationRevisionKey = (.+?);", content)
    if len(result) < 1:
        print('没有找到')
        # Bug fix: previously fell through to result[0] and raised IndexError
        # on an empty match list; the caller expects None for "not found".
        return None
    return result[0]
def getGitLog(project_path, lastCommit):
    """Return the commits made after *lastCommit*, newest first.

    Iterates the repository history from HEAD and stops at the previously
    built commit; if lastCommit never appears, every commit is returned.
    """
    print("getGitLog:"+project_path)
    repo = Repo(project_path)
    head = repo.head
    # NOTE(review): `master` is an unused leftover, kept for reference.
    master = head.reference
    # log = master.log()
    # log2 = head.log()
    newCommits = []
    for i in repo.iter_commits():
        if i.hexsha == lastCommit:
            break
        newCommits.append(i)
    return newCommits
# Collect git log entries newer than the last recorded build commit.
def getCommits(log_path, project_path):
    """Return commits made after the one recorded in log_path, or None."""
    lastCommit = getLastCommit(log_path)
    if lastCommit is None:
        print('未或得上次 commit')
        return None
    print('上次提交的 commit')
    print(lastCommit)
    print('开始搜索最近的 commit')
    return getGitLog(project_path, lastCommit)
if __name__ == "__main__":
print(DingTalk.getDingTalkRbootUrl())
|
import matplotlib.pyplot as plt
import numpy as np
dense_path_length = np.loadtxt("dense_path_lengths.txt")
p5_rf1_path_length_matrix = np.loadtxt("p5/RF1_pl_matrix_lmbda1.txt")
p5_rf2_path_length_matrix = np.loadtxt("p5/RF2_pl_matrix_lmbda1.txt")
p5_rf3_path_length_matrix = np.loadtxt("p5/RF3_pl_matrix_lmbda1.txt")
p5_rf_path_length_matrix = np.zeros(p5_rf1_path_length_matrix.shape)
p5_sd_rf = np.zeros(p5_rf1_path_length_matrix.shape[0])
p7_sd_rf = np.zeros(p5_rf1_path_length_matrix.shape[0])
sd_sp = np.zeros(p5_rf1_path_length_matrix.shape[0])
p5_pl_rf = np.zeros(p5_rf1_path_length_matrix.shape[0])
p7_pl_rf = np.zeros(p5_rf1_path_length_matrix.shape[0])
pl_sp = np.zeros(p5_rf1_path_length_matrix.shape[0])
pl_halton = np.zeros(p5_rf1_path_length_matrix.shape[0])
count = 0
for i in range(p5_rf_path_length_matrix.shape[0]):
for j in range(p5_rf_path_length_matrix.shape[1]):
flag = 1
if(p5_rf1_path_length_matrix[i,j]==-1):
p5_rf1_path_length_matrix[i,j] = 0
flag = 0
if(p5_rf2_path_length_matrix[i,j]==-1):
p5_rf2_path_length_matrix[i,j] = 0
flag = 0
if(p5_rf3_path_length_matrix[i,j]==-1):
p5_rf3_path_length_matrix[i,j] = 0
flag = 0
p5_rf_path_length_matrix[i,j] = flag
for i in range(p5_rf_path_length_matrix.shape[0]):
for j in range(p5_rf_path_length_matrix.shape[1]):
if(p5_rf_path_length_matrix[i,j]==0):
p5_rf_path_length_matrix[i,j] = -1
continue
p5_rf_path_length_matrix[i,j] = (p5_rf1_path_length_matrix[i,j] + p5_rf2_path_length_matrix[i,j] + p5_rf3_path_length_matrix[i,j])/3.0
p7_rf1_path_length_matrix = np.loadtxt("p7/RF1_pl_matrix_lmbda1.txt")
p7_rf2_path_length_matrix = np.loadtxt("p7/RF2_pl_matrix_lmbda1.txt")
p7_rf3_path_length_matrix = np.loadtxt("p7/RF3_pl_matrix_lmbda1.txt")
p7_rf_path_length_matrix = np.zeros(p7_rf1_path_length_matrix.shape)
for i in range(p7_rf_path_length_matrix.shape[0]):
for j in range(p7_rf_path_length_matrix.shape[1]):
flag = 1
if(p7_rf1_path_length_matrix[i,j]==-1):
p7_rf1_path_length_matrix[i,j] = 0
flag = 0
if(p7_rf2_path_length_matrix[i,j]==-1):
p7_rf2_path_length_matrix[i,j] = 0
flag = 0
if(p7_rf3_path_length_matrix[i,j]==-1):
p7_rf3_path_length_matrix[i,j] = 0
flag = 0
p7_rf_path_length_matrix[i,j] = flag
for i in range(p7_rf_path_length_matrix.shape[0]):
for j in range(p7_rf_path_length_matrix.shape[1]):
if(p7_rf_path_length_matrix[i,j]==0):
p7_rf_path_length_matrix[i,j] = -1
continue
p7_rf_path_length_matrix[i,j] = (p7_rf1_path_length_matrix[i,j] + p7_rf2_path_length_matrix[i,j] + p7_rf3_path_length_matrix[i,j])/3.0
halton_path_length_matrix = np.loadtxt("Halton_pl_matrix_lmbda1.txt")
sp1_path_length_matrix = np.loadtxt("SP1_pl_matrix_lmbda1.txt")
sp2_path_length_matrix = np.loadtxt("SP2_pl_matrix_lmbda1.txt")
sp3_path_length_matrix = np.loadtxt("SP3_pl_matrix_lmbda1.txt")
sp_path_length_matrix = np.zeros(sp1_path_length_matrix.shape)
for i in range(sp_path_length_matrix.shape[0]):
for j in range(sp_path_length_matrix.shape[1]):
flag = 1
if(sp1_path_length_matrix[i,j]==-1):
sp1_path_length_matrix[i,j] = 0
flag = 0
if(sp2_path_length_matrix[i,j]==-1):
sp2_path_length_matrix[i,j] = 0
flag = 0
if(sp3_path_length_matrix[i,j]==-1):
sp3_path_length_matrix[i,j] = 0
flag = 0
sp_path_length_matrix[i,j] = flag
for i in range(sp_path_length_matrix.shape[0]):
for j in range(sp_path_length_matrix.shape[1]):
if(sp_path_length_matrix[i,j]==0):
sp_path_length_matrix[i,j] = -1
continue
sp_path_length_matrix[i,j] = (sp1_path_length_matrix[i,j] + sp2_path_length_matrix[i,j] + sp3_path_length_matrix[i,j])/3.0
n = [200, 400, 600, 800, 1000, 1200]
sr_p5_rf = [0, 0, 0, 0, 0, 0]
sr_p7_rf = [0, 0, 0, 0, 0, 0]
sr_halton = [0, 0, 0, 0, 0, 0]
sr_sp = [0, 0, 0, 0, 0, 0,]
hfont = {'fontname': 'Helvetica'}
from math import sqrt
# cases = [2, 5, 6, 7, 8, 9, 11, 12, 14, 26, 28, 35, 36, 37, 38, 39, 70, 72, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89]
cases = [85, 86, 87, 88]
def calc_sd(a, b, c):
    """Return sqrt of the summed squared deviations of a, b, c from their mean.

    NOTE(review): there is no division by the sample count, so this is not a
    textbook standard deviation — kept as-is because the aggregation below
    consumes it in this form.
    """
    mean = (a + b + c) / 3.0
    return sqrt((a - mean) ** 2 + (b - mean) ** 2 + (c - mean) ** 2)
# Aggregate per-bucket sums over the selected cases, tracking which cases
# contributed to each bucket in `consider`.
count = np.zeros(len(n))
consider = {}
for i in range(len(n)):
    consider[i] = []
    for j in range(len(p5_rf_path_length_matrix[i])):
        # if(p5_rf_path_length_matrix[i,j]==-1 or p7_rf_path_length_matrix[i,j]==-1 or sp_path_length_matrix[i,j]==-1 or halton_path_length_matrix[i,j]==-1):
        # continue
        if (not j in cases):
            continue
        consider[i].append(j)
        p5_sd_rf[i] += calc_sd(p5_rf1_path_length_matrix[i,j], p5_rf2_path_length_matrix[i,j], p5_rf3_path_length_matrix[i,j])
        p7_sd_rf[i] += calc_sd(p7_rf1_path_length_matrix[i,j], p7_rf2_path_length_matrix[i,j], p7_rf3_path_length_matrix[i,j])
        sd_sp[i] += calc_sd(sp1_path_length_matrix[i,j], sp2_path_length_matrix[i,j], sp3_path_length_matrix[i,j])
        p5_pl_rf[i] += p5_rf_path_length_matrix[i,j]/dense_path_length[j]
        p7_pl_rf[i] += p7_rf_path_length_matrix[i,j]/dense_path_length[j]
        pl_sp[i] += sp_path_length_matrix[i,j]/dense_path_length[j]
        # NOTE(review): `=` (not `+=`) keeps only the last case's value, and
        # pl_halton is never divided by count below — confirm this asymmetry
        # with the other accumulators is intentional.
        pl_halton[i] = halton_path_length_matrix[i,j]/dense_path_length[j]
        # print("i = ",i," adding p7_pl_rf = ", p7_pl_rf[i])
        count[i] += 1
# Convert accumulated sums into per-case means (pl_halton excluded; see note).
for i in range(len(n)):
    p5_sd_rf[i]/=count[i]
    p7_sd_rf[i]/=count[i]
    sd_sp[i]/=count[i]
    p5_pl_rf[i]/=count[i]
    p7_pl_rf[i]/=count[i]
    pl_sp[i]/=count[i]
print("pl_sp = ", pl_sp)
print("p5_pl_rf = ", p5_pl_rf)
print("p7_pl_rf = ", p7_pl_rf)
# Plot normalized cost vs sample count (first bucket skipped via [1:]).
# plt.plot(n[1:], p5_pl_rf[1:], color = "green", linewidth = 2, label = "RF+Halton(50:50)")
plt.plot(n[1:], p7_pl_rf[1:], color = "orange", linewidth = 2, label = "RF+Halton(30:70)")
# plt.fill_between(n[1:], p7_pl_rf[1:]-p7_sd_rf[1:], p7_pl_rf[1:]+p7_sd_rf[1:], color = 'orange', alpha = 0.5)
plt.plot(n[1:], pl_sp[1:], color = "blue", linewidth = 2, label = "SP+Halton")
# plt.fill_between(n[1:], pl_sp[1:]-sd_sp[1:], pl_sp[1:]+sd_sp[1:], color = 'blue', alpha = 0.5)
plt.plot(n[1:], pl_halton[1:], color = "red", linewidth = 2, label = "Halton")
plt.xlabel("No of Samples ", **hfont)
plt.ylabel("Cost (Normalized) ", **hfont)
leg = plt.legend()
leg_lines = leg.get_lines()
leg_texts = leg.get_texts()
plt.setp(leg_lines, linewidth=4)
plt.setp(leg_texts, fontsize='medium')
plt.xlim(0, 2000)
plt.ylim(0.8, 1.2)
plt.title("Path Length", **hfont)
plt.grid(True)
plt.savefig("Path_Length.jpg", bbox_inches='tight')
plt.show()
def intersection(lst1, lst2, lst3, lst4, lst5):
    """Return the elements of lst1 also present in every other list.

    Preserves lst1's order (and duplicates). Fixes the original's shadowing
    bug where the parameter lst3 was immediately overwritten by the result,
    and replaces the O(n*m) list membership tests with set lookups.
    """
    others = (set(lst2), set(lst3), set(lst4), set(lst5))
    return [value for value in lst1 if all(value in s for s in others)]
print("consider = ", consider)
print("intersection = ", intersection(consider[n.index(n[-2])], consider[n.index(n[-1])], consider[n.index(n[-3])], consider[n.index(n[-4])], consider[n.index(n[-5])])) |
from django.test import TestCase
from django.test import Client
import json
from applications.images.factories import ImageFactory
from applications.images.models import Image
class TestImagesApi(TestCase):
    """Integration tests for the /api/images/ endpoints."""

    def setUp(self):
        # Fresh HTTP test client for every test method.
        self.client = Client()
        super(TestImagesApi, self).setUp()

    def test_imges_url_returns_code_200(self):
        """The list endpoint responds with HTTP 200."""
        success_status = 200
        response = self.client.get('/api/images/')
        self.assertEqual(response.status_code, success_status)

    def test_imges_available_from_api_list(self):
        """Every created image appears in the list endpoint's JSON payload."""
        images_count = 10
        for i in range(images_count):
            ImageFactory()
        response = self.client.get('/api/images/')
        images = json.loads(response.content.decode('utf8'))
        self.assertEqual(len(images), images_count)

    def test_imges_available_from_detail_page(self):
        """The detail endpoint returns the stored image payload."""
        img = ImageFactory()
        response = self.client.get('/api/images/{id}/'.format(id=img.id))
        image = json.loads(response.content.decode('utf8'))
        self.assertEqual(image['image'], img.image().decode('utf8'))

    def test_delete_image_via_api(self):
        """DELETE on the detail endpoint removes the record."""
        img = ImageFactory()
        self.client.delete('/api/images/{id}/'.format(id=img.id))
        self.assertFalse(Image.objects.all().exists())
def test_can_create_image_via_api(self):
self.client.post('/api/images/', data={'base64_image': 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAIAAAACACAYAAADDPmHLAAAMj0lEQVR42u2dC5AUxRnHZ3fvBZGXvA7ig4eFEq0SrSQmlAcEDlAUkUdiqUAdBhODAQQhIQrIHRSxTJkICWKCxkCgkooJIWIMhRDgnmDKaLASSEgEDB5QgNzxvPfm/7/Zi1e4u3cz3b09s9P/qq96b/e65+vp37x6ur8OWUaBVki3A0Z6ZQAIuAwAAZcBIOAyAARcBoCAywAQcBkAAi4DQMBlAAi4DAABlwEg4AoUAHcsreqNJBfWA9YFlg3LgtXDamHnYGdgJ2GV+4q6RnX7rFppCQAaOoLkNtidsfRW2EDYVQ6KqYF9ANsPew9WBnsbUNTprp9MpQ0AaPQOSO6DTYSNhXVVsJmLsD/Dfg/bDBiqdddbVL4HAA0/AMk82FRLTaMn0iXYa7AfAoT9uveDW/kWADQ8r+UrYdNgGRpd4X3C67CFAOGQ7v3iVL4EAI0/E8nzsM66fWkl3kQuhz0LEBp1O9Ne+QoANDzv2n8Oe0i3L0m0EzYFEFTpdqQ98g0AaHw+rr0BG63bl3aI9wQjAMFZ3Y60JT8B8AqSR5L8C6/FDbBM3b7GxKeF0YCgSbcjyeQLAND44yz76I/n7z9gi2HbLPvZ/UbYbNhjsLBm12cDgJ9o9iGp/ALAPiRfjPNThWUfZRfj5Cmw7PsFnXU8Duvn5c4jzwOAhuxn2T1yV/rK0/1g7Nx/J8n7OySTNFchHz7u1OxDQvkBgAlItsT5qRQ7Nq+NvPdbdq+dTs2Hnz/S7ENC+QGAGZZ9Kr9Sm7Bjp7aRdwiSdzVXYTn8XKrZh4TyMwA7sWPz28h7N5I3NVdhBfxcotmHhPIzALzjH4CdezxJ3leRFGiuggFAREkAoLbCJsbrekU+dhjx6Nf5noAyAIioDQAodrjwRutvsf/vZNl9AEWwHN3+WwYAMbUDgBadgF2AXWfZo3y8IgOAiBwA4FUZAERkAFArA4B6GQBEZABQKwOAehkARBQbBdRJ4SZWwL6psnwDgIcFwPii5gmFmzAAeFkGAIXCzu2PZBZsDOx6WAfdFY6jSMxUaSUAeJofasrztiMZ3s58HERyDLYL9lLO0BIlcw+UAICGZ/87qf+e5Z0xerrUGoDdVvsBaC2OK3wZNh8gXHSRP6GkAxCbovUb2L2yy/apWgNQjCRPoKy/wMYBgtOynJMKQGxS5mbLnqNnZOv/9wAAgBNMhwqWRwhGAIJLMpyTDQBnxiyWWWYa6BkAwDeTBICDWL8kocyNAGCaDOekAYDG56hdEq77/bvX9BQA+D4/AIC3kXxBUrlfAwSviRYiBQA0PsvZa8Ufuh10LQAAnMdIAP5q2fEKZOhD2GDRS4EsAKZY9lRpo09rLgBYzQ8A4H0kt0gsewEAeF6kAFkAvIPkdokVSyfNAgBr+QEAHEByk8SyOR6yPyCodVuAMABofN7UVEisVLppKgDYxA8AgPEDbpBc/oMA4NduM8sAYB2SmZIrlU4aAwDe4gcAcMSye0RlagcAcD1jWgiA2HM/x+L1kFypNFL0un1F3f7LTwCgEkkfyRvgFLk+bjuHRAEYhmSP5Aqlkw7j6GcMI+t86YiszHAj79hVvHeYAQB+4SajKACFSDw77ckDYgCpJ/nhclnesFBI2cHiumNIFABWaJiiSvldTWGr8baKou7Nb/Fw+v8pkm8o2tYxAHCtm4yuAUDjM/gC4+Q5Cb4YIEV/hWt/cyyjquJR3XMy6thx01HhBnkfcMJpJhEA+Dx7QGGF/KyqrHDNkJJluUf5B07/z+H0v1DxNvmW8E9OM4kA4IW5915UNBJqmFxe2KN531SXjBycHalnqFnVs5XmAYAXnGYSAWC+ZcfqM/pEbPwn0fjNASGO77qnY9esc3tw9H8+BdteDQ
DmOs0kAoDqsXR+Ux0afx4a/0X+cXTnxHDPnNObwyFrQoq2vwUATHSaSQSAjUgeTlHlPK7oocxQ/bTSwl4MZmUdQeP3yjmzNhyKqrrrj6dSAOB4tJEIALzhuCuFFfSiDuBRb02XrOqfbVs8gGsOWJW77s3CaX8DGv+BFPtyEAAMdppJBACOVh2R4krGEwdM/suy34+fgjFEa2Ps+xaLXvF3ou9aW2Pi36KnM0INB8sKex5s7cjZ4vy+OZHajbjmf0XDfjgCAPo7zSQCQDmSL2uoaEzRirDVtDYrXP/GnmW5WkOynikeHe4QqX0URz2jl1+tyY3jAKCv00wiAMgY4OhC0YM4+ubj6HP8zCtbl8qG9UWj8yZvjiX3Pb8bVQKAzzrN5KtLQMhqWtM589zC7Uv6XW75rqp4VFZ2pC4fp10uC9PNssPDhmN1a0lDSb6j6mNW1+ozrSH2PxkxY0/eNZYdhYTWR2QfSlbKLwEMwHR3iirHfvU5FUXd17R88XFxfnecdhej4adb+k67XtIBAPA5p5lEANhg2at1qFY0YjU8Xl7UY23LFxdKh0/PCDexH8I0/CcqAQCOX8yJAMBewPmqa4XT/uq9RVc393Ad3jE53LvDqRdw3f22iO9pqs0AYLLTTCIAsBdQcQzc6AFc84e8teT65mjbuOl6CY2vci6/n7UKADjumRUBgNO//qCyRpFQw9jywh6cUcvT/gKc9n+gcns+11wAsNppJhEABiH5p8IKvbevqGvzJIrTe8bedFXmJQZ99kLgR69qLADY7jSTCADMy143RSt3RZfuK+rGuYZ8n74Jd/teXihKt9ir2RsAnHKaUXRIGMO0Kun2xM3fRNz8bTmyY1Ln3I6nOOI16HEGkukoGr+fm4yiAHBAaKGKGuH6PxTX/4pTu++6o1PWxb0qtpFGWg8ACtxkFAWAXcFlKmqUGaq/vbSw57tni/PHdsio3aZiG2mkaQBgo5uMogCwm/Qjy16SXaqywrW3lizrvb+qZNS4nEjdH2WXn0biI3IuAHD1QkzG1DB2z86SXSsAcAsA+Ht1yajx2ZG612WXn0Z6E41/j9vMMgDgrOB3ZNcKAAwGAAcBwP0AwAw+TazJAGCz28yypodLHxsAAAYBgEMAYAoAMLEH4ouDYAYCgAa3BcgCgBHBtsqsGQAYCAA+AAAPAADX05/TXLPR+EIrk8qMESR1mhgA6AcAjgKAhwDAJlnlppG4YObNAEBoVVKZAHBABoMgSZkAAQCuBQDHAMA0ALBBlp9pIvb8jUfjCz8dyQ4T910kz8ooCwBcAwA+AgAFAOBVmX6mgRg69lsyCpINAMtjh4Rwvz0A6AsAjgOARwDAKzL99Ll4qR0jeupvkYpQseyzXw97UKQcANAHAJwAAF8HAC/L9tOn2g2bgMY/J6tAVcGiWS4HJ3AxBldTorPDNbnFy3JPVpeMnJkdqV+nwk8fiQNUOfhmiawjv0Wqw8X3QvKo5SJcfMfIpZt3PdP39PnSETMzw40qAeAI4/Mq94NLtQ4Xvw4Nf1jFRjw/ru5i6fAZkXCTyjWDXsTOfVx3PXXJ8wBcLsubEQopXTTKAOBl1ZTnqV41zADgZaUAAK7AUa2wfI7WfU5h+UIyAKjXCgAQzEWjZMgAoFYGAPUyAIjIAKBWBgDn4tg7hn1jzF+OhmorUKYBQEQeAoCLMiyCrUGD1sd8Yzf3dyx7oaxEQaANACLyEABfRUP+NoGPjBCyKkE+A4CIPALATjRifhIfuR8ZFDreekAGABF5BIA2w7DCTy4NtyjOTwYAEWHHFiDRPSKoAI24vg0/E42GWo68nl1TwQ8AjEeie2LISjTi0234ybPUjDg/PYG8qyyPyg8AMCrXh5p95Zo/N6IhL8f7ET5yzSSO0u0S5+fhyFes0fek8jwAlIRVt2WIcxOmtzwCtvLtM0i2wOLdJApP3FAtvwAwEskOD/jLpV8ZpoZBofncz3kQvPYPSvD/M9H4nh7QqnuHtl
uAgNfRObr9cKCtaPz7dDvRlvwEAI84zhF0HBNfgxjQYjQAuKDbkbbkGwCoGAQ/hj3mYd95P/Cw6KreqZJXd2JSAQSuVs6VOXrq9qWVeLQvQsOvES4phfIlABQg6GrZi1bybNDu4eYKxDt8Tl59Co1fqXu/OJVvAWgRQOiNhKFj2QnjOFy6gD6G/dKyx/wpGbOfCvkegBbF7g/4LD4JxpApKmBguDoGrGLEEoZmqdFdb1GlDQBXCkDw2fxOGKONcur6DZYdzKo9deb0azb2fyz7LR8HgDAa2vto9KjuuslU2gIQT7EBHLxksOuW3baMZcDJrLyOcyoWp4ix4U+iob04XUy6AgWA0adlAAi4DAABlwEg4DIABFwGgIDLABBwGQACLgNAwGUACLgMAAGXASDgMgAEXP8D3ZYdvaYDXJAAAAAASUVORK5CYII='})
self.assertTrue(Image.objects.exists()) |
# 2019/09/16
# ABC019C: count distinct values after repeatedly halving even numbers.
n=int(input())
a=sorted(list(map(int,input().split())))
res=set()
for e in a:
    # Bug fix: odd values were never added (the add was guarded by
    # `if e % 2 == 0`), unlike the equivalent fast version below.
    # Strip factors of two, then record the odd core. (Unused `cnt` removed.)
    while not e&1:
        e>>=1
    res.add(e)
print(len(res))
# Found a faster version below
# ABC019C - Takahashi and the magic boxes
n=int(input())
a=list(map(int,input().split()))
res=set()
for i in a:
    res.add(i//(i&-i)) # i & -i isolates the lowest set bit — the speed trick
print(len(res))
|
# coding=UTF-8
from django.forms.fields import CharField, SlugField
from django.forms.forms import Form
from django.forms.models import ModelForm
from JJEhr.lesson.models import Course
class AddCourseForm(ModelForm):
    """ModelForm for creating a Course.

    NOTE(review): Meta declares neither `fields` nor `exclude`; Django >= 1.8
    rejects this with ImproperlyConfigured — confirm the project's version.
    """
    class Meta:
        model = Course
class UpdateCourseForm(ModelForm):
    """ModelForm for editing an existing Course.

    NOTE(review): Meta declares neither `fields` nor `exclude`; Django >= 1.8
    rejects this with ImproperlyConfigured — confirm the project's version.
    """
    class Meta:
        model = Course
class ExportContactsForm(Form):
    """Plain form carrying the recipient list for a contacts export."""
    recipient_list = CharField()
class SendEmailForm(Form):
    """Form for composing an email: recipients, subject (head) and body."""
    recipient_list = CharField()
    head = CharField(label=u"邮件主题")
    message = SlugField(label=u"邮件内容")
|
# Generated by Django 3.2.6 on 2021-08-28 12:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switch the account boolean flags' defaults to True.

    NOTE(review): defaulting is_admin/is_staff/is_superadmin to True grants
    elevated rights to every new account — confirm this is intended.
    """
    dependencies = [
        ('accounts', '0004_auto_20210828_1758'),
    ]
    operations = [
        migrations.AlterField(
            model_name='account',
            name='is_active',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='is_admin',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='is_staff',
            field=models.BooleanField(default=True),
        ),
        migrations.AlterField(
            model_name='account',
            name='is_superadmin',
            field=models.BooleanField(default=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
# K-Nearest Neighbors (K-NN)
## Importing the libraries
import numpy as np
from numpy import savetxt
import matplotlib.pyplot as plt
import pandas as pd
import os
from os import path
import pickle
# Identifiers used for the saved model artifact below.
model_name = 'k_nearest_neighbors'
data_name = 'Breast_cancer_data'
"""## Training the K-NN model on the Training set"""
# Train/test splits live in sibling directories. NOTE(review): os.chdir
# mutates the process-wide working directory; later paths are relative to it.
os.chdir("train_data")
X_train = pd.read_csv('X_train.csv')
y_train = pd.read_csv('y_train.csv')
os.chdir("../test_data")
X_test = pd.read_csv('X_test.csv')
y_test = pd.read_csv('y_test.csv')
from sklearn.neighbors import KNeighborsClassifier
# minkowski with p = 2 is the euclidean distance
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train.values.ravel())
# .values will give the values in an array. (shape: (n,1)
# ravel() to convert column vector to 1D array to avoid warning
"""## Making the Confusion Matrix"""
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
#print(cm)
def record(string, accuracy):
    """Pair a model label with its accuracy string for reporting."""
    report_entry = (string, accuracy)
    return report_entry
# Format the test-set accuracy to 3 decimal places for the report line.
accuracy = str(format(accuracy_score(y_test, y_pred),'.3f'))
print(record("k_nearest_neighbors",accuracy))
# Persist the fitted classifier alongside the other models.
os.chdir("../models")
pkl_model_filename = model_name + '_model.pkl'
with open (pkl_model_filename,'wb') as file:
    pickle.dump(classifier, file)
'''
Name & Student ID: Conall McCarthy, *********
Date: 20/02/18
Description: Assignment 11, Write a program that takes a file with each line being a row of a unsolved
sudoku puzzle, create a board, solve the sudoku puzzle using 2 functions (1. solveBoard and 2. isValidMove)
and print out a formatted board.
1.solveBoard(b, row, col)
Write a recursive function that attempts to solve the board. Each row should be considered and if all
the rows are completed the board should be solved. Within each row if a column contains a number
the next column is checked recursively. If a column is empty a number should be placed in the
column (if valid) and the function again recursively called.
#
2. def isValidMove (b, row, col, number): Write a function that evaluates whether a
number can be placed in a given row and column as a valid move. Remember that a number (1-9)
cannot be repeated within a given row, column or mini-grid. This function should return True if the
move is valid. Hint: use the modulo (%) operator for the mini-grid case.
'''
# reads in a file in the format of 9 numbers on 9 lines separated by spaces and converts it into a list of lists
# with each inner list being a row
def readBoard(file):
    """Read a sudoku board from *file*.

    The file holds 9 lines of 9 space-separated digits (0 = empty cell).
    Returns the board as a list of row-lists of ints.
    """
    b = []
    # "with" guarantees the file handle is closed even if int() raises
    # on malformed input (the original left the handle open on error).
    with open(file, "r") as infile:
        for line in infile:
            # Convert one "5 3 0 ..." text line into a row of ints.
            b.append([int(num) for num in line.strip("\n").split(" ")])
    print(b)  # debug echo of the parsed board, kept from the original
    return b
# takes a board in the form of a list of lists and formats it into a sudoku table
def printBoard(SBoard):
    """Pretty-print a 9x9 board (list of row-lists) as a sudoku grid.

    Zeros render as blank cells; a divider line separates the 3x3 blocks.
    """
    divider = "+---------+---------+---------+"
    for r in range(9):
        if r % 3 == 0:
            print(divider)
        line = ""
        for c in range(9):
            if c % 3 == 0:
                line += "|"
            cell = SBoard[r][c]
            line += "   " if cell == 0 else " %i " % cell
        print(line + "|")
    print(divider)
# takes a sudoku board(list of lists) and solves the board using recursion
def solveBoard(b, row, col):
    """Recursively solve the board in place via backtracking.

    Cells are visited left-to-right, top-to-bottom starting at (row, col).
    Returns (solved, b): solved is True once row 9 is reached, i.e. every
    cell has been filled with a valid digit.
    """
    # initially sets result to false
    result = False
    # base case
    # if: Have I reached row number 9. If I have, I've solved the board return True and return the filled out board.
    if row == 9:
        result = True
        return result, b
    # else (recursive case)
    # if the current square is not 0 recursively call the function again passing in the next square .
    else:
        if b[row][col] != 0:
            # if col is less than 8 increment the column, call the function again passing back the result
            # (true or false) and the board, then returns the result and the board
            if col < 8:
                result, b = solveBoard(b, row, col + 1)
                return result, b
            # if the col is == 8, sets col back to 0 and increments the row, call the function again passing back the
            # result (true or false) and the board, then returns the result and the board
            elif col == 8:
                result, b = solveBoard(b, row + 1, 0)
                return result, b
        # else consider every possible combination of number
        else:
            # is this a validNumber?
            # loops through the numbers 1-9
            for new_insert in range(1, 10, 1):
                if isValidMove(b, row, col, new_insert):
                    # update the square with the number
                    b[row][col] = new_insert
                    # finds the next position
                    if col < 8:
                        next_col = col + 1
                        next_row = row
                    else:
                        next_row = row + 1
                        next_col = 0
                    # if recursively calling the function again (next row next col) == true:
                    # return true and the board
                    result, b = solveBoard(b, next_row, next_col)
                    if result == True:
                        return True, b
            # At the end of my loop, set the square back to zero, pass back false and the board
            # if no valid move is found, sets the square back to 0 (backtrack)
            b[row][col] = 0
            return result, b
# function that takes the board, current position(row and col) and checks if a number can be placed in that position
def isValidMove(b, row, col, number):
    """Return True if *number* may be placed at (row, col) on board *b*.

    A placement is valid when the number does not already appear in the
    same row, the same column, or the same 3x3 mini-grid.
    """
    # COLUMN check: any() short-circuits on the first match, replacing the
    # original manual while-loop that built a full column list.
    if any(b[r][col] == number for r in range(9)):
        return False
    # ROW check: each row is already a list inside b.
    if number in b[row]:
        return False
    # MINI-GRID check: (top, left) is the corner of the 3x3 block
    # containing (row, col); scan its three row slices.
    top = (row // 3) * 3
    left = (col // 3) * 3
    for r in range(top, top + 3):
        if number in b[r][left:left + 3]:
            return False
    # passed every check
    return True
# Main Program: read a puzzle file, show it, solve it, show the result.
filename = "easyPuzzle.txt"
# filename = "hardPuzzle.txt"
board = readBoard(filename)
print("\nPROBLEM:")
printBoard(board)
# calls the solver and checks whether the answer is True or False
check_answer, solvedBoard = solveBoard(board, 0, 0)
if check_answer == False:
    print("Solution does not exist")
    # NOTE(review): the (unsolved) board is still printed on failure --
    # confirm this is intended rather than a leftover.
    printBoard(solvedBoard)
else:
    print("\nSOLUTION:")
    printBoard(solvedBoard)
|
class Solution:
    def maxProfit(self, prices: List[int]) -> int:
        """Max profit with unlimited buy/sell transactions of one share.

        Tracks two running states per day: best cash while holding no
        stock (free to buy) and best cash while holding one stock
        (free to sell). Equivalent to the memoized recursion, rolled
        forward iteratively.
        """
        free = 0            # best profit so far with no stock in hand
        hold = -10 ** 9     # best balance so far while holding a stock
        for price in prices:
            # Either keep yesterday's state, or sell (free) / buy (hold) today.
            free, hold = max(free, hold + price), max(hold, free - price)
        return free
import inspect
import numpy
import sklearn.metrics
from sklearn.preprocessing import LabelBinarizer
class BaseCalculator(object):
    """Base class for metric calculators over a fitted classifier.

    Subclasses implement base_calculations() and expose calculate_*
    members; calculate() runs them all and renames result keys via
    the class-level `mapping`.
    """
    # Optional {calculator_key: output_key} renames applied to results.
    mapping = {}
    def __init__(self, clf, X_test, y_test, *args, **kwargs):
        self.clf = clf
        self.X_test = X_test
        # Binarize labels once; y_test is stored in binarized form.
        self._binarizer = LabelBinarizer()
        self._binarizer.fit(y_test)
        self.y_test = self._binarizer.transform(y_test)
    def apply_mapping(self, dict):
        # NOTE(review): the parameter name shadows the builtin `dict`;
        # kept unchanged to preserve the call interface.
        for calc_name, model_name in self.mapping.items():
            dict[model_name] = dict[calc_name]
            del dict[calc_name]
        return dict
    def base_calculations(self):
        # Subclasses must return the kwargs shared by every calculate_* call.
        raise NotImplementedError("You must override this method")
    def calculate(self):
        """Run every calculate_* member and return {metric_name: value}."""
        kwargs = self.base_calculations()
        result = {}
        for member in inspect.getmembers(self, inspect.isfunction):
            if self.is_calculate_function(member[0]):
                # calculate_* members are plain functions assigned onto the
                # instance (see SKLearnCalculator.add_calculate_functions),
                # so they are unbound -- pass self explicitly.
                result[member[0].replace("calculate_", "")] = member[1](self, **kwargs)
        return self.apply_mapping(result)
    def is_calculate_function(self, member_name):
        # Calculation members are identified purely by naming convention.
        return member_name.startswith("calculate_")
class SKLearnCalculator(BaseCalculator):
    """Calculator exposing sklearn.metrics functions as calculate_* members.

    For each matching sklearn metric a wrapper is attached to the instance;
    base_calculations() precomputes predictions/probabilities once and the
    wrappers map sklearn parameter names onto those shared values.
    """
    # Class-level defaults; _needed_args is copied per instance in __init__.
    _needed_args = set()
    _to_pred = ["y_pred", "y2"]
    _to_proba = ["y_score", "y_prob", 'probas_pred']
    _to_y_test = ["y_true", "y1"]
    def __init__(self, clf, X_test, y_test, MetricsClass=None):
        self.metrics_class = MetricsClass
        # Copy the class-level set so instances do not share (and mutate)
        # one accumulator of needed argument names -- previously every
        # instance updated the same class attribute.
        self._needed_args = set(self._needed_args)
        super(SKLearnCalculator, self).__init__(clf, X_test, y_test)
        self.add_calculate_functions()
    def _has_common_element(self, l1, l2):
        """Return True if any element of l1 is also in l2."""
        for x in l1:
            if x in l2:
                return True
        return False
    def base_calculations(self):
        """Precompute predictions/probabilities shared by all metrics."""
        result = {}
        if self._has_common_element(self._needed_args, self._to_pred):
            result["pred"] = self._binarizer.transform(self.clf.predict(self.X_test))
        if self._has_common_element(self._needed_args, self._to_proba):
            try:
                result["proba"] = self.clf.predict_proba(self.X_test)[:,-1]
            except Exception:
                # Narrowed from a bare except: classifiers without
                # predict_proba fall back to their score.
                result["proba"] = self.clf.score(self.X_test, self.y_test)
        return result
    def add_calculate_functions(self):
        """Attach calculate_<metric> wrappers for matching sklearn metrics."""
        if self.metrics_class is None:
            predicate = lambda i: True
        else:
            field_names = self._get_field_names()
            predicate = lambda j: j in field_names
        for member in inspect.getmembers(sklearn.metrics,
                lambda x: inspect.isfunction(x) and not x.__name__.startswith("_") and predicate(x.__name__)):
            setattr(self, "calculate_" + member[0], self._add_wrapper(member[1]))
    def _get_field_names(self):
        """Collect field names (or their alternative model names) from the
        Django-style metrics class used to filter which metrics to expose."""
        result = []
        for x in self.metrics_class._meta.get_fields():
            if hasattr(x, "related_model"):
                if hasattr(x.related_model, "alternative_model_name"):
                    result += [x.related_model.alternative_model_name]
                    continue
            result += [x.name]
        return result
    def _add_wrapper(self, func):
        # inspect.getargspec was removed in Python 3.11; getfullargspec is
        # the drop-in replacement for positional-argument introspection.
        args = inspect.getfullargspec(func)[0]
        self._needed_args.update(args)
        def wrapper(self, **kwargs):
            new_args = self._convert_args(args, **kwargs)
            return func(*new_args)
        return wrapper
    def _convert_args(self, args, **kwargs):
        """Map sklearn metric parameter names to the precomputed values."""
        new_args = []
        for arg in args:
            if arg in self._to_y_test:
                new_args += [self.y_test]
            elif arg in self._to_pred:
                new_args += [kwargs["pred"]]
            elif arg in self._to_proba:
                new_args += [kwargs["proba"]]
        return new_args
|
import unittest
import wethepeople as wtp
from wethepeople.objects import PetitionResponse, SignatureResponse
from wethepeople.objects import Petition, Signature
# No requests are made for this, this just silences the ua warning
# These Tests make sure that Nationstates obj keeps concurrent all object values
class api_returns_petiton_object(unittest.TestCase):
    """Smoke tests: Api.get_petitions returns the petition wrapper types."""
    def test_api_petitionResponse(self):
        # mock=1 presumably serves canned data without network I/O -- TODO confirm.
        api = wtp.Api()
        o = api.get_petitions(mock=1)
        self.assertIsInstance(o, PetitionResponse)
    def test_api_petition(self):
        api = wtp.Api()
        o = api.get_petitions(mock=1)
        # Individual results are Petition objects.
        self.assertIsInstance(o.results[0], Petition)
class api_returns_signature_object(unittest.TestCase):
    """Smoke tests: signature search returns the signature wrapper types."""
    def test_api_SignatureResponse(self):
        api = wtp.Api()
        o = api.get_petitions(mock=1).results[0].search_signatures(limit=1)
        self.assertIsInstance(o, SignatureResponse)
    def test_api_petition(self):
        # NOTE(review): method name looks copy-pasted from the petition
        # tests; a clearer name would be test_api_signature. Left unchanged
        # in case external tooling references it.
        api = wtp.Api()
        o = api.get_petitions(mock=1).results[0].search_signatures(limit=1).results[0]
        self.assertIsInstance(o, Signature)
|
from domains.domainConstructors import SimpleMDP
def blockWorld(numOfBlockes, numOfSlots, initConfig, goalConfig):
    """Construct the blocks-world MDP.

    NOTE(review): stub -- every parameter is currently ignored and a bare
    SimpleMDP is returned; presumably the configs are meant to shape the
    state/action spaces. TODO: implement.
    """
    return SimpleMDP()
from django.urls import path
from .views import home, general, tecnologia, programacion, videojuegos, tutoriales, signup
# Route table: one view per blog section, plus the signup page.
urlpatterns = [
    path('', home, name="index"),
    path('general/', general, name='general'),
    path('tecnologia/', tecnologia, name='tecnologia'),
    path('programacion/', programacion, name='programacion'),
    path('videojuegos/', videojuegos, name='videojuegos'),
    path('tutoriales/', tutoriales, name='tutoriales'),
    path('signup/', signup, name='signup'),
]
from django.db import models
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
from django.contrib.auth.hashers import make_password
from django.db.models.signals import post_save
from django.dispatch import receiver
User = get_user_model()
# 3rd party import
from PIL import Image
def profilepic_directory_path(instance, filename):
    """
    Change filename of profile picture.

    Renames the upload to "<username>-pic.<ext>" and stores it under
    customer/profilepic/.
    """
    # Use the LAST dot-separated chunk so "a.b.png" keeps extension "png"
    # (the original took index 1, which breaks on multi-dot names).
    ext = filename.split('.')[-1]
    filename = f'{instance.user.username}-pic.{ext}'
    # Return the computed name; the original returned a literal placeholder,
    # discarding the filename built above.
    return f'customer/profilepic/{filename}'
class Customer(models.Model):
    """Customer profile linked one-to-one with the auth User."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    mobile = models.CharField(verbose_name='Mobile Number', max_length=10)
    landmark = models.TextField(verbose_name='Landmark', null=True, blank=True)
    street_address = models.TextField(verbose_name='Street Address')
    city = models.CharField(verbose_name='City', max_length=40)
    pin = models.CharField(verbose_name='PIN', max_length=6)
    state = models.CharField(verbose_name='State', max_length=30)
    country = models.CharField(verbose_name='Country', max_length=30)
    profile_pic = models.ImageField(upload_to=profilepic_directory_path, verbose_name='Profile Pic', blank=True, null=True)
    def __str__(self):
        return self.user.username
    def get_address(self):
        # Multi-line postal address; landmark line is blank when unset.
        return f"{self.landmark if self.landmark else ''}\n{self.street_address},\n{self.city}, Pin-{self.pin},\n{self.state}, {self.country}"
    def save(self, *args, **kwargs):
        # Normalize location casing before persisting.
        if self.state and self.country:
            self.state = self.state.capitalize()
            self.country = self.country.capitalize()
        if self.profile_pic:
            # First save writes the image file so profile_pic.path exists
            # on disk before Pillow opens it.
            super(Customer, self).save(*args, **kwargs)
            img = Image.open(self.profile_pic.path)
            # Downscale large avatars in place and recompress at quality 50.
            if img.height>200 and img.width>200:
                set_dim = (200, 200)
                img.thumbnail(set_dim)
                img.save(self.profile_pic.path, quality=50)
        # NOTE(review): when profile_pic is set the model row is saved twice
        # (once above, once here) -- confirm this is intended.
        super(Customer, self).save(*args, **kwargs)
"""
Link User model to Customer model on creation of user instance
"""
@receiver(post_save, sender=User)
def createCustomer(sender, instance, created, **kwargs):
    """post_save hook: enroll the user in the 'customer' group and, on the
    first save, create the matching Customer row."""
    try:
        customer_group = Group.objects.get(name='customer')
    except Group.DoesNotExist:
        # Group not provisioned yet -- silently skip the group assignment.
        pass
    else:
        instance.groups.add(customer_group) # Add this user to customer group
    if created:
        Customer.objects.create(user=instance)
@receiver(post_save, sender=User)
def updateCustomer(sender, instance, created, **kwargs):
    """post_save hook: keep the related Customer in sync when an existing
    User is re-saved (creation is handled by createCustomer)."""
    if not created:
        instance.customer.save()
|
import gym
import matplotlib
import numpy as np
from collections import defaultdict
from BlackjackEnv import BlackjackEnv
import plotting
import sys
if "../" not in sys.path:
sys.path.append("../")
def make_epsilon_greedy_policy(Q, epsilon, nA):
    """Build an epsilon-greedy policy over action-value function Q.

    The returned function maps an observation to a probability vector of
    length nA: every action gets epsilon/nA base mass, and the greedy
    action receives the remaining 1-epsilon on top.
    """
    def policy_fn(observation):
        # Bug fix: size the vector with the nA parameter; the original read
        # the module-level global `env.nA`, ignoring the argument.
        A = np.ones(nA) * epsilon / nA
        A[np.argmax(Q[observation])] += 1 - epsilon
        return A
    return policy_fn
def mc_control_epsilon_greedy(env, num_episodes, discount_factor=1.0, epsilon=0.1):
    """First-visit Monte Carlo control with an epsilon-greedy policy.

    Returns (Q, policy): Q maps state -> array of action values, and
    policy maps an observation to action probabilities derived from Q.
    """
    # Keeps track of sum and count of returns for each state
    # to calculate an average. We could use an array to save all
    # returns (like in the book) but that's memory inefficient.
    returns_sum = defaultdict(float)
    returns_count = defaultdict(float)
    # The final action-value function.
    # A nested dictionary that maps state -> (action -> action-value).
    Q = defaultdict(lambda: np.zeros(env.action_space.n))
    # The policy we're following
    policy = make_epsilon_greedy_policy(Q, epsilon, env.action_space.n)
    for i_episode in range(num_episodes):
        # Generate one full episode under the current policy.
        done = False
        state = env.reset()
        episode = []
        while not done:
            probs = policy(state)
            action = np.random.choice(np.arange(len(probs)), p=probs)
            next_state, reward, done, _ = env.step(action)
            episode.append((state, action, reward))
            state = next_state
        # First-visit update: G is the discounted return from the first
        # occurrence of each (state, action) pair.
        # NOTE(review): pairs appearing k times are processed k times, each
        # adding the same first-visit G -- this weights such episodes more
        # heavily than a set-deduplicated first-visit update would; confirm.
        for state, action, reward in episode:
            firstOccurence = next(i for i, x in enumerate(episode) if x[0] == state and x[1] == action)
            G = sum(x[2]*discount_factor**i for i, x in enumerate(episode[firstOccurence:]))
            returns_sum[(state, action)] += G
            returns_count[(state, action)] += 1
            Q[state][action] = returns_sum[(state, action)]/returns_count[(state, action)]
    return Q, policy
# Use ggplot styling for the value-function plots.
matplotlib.style.use('ggplot')
env = BlackjackEnv()
# Learn the action-value function over 500k sampled episodes.
Q, policy = mc_control_epsilon_greedy(env, num_episodes=500000, epsilon=0.1)
# For plotting: Create value function from action-value function
# by picking the best action at each state
V = defaultdict(float)
for state, actions in Q.items():
    action_value = np.max(actions)
    V[state] = action_value
plotting.plot_value_function(V, title="Optimal Value Function")
|
# -*- coding: utf-8 -*-
"""
Author: zero
Email: 13256937698@163.com
Date: 2019-11-01
"""
import tushare as ts
from thctools import Technical
# SECURITY(review): hard-coded API token committed to source -- rotate the
# credential and load it from an environment variable or config file.
ts.set_token('24c7a5d5b40cd5db779cbc888ba4516d4be3384c0cf897caeaf2415b')
pro = ts.pro_api()
# Daily bars for 603019.SH; Technical expects a 'date' column plus OHLC.
df = pro.query('daily', ts_code='603019.SH')
df = df.rename(columns={'trade_date': 'date'})[['date', 'open', 'high', 'low', 'close']]
print(df.head(10))
# Print the first rows of each computed indicator (MA, MACD, KDJ).
tech_obj = Technical(df)
print(tech_obj.ma.head(10))
print(tech_obj.macd.head(10))
print(tech_obj.kdj.head(10))
|
import subprocess
import sys
import requests
import json
import raidheroes_scraper as rs
import discord_poster as dp
import time
# Full boss name (as it appears in the log path) -> short code used in
# generated report filenames.
boss_code_map = {
    'Massive Kitty Golem': 'golem',
    'Vale Guardian': 'vg',
    'Gorseval the Multifarious': 'gorse',
    'Sabetha the Saboteur': 'sab',
    'Slothasor': 'sloth',
    'Matthias Gabrel': 'matt',
    'Keep Construct': 'kc',
    'Xera': 'xera',
    'Cairn the Indomitable': 'cairn',
    'Mursaat Overseer': 'mo',
    'Samarog': 'sam',
    'Deimos': 'dei',
};
# Full guild name -> guild tag recorded in the uploaded metadata.
guild_tag_map = {
    'Very Innovative Players': 'VIP',
    'Karma': 'KA',
    'Valiant Feline Explorers': 'VFX',
    'Thank Mr Goose': 'HONK',
    'The Essence Of': 'LUCK',
    'Silver Kings And Golden Queens': 'SKGQ',
    'Bourne Again': 'bash',
    'Avantosik': 'Heim',
    'Verucas Illusion': 'VI',
}
# Tags of the guilds treated as "main" (currently unused in this chunk --
# presumably consumed elsewhere; TODO confirm).
main_guilds_list = [
    'VIP',
    'KA',
    'VFX',
    'SKGQ',
    'LUCK',
]
def parse_and_upload(filepath, use_discord):
    """Parse a new arcdps combat log, upload the HTML report plus metadata,
    and optionally announce the result on Discord.

    filepath is relative to the cbtlogs folder and encodes
    boss\\character\\guild\\logfile. use_discord toggles the announcement.
    """
    print('New file processing: ' + filepath)
    # Run the external parser; it emits an HTML report into the autoparse dir.
    subprocess.call([
        'X:\\Documents\\arcdps\\autoparse\\raid_heroes.exe',
        'X:\\Documents\\arcdps\\arcdps.cbtlogs\\' + filepath
    ], cwd='X:\\Documents\\arcdps\\autoparse\\')
    # The path components carry all the context we need.
    boss_name, char_name, guild_name, file_evtc = filepath.split('\\');
    if (guild_name in guild_tag_map):
        guild_name = guild_tag_map[guild_name];
    else:
        guild_name = 'null'
    # Only known bosses are uploaded; everything else is ignored.
    if (boss_name in boss_code_map) :
        #scp the generated file
        time_created = file_evtc.split('.')[0];
        file_name = file_evtc.split('.')[0] + '_' + boss_code_map[boss_name] + '.html'
        # Scrape fight stats out of the generated HTML report.
        data = rs.scrape_file(char_name, 'X:\\Documents\\arcdps\\autoparse\\' + file_name)
        logmetadata = {
            'boss': boss_name,
            'name': char_name,
            'guild': guild_name,
            'time': time_created,
            'path': file_name,
            'bosstime': data['bosstime'],
            'class': data['class'],
            'cleavedmg': data['cleavedmg'],
            'bossdmg': data['bossdmg'],
            'rank': data['rank'],
            'people': data['people'],
            'success': 1 if data['success'] else 0,
        }
        # Training-golem logs are parsed but never uploaded or announced.
        if (boss_name != 'Massive Kitty Golem') :
            print('uploading...\n\r')
            # Echo the (cosmetic) command line for debugging.
            print(' '.join([
                'rsync',
                '-vz',
                '--chmod=u+rwx,g+rwx,o+rwx',
                '-e', '"ssh -i ~/.ssh/id_rsa"',
                file_name,
                'root@logs.xn--jonathan.com:/var/www/logs/html/logs'
            ]))
            # Push the HTML report to the web server over rsync/ssh.
            subprocess.call(
                [
                    'rsync',
                    '-vz',
                    '--chmod=u+rwx,g+rwx,o+rwx',
                    '-e', 'ssh -i C:/Users/Jonathan/.ssh/id_rsa',
                    file_name,
                    'root@logs.xn--jonathan.com:/var/www/logs/html/logs'
                ],
                cwd='X:\\Documents\\arcdps\\autoparse\\',
                shell=True
            )
            print('\n\rPUT-ing');
            print(str(logmetadata))
            print('')
            # Register the log's metadata with the site API.
            headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
            r = requests.put('https://logs.xn--jonathan.com/api/logmetadata', data=json.dumps(logmetadata), headers=headers)
            print('PUT response: ' + str(r))
            print('PUT response: ' + r.text)
            # API responds with the public link to the uploaded log.
            logLink = json.loads(r.text)[0];
            print(logLink)
            print('')
            # Format fight duration as m:ss with zero-padded seconds.
            time_string = str(data['bosstime'] // 60) + ':' + ('0' if (data['bosstime'] % 60 < 10) else '') + str(data['bosstime'] % 60)
            print('time: ' + time_string)
            if use_discord:
                if (data['success']):
                    print('Successful Boss Attempt. Posting to discord.')
                    dp.win(boss_name, time_string, logLink)
                else:
                    print('Unsuccessful Boss Attempt. Heckling discord.')
                    dp.lose(boss_name, time_string, logLink)
            # Per-player DPS breakdown is uploaded for kills (and always
            # for Deimos).
            if (data['success'] or (boss_name == 'Deimos')):
                alldpsdata = rs.scrape_all_data(boss_name, time_created, 'X:\\Documents\\arcdps\\autoparse\\' + file_name)
                for dpsd in alldpsdata:
                    print(dpsd)
                r = requests.put('https://logs.xn--jonathan.com/api/dpsdata', data=json.dumps(alldpsdata), headers=headers)
                print('PUT response: ' + str(r))
                print('PUT response: ' + r.text)
    print('===============================================================\n\r\n\r')
# Entry point: invoked with the new log's relative path, and optionally
# 'post_discord' as a second argument to announce the result.
if (len(sys.argv) > 1):
    print('New file notified: ' + sys.argv[1])
    # Give the game client time to finish writing the log file.
    time.sleep(4)
    parse_and_upload(sys.argv[1], len(sys.argv) == 3 and sys.argv[2] == 'post_discord')
|
from PNS import *
#--- Create unmyelinated axon first
# Unmyelinated axon initialization parameter bunch
initBunch = Bunch()
initBunch.fiber_diam = 0.5 #[um]
initBunch.rho_a = 100.0 # axioplasmic resistivity [ohm-cm]
initBunch.rho_e = 500.0 # extracellular resistivity [ohm-cm]
initBunch.cm = 1.0 # [uF/cm^2] membrane cap
initBunch.L = 1000 # [um] ~the length of 10 nodes of 10um diameter mylinated axon
initBunch.segLen = 2.5 # [um] limits length of each segment
initBunch.nseg = makeOdd((initBunch.L/initBunch.segLen)) # always odd
# Fiber model uses Sweeney channel dynamics (sweeneyCh from PNS).
smallFiber = UnmyelinatedNerve(initBunch, sweeneyCh)
#--- Stimulation electrode (cathodic rectangular pulse)
myAmp = -5.25e-3/1e-9 # amplitude [nA]
myDel = 5.0 # start at t=5ms [ms]
myDur = 1 # duration [ms]
electrode = DummyElectrode(myDel, myAmp, myDur)
#--- Unmyelinated simulation initialization parameter bunch for testing
simBunch = Bunch()
simBunch.dt = 0.005 # [ms]
simBunch.v_init = -70.0 # [mV]
simBunch.celsius = 37.0 # [deg C]
simBunch.elec_dist = 0.25 # [mm]
simBunch.elecLocX = 0.5 # 0 to 1, normalized location along unmeylinated axon
simBunch.tstop = 10.0 # [ms]
sim = StimSim(smallFiber, electrode, simBunch)
#--- simulation (left disabled; re-enable to capture voltage traces)
#sim.run_sim()
#Vm_vec_py = np.array(sim.Vm_vec)
#t_vec_py = np.array(sim.t_vec)
#--- Plotting utilities
def plot_segV(Vm_vec_py, t_vec_py, segIdx):
    """Plot the membrane-voltage time course at one segment.

    Vm_vec_py is (num_segments, num_timesteps); t_vec_py the matching
    time axis; segIdx the segment row to plot.
    """
    if segIdx >= Vm_vec_py.shape[0]:
        # print(...) call form is valid under both Python 2 and 3.
        print("Only %d segments, %d invalid!" % (Vm_vec_py.shape[0], segIdx))
        return
    plt.figure()
    # Bug fix: plot against the t_vec_py argument -- the original referenced
    # an undefined global `t_vec`.
    plt.plot(t_vec_py, Vm_vec_py[segIdx, :])
    plt.xlabel('time (ms)')
    plt.ylabel('mV')
    plt.title('Time course @ %.2fum' % (smallFiber.idx2len(segIdx)))
    plt.show(block=False)
def plot_segV_together(Vm_vec_py, t_vec_py, listIdx):
    """Overlay voltage time courses for several segments on one figure.

    Curves are drawn in red with opacity ramping up along listIdx, so
    later segments appear more opaque.
    """
    plt.figure()
    total = len(listIdx)
    for pos, segIdx in enumerate(listIdx):
        opacity = (pos - 0.0) / total + 0.1
        plt.plot(t_vec_py, Vm_vec_py[segIdx, :], 'r-', alpha=opacity)
    plt.xlabel('time (ms)')
    plt.ylabel('mV')
    plt.title('Time course for %.2fum to %.2fum' % \
        (smallFiber.idx2len(listIdx[0]), smallFiber.idx2len(listIdx[-1])))
    plt.show(block=False)
def plot_allV(Vm_vec_py, tStart=4.0, tEnd=10.0, dt=0.1):
    """Plot voltage along the entire axon, one curve per sampled time
    between tStart and tEnd (ms) at interval dt."""
    # Plot voltage over entire axon, different
    # curves for different time periods
    if tStart > h.tstop:
        # print(...) call form is valid under both Python 2 and 3 (the
        # original print statements were Python-2-only syntax).
        print("tStart > tstop!")
        return
    if tStart >= tEnd:
        print("tStart must be less than tEnd!")
        return
    plt.figure()
    # Physical position (um) of every segment along the axon.
    L = np.array([seg.x for seg in smallFiber.axon]) * smallFiber.axon.L
    for i in np.arange(tStart, tEnd, dt):
        # later is more opaque
        #import pdb; pdb.set_trace()
        v_alpha = (i-tStart)/(tEnd-tStart)
        #plt.plot(L, Vm_vec_py[:, int(i/h.dt)], 'r-', alpha=v_alpha)
        plt.plot(L, Vm_vec_py[:, int(i/h.dt)])
    plt.xlabel('length (um)')
    plt.ylabel('mV')
    plt.show(block=False)
#plot_allV(Vm_vec_py, tStart=4.5, tEnd=8.5, dt=0.05)
#plot_segV_together(Vm_vec_py, t_vec_py, [smallFiber.len2Idx(l) for l in np.arange(100,1000,100)])
#plt.xlim((4,4+electrode.dummy_stim.dur+1))
#plt.show(block=False)
# --- Actual work for report: strength-diameter curves for several pulse widths.
electrode.setDelay(2)
durations = [1, 2.5, 3, 5, 10]
diameters = [0.2, 0.5, 0.7, 1.0, 1.2, 1.5]
lo_lim = -0.5e-3/1e-9 # cathodic current low limit [nA]
hi_lim = -10e-3/1e-9 # cathodic current high limit [nA]
threshCur = []
sim.simParams.elec_dist = 0.5 # electrode distance [mm]
# print(...) call form is valid under both Python 2 and 3 (the original
# used Python-2-only print statements).
print("elec_dist=%.2f mm" % (sim.simParams.elec_dist))
for i in range(len(durations)):
    print("PW=%.2fms: " % (durations[i]))
    electrode.setDur(durations[i])
    # Extend the simulation when the pulse would outlast the default 10 ms.
    sim.change_tstop(np.max([10, 5+durations[i]]))
    threshCur.append(strength_diam(sim, diameters, lo_lim, hi_lim, AP_status, AP_msgFn))
|
# -*- coding: utf-8 -*-
# @Time : 2019/11/20 10:17
# @Author : zxl
# @FileName: test.py
import pandas as pd
import numpy as np
# Quick demo: turn a 1-D array into a single-column (n, 1) matrix.
arr = np.array([1, 2, 3, 4])
b = arr.reshape(-1, 1)
print(arr)
print(b)
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Setting.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Setting(object):
    """pyuic5-generated UI for the 'Setting' form (video-stream address
    dialog). Regenerate from Setting.ui rather than editing by hand."""
    def setupUi(self, Setting):
        # Build the widget tree and apply the designer stylesheets.
        Setting.setObjectName("Setting")
        Setting.resize(400, 270)
        Setting.setStyleSheet("")
        self.frame = QtWidgets.QFrame(Setting)
        self.frame.setGeometry(QtCore.QRect(0, 0, 400, 270))
        self.frame.setStyleSheet("\n"
"background-color: rgb(42, 131, 162)")
        self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame.setObjectName("frame")
        # Confirm button with normal/hover/pressed styling.
        self.pushButton = QtWidgets.QPushButton(self.frame)
        self.pushButton.setGeometry(QtCore.QRect(150, 140, 95, 35))
        self.pushButton.setStyleSheet("\n"
"\n"
"QPushButton{color: rgb(0,0,0);\n"
"border-image: url(:/new/res/43.gif);\n"
"font: 25 12pt \"微软雅黑 Light\";\n"
"border:2px groove gray;\n"
"border-radius:15px;\n"
"padding:4px 4px;\n"
"}\n"
"QPushButton:hover{\n"
"background-color: rgb(22, 94, 131);\n"
"border-image: url(:/new/res/46.gif);color: rgb(0, 255, 255);\n"
"font: 25 12pt \"微软雅黑 Light\";\n"
"border:2px groove gray;\n"
"border-radius:15px;\n"
"padding:4px 4px;}\n"
"QPushButton:pressed{\n"
"border-image: url(:/new/res/46.gif);color: rgb(0, 255, 255);\n"
"font: 25 12pt \"微软雅黑 Light\";\n"
"border:2px groove gray;\n"
"border-radius:15px;\n"
"padding:4px 4px;}\n"
"")
        self.pushButton.setObjectName("pushButton")
        # Hint label below the input row.
        self.label_2 = QtWidgets.QLabel(self.frame)
        self.label_2.setGeometry(QtCore.QRect(30, 180, 341, 31))
        self.label_2.setStyleSheet("color: rgb(211, 70, 0);\n"
"font: 10pt \"Adobe 黑体 Std R\";")
        self.label_2.setObjectName("label_2")
        # Horizontal row: prompt label + stream-address line edit.
        self.layoutWidget = QtWidgets.QWidget(self.frame)
        self.layoutWidget.setGeometry(QtCore.QRect(70, 80, 266, 35))
        self.layoutWidget.setObjectName("layoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label = QtWidgets.QLabel(self.layoutWidget)
        self.label.setStyleSheet("color: rgb(255, 255, 255);\n"
"font: 12pt \"Adobe 黑体 Std R\";")
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)
        self.lineEdit = QtWidgets.QLineEdit(self.layoutWidget)
        self.lineEdit.setStyleSheet("color: rgb(255, 255, 255);\n"
"border-image:url(:/new/res/Rectangle 5.png);\n"
"font: 9pt \"Adobe Caslon Pro\";\n"
"font: 25 12pt \"微软雅黑 Light\";")
        self.lineEdit.setObjectName("lineEdit")
        self.horizontalLayout.addWidget(self.lineEdit)
        self.retranslateUi(Setting)
        QtCore.QMetaObject.connectSlotsByName(Setting)
    def retranslateUi(self, Setting):
        # Apply translatable UI strings (Chinese originals preserved:
        # button "OK", hint "0 means local camera (default)",
        # label "video stream address").
        _translate = QtCore.QCoreApplication.translate
        Setting.setWindowTitle(_translate("Setting", "Form"))
        self.pushButton.setText(_translate("Setting", "确定"))
        self.label_2.setText(_translate("Setting", "注:输入0代表本地摄像头(默认为本地摄像头)"))
        self.label.setText(_translate("Setting", "视频流地址"))
import photo_rc
|
# -*- coding: utf-8 -*-
class Welcome_Meli():
    """Page-object constants for the MercadoLibre Android onboarding
    (welcome) screens: resource ids plus the expected Spanish texts."""
    # Title/subtitle labels of the carousel steps.
    lbl_titulo_id = "com.mercadolibre:id/home_onboarding_step_title"
    lbl_subtitulo_id = "com.mercadolibre:id/home_onboarding_step_subtitle"
    # Expected texts for the three carousel steps.
    texto_mensaje_1 = "Libera tus ideas"
    texto_subtitulo_1 = "Estás en el lugar perfecto para encontrar lo que buscas."
    texto_mensaje_2 = "Te cuidamos, siempre"
    texto_subtitulo_2 = "Con Mercado Pago, protegemos tu dinero hasta que estés feliz con tu compra. ¡Y tienes hasta 12 cuotas sin interés!"
    texto_mensaje_3 = "Llegamos a donde estás"
    texto_subtitulo_3 = "A la puerta de tu casa o al otro lado del país, con Mercado Envíos llevamos tu compra a donde nos digas."
    # Registration section.
    lbl_tituloRegistro_id = "com.mercadolibre:id/home_onboarding_register_title"
    msj_tituloRegistro = "¿Qué estás esperando?"
    lbl_subtituloRegistro_id = "com.mercadolibre:id/home_onboarding_register_subtitle"
    msj_subtituloRegistro = "¡Es gratis!"
    btn_faceRegister_id = "com.mercadolibre:id/home_onboarding_facebook_register_button"
    msj_facebookRegister = "Registrarme con Facebook"
    btn_EmilRegister_id = "com.mercadolibre:id/home_onboarding_email_register_button"
    # NOTE(review): this re-assignment overwrites the Facebook text above;
    # the name was probably meant to be msj_emailRegister. Left unchanged
    # because tests may reference msj_facebookRegister -- confirm and rename.
    msj_facebookRegister = "Registrarme con mi e-mail"
    btn_yaRegistrado_id = "com.mercadolibre:id/home_onboarding_already_has_account_button"
    msj_yaRegistrado = "Ya tengo cuenta"
    lbl_terminos_id = "com.mercadolibre:id/home_onboarding_step_terms_text"
|
def base_site(request):
    """
    Inject few template variables that we need in base template.

    Provides STATIC_URL (settings value, or '/static/' fallback) and
    BASE_SIDEBAR (YUI grid class: 'yui-t2' for left, 'yui-t4' for right).
    """
    from django.conf import settings
    context = {
        # Fallback applies only when settings has no STATIC_URL.
        'STATIC_URL': getattr(settings, 'STATIC_URL', '/static/'),
        'BASE_SIDEBAR': 'yui-t2',
    }
    if not hasattr(settings, 'BASE_SIDEBAR'):
        return context
    if settings.BASE_SIDEBAR == 'left':
        context['BASE_SIDEBAR'] = 'yui-t2'
    if settings.BASE_SIDEBAR == 'right':
        context['BASE_SIDEBAR'] = 'yui-t4'
    return context
|
# as always we start with importing the libraries that we are going to need
# brandonrose.com/clustering
import re, nltk
from pandas import DataFrame
import numpy as numpy
# to conduct the analysis we are going to need afew other libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from scipy.cluster.hierarchy import linkage, dendrogram
# matplotlib for viz
import matplotlib.pyplot as plt
# load in texts: five English translations of the Dao De Jing.
with open('dereklin.txt', 'r', encoding='utf8') as rf:
    lintext = rf.read()
with open('stephenmitchell.txt', 'r', encoding='utf8') as rf:
    mitchelltext = rf.read()
with open('jameslegge.txt', 'r', encoding='utf8') as rf:
    leggetext = rf.read()
with open('giafufeng.txt', 'r', encoding='utf8') as rf:
    fengtext = rf.read()
with open('brucelinnell.txt', 'r', encoding='utf8') as rf:
    linnelltext = rf.read()
# concatenate it for easy use
totalDao = lintext + mitchelltext + leggetext + fengtext + linnelltext
# break into each author section -- "82" is presumably a chapter marker that
# ends each translation; TODO confirm against the source texts.
daos = re.split(r"82",totalDao)
#print(len(daos))
# containers for information
labels = []
author = []
i = 0
# go through the items in the list, assigning an author label per section.
# NOTE(review): the chain below is not elif-based, and `author != None` is
# always true (author starts as [] and then holds strings), so sections
# past index 4 reuse the "Linnell" label -- confirm daos has exactly 5 parts.
for text in (daos):
    if i == 0:
        author = "Lin"
    if i == 1:
        author = "Mitchell"
    if i == 2:
        author = "Legge"
    if i == 3:
        author = "Feng"
    if i == 4:
        author = "Linnell"
    if author != None:
        currentauthor = i
        labels.append(author)
    i = i + 1
#print(author)
# Term-frequency vectors (no idf), capped at the 1000 most frequent terms.
vectorizer = TfidfVectorizer(max_features=1000, use_idf=False)
count_matrix = vectorizer.fit_transform(daos)
# get the distances between all of the documents.
distances = euclidean_distances(count_matrix)
# first we can group documents together based on which ones are closest
# we will use the "ward" algorithm to do it
linkages = linkage(distances, 'ward')
# we will use scipy's dendogram function
dendrogram(linkages, labels=labels, orientation='right', leaf_font_size=8, leaf_rotation=45)
# adjust how it looks a bit
plt.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)
plt.tight_layout()
# author_dictionary = {"['Lin']": 0, "['Mitchell']": 1, "['Legge']": 2, "['Feng']": 3, "['Linnell']": 4}
# print(author_dictionary)
# color_dictionary = {0: "blue", 1: "red", 2: "green", 3: "black", 4: "purple"}
# # create an axis object to manipulate the color of our labels
# ax = plt.gca()
# # get the labels
# labels = ax.get_ymajorticklabels()
# print(labels)
# # interate through the labels and change their colors
# for label in labels:
#     #label.set_color(color_dictionary[label.get_text()])
#     label_text = label.get_text()
#     print("101", label_text)
#     author = author_dictionary[label_text]
#     color = color_dictionary[author]
#     label.set_color(color)
plt.show()
|
from algorithm import quick_sort
# Demo: sort a small list in place and report the operation count.
data = [5, 8, 3, 9, 0, 3, 6, 7]
print(data)
# quick_sort.sort appears to sort in place and return an operation count --
# TODO confirm against the algorithm module.
n_ops = quick_sort.sort(data)
print(data)
print("n_ops: %d" % (n_ops))
|
#! /usr/bin/env python
from __future__ import print_function, division
from collections import defaultdict
from copy import copy
"""
1. input color of current tile (0 = black, 1 = white) (all tiles start black)
2. program outputs color to paint tile (0 = black, 1 = white)
3. program outputs direction to turn (0 = 90deg left, 1 = 90deg right)
4. move forward one panel after turning (starts facing up)
"""
# Turn table: current heading -> {turn code (0 = left, 1 = right) -> new heading}.
next_direction = {
    "up": {0: "left", 1: "right"},
    "left": {0: "down", 1: "up"},
    "down": {0: "right", 1: "left"},
    "right": {0: "up", 1: "down"}
}
# Unit step (dx, dy) per heading; "up" decreases y, so y grows downward.
direction_vector = {
    "up": (0, -1),
    "down": (0, 1),
    "left": (-1, 0),
    "right": (1, 0)
}
class PaintingRobot(object):
    """Hull-painting robot driven by an Intcode program (AoC 2019 day 11).

    The robot reads the color under it, asks the program for a color to
    paint and a direction to turn, then steps forward one tile.
    """
    def __init__(self, raw_intcodes):
        self.computer = IntcodeComputer(raw_intcodes)
        self.location = (0, 0)
        self.direction = "up"
        # Sparse grid: only tiles that were painted at least once appear.
        self.painted_tiles = defaultdict(int)  # default 0 (black)
    def count_painted_tiles(self):
        """return number of tiles painted at least once"""
        return len(self.painted_tiles)
    def step(self):
        """Run one sense/paint/turn/move cycle; return False once halted."""
        # add appropriate inputs: the color of the current tile
        self.computer.add_to_input_queue([self.painted_tiles[self.location]])
        _, pc = self.computer.run_program()
        # read outputs: (color to paint, direction to turn)
        outputs = self.computer.get_outputs()
        color = outputs[0]
        turn = outputs[1]
        # update internal state: paint, turn, then step one tile forward
        self.painted_tiles[self.location] = color
        self.direction = next_direction[self.direction][turn]
        self.location = (self.location[0] + direction_vector[self.direction][0],
                         self.location[1] + direction_vector[self.direction][1])
        # if pc is None, program halted and painting is complete
        return pc is not None
    def run(self, start_on_white=False):
        """Step until the program halts; part 2 starts on a white tile."""
        if start_on_white:
            self.painted_tiles[self.location] = 1
        while self.step():
            pass
        print("finished painting {} tiles".format(self.count_painted_tiles()))
    def display(self):
        """Render the painted area to stdout ('o' = white, ' ' = black)."""
        # determine dimensions of painted area
        xmin = xmax = ymin = ymax = 0
        for x, y in self.painted_tiles:
            xmin = min(x, xmin)
            xmax = max(x, xmax)
            ymin = min(y, ymin)
            ymax = max(y, ymax)
        for y in range(ymin, ymax + 1):
            row = []
            for x in range(xmin, xmax + 1):
                if ((x, y) in self.painted_tiles
                        and self.painted_tiles[(x, y)] == 1):  # white
                    row.append("o")
                else:
                    row.append(" ")
            print("".join(row))
class IntcodeComputer(object):
    """A resumable Intcode interpreter with input and output queues.

    run_program() executes until the program halts (opcode 99) or blocks
    waiting on an empty input queue, so callers can feed inputs
    incrementally and resume execution.
    """

    def __init__(self, raw_intcode_list, initial_inputs=None):
        """Create a computer from a list of raw (string or int) intcodes.

        Args:
            raw_intcode_list: iterable of intcodes, converted with int().
            initial_inputs: optional iterable of initial input values.
        """
        self.pc = 0  # program counter
        self.rb = 0  # relative base
        # BUG FIX: the previous default `initial_inputs=[]` was a shared
        # mutable default; add_to_input_queue's `+=` mutated it so inputs
        # leaked between instances. Copy the caller's list instead.
        self.input_queue = [] if initial_inputs is None else list(initial_inputs)
        self.output_queue = []
        self.intcodes = self.intcodes_from_list(raw_intcode_list)

    def intcodes_from_list(self, intcode_list):
        """generate a dict of index, intcode pairs from a list of intcodes.
        Note: this format was chosen because I didn't know if any operations
        would result in values being stored at addresses outside of the
        predefined "program space" and a dict would handle this situation
        gracefully. In retrospect, this was not necessary and a list would have
        worked and been simpler.
        """
        intcodes = defaultdict(int)  # return 0 by default
        for addr, code in enumerate(intcode_list):
            intcodes[addr] = int(code)
        return intcodes

    def print_intcodes(self):
        """print intcodes as a comma-separated list"""
        # does not handle sparse "programs"
        # BUG FIX: previously printed the dict_values view, not a
        # comma-separated list as documented.
        print(",".join(str(code) for code in self.intcodes.values()))

    def add_to_input_queue(self, inlist):
        """Append the values in *inlist* to the pending input queue."""
        self.input_queue += inlist

    def get_outputs(self):
        """Return and clear all values emitted since the last call."""
        outlist = copy(self.output_queue)
        self.output_queue = []
        return outlist

    def run_program(self):
        """run intcodes, which are stored as a dict of step: intcode pairs
        intcodes encode the operation as well as the parameter mode. The two least
        significant digits are the operation. The most significant digit(s) are the
        parameter mode, one digit per parameter, read right-to-left (hundreds place
        is 1st parameter, thousands place is 2nd parameter, etc.)
        parameter mode 0: parameter is a position (an address)
        parameter mode 1: parameter is immediate (a literal value)
        parameter mode 2: parameter is relative (address offset by self.rb)

        Returns:
            (intcodes, pc): pc is None when the program halted (opcode 99),
            or the current program counter when execution paused waiting
            for input (opcode 3 with an empty input queue).
        """
        # number of parameters each opcode consumes
        num_params = {
            1: 3,   # add
            2: 3,   # multiply
            3: 1,   # read input
            4: 1,   # write output
            5: 2,   # jump-if-true
            6: 2,   # jump-if-false
            7: 3,   # less-than
            8: 3,   # equals
            9: 1,   # adjust relative base
            98: 0,  # debug: dump program
            99: 0   # halt
        }

        def decode_intcode(intcode):
            """decode intcode into its operation and the parameter modes.
            returns tuple of op and list of the parameter modes, in order (mode for
            first parameter is first element in list)
            """
            op = intcode % 100
            param_modes = intcode // 100
            param_mode_list = []
            for _ in range(num_params[op]):
                param_mode_list.append(param_modes % 10)
                param_modes //= 10
            return op, param_mode_list

        def check_remaining_opcodes():
            # guard against an instruction whose parameters run off the
            # end of the originally loaded program
            if self.pc + num_params[op] > last:
                raise Exception("out of opcodes")

        def get_parameters():
            # Each returned parameter is an ADDRESS into self.intcodes:
            # - position mode stores the address held in the parameter slot,
            # - immediate mode stores the slot's own address (so one more
            #   dereference yields the literal),
            # - relative mode offsets the stored address by the relative base.
            # Callers then read self.intcodes[param] (or write to it).
            parameters = []
            for n in range(num_params[op]):
                if param_modes[n] == 0:
                    # position mode
                    parameters.append(self.intcodes[self.pc + n + 1])
                elif param_modes[n] == 1:
                    # immediate (literal) mode
                    parameters.append(self.pc + n + 1)
                elif param_modes[n] == 2:
                    # relative mode
                    parameters.append(self.rb + self.intcodes[self.pc + n + 1])
                else:
                    raise Exception("invalid parameter mode: {}"
                                    .format(param_modes[n]))
            return parameters

        # NOTE: `last` is fixed to the initially loaded program length even
        # though the defaultdict can grow during execution; a well-formed
        # program halts via opcode 99 before this matters.
        last = len(self.intcodes) - 1
        while self.pc <= last:
            op, param_modes = decode_intcode(self.intcodes[self.pc])
            if op == 1:
                # add
                check_remaining_opcodes()
                args = get_parameters()
                self.intcodes[args[2]] = self.intcodes[args[0]] + self.intcodes[args[1]]
                self.pc += num_params[op] + 1
            elif op == 2:
                # multiply
                check_remaining_opcodes()
                args = get_parameters()
                self.intcodes[args[2]] = self.intcodes[args[0]] * self.intcodes[args[1]]
                self.pc += num_params[op] + 1
            elif op == 3:
                # store input at address of parameter
                check_remaining_opcodes()
                args = get_parameters()
                # if no input is available, pause so the caller can supply one
                if len(self.input_queue) == 0:
                    return self.intcodes, self.pc
                self.intcodes[args[0]] = int(self.input_queue.pop(0))
                self.pc += num_params[op] + 1
            elif op == 4:
                # emit value at address of parameter
                check_remaining_opcodes()
                args = get_parameters()
                self.output_queue.append(self.intcodes[args[0]])
                self.pc += num_params[op] + 1
            elif op == 5:
                # jump if true (jump address in 2nd parameter)
                check_remaining_opcodes()
                args = get_parameters()
                if self.intcodes[args[0]]:
                    self.pc = self.intcodes[args[1]]
                else:
                    self.pc += num_params[op] + 1
            elif op == 6:
                # jump if false (jump address in 2nd parameter)
                check_remaining_opcodes()
                args = get_parameters()
                if self.intcodes[args[0]]:
                    self.pc += num_params[op] + 1
                else:
                    self.pc = self.intcodes[args[1]]
            elif op == 7:
                # less than (arg1 < arg2 ? arg3 <- 1 : arg3 <- 0)
                check_remaining_opcodes()
                args = get_parameters()
                if self.intcodes[args[0]] < self.intcodes[args[1]]:
                    self.intcodes[args[2]] = 1
                else:
                    self.intcodes[args[2]] = 0
                self.pc += num_params[op] + 1
            elif op == 8:
                # equals (arg1 == arg2 ? arg3 <- 1 : arg3 <- 0)
                check_remaining_opcodes()
                args = get_parameters()
                if self.intcodes[args[0]] == self.intcodes[args[1]]:
                    self.intcodes[args[2]] = 1
                else:
                    self.intcodes[args[2]] = 0
                self.pc += num_params[op] + 1
            elif op == 9:
                # adjust relative base by parameter
                check_remaining_opcodes()
                args = get_parameters()
                self.rb += self.intcodes[args[0]]
                self.pc += num_params[op] + 1
            elif op == 98:
                # debug: print entire program state
                check_remaining_opcodes()
                print("program counter: {}".format(self.pc))
                print("program:")
                print(self.intcodes)
                self.pc += num_params[op] + 1
            elif op == 99:
                # end program
                return self.intcodes, None
            else:
                # invalid
                raise Exception("invalid opcode: {}".format(self.intcodes[self.pc]))
        # should never reach this point (only if end is reached before program
        # stop instruction)
        raise Exception("ran out of intcodes before program stop reached")
def read_input(filename):
    """read input file and return list of raw intcodes (first line, comma-split)."""
    with open(filename, "r") as infile:
        first_line = infile.readline()
    return first_line.strip().split(",")
if __name__ == "__main__":
    # Part 1: start on a black tile; count how many tiles the robot touches.
    robot = PaintingRobot(read_input("input.txt"))
    print("part 1")
    robot.run()
    # Part 2: start on a white tile with a fresh robot, then render the
    # painted registration identifier.
    print("part 2")
    robot = PaintingRobot(read_input("input.txt"))
    robot.run(start_on_white=True)
    robot.display()
|
import numpy as np # linear algebra
import os
import scipy.ndimage
import matplotlib.pyplot as plt
import SimpleITK as sitk
from skimage import measure, morphology
from scipy.ndimage.morphology import binary_dilation,generate_binary_structure
import pydicom as dicom
def load_scan(path):
    """Load all DICOM slices (*.dcm files) in *path*, sorted along z.

    Handles series containing several interleaved acquisitions at the same z
    position by keeping only the first acquisition. Also stamps a computed
    SliceThickness onto every slice.

    Returns:
        list of pydicom datasets sorted by ImagePositionPatient[2].
    """
    slices = []
    for s in os.listdir(path):
        if s.endswith('.dcm'):
            slices.append(dicom.read_file(path + '/' + s, force = True))
    assert (len(slices) > 0)
    slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))
    # Duplicate z positions mean several acquisitions were interleaved:
    # count how many (sec_num), keep only the first len/sec_num slices by
    # InstanceNumber, then re-sort by z.
    if slices[0].ImagePositionPatient[2] == slices[1].ImagePositionPatient[2]:
        sec_num = 2;
        while slices[0].ImagePositionPatient[2] == slices[sec_num].ImagePositionPatient[2]:
            sec_num = sec_num+1;
        slice_num = int(len(slices) / sec_num)
        slices.sort(key = lambda x:float(x.InstanceNumber))
        slices = slices[0:slice_num]
        slices.sort(key = lambda x:float(x.ImagePositionPatient[2]))
    try:
        slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except:
        # deliberate best-effort fallback when ImagePositionPatient is
        # missing or malformed on this series
        slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
    for s in slices:
        s.SliceThickness = slice_thickness
    return slices
def get_pixels_hu(slices):
    """Convert a sorted DICOM series into a Hounsfield-unit volume.

    Args:
        slices: list of pydicom datasets sorted along z (as from load_scan).

    Returns:
        (image, origin, spacing): int16 HU volume stacked as (z, y, x),
        the scan origin as stored in ImagePositionPatient (order as stored
        by DICOM — TODO confirm axis order vs. spacing), and voxel spacing
        as (z, y, x); both as float32 arrays.
    """
    image = np.stack([s.pixel_array for s in slices])
    # Convert to int16; values should always be low enough (< 32k)
    image = image.astype(np.int16)
    # Convert to Hounsfield units (HU) with each slice's linear rescale
    for slice_number in range(len(slices)):
        intercept = slices[slice_number].RescaleIntercept
        slope = slices[slice_number].RescaleSlope
        if slope != 1:
            # non-unit slope: scale in float, then truncate back to int16
            image[slice_number] = slope * image[slice_number].astype(np.float64)
            image[slice_number] = image[slice_number].astype(np.int16)
        image[slice_number] += np.int16(intercept)
    origin = slices[0].ImagePositionPatient
    # NOTE(review): relies on pydicom's DSfloat.original_string attribute —
    # confirm it exists in the pydicom version in use.
    origin_str = [float(origin[0].original_string), float(origin[1].original_string), float(origin[2].original_string)]
    zspacing = slices[0].SliceThickness
    xyspacing = slices[0].PixelSpacing
    # spacing ordered (z, y, x) to match the stacked image axes
    spacing = [float(zspacing), float(xyspacing[0]), float(xyspacing[1])]
    return np.array(image, dtype=np.int16), np.array(origin_str, dtype=np.float32), np.array(spacing, dtype=np.float32)
def binarize_per_slice(image, spacing, intensity_th=-300, sigma=1, area_th=20, eccen_th=0.99, bg_patch_size=10):
    """Threshold each axial slice into a candidate foreground (lung) mask.

    Each slice is Gaussian-smoothed and thresholded at *intensity_th*;
    connected components are then kept only if their physical area exceeds
    *area_th* and their eccentricity is below *eccen_th*.

    Args:
        image: 3D array (z, y, x) of intensities (HU); slices are assumed
            square — image.shape[1] is used for both axes (TODO confirm).
        spacing: per-axis voxel size (z, y, x); indices 1 and 2 convert
            pixel counts into physical areas.
        intensity_th: threshold below which a smoothed voxel is foreground.
        sigma: Gaussian smoothing width.
        area_th: minimum component area (in spacing units squared).
        eccen_th: maximum allowed component eccentricity.
        bg_patch_size: side length of the corner patches inspected to
            detect padded (out-of-FOV) corners.

    Returns:
        3D boolean mask with the same shape as *image*.
    """
    bw = np.zeros(image.shape, dtype=bool)
    # prepare a circular FOV mask, with all corner values set to nan
    image_size = image.shape[1]
    grid_axis = np.linspace(-image_size/2+0.5, image_size/2-0.5, image_size)
    x, y = np.meshgrid(grid_axis, grid_axis)
    d = (x**2+y**2)**0.5
    nan_mask = (d<image_size/2).astype(float)
    nan_mask[nan_mask == 0] = np.nan
    for i in range(image.shape[0]):
        # If any corner patch is constant, the slice has padded corners:
        # nan-mask the slice before Gaussian filtering so the padding does
        # not bleed into the threshold result.
        if len(np.unique(image[i, 0:bg_patch_size, 0:bg_patch_size])) == 1 or\
           len(np.unique(image[i, -bg_patch_size:, -bg_patch_size:])) == 1 or\
           len(np.unique(image[i, 0:bg_patch_size, -bg_patch_size:])) == 1 or\
           len(np.unique(image[i, -bg_patch_size:, 0:bg_patch_size])) == 1:
            current_bw = scipy.ndimage.filters.gaussian_filter(np.multiply(image[i].astype('float32'), nan_mask), sigma, truncate=2.0) < intensity_th
        else:
            current_bw = scipy.ndimage.filters.gaussian_filter(image[i].astype('float32'), sigma, truncate=2.0) < intensity_th
        # keep only components that are large enough and round enough
        label = measure.label(current_bw)
        properties = measure.regionprops(label)
        valid_label = set()
        for prop in properties:
            if prop.area * spacing[1] * spacing[2] > area_th and prop.eccentricity < eccen_th:
                valid_label.add(prop.label)
        current_bw = np.in1d(label, list(valid_label)).reshape(label.shape)
        bw[i] = current_bw
    return bw
def all_slice_analysis(bw, spacing, cut_num=0, vol_limit=[0.68, 7.5], area_th=2e3, dist_th=50):
    """Filter a 3D candidate mask down to components that look like lungs.

    Removes background components touching the volume border, components
    with implausible physical volume, and components too far from the
    central (body) axis. Optionally ignores the top *cut_num* slices during
    analysis and stitches them back in afterwards.

    NOTE(review): vol_limit is a mutable default argument — callers must
    not mutate it.

    Args:
        bw: 3D boolean candidate mask (z, y, x).
        spacing: per-axis voxel size; spacing.prod() converts voxel counts
            to physical volume.
        cut_num: number of top slices to exclude from analysis.
        vol_limit: [min, max] acceptable component volume (times 1e6).
        area_th: slice-area threshold used when averaging axis distances.
        dist_th: maximum mean distance of a component from the center axis.

    Returns:
        (bw, n): the filtered mask and the number of components kept.
    """
    # in some cases, several top layers need to be removed first
    if cut_num > 0:
        bw0 = np.copy(bw)
        bw[-cut_num:] = False
    label = measure.label(bw, connectivity=1)
    # remove components attached to the corners / edge midpoints of the
    # first and last analyzed slices — those are background
    mid = int(label.shape[2] / 2)
    bg_label = set([label[0, 0, 0], label[0, 0, -1], label[0, -1, 0], label[0, -1, -1], \
                    label[-1-cut_num, 0, 0], label[-1-cut_num, 0, -1], label[-1-cut_num, -1, 0], label[-1-cut_num, -1, -1], \
                    label[0, 0, mid], label[0, -1, mid], label[0, mid, 0], label[0, mid, -1],\
                    label[-1-cut_num, 0, mid], label[-1-cut_num, -1, mid], label[-1-cut_num, mid, 0], label[-1-cut_num, mid, -1]])
    for l in bg_label:
        label[label == l] = 0
    # select components based on physical volume
    properties = measure.regionprops(label)
    for prop in properties:
        if prop.area * spacing.prod() < vol_limit[0] * 1e6 or prop.area * spacing.prod() > vol_limit[1] * 1e6:
            label[label == prop.label] = 0
    # prepare an in-plane distance map (to the central axis) for further analysis
    x_axis = np.linspace(-label.shape[1]/2+0.5, label.shape[1]/2-0.5, label.shape[1]) * spacing[1]
    y_axis = np.linspace(-label.shape[2]/2+0.5, label.shape[2]/2-0.5, label.shape[2]) * spacing[2]
    x, y = np.meshgrid(x_axis, y_axis)
    d = (x**2+y**2)**0.5
    vols = measure.regionprops(label)
    valid_label = set()
    # select components based on their area and distance to center axis on all slices
    for vol in vols:
        single_vol = label == vol.label
        slice_area = np.zeros(label.shape[0])
        min_distance = np.zeros(label.shape[0])
        for i in range(label.shape[0]):
            slice_area[i] = np.sum(single_vol[i]) * np.prod(spacing[1:3])
            # min distance of the component to the axis on this slice
            # (np.max(d) fills pixels outside the component)
            min_distance[i] = np.min(single_vol[i] * d + (1 - single_vol[i]) * np.max(d))
        if np.average([min_distance[i] for i in range(label.shape[0]) if slice_area[i] > area_th]) < dist_th:
            valid_label.add(vol.label)
    bw = np.in1d(label, list(valid_label)).reshape(label.shape)
    # fill back the parts removed earlier
    if cut_num > 0:
        # bw1 is bw with removed slices, bw2 is a dilated version of bw, part of their intersection is returned as final mask
        bw1 = np.copy(bw)
        bw1[-cut_num:] = bw0[-cut_num:]
        bw2 = np.copy(bw)
        bw2 = scipy.ndimage.binary_dilation(bw2, iterations=cut_num)
        bw3 = bw1 & bw2
        # keep only the components of bw3 that overlap a kept component of bw
        label = measure.label(bw, connectivity=1)
        label3 = measure.label(bw3, connectivity=1)
        l_list = list(set(np.unique(label)) - {0})
        valid_l3 = set()
        for l in l_list:
            indices = np.nonzero(label==l)
            l3 = label3[indices[0][0], indices[1][0], indices[2][0]]
            if l3 > 0:
                valid_l3.add(l3)
        bw = np.in1d(label3, list(valid_l3)).reshape(label3.shape)
    return bw, len(valid_label)
def fill_hole(bw):
    """Fill 3D holes: any background component that does not touch a corner
    or edge midpoint of the first/last slice is considered a hole."""
    comp = measure.label(~bw)
    mid = comp.shape[2] // 2
    # background = components reachable from the 16 border probe points
    probe_points = ((0, 0), (0, -1), (-1, 0), (-1, -1),
                    (0, mid), (-1, mid), (mid, 0), (mid, -1))
    border_labels = {comp[z, yy, xx]
                     for z in (0, -1)
                     for yy, xx in probe_points}
    # everything that is NOT border-connected background becomes foreground
    return ~np.in1d(comp, list(border_labels)).reshape(comp.shape)
def two_lung_only(bw, spacing, max_iter=22, max_ratio=4.8):
    """Split a binary lung mask into two separate lung components.

    Erodes the mask until the two largest connected components have an
    area ratio below *max_ratio*, assigns every voxel of the original
    mask to the nearer component, then cleans each lung slice-by-slice.

    Args:
        bw: 3D boolean lung mask (modified during processing).
        spacing: per-axis voxel spacing used for distance weighting.
        max_iter: maximum number of erosion passes before giving up.
        max_ratio: largest acceptable size ratio between the two components.

    Returns:
        (bw1, bw2, bw): first lung, second lung, and their union. If the
        lungs could not be separated, bw1 is the whole mask and bw2 empty.
    """
    def extract_main(bw, cover=0.95):
        # Per slice: keep the convex hulls of the largest regions that
        # together cover at least `cover` of the slice's foreground area.
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            properties.sort(key=lambda x: x.area, reverse=True)
            area = [prop.area for prop in properties]
            total_area = np.sum(area)  # hoisted: loop-invariant
            count = 0
            covered = 0  # renamed from `sum` (shadowed the builtin)
            while covered < total_area * cover:
                covered = covered + area[count]
                count = count + 1
            keep = np.zeros(current_slice.shape, dtype=bool)  # renamed from `filter`
            for j in range(count):
                bb = properties[j].bbox
                keep[bb[0]:bb[2], bb[1]:bb[3]] = keep[bb[0]:bb[2], bb[1]:bb[3]] | properties[j].convex_image
            bw[i] = bw[i] & keep
        # then keep only the single largest 3D component
        label = measure.label(bw)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        bw = label == properties[0].label
        return bw

    def fill_2d_hole(bw):
        # fill holes slice-by-slice using each region's filled image
        for i in range(bw.shape[0]):
            current_slice = bw[i]
            label = measure.label(current_slice)
            properties = measure.regionprops(label)
            for prop in properties:
                bb = prop.bbox
                current_slice[bb[0]:bb[2], bb[1]:bb[3]] = current_slice[bb[0]:bb[2], bb[1]:bb[3]] | prop.filled_image
            bw[i] = current_slice
        return bw

    found_flag = False
    iter_count = 0
    bw0 = np.copy(bw)
    # erode until the two largest components look like two separate lungs
    while not found_flag and iter_count < max_iter:
        label = measure.label(bw, connectivity=2)
        properties = measure.regionprops(label)
        properties.sort(key=lambda x: x.area, reverse=True)
        if len(properties) > 1 and properties[0].area / properties[1].area < max_ratio:
            found_flag = True
            bw1 = label == properties[0].label
            bw2 = label == properties[1].label
        else:
            bw = scipy.ndimage.binary_erosion(bw)
            iter_count = iter_count + 1
    if found_flag:
        # assign every voxel of the original mask to the nearer component
        d1 = scipy.ndimage.morphology.distance_transform_edt(bw1 == False, sampling=spacing)
        d2 = scipy.ndimage.morphology.distance_transform_edt(bw2 == False, sampling=spacing)
        bw1 = bw0 & (d1 < d2)
        bw2 = bw0 & (d1 > d2)
        bw1 = extract_main(bw1)
        bw2 = extract_main(bw2)
    else:
        # could not separate the lungs: report everything as the first lung
        bw1 = bw0
        bw2 = np.zeros(bw.shape).astype('bool')
    bw1 = fill_2d_hole(bw1)
    bw2 = fill_2d_hole(bw2)
    bw = bw1 | bw2
    return bw1, bw2, bw
def load_itk_image(filename):
    """Read an image with SimpleITK.

    Returns (voxel_array, origin, spacing) with origin/spacing reversed
    from SimpleITK's (x, y, z) into the array's (z, y, x) axis order.
    """
    itk_img = sitk.ReadImage(filename)
    voxel_array = sitk.GetArrayFromImage(itk_img)
    origin_zyx = np.array(itk_img.GetOrigin()[::-1])
    spacing_zyx = np.array(itk_img.GetSpacing()[::-1])
    return voxel_array, origin_zyx, spacing_zyx
def save_itk(image, origin, spacing, filename):
    """Write *image* to *filename* via SimpleITK (compressed).

    Args:
        image: numpy array in (z, y, x) order.
        origin: scan origin; tuple means already in SimpleITK (x, y, z)
            order, list or numpy array means (z, y, x) and gets reversed.
        spacing: voxel spacing, same convention as *origin*.
        filename: output path; format inferred from the extension.
    """
    def _as_xyz_tuple(value):
        # normalize tuple/list/array into an (x, y, z) tuple;
        # isinstance replaces the previous `type(x) != tuple` comparisons
        if isinstance(value, tuple):
            return value
        if isinstance(value, list):
            return tuple(reversed(value))
        return tuple(reversed(value.tolist()))

    itkimage = sitk.GetImageFromArray(image, isVector=False)
    itkimage.SetSpacing(_as_xyz_tuple(spacing))
    itkimage.SetOrigin(_as_xyz_tuple(origin))
    sitk.WriteImage(itkimage, filename, True)  # True = use compression
def dilation_mask(bw):
    """Dilate each 2D slice of *bw* in place (2 iterations, 4-connectivity).

    Returns the same array for convenience.
    """
    kernel = generate_binary_structure(2, 1)
    for idx, plane in enumerate(bw):
        bw[idx] = binary_dilation(plane, structure=kernel, iterations=2)
    return bw
def step1_python(case_path):
    """Run the full lung-segmentation preprocessing on one case.

    Loads the volume via SimpleITK, pads non-square slices to 512x512 with
    air (-1000 HU), binarizes slice-wise, filters components, fills holes,
    splits into two lungs, and crops everything back to the original shape.

    Args:
        case_path: path to an ITK-readable image file.

    Returns:
        (case_pixels, bw1, bw2, bw_copy, origin, spacing): the HU volume,
        the two lung masks, the combined pre-split mask, and the scan
        origin/spacing (z, y, x).
    """
    case_pixels, origin, spacing = load_itk_image(case_path)
    shape_original = case_pixels.shape
    # padding offsets that center the original slices inside 512x512
    start_id_1 = (512 - shape_original[1])//2
    start_id_2 = (512 - shape_original[2])//2
    if not shape_original[1] == shape_original[2]:
        new_case_pixels = np.ones([shape_original[0],512,512]) * (-1000) # pad with air (HU)
        new_case_pixels[:,start_id_1:shape_original[1]+start_id_1,start_id_2:start_id_2+shape_original[2]] = case_pixels
        case_pixels = new_case_pixels
    bw = binarize_per_slice(case_pixels, spacing)
    # Retry the component analysis with progressively more top slices cut
    # away until it reports at least one valid component (flag != 0).
    flag = 0
    cut_num = 0
    cut_step = 3
    bw0 = np.copy(bw)
    while flag == 0 and cut_num < bw.shape[0]:
        bw = np.copy(bw0)
        bw, flag = all_slice_analysis(bw, spacing, cut_num=cut_num, vol_limit=[0.68,7.5])
        if flag != 0:
            break
        else:
            cut_num = cut_num + cut_step
    bw = fill_hole(bw)
    bw_copy = bw.copy()  # keep the combined mask before lung separation
    bw1, bw2, bw = two_lung_only(bw, spacing)
    # crop the padded volume and masks back to the original slice shape
    if not shape_original[1] == shape_original[2]:
        case_pixels = case_pixels[:,start_id_1:start_id_1+shape_original[1],start_id_2:start_id_2+shape_original[2]]
        bw1 = bw1[:,start_id_1:start_id_1+shape_original[1],start_id_2:start_id_2+shape_original[2]]
        bw2 = bw2[:,start_id_1:start_id_1+shape_original[1],start_id_2:start_id_2+shape_original[2]]
        bw_copy = bw_copy[:,start_id_1:start_id_1+shape_original[1],start_id_2:start_id_2+shape_original[2]]
    return case_pixels, bw1, bw2, bw_copy, origin, spacing
def load_pixels(case_path):
    """Load a DICOM series from *case_path* and convert it to HU pixels.

    Returns (case_pixels, origin, spacing), as produced by get_pixels_hu.
    """
    return get_pixels_hu(load_scan(case_path))
|
def is_float(value):
    """Return True if *value* can be converted with float(), else False.

    Catches only conversion errors; the previous bare `except:` also
    swallowed KeyboardInterrupt/SystemExit.
    """
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
def str2pair(x):
    """Parse "peso,llegada" into (float, int); returns None when the first
    field is not a valid float."""
    parts = x.split(',')
    try:
        peso = float(parts[0])
    except (TypeError, ValueError):
        return None
    llegada = int(parts[1])
    return peso, llegada
def LeeGrafo(filename):
    """Read a graph from *filename*: one node per line, space-separated
    "peso,llegada" tokens parsed with str2pair.

    The file is now opened with a context manager so the handle is always
    closed (previously it was never closed).
    """
    G = []
    with open(filename) as file:
        for line in file:
            G.append([str2pair(x) for x in line.split(' ')])
    return G
|
__author__ = 'Daoyuan'
from BaseSolution import *
import math
class PerfectSquares(object if False else BaseSolution):
    """LeetCode 279 "Perfect Squares": least number of perfect squares
    summing to n.

    NOTE(review): Python 2 code (xrange, integer `/` division). Three
    `solution` implementations are kept for reference; only the LAST
    definition is bound on the class, so the memoized DP version is the
    one that actually runs.
    """
    def __init__(self):
        BaseSolution.__init__(self)
        self.fuckinglevel = 9
        # test cases without `expects` presumably just exercise the solver
        self.push_test(
            params = (9453,),
        )
        self.push_test(
            params = (9975,)
        )
        self.push_test(
            params = (7929,)
        )
        self.push_test(
            params = (6337,)
        )
        self.push_test(
            params = (12,),
            expects = 3
        )
        self.push_test(
            params = (13,),
            expects = 2
        )
        self.push_test(
            params = (4,),
            expects = 1
        )
        self.push_test(
            params = (0,),
            expects = 0
        )
    ### TLE.
    ### Combination + Pruning
    ### for future reference
    ### (shadowed by the later `solution` definitions below)
    def solution(self, n):
        # all perfect squares <= n, largest first
        squares = []
        for i in range(1, n):
            square = i * i
            if square > n:
                break
            else:
                squares += square,
        squares.reverse()
        # least[0] holds the best count found so far (999 = "not found")
        least = [999]
        list(self.combinesum( squares, n, 1, least))
        if least[0] > 900: return 0
        return least[0]
    def combinesum(self, squares, target, count, least):
        """Generator over square combinations summing to *target*, pruning
        any branch already worse than the best count in least[0]."""
        if count > least[0]: return
        for i in xrange(len(squares)):
            if target % squares[i] ==0:
                # target is a multiple of this square: complete combination
                n = target / squares[i]
                count = count + n -1
                if count < least[0]:
                    least[0] = count
                yield [squares[i],] * n
            elif squares[i] > target:
                continue
            elif squares[i] < target:
                # take floor(target / square) copies, recurse on the remainder
                tar = target % squares[i]
                n = target / squares[i]
                arr = self.less_than(squares, tar)
                this = [squares[i],] * n
                for next in self.combinesum(arr, tar, count + n, least):
                    yield next + this
    def less_than(self, arr, num):
        """Binary search in the descending list *arr*; return the suffix of
        values <= num (approximately — boundary handling is loose)."""
        length = len(arr)
        start = length - 1
        end = 0
        while start - end > 1:
            mid = ( start + end ) / 2
            if arr[mid] > num:
                end = mid
            elif arr[mid] < num:
                start = mid
            else:
                end = mid
                break;
        return arr[end:]
    ## Convert from Java DP
    ## O(n * sqrt(n)) bottom-up dynamic programming
    ## (also shadowed by the final `solution` below)
    def solution(self, n):
        if n==0: return 0
        squares = [0] * (n + 1)
        squares[1] = 1
        for i in xrange(2,n+1):
            squares[i] = 1e9
            for j in xrange(1,i):
                if j * j > i: break
                squares[i] = min(squares[i], squares[i-j*j])
            squares[i] =squares[i] + 1
        return squares[n]
    # This variable to store the middle result
    # and can be reused by different test cases!!!!
    # NOTE(review): class-level mutable — the memo is shared by ALL instances.
    numSquares = [0]
    def solution(self, n):
        """Memoized DP: extend the shared numSquares table up to n on demand."""
        length = len(self.numSquares)
        if n >= length:
            square = [ i**2 for i in xrange(1, int(math.sqrt(n)) + 1)]
            for i in xrange(length, n+1): # attention, start from length
                self.numSquares += min( [ 1+ self.numSquares[ i - item ] for item in square if item <= i ] ),
        return self.numSquares[n]
"""
cache spacy
@author: Carl Mueller
"""
from functools import partial
from cachetools import cached, Cache
from cachetools.keys import hashkey
import spacy
@cached(Cache(1), key=partial(hashkey, 'spacy'))
def load_spacy(model_name, **kwargs):
    """
    Load a language-specific spaCy pipeline (collection of data, models, and
    resources) for tokenizing, tagging, parsing, etc. text.

    The cache has size 1 and a constant key, so only the most recently
    loaded pipeline is kept; loading a different model evicts the previous
    one.

    Args:
        model_name (str): name or shortcut of the spaCy model to load,
            e.g. a 2-letter language code such as 'en' or 'de'
        **kwargs: keyword arguments passed through to :func:`spacy.load`
    Returns:
        :class:`spacy.<lang>.<Language>`
    Raises:
        RuntimeError: if package can't be loaded
    """
    print("Loading Spacy model into cache...")
    return spacy.load(model_name, **kwargs)
|
# -*- encoding: utf-8 -*-
# Shaolin's Blind Fury
#
# Copyright: Hugo Ruscitti
# Web: www.losersjuegos.com.ar
import pilas
import enemigo
import random
import golpe
class Estrella(enemigo.Enemigo):
    """A ninja star that flies across the screen trying to hit the shaolin."""

    def __init__(self, x, y, direccion, shaolin):
        enemigo.Enemigo.__init__(self)
        self._cargar_imagen()
        self.shaolin = shaolin
        self.centro = ("centro", "centro")
        self.x = x
        self.y = y
        self.altura_del_salto = 100
        # hit object targeting the shaolin, offset (-65, 0) from the star —
        # presumably the strike area; confirm against golpe.Golpe
        self.golpe = golpe.Golpe(self, [shaolin], -65, 0)
        self.direccion = direccion  # horizontal travel direction (sign/speed factor)
        self.actualizar()
        self.aprender(pilas.habilidades.Arrastrable)

    def _cargar_imagen(self):
        """Load a ninja-star image or, occasionally, a coffee cup (!!!)."""
        if random.randint(0, 10) < 8:
            self.imagen = "estrella.png"
        else:
            self.imagen = "cafe.png"

    def actualizar(self):
        """Per-frame update: move horizontally, spin, and check collisions."""
        enemigo.Enemigo.actualizar(self)
        self.x += self.direccion * 6
        self.rotacion += 10
        self.z = self.y  # draw order follows vertical position
        self.sombra.escala = 0.5
        if self.esta_fuera_de_la_pantalla():
            self.eliminar_todo()
        if self.golpe:
            shaolin_que_ha_golpeado = self.golpe.verificar_colisiones()
            if shaolin_que_ha_golpeado:
                # second argument False — meaning unclear from here; confirm
                # against Shaolin.ha_sido_golpeado
                shaolin_que_ha_golpeado.ha_sido_golpeado(self, False)
                self.eliminar_todo()

    def eliminar_todo(self):
        """Remove the star, its shadow and its hit object from the scene."""
        self.eliminar()
        self.sombra.eliminar()
        self.eliminar_golpe()

    def eliminar_golpe(self):
        """Detach and delete the hit object, if still present."""
        if self.golpe:
            self.golpe.eliminar()
            self.golpe = None
|
"""
Reference:
- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition.
arXiv:1512.03385 [cs.CV]
- Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Identity Mappings in Deep Residual Networks.
arXiv:1603.05027 [cs.CV]
- F: residual function
- h: identity skip connection
- f: activation function
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras
import keras.backend as K
import tensorflow as tf
from keras import layers
from keras.layers import (
Input,
Activation,
Conv2D,
BatchNormalization,
AveragePooling2D,
GlobalAveragePooling2D,
Lambda
)
def original_residual_fn(x, filters, activation="relu"):
    """Original residual function F(x) from arXiv:1512.03385 (conv-BN-act).

    From the paper: convolutional layers mostly have 3x3 filters; when the
    feature map size is halved the number of filters is doubled, and
    downsampling is performed by convolutions with stride 2.

    BUG FIX: both Conv2D layers were instantiated but never applied to the
    tensor (missing `(x)` call), so the graph was never built.
    """
    # downsample with the first convolution when the channel count grows
    s1 = (2, 2) if x.shape[1].value < filters else (1, 1)
    x = Conv2D(filters, kernel_size=3, strides=s1)(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation(activation)(x)
    x = Conv2D(filters, kernel_size=3)(x)  # strides=(1, 1)
    x = BatchNormalization(axis=1)(x)
    return x
# Residual function for full pre-activation
def residual_fn(x, filters, activation="relu"):
    """Full pre-activation residual function (BN -> act -> conv), arXiv:1603.05027.

    BUG FIX: both Conv2D layers were instantiated but never applied to the
    tensor (missing `(x)` call), so the graph was never built.
    """
    # downsample with the first convolution when the channel count grows
    s1 = (2, 2) if x.shape[1].value < filters else (1, 1)
    x = BatchNormalization(axis=1)(x)
    x = Activation(activation)(x)
    x = Conv2D(filters, kernel_size=3, strides=s1)(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation(activation)(x)
    x = Conv2D(filters, kernel_size=3)(x)  # strides=(1, 1)
    return x
def original_bottleneck_residual_fn(x, filters, activation="relu"):
    """Original bottleneck residual function (1x1 -> 3x3 -> 1x1), arXiv:1512.03385.

    From the paper: because of concerns on the training time that we can
    afford, we modify the building block as a bottleneck design. The
    parameter-free identity shortcuts are particularly important for the
    bottleneck architectures — replacing them with projections would double
    time complexity and model size, as the shortcut connects the two
    high-dimensional ends.

    Assumes channels-first layout (BatchNormalization axis=1) and the old
    TF1-style `.value` shape API.
    """
    # downsample with the first convolution when the channel count grows
    s1 = (2, 2) if x.shape[1].value < filters else (1, 1)
    # the two inner convolutions run at a quarter of the output width
    bottleneck_filters = int(filters/4)
    # 1x1 reduce
    x = Conv2D(filters=bottleneck_filters, kernel_size=1, strides=s1, padding="SAME")(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation(activation)(x)
    # 3x3 transform
    x = Conv2D(filters=bottleneck_filters, kernel_size=3, strides=1, padding="SAME")(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation(activation)(x)
    # 1x1 expand (no activation here: applied after the shortcut addition)
    x = Conv2D(filters=filters, kernel_size=1, strides=1, padding="SAME")(x)
    x = BatchNormalization(axis=1)(x)
    return x
def bottleneck_residual_fn(x, filters, activation="relu"):
    """Full pre-activation bottleneck residual function (BN -> act -> conv
    three times: 1x1 reduce, 3x3 transform, 1x1 expand), arXiv:1603.05027."""
    # downsample with the first convolution when the channel count grows
    first_strides = (2, 2) if x.shape[1].value < filters else (1, 1)
    squeezed = int(filters/4)
    # (kernel_size, strides, out_filters) for the three pre-activated convs
    conv_specs = [
        (1, first_strides, squeezed),
        (3, 1, squeezed),
        (1, 1, filters),
    ]
    for kernel, strides, out_filters in conv_specs:
        x = BatchNormalization(axis=1)(x)
        x = Activation(activation)(x)
        x = Conv2D(filters=out_filters, kernel_size=kernel, strides=strides, padding="SAME")(x)
    return x
##############################
# Shortcut == Identity skip connection
def identity_shortcut(x):
    """Parameter-free identity skip connection h(x) = x, wrapped in a Lambda
    so it participates in the Keras graph."""
    return Lambda(tf.identity)(x)
def zero_padding_shortcut(x, filters):
    """
    Zero-padding shortcut (option A in arXiv:1512.03385): used for
    increasing dimensions while keeping the shortcut parameter free.

    Halves the spatial size with average pooling, then zero-pads the
    channel dimension up to *filters*. Assumes channels-first [B, C, H, W]
    layout (see `paddings` below) — TODO confirm against the model config.
    """
    input_channels = x.shape[1].value
    # Decreases height and width of feature maps to match the strided branch
    x = AveragePooling2D(pool_size=(2, 2), strides=(2, 2), padding="SAME")(x)
    # Channel-wise zero padding, split as evenly as possible before/after
    channel_diff = filters - input_channels
    pad0 = int(channel_diff/2)
    pad1 = channel_diff - pad0
    paddings = [[0, 0], [pad0, pad1], [0, 0], [0, 0]] # [B, C, H, W]
    shortcut = Lambda(lambda t: tf.pad(t, paddings))(x)
    return shortcut
def projection_shortcut(x, filters):
    """Projection shortcut (option B): a strided 1x1 convolution W_s.

    From the paper: when the input/output channels change, a linear
    projection W_s by the shortcut connection matches the dimensions:
        y = F(x, {W_i}) + W_s * x
    """
    projection = Conv2D(filters=filters, kernel_size=1, strides=2, padding="SAME")
    return projection(x)
#####################################################
# Residual unit
###################################################
def original_residual_unit(x, filters, activation):
    """Original (post-activation) residual unit, arXiv:1512.03385:
    y = f(h(x) + F(x)), with the activation applied after the addition.

    BUG FIX: the body referenced undefined names (`shortcut`,
    `shortcut_type`, `residual_function`, `_type`) and raised NameError.
    It now selects the shortcut the same way `residual_unit` does and uses
    `original_residual_fn` as the residual branch.
    """
    # project the shortcut only when the channel count increases
    if x.shape[1].value < filters:
        h = projection_shortcut(x, filters)
    else:
        h = identity_shortcut(x)
    F = original_residual_fn(x, filters, activation)
    y = layers.add([h, F])
    y = Activation(activation)(y)
    return y
# full pre-activation type
def residual_unit(x, filters, bottleneck, activation="relu"):
    """Full pre-activation residual unit, arXiv:1603.05027:
    y = h(x) + F(x), with no activation after the addition."""
    # project the shortcut only when the channel count increases
    needs_projection = x.shape[1].value < filters
    skip = projection_shortcut(x, filters) if needs_projection else identity_shortcut(x)
    make_residual = bottleneck_residual_fn if bottleneck else residual_fn
    residual = make_residual(x, filters, activation)
    return layers.add([skip, residual])
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from stream import Stream
from peg import *
from grammar import Grammar
|
from twilio.rest import Client
import os
def schedule_message(message, number):
    """Send *message* as a WhatsApp message to *number* via Twilio.

    Args:
        message: text body to send.
        number: destination number including country code, digits only
            (anything str()-convertible is accepted).

    Prints the Twilio message SID on success.
    """
    # SECURITY FIX: credentials were hard-coded in source. Read them from
    # the environment, falling back to the legacy values so existing
    # deployments keep working — rotate these secrets and drop the
    # defaults as soon as possible.
    account_sid = os.environ.get('TWILIO_ACCOUNT_SID', 'ACc0418760bcc77f170e9210571dd1073b')
    auth_token = os.environ.get('TWILIO_AUTH_TOKEN', '687af050011e5f8d04e13ca1ad96b79e')
    client = Client(account_sid, auth_token)
    number = str(number)
    message = client.messages.create(
        from_='whatsapp:+14155238886',
        body=message,
        to='whatsapp:+'+number
    )
    print(message.sid)
|
__authors__ = 'Antonio Ritacco'
__email__ = 'ritacco.ant@gmail.com'
import numpy as np
import torch.nn as nn
import torch
# from torch_geometric.data import Data
from collections import defaultdict
from torch.nn.modules.distance import PairwiseDistance
from helpers import pairwise_distances
import networkx as nx
'''
Pytorch implementation of the Incremental Growing Neural Gas algorithm, based on:
An Incremental Growing Neural Gas Learns Topologies. Y. Prudent,
2005 IEEE International Joint Conference on Neural Networks
'''
class IncrementalGrowingNeuralGas:
    def __init__(self, epsilon, amature, alfac1, alfacN, cuda=False):
        """Initialize an empty incremental growing neural gas.

        Args:
            epsilon: distance threshold below which a sample is matched to
                an existing unit instead of spawning a new one.
            amature: maximum age for edges/nodes before pruning.
            alfac1: learning rate applied to the winning unit.
            alfacN: learning rate applied to the winner's neighbors.
            cuda: if True, distance computations run on the GPU.
        """
        self.Units = None           # (num_units, dim) tensor of unit prototypes
        self.Ages = None            # appears unused here — TODO confirm
        self.Connections = dict()   # appears unused here — TODO confirm
        self.epsilon = epsilon
        self.amature = amature
        self.alfac1 = alfac1
        self.alfacN = alfacN
        self.Error = 0
        self.cuda = cuda
        self.network = None         # networkx graph holding the learned topology
def findWinning(self, x):
if self.Units is None:
val1 = None
val2 = None
index1 = None
index2 = None
else:
pdist = PairwiseDistance(p=2)
if self.cuda:
distance_vector = pdist(self.Units.cuda(), x.cuda())
distance_vector.to('cpu')
# distance_vector = pairwise_distances(self.Units.cuda(), x.cuda())
# distance_vector.to('cpu')
else:
distance_vector = pdist(self.Units, x)
# distance_vector = pairwise_distances(self.Units, x)
if self.Units.shape[0] < 2:
tuples = torch.topk(distance_vector, k=1, largest=False)
val1 = tuples.values[0]
index1 = tuples.indices[0]
val2 = None
index2 = None
else:
tuples = torch.topk(torch.reshape(distance_vector, (-1,)), k=2, largest=False)
val1 = tuples.values[0]
val2 = tuples.values[1]
index1 = tuples.indices[0]
index2 = tuples.indices[1]
return {'val1': val1, 'index1': index1, 'val2': val2, 'index2': index2}
def _forward(self, x):
distDict = self.findWinning(x)
if distDict['val1'] is None:
self.network = nx.Graph()
node_id = self.network.number_of_nodes()
self.network.add_node(node_id, age=0)
self.Units = x.clone().detach().requires_grad_(False)
else:
best_unit = distDict['index1'].item()
if distDict['val1'] >= self.epsilon:
node_id = self.network.number_of_nodes()
self.network.add_node(node_id, age=0)
self.Units = torch.cat((self.Units, x.clone().detach().requires_grad_(False)))
else:
if distDict['val2'] is None or distDict['val2'] >= self.epsilon:
node_id = self.network.number_of_nodes()
self.network.add_node(node_id, age=0)
self.Units = torch.cat((self.Units, x.clone().detach().requires_grad_(False)))
self.network.add_edge(best_unit, node_id, age=0)
else:
second_best_unit = distDict['index2'].item()
self.Units[best_unit] += torch.reshape(self.alfac1 * (x - self.Units[best_unit]), (-1,))
if best_unit not in self.network.nodes():
self.network.add_node(best_unit, age=0)
for u in self.network.neighbors(best_unit):
self.Units[u] += torch.reshape(self.alfacN * (x - self.Units[u]), (-1,))
self.network._adj[best_unit][u]['age'] += 1
# self.network._node[u]
if second_best_unit in self.network.neighbors(best_unit):
self.network._adj[best_unit][second_best_unit]['age'] = 0
else:
self.network.add_edge(best_unit, second_best_unit, age=0)
## <Remove edges too old>
edge_list_to_remove = []
for u, v, attrib in self.network.edges(data=True):
if attrib['age'] >= self.amature:
edge_list_to_remove.append([u, v])
self.network.remove_edges_from(edge_list_to_remove)
##</Remove edges too old>
## <Increasing nodes age of the winner unit>
for u in self.network.neighbors(best_unit):
self.network._node[u]['age'] += 1
## </Increasing nodes age of the winner unit>
## <Remove nodes too isolated>
node_list_to_remove = []
for n in self.network.nodes():
if len(self.network._adj[n]) < 1:
node_list_to_remove.append(n)
if node_list_to_remove:
list_to_keep = list(set(list(range(0, self.Units.shape[0]))) - set(node_list_to_remove))
self.Units = torch.index_select(self.Units, 0, torch.tensor(list_to_keep))
self.network.remove_nodes_from(node_list_to_remove)
old_index_nodes = [name for name in self.network.nodes()]
new_index_nodes = list(range(0, len(list_to_keep)))
mapping = dict(zip(old_index_nodes, new_index_nodes))
self.network = nx.relabel_nodes(self.network, mapping)
## </Remove nodes too isolated>
#
# def forward(self, x):
# if self.cuda:
# x = x.cuda()
# distDict = self.findWinning(x)
# if distDict['val1'] is None or distDict['val1'] >= self.epsilon:
# if distDict['val1'] is None:
# self.Units = x.clone().detach().requires_grad_(False)
# self.Ages = torch.tensor([0.0], requires_grad=False)
# else:
# self.Units = torch.cat((self.Units, x.clone().detach().requires_grad_(False)))
# self.Ages = torch.cat((self.Ages, torch.tensor([0.0], requires_grad=False)))
# else:
# bestUnit = distDict['index1'].item()
# newUnit = self.Units.shape[0]
# if distDict['index2'] is not None:
# newUnit = distDict['index2'].item()
#
# if distDict['val2'] is None or distDict['val2'] >= self.epsilon:
# self.Units = torch.cat((self.Units, x.clone().detach().requires_grad_(False)))
# self.Ages = torch.cat((self.Ages, torch.tensor([0.0], requires_grad=False)))
# # newUnit = self.Units.shape[0]
#
# else:
#
# self.Units[bestUnit] += torch.reshape(self.alfac1*(x-self.Units[bestUnit]), (-1,))
# if bestUnit not in self.Connections.keys():
# self.Units[newUnit] += torch.reshape(self.alfacN * (x - self.Units[newUnit]), (-1,))
# else:
# for index in self.Connections[bestUnit]:
# self.Units[index] += torch.reshape(self.alfacN*(x-self.Units[index]), (-1,))
# self.createConnection(bestUnit, newUnit)
#
# if distDict['index2'] not in self.Connections[distDict['index1']]:
# self.Connections[distDict['index1']].append(distDict['index2'])
# self.Ages[distDict['index1']] = 0.0
# self.Ages[distDict['index2']] = 0.0
# else:
# self.Connections[distDict['index1']].append(distDict['index2'])
#
#
# for index in self.Connections[bestUnit]:
# self.Ages[index] += 1.0
# def getMatureNeurons(self):
# neuronsList = []
# neuronIndexes = []
# i = 0
#
# for age in self.Ages:
# if age >= self.amature:
# neuronsList.append(self.Units[i])
# neuronIndexes.append(i)
# i += 1
# if(len(neuronsList)>0):
# return torch.stack(neuronsList), neuronIndexes
# else:
# return None, None
def get_mature_neurons(self, training=True):
neuronsList = []
i = 0
if training:
for node_id in self.network._node:
if self.network._node[node_id]['age'] >= self.amature:
neuronsList.append(self.Units[node_id])
i += 1
else:
neuron_to_remove = []
for node_id in self.network._node:
if self.network._node[node_id]['age'] < self.amature:
neuron_to_remove.append(node_id)
if neuron_to_remove:
list_to_keep = list(set(list(range(0, self.Units.shape[0]))) - set(neuron_to_remove))
neuronsList = torch.index_select(self.Units, 0, torch.tensor(list_to_keep))
self.network.remove_nodes_from(neuron_to_remove)
old_index_nodes = [name for name in self.network.nodes()]
new_index_nodes = list(range(0, len(list_to_keep)))
mapping = dict(zip(old_index_nodes, new_index_nodes))
self.network = nx.relabel_nodes(self.network, mapping)
if training:
if (len(neuronsList) > 0):
return torch.stack(neuronsList)
else:
return None
else:
if (neuronsList.shape[0]>0):
return neuronsList
else:
return None
|
"""
Introduction of Tuples in python.
* It is immutable in nature.
* we can not change or add value of tuples
* we defined in ( )
"""
size = (2,4)
""" size[1] = 23 (TypeError: 'tuple' object does not support item assignment)"""
print(size[1]) |
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
import numpy as nu
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import os
class Portfolio:
    '''Value object pairing a list of equity symbols with their allocation weights.'''

    def __init__(self, equities, allocations):
        self.allocations = allocations
        self.equities = equities
class PortfolioSimulation:
#def portfolio
#def start_date
#def end_date
dao = da.DataAccess('Yahoo', cachestalltime=0)
def __init__(self, portfolio, start_date, end_date):
self.portfolio = portfolio
self.start_date = start_date
self.end_date = end_date
def get_data(self, keys):
dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(self.start_date, self.end_date, dt.timedelta(hours=16))
data = self.dao.get_data(ldt_timestamps, self.portfolio.equities, keys)
return dict(zip(keys, data))
def data_fix(self, map):
map = map.fillna(method='ffill')
map = map.fillna(method='bfill')
map = map.fillna(1.0)
def calculate_returns(self, equity_prices):
return tsu.returnize0(equity_prices)
def simulate(self):
return 0
def calculate_optimal_allocations(self):
return 0
def print_results(self):
print '\n allocated returns: \n {}'.format(allocated_ret)
print '\n comulative daily returns:\n {}'.format(cumulative_daily_return)
print '\n mean: \n {}'.format(mean)
print '\n standard deviation:\n {} \n'.format(std)
print '\n Sharpe ratio:\n {} \n'.format(sharpe)
def simulate(startdate, enddate, equities, allocations):
    """Buy-and-hold simulation of a fixed allocation over a date range.

    Returns (volatility, mean daily return, annualized Sharpe ratio,
    cumulative return). Python 2 module (print statements, QSTK).
    """
    dt_timeofday = dt.timedelta(hours=16)  # NYSE close timestamps
    ldt_timestamps = du.getNYSEdays(startdate, enddate, dt_timeofday)
    # c_dataobj = da.DataAccess('Yahoo')
    c_dataobj = da.DataAccess('Yahoo', cachestalltime=0)
    ls_keys = [ 'close', 'actual_close']
    ldf_data = c_dataobj.get_data(ldt_timestamps, equities, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    # index_data = c_dataobj.get_data(ldt_timestamps, ['$SPX'], ls_keys)
    # i_data = dict(zip(ls_keys, index_data))
    # rint d_data['close']
    na_pr = d_data['close']
    # Fill missing prices: forward-fill, back-fill, then default to 1.0.
    na_pr = na_pr.fillna(method='ffill')
    na_pr = na_pr.fillna(method='bfill')
    na_pr = na_pr.fillna(1.0)
    na_price = na_pr.values
    # index_pr = i_data['close']
    # index_pr = index_pr.fillna(method='ffill')
    # index_pr = index_pr.fillna(method='bfill')
    # index_pr = index_pr.fillna(1.0)
    # index_price = index_pr.values
    # print na_price
    # print na_price[1, :]
    # na_normalized_price = na_price / na_price[0, :]
    na_rets = na_price.copy()
    # na_rets = na_normalized_price.copy()
    tsu.returnize0(na_rets)  # converts prices to daily returns in place
    # index_rets = index_price.copy()
    # tsu.returnize0(index_rets)
    # plt.plot(ldt_timestamps, na_rets)
    # plt.legend(equities)
    # plt.ylabel('Adjusted Close')
    # plt.xlabel('Date')
    # plt.draw()
    # plt.show()
    print na_rets
    # Weight each equity's daily returns by its allocation.
    allocated_ret = na_rets * allocations
    print '\n allocated returns: \n {}'.format(allocated_ret)
    cumulative_daily_return = nu.sum(allocated_ret, axis=1)
    print '\n comulative daily returns:\n {}'.format(cumulative_daily_return)
    mean = nu.mean(cumulative_daily_return)
    # index_mean = nu.mean(index_rets)
    print '\n mean: \n {}'.format(mean)
    std = nu.std(cumulative_daily_return)
    print '\n standard deviation:\n {} \n'.format(std)
    # sharpe = (mean - index_mean) / nu.std(cumulative_daily_return - index_rets)
    # Annualized with sqrt(252) trading days, risk-free rate assumed 0.
    sharpe = (mean) * nu.sqrt(252) / nu.std(cumulative_daily_return)
    print '\n Sharpe ratio:\n {} \n'.format(sharpe)
    return std, mean, sharpe, nu.prod(cumulative_daily_return + 1)
# vol, daily_ret, sharpe, cum_ret = simulate(dt.datetime(2010, 1, 1), dt.datetime(2010, 12, 31), ['AXP', 'HPQ', 'IBM', 'HNZ'], [0.0, 0.0, 0.0, 1.0])
# vol, daily_ret, sharpe, cum_ret = simulate(dt.datetime(2011, 1, 1), dt.datetime(2011, 12, 31), ['AAPL', 'GLD', 'GOOG', 'XOM'], [0.4, 0.4, 0.0, 0.2])
#vol, daily_ret, sharpe, cum_ret = simulate(dt.datetime(2010, 1, 1), dt.datetime(2010, 12, 31), ['BRCM', 'TXN', 'AMD', 'ADI'] , [0.4, 0.6, 0.0, 0.])
max_sharpe = 0
max_vol = 0
max_daily_ret = 0
max_cum_ret = 0;
allocations = []
for i in nu.arange(0, 1.1, 0.1):
for j in nu.arange(0, 1.1, 0.1):
for k in nu.arange(0, 1.1, 0.1):
for l in nu.arange(0, 1.1, 0.1):
if i + j + k + l == 1 :
vol, daily_ret, sharpe, cum_ret = simulate(dt.datetime(2010, 1, 1), dt.datetime(2010, 12, 31), ['BRCM', 'TXN', 'IBM', 'HNZ'] , [i, j, k, l])
print [i, j, k, l]
if sharpe > max_sharpe:
max_sharpe = sharpe
max_vol = vol
max_daily_ret = daily_ret
max_cum_ret = cum_ret
allocations = [i, j, k, l]
print max_vol
print max_daily_ret
print max_sharpe
print max_cum_ret
print allocations
|
1. Let _propKey_ be the result of evaluating |PropertyName|.
1. ReturnIfAbrupt(_propKey_).
1. Let _exprValueRef_ be the result of evaluating |AssignmentExpression|.
1. Let _propValue_ be ? GetValue(_exprValueRef_).
1. If IsAnonymousFunctionDefinition(|AssignmentExpression|) is *true*, then
1. Let _hasNameProperty_ be ? HasOwnProperty(_propValue_, `"name"`).
1. If _hasNameProperty_ is *false*, perform SetFunctionName(_propValue_, _propKey_).
1. Assert: _enumerable_ is *true*.
1. Return CreateDataPropertyOrThrow(_object_, _propKey_, _propValue_). |
import glob
def read_files(base_dir="benchmarksgame/bench/"):
    """Read every benchmark source file found under *base_dir*.

    Scans a fixed list of benchmark folders for files whose extension is in
    a fixed list of language extensions.

    Args:
        base_dir: root directory containing the benchmark folders (must end
            with a path separator). Defaults to the original hard-coded
            path, so existing callers are unaffected.

    Returns:
        list[str]: file contents, in folder/extension scan order.
    """
    folders = ["binarytrees", "binarytreesredux", "chameneousredux", "redux",
               "fasta", "fastaredux", "Include", "knucleotide", "mandelbrot",
               "meteor", "nbody", "regexdna", "revcomp", "spectralnorm",
               "threadring", "pidigits"]
    extensions = ["gcc", "c", "csharp", "sbcl", "clojure",
                  "hack", "java", "javascript", "ocaml", "perl",
                  "php", "py", "jruby", "yarv", "scala", "racket", "ghc"]
    texts = []
    for folder in folders:
        pattern_base = base_dir + folder + "/*."
        for ext in extensions:
            for filename in glob.glob(pattern_base + ext):
                # Latin-1 decodes any byte sequence, so odd bytes in the
                # benchmark sources cannot crash the scan.
                with open(filename, encoding="ISO-8859-1") as handle:
                    texts.append(handle.read())
    return texts
print(len(read_files()))
|
from django.shortcuts import render
from django.views.generic import TemplateView, ListView,ListView, DetailView,View
class OpenWeatherView(TemplateView):
    """Static page view: renders the weather.html template with default context."""
    template_name = "weather.html"
|
import string, re
class EncryptionMonitor:
    '''
    Class for Encryption Monitoring based on dm-crypt
    '''
    # NOTE: class-level dict is shared by all instances and is overwritten
    # in place on every get_encryption_info() call.
    encryption = {'is_encrypted': "", 'cipher': ""}

    def __init__(self):
        self.server = '150.162.63.32'

    def get_encryption_info(self, log_path="/etc/crypttab"):
        '''Parse a crypttab-style file and report the encryption status.

        Args:
            log_path: path of the crypttab file (Ubuntu default kept for
                backward compatibility).

        Returns:
            dict with keys 'is_encrypted' ("yes"/"no") and 'cipher' (the
            options field of the last non-comment entry, or "-").
        '''
        is_encrypted = "no"
        cipher = "-"
        # 'with' guarantees the handle is closed (the old code leaked it).
        with open(log_path, 'r') as f:
            for line in f:
                # Skip lines containing a comment marker.
                if line.find("#") != -1:
                    continue
                fields = re.sub(' +', ' ', line).split()
                if not fields:
                    # Blank line; the old code crashed here with IndexError.
                    continue
                is_encrypted = "yes"
                # BUG FIX: the old code indexed the *string* (a single
                # character) instead of the split fields; the options column
                # is the 4th whitespace-separated field when present.
                cipher = fields[3] if len(fields) > 3 else "-"
        self.encryption['is_encrypted'] = is_encrypted
        self.encryption['cipher'] = cipher
        return self.encryption
if __name__=="__main__":
encMon = EncryptionMonitor()
print encMon.get_encryption_info()
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
from flask import current_app as app
from amundsen_application.models.user import load_user, User
TEST_USER_ID = 'test_user_id'
def get_test_user(app: app) -> User:  # type: ignore
    """Build and load the canned user fixture used in tests."""
    return load_user({
        'email': 'test@email.com',
        'user_id': TEST_USER_ID,
        'first_name': 'Firstname',
        'last_name': 'Lastname',
        'full_name': 'Firstname Lastname',
    })
|
import boto3
import botocore
def get_ddb_client():
    """Return the DynamoDB 'agg_count' Table resource.

    (Name is historical: this returns a Table, not a low-level client.)
    """
    resource = boto3.resource('dynamodb', 'us-west-2')
    return resource.Table('agg_count')
def process_image(ddb, name, image):
    """Keep the aggregate alert counter in sync with one stream record.

    Args:
        ddb: DynamoDB Table resource for the aggregate table.
        name: stream event name; 'INSERT' increments the counter, 'MODIFY'
            decrements it, anything else is ignored.
        image: the stream record's NewImage (raw attribute-value map).
    """
    key = {
        'Id': image['Id']['S'],
    }
    if name == 'INSERT':
        update_expression = 'SET alert_count = alert_count + :incre'
    elif name == 'MODIFY':
        update_expression = 'SET alert_count = alert_count - :incre'
    else:
        return
    update_obj = {
        'Key': key,
        # The condition makes the arithmetic update fail cleanly when the
        # counter attribute does not exist yet.
        'ConditionExpression': 'attribute_exists(alert_count)',
        'UpdateExpression': update_expression,
        'ExpressionAttributeValues': {':incre': 1}
    }
    try:
        ddb.update_item(**update_obj)
    except botocore.exceptions.ClientError as e:
        msg = str(e)
        print(msg)
        # BUG FIX: the old check `msg.find(...) > 1` only worked by accident
        # (a match at index 0 or 1 would have been missed); use a plain
        # substring test instead.
        if 'ConditionalCheckFailedException' in msg:
            print('adding alert_count field')
            # First alert for this Id: initialise the counter attribute.
            update_obj['UpdateExpression'] = 'SET alert_count = :incre'
            update_obj['ConditionExpression'] = 'attribute_not_exists(alert_count)'
            ddb.update_item(**update_obj)
        else:
            raise
def handler(event, context):
    """Lambda entry point: apply every stream record to the aggregate table.

    Errors are printed (best-effort) rather than re-raised.
    """
    table = get_ddb_client()
    print('received event: ', event)
    try:
        print('processing event')
        for record in event['Records']:
            print('event', event)
            process_image(table, record['eventName'], record['dynamodb']['NewImage'])
    except Exception as e:
        print('unexpected error: ', str(e))
|
# -*- encoding: UTF-8 -*-
##############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2015-Today Laxicon Solution.
# (<http://laxicon.in>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from odoo import api, models, fields
from datetime import datetime
class sb_patient_recipes(models.Model):
    """Patient prescription (recipe): links a patient, diagnosis, doctor and
    the prescribed medications, and e-mails the receipt on creation."""
    _name = "sb.patient_recipes"
    _rec_name = "patient"

    # Recipe date defaults to today and is read-only in the form.
    recipe_date = fields.Datetime(default=(datetime.today().date()), readonly=True, string='Recipe Date')
    patient = fields.Many2one('res.partner', string='Patient', domain=[('customer', '=', True)], required=True)
    email = fields.Char(string='Email')
    diagnosis = fields.Many2one('sb.common_diagnostics', string='Diagnosis')
    doctor = fields.Many2one('res.partner', string='Doctor', domain=[('supplier', '=', True)])
    # medications_for_recipes = fields.Many2many('sb.medications_for_recipes', 'patient_recipes_medications_for_recipes_rel', 'medications_for_recipes_id', 'patient_recipes_id')
    medication_ids = fields.One2many('sb.medications_for_recipes', 'receipt_id', string="Medications")

    @api.onchange('patient', 'email')
    def patient_onchange(self):
        # Prefill e-mail and doctor from the selected patient record.
        self.email = self.patient.email_id
        self.doctor = self.patient.family_dr_id or False
        # if self.email and not self.patient.email_id:
        #     self.patient.write({'email_id': self.email})

    # @api.onchange('email','patient')
    # def patient_email(self):
    #     if self.patient and self.email:
    #         self.patient.write({'email_id':self.email, 'email':self.email})

    @api.model
    def create(self, vals):
        # Create the record, then send the receipt template if it is installed.
        x = super(sb_patient_recipes, self).create(vals)
        template = self.env['mail.template'].search([('name', '=', 'Patient Receipt E-mail Template')], limit=1)
        if template:
            mail_send = template.send_mail(x.id)
            print (mail_send)
        # NOTE(review): inside an @api.model create, `self` is the model (no
        # record bound), so self.patient / self.email are likely empty here.
        # Presumably x.patient / x.email were intended -- confirm.
        if self.patient and self.email:
            self.patient.write({'email_id': self.email, 'email': self.email})
        return x

    # @api.model
    def write(self, vals):
        # Propagate the (possibly edited) e-mail back to the patient partner.
        x = super(sb_patient_recipes, self).write(vals)
        # template = self.env['mail.template'].search([('name','=','Patient Receipt E-mail Template')],limit=1)
        # if template:
        #     mail_send = template.send_mail(x.id)
        if self.patient and self.email:
            self.patient.write({'email_id': self.email, 'email': self.email})
        return x
|
#!/usr/bin/env python
__name__ = 'fconv_txt2gti'
__author__ = 'Teruaki Enoto'
__version__ = '1.00'
__date__ = '2018 June 7'
import os
import sys
import astropy.io.fits as pyfits
from optparse import OptionParser
from datetime import datetime
# Command line: -i <input text file> (tab separated START/STOP rows) and
# -o <output GTI FITS file>.
parser = OptionParser()
parser.add_option("-i","--inputfile",dest="inputfile",
	action="store",help="input text file, tab separated",type="string")
parser.add_option("-o","--outputgti",dest="outputgti",
	action="store",help="output gti file name",type="string")
(options, args) = parser.parse_args()

# Both options are mandatory; bail out with a usage hint otherwise.
# (Idiom fix: compare against None with `is`, not `==`.)
if options.inputfile is None:
	sys.stderr.write("input text file is needed. %> -i inputfile")
	quit()
if options.outputgti is None:
	sys.stderr.write("output gti file name is needed. %> -o outputgti")
	quit()
sys.stdout.write("inputfile : %s " % options.inputfile)
sys.stdout.write("outputgti: %s " % options.outputgti)

# Require the input to exist and refuse to overwrite existing output.
if not os.path.exists(options.inputfile):
	sys.stderr.write("input text fits file does not exists: %s" % options.inputfile)
	quit()
if os.path.exists(options.outputgti):
	sys.stderr.write("output gti file has already existed: %s " % options.outputgti)
	quit()
# Column definition file for ftcreate: two double-precision columns (seconds).
f = open('gti_columns.txt','w')
dump = """START D s
STOP D s
"""
f.write(dump)
f.close()
# FITS header template for the output GTI extension (OGIP standard keywords).
f = open('gti_header.txt','w')
dump = """XTENSION= 'BINTABLE' / binary table extension
BITPIX = 8 / 8-bit bytes
NAXIS = 2 / 2-dimensional binary table
NAXIS1 = 16 / width of table in bytes
NAXIS2 = 1 / number of rows in table
PCOUNT = 0 / size of special data area
GCOUNT = 1 / one data group (required keyword)
TFIELDS = 2 / number of fields in each row
TTYPE1 = 'START ' / lower GTI boundary
TFORM1 = 'D ' / data format of field: 8-byte DOUBLE
TUNIT1 = 's ' / physical unit of field
TTYPE2 = 'STOP ' / upper GTI boundary
TFORM2 = 'D ' / data format of field: 8-byte DOUBLE
TUNIT2 = 's ' / physical unit of field
EXTNAME = 'STDGTI ' / The name of this table
HDUCLASS= 'OGIP ' / format conforms to OGIP standard
HDUCLAS1= 'GTI ' / table contains Good Time Intervals
HDUCLAS2= 'STANDARD' / standard Good Time Interval table
ONTIME = 0.00000000000000E+00 / [s] sum of all Good Time Intervals
TSTART = 0.00000000000000E+00 / [s] Lower bound of first GTI
TSTOP = 0.00000000000000E+00 / [s] Uppler bound of last GTI
TIMEUNIT= 's ' / All times in s unless specified otherwise
TIMESYS = 'TT ' / XMM time will be TT (Terrestial Time)
TIMEREF = 'LOCAL ' / Reference location of photon arrival times
TASSIGN = 'SATELLITE' / Location of time assignment
TIMEZERO= 0 / Clock correction (if not zero)
CLOCKAPP= T / Clock correction applied?
MJDREFI = 56658 / MJD reference day
MJDREFF = 7.775925925925930E-04 / MJD reference (fraction of day)
"""
f.write(dump)
f.close()
# Merge contiguous intervals: a row whose START equals the previous row's
# STOP is collapsed into the running interval; a gap flushes the interval.
f = open('tmp_gti_data_shrink.txt','w')
flag_first = True
for line in open(options.inputfile):
	cols = line.split()
	if cols[0] == '#':
		continue  # comment row
	tmp_TSTART = cols[0]
	tmp_TSTOP = cols[1]
	if flag_first:
		TSTART = tmp_TSTART
		prev_TSTOP = tmp_TSTOP
		flag_first = False
		continue
	if tmp_TSTART != prev_TSTOP:
		dump = '%s %s\n' % (TSTART,prev_TSTOP)
		f.write(dump)
		TSTART = tmp_TSTART
	prev_TSTOP = tmp_TSTOP
# Flush the final open interval.
# NOTE(review): if the input has no data rows, TSTART is never bound and the
# line below raises NameError -- confirm inputs are always non-empty.
dump = '%s %s\n' % (TSTART,prev_TSTOP)
f.write(dump)
f.close()
#cmd = 'ftcreate gti_columns.txt %s %s headfile=gti_header.txt extname="GTI" clobber=yes\n' % (options.inputfile,options.outputgti)
# Build the GTI FITS file with HEASoft's ftcreate, then clean up temp files.
cmd = 'ftcreate gti_columns.txt tmp_gti_data_shrink.txt %s headfile=gti_header.txt extname="GTI" clobber=yes\n' % (options.outputgti)
print(cmd);os.system(cmd)
cmd = 'rm -f tmp_gti_data_shrink.txt gti_columns.txt gti_header.txt'
print(cmd);os.system(cmd)
|
import os
import csv
# Path to the election results CSV (columns: Voter ID, County, Candidate).
csv_data1 = os.path.join('raw_data', 'election_data_1.csv')


def tally_votes(csv_path=csv_data1):
    """Count ballots per candidate from an election CSV.

    BUG FIX: the original used `=` inside `if` conditions (a SyntaxError),
    compared row values against undefined bare names (Roger, Gomez, ...),
    never actually accumulated any counts, and concatenated ints to str in
    the report. This version counts every candidate generically.

    Args:
        csv_path: path to the CSV file; first row is a header.

    Returns:
        (total_votes, counts, winner): total ballot count, dict mapping
        candidate -> votes, and the candidate with the most votes
        ("N/A" when the file has no data rows).
    """
    counts = {}
    total_votes = 0
    with open(csv_path, 'r') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader, None)  # skip the header row
        for row in reader:
            total_votes += 1
            candidate = row[2]
            counts[candidate] = counts.get(candidate, 0) + 1
    winner = max(counts, key=counts.get) if counts else "N/A"
    return total_votes, counts, winner


def print_results(total_votes, counts, winner):
    """Print the election summary in the original report format."""
    print("Election Results")
    print("-----------------")
    print("Total Votes: " + str(total_votes))
    print("-----------------")
    for candidate, votes in counts.items():
        print(candidate + ": " + str(votes))
    print("------------------")
    print("Winner: " + winner)
    print("------------------")


if __name__ == "__main__":
    print_results(*tally_votes())
# irgend nen zeugs importieren
from __future__ import division
import nltk, re, pprint
from nltk import word_tokenize
# oeffne Textdatei
# Open and read the text file.
# BUG FIX: mode 'rU' was deprecated and removed in Python 3.11 (ValueError);
# plain 'r' already gives universal newlines, and `with` closes the handle
# (the old code leaked it).
with open('test.txt', 'r') as f:
    raw = f.read()
# Tokenize the raw text so NLTK can work with it.
tokens = nltk.word_tokenize(raw)
text = nltk.Text(tokens)
# Show a concordance (keyword-in-context) listing for 'test'.
text.concordance('test')
|
# Generated by Django 2.0.7 on 2018-07-16 23:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine Jeu.genre as a plain ManyToManyField to Genre."""
    dependencies = [
        ('guide', '0005_auto_20180716_2351'),
    ]
    operations = [
        migrations.AlterField(
            model_name='jeu',
            name='genre',
            field=models.ManyToManyField(to='guide.Genre'),
        ),
    ]
|
from TN import *
"""
####Algorithm ideas
Orthogonal question: when to stop trying paths
- when you can't find any more? Seems to use too many nodes
- when there is a single failure? Seems to work, but still uses too many nodes
- maybe s tries each neighbor once, then stops.
Optimal performance looks not-so-good at this point. We may be able to fix it a bit using a (semifudgy) solution:
Each node maintains a local view of the network out to k hops
When a node receives a request to route a message to t, it runs some kind of more well-informed algorithm (how do local max-flows translate to s-t paths?), then sends the packet (along with its recommendations?) to some neighbor
dense networks seem to work better (as long as we're not looking for all the edges) what about semidense networks (kh = log n or sqrt(n) or something)?
Packets sent between s and t represent keyshares. Should we add an edge between s and t if they successfully communicate? Under what extra conditions (e.g. number of agreeing paths)?
"""
'''
arguments are complex numbers
'''
def hyper_dist(a: complex, b: complex):
    """Hyperbolic distance between two points of the Poincare disk,
    each given as a complex number."""
    # FIXME why is this taking so long? Maybe we should precalculate/cache all neighbor distances
    chord_sq = abs(a - b) ** 2
    denom = (1 - abs(a) ** 2) * (1 - abs(b) ** 2)
    return np.arccosh(1 + (2 * chord_sq) / denom)
class Isometry:
    """A disk-preserving Mobius transformation, stored as a rotation factor
    and a translation parameter (both complex numbers)."""

    def __init__(self, rotation, translation):
        self.rot = rotation
        self.trans = translation

    def __repr__(self):
        return 'ISO r = {}; t = {}'.format(self.rot, self.trans)

    def evaluate(self, arg):
        """Apply this isometry to the complex point *arg*."""
        numerator = self.rot * arg + self.trans
        denominator = 1 + np.conj(self.trans) * self.rot * arg
        return numerator / denominator

    def cross(self, l):
        """Compose two isometries: the result is equivalent to applying *l*
        first and then *self*."""
        shared_denom = self.rot * l.trans * np.conj(self.trans) + 1
        combined_rot = (self.rot * l.rot + l.rot * self.trans * np.conj(l.trans)) / shared_denom
        combined_trans = (self.rot * l.trans + self.trans) / shared_denom
        return Isometry(combined_rot, combined_trans)

    def inv(self):
        """Return the inverse isometry (undoes evaluate)."""
        undo_rotation = Isometry(np.conj(self.rot), 0)
        undo_translation = Isometry(1, -self.trans)
        return undo_rotation.cross(undo_translation)
'''
algorithm 1
'''
def define_generators(q):
	"""Algorithm 1: build the q generator isometries of the hyperbolic
	tessellation (a fundamental translation conjugated by rotations of
	2*pi/q around the origin)."""
	generators = []
	rot_isom = Isometry(np.e ** (1j * (2 * np.pi / q)),0)
	trans_isom = Isometry(1,np.tanh(np.arccosh(1 / (np.sin(np.pi / q))))).cross(Isometry(-1,0))
	for i in range(q):
		#for some reason doing it the way their pseudocode says to doesn't work because of this R^i thing
		#it only affects the zeroth generator (and therefore only the root)
		# NOTE(review): `np.array([...]) ** complex(i)` exponentiates element-wise
		# and [0] extracts rot**i -- confirm this matches the intended R^i.
		rot_isom_i = Isometry((np.array([rot_isom.rot,rot_isom.trans]) ** complex(i))[0],0j)
		generators.append(rot_isom_i.cross(trans_isom).cross(rot_isom_i.inv()))
	return generators
class HyperNode(TNNode):
	def __init__(self,node_id,q,coordinates=None,index=None,isometry=None):
		"""Create a node of the hyperbolic-embedding overlay.

		coordinates/index/isometry may be left None and assigned later, when
		the node joins the network (see init_as_root / add_neighbor).
		"""
		super().__init__(node_id)
		self.coords = coordinates  # position in the Poincare disk (complex)
		self.idx = index  # generator index used to derive daughter slots
		self.isom = isometry  # isometry mapping the origin to this node
		self.q = q  # tessellation parameter (daughter slots per node)
		self.d_coords = []#list of available daughter coords (list of [(coords,index,isometry)] which can instantiate daughters)
		self.neighbors = set()
		self.daughters = set()
		self.distances = {}#map (cache) certain nodes to distance to that node
		#this causes hyper_dist to be called 40% as often -- ~1.1x speedup overall (impact of this function on the entire run is now halved)
		self.d_add_search_flag = False
		self.max_neighbor_called = -1#in a real system, one per search
		self.times_visited = 0
def __repr__(self):
return "HN, {} (@{})".format(self.id,self.coords)
def reset_all_and_return_ops(self):
ops = super().reset_all_and_return_ops()
self.d_add_search_flag = False
self.max_neighbor_called = -1
return ops
'''
algorithm 2
'''
	def init_as_root(self):
		"""Algorithm 2: seat this node at the origin of the tessellation and
		precompute its daughter slots."""
		self.coords = 0 + 0j
		self.idx = 0
		self.isom = Isometry(1 + 0j,0 + 0j)  # identity isometry
		self.calculate_daughter_coords()
'''
algorithm 3
'''
	def calculate_daughter_coords(self):
		"""Algorithm 3: precompute the q (coords, index, isometry) slots that
		future daughters of this node may occupy."""
		generators = define_generators(self.q)
		for i in range(self.q):
			didx = (self.idx + i) % self.q
			disom = self.isom.cross(generators[didx])
			dcoord = disom.evaluate(0)  # daughter position = isometry applied to the origin
			self.d_coords.append((dcoord,didx,disom))
	def reset_flags(self):
		"""Clear per-search state (extends the base-class reset)."""
		super().reset_flags()
		self.d_add_search_flag = False
		self.max_neighbor_called = -1
		self.distances.clear()#just so the memory doesn't fill up. this is probably no bueno but since the target node changes all the time there's no use in remembering previous targets
'''
add this node as a daughter and give it the info it needs (coords, index, isometry)
'''
	def add_daughter(self,d,visited=None):
		"""Try to hand node ``d`` a daughter slot (coords, index, isometry).

		If this node has no free slot, the request floods to its neighbors;
		``visited`` guards against cycles. Returns the slot tuple or None.

		NOTE(review): once all q slots have been popped, len(d_coords) == 0
		again and the recalculation below re-appends the same q slots,
		apparently allowing duplicate daughter positions -- confirm intended.
		"""
		if len(self.d_coords) == 0:
			self.calculate_daughter_coords() #only do this if/when we add a daughter
		if visited is None:
			visited = set()
		if self in visited:
			return None
		visited.add(self)
		if len(self.d_coords) > 0:
			self.daughters.add(d)
			return self.d_coords.pop(0)
		else:
			#ask our neighbors if they can add d
			for neighbor in self.neighbors:
				info = neighbor.add_daughter(d,visited)
				if info is not None:
					return info
			return None
'''
add a link from me to n
'''
	def add_neighbor(self,n):
		"""Create a bidirectional link to ``n``.

		If we are not yet placed in the tessellation, ``n`` (or a node in its
		component) becomes our parent and assigns our coordinates.

		NOTE(review): add_daughter can return None when the whole component
		has no free slot, which would make the unpacking below raise
		TypeError -- confirm that case cannot occur in practice.
		"""
		if n in self.neighbors:
			return#already exists
		if self.coords is None:
			if n.coords is None:
				raise AttributeError("Tried to connect two nodes that weren't already in the network")
			#make n our parent
			self.coords, self.idx, self.isom = n.add_daughter(self)
		self.neighbors.add(n)
		n.add_neighbor(self)#enforce reciprocity
	def add_public_key_in_person(self,t_node):
		"""TN-compatible hook: meeting in person simply links the two nodes."""
		self.add_neighbor(t_node)
'''
calculate the distance to this other node in the embedding space
'''
def dist_to(self,target_coords:complex):
if target_coords in self.distances:
return self.distances[target_coords]
else:
dist = hyper_dist(self.coords,target_coords)
self.distances.update({target_coords:dist})#this shouldn't get too big since we're only calculating distance to a small set of target nodes
return dist
"""
algorithm 4
this is NOT as stated in the paper, since we're using blacklisting (is it guaranteed to still work? it certainly seems to)
"""
'''
initialize the search
npaths variable tells us how many paths to find (we'll stop when we find this many or when we have found the max).
max distance scale tells us how far nodes are allowed to be from t, as a linear function of the distance between s and t
specifically, nodes that are further than max_dist_scale * (dist from s to t) are excluded
'''
	def count_vd_paths_to_hyper(self,dest_coords,max_paths=float('inf'),max_dist_scale=float('inf'),stop_on_first_failure=False):
		"""Algorithm 4 (blacklisting variant): greedily search for
		vertex-disjoint paths from this node to the node at ``dest_coords``.

		Neighbors are probed nearest-to-destination first; nodes farther than
		max_dist_scale * dist(s, t) from the target are skipped. Each found
		path blacklists its nodes so later probes stay vertex-disjoint.
		Returns the list of found paths.
		"""
		self.search_blacklist_flag = True  # s itself is never reused by a path
		st_dist = hyper_dist(self.coords,dest_coords)
		#start a search to dest from each neighbor
		neighbors_to_call = list(sorted(list(self.neighbors),key=lambda x: x.dist_to(dest_coords)))
		paths = []
		for neighbor in neighbors_to_call:
			if hyper_dist(neighbor.coords,dest_coords) <= max_dist_scale * st_dist:
				self.operations_done += 1
				path_ret = neighbor.gnh_interm(dest_coords,self,st_dist,max_dist_scale)
				if path_ret is not None:
					paths.append(path_ret)
					if len(paths) >= max_paths:
						break
				elif stop_on_first_failure:
					break
		return paths
	def gnh_interm(self,dest_coords,pred,st_dist,max_dist_scale):
		"""Relay one greedy path probe toward ``dest_coords``.

		Returns the discovered path (built by v2_vd_blacklist_zip, which also
		blacklists its nodes) when this branch reaches the target, else None.
		Pulse id -1 marks nodes visited in the current search.
		"""
		if self.coords == dest_coords:#this has to happen before the visited check to implement path shadowing
			#blacklist nodes on this path
			self.pulse_pred.update({-1:pred})
			self.resetted_flag = False
			self.search_blacklist_flag = False#t is never blacklisted, but the function after sets it to be so
			path = self.v2_vd_blacklist_zip(-1,[])
			return path
		if -1 in self.pulse_pred:
			return None#we've already been visited in this search
		if self.search_blacklist_flag:
			return None#we are already blacklisted (or have been visited in this search), so don't go this way
		#we've relayed this pulse now
		self.pulse_pred.update({-1:pred})
		self.resetted_flag = False
		#otherwise ask the *right* neighbor(s) if they know the muffin man
		# Greedy order: closest-to-destination neighbors first.
		neighbors_to_call = list(sorted(list(self.neighbors),key=lambda x: x.dist_to(dest_coords)))
		for neighbor in neighbors_to_call:
			if hyper_dist(neighbor.coords,dest_coords) <= max_dist_scale * st_dist:
				self.operations_done += 1
				path_ret = neighbor.gnh_interm(dest_coords,self,st_dist,max_dist_scale)
				if path_ret is not None:
					return path_ret
		return None
"""
neighbor-blacklisting
"""
'''
initialize the search
npaths variable tells us how many paths to find (we'll stop when we find this many or when we have found the max).
max distance scale tells us how far nodes are allowed to be from t, as a linear function of the distance between s and t
specifically, nodes that are further than max_dist_scale * (dist from s to t) are excluded
'''
	def count_vd_paths_to_hyper_neighborbl(self,dest_coords,max_dist_scale=float('inf'),stop_on_first_failure=False):
		"""Neighbor-blacklisting variant: repeatedly probe for candidate
		paths (resuming from the next untried neighbor each pulse), then
		extract vertex-disjoint paths from the union of the candidates.
		"""
		st_dist = hyper_dist(self.coords,dest_coords)
		#start a search to dest from ourselves
		candidate_paths = []
		pulse_num = 0
		while self.max_neighbor_called < (len(self.neighbors)-1):#semantically identical to just doing the neighbor loop
			path_ret = self.gnh_neighborbl_interm(dest_coords,None,pulse_num,st_dist,max_dist_scale)
			pulse_num += 1
			if path_ret is not None:
				candidate_paths.append([self] + path_ret)
			elif stop_on_first_failure:
				break
		#now calculate a graph union among all the candidate paths and use that to do a strict max flow
		if len(candidate_paths) > 0:
			paths = vd_paths_from_candidates(candidate_paths,self,candidate_paths[0][-1])
		else:
			paths = []
		return paths
	def gnh_neighborbl_interm(self,dest_coords,pred,pulse_num,st_dist,max_dist_scale):
		"""Relay one probe of the neighbor-blacklisting search.

		Like gnh_interm, but visited-marking is per ``pulse_num`` and each
		node resumes from the neighbor after the last one it already tried
		(max_neighbor_called), instead of blacklisting whole paths.
		"""
		if self.coords == dest_coords:#this has to happen before the visited check to implement path shadowing
			#blacklist nodes on this path
			self.pulse_pred.update({pulse_num:pred})
			self.resetted_flag = False
			path = self.neighbor_blacklist_zip(pulse_num)
			return path
		if pulse_num in self.pulse_pred:
			return None#we've already been visited in this search
		#we've relayed this pulse now
		self.pulse_pred.update({pulse_num:pred})
		self.resetted_flag = False
		#otherwise ask the *right* neighbor(s) if they know the muffin man
		neighbors_to_call = list(sorted(list(self.neighbors),key=lambda x:x.dist_to(dest_coords)))
		# Resume after the last neighbor tried in a previous pulse.
		start_idx = self.max_neighbor_called + 1
		for nidx in range(start_idx,len(neighbors_to_call)):
			neighbor = neighbors_to_call[nidx]
			if hyper_dist(neighbor.coords,dest_coords) <= max_dist_scale * st_dist:
				self.operations_done += 1
				path_ret = neighbor.gnh_neighborbl_interm(dest_coords,self,pulse_num,st_dist,max_dist_scale)
				self.max_neighbor_called = nidx
				if path_ret is not None:
					return path_ret
		self.max_neighbor_called = len(self.neighbors) - 1
		return None
'''
reconstruct the path from s to t backwards and blacklist the nodes on it
iterative now to avoid recursion depth limits
'''
def neighbor_blacklist_zip(self, pulse_num):
    """Reconstruct the s->t path backwards from this (destination) node.

    Follows the pulse_pred[pulse_num] links until reaching the node whose
    predecessor is None (the origin s, which is excluded), returning the
    nodes in forward order ending with self. Iterative to avoid recursion
    depth limits.

    BUG FIX: the original loop condition was inverted
    (`(pulse_num not in ...) or (... is None)`), so the walk either returned
    an empty path immediately or crashed dereferencing a missing/None
    predecessor. The corrected condition mirrors multi_visit_zip below.
    """
    path = []
    current = self
    while (pulse_num in current.pulse_pred) and (current.pulse_pred[pulse_num] is not None):
        path = [current] + path
        current = current.pulse_pred[pulse_num]
    return path
"""
multi-visit algorithm
generally, each node asks its neighbors how many times they've been visited and prioritizes them based on the ones which have been visited the fewest times
"""
def get_num_times_visited(self):
    """Report how many times this node has been visited.

    The counter bump is the point of this accessor: in the decentralized
    model answering this question costs one message, so it is tallied as an
    operation.
    """
    self.operations_done = self.operations_done + 1
    return self.times_visited
'''
initialize the search
npaths variable tells us how many paths to find (we'll stop when we find this many or when we have found the max).
max distance scale tells us how far nodes are allowed to be from t, as a linear function of the distance between s and t
specifically, nodes that are further than max_dist_scale * (dist from s to t) are excluded
'''
def count_vd_paths_to_hyper_multivisit(self, dest_coords, max_dist_scale=float('inf'), stop_on_first_failure=False):
    """Find vertex-disjoint paths to dest_coords using the multi-visit search.

    Repeatedly launches multi-visit pulses from this node until every
    neighbor has been exhausted, collects the candidate paths, then reduces
    their union with a strict max-flow computation.

    Parameters:
        dest_coords: coordinates of the target node t.
        max_dist_scale: nodes farther than max_dist_scale * dist(s, t) from t
            are excluded from the search.
        stop_on_first_failure: abort pulsing as soon as one pulse fails.
    Returns a list of vertex-disjoint paths (possibly empty).
    """
    origin_to_dest = hyper_dist(self.coords, dest_coords)
    found = []
    pulse = 0
    # Semantically identical to looping over the neighbors directly: each
    # pulse resumes from the first neighbor index not yet exhausted.
    while self.max_neighbor_called < len(self.neighbors) - 1:
        result = self.gnh_multivisit_interm(dest_coords, None, pulse, origin_to_dest, max_dist_scale)
        pulse += 1
        if result is not None:
            found.append([self] + result)
        elif stop_on_first_failure:
            break
    # Union the candidate paths and run a strict max flow over that subgraph.
    if not found:
        return []
    return vd_paths_from_candidates(found, self, found[0][-1])
def gnh_multivisit_interm(self,dest_coords,pred,pulse_num,st_dist,max_dist_scale):
    """Relay one multi-visit search pulse toward dest_coords.

    Unlike the neighbor-blacklist variant, neighbors are prioritized by how
    few times they have already been visited (ties broken by distance to t).

    Parameters:
        dest_coords: coordinates of the target node t.
        pred: the node that forwarded this pulse to us (None at the origin s).
        pulse_num: identifier of this pulse, used to key pulse_pred.
        st_dist: precomputed distance from s to t.
        max_dist_scale: neighbors farther than max_dist_scale * st_dist from
            t are not asked to relay.
    Returns the reconstructed path ending at the destination if this pulse
    reached dest_coords, otherwise None.
    """
    if self.coords == dest_coords:#this has to happen before the visited check to implement path shadowing
        #blacklist nodes on this path
        self.pulse_pred.update({pulse_num:pred})
        self.resetted_flag = False
        path = self.multi_visit_zip(pulse_num)
        return path
    if pulse_num in self.pulse_pred:
        return None#we've already been visited in this search
    #we've relayed this pulse now
    self.pulse_pred.update({pulse_num:pred})
    self.resetted_flag = False
    #otherwise ask the *right* neighbor(s) if they know the muffin man
    neighbors_blacklist_counts = {n:n.get_num_times_visited() for n in self.neighbors}
    self.operations_done += len(self.neighbors)#send all of the blacklist-count requests
    #primary sort is the blacklist count, secondary is distance to t
    neighbors_to_call = list(sorted(list(self.neighbors),key=lambda x:(neighbors_blacklist_counts[x],x.dist_to(dest_coords))))
    # Resume after the last neighbor exhausted by an earlier pulse.
    start_idx = self.max_neighbor_called + 1
    for nidx in range(start_idx,len(neighbors_to_call)):
        neighbor = neighbors_to_call[nidx]
        if hyper_dist(neighbor.coords,dest_coords) <= max_dist_scale * st_dist:
            self.operations_done += 1#send the pathfind request
            path_ret = neighbor.gnh_multivisit_interm(dest_coords,self,pulse_num,st_dist,max_dist_scale)
            # Record progress even on failure so later pulses skip this neighbor.
            self.max_neighbor_called = nidx
            if path_ret is not None:
                return path_ret
    # Every eligible neighbor failed: mark this node fully exhausted.
    self.max_neighbor_called = len(self.neighbors) - 1
    return None
'''
I am on the path to dest from s, so I need to be blacklisted
this method also reconstructs the path
changed to loop to avoid recursion depth problems
'''
def multi_visit_zip(self, pulse_num):
    """Walk the predecessor chain for pulse_num back toward the origin.

    I am on the path from s to the destination, so every node on the chain
    bumps its visit count (its "blacklist" weight for later pulses). Returns
    the path in forward order ending with self; the origin s (whose recorded
    predecessor is None) is excluded. Iterative to avoid recursion depth
    limits.
    """
    #TODO implement tree-return for multi-blacklist zip?
    chain = []
    node = self
    # Equivalent to: pulse_num in pulse_pred and pulse_pred[pulse_num] is not None.
    while node.pulse_pred.get(pulse_num) is not None:
        node.operations_done += 1  # retracing the path costs one message per hop
        node.times_visited += 1  # blacklist ourselves for this one
        chain.insert(0, node)
        node = node.pulse_pred[pulse_num]
    return chain
"""
CENTRALIZED STUFF
"""
'''
use max flow on a subgraph of HG with all vertices at distance less than max_dist_scale * dist(s,t)
dist_measure ('path' or 't'): measure as closest distance to any vertex on the shortest path ('path') or as closest to t ('t')
This could be done decentralized according to the processes laid out in A. Segall's 1979 paper "decentralized maximum flow algorithms"
returns both the number of paths and the nodes used (assumed to be the entire subgraph)
'''
def hyper_VD_paths_local(HG:List[HyperNode],s:int,t:int,max_dist_scale=float('inf'),dist_measure='path',autoscale_increment=None):
    """Count vertex-disjoint s-t paths via max flow on a distance-restricted subgraph of HG.

    dist_measure ('path' or 't'): keep only nodes within
    max_dist_scale * dist(s, t) of the shortest s-t path ('path') or of t ('t').
    autoscale_increment: when not None and the restriction disconnects s from
    t, retry with max_dist_scale increased by this amount.
    This could be done decentralized per A. Segall's 1979 paper
    "Decentralized maximum flow algorithms".
    Returns (flow_value, number_of_nodes_in_the_subgraph).
    Raises AttributeError for an unknown dist_measure, or when s and t are
    cut and autoscaling is disabled.
    """
    #first reduce HG down
    HGp = []
    nodes_removed = set()
    st_dist = hyper_dist(HG[s].coords,HG[t].coords)
    short_path = None
    if dist_measure == 'path':
        #find a shortest path from s to t
        short_path = HG[s].count_vd_paths_to_hyper(HG[t].coords,max_paths=1)[0]
    elif dist_measure == 't':
        short_path = [HG[t]]
    else:
        raise AttributeError('Unknown distance metric: {}'.format(dist_measure))
    for node in HG:
        # Keep only nodes close enough to the reference path; copies are made
        # so the original graph's neighbor sets stay untouched.
        if min([hyper_dist(node.coords,x.coords) for x in short_path]) <= max_dist_scale * st_dist:
            nodecpy = HyperNode(node.id,node.q,node.coords,node.idx,node.isom)
            nodecpy.neighbors = node.neighbors.copy()
            HGp.append(nodecpy)
        else:
            nodes_removed.add(node)
    #remove unnecessary edges
    for node in HGp:
        node.neighbors = node.neighbors - nodes_removed
    HGp_nx = convert_to_nx_graph(HGp)
    # Standard vertex-disjoint reduction: split vertices into in/out halves.
    HGp_nx_transform = vertex_disjoint_transform(HGp_nx)
    # Run the flow from the source's out-half ('f<s>') when the transform
    # created one. NOTE(review): t is used as-is — confirm the transform
    # keeps the sink reachable under its original name.
    if 'f{}'.format(s) in HGp_nx_transform.nodes:
        s_run = 'f{}'.format(s)
    else:
        s_run = s
    if autoscale_increment is not None:
        #keep increasing the max dist scale until the graph is connected
        ret = 0
        try:
            ret = nx.algorithms.flow.edmonds_karp(HGp_nx_transform,s_run,t).graph['flow_value'],len(HGp)
        except nx.NetworkXError:
            # Disconnected: widen the distance restriction and retry.
            ret = hyper_VD_paths_local(HG,s,t,max_dist_scale=max_dist_scale+autoscale_increment,dist_measure=dist_measure,autoscale_increment=autoscale_increment)
        return ret
    else:
        ret = 0
        try:
            ret = nx.algorithms.flow.edmonds_karp(HGp_nx_transform,s_run,t).graph['flow_value'],len(HGp)
        except nx.NetworkXError:
            raise AttributeError('max dist scale of {} too small for s,t pair (s and t are cut) and autoscale is disabled.'.format(max_dist_scale))
return ret |
# The only import you need!
import socket, requests, re, random, time
class TwitchBot:
    """IRC chat bot for a Twitch channel.

    Connects to Twitch's IRC endpoint, greets viewers, persists a nickname
    list on disk, tracks viewer loyalty, and runs two chat mini-games: the
    "Lady of the Lake" item drop and a number-guessing "bomb" game.
    """
    def __init__(self):
        # Options (Don't edit)
        self.SERVER = "irc.twitch.tv" # server
        self.PORT = 6667 # port
        # Options (Edit this)
        self.PASS = "oauth:Your Oauth" # bot password can be found on https://twitchapps.com/tmi/
        self.BOT = "dante0713" # Bot's name [NO CAPITALS]
        self.CHANNEL = "dante0713" # Channal name [NO CAPITALS]
        self.OWNER = "dante0713" # Owner's name [NO CAPITALS]
        self.QUIT = True # run()'s main loop continues while this is True
        self.SOCKET = socket.socket()
        self.read_buffer = ""
        self.NickNameFile = 'F:/TwitchBot-master/NickNameList.txt' # on-disk nickname store
        self.NickList = [] # list of [user, nickname] pairs
        self.AudienceList = {} # viewer name -> loyalty counter
        self.BombRange = [0, 100] # current bounds of the bomb guessing game
    # Functions
    def read_nick_name_file(self):
        """Load the nickname file into a list of [user, nickname] rows.

        Each line is expected to look like "user, nickname".
        NOTE(review): if open() raises, read_nick_file is still the initial
        empty string and the finally block calls "".close(), which raises
        AttributeError — confirm the file always exists.
        """
        nick_list = []
        read_nick_file = ""
        try:
            read_nick_file = open(self.NickNameFile, 'r')
            # do stuff with ReadNickFile
            for line in read_nick_file.readlines():
                if line == "":
                    break
                line = line.strip('\n')
                # line[:len(line)] is a no-op slice; kept as-is.
                line = line[:len(line)].split(", ")
                nick_list.append(line)
        finally:
            if read_nick_file is not None:
                read_nick_file.close()
        return nick_list
    def send_message(self, s, line):
        """Send *line* to the channel as a CTCP ACTION (/me style) message."""
        message_temp = "PRIVMSG #" + self.CHANNEL + " :\x01ACTION " + line + " !\x01\r\n"
        s.send((message_temp + "\r\n").encode('utf8'))
    def get_user(self, line):
        """Extract the sender's username from a raw IRC line."""
        separate = line.split(":", 2)
        user = separate[1].split("!", 1)[0]
        return user
    def get_message(self, line):
        """Extract the chat text from a raw IRC line ("" when absent).

        NOTE(review): writes the module-level global `message` as a side
        effect; callers also receive it as the return value.
        """
        global message
        try:
            message = (line.split(":", 2))[2]
        except:
            message = ""
        return message
    def join_chat(self):
        """Consume the server's join traffic until the /NAMES list ends, then greet the chat."""
        readbuffer_join = "".encode('utf8')
        Loading = True
        self.NickList = self.read_nick_name_file()
        self.AudienceList = self.keep_viewer()
        while Loading:
            readbuffer_join = self.SOCKET.recv(1024)
            readbuffer_join = readbuffer_join.decode('utf8')
            temp = readbuffer_join.split("\n")
            readbuffer_join = readbuffer_join.encode('utf8')
            readbuffer_join = temp.pop()
            for line in temp:
                Loading = self.loading_completed(line)
        self.send_message(self.SOCKET, "各位好啊~ 現在已經開台囉~ 歡迎來到丹堤實況台~ 希望今天的主題你們會喜歡! ")
        print("Bot has joined " + self.CHANNEL + " Channel!")
    def loading_completed(self, line):
        """Return False once the server signals the end of the /NAMES list."""
        if ("End of /NAMES list" in line):
            return False
        else:
            return True
    def set_nick_name_from_lines(self, user, message):
        """Parse a "!set_nick_name <nick>" command.

        Returns 1 on success, 2 when the nick exceeds 10 characters, and 3
        when the command format is wrong (not exactly one argument).
        """
        Sentence = message.split(' ')
        if len(Sentence) == 2:
            Sentence = Sentence[1]
            if len(Sentence) <= 10:
                self.set_nick_name(user=user, NickName=re.sub('\r|\n|\t', '', Sentence))
                print(self.NickList)
                return 1
            else:
                return 2
        else:
            return 3
    def set_nick_name(self, user, NickName):
        """Update the user's nickname in NickList, appending a new row if absent."""
        flag = True
        for line in self.NickList:
            if user == line[0]:
                line[1] = NickName
                flag = False
        if flag:
            self.NickList.append([user, NickName])
    def get_nick_name(self, user):
        """Return the stored nickname for *user*, or the username itself."""
        for line in self.NickList:
            if user == line[0]:
                return line[1]
        return user
    def Console(self, line):
        # gets if it is a user or twitch server
        """Return True for server traffic, False for user chat (PRIVMSG)."""
        if "PRIVMSG" in line:
            return False
        else:
            return True
    # Nickname persistence ("認人")
    def store_nick_list(self):
        """Write NickList back to the nickname file, one "user, nick" per line."""
        WriteNickFile = open(self.NickNameFile, 'w')
        words = ""
        try:
            for i in range(len(self.NickList)):
                words = words + self.NickList[i][0] + ", " + re.sub('\n', '', self.NickList[i][1]) + "\n"
            # do stuff with WriteNickFile
            WriteNickFile.write(words)
        finally:
            if WriteNickFile is not None:
                WriteNickFile.close()
    # Code runs
    def setting(self):
        """Open the IRC socket, authenticate, join the channel, and greet."""
        s_prep = socket.socket()
        s_prep.connect((self.SERVER, self.PORT))
        s_prep.send(("PASS " + self.PASS + "\r\n").encode('utf8'))
        s_prep.send(("NICK " + self.BOT + "\r\n").encode('utf8'))
        s_prep.send(("JOIN #" + self.CHANNEL + "\r\n").encode('utf8'))
        self.SOCKET = s_prep
        self.join_chat()
        self.read_buffer = ""
    # Lady of the Lake: input parsing ("湖中女神 輸入控制")
    def set_lake_stuff_from_lines(self, message, user):
        """Parse a "!drop <item>" command.

        Returns the item string on success, 1 when the item name exceeds 10
        characters, and 2 when the format is wrong.
        NOTE(review): mixed str/int return values — callers must compare with
        == 1 / == 2 before using the value as a string.
        """
        Sentence = message.split(' ')
        if len(Sentence) == 2:
            Sentence = Sentence[1]
            if len(Sentence) <= 10:
                return Sentence
            else:
                return 1
        else:
            return 2
        pass
    # Lady of the Lake: prize probability roll ("湖中女神機率控制")
    def get_stuff(self):
        """Roll the drop outcome on a 0..99999 die.

        0: gold (16/100000), 1: silver (20/100000), 2: item lost
        (435/100000), 3: item returned (otherwise).
        """
        number = random.randint(0, 99999)
        if number <= 15:
            return 0
        elif number <= 35:
            return 1
        elif number <= 470:
            return 2
        else:
            return 3
    def set_CDTime(self, message):
        """Parse "!女神CD <seconds>"; return the new cooldown, or None
        (after reporting the format error in chat)."""
        Sentence = message.split(' ')
        if len(Sentence) == 2:
            Sentence = Sentence[1]
            return int(Sentence)
        else:
            self.send_message(self.SOCKET, "Input error: The command should be EX. !女神CD [second]")
    # Bomb number-guessing game ("終極密碼")
    def number_check(self, user, input_number, target, range):
        """Process one bomb-game guess: narrow the range, or end the game.

        Returns the updated range, False when the target was hit (game over),
        or the unchanged range on out-of-range input.
        NOTE(review): the `range` parameter shadows the builtin range(); the
        bounds comparison is strict, so a target equal to 0 or 100 can never
        be guessed — confirm whether that is intended.
        """
        if input_number < range[1] and input_number > range[0]:
            if input_number > target:
                range[1] = input_number
                self.send_message(self.SOCKET,'range goes [' + str(range[0]) + '] to [' + str(range[1]) + ']')
                return range
            elif input_number < target:
                range[0] = input_number
                self.send_message(self.SOCKET,'range goes [' + str(range[0]) + '] to [' + str(range[1]) + ']')
                return range
            elif input_number == target:
                self.send_message(self.SOCKET,'Find bomb. Congratulation! '+ user +' just got timeout for 60 seconds. P.S. 您將獲得60秒禁言以及50丹丹幣 dante0Happy ')
                self.timeout(user)
                self.BombRange = [0,100]
                return False
            else:
                pass
        else:
            self.send_message(self.SOCKET,'Input error: Dear '+ user +', please type the number between THE RANGE')
            return range
    def timeout(self, user):
        """Time the user out for 60 seconds and award 50 channel points."""
        self.SOCKET.send(("PRIVMSG #" + self.CHANNEL + " :" + ".timeout " + user + " 60" + "\r\n").encode('utf8'))
        self.SOCKET.send(("PRIVMSG #" + self.CHANNEL + " :" + "!addpoints " + user + " 50" + "\r\n").encode('utf8'))
    def get_Bomb_number(self, message):
        """Strip CR/LF/TAB from the message before it is digit-checked."""
        return re.sub('\r|\n|\t', '', message)
    # Crawl chat viewers to prepare point counting — unfinished ("爬聊天室觀眾 準備計算 point 未完成")
    def keep_viewer(self):
        """Fetch the current chatters list and return {viewer_name: 0}.

        Known bots (streamelements, Nightbot, kimikobot) are filtered out.
        NOTE(review): the pattern '\\r|\\n|\\t|' ends with an empty
        alternative that matches everywhere; harmless here but probably
        unintended.
        """
        audience_list = {}
        res = requests.get('http://tmi.twitch.tv/group/user/dante0713/chatters')
        words = re.sub('\r|\n|\t|', '', res.text)
        viewers = words.split('"viewers": [')[1].split(']')[0]
        mods = words.split('"moderators": [')[1].split(']')[0]
        viewers = re.sub(", " + r'"' + "streamelements" + r'"' + "", "",
                         viewers + ", " + mods) # drop the streamelements / kimikobot bots
        viewers = re.sub(", " + r'"' + "Nightbot" + r'"' + "", "", viewers)
        viewers = re.sub(", " + r'"' + "kimikobot" + r'"' + "", "", viewers)
        viewer_list = re.sub(r'"', "", re.sub(" ", "", viewers)).split(',')
        for viewer in viewer_list:
            audience_list[viewer] = 0
        return audience_list
    def compare_set(self):
        """Bump the loyalty counter of every viewer currently in chat."""
        flag = False
        audience_list = self.keep_viewer()
        for key in audience_list:
            if key in self.AudienceList:
                self.AudienceList[key] += 1
            else:
                self.AudienceList[key] = 0
    def count_loyalty(self, time_keep_flag, first_time):
        # Question: unsure whether viewers who never chat get counted ("不知道不講話的觀眾會不會被算進去")
        """Refresh the loyalty counters once at least 60s have elapsed.

        Returns False while the timer should restart; otherwise falls off the
        end and returns None implicitly (falsy), which the caller stores back
        into its flag.
        """
        if time_keep_flag == False:
            next_time = time.time()
            time_count = next_time - first_time
            if time_count >= 60:
                self.compare_set()
        else:
            return False
    def show_audience_list(self):
        """Render the loyalty counters as "viewer: n, viewer: n, "."""
        words = ""
        print(self.AudienceList)
        for key in self.AudienceList:
            words = words + key + ": " + str(self.AudienceList[key])+ ", "
        return words
    def run(self):
        """Main loop: read IRC lines, answer PINGs, dispatch owner commands,
        greetings, and mini-games until self.QUIT is cleared."""
        lady_of_lake_CD = time.time() # timestamp when the lake game is next available
        CD_Time = 30 # lake-game cooldown in seconds
        BombSolution = 0
        first_time = 0
        time_keep_flag = True
        lady_was_die_in_user_hands = "" # who "killed" the lady (disables the game)
        lady_of_lake_flag = True # lake game enabled?
        hello_flag = True # English greeting enabled?
        BombFlag = False # bomb game in progress?
        while self.QUIT:
            try:
                if time_keep_flag == True:
                    first_time = time.time()
                time_keep_flag = self.count_loyalty(time_keep_flag, first_time)
                self.read_buffer = self.SOCKET.recv(1024)
                self.read_buffer = self.read_buffer.decode('utf8')
                temp = self.read_buffer.split("\n")
                self.read_buffer = self.read_buffer.encode('utf8')
                self.read_buffer = temp.pop()
            except:
                temp = ""
            for line in temp:
                if line == "":
                    break
                # So twitch doesn't timeout the bot.
                if "PING" in line and self.Console(line):
                    self.SOCKET.send("PONG tmi.twitch.tv\r\n".encode('utf8'))
                    break
                # get user
                user = self.get_user(line)
                # get user's nick name
                nick_name = self.get_nick_name(user)
                # get message send by user
                message = self.get_message(line)
                # for you to see the chat from CMD
                print(user + " > " + message)
                # owner-only commands
                if user == self.OWNER:
                    if "!丹堤bot指令集" in message:
                        self.send_message(self.SOCKET, "親愛的 " + nick_name + " ~ 所有指令在 https://goo.gl/etv8rT 中可以查詢")
                        break
                    elif "大家晚安" in message or "quit" in message:
                        #self.send_message(self.SOCKET, "謝謝今天的各位的參與,喜歡我的朋友可以加入我的臉書粉專 https://www."
                        #                               "facebook.com/dante0713 ,台裡的最新資訊都在臉書粉專裡,祝各位有個美"
                        #                               "麗的夜晚,大家晚安囉~ 88")
                        # Shut down: persist nicknames and leave the main loop.
                        self.send_message(self.SOCKET, "丹堤bot 下線中...")
                        self.send_message(self.SOCKET, "丹堤bot 已離線")
                        self.send_message(self.SOCKET, "Points: " + self.show_audience_list())
                        self.store_nick_list()
                        self.QUIT = False
                        break
                    elif "!myGit" in message:
                        self.send_message(self.SOCKET, "Here's my Twitch Bot link. https://github.com/Dante0713/TwitchBot/blob/master/README.md")
                        break
                    elif "!我的Git" in message:
                        self.send_message(self.SOCKET, "這是我寫的聊天室機器人,歡迎觀看及使用 https://github.com/Dante0713/TwitchBot/blob/master/README_CH.md")
                        break
                    elif "滋滋卡滋滋,湖中女神~ 神力復甦!!" in message:
                        # Owner incantation: revive the Lady of the Lake game.
                        lady_of_lake_flag = True
                        lady_was_die_in_user_hands = ""
                        self.send_message(self.SOCKET, "在台主施以神奇的魔法後,湖中女神意外的復活了!!!")
                        break
                    elif "!終極密碼" in message:
                        # Start (or restart) the bomb game.
                        # NOTE(review): no break here, unlike the other
                        # branches — confirm falling through is intended.
                        self.BombRange = [0, 100]
                        BombSolution = random.randint(self.BombRange[0],self.BombRange[1])
                        self.send_message(self.SOCKET, "終極密碼開始,Range goes [0] to [100]")
                        BombFlag = True
                    elif "!Turn Off Say Hi" in message:
                        hello_flag = False
                        self.send_message(self.SOCKET, "英文打招呼功能已關閉!")
                        break
                    elif "!女神CD " in message:
                        CD_Time = self.set_CDTime(message)
                        self.send_message(self.SOCKET, "女神CD更改為每"+ str(CD_Time) +"使用一次")
                # Lady of the Lake drop (any user)
                if "!湖中女神 " in message or "!drop " in message:
                    if (lady_of_lake_CD - time.time()) < 0:
                        lady_of_lake_CD = time.time() + CD_Time
                        if lady_of_lake_flag == True:
                            stuff = self.set_lake_stuff_from_lines(message, user)
                            if stuff == 1:
                                self.send_message(self.SOCKET, "很抱歉,由於您的物品名稱太長,導致掉下去湖中的過程,刺死了湖中女神,請您訂閱台主、斗內台主或使用小奇點以喚回湖中女神 ==> 進入CD 100秒") # Chinese fool-proof message
                                lady_was_die_in_user_hands = user
                                lady_of_lake_flag = False
                                break
                            elif stuff == 2:
                                self.send_message(self.SOCKET, "很抱歉,由於您丟入湖裡的物品長得太奇怪,湖中女神認不出來,請您再丟一次,不知道怎麼丟可以問台主 :) ==> 進入CD 100秒")
                                break
                            else:
                                # Here `stuff` is the item name; roll the prize.
                                value = self.get_stuff()
                                if value == 0:
                                    self.send_message(self.SOCKET, "恭喜你, 成功用愛情擄獲了湖中女神的心, 湖中女神決定不只給你 金 " + stuff + " 作為回報,也獻上了他的肉體 <3 (恭喜您獲得 1000 丹丹幣) ==> 進入CD "+ str(CD_Time) +" 秒")
                                    self.SOCKET.send(("PRIVMSG #" + self.CHANNEL + " :" + "!addpoints " + user + " 1000" + "\r\n").encode('utf8'))
                                    break
                                elif value == 1:
                                    self.send_message(self.SOCKET, "恭喜你, 成功用十塊錢擄獲了湖中女神的心, 湖中女神決定用 銀 " + stuff + " 回報你的斗內 <3 (恭喜您獲得 800 丹丹幣) ==> 進入CD "+ str(CD_Time) +" 秒")
                                    self.SOCKET.send((
                                        "PRIVMSG #" + self.CHANNEL + " :" + "!addpoints " + user + " 800" + "\r\n").encode(
                                        'utf8'))
                                    break
                                elif value == 2:
                                    self.send_message(self.SOCKET, "很抱歉,湖中女神聽不到你說甚麼,於是你的 " +stuff + " 就這樣默默的沉入湖底... ==> 進入CD "+ str(CD_Time) +" 秒")
                                    break
                                elif value == 3:
                                    self.send_message(self.SOCKET, "湖中女神覺得你很誠實,所以決定把 "+ stuff +" 物歸原主 ==> 進入CD "+ str(CD_Time) +" 秒")
                                    break
                        else:
                            self.send_message(self.SOCKET,
                                              "湖中女神已經被" + lady_was_die_in_user_hands + "殺死,只有訂閱、斗內或小奇點,才有辦法讓台主使湖中女神死亡復甦! ") # Chinese fool-proof message
                            break
                    elif (lady_of_lake_CD - time.time()) >= 0:
                        # Still on cooldown: ignore the command.
                        pass
                # Nickname registration
                if "!認人 " in message or "!set_nick_name " in message:
                    case = self.set_nick_name_from_lines(message=message, user=user)
                    if case == 1:
                        self.send_message(self.SOCKET, "恭喜你輸入成功")
                        break
                    elif case == 2:
                        self.send_message(self.SOCKET, '親愛的' + nick_name + ',您設定的暱稱酷炫屌炸天,而且超過十個字,導致我的腦容量爆表拉!!! NotLikeThis')
                        break
                    elif case == 3:
                        self.send_message(self.SOCKET, '小淘氣,不要鬧在下了~ 您的暱稱不可包含空格 提示: (!認人 <您的暱稱>)')
                        break
                # Greetings addressed to the streamer's nicknames
                if "月月" in message or "丹丹" in message or "提哥" in message or "堤哥" in message or "月子" in message or "月提" in message or "月堤" in message or "丹提" in message or "丹堤" in message or "台主" in message:
                    if "安安" in message or "ㄤㄤ" in message or "你好" in message or "KonCha" in message or "Hi" in message or "hi" in message:
                        self.send_message(self.SOCKET, "你好啊~" + nick_name + " ! 歡迎來到丹堤實況台~ 希望你會喜歡今天的實況內容~ ")
                        break
                    if "早" in message:
                        self.send_message(self.SOCKET, "早啊~" + nick_name + " ! 早起精神好! ")
                        break
                    if '好久不見' in message:
                        self.send_message(self.SOCKET, "真的是好久不見了~ " + nick_name + ", 我給您留了個位置, 趕快拉張椅子坐下來看台吧 <3")
                        break
                if '姊姊' in message or '姐姐' in message or '解解' in message:
                    if len(message) < 6:
                        self.send_message(self.SOCKET, nick_name + "妹妹早阿~ 小朋友們今天有沒有都乖乖的呀? ")
                        break
                # English greeting (toggleable by the owner)
                if hello_flag:
                    if 'Hi' in message or 'hi' in message:
                        if 'Dante' in message or 'dante' in message:
                            self.send_message(self.SOCKET, "Hello, " + nick_name + "!")
                            break
                        else:
                            self.send_message(self.SOCKET, "Hi there! Nice to meet you")
                            break
                    else:
                        pass
                # Bomb game guesses
                if BombFlag:
                    if self.get_Bomb_number(message).isdigit():
                        bomb_result = self.number_check(user, int(self.get_Bomb_number(message)), BombSolution, self.BombRange)
                        if bomb_result == False:
                            BombFlag = False
                        else:
                            self.BombRange = bomb_result
                    else:
                        pass
                # Assorted per-user easter eggs
                if "歐吼" in message or "喔齁" in message:
                    if user == "n75830" or user == "ss87414" or user == "winnie0810":
                        self.send_message(self.SOCKET, "歐~~~ 齁~~~~~" + nick_name + "早安呀")
                        break
                if "丹寶貝" in message or '丹寶寶' in message:
                    if user == 'morgn__':
                        self.send_message(self.SOCKET, "摩根寶貝你來啦~ TwitchUnity TwitchUnity")
                        break
                if "InuyoFace" in message:
                    self.send_message(self.SOCKET, "你想幹嘛? ScaredyCat")
                    break
                if "KappaPride" in message:
                    if "阿" in message and "月" in message and "仔" in message:
                        split_nick_name = ""
                        if user == "ninomiyalena":
                            split_nick_name = "LENA"
                        elif user == "tiaolowan":
                            split_nick_name = "樓王"
                        else:
                            split_nick_name = nick_name
                        # join() over a string interleaves " FailFish " between
                        # the nickname's individual characters.
                        self.send_message(self.SOCKET, " FailFish ".join(split_nick_name))
                        break
                    else:
                        break
############################################################################
if __name__ == '__main__':
    # BUG FIX: the original called Dante0713.__init__() by hand right after
    # TwitchBot() had already run it — a redundant double initialization.
    bot = TwitchBot()
    bot.setting()
    bot.run()
|
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, aliased
from sqlalchemy.orm.exc import NoResultFound
from threading import Thread
from werkzeug.exceptions import BadRequest
from sqlalchemy.exc import IntegrityError
from werkzeug.security import generate_password_hash,check_password_hash
import sys
from flask import jsonify
sys.path.insert(0, '../models/')
from models import Base,User,Filedetails
import datetime
# SQLite backing store: bind the declarative metadata to the engine and
# build the session factory used by the helpers below.
engine = create_engine('sqlite:///beeruva.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
def listofilesuploaded(currentuser):
    """Return a JSON array describing every file owned by *currentuser*.

    Each entry carries fileid, filename, filetype (the stored file
    extension) and the upload date.
    """
    session = DBSession()
    owned = session.query(Filedetails).filter_by(userid=currentuser.userid)
    payload = [
        {
            'fileid': row.fileid,
            'filename': row.filename,
            'filetype': row.fileextension,
            'Upload date': row.fileuploadedon,
        }
        for row in owned
    ]
    return jsonify(payload)
def getchildren(folderid, currentuser):
    """Return a JSON array of the files whose parent folder is *folderid*.

    NOTE(review): currentuser is accepted but never used — confirm whether an
    ownership check was intended here.
    """
    session = DBSession()
    children = session.query(Filedetails).filter_by(parentid=folderid)
    payload = [
        {
            'fileid': row.fileid,
            'filename': row.filename,
            'fileext': row.fileextension,
            'filetype': row.filetype,
            'Upload date': row.fileuploadedon,
        }
        for row in children
    ]
    return jsonify(payload)
def getdescendents(folderid):
    """Return a JSON array of the files whose parent folder is *folderid*.

    NOTE(review): despite the name, this only queries direct children
    (parentid == folderid), not the whole subtree.
    """
    session = DBSession()
    children = session.query(Filedetails).filter_by(parentid=folderid)
    payload = [
        {
            'fileid': row.fileid,
            'filename': row.filename,
            'fileext': row.fileextension,
            'filetype': row.filetype,
            'Upload date': row.fileuploadedon,
        }
        for row in children
    ]
    return jsonify(payload)
def check_access(fileid, currentuser):
    """Check whether *currentuser* may access the file *fileid*.

    Returns a dict with access_state 1 plus fileid/filename when the file
    belongs to the user, or a dict with only access_state 0 otherwise.
    """
    session = DBSession()
    result = {}
    try:
        match = session.query(Filedetails).filter_by(userid=currentuser.userid).filter_by(fileid=fileid).one()
    except NoResultFound:
        result['access_state'] = 0
        return result
    result['fileid'] = match.fileid
    result['filename'] = match.filename
    result['access_state'] = 1
    return result
|
"""
Solution to Codeforces problem 282A
Copyright (c) GeneralMing. All rights reserved.
https://github.com/GeneralMing/codeforces
"""
# Read n operations; each line containing '+' increments x, anything else
# (the '-' operations) decrements it. Print the final value.
n = int(input())
x = 0
for _ in range(n):
    op = input()
    x += 1 if '+' in op else -1
print(x)
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-03 16:27
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Second shop_app migration: gives Category a URL slug (alias) and an
    explicit display order, and sets Russian verbose names on the model and
    its fields. Auto-generated by Django 1.10.6."""
    dependencies = [
        ('shop_app', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name': 'Категория', 'verbose_name_plural': 'Категории'},
        ),
        migrations.AddField(
            model_name='category',
            name='alias',
            field=models.SlugField(default='', max_length=100, verbose_name='Псевдоним для url'),
        ),
        migrations.AddField(
            model_name='category',
            name='order',
            field=models.IntegerField(default=1, verbose_name='Порядок показа'),
        ),
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(verbose_name='Описание'),
        ),
        migrations.AlterField(
            model_name='category',
            name='parent',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop_app.Category', verbose_name='Родительская категория'),
        ),
        migrations.AlterField(
            model_name='category',
            name='title',
            field=models.CharField(max_length=254, verbose_name='Наименование'),
        ),
    ]
|
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow import keras
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
import numpy as np
print(tf.__version__)
# Load the IMDB reviews dataset together with its metadata.
(train_data, test_data), info = tfds.load(
    # Use the version pre-encoded with an ~8k vocabulary.
    'imdb_reviews/subwords8k',
    # Return the train/test datasets as a tuple.
    split = (tfds.Split.TRAIN, tfds.Split.TEST),
    # Return (example, label) pairs from the dataset (instead of a dictionary).
    as_supervised=True,
    # Also return the `info` structure.
    with_info=True)
print('== train_data ==')
print(train_data)
print('== train_data.take(1) ==')
print(train_data.take(1))
print('== info info')
print(info)
# The subword text encoder shipped with the dataset.
encoder = info.features['text'].encoder
print ('Vocabulary size: {}'.format(encoder.vocab_size))
# Round-trip a sample string through the encoder to demonstrate it is lossless.
sample_string = 'Hello TensorFlow.'
encoded_string = encoder.encode(sample_string)
print ('Encoded string is {}'.format(encoded_string))
original_string = encoder.decode(encoded_string)
print ('The original string: "{}"'.format(original_string))
assert original_string == sample_string
# Show which subword each token id maps to.
for ts in encoded_string:
    print ('{} ----> {}'.format(ts, encoder.decode([ts])))
for train_example, train_label in train_data.take(1):
    print('Encoded text:', train_example[:10].numpy())
    print('Label:', train_label.numpy())
BUFFER_SIZE = 1000
# Shuffle and pad each batch to its longest review.
# NOTE(review): Dataset.output_shapes is the TF1-era API; on TF 2.x use
# tf.compat.v1.data.get_output_shapes(...) or omit padded_shapes — confirm
# against the installed TensorFlow version.
train_batches = (
    train_data
    .shuffle(BUFFER_SIZE)
    .padded_batch(32, train_data.output_shapes))
test_batches = (
    test_data
    .padded_batch(32, train_data.output_shapes))
|
import numpy as np
# Tag each example with a fastText-style "__label__<id>" prefix: id 0 for
# the train file, id 1 for the eval file, writing to "<name>.new".
files = ['train_data.in', 'eval_data.in']
cnt = 0
for filename in files:
    with open(filename, 'r') as fin, open(filename + '.new', 'w') as fout:
        for line in fin:
            line = line.strip()
            fout.write('__label__{}'.format(cnt))
            fout.write('\t')
            fout.write(line)
            fout.write('\n')
    # One label id per input file.
    cnt += 1
# Emit one random user-feature row per label id:
# "<id>\t<random 0-9>\t<random 0-2>".
with open("user_features.tsv", 'w') as fuser:
    for i in range(cnt):
        fuser.write(str(i))
        fuser.write('\t')
        fuser.write(str(np.random.randint(10)))
        fuser.write('\t')
        fuser.write(str(np.random.randint(3)))
        fuser.write('\n')
|
# =======================================================================================
# helpers/adapters.py
# =======================================================================================
from . import morse_local_config as exp_settings
from morse.builder import Component
from morse.middleware.ros_request_manager import ros_service, ros_action
from morse.core.overlay import MorseOverlay
from morse.core.exceptions import MorseServiceError
from morse.middleware.ros import ROSPublisher, ROSPublisherTF, ROSSubscriber
# ----------------------------------------------------------------------------------------
def morse_to_ros_namespace(name):
    """Convert a dotted MORSE name ("a.b.c") to a ROS namespace ("a/b/c")."""
    return "/".join(name.split("."))
# ----------------------------------------------------------------------------------------
class ros:
    """Namespace class grouping the ROS-facing adapter types used by MORSE scenes."""
    # Topics - Publisher/Subscriber and more
    # --------------------------------------
    class Publisher(ROSPublisher):
        pass
    class Subscriber(ROSSubscriber):
        pass
    class TfBroadcaster(ROSPublisherTF):
        pass
    # Decorators re-exported under shorter names
    # --------------------------------------
    service = ros_service
    action = ros_action
    # Classes inherit from MorseOverlay
    # --------------------------------------
    class Service(MorseOverlay):
        """
        A ROS service is created to export MORSE services through the overlay class.
        Therefore, the class exporting this services must inherit from this.
        """
        pass
    class Action(MorseOverlay):
        """
        A ROS action is created to export MORSE services through the overlay class.
        Therefore, the class exporting this services must inherit from this.
        """
        pass
    # Registers
    # --------------------------------------
    class ServiceRegister:
        """
        This class attaches (registers) a MORSE service to a ROS service that exports it.
        """
        _mw_location = exp_settings.mw_loc
        @staticmethod
        def register(component, service_class, name=""):
            """Overlay *component* with the middleware service class, under namespace *name*."""
            # BUG FIX: the original read ros.Service._mw_location, but
            # _mw_location is defined on ServiceRegister (ros.Service has no
            # such attribute), so register() raised AttributeError when called.
            service = ros.ServiceRegister._mw_location + service_class
            component.add_overlay("ros", service, namespace=name)
    class ActionRegister:
        pass
    class TopicRegister:
        """
        This class attaches (registers) a MORSE datastream to a ROS datastream that exports it.
        """
        _mw_location = exp_settings.mw_loc
        @staticmethod
        def register(component, name=""):
            """Expose *component*'s datastream on the ROS topic *name*."""
            component.add_interface("ros", topic=name)
# ----------------------------------------------------------------------------------------
def register_ros_service(obj, name, service_class):
    """Overlay *obj* with the middleware service *service_class* under ROS namespace *name*."""
    obj.add_overlay("ros", exp_settings.mw_loc + service_class, namespace=name)
def register_ros_action(obj, name, action_class):
    """Overlay *obj* with the middleware action *action_class* under ROS namespace *name*."""
    obj.add_overlay("ros", exp_settings.mw_loc + action_class, namespace=name)
def register_ros_topic(obj, name, topic_class=None):
    """Expose *obj*'s datastream on the ROS topic *name*.

    When *topic_class* is given, the stream is wired through that middleware
    class; otherwise the default interface for the stream is used.
    """
    if topic_class is not None:
        topic_path = exp_settings.mw_loc + topic_class
        obj.add_stream("ros", topic_path, topic=name)
    else:
        # The original also assigned an unused topic_path = "" here; that
        # dead store has been removed.
        obj.add_stream("ros", topic=name)
# =======================================================================================
|
import os
import logging
import argparse
import json
import cv2
import progressbar
import numpy as np
from keras.models import load_model
import rle
from unet import preprocess
from ImageMaskIterator import ImageMaskIterator
from PIL import Image
from multiprocessing import Process, Queue
def get_model_uid(model_filename):
    """Model identifier: the checkpoint file name stripped of directory and extension."""
    stem, _ = os.path.splitext(os.path.basename(model_filename))
    return stem
def get_tmp_scaled(args):
    """Directory under args.tmp_dir where full-resolution predicted masks are cached."""
    return os.path.join(args.tmp_dir, get_model_uid(args.model), "scaled")
def get_submission_filename(args):
    """Path of the submission CSV for the model named in args.model."""
    return os.path.join(args.tmp_dir, get_model_uid(args.model), "submission.csv")
def predict_csv_writer(queue_batches, test_ids, csv_filename, scaled_dir):
    """Consumer process: drain mask batches from *queue_batches* into the submission CSV.

    Each predicted mask is upscaled to full resolution, saved as a PNG in
    *scaled_dir*, and appended to *csv_filename* as "<id>.jpg,<rle>". The
    producer signals completion by putting the string 'DONE' on the queue.
    """
    current_sample = 0
    with progressbar.ProgressBar(0, len(test_ids)) as pbar, \
        open(csv_filename, "w") as csv_file:
        csv_file.write("img,rle_mask\n")
        while True:
            bmasks = queue_batches.get()
            if isinstance(bmasks, str) and bmasks == 'DONE':
                break
            for i in range(bmasks.shape[0]):
                # The final batch may contain padding rows beyond the test set.
                if current_sample < len(test_ids):
                    mask_full_res = scale(bmasks[i, ...])
                    basename = test_ids[current_sample]
                    # Save predicted mask
                    output_filename = os.path.join(scaled_dir, basename + ".png")
                    cv2.imwrite(output_filename, mask_full_res)
                    # Write in csv file
                    csv_file.write(basename + ".jpg" + ",")
                    csv_file.write(rle.dumps(mask_full_res))
                    csv_file.write("\n")
                    current_sample += 1
                    pbar.update(current_sample)
    logging.info("Writing in csv done for {} samples.".format(current_sample))
def predict_test(args):
    """
    Compute the prediction mask for all test images.

    Loads the Keras model from args.model, streams the test images through it
    batch by batch, and hands each batch of predicted masks to a separate
    writer process (predict_csv_writer) over a multiprocessing queue so disk
    I/O overlaps with GPU/CPU inference.
    :param args: parsed CLI namespace (model, test, images_dir, batch_size, tmp_dir).
    """
    scaled_dir = get_tmp_scaled(args)
    os.makedirs(scaled_dir, exist_ok=True)
    csv_filename = get_submission_filename(args)
    model = load_model(args.model, compile=False)
    # The model input spatial size drives the iterator's resize target.
    input_shape = model.input_shape[1:3]
    with open(args.test, "r") as jfile:
        test_ids = json.load(jfile)
    logging.info("Apply prediction model on {} images".format(len(test_ids)))
    test_iterator = ImageMaskIterator(args.images_dir, None,
                                      test_ids,
                                      batch_size=args.batch_size,
                                      x_shape=input_shape,
                                      shuffle=False,
                                      x_preprocess=preprocess)
    queue_batches = Queue()
    writer_p = Process(target=predict_csv_writer,
                       args=(queue_batches, test_ids, csv_filename, scaled_dir))
    writer_p.daemon = True
    writer_p.start()
    for batch_idx in range(test_iterator.steps_per_epoch):
        bx, _ = next(test_iterator)
        bmasks = model.predict_on_batch(bx)
        queue_batches.put(bmasks)
    logging.info("Prediction done.")
    # Sentinel telling the writer process to finish and flush.
    queue_batches.put("DONE")
    writer_p.join()
def load_test(args):
    """Build the submission CSV from masks already predicted on disk.

    Reads each "<test_id>.png" mask from args.src_dir and writes one
    "<test_id>.jpg,<rle>" row per test id — no model inference involved.
    """
    src_dir = args.src_dir
    csv_filename = get_submission_filename(args)
    with open(args.test, "r") as jfile:
        test_ids = json.load(jfile)
    with progressbar.ProgressBar(0, len(test_ids)) as pbar, \
        open(csv_filename, "w") as csv_file:
        csv_file.write("img,rle_mask\n")
        for i, test_id in enumerate(test_ids):
            mask_filename = os.path.join(src_dir, test_id + ".png")
            mask = np.array(Image.open(mask_filename))
            # Write in csv file
            csv_file.write(test_id + ".jpg" + ",")
            csv_file.write(rle.dumps(mask))
            csv_file.write("\n")
            pbar.update(i + 1)
def scale(mask):
    """Turn a float mask in [0, 1] into a binary uint8 mask at full resolution.

    Resizes to 1280x1920 when necessary, then thresholds at 0.5 (127/255)
    and crops one pixel off each side horizontally, yielding 1280x1918
    (presumably the Carvana image width — confirm against the dataset).
    """
    mask_u8 = (mask * 255).astype(np.uint8)
    if mask_u8.shape == (1280, 1920):
        full = mask_u8
    else:
        full = cv2.resize(mask_u8, (1920, 1280),
                          interpolation=cv2.INTER_CUBIC)
    return (full[:, 1:1919] > 127).astype(np.uint8)
def make_submission(args):
    """Produce the submission CSV.

    Reuses masks already present in args.src_dir when that directory exists;
    otherwise runs the model over the test set.
    """
    if os.path.isdir(args.src_dir):
        logging.info("Use existing predicted masks.")
        load_test(args)
        return
    model_uid = get_model_uid(args.model)
    logging.info("Model uid: {}".format(model_uid))
    predict_test(args)
if __name__ == "__main__":
    # CLI entry point: parse arguments and build the submission.
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(
        description='Create a submission given a trained model.')
    parser.add_argument('--tmp_dir',
                        type=str, default="../data/tmp",
                        help='Temp directory to save the predicted masks.')
    parser.add_argument('--src_dir',
                        type=str, default="",
                        help='Directory to load predicted masks')
    parser.add_argument('--model',
                        type=str,
                        help="Path to the model checkpoint.")
    parser.add_argument('--images_dir',
                        type=str,
                        help="Path to the images directory.")
    parser.add_argument('--test',
                        type=str,
                        help="Path to the json file with the test ids.")
    parser.add_argument('--batch_size',
                        type=int, default=4,
                        help="Number of samples processed in one batch.")
    args = parser.parse_args()
    make_submission(args)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 05 01:14:03 2017
@author: mheinl
"""
from google import search
import requests, re, getopt, sys
harvest = []  # accumulates every onion identifier found across all scans
inputfile = ''
outputfile = 'harvest.txt'
googleSearchTerm = 'onion links'
# Renamed from `help`: the old name shadowed the builtin help().
HELP_TEXT = '\nUsage: onion_harvester.py [options]\n'\
       'Options:\n'\
       '-h \t \t \t Show this help\n'\
       '-i \t <inputfile> \t Read URLs from inputfile\n'\
       '-o \t <outputfile> \t Write harvested onions to outputfile (default: harvest.txt)\n'\
       '-s \t <searchterm> \t Harvest Google search results (default: \'onion links\')'
### Handle options and arguments
try:
    opts, args = getopt.getopt(sys.argv[1:], "hi:o:s:")
except getopt.GetoptError:
    print(HELP_TEXT)
    sys.exit(2)
for opt, arg in opts:
    if opt == '-h':
        print(HELP_TEXT)
        sys.exit()
    # `==` replaces the old `opt in ('-o')`: `('-o')` is just the string
    # '-o' (missing comma), so `in` performed a substring test, matching
    # '', '-' and 'o' as well as '-o'.
    elif opt == '-o':
        outputfile = arg
    elif opt == '-i':
        inputfile = arg
    elif opt == '-s':
        googleSearchTerm = arg
### Send request, parse response, write in harvest
def harvester(website):
    """Fetch `website` and append every .onion identifier found to `harvest`.

    Identifiers are 16 characters drawn from [2-7a-z] (v2 onion addresses).
    Network failures are reported and swallowed so a scan over many URLs
    can continue.
    """
    print('[+] scan ' + website.rstrip())
    try:
        page = requests.get(website.rstrip())
        # Raw string + escaped dot: the old pattern '([2-7a-z]{16}).onion'
        # let '.' match ANY character, accepting e.g. 'aaaaaaaaaaaaaaaaXonion'.
        onions = re.findall(r'([2-7a-z]{16})\.onion', page.text)
        harvest.extend(onions)
    except requests.exceptions.RequestException as e:
        print('[-] Error: ' + str(e))
### If passed, read URLs from file
if inputfile:
    # `with` guarantees the handle is closed; the old code leaked it.
    with open(inputfile, 'r') as url_file:
        print('[+] Harvest URLs from ' + inputfile + ':')
        for website in url_file:
            harvester(website)
### read URLs directly from google
print('[+] Harvest URLs from Google:')
for website in search(googleSearchTerm, stop=1):
    harvester(website)
### Treat the Harvest
harvest = set(harvest)  # deduplicate the collected identifiers
print('[+] write ' + str(len(harvest)) + ' unique identifiers to ' + outputfile)
# `with` flushes and closes the output file; the old code never closed it.
with open(outputfile, 'w') as out:
    for element in harvest:
        out.write("%s\n" % element)
print('[+] Done!')
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import unittest
class BaseTestFixture(unittest.TestCase):
    """Base Selenium fixture: starts one Chrome driver per test.

    Subclasses override ``_testInitialize`` / ``_testCleanup`` for
    per-test setup and teardown; the driver lifecycle is owned here.
    """
    driver = None
    def setUp(self):
        print("Running SetUp")
        # Configure Chrome for unattended / CI-friendly runs.
        chrome_options = Options()
        chrome_options.add_argument("--disable-extensions")
        chrome_options.add_argument("--no-sandbox")
        chrome_options.add_argument("disable-infobars")
        # Start WebDriver
        self.driver = webdriver.Chrome(options=chrome_options)
        # Call overriden _testInitialize in test case class
        self._testInitialize()
    def tearDown(self):
        print("Running Teardown")
        # BUG FIX: the original wrote `self._testCleanup` (no parentheses),
        # which only referenced the bound method — subclass cleanup never ran.
        self._testCleanup()
        self.driver.close()
        self.driver.quit()
    def _testInitialize(self):
        """Hook for subclasses; runs after the driver is created."""
        pass
    def _testCleanup(self):
        """Hook for subclasses; runs before the driver shuts down."""
        pass
|
# Keyboard control, mouse control, and crawling of convenience-store addresses
# (CU stores) from Naver Maps.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# NOTE(review): the find_element(s)_by_* methods were removed in Selenium 4;
# this script appears to require Selenium 3.x — confirm the pinned version.
driver = webdriver.Chrome('chromedriver')
url = 'https://map.naver.com'
driver.get(url)
search = driver.find_element_by_css_selector('input#search-input')
# Type 'CU' (Korean: 씨유) into the search box, then press ENTER.
search.send_keys('씨유')
search.send_keys(Keys.ENTER)
time.sleep(0.5)
# Crawl result pages 1 through 5.
for j in range(0,5):
    time.sleep(2)  # wait for the result list to render
    page = driver.find_elements_by_css_selector('div.paginate a')
    targets = driver.find_elements_by_class_name('lsnx_det')
    for target in targets:
        # Store name and street address of each listing on the page.
        names = target.find_element_by_css_selector('dt a').text
        address = target.find_element_by_css_selector('dd.addr').text
        print(names, address)
    print('\n')
    page[j].click()
# Crawl page 6 onward: two more passes over the pagination links.
count = 0
while count < 2:
    for j in range(1,6):
        time.sleep(2)
        page = driver.find_elements_by_css_selector('div.paginate a')
        targets = driver.find_elements_by_class_name('lsnx_det')
        for target in targets:
            names = target.find_element_by_css_selector('dt a').text
            address = target.find_element_by_css_selector('dd.addr').text
            print(names, address)
        print('\n')
        page[j].click()
    count += 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 9 16:07:23 2019
@author: arnab
"""
from keras.models import model_from_json
from pathlib import Path
from keras.preprocessing import image
import numpy as np
from keras.applications import vgg16
import joblib
# Load the json file that contains the model's structure
f = Path("arnab_model_structure.json")
model_structure = f.read_text()
# Recreate the Keras model object from the json data
model = model_from_json(model_structure)
# Re-load the model's trained weights
model.load_weights("arnab_model_weights.h5")
# Test features/labels serialized earlier with joblib.
x_test = joblib.load("arnab_x_test.dat")
y_test = joblib.load("arnab_y_test.dat")
# Run the model over the whole test set.
results = model.predict(x_test)
# Spot-check a single sample: prediction vs. ground truth at index 5.
print(results[5])
print(y_test[5])
import hashlib
GRAVATAR_URL = ("https://www.gravatar.com/avatar/"
                "{hashed_email}?s={size}&r=g&d=robohash")


def create_gravatar_url(email, size=200):
    """Build a Gravatar URL (robohash fallback) for *email*.

    The address is normalized — all spaces removed, surrounding
    whitespace stripped, lowercased — then MD5-hashed, following the
    Gravatar hashing guide.
    """
    normalized = email.replace(' ', '').strip().lower()
    digest = hashlib.md5(normalized.encode()).hexdigest()
    return GRAVATAR_URL.format(hashed_email=digest, size=size)
# def main():
# print('paddle to the buoy... and then waves!')
# what = create_gravatar_url(' support@pybit.es', 1000)
# print(what)
# if __name__ == '__main__':
# main()
|
"""
https://edabit.com/challenge/A8gEGRXqMwRWQJvBf
"""
def tic_tac_toe(ls: list) -> str:
    """Return the winning symbol on a 3x3 board, or 'Draw'.

    Checks, in order: rows, columns, the main diagonal, the anti-diagonal.
    A line wins when all three of its cells hold the same symbol.
    """
    row_wins = ''.join(r[0] for r in ls if len(set(r)) == 1)
    col_wins = ''.join(c[0] for c in zip(*ls) if len(set(c)) == 1)
    main_diag = {ls[i][i] for i in range(3)}
    anti_diag = {ls[2 - i][i] for i in range(3)}
    # Exactly one uniform row/column wins outright.
    for winners in (row_wins, col_wins):
        if len(winners) == 1:
            return winners
    # A diagonal collapses to a single-element set when uniform.
    for diag in (main_diag, anti_diag):
        if len(diag) == 1:
            return next(iter(diag))
    return 'Draw'
# Demo: prints the winner for a board containing a stray 'E' marker.
print(tic_tac_toe([
    ["O", "O", "O"],
    ["O", "X", "X"],
    ["E", "X", "X"]
]))
# Main-diagonal win for "X".
assert tic_tac_toe([
    ["X", "O", "X"],
    ["O", "X", "O"],
    ["O", "X", "X"]
]) == "X"
# Top-row win for "O"; the unknown 'E' cell must not interfere.
assert tic_tac_toe([
    ["O", "O", "O"],
    ["O", "X", "X"],
    ["E", "X", "X"]
]) == "O"
print("Success")
class like_provider(object):
    """Data-access interface stub for post 'likes'.

    All methods are unimplemented placeholders returning None.
    """

    def insert_like(self, post_id, user_id):
        """Record that user_id liked post_id (not implemented)."""
        pass

    def delete_like(self, post_id, user_id):
        """Remove user_id's like from post_id (not implemented)."""
        pass

    def select_like_by_user(self, user_id):
        """Fetch all likes made by user_id (not implemented)."""
        pass

    def select_like_by_post(self, post_id):
        """Fetch all likes on post_id (not implemented)."""
        pass
|
from sklearn.datasets import fetch_california_housing
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Gradient-descent linear regression on the California housing dataset,
# written against the TensorFlow 1.x graph/Session API.
n_epochs = 10000
learning_rate = 0.001
# Fetch (and cache) the dataset under data_home.
housing_data = fetch_california_housing(data_home='D:\\学习笔记\\ai\\dataSets', download_if_missing=True)
data = housing_data.data
m, n = data.shape
# Prepend a bias column of ones to the feature matrix.
# NOTE(review): "puls" is a typo of "plus"; kept to avoid touching code.
housing_data_puls_bias = np.c_[np.ones((m, 1)), data]
X = tf.constant(housing_data_puls_bias, name='X', dtype=tf.float32)
y = tf.constant(housing_data.target.reshape(-1, 1), name='y', dtype=tf.float32)
# Weight vector (n features + bias), uniformly initialized in [-1, 1).
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
print('theta:', theta.shape)
y_hat = tf.matmul(X, theta, name='y_hat')
err = y_hat - y
print(y_hat.shape, y.shape)
print('err.shape:', err.shape)
# Mean squared error over the full dataset (batch gradient descent).
mse = tf.reduce_mean(tf.square(err), name='mse')
# opt=tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
opt=tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = opt.minimize(mse)
init = tf.global_variables_initializer()
print(X.shape)
# Requires TensorFlow 1.x (Session / global_variables_initializer).
with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        # epoch % 1 == 0 is always true, so MSE prints every epoch.
        if epoch%1==0:
            print('mse.eval():',mse.eval())
        train_op.run()
# print('theta:', theta.eval())
# Author's trailing note (translated from Chinese): "Still has errors!!!!"
'''
还是有错!!!!
'''
import XPLMCamera
controlCamera = XPLMCamera.XPLMControlCamera
dontControlCamera = XPLMCamera.XPLMDontControlCamera
isCameraBeingControlled = XPLMCamera.XPLMIsCameraBeingControlled
readCameraPosition = XPLMCamera.XPLMReadCameraPosition
ControlCameraUntilViewChanges = XPLMCamera.xplm_ControlCameraUntilViewChanges
ControlCameraForever = XPLMCamera.xplm_ControlCameraForever
import XPLMDataAccess
findDataRef = XPLMDataAccess.XPLMFindDataRef
canWriteDataRef = XPLMDataAccess.XPLMCanWriteDataRef
isDataRefGood = XPLMDataAccess.XPLMIsDataRefGood
getDataRefTypes = XPLMDataAccess.XPLMGetDataRefTypes
getDatai = XPLMDataAccess.XPLMGetDatai
setDatai = XPLMDataAccess.XPLMSetDatai
getDataf = XPLMDataAccess.XPLMGetDataf
setDataf = XPLMDataAccess.XPLMSetDataf
getDatad = XPLMDataAccess.XPLMGetDatad
setDatad = XPLMDataAccess.XPLMSetDatad
getDatavi = XPLMDataAccess.XPLMGetDatavi
setDatavi = XPLMDataAccess.XPLMSetDatavi
getDatavf = XPLMDataAccess.XPLMGetDatavf
setDatavf = XPLMDataAccess.XPLMSetDatavf
getDatab = XPLMDataAccess.XPLMGetDatab
setDatab = XPLMDataAccess.XPLMSetDatab
registerDataAccessor = XPLMDataAccess.XPLMRegisterDataAccessor
unregisterDataAccessor = XPLMDataAccess.XPLMUnregisterDataAccessor
shareData = XPLMDataAccess.XPLMShareData
unshareData = XPLMDataAccess.XPLMUnshareData
Type_Unknown = XPLMDataAccess.xplmType_Unknown
Type_Int = XPLMDataAccess.xplmType_Int
Type_Float = XPLMDataAccess.xplmType_Float
Type_Double = XPLMDataAccess.xplmType_Double
Type_FloatArray = XPLMDataAccess.xplmType_FloatArray
Type_IntArray = XPLMDataAccess.xplmType_IntArray
Type_Data = XPLMDataAccess.xplmType_Data
import XPLMDefs
ShiftFlag = XPLMDefs.xplm_ShiftFlag
OptionAltFlag = XPLMDefs.xplm_OptionAltFlag
ControlFlag = XPLMDefs.xplm_ControlFlag
DownFlag = XPLMDefs.xplm_DownFlag
UpFlag = XPLMDefs.xplm_UpFlag
NO_PLUGIN_ID = XPLMDefs.XPLM_NO_PLUGIN_ID
PLUGIN_XPLANE = XPLMDefs.XPLM_PLUGIN_XPLANE
KEY_RETURN = XPLMDefs.XPLM_KEY_RETURN
KEY_ESCAPE = XPLMDefs.XPLM_KEY_ESCAPE
KEY_TAB = XPLMDefs.XPLM_KEY_TAB
KEY_DELETE = XPLMDefs.XPLM_KEY_DELETE
KEY_LEFT = XPLMDefs.XPLM_KEY_LEFT
KEY_RIGHT = XPLMDefs.XPLM_KEY_RIGHT
KEY_UP = XPLMDefs.XPLM_KEY_UP
KEY_DOWN = XPLMDefs.XPLM_KEY_DOWN
KEY_0 = XPLMDefs.XPLM_KEY_0
KEY_1 = XPLMDefs.XPLM_KEY_1
KEY_2 = XPLMDefs.XPLM_KEY_2
KEY_3 = XPLMDefs.XPLM_KEY_3
KEY_4 = XPLMDefs.XPLM_KEY_4
KEY_5 = XPLMDefs.XPLM_KEY_5
KEY_6 = XPLMDefs.XPLM_KEY_6
KEY_7 = XPLMDefs.XPLM_KEY_7
KEY_8 = XPLMDefs.XPLM_KEY_8
KEY_9 = XPLMDefs.XPLM_KEY_9
KEY_DECIMAL = XPLMDefs.XPLM_KEY_DECIMAL
VK_BACK = XPLMDefs.XPLM_VK_BACK
VK_TAB = XPLMDefs.XPLM_VK_TAB
VK_CLEAR = XPLMDefs.XPLM_VK_CLEAR
VK_RETURN = XPLMDefs.XPLM_VK_RETURN
VK_ESCAPE = XPLMDefs.XPLM_VK_ESCAPE
VK_SPACE = XPLMDefs.XPLM_VK_SPACE
VK_PRIOR = XPLMDefs.XPLM_VK_PRIOR
VK_NEXT = XPLMDefs.XPLM_VK_NEXT
VK_END = XPLMDefs.XPLM_VK_END
VK_HOME = XPLMDefs.XPLM_VK_HOME
VK_LEFT = XPLMDefs.XPLM_VK_LEFT
VK_UP = XPLMDefs.XPLM_VK_UP
VK_RIGHT = XPLMDefs.XPLM_VK_RIGHT
VK_DOWN = XPLMDefs.XPLM_VK_DOWN
VK_SELECT = XPLMDefs.XPLM_VK_SELECT
VK_PRINT = XPLMDefs.XPLM_VK_PRINT
VK_EXECUTE = XPLMDefs.XPLM_VK_EXECUTE
VK_SNAPSHOT = XPLMDefs.XPLM_VK_SNAPSHOT
VK_INSERT = XPLMDefs.XPLM_VK_INSERT
VK_DELETE = XPLMDefs.XPLM_VK_DELETE
VK_HELP = XPLMDefs.XPLM_VK_HELP
VK_0 = XPLMDefs.XPLM_VK_0
VK_1 = XPLMDefs.XPLM_VK_1
VK_2 = XPLMDefs.XPLM_VK_2
VK_3 = XPLMDefs.XPLM_VK_3
VK_4 = XPLMDefs.XPLM_VK_4
VK_5 = XPLMDefs.XPLM_VK_5
VK_6 = XPLMDefs.XPLM_VK_6
VK_7 = XPLMDefs.XPLM_VK_7
VK_8 = XPLMDefs.XPLM_VK_8
VK_9 = XPLMDefs.XPLM_VK_9
VK_A = XPLMDefs.XPLM_VK_A
VK_B = XPLMDefs.XPLM_VK_B
VK_C = XPLMDefs.XPLM_VK_C
VK_D = XPLMDefs.XPLM_VK_D
VK_E = XPLMDefs.XPLM_VK_E
VK_F = XPLMDefs.XPLM_VK_F
VK_G = XPLMDefs.XPLM_VK_G
VK_H = XPLMDefs.XPLM_VK_H
VK_I = XPLMDefs.XPLM_VK_I
VK_J = XPLMDefs.XPLM_VK_J
VK_K = XPLMDefs.XPLM_VK_K
VK_L = XPLMDefs.XPLM_VK_L
VK_M = XPLMDefs.XPLM_VK_M
VK_N = XPLMDefs.XPLM_VK_N
VK_O = XPLMDefs.XPLM_VK_O
VK_P = XPLMDefs.XPLM_VK_P
VK_Q = XPLMDefs.XPLM_VK_Q
VK_R = XPLMDefs.XPLM_VK_R
VK_S = XPLMDefs.XPLM_VK_S
VK_T = XPLMDefs.XPLM_VK_T
VK_U = XPLMDefs.XPLM_VK_U
VK_V = XPLMDefs.XPLM_VK_V
VK_W = XPLMDefs.XPLM_VK_W
VK_X = XPLMDefs.XPLM_VK_X
VK_Y = XPLMDefs.XPLM_VK_Y
VK_Z = XPLMDefs.XPLM_VK_Z
VK_NUMPAD0 = XPLMDefs.XPLM_VK_NUMPAD0
VK_NUMPAD1 = XPLMDefs.XPLM_VK_NUMPAD1
VK_NUMPAD2 = XPLMDefs.XPLM_VK_NUMPAD2
VK_NUMPAD3 = XPLMDefs.XPLM_VK_NUMPAD3
VK_NUMPAD4 = XPLMDefs.XPLM_VK_NUMPAD4
VK_NUMPAD5 = XPLMDefs.XPLM_VK_NUMPAD5
VK_NUMPAD6 = XPLMDefs.XPLM_VK_NUMPAD6
VK_NUMPAD7 = XPLMDefs.XPLM_VK_NUMPAD7
VK_NUMPAD8 = XPLMDefs.XPLM_VK_NUMPAD8
VK_NUMPAD9 = XPLMDefs.XPLM_VK_NUMPAD9
VK_MULTIPLY = XPLMDefs.XPLM_VK_MULTIPLY
VK_ADD = XPLMDefs.XPLM_VK_ADD
VK_SEPARATOR = XPLMDefs.XPLM_VK_SEPARATOR
VK_SUBTRACT = XPLMDefs.XPLM_VK_SUBTRACT
VK_DECIMAL = XPLMDefs.XPLM_VK_DECIMAL
VK_DIVIDE = XPLMDefs.XPLM_VK_DIVIDE
VK_F1 = XPLMDefs.XPLM_VK_F1
VK_F2 = XPLMDefs.XPLM_VK_F2
VK_F3 = XPLMDefs.XPLM_VK_F3
VK_F4 = XPLMDefs.XPLM_VK_F4
VK_F5 = XPLMDefs.XPLM_VK_F5
VK_F6 = XPLMDefs.XPLM_VK_F6
VK_F7 = XPLMDefs.XPLM_VK_F7
VK_F8 = XPLMDefs.XPLM_VK_F8
VK_F9 = XPLMDefs.XPLM_VK_F9
VK_F10 = XPLMDefs.XPLM_VK_F10
VK_F11 = XPLMDefs.XPLM_VK_F11
VK_F12 = XPLMDefs.XPLM_VK_F12
VK_F13 = XPLMDefs.XPLM_VK_F13
VK_F14 = XPLMDefs.XPLM_VK_F14
VK_F15 = XPLMDefs.XPLM_VK_F15
VK_F16 = XPLMDefs.XPLM_VK_F16
VK_F17 = XPLMDefs.XPLM_VK_F17
VK_F18 = XPLMDefs.XPLM_VK_F18
VK_F19 = XPLMDefs.XPLM_VK_F19
VK_F20 = XPLMDefs.XPLM_VK_F20
VK_F21 = XPLMDefs.XPLM_VK_F21
VK_F22 = XPLMDefs.XPLM_VK_F22
VK_F23 = XPLMDefs.XPLM_VK_F23
VK_F24 = XPLMDefs.XPLM_VK_F24
VK_EQUAL = XPLMDefs.XPLM_VK_EQUAL
VK_MINUS = XPLMDefs.XPLM_VK_MINUS
VK_RBRACE = XPLMDefs.XPLM_VK_RBRACE
VK_LBRACE = XPLMDefs.XPLM_VK_LBRACE
VK_QUOTE = XPLMDefs.XPLM_VK_QUOTE
VK_SEMICOLON = XPLMDefs.XPLM_VK_SEMICOLON
VK_BACKSLASH = XPLMDefs.XPLM_VK_BACKSLASH
VK_COMMA = XPLMDefs.XPLM_VK_COMMA
VK_SLASH = XPLMDefs.XPLM_VK_SLASH
VK_PERIOD = XPLMDefs.XPLM_VK_PERIOD
VK_BACKQUOTE = XPLMDefs.XPLM_VK_BACKQUOTE
VK_ENTER = XPLMDefs.XPLM_VK_ENTER
VK_NUMPAD_ENT = XPLMDefs.XPLM_VK_NUMPAD_ENT
VK_NUMPAD_EQ = XPLMDefs.XPLM_VK_NUMPAD_EQ
import XPLMDisplay
registerDrawCallback = XPLMDisplay.XPLMRegisterDrawCallback
unregisterDrawCallback = XPLMDisplay.XPLMUnregisterDrawCallback
createWindowEx = XPLMDisplay.XPLMCreateWindowEx
destroyWindow = XPLMDisplay.XPLMDestroyWindow
getScreenSize = XPLMDisplay.XPLMGetScreenSize
getScreenBoundsGlobal = XPLMDisplay.XPLMGetScreenBoundsGlobal
getAllMonitorBoundsGlobal = XPLMDisplay.XPLMGetAllMonitorBoundsGlobal
getAllMonitorBoundsOS = XPLMDisplay.XPLMGetAllMonitorBoundsOS
getMouseLocationGlobal = XPLMDisplay.XPLMGetMouseLocationGlobal
getWindowGeometry = XPLMDisplay.XPLMGetWindowGeometry
setWindowGeometry = XPLMDisplay.XPLMSetWindowGeometry
getWindowGeometryOS = XPLMDisplay.XPLMGetWindowGeometryOS
setWindowGeometryOS = XPLMDisplay.XPLMSetWindowGeometryOS
getWindowGeometryVR = XPLMDisplay.XPLMGetWindowGeometryVR
setWindowGeometryVR = XPLMDisplay.XPLMSetWindowGeometryVR
getWindowIsVisible = XPLMDisplay.XPLMGetWindowIsVisible
setWindowIsVisible = XPLMDisplay.XPLMSetWindowIsVisible
windowIsPoppedOut = XPLMDisplay.XPLMWindowIsPoppedOut
windowIsInVR = XPLMDisplay.XPLMWindowIsInVR
setWindowGravity = XPLMDisplay.XPLMSetWindowGravity
setWindowResizingLimits = XPLMDisplay.XPLMSetWindowResizingLimits
setWindowPositioningMode = XPLMDisplay.XPLMSetWindowPositioningMode
setWindowTitle = XPLMDisplay.XPLMSetWindowTitle
getWindowRefCon = XPLMDisplay.XPLMGetWindowRefCon
setWindowRefCon = XPLMDisplay.XPLMSetWindowRefCon
takeKeyboardFocus = XPLMDisplay.XPLMTakeKeyboardFocus
hasKeyboardFocus = XPLMDisplay.XPLMHasKeyboardFocus
bringWindowToFront = XPLMDisplay.XPLMBringWindowToFront
isWindowInFront = XPLMDisplay.XPLMIsWindowInFront
registerKeySniffer = XPLMDisplay.XPLMRegisterKeySniffer
unregisterKeySniffer = XPLMDisplay.XPLMUnregisterKeySniffer
registerHotKey = XPLMDisplay.XPLMRegisterHotKey
unregisterHotKey = XPLMDisplay.XPLMUnregisterHotKey
countHotKeys = XPLMDisplay.XPLMCountHotKeys
getNthHotKey = XPLMDisplay.XPLMGetNthHotKey
getHotKeyInfo = XPLMDisplay.XPLMGetHotKeyInfo
setHotKeyCombination = XPLMDisplay.XPLMSetHotKeyCombination
Phase_Modern3D = XPLMDisplay.xplm_Phase_Modern3D
Phase_FirstCockpit = XPLMDisplay.xplm_Phase_FirstCockpit
Phase_Panel = XPLMDisplay.xplm_Phase_Panel
Phase_Gauges = XPLMDisplay.xplm_Phase_Gauges
Phase_Window = XPLMDisplay.xplm_Phase_Window
Phase_LastCockpit = XPLMDisplay.xplm_Phase_LastCockpit
MouseDown = XPLMDisplay.xplm_MouseDown
MouseDrag = XPLMDisplay.xplm_MouseDrag
MouseUp = XPLMDisplay.xplm_MouseUp
CursorDefault = XPLMDisplay.xplm_CursorDefault
CursorHidden = XPLMDisplay.xplm_CursorHidden
CursorArrow = XPLMDisplay.xplm_CursorArrow
CursorCustom = XPLMDisplay.xplm_CursorCustom
WindowLayerFlightOverlay = XPLMDisplay.xplm_WindowLayerFlightOverlay
WindowLayerFloatingWindows = XPLMDisplay.xplm_WindowLayerFloatingWindows
WindowLayerModal = XPLMDisplay.xplm_WindowLayerModal
WindowLayerGrowlNotifications = XPLMDisplay.xplm_WindowLayerGrowlNotifications
WindowDecorationNone = XPLMDisplay.xplm_WindowDecorationNone
WindowDecorationRoundRectangle = XPLMDisplay.xplm_WindowDecorationRoundRectangle
WindowDecorationSelfDecorated = XPLMDisplay.xplm_WindowDecorationSelfDecorated
WindowDecorationSelfDecoratedResizable = XPLMDisplay.xplm_WindowDecorationSelfDecoratedResizable
WindowPositionFree = XPLMDisplay.xplm_WindowPositionFree
WindowCenterOnMonitor = XPLMDisplay.xplm_WindowCenterOnMonitor
WindowFullScreenOnMonitor = XPLMDisplay.xplm_WindowFullScreenOnMonitor
WindowFullScreenOnAllMonitors = XPLMDisplay.xplm_WindowFullScreenOnAllMonitors
WindowPopOut = XPLMDisplay.xplm_WindowPopOut
WindowVR = XPLMDisplay.xplm_WindowVR
import XPLMGraphics
setGraphicsState = XPLMGraphics.XPLMSetGraphicsState
bindTexture2d = XPLMGraphics.XPLMBindTexture2d
generateTextureNumbers = XPLMGraphics.XPLMGenerateTextureNumbers
worldToLocal = XPLMGraphics.XPLMWorldToLocal
localToWorld = XPLMGraphics.XPLMLocalToWorld
drawTranslucentDarkBox = XPLMGraphics.XPLMDrawTranslucentDarkBox
drawString = XPLMGraphics.XPLMDrawString
drawNumber = XPLMGraphics.XPLMDrawNumber
getFontDimensions = XPLMGraphics.XPLMGetFontDimensions
measureString = XPLMGraphics.XPLMMeasureString
Font_Basic = XPLMGraphics.xplmFont_Basic
Font_Proportional = XPLMGraphics.xplmFont_Proportional
import XPLMInstance
createInstance = XPLMInstance.XPLMCreateInstance
destroyInstance = XPLMInstance.XPLMDestroyInstance
instanceSetPosition = XPLMInstance.XPLMInstanceSetPosition
import XPLMMap
createMapLayer = XPLMMap.XPLMCreateMapLayer
destroyMapLayer = XPLMMap.XPLMDestroyMapLayer
registerMapCreationHook = XPLMMap.XPLMRegisterMapCreationHook
mapExists = XPLMMap.XPLMMapExists
drawMapIconFromSheet = XPLMMap.XPLMDrawMapIconFromSheet
drawMapLabel = XPLMMap.XPLMDrawMapLabel
mapProject = XPLMMap.XPLMMapProject
mapUnproject = XPLMMap.XPLMMapUnproject
mapScaleMeter = XPLMMap.XPLMMapScaleMeter
mapGetNorthHeading = XPLMMap.XPLMMapGetNorthHeading
MapStyle_VFR_Sectional = XPLMMap.xplm_MapStyle_VFR_Sectional
MapStyle_IFR_LowEnroute = XPLMMap.xplm_MapStyle_IFR_LowEnroute
MapStyle_IFR_HighEnroute = XPLMMap.xplm_MapStyle_IFR_HighEnroute
MapLayer_Fill = XPLMMap.xplm_MapLayer_Fill
MapLayer_Markings = XPLMMap.xplm_MapLayer_Markings
MapOrientation_Map = XPLMMap.xplm_MapOrientation_Map
MapOrientation_UI = XPLMMap.xplm_MapOrientation_UI
MAP_USER_INTERFACE = XPLMMap.XPLM_MAP_USER_INTERFACE
MAP_IOS = XPLMMap.XPLM_MAP_IOS
import XPLMMenus
findPluginsMenu = XPLMMenus.XPLMFindPluginsMenu
findAircraftMenu = XPLMMenus.XPLMFindAircraftMenu
createMenu = XPLMMenus.XPLMCreateMenu
destroyMenu = XPLMMenus.XPLMDestroyMenu
clearAllMenuItems = XPLMMenus.XPLMClearAllMenuItems
appendMenuItem = XPLMMenus.XPLMAppendMenuItem
appendMenuItemWithCommand = XPLMMenus.XPLMAppendMenuItemWithCommand
appendMenuSeparator = XPLMMenus.XPLMAppendMenuSeparator
setMenuItemName = XPLMMenus.XPLMSetMenuItemName
checkMenuItem = XPLMMenus.XPLMCheckMenuItem
checkMenuItemState = XPLMMenus.XPLMCheckMenuItemState
enableMenuItem = XPLMMenus.XPLMEnableMenuItem
removeMenuItem = XPLMMenus.XPLMRemoveMenuItem
Menu_NoCheck = XPLMMenus.xplm_Menu_NoCheck
Menu_Unchecked = XPLMMenus.xplm_Menu_Unchecked
Menu_Checked = XPLMMenus.xplm_Menu_Checked
import XPLMNavigation
getFirstNavAid = XPLMNavigation.XPLMGetFirstNavAid
getNextNavAid = XPLMNavigation.XPLMGetNextNavAid
findFirstNavAidOfType = XPLMNavigation.XPLMFindFirstNavAidOfType
findLastNavAidOfType = XPLMNavigation.XPLMFindLastNavAidOfType
findNavAid = XPLMNavigation.XPLMFindNavAid
getNavAidInfo = XPLMNavigation.XPLMGetNavAidInfo
countFMSEntries = XPLMNavigation.XPLMCountFMSEntries
getDisplayedFMSEntry = XPLMNavigation.XPLMGetDisplayedFMSEntry
getDestinationFMSEntry = XPLMNavigation.XPLMGetDestinationFMSEntry
setDisplayedFMSEntry = XPLMNavigation.XPLMSetDisplayedFMSEntry
setDestinationFMSEntry = XPLMNavigation.XPLMSetDestinationFMSEntry
getFMSEntryInfo = XPLMNavigation.XPLMGetFMSEntryInfo
setFMSEntryInfo = XPLMNavigation.XPLMSetFMSEntryInfo
setFMSEntryLatLon = XPLMNavigation.XPLMSetFMSEntryLatLon
clearFMSEntry = XPLMNavigation.XPLMClearFMSEntry
getGPSDestinationType = XPLMNavigation.XPLMGetGPSDestinationType
getGPSDestination = XPLMNavigation.XPLMGetGPSDestination
Nav_Unknown = XPLMNavigation.xplm_Nav_Unknown
Nav_Airport = XPLMNavigation.xplm_Nav_Airport
Nav_NDB = XPLMNavigation.xplm_Nav_NDB
Nav_VOR = XPLMNavigation.xplm_Nav_VOR
Nav_ILS = XPLMNavigation.xplm_Nav_ILS
Nav_Localizer = XPLMNavigation.xplm_Nav_Localizer
Nav_GlideSlope = XPLMNavigation.xplm_Nav_GlideSlope
Nav_OuterMarker = XPLMNavigation.xplm_Nav_OuterMarker
Nav_MiddleMarker = XPLMNavigation.xplm_Nav_MiddleMarker
Nav_InnerMarker = XPLMNavigation.xplm_Nav_InnerMarker
Nav_Fix = XPLMNavigation.xplm_Nav_Fix
Nav_DME = XPLMNavigation.xplm_Nav_DME
Nav_LatLon = XPLMNavigation.xplm_Nav_LatLon
NAV_NOT_FOUND = XPLMNavigation.XPLM_NAV_NOT_FOUND
import XPLMPlanes
setUsersAircraft = XPLMPlanes.XPLMSetUsersAircraft
placeUserAtAirport = XPLMPlanes.XPLMPlaceUserAtAirport
placeUserAtLocation = XPLMPlanes.XPLMPlaceUserAtLocation
countAircraft = XPLMPlanes.XPLMCountAircraft
getNthAircraftModel = XPLMPlanes.XPLMGetNthAircraftModel
acquirePlanes = XPLMPlanes.XPLMAcquirePlanes
releasePlanes = XPLMPlanes.XPLMReleasePlanes
setActiveAircraftCount = XPLMPlanes.XPLMSetActiveAircraftCount
setAircraftModel = XPLMPlanes.XPLMSetAircraftModel
disableAIForPlane = XPLMPlanes.XPLMDisableAIForPlane
USER_AIRCRAFT = XPLMPlanes.XPLM_USER_AIRCRAFT
import XPLMPlugin
getMyID = XPLMPlugin.XPLMGetMyID
countPlugins = XPLMPlugin.XPLMCountPlugins
getNthPlugin = XPLMPlugin.XPLMGetNthPlugin
findPluginByPath = XPLMPlugin.XPLMFindPluginByPath
findPluginBySignature = XPLMPlugin.XPLMFindPluginBySignature
getPluginInfo = XPLMPlugin.XPLMGetPluginInfo
isPluginEnabled = XPLMPlugin.XPLMIsPluginEnabled
enablePlugin = XPLMPlugin.XPLMEnablePlugin
disablePlugin = XPLMPlugin.XPLMDisablePlugin
reloadPlugins = XPLMPlugin.XPLMReloadPlugins
sendMessageToPlugin = XPLMPlugin.XPLMSendMessageToPlugin
hasFeature = XPLMPlugin.XPLMHasFeature
isFeatureEnabled = XPLMPlugin.XPLMIsFeatureEnabled
enableFeature = XPLMPlugin.XPLMEnableFeature
enumerateFeatures = XPLMPlugin.XPLMEnumerateFeatures
MSG_PLANE_CRASHED = XPLMPlugin.XPLM_MSG_PLANE_CRASHED
MSG_PLANE_LOADED = XPLMPlugin.XPLM_MSG_PLANE_LOADED
MSG_AIRPORT_LOADED = XPLMPlugin.XPLM_MSG_AIRPORT_LOADED
MSG_SCENERY_LOADED = XPLMPlugin.XPLM_MSG_SCENERY_LOADED
MSG_AIRPLANE_COUNT_CHANGED = XPLMPlugin.XPLM_MSG_AIRPLANE_COUNT_CHANGED
MSG_PLANE_UNLOADED = XPLMPlugin.XPLM_MSG_PLANE_UNLOADED
MSG_WILL_WRITE_PREFS = XPLMPlugin.XPLM_MSG_WILL_WRITE_PREFS
MSG_LIVERY_LOADED = XPLMPlugin.XPLM_MSG_LIVERY_LOADED
MSG_ENTERED_VR = XPLMPlugin.XPLM_MSG_ENTERED_VR
MSG_EXITING_VR = XPLMPlugin.XPLM_MSG_EXITING_VR
MsgPlaneCrashed = XPLMPlugin.XPLM_MSG_PLANE_CRASHED
MsgPlaneLoaded = XPLMPlugin.XPLM_MSG_PLANE_LOADED
MsgAirportLoaded = XPLMPlugin.XPLM_MSG_AIRPORT_LOADED
MsgSceneryLoaded = XPLMPlugin.XPLM_MSG_SCENERY_LOADED
MsgAirplaneCountChanged = XPLMPlugin.XPLM_MSG_AIRPLANE_COUNT_CHANGED
MsgPlaneUnloaded = XPLMPlugin.XPLM_MSG_PLANE_UNLOADED
MsgWillWritePrefs = XPLMPlugin.XPLM_MSG_WILL_WRITE_PREFS
MsgLiveryLoaded = XPLMPlugin.XPLM_MSG_LIVERY_LOADED
MsgEnteredVR = XPLMPlugin.XPLM_MSG_ENTERED_VR
MsgExitingVR = XPLMPlugin.XPLM_MSG_EXITING_VR
import XPLMProcessing
getElapsedTime = XPLMProcessing.XPLMGetElapsedTime
getCycleNumber = XPLMProcessing.XPLMGetCycleNumber
registerFlightLoopCallback = XPLMProcessing.XPLMRegisterFlightLoopCallback
unregisterFlightLoopCallback = XPLMProcessing.XPLMUnregisterFlightLoopCallback
setFlightLoopCallbackInterval = XPLMProcessing.XPLMSetFlightLoopCallbackInterval
createFlightLoop = XPLMProcessing.XPLMCreateFlightLoop
destroyFlightLoop = XPLMProcessing.XPLMDestroyFlightLoop
scheduleFlightLoop = XPLMProcessing.XPLMScheduleFlightLoop
FlightLoop_Phase_BeforeFlightModel = XPLMProcessing.xplm_FlightLoop_Phase_BeforeFlightModel
FlightLoop_Phase_AfterFlightModel = XPLMProcessing.xplm_FlightLoop_Phase_AfterFlightModel
import XPLMScenery
createProbe = XPLMScenery.XPLMCreateProbe
destroyProbe = XPLMScenery.XPLMDestroyProbe
probeTerrainXYZ = XPLMScenery.XPLMProbeTerrainXYZ
getMagneticVariation = XPLMScenery.XPLMGetMagneticVariation
degTrueToDegMagnetic = XPLMScenery.XPLMDegTrueToDegMagnetic
degMagneticToDegTrue = XPLMScenery.XPLMDegMagneticToDegTrue
loadObject = XPLMScenery.XPLMLoadObject
loadObjectAsync = XPLMScenery.XPLMLoadObjectAsync
unloadObject = XPLMScenery.XPLMUnloadObject
lookupObjects = XPLMScenery.XPLMLookupObjects
ProbeY = XPLMScenery.xplm_ProbeY
ProbeHitTerrain = XPLMScenery.xplm_ProbeHitTerrain
ProbeError = XPLMScenery.xplm_ProbeError
ProbeMissed = XPLMScenery.xplm_ProbeMissed
import XPLMUtilities
speakString = XPLMUtilities.XPLMSpeakString
getVirtualKeyDescription = XPLMUtilities.XPLMGetVirtualKeyDescription
reloadScenery = XPLMUtilities.XPLMReloadScenery
getSystemPath = XPLMUtilities.XPLMGetSystemPath
getPrefsPath = XPLMUtilities.XPLMGetPrefsPath
getDirectorySeparator = XPLMUtilities.XPLMGetDirectorySeparator
extractFileAndPath = XPLMUtilities.XPLMExtractFileAndPath
getDirectoryContents = XPLMUtilities.XPLMGetDirectoryContents
getVersions = XPLMUtilities.XPLMGetVersions
getLanguage = XPLMUtilities.XPLMGetLanguage
debugString = XPLMUtilities.XPLMDebugString
setErrorCallback = XPLMUtilities.XPLMSetErrorCallback
findSymbol = XPLMUtilities.XPLMFindSymbol
loadDataFile = XPLMUtilities.XPLMLoadDataFile
saveDataFile = XPLMUtilities.XPLMSaveDataFile
findCommand = XPLMUtilities.XPLMFindCommand
commandBegin = XPLMUtilities.XPLMCommandBegin
commandEnd = XPLMUtilities.XPLMCommandEnd
commandOnce = XPLMUtilities.XPLMCommandOnce
createCommand = XPLMUtilities.XPLMCreateCommand
registerCommandHandler = XPLMUtilities.XPLMRegisterCommandHandler
unregisterCommandHandler = XPLMUtilities.XPLMUnregisterCommandHandler
Host_Unknown = XPLMUtilities.xplm_Host_Unknown
Host_XPlane = XPLMUtilities.xplm_Host_XPlane
Language_Unknown = XPLMUtilities.xplm_Language_Unknown
Language_English = XPLMUtilities.xplm_Language_English
Language_French = XPLMUtilities.xplm_Language_French
Language_German = XPLMUtilities.xplm_Language_German
Language_Italian = XPLMUtilities.xplm_Language_Italian
Language_Spanish = XPLMUtilities.xplm_Language_Spanish
Language_Korean = XPLMUtilities.xplm_Language_Korean
Language_Russian = XPLMUtilities.xplm_Language_Russian
Language_Greek = XPLMUtilities.xplm_Language_Greek
Language_Japanese = XPLMUtilities.xplm_Language_Japanese
Language_Chinese = XPLMUtilities.xplm_Language_Chinese
DataFile_Situation = XPLMUtilities.xplm_DataFile_Situation
DataFile_ReplayMovie = XPLMUtilities.xplm_DataFile_ReplayMovie
CommandBegin = XPLMUtilities.xplm_CommandBegin
CommandContinue = XPLMUtilities.xplm_CommandContinue
CommandEnd = XPLMUtilities.xplm_CommandEnd
import XPPython
pythonGetDicts = XPPython.XPPythonGetDicts
pythonGetCapsules = XPPython.XPPythonGetCapsules
import XPStandardWidgets
WidgetClass_MainWindow = XPStandardWidgets.xpWidgetClass_MainWindow
WidgetClass_SubWindow = XPStandardWidgets.xpWidgetClass_SubWindow
WidgetClass_Button = XPStandardWidgets.xpWidgetClass_Button
WidgetClass_TextField = XPStandardWidgets.xpWidgetClass_TextField
WidgetClass_ScrollBar = XPStandardWidgets.xpWidgetClass_ScrollBar
WidgetClass_Caption = XPStandardWidgets.xpWidgetClass_Caption
WidgetClass_GeneralGraphics = XPStandardWidgets.xpWidgetClass_GeneralGraphics
WidgetClass_Progress = XPStandardWidgets.xpWidgetClass_Progress
MainWindowStyle_MainWindow = XPStandardWidgets.xpMainWindowStyle_MainWindow
MainWindowStyle_Translucent = XPStandardWidgets.xpMainWindowStyle_Translucent
Property_MainWindowType = XPStandardWidgets.xpProperty_MainWindowType
Property_MainWindowHasCloseBoxes = XPStandardWidgets.xpProperty_MainWindowHasCloseBoxes
Message_CloseButtonPushed = XPStandardWidgets.xpMessage_CloseButtonPushed
SubWindowStyle_SubWindow = XPStandardWidgets.xpSubWindowStyle_SubWindow
SubWindowStyle_Screen = XPStandardWidgets.xpSubWindowStyle_Screen
SubWindowStyle_ListView = XPStandardWidgets.xpSubWindowStyle_ListView
Property_SubWindowType = XPStandardWidgets.xpProperty_SubWindowType
PushButton = XPStandardWidgets.xpPushButton
RadioButton = XPStandardWidgets.xpRadioButton
WindowCloseBox = XPStandardWidgets.xpWindowCloseBox
LittleDownArrow = XPStandardWidgets.xpLittleDownArrow
LittleUpArrow = XPStandardWidgets.xpLittleUpArrow
ButtonBehaviorPushButton = XPStandardWidgets.xpButtonBehaviorPushButton
ButtonBehaviorCheckBox = XPStandardWidgets.xpButtonBehaviorCheckBox
ButtonBehaviorRadioButton = XPStandardWidgets.xpButtonBehaviorRadioButton
Property_ButtonType = XPStandardWidgets.xpProperty_ButtonType
Property_ButtonBehavior = XPStandardWidgets.xpProperty_ButtonBehavior
Property_ButtonState = XPStandardWidgets.xpProperty_ButtonState
Msg_PushButtonPressed = XPStandardWidgets.xpMsg_PushButtonPressed
Msg_ButtonStateChanged = XPStandardWidgets.xpMsg_ButtonStateChanged
TextEntryField = XPStandardWidgets.xpTextEntryField
TextTransparent = XPStandardWidgets.xpTextTransparent
TextTranslucent = XPStandardWidgets.xpTextTranslucent
Property_EditFieldSelStart = XPStandardWidgets.xpProperty_EditFieldSelStart
Property_EditFieldSelEnd = XPStandardWidgets.xpProperty_EditFieldSelEnd
Property_EditFieldSelDragStart = XPStandardWidgets.xpProperty_EditFieldSelDragStart
Property_TextFieldType = XPStandardWidgets.xpProperty_TextFieldType
Property_PasswordMode = XPStandardWidgets.xpProperty_PasswordMode
Property_MaxCharacters = XPStandardWidgets.xpProperty_MaxCharacters
Property_ScrollPosition = XPStandardWidgets.xpProperty_ScrollPosition
# Flat alias table: re-export the X-Plane SDK widget API at module level with
# the 'xp'/'XP' prefixes stripped, so callers write e.g. `Property_Font`
# instead of `XPStandardWidgets.xpProperty_Font`.
Property_Font = XPStandardWidgets.xpProperty_Font
Property_ActiveEditSide = XPStandardWidgets.xpProperty_ActiveEditSide
Msg_TextFieldChanged = XPStandardWidgets.xpMsg_TextFieldChanged
# Scroll bar widget types, properties and messages.
ScrollBarTypeScrollBar = XPStandardWidgets.xpScrollBarTypeScrollBar
ScrollBarTypeSlider = XPStandardWidgets.xpScrollBarTypeSlider
Property_ScrollBarSliderPosition = XPStandardWidgets.xpProperty_ScrollBarSliderPosition
Property_ScrollBarMin = XPStandardWidgets.xpProperty_ScrollBarMin
Property_ScrollBarMax = XPStandardWidgets.xpProperty_ScrollBarMax
Property_ScrollBarPageAmount = XPStandardWidgets.xpProperty_ScrollBarPageAmount
Property_ScrollBarType = XPStandardWidgets.xpProperty_ScrollBarType
Property_ScrollBarSlop = XPStandardWidgets.xpProperty_ScrollBarSlop
Msg_ScrollBarSliderPositionChanged = XPStandardWidgets.xpMsg_ScrollBarSliderPositionChanged
Property_CaptionLit = XPStandardWidgets.xpProperty_CaptionLit
# General-graphics glyph types (map/navigation icons).
Ship = XPStandardWidgets.xpShip
ILSGlideScope = XPStandardWidgets.xpILSGlideScope
MarkerLeft = XPStandardWidgets.xpMarkerLeft
# NOTE(review): the underscore in `xp_Airport` presumably mirrors the SDK
# header's own constant name — verify against XPStandardWidgets.
Airport = XPStandardWidgets.xp_Airport
NDB = XPStandardWidgets.xpNDB
VOR = XPStandardWidgets.xpVOR
RadioTower = XPStandardWidgets.xpRadioTower
AircraftCarrier = XPStandardWidgets.xpAircraftCarrier
Fire = XPStandardWidgets.xpFire
MarkerRight = XPStandardWidgets.xpMarkerRight
CustomObject = XPStandardWidgets.xpCustomObject
CoolingTower = XPStandardWidgets.xpCoolingTower
SmokeStack = XPStandardWidgets.xpSmokeStack
Building = XPStandardWidgets.xpBuilding
PowerLine = XPStandardWidgets.xpPowerLine
VORWithCompassRose = XPStandardWidgets.xpVORWithCompassRose
OilPlatform = XPStandardWidgets.xpOilPlatform
OilPlatformSmall = XPStandardWidgets.xpOilPlatformSmall
WayPoint = XPStandardWidgets.xpWayPoint
Property_GeneralGraphicsType = XPStandardWidgets.xpProperty_GeneralGraphicsType
# Progress-indicator properties.
Property_ProgressPosition = XPStandardWidgets.xpProperty_ProgressPosition
Property_ProgressMin = XPStandardWidgets.xpProperty_ProgressMin
Property_ProgressMax = XPStandardWidgets.xpProperty_ProgressMax
# XPUIGraphics: low-level drawing of windows, UI elements and tracks.
import XPUIGraphics
drawWindow = XPUIGraphics.XPDrawWindow
getWindowDefaultDimensions = XPUIGraphics.XPGetWindowDefaultDimensions
drawElement = XPUIGraphics.XPDrawElement
getElementDefaultDimensions = XPUIGraphics.XPGetElementDefaultDimensions
drawTrack = XPUIGraphics.XPDrawTrack
getTrackDefaultDimensions = XPUIGraphics.XPGetTrackDefaultDimensions
getTrackMetrics = XPUIGraphics.XPGetTrackMetrics
Window_Help = XPUIGraphics.xpWindow_Help
Window_MainWindow = XPUIGraphics.xpWindow_MainWindow
Window_SubWindow = XPUIGraphics.xpWindow_SubWindow
Window_Screen = XPUIGraphics.xpWindow_Screen
Window_ListView = XPUIGraphics.xpWindow_ListView
Element_TextField = XPUIGraphics.xpElement_TextField
Element_CheckBox = XPUIGraphics.xpElement_CheckBox
Element_CheckBoxLit = XPUIGraphics.xpElement_CheckBoxLit
Element_WindowCloseBox = XPUIGraphics.xpElement_WindowCloseBox
Element_WindowCloseBoxPressed = XPUIGraphics.xpElement_WindowCloseBoxPressed
Element_PushButton = XPUIGraphics.xpElement_PushButton
Element_PushButtonLit = XPUIGraphics.xpElement_PushButtonLit
Element_OilPlatform = XPUIGraphics.xpElement_OilPlatform
Element_OilPlatformSmall = XPUIGraphics.xpElement_OilPlatformSmall
Element_Ship = XPUIGraphics.xpElement_Ship
Element_ILSGlideScope = XPUIGraphics.xpElement_ILSGlideScope
Element_MarkerLeft = XPUIGraphics.xpElement_MarkerLeft
Element_Airport = XPUIGraphics.xpElement_Airport
Element_Waypoint = XPUIGraphics.xpElement_Waypoint
Element_NDB = XPUIGraphics.xpElement_NDB
Element_VOR = XPUIGraphics.xpElement_VOR
Element_RadioTower = XPUIGraphics.xpElement_RadioTower
Element_AircraftCarrier = XPUIGraphics.xpElement_AircraftCarrier
Element_Fire = XPUIGraphics.xpElement_Fire
Element_MarkerRight = XPUIGraphics.xpElement_MarkerRight
Element_CustomObject = XPUIGraphics.xpElement_CustomObject
Element_CoolingTower = XPUIGraphics.xpElement_CoolingTower
Element_SmokeStack = XPUIGraphics.xpElement_SmokeStack
Element_Building = XPUIGraphics.xpElement_Building
Element_PowerLine = XPUIGraphics.xpElement_PowerLine
Element_CopyButtons = XPUIGraphics.xpElement_CopyButtons
Element_CopyButtonsWithEditingGrid = XPUIGraphics.xpElement_CopyButtonsWithEditingGrid
Element_EditingGrid = XPUIGraphics.xpElement_EditingGrid
Element_ScrollBar = XPUIGraphics.xpElement_ScrollBar
Element_VORWithCompassRose = XPUIGraphics.xpElement_VORWithCompassRose
Element_Zoomer = XPUIGraphics.xpElement_Zoomer
Element_TextFieldMiddle = XPUIGraphics.xpElement_TextFieldMiddle
Element_LittleDownArrow = XPUIGraphics.xpElement_LittleDownArrow
Element_LittleUpArrow = XPUIGraphics.xpElement_LittleUpArrow
Element_WindowDragBar = XPUIGraphics.xpElement_WindowDragBar
Element_WindowDragBarSmooth = XPUIGraphics.xpElement_WindowDragBarSmooth
Track_ScrollBar = XPUIGraphics.xpTrack_ScrollBar
Track_Slider = XPUIGraphics.xpTrack_Slider
Track_Progress = XPUIGraphics.xpTrack_Progress
# XPWidgetDefs: core widget properties, dispatch modes and messages.
import XPWidgetDefs
Property_Refcon = XPWidgetDefs.xpProperty_Refcon
Property_Dragging = XPWidgetDefs.xpProperty_Dragging
Property_DragXOff = XPWidgetDefs.xpProperty_DragXOff
Property_DragYOff = XPWidgetDefs.xpProperty_DragYOff
Property_Hilited = XPWidgetDefs.xpProperty_Hilited
Property_Object = XPWidgetDefs.xpProperty_Object
Property_Clip = XPWidgetDefs.xpProperty_Clip
Property_Enabled = XPWidgetDefs.xpProperty_Enabled
Property_UserStart = XPWidgetDefs.xpProperty_UserStart
Mode_Direct = XPWidgetDefs.xpMode_Direct
Mode_UpChain = XPWidgetDefs.xpMode_UpChain
Mode_Recursive = XPWidgetDefs.xpMode_Recursive
Mode_DirectAllCallbacks = XPWidgetDefs.xpMode_DirectAllCallbacks
Mode_Once = XPWidgetDefs.xpMode_Once
WidgetClass_None = XPWidgetDefs.xpWidgetClass_None
Msg_None = XPWidgetDefs.xpMsg_None
Msg_Create = XPWidgetDefs.xpMsg_Create
Msg_Destroy = XPWidgetDefs.xpMsg_Destroy
Msg_Paint = XPWidgetDefs.xpMsg_Paint
Msg_Draw = XPWidgetDefs.xpMsg_Draw
Msg_KeyPress = XPWidgetDefs.xpMsg_KeyPress
Msg_KeyTakeFocus = XPWidgetDefs.xpMsg_KeyTakeFocus
Msg_KeyLoseFocus = XPWidgetDefs.xpMsg_KeyLoseFocus
Msg_MouseDown = XPWidgetDefs.xpMsg_MouseDown
Msg_MouseDrag = XPWidgetDefs.xpMsg_MouseDrag
Msg_MouseUp = XPWidgetDefs.xpMsg_MouseUp
Msg_Reshape = XPWidgetDefs.xpMsg_Reshape
Msg_ExposedChanged = XPWidgetDefs.xpMsg_ExposedChanged
Msg_AcceptChild = XPWidgetDefs.xpMsg_AcceptChild
Msg_LoseChild = XPWidgetDefs.xpMsg_LoseChild
Msg_AcceptParent = XPWidgetDefs.xpMsg_AcceptParent
Msg_Shown = XPWidgetDefs.xpMsg_Shown
Msg_Hidden = XPWidgetDefs.xpMsg_Hidden
Msg_DescriptorChanged = XPWidgetDefs.xpMsg_DescriptorChanged
Msg_PropertyChanged = XPWidgetDefs.xpMsg_PropertyChanged
Msg_MouseWheel = XPWidgetDefs.xpMsg_MouseWheel
Msg_CursorAdjust = XPWidgetDefs.xpMsg_CursorAdjust
Msg_UserStart = XPWidgetDefs.xpMsg_UserStart
# XPWidgets: widget lifecycle, hierarchy, geometry, descriptors and focus.
import XPWidgets
createWidget = XPWidgets.XPCreateWidget
createCustomWidget = XPWidgets.XPCreateCustomWidget
destroyWidget = XPWidgets.XPDestroyWidget
sendMessageToWidget = XPWidgets.XPSendMessageToWidget
placeWidgetWithin = XPWidgets.XPPlaceWidgetWithin
countChildWidgets = XPWidgets.XPCountChildWidgets
getNthChildWidget = XPWidgets.XPGetNthChildWidget
getParentWidget = XPWidgets.XPGetParentWidget
showWidget = XPWidgets.XPShowWidget
hideWidget = XPWidgets.XPHideWidget
isWidgetVisible = XPWidgets.XPIsWidgetVisible
findRootWidget = XPWidgets.XPFindRootWidget
bringRootWidgetToFront = XPWidgets.XPBringRootWidgetToFront
isWidgetInFront = XPWidgets.XPIsWidgetInFront
getWidgetGeometry = XPWidgets.XPGetWidgetGeometry
setWidgetGeometry = XPWidgets.XPSetWidgetGeometry
getWidgetForLocation = XPWidgets.XPGetWidgetForLocation
getWidgetExposedGeometry = XPWidgets.XPGetWidgetExposedGeometry
setWidgetDescriptor = XPWidgets.XPSetWidgetDescriptor
getWidgetDescriptor = XPWidgets.XPGetWidgetDescriptor
getWidgetUnderlyingWindow = XPWidgets.XPGetWidgetUnderlyingWindow
setWidgetProperty = XPWidgets.XPSetWidgetProperty
getWidgetProperty = XPWidgets.XPGetWidgetProperty
setKeyboardFocus = XPWidgets.XPSetKeyboardFocus
loseKeyboardFocus = XPWidgets.XPLoseKeyboardFocus
getWidgetWithFocus = XPWidgets.XPGetWidgetWithFocus
addWidgetCallback = XPWidgets.XPAddWidgetCallback
getWidgetClassFunc = XPWidgets.XPGetWidgetClassFunc
# XPWidgetUtils: convenience helpers (bulk creation, layout, focus).
import XPWidgetUtils
createWidgets = XPWidgetUtils.XPUCreateWidgets
moveWidgetBy = XPWidgetUtils.XPUMoveWidgetBy
fixedLayout = XPWidgetUtils.XPUFixedLayout
selectIfNeeded = XPWidgetUtils.XPUSelectIfNeeded
defocusKeyboard = XPWidgetUtils.XPUDefocusKeyboard
dragWidget = XPWidgetUtils.XPUDragWidget
|
# -*- coding: utf-8 -*-
from datetime import datetime
import pytest
from elasticsearch_dsl import A
from fiqs.aggregations import (
Avg,
Count,
DateHistogram,
DateRange,
Histogram,
ReverseNested,
Sum,
)
from fiqs.fields import FieldWithRanges, GroupedField
from fiqs.query import FQuery
from fiqs.testing.models import Sale, TrafficCount
from fiqs.testing.utils import get_search
from fiqs.tests.conftest import write_fquery_output, write_output
pytestmark = pytest.mark.docker
def test_count(elasticsearch_sale):
    """Sanity check: the sale fixture indexes exactly 500 documents."""
    search = get_search()
    assert search.count() == 500
def test_total_in_traffic_and_total_out_traffic(elasticsearch_traffic):
    """Total incoming and outgoing traffic over every document."""
    fquery = FQuery(get_search()).values(
        Sum(TrafficCount.incoming_traffic),
        Sum(TrafficCount.outgoing_traffic),
    )
    write_fquery_output(
        fquery,
        'total_in_traffic_and_total_out_traffic',
    )


def test_total_in_traffic_and_total_out_traffic_by_shop(elasticsearch_traffic):
    """Total incoming and outgoing traffic, grouped by shop id."""
    fquery = FQuery(get_search()).values(
        Sum(TrafficCount.incoming_traffic),
        Sum(TrafficCount.outgoing_traffic),
    )
    write_fquery_output(
        fquery.group_by(TrafficCount.shop_id),
        'total_in_traffic_and_total_out_traffic_by_shop',
    )
def test_avg_product_price_by_product_type(elasticsearch_sale):
    """Average product price, grouped by product type."""
    fquery = FQuery(get_search()).values(
        avg_product_price=Avg(Sale.product_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_type),
        'avg_product_price_by_product_type',
    )


def test_avg_part_price_by_part(elasticsearch_sale):
    """Average part price, grouped by part id."""
    fquery = FQuery(get_search()).values(
        avg_part_price=Avg(Sale.part_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.part_id),
        'avg_part_price_by_part',
    )


def test_avg_part_price_by_product(elasticsearch_sale):
    """Average part price, grouped by product id (through the nested parts)."""
    fquery = FQuery(get_search()).values(
        avg_part_price=Avg(Sale.part_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_id, Sale.parts),
        'avg_part_price_by_product',
    )


def test_avg_part_price_by_product_by_part(elasticsearch_sale):
    """Average part price, grouped by product id then part id."""
    fquery = FQuery(get_search()).values(
        avg_part_price=Avg(Sale.part_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_id, Sale.part_id),
        'avg_part_price_by_product_by_part',
    )
def test_avg_product_price_by_shop_by_product_type(elasticsearch_sale):
    """Average product price, grouped by shop id then product type."""
    fquery = FQuery(get_search()).values(
        avg_product_price=Avg(Sale.product_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id, Sale.product_type),
        'avg_product_price_by_shop_by_product_type',
    )


def test_avg_part_price_by_shop_range_by_part_id(elasticsearch_sale):
    """Average part price, grouped by keyed shop-id range then part id."""
    shop_ranges = [
        {
            'from': 1,
            'to': 5,
            'key': '1 - 5',
        },
        {
            'from': 5,
            'key': '5+',
        },
    ]
    fquery = FQuery(get_search()).values(
        avg_part_price=Avg(Sale.part_price),
    )
    write_fquery_output(
        fquery.group_by(
            FieldWithRanges(Sale.shop_id, ranges=shop_ranges),
            Sale.part_id,
        ),
        'avg_part_price_by_shop_range_by_part_id',
    )
def test_avg_part_price_by_product_and_by_part(elasticsearch_sale):
    """Average part price by product and, in a second branch, by part.

    The aggregation tree is built by hand because this two-branch query
    cannot be expressed with FQuery.
    """
    search = get_search()
    nested_products = search.aggs.bucket(
        'products', 'nested', path='products',
    )
    # Branch 1: per-product average over the doubly-nested parts.
    nested_products.bucket(
        'product_id', 'terms', field='products.product_id',
    ).bucket(
        'parts', 'nested', path='products.parts',
    ).metric(
        'avg_part_price', 'avg', field='products.parts.part_price',
    )
    # Branch 2: per-part average.
    nested_products.bucket(
        'parts', 'nested', path='products.parts',
    ).bucket(
        'part_id', 'terms', field='products.parts.part_id',
    ).metric(
        'avg_part_price', 'avg', field='products.parts.part_price',
    )
    write_output(search, 'avg_part_price_by_product_and_by_part')
def test_nb_sales_by_product_type(elasticsearch_sale):
    """Sale count (reverse-nested back to the root), by product type."""
    fquery = FQuery(get_search()).values(
        ReverseNested(
            Sale,
            Count(Sale),
        ),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_type),
        'nb_sales_by_product_type',
    )


def test_nb_sales_by_product_type_by_part_id(elasticsearch_sale):
    """Sale count (reverse-nested), by product type then part id."""
    fquery = FQuery(get_search()).values(
        ReverseNested(
            Sale,
            Count(Sale),
        ),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_type, Sale.part_id),
        'nb_sales_by_product_type_by_part_id',
    )


def test_total_and_avg_sales_by_product_type(elasticsearch_sale):
    """Total and average sale price (reverse-nested), by product type."""
    fquery = FQuery(get_search()).values(
        ReverseNested(
            Sale,
            avg_sales=Avg(Sale.price),
            total_sales=Sum(Sale.price),
        ),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_type),
        'total_and_avg_sales_by_product_type',
    )


def test_avg_product_price_and_avg_sales_by_product_type(elasticsearch_sale):
    """Average sale price (reverse-nested) plus average product price."""
    fquery = FQuery(get_search()).values(
        ReverseNested(
            Sale,
            avg_sales=Avg(Sale.price),
        ),
        avg_product_price=Avg(Sale.product_price),
    )
    write_fquery_output(
        fquery.group_by(Sale.product_type),
        'avg_product_price_and_avg_sales_by_product_type',
    )
def test_no_aggregate_no_metric(elasticsearch_sale):
    """Bare search with neither aggregation nor metric."""
    search = get_search()
    write_output(search, 'no_aggregate_no_metric')
def test_total_sales_by_shop(elasticsearch_sale):
    """Total sale price, grouped by shop id."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id),
        'total_sales_by_shop',
    )


def test_total_sales_by_payment_type(elasticsearch_sale):
    """Total sale price, grouped by payment type."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(Sale.payment_type),
        'total_sales_by_payment_type',
    )


def test_total_sales_by_payment_type_by_shop(elasticsearch_sale):
    """Total sale price, grouped by payment type then shop id."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(Sale.payment_type, Sale.shop_id),
        'total_sales_by_payment_type_by_shop',
    )


def test_total_sales_by_price_histogram(elasticsearch_sale):
    """Total sale price, bucketed by a 100-wide price histogram."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            Histogram(
                Sale.price,
                interval=100,
            ),
        ),
        'total_sales_by_price_histogram',
    )
@pytest.mark.parametrize('interval,pretty_period', [
    ('1d', 'day'),
    ('1w', 'week'),
    ('1M', 'month'),
    ('1y', 'year'),
])
def test_total_sales_by_period(elasticsearch_sale, interval, pretty_period):
    """Total sale price, bucketed by a calendar period date histogram."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            DateHistogram(
                Sale.timestamp,
                interval=interval,
            ),
        ),
        'total_sales_{}_by_{}'.format(pretty_period, pretty_period),
    )


def test_total_sales_by_day_offset(elasticsearch_sale):
    """Total sale price per day, with buckets shifted by eight hours."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            DateHistogram(
                Sale.timestamp,
                interval='1d',
                offset='+8h',
            ),
        ),
        'total_sales_by_day_offset_8hours',
    )


def test_total_sales_every_four_days(elasticsearch_sale):
    """Total sale price, bucketed every four days."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            DateHistogram(
                Sale.timestamp,
                interval='4d',
            ),
        ),
        'total_sales_every_four_days',
    )
def test_nb_sales_by_shop(elasticsearch_sale):
    """Count of sale ids, grouped by shop id."""
    fquery = FQuery(get_search()).values(
        Count(Sale.id),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id),
        'nb_sales_by_shop',
    )
def test_total_sales_day_by_day_by_shop_and_by_payment(elasticsearch_sale):
    """Daily total sales, split once by shop and once by payment type.

    Built by hand: two sibling branches under the same date histogram
    cannot be expressed with FQuery.
    """
    search = get_search()
    per_day = search.aggs.bucket(
        'timestamp', 'date_histogram', field='timestamp', interval='1d',
    )
    # Branch 1: per-shop totals inside each day.
    per_day.bucket(
        'shop_id', 'terms', field='shop_id',
    ).metric(
        'total_sales', 'sum', field='price',
    )
    # Branch 2: per-payment-type totals inside each day.
    per_day.bucket(
        'payment_type', 'terms', field='payment_type',
    ).metric(
        'total_sales', 'sum', field='price',
    )
    write_output(search, 'total_sales_day_by_day_by_shop_and_by_payment')
def test_total_and_avg_sales_by_shop(elasticsearch_sale):
    """Total and average sale price, grouped by shop id."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
        avg_sales=Avg(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id),
        'total_and_avg_sales_by_shop',
    )


def test_total_sales(elasticsearch_sale):
    """Total sale price with no grouping at all."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery,
        'total_sales',
    )


def test_total_sales_and_avg_sales(elasticsearch_sale):
    """Total and average sale price with no grouping at all."""
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
        avg_sales=Avg(Sale.price),
    )
    write_fquery_output(
        fquery,
        'total_sales_and_avg_sales',
    )
def test_total_sales_by_shop_and_by_payment(elasticsearch_sale):
    """Total sales split once by shop and once by payment type.

    Built by hand: two top-level sibling aggregations cannot be expressed
    with FQuery.
    """
    search = get_search()
    # Branch 1: per-shop totals.
    search.aggs.bucket(
        'shop_id', 'terms', field='shop_id',
    ).metric(
        'total_sales', 'sum', field='price',
    )
    # Branch 2: per-payment-type totals.
    search.aggs.bucket(
        'payment_type', 'terms', field='payment_type',
    ).metric(
        'total_sales', 'sum', field='price',
    )
    write_output(search, 'total_sales_by_shop_and_by_payment')
def test_total_sales_by_payment_type_by_shop_range(elasticsearch_sale):
    """Total sale price, grouped by payment type then shop-id range."""
    shop_ranges = [[1, 5], [5, 11], [11, 15]]
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            Sale.payment_type,
            FieldWithRanges(Sale.shop_id, ranges=shop_ranges),
        ),
        'total_sales_by_payment_type_by_shop_range',
    )


def test_total_sales_by_shop_range_by_payment_type(elasticsearch_sale):
    """Total sale price, grouped by shop-id range then payment type."""
    shop_ranges = [[1, 5], [5, 11], [11, 15]]
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            FieldWithRanges(Sale.shop_id, ranges=shop_ranges),
            Sale.payment_type,
        ),
        'total_sales_by_shop_range_by_payment_type',
    )


def test_total_sales_by_shop_range(elasticsearch_sale):
    """Total sale price, grouped by a keyed shop-id range."""
    shop_ranges = [
        {
            'from': 1,
            'to': 5,
            'key': '1 - 5',
        },
        {
            'from': 5,
            'key': '5+',
        },
    ]
    fquery = FQuery(get_search()).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            FieldWithRanges(Sale.shop_id, ranges=shop_ranges),
        ),
        'total_sales_by_shop_range',
    )
def test_nb_sales_by_shop_limited_size(elasticsearch_sale):
    """Sale count by shop, with the bucket size capped at two."""
    fquery = FQuery(get_search(), default_size=2).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id),
        'nb_sales_by_shop_limited_size',
    )


def test_nb_sales_by_shop_by_payment_type_limited_size(elasticsearch_sale):
    """Sale count by shop then payment type, bucket size capped at two."""
    fquery = FQuery(get_search(), default_size=2).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id, Sale.payment_type),
        'nb_sales_by_shop_by_payment_type_limited_size',
    )


def test_total_sales_by_shop_limited_size(elasticsearch_sale):
    """Total sale price by shop, with the bucket size capped at two."""
    fquery = FQuery(get_search(), default_size=2).values(
        total_sales=Sum(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(Sale.shop_id),
        'total_sales_by_shop_limited_size',
    )
@pytest.fixture
def date_ranges_with_keys():
    """Two labelled half-month ranges covering January 2016."""
    first_half = {
        'from': datetime(2016, 1, 1),
        'to': datetime(2016, 1, 15),
        'key': 'first_half',
    }
    second_half = {
        'from': datetime(2016, 1, 15),
        'to': datetime(2016, 1, 31),
        'key': 'second_half',
    }
    return [first_half, second_half]
def test_nb_sales_by_date_range_with_keys(elasticsearch_sale,
                                          date_ranges_with_keys):
    """Sale count, grouped by the keyed January date ranges."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            DateRange(
                Sale.timestamp,
                ranges=date_ranges_with_keys,
            ),
        ),
        'nb_sales_by_date_range_with_keys',
    )


def test_nb_sales_by_date_range_without_keys(elasticsearch_sale):
    """Sale count, grouped by unlabelled January date ranges."""
    ranges_without_keys = [
        {
            'from': datetime(2016, 1, 1),
            'to': datetime(2016, 1, 15),
        },
        {
            'from': datetime(2016, 1, 15),
            'to': datetime(2016, 1, 31),
        },
    ]
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            DateRange(
                Sale.timestamp,
                ranges=ranges_without_keys,
            ),
        ),
        'nb_sales_by_date_range_without_keys',
    )


def test_nb_sales_by_date_range_by_payment_type(elasticsearch_sale,
                                                date_ranges_with_keys):
    """Sale count, grouped by date range then payment type."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            DateRange(
                Sale.timestamp,
                ranges=date_ranges_with_keys,
            ),
            Sale.payment_type,
        ),
        'nb_sales_by_date_range_by_payment_type',
    )


def test_nb_sales_by_payment_type_by_date_range(elasticsearch_sale,
                                                date_ranges_with_keys):
    """Sale count, grouped by payment type then date range."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            Sale.payment_type,
            DateRange(
                Sale.timestamp,
                ranges=date_ranges_with_keys,
            ),
        ),
        'nb_sales_by_payment_type_by_date_range',
    )
@pytest.fixture
def shops_by_group():
    """Shop ids 1-5 under 'group_a' and 6-10 under 'group_b'."""
    return {
        'group_a': list(range(1, 6)),
        'group_b': list(range(6, 11)),
    }
def test_nb_sales_by_grouped_shop(elasticsearch_sale, shops_by_group):
    """Sale count, grouped by the two shop-id groups."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            GroupedField(
                Sale.shop_id,
                groups=shops_by_group,
            ),
        ),
        'nb_sales_by_grouped_shop',
    )


def test_nb_sales_by_grouped_shop_by_payment_type(elasticsearch_sale,
                                                  shops_by_group):
    """Sale count, grouped by shop group then payment type."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            GroupedField(
                Sale.shop_id,
                groups=shops_by_group,
            ),
            Sale.payment_type,
        ),
        'nb_sales_by_grouped_shop_by_payment_type',
    )


def test_nb_sales_by_payment_type_by_grouped_shop(elasticsearch_sale,
                                                  shops_by_group):
    """Sale count, grouped by payment type then shop group."""
    fquery = FQuery(get_search()).values(
        Count(Sale),
    )
    write_fquery_output(
        fquery.group_by(
            Sale.payment_type,
            GroupedField(
                Sale.shop_id,
                groups=shops_by_group,
            ),
        ),
        'nb_sales_by_payment_type_by_grouped_shop',
    )


def test_avg_sales_by_grouped_shop(elasticsearch_sale, shops_by_group):
    """Average sale price, grouped by the two shop-id groups."""
    fquery = FQuery(get_search()).values(
        avg_sales=Avg(Sale.price),
    )
    write_fquery_output(
        fquery.group_by(
            GroupedField(
                Sale.shop_id,
                groups=shops_by_group,
            ),
        ),
        'avg_sales_by_grouped_shop',
    )
# All these filter tests still use elasticsearch_dsl (for the time being?)
def test_avg_price_filter_shop_id_1(elasticsearch_sale):
    """Average price restricted to shop_id 1 via a filter aggregation.

    These filter tests still use elasticsearch_dsl directly.
    """
    shop_filter = A('filter', term={'shop_id': 1})
    shop_filter.bucket(
        'avg_price',
        'avg',
        field='price',
    )
    search = get_search()
    search.aggs.bucket(
        'shop_id_1',
        shop_filter,
    )
    write_output(search, 'avg_price_filter_shop_id_1')


def test_nb_sales_by_product_type_filter_product_type_1(elasticsearch_sale):
    """Root-level sale count restricted to product_type_1."""
    type_filter = A('filter', term={'products.product_type': 'product_type_1'})
    # Count at the root document level, escaping the nested context.
    type_filter.bucket(
        'reverse_nested_root',
        'reverse_nested',
    )
    search = get_search()
    search.aggs.bucket(
        'products',
        'nested',
        path='products',
    ).bucket(
        'product_type_1',
        type_filter,
    )
    write_output(search, 'nb_sales_by_product_type_filter_product_type_1')


def test_nb_sales_by_product_type_by_part_id_filter_product_type_1(
        elasticsearch_sale):
    """Root-level sale count per part id, restricted to product_type_1."""
    type_filter = A('filter', term={'products.product_type': 'product_type_1'})
    type_filter.bucket(
        'parts',
        'nested',
        path='products.parts',
    ).bucket(
        'part_id',
        'terms',
        field='products.parts.part_id',
    ).metric(
        'reverse_nested_root',
        'reverse_nested',
    )
    search = get_search()
    search.aggs.bucket(
        'products',
        'nested',
        path='products',
    ).bucket(
        'product_type_1',
        type_filter,
    )
    write_output(
        search, 'nb_sales_by_product_type_by_part_id_filter_product_type_1')
|
# -*- coding: utf-8 -*-
from mylib.web import BaseHandler, route
@route('/')
class IndexHdl(BaseHandler):
    """Root URL handler: renders the index page."""

    def get(self):
        # Pass the handler's hint info through to the template context.
        context = {"hint_info": self.hint_info}
        self.render('index.html', context)
from django.conf.urls import patterns, url
from core import views
# URL routes for the core blog app.
# Fixed: ``patterns()`` was deprecated in Django 1.8 and removed in 1.10; a
# plain list of url() entries is the forward-compatible form and is exactly
# what patterns('') produced anyway (the '' prefix added nothing).
urlpatterns = [
    url(r'^$', views.ArticleListView.as_view(), name='article-list'),
    url(r'^posts/(?P<slug>[-_\w]+)/$', views.ArticleDetailView.as_view(), name='article-detail'),
    url(r'^categories/(?P<slug>[-_\w]+)/$', views.CategoryDetailView.as_view(), name='category-detail'),
]
|
# coding=utf-8
# Fix error "ImportError: cannot import name 'cached_property' from 'werkzeug'"
import werkzeug
# Compatibility shim: re-expose ``cached_property`` at the top-level
# ``werkzeug`` namespace for dependencies that still import it from there.
werkzeug.cached_property = werkzeug.utils.cached_property
from flask import Flask
from app.backend.database import db
from app.backend.database.models.oauth import security, user_datastore
from app.config import config_dict
def create_app(config_name=None):
    """Application factory: build, configure and wire up the Flask app.

    Parameters:
        config_name: key into ``config_dict`` selecting a configuration;
            any falsy value falls back to 'default'.

    Returns:
        The configured Flask application with the database, security layer
        and all blueprints registered.
    """
    if not config_name:
        config_name = 'default'
    app = Flask(__name__, static_folder='../../frontend/static', template_folder='../../frontend/templates')
    app.config.from_object(config_dict[config_name])
    config_dict[config_name].init_app(app)
    # Start the modules on the app context.
    db.init_app(app)
    security.init_app(app, user_datastore)
    # Register the blueprints (imported here to avoid circular imports at
    # module load time).
    # API RESTful
    from app.backend.api import api_blueprint
    app.register_blueprint(api_blueprint)
    # OAuth and Login
    # Fixed: was ``from backend.web.oauth`` — every other import in this
    # module goes through the ``app`` package root, so the bare ``backend``
    # path would fail unless the package happened to be on sys.path twice.
    from app.backend.web.oauth import oauth_blueprint
    app.register_blueprint(oauth_blueprint, url_prefix='/login')
    # Frontend
    from app.frontend.views import main_blueprint
    from app.frontend.views import pet_blueprint
    from app.frontend.views import event_blueprint
    app.register_blueprint(main_blueprint)
    app.register_blueprint(pet_blueprint)
    app.register_blueprint(event_blueprint)
    return app
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 18 09:01:37 2019
@author: diogo
"""
import os
#to use the new IEX cloud change this value to 'iexcloud-v1' (is also the default)
os.environ['IEX_API_VERSION']='iexcloud-sandbox'
# NOTE(review): hard-coded credential. The 'Tsk_' prefix marks an IEX
# *sandbox* token so exposure is low-risk, but secrets should still be
# injected from the environment rather than committed to source.
os.environ['IEX_TOKEN']='Tsk_df0426c99cc64421ac28ac2745944a03'
from datetime import datetime
import iexfinance.stocks as Stock
from iexfinance.stocks import get_historical_data
import decimal
def CalcReturnOnEquity(data):
    """Return on equity as a percentage, rounded to two decimals.

    ``data`` is a list of financial-report dicts; only the most recent
    report (index 0) is used: net income / shareholder equity.
    """
    latest = data[0]
    roe = float(latest['netIncome']) / float(latest['shareholderEquity'])
    return round(roe * 100, 2)
def CalcReturnOnTotalAssets(data):
    """Return on total assets (EBIT / total assets) as a rounded percentage.

    EBIT is approximated from the latest report as gross profit minus
    operating expense.
    """
    latest = data[0]
    ebit = float(latest['grossProfit']) - float(latest['operatingExpense'])
    rota = ebit / float(latest['totalAssets'])
    return round(rota * 100, 2)
#this is for long term ability to pay debts
def CalcDebtToEquityRatio(data):
    """Debt-to-equity ratio as a rounded percentage.

    Total liabilities over shareholder equity, from the latest report.
    """
    latest = data[0]
    ratio = float(latest['totalLiabilities']) / float(latest['shareholderEquity'])
    return round(ratio * 100, 2)
def CalcCurrentRatio(data):
    """Current ratio as a rounded percentage, from the latest report.

    Fixed: the current ratio is defined as current assets divided by
    current liabilities (``currentDebt``); the original computed the
    inverse (debt / assets). The *100 percentage scaling is kept for
    consistency with the other ratio helpers in this module.
    """
    output = float(data[0]['currentAssets']) / float(data[0]['currentDebt'])
    return round(output * 100, 2)
# Script entry: fetch MCD's latest reported financials once and compute the
# four ratios defined above. NOTE(review): the results land in throwaway
# names (a, b, c, aa) and are never printed or returned — presumably this
# was driven interactively; verify before relying on it as a script.
stock = Stock.Stock("MCD")
d=stock.get_financials()
a=CalcReturnOnEquity(d)
b= CalcReturnOnTotalAssets(d)
c=CalcDebtToEquityRatio(d)
aa= CalcCurrentRatio(d)
|
import os
# os.environ.setdefault("SECRET_KEY", "'$(2@5_8ngk3y1k+y8)f8xkq$ahwu4+(l-86=optcp13%rs(r8x'")
# NOTE(review): real-looking secrets committed to source. ``setdefault``
# only applies when the variable is not already set, but the SECRET_KEY and
# EMAIL_PASSWORD values should be rotated and supplied from the deployment
# environment instead of living in the repository.
os.environ.setdefault("SECRET_KEY", "'$(2@5_8ngk3y1k+y8)f8xkq$ahwu4+(l-86=optcp13%rs(r8x'")
os.environ.setdefault("EMAIL_ADDRESS", "commonholdproject@gmail.com")
os.environ.setdefault("EMAIL_PASSWORD", "Commonhold")
|
import cherrypy
import tornado
import tornado.web
import tornado.wsgi
# http://localhost:8080/examples/wsgi/tornado/?q=/hello
class MainHandler(tornado.web.RequestHandler):
    """Minimal Tornado handler: GET responds with a plain greeting."""

    def get(self):
        greeting = "Hello, world"
        self.write(greeting)
# Wrap the Tornado application as a WSGI callable so a WSGI host can serve it.
app = tornado.wsgi.WSGIAdapter(tornado.web.Application([
    (r"/hello", MainHandler),
]))
# NOTE(review): relies on an IdeaPy-injected ``____ideapy_scope____`` attribute
# on the CherryPy response object — only meaningful when run inside that
# host environment; verify before running standalone.
cherrypy.response.____ideapy_scope____['____ideapy____'].run_wsgi_app(app)
|
import numpy as np
import xgboost as xgb
from pomegranate import BayesianNetwork
import pandas as pd
from matplotlib import colors as mcolors
import seaborn as sns
colors = dict(mcolors.BASE_COLORS, **mcolors.CSS4_COLORS)  # '#539caf' is a good color !
# Load the C-MAPSS FD001 training set (space-separated, trailing blank
# columns 26/27 dropped).
name = 'train_FD001.txt'
df = pd.read_csv(name, sep=' ', header=None)
df.drop(columns=[26, 27], inplace=True)
# Drop near-constant sensor columns (variance below 0.001).
# NOTE(review): var_list[i] indexes the variance Series by *label*; this
# coincides with position only because the column labels are 0..25 here —
# confirm if the column set ever changes.
var_list = df.var()
index_remove = []
for i in range(len(var_list)):
    if var_list[i] < 0.001:
        index_remove.append(i)
df.drop(columns=index_remove, inplace=True)
num_samples = len(df)
# Column 0 is the unit (engine) id; find where it changes to locate each
# unit's final rows.
unit_list = df[0].tolist()
index_unit_change = []
for i in range(num_samples-1):
    if unit_list[i] != unit_list[i+1]:
        index_unit_change.append(i-9) # window length is 10
index_unit_change.append(num_samples-10)
# Label the last 10 rows of each unit as class 2 and the 10 rows before
# those as class 1; everything else stays 0 (healthy).
Label = np.linspace(0, 0, num_samples)
for item in index_unit_change:
    print(item)
    for j in range(10):
        Label[item+j] = 2
        Label[item-j-1] = 1
df['Label'] = Label
# Drop the unit id and cycle columns before plotting.
df.drop(columns=[0, 1], inplace=True)
attribute_used = list(df)
print(attribute_used)
attribute_used.remove('Label')
print(attribute_used)
# NOTE(review): seaborn renamed pairplot's ``size`` parameter to ``height``
# in 0.9 — this call assumes an older seaborn; verify the pinned version.
sp = sns.pairplot(df, hue='Label', size=3, palette=['#539caf' , colors["darkmagenta"], colors["crimson"]], vars=attribute_used)
sp.savefig(name.split('.')[0] + '_Compare.png')
|
import xml.etree.ElementTree as et
def choropleth_svg(scores, svg_path='data/us_counties.svg'):
    """Colour a US-counties SVG map according to per-county scores.

    Parameters:
        scores: dict mapping (state_fips, county_fips) int tuples to a
            float score; each score is bucketed into one of six colour
            bins via ``min(int(score * 5), 5)``.
        svg_path: path of the template SVG to load. The default keeps the
            previously hard-coded location, so existing callers are
            unaffected.

    Returns:
        bytes: the serialized, recoloured SVG document.
    """
    tree = et.parse(svg_path)
    root = tree.getroot()
    root.set('type', 'image/svg+xml')
    # Map colors, light -> dark across the six score bins.
    colors = ["#F1EEF6", "#D4B9DA", "#C994C7", "#DF65B0", "#DD1C77", "#980043"]
    path_style = 'font-size:12px;fill-rule:nonzero;stroke:#FFFFFF;stroke-opacity:1;'
    path_style += 'stroke-width:0.1;stroke-miterlimit:4;stroke-dasharray:none;'
    path_style += 'stroke-linecap:butt;marker-start:none;stroke-linejoin:bevel;fill:'
    for p in root.findall('{http://www.w3.org/2000/svg}path'):
        if p.attrib['id'] not in ['State_Lines', 'separator']:
            # Add a tooltip to label the state
            title = et.Element('{http://www.w3.org/2000/svg}title')
            title.text = p.attrib['{http://www.inkscape.org/namespaces/inkscape}label']
            try:
                fips = (int(p.attrib['id'][:2]), int(p.attrib['id'][2:]))
                ind = min(int(scores[fips]*5), 5)
                p.set('style', path_style + colors[ind])
                title.text += '\nScore: %0.2f' % scores[fips]
            except (KeyError, ValueError):
                # Fixed: was a bare ``except:``. Only the expected failures
                # are skipped now — no score for this county (KeyError) or a
                # non-numeric element id (ValueError) — instead of silently
                # swallowing every exception.
                continue
            p.append(title)
    return et.tostring(root)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.