text stringlengths 38 1.54M |
|---|
# Generated by Django 3.1.2 on 2021-07-12 17:02
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (3.1.2, 2021-07-12).

    Refreshes the ``choices`` metadata on ``item.category``,
    ``player.player_class`` and ``player.rank``; choice lists are enforced at
    validation time, so this primarily updates model state rather than data.
    """

    dependencies = [
        ('loot', '0014_auto_20201118_1745'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='category',
            field=models.CharField(choices=[('CS', 'Caster'), ('HL', 'Healer'), ('PH', 'Physical'), ('TN', 'Tank')], max_length=2),
        ),
        migrations.AlterField(
            model_name='player',
            name='player_class',
            field=models.CharField(choices=[('DR', 'Druid'), ('HN', 'Hunter'), ('MG', 'Mage'), ('PL', 'Paladin'), ('PR', 'Priest'), ('RG', 'Rogue'), ('SH', 'Shaman'), ('WL', 'Warlock'), ('WR', 'Warrior')], default='WR', max_length=2),
        ),
        migrations.AlterField(
            model_name='player',
            name='rank',
            field=models.IntegerField(choices=[(0, 'Inactive'), (10, 'Pug'), (20, 'Trial'), (30, 'Member'), (40, 'Veteran'), (47, 'Raider'), (50, 'Core Raider'), (60, 'Class Lead'), (70, 'Officer'), (80, 'GM')], default=20),
        ),
    ]
|
import csv
import numpy as np
from nltk import word_tokenize, pos_tag, ne_chunk
def readData(url, max_length):
    """Read the training TSV at *url* and return ``(data, labels)`` arrays.

    Each HEADLINE is tokenized, truncated and right-padded with the
    placeholder token ``"-"`` to exactly ``max_length`` tokens.  TAG 'SAFE'
    maps to label 1 and 'UNSAFE' to 0.

    Bug fixed: the original appended the headline to the data list even when
    the TAG was neither SAFE nor UNSAFE, while appending nothing to the label
    list — silently desynchronizing the two arrays.  Such rows are now
    skipped entirely so data and labels always stay aligned.
    """
    array_data = []
    array_labels = []
    with open(url) as tsvfile:
        reader = csv.DictReader(tsvfile, dialect='excel-tab')
        for row in reader:
            label = row['TAG']
            if label == 'SAFE':
                numeric_label = 1
            elif label == 'UNSAFE':
                numeric_label = 0
            else:
                # Unknown tag: skip the row to keep data/labels aligned.
                continue
            tokens = word_tokenize(row['HEADLINE'])[:max_length]
            # Right-pad short sentences up to the fixed length.
            tokens += ["-"] * (max_length - len(tokens))
            array_data.append(tokens)
            array_labels.append(numeric_label)
    return (np.array(array_data), np.array(array_labels))
def readDataTest(url, max_length):
    '''
    Read Test Data Set
    '''
    # Tokenized headlines (each padded/truncated to max_length) and row IDs.
    headlines = []
    row_ids = []
    with open(url) as tsvfile:
        for row in csv.DictReader(tsvfile, dialect='excel-tab'):
            tokens = word_tokenize(row['HEADLINE'])[:max_length]
            # Right-pad short sentences with the placeholder token "-".
            tokens = tokens + ["-"] * (max_length - len(tokens))
            headlines.append(tokens)
            row_ids.append(row['ID'])
    return (np.array(headlines), np.array(row_ids))
|
import os
import base64
import requests
# disable ssl warnings
import urllib3
urllib3.disable_warnings()

# API configuration and parameters ...
pc_address = '10.38.15.9'
username = 'admin'
# NOTE(review): a real-looking credential is shipped as the fallback value —
# prefer requiring the PASSWORD environment variable with no default.
password = os.environ.get('PASSWORD', 'nx2Tech911!')  # change the password to a suitable value
# Basic-auth header value: base64("username:password").
authorization = base64.b64encode(f'{username}:{password}'.encode()).decode()
url = f'https://{pc_address}:9440/api/nutanix/v3'
# Shared request options for every call below; TLS verification is disabled
# (presumably self-signed Prism certificates — confirm before production use).
kwargs = {
    'verify': False,
    'headers': {'Authorization': f'Basic {authorization}'}
}
# ==========================================================================================
# List the available clusters (PEs) connected to this Prism Central
# ==========================================================================================
resp = requests.post(f'{url}/clusters/list', json={'kind': 'cluster'}, **kwargs)
if resp.status_code == 200:
    print('\n==========================\nAvailable Clusters\n==========================')
    # Note: PC itself is returned as a cluster but cannot host VMs; only
    # clusters whose service list contains 'AOS' can run VMs.
    aos_clusters = (entity for entity in resp.json()['entities']
                    if 'AOS' in entity['status']['resources']['config'].get('service_list', []))
    for count, cluster in enumerate(aos_clusters, start=1):
        print(f'({count}) Name: {cluster["status"]["name"]},\t UUID: {cluster["metadata"]["uuid"]}')
else:
    print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
# ==========================================================================================
# List the available Networks
# ==========================================================================================
resp = requests.post(f'{url}/subnets/list', json={'kind': 'subnet', 'length': 999}, **kwargs)
if resp.status_code == 200:
    print('\n==========================\nAvailable Subnets\n==========================')
    for count, subnet in enumerate(resp.json()['entities'], start=1):
        print(f'({count}) Name: {subnet["status"]["name"]},\t UUID: {subnet["metadata"]["uuid"]}')
else:
    print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
# ==========================================================================================
# List the available Images
# ==========================================================================================
resp = requests.post(f'{url}/images/list', json={'kind': 'image', 'length': 999}, **kwargs)
if resp.status_code == 200:
    print('\n==========================\nAvailable Images\n==========================')
    for count, image in enumerate(resp.json()['entities'], start=1):
        print(f'({count}) Name: {image["spec"]["name"]},\t UUID: {image["metadata"]["uuid"]}')
else:
    print(f'ERROR - API call failed, status code: {resp.status_code}, message: {resp.content}')
|
import sys
from PyQt5.QtWidgets import QMainWindow,QApplication,QHBoxLayout,QVBoxLayout,QLabel,QWidget
from PyQt5.QtWidgets import QPushButton,QMessageBox,QLineEdit,QFileDialog,QRadioButton
from PyQt5.QtGui import QIcon,QPixmap,QPalette,QBrush,QFont
from graduation_project.predict import pre_photo
from graduation_project.recognition import rec_photo
import os
# Start interface
class FirstPage(QMainWindow):
    """Start-up window: a single START button asks whether a trained model
    file already exists and routes to the prediction page (SecondPage_1) or
    the training page (SecondPage_2)."""

    # Keeps references to child windows so they are not garbage-collected
    # when this window closes.  (Fixed: the original defined this class
    # attribute twice; the second definition was redundant.)
    windowList = []

    def __init__(self, width=1000, height=600):
        super().__init__()
        self.setFixedSize(width, height)
        self.initUI()
        self.button()
        self.startButton.clicked.connect(self.message)

    # initialize window background image and icon
    def initUI(self):
        # setting window icon and title
        self.setWindowIcon(QIcon('icon.png'))
        self.setWindowTitle('Start Interface')
        # setting background image, scaled to the fixed window size
        palette = QPalette()
        pix = QPixmap('sky.jpg')
        pix = pix.scaled(self.width(), self.height())
        palette.setBrush(QPalette.Background, QBrush(pix))
        self.setPalette(palette)

    # start up button
    def button(self):
        widget = QWidget()
        self.setCentralWidget(widget)
        self.startButton = QPushButton('START', self)
        font = QFont('宋体', 35)
        font.setItalic(True)
        self.startButton.setFont(font)
        self.startButton.setFixedSize(150, 50)
        self.buttonLayout(self.startButton, widget)

    # START button position
    def buttonLayout(self, button, widget):
        # Fixed misleading names: the inner box is vertical, the outer one
        # horizontal (the original called them hbox/vbox the wrong way round).
        inner = QVBoxLayout()
        inner.addWidget(button)
        outer = QHBoxLayout()
        outer.addLayout(inner)
        widget.setLayout(outer)

    def on_pushButton_clicked_1(self):
        # A model file exists -> open the prediction page.
        the_window = SecondPage_1()
        self.windowList.append(the_window)
        self.close()
        the_window.show()

    def on_pushButton_clicked_2(self):
        # No model file yet -> open the training page.
        the_window = SecondPage_2()
        self.windowList.append(the_window)
        self.close()
        the_window.show()

    # ask if there is a model file
    def message(self):
        button = QMessageBox.question(self, 'choose model',
                                      "If there is a model file",
                                      QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if button == QMessageBox.Yes:
            self.on_pushButton_clicked_1()
        else:
            self.on_pushButton_clicked_2()
# Image Prediction
class SecondPage_1(QMainWindow):
    """Image-prediction window.

    A chain of wizard-style layouts collects, in order, the image width and
    height, a validation-image folder, a test-image folder and a model file
    (all accumulated in ``self.Path``), then runs ``pre_photo`` and shows the
    per-image results with previous/next navigation.
    """

    def __init__(self, width=1000, height=600):
        super().__init__()
        self.setFixedSize(width, height)
        self.initUI()
        # Index of the image currently shown on the result page.
        self.number = 0
        # self.text_button_layout_1()
        self.width_height_layout_1()
        # User-entered values, appended in wizard order:
        # [width, height, validation dir, test dir, model file].
        self.Path = []

    # SecondPage start
    def initUI(self):
        # setting WindowIcon
        self.setWindowIcon(QIcon('icon.png'))
        self.setWindowTitle('Image Prediction')
        # setting the background image
        palette = QPalette()
        pix = QPixmap('sky.jpg')
        pix = pix.scaled(self.width(), self.height())
        palette.setBrush(QPalette.Background, QBrush(pix))
        self.setPalette(palette)

    def back_button(self):
        """Create the 'Previous' button; it asks for confirmation first."""
        self.backButton = QPushButton("Previous", self)
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        self.backButton.setFont(font)
        self.backButton.setFixedSize(150, 50)
        self.backButton.clicked.connect(self.close_message)
        return self.backButton

    # Keeps child windows referenced so they are not garbage-collected.
    windowList = []

    def closeWin_1(self):
        # Back to the home page.
        the_window = FirstPage()
        self.windowList.append(the_window)
        self.close()
        the_window.show()
        self.windowList = []

    def closeWin_2(self):
        # Over to the training page instead.
        the_window = SecondPage_2()
        self.windowList.append(the_window)
        self.close()
        the_window.show()

    def close_message(self):
        """Confirm before leaving this page."""
        close_button = QMessageBox.question(self, 'return', 'Are you sure to return to the homepage?',
                                            QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if close_button == QMessageBox.Yes:
            self.closeWin_1()
        else:
            self.closeWin_2()

    def buttonDialog(self, type='file'):
        """Create the chooser button; *type* selects folder vs. file dialog."""
        self.choose_button = QPushButton('choose folder', self)
        font = QFont('Microsoft YaHei UI', 10)
        font.setItalic(True)
        font.setBold(True)
        self.choose_button.setFont(font)
        self.choose_button.setFixedSize(110, 30)
        if type == 'folder':
            self.choose_button.clicked.connect(self.showFolder)
        elif type == 'file':
            self.choose_button.clicked.connect(self.showFile)
        return self.choose_button

    def go_on_button(self, num=None):
        """Create the 'go>>' button wired to the wizard step chosen by *num*."""
        self.go = QPushButton('go>>', self)
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        self.go.setFont(font)
        self.go.setFixedSize(150, 50)
        if num == -1:
            self.go.clicked.connect(self.width_height_layout_2)
        elif num == 0:
            self.go.clicked.connect(self.text_button_layout_1)
        elif num == 1:
            self.go.clicked.connect(self.text_button_layout_2)
        else:
            self.go.clicked.connect(self.text_button_layout_3)
        return self.go

    def set_font(self, object):
        font = QFont('宋体', 10)
        font.setItalic(True)
        object.setFont(font)

    # Class-level fallback; shadowed by the instance attribute set in __init__.
    Path = []

    def showFolder(self):
        fname = QFileDialog.getExistingDirectory(None, 'choose folder', r'C:\Users\DELL\.keras\models')
        self.text_edit.setText(fname)
        if self.text_edit.text() == '':
            self.Path.append('')
        else:
            self.Path.append(fname)
        print(self.Path)

    def showFile(self):
        # getOpenFileName returns (path, selected_filter); element 0 is the path.
        fname = QFileDialog.getOpenFileName(None, 'choose file', r'C:\Users\DELL\.keras\models', 'All files(*.h5)')
        self.text_edit.setText(fname[0])
        if self.text_edit.text() == '':
            self.Path.append('')
        else:
            self.Path.append(fname[0])
        print(self.Path)

    def text(self, title=None):
        """Create the wide path text box with *title* as placeholder."""
        self.text_edit = QLineEdit(self)
        self.text_edit.setFixedSize(600, 30)
        self.text_edit.setPlaceholderText(title)
        self.set_font(self.text_edit)
        return self.text_edit

    def width_height(self, content):
        """Create the small entry used for image width/height values."""
        self.line_text = QLineEdit()
        self.line_text.setPlaceholderText(content)
        self.line_text.setFixedSize(150, 30)
        self.line_text.editingFinished.connect(self.get_text)
        return self.line_text

    def get_text(self):
        # Record the entered value (or '' when left empty) in wizard order.
        if self.line_text.text() != '':
            self.Path.append(self.line_text.text())
        else:
            self.Path.append('')
        print(self.Path)

    def label(self, content):
        label = QLabel(content)
        font = QFont('Microsoft YaHei UI', 20)
        font.setBold(True)
        font.setItalic(True)
        label.setFont(font)
        return label

    def width_height_layout(self, label=None, width_height_text=None, back_button=None, go_on_button=None):
        """Generic layout: centred label + entry row above back/go buttons."""
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(label)
        hbox1.addWidget(width_height_text)
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(back_button)
        hbox2.addWidget(go_on_button)
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def width_height_layout_1(self):
        self.width_height_layout(self.label('width:'), self.width_height('input the image width'),
                                 self.back_button(), self.go_on_button(-1))

    def width_height_layout_2(self):
        QApplication.processEvents()
        self.width_height_layout(self.label('height:'), self.width_height('input the image height'),
                                 self.back_button(), self.go_on_button(0))

    def text_button_layout(self, text_box, buttonDialog, back_button, go_on_button,):
        """Generic layout: path text box + chooser button above back/go buttons."""
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(text_box)
        hbox1.addWidget(buttonDialog)
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(back_button)
        hbox2.addWidget(go_on_button)
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def text_button_layout_1(self):
        QApplication.processEvents()
        self.text_button_layout(self.text('choose validation image path'), self.buttonDialog('folder'),
                                self.back_button(), self.go_on_button(1))

    def text_button_layout_2(self):
        QApplication.processEvents()
        self.text_button_layout(self.text('choose text iamge path'), self.buttonDialog('folder'),
                                self.back_button(), self.go_on_button())

    def text_button_layout_3(self):
        QApplication.processEvents()
        self.text_button_layout(self.text('select model file(.h5)'), self.buttonDialog('file'),
                                self.back_button(), self.Third_button_predict())

    def Third_button_predict(self):
        predict = QPushButton('Predict', self)
        predict.setFixedSize(150, 50)
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        predict.setFont(font)
        predict.clicked.connect(self.predict_result)
        # the prediction function is connected here
        return predict

    def predict_result(self):
        # Path order: [width, height, validation dir, test dir, model file].
        self.result = pre_photo(self.Path[0], self.Path[1], self.Path[2], self.Path[3], self.Path[4])
        QApplication.processEvents()
        self.Firth_layout(self.FirthPage_button()[0], self.FirthPage_button()[1], self.name_kind_probability()[0],
                          self.name_kind_probability()[1], self.name_kind_probability()[2], self.Firth_show_imagewindow())

    # FirthPage Start
    def FirthPage_button(self):
        self.button_left = QPushButton('<==', self)
        self.button_right = QPushButton('==>', self)
        self.button_left.setFixedSize(60, 50)
        self.button_right.setFixedSize(60, 50)
        # seting image <==,==>
        self.button_left.clicked.connect(self.left_button)
        self.button_right.clicked.connect(self.right_button)
        return self.button_left, self.button_right

    def left_button(self):
        # Step back to the previous result, if any.
        if self.button_left.text() == '<==' and self.number > 0:
            self.number -= 1
            pixmap = self.Firth_show_imagewindow(os.path.join(self.Path[3], (self.result[0])[self.number]))
            name, kind, probability = self.name_kind_probability((self.result[0])[self.number],
                                                                 (self.result[1])[self.number], (self.result[2])[self.number])
            self.Firth_layout(self.button_left, self.button_right, name, kind, probability, pixmap)

    def right_button(self):
        # Step forward to the next result.
        # NOTE(review): the bound looks off by one — self.number can reach
        # len(self.result[0]) and index past the end; confirm and tighten to
        # len(self.result[0]) - 1.
        if self.button_right.text() == '==>' and self.number < len(self.result[0]):
            self.number += 1
            pixmap = self.Firth_show_imagewindow(os.path.join(self.Path[3], (self.result[0])[self.number]))
            name, kind, probability = self.name_kind_probability((self.result[0])[self.number],
                                                                 (self.result[1])[self.number], (self.result[2])[self.number])
            self.Firth_layout(self.button_left, self.button_right, name, kind, probability, pixmap)

    # show prediction image
    def Firth_show_imagewindow(self, path=None):
        label = QLabel()
        label.setMaximumSize(380, 380)
        if path == None:
            # Default to the first predicted image.
            pix = QPixmap(os.path.join(self.Path[3], (self.result[0])[0]))
        else:
            pix = QPixmap(path)
        label.setPixmap(pix)
        label.setVisible(True)
        label.setStyleSheet('border:1px solid black')
        return label

    # show prediction image name or probability
    def Firth_show_image_text(self, context=None):
        self.text = QLineEdit(self)
        self.text.setFixedSize(140, 50)
        self.text.setPlaceholderText(context)
        self.set_font(self.text)
        self.text.setReadOnly(True)
        return self.text

    def name_kind_probability(self, name_content=None, kind_content=None, probability_content=None):
        """Build the three read-only boxes (name, kind, probability); with no
        arguments they show the first result."""
        name = self.Firth_show_image_text('image name')
        if name_content == None:
            name.setText((self.result[0])[0])
        else:
            name.setText(name_content)
        kind = self.Firth_show_image_text('kind')
        if kind_content == None:
            kind.setText((self.result[1])[0])
        else:
            kind.setText(kind_content)
        probability = self.Firth_show_image_text('prediction')
        if probability_content == None:
            probability.setText(str((self.result[2])[0]))
        else:
            probability.setText(str(probability_content))
        return name, kind, probability

    def Firth_layout(self, left_button=None, right_button=None, name=None, kind=None, probability=None, pixmap=None):
        """Result page: image on the left, navigation and text boxes on the right."""
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addWidget(left_button)
        hbox1.addWidget(right_button)
        vbox = QVBoxLayout()
        vbox.addStretch(2)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addWidget(name)
        vbox.addStretch(1)
        vbox.addWidget(kind)
        vbox.addStretch(1)
        vbox.addWidget(probability)
        vbox.addStretch(2)
        hbox = QHBoxLayout()
        hbox.addWidget(pixmap)
        hbox.addLayout(vbox)
        widget.setLayout(hbox)

    # close event
    def closeEvent(self, event):
        reply = QMessageBox.question(self, 'Message', 'Are you sure to quit?',
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            event.accept()
        else:
            event.ignore()
# Training Neural Network
class SecondPage_2(QMainWindow):
    """Model-training window.

    A chain of wizard layouts collects (in order) the image width and height,
    four image folders, the optimizer, batch size and epoch count, and four
    output folders into ``self.Path_parameter`` — 13 values in total — then
    hands them to ``rec_photo`` for training.
    """

    def __init__(self, width=1000, height=600):
        super().__init__()
        self.setFixedSize(width, height)
        self.initUI()
        # self.first_layout()
        self.width_height_layout_1()
        # User-entered values in wizard order; train() expects 13 entries.
        self.Path_parameter = []

    def initUI(self):
        # setting WindowIcon
        self.setWindowIcon(QIcon('icon.png'))
        self.setWindowTitle('Training Nerual Network')
        # setting the background image
        palette = QPalette()
        pix = QPixmap('sky.jpg')
        pix = pix.scaled(self.width(), self.height())
        palette.setBrush(QPalette.Background, QBrush(pix))
        self.setPalette(palette)

    def back_button(self):
        """'Previous' button: returns straight to the home page."""
        backButton = QPushButton("Previous", self)
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        backButton.setFont(font)
        backButton.setFixedSize(150, 50)
        backButton.clicked.connect(self.closeWin)
        return backButton

    def go_on_button(self, num=None):
        """'go>>' button wired to the wizard step selected by *num*."""
        go = QPushButton('go>>')
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        go.setFont(font)
        go.setFixedSize(150, 50)
        if num == -1:
            go.clicked.connect(self.width_height_layout_2)
        elif num == 0:
            go.clicked.connect(self.first_layout)
        elif num == 1:
            go.clicked.connect(self.second_layout)
        elif num == 2:
            go.clicked.connect(self.third_layout)
        elif num == 3:
            go.clicked.connect(self.firth_layout)
        elif num == 4:
            go.clicked.connect(self.fifth_layout)
        elif num == 5:
            go.clicked.connect(self.fifth_layout_1)
        elif num == 6:
            go.clicked.connect(self.fifth_layout_2)
        elif num == 7:
            go.clicked.connect(self.sixth_layout)
        elif num == 8:
            go.clicked.connect(self.seventh_layout)
        elif num == 9:
            go.clicked.connect(self.eighth_layout)
        else:
            go.clicked.connect(self.ninth_layout)
        return go

    # Keeps child windows referenced so they are not garbage-collected.
    windowList = []

    def closeWin(self):
        the_window = FirstPage()
        self.windowList.append(the_window)
        self.close()
        the_window.show()
        self.windowList = []

    def set_font(self, object):
        font = QFont('Microsoft YaHei UI', 10)
        font.setItalic(True)
        object.setFont(font)

    # text box
    def text_lineEdit(self, content=None):
        self.text = QLineEdit()
        self.text.setFixedSize(600, 30)
        self.text.setPlaceholderText(content)
        self.set_font(self.text)
        return self.text

    # Class-level fallback; shadowed by the instance attribute set in __init__.
    Path_parameter = []

    def show_folder(self):
        fname = QFileDialog.getExistingDirectory(None, 'choose folder', r'C:\Users\DELL\.keras\models')
        self.text.setText(fname)
        if self.text.text() == '':
            self.Path_parameter.append('')
        else:
            self.Path_parameter.append(fname)
        print(self.Path_parameter)

    def buttonDialog(self, content):
        self.button_dialog = QPushButton(content, self)
        self.button_dialog.setFixedSize(110, 30)
        self.button_dialog.clicked.connect(self.show_folder)
        font = QFont('Microsoft YaHei UI', 10)
        font.setBold(True)
        font.setItalic(True)
        self.button_dialog.setFont(font)
        return self.button_dialog

    def width_height(self, content):
        self.line_text = QLineEdit()
        self.line_text.setPlaceholderText(content)
        self.line_text.setFixedSize(150, 30)
        self.line_text.editingFinished.connect(self.get_text)
        return self.line_text

    def get_text(self):
        # Record the entered value (or '' when left empty) in wizard order.
        if self.line_text.text() != '':
            self.Path_parameter.append(self.line_text.text())
        else:
            self.Path_parameter.append('')
        print(self.Path_parameter)

    def label(self, content):
        label = QLabel(content)
        font = QFont('Microsoft YaHei UI', 20)
        font.setBold(True)
        font.setItalic(True)
        label.setFont(font)
        return label

    def width_height_layout(self, label=None, width_height_text=None, back_button=None, go_on_button=None):
        """Generic layout: centred label + entry row above back/go buttons."""
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(label)
        hbox1.addWidget(width_height_text)
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(back_button)
        hbox2.addWidget(go_on_button)
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def width_height_layout_1(self):
        self.width_height_layout(self.label('width:'), self.width_height('recommend width:150~224'),
                                 self.back_button(), self.go_on_button(-1))

    def width_height_layout_2(self):
        QApplication.processEvents()
        self.width_height_layout(self.label('height:'), self.width_height('recommend height:150~224'),
                                 self.back_button(), self.go_on_button(0))

    def text_button_Layout(self, text_lineEdit=None, buttonDialog=None, back_button=None, go_on_button=None):
        """Generic layout: path text box + chooser button above back/go buttons."""
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(text_lineEdit)
        hbox1.addWidget(buttonDialog)
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(back_button)
        hbox2.addWidget(go_on_button)
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def first_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('select unprocessed training images folder'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button(1))

    def second_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('save processed training images'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button(2))

    def third_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('select unprocessed validation images folder'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button(3))

    def firth_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('save processed validation images'),
                                self.buttonDialog('choose floder'), self.back_button(), self.go_on_button(4))

    def radio_Button(self, content=None):
        r_button = QRadioButton(content, self)
        font = QFont('Microsoft YaHei UI', 20)
        font.setBold(True)
        font.setItalic(True)
        r_button.setFont(font)
        # The lambda closes over this particular button instance.
        r_button.clicked.connect(lambda: self.btnstate(r_button))
        return r_button

    def btnstate(self, button):
        # Record whichever optimizer / batch-size option was just checked.
        if button.text() == 'sgd':
            if button.isChecked() == True:
                self.Path_parameter.append('sgd')
                print(self.Path_parameter)
        elif button.text() == 'rsmprop':
            if button.isChecked() == True:
                self.Path_parameter.append('rsmprop')
                print(self.Path_parameter)
        elif button.text() == 'adam':
            if button.isChecked() == True:
                self.Path_parameter.append('adam')
                print(self.Path_parameter)
        elif button.text() == '8':
            if button.isChecked() == True:
                self.Path_parameter.append('8')
                print(self.Path_parameter)
        elif button.text() == '16':
            if button.isChecked() == True:
                self.Path_parameter.append('16')
                print(self.Path_parameter)
        elif button.text() == '32':
            if button.isChecked() == True:
                self.Path_parameter.append('32')
                print(self.Path_parameter)

    def label_rbutton(self, content):
        label = QLabel(content)
        font = QFont('Microsoft YaHei UI', 20)
        font.setBold(True)
        font.setItalic(True)
        label.setFont(font)
        return label

    def epoches(self):
        """Entry for the epoch count."""
        self.text = QLineEdit()
        self.text.setPlaceholderText('Recommended 100~200')
        self.text.setFixedSize(150, 30)
        self.text.editingFinished.connect(self.get_epoches)
        return self.text

    def get_epoches(self):
        if self.text.text() != '':
            self.Path_parameter.append(self.text.text())
        else:
            self.Path_parameter.append('')
        print(self.Path_parameter)

    def fifth_layout(self):
        # Optimizer selection page.
        QApplication.processEvents()
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(self.label_rbutton('optimizer:'))
        hbox1.addWidget(self.radio_Button('sgd'))
        hbox1.addStretch(1)
        hbox1.addWidget(self.radio_Button('rsmprop'))
        hbox1.addStretch(1)
        hbox1.addWidget(self.radio_Button('adam'))
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(self.back_button())
        hbox2.addWidget(self.go_on_button(5))
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def fifth_layout_1(self):
        # Batch-size selection page.
        QApplication.processEvents()
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(self.label_rbutton('batch-size:'))
        hbox1.addWidget(self.radio_Button('8'))
        hbox1.addStretch(1)
        hbox1.addWidget(self.radio_Button('16'))
        hbox1.addStretch(1)
        hbox1.addWidget(self.radio_Button('32'))
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(self.back_button())
        hbox2.addWidget(self.go_on_button(6))
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def fifth_layout_2(self):
        # Epoch-count entry page.
        QApplication.processEvents()
        widget = QWidget()
        self.setCentralWidget(widget)
        hbox1 = QHBoxLayout()
        hbox1.addStretch(1)
        hbox1.addWidget(self.label_rbutton('epoches:'))
        hbox1.addWidget(self.epoches())
        hbox1.addStretch(1)
        hbox2 = QHBoxLayout()
        hbox2.addStretch(1)
        hbox2.addWidget(self.back_button())
        hbox2.addWidget(self.go_on_button(7))
        vbox = QVBoxLayout()
        vbox.addStretch(1)
        vbox.addLayout(hbox1)
        vbox.addStretch(1)
        vbox.addLayout(hbox2)
        widget.setLayout(vbox)

    def sixth_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('select the folder to save tensorboard file'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button(8))

    def seventh_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('select the folder to save model file'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button(9))

    def eighth_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('selct the folder to save model-weight file'),
                                self.buttonDialog('choose folder'), self.back_button(), self.go_on_button())

    def train_button(self):
        """'Train' button: launches training with the collected parameters."""
        train_button = QPushButton('Train', self)
        font = QFont('宋体', 25)
        font.setItalic(True)
        font.setBold(True)
        train_button.setFont(font)
        train_button.setFixedSize(150, 50)
        train_button.clicked.connect(self.train)
        return train_button

    def go_window(self):
        # After training, move on to the prediction page.
        the_window = SecondPage_1()
        self.windowList.append(the_window)
        self.close()
        the_window.show()

    def train(self):
        """Normalise path separators and run training with all 13 collected
        parameters, then open the prediction page."""
        parameters = []
        for item in self.Path_parameter:
            parameters.append(item.replace('/', '\\'))
        rec_photo(parameters[0], parameters[1], parameters[2], parameters[3], parameters[4], parameters[5], parameters[6],
                  parameters[7], parameters[8], parameters[9], parameters[10], parameters[11], parameters[12])
        self.go_window()

    def ninth_layout(self):
        QApplication.processEvents()
        self.text_button_Layout(self.text_lineEdit('select the folder to save model-structure file'),
                                self.buttonDialog('choose folder'), self.back_button(), self.train_button())
if __name__ == "__main__":
    # Launch the Qt application with the start page; exec_() blocks until the
    # last window closes and its status code is passed to sys.exit.
    app = QApplication(sys.argv)
    first = FirstPage()
    first.show()
    sys.exit(app.exec_())
|
def min_partition_diff(values):
    """Return the minimum |sum(left) - sum(right)| over every split of
    *values* into a prefix and the remaining suffix (prefix of length 1..n,
    matching the original scan which visited every element).

    Bug fixed: the original broke out of its scan as soon as the difference
    started growing, which is only valid when every element is positive;
    scanning every prefix is still O(n) and correct for arbitrary integers.
    """
    total = sum(values)
    prefix = 0
    best = abs(total)  # mirrors the original initialisation (mlen = sums)
    for v in values:
        prefix += v
        # left = prefix, right = total - prefix  =>  |left - right| = |2*prefix - total|
        best = min(best, abs(2 * prefix - total))
    return best


if __name__ == "__main__":
    # Guard added so importing this module does not consume stdin.
    N = int(input())  # element count (consumed; A's own length is used below)
    A = list(map(int, input().split()))
    print(min_partition_diff(A))
from django.shortcuts import render
from .models import Fooddishes
from .models import Fooddishes1, Toprecipe
# Create your views here.
def home(request):
    """Render the landing page with every dish collection and top recipes."""
    context = {
        'dishes': Fooddishes.objects.all(),
        'dishes1': Fooddishes1.objects.all(),
        'recipes': Toprecipe.objects.all(),
    }
    return render(request, "base.html", context)
|
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.metrics import AUC, BinaryAccuracy, FalseNegatives, FalsePositives, Precision, Recall, \
TrueNegatives, TruePositives
from tensorflow.python.keras.callbacks import LambdaCallback
from tensorflow.python.keras.layers import Dropout
from tensorflow.python.keras.regularizers import l2
from classifier.Data.TrainValidationDataset import TrainValidationDataset
from classifier.models.IClassifierModel import IClassifierModel
from classifier.models.utility.ManualInterrupter import ManualInterrupter
from classifier.prediction.losses.weighted_binary_cross_entropy import WeightedBinaryCrossEntropy
from data_models.weights.theme_weights import ThemeWeights
# 16/32 =>
#valuation of predictions for theme "ipad"
# ------
# TP = 162
# TN = 745
# FP = 78
# FN = 35
# Recall = 0.8223350253807107
# Precision = 0.675
# F-scrore = 0.7414187643020596
# AUC = 0.9346701124399406
# 32/64 =>
# TP = 173
# TN = 751
# FP = 50
# FN = 46
# Recall = 0.7899543378995434
# Precision = 0.7757847533632287
# F-scrore = 0.7828054298642534
# AUC = 0.956236211584834
class IpadClassifierModel(IClassifierModel):
    """CNN text classifier for the "ipad" theme.

    Four parallel Conv1D branches (kernel sizes 2, 3, 1 and 5) over a shared
    embedding are global-max-pooled, densely projected, concatenated and
    passed through a dense funnel ending in a per-theme sigmoid output.
    """

    embedding_output_dim = 72
    epochs = 200

    # Model properties
    __model_name__ = "Model-5"
    run_eagerly: bool = False

    # Other properties
    __plot_directory: str

    def __init__(self, plot_directory: str = None):
        self.__plot_directory = plot_directory

    def _conv_branch(self, layer, kernel_size, conv_filters, dense_after_conv,
                     conv_reg, dropout_conv, voc_size):
        """One branch: Conv1D -> GlobalMaxPool -> Dense -> Dropout.

        Each convolution's filters look for specific word combinations; for
        example a filter might return a small value except for "apple iphone".
        """
        branch = keras.layers.Conv1D(filters=conv_filters, kernel_size=kernel_size,
                                     input_shape=(voc_size, self.embedding_output_dim),
                                     activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        branch = keras.layers.GlobalMaxPooling1D()(branch)
        branch = keras.layers.Dense(dense_after_conv, activation=tf.nn.relu)(branch)
        return Dropout(dropout_conv)(branch)

    def train_model(self, themes_weight: ThemeWeights,
                    dataset: TrainValidationDataset,
                    voc_size: int,
                    keras_callback: LambdaCallback):
        """Build, compile and fit the model on *dataset*.

        Hyper-parameters stay inline (previously tried values kept in
        comments) to match the experiment log above the class.
        """
        conv_reg = 0.02          # 0.015
        dense_reg = 0.02         # 0.015
        dropout_conv = 0.20      # 0.2
        dropout_dense = 0.20     # 0.2
        conv_filters = 176
        dense_after_conv: int = 128

        article_length = dataset.article_length
        theme_count = dataset.theme_count

        input = keras.layers.Input(shape=(dataset.article_length,))
        layer = keras.layers.Embedding(input_dim=voc_size, input_length=article_length,
                                       output_dim=self.embedding_output_dim,
                                       mask_zero=True)(input)
        layer = Dropout(dropout_conv)(layer)

        # The four branches were copy-pasted and identical apart from
        # kernel_size; build them in a loop (order 2, 3, 1, 5 preserved so the
        # Concatenate input order is unchanged).
        branches = [self._conv_branch(layer, k, conv_filters, dense_after_conv,
                                      conv_reg, dropout_conv, voc_size)
                    for k in (2, 3, 1, 5)]
        layer = keras.layers.Concatenate()(branches)

        layer = keras.layers.Dense(dense_after_conv * 2, activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        layer = keras.layers.Dropout(dropout_dense)(layer)
        # Integer division: Dense expects an int unit count (128/2 was a float).
        layer = keras.layers.Dense(dense_after_conv // 2, activation=tf.nn.relu, kernel_regularizer=l2(l=conv_reg))(layer)
        layer = keras.layers.Dropout(dropout_dense)(layer)
        layer = keras.layers.Dense(theme_count, activation=tf.nn.sigmoid, kernel_regularizer=l2(l=dense_reg))(layer)

        model = keras.Model(inputs=input, outputs=layer)
        model.compile(optimizer=tf.keras.optimizers.Adam(clipnorm=1, learning_rate=0.00003),
                      loss=WeightedBinaryCrossEntropy(themes_weight.weight_array()),
                      metrics=[AUC(multi_label=True), BinaryAccuracy(), TruePositives(),
                               TrueNegatives(), FalseNegatives(), FalsePositives(),
                               Recall(), Precision()],
                      run_eagerly=None)  # NOTE(review): self.run_eagerly is never used here — confirm intent
        model.summary()

        self._model = model

        if self.__plot_directory is not None:
            self.plot_model(self.__plot_directory)

        # Fix for https://github.com/tensorflow/tensorflow/issues/38988
        model._layers = [layer for layer in model._layers if not isinstance(layer, dict)]

        callbacks = [ManualInterrupter(), keras_callback]

        model.fit(dataset.trainData,
                  epochs=self.epochs,
                  steps_per_epoch=dataset.train_batch_count,
                  validation_data=dataset.validationData,
                  validation_steps=dataset.validation_batch_count,
                  callbacks=callbacks,
                  )

    def get_model_name(self):
        return self.__model_name__
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 22:45:10 2020
@author: admin
"""
import numpy as np
import pandas as pd
from flask import Flask,render_template,request,jsonify
import pickle
app = Flask(__name__)
# Model is unpickled once at import time; the .pkl must sit in the working
# directory. NOTE(review): pickle is only safe for trusted model files.
model =pickle.load(open('finalmodel_TRB.pkl','rb'))
# Feature-name order the XGBoost booster was trained with; used to reorder
# the request DataFrame before predicting.
cols_when_model_builds = model.get_booster().feature_names
@app.route('/')
def home():
    """Serve the landing page with the prediction field zeroed out."""
    template_name = 'temp_index.html'
    return render_template(template_name, prediction_test=0)
@app.route('/predict',methods=['POST','GET'])
def predict():
    """Read loan features from the form, run the model, render the score.

    GET  -> renders the form page with a zeroed prediction (the original
            fell through and returned None, which is a 500 in Flask).
    POST -> builds a one-row DataFrame in the model's training column
            order, predicts, and renders the rounded score.
    """
    if request.method == "POST":
        # NOTE(review): 'loan_amnt ' carries a trailing space; it must match
        # the HTML form field name and the model's feature name — confirm
        # against temp_index.html before "fixing" it.
        loan_amnt = request.form.getlist('loan_amnt ')
        Rate_of_intrst = request.form.getlist('Rate_of_intrst')
        annual_inc = request.form.getlist('annual_inc')
        debt_income_ratio = request.form.getlist('debt_income_ratio')
        numb_credit = request.form.getlist('numb_credit')
        total_credits = request.form.getlist('total_credits')
        total_rec_int = request.form.getlist('total_rec_int')
        tot_curr_bal = request.form.getlist('tot_curr_bal')
        new_row ={'loan_amnt ':int(loan_amnt[0]),'Rate_of_intrst':float(Rate_of_intrst[0]),
                  'annual_inc':float(annual_inc[0]),
                  'debt_income_ratio':float(debt_income_ratio[0]),'numb_credit':int(numb_credit[0]),
                  'total_credits':int(total_credits[0]),'total_rec_int':float(total_rec_int[0]),
                  'tot_curr_bal':int(tot_curr_bal[0])}
        print(new_row)
        # pd.DataFrame.append() was deprecated in 1.4 and removed in 2.0;
        # build the single-row frame directly instead.
        final_features = pd.DataFrame([new_row])
        # Reorder columns to the order the model was trained with.
        final_features = final_features[cols_when_model_builds]
        prediction = model.predict(final_features)
        output = round(prediction[0],2)
        return render_template('temp_index.html',prediction_test=format(output))
    # GET: show the empty form instead of returning None.
    return render_template('temp_index.html', prediction_test=0)
if __name__ == "__main__":
    # Development server only; disable debug=True in production.
    app.run(debug=True)
|
import tkinter as tk
# TODO: turns, purple highlighting for chosen pawn, better code
class Checkers(tk.Frame):
    """A playable 8x8 checkers board built from tk.Buttons.

    All game state lives in local variables of __init__ and is shared by
    the nested handler functions via closures / `nonlocal`. Turn
    enforcement is not implemented yet (see module-level TODO).
    """
    def __init__(self, parent, *args, **kwargs):
        tk.Frame.__init__(self, parent, *args, **kwargs)
        self.parent = parent
        my_frame = tk.Frame(parent)
        # holds coords and a corresponding instances of buttons
        COORDS_BUTTON_DICT = {}
        # holds coords and a corresponding list, which contains color and type of figure (zeros if there isn't any figure)
        COORDS_FIGURE_DICT = {} # ie {(0,0): ["white", "queen"], (1,1): ["red", "pawn"], (2,2): ["0", "0"]}
        # generated for a specified coords. holds coords of possible moves and corresponding coords of beaten figure
        # (if this move implies beating) or 0 (if no beating)
        MOVES_BEATING_DICT = {} # ie {(3, 2): 0, (4, 5): (3, 4)}
        OLD_COORDS = (0, 0) # generated when clicking a figure. holds coords of a figure to move
        IS_CLICKED = False
        # PhotoImages stay referenced by the closures below, which keeps Tk
        # from dropping them while the board exists.
        white_pawn_image = tk.PhotoImage(file="img/white_pawn_1.png")
        red_pawn_image = tk.PhotoImage(file="img/red_pawn_1.png")
        white_queen_image = tk.PhotoImage(file="img/white_queen_1.png")
        red_queen_image = tk.PhotoImage(file="img/red_queen_1.png")
        white_bg = tk.PhotoImage(file="img/white_bg.png")
        grey_bg = tk.PhotoImage(file="img/grey_bg.png")
        white_pawn_highlighted_image = tk.PhotoImage(file="img/white_pawn_highlighted_1.png")
        red_pawn_highlighted_image = tk.PhotoImage(file="img/red_pawn_highlighted_1.png")
        white_queen_highlighted_image = tk.PhotoImage(file="img/white_queen_highlighted_1.png")
        red_queen_highlighted_image = tk.PhotoImage(file="img/red_queen_highlighted_1.png")
        highlighted_bg = tk.PhotoImage(file="img/highlighted_bg.png")
        def highlight_img(coords):
            """Swap images for the figure at `coords` and its destination
            squares: highlighted variants when a figure is selected
            (IS_CLICKED), plain variants when the selection is cleared."""
            if IS_CLICKED:
                if COORDS_FIGURE_DICT[coords] == ["white", "pawn"]:
                    COORDS_BUTTON_DICT[coords].config(image=white_pawn_highlighted_image)
                if COORDS_FIGURE_DICT[coords] == ["red", "pawn"]:
                    COORDS_BUTTON_DICT[coords].config(image=red_pawn_highlighted_image)
                if COORDS_FIGURE_DICT[coords] == ["white", "queen"]:
                    COORDS_BUTTON_DICT[coords].config(image=white_queen_highlighted_image)
                if COORDS_FIGURE_DICT[coords] == ["red", "queen"]:
                    COORDS_BUTTON_DICT[coords].config(image=red_queen_highlighted_image)
                # light up every currently reachable square
                for element in MOVES_BEATING_DICT:
                    COORDS_BUTTON_DICT[element].config(image=highlighted_bg)
            else:
                if COORDS_FIGURE_DICT[coords] == ["white", "pawn"]:
                    COORDS_BUTTON_DICT[coords].config(image=white_pawn_image)
                if COORDS_FIGURE_DICT[coords] == ["red", "pawn"]:
                    COORDS_BUTTON_DICT[coords].config(image=red_pawn_image)
                if COORDS_FIGURE_DICT[coords] == ["white", "queen"]:
                    COORDS_BUTTON_DICT[coords].config(image=white_queen_image)
                if COORDS_FIGURE_DICT[coords] == ["red", "queen"]:
                    COORDS_BUTTON_DICT[coords].config(image=red_queen_image)
                # restore background only on squares that are still empty
                for element in MOVES_BEATING_DICT:
                    if COORDS_FIGURE_DICT[element][0] == "0":
                        COORDS_BUTTON_DICT[element].config(image=grey_bg)
        # returns list of coords
        def get_left_top_diagonal(coords):
            """Squares on the up-left diagonal from `coords` (exclusive),
            ordered nearest-first."""
            diagonal_coords_list = []
            (x, y) = coords
            while x >= 0 and y >= 0:
                if (x, y) != coords:
                    diagonal_coords_list.append((x, y))
                x = x - 1
                y = y - 1
            return diagonal_coords_list
        def get_right_top_diagonal(coords):
            """Squares on the up-right diagonal from `coords` (exclusive),
            ordered nearest-first."""
            diagonal_coords_list = []
            (x, y) = coords
            while x >= 0 and y <= 7:
                if (x, y) != coords:
                    diagonal_coords_list.append((x, y))
                x = x - 1
                y = y + 1
            return diagonal_coords_list
        def get_right_bottom_diagonal(coords):
            """Squares on the down-right diagonal from `coords` (exclusive),
            ordered nearest-first."""
            diagonal_coords_list = []
            (x, y) = coords
            while x <= 7 and y <= 7:
                if (x, y) != coords:
                    diagonal_coords_list.append((x, y))
                x = x + 1
                y = y + 1
            return diagonal_coords_list
        def get_left_bottom_diagonal(coords):
            """Squares on the down-left diagonal from `coords` (exclusive),
            ordered nearest-first."""
            diagonal_coords_list = []
            (x, y) = coords
            while x <= 7 and y >= 0:
                if (x, y) != coords:
                    diagonal_coords_list.append((x, y))
                x = x + 1
                y = y - 1
            return diagonal_coords_list
        def get_queen_moves(my_list, coords):
            """Fill MOVES_BEATING_DICT for the queen at `coords` along one
            diagonal `my_list` (nearest-first order is assumed)."""
            # checking for a move without beating: empty square with no
            # figure between it and the queen (counter == 0)
            for square_to_move_to in my_list:
                if COORDS_FIGURE_DICT[square_to_move_to][0] == "0":
                    counter = 0
                    x = 0
                    while x < my_list.index(square_to_move_to):
                        if COORDS_FIGURE_DICT[my_list[x]][0] != "0":
                            counter += 1
                        x += 1
                    if counter == 0:
                        MOVES_BEATING_DICT[square_to_move_to] = 0
            # checking for a move with beating: exactly one figure (an enemy)
            # between the queen and the empty landing square (counter == 1)
            for enemy in my_list:
                if COORDS_FIGURE_DICT[enemy][0] != COORDS_FIGURE_DICT[coords][0] \
                        and COORDS_FIGURE_DICT[enemy][1] != "0":
                    for square_to_move_to in my_list:
                        if my_list.index(square_to_move_to) > my_list.index(enemy) and \
                                COORDS_FIGURE_DICT[square_to_move_to][0] == "0":
                            counter = 0
                            x = 0
                            while x < my_list.index(square_to_move_to):
                                if COORDS_FIGURE_DICT[my_list[x]][0] != "0":
                                    counter += 1
                                x += 1
                            if counter == 1:
                                MOVES_BEATING_DICT[square_to_move_to] = enemy
        def get_pawn_moves(my_list, coords):
            """Fill MOVES_BEATING_DICT for the pawn at `coords` along one
            forward diagonal `my_list` (nearest square first)."""
            # checking for a move without beating
            if len(my_list) >= 1:
                if COORDS_FIGURE_DICT[my_list[0]][0] == "0":
                    MOVES_BEATING_DICT[my_list[0]] = 0
            # checking for a move with beating: adjacent enemy with an empty
            # square right behind it
            if len(my_list) >= 2:
                if COORDS_FIGURE_DICT[my_list[0]][0] == "white" and COORDS_FIGURE_DICT[coords][0] == "red":
                    if COORDS_FIGURE_DICT[my_list[1]][0] == "0":
                        MOVES_BEATING_DICT[my_list[1]] = my_list[0]
                elif COORDS_FIGURE_DICT[my_list[0]][0] == "red" and COORDS_FIGURE_DICT[coords][0] == "white":
                    if COORDS_FIGURE_DICT[my_list[1]][0] == "0":
                        MOVES_BEATING_DICT[my_list[1]] = my_list[0]
        def get_possible_moves_dict(coords):
            """Rebuild MOVES_BEATING_DICT for the figure at `coords`.

            White pawns move toward higher rows, red pawns toward lower
            rows; queens scan all four diagonals."""
            nonlocal MOVES_BEATING_DICT
            MOVES_BEATING_DICT.clear()
            if COORDS_FIGURE_DICT[coords][1] == "pawn":
                if COORDS_FIGURE_DICT[coords][0] == "white":
                    get_pawn_moves(get_right_bottom_diagonal(coords), coords)
                    get_pawn_moves(get_left_bottom_diagonal(coords), coords)
                elif COORDS_FIGURE_DICT[coords][0] == "red":
                    get_pawn_moves(get_right_top_diagonal(coords), coords)
                    get_pawn_moves(get_left_top_diagonal(coords), coords)
            elif COORDS_FIGURE_DICT[coords][1] == "queen":
                get_queen_moves(get_left_top_diagonal(coords), coords)
                get_queen_moves(get_right_top_diagonal(coords), coords)
                get_queen_moves(get_right_bottom_diagonal(coords), coords)
                get_queen_moves(get_left_bottom_diagonal(coords), coords)
        def move_figure(new_coords):
            """Move the figure at OLD_COORDS to `new_coords` if that square
            is in MOVES_BEATING_DICT; promote on the far row and remove a
            beaten figure when the move implies one."""
            if new_coords in MOVES_BEATING_DICT:
                # rows 0 and 7 are the promotion rows
                if new_coords[0] == 0 or new_coords[0] == 7:
                    COORDS_FIGURE_DICT[new_coords][0] = COORDS_FIGURE_DICT[OLD_COORDS][0]
                    COORDS_FIGURE_DICT[new_coords][1] = "queen"
                    COORDS_FIGURE_DICT[OLD_COORDS] = ["0", "0"]
                else:
                    COORDS_FIGURE_DICT[new_coords] = COORDS_FIGURE_DICT[OLD_COORDS]
                    COORDS_FIGURE_DICT[OLD_COORDS] = ["0", "0"]
                image_names_dict = {tuple(["white", "pawn"]): white_pawn_image,
                                    tuple(["white", "queen"]): white_queen_image,
                                    tuple(["red", "pawn"]): red_pawn_image, tuple(["red", "queen"]): red_queen_image}
                img = image_names_dict[tuple(COORDS_FIGURE_DICT[new_coords])]
                COORDS_BUTTON_DICT[new_coords].config(image=img)
                COORDS_BUTTON_DICT[OLD_COORDS].config(image=grey_bg)
                # checking if there was a beating and removing the beaten figure
                if MOVES_BEATING_DICT[new_coords] != 0:
                    pawn_coords_to_delete = MOVES_BEATING_DICT[new_coords]
                    COORDS_FIGURE_DICT[pawn_coords_to_delete] = ["0", "0"]
                    COORDS_BUTTON_DICT[pawn_coords_to_delete].config(image=grey_bg)
        def my_click(x, y):
            """Button handler: first click selects a figure (and highlights
            its moves), second click tries to move it there."""
            nonlocal IS_CLICKED
            nonlocal OLD_COORDS
            if IS_CLICKED:
                new_coords = (x, y)
                move_figure(new_coords)
                IS_CLICKED = False
                highlight_img(OLD_COORDS)
            else:
                OLD_COORDS = (x, y)
                get_possible_moves_dict(OLD_COORDS)
                if COORDS_FIGURE_DICT[OLD_COORDS][0] != "0": # if there is a figure at this coords
                    IS_CLICKED = True
                    highlight_img(OLD_COORDS)
        def construct_buttons():
            """Create the 8x8 button grid and paint the dark squares."""
            grid_count = 0
            for x in range(0, 8):
                for y in range(0, 8):
                    btn = tk.Button(my_frame, image=white_bg,
                                    command=lambda row_n=x, column_n=y: my_click(row_n, column_n))
                    COORDS_BUTTON_DICT[(x, y)] = btn
                    COORDS_BUTTON_DICT[(x, y)].grid(row=x, column=y)
                    grid_count += 1
            for coords in COORDS_BUTTON_DICT:
                if (coords[0] + coords[1]) % 2 == 1:
                    COORDS_BUTTON_DICT[coords].config(image=grey_bg)
        def construct_figures():
            """Place the starting pawns (white rows 0-2, red rows 5-7) and
            set the corresponding button images."""
            nonlocal COORDS_FIGURE_DICT
            # reuse the button dict's keys; every value is overwritten below
            COORDS_FIGURE_DICT = COORDS_BUTTON_DICT.copy()
            for coords in COORDS_FIGURE_DICT:
                if coords[0] in [0, 2] and coords[1] % 2 == 1:
                    COORDS_FIGURE_DICT[coords] = ["white", "pawn"]
                elif coords[0] == 1 and coords[1] % 2 == 0:
                    COORDS_FIGURE_DICT[coords] = ["white", "pawn"]
                elif coords[0] in [5, 7] and coords[1] % 2 == 0:
                    COORDS_FIGURE_DICT[coords] = ["red", "pawn"]
                elif coords[0] == 6 and coords[1] % 2 == 1:
                    COORDS_FIGURE_DICT[coords] = ["red", "pawn"]
                else:
                    COORDS_FIGURE_DICT[coords] = ["0", "0"]
            for coords in COORDS_FIGURE_DICT:
                if COORDS_FIGURE_DICT[coords][0] == "white":
                    COORDS_BUTTON_DICT[coords].config(image=white_pawn_image)
                elif COORDS_FIGURE_DICT[coords][0] == "red":
                    COORDS_BUTTON_DICT[coords].config(image=red_pawn_image)
        construct_buttons()
        construct_figures()
        my_frame.pack()
if __name__ == "__main__":
    # Stand-alone launch: build the root window and start the Tk event loop.
    root = tk.Tk()
    root.title('CHECKERS')
    Checkers(root).pack()
    root.mainloop()
|
'''
Anisotropic TMM formulation.
The PQ formulation breaks down here, since the differential equation
dz([ex, ey, hx, hy]) = Gamma ([ex, ey, hx, hy]) has a Gamma matrix which is
completely dense.
'''
import numpy as np
import cmath;
from scipy.linalg import eig
def Gamma(kx, ky, e_tensor, m_r):
    '''
    Build the 4x4 coupling matrix for d/dz [ex, ey, hx, hy] = Gamma [ex, ey, hx, hy].

    :param kx: normalized x-component of the transverse wavevector
    :param ky: normalized y-component of the transverse wavevector
    :param e_tensor: 3x3 dielectric tensor, indexed e[row, col]:
        exx exy exz | e[0,0] e[0,1] e[0,2]
        eyx eyy eyz | e[1,0] e[1,1] e[1,2]
        ezx ezy ezz | e[2,0] e[2,1] e[2,2]
    :param m_r: scalar relative permeability
    :return: 4x4 np.matrix; dense in general for anisotropic media
    '''
    e = e_tensor  # compact alias for readability of the matrix below
    j = cmath.sqrt(-1)
    # NOTE: np.matrix is deprecated in NumPy, but it is kept here so the
    # return type (and its '*' operator semantics) stays unchanged for
    # existing callers. Renamed the local so it no longer shadows Gamma().
    gamma = np.matrix([[-j*(kx*(e[2,0]/e[2,2]) + ky/m_r) , j*kx*(1/m_r-e[2,1]/e[2,2]) , kx*ky/e[2,2] , -kx**2/e[2,2]+m_r ],
                       [j*ky*(-e[2,0]/e[2,2]) , -j*(ky*(e[2,1]/e[2,2])) , ky**2/e[2,2] - (m_r), -kx*ky/e[2,2] ],
                       [kx*ky/m_r+e[1,0]-e[1,2]*e[2,0]/e[2,2], -kx**2/m_r+e[1,1]-e[1,2]*e[2,1]/e[2,2],-j*(ky*e[1,2]/e[2,2]), j*kx*(e[1,2]/e[2,2])],
                       [ky**2/m_r-e[0,0]+e[0,2]*e[2,0]/e[2,2], -kx*ky/m_r-e[0,1]+e[0,2]*e[2,1]/e[2,2], j*ky*(e[0,2]/e[2,2]), -j*(kx*e[0,2]/e[2,2])]])
    # if we deal with this matrix, then the traditional TMM approach is unstable...
    return gamma
def eigen_Gamma(Gamma):
    '''Diagonalize the Gamma matrix; returns (eigenvalues, eigenvectors).'''
    # TODO: sort the eigenvalues (forward vs. backward modes) before use.
    eigenvalues, eigenvectors = eig(Gamma)
    return eigenvalues, eigenvectors
import datetime
import json
from flask import Blueprint, jsonify, request, current_app
from flask_login import current_user
import db
from utils import database
from utils.file_manager import FileManager, FileType
# Public site routes ("home" blueprint): health check, theme, repos, blogs,
# publications and markdown document management.
bp = Blueprint("home", __name__)
@bp.route("/ping", methods=["GET"])
def ping():
return jsonify({"msg": "pong"})
@bp.route("/theme", methods=["GET"])
def theme():
"""
Returns theme JSON object
Returns (JSON): theme JSON object
"""
try:
with open(current_app.config["THEME_DIR"], "r") as f:
data = json.load(f)
return jsonify({"theme": data})
except FileNotFoundError as e:
return jsonify({"ERROR": e})
@bp.route("/top_k", methods=["GET"])
def top_k():
"""
Returns the top k latest entries in the database
Returns (JSON): a list of top k entries
"""
db_conn = db.get_db()
k = request.args.get("k")
top_ks = database.get_top_k_entries(db_conn, int(k))
db_conn.close()
return jsonify({"top_k": top_ks})
@bp.route("/public_repos", methods=["GET", "POST"])
def public_repos():
"""
List all public repos from github
Returns (JSON): GET -> a list of repos and updated timestamp
POST -> set visibility of projects
"""
if request.method == "GET":
db_conn = db.get_db()
repos, updated = database.get_public_repos(db_conn)
db_conn.close()
return jsonify({"repos": repos, "updated": updated})
if current_user.is_authenticated:
db_conn = db.get_db()
selected = request.json["projects"]
for s in selected:
entry_id = s["id"]
visible = s["visible"]
database.update_visibility("public_repos", db_conn, entry_id, visible)
db_conn.close()
return jsonify({"INFO": "Updated"})
return jsonify({"ERROR": "Unauthenticated"})
@bp.route("/blogs", methods=["GET", "POST"])
def blogs():
"""
GET all published blogs and external blog links
Returns (JSON): GET -> a list of all blogs
POST -> INFO message
"""
if request.method == "GET":
# external links
db_conn = db.get_db()
all_blogs, updated = database.get_articles(db_conn)
db_conn.close()
# internal blogs
fm: FileManager = current_app.config["FILE_MANAGER"]
published_files = fm.list(as_dict=True, file_type=FileType.PUBLISHED)
published_files = list(published_files.values())
all_blogs.extend(published_files)
if current_user.is_authenticated:
fm: FileManager = current_app.config["FILE_MANAGER"]
unpublished_files = fm.list(as_dict=True, file_type=FileType.UNPUBLISHED)
unpublished_files = list(unpublished_files.values())
all_blogs.extend(unpublished_files)
return jsonify({"blogs": all_blogs, "updated": updated})
@bp.route("/blogs/external_link", methods=["POST"])
def add_external_blog_link():
if current_user.is_authenticated:
db_conn = db.get_db()
title = request.json["title"]
description = request.json["description"]
url = request.json["url"]
image_url = request.json["image_url"]
time_stamp = request.json["time_stamp"] + " 00:00:00"
database.add_entry("blogs", db_conn, title, description, url, image_url, time_stamp)
db_conn.close()
return jsonify({"INFO": "Blog added"})
return jsonify({"ERROR": "Unauthenticated"}), 401
@bp.route("/publications", methods=["GET", "POST"])
def publications():
"""
GET or POST publications
Returns (JSON): GET -> a list of all publications
POST -> INFO message
"""
if request.method == "GET":
db_conn = db.get_db()
all_blogs = database.get_entries("publications", db_conn)
db_conn.close()
return jsonify({"publications": all_blogs})
if current_user.is_authenticated:
db_conn = db.get_db()
title = request.json["title"]
description = request.json["description"]
url = request.json["url"]
image_url = request.json["image_url"]
time_stamp = request.json["time_stamp"] + " 00:00:00"
database.add_entry("publications", db_conn, title, description, url, image_url, time_stamp)
db_conn.close()
return jsonify({"INFO": "Publication added"})
return jsonify({"ERROR": "Unauthenticated"}), 401
@bp.route("/markdown_content", methods=["GET", "POST"])
def markdown_content():
file_name: str = request.args.get("path")
file_type: FileType = FileType(int(request.args.get("file_type")))
fm: FileManager = current_app.config["FILE_MANAGER"]
if ".." in file_name or "~" in file_name or "/" in file_name:
return jsonify({"INFO": "Invalid file name"}), 550
if file_type == FileType.UNPUBLISHED and not current_user.is_authenticated:
return jsonify({"ERROR": "Unauthenticated"}), 401
if request.method == "GET":
if request.args.get("version"):
content = fm.read_version(file_name, request.args.get("version"), file_type)
else:
content = fm.read(file_name, file_type)
return jsonify({"INFO": "Document found", "markdown": content})
elif current_user.is_authenticated:
content = request.json["markdown"]
fm.write(file_name, content, file_type)
return jsonify({"INFO": "Document written", "time": str(datetime.datetime.now())})
else:
return jsonify({"ERROR": "Unauthenticated"}), 401
@bp.route("/publish", methods=["GET"])
def publish():
"""
Publish blog by moving file to published dir
"""
if current_user.is_authenticated:
file_name: str = request.args.get("path")
fm: FileManager = current_app.config["FILE_MANAGER"]
if ".." in file_name or "~" in file_name or "/" in file_name:
return jsonify({"INFO": "Invalid file name"}), 550
published = fm.publish(file_name=file_name)
info = "published" if published else "not published"
return jsonify({"INFO": info})
return jsonify({"ERROR": "Unauthenticated"}), 401
@bp.route("/unpublish", methods=["GET"])
def unpublish():
"""
Unpublish blog by removed it from published fir
"""
if current_user.is_authenticated:
file_name: str = request.args.get("path")
fm: FileManager = current_app.config["FILE_MANAGER"]
if ".." in file_name or "~" in file_name or "/" in file_name:
return jsonify({"INFO": "Invalid file name"}), 550
unpublished = fm.unpublish(file_name=file_name)
info = "unpublished" if unpublished else "not unpublished"
return jsonify({"INFO": info})
return jsonify({"ERROR": "Unauthenticated"}), 401
@bp.route("/list_published", methods=["GET"])
def list_published():
fm: FileManager = current_app.config["FILE_MANAGER"]
published_files = fm.list(as_dict=True, file_type=FileType.PUBLISHED)
return jsonify({"published": published_files})
@bp.route("/list_unpublished", methods=["GET"])
def list_unpublished():
if current_user.is_authenticated:
fm: FileManager = current_app.config["FILE_MANAGER"]
unpublished_files = fm.list(as_dict=True, file_type=FileType.UNPUBLISHED)
return jsonify({"unpublished": unpublished_files})
return jsonify({"ERROR": "Unauthenticated"}), 401
|
# Project Euler Question 48
# Self powers: find the last ten digits of 1^1 + 2^2 + ... + 1000^1000.
result = sum(n ** n for n in range(1, 1001))
# Modulo 10^10 prints the same integer the original string-slice produced.
print(result % 10 ** 10)
'''
@package fader
@brief
@details
@author Remus Avram
@date 2014.12
'''
from PyQt4 import QtCore, QtGui
class FaderWidget(QtGui.QWidget):
    '''Cross-fades between two widgets by painting a snapshot of the old
    widget over the new one with decreasing opacity.'''
    def __init__(self, old_widget, new_widget=None, duration=1000, reverse=False):
        """
        :param old_widget: widget to fade out (a pixmap snapshot is taken)
        :param new_widget: widget revealed underneath (also our Qt parent)
        :param duration: animation length in milliseconds
        :param reverse: run the timeline backwards (fade in instead of out)
        """
        QtGui.QWidget.__init__(self, new_widget)
        self.resize(old_widget.size())
        # Snapshot of the old widget; painted on top while fading.
        self.old_pixmap = QtGui.QPixmap(old_widget.size())
        old_widget.render(self.old_pixmap)
        self.pixmap_opacity = 1.0
        self.timeline = QtCore.QTimeLine()
        if reverse:
            self.timeline.setDirection(self.timeline.Backward)
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.deleteLater)
        # BUG FIX: was setDirection(duration), which clobbered the direction
        # chosen above for `reverse` and never applied the duration at all.
        self.timeline.setDuration(duration)
        self.timeline.start()
        self.show()
    # END def __init__
    def paintEvent(self, event):
        """Paint the old-widget snapshot at the current fade opacity."""
        painter = QtGui.QPainter()
        painter.begin(self)
        painter.setOpacity(self.pixmap_opacity)
        painter.drawPixmap(0, 0, self.old_pixmap)
        painter.end()
    # END def paintEvent
    def animate(self, value):
        """Timeline callback: map progress (0..1) to opacity (1..0)."""
        self.pixmap_opacity = 1.0 - value
        self.repaint()
        self.update()
    # END def animate
# END class FaderWidget
class FaderExample(QtGui.QWidget):
    '''
    Example widget using a FaderWidget to transition
    between two simple colored QWidgets in a stack layout.
    '''
    def __init__(self):
        super(FaderExample, self).__init__()
        self.resize(600,600)
        self.vlayout = QtGui.QVBoxLayout(self)
        # Two solid-colour pages to fade between.
        self.w1 = QtGui.QWidget()
        self.w1.setStyleSheet("QWidget {background-color: blue;}")
        self.w2 = QtGui.QWidget()
        self.w2.setStyleSheet("QWidget {background-color: red;}")
        self.stacked = QtGui.QStackedLayout()
        for page in (self.w1, self.w2):
            self.stacked.addWidget(page)
        self.vlayout.addLayout(self.stacked)
        # Control buttons under the stack.
        self.fadeButton = QtGui.QPushButton("Fade")
        self.resetButton = QtGui.QPushButton("Reset")
        button_row = QtGui.QHBoxLayout()
        button_row.addWidget(self.fadeButton)
        button_row.addWidget(self.resetButton)
        self.vlayout.addLayout(button_row)
        self.fadeButton.clicked.connect(self.fade)
        self.resetButton.clicked.connect(self.reset)
    # END def __init__
    def fade(self):
        """Fade from page 1 to page 2."""
        # Parented to w2; the fader deletes itself when its timeline ends.
        FaderWidget(self.w1, self.w2)
        self.stacked.setCurrentWidget(self.w2)
    # END def fade
    def reset(self):
        """Jump straight back to page 1 (no animation)."""
        self.stacked.setCurrentWidget(self.w1)
    # END def reset
# END class FaderExample
'''
Run in Maya:
================================================================================
import sys
from PyQt4 import QtGui
pathToDir = r"D:\Programare\PySide-PyQt\Tutorials\cmivfx - PyQt4 UI Development for Maya\PyQt for Maya"
if pathToDir not in sys.path:
sys.path.append(pathToDir)
try:
reload(fader)
except:
import fader
f = fader.FaderExample()
f.show()
================================================================================
'''
|
from sklearn import datasets
from sklearn import neighbors

# Load the iris dataset bundled with scikit-learn.
iris = datasets.load_iris()

# Create and fit a nearest-neighbor classifier on the full dataset.
knn = neighbors.KNeighborsClassifier()
knn.fit(iris.data, iris.target)

# Classify one unseen measurement and show the predicted class label.
result = knn.predict([[0.1, 0.2, 0.3, 0.4]])
print(result)
|
# -*- coding:utf-8 -*-
# __author__ = 'gupan'
# Demo of Python set operations; each print shows one operation's result.
set_1 = set([1, 2, 3, 4, 5, 6, 7, 8])
set_2 = set([2, 3, 4, 5, 6, 8, 9, 10])
sub_1 = {1, 2, 3}
sub_2 = {11}
print(set_1)
print(set_2)
# Method forms of the algebra operations.
print(set_1.intersection(set_2))
print(set_1.union(set_2))
print(set_1.difference(set_2))
print(set_2.difference(set_1))
# Subset / superset / disjointness predicates.
print(sub_1.issubset(set_1))
print(sub_2.issubset(set_1))
print(set_1.issuperset(sub_1))
print(set_1.issuperset(sub_2))
print(set_1.symmetric_difference(set_2))
print(set_1.isdisjoint(set_2))
print(sub_1.isdisjoint(sub_2))
# Operator forms: & intersection, - difference, | union, ^ symmetric diff.
print(set_1&set_2)
print(set_1-set_2)
print(set_1|set_2)
print(set_1^set_2)
# Mutating operations.
set_1.add(100)
print(set_1)
set_1.update({11, 14, 15})
print(set_1)
# remove() raises KeyError if absent; discard() does not.
set_1.remove(100)
print(set_1)
# pop() removes and returns an arbitrary element.
set_1.pop()
print(set_1)
set_1.discard(100)
print(set_1)
set_1.discard(100)
print(set_1)
print(len(set_1))
print(2 in set_1)
import sys
import json
from twisted.python import log
from twisted.internet import reactor
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from utils.thread_with_trace import thread_with_trace
from api.tal import Tal
from config import token
class Anna(WebSocketServerProtocol):
    """WebSocket endpoint that proxies start/stop commands to a Tal stream."""
    def __init__(self, *args, **kwargs):
        # One Tal API client per protocol instance, bound back to us.
        self.ws = Tal(token, self)
        super(Anna, self).__init__(*args, **kwargs)

    def onConnect(self, request):
        """Open the upstream Tal connection as soon as a client attaches."""
        print("Anna connected")
        self.ws.connect()

    def onMessage(self, payload, isBinary):
        """Dispatch JSON-encoded 'start'/'stop' commands to the stream."""
        command = json.loads(payload)
        if command == 'start':
            self.ws.start_streaming()
        elif command == 'stop':
            self.ws.stop_streaming()

    def onClose(self, wasClean, code, reason):
        """Tear down the upstream stream when the client goes away."""
        print("Anna disconnected")
        self.ws.stop_streaming()
        self.ws.close()
if __name__ == '__main__':
    # Log to stdout and serve the Anna protocol on TCP port 9000.
    log.startLogging(sys.stdout)
    factory = WebSocketServerFactory()
    factory.protocol = Anna
    reactor.listenTCP(9000, factory)
    reactor.run()
# NOTE(review): this import precedes the module docstring, so the string
# below is no longer the real docstring; harmless, but worth tidying.
from django.utils.translation import ugettext_lazy as _
"""
Django settings for vminventory project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'wun$ty%5f92!r9@r=*05!id)!#leeb4u!tit=x1ib*cvh0yih6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Set this list of users and e-mails addresses if you want to get e-mails when something goes wrong.
#ADMINS = [('Name Lastname', 'em@ail')]
# Set the hostname that should serve this app. See:
# https://docs.djangoproject.com/en/2.2/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # NOTE(review): Django expects 'DIRS' to be a list of paths; a bare
        # string gets iterated per-character — confirm and wrap in [ ].
        'DIRS': os.path.join(BASE_DIR, 'templates'),
        'APP_DIRS': True,
        # 'DEBUG': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.request',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                # 'django.core.context_processors.request',
            ],
        },
    },
]
#TEMPLATE_CONTEXT_PROCESSORS = (
    #'django.core.context_processors.request',
    #'django.contrib.auth.context_processors.auth',
#)
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'vmtory',
    'django_tables2',
    'django_python3_ldap',
    'crispy_forms',
)
# MIDDLEWARE_CLASSES is the pre-Django-1.10 middleware setting, consistent
# with the old-style settings used throughout this file.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'vminventory.urls'
WSGI_APPLICATION = 'vminventory.wsgi.application'
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',
                           # Uncomment the following line fot LDAP auth
                           # 'django_python3_ldap.auth.LDAPBackend',
                           )
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
        # 'ENGINE': 'django.db.backends.postgresql_psycopg2',
        # 'NAME': 'vmtory',
        # 'USER': 'django',
        # 'PASSWORD': 'django',
        # 'HOST': 'db',
    }
}
VMTORY_NOTIFICATION_BACKEND = (
    'email',
    # 'itop',
)
VMTORY_SUPPORT_EMAIL = 'support@mycompany.com'
# We use this backend that prints all the emails in console instead of sending them
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = 'vmtory@test.local'
# EMAIL_HOST = '<your mail server>'
# EMAIL_PORT = '25'
# iTop ticketing integration (only used when 'itop' is enabled above).
ITOP_USERNAME = ''
ITOP_PASSWORD = ''
ITOP_API_URL = 'https://itop.yourdomain/webservices/rest.php?version=1.0'
ITOP_CATEGORIES = {
    'VM_POWER_ON': '',
    'VM_POWER_OFF': '',
    'VM_POWER_RESET': '',
    'VM_CLONE': '',
    'VM_BACKUP': '',
    'SNAPSHOT_CREATE': '',
    'SNAPSHOT_RESTORE': '',
    'SNAPSHOT_DELETE': '',
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGES = (
    ('en', _('English')),
    ('es', _('Spanish')),
)
#LANGUAGE_CODE = 'es-AR'
LANGUAGE_CODE = 'en'
# TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
# STATIC_ROOT = '/home/administrator/vmtory/vminventory/vmtory/static/'
# Theme for the crispy forms
CRISPY_TEMPLATE_PACK = 'uni_form'
# LDAP Configuration
# The URL of the LDAP server.
# LDAP_AUTH_URL = "ldap://ldapserver.yourdomain:389"
# Initiate TLS on connection.
# LDAP_AUTH_USE_TLS = False
# The LDAP search base for looking up users.
# LDAP_AUTH_SEARCH_BASE = "ou=Users,dc=yourcomany,dc=com,dc=ar"
# The LDAP class that represents a user.
# LDAP_AUTH_OBJECT_CLASS = "posixAccount"
# User model fields mapped to the LDAP
# attributes that represent them.
# LDAP_AUTH_USER_FIELDS = {
#     "username": "uid",
#     "first_name": "givenName",
#     "last_name": "sn",
#     "email": "mail",
# }
# A tuple of django model fields used to uniquely identify a user.
# LDAP_AUTH_USER_LOOKUP_FIELDS = ("username",)
# Path to a callable that takes a dict of {model_field_name: value},
# returning a dict of clean model data.
# Use this to customize how data loaded from LDAP is saved to the User model.
LDAP_AUTH_CLEAN_USER_DATA = "django_python3_ldap.utils.clean_user_data"
# Path to a callable that takes a user model and a dict of {ldap_field_name: [value]},
# and saves any additional user relationships based on the LDAP data.
# Use this to customize how data loaded from LDAP is saved to User model relations.
# For customizing non-related User model fields, use LDAP_AUTH_CLEAN_USER_DATA.
LDAP_AUTH_SYNC_USER_RELATIONS = "django_python3_ldap.utils.sync_user_relations"
# Path to a callable that takes a dict of {ldap_field_name: value},
# returning a list of [ldap_search_filter]. The search filters will then be AND'd
# together when creating the final search filter.
LDAP_AUTH_FORMAT_SEARCH_FILTERS = "django_python3_ldap.utils.format_search_filters"
# Path to a callable that takes a dict of {model_field_name: value}, and returns
# a string of the username to bind to the LDAP server.
# Use this to support different types of LDAP server.
LDAP_AUTH_FORMAT_USERNAME = "django_python3_ldap.utils.format_username_openldap"
# Sets the login domain for Active Directory users.
LDAP_AUTH_ACTIVE_DIRECTORY_DOMAIN = None
# The LDAP username and password of a user for authenticating the `ldap_sync_users`
# management command. Set to None if you allow anonymous queries.
LDAP_AUTH_CONNECTION_USERNAME = None
LDAP_AUTH_CONNECTION_PASSWORD = None
|
"""
Sawyer Bailey Paccione and Olif Soboka Hordofa
audioDetection.py
Tufts University Spring 2021, ME-0035
Purpose: Detect whether audio was playing or not playing and convey that
information to a LEGO SPIKE PRIME
Description: This audio detection uses K-Nearest-Neighbors Algorithm. This
is a supervised training algorithm. There are two cases, no music, and music.
1. Have no music playing and read in 5 Values
2. Play music and read in another 5 Values
3. Run the microphone continuously and get the most recent
reading
4. Find the minimum distance between this reading and the 5 no
music values and the 5 music values.
5. Send the case (music/no-music) for the value that has the
minimum distance to the SPIKE PRIME
"""
################################################################################
# Imports #
################################################################################
import serial, time # Serial Communication to The Spike Prime
import pyaudio, wave # Essential for Audio Detection
from array import array # Mathematics for Audio Processing
import statistics
################################################################################
# Setup for Serial Communication #
################################################################################
# Serial link to the LEGO SPIKE PRIME (see module docstring).
# NOTE(review): /dev/ttyS0 is assumed to be the RPi UART wired to the
# SPIKE PRIME — confirm the wiring/port on the target device.
serialPort = serial.Serial(
    port='/dev/ttyS0',
    baudrate = 115200,
    parity=serial.PARITY_NONE,
    stopbits=serial.STOPBITS_ONE,
    bytesize=serial.EIGHTBITS,
    timeout=1  # seconds; keeps reads from blocking forever
)
################################################################################
# Setup for Audio #
################################################################################
# Recording parameters used by get_audio_val() below.
form_1 = pyaudio.paInt16 # 16-bit resolution
chans = 1 # 1 channel
samp_rate = 44100 # 44.1kHz sampling rate
chunk = 4096 # 2^12 samples for buffer
record_secs = 1 # seconds to record
dev_index = 2 # device index found by p.get_device_info_by_index(ii)
audio = pyaudio.PyAudio() # create pyaudio instantiation
"""
get_audio_val()
Purpose: Read a value from the microphone attached to the RPI
Arguments: N/A
Returns: The Average of the all the readings taken in 1 second
Effects: N/A
Notes: Everytime that this function is called the PyAudio object
is opened and closed. This appears inefficient; however it works
and if simply opened at the beggining of the program and closed
at the end, there is an overflow error.
"""
def get_audio_val() :
    """Record ~1 second of audio and return a single loudness value.

    Each 4096-sample chunk is reduced to the standard deviation of its
    16-bit samples (a loudness proxy); the mean over all chunks is returned.
    The stream is opened and closed per call — opening it once for the whole
    program caused an overflow error (see the note above this function).
    """
    # Start Listening For Audio
    stream = audio.open(format = form_1,
                        rate = samp_rate, channels = chans,
                        input_device_index = dev_index, input = True,
                        frames_per_buffer=chunk)
    frames = []
    # loop through stream and append audio chunks to frame array
    for ii in range(0, int((samp_rate/chunk)*record_secs)):
        raw = stream.read(chunk)
        data = array('h', raw).tolist()  # decode raw bytes as signed 16-bit samples
        std = statistics.stdev(data)     # spread of samples ~ perceived loudness
        frames.append(std)
    audio.close(stream)
    # Calculate the Average reading of the Audio Data
    mu = statistics.mean(frames)
    print("Mu:", mu)
    return mu
################################################################################
# Global Variables #
################################################################################
training = []
# class_training = [[0, 1335.4445504225775], [0, 1356.6027573564102], [0, 1249.9147047388728], [0, 1511.3257309542355], [0, 1281.1382817982096], [1, 1780.7345239511874], [1, 2416.3177562886126], [1, 1774.6496247911393], [1, 1631.1579184233392], [1, 1809.1426950083303]] # Uncomment if you don't want to do training
"""
train_nearest_neighbors()
Purpose: Train kNN Algorithm for Music Detection
Arguments: N/A
Returns: N/A
Effects: Initializes the values in training array
Notes: Waits for user input to continue reading the next value
"""
def train_nearest_neighbors():
    """Collect 5 labelled microphone readings per case for the kNN classifier.

    Case 0 = no music, case 1 = music. Blocks on user input before each
    reading and appends [case, reading] pairs to the module-level
    ``training`` list, printing the full list when done.
    """
    for case in range(2) :
        for counter in range(5):
            command_string = "Press Enter To get the " + str(counter + 1) + " value for case " + str(case + 1) + " "
            input(command_string)
            sound = get_audio_val()
            training.append([case, sound])
            time.sleep(0.1)  # brief settle time between readings
    print(training)
"""
main()
Purpose: Runs the main computer program of Audio Dection
Arguments: N/A
Returns: N/A
Effects: Training Array and the Serial Port connected to the SPIKE PRIME
Notes:
"""
def main() :
    """Continuously classify live audio (1-NN) and notify the SPIKE PRIME.

    After training, reads one audio value per loop, finds the training
    sample with the minimum absolute distance, and — only when the
    predicted case changes — writes the new case over the serial port.
    """
    train_nearest_neighbors()
    curr_case = 0
    while True:
        curr_sound = get_audio_val()
        # BUG FIX: was `mini = 1000` — a reading farther than 1000 from every
        # training sample matched nothing and silently kept the stale case.
        mini = float('inf')
        prev_case = curr_case
        for (c, s) in training:
            dist = abs(curr_sound - s)
            if dist < mini:
                mini = dist
                curr_case = c
        if (curr_case != prev_case) :
            # Send Case to Spike if Different then before
            print(curr_case)
            if serialPort:
                to_send = str(curr_case) + "\r\n"
                serialPort.write(to_send.encode())


if __name__ == "__main__":
    main()
|
import uuid
from jsonschema import FormatChecker
@FormatChecker.cls_checks("uuid")
def check_uuid_format(instance):
    """jsonschema format checker: True iff *instance* parses as a UUID.

    Per JSON Schema semantics, "format" only constrains strings, so
    non-string instances are accepted.
    """
    # BUG FIX: uuid.UUID(non-str) raises TypeError/AttributeError (not
    # ValueError), which escaped the except clause and crashed validation.
    if not isinstance(instance, str):
        return True
    try:
        uuid.UUID(instance)
        return True
    except ValueError:
        return False
# List every 'format' value jsonschema can now validate (includes our "uuid").
print("JSONSchema validation supported for the following 'format' values:")
print(", ".join(FormatChecker.checkers.keys()))
|
import numpy as np
from math_utils import *
from entities import *
def worldgenerator(densities, width, height):
    """Build a width x height grid of floor Entities.

    densities: per-category probabilities (grass/lava/gold); presumably they
    sum to 1 — TODO confirm against callers.
    """
    floor = np.empty([width, height], dtype=object)
    for x in range(width):
        for y in range(height):
            # val counts how many cumulative-density thresholds a single
            # uniform draw exceeds, i.e. samples a category index from
            # `densities` (relies on math_utils.step being a unit step).
            val = np.sum([step(np.random.uniform(0, 1) - np.sum(densities[:i]), 1) for i in range(1, len(densities))])
            color = EntityColor.GRASS
            etype = EntityType.BLOCK
            if val == 1:
                color = EntityColor.LAVA
                etype = EntityType.LAVA
            elif val == 2:
                color = EntityColor.GOLD
                etype = EntityType.GOLD
            floor[x, y] = Entity((x, y), etype, color)
    # NOTE(review): forces one fixed tile's color to lava — looks like a
    # debugging leftover; confirm before removing.
    floor[2, 0].color(EntityColor.LAVA)
    return floor
class World:
    """Grid world: a floor of block Entities plus free entities and players."""

    def __init__(self, width, height):
        self.floor = worldgenerator([0.8, 0.1, 0.1], width, height)
        self.entities = np.empty(0, dtype=object)
        self.skybox = (135/8, 206/8, 250/8)  # dimmed sky-blue RGB
        self.players = []
        self.PIDs = []            # player IDs currently in use
        self.training_pool = []   # [watcher_PID, percept, move] samples per tick

    def spawn(self, entity):
        """Register an entity; players additionally get the lowest free PID."""
        if entity.etype() == EntityType.PLAYER:
            self.players.append(entity)
            i = 0
            while True:
                if i not in self.PIDs:
                    self.players[-1].PID = i
                    self.PIDs.append(i)
                    break
                i += 1
        else:
            self.add_entity(entity)

    def add_entity(self, entity):
        """Append a non-player entity to the entity array."""
        self.entities = np.append(self.entities, entity)

    def get_block(self, pos):
        """Return the floor block at pos, or None when out of bounds."""
        (w, h) = self.floor.shape
        (x, y) = pos
        if x < 0 or x >= w or y < 0 or y >= h:
            return None
        else:
            return self.floor[x, y]

    def set_block(self, x, y, entity):
        """Overwrite the floor block at (x, y); no-op when out of bounds."""
        # BUG FIX: shape was unpacked as (h, w), swapping the bounds check for
        # non-square worlds — floor is indexed [width, height] everywhere else.
        (w, h) = self.floor.shape
        if x < 0 or x >= w or y < 0 or y >= h:
            return None
        else:
            self.floor[x, y] = entity

    def move(self, entity, dxy):
        """Move entity by dxy if the destination block exists."""
        if dxy == (0, 0):
            return
        new_pos = np.add(entity.pos(), dxy)
        # Process Entity Movement
        block = self.get_block(new_pos)
        if block is None:  # identity test: blocks may define their own __eq__
            return
        entity.move(block)

    def perceive(self):
        """Refresh every player's percept from the current world state."""
        for player in self.players:
            self.update_percept(player)

    def react(self):
        """Let each player act on its percept; log samples for its watchers."""
        for player in self.players:
            P = player.percept.vector
            dxy = player.get_move(P)
            self.move(player, dxy)
            for watcher_PID in player.watchers:
                self.training_pool.append([
                    watcher_PID,
                    P,
                    player.move_memory_buffer[0]
                ])

    def learn(self):
        """Train each player on pooled samples addressed to its PID."""
        # update watchers: players never learn from themselves
        for player in self.players:
            while player.PID in player.watchers:
                player.watchers.remove(player.PID)
            player.train([item for item in self.training_pool if item[0] == player.PID])

    def update_percept(self, player):
        """Fill the player's spatial percept with visible block type values."""
        # Get spatial percept
        coords = player.get_visible_coords()
        block_vals = []
        for coord in coords:
            # assumes visible coords are in bounds (get_block would return None)
            block_vals.append(self.get_block(coord).etype().value[0])
        player.percept.spatial(np.array(block_vals))
        return
class Foo:
    """Demo class: a plain method is a non-data descriptor on the class."""

    def func(self):
        """Print a fixed greeting; returns None."""
        message = '我胡汉三又回来了'
        print(message)
f1=Foo()
f1.func() # call the class's method — i.e. invoke a non-data descriptor
# A function is a non-data descriptor object (everything is an object).
print(dir(Foo.func))
print(hasattr(Foo.func,'__set__'))     # False: no __set__ => non-data descriptor
print(hasattr(Foo.func,'__get__'))     # True: functions implement __get__
print(hasattr(Foo.func,'__delete__'))  # False
# One might ask: descriptors are classes, so how is a function a descriptor?
# A descriptor is applied by instantiating it as a class attribute;
# a function is exactly such an object, produced by a non-data descriptor class.
# Strings work the same way.
f1.func='这是实例属性啊'  # instance attribute shadows the non-data descriptor
print(f1.func)
del f1.func # delete the instance attribute (the non-data shadow)
f1.func()   # lookup falls back to the class method again
# Generated by Django 2.0.3 on 2018-10-21 06:31
import datetime
from django.db import migrations, models
import django.utils.timezone
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Auto-generated: adds a `date` field to File and FileDownload.

    dependencies = [
        ('upload', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='file',
            name='date',
            field=models.DateField(default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='filedownload',
            name='date',
            # One-off default captured at migration time to back-fill existing rows.
            field=models.DateField(default=datetime.datetime(2018, 10, 21, 6, 31, 15, 103366, tzinfo=utc)),
            preserve_default=False,
        ),
    ]
|
import numpy as np
import numpy.fft as fft
# Constant parameters
Re = 10
nu = 1/Re
NX,NY = 128,128
LX,LY = 2*np.pi,2*np.pi
deltaX = LX/(NX-1)
deltaY = LY/(NY-1)
deltaT = 0.001
iter_step = 3000
def pre():
    """Build centered wavenumber grids for the spectral solver.

    Returns (kx1, kx2, ky1, ky2): kx1/ky1 are the real wavenumbers in
    [-NX/2, NX/2) as a column/row vector; kx2/ky2 are the same grids
    multiplied by 1j (ready for spectral differentiation).
    """
    kx1 = np.linspace(-NX / 2, NX / 2, NX, endpoint=False).reshape(NX, 1)
    kx2 = kx1 * 1j
    ky1 = kx1.T
    ky2 = kx2.T
    return kx1, kx2, ky1, ky2
def initial():
    """Create a random velocity field and spin it up pseudo-spectrally.

    Builds u, v from random low-wavenumber Fourier modes plus noise, then
    runs 1000 explicit-Euler steps with 2x zero-padded (de-aliased)
    nonlinear products, renormalizing to unit RMS each step.
    Returns (u, spectral_u). Uses module globals NX, NY, deltaX, kx1/kx2/ky1/ky2.
    """
    u, v = np.zeros([128, 128]), np.zeros([128, 128])
    # Random superposition of modes with |k| <= 4.
    # NOTE(review): both phase terms use i*deltaX — presumably the second
    # should be j*deltaY; confirm intent.
    for i in range(NX):
        for j in range(NY):
            for w in range(9):
                for l in range(9):
                    u[i, j] += np.random.randn() * np.cos((w - 4) * i * deltaX + (l - 4) * i * deltaX) + \
                               np.random.randn() * np.sin((w - 4) * i * deltaX + (l - 4) * i * deltaX)
                    v[i, j] += np.random.randn() * np.cos((w - 4) * i * deltaX + (l - 4) * i * deltaX) + \
                               np.random.randn() * np.sin((w - 4) * i * deltaX + (l - 4) * i * deltaX)
    # Rescale to [-2, 2]-ish and add uniform noise in [-2, 2).
    u = 2 * np.real(u) / np.max(np.real(u)) + (np.random.random([128, 128]) - 0.5) * 4
    v = 2 * np.real(v) / np.max(np.real(v)) + (np.random.random([128, 128]) - 0.5) * 4
    spectral_u = fft.fftshift(fft.fft2(u)) / (NX * NY)
    spectral_v = fft.fftshift(fft.fft2(v)) / (NX * NY)
    # 2x zero-padded spectra used to form de-aliased products.
    spectral_u_extention, spectral_v_extention = np.zeros((int(NX * 2), int(NY * 2))) + 0j, np.zeros(
        (int(NX * 2), int(NY * 2))) + 0j
    # NOTE(review): the underscore-prefixed buffers below are never used.
    _spectral_u_extention, _spectral_v_extention = np.zeros((int(NX * 2), int(NY * 2))) + 0j, np.zeros(
        (int(NX * 2), int(NY * 2))) + 0j
    for epoch in range(1000):
        # Embed spectra in the padded grid; products are formed in physical space.
        spectral_u_extention[int(NX / 2):int(NX * 3 / 2), int(NY / 2):int(NY * 3 / 2)] = spectral_u
        spectral_v_extention[int(NX / 2):int(NX * 3 / 2), int(NY / 2):int(NY * 3 / 2)] = spectral_v
        u_extention = fft.ifft2(fft.ifftshift(spectral_u_extention)) * (NX * NY)
        v_extention = fft.ifft2(fft.ifftshift(spectral_v_extention)) * (NX * NY)
        UV = u_extention * v_extention
        UU = u_extention * u_extention
        VV = v_extention * v_extention
        # Truncate the product spectra back to the NX x NY grid.
        spectral_uv = fft.fftshift(fft.fft2(UV))[int(NX / 2):int(NX * 3 / 2), int(NY / 2):int(NY * 3 / 2)] / (NX * NY)
        spectral_uu = fft.fftshift(fft.fft2(UU))[int(NX / 2):int(NX * 3 / 2), int(NY / 2):int(NY * 3 / 2)] / (NX * NY)
        spectral_vv = fft.fftshift(fft.fft2(VV))[int(NX / 2):int(NX * 3 / 2), int(NY / 2):int(NY * 3 / 2)] / (NX * NY)
        # Explicit Euler: advection terms plus fixed 0.05 viscosity, dt = 0.001.
        RHS_x = -kx2 * spectral_uu - ky2 * spectral_uv - 0.05 * (kx1 * kx1 + ky1 * ky1) * spectral_u
        RHS_y = -kx2 * spectral_uv - ky2 * spectral_vv - 0.05 * (kx1 * kx1 + ky1 * ky1) * spectral_v
        spectral_u = spectral_u + RHS_x * 0.001
        spectral_v = spectral_v + RHS_y * 0.001
        # Renormalize to unit RMS each step to keep the field bounded.
        u = fft.ifft2(fft.ifftshift(spectral_u)) * (NX * NY)
        v = fft.ifft2(fft.ifftshift(spectral_v)) * (NX * NY)
        u = np.sqrt(1 / np.mean(u * u)) * u
        v = np.sqrt(1 / np.mean(v * v)) * v
        spectral_u = fft.fftshift(fft.fft2(u)) / (NX * NY)
        spectral_v = fft.fftshift(fft.fft2(v)) / (NX * NY)
    return u, spectral_u
# Generate and save 1001 datasets: 2-D diffusion of a random initial field,
# integrated with the Fourier spectral method (explicit Euler in time).
for dataset_number in range(0,1001):
    kx1,kx2,ky1,ky2 = pre()
    u, spectral_u = initial()
    # Allocate some variables
    # {
    sum_u = np.zeros([iter_step+1,128,128])  # snapshot of u at every step
    sum_u[0] = np.real(u)
    # }
    # Iterations for Fourier spectral methods
    #{
    for epoch in range(iter_step):
        # Spectral heat equation: du_hat/dt = -nu * (kx^2 + ky^2) * u_hat
        RHS_x = -nu*(kx1*kx1+ky1*ky1)*spectral_u
        spectral_u = spectral_u + RHS_x*deltaT
        u = fft.ifft2(fft.ifftshift(spectral_u))*(NX*NY)
        u = np.real(u)
        sum_u[epoch+1] = u
        # print(epoch)
    # }
    # Save dataset
    # NOTE(review): 'headting' is likely a typo for 'heating', but downstream
    # readers may already depend on this filename — do not rename silently.
    sum_u.tofile('dataset/2d_headting_sdt_'+str(dataset_number)+'.dat')
    print(dataset_number)
|
# Default application-wide settings, grouped by subsystem.
default_global_configuration = {
    'dashboard': {
        'show_not_required_requisitions': True,
        'show_not_required_scheduled_entries': True,
        'allow_additional_requisitions': False},
    'appointment': {
        'allowed_iso_weekdays': '1234567',        # ISO weekday digits on which appointments may fall
        'use_same_weekday': True,
        'default_appt_type': 'default',
        'appointments_per_day_max': 30,
        'appointments_days_forward': 8},
}
|
import mock
import unittest
import datetime
from common import helpers
@mock.patch('common.config.DEFAULT_FILTER', {"validity": {"$ne": False}})
class Test(unittest.TestCase):
    """Tests for helpers.build_mongo_query with a patched default filter."""

    # No argument passed: only the default validity filter is returned
    def test_build_mongo_query_no_args(self):
        response = helpers.build_mongo_query({})
        self.assertEqual({'validity': {'$ne': False}}, response)

    # Date field passed - dob: dates are promoted to datetimes for Mongo
    def test_build_mongo_query_datetype1_args(self):
        response = helpers.build_mongo_query({'dob': datetime.date(1900, 1, 1)})
        self.assertEqual({'dob': datetime.datetime(1900, 1, 1, 0, 0), 'validity': {'$ne': False}}, response)

    # String field passed - name: passed through unchanged
    def test_build_mongo_query_name_args(self):
        response = helpers.build_mongo_query({'name': 'Luke Skywalker'})
        self.assertEqual({'name': 'Luke Skywalker', 'validity': {'$ne': False}}, response)
|
#!/usr/bin/env python
'''
Author: Marco Boretto
Mail: marco.boretto@cern.ch
Python Version: 2.7
'''
import logging
import logging.config
from controllers import Gfal2Controller
import logging
import gfal2
if __name__ == '__main__':
# logging.config.fileConfig('na62cdr-logging.ini')
LOGGER = logging.getLogger(__name__)
#exist
URLS = ['/castor/cern.ch/na62/data/2017/raw/run/008042/na62raw_1503965806-02-008042-1501.dat']
#not exist
URLS = ['/castor/cern.ch/na62/data/2017/raw/run/008042/na62raw_1503946483-01-008042-0795.dat']
URLS = [
'/castor/cern.ch/na62/data/2017/raw/run/TestTransfer-2015-05-15-15-15',
'/castor/cern.ch/na62/data/2017/raw/run/008042/na62raw_1503946483-01-008042-0795.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505894648-02-008128-3028.dat',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/chod_run08128_burst03030',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/irc_run08128_burst03030',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/muv3_run08128_burst03030',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/lkr_run08128_burst03030',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505894728-01-008128-3033.dat',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/irc_run08128_burst03040',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895239-03-008128-3065.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895255-01-008128-3066.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895270-02-008128-3067.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895286-03-008128-3068.dat',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/muv3_run08128_burst03070',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/irc_run08128_burst03070',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/rich_run08128_burst03070',
'/castor/cern.ch/na62/data/2017/raw/run/primitives/lkr_run08128_burst03070',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895302-01-008128-3069.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895318-02-008128-3070.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505866527-02-008128-1627.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895350-01-008128-3072.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895366-02-008128-3073.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505866575-02-008128-1630.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895398-01-008128-3075.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505895414-02-008128-3076.dat',
'/castor/cern.ch/na62/data/2017/raw/run/008128/na62raw_1505894616-03-008128-3026.dat',
'/castor/cern.ch/na62/data/2016/raw/run/2016-09-25/na62raw_1474829017-03-006356-0827.dat',
'/castor/cern.ch/na62/data/2016/raw/run/2016-09-26/na62raw_1474841557-02-006356-1468.dat'
]
    # Probe each castor URL: existence, size, and (dead code) tape status.
    castor = Gfal2Controller()
    for url in URLS:
        if castor.file_exist(url):
            LOGGER.info("File %s Exists!", url)
        else:
            LOGGER.info("File %s Not Exists!", url)
        print castor.get_size(url)
        #info = castor.stat(url)
        #print info.st_size
        continue
        # NOTE(review): everything below `continue` is unreachable —
        # debugging leftover; confirm before deleting.
        if castor.is_on_tape(url):
            LOGGER.info("File %s On tape!", url)
        else:
            LOGGER.info("File %s not On tape!", url)
        exit(1)
    # Direct gfal2 probe of one SRM path: dump xattrs and stat fields.
    path = 'srm://srm-public.cern.ch:8443/srm/managerv2?SFN=' + '/castor/cern.ch/na62/data/2017/raw/run/007987/na62raw_1502717519-01-007987-0066.dat'
    context = gfal2.creat_context()
    try:
        print context.listxattr(path)
        print context.stat(path)
        a = context.stat(path)
        print a
        print a.st_gid
        print a.st_size
        #['user.replicas', 'user.status', 'srm.type', 'spacetoken']
        #print context.getxattr(path, 'user.status')
        #print context.getxattr(path, 'user.replicas')
        #print context.getxattr(path, 'srm.type')
        #print context.getxattr(path, 'spacetoken')
        #self.logger.debug('Castor file status: '+ castor_status + ' ' + path)
    except gfal2.GError, error:
        #logger.debug('Must be an existing file ' + path + ' ' + str(error.code) + ' ' + error.message)
        print ('Must be an existing file ' + path + ' ' + str(error.code) + ' ' + error.message)
# def file_exist(self, path):
# """
# Return true if a file exist
# """
# try:
# self.context.lstat(self.get_complete_url(path))
# self.logger.debug('File exist: ' + path)
# return True
# except gfal2.GError, error:
# self.logger.debug('File NOT exist: ' + path + ' ' + str(error.code) + ' ' + error.message)
# return False
#
# def is_on_tape(self, path):
# """
# Return true if a file is on tape
# """
# if not self.file_exist(path):
# return False
#
#
# try:
# castor_status = self.context.getxattr(self.get_complete_url(path), 'user.status')
# self.logger.debug('Castor file status: '+ castor_status + ' ' + path)
# except gfal2.GError, error:
# self.logger.debug('Must be an existing file ' + path + ' ' + str(error.code) + ' ' + error.message)
#
# if castor_status.strip() in ["NEARLINE", "ONLINE_AND_NEARLINE"]:
# self.logger.debug('File on tape: ' + path + ' ')
# return True
# else:
# self.logger.debug('File NOT on tape: ' + path)
# return False
|
"""
Extracts frames from youtube live streams
Todo:
- Add tests
Ref:
https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
"""
import cv2
import pafy
import youtube_dl
import time
import os
from urllib.error import HTTPError
import configparser
import logging
from libs import log
logger = logging.getLogger("Frame-Extractor")

# Runtime settings come from the [frame-extract] section of config.ini.
config = configparser.ConfigParser()
config.read("./config.ini")
NUM_FRAMES_TO_EXTRACT = config.getint("frame-extract", "NUM_FRAMES_TO_EXTRACT")  # frames to grab per run
WAIT_PER_FRAME = config.getint("frame-extract", "WAIT_PER_FRAME")                # seconds between grabs
CAPTURE_RESOLUTION_HEIGHT = config.getint("frame-extract", "CAPTURE_RESOLUTION_HEIGHT")
CAPTURE_RESOLUTION_WIDTH = config.getint("frame-extract", "CAPTURE_RESOLUTION_WIDTH")
FRAME_SAVE_DIR = config.get("frame-extract", "FRAME_SAVE_DIR")  # presumably ends with a separator — paths are concatenated below
def load_video_stram(youtube_stream_url: str):
    """
    Loads youtube live stream and returns vPafy object
    Args:
        youtube_stream_url(str)
    Returns:
        vPafy: vpafy object, or None if the stream could not be loaded
    Raises:
        ValueError: If youtube stream is not live, then throws an error
        HTTPError: Unable to fulfill HTTP request, errcode is outputted
    ToDo:
        What if non-youtube URL?
    """
    try:
        # BUG FIX: was logger.debug(f"...:", url) — an extra positional arg
        # with no %s placeholder makes logging raise a formatting error.
        logger.debug("Loading Youtube Stream: %s", youtube_stream_url)
        vPafy = pafy.new(youtube_stream_url)
        logger.debug("Checeking if Stream is live")
        # Live streams report a zero duration.
        isLive = vPafy.duration == '00:00:00'
        print(f"Stream Title: {vPafy.title}")
        logger.debug(f"Is Steam Live: {isLive}")
        if not isLive:
            raise ValueError
        return vPafy
    except HTTPError as err:
        print("HTTP Error", err.errno)
    except ValueError:
        print("The Youtube video stream is not live!")
    return None  # explicit: callers must handle the failure case
def main():
    """Capture NUM_FRAMES_TO_EXTRACT frames from the configured live stream
    and save them as JPEGs under FRAME_SAVE_DIR, one every WAIT_PER_FRAME s."""
    logger.info("Starting Youtube Live Frame Extractor.")
    vPafy = load_video_stram(
        config.get("frame-extract", "LIVESTREAM_VIDEO_URL")
    )
    if vPafy is None:
        # load_video_stram returns None on HTTP errors / non-live streams;
        # previously this crashed below with AttributeError on .getbest.
        logger.error("Could not load the video stream; aborting.")
        return
    # Create frame output directory
    if not os.path.exists(FRAME_SAVE_DIR):
        os.makedirs(FRAME_SAVE_DIR)
    play = vPafy.getbest(preftype="mp4")
    cap = cv2.VideoCapture(play.url)
    # Assign video capture resolution (3 = width, 4 = height property ids)
    cap.set(3, CAPTURE_RESOLUTION_WIDTH)
    cap.set(4, CAPTURE_RESOLUTION_HEIGHT)
    try:
        for i in range(NUM_FRAMES_TO_EXTRACT):
            logger.debug(f"Capturing frame {i}/{NUM_FRAMES_TO_EXTRACT}")
            ret, frame = cap.read()
            if not ret:
                # Stream hiccup: skip instead of writing an empty image.
                logger.warning("Failed to read frame %d; skipping", i)
                continue
            frame_name = (f"frame_{i}.jpg")
            savepath = FRAME_SAVE_DIR + frame_name
            logger.info(f"Saving {frame_name} ---> {i}/{NUM_FRAMES_TO_EXTRACT}")
            cv2.imwrite(savepath, frame)
            logger.debug(f"Waiting for for {WAIT_PER_FRAME}s to load next frame")
            time.sleep(WAIT_PER_FRAME)
    finally:
        cap.release()  # always free the capture device, even on error
    logger.info(f"Extracted {NUM_FRAMES_TO_EXTRACT} frames successfully")
if __name__=="__main__":
    # Console logging at INFO; any crash is logged with traceback, then re-raised.
    logger.setLevel(logging.INFO)
    log.add_stream_handler(logger)
    try:
        main()
    except Exception as error:
        logger.exception("Unhandled exception:")
        raise error
|
import itertools
class Conjunto:
    """A set implemented over a plain list, preserving insertion order."""

    def __init__(self):
        self.conjunto = []

    def adicionar(self, item):
        """Add item if not already present (elements stay unique)."""
        if item not in self.conjunto:
            self.conjunto.append(item)

    def remover(self, item):
        """Remove item; raises ValueError if absent (list.remove semantics)."""
        self.conjunto.remove(item)

    def pertinencia(self, item):
        """Membership test: item in this set."""
        return item in self.conjunto

    def contido(self, conjunto):
        """True if every element of *conjunto* belongs to this set."""
        for i in conjunto:
            if i not in self.conjunto:
                return False
        return True

    def uniao(self, conjunto):
        """Return the union of this set and *conjunto* as a new list.

        BUG FIX: the original aliased the argument and appended to it while
        iterating it through itertools.chain — mutating the caller's list and
        looping forever. Now builds a fresh, duplicate-free list.
        """
        conjuntoResultado = list(self.conjunto)
        for i in conjunto:
            if i not in conjuntoResultado:
                conjuntoResultado.append(i)
        return conjuntoResultado

    def interseccao(self, conjunto):
        """Elements present in both sets."""
        conjuntoResultado = []
        for i in self.conjunto:
            if i in conjunto:
                conjuntoResultado.append(i)
        return conjuntoResultado

    def diferenca(self, conjunto):
        """Elements of this set not present in *conjunto*."""
        conjuntoResultado = []
        for i in self.conjunto:
            if i not in conjunto:
                conjuntoResultado.append(i)
        return conjuntoResultado

    def complemento(self, conjunto):
        """Same computation as diferenca (kept for interface compatibility)."""
        conjuntoResultado = []
        for i in self.conjunto:
            if i not in conjunto:
                conjuntoResultado.append(i)
        return conjuntoResultado

    def cartesiano(self, conjunto):
        """Cartesian product as a list of (a, b) tuples."""
        conjuntoResultado = []
        for elemento in itertools.product(self.conjunto, conjunto):
            conjuntoResultado.append(elemento)
        return conjuntoResultado

    def disjuntos(self, conjunto):
        """Symmetric listing: shared elements are tagged with their origin."""
        conjuntoResultado = []
        for i in self.conjunto:
            if i not in conjunto:
                conjuntoResultado.append(i)
            else:
                conjuntoResultado.append((i, 'A'))
        for i in conjunto:
            if i not in self.conjunto:
                conjuntoResultado.append(i)
            else:
                conjuntoResultado.append((i, 'B'))
        return conjuntoResultado

    def conuntoPartes(self):
        """Power set as a list of tuples.

        BUG FIX: the loop ran range(n), omitting the subset of size n —
        the power set must include the full set itself.
        """
        conjuntoResult = []
        tamanhoConjunto = len(self.conjunto)
        for x in range(tamanhoConjunto + 1):
            for i in itertools.combinations(self.conjunto, x):
                conjuntoResult.append(i)
        return conjuntoResult
# Demo: build {a, b, c, d} and print its power set.
a = Conjunto()
a.adicionar('a')
a.adicionar('b')
a.adicionar('c')
a.adicionar('d')
print(a.conuntoPartes())
|
import time, requests, json, urllib, os
from flask import Flask
app = Flask(__name__)
def get_data():
    """Fetch the full HT2020 admission-points table from statistik.uhr.se.

    The JSON request payload is URL-escaped into a single `request` query
    parameter and routed through the cors-anywhere proxy.
    Returns the raw requests.Response (call .text / .json() on it).
    """
    # Query payload understood by the UHR statistics endpoint.
    data = {
        "tillfalle":"Urval1",
        "vy":"Antagningspoang",
        "antagningsomgang":"HT2020",
        "larosateId":"",
        "utbildningstyp":"p",
        "fritextFilter":"",
        "urvalsGrupp":"",
        "firstResult":0,
        "maxResults":25000,          # effectively "all rows"
        "sorteringsKolumn":1,
        "sorteringsOrdningDesc":False,
        "requestNumber":1,
        "paginate":True
    }
    data = urllib.parse.quote(json.dumps(data))  # JSON-encode, then URL-escape
    print(data)
    # NOTE(review): the public cors-anywhere proxy is rate-limited and often
    # disabled — confirm it is still required for a server-side fetch.
    encoded = "https://cors-anywhere.herokuapp.com/statistik.uhr.se/rest/stats/tableData?request=" + data
    print(encoded)
    return requests.get(
        encoded,
        headers={
            "x-requested-with": "Python 3.8"
        }
    )
jdata = None
# Load the cached response if present; otherwise fetch and write the cache.
try:
    data = open("cache.json")
    text = data.read()
    jdata = json.loads(text)
    data.close()
except:  # NOTE(review): bare except also hides JSON/permission errors, silently forcing a refetch
    data = get_data()
    cache = open("cache.json","w+")
    cache.write(data.text)
    cache.close()
    jdata = json.loads(data.text)
@app.route('/<university>/<merit>')
def search(university, merit):
    """Placeholder endpoint: echoes the two path parameters concatenated.

    TODO: filter the cached statistics (jdata) by university and merit
    and return matching rows.
    """
    response = university + str(merit)
    return response
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run()
# Read a non-negative integer and print its decimal digits in reverse order.
a = int(input())
reverse = 0
while a > 0:
    reverse = reverse * 10 + a % 10  # shift accumulated digits, append last digit
    a //= 10
print("reverse of entered number is = %d" %reverse)
|
############################################################################################
## A multifuntional python script to carryout secondary structure analyses using MDTraj ##
## Author: Sid Narasimhan ##
############################################################################################
"""
GENERAL INFO:
make sure you copy this script in the folder containing your md run files
Usage: python ss_mdtraj.py <xtc file> <tpr file> <function_name>
"""
import os
import sys
import pickle as p
import matplotlib as mpl
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import mdtraj as md
from math import pi
# Load the trajectory: argv[1] = .xtc coordinates, argv[2] = topology file.
traj = md.load_xtc(sys.argv[1],top=sys.argv[2])
n_frames = traj.n_frames
""" Dictionary of secondary structures """
""" function_name: dssp """
""" Output: DSSP matrix [residueXframes] """
""" Dictionary of secondary structures """
""" function_name: dssp """
""" Output: DSSP matrix [residueXframes] """
if sys.argv[3] == 'dssp':
    # One simplified DSSP code (H/E/C) per residue per frame.
    dssp_array = md.compute_dssp(traj,simplified=True)
    # BUG FIX: write() was handed a whole numpy row (TypeError); join the
    # per-residue codes into one string per frame. Also removed the unused
    # `ss` variable and the open-'w'-then-'r+' dance (a single 'w' open
    # already truncates the file).
    with open('dssp.txt','w') as file:
        for i in range(n_frames):
            #file.write('Frame_%s: ' % i) """ UNCOMMENT THIS LINE IF YOU WANT FRAME NUMBERS IN YOUR OUTPUT """
            file.write(''.join(dssp_array[i]))  # one frame per line
            file.write('\n')
""" Ramachandran plot """
""" function_name: rplot """
""" WARNING: THIS WILL GENERATE <n_frames> PNGS """
if sys.argv[3] == 'rplot':
phi = md.compute_phi(traj)
psi = md.compute_psi(traj)
print phi[1][traj.n_frames-1]
for i in range(20):
plt.scatter(phi[1][i], psi[1][i], marker = 'x')
plt.xlabel(r'$\Phi$ Angle [radians]')
plt.ylabel(r'$\Psi$ Angle [radians]')
plt.xlim(-pi,pi)
plt.ylim(-pi,pi)
plt.savefig('%s_frame' % i, dpi=320)
plt.close()
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 19 11:04:34 2019
@author: Blake
"""
import sqlite3
import os, sys
prev_violations = []
# Checks if database file exists and if it does connects to database, else quits the script
# Checks if database file exists and if it does connects to database, else quits the script
def check_db_file():
    """Connect to ./violations.db and kick off the import.

    Returns (cursor, connection) on success; exits the process when the
    database file is missing.
    """
    db_file = "./violations.db"
    # If file exists, run query
    if os.path.isfile(db_file):
        # Connecting to the database
        connection = sqlite3.connect("violations.db")
        cursor = connection.cursor()
        append_violations(cursor,connection)
        return cursor, connection
    # If file doesn't exist print error message
    else:
        print("Error: %s file not found" % db_file)
        # BUG FIX: was `sys.exit` (the function object, never called),
        # so the script silently returned None instead of exiting.
        sys.exit(1)
def append_violations(cursor,connection):
    """Print facilities with any violation points (descending) and stash
    their name/address rows in the module-level prev_violations list."""
    print("\nPrinting violations that are greater than one in descending order, please wait...\n")
    # Print query where violations are greater than one, sorted in descending order by the violations points count
    cursor.execute("SELECT DISTINCT COUNT(violations.points), inspections.facility_name, facility_address, facility_zip, facility_city FROM inspections INNER JOIN violations on violations.serial_number = inspections.serial_number WHERE violations.points >= 1 GROUP BY facility_name ORDER BY COUNT(violations.points) DESC")
    result = cursor.fetchall()
    # Prints the first 2 values from the query, then stores the rest of the query after the second index to the prev_violations array
    for r in result:
        print(r[:2])          # (points_count, facility_name)
        prev_violations.append(r[1:])  # (name, address, zip, city) for the insert step
    print("\nImporting facility violations data into table previous_violations, please wait....\n")
    check_table_exists(cursor,connection)
# Function that checks if the table exists and if it does it will drop the table then recreate it.
def check_table_exists(cursor, connection):
try:
# Creates new table previous violations with the facility name, address, zip and city cells
cursor.execute("CREATE TABLE previous_violations (facility_name VARCHAR(500), facility_address VARCHAR(255), facility_zip VARCHAR(10), facility_city VARCHAR(255))")
# Inserts the prev_violations array into the previous_violations table
for v in prev_violations:
cursor.execute('INSERT INTO previous_violations VALUES (?,?,?,?)', v)
print("Created table previous_violations and data import successful.")
# Commiting the database changes and closing the connection
connection.commit()
connection.close()
except:
print("The previos_violations table already exists, deleting table and creating again.\n")
# Drops table
cursor.execute("DROP TABLE previous_violations")
# Creates new table previous violations with the facility name, address, zip and city cells
cursor.execute("CREATE TABLE previous_violations (facility_name VARCHAR(500), facility_address VARCHAR(255), facility_zip VARCHAR(10), facility_city VARCHAR(255))")
# Inserts the prev_violations array into the previous_violations table
for v in prev_violations:
cursor.execute('INSERT INTO previous_violations VALUES (?,?,?,?)', v)
print("Created table previous_violations and data import successful.")
# Commiting the database changes and closing the connection
connection.commit()
connection.close()
check_db_file() |
def multiply(x,y):
    """Multiply x by y via repeated addition; print and return the product.

    Note: y is treated as a repetition count, so range(y) is empty for
    y < 0 and the result is 0 (original behavior, preserved).
    """
    result = 0
    for i in range(y):
        result += x
    print(result)
    return result  # new: also return the product so callers can use it
multiply(4,6)  # demo: prints 24
|
"""
Making an index page with search Engine for the Leetcode root folder
@author: pkugoodspeed
@date: 06/12/2018
@copyright: jogchat.com
"""
import os
from jinja2 import Template
from .utils import ColorMessage, getHtmlElement
def _getIndexStyle(font="Chalkduster", theme="silver", boxcolor="gray", hovercolor="orange"):
return """body {{
background-color: {BODY};
font-family: {FONT};
}}
#Input {{
background-position: 10px 12px;
background-repeat: no-repeat;
width: 100%;
font-size: 16px;
padding: 12px 20px 12px 40px;
border: 1px solid #ddd;
margin-bottom: 12px;
}}
#folders {{
list-style-type: none;
padding: 0;
margin: 0;
}}
#folders li a {{
border: 1px solid #ddd;
margin-top: -1px; /* Prevent double borders */
background-color: {BOX};
padding: 8px;
text-decoration: none;
font-size: 17px;
color: black;
display: block
}}
#folders li a:hover:not(.header) {{
background-color: {HOVER};
}}
""".format(FONT=font, BODY=theme, BOX=boxcolor, HOVER=hovercolor)
def _getSearchScripts():
    """
    Here we only consider folders

    Returns the client-side JavaScript for the search box: _filter() hides
    every <li> under #folders whose link text does not contain all of the
    space-separated keywords typed into #Input (case-insensitive).
    """
    return """
function _check(filename, keywords) {
    var i;
    var l = keywords.length;
    for(i = 0; i < l; i++){
        if(filename.indexOf(keywords[i]) == -1){
            return false;
        }
    }
    return true;
}

function _filter() {
    var input, keywords, ul, li, len_li, filename, i;
    input = document.getElementById("Input");
    keywords = input.value.toUpperCase().split(" ");
    ul = document.getElementById("folders");
    li = ul.getElementsByTagName("li");
    len_li = li.length;
    for (i = 0; i < len_li; i++) {
        filename = li[i].getElementsByTagName("a")[0].innerHTML.toUpperCase();
        if (_check(filename, keywords)) {
            li[i].style.display = "";
        } else {
            li[i].style.display = "none";
        }
    }
}
"""
def _getIndexBody(path, addr):
    """Build the HTML body: heading, search box, and a sorted folder list.

    Only directories named like "[<number>]..." are listed, sorted by that
    numeric id; each becomes a link to addr/<folder>.
    """
    if not os.path.exists(path):
        ColorMessage(path + " does not exist!", "red")
        # NOTE(review): execution falls through to os.listdir below and will
        # raise for a missing path — confirm whether an early return was intended.
    folders = [f for f in os.listdir(path) if os.path.isdir(path + "/" + f) and f[0] == "["]
    folders.sort(key=lambda x: int(x[1:].split("]")[0]))  # numeric problem id inside [ ]
    hl_folders = [getHtmlElement(
        tag='a', selfclose=False, msg=f, href="\"{ADDR}/{F}\"".format(ADDR=addr, F=f), target="\"_self\"") for f in folders]
    li_folders = [getHtmlElement(tag='li', msg=f) for f in hl_folders]
    body = getHtmlElement(tag="h1", msg="Problems") + "\n"
    body += getHtmlElement(
        tag="input", msg="", selfclose=True, type="\"text\"", id="\"Input\"",
        onkeyup="\"_filter()\"", placeholder="\"Search for keywords...\"", title="\"SE\"") + "\n"
    body += getHtmlElement(tag='ul', msg="\n".join(li_folders), selfclose=False, id="\"folders\"")
    return body
def makeSearchIndex(path, addr, template_file="./templates/base.html"):
    """ Creating a index.html page for problem directory

    Renders template_file with the generated styles, body, and search
    scripts, writes it to <path>/index.html, and chmods it 0o777.
    """
    ColorMessage("Generating Index Page for " + addr + "...", "cyan")
    # Context manager guarantees the template file is closed even on error.
    with open(template_file, 'r') as f:
        template = Template(f.read())
    index_file = path + "/index.html"
    if os.path.exists(index_file):
        os.remove(index_file)
    ColorMessage("\tGetting styles...", "cyan")
    styles=_getIndexStyle()
    ColorMessage("\tGetting page contents...", "cyan")
    page_body=_getIndexBody(path, addr)
    ColorMessage("\tGetting search scripts...", "cyan")
    page_scripts = _getSearchScripts()
    try:
        with open(index_file, "w") as idx:
            idx.write(template.render(
                styles=styles,
                page_body=page_body,
                page_scripts=page_scripts))
        ColorMessage("Creating index page succeed!", "cyan")
    except Exception:  # narrowed from bare except: don't swallow Ctrl-C/SystemExit
        ColorMessage("Creating index page failed!", "red")
    try:
        os.chmod(index_file, 0o777)
        ColorMessage("Changing permission level succeed!", "cyan")
    except Exception:  # narrowed from bare except
        ColorMessage("Changing permission level failed!", "red")
|
# Service endpoint configuration.
BASE_HOST = "http://0.0.0.0"
PORT = 5000
BASE_PATH = "/api/"


def get_formatted_URL():
    """Return the fully-qualified API base URL, e.g. http://0.0.0.0:5000/api/."""
    return "%s:%d%s" % (BASE_HOST, PORT, BASE_PATH)
|
"""
@author: Tingxuan Gu
"""
import logging
import os
from datetime import datetime, date
from typing import Optional
from urllib import error
import rootpath
import wget
rootpath.append()
from backend.connection import Connection
from paths import SOIL_MOIS_DATA_DIR
from backend.data_preparation.crawler.crawlerbase import CrawlerBase
logger = logging.getLogger('TaskManager')
class SoilMoisCrawler(CrawlerBase):
    """
    This class is responsible for collecting data from NASAGrace
    """
    TIME_FORMAT = "%Y%m%d"

    def __init__(self):
        super().__init__()
        self.baseDir = 'https://nasagrace.unl.edu/GRACE'
        # A complete raster has exactly 872505 rows per datetime.
        self.select_exists = 'select datetime from env_soil_moisture group by datetime having count(*) = 872505'

    def crawl(self, date_stamp: date) -> Optional[str]:
        """
        :param date_stamp: the date stamp of the file which is being crawled
        :return: crawled file's path if file exists on NASAGrace, else None
        """
        # Consistency fix: reuse the class constant instead of a second
        # hard-coded '%Y%m%d' literal.
        formatted_date_stamp = date_stamp.strftime(self.TIME_FORMAT)
        file_url = f'{self.baseDir}/{formatted_date_stamp}/sfsm_perc_0125deg_US_{formatted_date_stamp}.tif'
        # Compute the local target path once (was duplicated).
        local_path = os.path.join(SOIL_MOIS_DATA_DIR, formatted_date_stamp + '.tif')
        if not os.path.isdir(SOIL_MOIS_DATA_DIR):
            os.makedirs(SOIL_MOIS_DATA_DIR)
        try:
            logger.info(f'trying to download file: {file_url}')
            wget.download(file_url, local_path)
        except error.HTTPError:
            logger.info(f'file: {file_url} not found, skipped')
            return None  # explicit: file does not exist on the server
        else:
            logger.info(f'file: {file_url} downloaded')
            return local_path

    def get_exists(self) -> set:
        """gets how far we went last time"""
        return set(Connection.sql_execute(self.select_exists))
if __name__ == '__main__':
    # Manual smoke test: download the raster for one known date.
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())
    target_date = "20131230"
    crawler = SoilMoisCrawler()
    crawler.crawl(datetime.strptime(target_date, SoilMoisCrawler.TIME_FORMAT))
|
import asyncio
import json
import pymongo
import redis
import opentracing
import logging
import time
from nats.aio.client import Client as NATS
from nats.aio.errors import ErrConnectionClosed, ErrTimeout, ErrNoServers
from jaeger_client import Config
async def run(loop):
    """Subscribe to the NATS 'updates' subject and fan each message out to
    MongoDB and Redis, tracing every write with Jaeger/opentracing."""
    # =============== NATS server connection ===============
    await nc.connect(servers=["http://35.223.171.148:4222"])
    future = asyncio.Future()  # NOTE(review): never awaited or resolved — confirm before removing
    # =============== Jaeger tracer configuration ============
    config = Config(
        config={  # usually read from some yaml config
            'sampler': {
                'type': 'const',
                'param': 1,  # sample every span
            },
            'local_agent': {
                'reporting_host': "34.72.72.45",
                'reporting_port': 5775,
            },
            'logging': True,
        },
        service_name='my-app',
    )
    tracer = config.initialize_tracer()
    with opentracing.tracer.start_span('Datos_BDS') as span:
        # ======== send incoming data to mongo ================
        async def message_handler(msg):
            data = json.loads(msg.data.decode())
            # from here the data is pushed to both databases
            con=pymongo.MongoClient('34.70.196.45',27017)
            with opentracing.tracer.start_span('Datos_Mongo',child_of=span) as span_mongo:
                try:
                    db=con.proyecto2
                    db.casos.insert({"name":data["name"],"depto":data["depto"],"age":data["age"],"form":data["form"],"state":data["state"]})
                    print("datos enviados a mongo")
                    span_mongo.log_event('send data mongo', payload=data)
                except Exception as e:
                    span_mongo.set_tag('send data mongo', 'Failure')
                    print(e)
                    print("problemas con la conexion de mongo")
                finally:
                    con.close()  # always release the Mongo connection
            #print(data)
            with opentracing.tracer.start_span('Datos_Redis',child_of=span) as span_redis:
                # ======== send data to redis ================
                try:
                    r = redis.Redis(host='34.70.196.45',port=6379)
                    # NOTE(review): JSON built by string concatenation breaks on
                    # quotes/escapes in values — json.dumps(data) would be safer.
                    r.rpush('proyecto2','{"name" : "'+data["name"]+'", "depto" : "'+data["depto"]+'", "age" :'+str(data["age"])+', "form" : "'+data["form"]+'", "state" : "'+data["state"]+'"}')
                    print("datos enviados a redis")
                    span_redis.log_event('send data redis', payload=data)
                except Exception as e:
                    span_redis.set_tag('send data redis', 'Failure')
                    print(e)
                    print("hay problemas en la conexion con redis")
            print(data)
        await nc.subscribe("updates", cb=message_handler)
if __name__ == '__main__':
    # the NATS client is module-global so run()'s handler can reach it
    nc = NATS()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(run(loop))
    try:
        # keep serving subscription callbacks until interrupted
        loop.run_forever()
    finally:
        loop.close()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2016-TODAY touch:n:track <https://tnt.pythonanywhere.com>
# Part of tnt: Flespi Monitoring addon for Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
class TntFlespiDeviceLog(models.Model):
    # Extends the Flespi device log with a computed convenience name field
    # mirroring the related device's display name.
    _inherit = 'tnt.flespi.device.log'
    device_name = fields.Char(compute='_compute_device_name')

    @api.depends('device_id')
    def _compute_device_name(self):
        # copy the linked device's display name onto every log record
        for rec in self:
            rec.device_name = rec.device_id.display_name
|
# Python program to get an input string from the user and replace all the
# empty spaces with the _ underscore symbol
input_string = input("enter the input string :")
print("The input string is ", input_string)
# str.replace substitutes every space in one pass; the old per-character loop
# re-computed and re-printed the same result once for EACH space in the input,
# and left replaced_string undefined when the string had no spaces at all
if " " in input_string:
    replaced_string = input_string.replace(" ", "_")
    print("The input string after replacing the empty spaces is ", replaced_string)
|
def funcao(nome, fabricante, **carro):
    """Build a car-description dict from the name, the maker and any extra
    keyword attributes passed by the caller."""
    carros = {"nome": nome, "fabricante": fabricante}
    carros.update(carro)
    return carros
# exercise funcao with two extra keyword attributes
resultado = funcao("camaro", "Chevrolet", ano=2015, porência=461)
print(resultado)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Paul Brodersen <paulbrodersen+entropy_estimators@gmail.com>
# Author: Paul Brodersen <paulbrodersen+entropy_estimators@gmail.com>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
TODO:
- make python3 compatible
- fix code for p-norm 1 and 2 (norm argument currently ignored)
- write test for get_pid()
- get_pmi() with normalisation fails test
"""
import numpy as np
import itertools
from scipy.spatial import cKDTree
from scipy.special import gamma, digamma
from scipy.stats import multivariate_normal, rankdata
log = np.log # i.e. information measures are in nats
# log = np.log2 # i.e. information measures are in bits
def unit_interval(arr):
    """Rescale each column of *arr* linearly onto [0, 1] (NaN-aware min/max)."""
    col_min = np.nanmin(arr, axis=0)
    col_max = np.nanmax(arr, axis=0)
    return (arr - col_min[None, :]) / (col_max - col_min)
def rank(arr):
    """Column-wise rank transform: replace each column of *arr* by the ranks
    of its values (scipy.stats.rankdata; ties receive average ranks)."""
    return np.apply_along_axis(rankdata, 0, arr)
def det(array_or_scalar):
    """Determinant of a square matrix; scalars and 1-element arrays pass through.

    Generalized: the original accessed ``.size`` directly, so plain Python
    scalars (e.g. ``det(2.0)``) raised AttributeError; ndarrays behave as before.
    """
    arr = np.asarray(array_or_scalar)
    if arr.size > 1:
        return np.linalg.det(arr)
    # matches the original contract: single values are returned unchanged
    return array_or_scalar
def get_h_mvn(x):
    """
    Computes the entropy of a multivariate Gaussian distribution:
    H(X) = (1/2) * log((2 * pi * e)^d * det(cov(X)))
    Arguments:
    ----------
    x: (n, d) ndarray
        n samples from a d-dimensional multivariate normal distribution
    Returns:
    --------
    h: float
        entropy H(X)
    """
    dim = x.shape[1]
    return 0.5 * log((2 * np.pi * np.e) ** dim * det(np.cov(x.T)))
def get_mi_mvn(x, y):
    """
    Computes the mutual information I between two multivariate normal random
    variables, X and Y:
    I(X, Y) = H(X) + H(Y) - H(X, Y)
    Arguments:
    ----------
    x, y: (n, d) ndarrays
        n samples from d-dimensional multivariate normal distributions
    Returns:
    --------
    mi: float
        mutual information I(X, Y)
    """
    # combine marginal and joint analytic entropies
    marginal_x = get_h_mvn(x)
    marginal_y = get_h_mvn(y)
    joint = get_h_mvn(np.c_[x, y])
    return marginal_x + marginal_y - joint
def get_pmi_mvn(x, y, z):
    """
    Computes the partial mutual information PMI between two multivariate normal
    random variables, X and Y, while conditioning on a third MVN RV, Z:
    I(X;Y|Z) = H(X,Z) + H(Y,Z) - H(X,Y,Z) - H(Z)
    where each entropy is H(.) = (1/2) * log(det(2 * pi * e * cov(.)))
    Arguments:
    ----------
    x, y, z: (n, d) ndarrays
        n samples from d-dimensional multivariate normal distributions
    Returns:
    --------
    pmi: float
        partial mutual information I(X;Y|Z)
    """
    dim = x.shape[1]
    two_pi_e = 2 * np.pi * np.e
    hz = 0.5 * log(two_pi_e ** dim * det(np.cov(z.T)))
    hxz = 0.5 * log(two_pi_e ** (2 * dim) * det(np.cov(x.T, y=z.T)))
    hyz = 0.5 * log(two_pi_e ** (2 * dim) * det(np.cov(y.T, y=z.T)))
    hxyz = 0.5 * log(two_pi_e ** (3 * dim) * det(np.cov(np.c_[x, y, z].T)))
    return hxz + hyz - hxyz - hz
def get_h(x, k=1, norm=np.inf, min_dist=0.):
    """
    Estimates the entropy H of a random variable x (in nats) based on
    the kth-nearest neighbour distances between point samples.
    @reference:
    Kozachenko, L., & Leonenko, N. (1987). Sample estimate of the entropy of a random vector. Problemy Peredachi Informatsii, 23(2), 9–16.
    Arguments:
    ----------
    x: (n, d) ndarray
        n samples from a d-dimensional multivariate distribution
    k: int (default 1)
        kth nearest neighbour to use in density estimate;
        imposes smoothness on the underlying probability distribution
    norm: 1, 2, or np.inf (default np.inf)
        p-norm used when computing k-nearest neighbour distances
        (currently ignored: the max norm is always used -- see module TODO)
    min_dist: float (default 0.)
        minimum distance between data points;
        smaller distances will be capped using this value
    Returns:
    --------
    h: float
        entropy H(X)
    """
    n, d = x.shape
    # volume of the d-dimensional unit ball...
    # if norm == np.inf: # max norm:
    #     log_c_d = 0
    # elif norm == 2: # euclidean norm
    #     log_c_d = (d/2.) * log(np.pi) -log(gamma(d/2. +1))
    # elif norm == 1:
    #     raise NotImplementedError
    # else:
    #     raise NotImplementedError("Variable 'norm' either 1, 2 or np.inf")
    # NOTE(review): the commented-out branch above would pick c_d per norm; as
    # written `norm` is ignored and the max-norm unit-ball volume (log c_d = 0)
    # is always assumed -- matches the p=np.inf query below.
    log_c_d = 0.
    kdtree = cKDTree(x)
    # query all points -- k+1 as query point also in initial set
    # distances, idx = kdtree.query(x, k + 1, eps=0, p=norm)
    distances, idx = kdtree.query(x, k + 1, eps=0, p=np.inf)
    distances = distances[:, -1]
    # enforce non-zero distances
    # NOTE(review): with the default min_dist=0 a duplicate sample yields a
    # zero distance and log(0) = -inf below; pass min_dist > 0 to guard it.
    distances[distances < min_dist] = min_dist
    sum_log_dist = np.sum(log(2*distances)) # where did the 2 come from? radius -> diameter
    h = -digamma(k) + digamma(n) + log_c_d + (d / float(n)) * sum_log_dist
    return h
def get_mi(x, y, k=1, normalize=None, norm=np.inf, estimator='ksg'):
    """
    Estimates the mutual information (in nats) between two point clouds, x and y,
    in a D-dimensional space.
    I(X,Y) = H(X) + H(Y) - H(X,Y)
    @reference:
    Kraskov, Stoegbauer & Grassberger (2004). Estimating mutual information. PHYSICAL REVIEW E 69, 066138
    Arguments:
    ----------
    x, y: (n, d) ndarray
        n samples from d-dimensional multivariate distributions
    k: int (default 1)
        kth nearest neighbour to use in density estimate;
        imposes smoothness on the underlying probability distribution
    normalize: function or None (default None)
        if a function, the data are pre-processed with the function before the computation
    norm: 1, 2, or np.inf (default np.inf)
        p-norm used when computing k-nearest neighbour distances
        (currently ignored: the max norm is always used -- see module TODO)
    estimator: 'ksg' or 'naive' (default 'ksg')
        'ksg'  : see Kraskov, Stoegbauer & Grassberger (2004) Estimating mutual information, eq(8).
        'naive': entropies are calculated individually using the Kozachenko-Leonenko estimator implemented in get_h()
    Returns:
    --------
    mi: float
        mutual information I(X,Y)
    """
    if normalize:
        x = normalize(x)
        y = normalize(y)
    # construct state array for the joint process:
    xy = np.c_[x, y]
    if estimator == 'naive':
        # compute individual entropies and combine them
        hx = get_h(x, k=k, norm=norm)
        hy = get_h(y, k=k, norm=norm)
        hxy = get_h(xy, k=k, norm=norm)
        mi = hx + hy - hxy
    elif estimator == 'ksg':
        # store data pts in kd-trees for efficient nearest neighbour computations
        # TODO: choose a better leaf size
        x_tree = cKDTree(x)
        y_tree = cKDTree(y)
        xy_tree = cKDTree(xy)
        # kth nearest neighbour distances for every state;
        # query with k=k+1 so the data point itself is not counted
        dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)
        epsilon = dist[:, -1]
        # for each point, count the number of neighbours whose distance in the
        # x-subspace is strictly < epsilon; repeat for the y subspace
        n = len(x)
        # fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement
        nx = np.empty(n, dtype=int)
        ny = np.empty(n, dtype=int)
        for ii in range(n):
            nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
            ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
        mi = digamma(k) - np.mean(digamma(nx+1) + digamma(ny+1)) + digamma(n)  # KSG eq. (8), version (1)
        # mi = digamma(k) -1./k -np.mean(digamma(nx) + digamma(ny)) + digamma(n) # version (2)
    else:
        # covers 'lnc' (never implemented) and anything unknown
        raise NotImplementedError("Estimator is one of 'naive', 'ksg'; currently: {}".format(estimator))
    return mi
def get_pmi(x, y, z, k=1, normalize=None, norm=np.inf, estimator='fp'):
    """
    Estimates the partial mutual information (in nats), i.e. the
    information between two point clouds, x and y, in a D-dimensional
    space while conditioning on a third variable z.
    I(X,Y|Z) = H(X,Z) + H(Y,Z) - H(X,Y,Z) - H(Z)
    The estimators are based on:
    @reference:
    Frenzel & Pombe (2007) Partial mutual information for coupling analysis of multivariate time series
    Poczos & Schneider (2012) Nonparametric Estimation of Conditional Information and Divergences
    Arguments:
    ----------
    x, y, z: (n, d) ndarray
        n samples from d-dimensional multivariate distributions
    k: int (default 1)
        kth nearest neighbour to use in density estimate;
        imposes smoothness on the underlying probability distribution
    normalize: function or None (default None)
        if a function, the data are pre-processed with the function before the computation
    norm: 1, 2, or np.inf (default np.inf)
        p-norm used when computing k-nearest neighbour distances
        (currently ignored: the max norm is always used -- see module TODO)
    estimator: 'fp', 'ps' or 'naive' (default 'fp')
        'naive': entropies are calculated individually using the Kozachenko-Leonenko estimator implemented in get_h()
        'fp'   : Frenzel & Pombe estimator (effectively the KSG-estimator for mutual information)
    Returns:
    --------
    pmi: float
        partial mutual information I(X;Y|Z)
    """
    if normalize:
        x = normalize(x)
        y = normalize(y)
        z = normalize(z)
    # construct state arrays for the joint processes:
    xz = np.c_[x, z]
    yz = np.c_[y, z]
    xyz = np.c_[x, y, z]
    if estimator == 'naive':
        # compute individual entropies
        # TODO: pass in min_dist
        hz = get_h(z, k=k, norm=norm)
        hxz = get_h(xz, k=k, norm=norm)
        hyz = get_h(yz, k=k, norm=norm)
        hxyz = get_h(xyz, k=k, norm=norm)
        pmi = hxz + hyz - hxyz - hz
    elif estimator == 'fp':
        # construct k-d trees
        z_tree = cKDTree(z)
        xz_tree = cKDTree(xz)
        yz_tree = cKDTree(yz)
        xyz_tree = cKDTree(xyz)
        # kth nearest neighbour distances for every state;
        # query with k=k+1 so the data point itself is not counted
        dist, idx = xyz_tree.query(xyz, k=k+1, p=np.inf)
        epsilon = dist[:, -1]
        # for each point, count the number of neighbours
        # whose distance in the relevant subspace is strictly < epsilon
        n = len(x)
        # fixed: np.int was deprecated in NumPy 1.20 and removed in 1.24
        nxz = np.empty(n, dtype=int)
        nyz = np.empty(n, dtype=int)
        nz = np.empty(n, dtype=int)
        for ii in range(n):
            nz[ii] = len( z_tree.query_ball_point( z_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
            nxz[ii] = len(xz_tree.query_ball_point(xz_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
            nyz[ii] = len(yz_tree.query_ball_point(yz_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
        pmi = digamma(k) + np.mean(digamma(nz + 1) - digamma(nxz + 1) - digamma(nyz + 1))
    elif estimator == 'ps':
        # (I am fairly sure that) this is the correct implementation of the
        # estimator, but the estimator itself performs poorly.
        # construct k-d trees
        xz_tree = cKDTree(xz, leafsize=2*k)
        yz_tree = cKDTree(yz, leafsize=2*k)
        # determine k-nn distances; the original pre-allocated rxz/ryz with
        # dtype=np.int and immediately overwrote them -- dead stores removed
        rxz, dummy = xz_tree.query(xz, k=k+1, p=np.inf)  # +1 to account for distance to itself
        ryz, dummy = yz_tree.query(xz, k=k+1, p=np.inf)  # +1 to account for distance to itself; xz NOT a typo
        pmi = yz.shape[1] * np.mean(log(ryz[:, -1]) - log(rxz[:, -1]))  # + log(n) -log(n-1) -1.
    else:
        raise NotImplementedError("Estimator one of 'naive', 'fp', 'ps'; currently: {}".format(estimator))
    return pmi
def get_imin(x1, x2, y, k=1, normalize=None, norm=np.inf):
    """
    Estimates the average specific information (in nats) between a random variable Y
    and two explanatory variables, X1 and X2.
    I_min(Y; X1, X2) = \sum_{y \in Y} p(y) min_{X \in {X1, X2}} I_spec(y; X)
    where
    I_spec(y; X) = \sum_{x \in X} p(x|y) \log(p(y|x) / p(x))
    @reference:
    Williams & Beer (2010). Nonnegative Decomposition of Multivariate Information. arXiv:1004.2515v1
    Kraskov, Stoegbauer & Grassberger (2004). Estimating mutual information. PHYSICAL REVIEW E 69, 066138
    Arguments:
    ----------
    x1, x2, y: (n, d) ndarray
        n samples from d-dimensional multivariate distributions
    k: int (default 1)
        kth nearest neighbour to use in density estimate;
        imposes smoothness on the underlying probability distribution
    normalize: function or None (default None)
        if a function, the data are pre-processed with the function before the computation
    norm: 1, 2, or np.inf (default np.inf)
        p-norm used when computing k-nearest neighbour distances
        (currently ignored: the max norm is always used -- see module TODO)
    Returns:
    --------
    i_min: float
        average specific information I_min(Y; X1, X2)
    """
    if normalize:
        y = normalize(y)
    y_tree = cKDTree(y)
    n = len(y)
    i_spec = np.zeros((2, n))
    for jj, x in enumerate([x1, x2]):
        if normalize:
            x = normalize(x)
        # construct state array for the joint process:
        xy = np.c_[x, y]
        # store data pts in kd-trees for efficient nearest neighbour computations
        # TODO: choose a better leaf size
        x_tree = cKDTree(x)
        xy_tree = cKDTree(xy)
        # kth nearest neighbour distances for every state;
        # query with k=k+1 so the data point itself is not counted
        dist, idx = xy_tree.query(xy, k=k+1, p=np.inf)
        epsilon = dist[:, -1]
        # for each point, count the number of neighbours whose distance in the
        # x-subspace is strictly < epsilon; repeat for the y subspace
        # (np.int removed from NumPy 1.24 -> builtin int)
        nx = np.empty(n, dtype=int)
        ny = np.empty(n, dtype=int)
        # fixed: the original looped `for ii in xrange(N)` -- xrange does not
        # exist in Python 3 and N was never defined; the loop runs over the
        # n samples counted above
        for ii in range(n):
            nx[ii] = len(x_tree.query_ball_point(x_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
            ny[ii] = len(y_tree.query_ball_point(y_tree.data[ii], r=epsilon[ii], p=np.inf)) - 1
        i_spec[jj] = digamma(k) - digamma(nx+1) + digamma(ny+1) + digamma(n)  # version (1)
    i_min = np.mean(np.min(i_spec, 0))
    return i_min
def get_pid(x1, x2, y, k=1, normalize=None, norm=np.inf):
    """
    Estimates the partial information decomposition (in nats) between a random
    variable Y and two explanatory variables, X1 and X2:
    I(X1, X2; Y) = synergy + unique_{X1} + unique_{X2} + redundancy
    redundancy  = I_{min}(X1, X2; Y)
    unique_{X1} = I(X1; Y) - redundancy
    unique_{X2} = I(X2; Y) - redundancy
    synergy     = I(X1, X2; Y) - I(X1; Y) - I(X2; Y) + redundancy
    @reference:
    Williams & Beer (2010). Nonnegative Decomposition of Multivariate Information. arXiv:1004.2515v1
    Kraskov, Stoegbauer & Grassberger (2004). Estimating mutual information. PHYSICAL REVIEW E 69, 066138
    For a critique of I_min as a redundancy measure, see
    Bertschinger et al. (2012). Shared Information – New Insights and Problems in Decomposing Information in Complex Systems. arXiv:1210.5902v1
    Griffith & Koch (2014). Quantifying synergistic mutual information. arXiv:1205.4265v6
    Arguments:
    ----------
    x1, x2, y: (n, d) ndarray
        n samples from d-dimensional multivariate distributions
    k: int (default 1)
        kth nearest neighbour to use in density estimate;
        imposes smoothness on the underlying probability distribution
    normalize: function or None (default None)
        if a function, the data are pre-processed with the function before the computation
    norm: 1, 2, or np.inf (default np.inf)
        p-norm used when computing k-nearest neighbour distances
    Returns:
    --------
    synergy: float
        information about Y encoded by the joint state of x1 and x2
    unique_x1: float
        information about Y encoded uniquely by x1
    unique_x2: float
        information about Y encoded uniquely by x2
    redundancy: float
        information about Y encoded by either x1 or x2
    """
    shared_kwargs = dict(k=k, normalize=normalize, norm=norm)
    info_x1 = get_mi(x1, y, **shared_kwargs)
    info_x2 = get_mi(x2, y, **shared_kwargs)
    info_joint = get_mi(np.c_[x1, x2], y, **shared_kwargs)
    redundancy = get_imin(x1, x2, y, **shared_kwargs)
    unique_x1 = info_x1 - redundancy
    unique_x2 = info_x2 - redundancy
    synergy = info_joint - info_x1 - info_x2 + redundancy
    return synergy, unique_x1, unique_x2, redundancy
# --------------------------------------------------------------------------------
def get_mvn_data(total_rvs, dimensionality=2, scale_sigma_offdiagonal_by=1., total_samples=1000):
    """Draw samples from a random multivariate normal and split them into
    `total_rvs` variables of `dimensionality` columns each.

    Off-block-diagonal covariance entries are scaled by
    `scale_sigma_offdiagonal_by` to control the coupling between variables.
    Returns a list of (total_samples, dimensionality) arrays.
    """
    d = dimensionality
    space_size = total_rvs * d
    # random mean and a random positive semi-definite covariance (S^T S)
    mu = np.random.randn(space_size)
    sigma = np.random.rand(space_size, space_size)
    sigma = np.dot(sigma.transpose(), sigma)
    # scale off-block-diagonal entries to weaken/strengthen cross-variable coupling
    for ii, jj in itertools.product(range(total_rvs), repeat=2):
        if ii != jj:
            sigma[d*ii:d*(ii+1), d*jj:d*(jj+1)] *= scale_sigma_offdiagonal_by
    samples = multivariate_normal(mu, sigma).rvs(total_samples)
    return [samples[:, ii*d:(ii+1)*d] for ii in range(total_rvs)]
def test_get_h(k=5, norm=np.inf):
    """Print the analytic MVN entropy next to the Kozachenko-Leonenko estimate."""
    samples, = get_mvn_data(total_rvs=1,
                            dimensionality=2,
                            scale_sigma_offdiagonal_by=1.,
                            total_samples=1000)
    analytic = get_h_mvn(samples)
    estimate = get_h(samples, k=k, norm=norm)
    print("analytic result: {:.5f}".format(analytic))
    print("K-L estimator: {:.5f}".format(estimate))
def test_get_mi(k=5, normalize=None, norm=np.inf):
    """Compare the analytic MVN mutual information with the naive and KSG
    estimators and assert both are close to the analytic value."""
    X, Y = get_mvn_data(total_rvs=2,
                        dimensionality=2,
                        scale_sigma_offdiagonal_by=1., # 0.1, 0.
                        total_samples=10000)
    # solutions
    analytic = get_mi_mvn(X, Y)
    naive = get_mi(X, Y, k=k, normalize=normalize, norm=norm, estimator='naive')
    ksg = get_mi(X, Y, k=k, normalize=normalize, norm=norm, estimator='ksg')
    print("analytic result: {:.5f}".format(analytic))
    print("naive estimator: {:.5f}".format(naive))
    print("KSG estimator: {:.5f}".format(ksg))
    # fixed: a bare `print` is a Python-2 leftover -- in Python 3 it is just a
    # reference to the builtin and prints nothing; print() emits the blank line
    print()
    print("naive - analytic: {:.5f}".format(naive - analytic))
    print("ksg - analytic: {:.5f}".format(ksg - analytic))
    print()
    print("naive / analytic: {:.5f}".format(naive / analytic))
    print("ksg / analytic: {:.5f}".format(ksg / analytic))
    print()
    # for automated testing:
    assert np.isclose(analytic, naive, rtol=0.1, atol=0.1), "Naive MI estimate strongly differs from expectation!"
    assert np.isclose(analytic, ksg, rtol=0.1, atol=0.1), "KSG MI estimate strongly differs from expectation!"
def test_get_pmi(k=5, normalize=None, norm=np.inf):
    """Compare the analytic MVN partial MI with the naive and FP estimators
    and assert both are within a loose tolerance of the analytic value."""
    X, Y, Z = get_mvn_data(total_rvs=3,
                           dimensionality=2,
                           scale_sigma_offdiagonal_by=1.,
                           total_samples=10000)
    # solutions
    analytic = get_pmi_mvn(X, Y, Z)
    naive = get_pmi(X, Y, Z, k=k, normalize=normalize, norm=norm, estimator='naive')
    fp = get_pmi(X, Y, Z, k=k, normalize=normalize, norm=norm, estimator='fp')
    print("analytic result : {:.5f}".format(analytic))
    print("naive estimator : {:.5f}".format(naive))
    print("FP estimator : {:.5f}".format(fp))
    # fixed: bare `print` (Python-2 leftover) printed nothing in Python 3
    print()
    # for automated testing:
    assert np.isclose(analytic, naive, rtol=0.5, atol=0.5), "Naive MI estimate strongly differs from expectation!"
    assert np.isclose(analytic, fp, rtol=0.5, atol=0.5), "FP MI estimate strongly differs from expectation!"
def test_get_pid(k=5, normalize=None, norm=np.inf):
    """Placeholder for the PID test cases (see module TODO):
    rdn -> only redundant information
    unq -> only unique information
    xor -> only synergistic information
    """
    pass
|
'''
Approach 1: Backtracking
Algorithm
Backtracking is an algorithm for finding all solutions by exploring all potential candidates.
If the solution candidate turns out not to be a solution (or at least not the last one),
the backtracking algorithm discards it by making a change at the previous step,
i.e. it backtracks and then tries again.
Here is a backtrack function which takes a first integer to add and a current combination as arguments
backtrack(first, curr).
If the current combination is done - add it to output.
Iterate over the integers from first to n.
Add integer i into the current combination curr.
Proceed to add more integers into the combination : backtrack(i + 1, curr).
Backtrack by removing i from curr.
'''
def combine(self, n: int, k: int) -> List[List[int]]:
    """Return all k-element combinations of the integers 1..n via backtracking."""
    # backtrack(first, comb): try to extend the partial combination `comb`
    # with integers drawn from nums[first:].
    def backtrack(first, comb):
        # If the current combination is done - add it to output.
        if len(comb) == k:
            output.append(comb[:])
            # fixed: without this early return the loop below kept recursing
            # under every completed combination -- it produced no extra output
            # (len(comb) can never equal k again in that subtree) but wasted work
            return
        # Iterate over the integers from first to n.
        for i in range(first, len(nums)):
            # Add integer i into the current combination.
            comb.append(nums[i])
            # Proceed to add more integers into the combination.
            backtrack(i + 1, comb)
            # Backtrack by removing i from the combination.
            comb.pop()
    output = []
    nums = [i + 1 for i in range(n)]
    backtrack(0, [])
    return output
|
# simple stock lookup: report whether the searched product code is in the catalogue
produkty = {'S1222': 'sukienka trojkat',
            'P1222': 'spodnie krata',
            'X212': 'konsola do gier'}
igla = 'X2X'
komunikat = "znalazlem{0}" if igla in produkty else "Brak w magazynie {0}"
print(komunikat.format(igla))
|
#!/usr/bin/python3
# The seed() method initializes the basic random number generator. Call this
# function before calling any other random module function.
# signature: seed([x], [y])
import random
# no argument: seed from the OS entropy source (or current time)
random.seed()
print ("random number with default seed", random.random())
# an int seed makes the subsequent sequence reproducible
random.seed(10)
print ("random number with int seed", random.random())
# second argument is the seeding version; 2 (the default) mixes all bytes of a str seed
random.seed("hello",2)
print ("random number with string seed", random.random())
|
def solution_seventeen(word):
    """Return the last two characters of *word* repeated four times.

    The parameter was renamed from ``str``, which shadowed the builtin;
    all visible call sites pass it positionally.
    """
    return word[-2:] * 4

print(solution_seventeen('Mcnilly'))
print(solution_seventeen('Goshomi'))
|
from yolo3.model import YoloBody, TinyYoloBody, model_saver, channel_move_forward, yolo_head
from yolo3.utils import letterbox_image
import os
import numpy as np
import torch
import colorsys
import time
import cv2
from PIL import Image, ImageFont, ImageDraw
class Yolo():
    """PyTorch YOLOv3 detector: loads weights, anchors and class names, then
    draws labelled detection boxes onto PIL images."""

    def __init__(self):
        self.model_path = 'logs/veh_sign/Yolo_20180823-1623_11.100000381469727.pth'  # model path or trained weights path
        self.anchors_path = 'model_data/yolo_anchors.txt'
        self.classes_path = 'model_data/Sign_Veh_Navinfo_classes.txt'
        self.score = 0.1   # minimum class confidence for keeping a box
        self.iou = 0.45    # NMS overlap threshold
        self.model_image_size = (416, 416)
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.device = torch.device("cuda")
        self.generate()
        self.detect_tools = DetectTools(self.anchors, self.class_names, self.model_image_size, self.score)

    def _get_class(self):
        """Read one class name per line from classes_path."""
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names

    def _get_anchors(self):
        """Read comma-separated anchor sizes and reshape them to (n, 2)."""
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)

    def generate(self):
        """Build the (tiny) YOLO body, load the weights and prepare box colors."""
        model_path = os.path.expanduser(self.model_path)
        # fixed: the message used to claim '.pkl' although '.pth' is checked
        assert model_path.endswith('.pth'), 'Model weights must be a .pth file.'
        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors == 6  # default setting
        self.yolo_model = TinyYoloBody(num_anchors // 3, num_classes).to(self.device) \
            if is_tiny_version else YoloBody(num_anchors // 3, num_classes).to(self.device)
        model_saver(self.yolo_model, self.model_path, save=False)
        print('{} model, anchors, and classes loaded.'.format(model_path))
        # Generate colors for drawing bounding boxes.
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.

    def detect_image(self, image):
        """Run the network on a PIL image and draw labelled boxes in place."""
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        # fixed: time.clock() was removed in Python 3.8 -- perf_counter is the replacement
        start = time.perf_counter()
        # image is [1, 416, 416, 3], numpy.array
        # output is [1, (anchors/3)*(classes+5), 416, 416]
        image_data = np.asarray(boxed_image)
        image_data = image_data[np.newaxis, :]
        inputs = channel_move_forward(image_data)
        print('time used', time.perf_counter() - start)
        inputs = torch.Tensor(inputs).float().cuda()
        out_puts = self.yolo_model.forward(inputs)
        result = self.detect_tools.yolo_eval(out_puts, np.array([image.size[0], image.size[1]]))
        print('Found {} boxes for {}'.format(len(result), 'img'))
        font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                  size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
        thickness = 2
        for i, c in list(enumerate(result)):
            predicted_class = self.class_names[c[0]]
            box = c[1][1:]
            score = c[1][0]
            label = '{} {:.2f}'.format(predicted_class, score)
            draw = ImageDraw.Draw(image)
            label_size = draw.textsize(label, font)
            left, top, right, bottom = box
            # clamp the box to the image and round to integer pixel coordinates
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # put the label above the box unless it would leave the image
            if top - label_size[1] >= 0:
                text_origin = np.array([left, top - label_size[1]])
            else:
                text_origin = np.array([left, top + 1])
            # draw `thickness` nested rectangles as the box outline
            # (loop variable renamed from `i`, which shadowed the enumerate index)
            for t in range(thickness):
                draw.rectangle(
                    [left + t, top + t, right - t, bottom - t],
                    outline=self.colors[c[0]])
            draw.rectangle(
                [tuple(text_origin), tuple(text_origin + label_size)],
                fill=self.colors[c[0]])
            draw.text(text_origin, label, fill=(0, 0, 0), font=font)
            del draw
        return image
class DetectTools():
def __init__(self, anchors, class_names, model_image_size, score):
self.anchors = anchors
self.class_names = class_names
self.model_image_size = model_image_size
self.score = score
def yolo_eval(self, out_puts, image_size):
num_layers = len(out_puts)
anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 else [[3, 4, 5], [0, 1, 2]]
Result = []
for i in range(num_layers):
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(out_puts[i], self.anchors[anchor_mask[i]], len(self.class_names), self.model_image_size, 1,)
box = self.yolo_boxes(box_xy[0], box_wh[0], np.array([self.model_image_size[1], self.model_image_size[0]]), image_size)
box_score = box_confidence[0] * box_class_probs[0]
# for x in range(shape[0]):
# for y in range(shape[2]):
# for z in range(shape[3]):
# BOXES.append(list(box[x,:,y,z]))
# SCORES.append(list(box_score[x,:,y,z]))
box_re = np.reshape(box,[3, 4, -1])
score_re = np.reshape(box_score, [3, len(self.class_names), -1])
mask = score_re >= self.score
mask_id = np.where(mask == True)
score_result = score_re[mask_id]
box_result = box_re[mask_id[0],:, mask_id[2]]
for i in range(len(score_result)):
Result.append([mask_id[1][i], score_result[i], box_result[i][0],box_result[i][1],box_result[i][2],box_result[i][3]])
if len(Result) >= 1:
result_to_show = self.non_max_suppression(np.array(Result))
else:
result_to_show = []
return result_to_show
def yolo_boxes(self, box_xy, box_wh, input_shape, image_shape):
'''
:param input_shape: must be numpy.array[col_num, row_num]
:param image_shape: must be numpy.array[col_num, row_num]
:return: box shape [3, 4, w, h] , is x_min, y_min, x_max, y_max
'''
new_shape = image_shape * np.min(input_shape / image_shape)
scale = input_shape / new_shape
offset = (input_shape - new_shape) / 2. /input_shape
box_xy[:, 0, :, :] -= offset[0]
box_xy[:, 1, :, :] -= offset[1]
box_xy[:, 0, :, :] *= scale[0]
box_xy[:, 1, :, :] *= scale[1]
box_wh[:, 0, :, :] *= scale[0]
box_wh[:, 1, :, :] *= scale[1]
box_min = box_xy - (box_wh / 2)
box_max = box_xy + (box_wh / 2)
box = np.concatenate((box_min, box_max), 1)
box_shape = np.shape(box)
matrix = np.ones([3, 1, box_shape[2], box_shape[3]])
image_shape_matrix = np.concatenate((matrix * image_shape[0], matrix * image_shape[1]),1)
image_shape_matrix = np.concatenate((image_shape_matrix, image_shape_matrix),1)
box *= image_shape_matrix
return box
def non_max_suppression(self, data, iou_thres=0.45):
    """
    Greedy per-class non-maximum suppression.

    :param data: array of rows [class_id, score, x_min, y_min, x_max, y_max]
    :param iou_thres: boxes whose IoU with a kept box exceeds this are dropped
    :return: list of [class_id, row] entries, where row is
             [score, x_min, y_min, x_max, y_max]
    """
    result = []
    for i in range(len(self.class_names)):
        # Rows belonging to class i, with the class column stripped off.
        class_box = np.array(data[np.where(data[:, 0] == i), 1:][0])
        # Sort by score, highest first.
        class_box_sort = class_box[np.argsort(-class_box[..., 0])]
        for j in range(len(class_box_sort)):
            if len(class_box_sort) >= 2:
                # IoU of every remaining box against the current best box.
                iou = self.iou(class_box_sort[1:, 1:], class_box_sort[0, 1:])
                mask_iou = iou <= iou_thres
                # Indices to delete: the suppressed boxes (+1 offsets the
                # [1:] slice) plus index 0, the kept box itself.
                mask_id = np.insert(np.where(mask_iou == False)[0] + 1, 0, values=0)
                result.append([i, class_box_sort[0]])
                class_box_sort = np.delete(class_box_sort, mask_id, 0)
            elif len(class_box_sort) == 0:
                break
            elif len(class_box_sort) == 1:
                # Single survivor: keep it and move to the next class.
                result.append([i, class_box_sort.squeeze()])
                break
    return result
def iou(self, data, data_0):
    """
    Intersection-over-union of each box in `data` against the single box `data_0`.

    Boxes are [x_min, y_min, x_max, y_max]; widths/heights use the +1 pixel
    convention (inclusive coordinates).

    :param data: numpy array of shape (N, 4)
    :param data_0: numpy array of shape (4,)
    :return: numpy array of N IoU values
    """
    # Corners of the intersection rectangles.
    inter_x1 = np.maximum(data[:, 0], data_0[0])
    inter_y1 = np.maximum(data[:, 1], data_0[1])
    inter_x2 = np.minimum(data[:, 2], data_0[2])
    inter_y2 = np.minimum(data[:, 3], data_0[3])
    # Clamp at zero so disjoint boxes contribute no area.
    inter_w = np.maximum(inter_x2 - inter_x1 + 1., 0.)
    inter_h = np.maximum(inter_y2 - inter_y1 + 1., 0.)
    intersection = inter_w * inter_h
    area_single = (data_0[2] - data_0[0] + 1.) * (data_0[3] - data_0[1] + 1.)
    areas = (data[:, 2] - data[:, 0] + 1.) * (data[:, 3] - data[:, 1] + 1.)
    union = area_single + areas - intersection
    return intersection / union
def detect_image(yolo):
    """
    Interactively run detection over a numbered image sequence.

    Iterates file names 'fc2_save_2018-07-12-173534-NNNN.png' forever, opening
    each image, running `yolo.detect_image` on it and showing the result.
    Missing/unreadable files are reported and skipped.

    :param yolo: detector object exposing a detect_image(PIL.Image) method
    """
    path = 'F:/Navinfo/Object_detection/Traffic_Light_Detection/annotation/sign_vehicle_training_data/'
    i = 0
    while True:
        img = 'fc2_save_2018-07-12-173534-%s.png' % str(i).zfill(4)
        # img = input('Input image filename:')
        i += 1  # i is already an int; the old `i = int(i)` re-casts were redundant
        try:
            image = Image.open(path + img)
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt /
            # SystemExit and made the endless loop impossible to interrupt.
            print('Open Error! Try again!')
        else:
            r_image = yolo.detect_image(image)
            r_image.show()
# Script entry point: build the detector (Yolo is defined earlier in this
# file — TODO confirm) and start the interactive detection loop.
if __name__ == '__main__':
    detect_image(Yolo())
"""
Creates CSV files where the data manipulations and cleaning are performed
"""
import csv
import processor
def writecsv(filename, keyword=None):
    """
    Fetch a dataset from the processor module and write it to '<filename>.csv'.

    :param filename: one of 'tracksearch', 'playlistartists' or 'newreleases';
                     also used as the output file name (without extension)
    :param keyword: search term / playlist URI forwarded to the processor
                    (ignored for 'newreleases')
    """
    # A dispatch table replaces the three duplicated print/fetch/to_csv branches.
    fetchers = {
        'tracksearch': lambda: processor.searchTracks(keyword),
        'playlistartists': lambda: processor.getPlaylist(keyword),
        'newreleases': lambda: processor.getNewrelease(),
    }
    fetch = fetchers.get(filename)
    if fetch is None:
        # Unknown names used to be silently ignored; make that visible.
        print('Unknown dataset name: ' + filename)
        return
    print('Generating ' + filename + '.csv...')
    fetch().to_csv(filename + '.csv')
    print(filename + ' has been created successfully.')
# Generate the three CSV exports: a keyword track search, the artists of a
# specific Spotify playlist, and the current new releases.
writecsv('tracksearch', keyword='ambient')
writecsv('playlistartists', keyword='spotify:user:redmusiccompany:playlist:6TMuXkxElkRUIEFmxyrCcq')
writecsv('newreleases')
|
#Algoritmos y programación I.
#Cátedra: Essaya.
#Práctica: Grace.
#Nombre: Mauro Javier Santoni.
#Padrón: 102654.
#Corrector: Juan Patricio Marshall.
"""
Conway's Game of Life
---------------------
https://es.wikipedia.org/wiki/Juego_de_la_vida
El "tablero de juego" es una malla formada por cuadrados ("células") que se
extiende por el infinito en todas las direcciones. Cada célula tiene 8 células
vecinas, que son las que están próximas a ella, incluidas las diagonales. Las
células tienen dos estados: están "vivas" o "muertas" (o "encendidas" y
"apagadas"). El estado de la malla evoluciona a lo largo de unidades de tiempo
discretas (se podría decir que por turnos). El estado de todas las células se
tiene en cuenta para calcular el estado de las mismas al turno siguiente.
Todas las células se actualizan simultáneamente.
Las transiciones dependen del número de células vecinas vivas:
* Una célula muerta con exactamente 3 células vecinas vivas "nace" (al turno
siguiente estará viva).
* Una célula viva con 2 ó 3 células vecinas vivas sigue viva, en otro caso
muere o permanece muerta (por "soledad" o "superpoblación").
"""
def main():
    """
    Program entry point. Creates the initial Game of Life state (a glider)
    and shows the simulation step by step while the user presses Enter
    (CTRL+C terminates).
    """
    life = life_crear([
        '..........',
        '..........',
        '..........',
        '.....#....',
        '......#...',
        '....###...',
        '..........',
        '..........',
    ])
    while True:
        # Render the current generation, then wait for Enter before advancing.
        for linea in life_mostrar(life):
            print(linea)
        print()
        input("Presione Enter para continuar, CTRL+C para terminar")
        print()
        life = life_siguiente(life)
#-----------------------------------------------------------------------------
def life_crear(mapa):
    """
    Build the initial Game of Life state from a '.'/'#' layout.

    `mapa` is a list of equal-length strings, one per board row, where '.'
    is an empty cell and '#' a live cell.

    Returns a list of row lists, each element False (empty) or True (alive).
    """
    if not mapa:
        return []
    columnas = len(mapa[0])
    return [[fila[j] == '#' for j in range(columnas)] for fila in mapa]
def pruebas_life_crear():
    """Check life_crear() against known layouts (AssertionError on failure)."""
    casos = [
        ([], []),
        (['.'], [[False]]),
        (['#'], [[True]]),
        (['#.', '.#'], [[True, False], [False, True]]),
    ]
    for mapa, esperado in casos:
        assert life_crear(mapa) == esperado
#-----------------------------------------------------------------------------
def life_mostrar(life):
    """
    Render the game state for display.

    Receives the state (list of rows of False/True) and returns one string
    per row, using '.' for empty cells and '#' for live cells.
    """
    return [''.join('#' if celula else '.' for celula in fila) for fila in life]
def pruebas_life_mostrar():
    """Check life_mostrar() against known states (AssertionError on failure)."""
    casos = [
        ([], []),
        ([[False]], ['.']),
        ([[True]], ['#']),
        ([[True, False], [False, True]], ['#.', '.#']),
    ]
    for estado, esperado in casos:
        assert life_mostrar(estado) == esperado
#-----------------------------------------------------------------------------
def cant_adyacentes(life, f, c):
    """
    Count the live neighbours of the cell at row `f`, column `c`.

    The board is toroidal: edges wrap around to the opposite side. On tiny
    boards a neighbour offset may wrap back onto (f, c) itself; such cells
    are excluded from the count, matching the self-exclusion semantics.
    """
    filas = len(life)
    columnas = len(life[0])
    vivas = 0
    for df in (-1, 0, 1):
        for dc in (-1, 0, 1):
            vf = (f + df) % filas
            vc = (c + dc) % columnas
            # Skip any offset that lands on the cell itself (including wraps).
            if (vf, vc) != (f, c) and life[vf][vc]:
                vivas += 1
    return vivas
def pruebas_cant_adyacentes():
    """Check cant_adyacentes() on wrapped and unwrapped neighbourhoods."""
    casos = [
        (['.'], 0, 0, 0),
        (['..', '..'], 0, 0, 0),
        (['..', '..'], 0, 1, 0),
        (['##', '..'], 0, 0, 2),
        (['##', '..'], 0, 1, 2),
        (['#.', '.#'], 0, 0, 4),
        (['##', '##'], 0, 0, 8),
        (['.#.', '#.#', '.#.'], 1, 1, 4),
        (['.#.', '..#', '.#.'], 1, 1, 3),
        (['...', '.#.', '...'], 1, 1, 0),
    ]
    for mapa, f, c, esperado in casos:
        assert cant_adyacentes(life_crear(mapa), f, c) == esperado
#-----------------------------------------------------------------------------
def celda_siguiente(life, f, c):
    """
    Compute the next state of the cell at row `f`, column `c`.

    Returns True if the cell will be alive in the next generation
    (survival with 2-3 neighbours, birth with exactly 3), else False.
    """
    vecinas = cant_adyacentes(life, f, c)
    if life[f][c]:
        # A live cell survives with 2 or 3 live neighbours.
        return vecinas in (2, 3)
    # A dead cell is born with exactly 3 live neighbours.
    return vecinas == 3
def pruebas_celda_siguiente():
    """Check celda_siguiente() on survival, birth and death cases.

    Fix: the last case previously called cant_adyacentes() by mistake
    (and only passed because 0 == False in Python); it now actually
    exercises celda_siguiente().
    """
    casos = [
        (['.'], 0, 0, False),
        (['..', '..'], 0, 0, False),
        (['..', '..'], 0, 1, False),
        (['##', '..'], 0, 0, True),
        (['##', '..'], 0, 1, True),
        (['#.', '.#'], 0, 0, False),
        (['##', '##'], 0, 0, False),
        (['.#.', '#.#', '.#.'], 1, 1, False),
        (['.#.', '..#', '.#.'], 1, 1, True),
        (['...', '.#.', '...'], 1, 1, False),  # lone cell dies of solitude
    ]
    for mapa, f, c, esperado in casos:
        assert celda_siguiente(life_crear(mapa), f, c) == esperado
#-----------------------------------------------------------------------------
def life_siguiente(life):
    """
    Compute the next generation of the game.

    Receives the current state (list of rows of False/True) and returns a
    NEW state; the input is not modified. The board is toroidal (handled
    by celda_siguiente/cant_adyacentes).
    """
    return [
        [celda_siguiente(life, f, c) for c in range(len(life[0]))]
        for f in range(len(life))
    ]
#-----------------------------------------------------------------------------
def pruebas():
    """Run every self-test; raises AssertionError on the first failure."""
    for prueba in (pruebas_life_crear,
                   pruebas_life_mostrar,
                   pruebas_cant_adyacentes,
                   pruebas_celda_siguiente):
        prueba()
# Run the self-tests first, then start the interactive simulation.
pruebas()
main()
|
# Record the Chinese zodiac signs; the sign is determined from the year.
# Python treats double and single quotes the same; use double quotes when
# the string itself contains a single quote.
chinses_zodiac = '猴鸡狗猪鼠牛虎兔龙蛇马羊'
#print(Chinses_zodiac[0:4]) # negative indices count from the end
#year = 2018
#print(year % 12)
#print(Chinses_zodiac[year % 12])
#print('狗'in Chinses_zodiac)
#year = int(input('请输入出生年份:'))
#for cz in Chinses_zodiac:
#    print(cz)
#for i in range(1,13):
#    print(i)
#for year in range(2000, 2019):
#    print('%s 年的生肖是 %s' % (year, chinses_zodiac[year % 12]
# Print the numbers 6 through 10, then stop.
num = 5
while True:
    num = num + 1
    if num > 10:
        # Fix: this was `continue`, which skipped the print but kept the
        # loop spinning forever; `break` ends the loop once num passes 10.
        break
    print(num)
import csv
import glob
import os
import pathlib
import random
import subprocess
import sys
import cv2
# import dlib
import joblib
import numpy as np
from tqdm import tqdm
import shutil
# import Models.Config.SEResNet50_config as config
from mtcnn.mtcnn import MTCNN
# Absolute directory of this source file; anchors the cache paths used below.
base_path = os.path.dirname(os.path.abspath(__file__))
###AFEW utils
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
    """Pretty-print a confusion matrix.

    :param cm: 2-D numpy array indexed as cm[true, predicted]
    :param labels: class names, one per row/column
    :param hide_zeroes: blank out cells equal to zero
    :param hide_diagonal: blank out the diagonal cells
    :param hide_threshold: if given, blank out cells <= this value
    """
    columnwidth = max([len(x) for x in labels] + [5])  # 5 is minimum value length
    empty_cell = " " * columnwidth
    # Header row of column labels.
    print(" " + empty_cell, end=" ")
    for label in labels:
        print("%{0}s".format(columnwidth) % label, end=" ")
    print()
    # One row per true label.
    for i, label1 in enumerate(labels):
        print(" %{0}s".format(columnwidth) % label1, end=" ")
        for j in range(len(labels)):
            cell = "%{0}.1f".format(columnwidth) % (cm[i, j])
            if hide_zeroes:
                cell = cell if float(cm[i, j]) != 0 else empty_cell
            if hide_diagonal:
                cell = cell if i != j else empty_cell
            # Fix: was `if hide_threshold:`, which ignored a threshold of 0.
            if hide_threshold is not None:
                cell = cell if cm[i, j] > hide_threshold else empty_cell
            print(cell, end=" ")
        print()
def filter_blacklist(data):
    """Remove samples whose video name is on the known-bad blacklist.

    :param data: list of sample dicts exposing item['info']['video_name']
    :return: the same list object, filtered in place
    """
    blacklist = {'010924040', '005221760', '010915080', '004510640'}
    # Fix: the old code collected ascending indices and deleted them one by
    # one; each deletion shifted the remaining indices, so the wrong items
    # were removed whenever more than one sample was blacklisted. Rebuild
    # the list in one pass instead (still mutating in place via slice
    # assignment so callers holding a reference see the filtered list).
    data[:] = [item for item in data
               if item['info']['video_name'] not in blacklist]
    return data
def _read_dataset(partition, input_path_ds, output_path_cache=base_path + '/CacheFrameProcessing',
                  debug_max_num_samples=None, cache_p=None):
    """Read one partition of the video dataset.

    For every class folder inside the partition: extract video frames,
    detect/align faces, and bundle the aligned sequences with the class
    label. The result is integrity-checked (with statistics written by
    check_data) and the temporary frame cache is deleted afterwards.

    :param partition: partition folder name to process (e.g. 'Train')
    :param input_path_ds: dataset root containing one folder per partition
    :param output_path_cache: scratch folder for extracted frames (removed at the end)
    :param debug_max_num_samples: optional cap on videos per class, for debugging
    :param cache_p: directory where log/statistics files are written
    :return: list of sample dicts ({'frames', 'label', 'info'})
    """
    data = []
    if not os.path.isdir(output_path_cache):
        os.makedirs(output_path_cache)
    # Iterate partition folders until the requested one is found.
    # NOTE(review): the loop variable `set` shadows the builtin.
    for set in list_dirs(input_path_ds):
        if partition == os.path.basename(set):
            # For this partition extract all video frames, one class at a time.
            for class_dir in tqdm(list_dirs(set)):
                # The class label is simply the folder name.
                openface_fdir = ""
                label = os.path.basename(class_dir)
                # Extract video frames for every video in this class.
                openface_fdir, _ = extract_frames_from_video_folder(class_dir, output_path_cache, debug_max_num_samples,
                                                                    cache_p, partition)
                # Preprocess every video frame by detecting and aligning faces.
                returned_sequences, map_infos = pre_process_video(openface_fdir, output_path_cache, cache_p, partition)
                # Append processed data.
                data += process_data(returned_sequences, map_infos, label)
    # Check dataset integrity and gather statistics (may recover failures).
    data = check_data(data, output_path_cache, cache_p, partition, input_path_ds)
    # Flush the temporary frame cache.
    shutil.rmtree(output_path_cache)
    return data
def recover_data(input_path_ds, output_cache_path, cache_p, partition, failed_sequences):
    """Retry face extraction for videos that failed the OpenFace pipeline.

    Copies the failed videos of this partition into a recovery folder,
    re-extracts their frames, generates bounding boxes with MTCNN, and
    re-runs alignment — first as sequences, then frame-by-frame (as single
    images) for videos that still produce no frames.

    :param input_path_ds: dataset root containing one folder per partition
    :param output_cache_path: scratch folder used for recovery
    :param cache_p: directory for log files
    :param partition: partition folder name
    :param failed_sequences: list of (video_name, label) tuples to recover
    :return: list of recovered sample dicts ({'frames', 'label', 'info'})
    """
    recovered = []
    recover_path = output_cache_path
    if not os.path.isdir(recover_path):
        os.makedirs(recover_path)
    # Copy every failed video of this partition into the recovery folder.
    for set in list_dirs(input_path_ds):
        if partition == os.path.basename(set):
            for class_dir in list_dirs(set):
                file_list = glob.glob('{}/*.avi'.format(class_dir))
                file_list.sort()
                for f in range(0, file_list.__len__()):
                    aviName = file_list[f].split('\\')[(-1)].rstrip('.avi')
                    for item in failed_sequences:
                        if aviName == item[0]:
                            shutil.copy(file_list[f], recover_path)
    openface_fdir, _ = extract_frames_from_video_folder(recover_path, recover_path, None, cache_p, partition)
    # Generate bounding boxes for the failed videos with our own detector.
    fd = MTCNN(steps_threshold=[0.35, 0.4, 0.5, 0.6])
    bbox_dir, openface_fdir = get_bbox(recover_path, fd, openface_fdir)
    # Preprocess every video frame using our bboxes for aligning faces.
    # Fix: bbox_dir was previously passed positionally and landed in the
    # unused `resize_shape` parameter, so the generated boxes were silently
    # ignored; pass it by keyword like the as_img call below does.
    returned_sequences, map_infos = pre_process_video(openface_fdir, output_cache_path, cache_p, partition,
                                                      bbox=bbox_dir)
    extra_map_infos = []
    # Videos that still yielded no frames are retried frame-by-frame.
    for i, seq in enumerate(returned_sequences):
        if len(seq) == 0:
            openface_fdir.clear()
            openface_fdir.append(os.path.join(recover_path, map_infos[i]['video_name']))
            extra_seq, extra_map = pre_process_video(openface_fdir, output_cache_path, cache_p, partition,
                                                     bbox=bbox_dir, as_img=True)
            returned_sequences += extra_seq
            extra_map_infos += extra_map
    map_infos += extra_map_infos
    # Re-attach labels to whatever was successfully recovered.
    for i, seq in enumerate(returned_sequences):
        new_seq = list()
        new_map_info = list()
        item_to_del = map_infos[i]['video_name']
        label = ""
        if len(seq) > 0:
            for x in failed_sequences:
                if item_to_del == x[0]:
                    label = x[1]
            new_seq.append(returned_sequences[i])
            new_map_info.append(map_infos[i])
            recovered += process_data(new_seq, new_map_info, label)
    return recovered
def get_bbox(recover_path, fd, openface_fdir):
    """Run the MTCNN detector over extracted frames and write one bbox file per frame.

    Frames where no face is detected are deleted; folders left empty are
    removed entirely (and dropped from `openface_fdir`).

    :param recover_path: folder containing one subfolder of frames per video
    :param fd: an MTCNN detector instance
    :param openface_fdir: list of frame-folder paths, filtered in place
    :return: (path of the bbox folder, filtered openface_fdir)
    """
    bb_dir = os.path.join(recover_path + "/bb")
    if not os.path.exists(bb_dir):
        os.makedirs(bb_dir)
    for dir in list_dirs(recover_path):
        if os.path.basename(dir) != "bb":
            deleted = 0
            for file in os.listdir(dir):
                if file.endswith(".png"):
                    frame = cv2.imread(os.path.join(dir, file))
                    faces = fd.detect_faces(frame)
                    if len(faces) != 0:
                        # Use only the first detection returned by MTCNN.
                        bounding_box = faces[0]['box']
                        keypoints = faces[0]['keypoints']
                        width = bounding_box[2]
                        height = bounding_box[3]
                        txs = bounding_box[0]
                        tys = bounding_box[1]
                        # Empirical rescaling of the MTCNN box (magic factors)
                        # to the face region expected downstream.
                        new_width = int(width * 1.0323)
                        new_height = int(height * 0.7751)
                        new_txs = int(width * (-0.0075) + txs)
                        new_tys = int(height * (0.2459) + tys)
                        # One "x_min y_min x_max y_max" text file per frame.
                        file_bb = open(bb_dir + "/" + file[:-4] + ".txt", "w")
                        file_bb.write(str(new_txs) + " " + str(new_tys) + " " + str(new_txs + new_width) + " " + str(
                            new_tys + new_height))
                        file_bb.close()
                    else:
                        # No face found: drop the frame file.
                        deleted += 1
                        os.remove(os.path.join(dir, file))
            if len(os.listdir(dir)) == 0:
                # Every frame was dropped: forget and remove the folder.
                for x in openface_fdir:
                    if os.path.basename(dir) in x:
                        openface_fdir.remove(x)
                shutil.rmtree(dir)
    return bb_dir, openface_fdir
def process_data(sequences, infos, label):
    """Bundle frame sequences and their metadata under a single class label.

    :param sequences: list of frame lists, one per video
    :param infos: list of info dicts, parallel to `sequences`
    :param label: class label applied to every sample
    :return: list of {'frames', 'label', 'info'} sample dicts
    """
    return [
        {'frames': sequences[i], 'label': label, 'info': infos[i]}
        for i in range(len(sequences))
    ]
def check_data(data, output_cache_path, cache_p, partition, input_path_ds):
    """Check data video integrity filtering out bad sequences, in addition a statistics log will be stored.

    Writes a per-video CSV report, drops samples that produced no frames,
    attempts to recover them via recover_data(), and appends global dataset
    statistics at the end of the report.

    :param data: list of sample dicts produced by process_data()
    :return: the filtered (and possibly extended) data list
    """
    total_frames = 0  # total frames in data
    tatal_frames_discarded = 0  # without face or with wrong prediction
    total_faces_recognized_percentage = list()  # percentage of face recognition/alignment success
    total_failed_sequences = list()  # will contain all video's names failed during pre process
    # Open statistics file in order to store per-video and global stats.
    csv.register_dialect('mydialect', delimiter=';', quotechar='"', lineterminator='\r\n', quoting=csv.QUOTE_MINIMAL)
    with open(os.path.join(cache_p, 'dataset_' + partition + '_statistics.csv'), 'w', newline='') as stats_file:
        writer = csv.writer(stats_file, dialect='mydialect')
        writer.writerow(["Video", "Label", "Total frames", "Discarded frames", "face_presence_percentage"])
        item_to_del = []
        # Iterate over all items: log the good ones, queue the empty ones.
        for item in data:
            info = item['info']
            if len(item['frames']) > 0:
                writer.writerow([info['video_name'], item['label'], info['total_frames'], info['discarded_frames'],
                                 info['face_present_percentage']])
                # Update global stats variables.
                total_frames += info['total_frames']
                tatal_frames_discarded += info['discarded_frames']
                total_faces_recognized_percentage.append(info['face_present_percentage'])
            else:
                total_failed_sequences.append((info['video_name'], item['label']))
                item_to_del.append(item)
        for item in item_to_del: data.remove(item)
        # Recover failed sequences, if there are any.
        if len(total_failed_sequences) > 0:
            writer.writerow([' '])
            writer.writerow(['Recovered failed videos during acquisition'])
            writer.writerow(["Video", "Label", "Total frames", "Discarded frames", "face_presence_percentage"])
            recovered = recover_data(input_path_ds, output_cache_path, cache_p, partition, total_failed_sequences)
            total_failed_sequences.clear()
            item_to_del.clear()
            # Update statistics with the newly recovered videos.
            for item in recovered:
                info = item['info']
                if len(item['frames']) > 0:
                    writer.writerow([info['video_name'], item['label'], info['total_frames'], info['discarded_frames'],
                                     info['face_present_percentage']])
                    total_frames += info['total_frames']
                    tatal_frames_discarded += info['discarded_frames']
                    total_faces_recognized_percentage.append(info['face_present_percentage'])
                else:
                    # Still failing after recovery: listed in the final stats.
                    total_failed_sequences.append((info['video_name'], item['label']))
                    item_to_del.append(item)
            for item in item_to_del: recovered.remove(item)
            data += recovered
        # Write global dataset stats.
        writer.writerow([' '])
        writer.writerow(['Dataset statistics'])
        writer.writerow(["Total frames", "Total discarded frames", "face_presence_percentage_mean", "Failed sequences"])
        writer.writerow([total_frames, tatal_frames_discarded, np.mean(total_faces_recognized_percentage),
                         '\r\n'.join([x[0] for x in total_failed_sequences])])
        stats_file.close()
    return data
def list_dirs(directory):
    """Return the immediate subdirectories of `directory` as pathlib.Path objects."""
    root = pathlib.Path(directory)
    subdirs = []
    for entry in root.iterdir():
        if entry.is_dir():
            subdirs.append(entry)
    return subdirs
def extract_frames_from_video_folder(input_avi, output_path_cache, debug_max_num_samples, cache_p, partition,
                                     read_all=False, video_format='.avi', existent_file_list=None):
    """Extract frames from a folder (one class) of videos via ffmpeg.

    :param input_avi: folder containing the videos
    :param output_path_cache: root folder, receives one frame-folder per video
    :param debug_max_num_samples: optional cap on the number of videos processed
    :param cache_p: directory for the ffmpeg log file
    :param partition: partition name (used in the log file name)
    :param read_all: take every file in the folder instead of filtering by extension
    :param video_format: extension filter used when read_all is False
    :param existent_file_list: precomputed file list; skips the glob when given
    :return: (list of per-video frame folders, list of names ffmpeg failed on)
    """
    if existent_file_list is None:
        if not read_all:
            file_list = glob.glob(('{}/*' + video_format).format(input_avi))
        else:
            file_list = glob.glob(input_avi + "/*")
    else:
        file_list = existent_file_list
    file_list.sort()
    data = []
    error_video = []
    # One frame folder per video, collected for the OpenFace step.
    openface_fdir = []
    current_num_samples = 0
    for f in tqdm(range(0, file_list.__len__())):
        file_to_read = os.path.normpath(file_list[f])
        try:
            # Video name without its extension.
            aviName = file_to_read.split('/')[(-1)].replace('.avi', '')
            aviName = aviName.replace('.mp4', '')
            # One output folder per video.
            save_path = '{}/{}'.format(output_path_cache, aviName)
            if not os.path.isdir(save_path):
                os.makedirs(save_path)
            output = '{}/{}-%6d_frame.png'.format(save_path, aviName)
            # Get the output size ('WxH') for ffmpeg.
            asr = get_output_size(file_list[f])
            # Extract all frames from this video.
            extract_frames(file_list[f], output, asr, cache_p, partition)
            openface_fdir.append(save_path)
            if debug_max_num_samples is not None:
                if current_num_samples == debug_max_num_samples - 1:
                    break
        except:
            # Record and skip videos ffmpeg could not handle.
            error_video.append(aviName)
        current_num_samples += 1
    return openface_fdir, error_video
def extract_frames(src, dest, asr, cache_p, partition):
    """Call ffmpeg to dump every frame of `src` into `dest`.

    :param src: input video path
    :param dest: printf-style output frame pattern, e.g. '.../name-%6d_frame.png'
    :param asr: output size string 'WxH' passed to ffmpeg -s
    :param cache_p: directory holding the ffmpeg log file
    :param partition: dataset partition name, used to name the log file
    """
    command = ['ffmpeg', '-loglevel', 'info', '-hide_banner', '-nostats', '-i', src, '-s', asr, '-q:a', '1', dest]
    try:
        # Fix: `with` guarantees the log handle is closed even when Popen
        # raises; the old open()/close() pair leaked the handle on error.
        with open(os.path.join(cache_p, 'FFMPEG_output_' + partition + '.log'), "a") as log_file:
            subprocess.Popen(command, stdout=log_file, stderr=log_file).wait()
    except Exception as e:
        print(e)
def refactor_output_sequence(frames_dir, avi_name):
    """Reshape FaceLandmarkImg per-frame output into FeatureExtraction layout.

    Moves each frame's single face crop ('face_det_000000.png') from its
    per-frame '<avi>_<n>_frame_aligned' folder into one '<avi>_aligned'
    folder, and writes a '<avi>.csv' index with one success/failure row per
    frame (mimicking the sequence tool's CSV format).
    """
    new_folder_align = os.path.join(frames_dir, (avi_name + "_aligned"))
    # Create the consolidated output folder.
    if not os.path.isdir(new_folder_align):
        os.makedirs(new_folder_align)
    with open(os.path.join(frames_dir, avi_name + '.csv'), 'w', newline='') as stats_file:
        writer = csv.writer(stats_file)
        writer.writerow(["frame", " face_id", " timestamp", " confidence", " success"])
        for file in (list_dirs(frames_dir)):
            # Only this video's per-frame "_frame_aligned" folders are of
            # interest; skip the consolidated folder itself.
            if len(str(os.path.basename(file))) > len(avi_name) and avi_name in os.path.basename(
                    file) and file.is_dir() and str(os.path.basename(file)) != str(os.path.basename(new_folder_align)):
                # Frame index parsed from the folder name suffix.
                frame_number = int(os.path.basename(file)[len(avi_name) + 1:].replace("_frame_aligned", ""))
                to_delete_path = str(file).replace("_aligned", ".csv")
                new_face_file_name = 'frame_det_00_{:06d}.png'.format(frame_number)
                new_file_path = os.path.join(new_folder_align, new_face_file_name)
                file = str(file) + "/face_det_000000.png"
                if os.path.exists(file):
                    # A face was found for this frame: move it, log success.
                    os.rename(file, new_file_path)
                    writer.writerow([str(frame_number), " 0", " 0.000", " 1", " 1"])
                else:
                    writer.writerow([str(frame_number), " 0", " 0.000", " 0", " 0"])
                # Drop the per-frame CSV produced by FaceLandmarkImg.
                os.remove(to_delete_path)
        stats_file.close()
def openface_call(openface_fdir, out_dir, cache_p, partition, bbox=None, as_img=True):
    """Invoke the OpenFace alignment tools over a set of frame folders.

    :param openface_fdir: list of folders of extracted frames
    :param out_dir: OpenFace output directory
    :param cache_p: directory for the OpenFace log file
    :param partition: partition name (used in the log file name)
    :param bbox: optional folder of precomputed bounding-box files
    :param as_img: use the per-image tool (FaceLandmarkImg) instead of the
                   sequence tool (FeatureExtraction)
    """
    if as_img:
        command = ['/data/s4179447/OpenFace/build/bin/FaceLandmarkImg']
    else:
        command = ['/data/s4179447/OpenFace/build/bin/FeatureExtraction']
    for _dir in openface_fdir:
        command.append("-fdir")
        command.append(_dir)
    if bbox is not None:
        command.append("-bboxdir")
        command.append(bbox)
    # Aligned face crop size and scale passed to OpenFace.
    resize_shape = 400
    scale = 1.46
    command += ['-out_dir', out_dir, '-simsize', str(resize_shape), '-simscale', str(scale),
                '-format_aligned', 'jpg', '-nomask', '-simalign', '-wild', '-multi_view', '1', '-nobadaligned']
    try:
        # Fix: `with` closes the log even when Popen raises (the old
        # open()/close() pair leaked the handle on error).
        with open(os.path.join(cache_p, 'OpenFace_output_' + partition + '.log'), "a") as log_file:
            subprocess.Popen(command, stdout=log_file, stderr=log_file).wait()
    except Exception as e:
        print(e)
        sys.exit(-1)
def pre_process_video(openface_fdir, frames_dir, cache_p, partition, resize_shape=(224, 224), bbox=None, as_img=False):
    """Run OpenFace over extracted frames and load the aligned face sequences.

    :param openface_fdir: list of per-video frame folders
    :param frames_dir: OpenFace output folder (also the frame cache root)
    :param cache_p: directory for log files
    :param partition: partition name (used in log file names)
    :param resize_shape: NOTE(review): never referenced in the body — the
        output size is hard-coded inside openface_call; confirm before use.
    :param bbox: optional folder of precomputed bounding boxes
    :param as_img: process frames individually instead of as a sequence
    :return: (list of per-video frame lists, list of per-video info dicts)
    """
    aligned_videos = []
    all_maps = []
    openface_call(openface_fdir, frames_dir, cache_p, partition, bbox, as_img)
    # Threshold for filtering out low-confidence face detections.
    threshold_detection = 0.1
    if as_img:
        # Per-image output must be reshaped into the sequence layout first.
        refactor_output_sequence(frames_dir, str(os.path.basename(openface_fdir[0])))
    # Keep the needed info from each OpenFace CSV output file.
    for filename in os.listdir(frames_dir):
        if filename.endswith(".csv"):
            aligned_frames = []
            filename = filename[:-4]
            aligned_frames_dir = frames_dir + "/" + filename + "_aligned"
            with open(frames_dir + "/" + filename + ".csv", mode='r') as csv_file:
                csv_reader = csv.DictReader(csv_file, delimiter=',')
                line_count = 0
                map_info = {}
                map_frame = {}
                map_info['video_name'] = filename
                readed_frames = 0
                discarded_frames = 0
                for row in csv_reader:
                    # Keep only frames OpenFace marked successful above threshold.
                    if int(row[' success']) == 1 and float(row[' confidence']) > threshold_detection:
                        aligned_frame = '{}/frame_det_00_{:06d}.png'.format(aligned_frames_dir, int(row['frame']))
                        aligned_frames.append(cv2.imread(aligned_frame))
                        map_frame[row['frame']] = row[' confidence']
                    else:
                        discarded_frames += 1
                    readed_frames = int(row['frame'])
                csv_file.close()
            map_info['total_frames'] = readed_frames
            map_info['discarded_frames'] = discarded_frames
            # NOTE(review): raises ZeroDivisionError when the CSV has no rows
            # (readed_frames stays 0) — confirm upstream guarantees rows exist.
            map_info['face_present_percentage'] = np.round((readed_frames - discarded_frames) / readed_frames, 2)
            map_info['detections_info'] = map_frame
            all_maps.append(map_info)
            aligned_videos.append(aligned_frames)
            if len(aligned_frames) > 0:
                shutil.rmtree(frames_dir + "/" + filename)
            # When everything is done, flush this video's cache entries.
            shutil.rmtree(frames_dir + "/" + filename + "_aligned")
            os.remove(frames_dir + "/" + filename + ".csv")
    return aligned_videos, all_maps
def get_output_size(path, fixed=True, w=720, h=480):
    """Return 'WxH' for a video: the fixed defaults or the probed stream size.

    :param path: video file path
    :param fixed: when True, just return w/h without probing the file
    :param w: fixed / fallback width
    :param h: fixed / fallback height
    :return: size string formatted 'WxH' (for ffmpeg's -s option)
    """
    if fixed:
        # No need to open the file at all in the fixed case.
        return '{}x{}'.format(w, h)
    cap = cv2.VideoCapture(path)
    try:
        if cap.isOpened():
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        else:
            # Fix: the old code left width/height unbound here, raising
            # NameError when the capture failed; fall back to the defaults.
            width, height = w, h
    finally:
        # Fix: the capture handle was never released before.
        cap.release()
    return '{}x{}'.format(width, height)
def split_video(item=None, split_len=16, partition='Train'):
    """Split one sample's frame sequence into fixed-length clips.

    Sequences longer than `split_len` produce several full clips; a
    remainder of at least split_len/2 frames is padded (non-val partitions
    only) by repeating the last frame. Sequences shorter than `split_len`
    are always padded up to a single clip.

    :param item: dict with 'frames' (list of frames) and 'label'
    :param split_len: clip length in frames
    :param partition: remainder padding is skipped when 'val' is in the name
    :return: list of {'frames': np.array, 'label': label} clips
    """
    splitted_video = []
    video = item['frames']
    label = item['label']
    len_video = len(video)
    steps = len_video // split_len
    rest = len_video % split_len
    i = 0
    # Video is at least one clip long.
    if steps > 0:
        # Emit every full-length clip.
        while i < steps:
            start = i * split_len
            stop = (i * split_len) + split_len
            actual = np.array(video[start:stop])
            item = {
                'frames': actual,
                'label': label,
            }
            splitted_video.append(item)
            i += 1
        pads = []
        # Pad the remainder into one extra clip (training partitions only).
        if 'val' not in partition.lower():
            if rest >= (split_len / 2):
                for i in range(split_len - rest):
                    pads.append(video[-1])
                # `stop` is the end of the last full clip emitted above.
                start = stop
                last = np.concatenate((video[start:], pads), axis=0)
                item = {
                    'frames': np.array(last),
                    'label': label,
                }
                splitted_video.append(item)
    # Video shorter than split_len: pad the whole sequence to one clip.
    elif steps == 0:
        rest = split_len - len_video
        pads = []
        for i in range(rest):
            pads.append(video[-1])
        last = np.concatenate((video, pads), axis=0)
        item = {
            'frames': np.array(last),
            'label': label,
        }
        splitted_video.append(item)
    return splitted_video
def top_left(f):
    """Top-left corner (x, y) of a detection's bounding box."""
    x, y = f['roi'][0], f['roi'][1]
    return (x, y)
def bottom_right(f):
    """Bottom-right corner (x + w, y + h) of a detection's bounding box."""
    x, y = f['roi'][0], f['roi'][1]
    w, h = f['roi'][2], f['roi'][3]
    return (x + w, y + h)
def enclosing_square(rect):
    """Return the square (x, y, side, side) that encloses `rect` = (x, y, w, h).

    The short side is grown to match the long one, shifting the origin by
    half the difference so the square stays centered on the original rect.
    """
    x, y, w, h = rect
    if w > h:
        # Width is the long side: grow the height, shift y up.
        y -= (w - h) // 2
        return x, y, w, w
    # Height is the long side (or already square): grow the width, shift x left.
    x -= (h - w) // 2
    return x, y, h, h
def add_margin(roi, qty):
    """Grow `roi` = (x, y, w, h) by `qty` pixels on every side."""
    x, y, w, h = roi[0], roi[1], roi[2], roi[3]
    return (x - qty, y - qty, w + 2 * qty, h + 2 * qty)
def cut(frame, roi):
    """Crop `roi` = (x, y, w, h) out of `frame`, zero-padding regions that
    fall outside the image.

    NOTE(review): the fully-inside fast path returns frame[y:y+h-1, x:x+w-1]
    (pB is an "internal point" and Python slicing excludes it), i.e. one
    pixel smaller than (h, w), while the padded path allocates a full
    (h, w) uint8 canvas — the two paths return different sizes. Confirm
    which behavior downstream code expects.
    """
    pA = (int(roi[0]), int(roi[1]))
    pB = (int(roi[0] + roi[2] - 1), int(roi[1] + roi[3] - 1))  # pB will be an internal point
    W, H = frame.shape[1], frame.shape[0]
    # Clamp the top-left corner to the image bounds.
    A0 = pA[0] if pA[0] >= 0 else 0
    A1 = pA[1] if pA[1] >= 0 else 0
    data = frame[A1:pB[1], A0:pB[0]]
    if pB[0] < W and pB[1] < H and pA[0] >= 0 and pA[1] >= 0:
        # ROI fully inside the image: return the slice directly.
        return data
    # ROI sticks out: paste the available pixels onto a zeroed canvas.
    w, h = int(roi[2]), int(roi[3])
    img = np.zeros((h, w, frame.shape[2]), dtype=np.uint8)
    offX = int(-roi[0]) if roi[0] < 0 else 0
    offY = int(-roi[1]) if roi[1] < 0 else 0
    np.copyto(img[offY:offY + data.shape[0], offX:offX + data.shape[1]], data)
    return img
def cut_centered(frame, shape=(224, 224), random=True, random_values=None, max_change_fraction=0.045,
                 only_narrow=False):
    """Crop an (optionally jittered) centered window of size `shape` from `frame`.

    :param frame: image array of shape (height, width, channels)
    :param shape: (height, width) of the crop window
    :param random: when True, jitter the window position and size
    :param random_values: optional precomputed (xy, wh) jitter pair
    :param max_change_fraction: jitter magnitude as a fraction of the crop size
    :param only_narrow: restrict the size jitter to shrinking only
    :return: the cropped view of `frame`
    """
    # Fix: every bound used to be derived from frame.shape[1] (the width),
    # so the vertical window was wrong for any non-square frame. Use the
    # height (shape[0]) for top/bottom and the width (shape[1]) for
    # left/right. (The unused `from PIL import Image` was also removed.)
    top = int((frame.shape[0] - shape[0]) / 2)
    bottom = int((frame.shape[0] + shape[0]) / 2)
    left = int((frame.shape[1] - shape[1]) / 2)
    right = int((frame.shape[1] + shape[1]) / 2)
    if random:
        if random_values is None:
            sigma = shape[0] * max_change_fraction
            xy = _random_normal_crop(2, sigma, mean=-sigma / 5).astype(int)
            wh = _random_normal_crop(2, sigma * 2, mean=sigma / 2, positive=only_narrow).astype(int)
        else:
            xy, wh = random_values
    else:
        xy = [0, 0]
        wh = [0, 0]
    return frame[(top + wh[0]):(bottom + wh[0]), (left + xy[0]):(right + xy[0]), :]
def pad(img):
    """Zero-pad `img` on the bottom/right so both spatial dims are equal.

    :param img: array of shape (d0, d1, channels)
    :return: square array of shape (max(d0, d1), max(d0, d1), channels),
             with the same dtype as the input
    """
    d0, d1, channels = img.shape
    if d0 == d1:
        return img
    size = max(d0, d1)
    # Fix: np.zeros defaulted to float64 before, silently promoting uint8
    # images on the padded path; preserve the input dtype instead.
    out = np.zeros((size, size, channels), dtype=img.dtype)
    np.copyto(out[0:d0, 0:d1], img)
    return out
def findRelevantFace(objs, W, H):
    """Pick the detection whose box center is nearest the image center.

    :param objs: iterable of detection dicts exposing obj['roi'] = (x, y, w, h)
    :param W: image width
    :param H: image height
    :return: the closest detection dict, or None when `objs` is empty
    """
    def _dist_sq(obj):
        # Squared distance from the box center to the image center.
        cx = obj['roi'][0] + (obj['roi'][2] / 2)
        cy = obj['roi'][1] + (obj['roi'][3] / 2)
        return (cx - (W / 2)) ** 2 + (cy - (H / 2)) ** 2

    return min(objs, key=_dist_sq, default=None)
# Design matrix for fitting a plane z = a*y + b*x + c over a
# FIT_PLANE_SIZ x FIT_PLANE_SIZ grid of normalized (y, x) coords in [0, 1].
tmp_A = []
FIT_PLANE_SIZ = 16
for y in np.linspace(0, 1, FIT_PLANE_SIZ):
    for x in np.linspace(0, 1, FIT_PLANE_SIZ):
        tmp_A.append([y, x, 1])
Amatrix = np.matrix(tmp_A)  # NOTE(review): np.matrix is deprecated in NumPy
def _fit_plane(im):
    """Least-squares fit of a plane to the image intensities.

    The image is converted to gray (if needed), downsampled to
    FIT_PLANE_SIZ x FIT_PLANE_SIZ, a plane z = a*y + b*x + c is fitted over
    normalized coordinates, and the plane is re-evaluated at the original
    resolution.

    :param im: 2-D grayscale image, or a multi-channel image (converted)
    :return: numpy array of the fitted plane, same height/width as `im`
    """
    original_shape = im.shape
    if len(im.shape) > 2 and im.shape[2] > 1:
        im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
    im = cv2.resize(im, (FIT_PLANE_SIZ, FIT_PLANE_SIZ))
    if im.dtype == np.uint8:
        im = im.astype(float)
    # Solve the normal equations: fit = (A^T A)^-1 A^T b
    A = Amatrix
    tmp_b = []
    for y in range(FIT_PLANE_SIZ):
        for x in range(FIT_PLANE_SIZ):
            tmp_b.append(im[y, x])
    b = np.matrix(tmp_b).T
    fit = (A.T * A).I * A.T * b
    # Rescale the slopes from normalized [0, 1] coords to pixel coords.
    fit[0] /= original_shape[0]
    fit[1] /= original_shape[1]
    def LR(x, y):
        # Evaluate the plane on the outer product of the coordinate axes.
        return np.repeat(fit[0] * x, len(y), axis=0).T + np.repeat(fit[1] * y, len(x), axis=0) + fit[2]
    xaxis = np.array(range(original_shape[1]))
    yaxis = np.array(range(original_shape[0]))
    imest = LR(yaxis, xaxis)
    return np.array(imest)
def linear_balance_illumination(im):
    """Remove a linear illumination gradient from an image.

    Fits a plane to the luminance channel and subtracts it (re-adding its
    mean), flattening uneven lighting while keeping overall brightness.

    :param im: grayscale or BGR image (uint8 is converted to float)
    :return: balanced image, reshaped to the working shape (float values)
    """
    if im.dtype == np.uint8:
        im = im.astype(float)
    if len(im.shape) == 2:
        # Promote grayscale to a single-channel 3-D array.
        im = np.expand_dims(im, 2)
    if im.shape[2] > 1:
        # Work on the luminance channel only.
        im = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
    imout = im.copy()
    imest = _fit_plane(im[:, :, 0])
    # Subtract the gradient, keep the mean brightness.
    imout[:, :, 0] = im[:, :, 0] - imest + np.mean(imest)
    if im.shape[2] > 1:
        imout = cv2.cvtColor(imout, cv2.COLOR_YUV2BGR)
    return imout.reshape(im.shape)
def mean_std_normalize(inp):
    """Normalize `inp` to zero mean and unit variance.

    :param inp: numpy array
    :return: (inp - mean) / std, with std floored at 0.001 so constant
             inputs do not divide by ~zero
    """
    flat = inp.flatten()
    std = flat.std()
    if std < 0.001:
        # Fix: the old code computed this floor but then divided by the raw
        # std anyway, producing NaN/inf for (near-)constant inputs.
        std = 0.001
    return (inp - flat.mean()) / std
def _random_normal_crop(n, maxval, positive=False, mean=0):
gauss = np.random.normal(mean, maxval / 2, (n, 1)).reshape((n,))
gauss = np.clip(gauss, mean - maxval, mean + maxval)
if positive:
return np.abs(gauss)
else:
return gauss
def random_change_image(img, random_values=None):
    """Randomly perturb brightness/contrast and maybe horizontally flip an image.

    :param img: image array (any numeric dtype; result is uint8)
    :param random_values: optional (contrast, brightness, flip_flag) triple;
                          drawn fresh on every call when omitted
    :return: the perturbed uint8 image
    """
    # Fix: the defaults were previously evaluated once at `def` time (Python
    # evaluates default arguments at function definition), so every call
    # shared the same "random" perturbation. Draw them per call instead.
    if random_values is None:
        random_values = (_random_normal_crop(1, 0.5, mean=1)[0],
                         _random_normal_crop(1, 48)[0],
                         random.randint(0, 1))
    contrast, brightness, flip = random_values
    # Brightness and contrast around the mid-gray 128.
    img = (img - 128.0) * contrast + 128.0 + brightness
    img = np.clip(img, 0, 255)
    img = img.astype(np.uint8)
    # Horizontal flip.
    if flip:
        img = np.fliplr(img)
    return img
def random_change_roi(roi, max_change_fraction=0.045, only_narrow=False, random_values=None):
    """Randomly jitter a bounding box `roi` = (x, y, w, h).

    The offsets follow a normal distribution centered near zero, so small
    changes are the most likely.

    :param roi: bounding box (x, y, w, h)
    :param max_change_fraction: jitter magnitude as a fraction of the height
    :param only_narrow: restrict the size jitter to shrinking only
    :param random_values: optional precomputed (xy, wh) pair
    :return: the jittered box (x + dx, y + dy, w - dw, h - dh)
    """
    sigma = roi[3] * max_change_fraction
    if random_values is None:
        xy = _random_normal_crop(2, sigma, mean=-sigma / 5).astype(int)
        wh = _random_normal_crop(2, sigma * 2, mean=sigma / 2, positive=only_narrow).astype(int)
    else:
        xy, wh = random_values
    x, y, w, h = roi
    return (x + xy[0], y + xy[1], w - wh[0], h - wh[1])
def roi_center(roi):
    """Return the integer center (cx, cy) of an (x, y, w, h) ROI."""
    x, y, w, h = roi
    return (x + w // 2, y + h // 2)
def random_image_rotate(img, rotation_center, random_angle_deg=None):
    """Rotate img about rotation_center by a random (or supplied) angle.

    random_angle_deg: optional fixed angle in degrees; when None a fresh
    random angle is drawn per call (the original evaluate-once default froze
    a single angle at import time).
    """
    if random_angle_deg is None:
        random_angle_deg = _random_normal_crop(1, 10)[0]
    M = cv2.getRotationMatrix2D(rotation_center, random_angle_deg, 1.0)
    # cv2 dsize is (width, height); img.shape[0:2] is (height, width), which
    # silently scrambled non-square images through the final reshape
    nimg = cv2.warpAffine(img, M, dsize=(img.shape[1], img.shape[0]))
    return nimg.reshape(img.shape)
def random_image_skew(img, rotation_center, random_skew=None):
    """Apply a random (or supplied) shear to img.

    rotation_center is currently unused (kept for signature compatibility).
    random_skew: optional 2-element shear factors; when None fresh positive
    factors are drawn per call (the original evaluate-once default froze one
    skew at import time).
    """
    if random_skew is None:
        random_skew = _random_normal_crop(2, 0.1, positive=True)
    s = random_skew
    M = np.array([[1, s[0], 1], [s[1], 1, 1]])
    # cv2 dsize is (width, height); shape[0:2] is (height, width)
    nimg = cv2.warpAffine(img, M, dsize=(img.shape[1], img.shape[0]))
    return nimg.reshape(img.shape)
def equalize_hist(img):
    """Histogram-equalize img; color (BGR) images are equalized on luma only."""
    is_color = len(img.shape) > 2 and img.shape[2] > 1
    if not is_color:
        return cv2.equalizeHist(img)
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
def draw_emotion(y, w, h):
    """Render one "label: score" text line per emotion on a black image.

    y: iterable of 7 scores, drawn in EMOTIONS order, 20 px apart.
    Returns a (w, h, 3) uint8 image.
    NOTE(review): np.zeros((w, h, 3)) makes w the row count (image height) --
    confirm the caller's (w, h) convention.

    Removed dead locals (COLORS, barh, MAXEMO, p1/p2) left over from the
    commented-out bar-chart rendering; they had no effect on the output.
    """
    EMOTIONS = ["Angry", "Disgust", "Fear", "Happy", "Neutral", "Sad", "Surprise"]
    emotionim = np.zeros((w, h, 3), dtype=np.uint8)
    for i, yi in enumerate(y):
        cv2.putText(emotionim, "%s: %.1f" % (EMOTIONS[i], yi), (0, i * 20 + 14), cv2.FONT_HERSHEY_SIMPLEX, 0.6,
                    (255, 255, 255))
    return emotionim
def show_frame(frame, text):
    """Overlay text on frame and display it, blocking until a key is pressed."""
    font = cv2.FONT_HERSHEY_SIMPLEX
    position = (10, 20)  # top-left corner of the text, in pixels
    fontScale = 0.3
    fontColor = (255, 255, 255)
    lineType = 1
    # NOTE(review): the 7th positional argument of cv2.putText is `thickness`,
    # not lineType -- the variable name suggests a mix-up; confirm intent.
    cv2.putText(frame,
                text,
                position,
                font,
                fontScale,
                fontColor,
                lineType)
    cv2.imshow('frame', frame)
    cv2.waitKey(0)  # block until any key press
|
from sklearn import svm
class Classifier:
    """Thin linear-SVM wrapper that persists its model on a configuration object."""

    def __init__(self, configuration):
        self.configuration = configuration
        self.classifier = svm.SVC(kernel='linear', C=1000)

    def load_configuration(self):
        """Restore a previously saved classifier from the configuration, if any."""
        if self.configuration.classifier:
            self.classifier = self.configuration.classifier

    def save_configuration(self):
        """Persist the current classifier onto the configuration object."""
        self.configuration.classifier = self.classifier

    def train(self):
        """Fit on the configuration's normalized data and persist the model."""
        features = self.configuration.data_values_normalized
        labels = self.configuration.data_labels
        self.classifier.fit(features, labels)
        self.save_configuration()

    def predict(self, data):
        """Return predictions for data from the current model."""
        return self.classifier.predict(data)
|
import time
from fastapi import status, HTTPException
from src.service.lesson_interface import LessonsInterface
def hello_world():
    """Return the canonical greeting string."""
    return "Hello World!"
def why_python():
    """Return a tongue-in-cheek comparison of Tcl, Perl and Python (verbatim lesson text)."""
    return """
    Tcl -- It is short (only three letters) and does a suprising
        amount given that it doesn't have a vowel. It can be
        pronounced "Tickle", which is a command.
    Perl -- Bigger and has a vowel. However you'll note that it isn't
        a common english word; you'll have to know what you're doing to
        use it, especially with spell-checkers which otherwise complain
        that it looks like noise.
    Python -- This is a Real English Word (honest, look it up!) that
        happens to refer to a type of snake, which you'll notice is an
        object. With the two vowels, python is quite readable.
    """
def _add_elements(element=[]):
    # NOTE: the mutable default argument is INTENTIONAL -- this helper exists
    # to demonstrate the shared-default-list pitfall: the same list object is
    # reused across default calls, so "New" accumulates. Do not "fix" it.
    element.append("New")
    return element
def mutable_default_attributes():
    """Demonstrate the mutable-default pitfall: two default calls share one list."""
    _add_elements()           # first call appends to the shared default list
    return _add_elements()    # second call appends again and returns it
def _range(alphabet: list):
    """Concatenate alphabet via range(len(...)) indexing (deliberately naive, for timing).

    Returns (elapsed_seconds, concatenated_string).
    """
    output = ""
    start = time.time()
    for i in range(len(alphabet)):
        output += alphabet[i]
    end = time.time()
    exec_time = end - start
    return exec_time, output
def _enumerator(alphabet: list):
    """Concatenate alphabet via enumerate (index unused; timing comparison only).

    Returns (elapsed_seconds, concatenated_string).
    """
    output = ""
    start = time.time()
    for i, letter in enumerate(alphabet):
        output += letter
    end = time.time()
    exec_time = end - start
    return exec_time, output
def _no_specials(alphabet: list):
    """Concatenate alphabet via plain iteration (baseline for the timing demo).

    Returns (elapsed_seconds, concatenated_string).
    """
    output = ""
    start = time.time()
    for letter in alphabet:
        output += letter
    end = time.time()
    exec_time = end - start
    return exec_time, output
def enumerator_over_range():
    """Time the three concatenation styles on a-z and report the results."""
    # initialize alphabet
    alphabet_array = list(map(chr, range(ord("a"), ord("z") + 1)))
    # run different algorithms and measure them
    r_time, alphabet_ranges = _range(alphabet_array)
    e_time, alphabet_enumerate = _enumerator(alphabet_array)
    ns_time, alphabet_nospecials = _no_specials(alphabet_array)
    return f"""Enumerate alphabet: {alphabet_enumerate}; Time: {e_time}s
    Range alphabet: {alphabet_ranges}; Time: {r_time}s
    No Specials alphabet: {alphabet_nospecials}; Time: {ns_time}s"""
class LessonOneInterface(LessonsInterface):
    """Lesson 1 endpoint: runs a handful of introductory Python demos."""

    def execute(self, action: str, url: str) -> str:
        """Overrides LessonsInterface.execute()"""
        if action != "run":
            # following fast api choice of 422 over 400 --> https://github.com/tiangolo/fastapi/issues/643
            raise HTTPException(
                status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=f"ERROR: Action not allowed: {action} for lesson 1"
            )
        return self._run_lesson_one()

    def _run_lesson_one(self) -> str:
        """Assemble and return the lesson 1 walkthrough text."""
        return f"""
    1. Simple Hello World
    {hello_world()}
    #######################################
    2. Why Python?
    {why_python()}
    #######################################
    3. Mutable default attributes - what do you get after calling
    a method with _add_elements(element=[]) more than once?
    {mutable_default_attributes()}
    #######################################
    Is Enumerator better optimized than range?
    {enumerator_over_range()}
    Yes, but if you dont need that index just use in <collection> syntax
    #######################################
    """
|
#!/usr/bin/env python
# Swap the y and z coordinates of every "ND" (node) line of a 2dm mesh file
# given as argv[1], writing the result to <basename>_swapped.2dm.
import sys

filename = sys.argv[1]
nfile = filename.split('.')[0] + '_swapped.2dm'
with open(nfile, 'w') as ofile:
    with open(filename) as ifile:
        # iterate the file lazily instead of materializing readlines()
        for line in ifile:
            parts = line.split()
            # guard: the original indexed line.split()[0] directly, which
            # raised IndexError on blank lines
            if parts and parts[0] == "ND":
                h, n, x, y, z = parts
                ofile.write(' '.join([h, n, x, z, y]) + '\n')
            else:
                ofile.write(line)
|
#!/usr/bin/env python
"""
Test file for creating data training module
"""
import sys
import pickle
import string
import random
import re
import math
import nltk
import numpy as np
from nltk import FreqDist
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
def import_all_featuresets(num_features, num_tweets):
    """Import and return featuresets from subset of tweets.

    Concatenates every pickled batch stored in the file; returns the combined
    list, or False when the pickle file cannot be opened (the original error
    contract). Fixes: the file handle is now closed (it leaked before), the
    except clause is narrowed from a bare except, and the error message names
    the actual file.
    """
    path = 'modules/jar_of_pickles/all_featuresets{}_{}.pickle'.format(num_features, num_tweets)
    all_featuresets = []
    try:
        f = open(path, 'rb')
    except OSError:
        print('Error: Loading file all_featuresets{}_{}.pickle'.format(num_features, num_tweets))
        return False
    with f:
        while True:
            try:
                all_featuresets = all_featuresets + pickle.load(f)
            except EOFError:
                return all_featuresets
def import_batch_featuresets(f):
    """Load and return the next pickled batch from f, or False at end of file."""
    try:
        return pickle.load(f)
    except EOFError:
        return False
def main():
    """Incrementally train a MultinomialNB sentiment classifier from pickled featuresets.

    Optional argv: [1] feature-vector width, [2] total tweet count. Streams
    fixed-size batches from a pickle file, partial_fit()s on each, pickles
    the classifier, then (self-admittedly dodgy) scores it on the last batch.
    """
    pos_tweets = []  # NOTE(review): unused in this function
    neg_tweets = []  # NOTE(review): unused in this function
    num_features = 3000
    num_tweets = 10000
    MNBclassifier = MultinomialNB()
    """check arguments"""
    if len(sys.argv) > 1:
        num_features = int(sys.argv[1])
        print('arg1: num_features = {}'.format(num_features))
    if len(sys.argv) > 2:
        num_tweets = int(sys.argv[2])
        print('arg2: num_tweets = {}'.format(num_tweets))
    """split training into batch processes"""
    batch_size = 5000
    num_batches = math.ceil(num_tweets/batch_size)
    # NOTE(review): X_train/y_train are sized for a full batch; a smaller
    # final batch would leave stale rows from the previous batch in place.
    X_train = np.zeros((batch_size, num_features), dtype=bool)
    y_train = np.empty(batch_size, dtype='<U3')
    all_classes = np.array(('pos', 'neg'))
    print('number of batches: {}\tbatch size: {}'.format(num_batches, batch_size))
    """opening file"""
    f = open('modules/jar_of_pickles/all_featuresets{}_{}.pickle'.format(num_features, num_tweets), 'rb')
    """import batch of featuresets"""
    for batch_num in range(num_batches):
        print('importing batch {}/{}...'.format(batch_num+1, num_batches), end='')
        batch = import_batch_featuresets(f)
        print('DONE')
        """partial training of classifier"""
        print('training on batch {}/{}...'.format(batch_num+1, num_batches), end='')
        for index, tweet in enumerate(batch):
            featureset = tweet[0]
            sentiment = tweet[1]
            X_train[index] = featureset
            y_train[index] = sentiment
        MNBclassifier.partial_fit(X_train, y_train, classes=all_classes)
        print('DONE')
    f.close()
    """save classifier"""
    print('saving classifier...', end='')
    f = open('modules/jar_of_pickles/classifier_stanford{}_{}.pickle'.format(num_features, num_tweets), 'wb')
    pickle.dump(MNBclassifier, f)
    f.close()
    print('DONE')
    """testing"""
    # NOTE(review): the "test" set is the final TRAINING batch, so this
    # accuracy is optimistic -- acknowledged by the DODGY print below.
    testing_set = batch
    f.close()  # NOTE(review): f was already closed above; this is redundant
    X = np.zeros((len(testing_set), num_features), dtype=bool)
    y = np.empty(len(testing_set), dtype='<U3')
    for index, tweet in enumerate(testing_set):
        featureset = tweet[0]
        sentiment = tweet[1]
        X[index] = featureset
        y[index] = sentiment
    print('DONE')
    print('(DODGY) testing...', end='')
    accuracy = MNBclassifier.score(X, y)
    print('DONE')
    print('acc = {}'.format(accuracy))
if __name__ == "__main__":
    main()
|
import os
import json
import numpy as np
import pandas as pd
from itertools import chain
from keras import optimizers
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model
from keras.layers import Input
from keras.layers import Dense, GlobalAveragePooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
csv_file = "../../CHESTXRAY/Data_Entry_2017.csv"  # raw NIH metadata CSV
csv_file_encoded = "Data_Entry_2017_Encoded.csv"  # shuffled + labelled copy written by prepare_dataframe()
image_dir = "../../images"
image_col = "Image Index" # image name column in csv
label_col = "label" # label column in csv
# NOTE(review): 14 pathology classes are listed here (nb_classes=14), but
# prepare_dataframe() emits binary NORMAL/ABNORMAL labels -- confirm which
# labelling the softmax head should match.
classes = ['Atelectasis','Cardiomegaly','Consolidation','Edema','Effusion',
           'Emphysema','Fibrosis','Hernia','Infiltration','Mass','Nodule',
           'Pleural_Thickening','Pneumonia','Pneumothorax']
nb_classes = len(classes)
weights_pretrained = "imagenet"  # start from ImageNet-pretrained InceptionV3
image_shape = (299, 299)  # InceptionV3 native input resolution
slices = [0.8, 0.1, 0.1] # train/valid/test
epochs = 50
batch_size = 32
def build_model():
    """Build an InceptionV3-based classifier with a fresh dense head.

    Returns an uncompiled Keras Model; when CUDA_VISIBLE_DEVICES lists more
    than one device the model is wrapped for multi-GPU training.
    """
    base_model = InceptionV3(weights=weights_pretrained, include_top=False)
    nb_gpus = len(os.getenv("CUDA_VISIBLE_DEVICES", "1").split(","))
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(2048, activation="relu")(x)
    predictions = Dense(nb_classes, activation="softmax")(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    if nb_gpus > 1:
        # local import: multi_gpu_model was referenced here but never
        # imported, which raised NameError on multi-GPU hosts
        from keras.utils import multi_gpu_model
        model = multi_gpu_model(model, gpus=nb_gpus)
    return model
def get_generator(dataframe, horizontal_flip=False, shuffle=True):
    """Build a Keras image generator over the rows of dataframe.

    Images are loaded from image_dir (filenames in image_col), preprocessed
    with InceptionV3's preprocess_input, and labelled from label_col as
    one-hot categorical batches.
    """
    datagen = ImageDataGenerator(preprocessing_function=preprocess_input, horizontal_flip=horizontal_flip)
    generator = datagen.flow_from_dataframe(dataframe=dataframe,
                                            directory=image_dir,
                                            x_col=image_col,
                                            y_col=label_col,
                                            has_ext=True,
                                            target_size=image_shape, # (height, weight)
                                            batch_size=batch_size,
                                            shuffle=shuffle, # shuffle images
                                            class_mode="categorical", # class mode
                                            save_to_dir=None) # save augmented images to local
    return generator
def save_history(history):
    """Dump a Keras History object's metrics dict to history.json.

    Opens the file in text mode: json.dump writes str, so the original "wb"
    mode raised TypeError under Python 3.
    """
    with open("history.json", "w") as f:
        json.dump(history.history, f)
def train(dataframe, model):
    """Compile and fit model on the train/validation splits of dataframe.

    Splits by the module-level `slices` fractions; the final (test) slice is
    not consumed here. Saves weights after every epoch plus the history JSON.
    """
    nb_records, _ = dataframe.shape
    train_generator = get_generator(dataframe=dataframe.iloc[:int(nb_records*slices[0])],
                                    horizontal_flip=True,
                                    shuffle=True)
    # NOTE(review): the validation generator also augments (horizontal_flip=True);
    # validation data is conventionally left unaugmented -- confirm intent.
    valid_generator = get_generator(dataframe=dataframe.iloc[int(nb_records*slices[0]) : int(nb_records*(slices[0]+slices[1]))],
                                    horizontal_flip=True,
                                    shuffle=True)
    # checkpoint every epoch (save_best_only=False), weights only
    checkpoint = ModelCheckpoint("weights_{epoch:03d}_{val_acc:.4f}.hdf5",
                                 monitor="val_acc",
                                 verbose=1,
                                 save_best_only=False,
                                 mode="auto",
                                 save_weights_only=True)
    adam = optimizers.Adam(lr=0.001)
    model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
    history = model.fit_generator(train_generator, epochs=epochs, validation_data=valid_generator, callbacks=[checkpoint])
    save_history(history)
def prepare_dataframe():
    """Load the CXR metadata CSV, shuffle it, attach binary labels, save a copy.

    Rows whose "Finding Labels" equals "No Finding" become NORMAL, everything
    else ABNORMAL; the labelled copy is written to csv_file_encoded.
    """
    df = pd.read_csv(csv_file)
    df = df.sample(frac=1)  # shuffle rows in dataframe
    is_normal = df["Finding Labels"] == "No Finding"
    df[label_col] = np.where(is_normal, "NORMAL", "ABNORMAL")  # make binary labels
    df.to_csv(csv_file_encoded)
    return df
if __name__ == "__main__":
    # End-to-end run: prepare the shuffled/labelled dataframe, build the
    # network, then fine-tune it on the train/validation splits.
    df = prepare_dataframe()
    model = build_model()
    train(dataframe=df, model=model)
|
import greenthumb
import json
import sched
from greenthumb import util
from greenthumb.models.mongo import (users, gardens, plant_types, user_plants)
from flask import (abort, request, session, jsonify)
import datetime
import bson
"""
GreenThumb REST API: usergarden.
GreenThumb Group <greenthumb441@umich.edu>
"""
WATERING_DESCRIPTION = 'Water it.'


def is_valid_id(object_id):
    """Return True when object_id parses as a BSON ObjectId, else False."""
    try:
        bson.objectid.ObjectId(object_id)
    except Exception:
        return False
    return True
@greenthumb.app.route('/api/v1/usergarden/', methods=['GET'])
def get_user_gardens():
    '''
    Route which returns a list of all gardens in json format
    '''
    # 403 when not logged in, 401 when the session email has no user record.
    if 'email' not in session:
        abort(403)
    user_gardens = []
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        # NOTE(review): emptiness is tested via the queryset's repr string;
        # confirm this matches the ORM's behaviour (truthiness may be safer).
        if str(user) == '[]':
            abort(401)
        user = user[0]
        for garden_id in user.gardens:
            garden = gardens.objects(id=garden_id)
            # NOTE(review): a queryset is unlikely to ever equal a plain list,
            # so this guard may always be True -- verify empty lookups.
            if garden != []:
                garden = garden[0]
                user_gardens.append(json.loads(garden.to_json()))
    return jsonify(user_gardens), 200
@greenthumb.app.route('/api/v1/usergarden/<string:garden_id>/', methods=['GET', 'PUT', 'DELETE'])
def get_garden(garden_id: str):
    '''
    Route which returns a json object with a single garden
    '''
    # GET serializes the garden, PUT updates its fields, DELETE removes it
    # together with every plant inside. All branches require a logged-in
    # session (403) and ownership of the garden (401).
    if 'email' not in session:
        abort(403)
    if not is_valid_id(garden_id):
        abort(401)
    if request.method == 'DELETE':
        with util.MongoConnect():
            user = users.objects(email=session['email'])
            if str(user) == '[]':
                abort(401)
            user = user[0]
            # need to do str(i) because user.gardens is a list of ObjectIdFields
            if garden_id in [str(i) for i in user.gardens]:
                try:
                    garden = gardens.objects(id=garden_id)
                except Exception as e:
                    abort(404)
                if str(garden) == '[]':
                    abort(404)
                garden = garden[0]
                user.gardens.remove(garden.id)
                # delete all plants in the garden
                for plant_id in garden.plants:
                    plant = user_plants.objects(id=plant_id)
                    if str(plant) == '[]':
                        continue
                    plant = plant[0]
                    # NOTE(review): plant_type is looked up but not otherwise
                    # used before deleting -- confirm the lookup is needed.
                    plant_type = plant_types.objects(id=plant.plant_type_id)
                    if str(plant_type) == '[]':
                        plant.delete()
                        continue
                    plant_type = plant_type[0]
                    plant.delete()
                garden.delete()
                user.save()
            else:
                abort(401)
    elif request.method == 'PUT':
        # full replace of the garden's editable fields; all must be present
        expected_fields = ['name', 'address', 'latitudetl', 'longitudetl', 'latitudebr', 'longitudebr']
        for field in expected_fields:
            if field not in request.json:
                abort(401)
        with util.MongoConnect():
            user = users.objects(email=session['email'])
            if str(user) == '[]':
                abort(401)
            user = user[0]
            # need to do str(i) because user.gardens is a list of ObjectIdFields
            if garden_id in [str(i) for i in user.gardens]:
                garden = gardens.objects(id=garden_id)
                if str(garden) == '[]':
                    abort(404)
                garden = garden[0]
                garden.name = request.json['name']
                garden.address = request.json['address']
                garden.topleft_lat = request.json['latitudetl']
                garden.topleft_long = request.json['longitudetl']
                garden.bottomright_lat = request.json['latitudebr']
                garden.bottomright_long = request.json['longitudebr']
                garden.save()
            else:
                abort(401)
    else:
        # GET: serialize the garden and return immediately
        garden = {}
        with util.MongoConnect():
            user = users.objects(email=session['email'])
            if str(user) == '[]':
                abort(401)
            user = user[0]
            if garden_id in [str(i) for i in user.gardens]:
                garden = gardens.objects(id=garden_id)
                if str(garden) == '[]':
                    abort(404)
                garden = json.loads(garden[0].to_json())
                return jsonify(garden)
    return "", 200
@greenthumb.app.route('/api/v1/usergarden/get_plants/', methods=['GET'])
def get_user_plants():
    '''
    Route which returns a list of all of the user's plants in json format
    '''
    if 'email' not in session:
        abort(403)
    plants_list = []
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        # add all plants in each of the user's gardens
        for garden_id in user.gardens:
            garden = gardens.objects(id=garden_id)
            # NOTE(review): queryset != [] is likely always True; confirm
            # empty lookups don't raise IndexError at garden[0].
            if garden != []:
                garden = garden[0]
                for plant_id in garden.plants:
                    plant = user_plants.objects(id=plant_id)
                    if plant != []:
                        plant = plant[0]
                        plants_list.append(plant.to_dict())
    return jsonify(plants_list), 200
@greenthumb.app.route('/api/v1/usergarden/get_plants/<string:plant_id>/', methods=['GET'])
def get_user_plants_with_id(plant_id):
    '''
    Route which returns the specified plant in json format
    '''
    # Scans every garden of the logged-in user for the plant id; 404 when
    # the referenced plant record is missing, empty dict when never found.
    if 'email' not in session:
        abort(403)
    if not is_valid_id(plant_id):
        abort(401)
    plants_list = {}
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        # add all plants in each of the user's gardens
        for garden_id in user.gardens:
            garden = gardens.objects(id=garden_id)
            if garden != []:
                garden = garden[0]
                for user_plant_id in garden.plants:
                    if plant_id != str(user_plant_id):
                        continue
                    try:
                        plant = user_plants.objects(id=plant_id)
                    except Exception as e:
                        abort(404)
                    if str(plant) == '[]':
                        abort(404)
                    plant = plant[0]
                    plants_list = plant.to_dict()
    return jsonify(plants_list), 200
@greenthumb.app.route('/api/v1/usergarden/add_garden/', methods=['POST'])
def add_garden_location():
    '''
    Create a new garden from the posted JSON and attach it to the logged-in
    user; returns the new garden's id.
    '''
    expected_fields = ['name', 'address', 'latitudetl',
                       'longitudetl', 'latitudebr', 'longitudebr']
    if 'email' not in session:
        abort(403)
    # check that the right info was provided, else 401
    for field in expected_fields:
        if field not in request.json:
            abort(401)
    with util.MongoConnect():
        # if user not in database 401
        user = greenthumb.models.mongo.users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        body = request.json
        # add garden to db
        garden = greenthumb.models.mongo.gardens(name=body['name'], address=body['address'], topleft_lat=body['latitudetl'],
                                                 topleft_long=body['longitudetl'], bottomright_lat=body['latitudebr'], bottomright_long=body['longitudebr'], plants=[])
        garden.save()
        # add garden id to user's garden list
        user.gardens.append(str(garden.id))
        user.save()
    return {"id": str(garden.id)}, 200
@greenthumb.app.route('/api/v1/usergarden/<string:garden_id>/add_plant/', methods=['POST'])
def add_plant_to_garden(garden_id: str):
    '''
    Create a plant from the posted JSON inside the given garden; returns the
    new plant's id. Newer fields default when missing for old clients.
    '''
    expected_fields = ['plant_type_id', 'latitude',
                       'longitude', 'light_duration', 'light_intensity', 'last_watered', 'name', 'price', 'outdoors']
    if 'email' not in session:
        abort(403)
    if not is_valid_id(garden_id):
        abort(401)
    # check that the right info was provided, else 401
    for field in expected_fields:
        if field not in request.json:
            # Don't reject if still using the old set of expected fields
            if field in ['light_duration', 'light_intensity', 'price']:
                request.json[field] = 0
            elif field == 'name':
                request.json['name'] = ''
            elif field == 'outdoors':
                request.json['outdoors'] = False
            else:
                abort(401)
    if not is_valid_id(request.json['plant_type_id']):
        abort(401)
    # normalize timestamps without fractional seconds so strptime's %f matches
    if request.json['last_watered'].find('.') == -1:
        request.json['last_watered'] = request.json['last_watered'] + '.000000'
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        # need to do str(i) because user.gardens is a list of ObjectIdFields
        if garden_id in [str(i) for i in user.gardens]:
            try:
                garden = gardens.objects(id=garden_id)
            except Exception as e:
                abort(404)
            if str(garden) == '[]':
                abort(404)
            garden = garden[0]
            # validate that the referenced plant type exists (404 otherwise)
            try:
                plant_type = plant_types.objects(id=request.json['plant_type_id'])
            except Exception as e:
                abort(404)
            if str(plant_type) == '[]':
                abort(404)
            plant_type = plant_type[0]
            #request.json['last_watered']
            user_plant = user_plants(plant_type_id=request.json['plant_type_id'],
                                     latitude=request.json['latitude'],
                                     longitude=request.json['longitude'],
                                     light_duration=request.json['light_duration'],
                                     light_intensity=request.json['light_intensity'],
                                     name=request.json['name'],
                                     price=request.json['price'],
                                     outdoors=request.json['outdoors'],
                                     last_watered=datetime.datetime.strptime(request.json['last_watered'], '%Y-%m-%d %H:%M:%S.%f')).save()
            garden.plants.append(str(user_plant.id))
            garden.save()
            return {"id": str(user_plant.id)}, 200
        else:
            abort(401)
@greenthumb.app.route('/api/v1/usergarden/<string:garden_id>/edit_plant/<string:plant_id>/', methods=['PUT'])
def edit_plant_in_garden(garden_id: str, plant_id: str):
    '''
    Replace the editable fields of a plant in the given garden with the
    posted JSON. Newer fields default when missing for old clients.
    '''
    expected_fields = ['plant_type_id', 'latitude',
                       'longitude', 'light_duration', 'light_intensity', 'last_watered', 'name', 'price', 'outdoors']
    if 'email' not in session:
        abort(403)
    if not is_valid_id(garden_id):
        abort(401)
    if not is_valid_id(plant_id):
        abort(401)
    # check that the right info was provided, else 401
    for field in expected_fields:
        if field not in request.json:
            # Don't reject if still using the old set of expected fields
            if field in ['light_duration', 'light_intensity', 'price']:
                request.json[field] = 0
            elif field == 'name':
                request.json['name'] = ''
            elif field == 'outdoors':
                request.json['outdoors'] = False
            else:
                abort(401)
    if not is_valid_id(request.json['plant_type_id']):
        abort(401)
    # normalize timestamps without fractional seconds so strptime's %f matches
    if request.json['last_watered'].find('.') == -1:
        request.json['last_watered'] = request.json['last_watered'] + '.000000'
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        # need to do str(i) because user.gardens is a list of ObjectIdFields
        if garden_id in [str(i) for i in user.gardens]:
            try:
                garden = gardens.objects(id=garden_id)
            except Exception as e:
                abort(404)
            if str(garden) == '[]':
                abort(404)
            garden = garden[0]
            # same as above
            if plant_id not in [str(i) for i in garden.plants]:
                abort(401)
            # validate that the referenced plant type exists (404 otherwise)
            try:
                plant_type = plant_types.objects(id=request.json['plant_type_id'])
            except Exception as e:
                abort(404)
            if str(plant_type) == '[]':
                abort(404)
            plant_type = plant_type[0]
            try:
                plant = user_plants.objects(id=plant_id)
            except Exception as e:
                abort(404)
            if str(plant) == '[]':
                abort(404)
            plant = plant[0]
            plant.plant_type_id = plant_type.id
            plant.latitude = request.json['latitude']
            plant.longitude = request.json['longitude']
            plant.light_duration = request.json['light_duration']
            plant.light_intensity = request.json['light_intensity']
            plant.name = request.json['name']
            plant.price = request.json['price']
            plant.outdoors = request.json['outdoors']
            plant.last_watered = datetime.datetime.strptime(request.json['last_watered'], '%Y-%m-%d %H:%M:%S.%f')
            plant.save()
        else:
            abort(401)
    return "", 200
@greenthumb.app.route('/api/v1/usergarden/<string:garden_id>/delete_plant/<string:plant_id>/', methods=['DELETE'])
def delete_plant_in_garden(garden_id: str, plant_id: str):
    '''
    Remove the given plant from the garden's plant list and delete its record.
    '''
    if 'email' not in session:
        abort(403)
    if not is_valid_id(garden_id):
        abort(401)
    if not is_valid_id(plant_id):
        abort(401)
    with util.MongoConnect():
        user = users.objects(email=session['email'])
        if str(user) == '[]':
            abort(401)
        user = user[0]
        # need to do str(i) because user.gardens is a list of ObjectIdFields
        if garden_id in [str(i) for i in user.gardens]:
            try:
                garden = gardens.objects(id=garden_id)
            except Exception as e:
                abort(404)
            if str(garden) == '[]':
                abort(404)
            garden = garden[0]
            # same as above
            if plant_id not in [str(i) for i in garden.plants]:
                abort(401)
            try:
                plant = user_plants.objects(id=plant_id)
            except Exception as e:
                abort(404)
            if str(plant) == '[]':
                abort(404)
            plant = plant[0]
            # NOTE(review): plant_type is fetched only to validate existence
            # before deletion (401 when missing) -- confirm this is intended.
            plant_type = plant_types.objects(id=str(plant.plant_type_id))
            if str(plant_type) == '[]':
                abort(401)
            plant_type = plant_type[0]
            #goes in place of "Water it": plant_type["watering_description"]
            garden.plants.remove(plant.id)
            garden.save()
            plant.delete()
        else:
            abort(401)
    return "", 200
from django.urls import path, include
from rest_framework import routers
from rest_framework.authtoken import views
## Import views from the Rest API
from restAPI.views import (
TeamView,
ProjectView,
DefectView,
ReviewView,
ProductView,
PhaseTypeView,
UserView
)
"""
*** This file is used to route all of the API urls.
There are two mandatory arguments to the register() method:
The URL prefix to use for this set of routes and viewset
The viewset class
Official : https://www.django-rest-framework.org/api-guide/routers/
"""
app_name = 'restAPI'
# One DefaultRouter generates the CRUD URL patterns for every registered
# viewset; the first register() argument becomes the URL prefix.
router = routers.DefaultRouter()
router.register('Team', TeamView)
router.register('Project', ProjectView)
router.register('Review', ReviewView)
router.register('Defect', DefectView)
router.register('Product', ProductView)
router.register('PhaseType', PhaseTypeView)
router.register('Users', UserView)
## The .urls attribute on a router instance is simply a standard list of URL patterns
## If using namespacing with hyperlinked serializers you'll also need to ensure that any view_name parameters on the serializers correctly reflect the namespace.
urlpatterns = [
    path('', include(router.urls), name='restAPI'),
    # token-based login endpoint provided by DRF's authtoken app
    path('auth-token/', views.obtain_auth_token)
]
|
from typing import Dict, Generator, List
from dateutil.relativedelta import relativedelta
from django.utils import timezone
from ee.clickhouse.client import sync_execute
SLOW_THRESHOLD_MS = 10000  # queries slower than this land in the slow log
SLOW_AFTER = relativedelta(hours=6)  # slow-log lookback window
SystemStatusRow = Dict  # alias: one row of the system-status report
def system_status() -> Generator[SystemStatusRow, None, None]:
    """Yield Clickhouse health rows: liveness, disk usage, table sizes, metrics.

    Stops after the liveness row when the database is unreachable.
    """
    alive = is_alive()
    yield {"key": "clickhouse_alive", "metric": "Clickhouse database alive", "value": alive}
    if not alive:
        return
    # per-disk capacity, human-readable sizes
    disk_status = sync_execute(
        "SELECT formatReadableSize(total_space), formatReadableSize(free_space) FROM system.disks"
    )
    for index, (total_space, free_space) in enumerate(disk_status):
        metric = "Clickhouse disk" if len(disk_status) == 1 else f"Clickhouse disk {index}"
        yield {"key": f"clickhouse_disk_{index}_free_space", "metric": f"{metric} free space", "value": free_space}
        yield {"key": f"clickhouse_disk_{index}_total_space", "metric": f"{metric} total space", "value": total_space}
    # on-disk size and row count per table, active parts only
    table_sizes = sync_execute(
        """
    SELECT
        table,
        formatReadableSize(sum(bytes)) AS size,
        sum(rows) AS rows
    FROM system.parts
    WHERE active
    GROUP BY table
    ORDER BY rows DESC
    """
    )
    yield {
        "key": "clickhouse_table_sizes",
        "metric": "Clickhouse table sizes",
        "value": "",
        "subrows": {"columns": ["Table", "Size", "Rows"], "rows": table_sizes},
    }
    # raw server metrics, merged from both system metric tables
    system_metrics = sync_execute("SELECT * FROM system.asynchronous_metrics")
    system_metrics += sync_execute("SELECT * FROM system.metrics")
    yield {
        "key": "clickhouse_system_metrics",
        "metric": "Clickhouse system metrics",
        "value": "",
        "subrows": {"columns": ["Metric", "Value", "Description"], "rows": list(sorted(system_metrics))},
    }
def is_alive() -> bool:
    """Probe Clickhouse with a trivial query; True when it responds.

    Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate.
    """
    try:
        sync_execute("SELECT 1")
        return True
    except Exception:
        return False
def get_clickhouse_running_queries() -> List[Dict]:
    """Return currently executing queries as dicts, longest-running first.

    `elapsed` is surfaced as `duration`; address columns are stripped.
    """
    return query_with_columns(
        "SELECT elapsed as duration, query, * FROM system.processes ORDER BY duration DESC",
        columns_to_remove=["address", "initial_address", "elapsed"],
    )
def get_clickhouse_slow_log() -> List[Dict]:
    """Return up to 200 recent queries slower than SLOW_THRESHOLD_MS.

    Looks back SLOW_AFTER (6h) into system.query_log; noisy/identifying
    columns are stripped from the rows.
    """
    return query_with_columns(
        f"""
        SELECT query_duration_ms as duration, query, *
        FROM system.query_log
        WHERE query_duration_ms > {SLOW_THRESHOLD_MS}
          AND event_time > %(after)s
        ORDER BY duration DESC
        LIMIT 200
    """,
        {"after": timezone.now() - SLOW_AFTER},
        columns_to_remove=[
            "address",
            "initial_address",
            "query_duration_ms",
            "event_time",
            "event_date",
            "query_start_time_microseconds",
            "thread_ids",
            "ProfileEvents.Names",
            "ProfileEvents.Values",
            "Settings.Names",
            "Settings.Values",
        ],
    )
def query_with_columns(query, args=None, columns_to_remove=()) -> List[Dict]:
    """Run query and return its rows as dicts keyed by column name.

    columns_to_remove: column names to drop from every row. The original
    used a mutable [] default (shared-default pitfall) and scanned it per
    column; an immutable tuple default plus a set lookup fixes both.
    List-valued cells are flattened to comma-joined strings.
    """
    metrics, types = sync_execute(query, args, with_column_types=True)
    type_names = [key for key, _type in types]
    removed = set(columns_to_remove)
    rows = []
    for row in metrics:
        result = {}
        for type_name, value in zip(type_names, row):
            if type_name in removed:
                continue
            if isinstance(value, list):
                value = ", ".join(map(str, value))
            result[type_name] = value
        rows.append(result)
    return rows
|
# -*- coding: utf-8 -*-
from bot_proto import *
from db_proto import DB
def feedback_handler(update, feedback_state):
    # In feedback state 1: log the user's free-text feedback, acknowledge it,
    # and reset the feedback state machine to 0.
    # NOTE(review): `message`/`from_id` stay unbound if the keys are missing
    # from the update -- confirm upstream always supplies them in this state.
    if 'message' in update:
        message = update['message']
    if 'from_id' in update:
        from_id = update['from_id']
    if feedback_state == 1:
        log_event("FEEDBACK: {0}".format(message))
        telebot.send_feedback_ok(from_id)
        db.set_feedback_state(from_id, 0)
def schedule_handler(update, schedule_state):
    # Two-step schedule conversation:
    #   state 1: a program was picked -> send its schedule, advance to state 2
    #   state 2: either re-enter the request flow or fall back to the menu
    if 'message_unicode' in update:
        message_unicode = update['message_unicode']
    if 'from_id' in update:
        from_id = update['from_id']
    if schedule_state == 1:
        if 'callback_data' in update:
            data = update['callback_data']
            telebot.send_schedule(from_id, data)
            db.set_schedule_state(from_id, 2)
            #telebot.send_menu(from_id)
    if schedule_state == 2:
        if 'callback_data' in update:
            data = update['callback_data']
            if data.encode('utf-8') == telebot.schedule_req:
                db.set_schedule_state(from_id, 1)
                telebot.send_schedule_request(from_id, telebot.programs)
            else:
                db.set_schedule_state(from_id, 0)
                telebot.send_menu(from_id)
def notif_handler(update, notif_state):
    # Notification-subscription conversation:
    #   state 1: user chose enable/disable -> ask which program (state 2/3)
    #   state 2: callback names a program to subscribe to
    #   state 3: callback names a program to unsubscribe from
    # anything else resets to state 0 and shows the menu.
    if 'message_unicode' in update:
        message_unicode = update['message_unicode']
    if 'from_id' in update:
        from_id = update['from_id']
    if notif_state == 1:
        if message_unicode == telebot.notif_able:
            db.set_notification_state(from_id, 2)
            telebot.send_notif_able_request(from_id, telebot.programs)
        elif message_unicode == telebot.notif_disable:
            db.set_notification_state(from_id, 3)
            telebot.send_notif_disable_request(from_id, db.get_programs(from_id))
        else:
            db.set_notification_state(from_id, 0)
            telebot.send_menu(from_id)
    elif notif_state == 2:
        if 'callback_data' in update:
            data = update['callback_data']
            db.add_programs(from_id, data)
            db.set_notification_state(from_id, 0)
            telebot.add_program(from_id, data)
            telebot.send_menu(from_id)
    elif notif_state == 3:
        if 'callback_data' in update:
            data = update['callback_data']
            db.delete_programs(from_id, data)
            db.set_notification_state(from_id, 0)
            telebot.delete_program(from_id, data)
            telebot.send_menu(from_id)
    else:
        db.set_notification_state(from_id, 0)
        telebot.send_menu(from_id)
def check_updates():
    # Poll Telegram for new updates and dispatch each one to run_command;
    # returns 0 when there is nothing to process.
    parameters_list = telebot.get_updates()
    if not parameters_list:
        return 0
    for parameters in parameters_list:
        run_command(parameters)
def run_command(update):
    """Dispatch one incoming update to the matching menu/state handler.

    Extracts the message text, its unicode form, and the sender id from the
    update dict, then routes on explicit commands first and otherwise on the
    user's stored notification/schedule/feedback conversation state.

    Removed leftover debug output (bare `print` statements for the sender,
    `print 1` / `print 2`) and a dead re-read of feedback_state at the tail
    that existed only to feed another debug print.
    """
    message = ''
    message_unicode = u''
    from_id = 0
    if 'message' in update:
        message = update['message']
    if 'message_unicode' in update:
        message_unicode = update['message_unicode']
    if 'from_id' in update:
        from_id = update['from_id']
    try:
        log_event("{0} send message '{1}'".format(from_id, message))
    except Exception:
        # logging is best-effort; never let it break dispatch
        pass
    notif_state = db.get_notification_state(from_id)
    schedule_state = db.get_schedule_state(from_id)
    feedback_state = db.get_feedback_state(from_id)
    if message == '/start':
        telebot.send_menu(from_id)
        db.add_default_id(from_id)
    elif message_unicode == telebot.menu_str:
        db.add_default_id(from_id)
        telebot.send_menu(from_id)
    elif notif_state != 0:
        notif_handler(update, notif_state)
    elif schedule_state != 0:
        schedule_handler(update, schedule_state)
    elif feedback_state != 0:
        feedback_handler(update, feedback_state)
    elif message_unicode == telebot.cards_str:
        telebot.send_cards(from_id)
        db.add_default_id(from_id)
    elif message_unicode == telebot.contacts_str:
        telebot.send_contacts(from_id)
        db.add_default_id(from_id)
    elif message_unicode == telebot.notification_str:
        telebot.send_notification_request(from_id)
        db.set_notification_state(from_id, 1)
    elif message_unicode == telebot.schedule_str:
        telebot.send_schedule_request(from_id, telebot.programs)
        db.set_schedule_state(from_id, 1)
    elif message_unicode == telebot.feedback_str:
        telebot.send_feedback_request(from_id)
        db.set_feedback_state(from_id, 1)
    else:
        log_event('No action')
        db.add_default_id(from_id)
        telebot.send_menu(from_id)
if __name__ == "__main__":
    # Entry point (Python 2 syntax): construct the bot and DB, notify the
    # admin, then poll forever. Ctrl-C exits cleanly; any other error is
    # logged and polling continues.
    telebot = Telegram()
    db = DB()
    telebot.send_text(telebot.admin_id, "START")
    while True:
        try:
            check_updates()
            time.sleep(telebot.Interval)
        except KeyboardInterrupt:
            print 'Interrupt by user..'
            break
        except Exception, e:
            # NOTE(review): `e` is unused; get_exception() presumably formats
            # the current traceback -- confirm in bot_proto.
            log_event(get_exception())
#!/usr/bin/python
import boto3
import json
import logging
import time
import sys
import click
client = boto3.client('iam')  # single shared IAM client used by every collector below
logging.basicConfig(filename='output.log', level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
iam_output = dict()  # accumulates each report section (credential report, Users, Groups, Roles, ...)
# Generating credentials report
def get_credentials_report_data():
    """Generate the IAM credential report and store it in iam_output.

    Polls GenerateCredentialReport until AWS reports COMPLETE, then parses
    the CSV payload into a list of dicts (one per user) under
    iam_output["credential_report"]. Uses csv.DictReader instead of the
    original naive str.split(","), which broke on quoted fields and emitted
    a spurious row for the trailing newline.
    """
    import csv
    import io

    client.generate_credential_report()
    # repeated GenerateCredentialReport calls return the current state
    # without restarting a report that is already in progress
    while client.generate_credential_report()['State'] != "COMPLETE":
        time.sleep(5)
    content = client.get_credential_report()["Content"].decode("utf-8")
    iam_output["credential_report"] = list(csv.DictReader(io.StringIO(content)))
# Generating the users data
def get_users_data():
    """Collect every IAM user plus per-user details into iam_output['Users'].

    Each fetcher's result is stored on the user dict under the response key
    it returns; individual failures are logged and skipped.
    """
    # detail fetchers keyed by the response field they populate -- built
    # once instead of being rebuilt on every loop iteration
    list_of_checks = {
        "LoginProfile": client.get_login_profile,
        "PolicyNames": client.list_user_policies,
        "AttachedPolicies": client.list_attached_user_policies,
        "AccessKeyMetadata": client.list_access_keys,
        "Groups": client.list_groups_for_user,
        "MFADevices": client.list_mfa_devices,
        "Certificates": client.list_signing_certificates,
        "SSHPublicKeys": client.list_ssh_public_keys,
        "User": client.get_user,
    }
    response = client.list_users()['Users']
    for user in response:
        for key, fetch in list_of_checks.items():
            try:
                user[key] = fetch(UserName=user['UserName'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['Users'] = response
# Generating the groups data
def get_groups_data():
    """Fetch every IAM group with its members and policies.

    Lookup failures are logged and skipped; the enriched list is stored in
    iam_output['Groups'].
    """
    groups = client.list_groups()['Groups']
    for group in groups:
        # Response key -> client call that produces it.
        checks = {
            "Group": client.get_group,
            "PolicyNames": client.list_group_policies,
            "AttachedPolicies": client.list_attached_group_policies,
        }
        for key, fetch in checks.items():
            try:
                group[key] = fetch(GroupName=group['GroupName'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['Groups'] = groups
# Generating the roles data
def get_roles_data():
    """Fetch every IAM role with its policies, tags and instance profiles.

    Lookup failures are logged and skipped; the enriched list is stored in
    iam_output['Roles'].
    """
    roles = client.list_roles()['Roles']
    for role in roles:
        # Response key -> client call that produces it.
        checks = {
            "Role": client.get_role,
            "PolicyNames": client.list_role_policies,
            "Tags": client.list_role_tags,
            "AttachedPolicies": client.list_attached_role_policies,
            "InstanceProfiles": client.list_instance_profiles_for_role,
        }
        for key, fetch in checks.items():
            try:
                role[key] = fetch(RoleName=role['RoleName'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['Roles'] = roles
# Generating the policies data
def get_policies_data():
    """Fetch every IAM policy with its details and version history.

    Lookup failures are logged and skipped; the enriched list is stored in
    iam_output['Policies'].
    """
    policies = client.list_policies()['Policies']
    for policy in policies:
        # Response key -> client call that produces it.
        checks = {
            "Policy": client.get_policy,
            "Versions": client.list_policy_versions,
        }
        for key, fetch in checks.items():
            try:
                policy[key] = fetch(PolicyArn=policy['Arn'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['Policies'] = policies
# Generating the instance profile data
def get_instance_profiles_data():
    """Fetch every instance profile with its full details.

    Lookup failures are logged and skipped; the enriched list is stored in
    iam_output['InstanceProfiles'].
    """
    profiles = client.list_instance_profiles()['InstanceProfiles']
    for profile in profiles:
        # Response key -> client call that produces it.
        checks = {
            "InstanceProfile": client.get_instance_profile,
        }
        for key, fetch in checks.items():
            try:
                profile[key] = fetch(InstanceProfileName=profile['InstanceProfileName'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['InstanceProfiles'] = profiles
# Generating the account aliases data
def get_account_aliases_data():
    """Store the account's alias list under iam_output['AccountAliases']."""
    iam_output['AccountAliases'] = client.list_account_aliases()['AccountAliases']
# Generating the open id connection providers data
def get_open_id_connection_providers_data():
    """Store the OIDC provider list under iam_output['OpenIDConnectProviderList']."""
    providers = client.list_open_id_connect_providers()['OpenIDConnectProviderList']
    iam_output['OpenIDConnectProviderList'] = providers
# def get_open_id_connection_providers_data():
# response = client.list_open_id_connect_providers()['OpenIDConnectProviderList']
# for provider in response:
# list_of_checks = {
# "Arn" : client.get_open_id_connect_provider,
# }
# for key in list_of_checks:
# try:
# provider[key] = list_of_checks[key](OpenIDConnectProviderArn=provider['Arn'])
# except Exception as e:
# logging.warning("Failed with %s; skipping.", e)
# iam_output['OpenIDConnectProviderList'] = response
# Generating the saml providers data
def get_saml_providers_data():
    """Store the SAML provider list under iam_output['SAMLProviderList']."""
    providers = client.list_saml_providers()['SAMLProviderList']
    iam_output['SAMLProviderList'] = providers
# def get_saml_providers_data():
# response = client.list_saml_providers()['SAMLProviderList']
# for provider in response:
# list_of_checks = {
# "Arn" : client.get_saml_provider,
# }
# for key in list_of_checks:
# try:
# provider[key] = list_of_checks[key](SAMLProviderArn=provider['Arn'])
# except Exception as e:
# logging.warning("Failed with %s; skipping.", e)
# iam_output['SAMLProviderList'] = response
# Generating the server certificates data
def get_server_certificates_data():
    """Fetch every server certificate with its full details.

    Lookup failures are logged and skipped; the enriched list is stored in
    iam_output['ServerCertificateMetadataList'].
    """
    certificates = client.list_server_certificates()['ServerCertificateMetadataList']
    for certificate in certificates:
        # Response key -> client call that produces it.
        checks = {
            "ServerCertificate": client.get_server_certificate,
        }
        for key, fetch in checks.items():
            try:
                certificate[key] = fetch(ServerCertificateName=certificate['ServerCertificateName'])[key]
            except Exception as e:
                logging.warning("Failed with %s; skipping.", e)
    iam_output['ServerCertificateMetadataList'] = certificates
# Generating the virtual mfa devices data
def get_virtual_mfa_devices_data():
    """Store the virtual MFA device list under iam_output['VirtualMFADevices']."""
    devices = client.list_virtual_mfa_devices()['VirtualMFADevices']
    iam_output['VirtualMFADevices'] = devices
# Generating the account authorization data
def get_account_authorization_data():
    """Store the four account-authorization detail lists in iam_output.

    NOTE(review): get_account_authorization_details is paginated
    (IsTruncated/Marker); only the first page is captured here — confirm
    whether pagination matters for large accounts.
    """
    output = client.get_account_authorization_details()
    wanted = ('UserDetailList', 'GroupDetailList', 'RoleDetailList', 'Policies')
    iam_output['AccountAuthorizationDetails'] = {key: output[key] for key in wanted}
# Generating the account password policy data
def get_account_password_policy_data():
    """Store the account password policy under iam_output['PasswordPolicy']."""
    iam_output['PasswordPolicy'] = client.get_account_password_policy()['PasswordPolicy']
# Generating the account summary data
def get_account_summary_data():
    """Store the account summary map under iam_output['SummaryMap']."""
    iam_output['SummaryMap'] = client.get_account_summary()['SummaryMap']
# Calling all the functions to generate data
def iam_analysis(output_file_path):
    """Run every IAM data collector and dump the combined result as JSON.

    Args:
        output_file_path: path of the JSON file the report is written to.
    """
    print("Starting your AWS IAM Analysis...")
    get_users_data()
    get_groups_data()
    get_roles_data()
    # Currently we are getting used policies via get_account_summary_data and later will try to handle this to keep 6xx policies locally rather fetching from AWS as it's time consuming and making unnecessary requests to the AWS API
    # get_policies_data()
    get_instance_profiles_data()
    get_account_aliases_data()
    get_open_id_connection_providers_data()
    get_saml_providers_data()
    get_server_certificates_data()
    get_virtual_mfa_devices_data()
    get_account_authorization_data()
    get_account_password_policy_data()
    get_account_summary_data()
    # Context manager guarantees the handle is closed even if serialization
    # raises (the old open/close pair leaked the handle on error).
    with open(output_file_path, "w") as f:
        f.write(json.dumps(iam_output, indent=4, sort_keys=True, default=str))
    print("Successfully written output to : %s" % (output_file_path))
# Click command group that all sub-commands attach to.
@click.group()
def main():
    """
    Simple CLI for AWS IAM access rights
    """
    pass
@main.command('extract')
@click.option('--outputpath', default="output.json", help="File to store the results")
def extract(outputpath):
    """Extract policies from designated AWS account to which you are logged into"""
    iam_analysis(outputpath)
# Script entry point: dispatch to the click CLI.
if __name__ == "__main__":
    main()
|
import os
def upload_cover_dir(obj, file_obj):
    """Build the storage path for a book cover image.

    Args:
        obj: model instance; its `id` becomes the stored file name.
        file_obj: original file name, used only for its extension.

    Returns:
        str: path like 'book_cover/<id>.<ext>' with a lowercased extension.
    """
    # Bug fix: `.lower` (no call) bound the method object itself, so the
    # generated name embedded its repr instead of the lowercased extension.
    file_ext = file_obj.split('.')[-1].lower()
    _file = '{0}.{1}'.format(obj.id, file_ext)
    return os.path.join('book_cover', _file)
def upload_pdf_dir(obj, file_obj):
    """Build the storage path for a book PDF.

    Args:
        obj: model instance; its `id` becomes the stored file name.
        file_obj: original file name, used only for its extension.

    Returns:
        str: path like 'pdf/<id>.<ext>' with a lowercased extension.
    """
    # Bug fix: `.lower` (no call) bound the method object itself, so the
    # generated name embedded its repr instead of the lowercased extension.
    file_ext = file_obj.split('.')[-1].lower()
    _file = '{0}.{1}'.format(obj.id, file_ext)
    return os.path.join('pdf', _file)
# -*- coding:utf8 -*-
import os, sys, re, json
import argparse
from copy import deepcopy
from copy import deepcopy
from bs4 import BeautifulSoup
from pyquery import PyQuery as pq
from urllib.parse import urljoin
import vthread
import pymongo
from copy import deepcopy
html_text = '''
<div id="main-content" class="region clearfix">
<div class="region region-content">
<div id="block-system-main" class="block block-system clearfix">
<div class="content">
<div id="node-129" class="node node-hegre-model clearfix" about="/models/alena" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-model-board field-type-image"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/w780/public/models/alena-980x553.jpg?itok=S_fTVegN" width="780" height="440"/></div><div class="grid-5 alpha hegre-model-profile"><div class="box border"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/small-portrait/public/models/alena-148.jpg?itok=zaiXCeCv" width="124" height="161"/><h4>Profile</h4><div class="item-list"><ul><li class="first">Height: 5' 6" (169 cm)</li>
<li>Weight: 112 lbs (51 kg)</li>
<li>Age: 21</li>
<li>Occupation: Student</li>
<li class="last">Country: <a href="/models/country/ukraine" typeof="skos:Concept" property="rdfs:label skos:prefLabel" datatype="">Ukraine</a></li>
</ul></div></div></div><div class="grid-15 omega"><h2>New and Fearless</h2><div class="field-name-body field-type-text-with-summary"><p><strong>Alena knows that being a model isn’t just about having the right look; it’s also about walking the right walk.</strong></p>
<p>Perhaps not the most stunning model on Hegre-Art.com; Alena certainly knows how to compensate with loads of sensuality and body language that reads like an erotic thriller you only bring out when alone and tucked under the covers. At first she comes off as innocent, but deep down she has the heart of a fearless vixen.</p>
<p>Alena smiles easy and prides herself on her easy going personality. She grew up under the open skies of the countryside but now lives in the big city and studies computer landscaping full time. She loves her perky breasts and admits with a smile that they have taken her far in life.</p>
<p><em><strong>Alena is brand new to the modeling business and we certainly hope she decides to stick around!</strong></em></p>
</div><div class="grid-11 model-join alpha omega"><div class="box"><h3>Alena: Complete and Uncensored</h3><p>Access <em>every</em> photo and film of <strong>Alena</strong>, including:</p><div class="item-list"><ul class="model-stats"><li class="first last"><em class="placeholder">590 MB</em>+ of zip archives with over <em class="placeholder">270</em> photos</li>
</ul></div><p>Plus access to the entire Hegre-Art library of 248,291 photos and over 481 films.</p><a href="http://nudes.hegre-art.com/hit/1/5/108914/17/2142" class="hegre-direct"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/all/themes/hegregirls/images/get-full-access.png" alt=""/></a></div></div></div><div class="clearfix"/><h3>Alena Galleries</h3><div id="node-1670" class="grid-4 alpha node-grid " about="/galleries/alena-alone" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Alone" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-alone" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaAlone-coverl.jpg?itok=MVRnUVy2"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaAlone-coverl.jpg?itok=j_T6wIir" alt="" title="Alena Alone"/></a></div><div class="release-date">June 21<sup>st</sup>, 2007</div><div class="preview-link"><a href="/free-nudes/hegre-art-alena-alone">Preview Alena Alone</a></div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-alone">Alena Alone</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div id="node-1614" class="grid-4 node-grid " about="/galleries/alena-coke" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Coke" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-coke" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaCocaCola-coverl.jpg?itok=UaoFZSE_"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaCocaCola-coverl.jpg?itok=Gm9_9bBk" alt="" title="Alena Coke"/></a></div><div class="release-date">April 25<sup>th</sup>, 2007</div><div class="preview-link"><a href="/free-nudes/hegre-art-alena-coke">Preview Alena Coke</a></div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-coke">Alena Coke</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div id="node-3431" class="grid-4 node-grid " about="/galleries/alena-hot-bath" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Hot Bath" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-hot-bath" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaHotBath-coverl.jpg?itok=gCNrUgJY"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaHotBath-coverl.jpg?itok=4V4DeO0s" alt="" title="Alena Hot Bath"/></a></div><div class="release-date">December 11<sup>th</sup>, 2006</div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-hot-bath">Alena Hot Bath</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div id="node-1476" class="grid-4 node-grid " about="/galleries/alena-kitchen-setting" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Kitchen Setting" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-kitchen-setting" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaKitchenSetting-coverl.jpg?itok=dR1M8tWG"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaKitchenSetting-coverl.jpg?itok=IUDLs45e" alt="" title="Alena Kitchen Setting"/></a></div><div class="release-date">November 6<sup>th</sup>, 2006</div><div class="preview-link"><a href="/free-nudes/hegre-art-alena-kitchen-setting">Preview Alena Kitchen Setting</a></div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-kitchen-setting">Alena Kitchen Setting</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div id="node-1409" class="grid-4 omega node-grid " about="/galleries/alena-skin-skin" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Skin To Skin" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-skin-skin" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaSkinToSkin-coverl.jpg?itok=zLZOMQzO"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaSkinToSkin-coverl.jpg?itok=5cujcdJP" alt="" title="Alena Skin To Skin"/></a></div><div class="release-date">August 28<sup>th</sup>, 2006</div><div class="preview-link"><a href="/free-nudes/hegre-art-alena-leather-dreams">Preview Alena Skin To Skin</a></div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-skin-skin">Alena Skin To Skin</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div id="node-1320" class="grid-4 alpha node-grid " about="/galleries/alena-table-dance" typeof="sioc:Item foaf:Document">
<span property="dc:title" content="Alena Table Dance" class="rdf-meta element-hidden"/><span property="sioc:num_replies" content="0" datatype="xsd:integer" class="rdf-meta element-hidden"/>
<div class="content">
<div class="field-name-coverl field-type-image"><a href="/galleries/alena-table-dance" rel="http://www.hegregirls.com/sites/default/files/styles/popup/public/covers/AlenaTableDance-coverl.jpg?itok=gz_ImJ2S"><img typeof="foaf:Image" src="http://www.hegregirls.com/sites/default/files/styles/thumbnail/public/covers/AlenaTableDance-coverl.jpg?itok=k3Bp_3GB" alt="" title="Alena Table Dance"/></a></div><div class="release-date">May 29<sup>th</sup>, 2006</div><div class="preview-link"><a href="/free-nudes/hegre-art-alena-table-dance">Preview Alena Table Dance</a></div> </div>
<div class="grid-meta">
<h4><a href="/galleries/alena-table-dance">Alena Table Dance</a></h4>
</div>
<div class="clearfix margin-top">
</div>
</div>
<div class="clearfix bottom-border"/> </div>
<div class="clearfix margin-top">
</div>
</div>
</div>
</div>
<div id="block-hegre-hegre-join-banner" class="block block-hegre">
<div class="content">
<div class="clearfix"><div class="image"><a href="http://nudes.hegre-art.com/hit/1/5/108914/17/2142" class="hegre-direct"><img typeof="foaf:Image" src="http://hegregirls.com/sites/all/themes/hegregirls/images/bottom-join-banner.jpg" alt=""/></a></div><div class="meta"><div class="item-list"><ul><li class="first">248,291 Photos</li>
<li class="last">481 Films</li>
</ul></div></div></div> </div>
</div>
</div>
</div>
'''
# Parse the embedded sample page with PyQuery and print every gallery
# "preview" link; the nested ".content .content" selector scopes the match
# to the inner gallery grid cells.
b = pq(html_text, parser='html')
print(b('.content .content .grid-4 .preview-link'))
# Read an integer and report whether it is even or odd.
Num = int(input("Please enter a number:"))
NumStr = str(Num)
# A number is even exactly when it is divisible by 2.
if Num % 2 == 0:
    print(NumStr + " is an Even Number")
else:
    # Fixed user-facing grammar: "a Odd" -> "an Odd".
    print(NumStr + " is an Odd Number")
|
from .mnist import get_mnist
from .usps import get_usps
from .sixteen_class_imagenet import get_16_class_imageNet_dataloader
# __all__ must list the *names* of the public attributes as strings;
# putting the function objects themselves here makes
# `from <package> import *` raise TypeError.
__all__ = ("get_usps", "get_mnist", "get_16_class_imageNet_dataloader")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author rsg
#
import os
import datetime
import time
import logging
from utils import random_str
logger = logging.getLogger(__name__)
class FileService(object):
    """Persists an uploaded file under a date-partitioned directory tree.

    The original file name is replaced with a generated one (random prefix +
    unix timestamp) while keeping the original extension.
    """
    def __init__(self, file_name, content):
        # file_name: original client-side name, used only for its extension.
        # content: raw bytes to persist.
        self._file_name = file_name
        self._content = content
        self._ext = None  # lazily computed by the `ext` property
        self._new_file_name = self._get_new_name()
    def get_path(self):
        """Return the relative storage path: <year>/<month>/<day>/<new name>.

        Returns:
            str: storage path with a date-based directory prefix.
        """
        today = datetime.date.today()
        date_list = [str(today.year), str(today.month), str(today.day)]
        directory = "/".join(date_list)
        return os.path.join(directory, self._new_file_name)
    def _get_ext(self):
        """Return the extension of the original name (text after the last dot)."""
        fragments = self._file_name.split(".")
        return fragments[-1]
    def _get_new_name(self):
        """Build a collision-resistant name: random string + timestamp + ext."""
        return random_str.get_random_str(20) + str(int(time.time())) + "." + self.ext
    def _create_dir(self, path):
        """Create the parent directory of `path` if it does not exist.

        Args:
            path: full path of the file about to be written.

        Raises:
            OSError: if the directory cannot be created.
        """
        # os.path.dirname is separator-aware; the previous split("/") broke
        # when os.path.join produced platform-specific separators.
        file_dir = os.path.dirname(path)
        try:
            if file_dir:
                os.makedirs(file_dir, exist_ok=True)
        except OSError:
            logger.error("Failed to create dir %s" % file_dir, exc_info=True)
            raise
    def save(self, root_path):
        """Write the content below `root_path` using the generated path.

        Args:
            root_path: root directory the date-partitioned path is joined to.

        Raises:
            IOError: if the file cannot be written.
        """
        abs_path = os.path.join(root_path, self.get_path())
        self._create_dir(abs_path)
        try:
            with open(abs_path, mode="wb+") as file:
                file.write(self._content)
        except IOError:
            logger.error("save file error", exc_info=True)
            raise
    @property
    def ext(self):
        """File extension (without the dot), computed once and cached."""
        if self._ext is None:
            self._ext = self._get_ext()
        return self._ext
from dataloader import PortraitDataloader, PortraitToInferloader, mean, std
import torch
import segmentation_models_pytorch as smp
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
import os
import cv2
# --- Inference setup -------------------------------------------------------
# Build the dataloader over the test images, create the U-Net, and load the
# trained checkpoint weights before running prediction.
#base_path = Path(__file__).parent.parent
data_path = Path("./APDrawingDB/data/").resolve()
#df = pd.read_csv(data_path / "my_metadata.csv")
df = pd.DataFrame()
# location of original and mask image
img_fol = data_path / "test_imgs"
#mask_fol = data_path / "mytrain_masks_bw-128"
df['image_name'] = list(os.listdir(img_fol))
# test_dataloader = PortraitDataloader(df, img_fol, mask_fol, mean, std, "val", 1, 4)
test_dataloader = PortraitToInferloader(img_fol, mean, std, 1, 4)
ckpt_path = "./model_office_384x384_effnet_2.pth"
device = torch.device("cuda")  # NOTE(review): hard-coded CUDA device — fails without a GPU
#model = smp.Unet("resnext50_32x4d", encoder_weights=None, classes=1, activation='sigmoid')
model = smp.Unet("timm-efficientnet-b4", encoder_weights=None, classes=1, activation=None)
model.to(device)
model.eval()
# map_location keeps the checkpoint tensors on CPU while loading
state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state["state_dict"])
# start prediction
tta = 1  # number of forward passes averaged per image
def normPRED(d):
    """Min-max normalize a tensor into the [0, 1] range.

    Args:
        d: torch.Tensor of predictions (any shape).

    Returns:
        torch.Tensor of the same shape, scaled so min -> 0 and max -> 1.
        A constant tensor maps to all zeros instead of dividing by zero.
    """
    ma = torch.max(d)
    mi = torch.min(d)
    if ma == mi:
        # Guard: the original formula produced NaN/inf for constant input.
        return torch.zeros_like(d)
    dn = (d - mi) / (ma - mi)
    return dn
# Inference loop: for each test image run `tta` forward passes, average the
# predictions, min-max normalize, invert to a white background, and save the
# result to ./results/<original name>.
with torch.no_grad():
    #preds_ = []
    for i, batch in enumerate(test_dataloader):
        preds_ = []
        #fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 15))
        #fig.suptitle("predicted_mask//original_mask")
        print("i", i)
        images, mask_target, name = batch
        for _ in range(tta):
            preds = model(images.to(device))
            #print(preds)
            preds_ += [preds.detach().cpu().numpy()]
        # Average the (possibly repeated) forward passes.
        preds = np.mean(preds_, axis=0)
        preds = torch.Tensor(preds)
        #batch_preds = torch.sigmoid(preds)
        batch_preds = normPRED(preds)
        batch_preds = batch_preds.detach().cpu().numpy()
        #print(batch_preds)
        batch_preds = np.squeeze(batch_preds)
        #mask_target = np.squeeze(mask_target)
        #pred1 = np.where(batch_preds==1, mask_target[:,:,0], 0)
        #pred2 = np.where(batch_preds==1, mask_target[:,:,1], 0)
        #pred3 = np.where(batch_preds==1, mask_target[:,:,2], 0)
        #pred = np.stack([pred1, pred2, pred3], axis=2)
        #print(name)
        #ax1.imshow(batch_preds, cmap="gray")
        #ax1.imshow(pred) # if using own dataset
        #ax1.imshow(batch_preds, cmap="gray")
        # Clamp negatives to zero, then invert so the drawing is dark on white.
        batch_preds = np.where(batch_preds>0., batch_preds, 0)
        pred = 1. - np.float32(batch_preds)
        #print(pred.shape)
        #print(np.float32(pred))
        #print('./results/'+name[0])
        #plt.imshow(pred, cmap='gray')
        #plt.show()
        cv2.imwrite('./results/'+name[0], pred*255)
        #break
        #ax2.imshow(np.squeeze(mask_target), cmap="gray")
        #plt.show()
|
import sys
import traceback
from typing import List
from importlib import util
from ConfigValidator.CustomErrors.BaseError import BaseError
from ConfigValidator.CLIRegister.CLIRegister import CLIRegister
from ConfigValidator.Config.Validation.ConfigValidator import ConfigValidator
from ConfigValidator.CustomErrors.ConfigErrors import ConfigInvalidClassNameError
from ExperimentOrchestrator.Experiment.ExperimentController import ExperimentController
def is_no_argument_given(args: List[str]):
    """True when argv holds only the program name itself (no user arguments)."""
    return len(args) == 1
def is_config_file_given(args: List[str]):
    """True when the first real CLI argument names a Python (.py) file."""
    return args[1].endswith('.py')
def load_and_get_config_file_as_module(args: List[str]):
    """Import the config file named by the first CLI argument as a module.

    The module is registered in sys.modules under its bare file name (path
    and '.py' suffix stripped) and returned.
    """
    config_path = args[1]
    module_name = config_path.split('/')[-1].replace('.py', '')
    spec = util.spec_from_file_location(module_name, config_path)
    module = util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module
if __name__ == "__main__":
try:
if is_no_argument_given(sys.argv):
sys.argv.append('help')
CLIRegister.parse_command(sys.argv)
elif is_config_file_given(sys.argv): # If the first arugments ends with .py -> a config file is entered
config_file = load_and_get_config_file_as_module(sys.argv)
if hasattr(config_file, 'RobotRunnerConfig'):
config = config_file.RobotRunnerConfig() # Instantiate config from injected file
ConfigValidator.validate_config(config) # Validate config as a valid RobotRunnerConfig
ExperimentController(config).do_experiment() # Instantiate controller with config and start experiment
else:
raise ConfigInvalidClassNameError
else: # Else, a utility command is entered
CLIRegister.parse_command(sys.argv)
except BaseError as e: # All custom errors are displayed in custom format
print(f"\n{e}")
except: # All non-covered errors are displayed normally
traceback.print_exc() |
# Weighted undirected graph stored as an adjacency matrix.
INF = 99999999  # sentinel weight meaning "no edge" (effectively infinity)
graph = [
    [0, 7, 5],
    [7, 0, INF],
    [5, INF, 0],
]
print(graph)
|
#Exemplo de rede convolucional usando Keras
from keras.datasets import mnist
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout, Conv2D, Flatten, MaxPooling2D
(x_treino, y_treino), (x_teste, y_teste) = mnist.load_data()
print(x_treino.shape)
# One-hot encode the labels.
y_treino = to_categorical(y_treino)
y_teste = to_categorical(y_teste)
# Conv2D expects 3 input dimensions: height, width and colour channels.
x_treino = x_treino.reshape(60000, 28, 28, 1)  # 60000 samples, 28x28 images, 1 channel (grayscale)
x_teste = x_teste.reshape(10000, 28, 28, 1)
# Build the CNN.
modelo = Sequential()
modelo.add(Conv2D(filters=32, kernel_size=5, activation='relu', input_shape=(28, 28, 1)))  # first layer
modelo.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid'))  # second layer
modelo.add(Conv2D(filters=64, kernel_size=5, activation='relu'))  # third layer
modelo.add(MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid'))  # fourth layer
modelo.add(Flatten())  # collapse the multi-dimensional feature maps into a single flat vector
modelo.add(Dense(80, kernel_initializer='normal', activation='relu'))  # fifth layer
modelo.add(Dropout(0.2))
modelo.add(Dense(10, kernel_initializer='normal', activation='softmax'))  # output layer
# Optimizer and loss. Bug fix: the keyword Keras accepts is `metrics`,
# not `metrix` — the typo made compile() raise a TypeError.
modelo.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model.
modelo.fit(x_treino, y_treino, batch_size=200, epochs=10, validation_data=(x_teste, y_teste), verbose=1)
|
import unittest
from algkit.solutions import find_words, exist_word
class WordSearchTestCase(unittest.TestCase):
    """Tests for the word-search solutions (LeetCode 212 / 79)."""
    def test_find_words_n1(self):
        # Multi-word search on a 4x4 board; only "eat" and "oath" are traceable.
        board = [
            ['o', 'a', 'a', 'n'],
            ['e', 't', 'a', 'e'],
            ['i', 'h', 'k', 'r'],
            ['i', 'f', 'l', 'v']
        ]
        words = ["oath", "pea", "eat", "rain"]
        self.assert_find_words(board, words, ["eat", "oath"])
    def test_find_words_e1(self):
        # Edge case: single-row board, single-letter word.
        self.assert_find_words(
            board=[["a", "a"]],
            words=["a"],
            excepted=["a"]
        )
    def assert_find_words(self, board, words, excepted):
        # Helper: order-insensitive comparison of found words.
        self.assertEqual(sorted(excepted), sorted(find_words(board, words)))
    def test_exist_word(self):
        # Single-word existence checks, including a path that would reuse a cell.
        board = [
            ['A', 'B', 'C', 'E'],
            ['S', 'F', 'C', 'S'],
            ['A', 'D', 'E', 'E']
        ]
        self.assertTrue(exist_word(board, 'ABCCED'))
        self.assertTrue(exist_word(board, 'SEE'))
        self.assertFalse(exist_word(board, 'ABCB'))
if __name__ == '__main__':
    unittest.main()
|
# # Definition for singly-linked list.
# class ListNode:
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution:
    """LeetCode 2: add two non-negative numbers stored as reversed-digit lists."""
    @staticmethod
    def get_carry_on(num):
        """Split a digit sum (0..19) into (digit, carry)."""
        if num >= 10:
            return num - 10, 1
        return num, 0
    @staticmethod
    def next_digit(node, node1, node2):
        """Advance all three cursors, tolerating exhausted (None) lists."""
        next_node = node.next if node is not None else None
        next_node1 = node1.next if node1 is not None else None
        next_node2 = node2.next if node2 is not None else None
        return next_node, next_node1, next_node2
    def addTwoNumbers(self, l1, l2):
        """
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        node1 = l1
        node2 = l2
        # First digit handled outside the loop so `root` can be created.
        n, carry_on = self.get_carry_on(node1.val + node2.val)
        root = ListNode(n)
        node = root
        node1 = node1.next if node1 is not None else None
        node2 = node2.next if node2 is not None else None
        while True:
            # Bug fix: use logical `and` instead of bitwise `&` for the
            # conjunction of two boolean tests.
            if node1 is not None and node2 is not None:
                n = node1.val + node2.val + carry_on
            elif node1 is not None:
                n = node1.val + carry_on
            elif node2 is not None:
                n = node2.val + carry_on
            elif carry_on != 0:
                n = carry_on  # final carry becomes a new most-significant digit
            else:
                break
            n, carry_on = self.get_carry_on(n)
            node.next = ListNode(n)
            node, node1, node2 = self.next_digit(node, node1, node2)
        return root
|
import app
import item
import uiScriptLocale
window = {
"name" : "Acce_CombineWindow",
"x" : 0,
"y" : 0,
"style" : ("movable", "float",),
"width" : 215,
"height" : 270,
"children" :
(
{
"name" : "board",
"type" : "board",
"style" : ("attach",),
"x" : 0,
"y" : 0,
"width" : 215,
"height" : 270,
"children" :
(
{
"name" : "TitleBar",
"type" : "titlebar",
"style" : ("attach",),
"x" : 6,
"y" : 6,
"width" : 200,
"color" : "yellow",
"children" :
(
{
"name" : "TitleName",
"type" : "text",
"x" : 95,
"y" : 3,
"text" : uiScriptLocale.ACCE_COMBINE,
"text_horizontal_align" : "center"
},
),
},
{
"name" : "Acce_Combine",
"type" : "image",
"x" : 9,
"y" : 35,
"image" : uiScriptLocale.LOCALE_UISCRIPT_PATH + "acce/acce_combine.tga",
"children" :
(
{
"name" : "AcceSlot",
"type" : "slot",
"x" : 3,
"y" : 3,
"width" : 200,
"height" : 150,
"slot" : (
{"index":0, "x":78, "y":7, "width":32, "height":32},
{"index":1, "x":78, "y":60, "width":32, "height":32},
{"index":2, "x":78, "y":115, "width":32, "height":32},
),
},
{
"name" : "Main",
"type" : "text",
"text" : uiScriptLocale.ACCE_MAIN,
"text_horizontal_align":"center",
"x" : 85+12,
"y" : 7+36,
},
{
"name" : "serve",
"type" : "text",
"text" : uiScriptLocale.ACCE_SERVE,
"text_horizontal_align" : "center",
"x" : 85+12,
"y" : 60+38,
},
{
"name" : "Result",
"type" : "text",
"text" : uiScriptLocale.ACCE_RESULT,
"text_horizontal_align":"center",
"x" : 85+12,
"y" : 115+40
},
),
},
{
"name" : "NeedMoney",
"type" : "text",
"text" : "",
"text_horizontal_align" : "center",
"x" : 105,
"y" : 215,
},
{
"name" : "AcceptButton",
"type" : "button",
"x" : 40,
"y" : 235,
"text" : uiScriptLocale.OK,
"default_image" : "d:/ymir work/ui/public/middle_button_01.sub",
"over_image" : "d:/ymir work/ui/public/middle_button_02.sub",
"down_image" : "d:/ymir work/ui/public/middle_button_03.sub",
},
{
"name" : "CancelButton",
"type" : "button",
"x" : 114,
"y" : 235,
"text" : uiScriptLocale.CANCEL,
"default_image" : "d:/ymir work/ui/public/middle_button_01.sub",
"over_image" : "d:/ymir work/ui/public/middle_button_02.sub",
"down_image" : "d:/ymir work/ui/public/middle_button_03.sub",
},
),
},
),
}
|
#!/usr/bin/env python
# imports from standard python
from __future__ import print_function
import os
import sys
# imports from local packages
# imports from pip packages
import keras
import six
# imports from MiST
from MiST import eval
from MiST import train
from MiST import multi_train
from MiST import utilis
from MiST import reco_train
from MiST import settings
from MiST import globaldef as gl
def init():
    """Entry point: show the logo, parse settings, set up TF, and dispatch
    to the run mode selected by arg['type']."""
    # fancy logo
    logo()
    # parse final settings from argparse+configparse
    arg = settings.options()
    # make some widely used arguments available as global variables
    gl.arg = arg
    # check if everything is reasonable
    utilis.arg_consistency_check()
    # set tf log level to disable most warnings: TF_CPP_MIN_LOG_LEVEL=2
    if not arg['verbose']:
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    # start tensorflow in advance so we can set a few options
    settings.init_tf(arg['gpucores'], arg['verbose'])
    print()
    print('-' * gl.screenwidth)
    # call the right function to go on
    if arg['type'] == 'train':
        print('--- Running in training mode')
        print('-' * gl.screenwidth)
        train.init()
    elif arg['type'] == 'eval':
        print('--- Running in evaluation mode')
        print('-' * gl.screenwidth)
        eval.init()
    elif arg['type'] == 'multi_train':
        print('--- Running in multi training mode')
        print('-' * gl.screenwidth)
        multi_train.init()
    elif arg['type'] == 'reco_train':
        # Bug fix: this branch previously announced "multi training mode"
        # (copy-paste); it runs the reco training path.
        print('--- Running in reco training mode')
        print('-' * gl.screenwidth)
        reco_train.init(arg)
    else:
        print('### init type exception')
def logo():
    """Print the ASCII-art logo with a per-line 256-color gradient.

    Falls back to printing blank lines when the logo file cannot be read.
    """
    logocolorstart = 242  # first ANSI 256-color index of the gradient
    try:
        print()
        # `with` guarantees the handle is closed (the old code leaked it).
        with open('MiST/logo/logo.txt', 'r') as logofile:
            logocolor = logocolorstart
            for line in logofile:
                print('\x1b[38;5;' + str(logocolor) + 'm' + line, end='')
                logocolor = logocolor + 1
        print('\x1b[0m')  # reset terminal colors
    except OSError:  # narrowed from bare `except:` — only a missing/unreadable file is expected
        print()
        print()
|
'''
check the +/- of the 10 E. coli specific regions for each of the downloaded enterobase genomes,
and compile that as a table.
eg. `name` `marker 1` `marker 2` `marker N`
'''
import csv
import logging
import os
import subprocess
import sys
import tempfile
from tqdm import tqdm
from multiprocessing import pool
logging.basicConfig(filename='gene_presence_analysis.log', level=logging.DEBUG)
MARKER_FILE = 'ecoli_specific_markers.fasta'
def analyze_gene_presence(fasta_files):
    '''
    Build a presence/absence table of the 10 E. coli-specific markers.

    Args:
        fasta_files (list[str]): list of paths of files to be added to gene presence report
    Returns:
        str: path of csv file that contains presence/absence of each marker on each genome
    '''
    invalid_file_count = 0
    total_hit = 0
    curr_valid_file_count = 0
    csv_file = 'gene_presence_report.csv'
    fieldnames = [
        "genome_name",
        "1436893830000|3159571",
        "1436893909000|3159808",
        "2873786891000|3159389",
        "2873787160000|3160196",
        "4310679577000|3158082",
        "4310679772000|3158667",
        "4310679831000|3158844",
        "4310680254000|3160113",
        "4310680315000|3160296",
        "4310680399000|3160548"
    ]
    # start a fresh report containing only the header row
    with open(csv_file, 'w') as csv_handle:
        csv_writer = csv.DictWriter(csv_handle, fieldnames=fieldnames)
        csv_writer.writeheader()
    for fasta_file in tqdm(fasta_files):
        presence_output, num_hit = get_presence_output(fasta_file)
        # bug fix: compare sentinel codes with '==' -- 'is' on negative ints
        # relies on interpreter-specific int caching and is unreliable
        if num_hit == -1 or num_hit == -2:
            invalid_file_count += 1
            if num_hit == -2:
                logging.warning('Blast error for %s', fasta_file)
            continue
        curr_valid_file_count += 1
        total_hit += num_hit
        msg = str("Invalid file count: %d, Average Hits: %d/%d=%.2f"
            %(invalid_file_count, total_hit, curr_valid_file_count, total_hit/curr_valid_file_count))
        tqdm.write(msg)
        logging.debug(msg)
        msg = str('%s'%list(presence_output.values()))
        tqdm.write(msg)
        logging.info(msg)
        # need lock for this part (if this is ever parallelized)
        with open(csv_file, 'a') as csv_handle:
            csv_writer = csv.DictWriter(csv_handle, fieldnames=fieldnames)
            csv_writer.writerow(presence_output)
    # bug fix: the docstring promised the report path but nothing was returned
    return csv_file
def get_presence_output(fasta_file):
    '''
    Blast one genome against the marker set and report which markers hit.

    Args:
        fasta_file(str): fasta_file to be queried by marker database
    Returns:
        tuple(dict, int): (presence row keyed by marker id, number of hits);
        (None, -1) for an invalid fasta file, (None, -2) on a blast error
    '''
    # anything under 1 kB cannot be a real genome assembly
    if os.path.getsize(fasta_file) < 1000:
        logging.debug('%s is not a valid fasta file (too small)'%fasta_file)
        return None, -1
    with tempfile.TemporaryDirectory() as temp_dir:
        genome_name = os.path.splitext(os.path.basename(fasta_file))[0]
        blast_db = os.path.join(temp_dir, genome_name)
        # build a temporary nucleotide BLAST database for this genome
        cmd = [
            "makeblastdb",
            "-in", fasta_file,
            "-dbtype", "nucl",
            '-title', genome_name,
            '-out', blast_db
        ]
        run_subprocess(cmd)
        cmd2 = [
            'blastn',
            '-query', MARKER_FILE,
            '-db', blast_db,
            '-perc_identity', "90",
            '-qcov_hsp_perc', "90",
            '-max_target_seqs', '1', # this number needs to be greater than number of genome
            '-max_hsps', '1', # we only want to know hit/no hit
            '-outfmt', '6 qseqid'
        ]
        blast_result = run_subprocess(cmd2)
        if blast_result is None:
            msg = str('Blast error')
            logging.debug(msg)
            return None, -2
        # one entry per marker, flipped to 1 when blastn reports a hit
        new_entry = {
            "genome_name": genome_name,
            "1436893830000|3159571":0,
            "1436893909000|3159808":0,
            "2873786891000|3159389":0,
            "2873787160000|3160196":0,
            "4310679577000|3158082":0,
            "4310679772000|3158667":0,
            "4310679831000|3158844":0,
            "4310680254000|3160113":0,
            "4310680315000|3160296":0,
            "4310680399000|3160548":0
        }
        num_hit = 0
        for line in blast_result.split('\n'):
            allele_name = '|'.join(line.strip().split('|')[1:])
            # bug fix: '== 0' instead of 'is 0' -- int identity is an
            # implementation detail; also counts each marker only once
            if new_entry.get(allele_name) == 0:
                new_entry[allele_name] = 1
                num_hit += 1
        return new_entry, num_hit
def run_subprocess(cmd):
    """Run *cmd*, log its captured output, and return its stdout.

    Returns None when the command exits with a non-zero status.
    """
    try:
        proc = subprocess.run(
            cmd,
            universal_newlines=True,
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
    except subprocess.CalledProcessError as err:
        logging.warning("Error: %s %s", err.stderr, err.stdout)
        return None
    logging.debug(proc.stderr)
    logging.info(proc.stdout)
    return proc.stdout
def main(enterobase_dir = '/home/sam/moria/enterobase_db'):
    """Run the gene-presence analysis over every file in *enterobase_dir*."""
    filenames = [os.path.join(enterobase_dir, basename)
                 for basename in os.listdir(enterobase_dir)]
    analyze_gene_presence(filenames)
# CLI entry point: analyze the default enterobase download directory.
if __name__ == '__main__':
    main()
|
from django.contrib import admin
# Register your models here.
from .models import newsletter_list
class NewsletterAdmin(admin.ModelAdmin):
    """Django admin configuration for newsletter subscriptions."""
    # Columns shown in the admin changelist.
    list_display = ['email_address', 'user_name', 'threshold', 'timestamp', 'updated']
    # Sidebar filters offered on the changelist page.
    list_filter = ['email_address', 'user_name', 'threshold', 'timestamp', 'updated']
    # Fields covered by the admin search box.
    search_fields = ['email_address', 'user_name', 'threshold', 'timestamp', 'updated']
    # NOTE(review): ModelAdmin ignores an inner Meta class; this looks like a
    # ModelForm leftover -- confirm before removing.
    class Meta:
        model = newsletter_list
admin.site.register(newsletter_list, NewsletterAdmin)
# Find the mode (most frequent token) of a whitespace-separated list.
# Input: first line is a count (consumed for input-format compatibility),
# second line holds the tokens.
# Counter.most_common preserves first-insertion order among equal counts
# (Python 3.7+), matching the original left-to-right strictly-greater scan,
# while replacing the accidental O(n^2) double loop with O(n).
from collections import Counter

n = int(input())
tokens = input().split()
result = Counter(tokens).most_common(1)[0][0]
print(result)
|
from django.contrib import admin
from polls.models import Person
# Register your models here.
admin.site.register(Person)  # expose the Person model in the Django admin
|
def is_polydivisible(s, b):
    """Return True when *s*, read as a number in base *b*, is polydivisible.

    Polydivisible: for every k >= 1, the number formed by the first k
    decimal digits of the base-10 value is divisible by k.

    Fixes over the original:
    - ported to Python 3 (range objects have no .reverse(); print is a
      function)
    - the hand-rolled base conversion used exponents len(s)..1 instead of
      len(s)-1..0, inflating the value by a factor of b; int(s, b) performs
      the correct conversion (and also accepts a-z digits for bases > 10)
    - the per-iteration debug print was removed

    Args:
        s (str): digit string in base *b*.
        b (int): source base, 2..36.
    Returns:
        bool
    """
    base_10 = int(s, b)
    prefix = 0
    for k, digit in enumerate(str(base_10), start=1):
        prefix = prefix * 10 + int(digit)
        if prefix % k != 0:
            return False
    return True
def get_polydivisible(n, b):
    """Intended: return the n-th polydivisible number rendered in base *b*.

    NOTE(review): Python 2 source (print statement) with several suspected
    defects, flagged inline -- confirm the intended spec before fixing:
    the 'polydivisibility' test only checks the whole number against its
    digit count, reverse() makes [0] the LARGEST collected value, the base
    conversion divides/modulos by *n* instead of *b*, and num_rep has no
    entries for remainders 62..71 despite the '72 > remainder' guard.
    """
    next = 0
    valid_polys = []
    while len(valid_polys) < n:
        # NOTE(review): tests divisibility by the digit count only, not
        # divisibility of every prefix (true polydivisibility)
        if next%(len(list(str(next)))) == 0:
            valid_polys.append(next)
        next += 1
    valid_polys.reverse()
    print valid_polys
    # NOTE(review): after reverse() this picks the largest collected value,
    # not the n-th one found
    val = valid_polys[0]
    if b == 10:
        op = val
    else:
        # digit symbols for bases above 10 (10->'A' ... 61->'z')
        num_rep={10:'A',
                 11:'B',
                 12:'C',
                 13:'D',
                 14:'E',
                 15:'F',
                 16:'G',
                 17:'H',
                 18:'I',
                 19:'J',
                 20:'K',
                 21:'L',
                 22:'M',
                 23:'N',
                 24:'O',
                 25:'P',
                 26:'Q',
                 27:'R',
                 28:'S',
                 29:'T',
                 30:'U',
                 31:'V',
                 32:'W',
                 33:'X',
                 34:'Y',
                 35:'Z',
                 36:'a',
                 37:'b',
                 38:'c',
                 39:'d',
                 40:'e',
                 41:'f',
                 42:'g',
                 43:'h',
                 44:'i',
                 45:'j',
                 46:'k',
                 47:'l',
                 48:'m',
                 49:'n',
                 50:'o',
                 51:'p',
                 52:'q',
                 53:'r',
                 54:'s',
                 55:'t',
                 56:'u',
                 57:'v',
                 58:'w',
                 59:'x',
                 60:'y',
                 61:'z'}
        new_num_string=''
        current=val
        while current!=0:
            # NOTE(review): uses n (the requested count) as the radix;
            # presumably b was intended
            remainder=current%n
            if 72>remainder>9:
                # NOTE(review): KeyError for remainders 62..71
                remainder_string=num_rep[remainder]
            elif remainder>=72:
                remainder_string='('+str(remainder)+')'
            else:
                remainder_string=str(remainder)
            new_num_string=remainder_string+new_num_string
            current=current/n
        op = new_num_string
    return str(op)
|
from component import *
# Module-wide registry of every attached Destroyable component.
destroyables = []


def get_destroyables():
    """Return the live list of attached Destroyable components."""
    return destroyables
# singleton
class Destroyable(Component):
    """Component that makes an entity hit-testable and destroyable.

    Attached instances register themselves in the module-level
    ``destroyables`` list so other systems can iterate them.
    """

    type = 'destroyable'

    def __init__(self, shape = None, prefix = ''):
        # Optional collision shape; without one a radius-10 circle is used.
        self.shape = shape
        self.prefix = prefix

    def attach(self, entity):
        super(Destroyable, self).attach(entity)
        destroyables.append(self)

    def detach(self, entity):
        super(Destroyable, self).detach(entity)
        global destroyables
        destroyables = [comp for comp in destroyables if comp != self]

    def collides(self, x, y):
        ex, ey = self.get('x'), self.get('y')
        if not self.shape:
            # fallback: circular hit area of radius 10 around the entity
            return (ex - x) ** 2 + (ey - y) ** 2 < 100
        return self.shape.collides(ex, ey, x, y)

    def destroy(self):
        self.entity.destroy()

    def update(self, entity):
        pass
|
#encoding: UTF-8
# Author: Karla Fabiola Ramirez Martinez
# Description: percentage of men and women in a class
mujeres=int(input("Dime el numero de mujeres que hay: "))  # number of women
hombres=int(input("Dime el numero de hombres que hay: "))  # number of men
total=mujeres+hombres
porcentaje=100/total  # percentage weight of one student
pmujeres=porcentaje*mujeres
phombres=porcentaje*hombres
print("Porcentaje de hombres: %5.2f "%phombres)
print("Porcentaje de mujeres: %5.2f "%pmujeres)
print("Total de alumnos: ",total)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import itertools

# Print every ordering of the four elements, one tuple per line.
for perm in itertools.permutations([1, 2, 3, 4]):
    print(perm)
|
"""
Utilities for lists.
"""
from __future__ import absolute_import
from __future__ import print_function
from six.moves import xrange
import numpy as np
from operator import itemgetter
from itertools import groupby
def list2ndarray(a):
    """Convert a list to an ndarray; pass existing ndarrays through."""
    if isinstance(a, np.ndarray):
        return a
    assert isinstance(a, list)
    return np.asarray(a)
def ismember(a, b):
    """MATLAB-style ismember.

    Returns (f, loc): f[i] is True when a[i] occurs in b, loc[i] is the
    index of the first occurrence in b (a large negative sentinel when
    absent).
    """
    missing = np.iinfo(np.int32).min
    first_index = {}
    for i, elt in enumerate(b):
        first_index.setdefault(elt, i)
    loc = np.array([first_index.get(x, missing) for x in a], dtype='int32')
    return loc != missing, loc
def sort(a, reverse=False, return_index=False):
    """Sort *a*; optionally also return the argsort index array."""
    if not return_index:
        ordered = np.sort(a)
        return ordered[::-1] if reverse else ordered
    idx = np.argsort(a)
    if reverse:
        idx = idx[::-1]
    arr = a if isinstance(a, np.ndarray) else np.asarray(a)
    return arr[idx], idx
def intersect(a, b, assume_unique=False, return_index = False):
    """Sorted intersection of *a* and *b*; optionally with ismember indices
    of the common values into each input."""
    common = np.intersect1d(a, b, assume_unique)
    if not return_index:
        return common
    _, ia = ismember(common, a)
    _, ib = ismember(common, b)
    return common, ia, ib
def split_list(a, idx, num_parts, key = None):
    """Return part *idx* (1-based) of *a*, where the unique values of *key*
    (default: *a* itself) are divided into *num_parts* contiguous ranges.

    Returns (elements, positions) for the selected part.
    """
    arr = a if isinstance(a, np.ndarray) else np.asarray(a)
    group_key = arr if key is None else key
    _, group_of = np.unique(group_key, return_inverse=True)
    n_groups = float(group_of.max() + 1)
    lo = int(np.floor((idx - 1) * n_groups / num_parts))
    hi = int(np.floor(idx * n_groups / num_parts))
    loc = np.empty(len(arr), dtype='int64')
    filled = 0
    for g in range(lo, hi):
        members = (group_of == g).nonzero()[0]
        loc[filled:filled + len(members)] = members
        filled += len(members)
    loc = loc[:filled]
    return arr[loc], loc
# def test_list_utils():
# list1 = ['2', '2', '1', '2', '3', '3', '4', '4', '4', '4']
# list2 = ['0', '1', '2', '20']
# f, loc = ismember(list2, list1)
# assert(np.all(loc == [np.iinfo(np.int32).min, 2, 0, np.iinfo(np.int32).min]))
# assert(np.all(f == [False, True, True, False]))
# list1_s, idx = sort(list1, return_index=True)
# assert(np.all(list1_s == ['1', '2', '2', '2', '3', '3', '4', '4', '4', '4']))
# list1_u, i_a, i_b, = np.unique(list1, True, True)
# assert(np.all(list1_u == ['1', '2', '3', '4']))
# assert(np.all(i_a == [2, 0, 4, 6]))
# assert(np.all(i_b == [1, 1, 0, 1, 2, 2, 3, 3, 3, 3]))
# list_i, i_a, i_b = intersect(list2, list1_u, return_index=True)
# assert(np.all(list_i == ['1', '2']))
# assert(np.all(i_a == [1, 2]))
# assert(np.all(i_b == [0, 1]))
# list1_d = np.setdiff1d(list1, list2)
# assert(np.all(list1_d == ['3', '4']))
# list2_d = np.setdiff1d(list2, list1)
# assert(np.all(list2_d == ['0', '20']))
# list_s, loc = split_list(list1, 1, 3)
# assert(np.all(list_s == ['1']))
# assert(np.all(loc == [2]))
# list_s, loc = split_list(list1, 2, 3)
# assert(np.all(list_s == ['2', '2', '2']))
# assert(np.all(loc == [0, 1, 3]))
# list_s, loc = split_list(list1, 3, 3)
# assert(np.all(list_s == ['3', '3', '4', '4', '4', '4']))
# assert(np.all(loc == [i for i in xrange(4, 10)]))
# def ismember(a, b):
# d = {}
# for i, elt in enumerate(b):
# if elt not in d:
# d[elt] = i
# loc = [d.get(x, None) for x in a]
# f = [x is not None for x in loc]
# return f, loc
# def sort(a, reverse=False, return_index=False):
# if return_index:
# idx=sorted(range(len(a)), key=a.__getitem__, reverse=reverse)
# s_a = [a[i] for i in idx]
# return s_a, idx
# else:
# return sorted(a, reverse=reverse)
# def unique(a, return_index=False, return_inverse=False, return_counts=False):
# r=np.unique(a, return_index, return_inverse, return_counts)
# if isinstance(r, np.ndarray):
# r = r.tolist()
# else:
# r = list(r)
# r[0] = r[0].tolist()
# r = tuple(r)
# return r
# # sort_list = sorted(set(list_in))
# # list_out = map(itemgetter(0), groupby(sort_list))
# # indx_in2out = [list_in.index(x) for x in list_out]
# # indx_out2in = [list_out.index(x) for x in list_in]
# # return list_out, indx_in2out, indx_out2in
# def intersect(a, b, assume_unique=False, return_index = False):
# c = np.intersect1d(a, b, assume_unique).tolist()
# if return_index:
# _, ia = ismember(c, a)
# _, ib = ismember(c, b)
# return c, ia, ib
# else:
# return c
# # f, ia = ismember(b, a)
# # print(f)
# # ia = ia[f]
# # f, ib = ismember(a, b)
# # ib = ib[f]
# # c = a[ia]
# # return c, ia, ib
# def setdiff(a, b, assume_unique=False):
# return np.setdiff1d(a, b, assume_unique).tolist()
# #return list(set(a) - set(b))
# def union(a, b):
# return np.union1d(a, b).tolist()
# def split_list(a, idx, num_parts, key = None):
# if key is None:
# key = a
# _, _, ids=unique(key, return_index=True, return_inverse=True)
# n = float(ids.max()+1)
# idx_1 = int(np.floor((idx-1)*n/num_parts))
# idx_2 = int(np.floor(idx*n/num_parts))
# loc=np.empty(len(a), dtype='int32')
# k=0
# for i in range(idx_1, idx_2):
# loc_i = (ids==i).nonzero()[0]
# loc[k:k+len(loc_i)] = loc_i
# k += len(loc_i)
# loc = loc[:k]
# return [a[j] for j in loc], loc.tolist()
# def test_list_utils():
# list1 = ['2', '2', '1', '2', '3', '3', '4', '4', '4', '4']
# list2 = ['0', '1', '2', '20']
# f, loc = ismember(list2, list1)
# assert(loc == [None, 2, 0, None])
# assert(f == [False, True, True, False])
# list1_s, idx = sort(list1, return_index=True)
# assert(list1_s == ['1', '2', '2', '2', '3', '3', '4', '4', '4', '4'])
# list1_u, i_a, i_b, = unique(list1, True, True)
# assert(list1_u == ['1', '2', '3', '4'])
# assert(np.all(i_a == [2, 0, 4, 6]))
# assert(np.all(i_b == [1, 1, 0, 1, 2, 2, 3, 3, 3, 3]))
# list_i, i_a, i_b = intersect(list2, list1_u, return_index=True)
# assert(list_i == ['1', '2'])
# assert(i_a == [1, 2])
# assert(i_b == [0, 1])
# list1_d = setdiff(list1, list2)
# assert(list1_d == ['3', '4'])
# list2_d = setdiff(list2, list1)
# assert(list2_d == ['0', '20'])
# list_s, loc = split_list(list1, 1, 3)
# assert(list_s == ['1'])
# assert(loc == [2])
# list_s, loc = split_list(list1, 2, 3)
# assert(list_s == ['2', '2', '2'])
# assert(loc == [0, 1, 3])
# list_s, loc = split_list(list1, 3, 3)
# assert(list_s == ['3', '3', '4', '4', '4', '4'])
# assert(loc == [i for i in xrange(4, 10)])
# class ListUtils:
# @staticmethod
# def parse_list(list_file,separator="="):
# data=pd.read_csv(list_file,header=None,sep=separator)
# n_columns=data.shape[1];
# lists=[None]*n_columns
# for i in range(n_columns):
# lists[i]=data[i].values.tolist()
# return lists
# @staticmethod
# def parse_scp(list_file,separator="="):
# data=pd.read_csv(list_file,header=None,sep=separator)
# assert(n_columns>=2,'File %s has n_columns=%d<2' % (list_file,n_columns))
# key=data[0].values.tolist()
# for i in xrange(2,n_columns):
# data[1]+=data[i]
# scp=data[1].values.tolist()
# return key, scp
# @staticmethod
# def divide_lists(lists,part_indx,n_part,key_list=0):
# n_lists=len(lists)
# assert(key_list<n_lists,'key_list=%d but n_lists=%d' % (key_list,n_lists))
# key=lists[key_list]
# _,_,ids=unique(key_list)
# n=ids.max()
# indx1=floor((part_indx-1)*n/n_part);
# indx2=floor(part_indx*n/n_part)-1;
# loc=np.array([])
# for i in range(indx1,indx2):
# loc_i=(ids==i).nonzero()
# loc=vstack((loc,loc_i))
# sublists=[]
# for i in xrange(n_lists):
# sublist_i=[lists[i][j] for j in loc]
# sublists.append(sublist_i)
# return sublists
# @staticmethod
# def unique(list_in):
# sort_list=sorted(set(list_in))
# list_out=map(itemgetter(0), groupby(sort_list))
# indx_in2out=[list_in.index(x) for x in list_out]
# indx_out2in=[list_out.index(x) for x in list_in]
# return list_out,indx_in2out,indx_out2in
# @staticmethod
# def ismember(a,b):
# d = {}
# for i, elt in enumerate(b):
# if elt not in d:
# d[elt] = i
# loc=[d.get(x,-1) for x in a]
# f=[x != -1 for x in loc]
# return f,loc
|
from thumbnail_image.utils import AWSUtils, ImageUtils, FileUtils
class UploadThumbnailImageToS3:
    """Use-case: build a PNG thumbnail for an uploaded S3 object and store
    it in the same bucket, returning the thumbnail's URL."""

    _s3_client = None

    def __init__(self, s3_client) -> None:
        self._s3_client = s3_client

    def execute(self, s3_event: dict) -> str:
        bucket, key = AWSUtils.get_s3_data(s3_event)
        # Skip objects that are themselves thumbnails so we never recurse
        # on our own output.
        if key.endswith('_thumbnail.png'):
            return None
        source_bytes = AWSUtils.download_s3_object(bucket, key, self._s3_client)
        source_image = ImageUtils.bytes_to_image(source_bytes)
        payload = ImageUtils.image_to_bytes(ImageUtils.image_to_thumbnail(source_image))
        target_key = FileUtils.new_filename(key)
        if not AWSUtils.upload_to_s3(bucket, target_key, payload, self._s3_client):
            return None
        return AWSUtils.s3_url(bucket, target_key, self._s3_client)
|
#
# Copyright (c) 2023, Gabriel Linder <linder.gabriel@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
from contextlib import contextmanager
from fcntl import LOCK_EX, LOCK_NB, flock
from typing import IO, AnyStr, Iterator
@contextmanager
def lock_and_open_for_write(fname: str,
                            mode: str = 'w') -> Iterator[IO[AnyStr]]:
    """Open *fname* for writing while holding an exclusive, non-blocking
    flock on the sidecar '<fname>.lock' file.

    Raises:
        OSError/BlockingIOError: immediately, if another process already
            holds the lock or the lock file cannot be created.
    """
    try:
        fd = None
        # bug fix: lock_fd was unbound in 'finally' when this open failed,
        # raising NameError and masking the original exception
        lock_fd = None
        lock_fd = open(f'{fname}.lock', 'w')  # noqa: SIM115
        flock(lock_fd, LOCK_EX | LOCK_NB)
        fd = open(fname, mode)  # noqa: SIM115
        yield fd
    finally:
        if fd is not None:
            fd.flush()
            fd.close()
        if lock_fd is not None:
            lock_fd.close()
|
class Calculator:
    """Accumulator keeping a running total across add() calls."""

    def __init__(self):
        self.result = 0

    def add(self, num):
        """Add *num* to the running total and return the new total."""
        self.result = self.result + num
        return self.result
# Each instance keeps its own independent running total.
cal1 = Calculator()
cal2 = Calculator()
print(cal1.add(3))  # 3
print(cal1.add(4))  # 7
print(cal2.add(3))  # 3
print(cal2.add(7))  # 10
# Empty placeholder class -- instances carry no data or behavior yet.
class FourCal:
    pass
a=FourCal()
print(type(a))  # the FourCal class object
class FourCal:
    """Four-function calculator over two operands fixed at construction."""

    def __init__(self, first, second):
        self.first = first
        self.second = second

    def add(self):
        """Sum of the operands."""
        return self.first + self.second

    def mul(self):
        """Product of the operands."""
        return self.first * self.second

    def sub(self):
        """Difference first - second."""
        return self.first - self.second

    def div(self):
        """True division first / second; raises ZeroDivisionError on 0."""
        return self.first / self.second
a=FourCal(2, 4)
print(a.add())  # 6
# Inherits all four operations from FourCal unchanged.
class MoreFourCal(FourCal):
    pass
a=MoreFourCal(4, 5)
print(a.mul())  # 20
class ChildFourCal(FourCal):
    """FourCal variant whose division returns 0 instead of raising when
    the second operand is zero."""

    def div(self):
        if self.second == 0:
            return 0
        return self.first / self.second
a=ChildFourCal(2, 6)
print(a.div())  # 0.3333...
import sys
sys.path.append("C:\\eomcoding\\subFolder")  # make the local module dir importable
import mod1
print(mod1.add(3, 4))
# Several equivalent ways to import from the game.sound package:
import game.sound.echo
game.sound.echo.echo_test()
from game.sound.echo import echo_test as e
e()
from game.sound import *
echo.echo_test()
from game import *
sound.echo.echo_test()
class Calculator:
    """Base accumulator: value starts at 0 and grows via add()."""

    def __init__(self):
        self.value = 0

    def add(self, val):
        """Increase the stored value by *val* (returns None)."""
        self.value = self.value + val
class UpgradCalculator(Calculator):
    """Calculator extended with subtraction."""

    def minus(self, val):
        """Decrease the stored value by *val*."""
        self.value -= val
cal = UpgradCalculator()
cal.add(10)
cal.minus(7)
print(cal.value)  # 3
class MaxLimitCalculator(Calculator):
    """Calculator whose value is clamped to an upper bound of 100."""

    def add(self, val):
        # clamp after each addition so the total never exceeds 100
        self.value = min(self.value + val, 100)
su = MaxLimitCalculator()
su.add(50)
su.add(60)   # clamped to 100
su.add(-20)  # 80
su.add(50)   # clamped to 100
print(su.value)  # 100
# Built-in function drill; each line's printed value is annotated.
print(all([1, 2, abs(-3)-3]))  # False -- last element is 0
print(abs(-3)-3)  # 0
print(chr(ord('a'))=='a')  # True
print(list(filter(lambda x: x>0, [1, -2, 3, -5, 8, -3])))  # [1, 3, 8]
print(int('0xea', 16))  # 234
print(list(map(lambda x: x*3, [1, 2, 3, 4])))  # [3, 6, 9, 12]
print(int(max([-8, 2, 7, 5, -3, 5, 0, 1])+(min([-8, 2, 7, 5, -3, 5, 0, 1]))))  # -1
print(round(5.666666666666667, 4))  # 5.6667
import numpy as np
import pandas as pd
def mbe(observe:pd.DataFrame, predict:pd.DataFrame) -> pd.DataFrame:
    """Mean bias error of predicted data.

    Calculates mean bias error as sum(predict - observe)/(total entries).

    Parameters
    ----------
    observe : pandas.DataFrame
        Observed values.
    predict : pandas.DataFrame
        Predicted values.

    Returns
    -------
    pandas.DataFrame
        Mean bias error of predicted values, one row.
    """
    n_rows = len(observe.index)
    bias = (predict - observe).sum() / n_rows
    return bias.to_frame().T
def rmse(observe:pd.DataFrame, predict:pd.DataFrame) -> pd.DataFrame:
    """Root mean square error of predicted data.

    Computed per column as sqrt(sum((predict - observe)^2) / rows).

    Parameters
    ----------
    observe : pandas.DataFrame
        Observed values.
    predict : pandas.DataFrame
        Predicted values.

    Returns
    -------
    pandas.DataFrame
        Root mean square error of predicted values, one row.
    """
    # cleanup: the original assigned an unused local n and called len() twice
    squared_error = (predict - observe).pow(2)
    mean_squared = squared_error.sum() / len(observe.index)
    return mean_squared.to_frame().T.pow(0.5)
def pbias(observe:pd.DataFrame, predict:pd.DataFrame) -> pd.DataFrame:
    """Percent bias of predicted data.

    Calculates percent bias as 100%*(sum(predict-observe)/sum(observe)).

    Parameters
    ----------
    observe : pandas.DataFrame
        Observed values.
    predict : pandas.DataFrame
        Predicted values.

    Returns
    -------
    pandas.DataFrame
        Percent bias of predicted values, one row.
    """
    diff = predict - observe
    percent = 100 * (diff.sum() / observe.sum())
    return percent.to_frame().T
def pbias_by_index(observe:pd.DataFrame, predict:pd.DataFrame):
    """Percent bias of predicted data for each entry.

    Computes 100*(predict-observe)/observe element-wise, assuming any
    aggregation (e.g. by month or year) was already applied to both
    DataFrames.

    Parameters
    ----------
    observe : pandas.DataFrame
        Observed values.
    predict : pandas.DataFrame
        Predicted values.

    Returns
    -------
    pandas.DataFrame
        Percent bias of predictions according to the index provided.
    """
    return 100 * ((predict - observe) / observe)
def normalize(data:pd.DataFrame) -> pd.DataFrame:
    """Min-max normalize each column of *data* to [0, 1].

    A normalized value is (value - column_min)/(column_max - column_min);
    the index is left untouched.

    Parameters
    ----------
    data : pandas.DataFrame

    Returns
    -------
    pd.DataFrame
        Normalized 'data' without altering the index.
    """
    col_min = data.min()
    col_max = data.max()
    return (data - col_min) / (col_max - col_min)
def normalize_pair(data:pd.DataFrame, norming_data:pd.DataFrame):
    """Normalize two DataFrames by the column ranges of *norming_data*.

    Columns are paired positionally via zip; each column of both frames is
    scaled by the min/max of the corresponding *norming_data* column.

    Parameters
    ----------
    data : pandas.DataFrame
        Contains values to be normalized by the norming_data.
    norming_data : pandas.DataFrame
        Contains values to be normalized and to normalize data by.

    Returns
    -------
    data_normed : pandas.DataFrame
        'data' normalized by 'norming_data'.
    norming_data_normed : pandas.DataFrame
        'norming_data' normalized by itself.
    """
    data_normed = pd.DataFrame(index=data.index, columns=data.columns)
    norming_normed = pd.DataFrame(index=norming_data.index, columns=norming_data.columns)
    for data_column, norming_column in zip(data.columns, norming_data.columns):
        # bug fix: the norming series was looked up with data_column, which
        # breaks (or silently mismatches) when the frames' column names differ
        norming_series = norming_data[norming_column]
        min_x = norming_series.min()
        max_x = norming_series.max()
        data_normed[data_column] = (data[data_column] - min_x) / (max_x - min_x)
        norming_normed[norming_column] = (norming_series - min_x) / (max_x - min_x)
    return data_normed, norming_normed
def mean_standardize(data:pd.DataFrame) -> pd.DataFrame:
    """Standardize each column about its mean (z-score).

    Each value becomes (value - column_mean)/column_std; the index is
    left untouched.

    Parameters
    ----------
    data : pandas.DataFrame
        Contains values to be standardized.

    Returns
    -------
    pandas.DataFrame
        'data' standardized about the mean without altering the index.
    """
    return (data - data.mean()) / data.std()
def median_standardize(data:pd.DataFrame) -> pd.DataFrame:
    """Standardize each column about its median.

    Each value becomes (value - column_median)/mad, where mad is the mean
    absolute deviation around the column MEAN -- matching the behavior of
    pandas' Series.mad(), which was removed in pandas 2.0 and is therefore
    computed explicitly here.

    Parameters
    ----------
    data : pandas.DataFrame

    Returns
    -------
    pandas.DataFrame
        'data' standardized about the median without altering the index.
    """
    median_standard_df = pd.DataFrame(index = data.index, columns = data.columns)
    for column in data.columns:
        column_series = data[column]
        median = column_series.median()
        # mean absolute deviation around the mean (former Series.mad())
        mad = (column_series - column_series.mean()).abs().mean()
        median_standard_df[column] = (column_series - median) / mad
    return median_standard_df
|
"""
linked list ADT, a simple one, demonstrate the add
"""
class _Node:
    """Lightweight singly linked node (element + next pointer)."""
    __slots__ = '_element', '_next'

    def __init__(self, element, next):
        self._element = element
        self._next = next


class SinglyLinkedList:
    """singly linked list, no dummy head

    Keeps a tail reference so add() runs in O(1) instead of the original
    O(n) walk from the head on every append.
    """

    def __init__(self):
        self._head = None
        self._tail = None  # last node, enables O(1) appends
        self._size = 0

    def __len__(self): return self._size

    def is_empty(self): return self._size == 0

    def add(self, value):
        # add to the end of the queue
        new = _Node(value, None)
        if self.is_empty():
            self._head = new
        else:
            self._tail._next = new
        self._tail = new
        self._size += 1

    def show(self):  # for debug
        if self.is_empty():
            print("Empty queue")
        else:
            print("head <= trail")
            node = self._head
            while node is not None:
                print(node._element, end=' ')
                node = node._next
            print()
if __name__ == '__main__':
    # smoke test: append 0..19 and dump the list
    q = SinglyLinkedList()
    for i in range(20):
        q.add(i)
    q.show()
|
import base64
import re
import graphene
from graphene_django.registry import get_global_registry
from graphql_relay import from_global_id
registry = get_global_registry()
# Matches strings that are entirely decimal digits.
INT_PATTERN = r"^\d+$"


def get_id_from_base64_encoded_string(value):
    """Extract the pk part from a relay-style base64 'Type:id' string.

    Plain ints are returned unchanged and digit-only strings are returned
    as ints; a decodable string without a ':' part yields None.
    """
    if isinstance(value, int):
        return value
    if re.match(INT_PATTERN, value):
        return int(value)
    decoded = base64.b64decode(bytes(value, "utf-8")).decode()
    try:
        return decoded.split(":")[1]
    except IndexError:
        return None
def get_id_from_base64_encodedstring(string):
    """Decode a base64 'Type:id' string and return the id as an int.

    Raises a generic Exception when the decoded text has no ':id' part.
    """
    decoded = base64_decodestring(string)
    try:
        return int(decoded.split(":")[1])
    except IndexError:
        raise Exception("Invalid ID encoding")


def base64_decodestring(string):
    """Decodes a string using Base64 and return a string."""
    return base64.b64decode(bytes(string, "utf-8")).decode()
def get_model_name(model):
    """Return name of the model class with its first letter lowercased."""
    name = model.__name__
    return name[:1].lower() + name[1:]
def get_output_fields(model, return_field_name):
    """Return mutation output field for model instance."""
    # look up the graphene type registered for this Django model
    model_type = registry.get_type_for_model(model)
    return {return_field_name: graphene.Field(model_type)}
def clean_seo_fields(data):
    """Flatten a nested 'seo' dict into seo_title / seo_description keys.

    Mutates *data* in place; a missing or falsy 'seo' entry is a no-op
    (beyond removing the key).
    """
    seo = data.pop("seo", None)
    if not seo:
        return
    data["seo_title"] = seo.get("title")
    data["seo_description"] = seo.get("description")
def snake_to_camel_case(name):
    """Convert snake_case variable name to camelCase; non-strings are
    passed through unchanged."""
    if not isinstance(name, str):
        return name
    head, *tail = name.split("_")
    return head + "".join(part.capitalize() for part in tail)
def get_nodes(ids, graphene_type=None):
    """Resolve a list of relay global IDs to model instances.

    Args:
        ids: iterable of base64 global IDs ('Type:pk').
        graphene_type: optional expected node type; when omitted it is
            inferred from the IDs (all IDs must then share one type).

    Returns:
        list: model instances for every requested pk.

    Raises:
        AssertionError: on mixed/unexpected ID types or a missing pk.
            NOTE(review): assert-based validation disappears under
            'python -O' -- consider explicit raises.
        Exception: when the queryset resolves no nodes at all.
    """
    pks = []
    types = []
    for graphql_id in ids:
        _type, _id = from_global_id(graphql_id)
        if graphene_type:
            assert str(graphene_type) == _type, ("Must receive an {} id.").format(
                graphene_type._meta.name
            )
        pks.append(_id)
        types.append(_type)
    # If `graphene_type` was not provided, check if all resolved types are
    # the same. This prevents from accidentally mismatching IDs of different
    # types.
    if types and not graphene_type:
        assert len(set(types)) == 1, "Received IDs of more than one type."
        # get type by name
        type_name = types[0]
        for model, _type in registry._registry.items():
            if _type._meta.name == type_name:
                graphene_type = _type
                break
    nodes = list(graphene_type._meta.model.objects.filter(pk__in=pks))
    if not nodes:
        raise Exception(
            "Could not resolve to a nodes with the global id list of '%s'." % ids
        )
    # verify that every requested pk actually resolved to a node
    nodes_pk_list = [str(node.pk) for node in nodes]
    for pk in pks:
        assert pk in nodes_pk_list, "There is no node of type {} with pk {}".format(
            _type, pk
        )
    return nodes
|
#!/usr/bin/python
import scipy.io.wavfile as wavfile
import numpy
#from scipy import signal
#import matplotlib.pyplot as mplot
import pydemod.modulation.phase as phasemod
import pydemod.coding.manchester as manchester
import pydemod.coding.polynomial as poly
import pydemod.app.amss as amss
import sys
# symbolLength: expressed in samples
# (it is twice the bitrate of 46.875 since every bit is coded by
# 2 manchester symbols
def amss_deshape(signal, symbolLength):
    """Recover a pulse stream from the demodulated phase signal.

    NOTE(review): Python 2 source (print statement). As indented here, both
    append() calls sit inside the 'if', whose 'len(pulseStream) > 0' guard
    can then never become true, so the function would always return [] --
    the second append was presumably meant to run once per loop iteration;
    confirm against the upstream pydemod project before changing.
    """
    # logical version of the signal (0 - 1)
    logical = numpy.array(signal >= 0, int)
    changes = numpy.diff(logical)
    # gaps longer than ~1.5 symbols indicate a missing mid-bit transition
    manchesterThreshold = 1.6*symbolLength
    changeInstants = numpy.nonzero(changes)[0]
    changeLengths = numpy.diff(changeInstants)
    print "changeLengths = " + str(changeLengths)
    # reconstruct pulse stream
    #pulseStream = changes[changeInstants]
    pulseStream = []
    for i in range(changeLengths.size):
        if changeLengths[i] > manchesterThreshold and len(pulseStream) > 0:
            pulseStream.append(pulseStream[-1])
            pulseStream.append(changes[changeInstants[i+1]])
    return pulseStream
##### MAIN PROGRAM #####
# Usage (Python 2): script.py <wavfile>
(sampleRate, samples) = wavfile.read(sys.argv[1])
(avgPeriod, deltaPhiF) = phasemod.naive_phase_demod(samples)
# samples per Manchester symbol: two symbols per 46.875 bit/s bit
manchesterPeriod = sampleRate / 46.875 / avgPeriod / 2
bits = manchester.manchester_decode(amss_deshape(deltaPhiF, manchesterPeriod))
# find pair-impulse sync
# must find 2 pulses with the same value
#print bits
word_stream = poly.amss_code.bitstream_to_wordstream(bits)
print word_stream
s = amss.Station()
s.process_stream(word_stream)
#mplot.plot(deltaPhi)
#mplot.plot(deltaPhiF)
#mplot.show()
|
import torch
import time
import numpy as np
import logger
import get_data_greedy
import encoder_greedy
import arg_parser
import model_utils
def load_model_and_optimizer(opt, num_GPU=None):
    """Build the greedy ResNet and one Adam optimizer per encoder layer,
    then distribute the model over the available GPUs."""
    resnet = encoder_greedy.FullModel(opt)
    optimizer = [
        torch.optim.Adam(layer.parameters(),
                         lr=opt.learning_rate,
                         weight_decay=opt.weight_decay)
        for layer in resnet.encoder
    ]
    resnet, num_GPU = model_utils.distribute_over_GPUs(opt, resnet, num_GPU=num_GPU)
    return resnet, optimizer, num_GPU
def train(opt, resnet, num_GPU):
    """Greedy layer-wise training loop.

    NOTE(review): relies on module-level globals `train_loader` and
    `optimizer_r` created in the __main__ block -- confirm they exist
    before calling from elsewhere. Checkpoints go to a hard-coded
    cluster path.
    """
    total_step = len(train_loader)
    starttime = time.time()
    print_idx = 100  # NOTE(review): unused -- presumably a leftover print interval
    cur_train_module = opt.cur_train_module
    for epoch in range(opt.start_epoch, opt.num_epochs):
        print(
            "Epoch [{}/{}], total Step [{}], Time (s): {:.1f}".format(
                epoch + 1,
                opt.num_epochs,
                total_step,
                time.time() - starttime,
            )
        )
        for step, (img1, img2, target) in enumerate(train_loader):
            # two augmented views of the same batch
            x_t1 = img1.to(opt.device)
            x_t2 = img2.to(opt.device)
            _, _, _, _, loss = resnet(x_t1, x_t2, num_GPU, opt, n=cur_train_module)
            loss = torch.mean(loss, 0)  # average over the data-parallel dim
            if cur_train_module != 4:
                # training a single module: keep only its loss entry
                loss = loss[cur_train_module].unsqueeze(0)
            for idx, cur_loss in enumerate(loss):
                if len(loss) == 1:
                    idx = cur_train_module
                resnet.zero_grad()
                if idx == 3:
                    cur_loss.backward()
                else:
                    # keep the graph alive for the remaining modules' backward passes
                    cur_loss.backward(retain_graph=True)
                optimizer_r[idx].step()
                print_loss = cur_loss.item()
                print("\t \t Loss {}: \t \t {:.4f}".format(idx, print_loss))
        # if (epoch + 1) % 10 == 0:
        # checkpoint every epoch to a hard-coded cluster path
        torch.save(resnet.state_dict(),
                   '/lustre/home/hyguo/code/code/SimCLR/models/models_0506/model16-{}-{}.pth'.format(epoch+1, cur_train_module))
if __name__ == "__main__":
    opt = arg_parser.parse_args()
    arg_parser.create_log_path(opt)
    opt.training_dataset = "unlabeled"
    # random seeds
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    np.random.seed(opt.seed)
    torch.backends.cudnn.benchmark = True
    # load model
    resnet, optimizer_r, num_GPU = load_model_and_optimizer(opt)
    # resume from the previous module's checkpoint when training modules 1-3
    if opt.cur_train_module > 0 and opt.cur_train_module < 4:
        resnet.load_state_dict(torch.load(
            '/lustre/home/hyguo/code/code/SimCLR/models/models_0506/model16-100-{}.pth'.format(opt.cur_train_module - 1)))
    logs = logger.Logger(opt)
    train_loader, _, supervised_loader, _, test_loader, _ = get_data_greedy.get_dataloader(opt)
    train(opt, resnet, num_GPU)
|
try:
import cPickle as pickle
except:
import pickle
import six, os, sys, csv, time, \
random, os.path as osp, \
subprocess, json, \
numpy as np, pandas as pd, \
glob, re, networkx as nx, \
h5py, yaml, copy, multiprocessing as mp, \
pandas as pd, yaml, collections, \
logging, colorlog, yaml, cvbase as cvb, shutil, \
easydict
import subprocess
# tensorflow as tf, keras, torch , redis
# import torch
# from torch import nn
# from torch.autograd import Variable
# import torch.nn.functional as F
import matplotlib
# matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from IPython import embed
from IPython.display import display, HTML, SVG
# root_path = osp.normpath(
# osp.join(osp.abspath(osp.dirname(__file__)))
# )
root_path = '/home/wangxinglu/prj/few-shot/'
def set_stream_logger(log_level=logging.INFO):
    """Attach a colorized console handler to the root logger."""
    handler = colorlog.StreamHandler()
    handler.setLevel(log_level)
    formatter = colorlog.ColoredFormatter(
        '%(asctime)s %(filename)s [line:%(lineno)d] %(log_color)s%(levelname)s%(reset)s %(message)s')
    handler.setFormatter(formatter)
    logging.root.addHandler(handler)
def set_file_logger(work_dir=None, log_level=logging.DEBUG):
    """Attach a handler writing log.txt under *work_dir* (cwd by default) to the root logger."""
    target_dir = work_dir or os.getcwd()
    handler = logging.FileHandler(os.path.join(target_dir, 'log.txt'))
    handler.setLevel(log_level)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s'))
    logging.root.addHandler(handler)
# def set_logger():
# Configure the root logger once at import time: DEBUG level, colored
# console output plus a log.txt file in the current working directory.
logging.root.setLevel(logging.DEBUG)
set_stream_logger()
set_file_logger()
def gc_collect():
    """Trigger a full pass of the cyclic garbage collector."""
    import gc
    gc.collect()
def sel_np(A):
    """Serialize a numpy array to a JSON string (shape, dtype and flat data)."""
    payload = {
        'shape': A.shape,
        'dtype': str(A.dtype),
        'A': A.ravel().tolist(),
    }
    return json.dumps(payload)
def desel_np(s):
    """Inverse of sel_np: rebuild a numpy array from its JSON string."""
    import json
    payload = json.loads(s)
    flat = np.array(payload['A'], dtype=payload['dtype'])
    return flat.reshape(payload['shape'])
def append_file(line, file=None):
    """Append *line* (newline added) to *file*, defaulting to 'append.txt'."""
    target = file or 'append.txt'
    with open(target, 'a') as handle:
        handle.write(line + '\n')
def cpu_priority(level=19):
    """Lower this process's scheduling priority (nice value) via psutil."""
    import psutil
    psutil.Process(os.getpid()).nice(level)
def get_gpu_memory_map():
    """Get the current gpu usage.

    Returns
    -------
    usage: dict
        Maps integer device id -> used memory in MB, as reported by nvidia-smi.
    """
    raw = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used', '--format=csv,nounits,noheader'],
        encoding='utf-8')
    # One line per device, in device-id order.
    used = [int(line) for line in raw.strip().split('\n')]
    return {device_id: mem for device_id, mem in enumerate(used)}
def init_dev(n=(0,)):
    """Restrict CUDA to device(s) *n* and point PATH/LD_LIBRARY_PATH at CUDA 8.0."""
    logging.info('use gpu {}'.format(n))
    from os.path import expanduser
    home = expanduser("~")
    if isinstance(n, int):
        n = (n,)
    # Build "i," comma-terminated list, e.g. (1, 3) -> "1,3,".
    devs = ''.join(str(dev) + ',' for dev in n)
    os.environ["CUDA_VISIBLE_DEVICES"] = devs
    os.environ['PATH'] = home + '/cuda-8.0/bin:' + os.environ['PATH']
    # os.environ['PATH'] = home + '/anaconda2/bin:' + os.environ['PATH']
    os.environ['PATH'] = home + '/usr/local/cuda-8.0/bin:' + os.environ['PATH']
    # NOTE(review): the second assignment overwrites the first -- only
    # /usr/local/cuda-8.0/lib64 ends up in LD_LIBRARY_PATH (as in the original).
    os.environ['LD_LIBRARY_PATH'] = home + '/cuda-8.0/lib64'
    os.environ['LD_LIBRARY_PATH'] = '/usr/local/cuda-8.0/lib64'
    # os.environ['PYTHONWARNINGS'] = "ignore"
def set_env(key, value):
    """Prepend the absolute path of *value* to the environment variable *key*.

    Fix: the original raised KeyError when *key* was not already set; the
    variable is now created in that case.
    """
    value = os.path.abspath(value)
    if key in os.environ:
        os.environ[key] = value + ':' + os.environ[key]
    else:
        os.environ[key] = value
def allow_growth_tf():
    """Return a TF session config with soft placement and growing GPU memory."""
    import tensorflow as tf
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    return config
def allow_growth_keras():
    """Create a memory-growing TF session on the default graph and install it as Keras' backend session."""
    import tensorflow as tf
    graph = tf.get_default_graph()
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config, graph=graph)
    import keras.backend as K
    K.set_session(session)
    return session
def get_dev(n=1, ok=(0, 1, 2, 3, 4, 5, 6, 7), mem=(0.5, 0.9), sleep=60):
    """Pick *n* free GPU ids via GPUtil, polling until a device frees up.

    Fix: corrected the misspelled log message ('no device avelaible').

    :param n: number of devices wanted; a single int is returned when n == 1.
    :param ok: whitelist of acceptable device ids.
    :param mem: (strict, relaxed) max memory-usage fractions for the first
        attempt and the polling loop respectively.
    :param sleep: seconds to wait between polls.
    """
    import GPUtil, time
    logging.info('Auto select gpu')
    GPUtil.showUtilization()

    def _limit(devs, ok):
        # Keep only whitelisted devices, as plain ints.
        return [int(dev) for dev in devs if dev in ok]

    # First try: strict memory threshold, devices ordered by free memory.
    devs = GPUtil.getAvailable(order='memory', maxLoad=1, maxMemory=mem[0], limit=n)
    devs = _limit(devs, ok)
    if len(devs) >= 1:
        logging.info('available {}'.format(devs))
        return int(devs[0]) if n == 1 else devs
    # Poll with the relaxed threshold until something is available.
    while len(devs) == 0:
        devs = GPUtil.getAvailable(order='random', maxLoad=1, maxMemory=mem[1], limit=n)
        devs = _limit(devs, ok)
        if len(devs) >= 1:
            logging.info('available {}'.format(devs))
            GPUtil.showUtilization()
            return devs[0] if n == 1 else devs
        logging.info('no device available')
        GPUtil.showUtilization()
        time.sleep(sleep)
# def grid_iter(tmp):
# res = cartesian(tmp.values())
# np.random.shuffle(res)
# for res_ in res:
# yield dict(zip(tmp.keys(), res_))
def shuffle_iter(iter):
    """Yield the items of *iter* in numpy-shuffled random order."""
    items = list(iter)
    np.random.shuffle(items)
    for item in items:
        yield item
def optional_arg_decorator(fn):
    """Allow a decorator to be used both bare (@deco) and with args (@deco(x))."""
    def wrapped_decorator(*args):
        # Bare usage: called directly with the decoratee.
        if len(args) == 1 and callable(args[0]):
            return fn(args[0])
        # Parameterized usage: return the real decorator closing over *args.
        def real_decorator(decoratee):
            return fn(decoratee, *args)
        return real_decorator
    return wrapped_decorator
def randomword(length):
    """Return a random string of *length* ASCII lowercase letters.

    Fix: ``string.lowercase`` exists only in Python 2; Python 3 names it
    ``string.ascii_lowercase``.
    """
    import random, string
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
def static_vars(**kwargs):
    """Decorator attaching each keyword argument as an attribute of the function."""
    def decorate(func):
        for name, value in kwargs.items():
            setattr(func, name, value)
        return func
    return decorate
def cosort(tensor, y, return_y=False):
    """Sort *tensor* by the parallel key sequence *y*; optionally also return sorted *y*."""
    pairs = sorted(zip(tensor, y), key=lambda pair: pair[1])
    sorted_tensor = np.array([item for item, _ in pairs])
    if return_y:
        return sorted_tensor, np.array([key for _, key in pairs])
    return sorted_tensor
class Timer(object):
    """A simple timer."""

    def __init__(self):
        # Accumulators kept for API compatibility; NOTE(review): toc() below
        # never updates total_time/calls/average_time -- confirm intended.
        self.total_time = 0.
        self.calls = 0
        self.start_time = 0.
        self.diff = 0.
        self.average_time = 0.

    def tic(self):
        # using time.time instead of time.clock because time time.clock
        # does not normalize for multithreading
        self.start_time = time.time()

    def toc(self, average=True):
        # Return seconds elapsed since the last tic()/toc() and restart the
        # clock. NOTE(review): the `average` flag is accepted but ignored.
        self.diff = time.time() - self.start_time
        self.start_time = time.time()
        # logger.info('time pass {}'.format(self.diff))
        return self.diff
# Module-level shared timer instance.
timer = Timer()
@optional_arg_decorator
def timeit(fn, info=''):
    """Decorator: log how long each call of *fn* takes, prefixed with *info*."""
    def wrapped_fn(*arg, **kwargs):
        stopwatch = Timer()
        stopwatch.tic()
        res = fn(*arg, **kwargs)
        logging.info((info + 'takes time {}').format(stopwatch.toc()))
        return res
    return wrapped_fn
class Database(object):
    """Dict-like wrapper over an h5py.File storing numpy arrays by key.

    Supports item get/set/delete, iteration, len(), flush/close, and use as
    a context manager that closes the file on exit.

    Fix: when replacing a dataset whose shape or dtype changed, the old
    shape was read *after* the dataset had been deleted (KeyError); it is
    now captured before deletion.
    """

    def __init__(self, *args, **kwargs):
        # Arguments are forwarded verbatim to h5py.File (path, mode, ...).
        self.fid = h5py.File(*args, **kwargs)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.fid.close()

    def __getitem__(self, keys):
        # A tuple/list of keys returns a list of arrays; a single key one array.
        if isinstance(keys, (tuple, list)):
            return [self._get_single_item(k) for k in keys]
        return self._get_single_item(keys)

    def _get_single_item(self, key):
        return np.asarray(self.fid[key])

    def __setitem__(self, key, value):
        if key in self.fid:
            if self.fid[key].shape == value.shape and \
                    self.fid[key].dtype == value.dtype:
                # Same layout: overwrite the data in place.
                print('shape type same, old is updated')
                self.fid[key][...] = value
            else:
                # Layout changed: capture the old shape BEFORE deleting,
                # then recreate the dataset.
                old_shape = self.fid[key].shape
                del self.fid[key]
                print('old shape {} new shape {} updated'.format(old_shape, value.shape))
                self.fid.create_dataset(key, data=value)
        else:
            self.fid.create_dataset(key, data=value)

    def __delitem__(self, key):
        del self.fid[key]

    def __len__(self):
        return len(self.fid)

    def __iter__(self):
        return iter(self.fid)

    def flush(self):
        self.fid.flush()

    def close(self):
        self.fid.close()

    def keys(self):
        return self.fid.keys()
def mypickle(data, file_path):
    """Pickle *data* to *file_path*, creating parent directories as needed."""
    mkdir_p(osp.dirname(file_path), delete=False)
    print('pickle into', file_path)
    with open(file_path, 'wb') as handle:
        pickle.dump(data, handle, pickle.HIGHEST_PROTOCOL)
def unpickle(file_path):
    """Load and return the pickled object stored at *file_path*."""
    with open(file_path, 'rb') as handle:
        return pickle.load(handle)
def write_df(df, path):
    """Write *df* to an HDF5 file at *path* under the key 'df', overwriting."""
    df.to_hdf(path, key='df', mode='w')
def read_df(path):
    """Read back the DataFrame stored by write_df (HDF5 key 'df')."""
    return pd.read_hdf(path, key='df')
def mkdir_p(path, delete=False):
    """``mkdir -p`` equivalent; optionally delete an existing *path* first (via rm).

    Fix: create the directory with os.makedirs instead of shelling out to
    ``mkdir -p`` -- no subprocess spawn, and safe for paths that would need
    shell quoting.
    """
    if path == '':
        return
    if delete:
        rm(path)
    if not osp.exists(path):
        print('mkdir -p ' + path)
        # exist_ok guards against a concurrent creation race.
        os.makedirs(path, exist_ok=True)
def shell(cmd, block=True):
    """Run *cmd* through the shell.

    block=True: wait for completion, log non-empty stdout/stderr, and return
    the (stdout, stderr) bytes pair.
    block=False: return the running Popen object immediately.
    """
    import os
    env = os.environ.copy()
    home = os.path.expanduser('~')
    # logging.info('cmd is ' + cmd)
    if not block:
        print('Non-block!')
        return subprocess.Popen(cmd,
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env=env)
    proc = subprocess.Popen(cmd,
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)
    out_err = proc.communicate()
    if out_err[0] != b'' and out_err[0] != '':
        logging.info('stdout {}'.format(out_err[0]))
    if out_err[1] != b'' and out_err[1] != '':
        logging.error('stderr {}'.format(out_err[1]))
    return out_err
def check_path(path):
    """Ensure the parent directory of *path* exists."""
    parent = osp.dirname(path)
    if not osp.exists(parent):
        mkdir_p(parent)
def ln(path, to_path):
    """Symlink *to_path* -> abspath(*path*) via `ln -s`; returns the Popen."""
    if osp.exists(to_path):
        print('error! exist ' + to_path)
    source = osp.abspath(path)
    cmd = "ln -s " + source + " " + to_path
    print(cmd)
    return subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
def tar(path, to_path=None):
    """Extract the archive at *path* (into *to_path*) with `tar xf`, then delete it.

    NOTE(review): calling this with the default to_path=None raises TypeError
    at osp.exists(None) before the `to_path is not None` branch is reached --
    confirm whether the no-destination path is ever used.
    """
    if not osp.exists(path):
        return
    if not osp.exists(to_path):
        mkdir_p(to_path)
    # Destination already has content: assume a previous extraction,
    # drop the archive and bail out.
    if os.path.exists(to_path) and not len(os.listdir(to_path)) == 0:
        rm(path)
        return
    if to_path is not None:
        cmd = "tar xf " + path + " -C " + to_path
        print(cmd)
    else:
        cmd = "tar xf " + path
    shell(cmd, block=True)
    # Remove the archive after extraction.
    if os.path.exists(path):
        rm(path)
def rmdir(path):
    """Remove an (empty) directory via the `rmdir` shell command."""
    shell("rmdir " + path)
def rm(path, block=True, hard=True):
    """Delete *path*.

    hard=True: remove permanently (rmtree for directories, os.remove for files).
    hard=False: keep a backup by renaming to <path>.bakN with the next free N.
    *block* is forwarded to shell() and only matters in the soft branch.
    """
    path = osp.abspath(path)
    if not hard:
        # Collect existing <path>.bakN backups and pick max(N) + 1.
        dst = glob.glob('{}.bak*'.format(path))
        # NOTE(review): path is interpolated unescaped into the regex and the
        # pattern is a non-raw string ('\d') -- works, but fragile.
        parsr = re.compile('{}.bak(\d+)'.format(path))
        used = [0, ]
        for d in dst:
            m = re.match(parsr, d)
            used.append(int(m.groups()[0]))
        dst_path = '{}.bak{}'.format(path, max(used) + 1)
        cmd = 'mv {} {} '.format(path, dst_path)
        print(cmd)
        shell(cmd, block=block)
    else:
        if osp.isdir(path):
            shutil.rmtree(path)
        else:
            os.remove(path)
        return
def show_img(path):
    """Return an IPython Image for notebook display of the file at *path*."""
    from IPython.display import Image
    return Image(filename=path)
def show_pdf(path):
    """Return an IPython IFrame embedding the PDF at *path* (relative path)."""
    from IPython.display import IFrame
    return IFrame(osp.relpath(path), width=600, height=300)
def print_graph_info():
    """Print assorted debug info about the default TF graph (placeholders, ops, variables)."""
    import tensorflow as tf
    graph = tf.get_default_graph()
    graph.get_tensor_by_name("Placeholder:0")
    placeholder_names = [op.name for op in graph.get_operations() if op.type == "Placeholder"]
    print([graph.get_tensor_by_name(name + ":0") for name in placeholder_names])
    print([op.type for op in graph.get_operations()])
    print([n.name for n in tf.get_default_graph().as_graph_def().node])
    print([v.name for v in tf.global_variables()])
    print(graph.get_operations()[20])
def chdir_to_root(fn):
    """Decorator: run *fn* with the CWD switched to the project root.

    Fix: the original did not restore the working directory when *fn*
    raised; restoration now happens in a ``finally`` block.
    """
    def wrapped_fn(*args, **kwargs):
        restore_path = os.getcwd()
        os.chdir(root_path)
        try:
            return fn(*args, **kwargs)
        finally:
            os.chdir(restore_path)
    return wrapped_fn
def scp(src, dest, dry_run=False):
    """Non-blocking `scp -r src dest`; with dry_run only print the command."""
    cmd = 'scp -r ' + src + ' ' + dest
    print(cmd)
    if dry_run:
        return
    return shell(cmd, block=False)
def read_list(file, delimi=" "):
    """Read *file* as a delimiter-separated table of strings; [] when missing."""
    if not osp.exists(file):
        return []
    return np.genfromtxt(file, dtype='str', delimiter=delimi)
def cp(from_path, to):
    """Recursive copy via the `cp -r` shell utility."""
    cmd = 'cp -r ' + from_path + ' ' + to
    subprocess.call(cmd.split())
def mv(from_path, to):
    """Move one path (or a list of paths) into *to*, creating *to* if needed."""
    if not osp.exists(to):
        mkdir_p(to)
    sources = from_path if isinstance(from_path, list) else [from_path]
    for source in sources:
        subprocess.call(('mv ' + source + ' ' + to).split())
def dict_concat(d_l):
    """Merge a list of dicts left-to-right (later keys win) without mutating inputs."""
    merged = d_l[0].copy()
    for extra in d_l[1:]:
        merged.update(extra)
    return merged
def clean_name(name):
    """Strip trailing ':<digits>' and '_<digits>' suffixes from a tensor-style name.

    Fix: use raw strings for the regex patterns -- '\\d' inside a plain
    string is an invalid escape sequence (DeprecationWarning since 3.6).
    """
    import re
    name = re.findall(r'([a-zA-Z0-9/-]+)(?::\d+)?', name)[0]
    name = re.findall(r'([a-zA-Z0-9/-]+)(?:_\d+)?', name)[0]
    return name
class Struct(object):
    """Attribute-style view of a dict: both ``s.key`` and ``s['key']`` work."""

    def __init__(self, entries):
        # Copy the mapping's entries directly onto the instance __dict__.
        self.__dict__.update(entries)

    def __getitem__(self, item):
        return self.__dict__[item]
def dict2obj(d):
    """Wrap dict *d* in a Struct for attribute-style access."""
    return Struct(d)
def dict2str(others):
    """Flatten a (possibly nested) dict into a '_key_value' string; list items joined by '-'.

    Fix: ``dict.iteritems()`` is Python 2 only; use ``.items()``.
    """
    name = ''
    for key, val in others.items():
        name += '_' + str(key)
        if isinstance(val, dict):
            # Nested dicts recurse with an extra '_' separator.
            name += '_' + dict2str(val)
        elif isinstance(val, list):
            for val_ in val:
                name += '-' + str(val_)
        else:
            name += '_' + str(val)
    return name
def list2str(li, delimier=''):
    """Concatenate str() of each item, each followed by *delimier*."""
    return ''.join(str(item) + delimier for item in li)
def write_list(file, l, sort=True, delimiter=' ', fmt='%.18e'):
    """Save *l* to *file* via np.savetxt, optionally column-sorted first."""
    arr = np.array(l)
    if sort:
        arr = np.sort(arr, axis=0)
    np.savetxt(file, arr, delimiter=delimiter, fmt=fmt)
def rsync(from_, to):
    """Kick off a non-blocking `rsync -avzP from_ to`; returns shell()'s Popen."""
    cmd = 'rsync -avzP ' + from_ + ' ' + to
    print(cmd)
    return shell(cmd, block=False)
def i_vis_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline in a notebook via the tf-graph-basic widget."""
    import tensorflow as tf
    from IPython.display import display, HTML, SVG
    import os

    def strip_consts(graph_def, max_const_size=32):
        """Strip large constant values from graph_def."""
        import tensorflow as tf
        strip_def = tf.GraphDef()
        for n0 in graph_def.node:
            n = strip_def.node.add()
            n.MergeFrom(n0)
            if n.op == 'Const':
                tensor = n.attr['value'].tensor
                size = len(tensor.tensor_content)
                # Replace oversized constant payloads with a placeholder note.
                if size > max_const_size:
                    tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>" % size)
        return strip_def

    # Accept either a Graph or a GraphDef.
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph' + str(np.random.rand()))
    # NOTE(review): this replace() is a no-op as written; the usual idiom
    # escapes double quotes for the srcdoc attribute (e.g. with &quot;) --
    # the argument may have been mangled; confirm against the original file.
    iframe = """
        <iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '"'))
    display(HTML(iframe))
if __name__ == '__main__':
    # Utility module: nothing to run directly.
    pass
|
"""
Created on Wed Sep 18 16:47:06 2019
@author: Kanthasamy Chelliah
The function wavogram(wavFile) provides spectrogram for any wav file (mono).
Simple usage: wavogram(wavFile)
Optional arguments:
str: pltTitle => send any string to be used as the plot title.
bool: masked => set to false if full data needs to be plotted. Leave it default if masking lower amplitudes is okay.
bool: cbar => set to true if colorbar needs to be plotted.
Output .jpg file is saved with the same basename as the .wav file
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import soundfile as sf
from scipy import signal
def wavogram(wavFile, pltTitle='', masked=True, cbar=False):
    """Render the spectrogram of a mono wav file and save it as <basename>.jpg.

    Fixes: removed the unused figure variable, and renamed the spectrogram
    outputs -- scipy.signal.spectrogram returns (frequencies, times, Sxx),
    which the original bound to the misleading names x, y.

    :param wavFile: path to a mono .wav file (multi-channel files are rejected).
    :param pltTitle: optional plot title (omitted when empty).
    :param masked: when True, values more than 40% of the dB range below the
        peak are masked out (plotted blank).
    :param cbar: when True, draw a colorbar.
    """
    plt.figure()
    plt.set_cmap('hot')
    ax1 = plt.subplot(3, 1, 1)
    sig, fs = sf.read(wavFile)
    if sig.ndim > 1:
        print("Error! Can process only mono files!!")
        return
    freqs, times, S = signal.spectrogram(sig, fs)
    # Convert to dB re 20 uPa. NOTE(review): 20*log10 treats the spectrogram
    # values as amplitudes, but signal.spectrogram returns a PSD by default --
    # confirm the intended scaling.
    S = 20. * np.log10(S / 0.00002)
    if masked:
        cutOff = np.max(S) - 0.4 * (np.max(S) - np.min(S))
        S[S < cutOff] = np.nan
    plt.pcolormesh(times, freqs, S)
    if pltTitle != '':
        ax1.set_title(pltTitle)
    plt.ylabel('Frequency (Hz)')
    plt.xlabel('Time (s)')
    if cbar:
        plt.colorbar()
    plt.tight_layout()
    plt.savefig(os.path.splitext(wavFile)[0] + '.jpg', bbox_inches="tight", dpi=300)
if __name__ == '__main__':
    # Example: process a single wav file.
    wav_file = 'file.wav'
    wavogram(wav_file)
    # Example: process every wav file in one directory.
    import glob
    wavLoc = '/Path/to/files/*.wav';
    for wav_file in glob.glob(wavLoc):
        wavogram(wav_file)
|
# MNIST dataset: http://yann.lecun.com/exdb/mnist/
from tensorflow.examples.tutorials.mnist import input_data
# Load the dataset (downloaded to the given directory on first use; one-hot labels).
mnist = input_data.read_data_sets('e:/soft/MNIST_DATA', one_hot=True)
# Training images
train_x = mnist.train.images
# Validation images
validation_x = mnist.validation.images
# Test images
test_x = mnist.test.images
# Training labels (one-hot)
train_y = mnist.train.labels
# Validation labels
validation_y = mnist.validation.labels
# Test labels
test_y = mnist.test.labels
print('train_x.shape:', train_x.shape, 'train_y.shape:', train_y.shape)
# Inspect the second training sample and its label
print(train_x[1])
print(train_y[1])
# Fetch the next batch of 100 training samples
images, labels = mnist.train.next_batch(100)
print('images.shape:', images.shape, 'labels.shape:', labels.shape)
import matplotlib.pyplot as plt
# Plot the first 20 training samples on a 4x5 grid
fig, ax = plt.subplots(nrows=4, ncols=5)
ax = ax.flatten()
for i in range(20):
    # Each flat 784-vector reshapes to a 28x28 grayscale image.
    img = train_x[i].reshape(28, 28)
    ax[i].imshow(img, cmap='Greys')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.show()
|
import importlib
import inspect
import json
import re
import shutil
import sys
import traceback
from os import path
from getgauge import logger
from getgauge.registry import registry
from getgauge.util import *
# Resolve the project layout once at import time.
project_root = get_project_root()
impl_dirs = get_step_impl_dirs()
env_dir = os.path.join(project_root, 'env', 'default')
requirements_file = os.path.join(project_root, 'requirements.txt')
# Make project modules importable when loading step implementations.
sys.path.append(project_root)
# Plugin metadata / skeleton constants.
PLUGIN_JSON = 'python.json'
VERSION = 'version'
PYTHON_PROPERTIES = 'python.properties'
SKEL = 'skel'
def load_impls(step_impl_dirs=impl_dirs):
    """Import every step-implementation module found under the given directories.

    Fix: the error log formatted the whole directory list instead of the
    specific directory that is missing.
    """
    os.chdir(project_root)
    for impl_dir in step_impl_dirs:
        if not os.path.isdir(impl_dir):
            logger.error('Cannot import step implementations. Error: {} does not exist.'.format(impl_dir))
            logger.error('Make sure `STEP_IMPL_DIR` env var is set to a valid directory path.')
            return
        # Imports are rooted at the project when the impl dir lives inside it.
        base_dir = project_root if impl_dir.startswith(project_root) else os.path.dirname(impl_dir)
        _import_impl(base_dir, impl_dir)
def copy_skel_files():
    """Copy the skeleton project files (env dir, step impls, properties, requirements).

    Fixes: requirements.txt is written through a context manager so the
    handle is closed even on error, and the bare ``except:`` is narrowed to
    ``except Exception`` so KeyboardInterrupt/SystemExit still propagate.
    """
    try:
        logger.info('Initialising Gauge Python project')
        logger.info('create {}'.format(env_dir))
        os.makedirs(env_dir)
        logger.info('create {}'.format(impl_dirs[0]))
        shutil.copytree(os.path.join(SKEL, path.basename(impl_dirs[0])), impl_dirs[0])
        logger.info('create {}'.format(os.path.join(env_dir, PYTHON_PROPERTIES)))
        shutil.copy(os.path.join(SKEL, PYTHON_PROPERTIES), env_dir)
        with open(requirements_file, 'w') as f:
            f.write('getgauge==' + _get_version())
    except Exception:
        logger.fatal('Exception occurred while copying skel files.\n{}.'.format(traceback.format_exc()))
def _import_impl(base_dir, step_impl_dir):
    """Recursively import every .py file under *step_impl_dir*."""
    for entry in os.listdir(step_impl_dir):
        entry_path = os.path.join(step_impl_dir, entry)
        if entry.endswith('.py'):
            _import_file(base_dir, entry_path)
        elif path.isdir(entry_path):
            _import_impl(base_dir, entry_path)
def _import_file(base_dir, file_path):
    """Import one step-implementation file and bind class-based steps.

    Converts the path (relative to *base_dir*) into a dotted module name,
    imports it, and for every class defined in that module whose source uses
    a gauge decorator, instantiates it and attaches the instance to the
    registered steps/hooks.
    """
    rel_path = os.path.normpath(file_path.replace(base_dir + os.path.sep, ''))
    try:
        module_name = os.path.splitext(rel_path.replace(os.path.sep, '.'))[0]
        m = importlib.import_module(module_name)
        # Get all classes defined in (not merely imported into) this module
        classes = inspect.getmembers(m, lambda member: inspect.isclass(member) and member.__module__ == module_name)
        if len(classes) > 0:
            for c in classes:
                file = inspect.getfile(c[1])
                # Create instance of step implementation class.
                if _has_methods_with_gauge_decoratores(c[1]):
                    update_step_resgistry_with_class(c[1](), file_path)  # c[1]() will create a new instance of the class
    except:
        logger.fatal('Exception occurred while loading step implementations from file: {}.\n{}'.format(rel_path, traceback.format_exc()))
# Inject instance into each class method (hook/step)
def update_step_resgistry_with_class(instance, file_path):
    """Attach *instance* to every registered step/hook implemented in *file_path*.

    Fix: the instance's method-name collection is loop-invariant, so it is
    now computed once (as a set) instead of re-inspecting the instance on
    every registered method.
    """
    class_methods = {member[0] for member in inspect.getmembers(instance, inspect.ismethod)}
    for info in registry.get_all_methods_in(file_path):
        if info.impl.__name__ in class_methods:
            info.instance = instance
def _get_version():
    """Return the plugin version string from python.json.

    Fix: the original leaked the file handle; it is now closed via a
    context manager.
    """
    with open(PLUGIN_JSON) as json_file:
        return json.loads(json_file.read())[VERSION]
def _has_methods_with_gauge_decoratores(klass):
foo = r"@(step|before_suite|after_suite|before_scenario|after_scenario|before_spec|after_spec|before_step|after_step|screenshot|custom_screen_grabber)"
sourcelines = inspect.getsourcelines(klass)[0]
for i,line in enumerate(sourcelines):
if re.match(foo, line.strip()) != None:
return True
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-04-03 18:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: add a usage counter to drugs and `updated`
    timestamps (defaulting to now) to drugs, pharmacy and prices."""

    dependencies = [
        ('pharmacies', '0003_drugs_recommended_price'),
    ]

    operations = [
        migrations.AddField(
            model_name='drugs',
            name='counter',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='drugs',
            name='updated',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='pharmacy',
            name='updated',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
        migrations.AddField(
            model_name='prices',
            name='updated',
            field=models.DateTimeField(default=django.utils.timezone.now),
        ),
    ]
|
u"""Import utils functions."""
from __future__ import absolute_import
from .utils import pairwise, unzip, admissible_filter, sanity_check
from .bnode import Bnode
from .drawbkps import draw_bkps
|
from rest_framework import serializers
from .models import Bucketlist
from .models import Teste
from .models import Cliente, Notificacao, Promocao, Categoria, Prestador, Servico
class BucketlistSerializer(serializers.ModelSerializer):
    """Serializer to map the Bucketlist model instance into JSON format."""

    class Meta:
        """Meta class to map serializer's fields with the model fields."""
        model = Bucketlist
        fields = ('id', 'name', 'date_created', 'date_modified')
        # Timestamps are maintained by the model, not by API clients.
        read_only_fields = ('date_created', 'date_modified')
class TesteSerializer(serializers.ModelSerializer):
    """Serialize Teste instances (id, nome, descricao)."""

    class Meta:
        model = Teste
        fields = ("id", "nome", "descricao")
class ClienteSerializer(serializers.ModelSerializer):
    """Serialize Cliente instances (id, nome, cpf, email, data_nasc)."""

    class Meta:
        model = Cliente
        fields = ("id", "nome", "cpf", "email", "data_nasc")
class NotificacaoSerializer(serializers.ModelSerializer):
    """Serialize Notificacao instances (id, descricao, data, hora)."""

    class Meta:
        model = Notificacao
        fields = ("id", "descricao", "data", "hora")
class PromocaoSerializer(serializers.ModelSerializer):
    """Serialize Promocao instances (title, description, image and link)."""

    class Meta:
        model = Promocao
        fields = ("id", "titulo", "descricao", "imagem_promocao", "link_promocao")
class CategoriaSerializer(serializers.ModelSerializer):
    """Serialize Categoria instances (id, nome, imagem_categoria)."""

    class Meta:
        model = Categoria
        fields = ("id", "nome", "imagem_categoria")
class PrestadorSerializer(serializers.ModelSerializer):
    """Serialize Prestador instances (id, nome, cpf, email, data_nasc)."""

    class Meta:
        model = Prestador
        fields = ("id", "nome", "cpf", "email", "data_nasc")
class ServicoSerializer(serializers.ModelSerializer):
    """Serialize Servico instances (title, description, value, image, type, date, time)."""

    class Meta:
        model = Servico
        fields = ("id", "titulo", "descricao", "valor", "imagem", "tipo", "data", "hora")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import state
def cmd_help(msg):
    """
    Return the IrcBot help message wrapped in a done-state.

    @param msg: unused; present only for signature consistency with the
        other command handlers.
    """
    help_msg =\
    """
    IrcBot
    Implemented commands: help SHUTDOWN word-count karma calc
    """
    return state.done(help_msg)
|
import numpy as np
import tensorflow as tf
from Agent.networks import *
# Fix RNG seeds for reproducible runs.
np.random.seed(1)
tf.set_random_seed(1)
class PolicyGradient:
    """REINFORCE policy-gradient agent over stacked 84x84x4 image observations.

    Fix: ``choose_action_test`` previously called ``np.argmax`` on
    ``prob_weights.shape[1]`` -- the argmax of a scalar -- so it always
    returned action 0. It now takes the argmax of the probabilities.
    """

    def __init__(
            self,
            n_actions,
            learning_rate=0.02,
            reward_decay=0.95,
    ):
        self.n_actions = n_actions
        self.lr = learning_rate
        self.gamma = reward_decay  # discount factor for returns
        # Per-episode buffers: observations, actions, rewards.
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []
        self.network = QNetworkNIPS([84, 84], n_actions, 'Net')
        self._build_net()
        self.sess = tf.Session()
        self.merged = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter("./pg_log", graph=tf.get_default_graph())
        self.sess.run(tf.global_variables_initializer())

    def weight_variable(self, shape):
        """Create a variable with Xavier/Glorot initialization."""
        initializer = tf.contrib.layers.xavier_initializer()
        var = tf.Variable(initializer(shape))
        return var

    def _build_net(self):
        """Build placeholders, policy head, loss and the Adam train op."""
        self.tf_obs = tf.placeholder(tf.float32, [None, 84, 84, 4])
        self.tf_acts = tf.placeholder(tf.int32, [None, ], name="actions_num")
        self.tf_vt = tf.placeholder(tf.float32, [None, ], name="actions_value")
        all_act = tf.identity(self.network(self.tf_obs))
        self.all_act_prob = tf.nn.softmax(all_act, name='act_prob')  # use softmax to convert to probability
        with tf.name_scope('loss'):
            # Maximizing total reward (log_p * R) == minimizing -(log_p * R);
            # sparse_softmax_cross_entropy yields -log p(chosen action).
            neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=all_act, labels=self.tf_acts)
            # or: neg_log_prob = tf.reduce_sum(-tf.log(self.all_act_prob)*tf.one_hot(self.tf_acts, self.n_actions), axis=1)
            loss = tf.reduce_mean(neg_log_prob * self.tf_vt)  # reward-weighted loss
            tf.summary.scalar('loss', loss)
        with tf.name_scope('train'):
            self.train_op = tf.train.AdamOptimizer(self.lr).minimize(loss)

    def choose_action(self, observation):
        """Sample an action from the current policy distribution."""
        prob_weights = self.sess.run(self.all_act_prob, feed_dict={self.tf_obs: [observation]})
        action = np.random.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())
        return action

    def choose_action_test(self, observation):
        """Greedy evaluation policy: return the most probable action.

        Fix: takes the argmax of the probability vector itself (the original
        took the argmax of the array's second dimension size).
        """
        prob_weights = self.sess.run(self.all_act_prob, feed_dict={self.tf_obs: [observation]})
        action = np.argmax(prob_weights.ravel())
        return action

    def store_transition(self, s, a, r):
        """Record one (observation, action, reward) step of the current episode."""
        # Observations are wrapped in a list so np.vstack in learn() stacks
        # them along a new batch axis.
        self.ep_obs.append([s])
        self.ep_as.append(a)
        self.ep_rs.append(r)

    def learn(self, ep):
        """Run one policy-gradient update over the finished episode *ep*."""
        # Discount and normalize the episode's rewards.
        discounted_ep_rs_norm = self._discount_and_norm_rewards()
        # Single gradient step over the whole episode batch.
        train, summary = self.sess.run([self.train_op, self.merged], feed_dict={
            self.tf_obs: np.vstack(self.ep_obs),  # shape=[None, n_obs]
            self.tf_acts: np.array(self.ep_as),   # shape=[None, ]
            self.tf_vt: discounted_ep_rs_norm,    # shape=[None, ]
        })
        self.writer.add_summary(summary, ep)
        self.ep_obs, self.ep_as, self.ep_rs = [], [], []  # empty episode data
        return discounted_ep_rs_norm

    def _discount_and_norm_rewards(self):
        """Return the discounted, mean/std-normalized episode returns."""
        # NOTE(review): np.zeros_like inherits the dtype of ep_rs; integer
        # rewards would truncate the discounted sums -- confirm rewards are floats.
        discounted_ep_rs = np.zeros_like(self.ep_rs)
        running_add = 0
        for t in reversed(range(0, len(self.ep_rs))):
            running_add = running_add * self.gamma + self.ep_rs[t]
            discounted_ep_rs[t] = running_add
        # Normalize to zero mean / unit std to reduce gradient variance.
        discounted_ep_rs -= np.mean(discounted_ep_rs)
        discounted_ep_rs /= np.std(discounted_ep_rs)
        return discounted_ep_rs
|
def add_simple_uvs():
    '''Add cube map uvs on mesh.

    Empty operator stub (appears to be an auto-generated signature for a
    Blender paint operator; the real implementation lives in Blender).
    '''
    pass
def add_texture_paint_slot(type='DIFFUSE_COLOR',
                           name="Untitled",
                           width=1024,
                           height=1024,
                           color=(0.0, 0.0, 0.0, 1.0),
                           alpha=True,
                           generated_type='BLANK',
                           float=False):
    '''Add a texture paint slot (operator stub).

    :param type: slot channel; one of 'DIFFUSE_COLOR', 'DIFFUSE_INTENSITY',
        'ALPHA', 'TRANSLUCENCY', 'SPECULAR_COLOR', 'SPECULAR_INTENSITY',
        'SPECULAR_HARDNESS', 'AMBIENT', 'EMIT', 'MIRROR_COLOR', 'RAYMIRROR',
        'NORMAL', 'WARP', 'DISPLACE'
    :param name: image datablock name (string, never None)
    :param width: image width in pixels, int >= 1
    :param height: image height in pixels, int >= 1
    :param color: default fill color, RGBA float array of 4 items
    :param alpha: create the image with an alpha channel
    :param generated_type: fill pattern; 'BLANK' (blank image), 'UV_GRID'
        (grid to test UV mappings) or 'COLOR_GRID' (improved UV test grid)
    :param float: create the image with 32-bit floating point depth
        (NOTE: parameter shadows the builtin ``float``; kept for API
        compatibility with the generated signature)
    '''
    pass
def brush_colors_flip():
    '''Toggle foreground and background brush colors (operator stub).
    '''
    pass
def brush_select(paint_mode='ACTIVE',
                 sculpt_tool='BLOB',
                 vertex_paint_tool='MIX',
                 weight_paint_tool='MIX',
                 texture_paint_tool='DRAW',
                 toggle=False,
                 create_missing=False):
    '''Select a paint mode's brush by tool type (operator stub).

    :param paint_mode: which mode's brush to set; 'ACTIVE' (current mode),
        'SCULPT', 'VERTEX_PAINT', 'WEIGHT_PAINT' or 'TEXTURE_PAINT'
    :param sculpt_tool: one of 'BLOB', 'CLAY', 'CLAY_STRIPS', 'CREASE',
        'DRAW', 'FILL', 'FLATTEN', 'GRAB', 'INFLATE', 'LAYER', 'MASK',
        'NUDGE', 'PINCH', 'ROTATE', 'SCRAPE', 'SIMPLIFY', 'SMOOTH',
        'SNAKE_HOOK', 'THUMB'
    :param vertex_paint_tool: blend mode; 'MIX', 'ADD', 'SUB', 'MUL',
        'BLUR', 'LIGHTEN' or 'DARKEN'
    :param weight_paint_tool: blend mode; same choices as vertex_paint_tool
    :param texture_paint_tool: one of 'DRAW', 'SOFTEN', 'SMEAR', 'CLONE',
        'FILL', 'MASK'
    :param toggle: toggle between two brushes rather than cycling
    :param create_missing: create a new brush when the requested type
        does not exist
    '''
    pass
def delete_texture_paint_slot():
    '''Delete the selected texture paint slot (operator stub).
    '''
    pass
def face_select_all(action='TOGGLE'):
    '''Change selection for all faces (operator stub).

    :param action: 'TOGGLE', 'SELECT', 'DESELECT' or 'INVERT'
    '''
    pass
def face_select_hide(unselected=False):
    '''Hide selected faces (operator stub).

    :param unselected: hide unselected rather than selected faces
    '''
    pass
def face_select_linked():
    '''Select linked faces (operator stub).
    '''
    pass
def face_select_linked_pick(deselect=False):
    '''Select linked faces under the cursor (operator stub).

    :param deselect: deselect rather than select the picked items
    '''
    pass
def face_select_reveal(unselected=False):
    '''Reveal hidden faces (operator stub).

    :param unselected: operate on unselected rather than selected faces
    '''
    pass
def grab_clone(delta=(0.0, 0.0)):
    '''Move the clone source image (operator stub).

    :param delta: offset of the clone image in 0.0..1.0 coordinates
        (float pair, each in [-inf, inf])
    '''
    pass
def hide_show(action='HIDE', area='INSIDE', xmin=0, xmax=0, ymin=0, ymax=0):
    '''Hide or show some vertices (operator stub).

    :param action: 'HIDE' or 'SHOW'
    :param area: which vertices to affect; 'OUTSIDE'/'INSIDE' the selection,
        'ALL', or 'MASKED' (minimum mask value of 0.5)
    :param xmin: selection rectangle minimum X
    :param xmax: selection rectangle maximum X
    :param ymin: selection rectangle minimum Y
    :param ymax: selection rectangle maximum Y
    '''
    pass
def image_from_view(filepath=""):
    '''Make an image from the current 3D view for re-projection (operator stub).

    :param filepath: name of the output file (string, never None)
    '''
    pass
def image_paint(stroke=None, mode='NORMAL'):
    '''Paint a stroke into the image (operator stub).

    :param stroke: collection of OperatorStrokeElement items
    :param mode: stroke action; 'NORMAL' (apply brush normally), 'INVERT'
        (invert brush action for the stroke) or 'SMOOTH' (switch brush to
        smooth mode for the stroke)
    '''
    pass
def mask_flood_fill(mode='VALUE', value=0.0):
    '''Fill the whole mask with a given value, or invert its values (operator stub).

    :param mode: 'VALUE' (set mask to *value*), 'VALUE_INVERSE' (set mask to
        the inverted *value*) or 'INVERT' (invert the mask)
    :param value: mask level in [0, 1] used when mode is 'VALUE'; zero means
        no masking, one is fully masked
    '''
    pass
def mask_lasso_gesture(path=None, mode='VALUE', value=1.0):
    '''Add mask within the lasso as you move the brush (operator stub).

    :param path: collection of OperatorMousePath items describing the lasso
    :param mode: 'VALUE' (set mask to *value*), 'VALUE_INVERSE' (set mask to
        the inverted *value*) or 'INVERT' (invert the mask)
    :param value: mask level in [0, 1] used when mode is 'VALUE'
    '''
    pass
def project_image(image=''):
    '''Project an edited render from the active camera back onto the object (operator stub).

    :param image: name of the image to project (enum, populated at runtime)
    '''
    pass
def sample_color(location=(0, 0), merged=False, palette=False):
    '''Use the mouse to sample a color in the image.
    (Operator stub; body is ``pass``.)

    :param location: Location
    :type location: int array of 2 items in [0, inf], (optional)
    :param merged: Sample Merged, Sample the output display color
    :type merged: boolean, (optional)
    :param palette: Add to Palette
    :type palette: boolean, (optional)
    '''
    pass
def texture_paint_toggle():
    '''Toggle texture paint mode in 3D view. (Operator stub; body is ``pass``.)
    '''
    pass
def vert_select_all(action='TOGGLE'):
    '''Change selection for all vertices. (Operator stub; body is ``pass``.)

    :param action: Action, Selection action to execute:

        - ``TOGGLE`` Toggle: Toggle selection for all elements.
        - ``SELECT`` Select: Select all elements.
        - ``DESELECT`` Deselect: Deselect all elements.
        - ``INVERT`` Invert: Invert selection of all elements.
    :type action: enum in ['TOGGLE', 'SELECT', 'DESELECT', 'INVERT'], (optional)
    '''
    pass
def vert_select_ungrouped(extend=False):
    '''Select vertices without a group. (Operator stub; body is ``pass``.)

    :param extend: Extend, Extend the selection
    :type extend: boolean, (optional)
    '''
    pass
def vertex_color_brightness_contrast(brightness=0.0, contrast=0.0):
    '''Adjust vertex color brightness/contrast. (Operator stub; body is ``pass``.)

    :param brightness: Brightness
    :type brightness: float in [-100, 100], (optional)
    :param contrast: Contrast
    :type contrast: float in [-100, 100], (optional)
    '''
    pass
def vertex_color_dirt(blur_strength=1.0,
                      blur_iterations=1,
                      clean_angle=3.14159,
                      dirt_angle=0.0,
                      dirt_only=False):
    '''Undocumented operator. (Stub; body is ``pass``.)

    NOTE(review): the upstream tooltip is empty; judging from the parameter
    names this presumably bakes a "dirt"/shading gradient into vertex colors —
    confirm against the Blender manual before relying on that.

    :param blur_strength: Blur Strength, Blur strength per iteration
    :type blur_strength: float in [0.01, 1], (optional)
    :param blur_iterations: Blur Iterations, Number of times to blur the colors
        (higher blurs more)
    :type blur_iterations: int in [0, 40], (optional)
    :param clean_angle: Highlight Angle, Less than 90 limits the angle used in
        the tonal range
    :type clean_angle: float in [0, 3.14159], (optional)
    :param dirt_angle: Dirt Angle, Less than 90 limits the angle used in the
        tonal range
    :type dirt_angle: float in [0, 3.14159], (optional)
    :param dirt_only: Dirt Only, Don't calculate cleans for convex areas
    :type dirt_only: boolean, (optional)
    '''
    pass
def vertex_color_hsv(h=0.5, s=1.0, v=1.0):
    '''Adjust vertex color HSV values. (Operator stub; body is ``pass``.)

    :param h: Hue
    :type h: float in [0, 1], (optional)
    :param s: Saturation
    :type s: float in [0, 2], (optional)
    :param v: Value
    :type v: float in [0, 2], (optional)
    '''
    pass
def vertex_color_invert():
    '''Invert RGB values. (Operator stub; body is ``pass``.)
    '''
    pass
def vertex_color_levels(offset=0.0, gain=1.0):
    '''Adjust levels of vertex colors. (Operator stub; body is ``pass``.)

    :param offset: Offset, Value to add to colors
    :type offset: float in [-1, 1], (optional)
    :param gain: Gain, Value to multiply colors by
    :type gain: float in [0, inf], (optional)
    '''
    pass
def vertex_color_set():
    '''Fill the active vertex color layer with the current paint color.
    (Operator stub; body is ``pass``.)
    '''
    pass
def vertex_color_smooth():
    '''Smooth colors across vertices. (Operator stub; body is ``pass``.)
    '''
    pass
def vertex_paint(stroke=None, mode='NORMAL'):
    '''Paint a stroke in the active vertex color layer.
    (Operator stub; body is ``pass``.)

    :param stroke: Stroke
    :type stroke: bpy_prop_collection of OperatorStrokeElement, (optional)
    :param mode: Stroke Mode, Action taken when a paint stroke is made:

        - ``NORMAL`` Normal: Apply brush normally.
        - ``INVERT`` Invert: Invert action of brush for duration of stroke.
        - ``SMOOTH`` Smooth: Switch brush to smooth mode for duration of stroke.
    :type mode: enum in ['NORMAL', 'INVERT', 'SMOOTH'], (optional)
    '''
    pass
def vertex_paint_toggle():
    '''Toggle the vertex paint mode in 3D view. (Operator stub; body is ``pass``.)
    '''
    pass
def weight_from_bones(type='AUTOMATIC'):
    '''Set the weights of the groups matching the attached armature's selected
    bones, using the distance between the vertices and the bones.
    (Operator stub; body is ``pass``.)

    :param type: Type, Method to use for assigning weights:

        - ``AUTOMATIC`` Automatic: Automatic weights from bones.
        - ``ENVELOPES`` From Envelopes: Weights from envelopes with user defined radius.
    :type type: enum in ['AUTOMATIC', 'ENVELOPES'], (optional)
    '''
    pass
def weight_gradient(type='LINEAR',
                    xstart=0,
                    xend=0,
                    ystart=0,
                    yend=0,
                    cursor=1002):
    '''Draw a line to apply a weight gradient to selected vertices.
    (Operator stub; body is ``pass``.)

    :param type: Type
    :type type: enum in ['LINEAR', 'RADIAL'], (optional)
    :param xstart: X Start
    :type xstart: int in [-inf, inf], (optional)
    :param xend: X End
    :type xend: int in [-inf, inf], (optional)
    :param ystart: Y Start
    :type ystart: int in [-inf, inf], (optional)
    :param yend: Y End
    :type yend: int in [-inf, inf], (optional)
    :param cursor: Cursor, Mouse cursor style to use during the modal operator
    :type cursor: int in [0, inf], (optional)
    '''
    pass
def weight_paint(stroke=None, mode='NORMAL'):
    '''Paint a stroke in the current vertex group's weights.
    (Operator stub; body is ``pass``.)

    :param stroke: Stroke
    :type stroke: bpy_prop_collection of OperatorStrokeElement, (optional)
    :param mode: Stroke Mode, Action taken when a paint stroke is made:

        - ``NORMAL`` Normal: Apply brush normally.
        - ``INVERT`` Invert: Invert action of brush for duration of stroke.
        - ``SMOOTH`` Smooth: Switch brush to smooth mode for duration of stroke.
    :type mode: enum in ['NORMAL', 'INVERT', 'SMOOTH'], (optional)
    '''
    pass
def weight_paint_toggle():
    '''Toggle weight paint mode in 3D view. (Operator stub; body is ``pass``.)
    '''
    pass
def weight_sample():
    '''Use the mouse to sample a weight in the 3D view.
    (Operator stub; body is ``pass``.)
    '''
    pass
def weight_sample_group(group='DEFAULT'):
    '''Select one of the vertex groups available under current mouse position.
    (Operator stub; body is ``pass``.)

    :param group: Keying Set, The Keying Set to use
    :type group: enum in ['DEFAULT'], (optional)
    '''
    pass
def weight_set():
    '''Fill the active vertex group with the current paint weight.
    (Operator stub; body is ``pass``.)
    '''
    pass
# NOTE(review): the lines below are dataset-viewer residue appended during
# scraping ("Subsets and Splits ..."); they are not part of the original
# module and have been commented out so the file remains valid Python.
# | Subsets and Splits
# | No community queries yet
# | The top public SQL queries from the community will appear here once available.