text stringlengths 38 1.54M |
|---|
# pip install beautifulsoup4 selenium lxml
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import json
import os
import csv
# Output CSV and scraping parameters.
output_file_path = 'outputs/output_CSV_PARSER.csv'
pagination = 1  # number of result pages to scrape
l = 'https://hh.ru/search/resume?page='

# Remove a previous output file so rows are not appended to stale data.
# BUG FIX: was a bare `except:` which also swallows KeyboardInterrupt/SystemExit;
# only OSError (file missing, permission) is expected here.
try:
    path = os.path.join(os.path.abspath(os.path.dirname(__file__)), output_file_path)
    os.remove(path)
except OSError:
    pass

### driver setup
ua = dict(DesiredCapabilities.CHROME)  # NOTE(review): unused below — confirm it can be removed
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument('window-size=1920x935')
driver = webdriver.Chrome(options=options, executable_path='driver/chromedriver.exe')
###
def parseResume(l):
    """Open one resume page and append (about, title) as a ;-separated CSV row.

    Missing page elements are recorded as empty strings rather than aborting.
    """
    driver.get(l)
    # Job title of the resume.
    # FIX: removed the no-op .encode("utf-8").decode("utf-8") round-trip and
    # narrowed the bare except (which also caught KeyboardInterrupt).
    try:
        resumeTitlePosition = driver.find_element_by_css_selector(
            "span[data-qa='resume-block-title-position']").text
    except Exception:
        resumeTitlePosition = ''
    # Free-text "skills / about" section.
    try:
        about = driver.find_element_by_css_selector(
            "div[data-qa='resume-block-skills-content']").text
    except Exception:
        about = ''
    fields = [about, resumeTitlePosition]
    with open(output_file_path, 'a', encoding="utf-8") as f:
        writer = csv.writer(f, delimiter=';')
        writer.writerow(fields)
# Collect resume links from each search-result page, then parse every resume.
linksResume = []
print('получение списка..')
for p in range(pagination):
    print('страница ' + str(p))
    driver.get(l + str(p))
    body = driver.find_element_by_tag_name("body")
    items = body.find_elements_by_class_name("resume-search-item__name")
    for item in items:
        linksResume.append(item.get_attribute("href"))
# FIX: the loop variable was `l`, silently rebinding the module-level URL base
# used above; renamed to `link` and dropped stray semicolons.
for i, link in enumerate(linksResume):
    print(str(i) + '/' + str(len(linksResume)))
    parseResume(link)
print('готово')
# l = 'https://hh.ru/resume/4fea78920008356d8b0039ed1f497951747264?hhtmFrom=resume_search_result'
# l = 'https://hh.ru/resume/489598f90005efc0950039ed1f506c46576564?hhtmFrom=resume_search_result'
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Row, Column
from apps.classes.models import YogaClass
from tempus_dominus.widgets import DatePicker, TimePicker, DateTimePicker
from datetime import datetime
from django.utils.formats import get_format
from django.utils import formats
from apps.cards.models import CardType
class ClassForm(forms.ModelForm):
    """ModelForm for YogaClass with tempus-dominus date pickers and a
    crispy-forms Bootstrap grid layout."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Example placeholder for the (Vietnamese) class-name input.
        self.fields['name'].widget.attrs.update(
            {'placeholder': 'Lớp CB 2-4-6'})
        # Replace the default date inputs with DatePicker widgets; today's
        # localized date serves as a format hint in the placeholder.
        self.fields['start_at'] = forms.DateField(
            label=_('start at').capitalize(),
            widget=DatePicker(
                options={
                    'useCurrent': True,
                },
                attrs={
                    'icon_toggle': True,
                    'input_group': False,
                    'placeholder': formats.date_format(datetime.now(), use_l10n=True)
                }
            ),
        )
        self.fields['end_at'] = forms.DateField(
            required=False,
            label=_('end at').capitalize(),
            widget=DatePicker(
                options={
                    'useCurrent': True,
                },
                attrs={
                    'icon_toggle': True,
                    'input_group': False,
                    'placeholder': formats.date_format(datetime.now(), use_l10n=True)
                }
            ),
        )
        # NOTE(review): redundant — end_at is already required=False above.
        self.fields['end_at'].required = False
        # Placeholder example prices (dot-grouped VND style).
        self.fields['price_per_lesson'].widget.attrs.update({
            'placeholder': '50.000'
        })
        self.fields['price_per_month'].widget.attrs.update({
            'placeholder': '600.000'
        })
        self.fields['price_for_training_class'].widget.attrs.update({
            'placeholder': '10.000.000'
        })
        self.fields['max_people'].widget.attrs.update({
            'placeholder': 25
        })
        # Bootstrap grid layout rendered by crispy-forms.
        self.helper = FormHelper()
        self.helper.layout = Layout(
            'course',
            'name',
            Row(
                Column('trainer', css_class='form-group col-md-6 mb-0'),
                Column('max_people', css_class='form-group col-md-6 mb-0'),
                css_class='form-row'
            ),
            Row(
                Column('price_per_lesson', css_class='form-group col-md-4 mb-0'),
                Column('price_per_month', css_class='form-group col-md-4 mb-0'),
                Column('price_for_training_class', css_class='form-group col-md-4 mb-0'),
                css_class='form-row'
            ),
            Row(
                Column('start_at', css_class='form-group col-md-6 mb-0'),
                Column('end_at', css_class='form-group col-md-6 mb-0'),
                css_class='form-row'
            ),
            Submit('submit', _('Save'), css_class='btn-success'))
        # Focus the first field with a validation error; otherwise focus 'name'.
        errorList = list(self.errors)
        if errorList:
            for item in errorList:
                self.fields[item].widget.attrs.update(
                    {'autofocus': 'autofocus'})
                break
        else:
            self.fields['name'].widget.attrs.update({'autofocus': 'autofocus'})

    class Meta:
        model = YogaClass
        # slug and timestamps are managed automatically, not user-edited.
        exclude = ['slug', 'created_at', 'updated_at']

    def clean_name(self):
        """Reject a name whose slug collides with an existing YogaClass."""
        from django.utils.text import slugify
        from django.core.exceptions import ValidationError
        name = self.cleaned_data['name']
        slug = slugify(name)
        if YogaClass.objects.filter(slug=slug).exists():
            raise ValidationError(_('A class with this name already exists.'))
        return name

    def clean_end_at(self):
        """Ensure end_at, when provided, is not earlier than start_at."""
        cleaned_data = super(ClassForm, self).clean()
        end_at = cleaned_data['end_at']
        if 'start_at' in cleaned_data and end_at is not None:
            start_at = cleaned_data['start_at']
            if end_at < start_at:
                raise forms.ValidationError(
                    _('End at must be greater than Start at'))
        return end_at
class ClassNewForm(ClassForm):
    """Creation form: additionally rejects start dates in the past."""

    def clean_start_at(self):
        """Validate that the class does not start before today.

        FIX: the original called super(ClassForm, self).clean(), which skips
        ClassForm in the MRO and re-runs the whole form clean inside a single
        field's cleaner; clean_<field> should just read self.cleaned_data.
        """
        start_at = self.cleaned_data['start_at']
        if start_at < datetime.now().date():
            raise forms.ValidationError(_('wrong start date'))
        return start_at
class ClassEditForm(ClassForm):
    """Edit form: only re-validates the name when it actually changed, and
    allows keeping a past start date."""

    def clean_name(self):
        """Check slug uniqueness only when the name was modified."""
        name = self.cleaned_data['name']
        if 'name' in self.changed_data:
            from django.utils.text import slugify
            from django.core.exceptions import ValidationError
            if YogaClass.objects.filter(slug=slugify(name)).exists():
                raise ValidationError(
                    _('A class with this name already exists.'))
        # Unchanged names are accepted as-is (the redundant else branch that
        # returned the same value twice has been collapsed).
        return name

    def clean_start_at(self):
        """Return the validated start date without ClassNewForm's past-date check.

        FIX: the original re-ran super(ClassForm, self).clean() and indexed
        the result; reading self.cleaned_data is equivalent and avoids a
        KeyError path through a full re-clean.
        """
        return self.cleaned_data['start_at']
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:50:03 2019
@author: ZHOUFENG
"""
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 19:26:40 2019
@author: ZHOUFENG
"""
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin training to GPU 0
import tensorflow as tf
# Let the GPU allocate memory on demand instead of grabbing it all up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config = config)
#os.environ['KERAS_BACKEND']='theano'
import numpy as np
#import scipy
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization,Activation
#from keras.optimizers import SGD
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, CSVLogger
from keras import regularizers
from keras.utils import np_utils
import matplotlib.pyplot as plt
#import h5py
#from keras.models import load_model
# Alternative preprocessed datasets kept for reference (augmented splits).
#alltrain_count = 48506#53861#49346#40423#48507#17600#56592#57511
#alltest_count = 25201#35908#49346#40423#32339# 49800#24254#24648
#X_train = np.load("./predata_aug/augsplit_train_48506/X_augsplit100_train_" + "alltrain_count" + str(alltrain_count) + ".npy")
#y_train = np.load("./predata_aug/augsplit_train_48506/y_augsplit100_train_" + "alltrain_count" + str(alltrain_count) + ".npy")
#X_test = np.load("./predata_aug/augsplit_train_48506/X_augsplit100_test_" + "alltest_count" + str(alltest_count) + ".npy")
#y_test = np.load("./predata_aug/augsplit_train_48506/y_augsplit100_test_" + "alltest_count" + str(alltest_count) + ".npy")
# No data augmentation; this split gave the best Yancheng results previously.
# Fraction of samples held out for testing — selects which preprocessed split to load.
testRatio = 0.9
_split_dir = "./poyang_allfile/poyang_predata/splitdata/patch/patch_noaug/noaug_noover_sameneighbor/alldata_split/"
X_train = np.load(_split_dir + "X_stand_pca_train_" + "testRatio" + str(testRatio) + ".npy")
y_train = np.load(_split_dir + "y_stand_pca_train_" + "testRatio" + str(testRatio) + ".npy")
X_test = np.load(_split_dir + "X_stand_pca_test_" + "testRatio" + str(testRatio) + ".npy")
y_test = np.load(_split_dir + "y_stand_pca_test_" + "testRatio" + str(testRatio) + ".npy")

# Reshape into (num_samples, height, width, channels) for the TensorFlow backend.
# BUG FIX: the test-set reshape previously used X_train.shape[1] for its height,
# which silently corrupts (or crashes on) test data whose shape differs.
X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2], X_train.shape[3]))
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], X_test.shape[2], X_test.shape[3]))

# One-hot encode the integer class labels.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)

# Input shape for the first Conv2D layer, e.g. (30, 30, 9).
input_shape = X_train[0].shape
print(input_shape)
# number of filters in the first convolutional layer
C1 = 16 #32#
pool_size = (2, 2)
# Define the model structure: four conv blocks (Conv -> BN -> ReLU -> MaxPool
# -> Dropout) followed by a regularized dense block and a 2-way output.
model = Sequential()
model.add(Conv2D(C1, (3, 3), padding = 'same', input_shape = input_shape))  # conv block 1: 30*30*9 -> 30*30*16
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = pool_size))  # -> 15*15*16
model.add(Dropout(0.15))
model.add(Conv2D(2*C1, (3, 3)))  # conv block 2 (valid padding): 15*15*16 -> 13*13*32
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = pool_size))  # -> 6*6*32
model.add(Dropout(0.15))
model.add(Conv2D(4*C1, (3, 3)))  # conv block 3: 6*6*32 -> 4*4*64
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = pool_size))  # -> 2*2*64
model.add(Dropout(0.25))
model.add(Conv2D(4*C1, (1, 1)))  # conv block 4: 1x1 convolutions, channels unchanged
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size = pool_size))
model.add(Dropout(0.25))
model.add(Flatten())  # flatten the feature maps into a vector for the dense layers
model.add(Dense(6*C1,kernel_regularizer = regularizers.l2(0.001)))  # fully connected, L2-regularized
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2))  # output layer: 2 classes
model.add(BatchNormalization())
# NOTE(review): sigmoid over a 2-unit one-hot output paired below with
# binary_crossentropy; softmax + categorical_crossentropy is the conventional
# pairing — confirm this choice is intentional.
model.add(Activation('sigmoid'))
#plot_model(model,to_file='G:/desktop/myProject/model.png')
# Define optimization and training method.
# ReduceLROnPlateau: when the monitored metric (val_acc) stops improving for
# `patience` epochs, multiply the learning rate by `factor`; never drop below
# `min_lr`. verbose=1 logs each reduction.
reduce_lr = ReduceLROnPlateau(monitor = 'val_acc', factor = 0.9, patience = 5, min_lr = 0.0000001, verbose = 1)
# ModelCheckpoint: with save_best_only=True, only the weights with the best
# validation performance are kept at `filepath`.
#checkpointer = ModelCheckpoint(filepath = "./HDF5/checkpoint.hdf5", verbose = 1, save_best_only = True)
# best Yancheng run kept for reference:
#checkpointer = ModelCheckpoint(filepath = "./HDF5/checkpoint_noaugrmsprop21_100.hdf5", verbose = 1, save_best_only = True)
checkpointer = ModelCheckpoint(filepath = "./poyang_allfile/poyang_hdf5/patch_hdf5/patch_noaug/noaug_noover_sameneighbor/testratio0.9/checkpoint3_200.hdf5", verbose = 1, save_best_only = True)
# SGD alternatives kept for reference (lr / momentum / decay / nesterov):
#sgd = SGD(lr=0.0001, decay=1e-6, momentum=0.9, nesterov=True)
#sgd = SGD(lr=0.0001, momentum=0.9, nesterov=True)
# binary_crossentropy expects labels as an (nb_samples, nb_classes) binary
# matrix — produced by np_utils.to_categorical above.
model.compile(optimizer = 'rmsprop', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.summary()
# Log per-epoch metrics to CSV.
history_save = CSVLogger('./poyang_allfile/poyang_history/patch_history/patch_noaug/noaug_noover_sameneighbor/testratio0.9/historysave3_200.csv',separator = ',',append = False)
# Start to train the model.
history = model.fit(X_train, y_train,
                    batch_size = 32,
                    epochs = 200,
                    verbose = 1,
                    validation_data = (X_test, y_test),
                    callbacks = [reduce_lr, checkpointer ,history_save],
                    shuffle = True)
# save the final model with h5py
#model.save('./model/HSI_model_epochs100.h5')
# best Yancheng run kept for reference:
#model.save('./model/HSI_model_epochs100_noaugrmsprop21.h5')
model.save('./poyang_allfile/poyang_model/patch_model/patch_noaug/noaug_noover_sameneighbor/testratio0.9/HSI_model_epochs3_200.h5')
# summarize history for accuracy
#plt.plot(history.history['acc'])
#plt.plot(history.history['val_acc'])
#plt.title('model accuracy')
#plt.ylabel('accuracy')
#plt.xlabel('epoch')
#plt.grid(True)
#plt.legend(['train', 'test'], loc = 'upper left')
#plt.savefig("./result/model_accuracy_100.svg")
#plt.savefig("./result/100_noaug/100_noaugrmsprop/model_accuracy_100_noaugrmsprop21.svg")
#plt.savefig("./yancheng_allfile/yancheng_result/patch_result/patch_noaug/noaug_sameneighbor/testratio0.8/model_accuracy2_200.svg")
#plt.show()
# summarize history for loss
#plt.plot(history.history['loss'])
#plt.plot(history.history['val_loss'])
#plt.title('model loss')
#plt.ylabel('loss')
#plt.xlabel('epoch')
#plt.grid(True)
#plt.legend(['train', 'test'], loc = 'upper left')
#plt.savefig("./result/model_loss_100.svg")
#plt.savefig("./result/100_noaug/100_noaugrmsprop/model_loss_100_noaugrmsprop21.svg")
#plt.savefig("./yancheng_allfile/yancheng_result/patch_result/patch_noaug/noaug_sameneighbor/testratio0.8/model_loss2_200.svg")
#plt.show()
|
from unittest import TestCase
from cell import Cell
from row import Row
class TestRow(TestCase):
    """Unit tests for the Row class (a row of nine puzzle cells)."""

    def setUp(self):
        # Row at index 0; zeros appear to denote empty cells — TODO confirm.
        self.row = Row(0, [2, 0, 4, 3, 5, 0, 6, 9, 8])

    def test_sum(self):
        # NOTE(review): asserts only an upper bound (45 = sum of 1..9), not an
        # exact value — confirm whether assertEqual was intended.
        self.assertLessEqual(self.row.sum(), 45)

    def test_add_type_error(self):
        # Adding a non-Cell value must raise TypeError.
        self.assertRaises(TypeError, self.row.add, 10)

    def test_add_value_error(self):
        # Adding this Cell must raise ValueError — presumably because its
        # coordinates/value are invalid for the row; verify against Cell's API.
        self.assertRaises(ValueError, self.row.add, Cell(1, 2, 3))

    def test_set_cell(self):
        # TODO: not yet implemented.
        pass

    def test_updating_row_values(self):
        # Exercises set_cell but makes no assertion — TODO assert the updated state.
        self.row.set_cell(1, 1, "test_row.test_updating_row_values()")
        pass

    def test_percent_complete(self):
        # Prints the completeness ratio; no assertion — TODO assert the expected value.
        completeness = self.row.percent_complete
        print(str(completeness * 100) + "% complete")
        self.row.__print__()
|
import subprocess
def test_equality():
    """Run the interpreter on equality.can and verify its printed output."""
    completed = subprocess.run(["bin/canidae", "test/logic/equality.can"], text=True, capture_output=True)
    assert completed.returncode == 0
    expected = [
        "false", "true", "false", "false", "true", "true", "false",
        "true", "true", "false", "true", "true", "true", "false",
        "true", "false", "true", "true", "false", "false", "true",
        "false", "false", "true", "false", "false", "false", "true",
    ]
    # stdout ends with a newline, so split() yields a trailing empty element.
    assert completed.stdout.split("\n") == expected + [""]
def test_not():
    """Run the interpreter on not.can and verify its printed output."""
    completed = subprocess.run(["bin/canidae", "test/logic/not.can"], text=True, capture_output=True)
    assert completed.returncode == 0
    expected = [
        "false", "true", "true", "false", "true",
        "false", "true", "false", "true",
    ]
    # stdout ends with a newline, so split() yields a trailing empty element.
    assert completed.stdout.split("\n") == expected + [""]
def test_inequality():
    """Run the interpreter on inequality.can and verify its printed output."""
    completed = subprocess.run(["bin/canidae", "test/logic/inequality.can"], text=True, capture_output=True)
    assert completed.returncode == 0
    expected = [
        "true", "false", "false", "true", "false", "true",
        "false", "true", "true", "false", "true", "false",
    ]
    # stdout ends with a newline, so split() yields a trailing empty element.
    assert completed.stdout.split("\n") == expected + [""]
def test_inequality_wrong_type_1():
    """Comparing values of different type must abort with a runtime error (70)."""
    result = subprocess.run(["bin/canidae", "test/logic/inequality_wrong_type_1.can"], text=True, capture_output=True)
    assert result.returncode == 70
    err_lines = result.stderr.split("\n")
    assert len(err_lines) == 4
    assert "Cannot perform comparison on values of different type" in err_lines[0]
    assert err_lines[2].startswith("\t[line 1]")
    assert err_lines[3] == ""
def test_inequality_wrong_type_2():
    """Comparing objects of different type must abort with a runtime error (70)."""
    result = subprocess.run(["bin/canidae", "test/logic/inequality_wrong_type_2.can"], text=True, capture_output=True)
    assert result.returncode == 70
    err_lines = result.stderr.split("\n")
    assert len(err_lines) == 4
    assert "Cannot perform comparison on objects of different type" in err_lines[0]
    assert err_lines[2].startswith("\t[line 1]")
    assert err_lines[3] == ""
def test_logic_operators():
    """Run the interpreter on logic_operators.can and verify its printed output."""
    completed = subprocess.run(["bin/canidae", "test/logic/logic_operators.can"], text=True, capture_output=True)
    assert completed.returncode == 0
    expected = [
        "true", "false", "false", "false", "true", "true", "true",
        "false", "false", "true", "0", "1", "true", "1", "false", "1",
    ]
    # stdout ends with a newline, so split() yields a trailing empty element.
    assert completed.stdout.split("\n") == expected + [""]
|
import numpy as np
import cv2

img = cv2.imread("chess.png")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Fixed (global) threshold at 127.
ret, th = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
print(ret)  # the threshold value cv2 actually applied
# cv2.imwrite("../doc/threshold_fix.png", th)

# Adaptive threshold: Gaussian-weighted 17x17 neighbourhood, constant offset 6.
th = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                           cv2.THRESH_BINARY, 17, 6)
cv2.imwrite("../doc/threshold_adapt.png", th)
|
import tkinter as tk
from tkinter import *
from tkinter import messagebox

# Colour palette for the calculator UI.
background_colour = '#707371'
button_color = '#18c9c1'  # NOTE(review): defined but never used below — confirm

root = tk.Tk()
root.geometry("250x400+300+300")
root.title("Calculator")
root.resizable(0, 0)  # NOTE(review): (0, 0) DISABLES resizing, despite the original comment saying it allows it
root.configure()

# Screen state shared by all button callbacks.
data = StringVar()
val = ""  # mathematical input shown on the calculator screen
operator = ""  # last operator pressed: one of +, -, /, *
# Value Buttons
# When one of these buttons is clicked, its digit is appended to the
# calculator screen. The ten identical handlers now share one helper.
def _append_digit(digit):
    """Append a digit string to the current input and refresh the display."""
    global val
    val = val + digit
    data.set(val)

def btn1_isclicked():
    _append_digit("1")

def btn2_isclicked():
    _append_digit("2")

def btn3_isclicked():
    _append_digit("3")

def btn4_isclicked():
    _append_digit("4")

def btn5_isclicked():
    _append_digit("5")

def btn6_isclicked():
    _append_digit("6")

def btn7_isclicked():
    _append_digit("7")

def btn8_isclicked():
    _append_digit("8")

def btn9_isclicked():
    _append_digit("9")

def btn0_isclicked():
    _append_digit("0")
# Operands
def _apply_operator(symbol):
    """Record the operator and append it to the displayed expression."""
    global val, operator
    operator = symbol
    val = val + symbol
    data.set(val)

def btnPlus_isclicked():
    _apply_operator("+")

def btnMinus_isclicked():
    _apply_operator("-")

def btnMulti_isclicked():
    _apply_operator("*")

def btnDivid_isclicked():
    _apply_operator("/")

def btnClear_isclicked():
    """Reset the pending operator and clear the display."""
    global val, operator
    operator = ""
    val = ""
    data.set(val)
# Function for calculating the result
def calculate():
    """Evaluate the expression on screen and display the result.

    BUG FIXES:
    - messagebox.showerror takes (title, message); the original passed the
      whole message as the title, leaving the dialog body empty.
    - Division by zero was detected with int(val.split("/")[1]), which raises
      ValueError for float operands or chained expressions like "8/2+1";
      catching ZeroDivisionError around the evaluation covers every case.
    """
    global val, operator
    try:
        # eval is acceptable here: input can only come from the buttons above.
        result = eval(val)
    except ZeroDivisionError:
        messagebox.showerror("Error", "Division by 0 not allowed")
        val = ""
        data.set(val)
    else:
        data.set(str(result))
# Visuals
# Calculator screen: a label bound to the shared StringVar `data`.
lbl = Label(root, text='Label', font=20, anchor=SE, textvariable = data)
lbl.pack(expand=True, fill='both')

# Four rows of buttons.
btnrow1 = Frame(root)
btnrow1.pack(expand=True, fill="both")
btnrow2 = Frame(root, bg=background_colour)
btnrow2.pack(expand=True, fill="both")
btnrow3 = Frame(root, bg=background_colour)
btnrow3.pack(expand=True, fill="both")
btnrow4 = Frame(root, bg=background_colour)
btnrow4.pack(expand=True, fill="both")

# Adding Buttons
# First Row: 1 2 3 +
btn1 = Button(btnrow1, text="1", font=20, border=0, bg=background_colour, command=btn1_isclicked)
btn1.pack(side=LEFT, expand=True, fill="both")
btn2 = Button(btnrow1, text="2", font=20, border=0, bg=background_colour, command=btn2_isclicked)
btn2.pack(side=LEFT, expand=True, fill="both")
btn3 = Button(btnrow1, text="3", font=20, border=0, bg=background_colour, command=btn3_isclicked)
btn3.pack(side=LEFT, expand=True, fill="both")
btn4 = Button(btnrow1, text="+", font=20, border=0, bg=background_colour, command=btnPlus_isclicked)
btn4.pack(side=LEFT, expand=True, fill="both")
# Second Row: 4 5 6 -
btn5 = Button(btnrow2, text="4", font=20, border=0, bg=background_colour, command=btn4_isclicked)
btn5.pack(side=LEFT, expand=True, fill="both")
btn6 = Button(btnrow2, text="5", font=20, border=0, bg=background_colour, command=btn5_isclicked)
btn6.pack(side=LEFT, expand=True, fill="both")
btn7 = Button(btnrow2, text="6", font=20, border=0, bg=background_colour, command=btn6_isclicked)
btn7.pack(side=LEFT, expand=True, fill="both")
btn8 = Button(btnrow2, text="-", font=20, border=0, bg=background_colour, command=btnMinus_isclicked)
btn8.pack(side=LEFT, expand=True, fill="both")
# Third Row: 7 8 9 X (multiply)
btn9 = Button(btnrow3, text="7", font=20, border=0, bg=background_colour, command=btn7_isclicked)
btn9.pack(side=LEFT, expand=True, fill="both")
btn10 = Button(btnrow3, text="8", font=20, border=0, bg=background_colour, command=btn8_isclicked)
btn10.pack(side=LEFT, expand=True, fill="both")
btn11 = Button(btnrow3, text="9", font=20, border=0, bg=background_colour, command=btn9_isclicked)
btn11.pack(side=LEFT, expand=True, fill="both")
btn12 = Button(btnrow3, text="X", font=20, border=0, bg=background_colour, command=btnMulti_isclicked)
btn12.pack(side=LEFT, expand=True, fill="both")
# Fourth Row: C 0 = %
btn13 = Button(btnrow4, text="C", font=20, border=0, bg=background_colour, command=btnClear_isclicked)
btn13.pack(side=LEFT, expand=True, fill="both")
btn14 = Button(btnrow4, text="0", font=20, border=0, bg=background_colour, command=btn0_isclicked)
btn14.pack(side=LEFT, expand=True, fill="both")
btn15 = Button(btnrow4, text="=", font=20, border=0, bg=background_colour, command=calculate)
btn15.pack(side=LEFT, expand=True, fill="both")
# NOTE(review): labeled "%" but wired to the division handler — confirm the
# intended label is "/".
btn16 = Button(btnrow4, text="%", font=20, border=0, bg=background_colour, command=btnDivid_isclicked)
btn16.pack(side=LEFT, expand=True, fill="both")

root.mainloop()
from mysql.connector import MySQLConnection
import settings

# Module-level MySQL connection, configured entirely from the settings module.
db = MySQLConnection(
    user=settings.DB_USER,
    password=settings.DB_PASSWORD,
    database=settings.DB_NAME,
    host=settings.DB_HOST,
    port=settings.DB_PORT,
    charset=settings.DB_CHARSET,
    collation=settings.DB_COLLATION,
)
# Registry of model managers, in registration (i.e. creation) order.
_managers = []


def register_manager(manager):
    """Record *manager* in the module-level registry used by the table helpers."""
    _managers.append(manager)
def create_tables():
    """Create the database tables if necessary."""
    # Importing the models module triggers registration of every manager.
    import purbeurre.models

    for mgr in _managers:
        print(f"Creating table {mgr.table}")
        mgr.create_table()
def drop_tables():
    """Removes tables from the database if they are present."""
    # Importing the models module triggers registration of every manager;
    # dropping runs in reverse registration order to respect foreign keys.
    import purbeurre.models

    for mgr in reversed(_managers):
        print(f"Dropping table {mgr.table}")
        mgr.drop_table()
|
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
from photutils import CircularAperture
from photutils import CircularAnnulus
from photutils import aperture_photometry
from colossus.cosmology import cosmology
from functools import partial
from scipy.optimize import curve_fit
import glob
import convergence_map
import importlib
import astropy.units as u
from astropy.stats import sigma_clipped_stats
import stacking
import plotting
import lensingModel
from scipy.optimize import minimize

# Pick up in-development edits to the local modules without restarting.
importlib.reload(convergence_map)
importlib.reload(stacking)
importlib.reload(plotting)
importlib.reload(lensingModel)

# Planck 2018 cosmology, plus an astropy-compatible copy of it.
cosmo = cosmology.setCosmology('planck18')
apcosmo = cosmo.toAstropy()
# measure the radial convergence profile from the stacked map
def measure_profile(image, step, reso=1.5, maxtheta=180):
    """Measure the azimuthally averaged radial profile of a stacked map.

    Parameters:
        image: square 2-D stacked convergence map.
        step: radial bin width, in the same angular units as maxtheta.
        reso: pixel scale (angular units per pixel).
        maxtheta: outermost radius to measure.

    Returns:
        np.ndarray of per-bin means: one central circular aperture followed
        by annuli of width `step` out to maxtheta.

    FIX: removed the unused `profile_unc` accumulator.
    """
    profile = []
    center = len(image)/2. - 0.5  # pixel coordinates of the map center
    steppix = step/reso  # bin width in pixels
    # Central bin: a filled circular aperture of one bin-width radius.
    inner_aper = CircularAperture([center, center], steppix)
    profile.append(float(aperture_photometry(image, inner_aper)['aperture_sum']/inner_aper.area))
    # Remaining bins: annuli covering [i*step, (i+1)*step).
    i = 1
    while step*i < maxtheta:
        new_aper = CircularAnnulus([center, center], steppix*i, steppix*(i+1))
        profile.append(float(aperture_photometry(image, new_aper)['aperture_sum']/new_aper.area))
        i += 1
    return np.array(profile)
def fit_best_bias_or_mass(obs_profile, zdist, p0=6E12, binsize=12, sigma=None, maxtheta=180, mode='mass'):
    """Fit the filtered lensing model to an observed radial profile.

    Parameters:
        obs_profile: binned convergence profile (as from measure_profile).
        zdist: redshift distribution of the sample.
        p0: initial mass guess (used only in 'mass' mode).
        sigma: per-bin uncertainties for the weighted fit; None means unweighted.
        mode: 'mass' fits halo mass, 'bias' fits a linear bias.

    Returns:
        log10(best-fit mass) in 'mass' mode, the best-fit bias in 'bias' mode.
        NOTE(review): any other mode silently returns None — confirm callers
        never pass one.
    """
    if sigma is None:
        # Unweighted fit when no per-bin uncertainties are supplied.
        sigma = np.ones(len(obs_profile))
    filtered_model_of_mass = partial(lensingModel.filtered_model_in_bins, zdist, binsize=binsize, maxtheta=maxtheta,
                                     mode=mode)
    # Bin centers matching measure_profile's binning scheme.
    obs_thetas = np.arange(binsize/2, maxtheta, binsize)
    if mode == 'mass':
        popt, pcov = curve_fit(filtered_model_of_mass, obs_thetas, obs_profile, p0=p0, bounds=[10**11, 10**14], sigma=sigma)
        return np.log10(popt[0])
    elif mode == 'bias':
        popt, pcov = curve_fit(filtered_model_of_mass, obs_thetas, obs_profile, p0=2., bounds=[0.5, 5.], sigma=sigma)
        return popt[0]
def fit_mass_to_peak(zdist, peakkappa, stdev=None):
    """Fit a halo mass so the filtered model's central value matches peakkappa.

    NOTE(review): `stdev` is accepted but never used — confirm whether it was
    meant to be forwarded to curve_fit as sigma.
    """
    partialpeak = partial(lensingModel.filtered_model_center, zdist)
    popt, pcov = curve_fit(partialpeak, [0], [peakkappa], p0=[6e12])
    return popt[0]
# use stacks of noise maps to estimate the uncertainty in each radial bin
def annuli_errs(color, mode='noise', binsize=12, reso=1.5, maxtheta=180):
    """Estimate per-bin uncertainties as the scatter of realization profiles.

    BUG FIX: the previous signature was (binsize=12, reso=1.5, maxtheta=180),
    but the call in fit_mass_or_bias_suite passes (color, 'noise', binsize,
    reso, maxtheta=...), which could not have worked. The signature now
    matches that caller, selecting realization files by color and mode the
    same way covariance_matrix does.
    """
    if mode == 'bootstrap':
        names = sorted(glob.glob('stacks/bootstacks/*_%s.npy' % color))
    elif mode == 'random':
        names = sorted(glob.glob('stacks/random_stacks/*_%s.npy' % color))
    else:
        names = sorted(glob.glob('stacks/noise_stacks/*_%s.npy' % color))
    noise_profiles = []
    for name in names:
        noisemap = np.load(name, allow_pickle=True)
        noise_profiles.append(measure_profile(noisemap, binsize, reso, maxtheta=maxtheta))
    # Standard deviation across realizations, per radial bin.
    return np.std(noise_profiles, axis=0)
def covariance_matrix(color, mode, binsize=12, reso=1.5, maxtheta=180):
    """Estimate the bin-to-bin covariance of the radial profile from a suite
    of realization stacks (bootstrap, noise, or random positions).

    Returns the (n_bins, n_bins) covariance matrix C_ij = <(k_i - <k_i>)(k_j - <k_j>)>
    with the unbiased 1/(N-1) normalization, and plots it as a side effect.

    FIXES: an unknown `mode` previously fell through to a NameError on `names`
    (now an explicit ValueError); the element-wise double loop (which also
    shadowed the builtin `sum`) is replaced by an equivalent vectorized
    computation over the deviation matrix.
    """
    patterns = {
        'bootstrap': 'stacks/bootstacks/*_%s.npy',
        'noise': 'stacks/noise_stacks/*_%s.npy',
        'random': 'stacks/random_stacks/*_%s.npy',
    }
    if mode not in patterns:
        raise ValueError('unknown mode: %s' % mode)
    names = sorted(glob.glob(patterns[mode] % color))
    noise_profiles = np.array([
        measure_profile(np.load(name, allow_pickle=True), binsize, reso, maxtheta=maxtheta)
        for name in names
    ])
    # Deviations of each realization from the mean profile; C = D^T D / (N-1).
    deviations = noise_profiles - np.mean(noise_profiles, axis=0)
    c_ij = deviations.T @ deviations / (len(noise_profiles) - 1)
    plotting.plot_cov_matrix(color, c_ij)
    return c_ij
def model_variance_weights(zdist, binsize=12, reso=1.5, imsize=240, maxtheta=180):
    """Per-bin spread of model profiles over a grid of plausible halo masses.

    Used by fit_mass_or_bias_suite to re-weight the error profile so bins
    where the model barely responds to mass carry less weight in the fit.
    """
    # 21 masses log-spaced over 10^11.5 .. 10^13.5.
    masses = np.logspace(11.5, 13.5, 21)
    models = []
    for mass in masses:
        models.append(lensingModel.filtered_model_in_bins(zdist, 1, mass, binsize=binsize, reso=reso, imsize=imsize, maxtheta=maxtheta))
    return np.std(models, axis=0)
"""def likelihood_for_mass(observed_profile, covar_mat, zdist, obs_thetas, m_per_h):
filtered_model_of_mass = lensingModel.filtered_model_in_bins(zdist, obs_thetas, m_per_h)
residual = observed_profile - filtered_model_of_mass
residual = residual[:, np.newaxis]
chi_square = np.dot(residual.T, np.dot(np.linalg.inv(covar_mat), residual))[0][0]
return 0.5 * chi_square
def fit_mc(color, samplename, plot=False, binsize=12, reso=1.5, mode='noise'):
# read in the stack of lensing convergence at positions of quasars
stacked_map = np.load('stacks/%s_%s_stack.npy' % (samplename, color), allow_pickle=True)
maxtheta = int(len(stacked_map) / 2 * reso)
# measure the profile of the stack using annular bins
kap_profile = measure_profile(stacked_map, binsize, reso=reso)
# calculate redshift distribution dn/dz
zdist = fits.open('catalogs/derived/%s_%s.fits' % (samplename, color))[1].data['Z']
covar = covariance_matrix(color, mode, binsize, reso)
obs_thetas = np.arange(binsize / 2, maxtheta, binsize)
partiallike = partial(likelihood_for_mass, kap_profile, covar, zdist, obs_thetas)
mss = np.logspace(12.5, 13, 10)
for m in mss:
print(partiallike(m))
#likelihood = log_likelihood(kap_profile, filtered_model_of_mass, covar)
#result = minimize(partiallike, )
#print(result)"""
def fit_mass_or_bias_suite(color, samplename, plot, binsize=12, reso=1.5, mode='noise', mass_or_bias_mode='mass'):
    """Fit an average halo mass (or bias) to a stacked convergence map, then
    estimate its uncertainty by refitting many realizations.

    mode selects the realization suite ('bootstrap', 'noise', or 'random');
    mass_or_bias_mode selects what is fit. Results are dumped to
    'masses/...' (mass mode, asymmetric errors) or 'bias/...' (bias mode).
    """
    # read in the stack of lensing convergence at positions of quasars
    stacked_map = np.load('stacks/%s_%s_stack.npy' % (samplename, color), allow_pickle=True)
    maxtheta = int(len(stacked_map) / 2 * reso)
    maxtheta = 180  # NOTE(review): immediately overrides the computed value above — confirm intended
    # measure the profile of the stack using annular bins
    kap_profile = measure_profile(stacked_map, binsize, reso=reso, maxtheta=maxtheta)
    # calculate redshift distribution dn/dz
    zdist = fits.open('catalogs/derived/%s_%s.fits' % (samplename, color))[1].data['Z']
    # estimate uncertainty in each annulus for model fitting
    # NOTE(review): verify these arguments against annuli_errs' parameter list.
    err_profile = annuli_errs(color, 'noise', binsize, reso, maxtheta=maxtheta)
    print(err_profile)
    # down-weight bins where the model varies little with mass
    mod_weights = model_variance_weights(zdist, binsize=binsize, reso=reso, maxtheta=maxtheta)
    err_profile = err_profile / mod_weights
    print(err_profile)
    #print(err_profile)
    #err_profile = covariance_matrix(color, 'noise', binsize=binsize, reso=reso, maxtheta=maxtheta)
    #print(err_profile)
    #print(np.sqrt(np.diagonal(err_profile)))
    #print(err_profile)
    # fit entire profile, not just the peak convergence
    #if use_peak == False:
    # fit whole profile
    avg_mass_or_bias = fit_best_bias_or_mass(kap_profile, zdist, binsize=binsize, sigma=err_profile, maxtheta=maxtheta,
                                             mode=mass_or_bias_mode)
    print(avg_mass_or_bias)
    # generate a noiseless stack using the best fit model
    if mass_or_bias_mode == 'mass':
        modelmap = lensingModel.model_stacked_map(zdist, 10**avg_mass_or_bias, imsize=len(stacked_map), reso=reso)
        p0 = 10**avg_mass_or_bias
    elif mass_or_bias_mode == 'bias':
        modelmap = lensingModel.model_stacked_map(zdist, avg_mass_or_bias, imsize=len(stacked_map), reso=reso,
                                                  mode='bias')
        p0 = avg_mass_or_bias
    else:
        # Unknown fit mode: nothing sensible to do.
        return
    # to estimate uncertainty on mass, add noise map to the noiseless model and refit many times
    masses_or_biases = []
    profiles = []
    if mode == 'bootstrap':
        bootnames = sorted(glob.glob('stacks/bootstacks/*_%s.npy' % color))
        #for i in range(len(bootnames)):
        for i in range(len(bootnames)):
            bootmap = np.load(bootnames[i], allow_pickle=True)
            bootprofile = measure_profile(bootmap, binsize, reso=reso, maxtheta=maxtheta)
            profiles.append(bootprofile)
            bootmass_or_bias = fit_best_bias_or_mass(bootprofile, zdist, p0=p0, binsize=binsize, sigma=err_profile,
                                                     maxtheta=maxtheta, mode=mass_or_bias_mode)
            print(bootmass_or_bias)
            masses_or_biases.append(bootmass_or_bias)
    elif mode == 'noise':
        noisestacknames = sorted(glob.glob('stacks/noise_stacks/*_%s.npy' % color))
        #for i in range(len(noisestacknames)):
        # NOTE(review): only the first 10 noise realizations are used — confirm intended.
        for i in range(10):
            noisemap = np.load(noisestacknames[i], allow_pickle=True)
            model_plus_noise = modelmap + noisemap
            noisyprofile = measure_profile(model_plus_noise, binsize, reso=reso, maxtheta=maxtheta)
            profiles.append(noisyprofile)
            noisymass = fit_best_bias_or_mass(noisyprofile, zdist, p0=p0, binsize=binsize, sigma=err_profile,
                                              maxtheta=maxtheta, mode=mass_or_bias_mode)
            print(noisymass)
            masses_or_biases.append(noisymass)
    elif mode == 'random':
        randnames = sorted(glob.glob('stacks/random_stacks/*_%s.npy' % color))
        for i in range(len(randnames)):
            randmap = np.load(randnames[i], allow_pickle=True)
            model_plus_noise = modelmap + randmap
            # NOTE(review): unlike the other branches, maxtheta is not passed here — confirm.
            noisyprofile = measure_profile(model_plus_noise, binsize, reso=reso)
            profiles.append(noisyprofile)
            noisymass = fit_best_bias_or_mass(noisyprofile, zdist, p0=p0, binsize=binsize, sigma=err_profile,
                                              maxtheta=maxtheta, mode=mass_or_bias_mode)
            print(noisymass)
            masses_or_biases.append(noisymass)
    masses_or_biases = np.array(masses_or_biases)
    if mass_or_bias_mode == 'mass':
        masses = masses_or_biases
        # Asymmetric errors: separate scatter for realizations above and below the best fit.
        highermasses = masses[np.where(masses > avg_mass_or_bias)]
        higher_std = np.sqrt(1/(len(highermasses)-1)*np.sum(np.square(highermasses - avg_mass_or_bias)))
        lowermasses = masses[np.where(masses < avg_mass_or_bias)]
        lower_std = np.sqrt(1/(len(lowermasses)-1)*np.sum(np.square(lowermasses - avg_mass_or_bias)))
        np.array([avg_mass_or_bias, higher_std, lower_std]).dump('masses/%s_%s_mass.npy' % (samplename, color))
    elif mass_or_bias_mode == 'bias':
        bias_std = np.std(masses_or_biases)
        np.array([avg_mass_or_bias, bias_std]).dump('bias/%s/%s.npy' % (samplename, color))
"""kap_errs = np.std(profiles, axis=0)
if plot:
obs_theta = np.arange(binsize / 2, maxtheta, binsize)
theta_range = np.arange(0.5, maxtheta, 0.5)
best_mass_profile = lensingModel.filtered_model_at_theta(zdist, 10 ** (avg_mass), theta_range)
# lowest_mass_profile = filter_model(zdist, theta_range, 10**(lowmass))
binned_model = lensingModel.filtered_model_in_bins(zdist, obs_theta, 10 ** (avg_mass), binsize, reso, maxtheta=maxtheta)
theta_range = np.arange(0.5, maxtheta, 0.5) * u.arcmin.to('rad')
oneterm = lensingModel.int_kappa(theta_range, 10**avg_mass, 'one', zdist)
twoterm = lensingModel.int_kappa(theta_range, 10**avg_mass, 'two', zdist)
plotting.plot_kappa_profile(color, kap_profile, kap_errs, binsize, maxtheta, best_mass_profile, binned_model, oneterm, twoterm)
else:
if do_stack:
cat = fits.open('catalogs/derived/%s_%s.fits' % (samplename, color))[1].data
ras, decs = cat['RA'], cat['DEC']
planck_map = hp.read_map('maps/smoothed_masked_planck.fits', dtype=np.single)
peak_k, background_std = stacking.fast_stack(ras, decs, planck_map, iterations=1000)
else:
background_avg, background_med, background_std = sigma_clipped_stats(stacked_map)
peak_k = np.max(stacked_map)
print(peak_k, background_std)
avg_mass, higher_std, lower_std = fit_mass_to_peak(zdist, peak_k, stdev=background_std)"""
def fit_mass_to_cutouts(sample_id, color, binsize=12, nbootstraps=0):
    """Fit an average halo mass to a stacked lensing-convergence map.

    Parameters
    ----------
    sample_id : sample name; used to locate the derived catalog on disk
    color : colour label of the sample (also part of the catalog filename)
    binsize : angular bin width for the radial convergence profile
    nbootstraps : number of bootstrap realisations for the mass error;
        0 skips error estimation entirely (nothing is written to disk)

    Side effects: prints diagnostics; when nbootstraps > 0, dumps
    [mass, err, err] to 'masses/<sample_id>_<color>_mass.npy'.
    """
    # read in the stack of lensing convergence at positions of quasars
    stacked_map = stacking.stack_suite(color, sample_id, True, False, mode='cutout')
    print(np.max(stacked_map))
    # radial profile of the stack; reso=1.5 is presumably arcmin/pixel — TODO confirm
    kap_profile = measure_profile(stacked_map, binsize, reso=1.5)
    # redshift distribution dn/dz of the sample
    zdist = fits.open('catalogs/derived/%s_%s.fits' % (sample_id, color))[1].data['Z']
    avgmass = fit_best_mass(kap_profile, zdist, binsize=binsize, maxtheta=180)
    print(avgmass)
    if nbootstraps > 0:
        mass_bootstraps = []
        for j in range(nbootstraps):
            # re-stack with bootstrap resampling and refit the mass
            bootmap = stacking.stack_suite(color, sample_id, True, False, mode='cutout', bootstrap=True)
            bootprofile = measure_profile(bootmap, binsize, reso=1.5)
            mass_bootstraps.append(fit_best_mass(bootprofile, zdist, binsize=binsize, maxtheta=180))
        mass_errs = np.std(mass_bootstraps, axis=0)
        # symmetric error bars: the same scatter is stored as upper and lower
        np.array([avgmass, mass_errs, mass_errs]).dump('masses/%s_%s_mass.npy' % (sample_id, color))
# !!!!! need to figure out weight for each bin for optimal fit
def gaussian(x, a1, b1, s1):
    """Unnormalised Gaussian: a1 * exp(-(x - b1)^2 / (2 * s1^2))."""
    exponent = -np.square(x - b1) / (2.0 * s1 ** 2)
    return a1 * np.exp(exponent)
def fit_gauss_hist_one_sided(data, xs, nbins):
    """Fit a Gaussian to the left (rising) side of a histogram of *data*.

    Only bins up to (slightly past) the histogram peak are used, which is
    useful when the right-hand tail of the distribution is contaminated.

    Parameters
    ----------
    data : 1-D array of samples to histogram
    xs : unused; kept for backward compatibility with existing callers
    nbins : number of histogram bins

    Returns
    -------
    popt : best-fit (amplitude, centre, sigma) for `gaussian`
    """
    histvals, histbins = np.histogram(data, bins=nbins)
    # include nbins//100 extra bins past the peak so the fit can see it
    cutoff = histvals.argmax() + int(nbins / 100)
    lefthist = histvals[:cutoff]
    leftbins = histbins[:cutoff]
    maxval = np.max(histvals)
    peakcolor = histbins[histvals.argmax()]
    # seed the fit with the observed peak height and location
    popt, pcov = curve_fit(gaussian, leftbins, lefthist, p0=[maxval, peakcolor, 0.1])
    return popt
import RPi.GPIO as GPIO
class Button:
    """A debounced GPIO push button that maps presses to a command.

    `get_command` returns `cmd` exactly once per press (on the first poll
    that sees the line low) and `noop` on every other poll.
    """

    def __init__(self, pin, cmd, noop):
        self.pin = pin      # pin number (numbering scheme chosen by the caller)
        self.cmd = cmd      # value reported on a fresh press
        self.noop = noop    # value reported when nothing new happened
        self.state = False  # True while the button is held down

    def setup(self):
        """Configure the pin as an input with the internal pull-up enabled."""
        GPIO.setup(self.pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

    def get_command(self):
        """Poll the pin; return cmd on a fresh press, otherwise noop."""
        if GPIO.input(self.pin) != GPIO.LOW:
            # released: the pull-up keeps the line high
            self.state = False
            return self.noop
        if self.state:
            # still held down from an earlier poll — already reported
            return self.noop
        self.state = True
        return self.cmd
|
# -*- coding:utf-8 -*-
"""
@Time: 2019/09/27 10:51
@Author: Shanshan Wang
@Version: Python 3.7
@Function:
"""
import numpy as np
import torch
import random
from torch.autograd import Variable
# Demo: lists -> torch tensors -> numpy arrays -> a LongTensor Variable.
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a = [torch.Tensor(row) for row in a]
print('a:', a)
# pull the raw values back out as numpy arrays
a = [tensor.data.numpy() for tensor in a]
print('aa:', a)
# rebuild as a single integer tensor (Variable is a legacy no-op wrapper)
action = Variable(torch.LongTensor(a))
print(action)
# needs for implementing tests
import unittest
# needs for webdriver
from selenium import webdriver
# needs for typing in input
from selenium.webdriver.common.keys import Keys
# class that has used the unittest and declares it as TestCase
# class that has used the unittest and declares it as TestCase
class PythonOrgSearch(unittest.TestCase):
    """Browser smoke test: search python.org and expect results.

    NOTE(review): requires a chromedriver binary at drivers/chromedriver
    and network access; uses the selenium 3.x element-finder API.
    """

    # always run first
    def setUp(self):
        self.driver = webdriver.Chrome("drivers/chromedriver")

    # runs and must always starts with the word test
    def test_search_in_python(self):
        driver = self.driver
        driver.get("http://www.python.org")
        self.assertIn("Python", driver.title)
        # type a query into the search box and submit with Enter
        elem = driver.find_element_by_name("q")
        elem.send_keys("pycon")
        elem.send_keys(Keys.RETURN)
        assert "No results found." not in driver.page_source

    # gets called after every test method
    def tearDown(self):
        self.driver.close()


if __name__ == "__main__":
    # used to run test suite
    unittest.main()
# Demo: lists are mutable, tuples are immutable.
nombres = ["juan", "alejandro", "maria"]
print(type(nombres), type(nombres[0]))
print(nombres[0])

apellidos = ("diaz", "daza", "medina")
print(type(apellidos), type(apellidos[0]))
print(apellidos[0])

# a list element can be reassigned in place...
nombres[1] = "mario"
# ...but a tuple must be rebuilt to "change" one
apellidos = apellidos[:2] + ("salgado",)
nombres.append("jose")
print(nombres)
print(apellidos)

# bare comma-separated values pack into a tuple...
x = 1, 2, 4
print(x, type(x))
# ...and unpack back into names
y, w = 2, 4
print(y)
print(w)
from funcs import markov_chain
from random import random
from time import time
cdf = None
def get_symbol(table):
    """Draw a random key from *table*, a dict of symbol -> cumulative probability.

    Returns None if no cumulative value exceeds the random draw.
    """
    threshold = random()
    for symbol, cum_prob in table.items():
        if threshold < cum_prob:
            return symbol
def round_table(table, generated):
    """Walk nested dict *table* down the key path *generated*.

    Returns the sub-table reached after consuming the whole path, or
    None as soon as a key is missing.
    """
    node = table
    for key in generated:
        if key not in node:
            return None
        node = node[key]
    return node
def generate_symbol(generated, index):
    """Pick the next symbol given context *generated* and chain order *index*.

    Backs off to shorter contexts / lower-order tables (global `cdf`)
    until a matching sub-table is found.
    """
    global cdf
    context = generated
    order = index
    while True:
        subtable = round_table(cdf[order], context)
        if subtable:
            return get_symbol(subtable)
        # back off: drop the oldest context symbol, use a lower-order table
        context = context[1:]
        order -= 1
def generator(n):
    """Generate an n-character string from the global Markov cdf tables."""
    global cdf
    max_depth = len(cdf)
    out = ''
    for i in range(1, n + 1):
        order = min(i, max_depth)
        out += generate_symbol(out[-order + 1:], order)
    return out
def main():
    """Build the Markov-chain tables from ./src, then print samples forever.

    An empty line at the prompt prints another sample; any other input stops.
    """
    global cdf
    # order-8 chain; only the cumulative-distribution tables are kept here
    _, _, cdf = markov_chain(r'.\src', 8)
    while True:
        print(generator(100))
        if input():
            break


if __name__ == '__main__':
    main()
|
from gtts import gTTS
from playsound import playsound
import datetime
import webbrowser
import wikipedia
import json, codecs, apiai
import speech_recognition as sr
def speak(mytext):
    """Synthesise *mytext* with Google TTS and play it aloud.

    NOTE(review): overwrites groot.mp3 in the working directory on every
    call, and gTTS needs network access.
    """
    myobj = gTTS(text=mytext, lang='hi', slow=False)
    myobj.save("groot.mp3")
    playsound("groot.mp3")
def wishMe():
    """Speak a time-of-day greeting followed by the assistant intro."""
    hour = int(datetime.datetime.now().hour)
    if hour < 12:
        # midnight through 11:59
        mytext = "Good Morning!"
    elif hour < 18:
        mytext = "Good Afternoon!"
    else:
        mytext = "Good Evening!"
    speak(mytext + " I am Groot, version 1.0, How may I help you?")
def takeCommand():
    """Listen on the microphone and return the recognised text.

    Returns the literal string "None" (a string sentinel, not the None
    object) when recognition fails; callers compare against that value.
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        r.adjust_for_ambient_noise(source)
        print("Listening...")
        r.pause_threshold = 1  # seconds of silence that ends a phrase
        audio = r.listen(source)
        try:
            print("Recoginzing...")
            # Google Web Speech API with the Indian-English model
            user_query = r.recognize_google(audio, language = 'en-in')
            print(f"User said: {user_query}\n")
        except Exception as e:
            # any recognition/network failure falls through to the sentinel
            print(e)
            print("Say that again please...")
            return "None"
    return user_query
if __name__ == "__main__":
    wishMe()
    # NOTE(review): hard-coded API credential — move to an env var/config.
    CLIENT_ACCESS_TOKEN = "7f790c9c5d11467493162773c9196204"
    ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
    groot = ai.text_request()
    groot.lang = 'en '  # NOTE(review): trailing space looks accidental — confirm
    groot.session_id = "<SESSION ID, UNIQUE FOR EACH USER>"
    # main command loop: simple keyword routing, else fall back to api.ai
    while True:
        user_query = takeCommand().lower()
        if 'what is' in user_query:
            # Wikipedia lookup on the remainder of the query
            print("Searching wikipedia...")
            user_query = user_query.replace("what is", "")
            results = wikipedia.summary(user_query, sentences=2)
            speak("According to Wikipedi...")
            # speak("Baba Bole...")
            print(results)
            speak(results)
        elif 'open youtube' in user_query:
            print("Opening YouTube...")
            webbrowser.open_new_tab("https://www.youtube.com/")
        elif 'open github' in user_query:
            print("Opening GitHub...")
            webbrowser.open_new_tab("https://github.com/")
        elif 'open google' in user_query:
            print("Opening Google...")
            webbrowser.open_new_tab("https://www.google.com/")
        elif 'the time' in user_query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"Sir, the time is {strTime}")
        else:
            # anything unrecognised goes to the Dialogflow (api.ai) agent
            groot.query = user_query
            print(groot.query)
            response = groot.getresponse()
            obj = json.load(response)
            reply = obj['result']['fulfillment']['speech']
            speak(reply)
|
# coding=utf-8
import numpy as np
import matplotlib.pyplot as mp
from mpl_toolkits.mplot3d import axes3d
n = 500
# three independent standard-normal coordinates
x = np.random.normal(0, 1, n)
y = np.random.normal(0, 1, n)
z = np.random.normal(0, 1, n)

fig = mp.figure('3D Scatter')
# mp.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3-D axes.
ax3d = fig.add_subplot(projection='3d')
ax3d.set_xlabel('x', fontsize=14)
ax3d.set_ylabel('y', fontsize=14)
ax3d.set_zlabel('z', fontsize=14)
mp.tick_params(labelsize=10)
# colour each point by its squared distance from the origin
d = x**2 + y**2 + z**2
ax3d.scatter(x, y, z, s=30, c=d, alpha=0.7, cmap='jet_r')
mp.show()
from __future__ import annotations
from collections import defaultdict
from datetime import date, datetime, timedelta, timezone
from typing import Dict, List
import pytest
from barkylib import bootstrap
from barkylib.domain import commands
from barkylib.services import handlers, unit_of_work
from barkylib.adapters import repository
from barkylib.adapters.orm import start_mappers
from barkylib.services.unit_of_work import FakeUnitOfWork
def boostrap_test_app():
    """Bootstrap the message bus with a fake unit of work for tests.

    NOTE(review): the name is a typo for "bootstrap_test_app" — kept
    because every test below calls it under this name.
    """
    return bootstrap.bootstrap(start_orm=False, uow=FakeUnitOfWork())
class TestAddBookmark:
    """Handler-level tests for AddBookmarkCommand through the message bus.

    Each test builds a fresh bus backed by FakeUnitOfWork, dispatches
    AddBookmarkCommand message(s), then asserts on the fake repository
    state and the committed flag.

    NOTE(review): the f"..." literals contain no placeholders — plain
    string literals would do.
    """

    def test_add_single_bookmark(self):
        """Adding one bookmark makes it retrievable by title and commits."""
        bus = boostrap_test_app()
        nu: datetime = datetime(2021, 3, 31, 0, 0, 0, 0, tzinfo=timezone.utc)
        # add one
        bus.handle(
            commands.AddBookmarkCommand(
                0,
                f"Test",  # title
                f"http://example.com",  # url
                nu.isoformat(),  # date added
                nu.isoformat(),  # date edited
            )
        )
        assert bus.uow.bookmarks.get_by_title(f"Test") is not None
        assert bus.uow.committed

    def test_get_bookmark_by_id(self):
        """A bookmark added with id 99 is retrievable by that id."""
        bus = boostrap_test_app()
        nu: datetime = datetime(2021, 3, 31, 0, 0, 0, 0, tzinfo=timezone.utc)
        # add one
        bus.handle(
            commands.AddBookmarkCommand(
                99,
                f"Test",  # title
                f"http://example.com",  # url
                nu.isoformat(),  # date added
                nu.isoformat(),  # date edited
            )
        )
        assert bus.uow.bookmarks.get_by_id(99) is not None
        assert bus.uow.committed

    def test_get_bookmark_by_url(self):
        """A bookmark is retrievable by its URL after being added."""
        bus = boostrap_test_app()
        nu: datetime = datetime(2021, 3, 31, 0, 0, 0, 0, tzinfo=timezone.utc)
        # add one
        bus.handle(
            commands.AddBookmarkCommand(
                99,
                f"Test",  # title
                f"http://example.com",  # url
                nu.isoformat(),  # date added
                nu.isoformat(),  # date edited
            )
        )
        assert bus.uow.bookmarks.get_by_url(f"http://example.com") is not None
        assert bus.uow.committed

    def test_get_all_bookmarks(self):
        """Two added bookmarks both show up in get_all()."""
        bus = boostrap_test_app()
        nu: datetime = datetime(2021, 3, 31, 0, 0, 0, 0, tzinfo=timezone.utc)
        bus.handle(
            commands.AddBookmarkCommand(
                99,
                f"Test",  # title
                f"http://example.com",  # url
                nu.isoformat(),  # date added
                nu.isoformat(),  # date edited
            )
        )
        # second bookmark with a later timestamp
        nuto = nu + timedelta(days = 2, hours=12)
        bus.handle(
            commands.AddBookmarkCommand(
                999,
                f"Test2",  # title
                f"http://example.com",  # url
                nuto.isoformat(),  # date added
                nuto.isoformat(),  # date edited
            )
        )
        records = bus.uow.bookmarks.get_all()
        assert len(records) == 2
import random as r
import pygame as pg
import collections
from pygame_ops import pygame_fix
from rich.console import Console
from recursive_backtracker import draw_points
c = Console()
def available_vertices(width, game_display, multiplier):
    """Build the maze lattice: sorted vertices plus randomly ordered wall edges.

    Vertices live on even (i, j) coordinates of a width x width grid; an
    edge midpoint sits on the odd coordinate between two neighbours.

    Parameters
    ----------
    width : number of vertices per side
    game_display, multiplier : unused here; kept for caller compatibility

    Returns
    -------
    (vertices, edges) : sorted vertex list and a shuffled edge list
    """
    vertices = set()
    edges = []
    for i in range(0, width * 2, 2):
        for j in range(0, width * 2, 2):
            vertices.add((i, j))
            # edges to the right / below, while still inside the grid
            if i < width * 2 - 2:
                edges.append((i + 1, j))
            if j < width * 2 - 2:
                edges.append((i, j + 1))
    # Shuffle for random edge order.  The original keyed a dict with
    # r.randint(1, 9999): duplicate keys silently overwrote (dropped) edges.
    r.shuffle(edges)
    pygame_fix()
    return sorted(vertices), edges
def connected_vertices(available_v, edge):
    """Return the two lattice vertices joined by *edge*.

    An edge midpoint has one odd coordinate; its endpoints are either the
    horizontal or the vertical neighbours.
    """
    left = (edge[0] - 1, edge[1])
    right = (edge[0] + 1, edge[1])
    if left in available_v and right in available_v:
        return [left, right]
    # otherwise the edge is vertical
    return [(edge[0], edge[1] + 1), (edge[0], edge[1] - 1)]
def adjacent_edges(available_v, V, edges, maze):
    """Collect the edges incident to any vertex in *V*.

    An edge qualifies when it exists in *edges* and has not already been
    carved into *maze*.

    NOTE(review): the original body was truncated mid-statement
    (`for edge in possible_edges` with no suite — a SyntaxError); this
    completion follows the apparent Prim-style intent — confirm against
    the caller before relying on it.
    """
    return_values = []
    for vertex in V:
        # the four candidate edge midpoints around a lattice vertex
        possible_edges = [(vertex[0] + 1, vertex[1]),
                          (vertex[0] - 1, vertex[1]),
                          (vertex[0], vertex[1] + 1),
                          (vertex[0], vertex[1] - 1)]
        for edge in possible_edges:
            if edge in edges and edge not in maze:
                return_values.append(edge)
    return return_values
def generate_maze(width, game_display, colour, multiplier=1):
    """(Work in progress) Generate a maze over a width x width lattice.

    NOTE(review): this body is clearly unfinished — the loop returns on
    its first iteration, so `maze` is always empty.  Also
    r.randint(0, len(vertices)) is inclusive of len(vertices) and can
    index one past the end of `vertices`; r.randrange(len(vertices)) is
    presumably what was meant — confirm before relying on this.
    """
    vertices, edges = available_vertices(width, game_display, multiplier)
    maze = []
    # start from a single random vertex index
    V = [r.randint(0, len(vertices))]
    while True:
        pygame_fix()
        return maze
|
# Generated by Django 3.1.5 on 2021-05-29 01:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the accounts app: Profile and Student models.

    Auto-generated by Django.  Do not hand-edit an applied migration —
    create a new migration for any schema change instead.
    """

    initial = True

    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]

    operations = [
        # Profile extends the built-in auth user one-to-one (user is the PK)
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
                ('department', models.CharField(blank=True, max_length=100, null=True)),
                ('title', models.CharField(blank=True, max_length=100, null=True)),
            ],
            options={
                'verbose_name_plural': 'Προφίλ Χρηστών',
            },
        ),
        # Student extends Profile one-to-one with postgrad programme choices
        migrations.CreateModel(
            name='Student',
            fields=[
                ('profile', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='accounts.profile')),
                ('postgrad_program', models.CharField(choices=[('Κατεύθυνση 1η: Τεχνολογίες και Εφαρμογές Ιστού', 'Κατεύθυνση 1η: Τεχνολογίες και Εφαρμογές Ιστού'), ('Κατεύθυνση 2η: Διαχείριση Δικτύων Επικοινωνιών και Υπηρεσιών Επόμενης Γενιάς', 'Κατεύθυνση 2η: Διαχείριση Δικτύων Επικοινωνιών και Υπηρεσιών Επόμενης Γενιάς'), ('Κατεύθυνση 3η: Πληροφοριακά Συστήματα στη Διοίκηση Επιχειρήσεων', 'Κατεύθυνση 3η: Πληροφοριακά Συστήματα στη Διοίκηση Επιχειρήσεων')], max_length=80, null=True)),
                ('program_duration', models.CharField(choices=[('Πλήρης Φοίτηση', 'Πλήρης Φοίτηση'), ('Μερική Φοίτηση', 'Μερική Φοίτηση')], max_length=14, null=True)),
            ],
            options={
                'verbose_name': 'Φοιτητής',
                'verbose_name_plural': 'Φοιτητές',
            },
        ),
    ]
|
n = 10  # sample input; expected output: '41'


def solution(n):
    """Convert 1-based n to the '124 country' numeral system.

    Effectively base 3 with digit symbols {1, 2, 4} and no zero: a
    remainder of 0 maps to digit '4' and borrows one from the quotient.

    Fixed: the original `while True` loop never terminated for n <= 0;
    this version returns '' for such inputs instead of hanging.
    """
    digits = ''
    while n > 0:
        n, rem = divmod(n, 3)
        if rem == 0:
            digits = '4' + digits
            n -= 1  # borrow: '4' stands in for a zero digit
        else:
            digits = str(rem) + digits
    return digits


print(solution(n))
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import spacy
import matplotlib.pyplot as plt
EN = spacy.load('en_core_web_sm')
from sklearn.preprocessing import MultiLabelBinarizer
import fasttext
from collections import Counter

data = pd.read_csv('Preprocessed_data.csv')

# tags come in as 'a|b|c' strings; split them into lists
data.tags = data.tags.apply(lambda x: x.split('|'))

# Count tag frequencies.  The original dict-based loop initialised the
# first occurrence of a tag to 0 instead of 1, undercounting every tag
# by one; Counter counts correctly.
tag_freq_dict = Counter(tag for tags in data.tags for tag in tags)

# Keep only the most common tags
tags_to_use = 600
final_tags = [tag for tag, _ in tag_freq_dict.most_common(tags_to_use)]
# Keep only the posts whose tag list intersects the top tags; collect
# their titles as the model inputs (X) alongside the filtered tags.
final_tag_data = []
X = []
for i in range(0, len(data)):
    temp = []
    for tag in data.iloc[i].tags:
        # NOTE(review): `final_tags` is a list, so membership is O(n);
        # a set would make this loop much faster on large data
        if tag in final_tags:
            temp.append(tag)
    if(temp != []):
        final_tag_data.append(temp)
        X.append(data.iloc[i].processed_title)

# multi-hot encode the per-post tag lists
tag_encoder = MultiLabelBinarizer()
tags_encoded = tag_encoder.fit_transform(final_tag_data)

# Load pre-trained embeddings
# NOTE(review): fasttext_model is loaded but never used below — confirm
fasttext_model = fasttext.load_model('embeddings.bin')
import gensim

# WORD2VEC hyper-parameters (documented training settings; the model
# below is loaded pre-trained, so these constants are unused here)
W2V_SIZE = 300
W2V_WINDOW = 7
W2V_EPOCH = 32
W2V_MIN_COUNT = 10
w2v_model = gensim.models.word2vec.Word2Vec.load('SO_word2vec_embeddings.bin')

# Model Training
# Split into train and test set (fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(np.array(X), tags_encoded, test_size=0.2, random_state=42)
print("TRAIN size:", len(X_train))
print("TEST size:", len(X_test))
# NOTE(review): the label says TEST but this prints a *train* row
print("Y _ TEST:", y_train[1])
# Tokenizing
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, Dropout
from keras.utils.np_utils import to_categorical

# Max number of words in each complaint.
MAX_SEQUENCE_LENGTH = 300
# This is fixed: must match the word2vec vector size.
EMBEDDING_DIM = 300

# fit the tokenizer on the full post corpus, not just the titles
tokenizer = Tokenizer()
tokenizer.fit_on_texts(data.post_corpus)
word_index = tokenizer.word_index
vocab_size = len(word_index)
print('Found %s unique tokens.' % len(word_index))

# saving the fitted tokenizer for inference-time reuse
import pickle
with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)

# loading tokenizer
# NOTE(review): reloading immediately after saving is redundant within a
# single run — presumably left over from a notebook workflow.
import pickle
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)
word_index = tokenizer.word_index
vocab_size = len(word_index)
print('Found %s unique tokens.' % len(word_index))

# Convert the data to padded sequences
X_train_padded = tokenizer.texts_to_sequences(X_train)
X_train_padded = pad_sequences(X_train_padded, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', X_train_padded.shape)

# Embedding matrix for the embedding layer.  Row 0 stays all-zero
# because Tokenizer indices start at 1 (hence vocab_size + 1 rows).
embedding_matrix = np.zeros((vocab_size+1, 300))
for word, i in tokenizer.word_index.items():
    if word in w2v_model.wv:
        embedding_matrix[i] = w2v_model.wv[word]
print(embedding_matrix.shape)
import keras.backend as K

# Custom loss function to handle multilabel classification task
def multitask_loss(y_true, y_pred):
    """Summed binary cross-entropy over all labels, averaged over the batch."""
    # Avoid divide by 0 inside the logs
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # Multi-task loss
    return K.mean(K.sum(- y_true * K.log(y_pred) - (1 - y_true) * K.log(1 - y_pred), axis=1))
# Build Model: frozen word2vec embeddings -> LSTM -> wide dense stack ->
# one sigmoid per tag (600 = tags_to_use).
import keras
model = Sequential()
model.add(Embedding(vocab_size+1, 300, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False))
model.add(LSTM(600, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(units = 10000, kernel_initializer = 'glorot_uniform', activation = 'relu'))
model.add(Dropout(0.35))
model.add(Dense(units = 1150, kernel_initializer = 'glorot_uniform',
                activation = 'relu'
                )
          )
model.add(Dropout(0.25))
model.add(Dense(units = 750,
                kernel_initializer = 'glorot_uniform',
                activation = 'relu'
                )
          )
model.add(Dense(600, activation='sigmoid'))
model.summary()
model.compile(loss=multitask_loss,
              optimizer="adam")

# Train Model
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
# NOTE(review): EarlyStopping monitors 'val_acc' but no accuracy metric
# is compiled above, so it will warn and never trigger — confirm intent.
callbacks = [ ReduceLROnPlateau(monitor='val_loss', patience=5, cooldown=0),
              EarlyStopping(monitor='val_acc', min_delta=1e-4, patience=5)]
BATCH_SIZE = 256
history = model.fit(X_train_padded, y_train,
                    batch_size=BATCH_SIZE,
                    epochs=8,
                    validation_split=0.1,
                    verbose=1,
                    callbacks=callbacks)

# Save model
model.save('Tag_predictor.h5')
# Helper function to save the training history for plotting purposes
import json,codecs
import numpy as np
def saveHist(path,history):
new_hist = {}
for key in list(history.history.keys()):
if type(history.history[key]) == np.ndarray:
new_hist[key] == history.history[key].tolist()
elif type(history.history[key]) == list:
if type(history.history[key][0]) == np.float64:
new_hist[key] = list(map(float, history.history[key]))
print(new_hist)
with codecs.open(path, 'w', encoding='utf-8') as f:
json.dump(new_hist, f, separators=(',', ':'), sort_keys=True, indent=4)
def loadHist(path):
    """Load a training-history dict previously written by saveHist."""
    with codecs.open(path, 'r', encoding='utf-8') as handle:
        return json.loads(handle.read())
from keras.models import load_model
import keras.losses

# register the custom loss so load_model can deserialise the checkpoint
keras.losses.multitask_loss = multitask_loss
model = load_model('Tag_predictor.h5')

#saveHist('./train_history.json', history)
# NOTE(review): the save/load paths differ ('./train_history.json' vs
# 'train_history.json') — same file only when CWD is the script dir.
history = loadHist('train_history.json')

# Evaluation on the held-out test split
import matplotlib.pyplot as plt
X_test_padded = tokenizer.texts_to_sequences(X_test)
X_test_padded = pad_sequences(X_test_padded, maxlen=MAX_SEQUENCE_LENGTH)
score = model.evaluate(X_test_padded, y_test, batch_size=512)
print("LOSS:",score)

# loss curves from the saved history dict
loss = history['loss']
val_loss = history['val_loss']
epochs = range(len(loss))
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.savefig('evaluation.png')
# A wrapper function in order to predict the tags for any given input
def predict_tags(text):
    """Return the predicted tag tuple(s) for a raw text string.

    Thresholds each sigmoid output at 0.5, then inverse-transforms the
    multi-hot vector through the fitted MultiLabelBinarizer.
    """
    # Tokenize text
    x_test = pad_sequences(tokenizer.texts_to_sequences([text]), maxlen=MAX_SEQUENCE_LENGTH)
    # Predict
    prediction = model.predict([x_test])[0]
    # binarise the per-tag probabilities in place
    for i,value in enumerate(prediction):
        if value > 0.5:
            prediction[i] = 1
        else:
            prediction[i] = 0
    tags = tag_encoder.inverse_transform(np.array([prediction]))
    return tags


# spot-check five random test examples against the ground truth
test_idx = np.random.randint(len(X_test), size=5)
for idx in test_idx:
    test_case = idx
    print('Test Case: ' + str(X_test[test_case]))
    print('-'*100)
    print('Predicted: ' + str(predict_tags(X_test[test_case])))
    print('Ground Truth: ' + str(tag_encoder.inverse_transform(np.array([y_test[test_case]]))))
    print('\n')
|
# Untitled - By: Javier - Mon Jul 27 2020
import sensor, image, time, utime
from pyb import LED
# OpenMV camera loop: capture frames and use the on-board LEDs to show
# whether each frame stayed within the real-time budget of 1/FPS seconds.
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE) # or GRAYSCALE...
sensor.set_framesize(sensor.VGA) # or QQVGA...
sensor.set_windowing((640, 80))  # crop to a 640x80 strip
sensor.skip_frames(time = 2000)  # let the sensor settle
#clock = time.clock()
FPS=10  # target frame rate; budget is 1/FPS seconds per iteration
red_led=LED(1);  # LEDs indicate whether the program runs in real time
green_led=LED(2);
blue_led=LED(3);
while(True):
    #clock.tick()
    start = utime.ticks_ms()
    img = sensor.snapshot()
    #img = sensor.snapshot().cartoon(seed_threshold=0.1, floating_thresholds=0.05)  # cartoon() kept as an optional extra CPU load for testing
    # elapsed wall time for this frame, in milliseconds
    t_elapsed = utime.ticks_diff(utime.ticks_ms(), start)
    if t_elapsed/1000 > (1/FPS):  # over budget -> red
        green_led.off()
        red_led.on()
    else:  # within budget -> green
        green_led.on()
        red_led.off()
    #print(clock.fps(), t_elapsed, 1/FPS )
    # https://forums.openmv.io/viewtopic.php?t=689 — running the program without streaming to the PC
|
#!/usr/bin/env python
# coding=utf-8
#================================================================
# Copyright (C) 2020 Fisher. All rights reserved.
#
# 文件名称:DIEN.py
# 创 建 者:YuLianghua
# 创建日期:2020年01月18日
# 描 述:
#
#================================================================
# was `from tensorflow as tf` — a SyntaxError; `import ... as` is correct
import tensorflow as tf

from rnn import dynamic_rnn

from .dien_ops import VecAttGRUCell
from .dien_ops import prelu, dice
class DIEN(object):
    """Deep Interest Evolution Network (DIEN) for CTR prediction.

    Pipeline: embedding layer -> GRU over the behaviour sequence
    (interest extraction, supervised by an auxiliary loss on negative
    samples) -> target attention + AUGRU (interest evolution) -> fully
    connected prediction layers (Zhou et al., AAAI 2019).

    NOTE(review): the original source contained several syntax errors
    (placeholders with a positional argument after a keyword argument, a
    statement fused into a comment line, a missing `=` in
    auxiliary_loss, and a truncated AUGRU dynamic_rnn call).  They are
    repaired here following the published DIEN reference implementation
    — review the semantic fixes before shipping.
    """

    def __init__(self, **kwargs):
        # vocabulary / layer sizes
        self.user_nums = kwargs.get("user_nums")
        self.item_nums = kwargs.get("item_nums")
        self.cat_nums = kwargs.get("cat_nums")
        self.embeding_size = kwargs.get("embedding_size")
        self.hidden_size = kwargs.get("hidden_size")
        self.attention_size = kwargs.get("attention_size")
        # training options
        self.use_negsampling = kwargs.get("use_negsampling")
        self.keep_prob = kwargs.get("keep_prob")
        self.learning_rate = kwargs.get("learning_rate")

    def embedding(self):
        """Create embedding tables and look up user/item/category ids."""
        with tf.variable_scope("embedding_layer"):
            # user embeddings
            self.user_embedding_table = tf.get_variable('user_embedding_table',
                                                        [self.user_nums, self.embeding_size])
            self.user_embeddings = tf.nn.embedding_lookup(self.user_embedding_table, self.users)
            # item embeddings: target, history and (optionally) negatives
            self.item_embedding_table = tf.get_variable("item_embedding_table",
                                                        [self.item_nums, self.embeding_size])
            self.item_embeddings = tf.nn.embedding_lookup(self.item_embedding_table, self.target_x)
            self.hist_item_embeddings = tf.nn.embedding_lookup(self.item_embedding_table, self.history_x)
            if self.use_negsampling:
                self.neg_item_embeddings = tf.nn.embedding_lookup(self.item_embedding_table, self.neg_x)
            # category embeddings: target, history and (optionally) negatives
            self.cat_embedding_table = tf.get_variable("cat_embedding_table",
                                                       [self.cat_nums, self.embeding_size])
            self.cat_embeddings = tf.nn.embedding_lookup(self.cat_embedding_table, self.target_cat)
            self.hist_cat_embeddings = tf.nn.embedding_lookup(self.cat_embedding_table, self.history_cat)
            if self.use_negsampling:
                self.neg_cat_embeddings = tf.nn.embedding_lookup(self.cat_embedding_table, self.neg_cat)

        # item+category concatenated: target is (B, 2E), history is (B, T, 2E)
        self.x_embedding = tf.concat([self.item_embeddings, self.cat_embeddings], axis=1)
        self.hist_x_embedding = tf.concat([self.hist_item_embeddings, self.hist_cat_embeddings], axis=2)
        if self.use_negsampling:
            # use only the first negative-sampled item per position
            self.neg_x_embedding = tf.concat(
                [self.neg_item_embeddings[:, :, 0, :], self.neg_cat_embeddings[:, :, 0, :]], axis=-1
            )
            self.neg_x_embedding = tf.reshape(self.neg_x_embedding,
                                              [-1, self.neg_item_embeddings.shape[1], self.embeding_size*2])
            self.neg_hist_x_embedding = tf.concat([self.neg_item_embeddings, self.neg_cat_embeddings], -1)

    def auxiliary_loss(self, h_states, click_seq, no_click_seq, mask, stag=None):
        """Binary log-loss pushing GRU states toward clicked items and away
        from negative samples, masked by sequence validity."""
        mask = tf.cast(mask, tf.float32)
        click_input = tf.concat([h_states, click_seq], -1)
        noclick_input = tf.concat([h_states, no_click_seq], -1)
        # probability of "click" (index 0 of the 2-way softmax)
        click_prop_ = self.auxiliary_net(click_input, stag=stag)[:, :, 0]
        noclick_prop_ = self.auxiliary_net(noclick_input, stag=stag)[:, :, 0]
        click_loss_ = -tf.reshape(tf.log(click_prop_), [-1, click_seq.shape[1]]) * mask
        noclick_loss_ = -tf.reshape(tf.log(1.0 - noclick_prop_), [-1, noclick_input.shape[1]]) * mask
        # was `loss_ tf.reduce_mean(...)` — missing assignment operator
        loss_ = tf.reduce_mean(click_loss_ + noclick_loss_)
        return loss_

    def auxiliary_net(self, input, stag="auxiliary_net"):
        """Small shared MLP producing a 2-way softmax for the auxiliary loss."""
        bn1 = tf.layers.batch_normalization(inputs=input, name="bn1"+stag, reuse=tf.AUTO_REUSE)
        dnn1 = tf.layers.dense(bn1, 100, activation=tf.nn.sigmoid, name="f1"+stag, reuse=tf.AUTO_REUSE)
        dnn1 = tf.nn.dropout(dnn1, keep_prob=self.keep_prob)
        dnn2 = tf.layers.dense(dnn1, 50, activation=tf.nn.sigmoid, name="f2"+stag, reuse=tf.AUTO_REUSE)
        dnn2 = tf.nn.dropout(dnn2, keep_prob=self.keep_prob)
        dnn3 = tf.layers.dense(dnn2, 2, activation=None, name="f3"+stag, reuse=tf.AUTO_REUSE)
        # small epsilon keeps the subsequent log() finite
        y_hat = tf.nn.softmax(dnn3) + 0.00000001
        return y_hat

    def fcn_attention(self, query, facts, attention_size, mask,
                      stag="null", mode="SUM", softmax_stag=1, time_major=False,
                      return_alphas=False, for_cnn=False):
        """DIN-style attention of the target (*query*) over history *facts*.

        mode="SUM" returns the attention-weighted sum (B, 1, H);
        otherwise the per-step weighted facts (B, T, H).  With
        return_alphas=True the raw score tensor is returned as well.
        """
        if isinstance(facts, tuple):
            # bi-rnn output: concatenate forward and backward states
            facts = tf.concat(facts, 2)
        if len(facts.get_shape().as_list()) == 2:
            facts = tf.expand_dims(facts, 2)
        if time_major:
            # (T, B, D) -> (B, T, D)
            facts = tf.transpose(facts, [1, 0, 2])
        mask = tf.equal(mask, tf.ones_like(mask))
        facts_size = facts.get_shape().as_list()[-1]  # hidden size of the rnn layer
        # project the query to the facts' width, then tile over time
        query = tf.layers.dense(query, facts_size, activation=tf.nn.relu, name="f1"+stag)
        query = tf.nn.dropout(query, keep_prob=self.keep_prob)
        query = tf.expand_dims(query, 1)
        queries = tf.tile(query, [1, tf.shape(facts)[1], 1])  # B * T * H
        # DIN feature interaction: [q, k, q-k, q*k]
        din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
        d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
        d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
        d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)  # B * T * 1
        d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])  # B * 1 * T
        scores = d_layer_3_all
        # mask padded steps with a large negative before softmax
        key_masks = tf.expand_dims(mask, 1)  # B * 1 * T
        paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
        if not for_cnn:
            scores = tf.where(key_masks, scores, paddings)
        if softmax_stag:
            scores = tf.nn.softmax(scores)
        if mode == "SUM":
            output = tf.matmul(scores, facts)
        else:
            scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])  # B * T
            output = facts * tf.expand_dims(scores, -1)  # B * T * H
            output = tf.reshape(output, tf.shape(facts))
        if return_alphas:
            return output, scores
        return output

    def __call__(self):
        """Build placeholders, the full graph, losses and the train op."""
        # --- placeholders (dtype passed positionally: tf.placeholder's
        # signature is (dtype, shape, name); the original passed the shape
        # positionally after a keyword, which is a SyntaxError) ---
        ## history item ids, (B, T)
        self.history_x = tf.placeholder(tf.int32, [None, None], name="history_items")
        ## category ids of the history items, (B, T)
        self.history_cat = tf.placeholder(tf.int32, [None, None], name="history_cats")
        ## user ids, (B,)
        self.users = tf.placeholder(tf.int32, [None, ], name="user_ids")
        ## target item id, (B,)
        self.target_x = tf.placeholder(tf.int32, [None, ], name="target_x")
        ## category id of the target item, (B,)
        self.target_cat = tf.placeholder(tf.int32, [None, ], name="target_cat")
        ## validity mask over the history, (B, T)
        self.mask = tf.placeholder(tf.float32, [None, None], name="mask")
        ## true history length per example, (B,)
        self.seq_len = tf.placeholder(tf.int32, [None], name="seq_len")
        ## labels
        self.target = tf.placeholder(tf.float32, [None, None], name="target")
        if self.use_negsampling:
            ## negative samples: batch * seq_len * n_samples
            self.neg_x = tf.placeholder(tf.int32, [None, None, None], name="neg_items")
            self.neg_cat = tf.placeholder(tf.int32, [None, None, None], name="neg_cats")

        self.embedding()

        # --- interest extraction: GRU over the behaviour sequence ---
        with tf.name_scope("rnn_layer_1"):
            # dynamic_rnn takes `scope`, not `name`
            rnn_outputs, _ = tf.nn.dynamic_rnn(tf.nn.rnn_cell.GRUCell(self.hidden_size),
                                               inputs=self.hist_x_embedding,
                                               sequence_length=self.seq_len,
                                               dtype=tf.float32,
                                               scope="gru1")
        if self.use_negsampling:
            # auxiliary loss: state at t predicts the item at t+1 vs a negative.
            # Guarded here because neg_x_embedding only exists with negsampling.
            self.aux_loss = self.auxiliary_loss(rnn_outputs[:, :-1, :],
                                                self.hist_x_embedding[:, 1:, :],
                                                self.neg_x_embedding[:, 1:, :],
                                                self.mask[:, 1:],
                                                stag="gru")

        # --- interest evolution: target attention, then AUGRU ---
        with tf.name_scope("attention_layer_1"):
            att_outputs, alphas = self.fcn_attention(self.x_embedding, rnn_outputs, self.attention_size,
                                                     self.mask, softmax_stag=1, stag="1_1", mode="LIST",
                                                     return_alphas=True)
        with tf.name_scope("rnn_2"):
            # AUGRU: attention scores gate the update gate (per the DIEN
            # reference implementation — the original call was truncated)
            augru_ouputs, final_state = dynamic_rnn(VecAttGRUCell(self.hidden_size),
                                                    inputs=rnn_outputs,
                                                    att_scores=tf.expand_dims(alphas, -1),
                                                    sequence_length=self.seq_len,
                                                    dtype=tf.float32,
                                                    scope="gru2")

        # --- prediction MLP input ---
        # history embeddings are (B, T, 2E); pool over time so every part
        # of the concat is rank-2 (the original concatenated a 3-D tensor,
        # which cannot concat with the 2-D pieces — per the reference impl)
        hist_x_sum = tf.reduce_sum(self.hist_x_embedding, 1)
        inp = tf.concat([self.user_embeddings,
                         self.x_embedding,
                         hist_x_sum,
                         self.x_embedding * hist_x_sum,
                         final_state], axis=1)
        self.fcn_net(inp, use_dice=True)

    def fcn_net(self, inp, use_dice=False):
        """Final MLP (BN -> 200 -> 80 -> 2 softmax); builds loss/optimizer/acc."""
        bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
        dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
        if use_dice:
            dnn1 = dice(dnn1, name='dice_1')
        else:
            dnn1 = prelu(dnn1, 'prelu1')
        dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
        if use_dice:
            dnn2 = dice(dnn2, name='dice_2')
        else:
            dnn2 = prelu(dnn2, name='prelu2')
        dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
        # small epsilon keeps log() finite
        self.y_hat = tf.nn.softmax(dnn3) + 0.00000001
        with tf.name_scope('Metrics'):
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)
            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat), self.target), tf.float32))
|
# Demo: the runtime type of assorted literal values.
for sample in ('HelloWorld', 10, 10.3, False, 1j):
    print(type(sample))
print(type('10'))  # TODO: this is a string, not a number
print(type(10+3.5))  # int + float promotes to float
|
## https://leetcode.com/problems/reverse-linked-list-ii/
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 92 — reverse the sublist between 1-based positions m and n."""

    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        # Copy all values out of the list...
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        # ...reorder them: prefix, reversed middle [m..n], suffix...
        reordered = values[:m - 1] + values[m - 1:n][::-1] + values[n:]
        # ...and rebuild a fresh linked list from the reordered values.
        dummy = ListNode()
        tail = dummy
        for value in reordered:
            tail.next = ListNode(value)
            tail = tail.next
        return dummy.next
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def pca_variance(pca, dataframe):
    """Bar-plot the PCA component feature weights, annotated with each
    component's explained-variance ratio.

    pca: a fitted PCA-like object exposing components_ and
    explained_variance_ratio_; dataframe: the data it was fitted on (its
    column names label the bars). Draws on a new matplotlib figure.
    """
    dimensions = ['Dimension {}'.format(i) for i in range(1,len(pca.components_)+1)]
    # component loadings: one row per dimension, one column per feature
    components = pd.DataFrame(np.round(pca.components_, 4), columns = dataframe.keys())
    ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)
    variance_ratios = pd.DataFrame(np.round(ratios, 4), columns = ['Explained Variance'])
    variance_ratios.index = dimensions
    fig, ax = plt.subplots(figsize = (14,8))
    # Plot the feature weights as a function of the components
    components.plot(ax = ax, kind = 'bar');
    ax.set_ylabel("Feature Weights")
    ax.set_xticklabels(dimensions, rotation=0)
    # Display the explained variance ratios above each group of bars
    for i, ev in enumerate(pca.explained_variance_ratio_):
        ax.text(i-0.40, ax.get_ylim()[1] + 0.05, "Explained Variance\n %.4f"%(ev))
def pca_cumsum(pca):
    """Plot per-component and cumulative explained variance for a fitted PCA.

    NOTE(review): the x-axis is hard-coded to 6 components (np.arange(1, 7));
    confirm the PCA was fitted with exactly 6 components.
    """
    # create an x-axis variable for each pca component
    x = np.arange(1,7)
    # plot the cumulative variance
    plt.plot(x, np.cumsum(pca.explained_variance_ratio_), '-o', color='black')
    # plot the components' variance
    plt.bar(x, pca.explained_variance_ratio_, align='center', alpha=0.5)
    # plot styling
    plt.ylim(0, 1.05)
    plt.annotate('Cumulative\nexplained\nvariance',
                 xy=(3.7, .88), arrowprops=dict(arrowstyle='->'), xytext=(4.5, .6))
    # label each cumulative point with its value
    for i,j in zip(x, np.cumsum(pca.explained_variance_ratio_)):
        plt.annotate(str(j.round(4)),xy=(i+.2,j-.02))
    plt.xticks(range(1,7))
    plt.xlabel('PCA components')
    plt.ylabel('Explained Variance')
    plt.show()
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from skimage import io, color, filters, feature, restoration
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
# Resolve project-relative directories, walking up from this file's location.
this_file = os.path.realpath(__file__)
SCRIPT_DIRECTORY = os.path.dirname(this_file)       # folder containing this script
ROOT_DIRECTORY = os.path.dirname(SCRIPT_DIRECTORY)  # project root
DOCUMENTS = os.path.dirname(ROOT_DIRECTORY)         # parent of the project
DATA_STASH = os.path.join(DOCUMENTS, 'line_remover_2_data_stash')
GRAY_STASH = os.path.join(DATA_STASH, 'gray')
BINARY_STASH = os.path.join(DATA_STASH, 'binar')
sys.path.append(ROOT_DIRECTORY)
class imageGenerator(object):
    '''
    This class takes a path to an image, the directory we want to
    export images to, and the number of images we want to create. With that
    information, we generate new images that are our original image but skewed,
    zoomed, rotated, etc. The new images are saved into our export directory.
    '''
    def __init__(self, image, export_path, n):
        self.image = image              # path to the source image
        self.export_path = export_path  # directory the augmented images are written to
        self.n = n                      # number of augmented images to create
        self.create_and_save_images()
    def create_img_generator(self):
        '''Create the Keras ImageDataGenerator used for the augmentation.'''
        self.datagen = ImageDataGenerator(
            # rotation_range=60,
            horizontal_flip=True,
            fill_mode='nearest')
        print('generator created')
    def create_pictures(self):
        '''Run the generator from create_img_generator and save self.n
        augmented copies of the image to self.export_path.'''
        img = load_img(self.image)
        x = img_to_array(img)  # this is a Numpy array with shape (3, 150, 150)
        x = x.reshape((1,) + x.shape)  # this is a Numpy array with shape (1, 3, 150, 150)
        # the .flow() command below generates batches of randomly transformed
        # images and saves the results to self.export_path
        # NOTE(review): save_prefix is the full source path — confirm intended.
        i = 0
        for batch in self.datagen.flow(x, batch_size=1,
            save_to_dir=self.export_path, save_prefix=self.image, save_format='png'):
            i += 1
            if i >= self.n:  # bug fix: 'i > self.n' produced n+1 images, not n
                break  # otherwise the generator would loop indefinitely
    def create_and_save_images(self):
        '''Build the generator, then generate and save the images.'''
        self.create_img_generator()
        self.create_pictures()
if __name__ == '__main__':
    print('Hey')
    # Generate 200 augmented copies of one grayscale 'lines' sample image.
    imageGenerator(os.path.join(GRAY_STASH, 'all/lines/509_img_190_348.png'),
                   os.path.join(GRAY_STASH, 'all/lines'), 200)
    # imageGenerator(os.path.join(ROOT_DIRECTORY, 'drawings'),
    #                os.path.join(ROOT_DIRECTORY, 'generated_drawings'), 100)
|
# ! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "Miller"
# Datetime: 2019/11/20 9:09
import pymysql
# Insert five actor rows in one round-trip. autocommit is disabled, so the
# transaction must be committed explicitly before closing.
conn = pymysql.Connect(host="127.0.0.1", port=3306, user="root", password="123", db="db10", autocommit=False)
cursor = conn.cursor()
cursor.executemany("insert into actor(name) values (%s)", args=("miller2", "miller3","miller4","miller5","miller6"))
conn.commit()  # bug fix: without commit() the INSERTs were rolled back on close
cursor.close()
conn.close()
|
#SendMail.py
import smtplib
from datetime import date
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
def sendEmailFn(fromaddr, toaddr, mailPass, zipFiles, dirPath,
                smtp_host='mail_smtp_host_value', smtp_port=587):
    """Email the given zip files as attachments over SMTP with STARTTLS.

    fromaddr: sender address; toaddr: list of recipient addresses;
    mailPass: sender's SMTP password; zipFiles: iterable of attachment paths;
    dirPath: path prefix stripped from each attachment path to form its
    filename; smtp_host/smtp_port: SMTP server (replace the placeholder host).
    Returns 0 on success, 1 on any failure.
    """
    print('SendMail')
    try:
        # build mail message + attachments
        message = MIMEMultipart()
        message['From'] = fromaddr
        message['To'] = ','.join(toaddr)
        message['Subject'] = "Sabre Reports {}".format(date.today())
        body = 'This is a test mail. Current date {}. Please do not respond.'.format(date.today())
        message.attach(MIMEText(body, 'plain'))
        for toAttach in zipFiles:
            # fix: with-statement closes each attachment file (the originals
            # were left open)
            with open(toAttach, "rb") as attachFile:
                attachedFile = MIMEBase('application', 'octet-stream')
                attachedFile.set_payload(attachFile.read())
            encoders.encode_base64(attachedFile)
            fileName = str(toAttach).replace(dirPath,'')
            attachedFile.add_header('Content-Disposition', "attachment;filename= %s" % fileName)
            message.attach(attachedFile)
        # connect to a mail server and send mail
        # bug fix: the original referenced an undefined name 'portValue', which
        # always raised and made the function return 1; host/port are now
        # keyword parameters with defaults.
        server = smtplib.SMTP(smtp_host, smtp_port)
        server.starttls()
        server.login(fromaddr, mailPass)
        mailMessage = message.as_string()
        server.sendmail(fromaddr, toaddr, mailMessage)
        server.quit()
        print("Mail Sended")  # bug fix: was unreachable (placed after return)
        return 0
    except Exception as e:
        print('Error sending mail message ', e)
        return 1
import sys
# Right-align the phrase at column 42 by left-padding it with dashes.
phrase = "The right format"
print('-' * (42 - len(phrase)) + phrase, end='')
|
class Room:
    """A hotel room tracking its guests, their money, and a song list."""
    def __init__(self, room_number, capacity, room_cost):
        self.room_number = room_number
        self.song_list = []         # titles added via add_song_to_list
        self.guests = []            # names of currently checked-in guests
        self.capacity = capacity    # maximum number of guests
        self.room_cost = room_cost  # amount charged to the bar tab per check-in
        self.total_guest_money = 0  # sum of money of current guests
        self.guest_money = {}       # guest name -> money held at check-in
    def check_guest_in(self, guest, bar):
        """Check a guest in when there is space and they can afford the room.

        NOTE(review): "Room full" is returned unconditionally, even after a
        successful check-in — confirm this is the intended contract.
        """
        has_space = len(self.guests) < self.capacity
        can_pay = guest.money >= self.room_cost
        if has_space and can_pay:
            self.guests.append(guest.name)
            self.guest_money[guest.name] = guest.money
            self.total_guest_money += guest.money
            bar.tab += self.room_cost
        return "Room full"
    def check_guest_out(self, guest):
        """Remove a guest and their money from the room's records."""
        self.guests.remove(guest.name)
        del self.guest_money[guest.name]
        self.total_guest_money -= guest.money
    def add_song_to_list(self, song):
        """Record the title of a song played in this room."""
        self.song_list.append(song.title)
    # def pay_bar_tab(self, bar):
    #     guest_remaining_money = "£" + str(bar.tab) + " bill paid for room " + str(self.room_number) + "; "
    #     if self.total_guest_money >= bar.tab:
    #         for person in self.guests:
    #             self.guest_money[person] = (self.guest_money[person] / self.total_guest_money) * (self.total_guest_money - bar.tab)
    #         self.total_guest_money -= bar.tab
    #         for person in self.guests:
    #             guest_remaining_money += person + " has £" + str(self.guest_money[person]) + " remaining. "
    #         return guest_remaining_money
    #     return "Not enough money!"
|
# -*- coding:utf-8 -*-
"""
Python data structure tutorial: list
@author:dell
@file: Day07_05_list.py
@time: 2020/01/08
"""
if __name__ == '__main__':
    # creation, way one: from an iterable
    list1 = list("abcde")
    # creation, way two: literal
    list2 = [1, 2, 3]
    # iteration: by element, by index, and with enumerate
    for ele in list1:
        print(ele)
    for i in range(len(list2)):
        print(list2[i])
    for i, ele in enumerate(list2):
        print(i, ele)
    # slicing
    print("切片操作")
    print(list1[1:2])
    print(list1[1:4])
    print(list1[1:])
    print(list1[:4])
    print(list1[-3])
    print(list1[::-1]) # reversed copy
    print(list1[:]) # shallow copy
    # adding elements
    list3 = [1, 5, 3, 90, 100, 30]
    list3.append(30) # append an element at the end
    list3.insert(1, 400) # insert an element at the given index
    list4 = [55, 66]
    list3 += list4 # list concatenation
    # remove a given value (only the first occurrence if duplicated)
    if 30 in list3:
        list3.remove(30)
    print(list3)
    # remove the element at a given index
    list3.pop(0)
    print(list3)
    # empty the list
    list3.clear()
    print(list3)
    # sorting
    list1 = ['orange', 'apple', 'zoo', 'internationalization', 'blueberry']
    list2 = sorted(list1)
    # sorted returns a sorted copy and does not modify the input list;
    # ideally functions are designed side-effect free, like sorted
    list3 = sorted(list1, reverse=True)
    # key= sorts by string length instead of the default alphabetical order
    list4 = sorted(list1, key=len)
    print(list1)
    print(list2)
    print(list3)
    print(list4)
    # list.sort() sorts the list object itself, in place
    list1.sort(reverse=True)
    print(list1)
    # comprehensions
    import sys
    f = [x for x in range(1, 10)]
    print(f)
    f = [x + y for x in 'ABCDE' for y in '1234567']
    print(f)
    f = [x ** 2 for x in range(1, 1000)]
    print(sys.getsizeof(f)) # number of bytes the object occupies
    print(f)
    # generator (note: list1 below is of type generator)
    def my_list():
        for i in range(100):
            yield i*2
    list1 = my_list()
    print(type(list1))
    for x in list1:
        print(x, end=" ")
|
# Read N (number read but unused below), K and L, and print the knockout
# round in which contestants K and L can first meet (see diagram below).
N, K, L = map(int, input().split())
rounds = 1  # fix: renamed from 'round', which shadowed the builtin
while True:
    # K and L meet when they are adjacent (|K-L| == 1) and paired together
    # (the smaller of the two slot numbers is odd).
    if abs(K-L) == 1 and min(K,L) % 2:
        break
    # advance both contestants to their slot in the next round
    K = (K+1)//2
    L = (L+1)//2
    rounds += 1
print(rounds)
'''
1 2 3 4 5 6 7 8 9 10 11
1 2 3 4 5 6
1 2 3
1 2
1
'''
"""Django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from myDjango.views import index,chart,chart_dq,chart_pp,bj_chart,gz_chart
# URL routes: admin site plus the chart views imported from myDjango.views.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('index/',index,name='index'),
    path('chart/',chart,name='chart'),
    path('chart_dq/', chart_dq, name='chart_dq'),
    # NOTE(review): the three routes below lack the trailing slash used by the
    # routes above — confirm whether that inconsistency is intentional.
    path('chart_pp', chart_pp, name='chart_pp'),
    path('bj_chart', bj_chart, name='bj_chart'),
    path('gz_chart', gz_chart, name='gz_chart'),
]
|
import pandas as pd
from sklearn.linear_model import LogisticRegression
# Train a logistic-regression model on train.csv and score test2.csv,
# writing the predictions to test2-predictions.csv.
train = pd.read_csv("train.csv")
print(train.head())
# impute missing values: mean monthly income, 1 dependent by default
monthly_income_mean = train["MonthlyIncome"].mean()
train = train.fillna({"MonthlyIncome" : monthly_income_mean, "NumberOfDependents": 1})
y = train['SeriousDlqin2yrs']
print(y.head())
X = train.drop('SeriousDlqin2yrs', axis=1)
print(X.head())
model = LogisticRegression(max_iter=200).fit(X,y)
test2 = pd.read_csv("test2.csv")
# bug fix (train/serve skew): impute the test set with the TRAINING mean —
# the original used the test set's own mean, leaking test statistics.
test2 = test2.fillna({"MonthlyIncome" : monthly_income_mean, "NumberOfDependents": 1})
test2_y = test2['SeriousDlqin2yrs']
test2_X = test2.drop("SeriousDlqin2yrs", axis=1)
test2_predictions = model.predict(test2_X)
pd.DataFrame({"prediction":test2_predictions}).to_csv("test2-predictions.csv")
|
import cv2
import numpy as np
import pandas as pd
import os
import csv
from csv import writer
from csv import reader
class BloodVesselsExtract:
    """Extract blood vessels from retinal fundus images and accumulate their
    white-pixel density into records.csv."""
    def extract_bv(self, image):
        """Return a binary vessel mask for a BGR fundus image.

        Pipeline: CLAHE on the green channel, alternating sequential
        open/close filtering, background subtraction, small-blob removal,
        then removal of round (non-vessel) contours.
        """
        b, green_fundus, r = cv2.split(image)
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        contrast_enhanced_green_fundus = clahe.apply(green_fundus)
        # applying alternate sequential filtering (3 times closing opening)
        r1 = cv2.morphologyEx(contrast_enhanced_green_fundus, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
        R1 = cv2.morphologyEx(r1, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)), iterations=1)
        r2 = cv2.morphologyEx(R1, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
        R2 = cv2.morphologyEx(r2, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (11, 11)), iterations=1)
        r3 = cv2.morphologyEx(R2, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
        R3 = cv2.morphologyEx(r3, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (23, 23)), iterations=1)
        f4 = cv2.subtract(R3, contrast_enhanced_green_fundus)
        f5 = clahe.apply(f4)
        # removing very small contours through area parameter noise removal
        ret, f6 = cv2.threshold(f5, 15, 255, cv2.THRESH_BINARY)
        mask = np.ones(f5.shape[:2], dtype="uint8") * 255
        contours, hierarchy = cv2.findContours(f6.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)  # change removed im2
        for cnt in contours:
            if cv2.contourArea(cnt) <= 200:
                cv2.drawContours(mask, [cnt], -1, 0, -1)
        im = cv2.bitwise_and(f5, f5, mask=mask)
        ret, fin = cv2.threshold(im, 15, 255, cv2.THRESH_BINARY_INV)
        newfin = cv2.erode(fin, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)), iterations=1)
        # removal of blobs of unwanted bigger chunks taking in consideration they are not straight lines
        fundus_eroded = cv2.bitwise_not(newfin)
        xmask = np.ones(image.shape[:2], dtype="uint8") * 255
        xcontours, xhierarchy = cv2.findContours(fundus_eroded.copy(), cv2.RETR_LIST,
                                                 cv2.CHAIN_APPROX_SIMPLE)  # changed removed x1
        for cnt in xcontours:
            shape = "unidentified"
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.04 * peri, False)
            # rounded mid-sized contours are treated as non-vessel blobs
            if len(approx) > 4 and cv2.contourArea(cnt) <= 3000 and cv2.contourArea(cnt) >= 100:
                shape = "circle"
            else:
                shape = "veins"
            if (shape == "circle"):
                cv2.drawContours(xmask, [cnt], -1, 0, -1)
        final_image = cv2.bitwise_and(fundus_eroded, fundus_eroded, mask=xmask)
        blood_vessels = final_image
        return blood_vessels
    # Append a column to an existing csv using csv.reader and csv.writer.
    # bug fix: this was declared without 'self', so calling it on an instance
    # would have bound the instance to input_file; @staticmethod makes the
    # intended plain-function behaviour explicit.
    @staticmethod
    def add_column_in_csv(input_file, output_file, transform_row):
        # Open the input_file in read mode and output_file in write mode
        with open(input_file, 'r') as read_obj, \
                open(output_file, 'w', newline='') as write_obj:
            # Create a csv.reader object from the input file object
            csv_reader = reader(read_obj)
            # Create a csv.writer object from the output file object
            csv_writer = writer(write_obj)
            # Read each row of the input csv file as list
            for row in csv_reader:
                # Pass the list / row in the transform function to add column text for this row
                transform_row(row, csv_reader.line_num)
                # Write the updated row / list to the output file
                csv_writer.writerow(row)
    def main(self):
        """Process every file in ./images: display its vessel mask and write
        the vessel pixel density into records.csv."""
        current_directory = os.getcwd()
        # bug fix: was current_directory + "\images/" — a backslash-mixed,
        # platform-specific concatenation; build the path portably instead.
        pathFolder = os.path.join(current_directory, "images")
        filesArray = [x for x in os.listdir(pathFolder) if os.path.isfile(os.path.join(pathFolder, x))]
        lst = []
        for file_name in filesArray:
            file_name_no_extension = os.path.splitext(file_name)[0]
            fundus = cv2.imread(os.path.join(pathFolder, file_name))
            bve = BloodVesselsExtract()
            bloodvessel = bve.extract_bv(fundus)
            # cv2.imwrite(destinationFolder + file_name_no_extension + "_bloodvessel.png", bloodvessel)
            resized_bvimg = cv2.resize(bloodvessel, (360,360))
            cv2.imshow("Blood Vessels", resized_bvimg)
            cv2.waitKey(0)
            # calculation of density of white pixels representing blood vessels
            n_white_pix = np.sum(bloodvessel == 255)
            height = bloodvessel.shape[0]
            width = bloodvessel.shape[1]
            total_pix = height * width
            density_white_pix = n_white_pix / total_pix
            print("No. of white pixels = ", n_white_pix)
            print("Density of white pixels = ", density_white_pix)
            # for preparation of training data
            # list of tuples of required data
            lst.append((density_white_pix))
            # NOTE(review): this assigns the CURRENT scalar to the entire
            # column and rewrites records.csv on every iteration — confirm
            # that overwrite is intended.
            df2 = pd.read_csv(current_directory + '/records.csv')
            df2["density_of_blood_vessels"] = density_white_pix
            df2.to_csv("records.csv", index=False)
            # df2 = pd.DataFrame({'image_name': [file_name_no_extension], 'density_of_blood_vessels': [density_white_pix]})
            # df2.to_csv('records.csv')
        # Create a DataFrame object for density of blood vessels
        df1 = pd.DataFrame(lst, columns=['density_of_blood_vessels'])
        # print(df1)
        # for preparation of training data
        # header_of_new_col = 'density_of_blood_vessels'
        # Add a list as column
        # add_column_in_csv('C:/Users/Jenish Tamrakar/Desktop/DR/training_sample.csv',
        #                   'C:/Users/Jenish Tamrakar/Desktop/DR/training_sample1.csv',
        #                   lambda row, line_num: row.append(lst[line_num - 1]))
if __name__ == "__main__":
bve = BloodVesselsExtract()
bve.main() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class FaceExtInfo(object):
    """Optional face-query filters (age range and query type) for the Alipay
    OpenAPI; mirrors the generated-SDK model conventions."""
    # the three serialisable fields, in their canonical order
    _FIELDS = ('max_age', 'min_age', 'query_type')

    def __init__(self):
        self._max_age = None
        self._min_age = None
        self._query_type = None

    @property
    def max_age(self):
        return self._max_age

    @max_age.setter
    def max_age(self, value):
        self._max_age = value

    @property
    def min_age(self):
        return self._min_age

    @min_age.setter
    def min_age(self, value):
        self._min_age = value

    @property
    def query_type(self):
        return self._query_type

    @query_type.setter
    def query_type(self, value):
        self._query_type = value

    def to_alipay_dict(self):
        """Serialise the truthy fields to a plain dict; falsy values are
        skipped, matching the generated-SDK convention."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a FaceExtInfo from a dict, or return None for empty input."""
        if not d:
            return None
        o = FaceExtInfo()
        for field in FaceExtInfo._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
import functools
import hashlib
import logging
try:
    from django.core.cache import caches
    from django.conf import settings
except ImportError:
    from diskcache import Cache as caches
# module-level logger for the djangoweasycache package
logger = logging.getLogger('djangoweasycache')
class Conf(object):
    """
    Configuration class
    """
    # Read WEASY_CACHE from django settings; fall back to an empty config when
    # 'settings' is undefined (diskcache fallback path) or the attribute is
    # missing.
    try:
        conf = settings.WEASY_CACHE
    except NameError:
        conf = {}
    except AttributeError:
        conf = {}
    # Log output level (name of a logging level, e.g. 'INFO')
    LOG_LEVEL = conf.get('log_level', 'INFO')
# Set up standard logging handler in case there is none
if not logger.handlers:
    logger.setLevel(level=getattr(logging, Conf.LOG_LEVEL))
    logger.propagate = False  # keep records out of the root logger
    formatter = logging.Formatter(fmt='%(asctime)s [WCACHE] %(levelname)s %(message)s',
                                  datefmt='%H:%M:%S')
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
def join_(*args):
    """Concatenate the string forms of *args, space-separated.

    Leading whitespace — including any at the start of the first argument's
    string form — is stripped, matching the original accumulate-and-lstrip
    behaviour.
    """
    return "".join(" " + str(piece) for piece in args).lstrip()
def cache_get_key(*args, **kwargs):
    """Build a storage key from call arguments.

    Returns (key, serialise): key is the md5 hex digest of every argument's
    string form concatenated; serialise is the list of those string parts
    (args first, then each kwarg name followed by its value).
    """
    serialise = [str(arg) for arg in args]
    for name, value in kwargs.items():
        serialise.append(str(name))
        serialise.append(str(value))
    digest = hashlib.md5("".join(serialise).encode('utf-8')).hexdigest()
    return digest, serialise
def cache_define_key(fn, override_key=None, override_key_for_self=None, *args, **kwargs):
    """Build the cache key for fn according to the override options.

    Priority: override_key_for_self (an attribute looked up on args[0], i.e.
    self) > override_key (a fixed string) > default (fn name plus the raw
    args/kwargs). Returns the (key, serialise) pair from cache_get_key.
    """
    if override_key_for_self is not None:
        # args[0] is 'self'; use one of its attributes (falling back to the
        # attribute name itself) as the leading key component
        self_part = getattr(args[0], override_key_for_self, override_key_for_self)
        return cache_get_key(fn.__name__, [self_part, *args[1:]], **kwargs)
    if override_key is not None:
        # fixed custom key component; kwargs deliberately ignored
        return cache_get_key(fn.__name__, [override_key])
    # default: mix of the actual call args and kwargs
    return cache_get_key(fn.__name__, *args, **kwargs)
def get_cache(cache_label, use_diskcache=False):
    """Return the cache for cache_label.

    Looks the label up in the django cache registry by default; constructs a
    diskcache Cache at that path when use_diskcache is True.
    """
    return caches(cache_label) if use_diskcache else caches[cache_label]
# decorator for caching functions
def cache_for(cache_label, time=None, override_key=None, override_key_for_self=None, use_diskcache=False):
    """
    :param cache_label: key for django cache
    :param time: timeout in seconds
    :param override_key: if not None defines cache key
    :param override_key_for_self: if not None defines self properties as first part of cache key
    :param use_diskcache: if True uses diskcache lib instead of django cache framework
    :return: result of decorated function
    """
    def decorator(fn):
        @functools.wraps(fn)  # fix: preserve the wrapped function's metadata
        def wrapper(*args, **kwargs):
            cache = get_cache(cache_label, use_diskcache)
            key, serialise = cache_define_key(fn, override_key, override_key_for_self, *args, **kwargs)
            result = cache.get(key)
            if result is None:
                # cache miss: compute and store, honouring the timeout if set
                result = fn(*args, **kwargs)
                if time is not None:
                    cache.set(key, result, time)
                else:
                    cache.set(key, result)
                logger.info('Cache {} set {}'.format(cache_label, serialise))
            else:
                logger.info('Cache {} hit {}'.format(cache_label, serialise))
            return result
        return wrapper
    return decorator
|
# test.py
from src.WeatherAPP1 import WeatherAPP1
from src.WeatherAPP2 import WeatherAPP2
from src.WeatherAPP3 import WeatherAPP3
from src.WeatherSDK import WeatherSDK
from src.air_quality import AirQuality
# Observer-pattern demo: three weather apps subscribe to one SDK, then a
# single weather update (temperature=1, humidity=2, lightly polluted air)
# is pushed to all of them.
sdk = WeatherSDK()
app1 = WeatherAPP1(sdk)
app2 = WeatherAPP2(sdk)
app3 = WeatherAPP3(sdk)
# NOTE(review): 'registeObserver' spelling comes from the SDK's API.
sdk.registeObserver(app1)
sdk.registeObserver(app2)
sdk.registeObserver(app3)
sdk.changeWeatherInfo(1, 2, AirQuality.LIGHTLY_POLLUTED)
# Expected console output (translated from the original Chinese):
# ==========
# Weather APP1
# Current temperature: 1.00 degrees Celsius
# Current humidity: 2.00
# Current air quality: lightly polluted
# ==========
# ==========
# Weather APP2
# Current humidity: 2.00
# Rain hint: no rain expected, feel free to go out
# ==========
# ==========
# Weather APP3
# Current air quality: lightly polluted
# Exercise advice: poor air quality, better stay home
# ==========
|
import pygame
import pygame.freetype
import string
import settings as st
from math import sqrt
SQRT2 = sqrt(2)
def floatcastable(value):
    """Return True if value can be converted to float, else False.

    Fixes: the parameter was named 'str' (shadowing the builtin), and the
    bare 'except' also swallowed SystemExit/KeyboardInterrupt — only the
    conversion errors float() actually raises are caught now.
    """
    try:
        float(value)
        return True
    except (TypeError, ValueError):
        return False
# Computer Modern (LaTeX-style) font faces: regular, italic, bold, bold-italic.
LATEX_FONT_PATH = st.font_locator("cmu.ttf")
LATEX_iFONT_PATH = st.font_locator("cmu_i.ttf")
LATEX_bFONT_PATH = st.font_locator("cmu_b.ttf")
LATEX_biFONT_PATH = st.font_locator("cmu_bi.ttf")
def size_to_pixels(font_size):
    """Return the rendered pixel height of the digits 0-9 at font_size."""
    rendered = pygame.freetype.Font(LATEX_FONT_PATH, font_size).render('1234567890')[0]
    return rendered.get_size()[1]
print(size_to_pixels(100))
def makeSmaller(font_size):
    """Scale font_size down by a factor of sqrt(2), rounded to nearest int."""
    shrunken = font_size / SQRT2
    return round(shrunken)
def findEndOfOperand(tex):
    """Return the index just past the first operand of tex (unimplemented
    stub; callers in smartSurface.__init__ already rely on it)."""
    pass #implement
'''
Notes: apply only to TeX strings that have spaces around operators.
We can't insert spaces upon receiving them into smartSurface because of
smartSurface's recursive nature.
'''
class smartSurface:
    """Recursively rendered surface for a TeX-like expression string.

    Work in progress: several branches are 'pass' stubs and the main render
    logic is disabled (kept below as a string literal).

    NOTE(review): both `if lastTex[0] in operators:` and
    `if tex[i] in operators:` reference a bare name `operators`, which will
    raise NameError at runtime — presumably `self.operators` was meant;
    confirm before enabling these paths.
    """
    operators = {'+','-','*','^'} #'*' will appear as a cdot
    spacing = 10  # horizontal padding (pixels) around operators
    def __init__(self, tex, pos, font_size, depth): #depth is number of layers into generation we are.
        self.surface = None
        font = pygame.freetype.Font(LATEX_FONT_PATH, font_size)
        iFont = pygame.freetype.Font(LATEX_iFONT_PATH, font_size)
        st.lock.acquire()
        self.tex = tex
        self.hitboxes=[]
        if tex[0]=="\\":
            # TeX command (e.g. \frac) — not yet implemented
            pass
        elif tex[0]=="(":
            # scan forward to the matching close paren; i ends one past it
            depth_in=0
            i=0
            while True:
                if tex[i]=="(":
                    depth_in+=1
                elif tex[i]==")":
                    depth_in-=1
                i+=1
                if depth_in==0:
                    break
            if depth_in>0:
                raise ValueError("Mismatched parentheses")
            firstTex = tex[1:i-1]
            lastTex = tex[i:]
            firstSurface = None
            if lastTex[0] in operators:
                endOfSecondOperand = 1+findEndOfOperand(lastTex[1:])
                secondTex = tex[i:i+endOfSecondOperand]
                pass #read through until we hit the end of the second operand
            else:
                pass #we can treat this as self-contained
            #generate first
            #generate last
            #add parens around first
            #splice
        else:
            # find the first operator, if any, to split the expression on
            hasOperator = -1
            for i in range(len(tex)):
                if tex[i] in operators:
                    hasOperator=i
                    break
            if hasOperator == -1:
                pass #interpret w/o operators
            else:
                before=tex[:hasOperator]
                operator = tex[hasOperator]
                secondOperand = tex[i:hasOperator+1+findEndOfOperand(tex[hasOperator+1:])]
        # Disabled earlier rendering implementation, kept verbatim below as a
        # string literal for reference.
        '''if tex in list(string.ascii_letters):
            self.surface,rect = iFont.render(tex,st.fontColor)
            self.hitboxes.append((self.surface.get_rect(),pos))
        elif floatcastable(tex):
            self.surface,rect = font.render(tex,st.fontColor)
            self.hitboxes.append((self.surface.get_rect(),pos))
        elif tex[0] =="\\":
            pass
        elif tex[0] == "(":
            pass
        elif tex[0] == ")":
            pass
        else:
            for i in range(0,len(tex)):
                if tex[i] in self.operators:
                    firstSurface = smartSurface(tex[0:i],[0,i],font_size)
                    firstWidth, firstHeight = firstSurface.get_size()
                    secondSurface = smartSurface(tex[i+1:],[i+1,len(tex)],font_size)
                    secondWidth, secondHeight = secondSurface.get_size()
                    operatorSurface, operatorRect = font.render(tex[i],st.fontColor)
                    operatorWidth, operatorHeight = operatorSurface.get_size()
                    finalWidth = firstWidth+operatorWidth+secondWidth+2*self.spacing
                    finalHeight = max(firstHeight,secondHeight,operatorHeight)
                    self.surface = pygame.Surface((finalWidth, finalHeight))
                    self.surface.fill((255,255,255))
                    self.surface.blit(firstSurface.surface, (0,(finalHeight-firstHeight)//2))
                    self.surface.blit(secondSurface.surface, (finalWidth-secondWidth,(finalHeight-secondHeight)//2))
                    self.surface.blit(operatorSurface, (firstWidth+self.spacing,(finalHeight-operatorHeight)//2))
                    for (hitbox,value) in firstSurface.hitboxes:
                        self.hitboxes.append((hitbox.move(0,(finalHeight-firstHeight)//2),value))
                    for (hitbox,value) in secondSurface.hitboxes:
                        self.hitboxes.append((hitbox.move(finalWidth-secondWidth,(finalHeight-secondHeight)//2),value))
                    self.hitboxes.append((operatorSurface.get_rect().move(firstWidth+self.spacing,(finalHeight-operatorHeight)//2),value))
                    break
        if self.surface == None:
            i=1
            while floatcastable(tex[:i]) or tex[i-1]=='.': #if it's part of a float
                i+=1
            i-=1
            firstSurface = smartSurface(tex[0:i],[0,i],font_size)
            firstWidth, firstHeight = firstSurface.get_size()
            secondSurface = smartSurface(tex[i:],[i,len(tex)],font_size)
            secondWidth, secondHeight = secondSurface.get_size()
            finalWidth = firstWidth+secondWidth+self.spacing
            finalHeight = max(firstHeight,secondHeight)
            self.surface = pygame.Surface((finalWidth, finalHeight))
            self.surface.fill((255,255,255))
            self.surface.blit(firstSurface.surface, (0,(finalHeight-firstHeight)//2))
            self.surface.blit(secondSurface.surface, (finalWidth-secondWidth,(finalHeight-secondHeight)//2))
            for (hitbox,value) in firstSurface.hitboxes:
                self.hitboxes.append((hitbox.move(0,(finalHeight-firstHeight)//2),value))
            for (hitbox,value) in secondSurface.hitboxes:
                self.hitboxes.append((hitbox.move(finalWidth-secondWidth,(finalHeight-secondHeight)//2),value))'''
        st.lock.release()
    def get_size(self):
        """Return the (width, height) of the rendered surface."""
        return self.surface.get_size()
    def __hash__(self):
        # hash by source string, matching equality of the underlying TeX
        return hash(self.tex)
#pygame.init()
#screen = pygame.display.set_mode([800,600], pygame.RESIZABLE)
#expression = smartSurface("3a+2-2",[0,5],24)
#pygame.image.save(expression.surface,'C:/Users/carls/desktop/test1.bmp')
#screen.fill((255,255,255))
#screen.blit(expression.surface,(0,0))
#pygame.display.update()
'''from math import ceil
for n in range(5,1000):
font = pygame.freetype.Font(LATEX_FONT_PATH, n)
e3,r3=font.render('1234567890',(0,0,0),(255,255,255))
if e3.get_size()[1]!= ceil(0.7*(n+0.5)):
print(n,e3.get_size()[1],ceil(0.7*(n+0.5)))'''
|
# Finding E to the Nth Digit
# ---------------------------
# Just like the previous problem, but with e instead of PI.
# Enter a number and have the program generate e up to that many decimal places.
# Keep a limit to how far the program will go.
# ---------------------------
# Ref - https://www.mathsisfun.com/numbers/e-eulers-number.html
def nth_input(prompt):
    """Prompt repeatedly until the user enters a valid integer; return it."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print("Not an integer! Try again...")
def e_gen(n):
    """Approximate Euler's number via the compound-interest limit (1+1/n)**n."""
    return (1 + 1 / n) ** n
def e_decimal(func, i):
    """Print the value `func` (an approximation of e) to i decimal places.

    NOTE(review): the slice e_lst[0:i+2] keeps the leading digit plus the
    decimal point plus i digits; the i == 0 branch prints only the first
    character (no decimal point) — confirm both are intended.
    """
    # one character per list element, e.g. ['2', '.', '7', '1', ...]
    e_lst = [x for x in str(func)]
    if i == 0:
        print("".join(e_lst[0]))
    elif i > 0 and i < len(e_lst):
        print("".join(e_lst[0:i+2]))
    else:
        print("Out of range!")
print("Display e to n decimal places.")
e_decimal(e_gen(100000), nth_input("Enter the value for n "))
|
# https://leetcode.com/problems/palindrome-linked-list/description/
"""
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
class ListNode:
    """A node of a singly linked list."""
    def __init__(self, x):
        self.val = x      # payload value
        self.next = None  # following node, or None at the tail
class Solution:
    def isPalindrome(self, head):
        """Return True if the linked list starting at head reads the same
        forwards and backwards (O(n) time, O(n) extra space).

        :type head: ListNode
        :rtype: bool
        """
        values = []
        node = head
        while node:
            values.append(node.val)
            node = node.next
        return values == values[::-1]
# Quick manual check: the list 1 -> 2 -> 2 -> 2 -> 1 is a palindrome.
sol = Solution()
ln1 = ListNode(1)
ln2 = ListNode(2)
ln3 = ListNode(2)
ln4 = ListNode(2)
ln5 = ListNode(1)
ln1.next = ln2
ln2.next = ln3
ln3.next = ln4
ln4.next = ln5
print(sol.isPalindrome(ln1))  # expected: True
|
import psycopg2
import psycopg2.extras
from random import randint, choice, shuffle, randrange
import string
from math import ceil, floor
import matplotlib.pyplot as plt
import numpy as np
import os
def get_crossover_point(n_rows, more_size):
    """Binary-search the selectivity percentile at which the Postgres planner
    stops using the index on bigtable.data for a simple equality query.

    n_rows: number of rows inserted into the temporary table.
    more_size: width in hex characters of the padding column (approximate
    row size in bytes).
    Returns the crossover percentile (integer in 0..100).
    """
    percentile_low = 0
    percentile_high = 100
    while True:
        # new connection per iteration: the TEMPORARY table is session-scoped,
        # so each probe starts from a clean table
        conn = psycopg2.connect(os.environ['CONN_STRING'])
        conn.set_session(autocommit=True)
        cur = conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
        cur.execute("""CREATE TEMPORARY TABLE bigtable (
            Id SERIAL PRIMARY KEY,
            data varchar(100),
            more varchar(10000));""")
        # probe the midpoint of the remaining percentile range
        percentile = ceil((percentile_high + percentile_low) / 2)
        print(n_rows, more_size, percentile)
        # dist holds 1 for rows that will match 'data1' (percentile% of rows)
        # and 0 for the rest, randomly interleaved
        dist = [1]*ceil(n_rows * (percentile / 100))
        dist.extend([0]*ceil(n_rows * ((100-percentile) / 100)))
        shuffle(dist)
        insert_sql = ''
        for i in range(n_rows):
            rand_val = dist[i]
            # pad each row with more_size random hex characters
            fmt = '%%%dx' % more_size
            more_str = fmt % randrange(16**more_size)
            insert_sql += "INSERT INTO bigtable(data, more) VALUES ('data{0}', '{1}');\n".format(rand_val, more_str)
        cur.execute(insert_sql.encode('ascii'))
        cur.execute("CREATE INDEX idx ON bigtable(data);")
        cur.execute("ANALYZE bigtable;")
        cur.execute("""EXPLAIN SELECT * from bigtable
                    WHERE data='data1'""")
        # the plan uses the index iff some EXPLAIN line mentions 'Index Cond'
        uses_index = False
        for row in cur.fetchall():
            uses_index = 'Index Cond' in row.QUERY_PLAN
        if uses_index:
            percentile_low = percentile
        else:
            percentile_high = percentile
        conn.close()
        if (percentile_high - percentile_low) <= 1:
            break
    return percentile
# Experiment 1: crossover percentile vs number of rows, for two row sizes.
more_size = (100, 1000)
n_rows = [int(n) for n in np.logspace(2, 6, 100)]
results = []
for m in more_size:
    row_results = []
    for n in n_rows:
        x_over = get_crossover_point(n, m)
        row_results.append(x_over)
    results.append(row_results)
print(results)
# dump raw results as CSV: one row per n_rows value, one column per row size
f = open('results/results.csv', 'w')
f.write('rows,%s\n' % ','.join([str(m) for m in more_size]))
for i in range(len(n_rows)):
    f.write('%s,%s\n' % (n_rows[i], ','.join([str(results[j][i]) for j in range(len(more_size))])))
f.close()
# plot crossover percentile against row count (log x-axis)
plt.figure(figsize=(10, 6.5))
plt.title('Onset of Table Scan vs Number of Rows')
for i, row_results in enumerate(results):
    plot_label = '~%d bytes / row' % more_size[i]
    plt.semilogx(n_rows, row_results, label=plot_label)
plt.ylabel('% of data returned which initiates table scan')
plt.xlabel('Number of Rows')
plt.minorticks_on()
plt.ylim((0,50))
plt.legend(loc='lower right')
plt.grid(True, which='major', linestyle='solid', axis='y')
plt.grid(True, which='minor', linestyle='dashed', axis='y')
plt.grid(True, which='major', linestyle='solid', axis='x')
plt.grid(True, which='minor', linestyle='dashed', axis='x')
plt.savefig('results/results.png', format='png', transparent=True)
plt.close()
# Experiment 2: crossover percentile vs row size, for two row counts.
more_size = range(100, 3000, 100)
n_rows = (5000,10000)
results = []
for n in n_rows:
    row_results = []
    for m in more_size:
        x_over = get_crossover_point(n, m)
        row_results.append(x_over)
    results.append(row_results)
print(results)
# dump raw results as CSV: one row per row size, one column per row count
f = open('results/results_size.csv', 'w')
f.write('bytes,%s\n' % ','.join([str(n) for n in n_rows]))
for i in range(len(more_size)):
    f.write('%s,%s\n' % (more_size[i], ','.join([str(results[j][i]) for j in range(len(n_rows))])))
f.close()
# plot crossover percentile against row size (linear axes)
plt.figure(figsize=(10,6.5))
plt.title('Onset of Table Scan vs Row Size')
for i, row_results in enumerate(results):
    plot_label = '%d rows' % n_rows[i]
    plt.plot(more_size, row_results, label=plot_label)
plt.ylabel('% of data returned which initiates table scan')
plt.xlabel('Row Size (bytes)')
plt.minorticks_on()
plt.ylim((0,50))
plt.legend(loc='lower right')
plt.grid(True, which='major', linestyle='solid', axis='y')
plt.grid(True, which='minor', linestyle='dashed', axis='y')
plt.grid(True, which='major', linestyle='solid', axis='x')
plt.grid(True, which='minor', linestyle='dashed', axis='x')
plt.savefig('results/results_size.png', format='png', transparent=True)
plt.close()
|
import tensorflow as tf
import numpy as np
IMAGE_SIZE = 64
LABEL_NUM = 75
class Dataset(object):
    """Minimal in-memory dataset with shuffled, epoch-based mini-batching."""
    def __init__(self, images, labels, one_hot=False, dtype=tf.float32):
        # validate the requested dtype (the value is not otherwise used here)
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        self._num_examples = images.shape[0]
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    def next_batch(self, batch_size):
        """Return the next (images, labels) mini-batch, reshuffling the data
        and restarting from the front whenever an epoch boundary is crossed."""
        start = self._index_in_epoch
        end = start + batch_size
        if end > self._num_examples:
            # epoch finished: reshuffle everything and restart at the front
            self._epochs_completed += 1
            order = np.arange(self._num_examples)
            np.random.shuffle(order)
            self._images = self._images[order]
            self._labels = self._labels[order]
            start, end = 0, batch_size
        self._index_in_epoch = end
        return self._images[start:end], self._labels[start:end]
    @property
    def epochs_completed(self):
        """Number of complete passes made over the data so far."""
        return self._epochs_completed
def inference(x_image, keep_prob):
    """Build a small VGG-style CNN; return softmax probabilities (batch, LABEL_NUM).

    NOTE(review): the fc1 reshape assumes an 8x8 feature map after two 2x2
    pools, i.e. a 32x32 input; with IMAGE_SIZE = 64 that would not match --
    confirm the actual input size.
    """
    def weight_variable(shape):
        # Truncated-normal init with small stddev to break symmetry.
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)
    def bias_variable(shape):
        # Small positive bias helps avoid dead ReLUs.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)
    def conv2d(x, W, b):
        conv = tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
        # BUG FIX: was `tf.nn.relu(conv) + b`, adding the bias *after* the
        # nonlinearity; the conventional conv layer is relu(conv + bias).
        return tf.nn.relu(conv + b)
    def max_pool2x2(x):
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    with tf.name_scope('conv1_1'):
        W_conv1_1 = weight_variable([3, 3, 1, 32])
        b_conv1_1 = bias_variable([32])
        h_conv1_1 = conv2d(x_image, W_conv1_1, b_conv1_1)
    with tf.name_scope('conv1_2'):
        W_conv1_2 = weight_variable([3, 3, 32, 32])
        b_conv1_2 = bias_variable([32])
        h_conv1_2 = conv2d(h_conv1_1, W_conv1_2, b_conv1_2)
    # NOTE(review): 'poo1' is presumably a typo for 'pool1'; left unchanged to
    # keep graph/checkpoint node names stable.
    with tf.name_scope('poo1'):
        h_pool1 = max_pool2x2(h_conv1_2)
    with tf.name_scope('dropout1'):
        h_conv1_drop = tf.nn.dropout(h_pool1, keep_prob)
    with tf.name_scope('conv2_1'):
        W_conv2_1 = weight_variable([3, 3, 32, 64])
        b_conv2_1 = bias_variable([64])
        h_conv2_1 = conv2d(h_conv1_drop, W_conv2_1, b_conv2_1)
    with tf.name_scope('conv2_2'):
        W_conv2_2 = weight_variable([3, 3, 64, 64])
        b_conv2_2 = bias_variable([64])
        h_conv2_2 = conv2d(h_conv2_1, W_conv2_2, b_conv2_2)
    with tf.name_scope('pool2'):
        h_pool2 = max_pool2x2(h_conv2_2)
    with tf.name_scope('dropout2'):
        h_conv2_drop = tf.nn.dropout(h_pool2, keep_prob)
    with tf.name_scope('fc1'):
        W_fc1 = weight_variable([8 * 8 * 64, 256])
        h_pool2_flat = tf.reshape(h_conv2_drop, [-1, 8 * 8 * 64])
        b_fc1 = bias_variable([256])
        h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
    with tf.name_scope('dropout3'):
        h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    with tf.name_scope('output'):
        W_fc2 = weight_variable([256, LABEL_NUM])
        b_fc2 = bias_variable([LABEL_NUM])
        y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    return y_conv
def training(loss, optimizer, global_step, learning_rate):
    """Create the training op minimizing `loss`, advancing `global_step`.

    BUG FIX: `learning_rate` was accepted but ignored (Adadelta ran with its
    default); it is now passed through.
    NOTE(review): the `optimizer` argument is still unused -- this function is
    hard-wired to Adadelta; confirm whether callers expect it to be honored.
    """
    return tf.train.AdadeltaOptimizer(learning_rate) \
        .minimize(loss, global_step=global_step)
def loss(logits, labels):
    """Mean categorical cross-entropy between `labels` and predicted `logits`.

    BUG FIX: Keras' categorical_crossentropy signature is (target, output),
    so the ground-truth labels must come first; the arguments were swapped.
    """
    return tf.reduce_mean(tf.contrib.keras.backend.categorical_crossentropy(labels, logits))
def evaluation(logits, labels):
    """Fraction of samples whose argmax prediction matches the argmax label."""
    predicted = tf.argmax(logits, 1)
    expected = tf.argmax(labels, 1)
    matches = tf.cast(tf.equal(predicted, expected), tf.float32)
    return tf.reduce_mean(matches)
|
import sys, urllib2, smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from bs4 import BeautifulSoup
class Book:
    """Plain container for one ebook listing (all fields are strings)."""
    def __init__(self, title, author, price, summary):
        self.title = title
        self.author = author
        self.price = price
        self.summary = summary
    def details(self):
        """Return a newline-separated, newline-terminated description."""
        return '\n'.join([self.title, self.author, self.price, self.summary]) + '\n'
# get the books of the month
def get_monthly_books(url):
    """Scrape the monthly-offers page and return a list of Book objects.

    The listing page has no summaries, so each Book gets an empty summary.
    """
    book_list = []
    # query the website and return the html to the variable page
    page = urllib2.urlopen(url)
    # parse the html using beautiful soup and store in variable `soup`
    html = BeautifulSoup(page, "html.parser")
    # get the books with author and price
    books = html.findAll('div', attrs={'class': 'book-details'})
    for book in books:
        title = book.find('p', attrs={'class': 'title product-field'}).text.strip()
        author = book.find('p', attrs={'class': 'attribution product-field contributor-list'}).text.strip()
        price = book.find('p', attrs={'class': 'product-field price'}).text.strip()
        book_list.append(Book(title, author, price, ''))
    return book_list
# get the book of the day TODO
def get_daily_book(url):
    """Scrape the homepage for the 'Offerta del giorno' (deal of the day) book.

    Returns a Book, or None when no daily-offer heading is found.
    """
    page = urllib2.urlopen(url)
    html = BeautifulSoup(page, "html.parser")
    # Daily offers are announced by a heading widget with this exact text.
    headings = html.findAll('div', attrs={'class':'secondary-heading widget-title'})
    for heading in headings:
        if heading.text == 'Offerta del giorno':
            title = heading.findNext('div', attrs={'class':'widget-text-description'}).h1.text.strip()
            author = heading.findNext('span', attrs={'class':'visible-contributors'}).text.strip()
            price = heading.findNext('div', attrs={'class':'pricing regular-price'}).text.strip()
            summary = heading.findNext('div', attrs={'class':'widget-text-description'}).p.text.strip()
            # BUG FIX: the Book used to be assigned to the loop variable and
            # was clobbered by the next iteration (and a bs4 Tag was returned
            # when nothing matched); return it immediately instead.
            return Book(title, author, price, summary)
    return None
# send myself an email with the list of the books
def send_email(email, password, books):
    """Email the book list to oneself via Outlook/Hotmail SMTP with STARTTLS.

    `email` is used as both sender and recipient; `books` is a list of Book.
    """
    # prepare the email
    msg = MIMEMultipart()
    msg['From'] = email
    msg['To'] = email
    msg['Subject'] = "Kobo: ebooks in offerta"
    body = '\n'.join(b.details() for b in books)
    msg.attach(MIMEText(body.encode('utf-8'), 'plain'))
    # send the email -- plain connection upgraded with STARTTLS before login
    server = smtplib.SMTP("smtp.live.com", 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    server.login(email, password)
    server.sendmail(email, email, msg.as_string())
    print("Email sent to " + email)
def main():
    """Print the daily deal followed by the monthly offers; optionally email them.

    Usage: script.py [email password] -- when both args are given the list is
    mailed to that address. (Python 2 script: print statements below.)
    """
    mm = get_monthly_books("https://www.kobo.com/it/it/p/OfferteDelMese")
    dd = get_daily_book("https://www.kobo.com/it")
    books = [dd] + mm # make the book of the day the first of the list
    print '\n'.join(b.details() for b in books)
    if len(sys.argv) == 3:
        email = sys.argv[1]
        passw = sys.argv[2]
        send_email(email, passw, books)
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
"""
Copyright (c) 2013 Qimin Huang <qiminis0801@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import requests
import json
import time
class Weather:
    """Minimal client for the weather.com.cn city-info JSON endpoint."""
    def __init__(self):
        pass
    def weather(self, cityid):
        """Return u'city, conditions, low ~ high' for the given city id.

        Retries until the request succeeds and the payload parses.
        """
        while True:
            try:
                r = requests.get('http://www.weather.com.cn/data/cityinfo/' + cityid + '.html')
                weatherinfo = json.loads(r.text)[u'weatherinfo']
                break
            # BUG FIX: a bare `except: pass` retried in a tight loop and
            # swallowed even KeyboardInterrupt/SystemExit; catch Exception
            # only and back off briefly before retrying.
            except Exception:
                time.sleep(1)
        return weatherinfo[u'city'] + u', ' + weatherinfo[u'weather'] + u', ' + weatherinfo[u'temp1'] + u' ~ ' + weatherinfo[u'temp2']
if __name__ == '__main__':
    # Demo lookups for two hard-coded city ids (Python 2 print statements).
    weather = Weather()
    print weather.weather('101010100')
    print weather.weather('101231001')
|
from unittest import TestCase
def num_coins(cents: int) -> int:
    """Minimum number of US coins (25/10/5/1) totalling `cents`, greedily."""
    total = 0
    remaining = cents
    for denomination in (25, 10, 5, 1):
        used, remaining = divmod(remaining, denomination)
        total += used
    return total
class TestNumCoins(TestCase):
    """Spot-checks num_coins against hand-computed minimal coin counts."""
    def test_33(self):
        # 25 + 5 + 1 + 1 + 1 = 5 coins
        self.assertEqual(num_coins(33), 5)
    def test_31(self):
        # 25 + 5 + 1 = 3 coins
        self.assertEqual(num_coins(31), 3)
|
#!/usr/bin/env python3
import pytest
from pftpy.actions import ActionContext
from pftpy.graph_actions import partition
import igraph
@pytest.fixture
def ctx():
ctx = ActionContext()
ctx.register_actions(*partition.defined_actions)
return ctx
def test_bfpartition(ctx):
    """Brute-force partition on K4 with three extra attached vertices."""
    graph: igraph.Graph = igraph.Graph.Full(4)
    a = graph.add_vertex()
    b = graph.add_vertex()
    c = graph.add_vertex()
    graph.add_edge(a, b)
    graph.add_edge(0, a)
    graph.add_edge(0, b)
    graph.add_edge(1, c)
    # The partition actions appear to identify vertices via an 'idx'
    # attribute -- TODO confirm against their implementation.
    graph.vs['idx'] = [i for i in range(graph.vcount())]
    brute_force: partition.BFPartition = ctx.construct_action("BFPartition")
    solution, weight = brute_force.solve_rec(graph)
    print(solution)
    # Expected optimum: 3 parts, weight 5*3*2 -- presumably a product over
    # part sizes; verify against BFPartition's objective.
    assert len(solution) == 3
    assert weight == 5*3*2
def test_bfpartition2(ctx):
    """Brute-force partition on an edgeless 3-vertex graph."""
    graph: igraph.Graph = igraph.Graph()
    a = graph.add_vertex()
    b = graph.add_vertex()
    c = graph.add_vertex()
    graph.vs['idx'] = [i for i in range(graph.vcount())]
    brute_force: partition.BFPartition = ctx.construct_action("BFPartition")
    solution, weight = brute_force.solve_rec(graph)
    print(solution)
    # Three isolated vertices: 3 singleton parts, expected weight 2*2*2.
    assert len(solution) == 3
    assert weight == 2*2*2
def test_branchingpartition(ctx):
    """BranchingPartition must match the brute-force optimum on random graphs."""
    for i in range(10):
        graph: igraph.Graph = igraph.Graph.Erdos_Renyi(8, m=i+10)
        graph.vs['idx'] = [i for i in range(graph.vcount())]
        old_bf: partition.BFPartition = ctx.construct_action("BFPartition")
        # BUG FIX: this read `new_bf = partition.BFPartition = ctx...`, which
        # rebound the module attribute partition.BFPartition to an action
        # instance; a ':' type annotation (as used above) was intended.
        new_bf: partition.BFPartition = ctx.construct_action("BranchingPartition")
        sol1, weight1 = old_bf.solve_rec(graph)
        sol2, weight2 = new_bf.solve_rec(graph)
        assert weight1 == weight2
def test_branchingpartition_reductions(ctx):
    """Disabling the lower-bound reductions must not change the optimum."""
    for i in range(10):
        graph: igraph.Graph = igraph.Graph.Erdos_Renyi(8, m=i+10)
        graph.vs['idx'] = [i for i in range(graph.vcount())]
        # BUG FIX: both lines used `name = partition.BFPartition = ...`,
        # rebinding the module attribute; ':' annotations were intended.
        bf: partition.BFPartition = ctx.construct_action("BranchingPartition")
        bf_no_lb: partition.BFPartition = ctx.construct_action("BranchingPartition")
        bf_no_lb.reduction_sw = False
        bf_no_lb.reduction_lb = False
        bf_no_lb.reduction_lb2 = False
        sol1, weight1 = bf.solve_rec(graph)
        sol2, weight2 = bf_no_lb.solve_rec(graph)
        assert weight1 == weight2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
import uuid
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch.dispatcher import receiver
from django.db import models
from authtools.models import AbstractEmailUser
from rest_framework.authtoken.models import Token
log = logging.getLogger(__name__)
class User(AbstractEmailUser):
    """Email-login user extended with a stable, non-editable public UUID."""
    # Assigned once at creation; hidden from admin forms via editable=False.
    uuid = models.UUIDField(default=uuid.uuid4, editable=False)
    class Meta(AbstractEmailUser.Meta):
        swappable = 'AUTH_USER_MODEL'
        verbose_name = _('user')
        verbose_name_plural = _('users')
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
    """Ensure every newly created user gets a DRF auth token."""
    if created:
        # get_or_create keeps this idempotent if the signal fires twice.
        token, _ = Token.objects.get_or_create(user=instance)
        # Lazy %-style args: formatting is skipped when INFO is disabled.
        log.info('created token: %s for %s', token, instance.email)
# Solve a*x^2 + b*x + c = 0 for user-supplied integer coefficients.
# NOTE(review): a == 0 still raises ZeroDivisionError in the root branches;
# the prompts imply a genuine quadratic is expected.
a = int(input("Enter the a:"))
b = int(input("Enter the b:"))
c = int(input("Enter the c:"))
disc = (b**2)-4*a*c
# BUG FIX: the roots used to be computed unconditionally before the sign of
# the discriminant was checked, producing pointless complex intermediates
# when disc < 0; compute them only in the branches that use them.
if disc > 0:
    root_1 = (-b+(disc**0.5))/(2*a)
    root_2 = (-b-(disc**0.5))/(2*a)
    print("There are two real roots:")
    print(root_1)
    print(root_2)
elif disc == 0:
    print("There is one real root")
    print(-b/(2*a))
else:
    print("There are two complex roots")
|
from collections import Counter
import json
import os
# Dataset roots for the Chinese and NYT relation-extraction experiments;
# one directory per model variant (seq2umt, wdec, ...).
data_root_list = [
    "data/chinese/seq2umt_ops",
    "data/chinese/wdec",
    "data/nyt/wdec",
    "data/nyt/seq2umt_ops",
    "data/nyt/seq2umt_pos",
]
# Shorthand roots for the WDec variants.
wdec_nyt_root = "data/nyt/wdec"
wdec_chinese_root = "data/chinese/wdec"
# triplet = s p o
def cnt_train_key(
    data_root, key_fn=lambda l: [(t["subject"], t["predicate"], t["object"]) for t in l]
):
    """Count training-set key occurrences (default key: (s, p, o) triplets).

    Reads JSON-lines from <data_root>/new_train_data.json and returns a
    Counter over key_fn(spo_list) items.
    """
    triplet_cnt = Counter()
    source = os.path.join(data_root, "new_train_data.json")
    with open(source, "r") as s:
        for line in s:
            line = json.loads(line)
            spo_list = line["spo_list"]
            # spo = [(t["subject"], t["predicate"], t["object"]) for t in spo_list]
            spo = key_fn(spo_list)
            triplet_cnt.update(spo)
    return triplet_cnt
def filter_test(data_root, dataset, cnt, thr: int):
    """Write to filter_<thr>_<dataset> the sentences whose every triplet is
    rarer than `thr` in the training counts; return the fraction kept.

    BUG FIX: the total used to come from the last 0-based enumerate index,
    undercounting the file by one line (and raising NameError on an empty
    file); lines are now counted explicitly.
    """
    source = os.path.join(data_root, dataset)
    target = os.path.join(data_root, "filter_" + str(thr) + "_" + dataset)
    write_linenum = 0
    all_linenum = 0
    with open(source, "r", encoding="utf-8") as s, open(
        target, "w", encoding="utf-8"
    ) as t:
        for line in s:
            all_linenum += 1
            jline = json.loads(line)
            spo_list = jline["spo_list"]
            if check_thr(spo_list, cnt, thr):
                t.write(line)
                write_linenum += 1
    print(data_root + " valid sent / all sent = %d/%d" % (write_linenum, all_linenum))
    # Guard the ratio against an empty input file.
    return write_linenum / all_linenum if all_linenum else 0.0
def filter_partial_triplet(data_root, dataset, cnt, fn):
    """Report how many triplets (keyed by fn) were seen at least once in cnt.

    Prints valid/total triplet counts for the given JSON-lines dataset.
    """
    source = os.path.join(data_root, dataset)
    all_triplet_num = 0
    all_valid_num = 0
    with open(source, "r", encoding="utf-8") as s:
        for all_linenum, line in enumerate(s):
            jline = json.loads(line)
            spo_list = jline["spo_list"]
            spo_list = fn(spo_list)
            all_triplet_num += len(spo_list)
            # A triplet is "valid" when it appeared in the training counts.
            valid_list = [cnt[spo] > 0 for spo in spo_list]
            all_valid_num += sum(valid_list)
    print(
        data_root
        + " valid sent / all sent = %d/%d = %.3f"
        % (all_valid_num, all_triplet_num, all_valid_num / all_triplet_num)
    )
def check_num(spo_list, fn):
    """Extract the (s, p, o) triplets and apply predicate `fn` to their count."""
    triplets = [(entry["subject"], entry["predicate"], entry["object"]) for entry in spo_list]
    return fn(len(triplets))
def check_thr(spo_list, cnt, MAX):
    """True iff every (s, p, o) triplet of spo_list occurs fewer than MAX times in cnt."""
    triplets = [(entry["subject"], entry["predicate"], entry["object"]) for entry in spo_list]
    below_threshold = [cnt[triplet] < MAX for triplet in triplets]
    return all(below_threshold)
if __name__ == "__main__":
    # Ad-hoc driver: count (subject, predicate) pairs in the WDec NYT
    # training data, then report test-set coverage under that key.
    nyt_list = [
        "data/nyt/wdec",
        "data/nyt/seq2umt_pos",
        "data/nyt/multi_head_selection",
    ]
    test = nyt_list[0]
    sp_fn = lambda l: [(t["subject"], t["predicate"]) for t in l]
    spo_fn = lambda l: [(t["subject"], t["predicate"], t["object"]) for t in l]
    cnt = cnt_train_key(test, key_fn=sp_fn)
    filter_partial_triplet(test, "new_test_data.json", cnt, fn=sp_fn)
|
from eutility.eutility import timer
from eutility.fileops import data
from eutility.fileops import printdoc
from eutility.fileops import matrix
from eutility.fileops import readcsv
# This is a list of solved problems.
# the euler000 function is the generic version of the problem.
# the problem000 function takes no arguments,
# and solves the problems with the inputs defined on the website.
# Each problemNNN wrapper imports its solver lazily, prints the solver's
# docstring, loads inputs from data/eulerNNN.txt, and times the solve.
# NOTE: `f.next()` is the Python 2 file-iterator method; this module
# targets Python 2 (see raw_input/print below).
def problem001():
    from problems.euler001 import euler001
    printdoc(euler001)
    with data('euler001.txt') as f:
        limit = int(f.next())
        ns = [int(i) for i in f]
        with timer():
            return euler001(ns, limit)
def problem002():
    from problems.euler002 import euler002
    printdoc(euler002)
    with data('euler002.txt') as f:
        limit = int(f.next())
        with timer():
            return euler002(limit)
def problem003():
    from problems.euler003 import euler003
    printdoc(euler003)
    with data('euler003.txt') as f:
        limit = int(f.next())
        with timer():
            return euler003(limit)
def problem004():
    from problems.euler004 import euler004
    printdoc(euler004)
    with data('euler004.txt') as f:
        n = int(f.next())
        with timer():
            return euler004(n)
def problem005():
    from problems.euler005 import euler005
    printdoc(euler005)
    with data('euler005.txt') as f:
        n = int(f.next())
        with timer():
            return euler005(n)
def problem006():
    from problems.euler006 import euler006
    printdoc(euler006)
    with data('euler006.txt') as f:
        n = int(f.next())
        with timer():
            return euler006(n)
def problem007():
    from problems.euler007 import euler007
    printdoc(euler007)
    with data('euler007.txt') as f:
        n = int(f.next())
        with timer():
            return euler007(n)
def problem008():
    from problems.euler008 import euler008
    printdoc(euler008)
    with data('euler008.txt') as f:
        n = int(f.next())
        number = f.next()
        with timer():
            return euler008(n, number)
def problem009():
    # Problem 9 has no input file; the solver is self-contained.
    from problems.euler009 import euler009
    printdoc(euler009)
    with timer():
        return euler009()
def problem010():
    from problems.euler010 import euler010
    printdoc(euler010)
    with data('euler010.txt') as f:
        limit = int(f.next())
        with timer():
            return euler010(limit)
def problem011():
    from problems.euler011 import euler011
    printdoc(euler011)
    with data('euler011.txt') as f:
        n = int(f.next())
        grid = matrix(f, int)
        with timer():
            return euler011(n, grid)
def problem012():
    from problems.euler012 import euler012
    printdoc(euler012)
    with data('euler012.txt') as f:
        n = int(f.next())
        with timer():
            return euler012(n)
def problem013():
    """Run Project Euler problem 13 on its data file.

    CONSISTENCY FIX: this was the only wrapper that did not wrap the solver
    call in `with timer():`, so problem 13 was never timed like the others.
    """
    from problems.euler013 import euler013
    printdoc(euler013)
    with data('euler013.txt') as f:
        n = int(f.next())
        ns = [int(i) for i in f]
        with timer():
            return euler013(n, ns)
# Same wrapper pattern as above: lazy import, print docstring, read
# data/eulerNNN.txt, time the solver. Numbering skips 031-034.
def problem014():
    from problems.euler014 import euler014
    printdoc(euler014)
    with data('euler014.txt') as f:
        limit = int(f.next())
        with timer():
            return euler014(limit)
def problem015():
    from problems.euler015 import euler015
    printdoc(euler015)
    with data('euler015.txt') as f:
        n = int(f.next())
        with timer():
            return euler015(n)
def problem016():
    from problems.euler016 import euler016
    printdoc(euler016)
    with data('euler016.txt') as f:
        exp = int(f.next())
        with timer():
            return euler016(exp)
def problem017():
    from problems.euler017 import euler017
    printdoc(euler017)
    with data('euler017.txt') as f:
        start = int(f.next())
        end = int(f.next())
        with timer():
            return euler017(start, end)
def problem018():
    from problems.euler018 import euler018
    printdoc(euler018)
    with data('euler018.txt') as f:
        triangle = matrix(f, int)
        with timer():
            return euler018(triangle)
def problem019():
    from problems.euler019 import euler019
    printdoc(euler019)
    with data('euler019.txt') as f:
        # Two rows: the start and end dates for the count.
        dates = matrix(f, int)
        with timer():
            return euler019(dates[0], dates[1])
def problem020():
    from problems.euler020 import euler020
    printdoc(euler020)
    with data('euler020.txt') as f:
        n = int(f.next())
        with timer():
            return euler020(n)
def problem021():
    from problems.euler021 import euler021
    printdoc(euler021)
    with data('euler021.txt') as f:
        limit = int(f.next())
        with timer():
            return euler021(limit)
def problem022():
    from problems.euler022 import euler022
    printdoc(euler022)
    with data('euler022.txt') as f:
        # One CSV line of quoted names; strip the surrounding quotes.
        filedata = readcsv(f.next(), fn=lambda x: x.strip('"'))
        with timer():
            return euler022(filedata)
def problem023():
    from problems.euler023 import euler023
    printdoc(euler023)
    with data('euler023.txt') as f:
        limit = int(f.next())
        with timer():
            return euler023(limit)
def problem024():
    from problems.euler024 import euler024
    printdoc(euler024)
    with data('euler024.txt') as f:
        n = int(f.next())
        digits = int(f.next())
        with timer():
            return euler024(n, digits)
def problem025():
    from problems.euler025 import euler025
    printdoc(euler025)
    with data('euler025.txt') as f:
        length = int(f.next())
        with timer():
            return euler025(length)
def problem026():
    from problems.euler026 import euler026
    printdoc(euler026)
    with data('euler026.txt') as f:
        maxn = int(f.next())
        with timer():
            return euler026(maxn)
def problem027():
    from problems.euler027 import euler027
    printdoc(euler027)
    with data('euler027.txt') as f:
        limit = int(f.next())
        with timer():
            return euler027(limit)
def problem028():
    from problems.euler028 import euler028
    printdoc(euler028)
    with data('euler028.txt') as f:
        diagonal = int(f.next())
        with timer():
            return euler028(diagonal)
def problem029():
    from problems.euler029 import euler029
    printdoc(euler029)
    with data('euler029.txt') as f:
        a = int(f.next())
        b = int(f.next())
        with timer():
            return euler029(a, b)
def problem030():
    from problems.euler030 import euler030
    printdoc(euler030)
    with data('euler030.txt') as f:
        power = int(f.next())
        with timer():
            return euler030(power)
def problem035():
    from problems.euler035 import euler035
    printdoc(euler035)
    with data('euler035.txt') as f:
        limit = int(f.next())
        with timer():
            return euler035(limit)
def problem036():
    from problems.euler036 import euler036
    printdoc(euler036)
    with data('euler036.txt') as f:
        limit = int(f.next())
        with timer():
            return euler036(limit)
def problem037():
    from problems.euler037 import euler037
    printdoc(euler037)
    with data('euler037.txt') as f:
        n = int(f.next())
        with timer():
            return euler037(n)
if __name__ == "__main__":
    # Interactive runner (Python 2: raw_input / print statements).
    # Collect every problemNNN function defined above.
    _problems = [fn for fn in dir() if fn.startswith('problem')]
    while True:
        p = raw_input("Which problem would you like to run? (Type 'help' for help): ").strip()
        # exit condition
        if p == 'exit':
            break
        elif p == 'list':
            print _problems
        elif p == 'help':
            print ('\n'
                   'exit: exits the program.\n'
                   'list: lists all problems.\n'
                   'all: runs all problems.\n')
        # run all problems
        elif p == 'all':
            for p in _problems:
                print '\n'
                # NOTE(review): eval is confined to names harvested from
                # dir() above, but getattr on the module would be cleaner.
                eval('{}()'.format(p))
        else:
            # account for format '13' or 'problem013'
            if p in _problems:
                prob = p
            else:
                prob = 'problem{}'.format(p.zfill(3))
            # run the problem itself
            if prob in _problems:
                print '\n'
                eval('{}()'.format(prob))
                print '\n'
            else:
                try:
                    # Numeric but unknown -> not yet solved; anything else is
                    # an invalid command. NOTE(review): bare except is broad.
                    int(p)
                    print 'This problem ({}) has not yet been added to the solutions list.'.format(prob)
                    print 'Please choose again.\n'
                except:
                    print "I'm sorry, '{}' is not a valid command. Please try again.".format(p)
|
from django.shortcuts import render,redirect
#from student.forms import Studentform
from student.models import Studenttable
from django.contrib import messages
def studentpage(request):
    """Render the student landing page."""
    return render(request,"student/studentpage.html")
def registerstudent(request):
    """Render the student registration form."""
    return render(request,"student/registerstudent.html")
def savestudent(request):
    """Persist a new student from raw POST fields r1..r4, then bounce back
    to the registration page with a success message.

    NOTE(review): the password is stored in plain text and no validation is
    performed on any field -- both must be fixed before production use.
    """
    n=request.POST.get("r1")
    cn=request.POST.get("r2")
    e=request.POST.get("r3")
    p=request.POST.get("r4")
    #if sf.is_valid():
    # sf.save()
    Studenttable(name=n,contactno=cn,email=e,password=p).save()
    messages.success(request,"Registered Successfully")
    return redirect("registerstudent")
    #else:
    # return render(request,"student/registerstudent.html")
def loginstudent(request):
    """Render the student login form."""
    return render(request,"student/loginstudent.html")
def checkloginstudent(request):
    """Validate POSTed credentials (l1=email, l2=password) against Studenttable.

    NOTE(review): passwords are compared in plain text -- they should be
    hashed and checked with Django's auth machinery.
    """
    email = request.POST.get("l1")
    password = request.POST.get("l2")
    try:
        Studenttable.objects.get(email=email, password=password)
        return render(request, "student/studenthome.html", {"name": email})
    except Studenttable.DoesNotExist:
        messages.error(request, "Invalid User")
        # BUG FIX: redirecting back to this POST-only handler looped the
        # failure (a GET has no l1/l2); send the user to the login form.
        return redirect("loginstudent")
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
from django.http import JsonResponse
# FIX: method_decorator(..., name='dispatch') targets class-based views;
# for a function-based view csrf_exempt is applied directly.
@csrf_exempt
def phoneurl_view(request):
    """AJAX endpoint: report whether the POSTed contact number is taken."""
    pnum = request.POST.get('cname')
    try:
        Studenttable.objects.get(contactno=pnum)
        data = {'error': 'Contact number taken'}
    except Studenttable.DoesNotExist:
        data = {'message': 'Contact number available'}
    return JsonResponse(data)
|
"""
Array of Array Products
Given an array of integers arr, you’re asked to calculate for each index i the product of all integers except the integer at that index (i.e. except arr[i]). Implement a function arrayOfArrayProducts that takes an array of integers and returns an array of the products.
Solve without using division and analyze your solution’s time and space complexities.
Examples:
input: arr = [8, 10, 2]
output: [20, 16, 80] # by calculating: [10*2, 8*2, 8*10]
input: arr = [2, 7, 3, 4]
output: [84, 24, 56, 42] # by calculating: [7*3*4, 2*3*4, 2*7*4, 2*7*3]
Constraints:
[time limit] 5000ms
[input] array.integer arr
0 ≤ arr.length ≤ 20
[output] array.integer
"""
# Brute force
# Time O(n^2)
# Space O(n)
# keyword = 축적값
def array_of_array_products(arr):
    """Brute force: for each index, multiply every other element. O(n^2) time."""
    if len(arr) <= 1:
        return []
    products = []
    for i in range(len(arr)):
        prod = 1
        for j, value in enumerate(arr):
            if j != i:
                prod *= value
        products.append(prod)
    return products
"""
# arr = [2, 7, 3, 4]
# new = [7 * 3 * 4, 2 * 3 * 4, 2 * 7 * 4, 2 * 7 * 3]
# 2 -> [1] - [7, 3, 4]
# 7 -> [2] - [3, 4]
# 3 -> [2, 7] - [4]
# 4 -> [2, 7, 3] - [1]
===========================================================
1 7x3x4x1 1x2 3x4x1, 1x2x7 4x1, 1x2x7x3 1
l r l r l r l r
=> We need to make
l = [1, 1x2, 1x2x7, 1x2x7x3]
r = [1, 1x4, 1x4x3, 1x4x3x7]
=> then reverse r
=> and then just multiply it
"""
# Prefix/suffix products, no division.
# Time complexity: O(n)
# Space complexity: O(n)
def array_of_array_products(arr):
    """For each i, the product of all elements except arr[i].

    BUG FIX: the previous version built the suffix array with insert(0, ...),
    an O(n) operation per element, making the whole pass O(n^2) despite the
    claimed O(n); the suffix array is now built by append and reversed once.
    """
    if len(arr) <= 1:
        return []
    # left_acc[i] = product of arr[:i]
    left_acc = [1]
    for value in arr[:-1]:
        left_acc.append(left_acc[-1] * value)
    # right_acc[i] = product of arr[i+1:], built backwards then reversed.
    right_acc = [1]
    for value in reversed(arr[1:]):
        right_acc.append(right_acc[-1] * value)
    right_acc.reverse()
    # Combine in place into left_acc (also the returned result).
    for i in range(len(arr)):
        left_acc[i] *= right_acc[i]
    return left_acc
# Time complexity: O(n^2) *Because time complexity of insert() is O(n).
# Space complexity: O(n)
def array_of_array_products(arr):
    """Prefix/suffix product solution; insert(0, ...) keeps it O(n^2) time."""
    n = len(arr)
    if n <= 1:
        return []
    prefix = [1]
    for i in range(n - 1):
        prefix.append(prefix[-1] * arr[i])
    suffix = [1]
    for i in range(n - 1, 0, -1):
        suffix.insert(0, suffix[0] * arr[i])
    # Elementwise product of the two accumulators is the answer.
    result = prefix
    for i in range(n):
        result[i] *= suffix[i]
    return result
|
import pygame
import time
from project.objects.object_game import Game
from project import config
# Resolve the MainConfig class from the project config module by name.
config_object = getattr(config, "MainConfig")
def main(level_id):
    """Run the game loop for `level_id` until the game stops running."""
    game = Game(level_id)
    screen = pygame.display.set_mode(game.get_size())
    pygame.display.set_caption("Tank Game")
    clock = pygame.time.Clock()
    game.start_game()
    while game.running:
        game.process_events()
        # Only advance the simulation while the game is live.
        if not game.is_frozen and not game.is_ended:
            game.update_groups()
        screen.fill((0, 0, 0))
        game.draw_groups(screen)
        pygame.display.flip()
        clock.tick(config_object.fps)
        # Hold the final frame briefly once the game has ended.
        if game.is_ended:
            time.sleep(1)
    pygame.quit()
# Initialise pygame at import time -- required before set_mode() in main().
pygame.init()
if __name__ == "__main__":
    main(int(input("Choose level in range[1, 3]: ")))
|
# Split Z increments between two totals starting at X and Y: the first gets
# i of them, the second the remaining Z - i. Print the best achievable
# value of min of the two totals over all splits.
X, Y, Z = map(int, input().split())
ans = 0
for i in range(0, Z+1):
    A = X + i
    B = Y + (Z - i)
    ans = max(ans, min(A, B))
print(ans)
|
'''
Created on Jul 10, 2014
@author: jtaghiyar
'''
import codecs
import os
import re
from setuptools import setup
def read(*paths):
    """Return the text of the file at *paths, relative to this file's directory.

    FIX: `codecs` was imported but plain open() was used, decoding with the
    platform default encoding; codecs.open pins UTF-8 and works identically
    on the Python 2.7 this setup.py targets and on Python 3.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    with codecs.open(os.path.join(here, *paths), encoding='utf-8') as f:
        return f.read()
def get_version():
    """Extract kronos_version from kronos/kronos_version.py without importing it.

    Raises:
        RuntimeError: if the version assignment cannot be found.
    """
    version_file = read("kronos", "kronos_version.py")
    version_match = re.search(r"^kronos_version = ['\"]([^'\"]*)['\"]",
                              version_file, re.M)
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
# Package metadata; version is parsed from the source tree at build time.
long_description = read('README.md')
setup(name='kronos_pipeliner',
      version=get_version(),
      description='A workflow assembler for genome analytics and informatics',
      long_description=long_description,
      classifiers=[
          "Development Status :: 5 - Production/Stable",
          "Intended Audience :: Science/Research",
          "Intended Audience :: Developers",
          "License :: OSI Approved :: MIT License",
          "Programming Language :: Python :: 2.7",
          "Topic :: Scientific/Engineering :: Bio-Informatics",
      ],
      keywords='pipeline workflow bioinformatics kronos',
      author='M. Jafar Taghiyar',
      author_email='jafar.taghiyar@gmail.com',
      url='https://github.com/jtaghiyar/kronos',
      license='MIT',
      packages=['kronos', 'templates'],
      entry_points={'console_scripts':['kronos=kronos:main']},
      install_requires = ['pyyaml>=3.11', 'ruffus==2.4.1']
     )
|
import unittest
import warnings
from gevent.testing.modules import walk_modules
from gevent.testing import main
from gevent.testing.sysinfo import NON_APPLICABLE_SUFFIXES
from gevent.testing import six
class TestExec(unittest.TestCase):
    """Empty container; per-module exec tests are attached dynamically below."""
    pass
def make_exec_test(path, module):
    """Attach to TestExec a test that exec's `path` (deprecations silenced).

    The test's name is derived from the dotted module path so failures are
    attributable to a specific module.
    """
    def test(_):
        with open(path, 'rb') as f:
            src = f.read()
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', DeprecationWarning)
            six.exec_(src, {'__file__': path})
    name = "test_" + module.replace(".", "_")
    test.__name__ = name
    setattr(TestExec, name, test)
def make_all_tests():
    """Generate one exec test per applicable gevent test module."""
    for path, module in walk_modules(recursive=True):
        if module.endswith(NON_APPLICABLE_SUFFIXES):
            continue
        make_exec_test(path, module)
# Populate TestExec at import time so unittest discovery sees the tests.
make_all_tests()
if __name__ == '__main__':
    main()
|
# Raw 32-bit MIPS machine words used as test programs for the simulator.
givenTestCase = [0x20072000, 0x20e6fffd, 0x00072022, 0x00864020, 0x3105000f, 0x0085402a,
                 0xac082008, 0x20e70008, 0xace8fffc, 0x8c082004, 0x8ce50000]
myTestCase = [0x2084115c, 0x2001115c, 0x00812022, 0x200501a4, 0x30a60539, 0xac062000, 0x8c072000, 0xac070000]
# Opcode (and, for opcode 0, funct) bit patterns -> mnemonics.
instructions = {"0b000000": {"0b100000": "add", "0b100010": "sub", "0b101010": "slt",
                             "0b011010": "div", "0b010000": "mfhi", "0b000100": "sllv",
                             "0b000011": "sra", "0b001000": "jr", "0b100110": "xor",
                             "0b000000": "sll"},
                "0b001000": "addi", "0b001100": "andi", "0b100011": "lw", "0b101011": "sw", "0b000100": "beq",
                "0b000101": "bne", "0b000010": "j", "0b000011": "jal", "0b001110": "xori", "0b001010": "slti",
                }
registers = {} # created empty dictionary for registers
# $0..$31 plus the hi/lo multiply-divide registers, all zeroed.
for x in range(32):
    registers.update({f'${x}' : 0})
registers.update({'lo':0, 'hi':0})
dataMemory = {} # created empty dictionary for dataMemory
# Word-addressed data segment covering byte addresses 0x2000-0x2ffc.
for x in range(0x2000, 0x3000, 4):
    dataMemory.update({x: 0})
pc = 0
def setPc(n=0):
    """Set the program counter to instruction index n (byte address n*4)."""
    global pc
    pc = n*4
def incrementPc(n=1):
    """Advance the program counter by n instructions (n*4 bytes)."""
    global pc
    pc += n*4
def reg(num):
    """Map a register number to its '$'-prefixed key in the registers dict."""
    return f"${num}"
class showUpdate(object):
    """Decorator that prints register and pc changes caused by the wrapped fn."""
    def __init__(self, f):
        self.f = f
    def __call__(self, *args, **kwargs):
        # Snapshot state so we can diff it after the instruction runs.
        oldRegisters = dict(registers)
        oldPc = int(pc)
        # BUG FIX: args/kwargs were silently dropped and the wrapped
        # function's return value discarded; both are now forwarded.
        result = self.f(*args, **kwargs)
        for key in registers.keys():
            if registers[key] != oldRegisters[key]:
                print(f"{key}: {oldRegisters[key]} -> {registers[key]}")
        if oldPc != pc:
            print(f"pc: {oldPc} -> {pc}")
        return result
# NOTE(review): every handler in this group (sra, xori, slti, xor, sllv,
# sll) is redefined further down in the file; those later definitions
# shadow these, so this block is effectively dead code.
@showUpdate
def sra():
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rt)] >> imm
@showUpdate
def xori():
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rt}, ${rs}, {imm}"
    registers[reg(rt)] = registers[reg(rs)] ^ imm
@showUpdate
def slti():
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rt}, ${rs}, {imm}"
    registers[reg(rt)] = 1 if registers[reg(rs)] < imm else 0
@showUpdate
def xor():
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rs)] ^ registers[reg(rt)]
@showUpdate
def sllv():
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rt)] << rs
@showUpdate
def sll():
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rt)] << shamt
@showUpdate
def add():
    """add rd, rs, rt: rd = rs + rt."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rs)] + registers[reg(rt)]
@showUpdate
def sub():
    """sub rd, rs, rt: rd = rs - rt."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = registers[reg(rs)] - registers[reg(rt)]
@showUpdate
def slt():
    """slt rd, rs, rt: rd = 1 if rs < rt else 0."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    registers[reg(rd)] = 1 if registers[reg(rs)] < registers[reg(rt)] else 0
@showUpdate
def addi():
    """addi rt, rs, imm: rt = rs + imm (immediate shown in hex when large)."""
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rt}, ${rs}, {imm}" if abs(imm) < 8192 else f"{instruction}, ${rt}, ${rs}, {hex(imm)}"
    registers[reg(rt)] = registers[reg(rs)] + imm
    pass
@showUpdate
def andi():
    """andi rt, rs, imm: rt = rs & imm."""
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rt}, ${rs}, {imm}" if abs(imm) < 8192 else f"{instruction}, ${rt}, ${rs}, {hex(imm)}"
    registers[reg(rt)] = registers[reg(rs)] & imm
@showUpdate
def lw():
    """lw rt, imm(rs): load the word at dataMemory[rs + imm] into rt."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rt}, {imm}(${rs})" if abs(imm) < 8192 else f"{instruction} ${rt}, {hex(imm)}(${rs})"
    # FIX: removed a dead local (`a = ...`) that duplicated this lookup.
    registers[reg(rt)] = dataMemory[(registers[reg(rs)]+imm)]
@showUpdate
def sw():
    """sw rt, imm(rs): store rt into dataMemory[rs + imm]."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rt}, {imm}(${rs})" if abs(imm) < 8192 else f"{instruction} ${rt}, {hex(imm)}(${rs})"
    dataMemory[registers[reg(rs)]+imm] = registers[reg(rt)]
@showUpdate
def div():
    """div rs, rt: lo gets the integer quotient, hi the remainder.

    BUG FIX: '/' stored a float quotient in lo; MIPS div stores the integer
    quotient truncated toward zero and the matching remainder. Also fixed
    the '%' typo in the formatted assembly ('$' register prefix).
    """
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rs}, ${rt}"
    quotient = int(registers[reg(rs)] / registers[reg(rt)])
    registers["lo"] = quotient
    registers["hi"] = registers[reg(rs)] - quotient * registers[reg(rt)]
@showUpdate
def mfhi():
    """mfhi rd: copy the hi register into rd."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}"
    registers[reg(rd)] = registers["hi"]
@showUpdate
def beq():
    """beq rs, rt, imm: branch when rs == rt.

    NOTE(review): a taken branch advances pc by (1 + imm) instructions while
    the not-taken path leaves pc untouched -- presumably the fetch loop does
    the normal increment in that case; confirm against the driver.
    """
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rs}, ${rt}, {imm}" if abs(imm) < 8192 else f"{instruction}, ${rs}, ${rt}, {hex(imm)}"
    if registers[reg(rs)] == registers[reg(rt)]:
        incrementPc()
        incrementPc(imm)
    else:
        pass
@showUpdate
def bne():
    """bne rs, rt, imm: branch when rs != rt (mirror image of beq)."""
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rs}, ${rt}, {imm}" if abs(imm) < 8192 else f"{instruction}, ${rs}, ${rt}, {hex(imm)}"
    if registers[reg(rs)] == registers[reg(rt)]:
        pass
    else:
        incrementPc()
        incrementPc(imm)
@showUpdate
def j():
    """j imm: jump to instruction index imm.

    NOTE(review): unlike the other handlers, no assembly_language string is
    produced for j -- likely an omission.
    """
    global assembly_language, registers
    setPc(imm)
@showUpdate
def jr():
    """jr rs: jump to the instruction index held in rs."""
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rs}"
    setPc(registers[reg(rs)])
@showUpdate
def jal():
    """jal: save the current instruction index in $31, then jump.

    NOTE(review): a J-type jal would jump to the encoded target (imm, as in
    j()), but this jumps via register rs like jr, and $31 gets the current
    rather than the next instruction index -- confirm against the decoder.
    """
    global assembly_language, registers
    registers[reg(31)] = int(pc / 4)
    setPc(registers[reg(rs)])
@showUpdate
def sra():
    """SRA rd, rt, shamt: arithmetic right shift of rt by shamt."""
    global assembly_language, registers
    assembly_language = f"{instruction}, ${rd}, ${rt}, {shamt}"
    # Fixed: the shift amount is the shamt field, not imm -- for an R-type
    # word imm also swallows the funct bits, so shifting by it was wrong and
    # disagreed with the text rendered above.
    registers[reg(rd)] = registers[reg(rt)] >> shamt
@showUpdate
def xori():
    # XORI rt, rs, imm: exclusive-or of rs with the immediate.
    # NOTE(review): the template renders load/store syntax "imm($rs)" instead
    # of "rt, rs, imm" -- looks copy-pasted from lw; confirm expected output.
    global assembly_language, registers
    assembly_language = f"{instruction} ${rt}, {imm}(${rs})" if abs(imm) < 8192 else f"{instruction} ${rt}, {hex(imm)}(${rs})"
    registers[reg(rt)] = registers[reg(rs)] ^ imm
@showUpdate
def slti():
    # SLTI rt, rs, imm: set rt to 1 when rs < imm, else 0.
    # NOTE(review): same copy-pasted "imm($rs)" rendering as xori -- confirm.
    global assembly_language, registers
    assembly_language = f"{instruction} ${rt}, {imm}(${rs})" if abs(imm) < 8192 else f"{instruction} ${rt}, {hex(imm)}(${rs})"
    registers[reg(rt)] = 1 if registers[reg(rs)] < imm else 0
@showUpdate
def xor():
    """XOR rd, rs, rt: bitwise exclusive-or of two registers."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    lhs = registers[reg(rs)]
    rhs = registers[reg(rt)]
    registers[reg(rd)] = lhs ^ rhs
@showUpdate
def sllv():
    """SLLV rd, rt, rs: shift rt left by the VALUE held in register rs."""
    global assembly_language, registers
    assembly_language = f"{instruction} ${rd}, ${rs}, ${rt}"
    # Fixed: the shift amount is the contents of register rs, not the raw
    # register NUMBER rs produced by the decoder.
    registers[reg(rd)] = registers[reg(rt)] << registers[reg(rs)]
@showUpdate
def sll():
    """SLL rd, rt, shamt: shift rt left by the constant shamt."""
    global assembly_language, registers
    # Fixed: sll's second operand is rt (which the computation below already
    # uses); the rendered text wrongly printed $rs.
    assembly_language = f"{instruction} ${rd}, ${rt}, ${shamt}"
    registers[reg(rd)] = registers[reg(rt)] << shamt
# Decoder state shared between machine_to_assembly() and the per-instruction
# handlers above -- they communicate exclusively through these globals.
i = ""                   # raw low bits of the current word (bit string)
rd = ""                  # destination-register field
rt = ""                  # target-register field
rs = ""                  # source-register field
op = ""                  # opcode field ("0b" + 6 bits)
shamt = ""               # shift-amount field
jumpi = ""               # J-type target field (currently unused by handlers)
assembly_language = ""   # rendered text for the current instruction
imm = 0                  # sign-extended immediate
instruction = ""         # mnemonic looked up from the opcode/funct tables
# This was stolen from StackOverflow
def twos_comp(val, bits):
    """Interpret *val* as a two's-complement integer of width *bits*."""
    sign_bit = 1 << (bits - 1)
    if val & sign_bit:
        # Sign bit set: fold the value down into the negative range.
        return val - (1 << bits)
    return val
def machine_to_assembly(machine_codes):
    """Disassemble a list of 32-bit machine words.

    Each decoded field is published through module-level globals; the handler
    named after the mnemonic then renders `assembly_language` (and updates the
    simulated registers/memory) as a side effect. Returns the list of rendered
    assembly strings, one per input word.
    """
    global i
    global rd
    global rt
    global rs
    global op
    global shamt
    global jumpi
    global assembly_language
    global imm
    global instruction
    assembly_languages = []
    for machine_code in machine_codes:
        machine_code_hex = hex(machine_code)  # Just for debugging
        # '#034b' -> "0b" + 32 binary digits, so every slice below is offset
        # by the two-character "0b" prefix.
        machine_code = format(machine_code, '#034b')
        i = machine_code[23:]          # low 11 chars: shamt+funct (R-type) / tail of imm (I-type)
        rd = str(machine_code[18:23])  # bits 16-20
        rt = str(machine_code[13:18])  # bits 11-15
        rs = str(machine_code[8:13])   # bits 6-10
        op = str(machine_code[0:8])    # "0b" + 6 opcode bits; if 0, instruction is r-type
        shamt = str(machine_code[-11:-6])
        # NOTE(review): [9:] yields 25 characters; a full 26-bit J-type target
        # would be [8:]. Harmless today because jumpi is never consumed.
        jumpi = str(machine_code[9:])
        r_type = True if op == "0b000000" else False
        if r_type:
            funct = "0b" + i[-6:]
        # I-type: rd + i re-forms the 16-bit immediate (rd is still a bit
        # string here; it is re-parsed as an int just below).
        imm = twos_comp(int(i, 2), 16) if r_type else twos_comp(int((rd + i), 2), 16)
        rd = int(rd, 2)
        rt = int(rt, 2)
        rs = int(rs, 2)
        shamt = int(shamt, 2)
        jumpi = int(jumpi, 2)
        instruction = instructions[op][funct] if r_type else instructions[op]
        # Dispatch to the module-level handler with the mnemonic's name.
        pythonInstruction = globals()[instruction]
        pythonInstruction()
        assembly_languages.append(assembly_language)
        pass
    return assembly_languages
# Driver: disassemble the two test programs and print the rendered assembly.
# NOTE(review): `givenTestCase` / `myTestCase` are defined elsewhere in this
# file; the bare `pass` in the first loop looks like a leftover -- confirm the
# print really belongs inside the loop body.
print("Given Test Case:")
for test in machine_to_assembly(givenTestCase):
    pass
    print("\t" + test)
print("My Test Case:")
for test in machine_to_assembly(myTestCase):
    print("\t" + test)
|
#!/usr/bin/env python
"""
The DXPServer allows people to store and retrieve dynamic instruction
frequency information for Python programs. It is hoped that by offering
this service to the Python community a large database of instruction count
frequencies can be accumulated for different versions of Python.
The DXPserver currently implements just a few methods:
add_dx_info(appname, email, pyversion, dxlist)
Register the dynamic instruction frequencies for a single
application run by a particular email address, using a particular
version of Python. There is no real useful return value unless
an error is detected.
appname: A non-empty string that identifies the application that
generated this instruction profile.
email: A valid email address (while this is logged, it will only be
used to contact the owner of a misbehaving client).
pyversion: A three-element tuple as returned by
sys.version_info[:3]. People running pre-2.0 versions of Python
will have to synthesize this from the first word of sys.version.
All three elements must be ints.
dxlist: A run-length encoded version of the list returned by
sys.getdxp(). You will only have this function available if you
compiled your Python interpreter with the DYNAMIC_EXECUTION_PROFILE
macro defined. You can choose to define DXPAIRS as well or not.
This method accepts either type of getdxp() output. The run-length
encoding is described below.
get_dx_info(pyversion)
Return the instruction profiling information that has been
accumulated for version pyversion. The format for pyversion is the
same as in add_dx_info. The return value is a dictionary with two
keys: 'counts' and 'pairs'. The value associated with the 'counts'
key is a run-length encoded list of opcode frequencies as would be
returned by rle(sys.getdxp()) without DXPAIRS defined. The value
associated with the 'pairs' key is a list of opcode frequencies as
would be returned by rle(sys.getdxp()) with DXPAIRS defined. If
there is no information recorded for one category or another
appropriate zero-filled lists are returned.
versions()
Return the version numbers for which this server has some
instruction counts.
usage()
Return detailed usage information.
synopsis()
Return brief usage information.
The input dxlist and the output returned by get_dx_info must be run-length
encoded. The algorithm is straightforward:
def rle(l):
newl = []
lastel = None
count = 0
for elt in l:
if elt == lastel:
count = count + 1
continue
elif lastel is not None:
if isinstance(lastel, types.ListType):
lastel = rle(lastel)
newl.append([lastel, count])
lastel = elt
count = 1
if isinstance(lastel, types.ListType):
lastel = rle(lastel)
newl.append([lastel, count])
return newl
Use the following to run-length encode sys.getdxp() output:
dxinfo = rle(sys.getdxp())
Decoding is similar:
def rld(l):
newl = []
for elt, count in l:
if isinstance(elt, types.ListType):
elt = rld(elt)
newl.extend([elt]*count)
return newl
dxinfo = rld(rpcserver.get_dx_info((1,5,2)))
Both rle() and rld() are included in the dxpserver.py module/script
available from <http://www.musi-cal.com/~skip/python/>.
You can use the atexit module to automatically transmit instruction counts
to the server at normal program termination:
def send_instruction_counts(appname, email):
if not hasattr(sys, 'getdxp'):
return
dxpserver = xmlrpclib.Server('http://manatee.mojam.com:7304')
dxpserver.add_dx_info(appname, email, sys.version_info[:3],
rle(sys.getdxp()))
import atexit
atexit.register(send_instruction_counts, 'myapp', 'me@some.where')
"""
import sys
import types
import os
import getopt
import shelve
from SimpleXMLRPCServer import SimpleXMLRPCServer as RPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler as RPCHandler
# Pick the shelve database location: the author's macOS box vs. the Linux host.
if os.path.exists("/Users/skip"):
    DXPAIRDB = "/Users/skip/misc/dxpair.db"
else:
    DXPAIRDB = "/home/skip/misc/dxpair.db"
class DB:
    """Thin wrapper around a `shelve` database.

    Adds a defaulting get(), delegates every other attribute to the
    underlying shelf, and closes the shelf when the wrapper is collected.
    """

    def __init__(self, file):
        self.db = shelve.open(file)

    def get(self, key, default=None):
        """Return the stored value for *key*, or *default* when absent."""
        # Fixed: used the long-deprecated dict.has_key(); `in` works on both
        # Python 2 and 3.
        if key in self.db:
            return self.db[key]
        else:
            return default

    def __getattr__(self, attr):
        # Fall through to the shelf for keys()/close()/item-assignment use.
        return getattr(self.db, attr)

    def __del__(self):
        # Best-effort close; callers that write also close explicitly.
        self.db.close()
class DXPServer(RPCServer):
    # XML-RPC server that can be restarted quickly: SO_REUSEADDR is set
    # before binding so the port is reusable while old sockets linger.
    def __init__(self, address):
        # Third argument False: presumably disables request logging -- matches
        # SimpleXMLRPCServer's logRequests parameter position; verify.
        RPCServer.__init__(self, address, RPCHandler, False)
    def server_bind(self):
        import socket
        # Allow rebinding the port while connections sit in TIME_WAIT.
        self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        self.socket.bind(self.server_address)
class DXPMethods:
    """Methods to collect dynamic execution info"""

    def __init__(self, dxpairdb):
        # Path of the shelve database holding all accumulated counts.
        self.dxpairdb = dxpairdb

    def synopsis(self, *args):
        # CAUTION: this docstring doubles as the RPC return value below --
        # editing it changes what clients see.
        """\
        usage:
        dxp.add_dx_info(appname, author, pyversion, dxlist)
        dxp.get_dx_info(pyversion)
        dxp.synopsis()
        dxp.usage()
        dxp.versions()
        For detailed instructions, execute the server's usage() method.
        """
        return DXPMethods.synopsis.__doc__

    def usage(self, *args):
        # The module docstring is the detailed help text.
        return __doc__

    def add_dx_info(self, appname=None, email=None, pyversion=None,
                    dxlist=None):
        # Merge one client's run-length-encoded opcode counts into the db.
        # NOTE: Python 2 type checks (types.StringType / ListType / IntType)
        # are intentional -- the whole server targets Python 2.
        # Invalid arguments are answered with usage text rather than a fault
        # so XML-RPC clients get a readable error.
        if not email or not isinstance(email, types.StringType):
            return ("Error: missing or invalid email.\n\n"+
                    self.synopsis())
        if not (pyversion and
                isinstance(pyversion, types.ListType) and
                len(pyversion) == 3 and
                map(type, pyversion) == [types.IntType]*3):
            return ("Error: missing or invalid version info.\n\n"+
                    self.synopsis())
        if not appname or not isinstance(appname, types.StringType):
            return ("Error: missing or invalid application name.\n\n"+
                    self.synopsis())
        pyversion = tuple(pyversion)
        db = DB(self.dxpairdb)
        # Track per-email submission counts; a blacklisted email
        # ("__valid" == 0, see invalidate_email) is accepted but ignored.
        emails = db.get("emails", {})
        thisemail = emails[email] = emails.get(email, {})
        if not thisemail.get("__valid", 1):
            return 'thanks'
        thisemail[appname] = thisemail.get(appname, 0) + 1
        db["emails"] = emails
        dxinfoversion = "dxinfo.%d.%d.%d" % pyversion
        dxlist = rld(dxlist)
        # Accumulate the plain 256-slot opcode counts (stored RLE-compressed).
        dxinfo = db.get(dxinfoversion)
        if dxinfo is None:
            dxinfo = [0] * 256
        else:
            dxinfo = rld(dxinfo)
        # With DXPAIRS compiled in, getdxp() yields 257 rows where row 256
        # holds the plain counts; otherwise the list IS the counts.
        if len(dxlist) == 257:
            dxcounts = dxlist[256]
        else:
            dxcounts = dxlist
        for i in range(256):
            dxinfo[i] = dxinfo[i] + dxcounts[i]
        db[dxinfoversion] = rle(dxinfo)
        # When pair data is present, also accumulate the 256x256
        # opcode-pair matrix.
        if len(dxlist) == 257:
            dxpairversion = "dxpair.%d.%d.%d" % pyversion
            dxpairinfo = db.get(dxpairversion)
            if dxpairinfo is None:
                dxpairinfo = []
                for i in range(256):
                    dxpairinfo.append([0]*256)
            else:
                dxpairinfo = rld(dxpairinfo)
            for i in range(256):
                for j in range(256):
                    dxpairinfo[i][j] = dxpairinfo[i][j] + dxlist[i][j]
            db[dxpairversion] = rle(dxpairinfo)
        db.close()
        return 'thanks'

    def get_dx_info(self, pyversion=None):
        # Return {'counts': ..., 'pairs': ...} for one Python version; both
        # values are run-length encoded, zero-filled when no data exists yet.
        if not (pyversion and
                isinstance(pyversion, types.ListType) and
                len(pyversion) == 3 and
                map(type, pyversion) == [types.IntType]*3):
            return ("Error: missing or invalid version info.\n\n"+
                    self.synopsis())
        pyversion = tuple(pyversion)
        dxinfoversion = "dxinfo.%d.%d.%d" % pyversion
        dxpairversion = "dxpair.%d.%d.%d" % pyversion
        db = DB(self.dxpairdb)
        dxinfo = db.get(dxinfoversion)
        dxpairinfo = db.get(dxpairversion)
        db.close()
        if dxinfo is None:
            dxinfo = rle([0]*256)
        if dxpairinfo is None:
            dxpairinfo = []
            for i in range(256):
                dxpairinfo.append([0]*256)
            dxpairinfo = rle(dxpairinfo)
        return {"counts": dxinfo, "pairs": dxpairinfo}

    def versions(self):
        # Every "dxinfo.X.Y.Z" key marks a version with recorded counts.
        db = DB(self.dxpairdb)
        keys = db.keys()
        v = []
        for key in keys:
            if key[:6] == "dxinfo":
                v.append(map(int, key.split(".")[1:]))
        return v

    def invalidate_email(self, email):
        # Blacklist a misbehaving submitter; later add_dx_info calls from
        # them are accepted but not recorded.
        db = DB(self.dxpairdb)
        emails = db.get("emails", {})
        thisemail = emails[email] = emails.get(email, {})
        thisemail["__valid"] = 0
        db["emails"] = emails
        return 0

    def _dispatch(self, method, params):
        # SimpleXMLRPCServer hook: route any RPC method name to the matching
        # attribute, or answer unknown names with usage text.
        if hasattr(self, method):
            return getattr(self, method)(*params)
        else:
            return ("Error: unrecognized method: %s.\n\n"%method+
                    self.synopsis())
def rle(l):
    """Run-length encode *l*: [x, x, y] -> [[x, 2], [y, 1]].

    Nested lists are themselves run-length encoded before being stored, which
    matches the rld() decoder in this module.
    """
    newl = []
    lastel = None
    count = 0
    for elt in l:
        if elt == lastel:
            count = count + 1
            continue
        elif lastel is not None:
            # Fixed idiom: isinstance(x, list) instead of the Python-2-only
            # alias types.ListType (the very same class object on Python 2),
            # making the function Python-3 clean as well.
            if isinstance(lastel, list):
                lastel = rle(lastel)
            newl.append([lastel, count])
        lastel = elt
        count = 1
    # Flush the final run.
    if isinstance(lastel, list):
        lastel = rle(lastel)
    newl.append([lastel, count])
    return newl
def rld(l):
    """Invert rle(): expand [[elt, count], ...] back into a flat list.

    List-valued elements are recursively decoded, mirroring rle()'s
    recursive encoding of nested lists.
    """
    newl = []
    for elt, count in l:
        # Fixed idiom: isinstance(x, list) instead of the Python-2-only
        # types.ListType alias (identical behaviour on Python 2).
        if isinstance(elt, list):
            elt = rld(elt)
        newl.extend([elt]*count)
    return newl
def main():
    """Start the DXP XML-RPC server on port 7304.

    The shelve database path defaults to DXPAIRDB and can be overridden
    with -d <path>.
    """
    dxpairdb = DXPAIRDB
    opts, args = getopt.getopt(sys.argv[1:], "d:")
    for opt, arg in opts:
        if opt == "-d":
            dxpairdb = arg
    # Bind on all interfaces; the port matches the one advertised in the
    # module docstring.
    dbserver = DXPServer(("", 7304))
    dbserver.register_instance(DXPMethods(dxpairdb))
    try:
        dbserver.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the server.
        raise SystemExit
    return
if __name__ == "__main__":
    main()
|
import math
from tkinter import *
# Main window plus the single-line display across the top.
root = Tk()
root.title("DEV's CALCULATOR")
input_field = Entry(root, bg="#9bd12e", fg="black", width=27, font=20, borderwidth=9, relief=SUNKEN)
input_field.grid(row=0, column=0, columnspan=5)
# Pending binary operator and its left-hand operand (set by operation(),
# consumed by equal()).
op = ""
var1 = 0
def display(number):
    # Append the pressed digit to the end of the entry widget.
    input_field.insert(END, number)
def operation(operator):  # FUNCTION WHEN ANY OPERATOR IS PRESS
    """Record a pending operator; unary operators are applied immediately.

    Binary operators (+ - * /) stash the current entry in var1 and wait for
    equal(); sqrt/square/factorial replace the display with their result.
    """
    global var1, op
    op = operator
    var1 = float(input_field.get())
    input_field.delete(0, END)
    if "sqrt" in operator:
        input_field.insert(0, math.sqrt(var1))
        var1 = 0
    elif "square" in operator:
        input_field.insert(0, math.pow(var1, 2))
        var1 = 0
    elif "factorial" in operator:
        x = 1
        # Fixed: the original `while var1 != 1` never terminated for 0,
        # negative, or fractional input. `> 1` yields the same product for
        # whole numbers and defines 0! as 1 instead of hanging the UI.
        while var1 > 1:
            x *= var1
            var1 -= 1
        input_field.insert(0, x)
        var1 = 0
def equal():  # FUNCTION WHEN EQUAL BUTTON IS PRESS
    """Apply the pending operator to var1 and the current entry."""
    global var1
    # Fixed: int() crashed whenever the display held a decimal value (e.g.
    # right after sqrt or a division); parse as float like operation() does.
    var2 = float(input_field.get())
    input_field.delete(0, END)
    if "+" in op:
        answer = var1 + var2
        input_field.insert(0, answer)
    elif "-" in op:
        answer = var1 - var2
        input_field.insert(0, answer)
    elif "*" in op:
        answer = var1 * var2
        input_field.insert(0, answer)
    elif "/" in op:
        # Division results are shown with three decimals.
        answer = "{:.3f}".format(var1 / var2)
        input_field.insert(0, answer)
    var1 = 0
def clear():
    # Wipe the whole display.
    input_field.delete(0, END)
# DEFINING THE BUTTONS
Button_1 = Button(root, text="1", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(1))
Button_2 = Button(root, text="2", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(2))
Button_3 = Button(root, text="3", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(3))
Button_4 = Button(root, text="4", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(4))
Button_5 = Button(root, text="5", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(5))
Button_6 = Button(root, text="6", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(6))
Button_7 = Button(root, text="7", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(7))
Button_8 = Button(root, text="8", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(8))
Button_9 = Button(root, text="9", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(9))
Button_0 = Button(root, text="0", bg="#0a0d0a", fg="white", padx=20, pady=15, font="bold", command=lambda: display(0))
Button_add = Button(root, text="+", bg="#14213d", fg="white", padx=20, pady=15, font="bold", command=lambda: operation("+"))
Button_sub = Button(root, text="-", bg="#14213d", fg="white", padx=22, pady=15, font="bold", command=lambda: operation("-"))
# Fixed: the multiply button had lost its label and sent operation(""),
# which matched no branch in equal() -- multiplication was dead.
Button_mul = Button(root, text="*", bg="#14213d", fg="white", padx=22, pady=15, font="bold", command=lambda: operation("*"))
Button_divide = Button(root, text="/", bg="#14213d", fg="white", padx=23, pady=15, font="bold", command=lambda: operation("/"))
Button_sqrt = Button(root, text="√x", bg="#14213d", fg="white", padx=15, pady=15, font="bold", command=lambda: operation("sqrt"))
Button_square = Button(root, text="x²", bg="#14213d", fg="white", padx=18, pady=15, font="bold", command=lambda: operation("square"))
Button_factorial = Button(root, text="x!", bg="#14213d", fg="white", padx=19, pady=15, font="bold", command=lambda: operation("factorial"))
Button_equal = Button(root, text="=", bg="#b1b3ba", fg="black", padx=55, pady=15, font=30, command=lambda: equal())
Button_clear = Button(root, text="Clear", bg="#b1b3ba", fg="black", padx=140, pady=10, font=25, command=clear)
# PUTTING THINGS(Buttons) ON SCREEN WITH GRID LAYOUT
# Rows 1-3: digits (phone-pad order); column 3: + - * /; column 4: unary ops;
# bottom rows: 0, =, Clear.
Button_1.grid(row=3, column=0)
Button_2.grid(row=3, column=1)
Button_3.grid(row=3, column=2)
Button_4.grid(row=2, column=0)
Button_5.grid(row=2, column=1)
Button_6.grid(row=2, column=2)
Button_7.grid(row=1, column=0)
Button_8.grid(row=1, column=1)
Button_9.grid(row=1, column=2)
Button_0.grid(row=4, column=0)
Button_add.grid(row=1, column=3)
Button_sub.grid(row=2, column=3)
Button_mul.grid(row=3, column=3)
Button_divide.grid(row=4, column=3)
Button_sqrt.grid(row=1, column=4)
Button_square.grid(row=2, column=4)
Button_factorial.grid(row=3, column=4)
Button_equal.grid(row=4, column=1, columnspan=2)
Button_clear.grid(row=5, column=0, columnspan=5)
# Hand control to Tk's event loop.
root.mainloop()
|
from spotipy import oauth2
from .config import base_url
# SECURITY: the client id/secret below are hard-coded and committed to the
# repository; they should be rotated and loaded from environment variables
# or a secrets store instead.
SPOTIPY_CLIENT_ID = "9dacc40b7cf6403289c13726ae7a6647"
SPOTIPY_CLIENT_SECRET = "27e24d7036974055813ea973024b3a0c"
# Spotify redirects back to <base_url>auth_callback after user consent.
SPOTIPY_REDIRECT_URI = base_url + "auth_callback"
SCOPE = 'user-library-read playlist-read-private ugc-image-upload user-read-playback-state user-read-email playlist-read-collaborative user-modify-playback-state user-read-private playlist-modify-public user-library-modify user-top-read user-read-playback-position user-read-currently-playing user-follow-read user-read-recently-played user-follow-modify'
# Shared OAuth helper used by the auth endpoints.
sp_oauth = oauth2.SpotifyOAuth(
    SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET,
    SPOTIPY_REDIRECT_URI, scope=SCOPE
)
import numpy as np
import math
class Vector2D():
    """A mutable 2-D vector with basic arithmetic helpers."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "x={}, y={}".format(self.x, self.y)

    def modul(self):
        """Return the Euclidean length of the vector."""
        return ((self.x) ** 2 + (self.y) ** 2) ** 0.5

    def zwieksz(self, x, y):
        """Grow the vector component-wise by (x, y)."""
        self.x += x
        self.y += y

    def zmniejsz(self, x, y):
        """Shrink the vector component-wise by (x, y)."""
        self.x -= x
        self.y -= y

    def mnozenie(self, a):
        """Scale both components by the scalar a."""
        self.x *= a
        self.y *= a

    def translacja(self):
        # Not implemented yet.
        pass

    def il_skal(self, x, y):
        """Dot product of this vector with (x, y)."""
        return self.x * x + self.y * y

    def obroc(self, A):
        """Return the components rotated by A degrees (does not mutate self)."""
        radians = A * (math.pi / 180)
        rotated_x = self.x * math.cos(radians) - self.y * math.sin(radians)
        rotated_y = self.x * math.sin(radians) + self.y * math.cos(radians)
        return rotated_x, rotated_y
|
from jarjar import jarjar
# Smoke tests for the jarjar notifier: each numbered call should produce one
# message in the configured channel.
# define a jarjar
jj = jarjar()
# Test 1: explicit channel override
res = jj.text('1', channel='@nolan')
# Test 2: unicode in the message body
res = jj.text(u'2 \ua000')
# Test 3: unicode in the attachment keys/values
res = jj.attach({u'Unicode \ua000': u'Unicode \ua000'}, message='3')
# decorator
@jj.decorate(message='4', attach={'exception?': 'no!'})
def works():
    """Test 4: decorated call that should succeed and still notify."""
    return
@jj.decorate(message='5', attach={'exception?': '*YES*'})
def doesnt_work():
    """Test 5: deliberately raises NameError so the decorator reports it."""
    # Intentional undefined name -- exercises jarjar's exception handling.
    jkfdskljfdkjl
    return
try:
    works()
    doesnt_work()
except Exception:
    # doesnt_work() raises on purpose; its notification has already been
    # sent by the decorator when the exception reaches us, so swallow it.
    pass
jj.text('*YOU SHOULD HAVE RECEIVED 5 TESTS*')
print('done')
|
# -*- coding: utf-8 -*-
from .tools import getUrl, loadHTML, re_, writeToFile
def main():
    """Prompt for a URL, scrape it, and write the extracted text to a file."""
    lineUrl = input("Введите URL: ")
    title = getUrl(str(lineUrl))
    # Fixed: compare to None with `is`, and bail out instead of continuing to
    # scrape a URL we just reported as unreachable.
    if title is None:
        print("URL not found")
        input("Done")
        return
    text = loadHTML(lineUrl)
    textFound = re_(text)
    writeToFile(textFound, lineUrl)
    input("Done")
if __name__ == '__main__':
    main()
|
"""
2 - ( modificar parametros): A qualquer momento posso abrir a tela que define
e modificar os parâmetros das janelas de corte.
""" |
#!/usr/bin/env python
# coding: utf-8
import gym
import time
import numpy as np
import office_control.envs as office_env
#import MC.EpsilonGreedy as MCE
#import TD.QLearning as QL
import NN.DQN as DQN
#import FA.QLearning_FA as LQL
from lib import plotting
#import envTest
import argparse
import os
def get_output_folder(parent_dir, env_name):
    """Return save folder.

    Assumes folders in the parent_dir have suffix -run{run number}. Finds the
    highest run number and sets the output folder to that number + 1. This is
    just convenient so that if you run the same script multiple times
    tensorboard can plot all of the results on the same plots with different
    names.

    Parameters
    ----------
    parent_dir: str
        Path of the directory containing all experiment runs.

    Returns
    -------
    parent_dir/run_dir
        Path to this run's save directory.
    """
    os.makedirs(parent_dir, exist_ok=True)
    experiment_id = 0
    for folder_name in os.listdir(parent_dir):
        if not os.path.isdir(os.path.join(parent_dir, folder_name)):
            continue
        try:
            folder_name = int(folder_name.split('-run')[-1])
            if folder_name > experiment_id:
                experiment_id = folder_name
        except ValueError:
            # Fixed: a bare `except` also swallowed KeyboardInterrupt and
            # real bugs; only a non-numeric suffix (a foreign folder) should
            # be skipped.
            pass
    experiment_id += 1
    parent_dir = os.path.join(parent_dir, env_name)
    parent_dir = parent_dir + '-run{}'.format(experiment_id)
    return parent_dir
def main():
    """Parse CLI arguments, build the office-control env, and train a DQN agent."""
    parser = argparse.ArgumentParser(description='Run Reinforcment Learning on Chamber')
    parser.add_argument('--env', default='office_control-v0', help='Office env name')
    parser.add_argument('-o', '--output', default='chamber-v0', help='Directory to save data to')
    parser.add_argument('--num', default=1, help='Number of Episodes')
    parser.add_argument('--df', default=0.95, help='Discount Factor')
    parser.add_argument('--alpha', default=0.5, help='Constant step-size parameter')
    parser.add_argument('--epsilon', default=0.9, help='Epsilon greedy policy')
    parser.add_argument('--epsilon_min', default=0.1, help='Smallest Epsilon that can get')
    parser.add_argument('--epsilon_decay', default=0.6, help='Epsilon decay after the number of episodes')
    parser.add_argument('--batch_size', default=32, help='Sampling batch size')
    parser.add_argument('--lr', default=0.01, help='Learning rate')
    args = parser.parse_args()
    # CLI-supplied values arrive as strings (no type= on the arguments),
    # hence the explicit float()/int() casts below.
    output = get_output_folder(args.output, args.env)
    print(output)
    # create environment
    env = gym.make(args.env)
    # (Commented-out alternatives: Monte-Carlo control, tabular Q-learning,
    # and linear function approximation from earlier experiments.)
    state_size = env.nS
    action_size = env.nA
    agent = DQN.DQNAgent(state_size, action_size, float(args.df), float(args.lr))
    #DQN.test_model(env, agent)
    stats, model = DQN.q_learning(env, agent, int(args.num), int(args.batch_size),
                                  float(args.epsilon), float(args.epsilon_min), float(args.epsilon_decay), output)
    #plotting.plot_episode_stats(stats, smoothing_window=1)
if __name__ == '__main__':
    main()
|
# 287. Find the Duplicate Number
# https://leetcode.com/problems/find-the-duplicate-number/
class Solution:
    def findDuplicate(self, nums: 'List[int]') -> 'int':
        """Return the single duplicated value in nums.

        nums holds n+1 integers in [1, n], so following i -> nums[i] forms a
        rho-shaped sequence; Floyd's tortoise-and-hare locates the cycle
        entrance, which is exactly the duplicated value. O(n) time, O(1)
        extra space, and nums is left unmodified.
        """
        # Phase 1: detect that a cycle exists (hare moves twice as fast).
        tortoise = nums[0]
        hare = nums[0]
        while True:
            tortoise = nums[tortoise]
            hare = nums[nums[hare]]
            if tortoise == hare:
                break  # loop exists
        # (Removed leftover debug prints of the meeting point.)
        # Phase 2: two pointers at equal speed meet at the cycle entrance,
        # i.e. the duplicate.
        pointer1 = nums[0]
        pointer2 = tortoise
        while pointer1 != pointer2:
            pointer1 = nums[pointer1]
            pointer2 = nums[pointer2]
        return pointer1
|
# This program takes two input files (a control file "zma_mature_700_701" and
# a treatment file "zma_mature_714"), finds the miRNA lines common to both,
# and writes each shared name with its occurrence count in each file.
with open('zma_mature_700_701', 'r') as file1:
    with open('zma_mature_714', 'r') as file2:
        # Lines present in both inputs (exact string match, newline included).
        same = set(file1).intersection(file2)
same.discard('\n')
same.discard('\n')  # NOTE(review): duplicate discard -- the second is a no-op
wordcount = {}
# Persist the shared lines so they can be re-read token-by-token below.
with open('some_output_file3.txt', 'w') as file_out:
    for line in same:
        file_out.write(line)
wordcount = {}
# NOTE(review): these three handles are never closed explicitly; consider
# context managers.
file_698 = open("zma_mature_700_701", "r+")
file_709 = open("zma_mature_714", "r+")
file_out = open("some_output_file3.txt", "r+")
# Count occurrences of each shared token in the control file (rewinding the
# control file after every token -- O(n*m), fine for small inputs).
for line1 in file_out.read().split():
    wordcount[line1] = 0
    for line2 in file_698.read().split():
        if (line1 == line2):
            wordcount[line1] += 1
    file_698.seek(0)
file_out.seek(0)
wordcount1 = {}
# Same counts against the treatment file.
for line1 in file_out.read().split():
    wordcount1[line1] = 0
    for line2 in file_709.read().split():
        if (line1 == line2):
            wordcount1[line1] += 1
    file_709.seek(0)
# Emit a tab-separated table: miRNA, control count, treatment count.
with open('common_zma_mature_700_701_714.txt', 'w') as file_comm:
    file_comm.write("miRNA"+"\t"+"SRR_700_701 count"+"\t"+"SRR_714 count"+"\n")
    for k1,v1 in wordcount.items():
        a=wordcount[k1]
        b=wordcount1[k1]
        file_comm.write(k1 + "\t" + str(a)+"\t"+str(b)+"\n")
|
from mininet.topo import Topo
class MyFirstTopo(Topo):
    """Simple topology example: h1,h2 -- s1 -- s2 -- h3,h4."""
    def __init__(self):
        "Create custom topo."
        # Init topo
        Topo.__init__(self)
        # Add hosts and switches
        h1 = self.addHost('h1')
        h2 = self.addHost('h2')
        h3 = self.addHost('h3')
        h4 = self.addHost('h4')
        leftSwitch = self.addSwitch('s1')
        rightSwitch = self.addSwitch('s2')
        # Add links: two hosts per switch, switches connected in a line.
        self.addLink(h1, leftSwitch)
        self.addLink(h2, leftSwitch)
        self.addLink(leftSwitch, rightSwitch)
        self.addLink(rightSwitch, h3)
        self.addLink(rightSwitch, h4)
# Registry read by Mininet's --custom loader: topo name -> factory.
topos = {'myfirsttopo': (lambda: MyFirstTopo())}
|
#!/usr/bin/python
from __future__ import division
import logging
import math
import signal
import sys
import time
from threading import Timer
import led_configs
from nuimo import Nuimo, NuimoDelegate
from sonos import SonosAPI
nuimo_sonos_controller = None
class NuimoSonosController(NuimoDelegate):
    """Bridges Nuimo input events to Sonos playback commands.

    Button press toggles play/pause, swipes and flies skip tracks, and wheel
    turns change the volume; each action is mirrored on the Nuimo LED matrix.
    """

    def __init__(self, bled_com, nuimo_mac):
        NuimoDelegate.__init__(self)
        self.nuimo = Nuimo(bled_com, nuimo_mac, self)
        self.sonos = SonosAPI()
        self.default_led_timeout = 3  # seconds an LED matrix stays lit
        self.max_volume = 42  # should be dividable by 7
        self.volume_bucket_size = int(self.max_volume / 7)
        self.last_vol_matrix = None  # last volume matrix shown, to avoid redraws
        self.vol_reset_timer = None  # pending timer that clears last_vol_matrix
        self.stop_pending = False

    def start(self):
        """Connect and block until stop() is called, then tear everything down."""
        self.nuimo.connect()
        while not self.stop_pending:
            time.sleep(0.1)
        self.sonos.disconnect()
        self.nuimo.disconnect()
        self.nuimo.terminate()

    def stop(self):
        # Signal the start() loop to exit; called from the signal handlers.
        self.stop_pending = True

    def on_button(self):
        # Toggle play/pause and mirror the new state on the LEDs.
        if self.sonos.is_playing():
            self.sonos.pause()
            self.nuimo.display_led_matrix(led_configs.pause, self.default_led_timeout)
        else:
            self.sonos.play()
            self.nuimo.display_led_matrix(led_configs.play, self.default_led_timeout)

    def on_swipe_right(self):
        self.sonos.next()
        self.nuimo.display_led_matrix(led_configs.next, self.default_led_timeout)

    def on_swipe_left(self):
        self.sonos.prev()
        self.nuimo.display_led_matrix(led_configs.previous, self.default_led_timeout)

    def on_fly_right(self):
        # Fly gestures behave like swipes.
        self.on_swipe_right()

    def on_fly_left(self):
        self.on_swipe_left()

    def on_wheel_right(self, value):
        self.sonos.vol_up(self._calculate_volume_delta(value))
        self._show_volume()

    def on_wheel_left(self, value):
        self.sonos.vol_down(self._calculate_volume_delta(value))
        self._show_volume()

    def on_connect(self):
        self.nuimo.display_led_matrix(led_configs.default, self.default_led_timeout)

    def _calculate_volume_delta(self, value):
        # Faster wheel turns give bigger volume steps, capped at 5.
        return min(value / 20 + 1, 5)

    def _show_volume(self):
        """Display the current volume as one of the vol0..vol7 LED matrices."""
        volume = self.sonos.get_volume()
        if volume is None: volume = 0
        bucket = min(int(math.ceil(volume / self.volume_bucket_size)), 7)
        matrix = getattr(led_configs, 'vol' + str(bucket))
        if matrix != self.last_vol_matrix:
            self.last_vol_matrix = matrix
            self.nuimo.display_led_matrix(matrix, self.default_led_timeout)
        if self.vol_reset_timer is not None:
            self.vol_reset_timer.cancel()
        # Fixed: Timer(...).start() returns None, so the original stored None
        # and the cancel() above could never fire -- keep the Timer object.
        timer = Timer(self.default_led_timeout + 1, self._reset_vol)
        timer.start()
        self.vol_reset_timer = timer

    def _reset_vol(self):
        # Forget the cached matrix so the next wheel event redraws it.
        self.last_vol_matrix = None
        self.vol_reset_timer = None
def signal_term_handler(signal, frame):
    # Service stop (SIGTERM): ask the controller loop to exit cleanly.
    logging.info('Received SIGTERM signal!')
    nuimo_sonos_controller.stop()
def signal_int_handler(signal, frame):
    # Ctrl-C (SIGINT): same clean shutdown path.
    logging.info('Received SIGINT signal. This makes Panda sad! :(')
    nuimo_sonos_controller.stop()
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format='%(message)s')
    # Shut down cleanly on service stop (SIGTERM) or Ctrl-C (SIGINT).
    signal.signal(signal.SIGTERM, signal_term_handler)
    signal.signal(signal.SIGINT, signal_int_handler)
    # argv: <bled_com> <nuimo_mac> (see NuimoSonosController.__init__).
    if len(sys.argv) != 3:
        raise RuntimeError('Invalid number of arguments')
    com = sys.argv[1]
    mac = sys.argv[2]
    nuimo_sonos_controller = NuimoSonosController(com, mac)
    # Blocks until a signal triggers stop().
    nuimo_sonos_controller.start()
|
import torch
from tqdm.notebook import tqdm
from utils import highlight, erase, binary_metric
import torch.nn.functional as F
def dice_loss(pred, label):
    """Soft multi-class Dice loss, summed over classes.

    pred: raw logits of shape (N, C); label: integer class ids of shape (N,).
    Negative label ids (e.g. the -100 padding marker) are clamped to class 0
    before one-hot encoding.
    """
    eps = 1e-3
    probs = F.softmax(pred, dim=1)
    target = F.one_hot(label.masked_fill(label < 0, 0), num_classes=pred.shape[1])
    intersection = torch.sum(probs * target, 0)
    denom = torch.sum(probs, 0) + torch.sum(target, 0)
    per_class = 1.0 - (2.0 * intersection + eps) / (denom + eps)
    return torch.sum(per_class)
def tokenClassificationTrainStep(model, optimizer, clip, src, labels, attention_mask = None):
    """One optimizer step for token classification.

    Returns ({'loss': float, 'metric': tensor}, logits).
    """
    optimizer.zero_grad()
    # Longformer handles the whole sequence at once; other encoders are
    # capped at 512 tokens, so run the two halves and concatenate the logits.
    if 'longformer' in str(type(model)):
        logits = model(src, attention_mask = attention_mask).logits
    else:
        logits = torch.cat((model(src[:, :512], attention_mask = attention_mask[:, :512]).logits, model(src[:, 512:], attention_mask = attention_mask[:, 512:]).logits), dim = 1)
    # NOTE(review): `counts` (per-class frequencies) is unused since the
    # criterion was switched from weighted CrossEntropyLoss to dice_loss.
    counts = torch.unique(labels.masked_select(attention_mask.bool()), return_counts = True)[1] if attention_mask is not None else torch.unique(labels, return_counts = True)[1]
    criterion = dice_loss  # previously: class-weighted torch.nn.CrossEntropyLoss
    if attention_mask is not None:
        # Only tokens under the attention mask contribute; padded positions
        # get label -100 (clamped inside dice_loss; CE's ignore_index).
        active_loss = attention_mask.view(-1) == 1
        active_logits = logits.view(-1, model.num_labels)
        active_labels = torch.where(
            active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)  # -100 criterion.ignore_index for ce loss
        )
        loss = criterion(active_logits, active_labels)
    else:
        loss = criterion(logits.view(-1, model.num_labels), labels.view(-1))
    score = binary_metric(logits, labels)
    loss.backward()
    # Gradient clipping keeps the update stable.
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    optimizer.step()
    return {'loss':loss.item(), 'metric':score}, logits
def tokenClassificationEvalStep(model, src, labels, attention_mask = None):
    """Forward-only twin of tokenClassificationTrainStep.

    Returns ({'loss': float, 'metric': tensor}, logits); no parameter update.
    """
    # Longformer takes the full sequence; other encoders are capped at 512
    # tokens, so score the two halves separately and concatenate.
    if 'longformer' in str(type(model)):
        logits = model(src, attention_mask = attention_mask).logits
    else:
        logits = torch.cat((model(src[:, :512], attention_mask = attention_mask[:, :512]).logits, model(src[:, 512:], attention_mask = attention_mask[:, 512:]).logits), dim = 1)
    # NOTE(review): `counts` is unused since the criterion became dice_loss.
    counts = torch.unique(labels.masked_select(attention_mask.bool()), return_counts = True)[1] if attention_mask is not None else torch.unique(labels, return_counts = True)[1]
    criterion = dice_loss  # previously: class-weighted torch.nn.CrossEntropyLoss
    if attention_mask is not None:
        # Mask out padding: padded positions receive label -100.
        active_loss = attention_mask.view(-1) == 1
        active_logits = logits.view(-1, model.num_labels)
        active_labels = torch.where(
            active_loss, labels.view(-1), torch.tensor(-100).type_as(labels)
        )
        loss = criterion(active_logits, active_labels)
    else:
        loss = criterion(logits.view(-1, model.num_labels), labels.view(-1))
    score = binary_metric(logits, labels)
    return {'loss':loss.item(), 'metric':score}, logits
def conditionalGenerationTrainStep(model, optimizer, clip, src, trg):
    """One optimizer step for seq2seq generation with teacher forcing.

    Returns {'loss': float}.
    """
    optimizer.zero_grad()
    # Teacher forcing: feed trg[:-1] as decoder input, predict trg[1:].
    output = model(src, decoder_input_ids = trg[:,:-1])[0]
    #output = [batch size, trg len - 1, output dim]
    #trg = [batch size, trg len]
    output_dim = output.shape[-1]
    output = output.contiguous().view(-1, output_dim)
    trg = trg[:,1:].contiguous().view(-1)
    #output = [batch size * trg len - 1, output dim]
    #trg = [batch size * trg len - 1]
    # ignore_index=1: presumably the tokenizer's pad id -- TODO confirm.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=1)
    loss = criterion(output, trg)
    loss.backward()
    torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
    optimizer.step()
    return {'loss':loss.item()}
def conditionalGenerationEvalStep(model, src, trg):
    """Forward-only twin of conditionalGenerationTrainStep; returns {'loss': float}."""
    # Teacher forcing: feed trg[:-1] as decoder input, score against trg[1:].
    output = model(src, decoder_input_ids = trg[:,:-1])[0]
    #output = [batch size, trg len - 1, output dim]
    #trg = [batch size, trg len]
    output_dim = output.shape[-1]
    output = output.contiguous().view(-1, output_dim)
    trg = trg[:,1:].contiguous().view(-1)
    #output = [batch size * trg len - 1, output dim]
    #trg = [batch size * trg len - 1]
    # ignore_index=1: presumably the tokenizer's pad id -- TODO confirm.
    criterion = torch.nn.CrossEntropyLoss(ignore_index=1)
    loss = criterion(output, trg)
    return {'loss':loss.item()}
def train(iterator, clip, h = None, optH = None, w = None, optW = None, connection = 0.5, tuning = False):
    """Run one training epoch over `iterator`.

    h: highlighter (token-classification) model with optimizer optH;
    w: writer (conditional-generation) model with optimizer optW;
    connection: probability of feeding w with h's predicted mask rather than
    the gold mask; tuning=True freezes h (eval mode, no updates) while w
    trains on top of it.
    Returns a dict of per-epoch mean losses/metrics.
    """
    epoch_loss = {}
    if h is not None:
        if not tuning:
            h.train()
        else:
            h.eval()
        epoch_loss['h'] = 0
        epoch_loss['metric'] = torch.zeros(4)
        device = h.device
    if w is not None:
        w.train()
        epoch_loss['w'] = 0
        # When both models are given, w.device wins (both are expected to
        # live on the same device).
        device = w.device
    for _, batch in enumerate(iterator):
        src = batch['article_ids'].to(device)
        trg = batch['highlights_ids'].to(device)
        article_attention_mask = batch['article_attention_mask'].to(device)
        # Gold token mask: which article tokens appear in the highlights.
        h_mask = highlight(src, trg)
        if 'h' in epoch_loss:
            if tuning and 'w' in epoch_loss:
                with torch.no_grad():
                    outputs, preds = tokenClassificationEvalStep(h, src, h_mask, article_attention_mask)
            else:
                outputs, preds = tokenClassificationTrainStep(h, optH, clip, src, h_mask, article_attention_mask)
            epoch_loss['h'] += outputs['loss']
            epoch_loss['metric'] += outputs['metric']
        if 'w' in epoch_loss:
            # With probability `connection`, erase using h's predictions
            # (coupling the two models); otherwise use the gold mask.
            src_erased = erase(src, torch.logical_and(preds.argmax(2), article_attention_mask)).to(device) if 'h' in epoch_loss and torch.rand(1) < connection else erase(src, h_mask).to(device)
            outputs = conditionalGenerationTrainStep(w, optW, clip, src_erased, trg)
            epoch_loss['w'] += outputs['loss']
    return {key:value/len(iterator) for key, value in epoch_loss.items()}
def evaluate(iterator, h = None, w = None, connection = 0.5):
    """Evaluate one epoch without gradient updates (mirrors train()).

    h: token-classification model; w: conditional-generation model.
    connection: probability of feeding w an erasure based on h's predictions
        instead of the gold highlight mask.
    Returns per-epoch average losses (and h's 4-element 'metric').
    """
    epoch_loss = {}
    if h is not None:
        h.eval()
        epoch_loss['h'] = 0
        epoch_loss['metric'] = torch.zeros(4)
        device = h.device
    if w is not None:
        w.eval()
        epoch_loss['w'] = 0
        # NOTE: if both models are given, w.device wins — assumes same device.
        device = w.device
    with torch.no_grad():
        for _, batch in enumerate(iterator):
            src = batch['article_ids'].to(device)
            trg = batch['highlights_ids'].to(device)
            article_attention_mask = batch['article_attention_mask'].to(device)
            # Gold token-level highlight mask.
            h_mask = highlight(src, trg)
            if 'h' in epoch_loss:
                outputs, preds = tokenClassificationEvalStep(h, src, h_mask, article_attention_mask)
                epoch_loss['h'] += outputs['loss']
                epoch_loss['metric'] += outputs['metric']
            if 'w' in epoch_loss:
                # With probability `connection`, erase using h's predictions
                # (restricted to real tokens); otherwise use the gold mask.
                src_erased = erase(src, torch.logical_and(preds.argmax(2), article_attention_mask)).to(device) if 'h' in epoch_loss and torch.rand(1) < connection else erase(src, h_mask).to(device)
                outputs = conditionalGenerationEvalStep(w, src_erased, trg)
                epoch_loss['w'] += outputs['loss']
    return {key:value/len(iterator) for key, value in epoch_loss.items()}
|
# Simple interactive TCP chat client.
#
# Connects to a server on the local host, then repeatedly prompts the user
# for a message, sends it, and prints the server's reply. Typing "end"
# closes the connection and exits.
#
# Fixed: the original used Python 2 constructs (`raw_input`, `print data`)
# and passed a str to socket.send(), which requires bytes on Python 3.
import socket          # Import socket module

s = socket.socket()
host = socket.gethostname()  # connect to a server on this same machine
port = 8667                  # Reserve a port for your service.
s.connect((host, port))

while True:
    message = input("Enter message: ")
    if message == "end":
        s.close()
        break
    s.send(message.encode())   # sockets transmit bytes, not str
    data = s.recv(1024)
    print(data.decode())
|
# Generated by Django 2.2.2 on 2019-09-18 07:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering two fields on the `accident` model."""

    dependencies = [
        ('type_data', '0008_accident'),
    ]

    operations = [
        # death_toll becomes an integer-valued decimal (0 decimal places, up to 6 digits).
        migrations.AlterField(
            model_name='accident',
            name='death_toll',
            field=models.DecimalField(decimal_places=0, max_digits=6, verbose_name='死亡人数'),
        ),
        # occurrence_time becomes a date-only field (no time component).
        migrations.AlterField(
            model_name='accident',
            name='occurrence_time',
            field=models.DateField(verbose_name='事故时间'),
        ),
    ]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Testing the different dict implementations in `gl.utils.dicts`.
"""
from __future__ import (
division, print_function, absolute_import, unicode_literals)
# Third party libraries.
import pytest
# DNV GL libraries.
from dnvgl.framework.dict import SortedDict, CaseInsensitiveDict
# Module metadata populated from version-control keyword expansion.
# ID: $Id$"
__date__ = "$Date$"[6:-1]
__scm_version__ = "$Revision$"[10:-1]
__author__ = "`Berthold Höllmann <berthold.hoellmann@dnvgl.com>`__"
__copyright__ = "Copyright © 2015 by DNV GL SE"
class TestSortedDict(object):
    """SortedDict must iterate items in key order regardless of insertion order."""

    def test_outp(self):
        sample = SortedDict(c=3)
        sample['b'] = 2
        sample['a'] = 1
        expected = [('a', 1), ('b', 2), ('c', 3)]
        assert list(sample.items()) == expected
class TestCaseInsensitiveDict(object):
    """CaseInsensitiveDict must fold keys to lower case on every operation."""

    def test_init_1(self):
        assert CaseInsensitiveDict(A=1, B=2) == {'a': 1, 'b': 2}

    @pytest.fixture
    def dict_data(self):
        # Mixed-case constructor inputs; the keyword 'A' overrides the dict's 'a'.
        return CaseInsensitiveDict({'a': 1, 'B': 2}, d=3, E=4, A=5)

    def test_init_2(self, dict_data):
        assert dict_data == {'a': 5, 'b': 2, 'd': 3, 'e': 4}

    def test_init_3(self):
        pairs = (('A', 5), ('b', 2), ('D', 3), ('e', 4))
        assert CaseInsensitiveDict(pairs) == {'a': 5, 'b': 2, 'd': 3, 'e': 4}

    def test_getitem_1(self, dict_data):
        assert dict_data['A'] == 5

    def test_getitem_2(self, dict_data):
        assert dict_data['e'] == 4

    def test_getitem_3(self, dict_data):
        assert dict_data['d'] == 3

    def test_update_1(self, dict_data):
        dict_data.update({'A': 1, 'B': 22})
        assert dict_data == {'a': 1, 'b': 22, 'd': 3, 'e': 4}

    def test_update_2(self, dict_data):
        dict_data.update((('A', 1), ('B', 22)))
        assert dict_data == {'a': 1, 'b': 22, 'd': 3, 'e': 4}
# Local Variables:
# mode: python
# compile-command: "cd ../../..;python setup.py test"
# End:
|
for i in range (100,0,-1):
print(i, "bottles of cola on the wall, take one down, pass it around,", i-1,"bottles of cola on the wall!")
|
from datetime import datetime
class Spy:
    """A chat participant with profile details and presence state."""

    def __init__(self, name, salutation, age, rating):
        # Profile details supplied by the caller.
        self.name = name
        self.salutation = salutation
        self.age = age
        self.rating = rating
        # Presence defaults: a freshly created spy is online, has no chat
        # history and no status message yet.
        self.is_online = True
        self.chats = []
        self.current_status_message = None
class chatmessage:
    """A single chat message, timestamped at creation time."""

    def __init__(self, message, sent_by_me):
        self.message = message
        # Record when the message object was created.
        self.time = datetime.now()
        self.sent_by_me = sent_by_me
class colors:
    """ANSI terminal escape codes for coloured / styled console output."""
    PINK = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    ORANGE = '\033[91m'  # NOTE(review): 91 is bright red, not orange — confirm intent
    BLACK = '\033[0m'    # NOTE(review): 0 is the "reset" code, not black — confirm intent
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    RED='\033[31m'
# Module-level demo data: the user's own spy profile plus three friends.
spy=Spy('bond','Mr',20,4.8)
friend_one=Spy('arpit','MR',20,4.0)
friend_two=Spy('raja','MR',20,4.0)
friend_three=Spy('vivek','MR',21,4.8)
friends=[friend_one,friend_two,friend_three]
|
# USAGE
# python motion_detector.py
# python motion_detector.py --video videos/example_01.mp4
# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import yagmail
import os
import subprocess
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    vs = VideoStream(src=0).start()
    time.sleep(2.0)  # give the camera sensor time to warm up

# otherwise, we are reading from a video file
else:
    vs = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
firstFrame = None   # background reference frame for motion differencing
frames_array = []   # frames captured while motion was detected
# firstFrame = cv2.imread('2.png', 0)
# firstFrame = imutils.resize(firstFrame, width=500)
# firstFrame = cv2.GaussianBlur(firstFrame, (21, 21), 0)
lastUploaded = datetime.datetime.now()  # last time a motion event was recorded
motionCounter = 0
writer = None
(h, w) = (None, None)  # output frame size, filled in once motion is seen

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
# out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    frame = vs.read()
    # VideoCapture.read() returns (ret, frame); VideoStream.read() returns frame.
    frame = frame if args.get("video", None) is None else frame[1]
    text = "Unoccupied"
    timestamp = datetime.datetime.now()

    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if frame is None:
        break

    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # if the first frame is None, initialize it
    if firstFrame is None:
        firstFrame = gray
        continue

    # compute the absolute difference between the current frame and
    # first frame
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 100, 255, cv2.THRESH_BINARY)[1]

    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # loop over the contours
    for c in cnts:
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < args["min_area"]:
            continue

        # compute the bounding box for the contour, draw it on the frame,
        # and update the text
        (x, y, w, h) = cv2.boundingRect(c)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        text = "Occupied"

    if text == "Occupied":
        # check to see if enough time has passed between uploads
        if (timestamp - lastUploaded).seconds >= 4:
            # increment the motion counter
            motionCounter += 1
            print("inside occupied",motionCounter)
            # write the flipped frame
            # frame = imutils.resize(frame, width=300)
            # Remember the frame size and keep the frame for the output clip.
            (h, w) = frame.shape[:2]
            frames_array.append(frame)
            # check to see if the number of frames with consistent motion is
            # high enough
            if motionCounter >= 4:
                # check to see if dropbox sohuld be used
                # write the image to temporary file
                cv2.imwrite('messigray.png',frame)
                print("limit exceeded",motionCounter)
                lastUploaded = timestamp
                motionCounter = 0
                # out.release()
            # update the last uploaded timestamp and reset the motion
            # counter
    # otherwise, the room is not occupied
    else:
        motionCounter = 0

    # draw the text and timestamp on the frame
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the lop
    if key == ord("q"):
        break
# os.system('spd-say "Suspicious activity detected"')
#subprocess.call(['/usr/bin/snap/vlc', ""])
# Write every captured motion frame out as a single video clip.
# NOTE(review): if no motion was ever detected, (w, h) are still None here
# and cv2.VideoWriter will fail — confirm that is acceptable.
writer = cv2.VideoWriter("custom.avi", fourcc, 20,(w , h ), True)
key = cv2.waitKey(1) & 0xFF
print(frames_array)
for i in range(len(frames_array)):
    # writing to a image array
    writer.write(frames_array[i])
writer.release()

# cleanup the camera and close any open windows
vs.stop() if args.get("video", None) is None else vs.release()
cv2.destroyAllWindows()
# Side length of the square fuel-cell grid.
max_coor = 300


def calculate_grid(serial):
    """Build the max_coor x max_coor grid of per-cell power levels for `serial`.

    Outer index is the column, inner index the row: grid[col][row].
    """
    return [
        [calculate_power(col, row, serial) for row in range(max_coor)]
        for col in range(max_coor)
    ]
def calculate_power(x, y, serial):
    """Power level of the fuel cell at (x, y) for the given serial number.

    Computed as the hundreds digit of ((x + 10) * y + serial) * (x + 10),
    minus 5.
    """
    rack = x + 10
    level = (rack * y + serial) * rack
    hundreds_digit = (level // 100) % 10
    return hundreds_digit - 5
def find_subgrid(grid, size):
    """Return (power, (col, row)) of the size x size subgrid with maximal total power.

    Fixes over the original:
    - the grid dimension is derived from len(grid) instead of the module-level
      `max_coor`, so any square grid works;
    - max_power started at 0, so an all-negative grid returned (0, None);
      a None sentinel now guarantees the true maximum (and a position) is
      returned whenever at least one subgrid exists.
    """
    dim = len(grid)
    max_power = None
    pos = None
    for col in range(dim - size + 1):
        for row in range(dim - size + 1):
            p = sum(grid[col + i][row + j]
                    for i in range(size) for j in range(size))
            if max_power is None or p > max_power:
                max_power = p
                pos = (col, row)
    return max_power, pos
def subgroups(grid):
    """Find the square subgrid of any size with the largest total power.

    Uses incremental expansion: the sum of each (size-1) square is reused to
    compute the corresponding `size` square by adding the new bottom row and
    right column. Returns (top-left (col, row), size) of the best square
    among sizes 2..299 (the 1x1 sums only seed the memo and are not
    considered for the maximum). Assumes a 300x300 grid (`max_coor`).
    """
    # memo[col][row]: sum of the current (size-1)x(size-1) square whose
    # top-left corner is (col, row); initialised with the 1x1 cell values.
    memo = {col_no: {row_no: row for row_no, row in enumerate(col)} for col_no, col in enumerate(grid)}
    max_size = 0
    max_pos = None
    max_power = 0
    for size in range(2, 300):
        new_memo = {}
        for col in range(0, max_coor - size + 1):
            new_memo[col] = {}
            for row in range(0, max_coor - size + 1):
                # Grow the (size-1) square to `size`: add the new bottom row
                # (size cells) and the new right column (size-1 cells — the
                # shared corner is counted only once).
                new_power = memo[col][row]
                for i in range(col, col + size):
                    new_power += grid[i][row + size - 1]
                for j in range(row, row + size - 1):
                    new_power += grid[col + size - 1][j]
                new_memo[col][row] = new_power
                if new_power > max_power:
                    max_power = new_power
                    max_pos = (col, row)
                    max_size = size
        memo = new_memo
    return max_pos, max_size
# Build the grid for this serial number and report the best square of any
# size (looks like Advent of Code 2018 day 11 part 2 — confirm).
grid = calculate_grid(4842)
print(subgroups(grid))
|
import json
import pathlib
import os
import sys
import re
import difflib
import colorama
import typing
from ansible_collections.nhsd.apigee.plugins.module_utils.models.manifest import meta
from ansible_collections.nhsd.apigee.plugins.module_utils.models.manifest.manifest import Manifest
# Semantic version strings: major.minor.patch with a non-zero-padded major.
SCHEMA_VERSION_REGEX = re.compile(r"^([1-9][0-9]*)\.([0-9]+)\.([0-9]+)$")


class SchemaString:
    """A totally-ordered semantic version triple (major.minor.patch).

    Construct either from a "X.Y.Z" string or from three integers.
    """

    def __init__(self, schema_or_major: typing.Union[str, int], minor=None, patch=None):
        if isinstance(schema_or_major, str):
            schema_version = schema_or_major
            match = re.match(SCHEMA_VERSION_REGEX, schema_version)
            if not match:
                raise ValueError(f"Invalid SchemaString {schema_version}")
            self.major, self.minor, self.patch = [int(match.group(x)) for x in range(1, 4)]
        elif isinstance(schema_or_major, int):
            # BUG FIX: the original condition was
            #   `not isinstance(minor, int) and isinstance(patch, int)`
            # which, by operator precedence, only raised when minor was NOT
            # an int but patch WAS — e.g. SchemaString(1) slipped through
            # with minor=patch=None. Both components must be ints.
            if not (isinstance(minor, int) and isinstance(patch, int)):
                raise ValueError("SchemaString.__init__ requires major, minor, patch")
            self.major = schema_or_major
            self.minor = minor
            self.patch = patch
        else:
            raise TypeError("Invalid arguments to SchemaString.__init__")

    def __str__(self):
        return f"{self.major}.{self.minor}.{self.patch}"

    def __eq__(self, other):
        return (
            self.major == other.major
            and self.minor == other.minor
            and self.patch == other.patch
        )

    def __lt__(self, other):
        # Lexicographic comparison on (major, minor, patch).
        return (
            self.major < other.major
            or (self.major == other.major and self.minor < other.minor)
            or (self.major == other.major and self.minor == other.minor and self.patch < other.patch)
        )

    def __le__(self, other):
        return self < other or self == other

    def __gt__(self, other):
        return not self <= other

    def __ge__(self, other):
        return not self < other

    def valid_increments(self):
        """The three legal successors: major, minor or patch bump."""
        return [
            SchemaString(self.major + 1, 0, 0),
            SchemaString(self.major, self.minor + 1, 0),
            SchemaString(self.major, self.minor, self.patch + 1)
        ]
def main():
    """Interactively bump the manifest JSON schema snapshot.

    Finds the newest versioned schema file on disk, diffs it against the
    schema currently generated from the Manifest model, verifies that
    meta.SCHEMA_VERSION is a legal increment, shows the coloured diff, and
    after a y/n confirmation writes the new v<version>.json snapshot.

    Raises ValueError when there is no difference or the increment is invalid.
    """
    UPDATED_SCHEMA_VERSION = SchemaString(meta.SCHEMA_VERSION)
    script_dir = pathlib.Path(__file__).parent
    # Directory holding the versioned schema snapshots (v<semver>.json).
    relative_schema_dir = script_dir.joinpath(
        "../tests/unit/plugins/module_utils/models/manifest/schema_versions/")
    schema_dir_glob = relative_schema_dir.glob("v*.json")
    SCHEMA_VERSION = SchemaString("1.0.0")
    SCHEMA_FILE_NAME_PATTERN = re.compile(r"v([1-9][0-9]*\.[0-9]+\.[0-9]+)\.json$")
    # Find the highest schema version currently on disk.
    for schema_file in schema_dir_glob:
        match = re.match(SCHEMA_FILE_NAME_PATTERN, schema_file.name)
        if not match:
            # FIX: the original crashed with AttributeError on any stray
            # file matching the glob but not the strict version pattern.
            continue
        schema_version = SchemaString(match.group(1))
        if schema_version >= SCHEMA_VERSION:
            SCHEMA_VERSION = schema_version
    new_schema = Manifest.schema_json(indent=2)
    # Re-serialize the stored schema so both sides use identical formatting.
    with open(relative_schema_dir.joinpath(f"v{SCHEMA_VERSION}.json")) as f:
        last_schema = json.dumps(json.load(f), indent=2)
    deltas = list(difflib.unified_diff(
        last_schema.split("\n"),
        new_schema.split("\n"),
        fromfile=str(SCHEMA_VERSION),
        tofile=str(UPDATED_SCHEMA_VERSION),
    ))
    if not deltas:
        raise ValueError(f"No difference between proposed {UPDATED_SCHEMA_VERSION} schema and current {meta.SCHEMA_VERSION}")
    if UPDATED_SCHEMA_VERSION not in SCHEMA_VERSION.valid_increments():
        raise ValueError(f"""{UPDATED_SCHEMA_VERSION} is invalid increment after current {SCHEMA_VERSION}.
Please increment major, minor or patch integer, e.g:
""" + "\n".join(str(x) for x in SCHEMA_VERSION.valid_increments()))
    # Show the diff, colouring additions green and removals red.
    print("-"*50)
    for delta in deltas:
        if delta.startswith("+"):
            col = colorama.Fore.GREEN
        elif delta.startswith("-"):
            col = colorama.Fore.RED
        else:
            col = ""
        print(col + delta, end="")
    print(colorama.Fore.RESET)
    # Ask for explicit confirmation before writing anything.
    _input = None
    print("-"*50)
    print("Confirm spec changes? ", end="")
    while _input not in ["y", "n"]:
        if _input is not None:
            print("Please enter y or n. ", end="")
        print("(y/n): ", end="")
        _input = input().lower()
    if _input != "y":
        print("Did not update schema.")
        return
    new_schema_file_name = f"v{UPDATED_SCHEMA_VERSION}.json"
    new_schema_file = os.path.join(relative_schema_dir, new_schema_file_name)
    with open(new_schema_file, "w") as f:
        f.write(new_schema)
    # FIX: removed a stray '"' that appeared after the file name in the
    # original success message.
    print(
        f"""Wrote {new_schema_file}
Validate this file by executing
$ make test
in {script_dir.parent}""")
if __name__ == "__main__":
    try:
        main()
    except ValueError as e:
        # Expected failure modes (no schema diff / invalid increment):
        # report on stderr and exit with status 2.
        print(f"Error! {e}", file=sys.stderr)
        sys.exit(2)
|
# Math
import theano
import theano.tensor as T
import numpy
# Model
from model import *
from alphabet import Alphabet
from corpus import Corpus
# Plumbing
import pickle
import argparse
import os
import sys
# Increase the recursion limit, which is required for
# gradient compilation
sys.setrecursionlimit(9999)

# Constant random seed (makes runs reproducible)
numpy.random.seed(0)

# Command line arguments
parser=argparse.ArgumentParser(description='Run language model training.')
parser.add_argument('--data', dest='data', help='Data file to run on', required=True)
parser.add_argument('--alphabet', dest='alphabet', help='Alphabet file to run with', required=True)
parser.add_argument('--out', dest='out', help='Output directory', required=True)

# Optional model arguments
parser.add_argument('--embedding_size', type=int, dest='embedding_size', help='Word embedding size', default=300)
parser.add_argument('--rnn_size', type=int, dest='rnn_size', help='Hidden state size', default=500)
parser.add_argument('--batch_size', type=int, dest='batch_size', help='Minibatch size', default=50)
parser.add_argument('--seq_length', type=int, dest='seq_length', help='Sequence length', default=50)

# Optional training arguments
parser.add_argument('--epochs', type=int, dest='epochs', help='Number of epochs', default=5000 * 13)
parser.add_argument('--learning_rate', type=float, dest='learning_rate', help='Learning rate for SGD', default=0.05)
parser.add_argument('--checkpoint_freq', type=int, dest='checkpoint_frequency', help='Save a checkpoint every X epochs', default=5000)
parser.add_argument('--sample_length', type=int, dest='sample_length', help='Length of sample to take', default=50)
parser.add_argument('--sample_freq', type=int, dest='sample_frequency', help='Take a sample every X epochs', default=100)
parser.add_argument('--softmax_temp', type=float, dest='softmax_temperature', help='Softmax temperature for sampling', default=1)
arg = parser.parse_args()

# Load data and alphabet (both pickled by an earlier preprocessing step)
with open(arg.data, 'rb') as f:
    data = pickle.load(f)
with open(arg.alphabet, 'rb') as f:
    alphabet = pickle.load(f)
if not os.path.exists(arg.out):
    os.makedirs(arg.out)

# Unpack arguments
epochs = arg.epochs
embedding_size = arg.embedding_size
rnn_size = arg.rnn_size
batch_size = arg.batch_size
seq_length = arg.seq_length
learning_rate = arg.learning_rate

# Create corpus
corpus = Corpus(data, seq_length, batch_size)

# Create the model: embedding -> two stacked GRUs -> output projection
embedding_layer = Embedding(alphabet.size, embedding_size)
gru_layer_1 = GRU(embedding_size, rnn_size, rnn_size)
gru_layer_2 = GRU(rnn_size, rnn_size, rnn_size)
output_layer = Output(rnn_size, alphabet.size)
forward_network = Composition([embedding_layer, gru_layer_1, gru_layer_2, output_layer])

# Create training-mode model (same layers, but the output layer is replaced
# by a training node that also consumes the true targets)
true_output = T.imatrix('y') # Will ultimately be seq_length x batch_size
train_layer = output_layer.create_training_node(batch_size, true_output)
training_network = Composition([embedding_layer, gru_layer_1, gru_layer_2, train_layer])

# Compute gradients
initial_hidden = tuple(T.matrix('h') for _ in range(training_network.n_hiddens)) # batch_size x rrn_size
inputs = T.imatrix('x') # seq_length x batch_size
new_hidden, costs = training_network.unroll(seq_length, initial_hidden, inputs)
cost = T.mean(costs)

# Training function, which also updates parameters
# according to SGD (TODO adam/nesterov momentum)
print('Compiling training function...')
training_function = theano.function(
    (inputs, true_output) + initial_hidden,
    (cost,) + new_hidden,
    updates = [
        (
            param,
            param - learning_rate * T.grad(cost, param)
        ) for param in training_network.params
    ],
    mode='FAST_RUN'
)
print('Done.')
# Training function
def train(inputs, outputs, hiddens):
    """Run one SGD step via the compiled Theano function.

    Returns (loss, new hidden states) where the hidden states are the
    trailing entries of the compiled function's output tuple.
    """
    results = training_function(inputs, outputs, *hiddens)
    loss, new_hiddens = results[0], results[1:]
    return loss, new_hiddens
# Sampling function: a single-step (one token in, one token out) version of
# the forward network used for generating text.
print('Compiling sampling function...')
singleton = forward_network.create_singleton_function()
print('Done.')

# Initialize the hidden state at the beginning of all the samples.
current_hiddens = (
    numpy.zeros((batch_size, rnn_size)), # Layer 1
    numpy.zeros((batch_size, rnn_size))  # Layer 2
)
# Exponentially-smoothed loss for nicer logging.
smooth_cost = None

# Training loop
print('Beginning training loop.')
for epoch in range(epochs):
    # Get the next batch from the corpus
    inputs, outputs, resets = corpus.next_batch()

    # Reset the recurrent state of any sample that wrapped to the
    # beginning of a document
    for i in range(batch_size):
        if resets[i]:
            for j, layer in enumerate(current_hiddens):
                # Zero out this particular batch element's hidden row
                current_hiddens[j][i] = 0

    # Feed inputs and outputs into the training function
    cost, current_hiddens = train(inputs, outputs, current_hiddens)

    # Update smooth cost for logging
    if smooth_cost is None:
        smooth_cost = cost
    else:
        # FIX: the coefficients were swapped (0.01 * smooth + 0.99 * cost),
        # which made the "smoothed" loss track the raw cost almost exactly.
        smooth_cost = smooth_cost * 0.99 + 0.01 * cost

    # Log cost
    print('Epoch %d\tSmooth Loss %f\tLoss %f' % (epoch, smooth_cost, cost))

    # Periodically save checkpoints
    if epoch % arg.checkpoint_frequency == 0:
        path = os.path.join(arg.out, 'epoch-%d-%f.pk' % (epoch, smooth_cost))
        with open(path, 'wb') as f:
            print('Saving checkpoint to %s' % path)
            pickle.dump(forward_network, f)

    # Periodically sample on batch 0
    if epoch % arg.sample_frequency == 0:
        tokens = []
        next_token = outputs[0][-1] # Last next token of batch 0
        # NOTE(review): only layer 1's hidden state is passed here; confirm
        # `singleton` expects a single layer's state rather than all layers.
        hiddens = current_hiddens[0]
        for t in range(arg.sample_length):
            predictions, hiddens = singleton(next_token, hiddens)
            # Softmax temperature 0 means greedy (argmax) decoding
            if arg.softmax_temperature == 0:
                next_token = numpy.argmax(predictions)
            else:
                # Apply softmax temperature
                predictions = predictions ** (1 / arg.softmax_temperature)
                predictions /= predictions.sum()
                # Choose probabilistically
                next_token = numpy.random.choice(len(predictions), p = predictions)
            # FIX: was `tokens.append(next_tokens)` — an undefined name that
            # raised NameError on the first sampling epoch.
            tokens.append(next_token)
        print('Sample:')
        print(' '.join(alphabet.to_token(token) for token in tokens))
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import re
import warnings
from operator import itemgetter
import mmcv
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmaction.core import OutputHook
from mmaction.datasets.pipelines import Compose
from mmaction.models import build_recognizer
def init_recognizer(config, checkpoint=None, device='cuda:0', **kwargs):
    """Initialize a recognizer from config file.

    Args:
        config (str | :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str | None, optional): Checkpoint path/url. If set to None,
            the model will not load any weights. Default: None.
        device (str | :obj:`torch.device`): The desired device of returned
            tensor. Default: 'cuda:0'.
    Returns:
        nn.Module: The constructed recognizer, in eval mode on `device`.
    """
    if 'use_frames' in kwargs:
        warnings.warn('The argument `use_frames` is deprecated PR #1191. '
                      'Now you can use models trained with frames or videos '
                      'arbitrarily. ')

    # Normalize `config` into an mmcv.Config instance.
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    if not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')

    # pretrained model is unnecessary since we directly load checkpoint later
    config.model.backbone.pretrained = None
    model = build_recognizer(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        load_checkpoint(model, checkpoint, map_location='cpu')
    model.cfg = config
    model.to(device)
    model.eval()
    return model
def inference_recognizer(model, video, outputs=None, as_tensor=True, **kwargs):
    """Inference a video with the recognizer.
    Args:
        model (nn.Module): The loaded recognizer.
        video (str | dict | ndarray): The video file path / url or the
            rawframes directory path / results dictionary (the input of
            pipeline) / a 4D array T x H x W x 3 (The input video).
        outputs (list(str) | tuple(str) | str | None) : Names of layers whose
            outputs need to be returned, default: None.
        as_tensor (bool): Same as that in ``OutputHook``. Default: True.
    Returns:
        dict[tuple(str, float)]: Top-5 recognition result dict.
        dict[torch.tensor | np.ndarray]:
            Output feature maps from layers specified in `outputs`.
    """
    if 'use_frames' in kwargs:
        warnings.warn('The argument `use_frames` is deprecated PR #1191. '
                      'Now you can use models trained with frames or videos '
                      'arbitrarily. ')
    if 'label_path' in kwargs:
        warnings.warn('The argument `use_frames` is deprecated PR #1191. '
                      'Now the label file is not needed in '
                      'inference_recognizer. ')

    # Classify `video` into one of: dict / array / video / rawframes / audio.
    input_flag = None
    if isinstance(video, dict):
        input_flag = 'dict'
    elif isinstance(video, np.ndarray):
        assert len(video.shape) == 4, 'The shape should be T x H x W x C'
        input_flag = 'array'
    elif isinstance(video, str) and video.startswith('http'):
        input_flag = 'video'
    elif isinstance(video, str) and osp.exists(video):
        if osp.isfile(video):
            if video.endswith('.npy'):
                input_flag = 'audio'
            else:
                input_flag = 'video'
        if osp.isdir(video):
            input_flag = 'rawframes'
    else:
        raise RuntimeError('The type of argument video is not supported: '
                           f'{type(video)}')

    if isinstance(outputs, str):
        outputs = (outputs, )
    assert outputs is None or isinstance(outputs, (tuple, list))

    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = cfg.data.test.pipeline
    # Alter data pipelines & prepare inputs for the detected input kind.
    if input_flag == 'dict':
        # Caller already supplied a ready-made pipeline input.
        data = video
    if input_flag == 'array':
        # Infer modality from the channel count (2 -> Flow, 3 -> RGB) and
        # swap decode steps for in-memory array decoding.
        modality_map = {2: 'Flow', 3: 'RGB'}
        modality = modality_map.get(video.shape[-1])
        data = dict(
            total_frames=video.shape[0],
            label=-1,
            start_index=0,
            array=video,
            modality=modality)
        for i in range(len(test_pipeline)):
            if 'Decode' in test_pipeline[i]['type']:
                test_pipeline[i] = dict(type='ArrayDecode')
        test_pipeline = [x for x in test_pipeline if 'Init' not in x['type']]
    if input_flag == 'video':
        # Force OpenCV-based init/decode steps for file or URL inputs.
        data = dict(filename=video, label=-1, start_index=0, modality='RGB')
        if 'Init' not in test_pipeline[0]['type']:
            test_pipeline = [dict(type='OpenCVInit')] + test_pipeline
        else:
            test_pipeline[0] = dict(type='OpenCVInit')
        for i in range(len(test_pipeline)):
            if 'Decode' in test_pipeline[i]['type']:
                test_pipeline[i] = dict(type='OpenCVDecode')
    if input_flag == 'rawframes':
        filename_tmpl = cfg.data.test.get('filename_tmpl', 'img_{:05}.jpg')
        modality = cfg.data.test.get('modality', 'RGB')
        start_index = cfg.data.test.get('start_index', 1)

        # count the number of frames that match the format of `filename_tmpl`
        # RGB pattern example: img_{:05}.jpg -> ^img_\d+.jpg$
        # Flow patteren example: {}_{:05d}.jpg -> ^x_\d+.jpg$
        pattern = f'^{filename_tmpl}$'
        if modality == 'Flow':
            pattern = pattern.replace('{}', 'x')
        pattern = pattern.replace(
            pattern[pattern.find('{'):pattern.find('}') + 1], '\\d+')
        total_frames = len(
            list(
                filter(lambda x: re.match(pattern, x) is not None,
                       os.listdir(video))))
        data = dict(
            frame_dir=video,
            total_frames=total_frames,
            label=-1,
            start_index=start_index,
            filename_tmpl=filename_tmpl,
            modality=modality)
        if 'Init' in test_pipeline[0]['type']:
            test_pipeline = test_pipeline[1:]
        for i in range(len(test_pipeline)):
            if 'Decode' in test_pipeline[i]['type']:
                test_pipeline[i] = dict(type='RawFrameDecode')
    if input_flag == 'audio':
        data = dict(
            audio_path=video,
            total_frames=len(np.load(video)),
            start_index=cfg.data.test.get('start_index', 1),
            label=-1)

    # Run the (possibly rewritten) pipeline and batch the single sample.
    test_pipeline = Compose(test_pipeline)
    data = test_pipeline(data)
    data = collate([data], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]

    # forward the model
    with OutputHook(model, outputs=outputs, as_tensor=as_tensor) as h:
        with torch.no_grad():
            scores = model(return_loss=False, **data)[0]
        returned_features = h.layer_outputs if outputs else None

    # Rank (class_index, score) pairs by score and keep the top 5.
    num_classes = scores.shape[-1]
    score_tuples = tuple(zip(range(num_classes), scores))
    score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True)
    top5_label = score_sorted[:5]
    if outputs:
        return top5_label, returned_features
    return top5_label
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Load the Titanic passenger dataset and preview the first ten rows.
df = pd.read_csv('data/titanic_data.csv')
print(df.head(10))
|
# 26:49
class TreeNode:
    """Binary-tree node holding a value and left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start empty and are wired up by the caller.
        self.left = None
        self.right = None
class Solution1(object):
    """Recursive in-order successor search in a BST (values assumed unique)."""

    def inorderSuccessor(self, root, p):
        # Delegate to the recursive search starting at the root.
        return self.inorderSearch(root, p)

    def inorderSearch(self, node, p):
        # Return p's successor within the subtree rooted at `node`,
        # or None if it is not in this subtree.
        if not node:
            return
        res = None
        if node.val == p.val:
            # Found p: its successor is the leftmost node of the right
            # subtree (if there is one); otherwise an ancestor handles it.
            if node.right:
                return self.findNext(node.right)
        elif node.val > p.val:
            # p is in the left subtree; if no successor was found there,
            # this node is the smallest value greater than p.
            res = self.inorderSearch(node.left, p)
            if not res:
                return node
        else:
            res = self.inorderSearch(node.right, p)
        return res

    def findNext(self, node):
        # Leftmost (minimum) node of this subtree.
        if not node.left:
            return node
        return self.findNext(node.left)
# I didn't fully understand this one
class Solution(object):
    """Iterative in-order successor (matches p by value, so values must be unique)."""

    def inorderSuccessor(self, root, p):
        # Easy case: the successor is the leftmost node of p's right subtree.
        if p.right:
            p = p.right
            while p.left:
                p = p.left
            return p
        # Otherwise walk the whole tree in order with an explicit stack,
        # remembering the previously visited value; the node visited
        # immediately after p is the successor.
        node = root
        s = []
        inorder = float("-inf")
        while s or node:
            while node:
                s.append(node)
                node = node.left
            node = s.pop()
            if inorder == p.val:
                return node
            inorder = node.val
            node = node.right
        return None
#!/usr/bin/python3
from gi.repository import Gtk
#from gi.repository import GLib
from gi.repository import GObject
from pprint import pprint
import urllib
#import sqlite3
import threading
#from "../py-sonic/py-sonic/"
#from importlib import import_module
#importlib.import_module("../py-sonic/py-sonic/")
#import_module('../py-sonic/py-sonic/', 'pysonic')
import libsonic
__version__ = '0.0.1'
import settings
import cache
# Module-wide state: user settings loaded once; serverinfo filled in later.
appsettings = settings.settings()
serverinfo = ''
# Use threads
#GLib.threads_init()
GObject.threads_init()
class MainWindow(Gtk.Window):
NAV_LIBRARY = 1
# == GUI Elements ======
    def __init__(self):
        """Build the main window: toolbars, nav/artist/album/track lists
        laid out in a Gtk.Grid, then restore size/position from settings
        and load the initial artist list.
        """
        Gtk.Window.__init__(self, title="Mellow")
        #self.vpan = Gtk.VPaned()
        #self.hpan = Gtk.HPaned()
        #self.vpan.show()
        #self.hpan.show()
        #self.vBox1.pack_end(self.hpan, True, True, 0)
        #self.hpan.pack1(self.vpan, False, True)
        # Guards against overlapping refresh operations.
        self.refreshing = False
        # Start the grid
        self.grid = Gtk.Grid()
        self.add(self.grid)
        # Toolbar above the artists list
        self.toolBar = Gtk.Toolbar()
        self.grid.add(self.toolBar)
        context = self.toolBar.get_style_context()
        context.add_class(Gtk.STYLE_CLASS_PRIMARY_TOOLBAR)
        #self.connectButton = Gtk.Button(label="Connect")
        #self.connectButton.connect("clicked", self.onConnectbuttonClicked)
        #self.add(self.button)
        #self.grid.add(self.connectButton)
        #self.connectButton = Gtk.ToolButton(stock_id=Gtk.STOCK_CONNECT)
        #self.connectButton.set_property("visible",True)
        ##self.connectButton.set_property("icon_name","list-add-symbolic")
        #self.connectButton.connect("clicked", self.onConnectbuttonClicked)
        #self.toolBar.add(self.connectButton)
        self.refreshButton = Gtk.ToolButton()
        self.refreshButton.set_property("visible",True)
        self.refreshButton.set_property("icon_name","view-refresh")
        self.refreshButton.set_label("Refresh artists from server")
        self.refreshButton.connect("clicked", self.onRefreshbuttonClicked)
        self.toolBar.add(self.refreshButton)
        # Playback toolbar with the widgets you might expect there
        self.playbackToolBar = Gtk.Toolbar()
        #self.grid.add(self.playbackToolBar)
        self.grid.attach_next_to(self.playbackToolBar, self.toolBar, Gtk.PositionType.RIGHT, 4, 1)
        context = self.playbackToolBar.get_style_context()
        context.add_class(Gtk.STYLE_CLASS_PRIMARY_TOOLBAR)
        self.previousButtonButton = Gtk.ToolButton(stock_id=Gtk.STOCK_MEDIA_PREVIOUS)
        self.previousButtonButton.connect("clicked", self.onPreviousButtonbuttonClicked)
        self.playbackToolBar.add(self.previousButtonButton)
        self.playButton = Gtk.ToolButton(stock_id=Gtk.STOCK_MEDIA_PLAY)
        self.playButton.connect("clicked", self.onPlaybuttonClicked)
        self.playbackToolBar.add(self.playButton)
        self.stopButton = Gtk.ToolButton(stock_id=Gtk.STOCK_MEDIA_STOP)
        self.stopButton.connect("clicked", self.onStopbuttonClicked)
        self.playbackToolBar.add(self.stopButton)
        self.nextButton = Gtk.ToolButton(stock_id=Gtk.STOCK_MEDIA_NEXT)
        self.nextButton.connect("clicked", self.onNextbuttonClicked)
        self.playbackToolBar.add(self.nextButton)
        # Main list (navigation sources, e.g. "Library")
        self.mainscroll = Gtk.ScrolledWindow()
        self.mainscroll.set_hexpand(True)
        self.mainscroll.set_vexpand(True)
        self.grid.attach_next_to(self.mainscroll, self.toolBar, Gtk.PositionType.BOTTOM, 1, 3)
        self.mainnavliststore = Gtk.ListStore(int, str)
        self.mainnavtreeview = Gtk.TreeView(model=self.mainnavliststore)
        select = self.mainnavtreeview.get_selection()
        select.connect("changed", self.onMainNavViewchanged)
        self.mainscroll.add(self.mainnavtreeview)
        renderer_nav = Gtk.CellRendererText()
        column_nav = Gtk.TreeViewColumn("Source", renderer_nav, text=1)
        self.mainnavtreeview.append_column(column_nav)
        # Make 'artistname' column searchable
        self.mainnavtreeview.set_search_column(1)
        self.mainnavliststore.append([self.NAV_LIBRARY, 'Library'])
        # Artists list
        self.artistscroll = Gtk.ScrolledWindow()
        self.artistscroll.set_hexpand(True)
        self.artistscroll.set_vexpand(True)
        #self.grid.attach(artistscroll, 0, 1, 3, 1)
        self.grid.attach_next_to(self.artistscroll, self.playbackToolBar, Gtk.PositionType.BOTTOM, 2, 1)
        self.artistliststore = Gtk.ListStore(int, str)
        self.artisttreeview = Gtk.TreeView(model=self.artistliststore)
        artistselect = self.artisttreeview.get_selection()
        artistselect.connect("changed", self.onArtistViewchanged)
        self.artistscroll.add(self.artisttreeview)
        #renderer_text = Gtk.CellRendererText()
        #column_text = Gtk.TreeViewColumn("ID", renderer_text, text=0)
        #mainwindow.artisttreeview.append_column(column_text)
        renderer_artistName = Gtk.CellRendererText()
        #renderer_editabletext.set_property("editable", True)
        column_artistName = Gtk.TreeViewColumn("Artist", renderer_artistName, text=1)
        self.artisttreeview.append_column(column_artistName)
        # Make 'artistname' column searchable
        self.artisttreeview.set_search_column(1)
        #renderer_editabletext.connect("edited", self.text_edited)
        # Progress bar (hidden until a refresh is running)
        self.progressbar = Gtk.ProgressBar()
        self.progressbar.set_visible(False)
        #self.grid.attach_next_to(self.progressbar, self.artistscroll, Gtk.PositionType.BOTTOM, 1, 1)
        self.grid.attach_next_to(self.progressbar, self.mainscroll, Gtk.PositionType.BOTTOM, 1, 1)
        #self.loadArtistList()
        # Album list
        self.albumscroll = Gtk.ScrolledWindow()
        self.albumscroll.set_hexpand(True)
        self.albumscroll.set_vexpand(True)
        #self.grid.attach_next_to(self.albumscroll, self.connectButton, Gtk.PositionType.RIGHT, 1, 2)
        self.grid.attach_next_to(self.albumscroll, self.artistscroll, Gtk.PositionType.RIGHT, 2, 1)
        self.albumliststore = Gtk.ListStore(int, str)
        self.albumtreeview = Gtk.TreeView(model=self.albumliststore)
        albumselect = self.albumtreeview.get_selection()
        albumselect.connect("changed", self.onAlbumViewchanged)
        self.albumscroll.add(self.albumtreeview)
        #pprint(self.artistliststore.get(0))
        #self.loadAlbumList(self.artistliststore.get(1))
        renderer_albumName = Gtk.CellRendererText()
        column_albumName = Gtk.TreeViewColumn("Album", renderer_albumName, text=1)
        self.albumtreeview.append_column(column_albumName)
        self.albumtreeview.set_search_column(1)
        # Track list
        self.trackscroll = Gtk.ScrolledWindow()
        self.trackscroll.set_hexpand(True)
        self.trackscroll.set_vexpand(True)
        self.grid.attach_next_to(self.trackscroll, self.artistscroll, Gtk.PositionType.BOTTOM, 4, 2)
        self.trackliststore = Gtk.ListStore(int, str)
        self.tracktreeview = Gtk.TreeView(model=self.trackliststore)
        #self.albumtreeview.connect(
        self.trackscroll.add(self.tracktreeview)
        #self.set_default_size(gtk.gdk.screen_width(),500)
        # Restore window geometry from saved settings, then populate lists.
        self.set_default_size(appsettings['winWidth'], appsettings['winHeight'])
        self.move(appsettings['winX'], appsettings['winY']);
        self.loadArtistList()
        self.loadAlbumList(-1)
def loadArtistList(mainwindow):
    """
    Refresh the artist list store from the local cache.

    If nothing is cached yet, a background refresh is started and this
    method returns immediately; the refresh thread calls loadArtistList()
    again once the cache is populated.  (Bug fix: previously the no-cache
    path fell through to the `for artist in artists` loop with `artists`
    never assigned, raising NameError.)
    """
    # fetch artists, @TODO: has ifModifiedSince for caching
    serverinfo = settings.getServerInfo()
    print(serverinfo)
    hasCache = cache.haveCachedArtists(serverinfo)
    if not hasCache:
        # No cache yet: kick off the background refresh and bail out.
        mainwindow.onRefreshbuttonClicked(mainwindow)
        return
    print("get from cache")
    artists = cache.getArtists(serverinfo)
    mainwindow.artistliststore.clear()
    previousLetter = ''
    mainwindow.artistliststore.append([-1, 'All artists'])
    for artist in artists:
        # Track the index letter; currently only remembered, e.g. for
        # future section headers — every artist is appended regardless.
        thisLetter = artist['indexLetter']
        if thisLetter != previousLetter:
            previousLetter = thisLetter
        mainwindow.artistliststore.append([artist['id'], artist['name']])
def loadAlbumList(mainwindow, artistID):
    """Fill the album list store with the cached albums of one artist.

    An artistID of -1 is the synthetic "All artists" entry.
    """
    print('show albums for artist ?', artistID)
    serverinfo = settings.getServerInfo()
    albums = cache.getAlbums(serverinfo, artistID)
    # Rebuild the store from scratch, always headed by the catch-all row.
    mainwindow.albumliststore.clear()
    mainwindow.albumliststore.append([-1, 'All albums'])
    for album in albums:
        mainwindow.albumliststore.append([album['id'], album['name']])
#def loadTrackList(mainwindow, artistID, albumID):
def loadTrackList(mainwindow, albumID):
    """Fetch one album's track list from the server and cache it.

    Returns -1 on server/auth/data errors, None otherwise.
    """
    serverinfo = settings.getServerInfo()
    if {} == serverinfo:
        print("Login failed!")
        return
    try:
        conn = libsonic.Connection(serverinfo['host'], serverinfo['username'], serverinfo['password'], serverinfo['port'])
    except urllib.error.HTTPError:
        # Bug fix: previously fell through with `conn` unbound.
        print("User/pass fail")
        return -1
    print("Getting album " + str(albumID))
    try:
        # @TODO: use ifModifiedSince with caching
        print("getTrackList()")
        tracks = conn.getAlbum(albumID)
        album = tracks["album"]
        pprint(album)
        # Accessing songCount doubles as a sanity check on the payload;
        # a missing key lands in the KeyError handler below.
        songCount = album["songCount"]
        # NOTE(review): saveTracks is not defined in this module — presumably
        # cache.saveTracks was intended; confirm.
        saveTracks(serverinfo, album)
    except urllib.error.HTTPError:
        print("authfail while getting album")
        return -1
    except KeyError:  # was Python-2-only `except KeyError, e` syntax
        print("[getArtistsFromServer] KeyError: something was wrong with the data")
        return -1
# == Main navigation ======
def onMainNavViewchanged(self, selection):
    """React to a change of the main navigation selection (debug print only)."""
    model, treeiter = selection.get_selected()
    if treeiter is None:
        return
    print("You selected", model[treeiter][0])
# == Music navigation ======
def onArtistViewchanged(self, selection):
    """When an artist row is selected, show that artist's albums."""
    model, treeiter = selection.get_selected()
    if treeiter is None:
        return
    artist_id = model[treeiter][0]
    print("You selected", artist_id)
    self.loadAlbumList(artist_id)
def onAlbumViewchanged(self, selection):
    """When an album row is selected, load that album's track list."""
    model, treeiter = selection.get_selected()
    if treeiter is None:
        return
    album_id = model[treeiter][0]
    print("You selected", album_id)
    self.loadTrackList(album_id)
# == Buttons ======
def onRefreshbuttonClicked(self, widget):
    """Start a background refresh of artists/albums from the server.

    The worker is a daemon thread so it cannot keep the process alive
    after the main window exits.
    """
    refreshThread = self.UpdateFromServerThread(self)
    # setDaemon() is deprecated; the `daemon` attribute is the supported API.
    refreshThread.daemon = True
    refreshThread.start()
def onPreviousButtonbuttonClicked(self, widget):
    """Toolbar handler: skip to the previous track (stub, prints only)."""
    print("Skipping backward")
def onNextbuttonClicked(self, widget):
    """Toolbar handler: skip to the next track (stub, prints only)."""
    print("Skipping forward")
def onPlaybuttonClicked(self, widget):
    """Toolbar handler: begin playback (stub, prints only)."""
    print("Beginning playback")
def onStopbuttonClicked(self, widget):
    """Toolbar handler: stop playback (stub, prints only)."""
    print("Stop playback")
def onConnectbuttonClicked(self, widget):
    """Smoke-test the Subsonic connection: fetch and dump two random songs."""
    print("Connecting...")
    serverinfo = settings.getServerInfo()
    conn = libsonic.Connection(serverinfo['host'], serverinfo['username'],
                               serverinfo['password'], serverinfo['port'])
    pprint(conn.getRandomSongs(size=2))
# == Subsonic remote data retrieval ======
class UpdateFromServerThread(threading.Thread):
    """Background worker that re-downloads the artist and album lists from
    the Subsonic server and stores them in the local cache.

    NOTE(review): this thread touches GTK widgets (progress bar, list
    stores) directly; GTK is not thread-safe, so these updates should be
    marshalled through the main loop (e.g. GLib.idle_add) — confirm.
    """

    def __init__(self, mainwindow):
        threading.Thread.__init__(self)
        # Window whose widgets and cached lists this worker updates.
        self.mainwindow = mainwindow

    def run(self):
        if self.mainwindow.refreshing:
            # Already refreshing
            print("Already refreshing from server, ignore")
            return
        print("Refreshing...")
        self.mainwindow.refreshing = True
        try:
            serverinfo = settings.getServerInfo()
            cache.clearArtists(serverinfo)
            cache.saveArtists(serverinfo, self.getArtistsFromServer(serverinfo))
            # refresh artist list in window
            self.mainwindow.loadArtistList()
            artists = cache.getArtists(serverinfo)
            print("also storing albums:")
            cache.clearAlbums(serverinfo)
            self.cacheAllAlbumsFromServer(serverinfo, artists)
        finally:
            # Bug fix: always release the flag, otherwise one failure would
            # block every later refresh attempt.
            self.mainwindow.refreshing = False

    def getArtistsFromServer(self, serverinfo):
        """Download the artist index; return the artist list, or -1 on failure."""
        if {} == serverinfo:
            print("Login failed!")
            return
        try:
            conn = libsonic.Connection(serverinfo['host'], serverinfo['username'], serverinfo['password'], serverinfo['port'])
        except urllib.error.HTTPError:
            # Bug fix: previously fell through with `conn` unbound.
            print("User/pass fail")
            return -1
        print("Getting artists")
        try:
            # @TODO: use ifModifiedSince with caching
            print("Using API ", conn.apiVersion)
            if '1.8.0' == conn.apiVersion:
                print("getArtists()")
                artists = conn.getArtists()["artists"]
            else:
                print("getIndexes()")
                artists = conn.getIndexes()["indexes"]
        except urllib.error.HTTPError:
            print("authfail while getting artists")
            return -1
        except KeyError:  # was Python-2-only `except KeyError, e` syntax
            print("[getArtistsFromServer] KeyError: something was wrong with the data")
            return -1
        return artists

    def cacheAllAlbumsFromServer(self, serverinfo, artists):
        """Download album lists for every artist and cache them all at once.

        Returns True on success, -1 on failure.
        """
        if {} == serverinfo:
            print("Login failed!")
            return
        try:
            conn = libsonic.Connection(serverinfo['host'], serverinfo['username'], serverinfo['password'], serverinfo['port'])
        except urllib.error.HTTPError:
            # Bug fix: previously fell through with `conn` unbound.
            print("User/pass fail")
            return -1
        if conn.apiVersion != '1.8.0':
            # apiVersion is fixed per connection: fail once up front instead
            # of printing per artist and then crashing on an unbound `albums`.
            print("API version unsupported: need 1.8.0 or newer")
            return -1
        self.mainwindow.progressbar.set_fraction(0)
        self.mainwindow.progressbar.set_visible(True)
        allAlbums = {'album': [], 'albumCount': 0}
        counter = 0
        for artist in artists:
            counter += 1
            if 0 == counter % 20:
                # Only update the bar every 20 artists to limit widget churn.
                self.mainwindow.progressbar.set_fraction(float(counter) / len(artists))
            try:
                # @TODO: use ifModifiedSince with caching
                albums = conn.getArtist(artist['id'])["artist"]
                if 1 == albums['albumCount']:
                    # A single album comes back as a dict, not a list: normalise.
                    albums["album"] = [albums["album"]]
            except urllib.error.HTTPError:
                print("authfail while getting albums")
                return -1
            except KeyError:  # was Python-2-only `except KeyError, e` syntax
                print("[getAllAlbumsFromServer] KeyError: something was wrong with the data")
                return -1
            allAlbums['album'].extend(albums['album'])
            allAlbums['albumCount'] = allAlbums['albumCount'] + albums['albumCount']
        cache.saveAlbums(serverinfo, allAlbums)
        self.mainwindow.progressbar.set_visible(False)
        return True

    def getAlbumsFromServer(self, serverinfo, artistID):
        """Download the album list of a single artist; -1 on failure."""
        if {} == serverinfo:
            print("Login failed!")
            return
        try:
            conn = libsonic.Connection(serverinfo['host'], serverinfo['username'], serverinfo['password'], serverinfo['port'])
        except urllib.error.HTTPError:
            # Bug fix: previously fell through with `conn` unbound.
            print("User/pass fail")
            return -1
        print("Getting albums for artist ", artistID)
        try:
            # @TODO: use ifModifiedSince with caching
            if '1.8.0' == conn.apiVersion:
                albums = conn.getArtist(artistID)["artist"]
            else:
                print("API version unsupported: need 1.8.0 or newer")
                return -1
        except urllib.error.HTTPError:
            print("authfail while getting albums")
            return -1
        except KeyError:  # was Python-2-only `except KeyError, e` syntax
            print("[getAlbumsFromServer] KeyError: something was wrong with the data")
            return -1
        return albums
# Initialise and create the main window of the program
win = MainWindow()
win.connect("delete-event", Gtk.main_quit)  # closing the window quits the GTK loop
win.show_all()
Gtk.main()  # blocks here until Gtk.main_quit is called
|
# -*- coding: utf-8 -*-
from odoo import models, fields, api
from odoo.exceptions import UserError
import copy
class StockImmediateTransfer(models.TransientModel):
    _inherit = "stock.immediate.transfer"

    @api.multi
    def process(self):
        """Validate the immediate transfer, then run the B2B follow-ups.

        Returns the super() result, or None when no pickings are in the
        context (bug fix: `result` was previously unbound in that case,
        so the final `return result` raised UnboundLocalError).
        """
        context = self.env.context or {}
        pickings = self.env['stock.picking'].search([('id', 'in', context.get('active_ids'))])
        result = None
        if pickings:
            result = super(StockImmediateTransfer, self).process()
            pickings.check_inventory_legal()
            # Create the invoices (platform purchase / freight / FBA replenish).
            pickings.create_supplier_invoice_platform_purchase()
            pickings.create_freight_invoice()
            pickings.create_supplier_invoice_fba_replenish()
            # Mark the B2B flow as done and propagate to the related orders.
            pickings.write({'b2b_state': 'done'})
            pickings.modify_related_order_state()
        return result
# @api.multi
# def process(self):
# '''移动'''
# result = super(StockImmediateTransfer, self).process()
# loc_obj = self.env['stock.location']
# merchant = self.env.user.merchant_id or self.env.user
# context = self.env.context or {}
# pickings = self.env['stock.picking'].search([('id', 'in', context.get('active_ids'))])
# if pickings:
# pickings.write({'b2b_state': 'done'})
# for picking in pickings:
# sale_order = picking.sale_order_id
# if sale_order:
# sale_order_done = True
# if sale_order.deliverys.filtered(lambda r: r.b2b_state == 'draft'):
# sale_order_done = False
# if sale_order_done:
# sale_order.b2b_state = 'delivered'
# # invoice
# invoice_obj = self.env['invoice']
# if picking.origin_type == 'own_delivery': #自有产品平台发货,生成运费账单
# invoice_val = {
# 'picking_id': picking.id,
# 'sale_order_id': sale_order.id,
# 'merchant_id': merchant.id,
# 'origin': sale_order.name,
# 'type': 'distributor',
# 'state': 'paid',
# 'order_line': []
# }
# create_invoice = False
# for line in picking.pack_operation_product_ids:
# invoice_val['order_line'].append((0, 0, {
# 'product_id': line.product_id.id,
# 'product_uom_qty': line.qty_done,
# 'product_uom': line.product_uom_id.id,
# 'platform_price': 0,
# 'freight': line.b2b_sale_line_id.supplier_freight,
# 'operation_line_id': line.id,
# }))
# if line.platform_location:
# create_invoice = True
# if create_invoice:
# invoice = invoice_obj.create(invoice_val)
# invoice.invoice_confirm()
# elif picking.origin_type == 'agent_delivery': #平台采购生成发票(供应商库位发货的发票,第三方仓库发货的发票)
# purchase_order = picking.purchase_order_id
# if purchase_order:
# purchase_order._compute_b2b_state()
# third_loc = loc_obj.search([
# ('partner_id', '=', merchant.partner_id.id),
# ('location_id', '=', self.env.ref('b2b_platform.third_warehouse').id)], limit=1)
# supplier_loc = loc_obj.search([
# ('partner_id', '=', merchant.partner_id.id),
# ('location_id', '=', self.env.ref('b2b_platform.supplier_stock').id)], limit=1)
# third_loc_invoice = {
# 'picking_id': picking.id,
# 'sale_order_id': sale_order.id,
# 'purchase_order_id': picking.purchase_order_id.id,
# 'merchant_id': merchant.id,
# 'type': 'supplier',
# 'detail_type': 'supplier_third_stock',
# 'origin': picking.purchase_order_id.name,
# 'state': 'draft',
# 'order_line': []
# }
# supplier_loc_invoice = copy.deepcopy(third_loc_invoice)
# supplier_loc_invoice['detail_type'] = 'supplier_own_stock'
# for line in picking.pack_operation_product_ids:
# if line.location_id == supplier_loc:
# supplier_loc_invoice['order_line'].append((0, 0, {
# 'product_id': line.product_id.id,
# 'product_uom_qty': line.qty_done,
# 'product_uom': line.product_uom_id.id,
# 'platform_price': line.product_id.supplier_price,
# 'freight': line.b2b_sale_line_id.supplier_freight,
# 'operation_line_id': line.id,
# }))
# elif line.location_id == third_loc:
# third_loc_invoice['order_line'].append((0, 0, {
# 'product_id': line.product_id.id,
# 'product_uom_qty': line.qty_done,
# 'product_uom': line.product_uom_id.id,
# 'platform_price': line.product_id.supplier_price,
# 'freight': 0,
# 'operation_line_id': line.id,
# }))
# if supplier_loc_invoice.get('order_line'):
# invoice = invoice_obj.create(supplier_loc_invoice)
# if third_loc_invoice.get('order_line'):
# invoice = invoice_obj.create(third_loc_invoice)
# elif picking.origin_type == 'fba_delivery': #fba 补发货
# picking.purchase_order_id.b2b_state = 'done'
# picking.fba_replenish_id.state = 'done'
# third_loc = loc_obj.search([
# ('partner_id', '=', merchant.partner_id.id),
# ('location_id', '=', self.env.ref('b2b_platform.third_warehouse').id)], limit=1)
# supplier_loc = loc_obj.search([
# ('partner_id', '=', merchant.partner_id.id),
# ('location_id', '=', self.env.ref('b2b_platform.supplier_stock').id)], limit=1)
# third_loc_invoice = {
# 'picking_id': picking.id,
# 'fba_freight': picking.fba_replenish_id.freight,
# 'fba_replenish_id': picking.fba_replenish_id.id,
# 'purchase_order_id': picking.purchase_order_id.id,
# 'merchant_id': merchant.id,
# 'type': 'supplier',
# 'detail_type': 'supplier_fba_third_stock',
# 'origin': picking.fba_replenish_id.name,
# 'state': 'draft',
# 'order_line': []
# }
# supplier_loc_invoice = copy.deepcopy(third_loc_invoice)
# supplier_loc_invoice['detail_type'] = 'supplier_fba_own_stock'
# for line in picking.pack_operation_product_ids:
# if line.location_id == supplier_loc:
# supplier_loc_invoice['order_line'].append((0, 0, {
# 'product_id': line.product_id.id,
# 'product_uom_qty': line.qty_done,
# 'product_uom': line.product_uom_id.id,
# 'platform_price': line.product_id.supplier_price,
# 'freight': 0,
# 'operation_line_id': line.id,
# }))
# elif line.location_id == third_loc:
# third_loc_invoice['order_line'].append((0, 0, {
# 'product_id': line.product_id.id,
# 'product_uom_qty': line.qty_done,
# 'product_uom': line.product_uom_id.id,
# 'platform_price': 0,
# 'freight': 0,
# 'operation_line_id': line.id,
# }))
# if supplier_loc_invoice.get('order_line'):
# invoice = invoice_obj.create(supplier_loc_invoice)
# if third_loc_invoice.get('order_line'):
# invoice = invoice_obj.create(third_loc_invoice)
# return result |
import requests
import json

# Ban a client id via the EMQX-style management REST API.
# Tutorial reference:
# http://www.itwhy.org/%E8%BD%AF%E4%BB%B6%E5%B7%A5%E7%A8%8B/python/python-%E7%AC%AC%E4%B8%89%E6%96%B9-http-%E5%BA%93-requests-%E5%AD%A6%E4%B9%A0.html
# Example GET listing existing bans (kept for reference):
#r = requests.get('http://10.199.96.149:8080/api/v3/banned/?_page=1&_limit=10000',auth=('intelligentFamily-client','Mjg5NTM2NTk1MzI0Mzg2MDExMjg1MDUzODg0NzI1MzI5OTC'))
#print(r.text)

# Client id to ban.
x = "1234567xxx"
# NOTE(review): endpoint address and basic-auth secret are hard-coded here —
# they should come from configuration/environment, not source control.
# "until" is a unix timestamp for when the ban expires.
r = requests.post(url='http://10.199.96.149:8080/api/v3/banned/', data=json.dumps({
    "who": x,
    "as": "client_id",
    "reason": "banned the clientId",
    "desc": "normal banned",
    "until": 1998377000
}), auth=('intelligentFamily-client', 'Mjg5NTM2NTk1MzI0Mzg2MDExMjg1MDUzODg0NzI1MzI5OTC'), headers={'Content-Type': 'application/json'})
print(r.text)
|
from typing import List, Set, Dict
import pytest
# 80ms, 15.7MB (98%, 43%)
class SolutionFirst:
    """LeetCode 56 — merge overlapping intervals.

    Sort, then sweep once keeping a current [pre_begin, pre_end] window.
    O(n log n) time, O(n) output.
    """

    def merge(self, intervals: List[List[int]]) -> List[List[int]]:
        if not intervals:
            # Bug fix: was a bare `return` (None); an empty list keeps the
            # declared List[List[int]] contract for empty input.
            return []
        res = []
        intervals = sorted(intervals)
        pre_begin, pre_end = intervals[0]
        for begin, end in intervals[1:]:
            if begin > pre_end:
                # Disjoint: flush the current window and start a new one.
                res.append([pre_begin, pre_end])
                pre_begin, pre_end = begin, end
            else:
                # Overlapping/touching: extend the current window.
                pre_end = max(pre_end, end)
        res.append([pre_begin, pre_end])
        return res
Solution = SolutionFirst
@pytest.mark.parametrize(['in_', 'out'], [
    ([[1, 3], [2, 6], [8, 10], [15, 18]], [[1, 6], [8, 10], [15, 18]]),
    ([[1, 4], [4, 5]], [[1, 5]]),
    ([[1, 4], [2, 3]], [[1, 4]]),
])
def test1(in_, out):
    """Each interval list must merge into the expected disjoint list."""
    merged = Solution().merge(in_)
    assert merged == out
|
# Standard library imports
from datetime import datetime
import influxdb_client
from influxdb_client.client.write_api import SYNCHRONOUS
# Third party imports
import yfinance as yf
# Local application imports
from include.writetodb import writePriceToDb
def getTickerPriceHistory2Db(ticker, bucket, org, url, token):
    """Fetch the full price history of `ticker` from Yahoo Finance and write
    one InfluxDB point per trading day.

    Returns early (None) when Yahoo has no data for the ticker.
    """
    stock = yf.Ticker(ticker)
    stock_history = stock.history(period="max")
    if stock_history.empty:
        return
    print("=============")
    print("ticker:", ticker)
    print(stock_history)
    num_records = len(stock_history)
    print("length:", num_records)
    # Bug fix: to_dict('records') was rebuilt inside the loop on every
    # iteration, making the whole function O(n^2); convert once.
    records = stock_history.to_dict('records')
    for i in range(num_records):
        print("record:", i)
        fields = records[i]
        # numpy datetime64 string; drop the last 3 digits (ns -> us) so
        # strptime's %f (microseconds) can parse it.
        time_string = str(stock_history.index.values[i])[:-3]
        # Parse purely as a sanity check that the timestamp is well formed;
        # the Point below consumes the string representation directly.
        datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S.%f")
        open_price = float(fields['Open'])  # renamed: `open` shadowed the builtin
        high = float(fields['High'])
        low = float(fields['Low'])
        close = float(fields["Close"])
        volume = float(fields['Volume'])
        q = (influxdb_client.Point("price")
             .tag("ticker", ticker)
             .field("close", close)
             .field("high", high)
             .field("low", low)
             .field("open", open_price)
             .field("volume", volume)
             .time(time_string))
        writePriceToDb(q, bucket, org, url, token)
def getTickerPriceDates2Db(ticker, bucket, org, date1, date2, url, token):
    """Fetch `ticker` prices between date1 and date2 from Yahoo Finance and
    write one InfluxDB point per trading day.

    Returns early (None) when Yahoo has no data for the range.
    """
    stock_history = yf.download(ticker, start=date1, end=date2)
    if stock_history.empty:
        return
    print("=============")
    print("ticker:", ticker)
    print(stock_history)
    num_records = len(stock_history)
    print("length:", num_records)
    # Bug fix: to_dict('records') was rebuilt inside the loop on every
    # iteration, making the whole function O(n^2); convert once.
    records = stock_history.to_dict('records')
    for i in range(num_records):
        print("record:", i)
        fields = records[i]
        # numpy datetime64 string; drop the last 3 digits (ns -> us) so
        # strptime's %f (microseconds) can parse it.
        time_string = str(stock_history.index.values[i])[:-3]
        # Parse purely as a sanity check that the timestamp is well formed.
        datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S.%f")
        open_price = float(fields['Open'])  # renamed: `open` shadowed the builtin
        high = float(fields['High'])
        low = float(fields['Low'])
        close = float(fields["Close"])
        volume = float(fields['Volume'])
        q = (influxdb_client.Point("price")
             .tag("ticker", ticker)
             .field("close", close)
             .field("high", high)
             .field("low", low)
             .field("open", open_price)
             .field("volume", volume)
             .time(time_string))
        writePriceToDb(q, bucket, org, url, token)
|
import streamlit as st
from datetime import date
from utils import load_model
from plot import plot
from data import get_date
from keras import backend as K
@st.cache
def load_date(dt):
    """Streamlit-cached wrapper: fetch atmosphere data for the given date."""
    return get_date(dt)
@st.cache(allow_output_mutation=True)
def load_fronts_model():
    """Load the Keras fronts model once; return it with its TF session."""
    model = load_model("weights.hdf5")
    # noinspection PyProtectedMember
    model._make_predict_function()  # pre-build the predict graph for reuse across calls
    model.summary()
    return model, K.get_session()
if __name__ == '__main__':
    # Streamlit app: pick a date, run the fronts model, plot the result.
    st.title("Распознавание атмосферных фронтов")
    with st.spinner("Загрузка модели"):
        model, session = load_fronts_model()
        K.set_session(session)
    dt = st.date_input("Дата:", date(2020, 1, 1))
    with st.spinner("Данные атмосферы загружаются сразу за год (в папку data). Это около 2 гигабайт, и может занять "
                    "некоторое время (до 10 минут)."):
        try:
            data = load_date(dt)
        # Bug fix: was a bare `except:`, which also swallowed SystemExit /
        # KeyboardInterrupt; any real failure still falls back to the message.
        except Exception:
            data = None
            st.text("Нет данных атмосферы на указанную дату")
    if data is not None:
        # Class index per pixel from the softmax output.
        fronts = model.predict(data).argmax(axis=-1)
        plotted_fronts = plot(data[0], fronts[0], (256, 256), dt)
        st.pyplot(plotted_fronts)
|
#!/usr/bin/env python
import cv2
import numpy as np
import rospy
from cv_bridge import CvBridge, CvBridgeError
from sensor_msgs.msg import Image
def find_circle(msg):
    """Detect circles in an incoming ROS image and republish an annotated copy.

    Pipeline: ROS Image -> OpenCV image -> grayscale -> median blur ->
    Hough circle transform -> draw detections -> publish on /localizer_img.
    """
    global bridge  # CvBridge instance created in the __main__ block
    img = bridge.imgmsg_to_cv2(msg, desired_encoding="passthrough")
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.medianBlur(img, 13)  # heavy blur suppresses speckle before Hough
    cimg = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)  # colour canvas to draw on
    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 50, param1=100, param2=30, minRadius=5, maxRadius=100)
    if circles is not None:
        circles = np.uint16(np.around(circles))
        # NOTE(review): [0, :2] draws at most the FIRST TWO detections —
        # confirm this is intended rather than circles[0, :] (all of them).
        for i in circles[0, :2]:
            cv2.circle(cimg, (i[0], i[1]), i[2], (0, 255, 0), 10)  # green outline
            cv2.circle(cimg, (i[0], i[1]), 10, (0, 0, 255), 10)  # red centre dot
    img_pub.publish(bridge.cv2_to_imgmsg(cimg, "bgr8"))
    # cv2.imshow('detected circles', cimg)
    # if cv2.waitKey(1) & 0xff == ord('q'):
    #     return
# cv2.imshow('detected circles', cimg)
# if cv2.waitKey(1) & 0xff == ord('q'):
# return
if __name__ == "__main__":
    # Node wiring: bridge for image conversion, publisher for annotated
    # frames, subscription that feeds every camera frame to find_circle.
    bridge = CvBridge()
    img_pub = rospy.Publisher('/localizer_img', Image, queue_size=1)
    rospy.init_node('localizer_circle', anonymous=True)
    rospy.Subscriber("/camera/rgb/image_color", Image, find_circle)
    rate = rospy.Rate(10)  # NOTE(review): unused — spin() below blocks; confirm
    rospy.spin()
|
def max_subarray(arr, start, end):
    """Return (best_sum, i, j) for the maximum-sum contiguous subarray of
    arr[start..end] (inclusive bounds), by divide and conquer.

    With the linear cross-scan below, the whole recursion is O(n log n).
    """
    if start == end:
        return (arr[start], start, end)
    mid = (start + end) // 2
    lmax = max_subarray(arr, start, mid)
    rmax = max_subarray(arr, mid + 1, end)
    cmax = max_cross_subarray(arr, start, mid, end)
    # On ties max() keeps the earliest candidate (left, then right, then cross).
    return max((lmax, rmax, cmax), key=lambda x: x[0])


def max_cross_subarray(arr, start, mid, end):
    """Best subarray crossing the midpoint, as (sum, left_index, right_index).

    Bug fix (performance): the original recomputed sum(arr[i:...]) for every
    candidate index, making each merge step O(n^2); running sums make it O(n).
    """
    best_left = None
    running = 0
    left_idx = mid
    # Scan leftwards from mid; >= keeps the smallest index on ties, matching
    # the original max()-over-generator tie-breaking.
    for i in range(mid, start - 1, -1):
        running += arr[i]
        if best_left is None or running >= best_left:
            best_left = running
            left_idx = i
    best_right = None
    running = 0
    right_idx = mid + 1
    # Scan rightwards from mid+1; strict > keeps the smallest index on ties.
    for j in range(mid + 1, end + 1):
        running += arr[j]
        if best_right is None or running > best_right:
            best_right = running
            right_idx = j
    return (best_left + best_right, left_idx, right_idx)
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# While loop: sum the integers 1..num for a user-entered num.
num = int(input("Enter the number: "))
total = 0  # renamed from `sum`, which shadowed the builtin
i = 1
# Bug fix: the loop ran to a fixed 5 regardless of the input; the prompt and
# the final message both say "till the given num", so loop up to `num`.
while i <= num:
    total = total + i
    i += 1
print("The sum till the given num is: ", total)
# In[ ]:
# While loop to add up the digits of a given number, then reduce the sum
# to a single digit (its "digital root").
n = 9790058868
tot = 0
while 0 < n:
    dig = n % 10     # take the last digit (e.g. for 1234 this is 4)
    tot = dig + tot  # accumulate it into the running total
    n = n // 10      # drop the last digit and re-test the loop condition
print(tot)
# Bug fix: the original reduction loop never changed its exit condition
# (`tot` oscillated between the same values), so it spun forever.  What the
# surrounding notes describe — "we have a two digit result, so we added that
# too" — is repeatedly summing the digits of `tot` until one digit remains.
while tot > 9:
    tot = tot // 10 + tot % 10
print()
print("The sum of the given number digit is:" , tot)
# In[ ]:
# Fibonacci numbers using a function.
def fib(n):
    """Print the first n Fibonacci numbers with step-by-step trace output.

    Bug fix: the `def` keyword was missing, which was a syntax error.
    """
    a = 0
    b = 1
    if n == 1:
        print(a)
    else:
        print(a)
        print(b)
        for i in range(2, n):
            c = a + b
            a = b
            b = c
            print("A is now: ", a, "B is now: ", b)
            print(c)

fib(5)
# In[ ]:
# While loop to add up the digits of a given number.
n = 9790058868
tot = 0
while n > 0:
    # divmod peels off the last digit and shrinks n in a single step
    n, dig = divmod(n, 10)
    tot = tot + dig
print(tot)
# In[ ]:
|
# Read a string of '0'/'1' goals and report YES if either team ever scores
# 7 (or more) times in a row.
s = str(input())
teamA = 0  # current consecutive-'0' streak
teamB = 0  # current consecutive-'1' streak
answer = "NO"
for ch in s:
    if ch == "0":
        teamA += 1
        teamB = 0
    elif ch == "1":
        teamA = 0
        teamB += 1
    # Bug fix: the check used to run BEFORE the update, so a streak that was
    # completed on the final character was never detected.
    if teamA >= 7 or teamB >= 7:
        answer = "YES"
        break
print(answer)
from setuptools import setup

# Packaging metadata for the grub wallpaper generator CLI.
setup(
    name='Grub Wallpaper Generator',
    version='1.0',
    py_modules=['animewal'],
    install_requires=['click', 'requests', 'bs4'],
    # Expose animewal:cli as the `grubwallpaper` console command.
    entry_points='''
[console_scripts]
grubwallpaper = animewal:cli
''',
)
|
def classPhotos(r, b):
    """Return True iff, after sorting both lists, one of them is strictly
    taller than the other at every position (valid two-row class photo).

    Both input lists are sorted in place, matching the original behaviour.
    """
    r.sort()
    b.sort()
    # Either list empty: no valid arrangement.
    if not (r and b):
        return False
    # Equal front heights can never be strictly dominated.
    if r[0] == b[0]:
        return False
    # Whichever row wins the first position must win every later one too.
    if r[0] > b[0]:
        taller, shorter = r, b
    else:
        taller, shorter = b, r
    for i in range(1, len(r)):
        if taller[i] <= shorter[i]:
            return False
    return True
|
# coding=utf-8
# Classify the ethnicity of a face image with a trained Caffe model (Python 2).
import caffe
import numpy as np

# Deployed network definition, trained weights, and the test image to classify.
deploy = "/Users/lhw/caffe-project/race_classification/race_deploy.prototxt"
caffemodel = "/Users/lhw/caffe-project/race_classification/race_iter_500000.caffemodel"
image = "/Users/lhw/caffe-project/race_classification/test_image/aaa.bmp"

net = caffe.Net(deploy, caffemodel, caffe.TEST)

# Image preprocessing setup
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))  # HWC -> CHW channel order
# transformer.set_mean('data',np.load(meanfile).mean(1).mean(1))
transformer.set_raw_scale('data', 255)  # [0,1] float image -> [0,255]
transformer.set_channel_swap('data', (2, 1, 0))  # RGB -> BGR (Caffe convention)

# Load the image
img = caffe.io.load_image(image)
# Apply the preprocessing configured above and place the image into the input blob
net.blobs['data'].data[...] = transformer.preprocess('data', img)
out = net.forward()

race_list = ['Asian', 'Black', 'White']
# Class probabilities read straight from the 'prob' output blob.
prob = net.blobs['prob'].data[0].flatten()
print prob
predict = prob.argsort()[-1]  # index of the highest probability
print prob.argsort()
print 'the class is', race_list[predict]
# Same prediction, derived instead from the forward() return value.
print out['prob']
predict = out['prob'].argmax()
print 'the class is', race_list[predict]
|
import sqlite3
import os
class config():
    """Central runtime configuration: sqlite handle, listener endpoints,
    Shodan API key, and lookup helpers for the Apache-served injector."""

    path = os.path.dirname(os.path.realpath(__file__))
    con = sqlite3.connect(path + "/db/" + "frameshock.db")
    host = "192.168.0.107"
    LPORT = 9669
    WebPort = 80
    Apache = "/var/www"
    # Shodan API KEY
    # wAVHCCkorRhFNwGOE6JO9OXVkacdBxlH
    APIKey = "wAVHCCkorRhFNwGOE6JO9OXVkacdBxlH"
    # filter for db
    filter = ""

    def __init__(self):
        pass

    def getApiKey(self):
        return self.APIKey

    def getCursor(self):
        return self.con.cursor()

    def getCon(self):
        return self.con

    def getHost(self):
        return self.host

    def getPort(self):
        return self.LPORT

    def getPath(self):
        return self.path

    def getApachePath(self):
        return self.Apache

    def getWebPort(self):
        return self.WebPort

    def getUrlInjector(self):
        """URL of the injector script served by Apache, or "" if none exists.

        Later extensions win when several variants are present (.py, then
        .sh, then .bash), exactly as the original sequential checks did.
        """
        url = ""
        for ext in ("py", "sh", "bash"):
            if os.path.isfile(self.getApachePath() + "/Injector/injector." + ext):
                url = self.host + ":" + str(self.WebPort) + "/Injector/injector." + ext
        return url

    def getInjectorName(self):
        """Filename of the deployed injector script, or "" if none exists."""
        file = ""
        for ext in ("py", "sh", "bash"):
            if os.path.isfile(self.getApachePath() + "/Injector/injector." + ext):
                file = "injector." + ext
        return file

    def getTargets(self):
        """Prompt for a search filter and return the matching result rows."""
        cur = self.getCursor()
        self.filter = raw_input("Filter Descriptor: (Empty for all)")
        if self.filter != "":
            sql = "Select * from results where search='%s'" % (self.filter)
            targets = cur.execute(sql).fetchall()
        else:
            targets = cur.execute("Select * from results").fetchall()
        return targets
|
import json
import datetime
import random
from django.utils import timezone
from logging import Handler
class DBHandler(Handler, object):
    """
    This handler will add logs to a database model defined in settings.py

    If the log message (pre-format) is a JSON string, its keys are applied
    onto matching attributes of the log-entry instance before saving.
    """
    model_name = None  # dotted path, e.g. "myapp.models.LogEntry"
    expiry = None      # seconds to keep rows; 0 disables pruning

    def __init__(self, model="", expiry=0):
        super(DBHandler, self).__init__()
        self.model_name = model
        self.expiry = int(expiry)

    def emit(self, record):
        # Logging must never crash the application, so failures here are
        # deliberately swallowed.  Narrowed from bare `except:` so that
        # SystemExit / KeyboardInterrupt still propagate.
        try:
            # instantiate the model
            model = self.get_model(self.model_name)
            log_entry = model(level=record.levelname, message=self.format(record))
            # test if msg is json and apply to log record object
            try:
                data = json.loads(record.msg)
                for key, value in data.items():
                    if hasattr(log_entry, key):
                        try:
                            setattr(log_entry, key, value)
                        except Exception:
                            pass  # read-only/validated attribute: skip it
            except Exception:
                pass  # msg was not JSON: keep the plain formatted message
            log_entry.save()
            # in 20% of time, check and delete expired logs
            if self.expiry and random.randint(1, 5) == 1:
                model.objects.filter(
                    time__lt=timezone.now() - datetime.timedelta(seconds=self.expiry)
                ).delete()
        except Exception:
            pass

    def get_model(self, name):
        """Resolve a dotted path ("pkg.module.Name") to the named object."""
        names = name.split('.')
        mod = __import__('.'.join(names[:-1]), fromlist=names[-1:])
        return getattr(mod, names[-1])
|
from random import choice
def generateRelic():
    """Return one artifact name drawn uniformly at random from ``relics``."""
    return choice(relics)


# Classic D&D artifacts and relics.  The 32 Teeth of Dahlver-Nar follow a
# fixed naming pattern, so they are generated rather than listed by hand;
# the resulting list contents and order are unchanged.
relics = [
    'Axe of the Dwarvish Lords',
    'Baba Yaga\'s Hut',
    'Codex of the Infinite Planes',
    'Good Crown of Might',
    'Neutral Crown of Might',
    'Evil Crown of Might',
    'Crystal of the Ebon Flame',
    'Cup and Talisman of Al\'Akbar',
    'Eye of Vecna',
    'The Hand of Vecna',
    'Heward\'s Mystical Organ',
    'Horn of Change',
    'Invulnerable Coat of Arnd',
    'Iron Flask of Tuerny the Merciless',
    'Jacinth of Inestimable Beauty',
    'Johydee\'s Mask',
    'Kuroth\'s Quill',
    'Mace of Cuthbert',
    'Machine of Lum the Mad',
    'Mighty Servant of Leuk-O',
    'Orb of the Hatchling',
    'Orb of the Wyrmkind',
    'Orb of the Dragonette',
    'Orb of the Dragon',
    'Orb of the Great Serpent',
    'Orb of the Firedrake',
    'Orb of the Elder Wyrm',
    'Orb of the Eternal Grand Dragon',
    'Good Orb of Might',
    'Neutral Orb of Might',
    'Evil Orb of Might',
    'Queen Ehlissa\'s Marvelous Nightingale',
    'Recorder of Ye\'Cind',
    'Ring of Gaxx',
    'Part I of Rod of Seven Parts',
    'Part II of Rod of Seven Parts',
    'Part III of Rod of Seven Parts',
    'Part IV of Rod of Seven Parts',
    'Part V of Rod of Seven Parts',
    'Part VI of Rod of Seven Parts',
    'Part VII of Rod of Seven Parts',
    'Good Sceptre of Might',
    'Neutral Sceptre of Might',
    'Evil Sceptre of Might',
    'Sword of Kas',
]
relics += ['Tooth %d of Dahlver-Nar' % tooth for tooth in range(1, 33)]
relics += [
    'Throne of the Gods',
    'Wand of Orcus',
]
if __name__ == '__main__':
    # Interactive mode: print a fresh random relic each time Enter is pressed.
    # Loops forever; exit with Ctrl-C / Ctrl-D (EOF raises and ends the run).
    while True:
        input()
        print(generateRelic())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.