text stringlengths 38 1.54M |
|---|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# recursive solution
class Solution:
    """In-order traversal of a binary tree (recursive form)."""

    def helper(self, node, in_order):
        """Append node values to `in_order` in left-root-right order."""
        # Descend into the left subtree first.
        if node.left:
            self.helper(node.left, in_order)
        # Visit the node itself.
        in_order.append(node.val)
        # Finally traverse the right subtree.
        if node.right:
            self.helper(node.right, in_order)

    def inorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the in-order sequence of values; empty list for no root."""
        if not root:
            return []
        in_order = []
        self.helper(root, in_order)
        return in_order
# iterative solution
class Solution:
    """In-order traversal of a binary tree using an explicit stack."""

    def inorderTraversal(self, root: TreeNode) -> List[int]:
        """Return the in-order sequence of values; empty list for no root."""
        if not root:
            return []
        stack = []
        in_order = []
        curr = root
        while stack or curr:
            # Slide down to the leftmost not-yet-visited node.
            while curr:
                stack.append(curr)
                curr = curr.left
            # Visit the node on top of the stack, then move right.
            curr = stack.pop()
            in_order.append(curr.val)
            curr = curr.right
        return in_order
|
# -*- coding: utf-8 -*-
"""Monte Carlo estimation of pi.

Throws random points ("needles") into the [-1, 1] x [-1, 1] square and uses
the fraction landing inside the unit circle to estimate the circle's area
(which equals pi). The needle count is doubled until 100 repeated estimates
have a standard deviation below a tolerance.
"""
import random
import math
import statistics


def estimate_circle_area(needles, area_square=4):
    """Return one Monte Carlo estimate of the unit circle's area.

    needles: number of random points to throw.
    area_square: area of the bounding square (4 for [-1, 1] x [-1, 1]).
    """
    needles_in_circle = 0
    for _ in range(needles):
        # Coordinates between -1 and 1, since the circle's center is (0, 0).
        x = (random.random() * 2) - 1
        y = (random.random() * 2) - 1
        # Distance to the center <= 1 means the needle is inside the circle.
        if math.sqrt(x ** 2 + y ** 2) <= 1:
            needles_in_circle += 1
    return (area_square * needles_in_circle) / needles


def estimate_pi(needles=1000, batch=100, tolerance=0.005):
    """Repeat batches of estimates, doubling `needles` until the batch's
    standard deviation drops below `tolerance`.

    Returns (final_average, needles_used). Prints each batch average, as the
    original script did.
    """
    while True:
        # NOTE: the original reused the loop variable `i` for both the batch
        # and the needle loop; separating the loops into a helper avoids that
        # shadowing entirely.
        estimates = [estimate_circle_area(needles) for _ in range(batch)]
        average = sum(estimates) / batch
        print(average)
        if statistics.stdev(estimates) < tolerance:
            return average, needles
        needles *= 2


if __name__ == '__main__':
    random.seed()
    average, needles = estimate_pi()
    print("The estimated value for pi is ", average)
    print(needles, " needles were used in the final estimation")
"""
U-Net from https://github.com/tdeboissiere/DeepLearningImplementations/tree/master/pix2pix
Changes :
- added init='glorot_uniform' to Convolution2D, Deconvolution2D
- changed the final layers to get a number of channels equal to number of classes
of a semantic segmentation problem.
Limitations:
- number of rows and columns must be power of two (not necessarily square images)
- tensorflow backend (why? because of Deconvolution2D ?)
- need to provide the batch size as a parameter, but it can be None
"""
# Keras imports
from keras.models import Model
from keras.layers import (Input, merge)
from keras.layers.convolutional import (Convolution2D, MaxPooling2D,
ZeroPadding2D)
from keras.layers.core import Dropout
from keras.regularizers import l2
from layers.deconv import Deconvolution2D
from layers.ourlayers import (CropLayer2D, NdSoftmax)
def build_unet(img_shape=(3, None, None), nclasses=8, l2_reg=0.,
               init='glorot_uniform', path_weights=None,
               freeze_layers_from=None, padding=100, dropout=True):
    """Assemble a U-Net (Keras 1.x API) and return the compiled graph.

    Contracting path: four conv/conv/pool blocks plus a bottleneck.
    Expanding path: deconvolutions with crop+concat skip connections,
    a 1x1 classifier convolution, a crop back to the input size, and an
    n-dimensional softmax.
    """
    # Warn when weight decay is active.
    if l2_reg > 0.:
        print ("Regularizing the weights: " + str(l2_reg))

    # Input, zero-padded so the 'valid' convolutions still see the borders.
    inputs = Input(img_shape, name='input')
    padded = ZeroPadding2D(padding=(padding, padding), name='padded')(inputs)

    # Contracting block 1 (64 filters)
    conv1_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_1', W_regularizer=l2(l2_reg))(padded)
    conv1_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv1_2', W_regularizer=l2(l2_reg))(conv1_1)
    pool1 = MaxPooling2D((2, 2), (2, 2), name='pool1')(conv1_2)

    # Contracting block 2 (128 filters)
    conv2_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_1', W_regularizer=l2(l2_reg))(pool1)
    conv2_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv2_2', W_regularizer=l2(l2_reg))(conv2_1)
    pool2 = MaxPooling2D((2, 2), (2, 2), name='pool2')(conv2_2)

    # Contracting block 3 (256 filters)
    conv3_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_1', W_regularizer=l2(l2_reg))(pool2)
    conv3_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv3_2', W_regularizer=l2(l2_reg))(conv3_1)
    pool3 = MaxPooling2D((2, 2), (2, 2), name='pool3')(conv3_2)

    # Contracting block 4 (512 filters), with optional dropout
    conv4_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_1', W_regularizer=l2(l2_reg))(pool3)
    conv4_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv4_2', W_regularizer=l2(l2_reg))(conv4_1)
    if dropout:
        conv4_2 = Dropout(0.5, name='drop1')(conv4_2)
    pool4 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv4_2)

    # Bottleneck (1024 filters), with optional dropout
    conv5_1 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                            name='conv5_1', W_regularizer=l2(l2_reg))(pool4)
    conv5_2 = Convolution2D(1024, 3, 3, init, 'relu', border_mode='valid',
                            name='conv5_2', W_regularizer=l2(l2_reg))(conv5_1)
    if dropout:
        conv5_2 = Dropout(0.5, name='drop2')(conv5_2)
    # pool5 = MaxPooling2D((2, 2), (2, 2), name='pool4')(conv5_2)

    # Expanding block 1: upsample, crop the skip tensor to match, concat.
    upconv4 = Deconvolution2D(512, 2, 2, conv5_2._keras_shape, init,
                              'linear', border_mode='valid', subsample=(2, 2),
                              name='upconv4', W_regularizer=l2(l2_reg))(conv5_2)
    conv4_2_crop = CropLayer2D(upconv4, name='conv4_2_crop')(conv4_2)
    upconv4_crop = CropLayer2D(upconv4, name='upconv4_crop')(upconv4)
    Concat_4 = merge([conv4_2_crop, upconv4_crop], mode='concat',
                     concat_axis=3, name='Concat_4')
    conv6_1 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv6_1', W_regularizer=l2(l2_reg))(Concat_4)
    conv6_2 = Convolution2D(512, 3, 3, init, 'relu', border_mode='valid',
                            name='conv6_2', W_regularizer=l2(l2_reg))(conv6_1)

    # Expanding block 2
    # NOTE(review): only Concat_4 passes concat_axis=3; the merges below rely
    # on the default concat axis -- confirm this asymmetry is intended.
    upconv3 = Deconvolution2D(256, 2, 2, conv6_2._keras_shape, init,
                              'linear', border_mode='valid', subsample=(2, 2),
                              name='upconv3', W_regularizer=l2(l2_reg))(conv6_2)
    conv3_2_crop = CropLayer2D(upconv3, name='conv3_2_crop')(conv3_2)
    Concat_3 = merge([conv3_2_crop, upconv3], mode='concat', name='Concat_3')
    conv7_1 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv7_1', W_regularizer=l2(l2_reg))(Concat_3)
    conv7_2 = Convolution2D(256, 3, 3, init, 'relu', border_mode='valid',
                            name='conv7_2', W_regularizer=l2(l2_reg))(conv7_1)

    # Expanding block 3
    upconv2 = Deconvolution2D(128, 2, 2, conv7_2._keras_shape, init,
                              'linear', border_mode='valid', subsample=(2, 2),
                              name='upconv2', W_regularizer=l2(l2_reg))(conv7_2)
    conv2_2_crop = CropLayer2D(upconv2, name='conv2_2_crop')(conv2_2)
    Concat_2 = merge([conv2_2_crop, upconv2], mode='concat', name='Concat_2')
    conv8_1 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv8_1', W_regularizer=l2(l2_reg))(Concat_2)
    conv8_2 = Convolution2D(128, 3, 3, init, 'relu', border_mode='valid',
                            name='conv8_2', W_regularizer=l2(l2_reg))(conv8_1)

    # Expanding block 4
    upconv1 = Deconvolution2D(64, 2, 2, conv8_2._keras_shape, init,
                              'linear', border_mode='valid', subsample=(2, 2),
                              name='upconv1', W_regularizer=l2(l2_reg))(conv8_2)
    conv1_2_crop = CropLayer2D(upconv1, name='conv1_2_crop')(conv1_2)
    Concat_1 = merge([conv1_2_crop, upconv1], mode='concat', name='Concat_1')
    conv9_1 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv9_1', W_regularizer=l2(l2_reg))(Concat_1)
    conv9_2 = Convolution2D(64, 3, 3, init, 'relu', border_mode='valid',
                            name='conv9_2', W_regularizer=l2(l2_reg))(conv9_1)

    # 1x1 classifier conv: one channel per semantic class.
    conv10 = Convolution2D(nclasses, 1, 1, init, 'linear', border_mode='valid',
                           name='conv10', W_regularizer=l2(l2_reg))(conv9_2)

    # Crop back to the input spatial size, then per-pixel softmax.
    final_crop = CropLayer2D(inputs, name='final_crop')(conv10)
    softmax_unet = NdSoftmax()(final_crop)

    # Complete model
    model = Model(input=inputs, output=softmax_unet)

    # Load pretrained weights (not implemented).
    if path_weights:
        pass

    # Optionally freeze the first layers for finetuning.
    if freeze_layers_from is not None:
        freeze_layers(model, freeze_layers_from)

    return model
# Freeze layers for finetunning
# Freeze layers for finetunning
def freeze_layers(model, freeze_layers_from):
    """Set layers [0, freeze_layers_from) non-trainable, the rest trainable.

    The sentinel string 'base_model' freezes the first 23 layers (the VGG
    part of the network).
    """
    if freeze_layers_from == 'base_model':
        print (' Freezing base model layers')
        freeze_layers_from = 23

    # Debug aid: list every layer with its index.
    for index, layer in enumerate(model.layers):
        print(index, layer.name)
    print (' Freezing from layer 0 to ' + str(freeze_layers_from))

    for frozen in model.layers[:freeze_layers_from]:
        frozen.trainable = False
    for trainable_layer in model.layers[freeze_layers_from:]:
        trainable_layer.trainable = True
if __name__ == '__main__':
    # Smoke test: build the model, compile it and print its structure.
    print ('BUILD')
    model = build_unet(img_shape=(256, 512, 3), nclasses=11)
    print ('COMPILING')
    model.compile(loss="binary_crossentropy", optimizer="rmsprop")
    model.summary()
    print ('END COMPILING')
|
import numpy as np
# a=np.array([1,2,3])
# print(a)
# Creating arrays
ar1=np.array(range(10))
ar2=np.arange(10)
# NOTE(review): this nested list is ragged; NumPy >= 1.24 raises ValueError
# here (older versions built an object array) -- confirm the NumPy version.
ar3=np.array([[1,2,3,5],['a','b','c']])
print(ar1)
print(ar2)
print(ar3)
# Random numbers: generate the values first, then give them a shape
print(np.random.rand(10).reshape(2,5))
# np.arange works like the built-in range
print("="*30)
print(np.arange(10))
print(np.arange(10.0))
print(np.arange(5,12))
print(np.arange(5.0,12,2))
# linspace: num evenly spaced samples over the closed interval [start, stop]
print(np.linspace(10,20,num=21,endpoint=True))# 10..20 split into 20 steps (21 points)
print(np.linspace(10,13,num=3,endpoint=True))
print("="*30)
print(np.zeros((2,5),dtype=int))
ar4=np.zeros((3,4))
# ones_like(): array of ones with the same shape (and dtype) as another array
ar5=np.ones_like(ar4)
print(ar4)
print(ar5)
print(np.eye(3))
# Array shapes
ar6=np.arange(10)
ar7=np.zeros((2,5))
print(ar1)
print(ar7)
# .T is the transpose
print(ar7.T)
# reshape must keep the total element count unchanged
print(ar6.reshape(2,5))
# np.resize repeats or truncates the data to fit the new shape
print(np.resize(np.arange(10),(2,3)))
# Copying arrays: copy() is independent of the original
ar8=np.arange(10)
# ar9=ar8
ar10=ar8.copy()
ar10[2]=100
# ar8[2]=100
print(ar8,ar10)
# Changing the element type with astype
ar11=np.arange(10,dtype=float)
print(ar11.dtype)
print(ar11.astype(np.int64).dtype)
# Stacking arrays
# a=np.arange(5)
# b=np.arange(5,9)
# print(a)
# print(b)
# horizontal concatenation
# print(np.hstack((a,b)))
print("="*30)
# vertical concatenation
# a=np.array([[1],[2],[3]])
# b=np.array([['a'],['b'],['c']])
# print(np.vstack((a,b)))
a=np.arange(5)
b=np.arange(5,10)
print(np.stack((a,b),axis=0))
# Splitting arrays
ar=np.arange(16).reshape(4,4)
print(ar)
# hsplit splits along columns, vsplit along rows
print(np.hsplit(ar,2)[0])
print(np.vsplit(ar,2)[0])
# Basic statistics
ar=np.arange(6).reshape(2,3)
print(ar)
print(ar.mean())# arithmetic mean
print(ar.max())
print(ar.std())# standard deviation
print(ar.var())# variance
print(ar.sum(),np.sum(ar,axis=0))# sum; axis=0 sums down each column
# -*- coding:utf-8 -*-
from web_.spider_web_allLink import html_download
from web_.spider_web_allLink import html_parse
from web_.spider_web_allLink import link_manage
from web_.spider_web_allLink import results_print
class Spider_Main(object):
def __init__(self):
self.links = link_manage.LinkManager()
self.downloader = html_download.HtmlDownloader()
self.parser = html_parse.HtmlPaser()
self.r_printer = results_print.R_printer()
def craw(self, url):
count = 1
self.links.link_add_new(url)
while self.links.link_has_new():
link_new = self.links.link_get_new()
try:
html_content = self.downloader.downloader(link_new)
links_new = self.parser.parser(link_new, html_content)
self.links.links_add_new(links_new)
self.r_printer.data_print_one(link_new)
print "%d, %s" % (count, link_new)
count += 1
except :
print "Craw failed : %s" % link_new
# self.r_printer.data_print()
if __name__ == "__main__":
    # Entry point: crawl the local test server.
    start_url = 'http://192.168.130.130:81/'
    crawler = Spider_Main()
    crawler.craw(start_url)
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
# Load the Kaggle Titanic train/test splits.
train_dataset = pd.read_csv('../input/train.csv')
test_dataset = pd.read_csv('../input/test.csv')

# In[ ]:
# Numeric summary of the training data.
train_dataset.describe()

# In[ ]:
# Column dtypes and non-null counts.
train_dataset.info()

# In[ ]:
# BUG FIX: the original used `import matplotlib as plt`; `plt` is the
# conventional alias for the *pyplot* submodule, and the package root does
# not expose the plotting functions.
import matplotlib.pyplot as plt
# Bar graph: survived vs dead, broken down by a categorical feature.
def bar_chart(feature):
    """Plot stacked bars of `feature` value counts split by survival."""
    survived_counts = train_dataset[train_dataset['Survived'] == 1][feature].value_counts()
    dead_counts = train_dataset[train_dataset['Survived'] == 0][feature].value_counts()
    chart_data = pd.DataFrame([survived_counts, dead_counts])
    chart_data.index = ['Survived', 'Dead']
    chart_data.plot(kind='bar', stacked=True, figsize=(3, 3))


bar_chart('Pclass')
bar_chart('Sex')
# In[ ]:
# In[ ]:
# Encode the categorical Sex column as 0/1 integers.
sex_codes = {'male': 0, 'female': 1}
train_dataset['Sex'] = train_dataset['Sex'].map(sex_codes)
test_dataset['Sex'] = test_dataset['Sex'].map(sex_codes)
#print(train_dataset.values[:,4])
#print(test_dataset.values[:,3])

# In[ ]:
# Fit a decision tree on the two encoded features only.
from sklearn.tree import DecisionTreeClassifier

dtree = DecisionTreeClassifier()
X_train = train_dataset[['Pclass', 'Sex']]
y = train_dataset['Survived']
X_test = test_dataset[['Pclass', 'Sex']]
dtree.fit(X_train, y)

# Predict on the test split and write a Kaggle submission file.
prediction = dtree.predict(X_test)
passengers_id = test_dataset['PassengerId']
dfPrediction = pd.DataFrame({'PassengerId': passengers_id, 'Survived': prediction})
dfPrediction.to_csv('submission.csv', index=False)
#print(dfPrediction)
|
# Ask the user for an item name and complain if it contains a digit.
def contains_digit(text):
    """Return True if any character of `text` is a decimal digit."""
    return any(ch.isdigit() for ch in text)


if __name__ == "__main__":
    # BUG FIX: the original assigned the prompt string itself
    # (`item_name =("What is the item name? ")`) instead of calling input().
    item_name = input("What is the item name? ")
    if contains_digit(item_name):
        print("your item name got number in it")
    else:
        print("you are OK")
import scipy.io as sio
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn import svm
from sklearn import tree
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.feature_selection import SelectKBest
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
import random
import sys
from time import time
print('Start')

# The pre-saved sample ordering is loaded but immediately overwritten by the
# random permutation below -- kept for parity with the original experiment.
Data = sio.loadmat('order_for_60000.mat')
index = Data['order2']

N = 30000
# BUG FIX: random.shuffle requires a mutable sequence; in Python 3 range()
# is immutable, so shuffle(range(2*N)) raises TypeError.
index = list(range(2 * N))
random.shuffle(index)

Data = sio.loadmat('Y_75_train.mat')
Y = Data['Y_75_train']
# First N shuffled rows for training, last N for testing (disjoint halves).
Y_train = Y[index[0:N], :]
Y_test = Y[index[-N:], :]

X_train_fin = np.load('X_train_pca_200.npy')
X_test_fin = np.load('X_test_pca_200.npy')
print(X_train_fin.shape)
print(X_test_fin.shape)

t0 = time()
# Real (SAMME.R) boosted decision stumps of depth 2.
bdt_real = AdaBoostClassifier(
    DecisionTreeClassifier(max_depth=2),
    n_estimators=600,
    learning_rate=1,
    algorithm="SAMME.R")
bdt_real.fit(X_train_fin, Y_train.ravel())

P = bdt_real.predict(X_test_fin)
# NOTE(review): accuracy_score(y_true, y_pred) -- arguments look swapped,
# although accuracy is symmetric so the value is unaffected.
A = accuracy_score(P, Y_test.ravel())
print(A)
print("done in %0.3fs" % (time() - t0))
print('Finish')
|
import RPi.GPIO as GPIO
import time
import numpy as np
import cv2
import datetime
import os
import glob
# Use physical board pin numbering; drive the servo from pin 36.
GPIO.setmode(GPIO.BOARD)
GPIO.setup(36,GPIO.OUT)
# 50 Hz PWM -- the standard hobby-servo control frequency.
pwm = GPIO.PWM(36,50)
#opens completely
pwm.start(7.7)
time.sleep(0.5)
# # fully closes at 3.4% duty cycle
# pwm.ChangeDutyCycle(3.4)
# time.sleep(0.5)
# NOTE(review): mkdir fails silently if the directory already exists;
# the raspistill commands below write into this directory.
os.system("sudo mkdir servo_pics")
# define the codec and create VideoWriter object
fps_out = 1
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter("servo_pwm" + ".avi", fourcc, fps_out, (1280, 720))
# Scale/thickness used by cv2.putText when annotating captured frames.
direction_scale = 1
direction_thickness = 2
# pwm.ChangeDutyCycle(7)
def close():
    """Sweep the servo from open (7.7% duty) toward closed, photographing
    and annotating a frame at each step."""
    i = 0
    # Steps of -2 from 7.7 toward 3.4 (exclusive): 7.7 and 5.7.
    duty_cycles1 = [x for x in np.arange(7.7, 3.4, -2)]
    print(duty_cycles1)
    for i in duty_cycles1:
        pwm.ChangeDutyCycle(i)
        time.sleep(0.2)
        # Capture a still named after the duty cycle.
        image = "sudo raspistill -w 1280 -h 720 -hf -vf -o /home/pi/Desktop/809T_Autonomous_Robotics/Assignments/Ass_6/servo_pics/" + str(i) + ".jpg"
        os.system(image)
        # NOTE(review): raspistill writes into servo_pics/, but imread loads
        # from the current directory -- confirm the intended path.
        image = cv2.imread(str(i) + ".jpg")
        text = "Duty: " + str(i) + "%"
        cv2.putText(image,text,(50,50),cv2.FONT_HERSHEY_DUPLEX,direction_scale,(0,0,255), direction_thickness)
        # cv2.imshow("image",image)
        cv2.imwrite(str(i) + ".jpg",image)
        print("Saved image: ", i)
        time.sleep(0.1)
def open():
    """Sweep the servo from closed (3.4% duty) toward open, photographing
    a frame at each step.

    NOTE(review): this shadows the builtin open(); not renamed because the
    script calls open() at module level below.
    """
    i = 0
    # Steps of +2 from 3.4 toward 7.7 (exclusive): 3.4 and 5.4.
    duty_cycles2 = [x for x in np.arange(3.4, 7.7, 2)]
    print(duty_cycles2)
    for i in duty_cycles2:
        pwm.ChangeDutyCycle(i)
        time.sleep(0.2)
        image = "sudo raspistill -w 1280 -h 720 -hf -vf -o /home/pi/Desktop/809T_Autonomous_Robotics/Assignments/Ass_6/servo_pics/" + str(i) + ".jpg"
        os.system(image)
        print("Saved image: ", i)
        time.sleep(0.1)
# Run one close/open cycle, then release the PWM pin.
# open()
close()
open()
# close()
pwm.stop()
GPIO.cleanup()
# # find all images recorded during the run
# files = glob.glob("servo_pics/*.jpg")
# print(files)
# files = sorted(glob.glob("servo_pics/*.jpg"))
# print(files)
# # loop through and print frames to video file
# for x in files:
# print(x)
# image_out = cv2.imread(x)
# out.write(image_out)
# NOTE(review): the message says "Video_servo.avi" but the VideoWriter above
# targets "servo_pwm.avi", and the frame-writing loop is commented out.
print("Video_servo" + ".avi is now ready for viewing!")
|
from django import forms
from .models import *
class CompeticionForm(forms.ModelForm):
    """ModelForm exposing the editable fields of Competicion."""

    class Meta:
        model = Competicion
        fields = ['nombre', 'equipo', 'torneo', 'descripcion']
class EquipoForm(forms.ModelForm):
    """ModelForm exposing the editable fields of Equipo."""

    class Meta:
        model = Equipo
        fields = ['nombre', 'lugar_origen', 'ColorPrimerUniforme',
                  'ColorSegundoUniforme', 'numeroDeJugadores']
class TorneoForm(forms.ModelForm):
    """ModelForm for Torneo that renders `equipos` as multi-select checkboxes."""

    class Meta:
        model = Torneo
        fields = ['nombre', 'lugar', 'premio', 'incripcion', 'equipos']

    def __init__(self, *args, **kwargs):
        super(TorneoForm, self).__init__(*args, **kwargs)
        equipos_field = self.fields["equipos"]
        # Multi-select checkbox widget instead of the default select box.
        equipos_field.widget = forms.widgets.CheckboxSelectMultiple()
        # Helper text shown next to the widget.
        equipos_field.help_text = "Ingrese los Equipos al torneo"
        # Offer every team; narrow this queryset here if filtering is needed.
        equipos_field.queryset = Equipo.objects.all()
|
import numpy as np
import matplotlib.pyplot as plt
x = [1.0, 2.0, 4.0, 3.0, 5.0]
y = [1.0, 3.0, 3.0, 2.0, 5.0]


def fit_simple_linear(xs, ys):
    """Ordinary least squares fit of y = b0 + b1*x.

    Returns (b0, b1) = (intercept, slope). Assumes len(xs) == len(ys) > 0
    and xs is not constant.
    """
    mean_x = sum(xs) / len(xs)
    mean_y = sum(ys) / len(ys)
    # Slope = covariance(x, y) / variance(x) (unnormalized sums cancel).
    sxy = sum((a - mean_x) * (b - mean_y) for a, b in zip(xs, ys))
    sxx = sum((a - mean_x) ** 2 for a in xs)
    b1 = sxy / sxx
    b0 = mean_y - b1 * mean_x
    return b0, b1


def rmse(xs, ys, b0, b1):
    """Root-mean-square error of the fitted line on (xs, ys)."""
    total = sum((b - (b0 + b1 * a)) ** 2 for a, b in zip(xs, ys))
    return (total / len(xs)) ** 0.5


if __name__ == "__main__":
    print ("X values:", x)
    print ("Y values:", y)
    b0, b1 = fit_simple_linear(x, y)
    print ("Slope:", b1)
    print ("Intercept:", b0)
    y_pre = [b0 + b1 * xi for xi in x]
    print ("Rmse:", rmse(x, y, b0, b1))
    print("y_pre is:", y_pre)
    # Plot the raw points and the fitted line.
    plt.scatter(x, y, s=10)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.plot(x, y_pre, color='y')
    plt.show()
|
"""
python -m venv venv
cd ..
create a virtual environment
python -m venv web_server
. web_server/bin/activate
pip list
deactivate
export FLASK_APP=server.py
export FLASK_ENV=development
flask run
pip freeze > requirements.txt # put current deps in a requirements.txt file captures in env
pythonanywhere
""" |
from django import forms
class SearchUser(forms.Form):
    """Form asking for two usernames to compare, with Bootstrap styling."""

    name1 = forms.CharField(
        label='Username1',
        error_messages={'required': 'Introdueix un usuari'},
        widget=forms.TextInput(attrs={
            'class': 'form-control form-control-lg',
            'placeholder': 'productes_capell',
        }),
    )
    name2 = forms.CharField(
        label='Username2',
        error_messages={'required': 'Introdueix un usuari'},
        widget=forms.TextInput(attrs={
            'class': 'form-control form-control-lg',
            'placeholder': 'la_vallenca',
        }),
    )
|
from blog.extend.UrlsHelper import url
from blog import blog
from blog.views import views
# Front page and category/month/pagination views (all served by views.index).
blog.add_url_rule('/', view_func=views.index, methods=['GET', 'POST'])
blog.add_url_rule('/index', view_func=views.index, methods=['GET', 'POST'])
blog.add_url_rule('/<string:categoryname>/<string:month>/<int:page>',
                  view_func=views.index,
                  methods=['GET', 'POST'])
# Authentication and account management.
url('/login', 'views.views.login', methods=['GET', 'POST'])
url('/login/authorized', 'views.views.authorized', methods=['GET', 'POST'])
url('/logout', 'views.views.logout')
url('/usereditinfo/', 'views.views.usereditinfo', methods=['GET', 'POST'])
url('/userchangepwd/', 'views.views.userchangepwd', methods=['GET', 'POST'])
url('/register', 'views.views.register', methods=['GET', 'POST'])
# Article authoring, search and file uploads.
url('/article_create', 'views.views.article_create', methods=['GET', 'POST'])
url('/article_edit/<int:id>', 'views.views.article_edit', methods=['GET', 'POST'])
url('/search', 'views.views.search', methods=['GET', 'POST'])
url('/search_result/<string:sch>/<int:page>', 'views.search_result')
url('/upload', 'views.upload.upload', methods=['GET', 'POST'])
url('/uploads/<filename>', 'views.upload.uploaded_file')
# Static-ish blog pages and JSON endpoints.
url('/blog_msg', 'views.views.blog_msg')
url('/about', 'views.views.blog_about')
url('/calendar', 'views.views.blog_calendar')
url('/calendar_json', 'views.views.calendar_json', methods=['GET', 'POST'])
url('/visit_json', 'views.views.visit_json')
url('/article_json', 'views.views.article_json')
url('/article_commit', 'views.views.article_commit', methods=['GET', 'POST'])
# ############ admin ###############
url('/admin/main', 'views.admin.index1')
url('/admin/users', 'views.admin.users')
url('/admin/useredit/<id>', 'views.admin.useredit', methods=['GET', 'POST'])
url('/admin/userdelete/<id>', 'views.admin.userdelete')
url('/admin/category', 'views.admin.category')
url('/admin/categorycreate', 'views.admin.categorycreate', methods=['GET', 'POST'])
url('/admin/categoryedit/<id>', 'views.admin.categoryedit', methods=['GET', 'POST'])
url('/admin/categorydelete/<id>', 'views.admin.categorydelete')
url('/admin/article', 'views.admin.article')
url('/admin/articledelete/<id>', 'views.admin.articledelete')
url('/admin/admin_second_bar', 'views.admin.admin_second_bar')
url('/admin/admin_second_baredit/<id>', 'views.admin.admin_second_baredit', methods=['GET', 'POST'])
url('/admin/settings', 'views.admin.settings', methods=['GET', 'POST'])
url('/admin/imgs', 'views.admin.imgs')
url('/admin/atts', 'views.admin.atts')
url('/admin/rmfile/<filename>/<urls>', 'views.upload.rmfile')
url('/admin/backup', 'views.backup.backup')
url('/admin/dobackup', 'views.backup.dobackup')
url('/admin/visit_statistics', 'views.Taskscheduler.visit_statistics')
#!/usr/bin/env python
'''
twoprime: analysis of twoprime-seq data
'''
from ez_setup import use_setuptools
use_setuptools()

from setuptools import find_packages, setup

__version__ = '0.01a'

entry_points = """
[console_scripts]
twoprime-process-signals = twoprime.process_signals:main
"""

install_requires = ["genomedata>1.3.1", "numpy"]

if __name__ == '__main__':
    setup(name='twoprime',
          # Consistency fix: reuse __version__ instead of repeating the
          # version string literal in two places.
          version=__version__,
          description='Analysis of twoprime sequencing data',
          author='Jay Hesselberth',
          author_email='jay.hesselberth@gmail.com',
          packages=['twoprime'],
          install_requires=install_requires,
          entry_points=entry_points)
|
#!/usr/bin/python3
# Falsely assumes all months have 31 days

MONTHS = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
          'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
# Weekday of 1 JAN maps to an offset, where 0 = FRI.
WEEKDAYS = ['FRI', 'SAT', 'SUN', 'MON', 'TUE', 'WED', 'THU']


def tgif_answer(day, month, jan1_weekday):
    """Return "TGIF", "not sure" or ":(" for a date and the weekday of 1 JAN.

    A date before 1 MAR is definitely a Friday when the shifted day count is
    divisible by 7; later dates (and FEB 29) are only "not sure" because the
    31-days-per-month assumption may be off by the leap day.
    """
    assert month in MONTHS
    days = -1  # days since 1 JAN
    # add days in previous months (pretending each has 31 days):
    days += 31 * MONTHS.index(month)
    # add days in this month:
    days += day
    before = days < 31 + 28
    feb29 = (days == 59 and month == 'FEB')
    # shift by weekday on 1 JAN, where 0 = FRI:
    days += WEEKDAYS.index(jan1_weekday)
    if days % 7 == 0 and (before or feb29):
        return "TGIF"
    elif days % 7 in [0, 6] and not before and not feb29:
        return "not sure"
    else:
        return ":("


if __name__ == "__main__":
    line = input().split()
    print(tgif_answer(int(line[0]), line[1], input().strip()))
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Available OpenCV threshold types, for reference:
# THRESH_BINARY
# THRESH_BINARY_INV
# THRESH_MASK
# THRESH_OTSU
# THRESH_TOZERO
# THRESH_TOZERO_INV
# THRESH_TRIANGLE
# THRESH_TRUNC
# Load '1.jpg' as a single-channel grayscale image (flag 0).
img=cv2.imread('1.jpg',0)
# NOTE: despite the variable/window name "Binary", THRESH_TRUNC does not
# binarize -- it caps pixel values at the 168 threshold and leaves the rest.
ret,binThreshold=cv2.threshold(img, 168, 255, cv2.THRESH_TRUNC)
cv2.imshow("Binary",binThreshold)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
import util
def display_board(board, player):
    """Print the board row by row, then a one-line health/armor status."""
    for row in board:
        # Each row is printed as one line with no separators between cells.
        print(''.join(str(cell) for cell in row))
    print(f"health: {player['current_health']} armor: {player['base_armor']}")
def display_stats(player):
    """Interactive inventory screen.

    Redraws equipped gear and the item list each frame; 'w'/'s' move the
    selection cursor, 'e' equips the selected armor/sword, and 'i' exits,
    returning the (possibly mutated) player dict.
    """
    line_selected = 0
    while True:
        util.clear_screen()
        print(f"Equipped armor: {player['equipped_armor']}")
        print(f"Equipped weapon: {player['equipped_weapon']}")
        print("health: ", player['current_health'], ", armor: ", player['base_armor'])
        for counter, item in enumerate(player['items']):
            # The original repeated an identical branch for sword/key/food/
            # armor; all four printed the same text, so they collapse here.
            if item['type'] in ('sword', 'key', 'food', 'armor'):
                prefix = "> " if line_selected == counter else ""
                print(f"{prefix}type: {item['type']} damage:")
            else:
                print("type: ", item['type'])
        key = util.key_pressed()
        if key == 'w':
            # Move the cursor up, clamped at the first item. (The original
            # also had an unreachable `elif line_selected == len(items)`
            # branch that reset an unrelated counter; removed as dead code.)
            if line_selected > 0:
                line_selected -= 1
        elif key == 's':
            # Move the cursor down, clamped at the last item.
            if line_selected < len(player['items']) - 1:
                line_selected += 1
        elif key == 'i':
            return player
        elif key == 'e':
            equipped = player['items'][line_selected]
            if equipped['type'] == 'armor':
                player['equipped_armor'] = equipped
            elif equipped['type'] == 'sword':
                player['equipped_weapon'] = equipped
""" inventory_x_length = 10
inventory_y_length = 5
for i in range(inventory_y_length):
for j in range(inventory_x_length):
if(i == 0 or i == inventory_x_length-1):
print("-", end="")
else:
print("|")
"""
|
__author__ = 'Rushil'

# Project Euler 40: Champernowne's constant 0.123456789101112131415...
def champernowne_digits(limit):
    """Return at least `limit` fractional digits of Champernowne's constant.

    Concatenates 0, 1, 2, ... until enough characters exist, then drops the
    leading '0' so index k-1 holds the k-th digit after the decimal point.
    """
    pieces = []
    total = 0
    n = 0
    while total < limit + 1:  # +1 compensates for the stripped leading '0'
        piece = str(n)
        pieces.append(piece)
        total += len(piece)
        n += 1
    return ''.join(pieces)[1:]


def euler40(max_index=1000000):
    """Product d(1) * d(10) * d(100) * ... * d(max_index)."""
    digits = champernowne_digits(max_index)
    prod = 1
    n = 1
    # BUG FIX: the original used `while n != 1000000`, which stopped BEFORE
    # multiplying in d(1000000) and so dropped the final factor.
    while n <= max_index:
        prod *= int(digits[n - 1])
        n *= 10
    return prod


if __name__ == '__main__':
    print(euler40())
''' Implementation of realtional coherence my thesis
basically hardcoded to work on the output from Nitish's
model.
'''
from collections import defaultdict
from create_wned_tas import init_view, init_constituent, serialize_tas
from ccg_nlpy import core, local_pipeline
from ccg_nlpy.core import view
import numpy as np
import pdb
from os import listdir
from os.path import isfile, join
RELATIONS_FILE = \
"/shared/preprocessed/cddunca2/thesis/fb15k_237_relations.tsv"
#CAND_MAP="jointScoreMap"
CAND_MAP="labelScoreMap"
def get_ta_dir(directory):
    """
    Load every serialized TextAnnotation found in `directory`.

    @param directory path to a directory of serialized TA json files
    @return a list of TextAnnotation objects
    """
    pipeline = local_pipeline.LocalPipeline()
    annotations = []
    for entry in listdir(directory):
        path = join(directory + "/", entry)
        if not isfile(path):
            continue
        with open(path, mode='r', encoding='utf-8') as handle:
            annotations.append(
                core.text_annotation.TextAnnotation(handle.read(), pipeline))
    return annotations
def init_relations_dict():
    ''' Build a symmetric mapping of Wikipedia titles: t1 -> {tk} such that
    (t1, r, tk) or (tk, r, t1) is a relation in FB15k-237.
    '''
    relations = defaultdict(set)
    with open(RELATIONS_FILE, "r") as handle:
        for line in handle:
            columns = line.strip().split("\t")
            # Column 0 = head entity, column 1 = relation, column 2 = tail.
            relations[columns[0]].add(columns[2])
            relations[columns[2]].add(columns[0])
    return relations
def init_coherence_constituent(el_con, label, score):
    """Build a COHERENCE-view constituent: copy the span info from `el_con`
    and attach the re-scored label."""
    constituent = {
        'tokens': el_con['tokens'],
        'score': score,
        'label': label,
        'start': el_con['start'],
        'end': el_con['end'],
    }
    return constituent
def get_disambiguation_context(constituent, constituents, strategy):
    ''' The disambiguation context is the set of all titles which are not a
    candidate title for the constituent under consideration.
    Question: how does this work with mentions which corefer?
    '''
    context = set()
    for other in constituents:
        if strategy == "cucerzan":
            context.update(list(other[CAND_MAP].keys()))
        if strategy == "vinculum":
            # NOTE(review): update() with a string adds its individual
            # characters; .add(other['label']) may have been intended.
            context.update(other['label'])
    return context.difference(set(constituent[CAND_MAP].keys()))
def compute_confidence(constituent):
    ''' Confidence of the current label, measured as the gap between the two
    highest-scoring candidate titles; with a single candidate, its score.
    '''
    scores = list(constituent[CAND_MAP].values())
    if len(scores) == 1:
        return scores[0]
    ranked = sorted(scores, reverse=True)
    return ranked[0] - ranked[1]
# Built once at import time from the FB15k-237 relations TSV.
REL_DICT = init_relations_dict()
def score_cand(candidate, disambiguation_context):
    """Count (as a float, matching the original accumulator) how many titles
    in the context share an FB15k-237 relation with `candidate`."""
    return float(sum(1 for title in disambiguation_context
                     if title in REL_DICT[candidate]))
def coherence_cand(constituent, disambiguation_context):
    """Re-score every candidate title by adding its (normalized) relational
    coherence with the disambiguation context to its model score.

    Returns (best_score, best_label); falls back to the model's own label
    and confidence when nothing beats it.
    """
    best_score = compute_confidence(constituent)
    best_cand = constituent['label']
    candidates = constituent[CAND_MAP].keys()
    if len(candidates) == 1:
        return best_score, best_cand
    # BUG FIX: with a context of size <= 1 the original Z was 0 (or -1),
    # making the division below raise ZeroDivisionError (or flip the sign).
    Z = max(len(disambiguation_context) - 1, 1)
    for candidate in candidates:
        norm_score = score_cand(candidate, disambiguation_context) / Z
        coh_score = norm_score + constituent[CAND_MAP][candidate]
        if coh_score > best_score:
            best_score = coh_score
            best_cand = candidate
    return best_score, best_cand
def coherence_view(view):
    """Build a COHERENCE view by re-scoring every constituent of the entity
    linking view against its disambiguation context."""
    #strategy = "vinculum"
    strategy = "cucerzan"
    constituents = view["viewData"][0]["constituents"]
    rescored = []
    for constituent in constituents:
        context = get_disambiguation_context(constituent, constituents,
                                             strategy)
        score, label = coherence_cand(constituent, context)
        rescored.append(init_coherence_constituent(constituent, label, score))
    coh_view = init_view("COHERENCE")
    coh_view["viewData"][0]["constituents"] = rescored
    return coh_view
EL_VIEW="NEUREL"
EL_VIEW="English_WIKIFIERVIEW"
def add_coherence_view_ta_dir(ta_dir_in, ta_dir_out=None):
    """Add a COHERENCE view to every TA in `ta_dir_in` and serialize the
    results to `ta_dir_out` (defaults to overwriting `ta_dir_in`)."""
    tas = get_ta_dir(ta_dir_in)
    for ta in tas:
        el_view = ta.view_dictionary[EL_VIEW].as_json
        ta.view_dictionary['COHERENCE'] = view.View(coherence_view(el_view), ta.tokens)
    # Idiom fix: identity comparison with None (was `== None`).
    if ta_dir_out is None:
        ta_dir_out = ta_dir_in
    serialize_tas(tas, ta_dir_out)
if __name__ == "__main__":
    import sys
    ta_dir_in = sys.argv[1]
    # BUG FIX: ta_dir_out was referenced unconditionally below but only
    # assigned when a second argument was given, raising NameError otherwise.
    ta_dir_out = sys.argv[2] if len(sys.argv) > 2 else None
    add_coherence_view_ta_dir(ta_dir_in, ta_dir_out)
|
from baseop import BaseOp
from funcs import has_parity, to_hex_digits, to_signed
from memory.memory import fetch_signed_byte
class OpRlca(BaseOp):
    """RLCA: rotate the accumulator left circular. 4 T-states."""

    def __init__(self, processor):
        BaseOp.__init__(self)
        self.processor = processor

    def execute(self, processor, memory, pc):
        rotated = _rlc_value(self.processor, self.processor.main_registers['a'])
        self.processor.main_registers['a'] = rotated
        return 4, False, pc

    def __str__(self):
        return 'rlca'
class OpRla(BaseOp):
    """RLA: rotate the accumulator left through the carry flag. 4 T-states."""

    def __init__(self, processor):
        BaseOp.__init__(self)
        self.processor = processor

    def execute(self, processor, memory, pc):
        accumulator = self.processor.main_registers['a']
        self.processor.main_registers['a'] = _rl_value(self.processor, accumulator)
        return 4, False, pc

    def __str__(self):
        return 'rla'
class OpRrca(BaseOp):
    """RRCA: rotate the accumulator right circular. 4 T-states."""

    def __init__(self, processor):
        BaseOp.__init__(self)
        self.processor = processor

    def execute(self, processor, memory, pc):
        accumulator = self.processor.main_registers['a']
        self.processor.main_registers['a'] = _rrc_value(self.processor, accumulator)
        return 4, False, pc

    def __str__(self):
        return 'rrca'
class OpRra(BaseOp):
    """RRA: rotate the accumulator right through the carry flag. 4 T-states.

    Implemented inline (unlike rla, which delegates to a helper): the old
    bit 0 moves into carry, and the old carry moves into bit 7.
    """

    def __init__(self, processor):
        BaseOp.__init__(self)
        self.processor = processor

    def execute(self, processor, memory, pc):
        accumulator = self.processor.main_registers['a']
        low_bit = accumulator & 0b1
        rotated = accumulator >> 1
        # Old carry becomes the new high bit.
        if self.processor.condition('c'):
            rotated |= 0b10000000
        self.processor.main_registers['a'] = rotated
        self.processor.set_condition('c', low_bit == 1)
        self.processor.set_condition('h', False)
        self.processor.set_condition('n', False)
        return 4, False, pc

    def __str__(self):
        return 'rra'
class OpRlcReg(BaseOp):
    """RLC r: rotate register `reg` left circular, updating S/Z/P flags.
    8 T-states."""

    def __init__(self, processor, reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.reg = reg

    def execute(self, processor, memory, pc):
        rotated = _rlc_value(self.processor, self.processor.main_registers[self.reg])
        self.processor.main_registers[self.reg] = rotated
        _set_sign_zero_parity_flags(self.processor, rotated)
        return 8, False, pc

    def __str__(self):
        return 'rlc {}'.format(self.reg)
class OpRrcReg(BaseOp):
    """RRC r: rotate register `reg` right circular, updating S/Z/P flags.
    8 T-states."""

    def __init__(self, processor, reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.reg = reg

    def execute(self, processor, memory, pc):
        rotated = _rrc_value(self.processor, self.processor.main_registers[self.reg])
        self.processor.main_registers[self.reg] = rotated
        _set_sign_zero_parity_flags(self.processor, rotated)
        return 8, False, pc

    def __str__(self):
        return 'rrc {}'.format(self.reg)
class OpRrcHlIndirect(BaseOp):
    """RRC (HL): rotate the byte at address HL right circular."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        # Address is masked to 16 bits before each memory access.
        result = _rrc_value(self.processor, self.memory[0xffff & address])
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 15, False, pc
    def __str__(self):
        return 'rrc (hl)'
class OpRlReg(BaseOp):
    """RL r (CB-prefixed): rotate register *reg* left through the carry flag."""
    def __init__(self, processor, reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.reg = reg
    def execute(self, processor, memory, pc):
        result = _rl_value(self.processor, self.processor.main_registers[self.reg])
        self.processor.main_registers[self.reg] = result
        # CB rotates also set sign/zero/parity, unlike the plain A rotates.
        _set_sign_zero_parity_flags(self.processor, result)
        return 8, False, pc
    def __str__(self):
        return 'rl {}'.format(self.reg)
class OpRrReg(BaseOp):
    """RR r (CB-prefixed): rotate register *reg* right through the carry flag."""
    def __init__(self, processor, reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.reg = reg
    def execute(self, processor, memory, pc):
        result = _rr_value(self.processor, self.processor.main_registers[self.reg])
        self.processor.main_registers[self.reg] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 8, False, pc
    def __str__(self):
        # Bug fix: this previously returned BaseOp.__str__(self); every
        # sibling op returns its assembly mnemonic instead.
        return 'rr {}'.format(self.reg)
class OpRlcHlIndirect(BaseOp):
    """RLC (HL): rotate the byte at address HL left circular."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        # Address is masked to 16 bits before each memory access.
        result = _rlc_value(self.processor, self.memory[0xffff & address])
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 15, False, pc
    def __str__(self):
        return 'rlc (hl)'
class OpRlHlIndirect(BaseOp):
    """RL (HL): rotate the byte at address HL left through the carry flag."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        # Address is masked to 16 bits before each memory access.
        result = _rl_value(self.processor, self.memory[0xffff & address])
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 15, False, pc
    def __str__(self):
        return 'rl (hl)'
class OpRrHlIndirect(BaseOp):
    """RR (HL): rotate the byte at address HL right through the carry flag."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        result = _rr_value(self.processor, self.memory[0xffff & address])
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 15, False, pc
    def __str__(self):
        # Bug fix: the mnemonic was 'rl (hl)', which is OpRlHlIndirect's;
        # this op executes rr.
        return 'rr (hl)'
class OpRld(BaseOp):
    """RLD: rotate the BCD nibbles of A and (HL) left as a group."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        mem_value = self.memory[0xffff & address]
        # to_hex_digits appears to return (high nibble still shifted, i.e.
        # value & 0xf0, low nibble value & 0x0f) -- TODO confirm against
        # its definition, which is outside this chunk.
        mem_digits = to_hex_digits(mem_value)
        reg_value = self.processor.main_registers['a']
        reg_digits = to_hex_digits(reg_value)
        # (HL) <- old (HL) low nibble shifted up | A low nibble
        self.memory[0xffff & address] = (mem_digits[1] << 4) + reg_digits[1]
        # A <- A high nibble | old (HL) high nibble
        self.processor.main_registers['a'] = reg_digits[0] + (mem_digits[0] >> 4)
        _set_sign_zero_parity_flags(self.processor, self.processor.main_registers['a'])
        self.processor.set_condition('h', False)
        self.processor.set_condition('n', False)
        return 18, False, pc
    def __str__(self):
        return 'rld'
class OpRrd(BaseOp):
    """RRD: rotate the BCD nibbles of A and (HL) right as a group."""
    def __init__(self, processor, memory):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
    def execute(self, processor, memory, pc):
        address = self.processor.get_16bit_reg('hl')
        mem_value = self.memory[0xffff & address]
        # See OpRld for the apparent to_hex_digits convention -- TODO confirm.
        mem_digits = to_hex_digits(mem_value)
        reg_value = self.processor.main_registers['a']
        reg_digits = to_hex_digits(reg_value)
        # (HL) <- A low nibble shifted up | old (HL) high nibble
        self.memory[0xffff & address] = (reg_digits[1] << 4) + (mem_digits[0] >> 4)
        # A <- A high nibble | old (HL) low nibble
        self.processor.main_registers['a'] = reg_digits[0] + mem_digits[1]
        _set_sign_zero_parity_flags(self.processor, self.processor.main_registers['a'])
        self.processor.set_condition('h', False)
        self.processor.set_condition('n', False)
        return 18, False, pc
    def __str__(self):
        return 'rrd'
class OpRlcIndexedIndirect(BaseOp):
    """RLC (IX/IY + d): rotate the byte at the indexed address left circular."""
    def __init__(self, processor, memory, indexed_reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
        self.indexed_reg = indexed_reg
    def execute_with_offset(self, processor, memory, pc, offset):
        # offset is the displacement d supplied by the decoder.
        address = self.processor.index_registers[self.indexed_reg] + offset
        value = self.memory[0xffff & address]
        result = _rlc_value(self.processor, value)
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 23, False, pc
    def __str__(self):
        return 'rlc ({} + d)'.format(self.indexed_reg)
class OpRrcIndexedIndirect(BaseOp):
    """RRC (IX/IY + d): rotate the byte at the indexed address right circular."""
    def __init__(self, processor, memory, indexed_reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
        self.indexed_reg = indexed_reg
    def execute_with_offset(self, processor, memory, pc, offset):
        # offset is the displacement d supplied by the decoder.
        address = self.processor.index_registers[self.indexed_reg] + offset
        value = self.memory[0xffff & address]
        result = _rrc_value(self.processor, value)
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 23, False, pc
    def __str__(self):
        return 'rrc ({} + d)'.format(self.indexed_reg)
class OpRlIndexedIndirect(BaseOp):
    """RL (IX/IY + d): rotate the byte at the indexed address left through carry."""
    def __init__(self, processor, memory, indexed_reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
        self.indexed_reg = indexed_reg
    def execute_with_offset(self, processor, memory, pc, offset):
        # offset is the displacement d supplied by the decoder.
        address = self.processor.index_registers[self.indexed_reg] + offset
        value = self.memory[0xffff & address]
        result = _rl_value(self.processor, value)
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 23, False, pc
    def __str__(self):
        return 'rl ({} + d)'.format(self.indexed_reg)
class OpRrIndexedIndirect(BaseOp):
    """RR (IX/IY + d): rotate the byte at the indexed address right through carry."""
    def __init__(self, processor, memory, indexed_reg):
        BaseOp.__init__(self)
        self.processor = processor
        self.memory = memory
        self.indexed_reg = indexed_reg
    def execute_with_offset(self, processor, memory, pc, offset):
        # offset is the displacement d supplied by the decoder.
        address = self.processor.index_registers[self.indexed_reg] + offset
        value = self.memory[0xffff & address]
        result = _rr_value(self.processor, value)
        self.memory[0xffff & address] = result
        _set_sign_zero_parity_flags(self.processor, result)
        return 23, False, pc
    def __str__(self):
        return 'rr ({} + d)'.format(self.indexed_reg)
def _rlc_value(processor, value):
    """Rotate an 8-bit *value* left circular, updating the C/H/N flags."""
    msb = value >> 7
    result = ((value << 1) & 0xff) | msb
    _set_carry_and_negate_flags_after_left_rotate(processor, msb)
    return result
def _rl_value(processor, value):
    """Rotate left through carry: bit 0 receives the old carry flag."""
    msb = value >> 7
    carry_in = 0b1 if processor.condition('c') else 0
    result = ((value << 1) & 0xff) | carry_in
    _set_carry_and_negate_flags_after_left_rotate(processor, msb)
    return result
def _rr_value(processor, value):
    """Rotate right through carry: bit 7 receives the old carry flag."""
    lsb = value & 0b1
    carry_in = 0b10000000 if processor.condition('c') else 0
    result = (value >> 1) | carry_in
    _set_carry_and_negate_flags_after_right_rotate(processor, lsb)
    return result
def _rrc_value(processor, value):
    """Rotate right circular: bit 0 wraps around to bit 7."""
    lsb = value & 0b1
    result = (value >> 1) | (0b10000000 if lsb else 0)
    _set_carry_and_negate_flags_after_right_rotate(processor, lsb)
    return result
def _set_carry_and_negate_flags_after_right_rotate(processor, low_bit):
    """Carry takes the bit rotated out (bit 0); H and N are cleared."""
    processor.set_condition('c', low_bit == 1)
    processor.set_condition('h', False)
    processor.set_condition('n', False)
def _set_carry_and_negate_flags_after_left_rotate(processor, high_bit):
    """Carry takes the bit rotated out (bit 7); H and N are cleared."""
    processor.set_condition('c', high_bit == 1)
    processor.set_condition('h', False)
    processor.set_condition('n', False)
def _set_sign_zero_parity_flags(processor, result):
    """Set S from bit 7, Z from result == 0, and P from has_parity(result)."""
    processor.set_condition('s', result & 0b10000000 > 0)
    processor.set_condition('z', result == 0)
    processor.set_condition('p', has_parity(result))
|
from django.conf.urls import url
from education.views import ClassList, CreateStudent, CreateTeacher, UserClassList, sign_up_for_class

# URL routes for the education app.  All patterns are raw strings:
# '\d' in a non-raw string is an invalid escape sequence
# (DeprecationWarning since Python 3.6, a SyntaxError from 3.12).
urlpatterns = [
    url(r'^class_list/$', ClassList.as_view()),
    url(r'^user_class_list/$', UserClassList.as_view()),
    url(r'^create_student/$', CreateStudent.as_view()),
    url(r'^create_teacher/$', CreateTeacher.as_view()),
    url(r'^sign_up_for_class/(?P<class_id>\d+)$', sign_up_for_class),
]
|
#! -*- coding:utf-8 -*-
from django.shortcuts import render
from app_shop.models import Product, Catalog
def index(request):
    """Render the shop front page.

    Products and catalogs are listed in reverse name order; the result
    sets are not sliced (a former ``[:5]`` limit is disabled).
    """
    products = Product.objects.all().order_by('-name')
    catalogs = Catalog.objects.all().order_by('-name')
    context = {
        'latest_product_list': products,
        'latest_catalog_list': catalogs,
    }
    return render(request, 'app_shop/index.html', context)
################################################################################
#def get_herofunc(request, heroname): #get_herofunc - функция перехватывает имя героя
# try:
# tryvar=Hero.objects.get(name=heroname) #пытается найти обьект с именем героя, которого перехватила функция
# except Hero.DoesNotExist: #исключения, если такого героя не существует
# return HttpResponse('Героя с таким именем нет!') #выводим страничку с ошибкой
#
# return render_to_response('base.html',{'tryvar': tryvar})
#########################################################
#It’s a very common idiom to load a template, fill a context and return an HttpResponse object with the result of the rendered template.
#Django provides a shortcut. Here’s the full index() view, rewritten:
#from django.shortcuts import render
#from polls.models import Poll
#def index(request):
# latest_poll_list = Poll.objects.all().order_by('-pub_date')[:5]
# context = {'latest_poll_list': latest_poll_list}
# return render(request, 'polls/index.html', context)
#Note that once we’ve done this in all these views, we no longer need to import loader, RequestContext and HttpResponse
#(you’ll want to keep HttpResponse if you still have the stub methods for detail, results, and vote).
#The render() function takes the request object as its first argument, a template name as its second argument and a dictionary as its optional third argument.
#It returns an HttpResponse object of the given template rendered with the given context."
######################################################### |
import luigi
import os
import luigi.contrib.postgres
import json
from app.utils.helper import derive_current_timestamp
from app.helpers.subreddit_ingestion import SubredditIngestion
class IngestSubreddit(luigi.Task):
    """
    Task to individually ingest the Subreddit data and store as separate output targets
    """
    subreddit_name = luigi.Parameter()
    # NOTE(review): this default is evaluated once, at class-definition
    # time, so every run of the process shares one timestamp -- confirm
    # whether a per-task timestamp was intended.
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)
    data_dir_path = luigi.Parameter()
    def run(self):
        """Fetch the top data for the subreddit and write it as JSON."""
        # Instantiate the subreddit ingestion object
        subreddit_ingestion = SubredditIngestion(self.subreddit_name,
                                                 self.start,
                                                 self.top_n_subreddits,
                                                 self.top_n_posts,
                                                 self.top_n_comments)
        results = subreddit_ingestion.derive_top_data()
        # Save subreddit data into a subreddit specific file
        with open(self.output().path, "w") as output_file:
            json.dump(results, output_file)
    def output(self):
        """Target is <data_dir_path>/<subreddit_name>.json."""
        # derive the save paths for each subreddit
        subreddit_save_path = os.path.join(self.data_dir_path, f"{self.subreddit_name}.json")
        return luigi.LocalTarget(subreddit_save_path)
|
import numpy as np
import pandas as pd
class TranslatingCordinateSystem:
    """Holds the position/velocity/acceleration of a translating frame.

    NOTE(review): the class name misspells 'Coordinate'; renaming would
    break importers, so it is left as-is.
    """
    def __init__(self, r_a = None, v_a = None, a_a = None):
        # r_a / v_a / a_a: position, velocity and acceleration of frame A
        # -- presumably array-like; TODO confirm with callers.
        self.r_a = r_a
        self.v_a = v_a
        self.a_a = a_a
    def translations(self):
        # NOTE(review): this method appears unfinished -- it binds a local
        # and implicitly returns None.  Confirm the intended computation.
        r_b = self.r_a
from googletrans import Translator
tr = Translator()
from telegram import Update, KeyboardButton, ReplyKeyboardMarkup
from telegram.ext import CallbackContext, Updater, ConversationHandler, CommandHandler, MessageHandler, Filters, \
CallbackQueryHandler
import globals
from database import Database
from datetime import datetime
database = Database("member.db")
def start_command(update, context):
    """Handle /start: register a first-time user and show the language keyboard."""
    user_id = update.message.from_user.id
    username = update.message.from_user.username
    user = database.get_user_by_chat_id(user_id)
    if not user:
        # First contact: persist the user with a registration timestamp.
        database.create_user(user_id, username, datetime.now().strftime("%d-%m-%Y %H:%M:%S"))
    # Six buttons, one per translation direction (labels from globals.language).
    button = [
        [KeyboardButton(globals.language[1]), KeyboardButton(globals.language[2])],
        [KeyboardButton(globals.language[3]), KeyboardButton(globals.language[4])],
        [KeyboardButton(globals.language[5]), KeyboardButton(globals.language[6])]
    ]
    update.message.reply_html(
        "<b>Quydagilardan birini tanlang\n\nВыберите один из следующих\n\nChoose one of the following</b>\n\n🔽🔽🔽🔽🔽🔽",
        reply_markup=ReplyKeyboardMarkup(button, resize_keyboard=True))
def user_command(update, context):
    """Handle /users: report the total number of registered users."""
    users = database.count_user()
    # Idiom fix: count the iterable result directly instead of a manual
    # index-style counter loop (behavior identical for any iterable).
    count = sum(1 for _ in users)
    update.message.reply_html(
        text=f"<b>Foydalanuvchilar soni</b> {count}\n\n<b>Количество пользователей</b> {count}\n\n<b>Quantity of users</b> {count}")
def help_command(update,context):
    """Handle /help: show the help text in all three languages."""
    update.message.reply_text(text=f"{globals.helpp[1]}\n\n{globals.helpp[2]}\n\n{globals.helpp[3]}")
def message_handler(update, context):
    """Route a text message.

    A language-pair button stores the chosen direction in
    ``context.user_data['state']`` and prompts for text; any other text is
    translated according to the stored state; with no state the start
    keyboard is shown again.
    """
    state = context.user_data.get('state', 0)
    msg = update.message.text
    # Button label -> (prompt in the source language, new state).
    prompts = {
        globals.language[1]: ("matn kiriting:", 1),
        globals.language[3]: ("matn kiriting:", 3),
        globals.language[2]: ("введите текст:", 2),
        globals.language[5]: ("введите текст:", 5),
        globals.language[4]: ("enter the text:", 4),
        globals.language[6]: ("enter the text:", 6),
    }
    # State -> (source language, destination language) for googletrans.
    directions = {
        1: ('uz', 'ru'),
        2: ('ru', 'uz'),
        3: ('uz', 'en'),
        4: ('en', 'uz'),
        5: ('ru', 'en'),
        6: ('en', 'ru'),
    }
    if msg in prompts:
        prompt, new_state = prompts[msg]
        update.message.reply_html(prompt)
        context.user_data['state'] = new_state
    elif state in directions:
        src, dest = directions[state]
        result = tr.translate(msg, src=src, dest=dest)
        update.message.reply_text(f"{result.text}")
    else:
        start_command(update,context)
def main():
    """Wire up command/message handlers and poll until interrupted."""
    # NOTE: "YOUR_TOKEN" is a placeholder; a real bot token must be supplied.
    updater = Updater("YOUR_TOKEN")
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("start", start_command))
    dispatcher.add_handler(CommandHandler("users", user_command))
    dispatcher.add_handler(CommandHandler("help", help_command))
    # Catch-all for plain text: translation / state machine.
    dispatcher.add_handler(MessageHandler(Filters.text, message_handler))
    updater.start_polling()
    updater.idle()
if __name__ == '__main__':
    main()
|
import numpy as np
# Numpy is an array processing library with support for n-dimensional arrays and a number of mathematical operations
# How numpy handles arrays of varying shapes is common to a lot of mathematical processing done in python,
# most relevantly TensorFlow.
# Suppose we have a list of numbers
# Idiom fix: build the sample data directly instead of an append loop.
l = list(range(1000))
# This can be converted into an array
a = np.array(l)
print(f"Shape of a: {a.shape}")
# We can reshape this array
b = np.reshape(a, (10, 10, 10))
print(f"Shape of b: {b.shape}")
# We can expand the dimensions of this array
# Note that these two operations are equivalent
c = np.expand_dims(a, axis=-1)
print(f"Shape of c: {c.shape}")
c = np.reshape(a, (1000, 1))
print(f"Shape of c: {c.shape}")
# We can also concatenate arrays along different axes
d = np.concatenate([b, b], axis=0)
e = np.concatenate([b, b], axis=1)
f = np.concatenate([b, b], axis=2)
print(f"Shape of d: {d.shape}")
print(f"Shape of e: {e.shape}")
print(f"Shape of f: {f.shape}")
# For mathematical operations, lets take a subset of b
# Here, we take the first 3 elements of each dimension
arr = b[:3, :3, :3]
print(f"arr: \n{arr}")
# Alternatively, we could have taken the first 3 elements of the first dimension
_ = b[:3, ...]
print(f"_: \n{_}")
# See https://numpy.org/doc/stable/reference/arrays.indexing.html
# for more information regarding this kind of indexing
# But lets keep things small
# Suppose we want to multiply the entire array by a constant.
# This is made easy by a process called broadcasting.
# There is a lot to broadcasting, so to keep things brief and simple,
# see https://numpy.org/doc/stable/user/basics.broadcasting.html
# for more information
# Here, arr2[i][j][k] == arr[i][j][k] * 3
arr2 = arr * 3
print(f"arr2: \n{arr2}")
# Broadcasting applies to the base binary mathematical operations
# i.e. + - * /
# Note here that none of these numpy operations are done in-place.
# i.e. after computing arr2 above, arr is unchanged and available for future use.
# See the numpy docs https://numpy.org/doc/stable/index.html
# for options regarding in-place operations
|
import random
from card import Card
values = list(Card.VALUES)
suits = list(Card.SUITS)
class Deck:
    ''' Deck of cards supporting common operations of shuffling and drawing. '''
    def __init__(self):
        """Build one card per (value, suit) combination, then shuffle."""
        # Idiom fix: iterate the sequences directly instead of range(len(...)).
        # Order of construction is unchanged (values outer, suits inner).
        self.cards = [Card(value, suit) for value in values for suit in suits]
        self.index = 0
        self.reset()
    def reset(self):
        """Shuffle in place and start dealing from the top again."""
        random.shuffle(self.cards)
        self.index = 0
    def draw_top(self):
        """Return the next card; wraps around to the first card after the last."""
        card = self.cards[self.index]
        self.index = (self.index + 1) % len(self.cards)
        return card
|
#
#Step 1 : Understand the problem statement
#step 2 : Write the Algorithm
#Step 3 : Decide the programming language
#Step 4 : Write the Program
#Step 5 : Test the Written Program
#program statement:
# accept number from user and return addition of digits in that number
##################################################################################################
#Algorithm
#Start
#Accept number from user as no
#Display addition of digit number
##End
####################################################################################################
#Function Name: DisplaySumDigit()
#input :Integer
#output:integer
#Description:Display addition of digit number
#Author: Sunil Bhagwan Jarad
#Date:19/2/2021
####################################################################################################
def DisplaySumDigit(no):
    """Return the sum of the decimal digits of *no*.

    Assumes a non-negative integer.  Bug fix: the old ``int(no / 10)``
    used float division, which silently loses digits for integers above
    ~2**53; ``//=`` is exact for any size.  Also renamed the accumulator,
    which shadowed the built-in ``sum``.
    """
    total = 0
    while no != 0:
        total += no % 10
        no //= 10
    return total
#print(DisplaySumDigit(no))
def main():
    """Prompt for a number and print the sum of its digits."""
    number = int(input("Enter any Number:"))
    digit_sum = DisplaySumDigit(number)
    print("Sum of digit:", digit_sum)

if __name__ == "__main__":
    main()
|
from django.shortcuts import render
from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from django.db.models import Count
from .models import User, Wish
from django.core.exceptions import ObjectDoesNotExist
import time
import re
import datetime
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
# Create your views here.
def index(request):
    """Render the landing / login-and-register page."""
    return render(request, "wishlist/index.html")
def register(request):
    """Validate the registration form, create the user and log them in.

    On any validation failure, flash all errors and bounce back to "/".

    NOTE(review): the password is stored in plain text -- this should go
    through a password hasher (e.g. django.contrib.auth) in a coordinated
    change with login().  Also, name.isalpha() rejects spaces and hyphens.
    """
    logged = True
    if len(request.POST["name"]) < 2:
        messages.error(request, "The Name Field must have at least 2 characters")
        logged = False
    if request.POST["name"].isalpha() == False:
        messages.error(request, "Your name can only have letters")
        logged = False
    if not EMAIL_REGEX.match(request.POST["email"]):
        messages.error(request, "Please enter a valid email address")
        logged = False
    if len(request.POST["password"]) < 8:
        messages.error(request, "Your password must contain at least 8 characters")
        logged = False
    if request.POST["password"] != request.POST["confirm_password"]:
        messages.error(request, "Your passwords didn't match")
        logged = False
    if len(request.POST["date_of_birth"]) < 1:
        messages.error(request, "Date of Birth Field cannot be empty")
        logged = False
    if not logged:
        return redirect ("/")
    User.objects.create(name=request.POST["name"], password=request.POST["password"], email=request.POST["email"], date_of_birth=request.POST["date_of_birth"])
    # Log the new user in by stashing their id in the session.
    request.session["current_user"] = User.objects.get(email=request.POST["email"]).id
    return redirect("/dashboard")
def login(request):
    """Authenticate against email + password and open a session.

    On failure, flash an error and return to "/".

    NOTE(review): passwords are compared in plain text -- see register().
    """
    try:
        user = User.objects.get(email=request.POST["email"], password=request.POST["password"])
    except ObjectDoesNotExist:
        messages.error(request, "Invalid username or password")
        return redirect("/")
    else:
        # Cleanup: reuse the row already fetched instead of issuing the
        # identical query a second time; the unused ``context`` dict and
        # the always-true session-membership check are removed (the key
        # is set on the previous line, so the redirect is unconditional).
        request.session["current_user"] = user.id
        return redirect("/dashboard")
def dashboard(request):
    """Show the wish dashboard: the user's own wishes and everyone else's.

    NOTE(review): when nobody is logged in this falls through and
    implicitly returns None (a 500 in Django) -- a redirect to "/" is
    probably intended; confirm before changing.
    """
    if "current_user" in request.session.keys():
        user = User.objects.get(pk = request.session["current_user"])
        my_wishes = user.items.all()
        # Dead-code fix: the original first assigned Wish.objects.all()
        # and immediately overwrote it with this exclude() query.
        wishes = Wish.objects.exclude(id__in=my_wishes)
        context = {
            "user": user,
            'wishes': wishes,
            'my_wishes': my_wishes,
        }
        return render(request, "wishlist/dashboard.html", context)
def added(request):
    """Render the 'add a wish' page for the logged-in user.

    NOTE(review): implicitly returns None when no user is logged in.
    """
    if "current_user" in request.session.keys():
        user = User.objects.get(pk = request.session["current_user"])
        context = {
            "user": user
        }
        return render(request, "wishlist/added.html", context)
def submitted(request):
    """Create a wish from the posted 'item' and attach it to the current user.

    NOTE(review): unlike the sibling views there is no "current_user"
    session guard here, so an anonymous POST raises KeyError -- confirm
    whether a redirect is wanted.
    """
    wishes = Wish.objects.create(user_id=(User.objects.get(pk=request.session["current_user"])).id, item = request.POST["item"])
    user = User.objects.get(pk = request.session["current_user"])
    user.items.add(wishes)
    return redirect("/dashboard")
def addWish(request, id):
    """Add the logged-in user to the wishers of wish *id*, then redirect."""
    if "current_user" in request.session.keys():
        user = User.objects.get(pk = request.session["current_user"])
        wish = Wish.objects.get(id = id)
        wish.wishers.add(user)
    return redirect("/dashboard")
def removeWish(request, id):
    """Remove the logged-in user from the wishers of wish *id*, then redirect."""
    if "current_user" in request.session.keys():
        user = User.objects.get(pk = request.session["current_user"])
        wish = Wish.objects.get(id = id)
        wish.wishers.remove(user)
    return redirect("/dashboard")
def item(request, id):
    """Render the detail page for wish *id*.

    NOTE(review): implicitly returns None when no user is logged in.
    """
    if "current_user" in request.session.keys():
        user = User.objects.get(pk = request.session["current_user"])
        context = {
            "user": user,
            "wish": Wish.objects.get(id = id)
        }
        return render(request, "wishlist/item.html", context)
def delete(request, id):
    """Delete wish *id* and redirect to the dashboard.

    NOTE(review): no session/ownership check -- any requester can delete
    any wish; confirm whether a guard like the other views is wanted.
    """
    wish = Wish.objects.get(id = id)
    wish.delete()
    return redirect("/dashboard")
def logout(request):
    """Clear the session and show the landing page with a flash message."""
    request.session.clear()
    messages.add_message(request, messages.INFO, "Successfully logged out")
    return render(request, "wishlist/index.html")
import StarLAB
# Print the library version, then drive the rover.
StarLAB.version()
# NOTE(review): the rover's LAN address is hard-coded -- confirm it
# matches the target device.
myStarLAB = StarLAB.Connect(IP="192.168.86.104")
tempdata = myStarLAB.atmos.getTempC()
print("Temperature", tempdata)
myStarLAB.enableRover()
# Set both motors to 60% power.
run = myStarLAB.motors.setMotorPower(60,60)
|
import abc
from interfaces import Point3D
from typing import Sequence, Tuple
class Triangulator(abc.ABC):
    """Abstract interface for localizing unknown 3-D points from known pairs."""
    @abc.abstractmethod
    def __init__(self, file_paths):
        """Initialise from the configuration/data files at *file_paths*."""
        pass
    @abc.abstractmethod
    def localize(self, known_points: Sequence[Tuple[Point3D, Point3D]], unkown_points: Sequence[Point3D]) ->Sequence[Point3D]:
        """Return the localized positions of *unkown_points*.

        NOTE(review): 'unkown_points' misspells 'unknown_points', but the
        name is part of the public signature (keyword callers and all
        subclasses) -- rename only in a coordinated change.
        """
        pass
|
#!/usr/bin/env python3
'''
Created on Jul 23, 2017
@author: Daniel Sela, Arnon Sela
Example parameters:
-e 10 --w-ref -c “J242117.88+355328.8” -f ../../../smu/dat/000901_sky0001_1a_match.datc ../../../smu/dat/000901_sky0001_1b_match.datc ../../../smu/dat/000901_sky0001_1c_match.datc ../../../smu/dat/000901_sky0001_1d_match.datc
'''
from rotseana.findburst.matchcoords_gd import matchcoords_gd
def cmdargs():
    """Parse the command line and return the arguments as a dict."""
    import argparse
    import os
    # Derive the program name from this file for the help text.
    filename=os.path.basename(__file__)
    progname=filename.rpartition('.')[0]
    parser = argparse.ArgumentParser(description="""
    {progname} extract data from a star with a given error for the coordinates.
    Example:
    {progname} -e 10 -—w-ref --log name_gd -c “J111734.010+501526.228” -f ../dat/all_coords.txt
""".format(progname=progname))
    parser.add_argument('--error', '-e', type=float, required=False, default=5.0,
                        help="""+/- error range within to search; e.g., 5.0""")
    parser.add_argument('--fits-index', '-i', type=int, required=False, default=1, dest='fits_index',
                        help="""In case of FITS data file, identifies the extension to read. defaults to 1.""")
    parser.add_argument('--w-ref', action='store_true', required=False, default=False, dest='with_reference',
                        help="""Adds file and object id as reference to entries.""")
    parser.add_argument('--mindelta', type=float, default=0.1, required=False,
                        help="""minimum delta""")
    parser.add_argument('--minsig', type=float, default=1.0, required=False,
                        help="""minimum sigma""")
    parser.add_argument('--minchisq', type=float, default=2.0, required=False,
                        help="""minimum chisg""")
    parser.add_argument('--log', type=str, required=False,
                        help="""Path to log (txt)""")
    #parser.add_argument('--plot', action='store_true', required=False, default=False,
    #                    help="""Create plot""")
    parser.add_argument('--verbose', '-v', action='store_true', required=False, default=False,
                        help="""prints to console.""")
    parser.add_argument('--quiet', '-q', action='store_true', required=False, default=False,
                        help="""don't print results to console.""")
    parser.add_argument('--coord','-c', type=str, required=True,
                        help="""coordinate to lookup; e.g., J123456.64+123456.7""")
    parser.add_argument('--file', '-f', type=str, dest='coord_file', required=True,
                        help="""Path to coordinate file.""")
    args = parser.parse_args()
    # specific option validation:
    #if args.plot and args.log is None:
    #    parser.error("--plot requires --log.")
    if args.quiet and args.log is None:
        parser.error("if --quiet and no --log, results will not be printed.")
    # Return as a dict so the caller can splat it into matchcoords_gd(**args).
    argsd=vars(args)
    return argsd
if __name__=='__main__':
    # Entry point: parse CLI args and run the coordinate match.
    args=cmdargs()
    matchcoords_gd(**args)
import requests
import json
import re
def get_greater_30(v_id):
    """Fetch the play list for QQ video *v_id*.

    The endpoint wraps its JSON in ``QZOutputJson=...;`` (JSONP-style),
    so the wrapper is stripped before parsing.

    Returns the ``videoPlayList`` structure, or None when the response
    cannot be parsed.
    """
    url = "http://s.video.qq.com/get_playsource?id=" + v_id + "&type=4&range=1-10000&otype=json"
    session = requests.session()
    res = session.get(url).text
    # Robustness fix: re.match returns None on an unexpected response;
    # the original called .groups() on it and raised AttributeError.
    match = re.match(r"QZOutputJson=(.*)", res)
    if match is None:
        return None
    json_re = match.groups()
    if len(json_re):
        # [:-1] drops the trailing ';' of the JSONP wrapper.
        json_res = json.loads(json_re[0][:-1])
        return json_res['PlaylistItem']['videoPlayList']
    else:
        return None
def main():
    """Ad-hoc manual test: fetch and print the playlist of one video.

    Cleanup: the original assigned ``v_id`` (never used) and overwrote
    ``temp_id`` three times; the spare ids are kept as comments for
    manual testing.
    """
    # Other ids tried previously:
    # "jvhuaf93hh858fs", "sx5xljydk45g1pp", "7casb7nes159mrl",
    # "00v79tmuo39v1va", "s2fo606divlys5k"
    temp_id = "uv56axs1nx33hmc"
    res = get_greater_30(temp_id)
    # Guard: get_greater_30 may return None on a parse failure.
    for item in res or []:
        print(item)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 expandtab number
"""
题目描述
给定一个会议时间安排的数组,每个会议时间都会包括开始和结束的时间 [[s1,e1],[s2,e2],…] (si < ei),为避免会议冲突,同时要考虑充分利用会议室资源,请你计算至少需要多少间会议室,才能满足这些会议安排。
tag
贪心 堆
样例
1
2
输入: [[0, 30],[5, 10],[15, 20]]
输出: 2
http://shaocheng.me/2019/07/18/LeetCode-253-Meeting-Rooms-II/
Authors: qianweishuo<qzy922@gmail.com>
Date: 2019/11/20 上午7:44
"""
import heapq
from typing import List
class Solution:
    def minMeetingRooms(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of rooms needed to host all meetings.

        Greedy with a min-heap of room end times: meetings are taken in
        start-time order, and a room is reused whenever its earliest end
        time is no later than the next meeting's start.  The heap size is
        monotonically non-decreasing and ends up equal to the answer.
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda meeting: meeting[0])
        end_times = []  # min-heap of end times, one per occupied room
        heapq.heappush(end_times, intervals[0][1])
        for meeting in intervals[1:]:
            start, end = meeting[0], meeting[1]
            if end_times[0] <= start:
                # The earliest-finishing room is already free: reuse it.
                heapq.heappop(end_times)
            heapq.heappush(end_times, end)
        return len(end_times)
|
#keras51_homework.py
import numpy as np
# NOTE(review): this first assignment is dead -- it is immediately
# overwritten by the 0-based labels on the next line.
y = np.array([1,2,3,4,5,1,2,3,4,5])
y = np.array([0,1,2,3,4,0,1,2,3,4])
from keras.utils import np_utils
# One-hot encode the 0-based labels into a (10, 5) matrix.
y = np_utils.to_categorical(y)
# y = y - 1
'''
print(y)
[[0. 1. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 1.]
[0. 1. 0. 0. 0. 0.]
[0. 0. 1. 0. 0. 0.]
[0. 0. 0. 1. 0. 0.]
[0. 0. 0. 0. 1. 0.]
[0. 0. 0. 0. 0. 1.]]
#_____________________ 1. slicing 하는 방법_______________________
y = y [ : , 1 : ]
#__________________ 2. y 전체에서 1을 빼는 방법 (numpy)______________
y = np.array([1,2,3,4,5,1,2,3,4,5])
y = y - 1
-> ([0,1,2,3,4,0,1,2,3,4])
numpy 안에서만 가능 ( 단, 같은 자료형만 가능 )
[[1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]
[1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]]
# ___________________3. sklearn 이용하는 방법___________________
# 그런데 y의 차원을 2차원으로 바꾸어 주어야 함
print(y.shape) # (10,)
y = y.reshape(10,1)
from sklearn.preprocessing import OneHotEncoder
aaa = OneHotEncoder()
aaa.fit(y)
y = aaa.transform(y).toarray()
print(y)
[[1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]
[1. 0. 0. 0. 0.]
[0. 1. 0. 0. 0.]
[0. 0. 1. 0. 0.]
[0. 0. 0. 1. 0.]
[0. 0. 0. 0. 1.]]
_______________________________
Y_train = to_categorical(Y_train, num_classes = 10) 나중에 이것도 확인해보자
np.argmax (a, axis)
axis = 0 -> x 축
axis = 1 -> y 축
axis = 2 -> z 축
''' |
import gin
import pytest
import numpy.testing as npt
import tensorflow as tf
import math
def test_get_angles():
    """Check gin.deterministic.md.get_angles on two hand-computed geometries."""
    # Four points on the y/z axes; the indexed triples give angles of
    # 180, 90, 90 and 45 degrees respectively.
    coordinates = tf.constant(
        [
            [0, 0, 0],
            [0, 0, 1],
            [0, 0, -1],
            [0, 1, 0],
        ],
        dtype=tf.float32)
    angle_idxs = tf.constant(
        [
            [1, 0, 2],
            [1, 0, 3],
            [1, 3, 2],
            [0, 1, 3]
        ],
        dtype=tf.int64)
    npt.assert_almost_equal(
        gin.deterministic.md.get_angles(
            coordinates,
            angle_idxs).numpy(),
        [math.pi, 0.5 * math.pi, 0.5 * math.pi, 0.25 * math.pi],
        decimal=2)
    # Equilateral triangle (side 2): every interior angle is 60 degrees.
    coordinates = tf.constant(
        [
            [1, 0, 0],
            [-1, 0, 0],
            [0, math.sqrt(3), 0]
        ],
        dtype=tf.float32)
    angle_idxs = tf.constant(
        [
            [0, 1, 2],
            [0, 2, 1],
            [2, 0, 1]
        ],
        dtype=tf.int64)
    npt.assert_almost_equal(
        gin.deterministic.md.get_angles(
            coordinates,
            angle_idxs).numpy(),
        [math.pi/3, math.pi/3, math.pi/3])
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-14 21:20
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the noticias / noticiasIntereses
    models after first removing their foreign-key fields.

    NOTE: generated code -- do not edit by hand; create a new migration
    for further schema changes.
    """
    dependencies = [
        ('usuarioAdministrador', '0008_auto_20171113_1533'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='noticias',
            name='creador',
        ),
        migrations.RemoveField(
            model_name='noticiasintereses',
            name='interes',
        ),
        migrations.RemoveField(
            model_name='noticiasintereses',
            name='noticia',
        ),
        migrations.DeleteModel(
            name='noticias',
        ),
        migrations.DeleteModel(
            name='noticiasIntereses',
        ),
    ]
|
#!/usr/bin/env python
# coding: utf-8
"""pRESTo
The program uses `curl` conventions when appropriate, but compatibility with
`curl` is not a priority.
Usage: presto-url.py [options] <url>
presto-url.py -h | --help
presto-url.py --version
Options:
-a Use authentication. It will use the default auth method and security
credentials for a domain name in a given URL. Provider, app and other
params can be set explicitly. See the documentation for `presto-cfg`
--auth-app=<auth-app> Name of the app to use for authentication. See the
documentation for `presto-cfg`
--auth-token=<auth-token> Name of the auth token.
--auth-provider=<auth-provider> Name of the auth provider
-c Colorize output.
-p Pretty-print the output.
-d <data> Data to send.
-i Include HTTP headers in the output.
-I Display headers only. Please note that this does *not* use HTTP HEAD
method. Use -X instead if you need it.
-H | --header <header> Extra HTTP header to use.
-X | --request <method> Specify a custom HTTP request method.
Example:
presto-url.py -icpa -X post http://www.odesk.com/api/
"""
import sys
from docopt import docopt
from oauthlib.oauth1.rfc5849 import *
from urlparse import urlparse
import httplib2
import simplejson as json
from json_tools.printer import print_json
from presto.config_utils import PrestoCfg, PrestoCfgException
from presto import version
from presto.models import config
# Resolve the version string once so docopt can report it for --version.
ver = version.get_version()

if __name__ == '__main__':
    # Parse CLI arguments against the module docstring (docopt grammar).
    # NOTE(review): Python 2 file (print statements, `unicode`, urlparse).
    args = docopt(__doc__, argv=sys.argv[1:], help=True, version=ver)
    uri = unicode(args['<url>'])
    sch, net, path, par, query, fra = urlparse(uri)
    # HTTP method defaults to GET; docopt stores -X/--request under '--request'.
    method = unicode((args['--request'] or u'GET').upper())
    body = args['-d']
    headers = {}
    if body:
        headers['Content-Type'] = u'application/x-www-form-urlencoded'
    if args['-a']:
        # OAuth1 signing: look up provider/app/token credentials from the
        # presto config, falling back to matching the URL's domain name.
        try:
            if args['--auth-provider']:
                provider = config.filter("providers", name=args['--auth-provider'])
            else:
                provider = config.filter("providers", domain_name=net)
            app_name = args['--auth-app'] or u'default'
            app = provider.filter('apps', name=app_name)
            token_name = args['--auth-token'] or u'default'
            token = app.filter('tokens', name=token_name)
            access_token_key = token.token_key
            access_token_secret = token.token_secret
        except PrestoCfgException, e:
            print "Error: %s " % e
            raise
        # Sign the request; the signature is appended as query parameters
        # (SIGNATURE_TYPE_QUERY), so `uri` is rewritten here.
        client = Client(app.public_key,
                        app.secret_key,
                        resource_owner_key=access_token_key,
                        resource_owner_secret=access_token_secret,
                        signature_type=SIGNATURE_TYPE_QUERY)
        uri, headers, body = client.sign(uri=uri, http_method=method, headers=headers, body=body)
    http = httplib2.Http()
    response, content = httplib2.Http.request(http, uri, method=method, body=body,
                                              headers=headers)
    # -i: echo the *request* headers that were sent.
    if args['-i'] and headers:
        if args['-c']:
            print_json(dict(headers), True)
        elif args['-p']:
            print_json(dict(headers), False)
        else:
            for i in headers:
                print "%s: %s" % (i.title(), headers[i])
    # -I: show only the response headers; otherwise print the body,
    # pretty-printing/colorizing JSON responses when requested.
    if args['-I']:
        if args['-c']:
            print_json(dict(response), True)
        elif args['-p']:
            print_json(dict(response), False)
        else:
            for i in response:
                print "%s: %s" % (i.title(), response[i])
    else:
        if response["content-type"] == "application/json":
            if args['-c']:
                print_json(json.loads(content), True)
            elif args['-p']:
                print_json(json.loads(content), False)
            else:
                print content
        else:
            print content
|
from zope.interface import Interface
class IChildsDictLike(Interface):
    """Marker interface for objects exposing dict-like child traversal."""

    def __getitem__(child_key):
        """Get the object traversal child object"""
|
import torch
import torch.nn as nn
import numpy as np
import cv2
from torch.nn import functional as F
from collections import OrderedDict
global glb_spatial_grad # dictionary saving gradient of intermediate feature
global glb_feature
global glb_c_grad
global img_index
# TODO: How about batch_size > 1 ???
def save_grad_at(module, grad_in, grad_out):
    """Backward hook: store a spatial attention map of the output gradient.

    Collapses the channel dimension of ``grad_out[0]`` (shape (bn, c, h, w))
    with a sum of absolute values and stashes the resulting (bn, h, w) map
    in ``glb_spatial_grad[id(module)][img_index]``.
    """
    global glb_spatial_grad
    global img_index
    # grad_out[0].shape = (bn, c, h,w)
    # FIXME: absolute value? clamp(relu)?
    # grad_at = torch.abs(torch.sum(grad_out[0].detach(), dim=1))
    # grad_at = torch.sum(grad_out[0].detach(), dim=1)
    grad_at = torch.sum(torch.abs(grad_out[0].detach()), dim=1)
    # grad_at = torch.sum(torch.clamp(grad_out[0].detach(), min=0.0), dim=1)
    glb_spatial_grad[id(module)][img_index] = grad_at
def save_grad(module, grad_in, grad_out):
    """Backward hook: store channel-wise gradient weights (Grad-CAM alphas).

    Global-average-pools ``grad_out[0]`` down to (bn, c, 1, 1) and stashes
    it in ``glb_c_grad[id(module)][img_index]``.
    """
    global glb_c_grad
    global img_index
    grad_at = F.adaptive_avg_pool2d(grad_out[0].detach(), 1)
    glb_c_grad[id(module)][img_index] = grad_at
def save_feature(module, input, output):
    """Forward hook: stash the module's detached output feature map in
    ``glb_feature[id(module)][img_index]``.
    """
    global glb_feature
    global img_index
    glb_feature[id(module)][img_index] = output.detach()
def compute_gradCAMs(feature, grad):
    """Build per-image Grad-CAM maps from features and gradient weights.

    Each feature map (bn, c, h, w) is weighted channel-wise by ``grad``
    (broadcast over h, w), summed over channels, passed through ReLU, and
    normalized so every image's map has maximum 1.0.

    Returns a (bn, h, w) tensor.
    """
    batch, _, height, width = feature.shape
    cam = F.relu((feature * grad).sum(dim=1))
    # Normalize each image's map by its own maximum.
    flat = cam.view(batch, -1)
    flat /= flat.max(dim=1)[0].unsqueeze(1)
    return flat.view(batch, height, width)
def remove_hook(handlers):
    """Detach every registered hook handle in *handlers*."""
    for hook_handle in handlers:
        hook_handle.remove()
def write_gradient(filename, at, image):
    """Overlay a 2-D attention/gradient map ``at`` on ``image`` and save it.

    The map is min-max normalized, resized to the image size, colorized
    with the JET colormap, blended with the raw image, and written to
    ``filename``.  NOTE(review): Python 2 file (print statements).
    """
    h, w, _ = image.shape
    data = at.data.cpu().numpy()
    # print '[', filename, '] min, max : ', data.min(), data.max()
    # Log the map's value range for quick sanity checking.
    print '[ {} ] {:.3f}, {:.3f}, {:.3f}'.format(filename.split('/')[-1], data.min(), data.mean(), data.max())
    data -= data.min()
    data /= data.max()
    # Nearest-neighbour upsampling keeps the blocky per-cell structure.
    data = cv2.resize(data, (w,h), interpolation=cv2.INTER_NEAREST)
    data = cv2.applyColorMap(np.uint8(data * 255.0), cv2.COLORMAP_JET)
    data = data.astype(np.float) + image.astype(np.float)
    data = data / data.max() * 255.0
    cv2.imwrite(filename, np.uint8(data))
def write_gradcam(filename, gcam, raw_image_path, image):
    """Overlay an already 0-1 normalized Grad-CAM map on ``image`` and save.

    NOTE(review): ``raw_image_path`` is unused.
    """
    h, w, _ = image.shape
    gcam = gcam.data.cpu().numpy()
    gcam = cv2.resize(gcam, (w, h))
    gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
    gcam = gcam.astype(np.float) + image.astype(np.float)
    gcam = gcam / gcam.max() * 255.0
    cv2.imwrite(filename, np.uint8(gcam))
def calculate_attention(
        teacher_net,
        student_net,
        training_generator,
        filepath,
        epoch):
    """Dump teacher/student backward-attention visualizations as PNGs.

    Registers backward hooks on one layer of each network, backprops the
    one-hot ground-truth signal through both, and writes gradient-based and
    activation-based heat-map overlays (plus teacher-minus-student
    residuals) per image under ``filepath``.  Only the first two batches
    are processed (see the ``img_index > 1`` break below).

    NOTE(review): Python 2 file (print statement); assumes CUDA is
    available and 200 classes with 1-indexed labels — confirm with caller.
    """
    global glb_spatial_grad
    global img_index
    # teacher_layer = [teacher_net.pool1, teacher_net.pool2, teacher_net.relu3,
    #                  teacher_net.relu4, teacher_net.pool5]
    # student_layer = [student_net.pool1, student_net.pool2, student_net.relu3,
    #                  student_net.relu4, student_net.pool5]
    #
    # feature_name = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
    teacher_layer = [teacher_net.conv5]
    student_layer = [student_net.conv5]
    feature_name = ['conv5']
    # initialize gradient dictionary
    glb_spatial_grad = OrderedDict()
    for i in range(len(teacher_layer)):
        glb_spatial_grad[id(teacher_layer[i])] = OrderedDict()
        glb_spatial_grad[id(student_layer[i])] = OrderedDict()
    img_index = 0
    # ce_loss = nn.CrossEntropyLoss()
    # net.train()
    teacher_net.eval()
    student_net.eval()
    handlers = []
    # Hooks populate glb_spatial_grad during the backward passes below.
    for i in range(len(teacher_layer)):
        handlers.append(teacher_layer[i].register_backward_hook(save_grad_at))
        handlers.append(student_layer[i].register_backward_hook(save_grad_at))
    for x, x_low, y, paths in training_generator:
        x = x.cuda().float()
        x_low = x_low.cuda().float()
        # Labels arrive 1-indexed; shift to 0-based class indices.
        y = y.cuda() - 1
        bn, _, _, _ = x.shape
        one_hot_y = torch.zeros((bn, 200)).float().cuda()
        filenames = []
        raw_imgs = []
        low_raw_imgs = []
        for j in range(bn):
            one_hot_y[j][y[j]] = 1.0
            filename = paths[j].split("/")[-1].split(".")[0]
            filenames.append(filename)
            # species = paths[0].split("/")[-2]
            # recover raw image from tensor value
            # (undo the ImageNet-style mean subtraction, then clamp to 0-255)
            raw_image = x[j].permute((1, 2, 0)).cpu().numpy()
            raw_image += np.array([123.68, 116.779, 103.939])
            raw_image[raw_image < 0] = 0
            raw_image[raw_image > 255.] = 255.
            raw_image = np.uint8(raw_image)
            raw_imgs.append(raw_image)
            low_raw_image = x_low[j].permute((1, 2, 0)).cpu().numpy()
            low_raw_image += np.array([123.68, 116.779, 103.939])
            low_raw_image[low_raw_image < 0] = 0
            low_raw_image[low_raw_image > 255.] = 255.
            low_raw_image = np.uint8(low_raw_image)
            low_raw_imgs.append(low_raw_image)
        # Backprop the one-hot label through both nets; the hooks capture
        # the per-layer spatial gradients.
        teacher_net.zero_grad()
        t, t_features = teacher_net(x)
        t.backward(gradient=one_hot_y)
        student_net.zero_grad()
        s, s_features = student_net(x_low)
        s.backward(gradient=one_hot_y)
        for i in range(len(teacher_layer)):
            # file = filepath + filename + '_' + str(epoch) + 'th'
            teacher_at = glb_spatial_grad[id(teacher_layer[i])][img_index]
            student_at = glb_spatial_grad[id(student_layer[i])][img_index]
            residual_at = teacher_at - student_at
            # Activation-based maps: mean over channels (signed and absolute).
            t_activation = (torch.mean(t_features[feature_name[i]], dim=1)).detach()
            s_activation = (torch.mean(s_features[feature_name[i]], dim=1)).detach()
            res_activation = torch.clamp(t_activation - s_activation, min=0.0)
            weighted_activation = res_activation * t_activation
            t_mul = teacher_at * t_activation
            mul = teacher_at * res_activation
            t_act = torch.mean(torch.abs(t_features[feature_name[i]]), dim=1).detach()
            s_act = torch.mean(torch.abs(s_features[feature_name[i]]), dim=1).detach()
            # t_act = torch.sqrt(torch.mean(torch.abs(t_features[feature_name[i]]), dim=1)).detach()
            # s_act = torch.sqrt(torch.mean(torch.abs(s_features[feature_name[i]]), dim=1)).detach()
            # t_act = t_act / np.amax(t_act)
            # s_act = s_act / np.amax(s_act)
            res_act = torch.clamp(t_act - s_act, min=0.0)
            weighted_act = res_act * t_act
            # mul = F.adaptive_avg_pool2d(mul, 5)
            # mul = F.adaptive_avg_pool2d(mul, 6)
            print mul.shape
            # One overlay PNG per image per map variant.
            for j in range(bn):
                file = filepath + filenames[j] + '_' + str(epoch) + 'th'
                write_gradient(file + "_grad_t_" + feature_name[i] + ".png", teacher_at[j], raw_imgs[j])
                write_gradient(file + "_grad_s_" + feature_name[i] + ".png", student_at[j], low_raw_imgs[j])
                write_gradient(file + "_grad_r_" + feature_name[i] + ".png", residual_at[j], raw_imgs[j])
                write_gradient(file + "_act_t_" + feature_name[i] + ".png", t_activation[j], raw_imgs[j])
                write_gradient(file + "_act_s_" + feature_name[i] + ".png", s_activation[j], low_raw_imgs[j])
                write_gradient(file + "_act_r_" + feature_name[i] + ".png", res_activation[j], low_raw_imgs[j])
                write_gradient(file + "_act_rXt_" + feature_name[i] + ".png", weighted_activation[j], low_raw_imgs[j]) # highlight teacher's high point
                write_gradient(file + "_act_t_actXtgrad_" + feature_name[i] + ".png", t_mul[j], raw_imgs[j])
                write_gradient(file + "_act_r_actXtgrad_" + feature_name[i] + ".png", mul[j], low_raw_imgs[j])
                write_gradient(file + "_abs_act_t_" + feature_name[i] + ".png", t_act[j], raw_imgs[j])
                write_gradient(file + "_abs_act_s_" + feature_name[i] + ".png", s_act[j], low_raw_imgs[j])
                write_gradient(file + "_abs_act_r_" + feature_name[i] + ".png", res_act[j], low_raw_imgs[j])
                write_gradient(file + "_abs_act_rXt_" + feature_name[i] + ".png", weighted_act[j], low_raw_imgs[j])
        # To save all gradient of training images, uncomment following lines
        img_index += 1
        if img_index > 1:
            break
    # net.eval()
    remove_hook(handlers)
    return
def calculate_gradCAM(
        net,
        training_generator,
        filepath,
        epoch ) :
    """Dump Grad-CAM overlays for five conv layers of ``net`` as PNGs.

    Registers forward hooks (feature maps) and backward hooks (pooled
    gradients) on conv1..conv5, backprops the one-hot ground-truth label,
    then writes one Grad-CAM overlay per image per layer under
    ``filepath``.  Only the first five batches are processed (see the
    ``img_index > 4`` break).  NOTE(review): assumes CUDA and 200 classes
    with 1-indexed labels — confirm with caller.
    """
    global glb_feature
    global glb_c_grad
    global img_index
    # layers = [net.pool1, net.pool2, net.relu3, net.relu4, net.pool5]
    layers = [net.conv1, net.conv2, net.conv3, net.conv4, net.conv5]
    feature_name = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
    # glb_abs_grad = OrderedDict()
    glb_c_grad = OrderedDict()
    glb_feature = OrderedDict()
    for i in layers:
        glb_c_grad[id(i)] = OrderedDict()
        glb_feature[id(i)] = OrderedDict()
    img_index = 0
    # NOTE(review): ce_loss is created but never used below.
    ce_loss = nn.CrossEntropyLoss()
    # net.train()
    net.eval()
    handlers = []
    for i in layers:
        handlers.append(i.register_forward_hook(save_feature))
        handlers.append(i.register_backward_hook(save_grad))
    for x, _, y, path in training_generator:
        x = x.cuda().float()
        # Labels arrive 1-indexed; shift to 0-based class indices.
        y = y.cuda() - 1
        bn, _, _, _ = x.shape
        one_hot_y = torch.zeros((bn, 200)).float().cuda()
        filename = []
        raw_imgs = []
        for i in range(bn):
            one_hot_y[i][y[i]] = 1.0
            filename.append(path[i].split("/")[-1].split(".")[0])
            # Undo mean subtraction to recover a displayable 0-255 image.
            raw_image = x[i].permute((1,2,0)).cpu().numpy()
            raw_image += np.array([123.68, 116.779, 103.939])
            raw_image[raw_image < 0] = 0
            raw_image[raw_image > 255.] = 255.
            raw_image = np.uint8(raw_image)
            raw_imgs.append(raw_image)
        net.zero_grad()
        t, t_features = net(x)
        t.backward(gradient=one_hot_y)
        for i in range(len(layers)):
            gcam = compute_gradCAMs(glb_feature[id(layers[i])][img_index], glb_c_grad[id(layers[i])][img_index])
            for j in range(bn):
                f = filepath + filename[j] + '_' + str(epoch) + 'th_' + feature_name[i] + ".png"
                write_gradcam(f, gcam[j], path[j], raw_imgs[j])
        # To save all grad_cam of training images, uncomment following lines
        img_index += 1
        if img_index > 4:
            break
    # net.eval()
    remove_hook(handlers)
    return
|
import torch
import numpy as np
import numbers
import random
class Compose(object):
    """Chain several joint transforms.

    Each transform receives the outputs of the previous one via argument
    unpacking, so a group of aligned arrays is transformed together.
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, *img):
        data = img
        for transform in self.transforms:
            data = transform(*data)
        return data
class ToTensor(object):
    """Convert every ndarray argument to a torch tensor.

    Raises:
        TypeError: if any argument is not a ``numpy.ndarray``.
    """

    def __call__(self, *img):
        converted = []
        for item in img:
            if not isinstance(item, np.ndarray):
                raise TypeError('Data should be ndarray.')
            converted.append(torch.from_numpy(item))
        return tuple(converted)
class RandomCrop(object):
    """Crop all arguments at one shared random location.

    ``size`` is either a single number (square crop) or an ``(h, w)`` pair.
    All inputs are cropped with the same offsets so aligned arrays stay
    aligned.
    """

    def __init__(self, size):
        self.size = (int(size), int(size)) if isinstance(size, numbers.Number) else size

    def __call__(self, *img):
        crop_h, crop_w = self.size
        full_h, full_w = img[0].shape[-2], img[0].shape[-1]
        # Same draw order as before: horizontal offset first, then vertical.
        left = random.randint(0, full_w - crop_w)
        top = random.randint(0, full_h - crop_h)
        return tuple(
            self.crop(item, top, left, top + crop_h, left + crop_w)
            for item in img
        )

    def crop(self, im, x_start, y_start, x_end, y_end):
        # Crop the trailing two (spatial) dims; keep a leading channel dim.
        if len(im.shape) == 3:
            return im[:, x_start:x_end, y_start:y_end]
        return im[x_start:x_end, y_start:y_end]
class Normalize(object):
    """Standardize exactly eight inputs channel-wise.

    Inputs ``i`` and ``i + 4`` (two aligned 4-channel groups) share the
    mean/std of channel ``i``.
    """

    def __init__(self):
        self.mean = [250., 50., 0., 0.]
        self.std = [60., 50., 50., 50.]

    def __call__(self, *img):
        out = list(img)
        for channel in range(4):
            m = self.mean[channel]
            s = self.std[channel]
            out[channel] = (out[channel] - m) / s
            out[channel + 4] = (out[channel + 4] - m) / s
        return tuple(out)
if __name__ == '__main__':
    # Smoke test: jointly crop two aligned 2x10x10 arrays to 5x5 tensors.
    a = np.arange(2*10*10)
    a.resize([2, 10, 10])
    a = a.astype(np.float32)
    b = a + 1
    t = Compose([
        RandomCrop(5),
        ToTensor(),
    ])
    c, d = t(*[a, b])
|
class cal2:
    """Tiny circle-area calculator with a set/compute/print workflow."""

    def setdata(self, radius):
        # Remember the radius for the later area computation.
        self.radius = radius
        print("Radius Set Succesfully! ")

    def area(self):
        # Area = pi * r^2, with pi approximated as 3.14.
        self.result = 3.14 * (self.radius ** 2)

    def display(self):
        self.area()
        print(f"Area of A Circle with {self.radius} is: {self.result}")
# Interactive demo: read a radius from stdin and print the circle's area.
c = cal2()
radius = float(input("Enter Radius:"))
c.setdata(radius)
c.display()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 10:44:44 2018
@author: Ian
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms, datasets, models
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
#define model classes
    class LogisticRegressionModel(nn.Module):
        """Single linear layer producing class logits from flattened images."""
        def __init__(self, input_dim, output_dim):
            super(LogisticRegressionModel, self).__init__()
            self.linear = nn.Linear(input_dim, output_dim)

        def forward(self, x):
            # x: (batch, input_dim) — callers flatten images before calling.
            out = self.linear(x)
            return out
    class FeedforwardNeuralNetModel(nn.Module):
        """One-hidden-layer MLP: linear -> sigmoid -> linear readout."""
        def __init__(self, input_size, hidden_size, num_classes):
            super(FeedforwardNeuralNetModel, self).__init__()
            #linear function
            self.fc1 = nn.Linear(input_size, hidden_size)
            #non-linear function
            self.sig = nn.Sigmoid()
            #linear function (readout)
            self.fc2 = nn.Linear(hidden_size, num_classes)

        def forward(self, x):
            #linear function
            out = self.fc1(x)
            #non-linear
            out = self.sig(out)
            #linear function (readout)
            out = self.fc2(out)
            return out
    class CNN_Model(nn.Module):
        """Two conv+ReLU+maxpool stages followed by a linear readout.

        The fc1 dimension (128 * 56 * 56) fixes the expected input at
        3 x 224 x 224 (two 2x poolings: 224 -> 112 -> 56).
        """
        def __init__(self):
            super(CNN_Model, self).__init__()
            #convolution 1
            self.cnn1 = nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=1, padding=3)
            self.relu1 = nn.ReLU()
            #Pooling 1
            self.maxpool1 = nn.MaxPool2d(kernel_size=2)
            #convolution 2
            self.cnn2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=5, stride=1, padding=2)
            self.relu2 = nn.ReLU()
            #Pooling 2
            self.maxpool2 = nn.MaxPool2d(kernel_size=2)
            #fully Connected 1 (readout)
            self.fc1 = nn.Linear(128 * 56 * 56, 4)

        def forward(self, x):
            #conv 1
            out = self.cnn1(x)
            out = self.relu1(out)
            #Pooling 1
            out = self.maxpool1(out)
            #conv 2
            out = self.cnn2(out)
            out = self.relu2(out)
            #Pooling 2
            out = self.maxpool2(out)
            #Resize
            '''
            current size: (20, 128, 56, 56) --20 immages per batch, each image is now 128 x 56 x 56 after conv and pooling
            out.size(0): 20
            New out size: (20, 128*56*56)
            '''
            out = out.view(out.size(0), -1)
            #readout
            out = self.fc1(out)
            return out
    #instantiate data transformations
    data_transforms = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    #identify test dataset
    test_dataset = datasets.ImageFolder(root='Images/testing/', transform=data_transforms)
    #instantiate dataset loader
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=10, shuffle=False, num_workers=4)#
    #get class names from dataset
    class_names = test_dataset.classes
    #create lists of models, model states to load, and figure titles for plotting and exporting
    # NOTE(review): this assignment shadows the imported torchvision
    # `models` module from here on; `models.resnet101` below still resolves
    # to the module because the RHS is evaluated before the rebind.
    models = [LogisticRegressionModel(3*224*224, 4),
              FeedforwardNeuralNetModel(3*224*224, 100, 4),
              CNN_Model(),
              models.resnet101(pretrained=False, num_classes = 4)
              ]
    model_states = ['Saved_Models\\PhotoClass_Logreg_Model.pkl',
                    'Saved_Models\\PhotoClass_FF_NerualNet_Model.pkl',
                    'Saved_Models\\PhotoClass_Simple_CNN_Model.pkl',
                    'Saved_Models\\PhotoClass_ResNet101_TL.pkl'
                    ]
    figure_titles = ['Logistic Regression', 'Feed Forward Neural Net', 'Simple Convolutional NN', 'ResNet 101']#
    figure_names = ['predictions/log_reg.png', 'predictions/ff_nn.png', 'predictions/Simple_CNN.png', 'predictions/ResNet.png']#
    # Models that consume flattened (2-D) inputs rather than image tensors.
    models_2d = ['LogisticRegressionModel', 'FeedforwardNeuralNetModel']
    #loop over each model, load saved weights, evaluate test images, plot images with predicted labels
    for model, state, fig_title, fig_name in zip(models, model_states, figure_titles, figure_names):
        print(model.__class__.__name__)
        model.load_state_dict(torch.load(state))
        model.eval()
        correct = 0
        for images, labels in test_loader:
            img_labels = [class_names[lbl] for lbl in labels]
            if model.__class__.__name__ in models_2d:
                #need to reshape images for log reg and FF model
                images_trans = images.view(-1, 3*224*224)
                output = model(images_trans)
            else:
                output = model(images)
            #run output through softmax to get prediction probabilities
            # NOTE(review): F.softmax without dim= relies on deprecated
            # implicit-dim behavior; dim=1 is presumably intended.
            soft_out = F.softmax(output)
            prob, predicted = torch.max(soft_out.data, 1)
            #count the number of correct predictions
            correct += (predicted == labels).numpy().sum()
            #get predicted labels and probabilities
            pred_labels = [class_names[pred] for pred in predicted]
            pred_probs = [np.round(p.numpy(),2) for p in prob]
            #plot each image with the predicted label and probability of that prediction
            images_so_far = 0
            fig = plt.figure(figsize=(6, 8))
            # batch_size=10 images laid out on a 5x2 subplot grid
            for j in range(images.size()[0]):
                images_so_far += 1
                ax = plt.subplot(5, 2, images_so_far)
                # Undo the normalization so the image displays correctly.
                inp = images.data[j].numpy().transpose((1, 2, 0))
                mean = np.array([0.485, 0.456, 0.406])
                std = np.array([0.229, 0.224, 0.225])
                inp = std * inp + mean
                inp = np.clip(inp, 0, 1)
                ax.imshow(inp)
                ax.axis('off')
                pred_label = pred_labels[j] #class_names[predicted[j]]
                pred_prob = np.round(pred_probs[j] * 100,2)
                ax.set_title('predicted: {} ({}%)'.format(pred_label, pred_prob))
            #save plots to file
            fig.suptitle(fig_title)
            plt.tight_layout()
            fig.subplots_adjust(top=0.88)
            plt.savefig(fig_name)
            plt.close()
            #print true labels, predicted labels and number correct
            print(img_labels)
            print(pred_labels)
            print(correct)
        print('-'*10)
|
class Solution:
    def subarraySum(self, nums, k):
        """Count contiguous subarrays of ``nums`` summing to ``k``.

        Prefix-sum + hashmap technique: a subarray ending at the current
        position sums to k exactly when (prefix - k) occurred as an
        earlier prefix sum; {0: 1} seeds the empty prefix.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        seen = {0: 1}  # prefix sum -> number of occurrences so far
        prefix = 0
        count = 0
        for x in nums:
            prefix += x
            # dict.get with a default replaces the old `!= None` sentinel
            # check and the manual key-initialization dance.
            count += seen.get(prefix - k, 0)
            seen[prefix] = seen.get(prefix, 0) + 1
        return count

# Demo run (executes on import of this script).
print(Solution().subarraySum([2,2,1,2,1,2], 3))
''' 矩阵中的路径
题目:请设计一个函数,用来判断在一个矩阵中是否存在一条包含某字符串所有字符的路径。路径可以从矩阵中的任意一个
格子开始,每一步可以在矩阵中向左,向右,向上,向下移动一个格子。如果一条路径经过了矩阵中的某一个格子,则该路
径不能再进入该格子。 例如 a b c e s f c s a d e e 矩阵中包含一条字符串"bcced"的路径,但是矩阵中不包含
"abcb"路径,因为字符串的第一个字符b占据了矩阵中的第一行第二个格子之后,路径不能再次进入该格子。
'''
'''
思路:优化版回溯法
1.将matrix字符串模拟映射为一个字符矩阵(但并不实际创建一个矩阵)
2.取一个boolean[matrix.length]标记某个字符是否已经被访问过,用一个布尔矩阵进行是否存在该数值的标记。
3.如果没找到结果,需要将对应的boolean标记值置回false,返回上一层进行其他分路的查找。
'''
# coding=utf-8
class Solution:
    def hasPath(self, matrix, rows, cols, path):
        """Return True iff ``path`` can be traced through the rows x cols
        grid (``matrix`` is the grid flattened row-major), moving one cell
        up/down/left/right per step without revisiting a cell.
        """
        if not path:
            return True
        for i in range(rows):
            for j in range(cols):
                # Try every cell matching the first character as a start.
                if matrix[i * cols + j] == path[0]:
                    if self.findPath(list(matrix), rows, cols, path[1:], i, j):
                        return True
        # Explicit False (the previous revision fell through returning None).
        return False

    def findPath(self, matrix, rows, cols, path, i, j):
        """Backtracking DFS from (i, j), which already matched; ``path`` is
        what remains to be matched.

        Fixes two bugs in the previous revision: the elif-chain followed
        only the *first* matching neighbour (no exploration of alternative
        branches), and visited marks were never undone, so sibling starts
        and branches saw a corrupted grid.
        """
        if not path:
            return True
        saved = matrix[i * cols + j]
        matrix[i * cols + j] = 0  # mark visited (0 never equals a path char)
        for ni, nj in ((i, j + 1), (i, j - 1), (i + 1, j), (i - 1, j)):
            if (0 <= ni < rows and 0 <= nj < cols
                    and matrix[ni * cols + nj] == path[0]
                    and self.findPath(matrix, rows, cols, path[1:], ni, nj)):
                return True
        matrix[i * cols + j] = saved  # backtrack: restore for other branches
        return False
# matrix = [
# ['A', 'B', 'C', 'E'],
# ['S', 'F', 'C', 'S'],
# ['A', 'D', 'E', 'E']
# ]
# matrix 为一维数组的形式
# The 3x4 grid flattened row-major into a 1-D list:
#   A B C E
#   S F C S
#   A D E E
matrix = ['A', 'B', 'C', 'E', 'S', 'F', 'C', 'S', 'A', 'D', 'E', 'E']
s = Solution()
path = "ABCCED"
flag = s.hasPath(matrix, 3, 4, path)
print(flag)
path = "SEEDE"
flag = s.hasPath(matrix, 3, 4, path)
print(flag)
path = "ABCESCEE"
flag = s.hasPath(matrix, 3, 4, path)
print(flag)
|
from experiments.classification import Classification
from dataloaders.fer_loader import FERDataset
from models.alex_net import AlexNet
import pickle
from scipy.misc import imread
from utils.face_detector import FRDetector
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Training and saving
    # Grid-search an AlexNet classifier over the FER dataset (capped at
    # 1000 samples) and then emit evaluation metrics.
    classification_cnn = Classification(**{'classifier': AlexNet, 'dset':FERDataset, 'max_samples': 1000, 'neural_net':True})
    classification_cnn.grid_search()
    classification_cnn.gen_metrics()
|
import math
import collections
def getRoots(aNeigh):
    """Return the connected components of an undirected graph.

    ``aNeigh`` maps node -> iterable of neighbours.  A union-find structure
    is built where each entry is a ``(parent, depth)`` pair; the result
    maps each component's root node to the list of its member nodes.
    """
    def findRoot(aNode,aRoot):
        # Chase parent pointers until a self-rooted node is reached.
        while aNode != aRoot[aNode][0]:
            aNode = aRoot[aNode][0]
        return (aNode,aRoot[aNode][1])
    # Every node starts as its own root with depth 0.
    myRoot = {}
    for myNode in aNeigh.keys():
        myRoot[myNode] = (myNode,0)
    for myI in aNeigh:
        for myJ in aNeigh[myI]:
            (myRoot_myI,myDepthMyI) = findRoot(myI,myRoot)
            (myRoot_myJ,myDepthMyJ) = findRoot(myJ,myRoot)
            if myRoot_myI != myRoot_myJ:
                # Union by depth: hang the shallower tree under the deeper.
                myMin = myRoot_myI
                myMax = myRoot_myJ
                if myDepthMyI > myDepthMyJ:
                    myMin = myRoot_myJ
                    myMax = myRoot_myI
                myRoot[myMax] = (myMax,max(myRoot[myMin][1]+1,myRoot[myMax][1]))
                myRoot[myMin] = (myRoot[myMax][0],-1)
    # Collect members under their final roots.
    myToRet = {}
    for myI in aNeigh:
        if myRoot[myI][0] == myI:
            myToRet[myI] = []
    for myI in aNeigh:
        myToRet[findRoot(myI,myRoot)[0]].append(myI)
    return myToRet
def hopcroft():
    """Read an edge list from stdin and print the maximum matching size.

    Input format: first line N (edge count), then N lines "a b".  Each
    connected component is 2-colored via BFS (``bipartition``) to form the
    two sides U/V, then Hopcroft-Karp (BFS layering + DFS augmenting
    paths) computes the maximum matching.  -1 acts as the NIL vertex.
    NOTE(review): assumes the input graph is actually bipartite — an odd
    cycle would make the 2-coloring invalid; confirm with the caller.
    """
    def bipartition(graph, root = 0, sub = None):
        # BFS-style alternating assignment of vertices to sides u and v,
        # restricted to the component's vertex set ``sub`` when given.
        u = [root]
        v = []
        treated = {k:False for k in sub} if sub is not None else {k:False for k in graph}
        select = False
        while not all(val == True for key, val in treated.items()):
            l = v if select else u
            k = u if select else v
            for vertice in l:
                if not treated[vertice]:
                    for connected in graph[vertice]:
                        if not treated[connected]:
                            k.append(connected)
                    treated[vertice] = True
            select = not select
        return u, v
    def graph(edges):
        # Build an undirected adjacency dict from the edge list.
        graph = {}
        for i in edges:
            if i[0] not in graph:
                graph[i[0]] = []
            if i[1] not in graph:
                graph[i[1]] = []
            graph[i[0]].append(i[1])
            graph[i[1]].append(i[0])
        return graph
    def BFS (U, V, Pair_U, Pair_V, graph, Dist):
        # Layer the graph from all free U-vertices; Dist[-1] is the NIL
        # layer — reachable iff an augmenting path exists.
        queue = collections.deque()
        for u in U:
            if Pair_U[u] == -1:
                Dist[u] = 0
                queue.append(u)
        Dist[-1] = None
        while queue:
            u = queue.popleft()
            if Dist[-1] is None or Dist[u] < Dist[-1]:
                for v in graph[u]:
                    if Dist[ Pair_V[v] ] is None:
                        Dist[ Pair_V[v] ] = Dist[u] + 1
                        queue.append(Pair_V[v])
        return Dist[-1] is not None
    def DFS (u, Pair_U, Pair_V, graph, Dist):
        # Walk an augmenting path along the BFS layers, flipping matched
        # edges on success.
        if u != -1:
            for v in graph[u]:
                if Dist[u] is not None and Dist[ Pair_V[v] ] and Dist[ Pair_V[v] ] == Dist[u] + 1:
                    if DFS(Pair_V[v], Pair_U, Pair_V, graph, Dist):
                        Pair_V[v] = u
                        Pair_U[u] = v
                        return True
            Dist[u] = None
            return False
        return True
    N = int(input())
    edges = []
    for i in range(N):
        (a, b) = input().split()
        edges.append((int(a), int(b)))
    graph = graph(edges)
    # 2-color each connected component separately.
    roots = getRoots(graph)
    U, V = [], []
    for i in roots:
        m, n = bipartition(graph, i, roots[i])
        U += m
        V += n
    graph[-1] = []
    Pair_U = {k:-1 for k in U}
    Pair_V = {k:-1 for k in V}
    Dist = {k:None for k in U}
    matching = 0
    # Repeat BFS phases until no augmenting path remains.
    while BFS(U, V, Pair_U, Pair_V, graph, Dist) :
        for u in U:
            if Pair_U[u] == -1:
                if DFS(u, Pair_U, Pair_V, graph, Dist) :
                    matching = matching + 1
    print(str(matching))
if __name__ == '__main__':
    # Entry point: read the graph from stdin and print the matching size.
    hopcroft()
|
import json
import logging
from urllib.request import Request, urlopen
from django.utils.functional import cached_property
from reviewboard.admin.server import build_server_url
from rbintegrations.basechat.forms import BaseChatIntegrationConfigForm
from rbintegrations.basechat.integration import BaseChatIntegration
def build_slack_message(integration, title, title_link, fallback_text, fields,
                        pre_text, body, color, thumb_url, image_url):
    """Build message using Slack webhook format.

    This will build the payload data for HTTP requests to services such as
    Slack, Mattermost and Discord.

    Args:
        integration (BaseChatIntegration):
            The Integration.

        title (unicode):
            The title for the message.

        title_link (unicode):
            The link for the title of the message.

        fallback_text (unicode):
            The non-rich fallback text to display in the chat, for use in
            IRC and other services.

        fields (dict):
            The fields comprising the rich message to display in chat.

        pre_text (unicode):
            Text to display before the rest of the message.

        body (unicode):
            The body of the message.

        color (unicode):
            A Slack color string or RGB hex value for the message.

        thumb_url (unicode):
            URL of an image to show on the side of the message.

        image_url (unicode):
            URL of an image to show in the message.

    Returns:
        dict:
            The payload of the Slack message request.
    """
    attachment = {
        # A falsy color (None or '') falls back to the integration default.
        # (This was previously defaulted twice — via an `if not color`
        # rebind *and* this `or` — one check suffices.)
        'color': color or integration.DEFAULT_COLOR,
        'fallback': fallback_text,
        'title': title,
        'title_link': title_link,
        'text': body,
        'pretext': pre_text,
    }

    # Optional attachment parts are added only when provided, so the JSON
    # payload stays minimal.
    if fields:
        attachment['fields'] = fields

    if thumb_url:
        attachment['thumb_url'] = thumb_url

    if image_url:
        attachment['image_url'] = image_url

    return {
        'attachments': [attachment],
        'icon_url': integration.LOGO_URL,
    }
def notify(integration, title, title_link, fallback_text, local_site,
           review_request, event_name, fields, pre_text, body, color,
           thumb_url, image_url):
    """Send a webhook notification.

    This will post the given message to any Slacks/Mattermost channels
    configured to receive it. This is oriented towards Slack, however is
    broken out of the SlackIntegration because other services (like
    Mattermost) duplicate Slack APIs.

    Args:
        integration (BaseChatIntegration):
            The Integration.

        title (unicode):
            The title for the message.

        title_link (unicode):
            The link for the title of the message.

        fallback_text (unicode):
            The non-rich fallback text to display in the chat, for use in
            IRC and other services.

        local_site (reviewboard.site.models.LocalSite):
            The Local Site for the review request or review emitting
            the message. Only integration configurations matching this
            Local Site will be processed.

        review_request (reviewboard.reviews.models.ReviewRequest):
            The review request the notification is bound to.

        event_name (unicode):
            The name of the event triggering this notification.

        fields (dict):
            The fields comprising the rich message to display in chat.

        pre_text (unicode):
            Text to display before the rest of the message.

        body (unicode):
            The body of the message.

        color (unicode):
            A Slack color string or RGB hex value for the message.

        thumb_url (unicode):
            URL of an image to show on the side of the message.

        image_url (unicode):
            URL of an image to show in the message.
    """
    # Build the message once; per-config values are layered on below.
    common_payload = build_slack_message(integration=integration,
                                         title=title,
                                         title_link=title_link,
                                         fallback_text=fallback_text,
                                         fields=fields,
                                         pre_text=pre_text,
                                         body=body,
                                         color=color,
                                         thumb_url=thumb_url,
                                         image_url=image_url)

    # Send a notification to any configured channels.
    for config in integration.get_configs(local_site):
        if not config.match_conditions(form_cls=integration.config_form_cls,
                                       review_request=review_request):
            continue

        payload = dict({
            'username': config.get('notify_username'),
        }, **common_payload)

        # An empty channel means the webhook's own default channel is used.
        channel = config.get('channel')

        if channel:
            payload['channel'] = channel

        webhook_url = config.get('webhook_url')

        logging.debug('Sending notification for event "%s", '
                      'review_request ID %d to channel "%s", '
                      'webhook URL %s',
                      event_name, review_request.pk, channel, webhook_url)

        # A failed POST is logged but never raised, so one broken webhook
        # cannot block the others (or the triggering action).
        try:
            data = json.dumps(payload).encode('utf-8')
            headers = {
                'Content-Type': 'application/json',
                'Content-Length': len(data),
            }
            urlopen(Request(webhook_url, data, headers))
        except Exception as e:
            logging.error('Failed to send notification: %s',
                          e, exc_info=True)
def format_link(path, text):
    """Format the given URL and text to be shown in a message.

    This will combine together the parts of the URL (method, domain, path)
    and format it using Slack/Mattermost's URL syntax. This is oriented
    towards Slack, however is broken out of the SlackIntegration because
    other services (like Mattermost) duplicate Slack APIs.

    Args:
        path (unicode):
            The path on the Review Board server.

        text (unicode):
            The text for the link.

    Returns:
        unicode:
            The link for use in Slack.
    """
    # Slack/Mattermost only want these three entities replaced, rather than
    # all the entities that Django's escape() would attempt to replace.
    # (The previous revision's replacements were no-ops — the entity
    # strings had evidently been HTML-unescaped at some point; Slack
    # requires &, < and > to be escaped in message text.)
    text = text.replace('&', '&amp;')
    text = text.replace('<', '&lt;')
    text = text.replace('>', '&gt;')

    return '<%s|%s>' % (build_server_url(path), text)
class SlackIntegration(BaseChatIntegration):
    """Integrates Review Board with Slack.

    This will handle updating Slack channels when review requests are posted,
    changed, or closed, and when there's new activity on the review request.
    """

    name = 'Slack'
    description = (
        'Notifies channels in Slack when review requests are created, '
        'updated, and reviewed.'
    )

    # Per-configuration defaults; an empty 'channel' means the webhook's
    # own default channel is used.
    default_settings = {
        'webhook_url': '',
        'channel': '',
        'notify_username': 'Review Board',
    }

    config_form_cls = BaseChatIntegrationConfigForm

    # Fallback attachment color when a caller does not pass one.
    DEFAULT_COLOR = '#efcc96'

    # Static assets; the timestamp query string busts caches on update.
    # NOTE(review): LOGO_URL/TROPHY_URLS embed '?' twice
    # ('...png??20160830-2346') since ASSETS_TIMESTAMP already starts with
    # '?' — looks unintended; confirm before changing.
    ASSETS_BASE_URL = 'https://static.reviewboard.org/integration-assets/slack'
    ASSETS_TIMESTAMP = '?20160830-2346'
    LOGO_URL = '%s/reviewboard.png?%s' % (ASSETS_BASE_URL, ASSETS_TIMESTAMP)

    # File extensions Slack will render inline as images.
    VALID_IMAGE_URL_EXTS = ('.png', '.bmp', '.gif', '.jpg', '.jpeg')

    TROPHY_URLS = {
        'fish': '%s/fish-trophy.png?%s' % (ASSETS_BASE_URL, ASSETS_TIMESTAMP),
        'milestone': '%s/milestone-trophy.png?%s' % (ASSETS_BASE_URL,
                                                     ASSETS_TIMESTAMP),
    }

    def notify(self, title, title_link, fallback_text, local_site,
               review_request, event_name=None, fields={}, pre_text=None,
               body=None, color=None, thumb_url=None, image_url=None):
        """Send a webhook notification to Slack.

        This will post the given message to any Slack channels configured to
        receive it.

        Args:
            title (unicode):
                The title for the message.

            title_link (unicode):
                The link for the title of the message.

            fallback_text (unicode):
                The non-rich fallback text to display in the chat, for use in
                IRC and other services.

            fields (dict):
                The fields comprising the rich message to display in chat.

            local_site (reviewboard.site.models.LocalSite):
                The Local Site for the review request or review emitting
                the message. Only integration configurations matching this
                Local Site will be processed.

            review_request (reviewboard.reviews.models.ReviewRequest):
                The review request the notification is bound to.

            event_name (unicode):
                The name of the event triggering this notification.

            pre_text (unicode, optional):
                Text to display before the rest of the message.

            body (unicode, optional):
                The body of the message.

            color (unicode, optional):
                A Slack color string or RGB hex value for the message.

            thumb_url (unicode, optional):
                URL of an image to show on the side of the message.

            image_url (unicode, optional):
                URL of an image to show in the message.
        """
        # Delegates to the module-level helper shared with Mattermost-style
        # services.  NOTE(review): mutable default `fields={}` is only
        # forwarded, never mutated here, so shared state is not an issue.
        notify(self, title, title_link, fallback_text, local_site,
               review_request, event_name, fields, pre_text, body, color,
               thumb_url, image_url)

    def format_link(self, path, text):
        """Format the given URL and text to be shown in a Slack message.

        This will combine together the parts of the URL (method, domain, path)
        and format it using Slack's URL syntax.

        Args:
            path (unicode):
                The path on the Review Board server.

            text (unicode):
                The text for the link.

        Returns:
            unicode:
                The link for use in Slack.
        """
        # Delegates to the module-level helper shared with Mattermost-style
        # services.
        return format_link(path, text)

    @cached_property
    def icon_static_urls(self):
        """Return the icons used for the integration.

        Returns:
            dict:
                The icons for Slack.
        """
        # Imported lazily to avoid a circular import with the extension.
        from rbintegrations.extension import RBIntegrationsExtension

        extension = RBIntegrationsExtension.instance

        return {
            '1x': extension.get_static_url('images/slack/icon.png'),
            '2x': extension.get_static_url('images/slack/icon@2x.png'),
        }
|
"""
Script: util.py
===============
Description:
------------
utilities for dealing with data
Usage:
------
python preprocess.py -i $DATA_DIR -o data.df
##################
Jay Hack
jhack@stanford.edu
Fall 2014
##################
"""
import os
import pickle as pkl
import pandas as pd
def load_data(num_dfs=1, data_dir='/data/aers/formatted', verbose=True):
    """
    loads and concatenates the specified number of dataframes

    Picks up every ``*.df`` pickle in ``data_dir``, unpickles the first
    ``num_dfs`` of them and stacks the frames row-wise.
    NOTE(review): Python 2 file (print statement, text-mode 'r' pickles);
    os.listdir order is arbitrary, so "first num_dfs" is not deterministic.
    """
    if verbose:
        print '-----> Loading data (%d dataframes)' % num_dfs
    df_paths = [os.path.join(data_dir, p) for p in os.listdir(data_dir) if p.endswith('.df')]
    dfs = [pkl.load(open(p, 'r')) for p in df_paths[:num_dfs]]
    data = pd.concat(dfs, axis=0)
    return data
def load_drug_names(path='/data/aers/formatted/new_drug_names.pkl', verbose=True):
    """Unpickle and return the drug-name data stored at ``path``."""
    if verbose:
        print '-----> Loading drugnames (%s)' % path
    return pkl.load(open(path, 'r'))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-19 14:36
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the Environment model.

    Each metric column stores an uploaded data file rather than a scalar value.
    """

    dependencies = [
        ('df_shouye', '0012_pesticide'),
    ]

    operations = [
        migrations.CreateModel(
            name='Environment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # One uploaded file per environmental metric, each with its
                # own upload_to subdirectory.
                ('air_temp', models.FileField(blank=True, upload_to=b'airtemp')),
                ('air_hum', models.FileField(blank=True, upload_to=b'airhum')),
                ('soil_temp', models.FileField(blank=True, upload_to=b'soiltemp')),
                ('soil_hum', models.FileField(blank=True, upload_to=b'soilhum')),
                ('pH_value', models.FileField(blank=True, upload_to=b'pHvalue')),
                # Each Environment row belongs to a GoodsInfo record; deleting
                # the GoodsInfo cascades to its Environment rows.
                ('eType', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='df_shouye.GoodsInfo')),
            ],
        ),
    ]
|
#!/usr/bin/env python3
import argparse
import csv
import datetime
import makegraph as mg
import numpy as np
import os
import random
import sys
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Layer
from tensorflow.python.keras import activations
from tensorflow.python.framework.tensor_shape import Dimension
print("TensorFlow version: {}".format(tf.__version__))
tf.enable_eager_execution()
def parseArgs():
    """Parse and return the command-line arguments for the trainer."""
    cli = argparse.ArgumentParser(description='')
    cli.add_argument('npys', metavar='NumPy training file', type=str, nargs='*', help='')
    cli.add_argument('-i', '--inputs', metavar='<# Inputs>', dest='numInputs', type=int, required=True, help='Number of model inputs')
    cli.add_argument('-v', '--vertices', metavar='<# Vertices>', dest='numVertices', type=int, required=True, help='Number of graph vertices')
    cli.add_argument('-t', '--tests', metavar='<# Tests>', dest='numTests', type=int, required=True, help='Number of records to reserve for testing the model')
    cli.add_argument('-e', '--epochs', metavar='<# Epochs>', dest='numEpochs', type=int, default=1000, help='Number of epochs to train the model for')
    cli.add_argument('-c', '--checkpoint', metavar='<Directory>', dest='checkpointDir', type=str, help='Directory to save model checkpoints')
    return cli.parse_args()
def buildGraph(features, numVertices):
    """Expand per-vertex adjacency bitmasks into a dense adjacency matrix.

    The first `numVertices` entries of `features` are integer bitmasks; bit j
    of entry i marks an edge from vertex i to vertex j.  Trailing entries
    (node types) are ignored here.
    """
    adjacency = np.zeros((numVertices, numVertices))
    masks, _types = np.hsplit(features, (numVertices,))
    for row, mask in enumerate(masks):
        for col in range(numVertices):
            adjacency[row, col] = (mask >> col) & 0x1
    return adjacency
class SpectralLayer(Layer):
    """
    Class to perform spectral graph convolution

    Reference: https://towardsdatascience.com/how-to-do-deep-learning-on-graphs-with-graph-convolutional-networks-62acf5b143d0
    Weights can be disabled to make this layer behave like a pure input layer.
    """

    def __init__(self, A, units, activation, useWeights=True, **kwargs):
        # A: dense adjacency matrix (numpy array) for the graph this layer wraps
        # units: output width of the per-node linear transform
        # useWeights: when False the layer only propagates features, no kernel
        # NOTE(review): self.activation is stored but never applied in call()
        # — confirm whether an activation was intended on the output.
        super(SpectralLayer, self).__init__(**kwargs)
        self.A = A
        self.units = units
        self.activation = activations.get(activation)
        self.useWeights = useWeights

    def build(self, input_shape):
        # Add self-loops: A_hat = A + I
        self.I = np.eye(*self.A.shape)
        A_hat = self.A + self.I
        # Degree vector of the self-looped graph, then D^(-1/2) on the diagonal
        D = np.sum(A_hat, axis=0)
        D_inv = D**-0.5
        D_inv = np.diag(D_inv)
        # NOTE(review): `*` is ELEMENTWISE for numpy ndarrays, so this zeroes
        # every off-diagonal entry of A_hat; the referenced normalization is
        # the matrix product D^(-1/2) @ A_hat @ D^(-1/2) — confirm intent.
        A_hat = D_inv * A_hat * D_inv
        # Lift the numpy result into a backend tensor
        z = K.zeros(A_hat.shape)
        self.A_hat = z + A_hat
        if self.useWeights:
            self.kernel = self.add_weight(
                'kernel',
                shape=(input_shape[2], self.units),
                trainable=True)
            print("kernel shape = ", self.kernel.shape)
        super(SpectralLayer, self).build(input_shape)

    def call(self, X):
        # When building the model, a three dimensional tensor is passed through.
        # If so, grab the second dimension for dot product.
        if X.shape.rank > 2:
            X_sub = X[1]
            X = X_sub
        # Apply the feature vector to the normalized graph
        aggregate = K.dot(self.A_hat, X)
        # Apply the weights if necessary and expand the batch dimension
        if self.useWeights:
            dot = K.dot(aggregate, self.kernel)
        else:
            dot = aggregate
        dot = tf.expand_dims(dot, 0)
        return dot
def convertTraining(training, numVertices, numInputs):
    """
    Convert training data to categorical forms

    Returns a list of (adjacency matrix, input SpectralLayer, node feature
    matrix, class label) tuples, one per training record.
    """
    converted = []
    for entry in training:
        inputs, labels = np.hsplit(entry, (numInputs,))
        labels = np.matrix(labels)
        # NOTE(review): here the FIRST numVertices columns are taken as vertex
        # types, while buildGraph() treats the first numVertices entries of its
        # argument as adjacency bitmasks — confirm the column layout agrees.
        vertexTypes, denseGraph = np.hsplit(inputs, (numVertices,))
        # Convert node types into one-hot categories
        vertexCategories = np.zeros((numVertices, len(mg.NODE_TYPES)))
        for i, t in enumerate(vertexTypes):
            vertexCategories[(i, t)] = 1
        # Separate labels into circuit types and node inclusion labels
        classLabels, inclusionLabels = np.hsplit(labels, (1,))
        inclusionLabels = inclusionLabels.reshape(inclusionLabels.shape[1], inclusionLabels.shape[0])
        # Convert dense graph into adjacency matrix
        graph = buildGraph(denseGraph, denseGraph.shape[0])
        # Node features = identity (one-hot node ids) concatenated with type one-hots
        I = K.eye(graph.shape[0])
        features = K.concatenate((I, vertexCategories))
        # Weight-less spectral layer serves as the graph-aware input layer
        spectral = SpectralLayer(graph, features.shape[1], activation=tf.nn.relu, input_shape=features.shape, useWeights=False)
        converted.append((graph, spectral, features, classLabels))
    return converted
def train(training, numFeatures, numVertices, numEpochs, checkpointDir=None):
    """
    Train the model

    A shared `coreModel` classifier is trained across all graphs; each sample
    supplies its own (fixed, weight-less) spectral input layer.
    """
    data = convertTraining(training, numVertices, numFeatures)
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
    scc = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    # NOTE(review): bce is created but never used below — confirm it can go.
    bce = tf.keras.losses.BinaryCrossentropy()

    def loss(model, features, labels):
        # Sparse categorical cross-entropy on the model's logits
        preds = model(features)
        lossValue = scc(y_true=labels, y_pred=preds)
        return lossValue

    def grad(model, features, labels):
        # Forward pass under a tape, then gradients w.r.t. trainables
        with tf.GradientTape() as tape:
            lossValue = loss(model, features, labels)
        return lossValue, tape.gradient(lossValue, model.trainable_variables)

    # Core model to be trained by all graphs in the training set
    coreModel = tf.keras.Sequential([
        tf.keras.layers.Reshape((numVertices*(numVertices+len(mg.NODE_TYPES)),), input_shape=(1, numVertices, numVertices+len(mg.NODE_TYPES))),
        tf.keras.layers.Dense(numVertices, activation=tf.nn.relu),
        tf.keras.layers.Dense(numVertices//2, activation=tf.nn.relu),
        tf.keras.layers.Dense(len(mg.LABELS))
    ])
    for epochNum in range(numEpochs):
        epochStart = datetime.datetime.today()
        print("Starting Epoch {:03d} at {:s}:".format(epochNum, epochStart.strftime("%H:%M:%S")))
        epoch_loss_avg = tf.keras.metrics.Mean()
        epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
        # Training loop: rebuild spectral+core stack per graph so the shared
        # coreModel weights accumulate updates from every sample
        for (i, (graph, spectral, features, labels)) in enumerate(data):
            if i != 0 and i % 500 == 0:
                print(" Graph #{:d}".format(i))
            model = tf.keras.Sequential()
            model.add(spectral)
            model.add(coreModel)
            model.compile(optimizer, scc, metrics=['accuracy'])
            loss_value, grads = grad(model, features, labels)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            # Add current loss
            epoch_loss_avg(loss_value)
            # Compare predicted label to actual label
            preds = model(features)
            epoch_accuracy(labels, preds)
        if epochNum % 1 == 0:
            epochEnd = datetime.datetime.today()
            epochDuration = epochEnd - epochStart
            print(" Loss: {:.3f}, Accuracy: {:.3%}, Duration: {:d}s".format(epoch_loss_avg.result(), epoch_accuracy.result(), int(epochDuration.total_seconds())))
        if checkpointDir:
            # Persist only the shared core weights; spectral layers are static
            checkpointName = "check.{:d}".format(epochNum)
            checkpointPath = os.path.join(checkpointDir, checkpointName)
            coreModel.save_weights(checkpointPath)
def main():
    """Load training data from the given .npy files, shuffle, split, and train.

    Returns a process exit code (0 on success, 1 on bad input).
    """
    args = parseArgs()
    if not args.npys:
        print('ERROR: At least one training file is required')
        return 1
    for npy in args.npys:
        if not os.path.exists(npy):
            print(f'ERROR: File {npy} does not exist.')
            return 1
    graphData = []
    print("INFO: Training from {} files...".format(len(args.npys)))
    for npy in args.npys:
        graphData.append(np.load(npy))
    if not graphData:
        print('ERROR: No training data read from input files.')
        return 1
    training = np.concatenate(graphData, axis=0).astype(np.int64)
    print("INFO: Training set shape = {}".format(training.shape))
    np.random.shuffle(training)
    # Hold out the last numTests records. `test` is reserved for a future
    # evaluation pass; the previous version had unreachable evaluation code
    # after `return 0` that referenced undefined names (test_dataset, model)
    # — it has been removed.
    training, test = np.vsplit(training, (training.shape[0] - args.numTests,))
    train(training, args.numInputs, args.numVertices, args.numEpochs, args.checkpointDir)
    return 0
if __name__ == '__main__':
sys.exit(main())
|
# Read N division problems from stdin. For each pair, report the quotient,
# or the literal marker 'divisao impossivel' when the divisor is zero.
results = []
count = int(input())
for _ in range(count):
    raw_x, raw_y = input().split(' ')
    numerator = int(raw_x)
    denominator = int(raw_y)
    if denominator == 0:
        results.append('divisao impossivel')
    else:
        results.append(numerator / denominator)
# Emit all answers after reading the full input.
for item in results:
    print(item)
|
#!/usr/bin/python
import datetime, os, sys
from pyasn1.codec.der import decoder
# $ sudo apt-get install python-crypto
sys.path = sys.path[1:] # removes script directory from aes.py search path
from Crypto.Cipher import AES # https://www.dlitz.net/software/pycrypto/api/current/Crypto.Cipher.AES-module.html
from Crypto.Protocol.KDF import PBKDF2 # https://www.dlitz.net/software/pycrypto/api/current/Crypto.Protocol.KDF-module.html#PBKDF2
from Crypto.Util.strxor import strxor # https://www.dlitz.net/software/pycrypto/api/current/Crypto.Util.strxor-module.html#strxor
import hashlib, hmac # do not use any other imports/libraries
def intToBytestring(i):
    """Encode a non-negative integer as a big-endian base-256 bytestring.

    Zero encodes as a single NUL byte.
    """
    if not i:
        return chr(0x00)
    out = ''
    while i > 0:
        out = chr(i & 0xff) + out
        i >>= 8
    return out
def intToBase128Bytestring(i):
    """Encode an integer in DER base-128 (as used for OID arcs).

    Every octet except the last has its high bit set.  Fix: the previous
    version returned the empty string for 0, but a zero arc must encode as
    a single 0x00 octet per X.690.
    """
    if not i:
        return chr(0x00)
    s = ''
    is_last_octet = True
    while i:
        remainder = i % 0x80
        i = i // 0x80
        # Continuation bit on every octet except the least-significant one
        s = chr(remainder if is_last_octet else 0x80 | remainder) + s
        is_last_octet = False
    return s
def asn1_len(content_str):
    """Return the DER length octet(s) (L) for the given TLV content.

    content_str - bytestring that contains TLV content octet(s).
    """
    length = len(content_str)
    if length <= 0x7f:
        # short form: one octet carries the length directly
        return chr(length)
    # long form: 0x80 | number-of-length-octets, then the length big-endian
    encoded = intToBytestring(length)
    return chr(0x80 | len(encoded)) + encoded
def asn1_boolean(bool):
    """DER-encode a BOOLEAN: content 0xFF for True, 0x00 for False."""
    value = chr(0xff) if bool else chr(0x00)
    return chr(0x01) + asn1_len(value) + value
def asn1_null():
    """DER-encode NULL: tag 0x05 with zero-length content."""
    tag = chr(0x05)
    length = chr(0x00)
    return tag + length
def asn1_integer(i):
    """DER-encode a non-negative INTEGER.

    i - arbitrary integer (of type 'int' or 'long').
    A leading 0x00 octet is prepended when the high bit of the first content
    octet is set, keeping the DER value positive.
    """
    if not i:
        zero = chr(0x00)
        return chr(0x02) + asn1_len(zero) + zero
    body = intToBytestring(i)
    if ord(body[0]) & 0x80:
        # high bit set would read as negative in DER's two's complement
        body = chr(0x00) + body
    return chr(0x02) + asn1_len(body) + body
def asn1_bitstring(bitstr):
    # bitstr - bytestring containing bitstring (e.g., "10101")
    # returns DER encoding of BITSTRING
    # Pad the pattern to a whole number of octets; per DER the count of pad
    # bits becomes the first content octet.
    pad_len = 8 - len(bitstr) % 8
    if pad_len == 8:
        pad_len = 0
    bitstr += "0" * pad_len
    # Fold the '0'/'1' characters into a single integer, MSB first
    i = 0
    for bit in bitstr:
        i = i << 1
        if bit == '1':
            i = i | 1
    # NOTE(review): Python-2-only from here — `/` is integer division for ints
    # and xrange does not exist on Python 3.
    length_in_bytes = (len(bitstr) + 7) / 8
    s = ""
    for _ in xrange(length_in_bytes):
        s = chr(i & 0b11111111) + s
        i = i >> 8
    # Prefix the pad-bit count, then wrap with the BIT STRING tag (0x03)
    s = chr(pad_len) + s
    return chr(0b00000011) + asn1_len(s) + s
def asn1_octetstring(octets):
    """DER-encode an OCTET STRING wrapping the raw bytes unchanged.

    octets - arbitrary byte string (e.g., "abc\\x01").
    """
    tag = chr(0x04)
    return tag + asn1_len(octets) + octets
def asn1_objectidentifier(oid):
    """DER-encode an OBJECT IDENTIFIER from a list of arc integers.

    oid - list of integers representing the OID (e.g., [1,2,840,123123]).
    """
    # Per X.690 the first two arcs share one octet: 40 * arc0 + arc1
    body = intToBytestring(oid[0] * 40 + oid[1])
    # Remaining arcs use base-128 with continuation bits
    for arc in oid[2:]:
        body += intToBase128Bytestring(arc)
    return chr(0x06) + asn1_len(body) + body
def asn1_sequence(der):
    """Wrap already-encoded DER content in a SEQUENCE (tag 0x30)."""
    tag = chr(0x30)
    return tag + asn1_len(der) + der
def asn1_set(der):
    """Wrap already-encoded DER content in a SET (tag 0x31)."""
    tag = chr(0x31)
    return tag + asn1_len(der) + der
def asn1_printablestring(string):
    """DER-encode a PrintableString (tag 0x13) around the given text."""
    tag = chr(0x13)
    return tag + asn1_len(string) + string
def asn1_utctime(time):
    """DER-encode a UTCTime (tag 0x17).

    time - bytestring timestamp already in UTCTime format, e.g. "121229010100Z".
    """
    tag = chr(0x17)
    return tag + asn1_len(time) + time
def asn1_tag_explicit(der, tag):
    """Wrap a DER blob in an explicit context-specific constructed tag.

    der - DER encoded bytestring; tag - tag number for the type octet.
    """
    # 0xa0 = context-specific | constructed; OR in the caller's tag number
    type_octet = chr(0xa0 | tag)
    return type_octet + asn1_len(der) + der
# this function benchmarks how many PBKDF2 iterations
# can be performed in one second on the machine it is executed
def benchmark():
    """Time 10000 PBKDF2 iterations and extrapolate to iterations/second."""
    # measure time for performing 10000 iterations
    startTime = datetime.datetime.now()
    keyPass = PBKDF2('hebele', os.urandom(8), 36, 10000)
    stopTime = datetime.datetime.now()
    time = (stopTime - startTime).total_seconds()
    # extrapolate to 1 second
    iter = 10000 // time
    print "[+] Benchmark: %s PBKDF2 iterations in 1 second" % (iter)
    return iter  # returns number of iterations that can be performed in 1 second
def cbcEnc(nonce, keyAES, pfile):
    """Encrypt pfile with hand-rolled AES-CBC; returns the raw ciphertext.

    nonce serves as the IV.  Padding fills the final short block with the
    pad-length byte, and a full all-padding block is appended when the
    plaintext is already block-aligned.
    """
    chunkLoop = True
    aesObject = AES.new(keyAES)
    cipherTxt = ''
    xorResult = ''
    BS = 16 # Was going to get as a parameter and try in the below lambda function but had problems
    # pad = lambda fileData: fileData + (BS - len(fileData) % BS) * chr(BS - len(fileData) % BS)
    with open(pfile, 'r') as p:
        while True:
            fileData = p.read(BS)
            pad = BS - len(fileData)
            if not chunkLoop:
                break
            elif 0 < pad < BS:
                # short final chunk: pad to a full block, stop after emitting it
                fileData = fileData + (chr(pad) * pad)
                chunkLoop = False
            elif not fileData:
                # aligned plaintext: emit one extra block of pure padding
                fileData = chr(BS) * BS
                chunkLoop = False
            # CBC chaining: XOR with the previous ciphertext block (or the IV)
            xorResult = strxor(nonce, fileData)
            nonce = aesObject.encrypt(xorResult)
            # NOTE(review): the block is encrypted twice (chain + output) —
            # same result, but redundant work.
            cipherTxt += aesObject.encrypt(xorResult)
    p.close()  # redundant: the with-statement already closed the file
    return cipherTxt
    pass
def cbcDec(nonce, keyAES, cfile, initPos):
    """Decrypt AES-CBC ciphertext in cfile starting at byte offset initPos.

    nonce is the IV; the trailing padding is stripped from the result using
    the last plaintext byte as the pad length.
    """
    aesObject = AES.new(keyAES)
    plainTxt = ''
    cipherTxt = ''
    pad = ''
    #unpad = lambda fileData : fileData[:-ord(fileData[len(fileData)-1:])]
    with open(cfile, 'r') as p:
        p.seek(initPos)
        while True:
            fileData = p.read(16)
            if not fileData:
                break
            cipherTxt = fileData
            dec = aesObject.decrypt(cipherTxt)
            # CBC: XOR with the previous ciphertext block (IV for the first)
            plainTxt += strxor(nonce, dec)
            nonce = cipherTxt
    p.close()  # redundant: the with-statement already closed the file
    return plainTxt[:-ord(plainTxt[-1])]
    pass
def encrypt(pfile, cfile):
    """Encrypt pfile into cfile: DER header (KDF params, cipher, HMAC) + ciphertext."""
    chunkLoop = True
    # benchmarking
    iter = benchmark()
    # asking for password
    print "[?] Enter password:",
    password = raw_input()
    # deriving key: 36 bytes from PBKDF2 = 16-byte AES key + 20-byte HMAC key
    nonce = os.urandom(16)
    salt = os.urandom(8)
    keyLen = 36
    keyPass = PBKDF2(password, salt, keyLen, int(iter))
    keyAES = keyPass[:16]
    keyHMAC = keyPass[16:]
    cipherTxt = cbcEnc(nonce, keyAES, pfile)
    # writing ciphertext in temporary file and calculating HMAC digest
    with open(cfile + '.tmp', "w+") as p: # I figure it is bad practice to create this file in the same directory
        p.write(cipherTxt) # but Windows gives me a headache when I try to write in another directory
    p.close()  # redundant: the with-statement already closed the file
    macer = hmac.new(keyHMAC, None, hashlib.sha1)
    with open(cfile + '.tmp', 'r') as p:
        # HMAC the ciphertext in 512-byte chunks
        while chunkLoop:
            data_chunk = p.read(512)
            if not data_chunk:
                chunkLoop = False
            macer.update(data_chunk)
    p.close()  # redundant: the with-statement already closed the file
    # writing DER structure in cfile:
    # SEQUENCE( SEQUENCE(salt, iterations, keyLen),
    #           SEQUENCE(aes128-CBC OID, IV),
    #           SEQUENCE(SEQUENCE(SHA-1 OID, NULL), digest) )
    asn = asn1_sequence(asn1_sequence(asn1_octetstring(salt) + asn1_integer(int(iter)) + asn1_integer(keyLen))
                        + asn1_sequence(asn1_objectidentifier([2, 16, 840, 1, 101, 3, 4, 1, 2])
                                        + asn1_octetstring(nonce))
                        + asn1_sequence(asn1_sequence(asn1_objectidentifier([1, 3, 14, 3, 2, 26]) + asn1_null())
                                        + asn1_octetstring(macer.digest())))
    # writing temporary ciphertext file to cfile, appended after the header
    with open(cfile, 'w+') as p:
        p.write(asn)
        with open(cfile + '.tmp', 'r') as y:
            for x in y:
                p.write(x)
        y.close()  # redundant: the with-statement already closed the file
    p.close()  # redundant: the with-statement already closed the file
    # deleting temporary ciphertext file
    os.remove(cfile + '.tmp')
    pass
def decrypt(cfile, pfile):
    """Verify the HMAC over cfile's ciphertext and decrypt it into pfile."""
    # reading DER structure
    with open(cfile, 'r') as p:
        fileData = p.read()
    p.close()  # redundant: the with-statement already closed the file
    nonce = str(decoder.decode(fileData)[0][1][1])
    salt = str(decoder.decode(fileData)[0][0][0])
    keyLen = int(decoder.decode(fileData)[0][0][2])
    iter = int(decoder.decode(fileData)[0][0][1])
    digest = decoder.decode(fileData)[0][2][1]
    # NOTE(review): 0x5D hard-codes the DER header size assuming every field
    # but the iteration-count INTEGER has a fixed length — fragile; confirm
    # against the writer in encrypt().
    cipherInit = 0x5D - ord(asn1_len(asn1_integer(iter)))
    # asking for password
    print "[?] Enter password:",
    password = raw_input()
    # deriving key (same split as encrypt: 16 bytes AES + 20 bytes HMAC)
    keyPass = PBKDF2(password, salt, keyLen, int(iter))
    keyAES = keyPass[:16]
    keyHMAC = keyPass[16:]
    # first pass over ciphertext to calculate and verify HMAC
    macer = hmac.new(keyHMAC, None, hashlib.sha1)
    with open(cfile, 'r') as p:
        p.seek(cipherInit)
        while True:
            fileData = p.read(512)
            if not fileData:
                break
            macer.update(fileData)
    p.close()  # redundant: the with-statement already closed the file
    fileDigest = macer.digest()
    if fileDigest != digest:
        print "[-] Wrong key or message has been manipulated!"
    else:
        print "[+] HMAC verification successful!"
    # second pass over ciphertext to decrypt
    # NOTE(review): decryption proceeds even when the HMAC check failed —
    # confirm whether it should abort instead.
    plainTxt = cbcDec(nonce, keyAES, cfile, cipherInit)
    with open(pfile, 'w+') as p:
        p.write(plainTxt)
    p.close()  # redundant: the with-statement already closed the file
    pass
def usage():
    """Print the command-line usage summary and exit with status 1."""
    print "Usage:"
    print "-encrypt <plaintextfile> <ciphertextfile>"
    print "-decrypt <ciphertextfile> <plaintextfile>"
    sys.exit(1)
# Command-line entry point: dispatch on the mode flag (argv[1]).
if len(sys.argv) != 4:
    usage()
elif sys.argv[1] == '-encrypt':
    encrypt(sys.argv[2], sys.argv[3])
elif sys.argv[1] == '-decrypt':
    decrypt(sys.argv[2], sys.argv[3])
else:
    usage()
|
# Normalna klasa
# Przykład zastosowania
class Stack2:
    """A minimal LIFO stack backed by a Python list."""

    def __init__(self):
        # Top of the stack is the end of the list.
        self.items = []

    def push(self, item):
        """Place item on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the item on top of the stack."""
        return self.items.pop()

    def __len__(self):
        """Number of items currently on the stack."""
        return len(self.items)
if __name__ == '__main__':
    # Benchmark this class-based stack against the closure-based Stack from
    # the project-local example2 module using the same push/pop workload.
    import example2
    from timeit import timeit
    print('Używanie klasy')
    s = Stack2()
    print(timeit('s.push(1); s.pop()', 'from __main__ import s'))
    print('Używanie domknięcia')
    s = example2.Stack()
    print(timeit('s.push(1); s.pop()', 'from __main__ import s'))
|
import pandas as pd
import os
import warnings
warnings.filterwarnings('ignore')
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
# Load the vital-signs data set; the 'classe' column holds the target label.
sinais = pd.read_csv(os.path.abspath("/var/www/physionet/public_html/python/data-set-sinais-vitais.csv"), delimiter=',')
print('Quantidade de registros totais: ')
print(len(sinais))
# Every column except the label is a feature.
features = sinais.columns.difference(['classe'])
X = sinais[features].values
y = sinais['classe'].values
print('Quantidade de classes: ')
print(len(sinais['classe'].value_counts()))
print('Quantidade por classe: ')
print(sinais['classe'].value_counts())
'''
scikit-learn usa uma versão otimizada do algoritmo CART
Parâmetros DecisionTreeClassifier
random_state: É comum na maioria dos algoritmos e é importante
mantê-lo fixo, o valor não importa, desde que seja sempre o mesmo,
dessa forma conseguiremos gerar sempre o mesmo modelo com os mesmos dados.
criterion: É a métrica utilizada para construção da árvore de decisão. Pode ser gini ou entropy.
max_depth: É a profundida máxima da árvore, profundida demais pode gerar um sistema super
especializado nos dados de treinamento, também conhecido como overfitting.
Profundida de menos vai diminuir a capacidade de generalização do modelo.
'''
# Fit on the full data set to inspect feature importances.
classificador = DecisionTreeClassifier()
classificador.fit(X, y) # Treinando com tudo
features_importance = zip(classificador.feature_importances_, features)
for importance, feature in sorted(features_importance, reverse=True):
    print("%s: %f%%" % (feature, importance*100))
# Grid-search over the split criterion only, scored by accuracy.
from sklearn.model_selection import GridSearchCV
param_grid = {
    "criterion": ['entropy', 'gini']
}
grid_search = GridSearchCV(classificador, param_grid, scoring="accuracy")
grid_search.fit(X, y)
classificador = grid_search.best_estimator_
# NOTE(review): bare expression below has no effect — confirm it can go.
grid_search.best_params_, grid_search.best_score_
print(grid_search.best_score_)
print(grid_search.best_params_)
|
#!/usr/bin/python
import re
import sys
import NER
import subprocess
# Assemble the (states, duplicates, file-contents) argument bundle that
# NER.find expects, then run English NER over the file named on argv[1].
a = NER.states()
b = NER.dupli()
c = NER.readFile()
args = []
args.insert(0,a)
args.insert(1,b)
args.insert(2,c)
# NOTE(review): `file` and `input` shadow builtins, and the handle is never
# closed — consider a with-statement and different names.
file=open(sys.argv[1],'r')
input = file.read()
a = NER.find(input,'en',args)
#print a
|
from django.shortcuts import render, redirect
from .forms import NewReviewerForm, ReviewForm
from django.contrib import messages
from django.conf import settings
from django.http import HttpResponse
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import login as auth_login
from postapp.models import Post, PostUser
from django.contrib.auth.decorators import login_required
from .models import Review, ReviewUser
from django.urls import reverse
# Create your views here.
def register2(request):
    """Register a new reviewer account; authenticated users are redirected
    to the feed matching their existing account type."""
    if request.user.is_authenticated:
        try:
            check = PostUser.objects.get(user=request.user)
            return redirect(reverse('viewfeed'))
        except:
            # NOTE(review): bare except — PostUser.DoesNotExist is the case
            # intended; anything else is silently treated as "reviewer".
            return redirect(reverse('rviewfeed'))
    if request.method == 'POST':
        form = NewReviewerForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Registration successful. You can login now!")
            return redirect("login2")
        # Fix: messages.error requires the request as its first argument;
        # the previous call raised TypeError on an invalid form.
        messages.error(request, "There was some problem. Please try again!")
    else:
        form = NewReviewerForm()
    context = {'form': form}
    return render(request,'register2.html', context)
def login2(request):
    """Log a reviewer in; already-authenticated users go straight to a feed."""
    if request.user.is_authenticated:
        try:
            check = PostUser.objects.get(user=request.user)
            return redirect(reverse('viewfeed'))
        except:
            # Not a PostUser -> assume reviewer feed.
            return redirect(reverse('rviewfeed'))
    if request.method == 'POST':
        form = AuthenticationForm(data=request.POST)
        if form.is_valid():
            user = form.get_user()
            try:
                # Only accounts with a ReviewUser profile may log in here.
                check = ReviewUser.objects.get(user=user)
                auth_login(request, user)
                return redirect("rviewfeed")
            except:
                # Valid credentials but not a reviewer: fall through to the
                # generic error below.
                pass
        messages.error(request, "Username or password wrong")
    else:
        form = AuthenticationForm()
    return render(request, 'login2.html', {'form': form})
@login_required
def rviewfeed(request):
    """Show every post to a logged-in reviewer.

    Non-reviewers get a plain permission-denied response.
    """
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    postlist = Post.objects.all()
    # Queryset truthiness avoids the len()==0 comparison.
    if not postlist:
        messages.error(request, "Nothing to show!!")
    # Fix: always return a response; the previous version returned None
    # (an HTTP 500) for any non-GET request.
    return render(request, 'rfeed.html', {'postlist': postlist})
@login_required
def addreview(request, postid):
    """Render the empty review form for the given post (reviewers only)."""
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    if request.method == 'GET':
        post = Post.objects.get(id=postid)
        form = ReviewForm()
        return render(request, 'review.html', {'post': post, 'form': form})
    # NOTE(review): non-GET requests fall through and return None (HTTP 500)
    # — confirm a redirect is wanted here.
@login_required
def writereview(request, postid):
    """Persist a new review for the given post from the submitted form."""
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    if request.method == 'POST':
        form = ReviewForm(request.POST)
        if form.is_valid():
            post = Post.objects.get(id=postid)
            rtext = form.cleaned_data['rtext']
            user = ReviewUser.objects.get(user=request.user)
            review = Review(user=user, post=post, text=rtext)
            review.save()
            return redirect(viewreviews)
    # NOTE(review): a GET or an invalid form falls through and returns None
    # (HTTP 500) — confirm a redirect/render is wanted here.
@login_required
def viewreviews(request):
    """List every review written by the logged-in reviewer."""
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    if request.method == 'GET':
        user = ReviewUser.objects.get(user=request.user)
        reviews = Review.objects.filter(user=user)
        if len(reviews)==0:
            messages.error(request, "You haven't reviewed any post yet!!")
        return render(request, 'myreviews.html', {'reviews': reviews})
@login_required
def editreview(request, reviewid):
    """GET: show the edit form pre-filled; POST: persist the edited text."""
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    if request.method == 'GET':
        review = Review.objects.get(id=reviewid)
        data = {'rtext': review.text}
        form = ReviewForm(initial = data)
        return render(request, 'editreview.html', {'form': form, 'review': review})
    if request.method == 'POST':
        review = Review.objects.get(id=reviewid)
        form = ReviewForm(request.POST, request.FILES)
        if form.is_valid():
            review.text = form.cleaned_data['rtext']
            review.save()
            return redirect(viewreviews)
    # NOTE(review): an invalid POST falls through and returns None (HTTP 500)
    # — confirm a re-render of the form is wanted here.
@login_required
def deletereview(request, reviewid):
    """Delete the given review and return to the reviewer's list."""
    try:
        check = ReviewUser.objects.get(user=request.user)
    except:
        # NOTE(review): bare except — ReviewUser.DoesNotExist is the intended case.
        return HttpResponse("You don't have permission to view this page")
    # NOTE(review): a destructive delete on GET is unsafe (no CSRF protection,
    # prefetchable by browsers/crawlers) — confirm this should require POST.
    if request.method == 'GET':
        review = Review.objects.get(id=reviewid)
        review.delete()
        return redirect(viewreviews)
from clients import *
from contracts import *
from devices import *
from reports import *
from scans import *
from schedules import *
from vulnerabilities import *
|
# -*- coding: utf-8 -*-
__author__ = 'lufo'
import requests
import json
import os
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
def face_detection(img_path):
    """
    Run the YOLO binary on img_path and report whether a detection was found.
    :return: True when the detector's output mentions the target label,
             False otherwise
    """
    path = '/Users/lufo/Downloads/darknet/'
    os.chdir(path)
    output = subprocess.check_output('./darknet detection test ./yolo_test_1.cfg ./yolo_test_1_39000.weights ' + img_path,
                                     shell=True)
    # The retrained model reports its positive class under this label.
    return 'aeroplane' in output
def uniqify(seq, idfun=None):
    """Return seq with duplicates removed, preserving first-seen order.

    :param seq: iterable of items
    :param idfun: optional key function; two items are duplicates when their
                  keys compare equal (defaults to the item itself)
    """
    if idfun is None:
        def idfun(x): return x
    seen = {}
    result = []
    for item in seq:
        marker = idfun(item)
        # Fix: dict.has_key() is Python-2-only; `in` is equivalent and works
        # on both Python 2 and 3.
        if marker in seen: continue
        seen[marker] = 1
        result.append(item)
    return result
def save_img_url(keyword_list, path='./names/'):
    """For each keyword, query Baidu image search (60 results per page) and
    write the de-duplicated image URLs to path/<keyword>.txt."""
    number_of_img = 2000
    for keyword in keyword_list:
        print keyword
        # Pages needed to cover number_of_img results at 60 per page
        pn = number_of_img / 60 + 1
        url_list = []
        for i in xrange(pn):
            # word is the keyword, rn the page size, pn the start offset
            search_url = 'http://image.baidu.com/i?tn=baiduimagejson&ie=utf-8&width=&height=&word=%s&rn=60&pn=%s' % (
                keyword, str(i * 60))
            try:
                resp = requests.get(search_url, timeout=5, allow_redirects=False).content
                # Response is GB2312-encoded JSON; undecodable bytes are dropped
                resp_js = json.loads(resp.decode('gb2312', errors='ignore'))
                if resp_js['data']:
                    # Last element is an empty sentinel entry — skip it
                    for x in resp_js['data'][:-1]:
                        try:
                            url_list.append(x['objURL'])
                        except Exception, e:
                            print e
            except Exception, e:
                print e
        if not os.path.isdir(path):
            os.mkdir(path)
        with open(path + keyword + '.txt', 'w') as fw:
            for url in uniqify(url_list):
                fw.write(url + '\n')
def save_img(img_url_list, path):
    """Download every URL to path/<index>.jpg; failures are logged and skipped.

    Note: the destination file is created before the download, so a failed
    fetch leaves an empty .jpg behind.
    """
    for i, img_url in enumerate(img_url_list):
        img_path = path + str(i) + '.jpg'
        with open(img_path, 'wb') as fw:
            try:
                fw.write(requests.get(img_url, timeout=5, allow_redirects=False).content)
            except Exception, e:
                print img_url
                print e
def save_all_img(begin=0, end=200000):
    """Download images for the names.txt entries in [begin:end), reading each
    person's URL list from ./names/<name>.txt into ./names/<name>/."""
    file_list = []
    with open('names.txt') as fr:
        for keyword in fr:
            file_list.append((keyword.strip() + '.txt'))
    for file in file_list[begin:end]:
        img_url_list = []
        print './names/' + os.path.normcase(file)
        with open('./names/' + os.path.normcase(file)) as fr:
            for url in fr:
                img_url_list.append(url.strip())
        # One output directory per person
        path = './names/' + file.split('.txt')[0] + '/'
        if not os.path.isdir(path):
            os.mkdir(path)
        save_img(img_url_list, path)
def save_img_url_main(step=100, number_of_threads=4, begin=800):
    """Fetch URL lists in parallel: each round hands `step` keywords to each
    of `number_of_threads` threads, starting at index `begin`."""
    keyword_list = []
    with open('names.txt') as fr:
        for keyword in fr:
            keyword_list.append(keyword.strip())
    end = len(keyword_list)
    pool = ThreadPool(number_of_threads)
    for i in xrange(begin, end, step * number_of_threads):
        pool.map(save_img_url, [keyword_list[i + j * step:i + (j + 1) * step] for j in xrange(number_of_threads)])
    pool.close()
    pool.join()
def save_all_img_main(step=25, number_of_threads=4, begin=0):
    """
    Save all images in parallel.
    :param step: how many people's images each thread saves per round
    :param number_of_threads: number of threads to start
    :param begin: index in the list to start crawling from
    """
    keyword_list = []
    with open('names.txt') as fr:
        for keyword in fr:
            keyword_list.append(keyword.strip())
    end = len(keyword_list)
    pool = ThreadPool(number_of_threads)
    # Each worker handles a contiguous [x, x+step) slice of the name list
    func = lambda x: save_all_img(x, x + step)
    for i in xrange(begin, end, step * number_of_threads):
        pool.map(func, xrange(i, i + step * number_of_threads, step))
    pool.close()
    pool.join()
def delete_img_without_face(begin=0, end=200000):
    """
    Delete downloaded images in which the detector finds no face,
    for names.txt entries in [begin:end).
    """
    file_list = []
    with open('names.txt') as fr:
        for keyword in fr:
            file_list.append((keyword.strip() + '.txt'))
    for file in file_list[begin:end]:
        path = '/Users/lufo/PycharmProjects/images/names/' + file.split('.txt')[0] + '/'
        # Collect every .jpg under this person's directory tree
        path_list = []
        for dir_info in os.walk(path):
            for filename in dir_info[2]:
                if '.jpg' in filename:
                    path_list.append(os.path.join(dir_info[0], filename))
        for img_path in path_list:
            print img_path
            if not face_detection(img_path):
                os.remove(img_path)
def delete_img_without_face_main(step=25, number_of_threads=8, begin=0):
    """
    Delete images without faces, in parallel: each worker thread handles a
    contiguous `step`-sized slice of the name list.
    """
    keyword_list = []
    with open('names.txt') as fr:
        for keyword in fr:
            keyword_list.append(keyword.strip())
    end = len(keyword_list)
    pool = ThreadPool(number_of_threads)
    func = lambda x: delete_img_without_face(x, x + step)
    for i in xrange(begin, end, step * number_of_threads):
        pool.map(func, xrange(i, i + step * number_of_threads, step))
    pool.close()
    pool.join()
if __name__ == '__main__':
    # Earlier pipeline stages, kept for reference:
    # save_all_img()
    # delete_img_without_face_main(step=1, number_of_threads=7)
    # save_img_url_main(number_of_threads=1, begin=0)
    save_all_img_main(number_of_threads=4, begin=0)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the PKGame model.

    NOTE(review): the created_at default is a datetime LITERAL baked in when
    the migration was generated (2016-12-28), not "now" at insert time —
    confirm the model uses auto_now_add/default=timezone.now instead.
    Choice labels are UTF-8 byte strings (Chinese text), as generated.
    """

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PKGame',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                # Challenger and challenged player, stored by name/identifier.
                ('invitor', models.CharField(max_length=100)),
                ('invitee', models.CharField(max_length=100)),
                ('created_at', models.DateTimeField(default=datetime.datetime(2016, 12, 28, 15, 43, 56, 562297))),
                # Starting assets for both sides; fixed, not editable in admin forms.
                ('invitor_init_assets', models.FloatField(default=300000.0, editable=False)),
                ('invitee_init_assets', models.FloatField(default=300000.0, editable=False)),
                # 0 waiting / 1 in progress / 2 finished / -1 cancelled.
                ('status', models.IntegerField(default=0, choices=[(0, b'\xe7\xad\x89\xe5\xbe\x85\xe4\xb8\xad'), (1, b'\xe9\x80\xb2\xe8\xa1\x8c\xe4\xb8\xad'), (2, b'\xe5\xb7\xb2\xe7\xb5\x90\xe6\x9d\x9f'), (-1, b'\xe5\xb7\xb2\xe5\x8f\x96\xe6\xb6\x88')])),
                # Duration in weeks; 0 means the game never expires.
                ('life', models.IntegerField(default=1, choices=[(1, b'1\xe9\x80\xb1'), (2, b'2\xe9\x80\xb1'), (3, b'3\xe9\x80\xb1'), (4, b'4\xe9\x80\xb1'), (5, b'5\xe9\x80\xb1'), (6, b'6\xe9\x80\xb1'), (7, b'7\xe9\x80\xb1'), (8, b'8\xe9\x80\xb1'), (0, b'\xe6\xb0\xb8\xe4\xb9\x85\xe6\x8c\x81\xe7\xba\x8c')])),
                # 1 compare by performance, 2 compare by assets.
                ('mode', models.IntegerField(default=1, choices=[(1, b'\xe7\xb8\xbe\xe6\x95\x88\xe8\xa9\x95\xe6\xaf\x94'), (2, b'\xe8\xb3\x87\xe7\x94\xa2\xe8\xa9\x95\xe6\xaf\x94')])),
            ],
        ),
    ]
|
"""Define vanilla CNNs with torch backbones, mainly for patch classification."""
import numpy as np
import torch
import torchvision.models as torch_models
from torch import nn
from torchvision.models import WeightsEnum
from tiatoolbox.models.models_abc import ModelABC
from tiatoolbox.utils.misc import select_device
def _get_architecture(arch_name, weights: str or WeightsEnum = "DEFAULT", **kwargs):
    """Get a model.

    Model architectures are either already defined within torchvision or
    they can be custom-made within tiatoolbox.

    Args:
        arch_name (str):
            Architecture name.
        weights (str or WeightsEnum):
            torchvision model weights (get_model_weights).
        kwargs (dict):
            Key-word arguments.

    Returns:
        List of PyTorch network layers wrapped with `nn.Sequential`.
        https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html

    """
    creators = {
        "alexnet": torch_models.alexnet,
        "resnet18": torch_models.resnet18,
        "resnet34": torch_models.resnet34,
        "resnet50": torch_models.resnet50,
        "resnet101": torch_models.resnet101,
        "resnext50_32x4d": torch_models.resnext50_32x4d,
        "resnext101_32x8d": torch_models.resnext101_32x8d,
        "wide_resnet50_2": torch_models.wide_resnet50_2,
        "wide_resnet101_2": torch_models.wide_resnet101_2,
        "densenet121": torch_models.densenet121,
        "densenet161": torch_models.densenet161,
        "densenet169": torch_models.densenet169,
        "densenet201": torch_models.densenet201,
        "inception_v3": torch_models.inception_v3,
        "googlenet": torch_models.googlenet,
        "mobilenet_v2": torch_models.mobilenet_v2,
        "mobilenet_v3_large": torch_models.mobilenet_v3_large,
        "mobilenet_v3_small": torch_models.mobilenet_v3_small,
    }
    if arch_name not in creators:
        msg = f"Backbone `{arch_name}` is not supported."
        raise ValueError(msg)

    model = creators[arch_name](weights=weights, **kwargs)

    # Unroll the definition and strip off the final GAP and FCN layers.
    if "resnet" in arch_name or "resnext" in arch_name:
        return nn.Sequential(*list(model.children())[:-2])
    if "inception_v3" in arch_name or "googlenet" in arch_name:
        return nn.Sequential(*list(model.children())[:-3])
    # densenet / alexnet / mobilenet expose the backbone as `.features`.
    return model.features
class CNNModel(ModelABC):
    """Backbone CNN with an appended fully-connected classification head.

    Args:
        backbone (str):
            Model name.
        num_classes (int):
            Number of classes output by model.

    Attributes:
        num_classes (int):
            Number of classes output by the model.
        feat_extract (nn.Module):
            Backbone CNN model.
        pool (nn.Module):
            Type of pooling applied after feature extraction.
        classifier (nn.Module):
            Linear classifier module used to map the features to the
            output.

    """

    def __init__(self, backbone, num_classes=1) -> None:
        """Initialize :class:`CNNModel`."""
        super().__init__()
        self.num_classes = num_classes

        self.feat_extract = _get_architecture(backbone)
        self.pool = nn.AdaptiveAvgPool2d((1, 1))

        # Best way to retrieve channel dynamically is passing a small forward pass
        dummy = torch.rand([2, 3, 96, 96])
        num_feat_ch = self.feat_extract(dummy).shape[1]
        self.classifier = nn.Linear(num_feat_ch, num_classes)

    # pylint: disable=W0221
    # because abc is generic, this is actual definition
    def forward(self, imgs):
        """Pass input data through the model.

        Args:
            imgs (torch.Tensor):
                Model input.

        """
        features = self.feat_extract(imgs)
        pooled = torch.flatten(self.pool(features), 1)
        logit = self.classifier(pooled)
        return torch.softmax(logit, -1)

    @staticmethod
    def postproc(image):
        """Define the post-processing of this class of model.

        This simply applies argmax along last axis of the input.

        """
        return np.argmax(image, axis=-1)

    @staticmethod
    def infer_batch(model: nn.Module, batch_data: torch.Tensor, *, on_gpu: bool):
        """Run inference on an input batch.

        Contains logic for forward operation as well as i/o aggregation.

        Args:
            model (nn.Module):
                PyTorch defined model.
            batch_data (torch.Tensor):
                A batch of data generated by
                `torch.utils.data.DataLoader`.
            on_gpu (bool):
                Whether to run inference on a GPU.

        """
        device = select_device(on_gpu=on_gpu)
        patches = batch_data.to(device).type(torch.float32)
        patches = patches.permute(0, 3, 1, 2).contiguous()  # NHWC -> NCHW

        # Inference mode; do not compute gradients.
        model.eval()
        with torch.inference_mode():
            output = model(patches)
        # Output should be a single tensor or scalar
        return output.cpu().numpy()
class CNNBackbone(ModelABC):
    """Pretrained torchvision backbone with its classification layer removed.

    This is a wrapper for pretrained models within pytorch; the forward
    pass returns globally-average-pooled features.

    Args:
        backbone (str):
            Model name. Supported names (loaded with their default
            pytorch weights): "alexnet", "resnet18", "resnet34",
            "resnet50", "resnet101", "resnext50_32x4d",
            "resnext101_32x8d", "wide_resnet50_2", "wide_resnet101_2",
            "densenet121", "densenet161", "densenet169", "densenet201",
            "inception_v3", "googlenet", "mobilenet_v2",
            "mobilenet_v3_large", "mobilenet_v3_small".

    Examples:
        >>> # Creating resnet50 architecture from default pytorch
        >>> # without the classification layer with its associated
        >>> # weights loaded
        >>> model = CNNBackbone(backbone="resnet50")
        >>> model.eval()  # set to evaluation mode
        >>> # dummy sample in NHWC form
        >>> samples = torch.rand(4, 3, 512, 512)
        >>> features = model(samples)
        >>> features.shape  # features after global average pooling
        torch.Size([4, 2048])

    """

    def __init__(self, backbone) -> None:
        """Initialize :class:`CNNBackbone`."""
        super().__init__()
        self.feat_extract = _get_architecture(backbone)
        self.pool = nn.AdaptiveAvgPool2d((1, 1))

    # pylint: disable=W0221
    # because abc is generic, this is actual definition
    def forward(self, imgs):
        """Pass input data through the model.

        Args:
            imgs (torch.Tensor):
                Model input.

        """
        features = self.feat_extract(imgs)
        pooled = self.pool(features)
        return torch.flatten(pooled, 1)

    @staticmethod
    def infer_batch(model: nn.Module, batch_data: torch.Tensor, *, on_gpu: bool):
        """Run inference on an input batch.

        Contains logic for forward operation as well as i/o aggregation.

        Args:
            model (nn.Module):
                PyTorch defined model.
            batch_data (torch.Tensor):
                A batch of data generated by
                `torch.utils.data.DataLoader`.
            on_gpu (bool):
                Whether to run inference on a GPU.

        """
        device = select_device(on_gpu=on_gpu)
        patches = batch_data.to(device).type(torch.float32)
        patches = patches.permute(0, 3, 1, 2).contiguous()  # NHWC -> NCHW

        # Inference mode; do not compute gradients.
        model.eval()
        with torch.inference_mode():
            output = model(patches)
        # Output is wrapped in a single-element list here (unlike CNNModel).
        return [output.cpu().numpy()]
|
import datetime
import time
from functools import wraps
import logging
logger = logging.getLogger(__name__)


def time_consumed(func):
    """Decorator that logs the wall-clock duration of each call to *func*.

    The elapsed time is logged at INFO level formatted by
    ``datetime.timedelta`` (``H:MM:SS[.ffffff]``); the wrapped function's
    return value is passed through unchanged.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        elapsed = str(datetime.timedelta(seconds=time.time() - start))
        # Lazy %-style arguments: the message is only formatted when the
        # record is actually emitted (cheaper than eager '%' formatting).
        logger.info('%s time consumed: %s', func.__name__, elapsed)
        return result
    return wrapper
|
import jax
from jax import numpy as jnp, random
import sys
sys.path.append(".")
from survae.nn.nets import MLP
import survae
from flax import linen as nn
import numpy as np
from survae.transforms import Abs
from survae.distributions import Bernoulli
# Seed the PRNG and split off a key for the transform calls.
rng = random.PRNGKey(0)
rng, key = random.split(rng)
x = random.uniform(rng, (3,2))
# Overwrite with a fixed repeated batch so the run is reproducible.
x = jnp.array([[0.3423, 0.2345,0.898] for _ in range(10000)])
print("==================== x =========================")
print(x)

# Absolute-value surjection with a Bernoulli sign distribution.
abs_transform = Abs(Bernoulli)

# Forward pass: y = |x| plus the log-det-Jacobian contribution.
y, ldj = abs_transform.forward(key, x)
print("====================== y =======================")
print(y)
print("====================== ldj =======================")
print(ldj)

# Invert and compare against the original input.
print("====================== inverse y[0] =======================")
recovered = abs_transform.inverse(key, y)
print(recovered)
print("====================== x[0] - inverse y[0] =======================")
print(x[0] - recovered[0])
print("=========================")
|
# Demonstration of Python method inheritance vs. overriding.
class A():
    def test(self):
        print("AAAAAAAAAAA")
class B(A):
    pass
b = B()
b.test()# B defines no test of its own, so the inherited A.test prints AAAAAAA
class A():
    def test(self):
        print("AAAAAAAAAAA")
class B(A):
    def test(self):
        print("BBBBBBBBBBBB")
b = B()
b.test()# B overrides test, so the subclass version prints BBBBBB
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse, HttpResponseRedirect
from django.template import loader
from .forms import ContactsForm
from django.shortcuts import render
from django.contrib import messages
from django.shortcuts import render
import logging
logger = logging.getLogger(__name__)
# Create your views here.
def index(request):
    """Render the static contacts landing page."""
    return render(request, "contacts/index.html", {})
def list(request):
    """Render a page listing every stored contact."""
    # Imported lazily so the model is resolved at request time.
    from contacts.models import Contact

    contact_qs = Contact.objects.all()
    return render(request, "contacts/list.html", {"contacts": contact_qs})
def add(request):
    """Create a new contact.

    GET renders an empty form. A valid POST saves the contact and
    redirects (post/redirect/get) so a refresh cannot double-submit.
    An invalid POST re-renders the bound form so field errors are shown;
    previously it redirected to "/add", which discarded the validation
    errors and the user's input.
    """
    if request.method == "POST":
        form = ContactsForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, "Contact Successfully added")
            return HttpResponseRedirect(redirect_to="/add")
        messages.error(request, "Unable to Create new contact")
        # fall through: re-render with the bound (invalid) form
    else:
        form = ContactsForm()
    template = loader.get_template("contacts/add.html")
    return HttpResponse(template.render({"form": form}, request))
|
"""
Implement an autocomplete system. That is, given a query string s and
a set of all possible query strings, return all strings in the set that
have s as a prefix.
For example, given the query string de and the set of strings
[dog, deer, deal], return [deer, deal].
Hint: Try preprocessing the dictionary into a more efficient data
structure to speed up queries.
"""
class TrieNode:
    """Single trie node: children keyed by character, `end` marks a word."""

    def __init__(self, char=None):
        self.char = char        # character stored on this node (None for root)
        self.children = {}      # maps next character -> TrieNode
        self.end = False        # True when a complete word terminates here


class Trie:
    """Prefix tree supporting insertion and prefix (autocomplete) search."""

    def __init__(self):
        self.root = TrieNode()
        self.word_list = []  # results of the most recent search()

    def insert(self, key):
        """Insert the word ``key`` into the trie."""
        node = self.root
        for c in key:
            if not node.children.get(c):
                node.children[c] = TrieNode(c)
            node = node.children[c]
        node.end = True

    def search(self, key):
        """Print and return every stored word having ``key`` as a prefix.

        Returns:
            list: the matching words, or ``False`` when no stored word
            starts with ``key``.
        """
        node = self.root
        word = ""
        for c in key:
            if not node.children.get(c):
                return False
            word += c
            node = node.children[c]
        # BUG FIX: reset between searches; previously results accumulated
        # across calls, so repeated queries produced duplicate entries.
        self.word_list = []
        self.search_helper(node, word)
        for word in self.word_list:
            print(word)
        # BUG FIX: return the matches (the documented contract) instead of
        # only printing them.
        return self.word_list

    def search_helper(self, node, word):
        """Depth-first collect every complete word at or below ``node``."""
        if node.end:
            self.word_list.append(word)
        for char, child in node.children.items():
            self.search_helper(child, word + char)
if __name__ == '__main__':
    # Small demo: index a few words, then autocomplete the prefix "de".
    words = ["dog", "deer", "deal", "de"]
    prefix = "de"
    trie = Trie()
    for word in words:
        trie.insert(word)
    trie.search(prefix)
|
import FWCore.ParameterSet.Config as cms
from EventFilter.HcalRawToDigi.hcallaserhbhehffilter2012_cfi import *
# CMSSW sequence wrapping the 2012 HCAL HBHE/HF laser-event filter module
# imported above; include this sequence in a path to apply the filter.
hcallLaser2012Filter = cms.Sequence(hcallaserhbhehffilter2012)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class MovieItem(scrapy.Item):
    """Container for one scraped movie record.

    See https://doc.scrapy.org/en/latest/topics/items.html
    """

    mname = scrapy.Field()  # movie name/title
    mdesc = scrapy.Field()  # movie description text
    mimg = scrapy.Field()  # image URL — presumably the poster; confirm in the spider
    mlink = scrapy.Field()  # link to the movie's detail page
|
#! /usr/bin/env python
import sys
import random
import time
# data = sys.stdin.readlines()
# Shuffle of 0..9999 to sort; NOTE: xrange is Python 2 only.
nums = random.sample(xrange(10000), 10000)
def q_sort(nums, left, right):
    """Hoare-style in-place quicksort of nums[left:right+1]; returns nums.

    The pivot is the midpoint of the two boundary values; it need not be
    an element of the list, but partitioning still converges because the
    i <= j swap always advances both indices.
    """
    i, j = left, right
    # BUG FIX: the original `int(nums[i] + nums[j]) / 2` floor-divided on
    # Python 2 but produces a float on Python 3; `//` keeps the pivot
    # integral under both.
    pivot = (nums[i] + nums[j]) // 2
    while i < j:
        while nums[i] < pivot:
            i += 1
        while nums[j] > pivot:
            j -= 1
        if i <= j:
            # Idiomatic tuple swap instead of a temporary variable.
            nums[i], nums[j] = nums[j], nums[i]
            i += 1
            j -= 1
    # Recurse on the two partitions (sorting is in place).
    if left < j:
        q_sort(nums, left, j)
    if right > i:
        q_sort(nums, i, right)
    return nums
# Time the sort of the 10,000-element shuffle and report wall-clock seconds.
start_time = time.time()
array = q_sort(nums, 0, (len(nums) - 1))
end_time = time.time()
# Python 2 print statements; this script does not run under Python 3.
print array
print "Exec Time %.6f secs" % (float(end_time - start_time))
# -*- coding: utf-8 -*-
#import unittest # 1. llamar libreria
#import probando # 2. llamar la clase a probar
# https://cgoldberg.github.io/python-unittest-tutorial/
# Todos los métodos que comiencen con el nombre test serán ejecutados.
# probando testing
#class TestUM(unittest.TestCase): # 3. heredar esto
# def test_numbers_3_4(self):
# # self.assertEqual( multiply(3,4), 12)4
# self.assertEqual(7, 3+4)
#
# def testProbando(self): # 4. hacer clase con cualquier nombre que comience con test
# # a = hola()
# a = probando.probando()
# b = a.hola()
#
# self.assertEqual(b, "hola a todos") # 5. escribir un assert
# def testSumando(self):
# a = probando.probando()
# res = a.sumando(3,4)
#
## self.assertEqual(res, 7, "3 + 5 ")
# self.assertEqual(res, 7, "Error en la suma")
#if __name__ == '__main__': # 6. escribir un main
# unittest.main()
'''
any member function whose name begins with test in a class deriving from "unittest.TestCase" will be run
This abbreviated output includes the amount of time the tests took to run.
Tests have 3 possible outcomes:
ok
The test passes.
FAIL
The test does not pass, and raises an AssertionError exception.
ERROR
The test raises an exception other than AssertionError.
'''
|
#!/usr/bin/env python
import logging
import signal
from tornado.ioloop import IOLoop
from client import BetelbotClientConnection
from config import JsonConfig
from topic import getTopics
from util import Client, signalHandler
def onTopicPublished(topic, data=None):
    # Callback function that prints the name of topic and associated data to
    # console.
    #
    # This function is designed to be executed by Betelbot client whenever a
    # subscription receives new data from a publisher.
    #
    # Only non-empty payloads are printed, formatted as "[topic]v1 v2 ...".
    # NOTE: Python 2 print statement; this module is not Python 3 compatible.
    if data:
        print '[{}]{}'.format(topic, ' '.join(map(str, data)))
def main():
    """Start a Betelbot client, subscribe to all topics, and log published
    data to the console. The main purpose of this script is message logging.
    """
    # Allow Ctrl-C to stop the IOLoop cleanly.
    signal.signal(signal.SIGINT, signalHandler)

    config = JsonConfig()

    root_logger = logging.getLogger('')
    root_logger.setLevel(config.general.logLevel)

    client = Client('', config.server.port, BetelbotClientConnection)
    connection = client.connect()

    for topic in getTopics():
        connection.subscribe(topic, onTopicPublished)

    IOLoop.instance().start()
if __name__ == "__main__":
    # Script entry point.
    main()
import sys
import math
import datetime
#dirname =sys.argv[1]
# Usage: script.py <subject_id> <start_time YYYYMMDDHHMM>
# Merges the four per-activity Samsung heart-rate TSVs (walk, run, bike,
# VO2-max) for one subject into a single time-stamped output file.
subject=sys.argv[1]
tstart=sys.argv[2]
tstart=datetime.datetime.strptime(tstart,"%Y%m%d%H%M");
dirname='/srv/gsfs0/projects/ashley/common/device_validation/subject'+subject
# Each file is read whole, spaces stripped, split into lines, and empty
# lines removed.
walk=open(dirname+'/samsung_walk_'+subject+'.tsv','r').read().replace(' ','').split('\n')
while '' in walk:
    walk.remove('')
run=open(dirname+'/samsung_run_'+subject+'.tsv','r').read().replace(' ','').split('\n')
while '' in run:
    run.remove('')
bike=open(dirname+'/samsung_bike_'+subject+'.tsv','r').read().replace(' ','').split('\n')
while '' in bike:
    bike.remove('');
vo2max=open(dirname+'/samsung_max_'+subject+'.tsv','r').read().replace(' ','').split('\n')
while '' in vo2max:
    vo2max.remove('');
outf=open(dirname+'/samsung_'+subject+'.tsv','w')
outf.write('Date\tHeartRate\n')
# Running end-of-activity offsets so the four activities are laid out
# consecutively on one timeline (minutes since tstart).
maxwalk=0
maxrun=0
maxbike=0
# minute -> list of heart-rate samples in that minute
hr_dict=dict()
# Walk samples: lines are "minute,heart_rate".
for line in walk:
    tokens=line.split(',')
    time=math.floor(float(tokens[0]))
    hr=float(tokens[1])
    # HR below 20 bpm is discarded — presumably sensor noise; confirm.
    if hr < 20:
        continue
    if time > maxwalk:
        maxwalk=time
    if time not in hr_dict:
        hr_dict[time]=[hr]
    else:
        hr_dict[time].append(hr)
# Run samples are shifted to start after the walk segment.
for line in run:
    tokens=line.split(',')
    time=math.floor(float(tokens[0]))
    hr=float(tokens[1])
    if hr < 20:
        continue
    time=time+maxwalk;
    # NOTE: maxrun already includes the maxwalk offset (cumulative).
    if time > maxrun:
        maxrun=time
    if time not in hr_dict:
        hr_dict[time]=[hr]
    else:
        hr_dict[time].append(hr)
# Bike samples are shifted past the (already shifted) run segment.
for line in bike:
    tokens=line.split(',')
    time=math.floor(float(tokens[0]))
    hr=float(tokens[1])
    if hr < 20:
        continue
    time=time+maxrun
    if time > maxbike:
        maxbike=time
    if time not in hr_dict:
        hr_dict[time]=[hr]
    else:
        hr_dict[time].append(hr)
# VO2-max samples are shifted past the bike segment.
for line in vo2max:
    tokens=line.split(',')
    time=math.floor(float(tokens[0]))
    hr=float(tokens[1])
    if hr < 20:
        continue
    time=time+maxbike
    if time not in hr_dict:
        hr_dict[time]=[hr]
    else:
        hr_dict[time].append(hr)
# Emit one row per minute: timestamp (with a hard-coded -0700 UTC offset)
# and the mean of that minute's samples.
# NOTE(review): Python 2 only — dict.keys() returns a view in Python 3,
# which has no .sort(); use sorted(hr_dict) there.
timevals=hr_dict.keys()
timevals.sort()
for val in timevals:
    meanhr=sum(hr_dict[val])/len(hr_dict[val])
    ts=tstart+datetime.timedelta(minutes=val)
    tstring=datetime.datetime.strftime(ts,"%Y%m%d%H%M")
    outf.write(tstring+'00-0700'+'\t'+str(meanhr)+'\n')
|
import paramiko
from sshtunnel import SSHTunnelForwarder
import json
import io
import os
import sys
from base64 import b64decode
from sqlbag import S, load_sql_from_file, temporary_database as temporary_db, sql_from_folder, raw_execute, DB_ERROR_TUPLE
from migra import Migration
from contextlib import contextmanager
import tempfile
import shutil
import subprocess
from migrations import load_from_app_model
PENDING_FOLDER = 'DB/migration/pending'
DRY_RUN = False
@contextmanager
def tempfolder():
    """Yield a freshly created temporary directory, removing it on exit."""
    path = None
    try:
        path = tempfile.mkdtemp()
        yield path
    finally:
        # Only clean up if mkdtemp actually succeeded.
        if path:
            shutil.rmtree(path)
def databases_are_equal(dburl_a, dburl_b):
    """Return True when the two databases have identical schemas.

    Any differences found are printed as the migration SQL that would
    reconcile them.
    """
    with S(dburl_a) as sess_a, S(dburl_b) as sess_b:
        migration = Migration(sess_a, sess_b)
        migration.set_safety(False)
        migration.add_all_changes()
        if migration.statements:
            print('DIFFERENCES FOUND:')
            print(migration.sql)
        # No pending statements means the schemas already match.
        return not migration.statements
def do_schema_dump(dburl, outfile):
    """Dump the schema of ``dburl`` into ``outfile`` using pg_dump.

    Raises ``subprocess.CalledProcessError`` if pg_dump exits non-zero.
    """
    COMMAND = 'pg_dump --no-owner --no-privileges --schema-only --column-inserts -f {} {}'
    command = COMMAND.format(outfile, dburl)
    # BUG FIX: the original used "'... : '.format(command)" — a format call
    # with no placeholder — so the command was silently dropped from the log.
    print('MAKING DUMP OF SCHEMA: {}'.format(command))
    # NOTE(review): dburl is interpolated into a shell=True command line;
    # prefer a list argv if the URL can ever contain untrusted characters.
    subprocess.check_output(command, shell=True)
    print('DUMP COMPLETE')
def do_migration(REAL_DB_URL):
    """Find and apply the minimal suffix of pending migration files.

    For i = 0..len(pending), replays the live schema plus the last i
    pending files into a scratch database and compares it against the
    schema produced by the app model; the first suffix that matches is
    applied to the real database (unless DRY_RUN).

    Returns:
        int: 0 on success, 1 when no suffix yields the target schema.
    """
    PENDING = sql_from_folder(PENDING_FOLDER)
    with tempfolder() as tempf:
        outfile = os.path.join(tempf, 'schemadump.sql')
        # Snapshot the live schema so it can be replayed into scratch DBs.
        do_schema_dump(REAL_DB_URL, outfile)
        for i in range(len(PENDING) + 1):
            # The last i pending files, restored to chronological order.
            ATTEMPTING = list(reversed(PENDING))[:i]
            ATTEMPTING.reverse()
            print("TESTING MIGRATION USING LAST {} MIGRATION FILES".format(i))
            # dummy = live schema + candidate files; target = app model.
            with temporary_db() as dummy_db_url, temporary_db() as target_db_url:
                with S(dummy_db_url) as s_dummy:
                    load_sql_from_file(s_dummy, outfile)
                    try:
                        for migration_sql in ATTEMPTING:
                            raw_execute(s_dummy, migration_sql)
                    except DB_ERROR_TUPLE as e:
                        # Candidate suffix does not even execute; try next i.
                        print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS FAILED, MOVING TO NEXT'.format(i))
                        continue
                load_from_app_model(target_db_url)
                if databases_are_equal(dummy_db_url, target_db_url):
                    print('APPLYING LAST {} PENDING FILES'.format(i))
                    with S(REAL_DB_URL) as s_real:
                        for migration_sql in ATTEMPTING:
                            if not DRY_RUN:
                                print("EXECUTING:")
                                print(migration_sql)
                                raw_execute(s_real, migration_sql)
                            else:
                                print('DRY RUN, would apply:')
                                print(migration_sql)
                    print('SUCCESS: DATABASE UP TO DATE.')
                    return 0
                else:
                    # Executed cleanly but produced the wrong schema.
                    print('TRIED USING LAST {} PENDING FILES TO MIGRATE BUT THIS DOES NOT GIVE A CORRECT OUTCOME, MOVING TO NEXT'.format(i))
    print('COULD NOT FIND A CORRECT MIGRATION PATH :(')
    return 1
@contextmanager
def connection_from_encoded_config(encoded_config):
    """Open the SSH tunnel described by a base64-encoded JSON config and
    yield the database connection URL valid while the tunnel is up.
    """
    cfg = json.loads(b64decode(encoded_config))
    private_key = paramiko.RSAKey.from_private_key(
        file_obj=io.StringIO(cfg['private_key']))
    # Forward local_port -> private_host:5432 through remote_host:22.
    tunnel = SSHTunnelForwarder(
        (cfg['remote_host'], 22),
        ssh_username=cfg['user'],
        ssh_pkey=private_key,
        remote_bind_address=(cfg['private_host'], 5432),
        local_bind_address=('0.0.0.0', cfg['local_port']),
    )
    with tunnel:
        yield cfg['connection_url']
def main(task_method, encoded_config):
    """Run ``task_method`` against the tunnelled database URL and return
    its result (used as the process exit code)."""
    with connection_from_encoded_config(encoded_config) as db_url:
        outcome = task_method(db_url)
    # Tunnel is closed before the result is handed back.
    return outcome
if __name__ == '__main__':
    # Usage: script.py <task_method_name> <base64_json_config>
    _, task_method_name, encoded_config = sys.argv
    try:
        # Look up the task (e.g. ``do_migration``) by name on this module.
        task_method = getattr(sys.modules[__name__], task_method_name)
    except AttributeError:
        print('no such task')
        sys.exit(1)
    result = main(task_method, encoded_config)
    sys.exit(result)
##
## Lambda function to automatically remediate Evident signature: AWS:EC2 - default_vpc_check
##
## PROVIDED AS IS WITH NO WARRANTY OR GUARANTEES
## Copyright (c) 2016 Evident.io, Inc., All Rights Reserved
##
## ************************** !! W A R N I N G !! **************************
## * Deleting the default VPC is a permanent action. *
## * You must contact AWS Support if you want to create a new default VPC. *
## *************************************************************************
##
## ---
## Use lambda policy: ../policies/AWS:EC2_default_vpc_policy.json
## ---
##
from __future__ import print_function
import json
import re
import boto3
import sys
print('=> Loading function')
def lambda_handler(event, context):
    """Lambda entry point: parse an Evident SNS alert and, when it reports
    a failing default-VPC check, remediate (delete) the default VPC.

    Args:
        event: Lambda event with one SNS record whose message is an
            Evident alert encoded as JSON.
        context: Lambda context object (unused).
    """
    message = event['Records'][0]['Sns']['Message']
    alert = json.loads(message)
    status = alert['data']['attributes']['status']

    # If the signature didn't report a failure, exit..
    #
    if status != 'fail':
        print('=> Nothing to do.')
        # BUG FIX: the original called exit(), which raises SystemExit and
        # is the wrong way to finish a Lambda invocation; just return.
        return

    # Else, carry on..
    # (renamed loop variable: the original shadowed the builtin `type`)
    regions = None
    metadata = None
    for item in alert['included']:
        item_type = item['type']
        if item_type == "regions":
            regions = item
        if item_type == "metadata":
            metadata = item

    # Evident encodes regions with underscores; AWS wants dashes.
    region = re.sub('_', '-', regions['attributes']['code'])

    try:
        vpc_id = metadata['attributes']['data']['resource_id']
    except Exception as e:
        print('=> No VPC to evaluate.')
    else:
        results = auto_remediate(region, vpc_id)
        print('=> VPC Results: ', results)
def auto_remediate(region, vpc_id):
    """
    Auto-Remediate - Delete Default VPCs

    == Order of operation ==

    1.) Delete the internet-gateway
    2.) Delete subnets
    3.) Delete route-tables
    4.) Delete network access-lists
    5.) Delete security-groups
    6.) Delete the VPC

    Returns a human-readable result string.
    """

    ec2 = boto3.client('ec2', region_name=region)

    # Safety check: only ever delete the region's *default* VPC.
    is_default = ec2.describe_vpcs(VpcIds=[ vpc_id ])['Vpcs'][0]['IsDefault']
    if is_default != True:
        return vpc_id + ' in region ' + region + ' is not the default.'

    print ('=> Autoremediating default VPC ' + vpc_id, 'in region ' + region)
    # Dependencies must be removed before the VPC itself can be deleted.
    remove_ingw(ec2, vpc_id)
    remove_subs(ec2, vpc_id)
    remove_rtbs(ec2, vpc_id)
    remove_acls(ec2, vpc_id)
    remove_sgps(ec2, vpc_id)

    try:
        ec2.delete_vpc(VpcId=vpc_id)
    except Exception as e:
        # BUG FIX: Exception.message does not exist in Python 3; str(e)
        # is the portable way to get the message.
        results = str(e)
    else:
        results = vpc_id + ' in region ' + region + ' has been deleted.'

    return results
def remove_ingw(ec2, vpc_id):
    """ Detach and delete the internet-gateway attached to the VPC """
    igw = ec2.describe_internet_gateways(Filters=[ {'Name' : 'attachment.vpc-id', 'Values' : [ vpc_id ]} ])['InternetGateways']

    if igw:
        igw_id = igw[0]['InternetGatewayId']

        try:
            ec2.detach_internet_gateway(InternetGatewayId=igw_id, VpcId=vpc_id)
        except Exception as e:
            # BUG FIX: Exception.message does not exist in Python 3.
            print(str(e))

        try:
            ec2.delete_internet_gateway(InternetGatewayId=igw_id)
        except Exception as e:
            print(str(e))
def remove_subs(ec2, vpc_id):
    """ Delete every subnet belonging to the VPC """
    subs = ec2.describe_subnets(Filters=[{ 'Name' : 'vpc-id', 'Values' : [ vpc_id ]} ])['Subnets']

    if subs:
        for sub in subs:
            sub_id = sub['SubnetId']

            try:
                ec2.delete_subnet(SubnetId=sub_id)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3.
                print(str(e))
def remove_rtbs(ec2, vpc_id):
    """ Delete the VPC's route-tables (the main table cannot be deleted) """
    rtbs = ec2.describe_route_tables(Filters=[{ 'Name' : 'vpc-id', 'Values' : [ vpc_id ]} ])['RouteTables']

    if rtbs:
        for rtb in rtbs:
            # BUG FIX: the original kept only the *last* association's Main
            # flag; a table is the main table if ANY association says so.
            if any(assoc['Main'] == True for assoc in rtb['Associations']):
                continue
            rtb_id = rtb['RouteTableId']

            try:
                ec2.delete_route_table(RouteTableId=rtb_id)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3.
                print(str(e))
def remove_acls(ec2, vpc_id):
    """ Delete the VPC's network-access-lists (default ACL is kept) """
    acls = ec2.describe_network_acls(Filters=[{ 'Name' : 'vpc-id', 'Values' : [ vpc_id ]} ])['NetworkAcls']

    if acls:
        for acl in acls:
            # The default ACL cannot be deleted; skip it.
            if acl['IsDefault'] == True:
                continue
            acl_id = acl['NetworkAclId']

            try:
                ec2.delete_network_acl(NetworkAclId=acl_id)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3.
                print(str(e))
def remove_sgps(ec2, vpc_id):
    """ Delete the VPC's security-groups (the 'default' group is kept) """
    sgps = ec2.describe_security_groups(Filters=[{ 'Name' : 'vpc-id', 'Values' : [ vpc_id ]} ])['SecurityGroups']

    if sgps:
        for sgp in sgps:
            # The group literally named 'default' cannot be deleted; skip it.
            if sgp['GroupName'] == 'default':
                continue
            sg_id = sgp['GroupId']

            try:
                ec2.delete_security_group(GroupId=sg_id)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3.
                print(str(e))
|
# Project Euler 16: sum of the decimal digits of 2**1000.
base = 2
power = 1000
result = base ** power

# Sum the digits via the integer's string representation.
s = sum(int(digit) for digit in str(result))
print(s)
#This script will audit switchports to make sure access ports have portfast turned on.
#It will also output cmd files which are remediation scripts that can later be run against all devices.
from trigger.netdevices import NetDevices
from ciscoconfparse import CiscoConfParse
# Root directory of the per-device configuration backups.
dataDir = '/var/data/network-backups/'
nd = NetDevices()
# Accumulates remediation commands for the device currently being processed.
cfgDiffs = []
def standardizeInt(parsed_config):
    """Append portfast remediation lines to cfgDiffs for every Ethernet
    access port in `parsed_config` that lacks spanning-tree portfast."""
    for intf in parsed_config.find_lines('^interface.+?thernet'):
        iface_cfg = CiscoConfParse(parsed_config.find_children(intf, exactmatch=True))
        # Only access ports without portfast need remediation.
        if iface_cfg.find_lines('switchport mode access') and \
                not iface_cfg.find_lines('spanning-tree portfast'):
            cfgDiffs.append(intf)
            cfgDiffs.append(" spanning-tree portfast")
# For every known device: audit its backed-up startup-config and write a
# .cmd remediation script alongside the backup.
# NOTE: Python 2 only — `except Exception, e` and `print e` below.
for i in nd:
    try:
        orig_config = str(dataDir) + str(nd[i].nodeName) + '/startup-config.txt'
        parsed_config = CiscoConfParse(orig_config)
        # Wrap the per-interface fixes in a config session.
        cfgDiffs.append('conf t')
        standardizeInt(parsed_config)
        cfgDiffs.append('end')
        cfgDiffs.append('wr mem')
        #parsed_config.commit()
        #parsed_config.save_as(dataDir + nd[i].nodeName + '/startup-config.txt.new')
        newCfg = open(str(dataDir) + str(nd[i].nodeName) + '/startup-config.txt.cmd', 'w')
        for line in cfgDiffs:
            #print line
            newCfg.write(str(line) + '\n')
        # Reset the shared buffer for the next device.
        del cfgDiffs[0:len(cfgDiffs)]
        newCfg.close()
    except Exception, e:
        #print 'File ' + dataDir + nd[i].nodeName + '/startup-config.txt does not exist...skipping'
        print e
|
# Compute the area of a triangle from three user-entered side lengths.
a = float(input("请输入第一边的长度 : "))
b = float(input("请输入第二边的长度 : "))
c = float(input("请输入第三边的长度 : "))
# Triangle inequality: the sum of the two shorter sides must exceed the
# longest side (written as total minus the max compared against the max).
if a+b+c-max(a,b,c)>max(a,b,c):
    # Semi-perimeter of the triangle.
    p = (a + b + c)/2
    # Heron's formula for the area.
    s = (p*(p-a)*(p-b)*(p-c))**0.5
    print('三角形的面积是',s)
else:
    print("上述三条边不满足构成三角形的条件")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import fnmatch
import os
import pytest
import re
import sys
from mock import MagicMock, patch
from benchmarks.common.tensorflow.run_tf_benchmark import ModelBenchmarkUtil
from test_utils import platform_config
from test_utils.io import parse_json_files
def parse_model_args_file():
    """
    Gets test args from the models files in the specified directory to use as parameters
    for testing model benchmarking scripts. The file has a
    run_tf_benchmarks.py command with args with the corresponding run command
    that should get called from model_init.py
    """
    test_dir = os.path.dirname(os.path.realpath(__file__))
    return parse_json_files(os.path.join(test_dir, "tf_model_args"))
def delete_env_var(env_var):
    """Remove ``env_var`` from the process environment if it is set."""
    # pop() with a default replaces the membership-test/delete two-step
    # and is a no-op when the variable is absent.
    os.environ.pop(env_var, None)
def clear_kmp_env_vars():
    """
    Clear env vars to ensure that previously set values are not affecting the next test
    """
    for var_name in ("KMP_SETTINGS", "KMP_BLOCKTIME", "KMP_AFFINITY",
                     "KMP_HW_SUBSET", "OMP_NUM_THREADS"):
        delete_env_var(var_name)
# Get test args to use as parameters for test_run_benchmark
# (each entry: [test_args, expected_cmd, comment, cpuset]).
test_arg_values = parse_model_args_file()
@pytest.mark.parametrize("test_args,expected_cmd,comment,cpuset", test_arg_values)
@patch("os.mkdir")
@patch("shutil.rmtree")
@patch("os.listdir")
@patch("os.path.isdir")
@patch("os.path.isfile")
@patch("os.path.exists")
@patch("os.stat")
@patch("os.chdir")
@patch("os.remove")
@patch("glob.glob")
@patch("common.platform_util.PlatformUtil._get_cpuset")
@patch("common.platform_util.os")
@patch("common.platform_util.system_platform")
@patch("common.platform_util.subprocess")
@patch("common.base_model_init.BaseModelInitializer.run_command")
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, mock_os, mock_get_cpuset,
                       mock_glob, mock_remove, mock_chdir, mock_stat, mock_path_exists,
                       mock_is_file, mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir,
                       test_args, expected_cmd, comment, cpuset):
    """
    Runs through executing the specified run_tf_benchmarks.py command from the
    test_args and verifying that the model_init file calls run_command with
    the expected_cmd string.

    NOTE: the mock parameters are injected bottom-up relative to the
    @patch decorators above — keep both lists in the same relative order.
    """
    print("****** Running The {} test ******".format(comment))
    os.environ["PYTHON_EXE"] = "python"
    # MPI-related env vars are derived from the command line under test.
    if "mpi" not in test_args:
        os.environ["MPI_NUM_PROCESSES"] = "None"
        os.environ["MPI_HOSTNAMES"] = "None"
    else:
        if "--mpi_num_processes=" in test_args:
            match_mpi_procs = re.search('--mpi_num_processes=([0-9]+)', test_args)
            if match_mpi_procs and match_mpi_procs.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES"] = match_mpi_procs.group(1)
        if "--mpi_num_processes_per_socket=" in test_args:
            match_per_socket = re.search('--mpi_num_processes_per_socket=([0-9]+)', test_args)
            if match_per_socket and match_per_socket.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES_PER_SOCKET"] = match_per_socket.group(1)
    # NOTE(review): side_effect=True is neither callable nor iterable; if
    # this mock is ever invoked it will raise — likely meant
    # return_value=True. TODO confirm.
    mock_os.path.exists.side_effect = True
    mock_get_cpuset.return_value = cpuset
    mock_is_dir.return_value = True
    mock_is_file.return_value = True
    mock_stat.return_value = MagicMock(st_nlink=0)
    parse_model_args_file()
    mock_listdir.return_value = ["data.record"]
    mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"]
    clear_kmp_env_vars()
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_args = re.sub(" +", " ", test_args)  # get rid of extra spaces in the test_args string
    expected_cmd = re.sub(" +", " ", expected_cmd)  # get rid of extra spaces in the expected_cmd string
    test_arg_list = test_args.split(" ")
    # Run the benchmark CLI with the test args substituted for sys.argv.
    with patch.object(sys, "argv", test_arg_list):
        model_benchmark = ModelBenchmarkUtil()
        model_benchmark.main()
    assert len(mock_run_command.call_args_list) == 1
    call_args = mock_run_command.call_args_list[0][0][0]
    # python3 argparse parses things in different order than python2
    # we'll check that the args are all there though
    for actual_arg, expected_arg in zip(sorted(call_args.split()), sorted(expected_cmd.split())):
        # use fnmatch in case we have file names with wildcards (like timestamps in output files)
        assert fnmatch.fnmatch(actual_arg, expected_arg), \
            "Expected: {}\nActual: {}".format(expected_cmd, call_args)
@pytest.mark.parametrize("test_args,socket_id,cpuset",
                         [["run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 "
                           "--mode inference --model-name inceptionv3 --batch-size 128 "
                           "--in-graph /final_int8_inceptionv3.pb --intelai-models . --socket-id 1 "
                           "--benchmark-only", "1", "0-2"],
                          ["run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 "
                           "--mode inference --model-name inceptionv3 --batch-size 128 "
                           "--in-graph /final_int8_inceptionv3.pb --intelai-models . --socket-id 0 "
                           "--benchmark-only", "0", "50-55"]])
@patch("os.mkdir")
@patch("shutil.rmtree")
@patch("os.listdir")
@patch("os.path.isdir")
@patch("os.path.isfile")
@patch("os.path.exists")
@patch("os.stat")
@patch("os.chdir")
@patch("os.remove")
@patch("glob.glob")
@patch("common.platform_util.PlatformUtil._get_cpuset")
@patch("common.platform_util.os")
@patch("common.platform_util.system_platform")
@patch("common.platform_util.subprocess")
@patch("common.base_model_init.BaseModelInitializer.run_command")
def test_run_benchmark_bad_socket(mock_run_command, mock_subprocess, mock_platform, mock_os, mock_get_cpuset,
                                  mock_glob, mock_remove, mock_chdir, mock_stat, mock_path_exists,
                                  mock_is_file, mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir,
                                  test_args, socket_id, cpuset):
    """
    Checks to ensure that the proper error handling is done when the cpuset does not include any cores
    for the specified socket_id

    NOTE: the mock parameters are injected bottom-up relative to the
    @patch decorators above — keep both lists in the same relative order.
    """
    os.environ["PYTHON_EXE"] = "python"
    # MPI-related env vars are derived from the command line under test.
    if "mpi" not in test_args:
        os.environ["MPI_NUM_PROCESSES"] = "None"
        os.environ["MPI_HOSTNAMES"] = "None"
    else:
        if "--mpi_num_processes=" in test_args:
            match_mpi_procs = re.search('--mpi_num_processes=([0-9]+)', test_args)
            if match_mpi_procs and match_mpi_procs.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES"] = match_mpi_procs.group(1)
        if "--mpi_num_processes_per_socket=" in test_args:
            match_per_socket = re.search('--mpi_num_processes_per_socket=([0-9]+)', test_args)
            if match_per_socket and match_per_socket.lastindex >= 1:
                os.environ["MPI_NUM_PROCESSES_PER_SOCKET"] = match_per_socket.group(1)
    # NOTE(review): side_effect=True is neither callable nor iterable; if
    # this mock is ever invoked it will raise — likely meant
    # return_value=True. TODO confirm.
    mock_os.path.exists.side_effect = True
    mock_get_cpuset.return_value = cpuset
    mock_is_dir.return_value = True
    mock_is_file.return_value = True
    mock_stat.return_value = MagicMock(st_nlink=0)
    parse_model_args_file()
    mock_listdir.return_value = ["data.record"]
    mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"]
    clear_kmp_env_vars()
    platform_config.set_mock_system_type(mock_platform)
    platform_config.set_mock_os_access(mock_os)
    platform_config.set_mock_lscpu_subprocess_values(mock_subprocess)
    test_args = re.sub(" +", " ", test_args)  # get rid of extra spaces in the test_args string
    test_arg_list = test_args.split(" ")
    # The CLI is expected to abort with the socket-id error message.
    with pytest.raises(SystemExit,
                       match="ERROR: There are no socket id {} cores in the cpuset.".format(socket_id)):
        with patch.object(sys, "argv", test_arg_list):
            model_benchmark = ModelBenchmarkUtil()
            model_benchmark.main()
|
"""The 'Model' object will be used to configure and run a simulation for one islet. Instances are intended to be run in parallel and update a database file upon completion"""
import pickle
import configparser
import ast
import sys
import os
import sqlite3
import datetime
import numpy as np
import re
from matplotlib import pyplot as plt
import Loss
import Islet
from Helper import *
# headers for data
# Column names in the reference (experimental) CSV — presumably time and
# a measured potential; confirm against the data files.
reference = ['t', 'Vp']
# Corresponding column names in the simulator's output CSV.
simulated = ['Time', 'VC0']
class Model:
    def __init__(self, gid, run, alpha, beta, n, data, mean, threshold, slope):
        """Initialize model instance.

        Configures the shared Islet environment, runs one islet
        simulation, scores it against the reference data, records the
        result in the run database, and cleans up.

        Args:
            gid: identifier of this model instance within the run.
            run: run identifier; also names the SQLite database file.
            alpha, beta: simulation parameters passed to Islet (as floats).
            n: islet size parameter (cast to int).
            data: path to the reference CSV used for scoring.
            mean, threshold, slope: Loss-function settings (cast to int
                in score()).

        NOTE(review): this constructor has heavy side effects — it mutates
        the module-level Islet.env and calls os.chdir().
        """
        print(str(datetime.datetime.now()) + '\tModel.init')
        # set object variables
        self.gid = gid
        self.data = data
        self.mean = mean
        self.slope = slope
        self.threshold = threshold
        self.db = Islet.env['wd'] + 'run_' + run + '.db'
        # set environment variables
        Islet.env['rid'] = run
        Islet.env['gid'] = gid
        # Per-instance working directory appended to the shared base path.
        Islet.env['wd'] += 'Islet_' + run + '_' + self.gid + '/'
        os.chdir(Islet.env['wd'])
        # create and run islet instance
        print(str(datetime.datetime.now()) + '\tModel.init Create islet', Islet.env['wd'])
        self.islet = Islet.Islet([float(alpha), float(beta)], None, int(n), self.gid)
        self.islet.run()
        # Score, persist, and clean up immediately after the run.
        self.score()
        self.updateDatabase()
        self.clean()
def score(self):
"""Score model"""
print(str(datetime.datetime.now()) + '\tModel.score Score instance')
scores = []
# load reference data
ref = np.genfromtxt(self.data, delimiter=',', names=True)
# initialize loss function
path = self.data.split('/')[:len(self.data.split('/'))-1]
path = '/'.join(path) + '/Loss.png'
loss = Loss.Loss(int(self.mean), int(self.slope), int(self.threshold), path)
output_islet_path = Islet.env['output'] + 'Islet_' + Islet.env['rid'] + '_' + self.gid + '/'
for output in os.listdir(output_islet_path ):
if 'csv' in output:
# load simulated data
sim = np.genfromtxt(output_islet_path+ '/' + output, delimiter=',', names=True)
print(str(datetime.datetime.now()) + '\tModel.score Length of reference and experimental data', len(ref[reference[1]]), len(sim[simulated[1]]))
# normalize data to timescale with larger steps and shorter interval
# determine smaller time step
multiple = getMultiple(ref[reference[0]], sim[simulated[0]])
sim_normalized = sim[simulated[1]][::int(multiple)]
print(str(datetime.datetime.now()) + '\tModel.score Large time step / small time step', multiple, 'length of normalized smaller time step data', len(sim_normalized), len(ref[reference[0]]))
# determine smaller sample size
size_normalized = getSize(ref[reference[0]], sim_normalized)
print(str(datetime.datetime.now()) + '\tModel.score Minimum sample size', len(ref[reference[1]]), len(sim_normalized), size_normalized)
# cut off simulated and reference data at same place
ref[reference[1]] = ref[reference[1]][:size_normalized]
sim_normalized = sim_normalized[:size_normalized]
print(str(datetime.datetime.now()) + '\tModel.score Lengths of reference and simulated data after normalization', len(ref[reference[1]]), len(sim_normalized))
# subtract one array from the other
output_data = []
for val in range(len(ref[reference[1]])):
output_data.append(ref[reference[1]][val] - sim_normalized[val])
save = Islet.env['wd'] + re.split('\.csv', output)[0] + '.png'
print(str(datetime.datetime.now()) + '\tModel.score Path to save difference plot', save)
# plt.clf()
# plt.title('Difference Between Reference and Simulated Data')
# plt.xlabel('Time (ms)')
# plt.ylabel('Membrane Potential (mV)')
# plt.plot(output_data)
# plt.savefig(save)
scores.append(loss.getLoss(sum(output_data)))
print(str(datetime.datetime.now()) + '\tModel.score Cell score', scores)
print(str(datetime.datetime.now()) + '\tModel.score Islet score', sum(scores)/len(scores))
# save data to appropriate file
output_generation_path = Islet.env['output'] + 'Islets_' + Islet.env['rid'] + '_' + self.gid.split('_')[0]
output_generation_file = '/Islet_' + Islet.env['rid'] + '_' + self.gid + '.pl'
os.system('mkdir -p ' + output_generation_path)
dump = open(output_generation_path + output_generation_file, 'wb')
pickle.dump([sum(scores)/len(scores), scores], dump)
def updateDatabase(self):
"""Update database so that GA is aware that this process has concluded"""
print(str(datetime.datetime.now()) + '\tModel.updateDatabase Update database: islet', self.gid)
conn = sqlite3.connect(self.db)
c = conn.cursor()
generation = sys.argv[1].split('_')[0]
islet = sys.argv[1].split('_')[1]
c.execute('UPDATE COMPLETED SET ISLET_' + islet + ' = 1 WHERE GENERATION = ' + generation)
conn.commit()
c.close()
def clean(self):
"""Compress and remove folders"""
print(str(datetime.datetime.now()) + '\tModel.clean Compress folders: output folder', Islet.env['output'] + 'Islet_' + Islet.env['rid'] + '_' + self.gid, 'run folder', Islet.env['wd'][:len(Islet.env['wd'])-1])
output_islet_path = Islet.env['output'] + 'Islet_' + Islet.env['rid'] + '_' + self.gid
run_islet_path = Islet.env['wd'][:len(Islet.env['wd'])-1]
# output folder
os.system('tar -zcvf ' + output_islet_path + '.tar.gz ' + output_islet_path)
# run folder
os.system('tar -zcvf ' + run_islet_path + '.tar.gz ' + run_islet_path)
print(str(datetime.datetime.now()) + '\tModel.clean Compressed folders')
# reduce size of output and run folders
os.system('rm -r ' + output_islet_path)
os.system('rm -r ' + run_islet_path)
if __name__ == '__main__':
    # Command-line entry point, e.g.:
    # python Model.py 1_0 0 0.15 0.75 4 /blue/lamb/tikaharikhanal/Model-of-Pancreatic-Islets/Main/Run/data/fridlyand_VCell_vp.csv -40 20 1
    # argv[1:10] maps onto (gid, run, alpha, beta, n, data, mean, threshold, slope)
    Model(*sys.argv[1:10])
|
# Exercise 006 (corrected): show the double, triple and square root of a number.
print('Exercício 006')
print()
# read a real number from the user
numero = float(input('Informe um número: '))
print()
# derived values
dobro = numero * 2
triplo = numero * 3
raiz = numero ** 0.5
# report all three results in a single formatted message
print('O dobro de {} é : {} \nO triplo de {} é: {} \nA raiz quadrada de {} é: {}.'.format(
    numero, dobro, numero, triplo, numero, raiz))
print()
|
#!/usr/bin/env python
# coding: utf-8

# Kaggle kernel boilerplate: standard analytics imports plus a listing of the
# competition's mounted input directory (../input holds train.csv / test.csv).

import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

import os
# show which competition files are available under ../input
print(os.listdir("../input"))

# Any results written to the current directory are saved as kernel output.
# In[ ]:
"""
Created on Thu Nov 22 12:00:30 2018
@author: bhgajula
"""
#importing the libraries
import pandas as pd
#importing the dataset
training_dataset = pd.read_csv('../input/train.csv')
test_dataset = pd.read_csv('../input/test.csv')
survival = training_dataset.iloc[:,1].values
# defining a function to find out to which deck the passenger belongs to from cabin
def Deck(x):
    """Return the deck letter (first character of the cabin code).

    Missing cabins arrive from pandas as float('nan'), whose str() is 'nan';
    those are reported as None instead of a deck letter.
    """
    if str(x) != 'nan':
        return str(x)[0]
    # explicit None instead of the previous implicit bare return
    return None
###
# Combine train + test into one feature frame (drop Survived, ids, tickets)
dataset = training_dataset.drop(columns=["Survived"]).append(test_dataset).drop(columns=["PassengerId", "Ticket"])
# Feature engineering
dataset["hasParents"] = dataset["Parch"].apply(lambda x: (x>0)*1)  # 1 when any parents/children aboard
dataset["hasSiblings"] = dataset["SibSp"].apply(lambda x: (x>0)*1)  # 1 when any siblings/spouse aboard
dataset["Deck"] = dataset["Cabin"].apply(Deck)  # deck letter extracted from the cabin code
dataset["Title"] = dataset["Name"].str.extract(r' ([A-Za-z]+\.)', expand=False)  # honorific from the name
dataset = dataset.drop(columns=["Parch","SibSp","Cabin","Name"], axis=1)  # drop the now-redundant source columns
# Encode the categorical variable Sex as 0/1
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
dataset["Sex"] = le.fit_transform(dataset["Sex"])
# Impute missing values: most common port, and medians for Age/Fare
dataset["Embarked"].fillna('S', inplace=True)
dataset["Age"].fillna(dataset["Age"].median(), inplace=True)
dataset["Fare"].fillna(dataset["Fare"].median(), inplace=True)
# Bin Age into ordinal buckets 0-4
dataset.loc[(dataset["Age"]<=18), "Age"] = 0
dataset.loc[(dataset["Age"]>18) & (dataset["Age"]<=30), "Age"] = 1
dataset.loc[(dataset["Age"]>30) & (dataset["Age"]<=50), "Age"] = 2
dataset.loc[(dataset["Age"]>50) & (dataset["Age"]<=65), "Age"] = 3
# BUG FIX: this line was a no-op (missing assignment), leaving raw ages > 65
dataset.loc[(dataset["Age"]>65), "Age"] = 4
# Bin Fare into ordinal buckets 0-3 (quartile-style cut points)
dataset.loc[(dataset["Fare"]<=7.91), "Fare"] = 0
dataset.loc[(dataset["Fare"]>7.91) & (dataset["Fare"]<=14.454), "Fare"] = 1
dataset.loc[(dataset["Fare"]>14.454) & (dataset["Fare"]<=31), "Fare"] = 2
dataset.loc[(dataset["Fare"])>31, "Fare"] = 3
# Converting the fare as int datatype
dataset["Fare"] = dataset["Fare"].astype(int)
# Pclass as string so get_dummies one-hot encodes it with the other categoricals
dataset["Pclass"] = dataset["Pclass"].astype("str")
# OneHotEncoding the full feature frame
dataset = pd.get_dummies(dataset)
#Taking only 891 records of training set to build a model
# (the first `ml_model` rows of `dataset` correspond to the labelled train.csv)
ml_model=training_dataset.shape[0]
X = dataset[:ml_model]
y = survival
#Dividing the dataset into training set and test set
# NOTE(review): no random_state is set, so the split (and every accuracy
# below) varies between runs — confirm whether that is intended.
from sklearn.model_selection import train_test_split
X_train, X_test ,y_train ,y_test = train_test_split(X, y, train_size=0.9, test_size=0.1)
#Building the DecisionTreeClassifier model
from sklearn.tree import DecisionTreeClassifier
classifier0 = DecisionTreeClassifier(criterion="entropy", splitter="best", random_state = 0)
classifier0.fit(X_train,y_train)
#predicting the output
y_pred0 = classifier0.predict(X_test)
#knowing the confusion matrix
from sklearn.metrics import confusion_matrix
cm0=confusion_matrix(y_test,y_pred0)
# NOTE(review): .score(X_train, y_train) reports TRAINING accuracy, not
# validation accuracy — the ##-numbers below overstate generalization.
#accuracy of decision tree classifier
acc_dtc = round(classifier0.score(X_train, y_train) * 100, 2)
acc_dtc
##91.39
###
# Fitting Logistic Regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier1 = LogisticRegression(solver='liblinear', random_state = 0)
classifier1.fit(X_train, y_train)
#predicting the output
y_pred1 = classifier1.predict(X_test)
#knowing the confusion matrix
cm1=confusion_matrix(y_test,y_pred1)
#Accuracy of logistic regression (training accuracy, see note above)
acc_log = round(classifier1.score(X_train, y_train) * 100, 2)
acc_log
##82.65
###
###
# Fitting K-NN to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier2 = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier2.fit(X_train, y_train)
#predicting the output
y_pred2 = classifier2.predict(X_test)
# BUG FIX: the confusion matrix was computed from y_pred0 (the decision
# tree's predictions); use this classifier's own predictions instead.
cm2=confusion_matrix(y_test,y_pred2)
#accuracy of k nearest neighbour (training accuracy)
acc_knn = round(classifier2.score(X_train, y_train) * 100, 2)
acc_knn
##86.27
###
# Fitting SVM to the Training set
# (all acc_* values below are TRAINING accuracy — see note in the DTC section)
from sklearn.svm import SVC
classifier3 = SVC(kernel = 'linear',gamma='auto', random_state = 0)
classifier3.fit(X_train, y_train)
#predicting the output
y_pred3 = classifier3.predict(X_test)
#knowing the confusion matrix
cm3=confusion_matrix(y_test,y_pred3)
#accuracy of support vector machine classifier
acc_svc = round(classifier3.score(X_train, y_train) * 100, 2)
acc_svc
##79.53
###
# Fitting Kernel SVM to the Training set
from sklearn.svm import SVC
classifier4 = SVC(kernel = 'rbf', gamma='auto', random_state = 0)
classifier4.fit(X_train, y_train)
#predicting the output
y_pred4 = classifier4.predict(X_test)
#knowing the confusion matrix
cm4=confusion_matrix(y_test,y_pred4)
#acuracy of kernel svm
acc_ksvm = round(classifier4.score(X_train, y_train) * 100, 2)
acc_ksvm
##79.28
###
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier5 = GaussianNB()
classifier5.fit(X_train, y_train)
#predicting the output
y_pred5 = classifier5.predict(X_test)
#knowing the confusion matrix
cm5=confusion_matrix(y_test,y_pred5)
#accuracy of naive bayes
acc_nb = round(classifier5.score(X_train, y_train) * 100, 2)
acc_nb
###
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier6 = RandomForestClassifier(n_estimators = 50, criterion = 'entropy', random_state = 0)
classifier6.fit(X_train, y_train)
#predicting the output
y_pred6 = classifier6.predict(X_test)
#knowing the confusion matrix
cm6=confusion_matrix(y_test,y_pred6)
#accuracy of random forest classifier
acc_rfc = round(classifier6.score(X_train, y_train) * 100, 2)
acc_rfc
##91.39
###
# Fitting XGBoost to the Training set
from xgboost import XGBClassifier
classifier7 = XGBClassifier(booster='gbtree', silent=1, seed=0, base_score=0.5, subsample=0.75)
classifier7.fit(X_train, y_train)
#predicting the output
y_pred7 = classifier7.predict(X_test)
#knowing the confusion matrix
cm7=confusion_matrix(y_test,y_pred7)
#accuracy of the xg boost
acc_xgb = round(classifier7.score(X_train, y_train) * 100, 2)
acc_xgb
##85.02
#Now after knowing the best model for predicting the accuracy of survival using it on train.csv
#building the model for getting the results on the train.csv
# Final model: refit XGBoost on ALL labelled rows, then predict the test rows
# (rows ml_model: of `dataset` are the unlabelled test.csv passengers).
from xgboost import XGBClassifier
CLASSIFIER = XGBClassifier(booster='gbtree', silent=1, seed=0, base_score=0.5, subsample=0.75)
CLASSIFIER.fit(X,y)
Z = dataset[ml_model:]
Z_pred=CLASSIFIER.predict(Z)
# Kaggle submission file: one Survived prediction per test PassengerId
titanic_submission = pd.DataFrame({
    "PassengerId": test_dataset["PassengerId"],
    "Survived": Z_pred
})
titanic_submission.to_csv('titanic_submission.csv', index=False)
|
from django.conf.urls import url, include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from rest_framework import routers
from places.apis_views import PlaceViewSet, GeoJsonViewSet
from vocabs import api_views
# DRF router exposing the JSON APIs under /api/
router = routers.DefaultRouter()
# NOTE(review): `base_name` is the pre-DRF-3.9 keyword (later renamed
# `basename`) — consistent with the old-style `url()` imports used here.
router.register(r'geojson', GeoJsonViewSet, base_name='places')
router.register(r'skoslabels', api_views.SkosLabelViewSet)
router.register(r'skosnamespaces', api_views.SkosNamespaceViewSet)
router.register(r'skosconceptschemes', api_views.SkosConceptSchemeViewSet)
router.register(r'skosconcepts', api_views.SkosConceptViewSet)
router.register(r'places', PlaceViewSet)

urlpatterns = [
    url(r'^api/', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
    url(r'^admin/', admin.site.urls),
    url(r'^arche/', include('arche.urls', namespace='arche')),
    url(r'^images/', include('images.urls', namespace='images')),
    url(r'^documents/', include('documents.urls', namespace='documents')),
    url(r'^browsing/', include('browsing.urls', namespace='browsing')),
    url(r'^sparql/', include('sparql.urls', namespace='sparql')),
    url(r'^vocabs/', include('vocabs.urls', namespace='vocabs')),
    url(r'^vocabs-ac/', include('vocabs.dal_urls', namespace='vocabs-ac')),
    url(r'^datamodel/', include('django_spaghetti.urls', namespace='datamodel')),
    url(r'^places/', include('places.urls', namespace='places')),
    # catch-all pattern: must stay last or it shadows everything below it
    url(r'^', include('webpage.urls', namespace='webpage')),
]

# serve uploaded media straight from Django in development only
if settings.DEBUG is True:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import pymysql
from elasticsearch import Elasticsearch
from twisted.enterprise import adbapi
from tencentComment.utils.global_list import COMMENT_INDEX, COMMENT_TYPE
from tencentComment.utils.global_list import HOST_PORT
class TencentcommentPipeline(object):
    """Default no-op pipeline: passes every item through unchanged."""

    def process_item(self, item, spider):
        return item
class JsonWithEncodingPipeline(object):
    """
    Index scraped comment items into Elasticsearch.

    Writing the JSON to the local file system is intentionally disabled
    (see the commented-out lines below); items only go to ES.
    """

    def __init__(self):
        # local file output disabled:
        # self.file = codecs.open(get_comment_file_system_path() + get_now_date(), 'w', encoding="utf-8")
        self.es = Elasticsearch([HOST_PORT])

    def process_item(self, item, spider):
        """
        Serialize the item and index it into Elasticsearch only
        (no local file copy). Returns the item for later pipelines.
        """
        body = json.dumps(dict(item), ensure_ascii=False)
        self.es.index(index=COMMENT_INDEX, doc_type=COMMENT_TYPE, body=body, id=None)
        return item

    def spider_closed(self, spider):
        # BUG FIX: self.file is never created (its assignment is commented out
        # in __init__), so closing unconditionally raised AttributeError.
        f = getattr(self, 'file', None)
        if f is not None:
            f.close()
class MysqlPipeline(object):
    """
    Synchronous MySQL insert pipeline.

    Not enabled in ITEM_PIPELINES; kept as a reference implementation
    alongside the asynchronous Twisted variant below.
    """

    def __init__(self):
        self.conn = pymysql.connect(host='spark2', port=3306, user='root', passwd='mysql', db='comment_spider',
                                    use_unicode=True, charset="utf8")
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        """Insert one news item and hand it on to any later pipeline."""
        insert_sql = '''
            insert into tb_news(title,create_date,url,content) VALUES (%s,%s,%s,%s)
        '''
        self.cursor.execute(insert_sql, (
            item["title"], item["create_date"], item["url"], item["content"]))
        self.conn.commit()
        # FIX: Scrapy drops the item for subsequent pipelines when None is returned
        return item

    def close_spider(self, spider):
        # FIX: release the cursor and connection when the spider finishes
        self.cursor.close()
        self.conn.close()
class MysqlTwistedPipeline(object):
    """
    Asynchronous MySQL insert pipeline built on Twisted's adbapi pool.
    """

    def __init__(self, dbpool):
        self.dbpool = dbpool

    @classmethod
    def from_settings(cls, settings):
        """Build the connection pool from the project's MYSQL_* settings."""
        dbparms = dict(
            host=settings["MYSQL_HOST"],
            port=settings["MYSQL_PORT"],
            user=settings["MYSQL_USER"],
            passwd=settings["MYSQL_PASSWD"],
            db=settings["MYSQL_DB"],
            use_unicode=True,
            charset="utf8",
        )
        dbpool = adbapi.ConnectionPool("pymysql", **dbparms)
        return cls(dbpool)

    def process_item(self, item, spider):
        """
        Schedule the insert on the pool so it runs asynchronously.

        :param item: the scraped item to persist
        :param spider: the spider that produced it
        :return: the item, so later pipelines still receive it
        """
        query = self.dbpool.runInteraction(self.do_insert, item)
        query.addErrback(self.handle_error)
        # FIX: returning nothing dropped the item for any later pipeline
        return item

    def handle_error(self, failure):
        # log failures raised by the asynchronous insert
        print(failure)

    def do_insert(self, cursor, item):
        """Run the actual INSERT inside the pool's transaction."""
        insert_sql = '''
            insert into tb_news(title,create_date,url,content) VALUES (%s,%s,%s,%s)
        '''
        cursor.execute(insert_sql, (
            item["title"], item["create_date"], item["url"], item["content"]))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the twitter app:
    replaces Tweet.secret with a FK to TwitterSecret via its handler column."""

    dependencies = [
        ('twitter', '0026_auto_20160619_1941'),
    ]

    operations = [
        # drop the old per-tweet secret in favour of the handler FK below
        migrations.RemoveField(
            model_name='tweet',
            name='secret',
        ),
        migrations.AddField(
            model_name='tweet',
            name='handler',
            field=models.ForeignKey(db_column=b'handler', to_field=b'handler', blank=True, to='twitter.TwitterSecret', null=True),
        ),
        # NOTE(review): both UUID defaults below are fixed literals captured at
        # makemigrations time (every new row would share the same default) —
        # presumably the model itself uses a uuid.uuid4 callable; verify before
        # relying on these historical values.
        migrations.AlterField(
            model_name='tweet',
            name='uuid',
            field=models.UUIDField(default=b'e020d06f-1555-42e0-9891-7a639e99405d', editable=False, unique=True, auto_created=True),
        ),
        migrations.AlterField(
            model_name='twittersecret',
            name='uuid',
            field=models.UUIDField(default=b'955c50c5-e701-4d10-b931-13fe919feece', unique=True, editable=False),
        ),
    ]
|
def isValidCommand(command):
    """Return True when the first word of `command` is a verb this parser
    knows ("eat" or "examine"); matching is case-insensitive.

    FIX: previously fell through with an implicit None for unknown verbs;
    now returns an explicit boolean (same truthiness for existing callers).
    """
    verb = command.lower().split(" ")[0]
    return verb in ("eat", "examine")
def processCommand(command, area):
    """Execute a parsed command against `area` (only "examine" is handled:
    it lists the names of the objects in the room)."""
    verb = command.lower().split(" ")[0]
    if verb == "examine":
        print("This room contains: ")
        for obj in area.objects:
            print(obj.name)
# Configuration for the "blizzard" Twitter analysis: output file paths,
# tuning constants, and Twilio SMS notification credentials.
DESKTOP = 'C:\\Users\\andre\\Desktop\\'

# File Locations
# ==================================================
# Generated through get_from_twitter
STREAM_DATA_TXT = DESKTOP + 'tweepy_output\\blizzard_stream_data.txt'
STREAM_DATAFRAME_CSV = DESKTOP + 'blizzard_stream_dataframe.csv'  # UPDATE AFTER PROTOTYPING
TARGET_STATUSES_TXT = DESKTOP + 'tweepy_output\\blizzard_statuses_data.txt'
STATUSES_DATAFRAME_CSV = DESKTOP + 'tweepy_output\\blizzard_statuses_dataframe.csv'
FOLLOWER_DATA_CSV = DESKTOP + 'tweepy_output\\blizzard_follower_data.csv'
FOLLOWER_FRIENDS_CSV = DESKTOP + 'tweepy_output\\blizzard_follower_friends.csv'
TOP_FRIENDS_FOLLOWED_CSV = DESKTOP + 'tweepy_output\\blizzard_top_friends_followed.csv'

# Generate through explore_twitter_data
FOLLOWERS_MOST_FRIENDED_PDF = DESKTOP + 'tweepy_output\\blizzard_followers_most_friended.pdf'
SPARSE_FRIENDS_MATRIX_CSV = DESKTOP + 'tweepy_output\\blizzard_sparse_matrix.csv'
SPARSE_MATRIX_WLABELS_CSV = DESKTOP + 'tweepy_output\\blizzard_sparse_matrix_wlabels.csv'
TOKENIZED_DESCRIPTIONS = DESKTOP + 'tweepy_output\\blizzard_tokenized_descriptions.csv'
LABELED_DESCRIPTIONS = DESKTOP + 'tweepy_output\\blizzard_labeled_descriptions.csv'

# Constants
# ==================================================
TOP_N = 151
CLUSTERS = 0

# Notifications (Twilio SMS client, configured from a local credentials CSV)
# ==================================================
from twilio.rest import Client
from pandas import read_csv

credentials = read_csv(DESKTOP + 'twilio_credentials.csv', delimiter=',', index_col=None)
ACCOUNT_SID = credentials['account_sid'][0]
AUTH_TOKEN = credentials['auth_token'][0]
PHONUE_NUM = credentials['phone_number'][0]
# FIX: correctly-spelled alias; PHONUE_NUM is kept for existing callers
PHONE_NUM = PHONUE_NUM
client = Client(ACCOUNT_SID, AUTH_TOKEN)
import json
from chargebee.model import Model
from chargebee import request
from chargebee import APIError
class Customer(Model):
    """Chargebee 'Customer' API resource binding.

    The nested Model subclasses describe structured sub-objects returned by
    the API; each static method wraps one REST endpoint through the shared
    `request` helper (`env`/`headers` are optional per-call overrides).
    """

    # ---- nested response sub-objects ----
    class BillingAddress(Model):
        fields = ["first_name", "last_name", "email", "company", "phone", "line1", "line2", "line3", "city", "state_code", "state", "country", "zip", "validation_status"]
        pass

    class ReferralUrl(Model):
        fields = ["external_customer_id", "referral_sharing_url", "created_at", "updated_at", "referral_campaign_id", "referral_account_id", "referral_external_campaign_id", "referral_system"]
        pass

    class Contact(Model):
        fields = ["id", "first_name", "last_name", "email", "phone", "label", "enabled", "send_account_email", "send_billing_email"]
        pass

    class PaymentMethod(Model):
        fields = ["type", "gateway", "gateway_account_id", "status", "reference_id"]
        pass

    class Balance(Model):
        fields = ["promotional_credits", "excess_payments", "refundable_credits", "unbilled_charges", "currency_code", "balance_currency_code"]
        pass

    class EntityIdentifier(Model):
        fields = ["id", "value", "scheme", "standard"]
        pass

    class Relationship(Model):
        fields = ["parent_id", "payment_owner_id", "invoice_owner_id"]
        pass

    class ParentAccountAccess(Model):
        fields = ["portal_edit_child_subscriptions", "portal_download_child_invoices", "send_subscription_emails", "send_invoice_emails", "send_payment_emails"]
        pass

    class ChildAccountAccess(Model):
        fields = ["portal_edit_subscriptions", "portal_download_invoices", "send_subscription_emails", "send_invoice_emails", "send_payment_emails"]
        pass

    # top-level attributes exposed by the Customer resource
    fields = ["id", "first_name", "last_name", "email", "phone", "company", "vat_number", "auto_collection", \
    "offline_payment_method", "net_term_days", "vat_number_validated_time", "vat_number_status", \
    "allow_direct_debit", "is_location_valid", "created_at", "created_from_ip", "exemption_details", \
    "taxability", "entity_code", "exempt_number", "resource_version", "updated_at", "locale", "billing_date", \
    "billing_month", "billing_date_mode", "billing_day_of_week", "billing_day_of_week_mode", "pii_cleared", \
    "auto_close_invoices", "channel", "card_status", "fraud_flag", "primary_payment_source_id", \
    "backup_payment_source_id", "billing_address", "referral_urls", "contacts", "payment_method", \
    "invoice_notes", "business_entity_id", "preferred_currency_code", "promotional_credits", "unbilled_charges", \
    "refundable_credits", "excess_payments", "balances", "entity_identifiers", "is_einvoice_enabled", \
    "einvoicing_method", "meta_data", "deleted", "registered_for_gst", "consolidated_invoicing", \
    "customer_type", "business_customer_without_vat_number", "client_profile_id", "relationship", \
    "use_default_hierarchy_settings", "parent_account_access", "child_account_access", "vat_number_prefix", \
    "entity_identifier_scheme", "entity_identifier_standard"]

    # ---- CRUD endpoints ----
    @staticmethod
    def create(params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers"), params, env, headers)

    @staticmethod
    def list(params=None, env=None, headers=None):
        return request.send_list_request('get', request.uri_path("customers"), params, env, headers)

    @staticmethod
    def retrieve(id, env=None, headers=None):
        return request.send('get', request.uri_path("customers",id), None, env, headers)

    @staticmethod
    def update(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id), params, env, headers)

    @staticmethod
    def update_payment_method(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"update_payment_method"), params, env, headers)

    @staticmethod
    def update_billing_info(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"update_billing_info"), params, env, headers)

    # ---- contacts ----
    @staticmethod
    def contacts_for_customer(id, params=None, env=None, headers=None):
        return request.send('get', request.uri_path("customers",id,"contacts"), params, env, headers)

    @staticmethod
    def assign_payment_role(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"assign_payment_role"), params, env, headers)

    @staticmethod
    def add_contact(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"add_contact"), params, env, headers)

    @staticmethod
    def update_contact(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"update_contact"), params, env, headers)

    @staticmethod
    def delete_contact(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"delete_contact"), params, env, headers)

    # ---- credits and payments ----
    @staticmethod
    def add_promotional_credits(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"add_promotional_credits"), params, env, headers)

    @staticmethod
    def deduct_promotional_credits(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"deduct_promotional_credits"), params, env, headers)

    @staticmethod
    def set_promotional_credits(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"set_promotional_credits"), params, env, headers)

    @staticmethod
    def record_excess_payment(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"record_excess_payment"), params, env, headers)

    @staticmethod
    def collect_payment(id, params, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"collect_payment"), params, env, headers)

    # ---- lifecycle and account structure ----
    @staticmethod
    def delete(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"delete"), params, env, headers)

    @staticmethod
    def move(params, env=None, headers=None):
        return request.send('post', request.uri_path("customers","move"), params, env, headers)

    @staticmethod
    def change_billing_date(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"change_billing_date"), params, env, headers)

    @staticmethod
    def merge(params, env=None, headers=None):
        return request.send('post', request.uri_path("customers","merge"), params, env, headers)

    @staticmethod
    def clear_personal_data(id, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"clear_personal_data"), None, env, headers)

    @staticmethod
    def relationships(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"relationships"), params, env, headers)

    @staticmethod
    def delete_relationship(id, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"delete_relationship"), None, env, headers)

    @staticmethod
    def hierarchy(id, params, env=None, headers=None):
        return request.send('get', request.uri_path("customers",id,"hierarchy"), params, env, headers)

    @staticmethod
    def update_hierarchy_settings(id, params=None, env=None, headers=None):
        return request.send('post', request.uri_path("customers",id,"update_hierarchy_settings"), params, env, headers)
|
# Reads overtime hours and absences, then prints the matching bonus tier.
extras = float(input("numero de horas extras:"))
faltas = float(input("numero de faltas: "))
# balance: each absence cancels a quarter of an overtime hour
saldo = round(extras - (1 / 4 * faltas), 2)
print(extras, "extras e", faltas, "de falta")
# above 400 balance points the bonus is R$ 500, otherwise R$ 100
print("R$ 500.0" if saldo > 400 else "R$ 100.0")
|
import sys
sys.path.append('.')
from enum import Enum
from implementation.hash_table.hash_table import HashTable
class CollisionHandler(Enum):
    """Open-addressing probe strategies accepted by HashTableOA."""
    LINEAR_PROBE = 1
    QUADRATIC_PROBE = 2
    DOUBLE_HASH = 3
"""
Hash Table implementation using Open Addressing as its collision handling technique
There are 3 available options that can be chosen during instantiation:
- LINEAR_PROBE
- QUADRATIC_PROBE
- DOUBLE_HASH
If omitted, defaults to LINEAR_PROBE
"""
class HashTableOA(HashTable):
DELETED = 'DELETED'
def __init__(self, collision_handler = None, default_size = 13): # choose m length of list to be a prime number
HashTable.__init__(self, default_size)
self.handle_collision = self.get_open_addressing_map(collision_handler)
def get_open_addressing_map(self, method):
mapper = {
CollisionHandler.LINEAR_PROBE: self.linear_probe,
CollisionHandler.QUADRATIC_PROBE: self.quadratic_probe,
CollisionHandler.DOUBLE_HASH: self.double_hash,
}
return mapper.get(method, self.linear_probe)
def linear_probe(self, key, i):
"""
Linear Probing uses the following formula:
h(k, i) = (h'(k) + i) % m (CLRS Chapter 11)
where:
k = the key to be hashed
i = probe sequence
m = length of the list
"""
return (self.general_hash(key) + i) % len(self.storage)
def quadratic_probe(self, key, i):
"""
Quadratic Probing uses the following formula:
h(k, i) = (h'(k) + c1 * i + c2 * i^2) % m (CLRS Chapter 11)
where:
k = the key to be hashed
i = probe sequence
m = length of the list
c1 and c2 are positive auxiliary constants
to make full use of the hash table, the values of c1, c2, and m are constrained
"""
return (self.general_hash(key) + i + i**2) % len(self.storage)
def double_hash(self, key, probe):
"""
Double hashing uses the following formula:
h(k, i) = (h1(k) + i * h2(k)) % m (CLRS Chapter 11)
where:
h1 and h2 are auxiliary hash functions
to make full use of the hash table, the values of c1, c2, and m are constrained
"""
return (self.hash_1(key) + probe * self.hash_2(key)) % len(self.storage)
def general_hash(self, key):
return key % 7
def hash_1(self, key):
return key % len(self.storage)
def hash_2(self, key):
return key % (len(self.storage) - 1)
def double_size(self):
current_size = len(self.storage)
self.storage.extend([None for i in range (current_size)])
# data -- number
def insert(self, data):
"""Time complexity: O(1)"""
probe_num = 0
while (probe_num < len(self.storage)):
hashed_key = self.handle_collision(data, probe_num)
slot_value = self.storage[hashed_key]
if (slot_value is None or slot_value == self.DELETED):
self.storage[hashed_key] = data
return hashed_key
else:
probe_num += 1
self.double_size()
return self.insert(data)
def search(self, data):
"""Time complexity: O(1) on average"""
probe_number = 0
while (probe_number < len(self.storage)):
hashed_key = self.handle_collision(data, probe_number)
if (self.storage[hashed_key] == data):
return hashed_key
elif (self.storage[hashed_key] is None):
return None
else:
probe_number += 1
return None
def delete(self, data):
"""Time complexity: O(1)"""
self.storage[self.storage.index(data)] = self.DELETED
|
Python 3.7.3 (v3.7.3:ef4ec6ed12, Mar 25 2019, 22:22:05) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> a = 12
>>> b = 33
>>> c = a + b
>>> type(a)
<class 'int'>
>>> type(b)
<class 'int'>
>>> c
45
>>> a = "hello"
>>> type(a)
<class 'str'>
>>> a = 45
>>> a + b
78
>>> a - b
12
>>> a / b
1.3636363636363635
>>> a * b
1485
>>> a ** b
3597600662921626628600135655553196556866168975830078125
>>> a ** 2
2025
>>> a ** 3
91125
>>> 10 / 6
1.6666666666666667
>>> 10 // 6
1
>>> 100 // 6
16
>>> 100 / 6
16.666666666666668
>>> 10 % 6
4
>>> print(x := 10 + 20)
SyntaxError: invalid syntax
>>> a = 10
>>> id(a)
140717771613104
>>> id(20)
140717771613424
>>> a = 20
>>> b = a
>>> c = 20
>>> a is b
True
>>> b is c
True
>>> a is c
True
>>> y = 10 + 10
>>> y
20
>>> id(y)
140717771613424
>>> a is y
True
>>> b is y
True
>>> a = [2,4]
>>> b = a
>>> a
[2, 4]
>>> b
[2, 4]
>>> a == b
True
>>> a is b
True
>>> c = [2,4]
>>> a is c
False
>>> msg = "नमस्ते आप कैसे हैं ?"
>>> msg.encode()
b'\xe0\xa4\xa8\xe0\xa4\xae\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa5\x87 \xe0\xa4\x86\xe0\xa4\xaa \xe0\xa4\x95\xe0\xa5\x88\xe0\xa4\xb8\xe0\xa5\x87 \xe0\xa4\xb9\xe0\xa5\x88\xe0\xa4\x82 ?'
>>> x = msg.encode()
>>> x.decode()
'नमस्ते आप कैसे हैं ?'
>>>
|
# ML hw4 problem 3.
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
def elu(arr):
return np.where(arr > 0, arr, np.exp(arr) - 1)
def make_layer(in_size, out_size):
w = np.random.normal(scale=0.5, size=(in_size, out_size))
b = np.random.normal(scale=0.5, size=out_size)
return (w, b)
def forward(inpd, layers):
    """Propagate `inpd` through every (w, b) layer with an ELU after each."""
    activation = inpd
    for w, b in layers:
        activation = elu(activation @ w + b)
    return activation
def gen_data(dim, layer_dims, N):
    """Generate N samples of `dim`-dimensional Gaussian noise pushed
    through a randomly-initialised MLP.

    The hidden-layer widths are `layer_dims`; a final linear layer
    (no activation) of matching width is applied last.
    Returns an (N, layer_dims[-1]) array.
    """
    layers = []
    data = np.random.normal(size=(N, dim))
    nd = dim
    for d in layer_dims:
        layers.append(make_layer(nd, d))
        nd = d
    # Final affine map, applied without the ELU used inside `forward`.
    w, b = make_layer(nd, nd)
    # Fixed: the local result used to be named `gen_data`, shadowing this
    # function's own name.
    generated = forward(data, layers)
    generated = generated @ w + b
    return generated
if __name__ == '__main__':
    # Optional mode: plot previously saved centers from C.npy.
    if '--load-C' in argv:
        print("load centers...")
        centers = np.load('C.npy')
        centers = centers.reshape(-1,)
        plt.plot(centers, 'b')
        plt.title('100 iterations')
        plt.xlabel('dimension')
        plt.ylabel('average std')
        plt.savefig('centers.png')
        plt.show()
        # NOTE(review): there is no early exit here, so the generation code
        # below still runs even in --load-C mode — confirm this is intended.
    print("generate centers in 1 iteration...")
    # Overall std of data generated from each intrinsic dimension d = 1..60,
    # with a random sample count and random first hidden-layer width.
    V_std = []
    for d in range(1, 61):
        print("\rdimension: %d" % d, end="", flush=True)
        N = np.random.randint(1e4, 1e5)
        layer_dims = [np.random.randint(60, 80), 100]
        data = gen_data(d, layer_dims, N)
        V_std.append( data.std() )
    print("")
    # Same sweep, but averaging the per-feature stds instead.
    V_std_mean = []
    for d in range(1, 61):
        print("\rdimension: %d" % d, end="", flush=True)
        N = np.random.randint(1e4, 1e5)
        layer_dims = [np.random.randint(60, 80), 100]
        data = gen_data(d, layer_dims, N)
        V_std_mean.append( np.mean( data.std(axis=0) ) )
    # NOTE(review): the x-axis is the 0-based list index, offset by one from
    # the actual dimension d (1..60).
    plt.plot(V_std, 'b')
    plt.plot(V_std_mean, 'r')
    plt.legend(['all std', 'std mean'], loc='upper left')
    plt.xlabel('dimension')
    plt.ylabel('std')
    plt.savefig('gen.png')
    plt.show()
|
# Project Euler #30: find every number equal to the sum of the fifth
# powers of its digits. 1 is excluded by starting at 2, and 6*9**5 =
# 354294 bounds the search from above.
upperBound = 355000
start = 2
selected = []
for i in range(start, upperBound):
    # Sum of the fifth powers of i's digits.
    tot = sum(int(digit) ** 5 for digit in str(i))
    if tot == i:
        selected.append(i)
        print(selected)
output = sum(selected)
print(output)
from datetime import datetime as dt
from django.contrib.syndication.feeds import Feed as RssFeed
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import Http404
from django.template.defaultfilters import pluralize
from atom import Feed as AtomFeed
from blog.models import Post
from discussion.models import CommentNode
from utils.helpers import reverse
from tagging.models import Tag, TaggedItem
from tagging.utils import get_tag_list
def link(location):
    """Return an absolute URL for *location* on the current Django site,
    using the protocol configured in settings.SITE_PROTOCOL."""
    return '%s://%s%s' % (settings.SITE_PROTOCOL, Site.objects.get_current().domain, location)
def _BlogEntries(Feed, type='atom'):
    """Class factory: build a feed of the five newest published blog posts.

    *Feed* is the base feed class (Atom or RSS) and *type* names the URL
    pattern ('atom' or 'rss') used when building the feed's self link.
    """
    class BlogEntries(Feed):
        def feed_id(self):
            # The absolute post-list URL doubles as the feed's unique id.
            return link(reverse('post_list'))
        # The feed framework reads `link`; reuse feed_id for it.
        link = feed_id
        # Evaluated once, when this factory runs at import time.
        feed_title = u"%s blog posts" % Site.objects.get_current().name
        title = feed_title
        def feed_authors(self):
            # All staff users are credited as feed authors.
            return ({"name": user.name} for user in User.objects.filter(is_staff=True))
        def feed_links(self):
            return ({'rel': u'alternate', 'href': self.feed_id()},
                    {'rel': u'self', 'href': link(reverse('%s_feed' % type, 'blog'))})
        def items(self):
            # Newest five posts, skipping any scheduled in the future.
            return Post.objects.exclude(date__gt=dt.now()).order_by('-date')[:5]
        def item_id(self, item):
            return link(item.get_absolute_url())
        def item_title(self, item):
            # (content-type, value) pair as expected by the feed writer.
            return 'html', item.name
        def item_updated(self, item):
            return item.upd_date
        def item_published(self, item):
            return item.date
        def item_content(self, item):
            # SHORT_POSTS_IN_FEED switches between truncated and full HTML.
            html = settings.SHORT_POSTS_IN_FEED and item.html_short or item.html
            return {'type': 'html'}, html
        def item_categories(self, item):
            return ({'term': unicode(tag)} for tag in item.get_tags())
        def item_links(self, item):
            return ({'rel': u'self', 'href': self.item_id(item)},
                    {'rel': u'alternate', 'href': self.item_id(item)})
    return BlogEntries
def get_tags_bit(tags):
    """Join the tag names with '+' to form the URL bit used by tag feeds."""
    names = [tag.name for tag in tags]
    return '+'.join(names)
def _PostsByTag(Feed, type='atom'):
    """Class factory: build a feed of posts matching one or more tags.

    The single URL bit carries '+'-separated tag names (see get_tags_bit).
    """
    class PostsByTag(Feed):
        def get_object(self, bits):
            # Expect exactly one URL bit: the '+'-separated tag names.
            if len(bits) != 1:
                raise ObjectDoesNotExist
            else:
                return get_tag_list(bits[0].split('+'))
        def feed_id(self, obj):
            if not obj:
                raise Http404
            return link(reverse('post_by_tag', tag=get_tags_bit(obj)))
        # The feed framework reads `link`; reuse feed_id for it.
        link = feed_id
        def feed_title(self, obj):
            site = Site.objects.get_current()
            return u"%s blog posts with tag%s %s" % (
                site.name,
                pluralize(len(obj)),
                ', '.join([tag.name for tag in obj]))
        title = feed_title
        def feed_authors(self):
            return ({"name": user.name} for user in User.objects.filter(is_staff=True))
        def feed_links(self, obj):
            return ({'rel': u'alternate', 'href': self.feed_id(obj)},
                    {'rel': u'self', 'href': link(reverse(
                        '%s_feed' % type,
                        'tag/%s' % get_tags_bit(obj)))})
        def items(self, obj):
            # Posts tagged with ANY of the requested tags (union), newest five.
            return TaggedItem.objects.get_union_by_model(Post, obj)[:5]
        def item_id(self, item):
            return link(item.get_absolute_url())
        def item_title(self, item):
            return 'html', item.name
        def item_updated(self, item):
            return item.upd_date
        def item_published(self, item):
            return item.date
        def item_content(self, item):
            # SHORT_POSTS_IN_FEED switches between truncated and full HTML.
            html = settings.SHORT_POSTS_IN_FEED and item.html_short or item.html
            return {'type': 'html'}, html
        def item_links(self, item):
            return ({'rel': u'self', 'href': self.item_id(item)},
                    {'rel': u'alternate', 'href': self.item_id(item)})
    return PostsByTag
def _CommentEntries(Feed, type='atom'):
    """Class factory: build a comment feed, site-wide or for a single post."""
    class CommentEntries(Feed):
        # If the feed gets extra params, these are comments for a particular
        # post; otherwise the latest comments site-wide.
        def get_object(self, bits):
            if len(bits) > 1:
                raise ObjectDoesNotExist
            elif len(bits) == 1:
                return Post.objects.get(id=bits[0])
            else:
                return None
        def feed_id(self, obj):
            if obj:
                return link(obj.get_absolute_url())
            else:
                return link('%s#comments' % reverse('post_list'))
        # The feed framework reads `link`; reuse feed_id for it.
        link = feed_id
        def feed_title(self, obj):
            site = Site.objects.get_current()
            if obj:
                return '%s blog comments on %s' % (site.name, obj.name)
            else:
                return '%s blog comments' % site.name
        title = feed_title
        def feed_authors(self, obj):
            if obj:
                return ({'name': c.user.name} for c in obj.comments.all())
            else:
                return ({'name': c.user.name} for c in self.items(obj))
        def feed_links(self, obj):
            return ({'rel': u'alternate', 'href': self.feed_id(obj)},
                    {'rel': u'self', 'href': link(reverse(
                        '%s_feed' % type,
                        'comments/%s' % str(getattr(obj, 'id', ''))))})
        def items(self, obj):
            # Latest 30 comments, scoped to one post when obj is given.
            if obj:
                return CommentNode.objects.for_object(obj).order_by('-pub_date')[:30]
            else:
                return CommentNode.objects.order_by('-pub_date')[:30]
        def item_id(self, item):
            return link(item.get_absolute_url())
        def item_title(self, item):
            return 'Comment on %s by %s' % (item.object.name, item.user.name)
        def item_updated(self, item):
            return item.upd_date
        def item_published(self, item):
            return item.pub_date
        def item_content(self, item):
            return {'type': 'html'}, item.body_html
        def item_links(self, item):
            return ({'rel': u'self', 'href': self.item_id(item)},
                    {'rel': u'alternate', 'href': self.item_id(item)})
        def item_authors(self, item):
            return ({'name': item.user.name}, )
    return CommentEntries
# Ok, time to build our feeds!
# Concrete feed classes: one Atom and one RSS variant per factory.
AtomBlogEntries = _BlogEntries(AtomFeed)
RssBlogEntries = _BlogEntries(RssFeed, 'rss')
AtomPostsByTag = _PostsByTag(AtomFeed)
RssPostsByTag = _PostsByTag(RssFeed, 'rss')
AtomCommentEntries = _CommentEntries(AtomFeed)
RssCommentEntries = _CommentEntries(RssFeed, 'rss')
# Featured posts feeds
# DRY violation, needs refactoring (feed_updated/items are duplicated in
# the RSS variant below)
class AtomFeaturedBlogEntries(AtomBlogEntries):
    """Atom variant of the blog feed, restricted to featured posts."""
    def feed_updated(self):
        # Timestamp of the most recent featured post.
        # NOTE(review): raises IndexError when there are no featured posts —
        # confirm that case cannot occur.
        return Post.featured_objects.order_by('-date')[0].date
    def items(self):
        # Newest five featured posts, skipping any scheduled in the future.
        return Post.featured_objects.exclude(date__gt=dt.now()).order_by('-date')[:5]
class RssFeaturedBlogEntries(RssBlogEntries):
    """RSS variant of the blog feed, restricted to featured posts."""
    def feed_updated(self):
        # NOTE(review): raises IndexError when there are no featured posts —
        # confirm that case cannot occur.
        return Post.featured_objects.order_by('-date')[0].date
    def items(self):
        # Newest five featured posts, skipping any scheduled in the future.
        return Post.featured_objects.exclude(date__gt=dt.now()).order_by('-date')[:5]
|
class Array:
    """Dynamic array over a fixed-size Python list, doubling on overflow."""

    def __init__(self, capacity=10):
        # Number of allocated slots vs. number of used slots.
        self._capacity = capacity
        self._length = 0
        self._array = self._make_array(capacity)

    def _make_array(self, capacity):
        """Return a fresh backing list with *capacity* empty slots.

        Fixed: this used to also overwrite ``self._capacity`` as a hidden
        side effect, which ``append`` silently relied on; capacity is now
        updated explicitly by the caller.
        """
        return [None] * capacity

    def append(self, ele):
        """Append *ele*, growing the backing store 2x when full.

        Amortised O(1); a single grow step copies all existing elements.
        """
        if self._capacity == self._length:
            new_capacity = 2 * self._capacity
            tmp_array = self._make_array(new_capacity)
            for idx in range(self._length):
                tmp_array[idx] = self._array[idx]
            self._array = tmp_array
            self._capacity = new_capacity
        self._array[self._length] = ele
        self._length += 1
|
from .nystrom_attention import NystromAttention
from .nystromformer import Nystromformer
from .version import __version__
|
import sys
import yaml
if __name__ == "__main__":
    # Make the project root and the TF object-detection research code importable.
    sys.path.append("..")
    sys.path.append("./Tensorflow/models/research")
    from data.GUI.GUI import *
    # Load default settings from config.yml in the working directory.
    with open(r'config.yml') as file:
        config = yaml.load(file, Loader=yaml.FullLoader)
    # Derive the (width, height) tuple from the default size entries.
    config['loader']['image_size'] = (config['loader']['default_size']['x'], config['loader']['default_size']['y'])
    ########
    # The GUI goes here: it collects user input into `config`,
    # overriding the default values loaded from config.yml.
    ########
    gui = GUI(config)
|
import joblib
import numpy as np
import re
from multiprocessing import Pool
class ScikitClassifier:
    """Window-feature IOB tagger built around a scikit-learn estimator.

    Adapted from https://nlpforhackers.io/training-ner-large-dataset/

    Each token is described by hand-crafted features (neighbouring words,
    orthographic shapes, previously assigned tags) and, when a word2vec
    model is supplied, embedding vectors of the window words.
    """
    def __init__(self, word2vec=None, clf=None, search=None):
        """
        :param word2vec: optional gensim-style model exposing ``wv`` and
            ``vector_size``, used to embed window words
        :param clf: scikit-learn estimator or pipeline used for tagging
        :param search: optional hyper-parameter search (e.g. GridSearchCV);
            when set, ``fit`` runs it and keeps the best estimator
        """
        self.word2vec = word2vec
        self.clf = clf
        self.search = search

    def shape(self, word):
        """Return a coarse orthographic shape class for *word*.

        Patterns are tried in order; the first match wins. Fixed: patterns
        are now raw strings, so escapes such as ``\\.`` no longer trigger
        invalid-escape-sequence warnings (matching behaviour is unchanged).
        """
        word_shape = 'other'
        if re.match(r'[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word):
            word_shape = 'number'
        elif re.match(r'\W+$', word):
            word_shape = 'punct'
        elif re.match(r'[A-Z][a-z]+$', word):
            word_shape = 'capitalized'
        elif re.match(r'[A-Z]+$', word):
            word_shape = 'uppercase'
        elif re.match(r'[a-z]+$', word):
            word_shape = 'lowercase'
        elif re.match(r'[A-Z][a-z]+[A-Z][a-z]+[A-Za-z]*$', word):
            word_shape = 'camelcase'
        elif re.match(r'[A-Za-z]+$', word):
            word_shape = 'mixedcase'
        elif re.match(r'__.+__$', word):
            word_shape = 'wildcard'
        elif re.match(r'[A-Za-z0-9]+\.$', word):
            word_shape = 'ending-dot'
        elif re.match(r'[A-Za-z0-9]+\.[A-Za-z0-9\.]+\.$', word):
            word_shape = 'abbreviation'
        elif re.match(r'[A-Za-z0-9]+\-[A-Za-z0-9\-]+.*$', word):
            word_shape = 'contains-hyphen'
        return word_shape

    def features(self, tokens, index, history):
        """Build the feature dict for one token.

        `tokens` = the sentence's words [w1, w2, ...]
        `index` = the index of the token we want to extract features for
        `history` = the IOB tags assigned so far
        """
        # Pad the sequence with placeholders
        tokens = ['__START2__', '__START1__'] + tokens + ['__END1__', '__END2__']
        history = ['__START2__', '__START1__'] + history
        # shift the index with 2, to accommodate the padding
        index += 2
        word = tokens[index]
        prevword = tokens[index-1]
        prevprevword = tokens[index-2]
        nextword = tokens[index+1]
        nextnextword = tokens[index+2]
        previob = history[-1]
        prevpreviob = history[-2]
        feat_words = {
            'word': word,
            'next-word': nextword,
            # 'next-next-word': nextnextword,
            'prev-word': prevword,
            # 'prev-prev-word': prevprevword,
        }
        if self.word2vec is not None:
            # Replace the raw word features with embedding components;
            # out-of-vocabulary words get a zero vector.
            # Fixed: the loop variable used to be named `word`, clobbering
            # the current token so that `shape`/`is_first` below were
            # computed from the wrong word whenever embeddings were on.
            new_feat = {}
            for col, win_word in feat_words.items():
                w2v = self.word2vec.wv
                size = self.word2vec.vector_size
                vec = w2v[win_word] if win_word in w2v else np.zeros((size))
                vec_dict = {f'{col}-vec-{i}': v for i, v in enumerate(vec)}
                new_feat = {**new_feat, **vec_dict}
            feat_words = new_feat
        feat_others = {
            'is_first': prevword == '__START1__',
            'is_last': nextword == '__END1__',
            'shape': self.shape(word),
            'next-shape': self.shape(nextword),
            # 'next-next-shape': self.shape(nextnextword),
            'prev-shape': self.shape(prevword),
            # 'prev-prev-shape': self.shape(prevprevword),
            'prev-iob': previob,
            # 'prev-prev-iob': prevpreviob,
        }
        feat_dict = {**feat_words, **feat_others}
        return feat_dict

    def transform(self, list_of_tagged):
        """Build training data from tagged sentences.

        Stores the features/labels on ``self.X_train`` / ``self.y_train``
        and returns None (the original docstring wrongly advertised a
        return value).

        Fixed: the gold label used to be appended to ``history`` *before*
        extracting features, so 'prev-iob' leaked each token's own answer
        at training time while ``predict`` only sees genuinely previous
        tags. Features are now extracted first, matching prediction time.

        :param list_of_tagged: [ [(word0, 'OTHER'), (word1, 'STREET'), ...], [...], ... ]
        """
        X, y = [], []
        for tagged in list_of_tagged:
            sentence = [w for w, t in tagged]
            history = []
            for idx in range(len(tagged)):
                X.append(self.features(sentence, idx, history))
                label = tagged[idx][1]
                history.append(label)
                y.append(label)
        self.X_train, self.y_train = X, y

    def fit(self):
        """Fit on the data prepared by ``transform``.

        When ``self.search`` is set, run the search and keep its best
        estimator as ``self.clf``; otherwise fit ``self.clf`` directly.
        """
        if self.search is not None:
            self.search.fit(self.X_train, self.y_train)
            self.clf = self.search.best_estimator_
        else:
            self.clf.fit(self.X_train, self.y_train)

    def predict(self, list_of_sentence):
        """Tag each sentence, feeding every predicted tag back as history.

        :param list_of_sentence: [ [word0, word1, ...] , [word0, word1, ...], ...]
        :return: [ [(word0, 'OTHER'), (word1, 'STREET'), ...], ...]
        """
        list_of_preds = []
        for sentence in list_of_sentence:
            history = []
            for idx in range(len(sentence)):
                X = self.features(sentence, idx, history)
                # NOTE(review): a single feature dict is passed to predict();
                # most sklearn pipelines expect a list of samples ([X]) —
                # confirm against the vectorizer actually used.
                y = self.clf.predict(X)[0]
                history.append(y)
            y_pred = [(w, t) for w, t in zip(sentence, history)]
            list_of_preds.append(y_pred)
        return list_of_preds

    def parallelize_predict(self, list_of_sentence, n_cores=10):
        """Run ``predict`` across *n_cores* worker processes.

        Fixed: ``np.split`` raised ValueError unless the input length was
        divisible by ``n_cores``; ``np.array_split`` accepts any length.
        The pool is also closed/joined even when a worker raises.
        """
        list_split = np.array_split(list_of_sentence, n_cores)
        pool = Pool(n_cores)
        try:
            list_results = []
            for result in pool.map(self.predict, list_split):
                list_results += result
        finally:
            pool.close()
            pool.join()
        return list_results

    def save_model(self, save_path):
        """Persist the fitted estimator to *save_path* via joblib."""
        joblib.dump(self.clf, save_path)

    def load_model(self, load_path):
        """Load a previously saved estimator from *load_path*."""
        self.clf = joblib.load(load_path)
"""
每行数据重复N次合并生成新文件
题目来源 http://www.bathome.net/thread-38017-2-1.html
依山居 0:54 2015/11/14
这个版本可以使用来处理实际数据。。。6百万行,大约17秒。。。
总结:几百万行数据真不算多。不需要逐行读取处理。python列表解析是个好东西~
使用重复列表中元素更好的方法 http://www.oschina.net/question/96078_2141454
python笔记_列表解析 http://www.jianshu.com/p/c635d3c798c2
"""
import time
start=time.time()
an=6
with open("aa.txt") as f:
ta=f.read()
ta=ta.rsplit()
al=[r+"," for r in ta for i in range(an)]
print("al长度:",len(al))
bn=3
with open("bb.txt") as f:
tb=f.read()
tb=tb.rsplit()
bl=[r+"," for r in tb for i in range(bn)]
print("bl长度:",len(bl))
cn=1
with open("cc.txt") as f:
tc=f.read()
tc=tc.rsplit()
cl=[r+"\n" for r in tc for i in range(cn)]
print("cl长度:",len(cl))
end=time.time()
pt=end-start
print("运行耗时:",pt)
rn=len(cl)
tal=[al[r]+bl[r]+cl[r] for r in range(rn)]
#还是用列表解析好~
#for r in range(rn):
# tal.append(al[r]+bl[r]+cl[r])
end=time.time()
pt=end-start
print("运行耗时:",pt)
with open("out.txt","w+") as f:
f.writelines(tal)
f.close()
end=time.time()
pt=end-start
print("运行耗时:",pt)
try:
input("按回车退出")
except SyntaxError:
pass
|
import uuid
import pandas
from alpha_vantage.timeseries import TimeSeries
from alpha_vantage.foreignexchange import ForeignExchange
from confluent_kafka import Producer, Consumer
from flask import jsonify
class AlphaKafka(object):
    """Glue between the Alpha Vantage market-data API and a Kafka cluster.

    Holds a confluent-kafka producer/consumer pair authenticated over
    SASL_SSL, plus Alpha Vantage time-series and foreign-exchange clients.
    """
    def __init__(self, host, ckey, csecret, akey):
        """
        host: Kafka bootstrap server(s).
        ckey / csecret: Confluent SASL username and password.
        akey: Alpha Vantage API key.
        """
        self.host = host
        self.key = ckey
        self.secret = csecret
        #self.producer = KafkaProducer(bootstrap_servers=self.host)
        self.p = Producer({
            'bootstrap.servers': host,
            'sasl.mechanisms': 'PLAIN',
            'security.protocol': 'SASL_SSL',
            'sasl.username': ckey,
            'sasl.password': csecret,
        })
        # NOTE(review): this consumer is created and stored but never used in
        # this class — confirm whether it is consumed elsewhere.
        self.c = Consumer({
            'bootstrap.servers': host,
            'sasl.mechanisms': 'PLAIN',
            'security.protocol': 'SASL_SSL',
            'sasl.username': ckey,
            'sasl.password': csecret,
            # this will create a new consumer group on each invocation.
            'group.id': str(uuid.uuid1()),
            'auto.offset.reset': 'earliest'
        })
        self.ts = TimeSeries(key=akey, output_format='pandas')
        self.fx = ForeignExchange(key=akey)
    def acked(self, err, msg):
        """Delivery report callback called (from flush()) on successful or failed delivery of the message.

        NOTE(review): the return value of a delivery callback is discarded by
        confluent-kafka, so the report string built here is never surfaced —
        consider logging it instead.
        """
        if err is not None:
            report = "failed to deliver message: {}".format(err.str())
        else:
            report = "produced to: {} [{}] @ {}".format(
                msg.topic(), msg.partition(), msg.offset())
        return report
    def produce(self, topic, message):
        """Publish *message* to *topic* and flush synchronously.

        Delivery status is reported asynchronously via ``self.acked``.
        """
        client = self.p
        msg = client.produce(topic, value=message, on_delivery=self.acked)
        client.flush()
        return msg
    def timeseries(self, symbol):
        """Fetch 15-minute intraday bars for *symbol*.

        Returns the (data, metadata) pair from Alpha Vantage; data is a
        pandas DataFrame because of the constructor's output_format.
        """
        client = self.ts
        data, metadata = client.get_intraday(
            symbol, interval='15min', outputsize='compact')
        return data, metadata
        #return data.head(2)
        #return jsonify(data=data, metatdata=metadata)
    def curex(self, curr1, curr2):
        """Fetch the current *curr1* -> *curr2* exchange-rate data."""
        client = self.fx
        data, _ = client.get_currency_exchange_rate(
            from_currency=curr1, to_currency=curr2)
        return data
        #return data.head(2)
        #return jsonify(data=data, metatdata=metadata)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.