seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13857053749 | from operator import truediv
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.layers import Conv1D,Conv2D, Conv3D, Flatten, Dense, Reshape, Lambda
from tensorflow.keras.layers import Dropout, Input,dot,Activation,MaxPool1D,add,BatchNormalization,MaxPool2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.utils.np_utils import to_categorical
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from collections import Counter
# from MDGCN import trainMDGCN
# TF1-style graph execution: eager mode off; GPU memory grows on demand
# instead of being fully pre-allocated at session creation.
tf.compat.v1.disable_eager_execution()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
import time
from plotly.offline import init_notebook_mode
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import spectral
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # pin all work to GPU 0
time_start = time.time()
## Data
## GLOBAL VARIABLES
# Alternative dataset configurations kept for reference:
# dataset = 'IP'
# test_ratio = 0.9
# dataset = 'KSC'
# test_ratio = 0.94
dataset = 'UP'        # active dataset key (Pavia University)
test_ratio = 0.7      # fraction of labelled samples held out for val+test
train_val_ratio = 1
train_ratio = 1 - test_ratio
windowSize = 11       # spatial patch size (windowSize x windowSize)
# Number of PCA components retained, chosen per dataset.
if dataset == 'UP':
    componentsNum = 30
elif dataset == 'UH':
    componentsNum = 50 if test_ratio >= 0.99 else 25
elif dataset == 'IP':
    componentsNum = 140
elif dataset == 'KSC':
    componentsNum = 120
else:
    componentsNum = 30
drop = 0.4  # dropout rate (note: the dense layers below use a literal 0.4)
class Mish(Activation):
    """Keras Activation subclass so the custom 'Mish' activation can be
    referenced by name via Activation('Mish')."""
    def __init__(self, activation, **kwargs):
        super(Mish, self).__init__(activation, **kwargs)
        self.__name__ = 'Mish'

def mish(inputs):
    # Mish activation: x * tanh(softplus(x))
    return inputs * tf.math.tanh(tf.math.softplus(inputs))

# Register globally so string lookups of 'Mish' resolve in layer configs.
get_custom_objects().update({'Mish': Mish(mish)})
## define a series of data progress function
def loadData(name):
    """Load a hyperspectral cube and its ground-truth label map from ./data.

    :param name: dataset key: 'IP', 'SA', 'UP', 'UH' or 'KSC'
    :return: (data, labels) numpy arrays as stored in the .mat files
    :raises ValueError: if name is not a known dataset key
    """
    data_path = os.path.join(os.getcwd(), 'data')
    if name == 'IP':
        data = sio.loadmat(os.path.join(data_path, 'Indian_pines_corrected.mat'))['indian_pines_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Indian_pines_gt.mat'))['indian_pines_gt']
    elif name == 'SA':
        data = sio.loadmat(os.path.join(data_path, 'Salinas_corrected.mat'))['salinas_corrected']
        labels = sio.loadmat(os.path.join(data_path, 'Salinas_gt.mat'))['salinas_gt']
    elif name == 'UP':
        data = sio.loadmat(os.path.join(data_path, 'PaviaU.mat'))['paviaU']
        labels = sio.loadmat(os.path.join(data_path, 'PaviaU_gt.mat'))['paviaU_gt']
    elif name == 'UH':
        data = sio.loadmat(os.path.join(data_path, 'HoustonU.mat'))['houstonU']  # 601*2384*50
        labels = sio.loadmat(os.path.join(data_path, 'HoustonU_gt.mat'))['houstonU_gt']
    elif name == 'KSC':
        data = sio.loadmat(os.path.join(data_path, 'KSC.mat'))['KSC']
        labels = sio.loadmat(os.path.join(data_path, 'KSC_gt.mat'))['KSC_gt']
    else:
        # Previously an unknown key fell through and crashed with
        # UnboundLocalError at the return; fail fast with a clear message.
        raise ValueError(f"Unknown dataset name: {name!r}")
    return data, labels
def splitTrainTestSet(X, y, testRatio, randomState=345):
    """Stratified split of (X, y) into train and test subsets.

    Uses a fixed random_state by default so splits are reproducible.
    """
    return train_test_split(
        X, y, test_size=testRatio, random_state=randomState, stratify=y
    )
def applyPCA(X, numComponents=140):
    """Reduce the spectral dimension of cube X via whitened PCA.

    :param X: (H, W, bands) hyperspectral cube
    :param numComponents: number of principal components to keep
    :return: (reduced cube (H, W, numComponents), fitted PCA object,
        explained variance ratio per component)
    """
    height, width, bands = X.shape
    flat = np.reshape(X, (-1, bands))
    print(flat.shape)
    pca = PCA(n_components=numComponents, whiten=True)
    reduced = pca.fit_transform(flat)
    reduced = np.reshape(reduced, (height, width, numComponents))
    return reduced, pca, pca.explained_variance_ratio_
def padWithZeros(X, margin=2):
    """Zero-pad an (H, W, C) cube by `margin` pixels on each spatial side.

    The channel axis is left untouched; output dtype is float16 to bound
    memory on large cubes.
    """
    height, width, channels = X.shape
    padded = np.zeros((height + 2 * margin, width + 2 * margin, channels),
                      dtype="float16")
    padded[margin:height + margin, margin:width + margin, :] = X
    return padded
def createPatches(X, y, windowSize=25, removeZeroLabels=True):
    """Extract a windowSize x windowSize spatial patch around every pixel.

    :param X: (H, W, C) cube
    :param y: (H, W) label map
    :param windowSize: odd patch edge length
    :param removeZeroLabels: drop background pixels (label 0) and shift
        the remaining labels to start at 0
    :return: (patches (N, w, w, C) float16, labels (N,) float16)
    """
    margin = int((windowSize - 1) / 2)
    padded = padWithZeros(X, margin=margin)
    total = X.shape[0] * X.shape[1]
    patchesData = np.zeros((total, windowSize, windowSize, X.shape[2]),
                           dtype="float16")
    patchesLabels = np.zeros(total, dtype="float16")
    idx = 0
    for r in range(margin, padded.shape[0] - margin):
        for c in range(margin, padded.shape[1] - margin):
            patchesData[idx] = padded[r - margin:r + margin + 1,
                                      c - margin:c + margin + 1]
            patchesLabels[idx] = y[r - margin, c - margin]
            idx += 1
    if removeZeroLabels:
        keep = patchesLabels > 0
        patchesData = patchesData[keep, :, :, :]
        patchesLabels = patchesLabels[keep] - 1
    return patchesData, patchesLabels
def SBNL(X, numComponents):
    """Interleave low- and high-index spectral bands into a new band order.

    For j in [0, numComponents//2 - 2): output band 2*j takes input band j
    and output band 2*j+1 takes input band numComponents-j-2. Band 102 is
    copied through unchanged (hard-coded for a 103-band cube); any bands
    not covered by the loop remain zero.
    """
    reordered = np.zeros((X.shape[0], X.shape[1], X.shape[2]))
    half = int(numComponents / 2)
    for j in range(half - 2):
        reordered[:, :, 2 * j] = X[:, :, j]
        reordered[:, :, 2 * j + 1] = X[:, :, numComponents - j - 2]
    # NOTE(review): index 102 assumes X has at least 103 bands — confirm
    # before reusing for other datasets.
    reordered[:, :, 102] = X[:, :, 102]
    return reordered
def non_local_block(ip, intermediate_dim=None, compression=2,
                    mode='embedded', add_residual=True):
    """Non-local (self-attention) block over temporal/spatial/volumetric input.

    :param ip: Keras tensor of rank 3, 4 or 5 (incl. batch axis)
    :param intermediate_dim: channels of the inner embeddings; defaults to
        input channels // 2 (minimum 1)
    :param compression: max-pool factor applied to the phi/g paths to
        shrink the attention map; None or 1 disables it
    :param mode: 'gaussian', 'embedded', 'dot' or 'concatenate'
        ('concatenate' is not implemented)
    :param add_residual: add the input back onto the projected output
    :return: tensor with the same shape as ip
    """
    channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
    ip_shape = K.int_shape(ip)
    if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:
        raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')
    if compression is None:
        compression = 1  # disable shielded computation
    dim1, dim2, dim3 = None, None, None
    # check rank and calculate the input shape
    if len(ip_shape) == 3:  # temporal / time series data
        rank = 3
        batchsize, dim1, channels = ip_shape
    elif len(ip_shape) == 4:  # spatial / image data
        rank = 4
        if channel_dim == 1:
            batchsize, channels, dim1, dim2 = ip_shape
        else:
            batchsize, dim1, dim2, channels = ip_shape
    elif len(ip_shape) == 5:  # spatio-temporal / Video or Voxel data
        rank = 5
        if channel_dim == 1:
            batchsize, channels, dim1, dim2, dim3 = ip_shape
        else:
            batchsize, dim1, dim2, dim3, channels = ip_shape
    else:
        raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')
    # verify correct intermediate dimension specified
    if intermediate_dim is None:
        intermediate_dim = channels // 2
        if intermediate_dim < 1:
            intermediate_dim = 1
    else:
        intermediate_dim = int(intermediate_dim)
        if intermediate_dim < 1:
            raise ValueError('`intermediate_dim` must be either `None` or positive integer greater than 1.')
    if mode == 'gaussian':  # Gaussian instantiation: raw pairwise dot products
        x1 = Reshape((-1, channels))(ip)  # xi
        x2 = Reshape((-1, channels))(ip)  # xj
        f = dot([x1, x2], axes=2)
        f = Activation('softmax')(f)
    elif mode == 'dot':  # Dot instantiation
        # theta path
        theta = _convND(ip, rank, intermediate_dim)
        theta = Reshape((-1, intermediate_dim))(theta)
        # phi path
        phi = _convND(ip, rank, intermediate_dim)
        phi = Reshape((-1, intermediate_dim))(phi)
        f = dot([theta, phi], axes=2)
        size = K.int_shape(f)
        # scale the values to make it size invariant
        f = Lambda(lambda z: (1. / float(size[-1])) * z)(f)
    elif mode == 'concatenate':  # Concatenation instantiation
        raise NotImplementedError('Concatenate model has not been implemented yet')
    else:  # Embedded Gaussian instantiation (default)
        # theta path
        theta = _convND(ip, rank, intermediate_dim)
        theta = Reshape((-1, intermediate_dim))(theta)
        # phi path
        phi = _convND(ip, rank, intermediate_dim)
        # phi = Conv2D(channels, (2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(ip)
        phi = Reshape((-1, intermediate_dim))(phi)
        if compression > 1:
            # shielded computation: subsample phi to shrink the attention map
            phi = MaxPool1D(compression)(phi)
        f = dot([theta, phi], axes=2)  # pairwise similarity (inner product)
        f = Activation('softmax')(f)
    # g path: value embedding
    g = _convND(ip, rank, intermediate_dim)
    g = Reshape((-1, intermediate_dim))(g)
    if compression > 1 and mode == 'embedded':
        # shielded computation (must match the phi subsampling above)
        g = MaxPool1D(compression)(g)
    # compute output path: attention-weighted sum of g (compression=1 here)
    y = dot([f, g], axes=[2, 1])
    # reshape to input tensor format
    if rank == 3:
        y = Reshape((dim1, intermediate_dim))(y)
    elif rank == 4:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2))(y)
    else:
        if channel_dim == -1:
            y = Reshape((dim1, dim2, dim3, intermediate_dim))(y)
        else:
            y = Reshape((intermediate_dim, dim1, dim2, dim3))(y)
    # project filters
    # ip_ = Conv2D(channels, (1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(ip)
    # ip__ = Conv2D(channels, (3, 3), padding='same', use_bias=False, kernel_initializer='he_normal')(ip)
    # y = tf.concat([y,ip_,ip__],3)
    y = _convND(y, rank, channels)  # project back to the input channel count
    # residual connection
    if add_residual:
        y = add([ip, y])
    return y
def _convND(ip, rank, channels):
    """Rank-appropriate embedding convolution used by non_local_block.

    rank 3 -> 1x1 Conv1D; rank 5 -> 1x1x1 Conv3D; rank 4 applies a 3x3
    followed by a 1x1 Conv2D (all bias-free, he_normal initialised).
    """
    assert rank in [3, 4, 5], "Rank of input must be 3, 4 or 5"
    if rank == 3:
        return Conv1D(channels, 1, padding='same', use_bias=False,
                      kernel_initializer='he_normal')(ip)
    if rank == 4:
        spatial = Conv2D(channels, (3, 3), padding='same', use_bias=False,
                         kernel_initializer='he_normal')(ip)
        return Conv2D(channels, (1, 1), padding='same', use_bias=False,
                      kernel_initializer='he_normal')(spatial)
    return Conv3D(channels, (1, 1, 1), padding='same', use_bias=False,
                  kernel_initializer='he_normal')(ip)
# ---- Data preparation pipeline ----
X, y = loadData(dataset)
# X = SBNL(X,200)
X = SBNL(X, 103)  # band interleaving; 103 bands matches Pavia University
# X = SBNL(X,176)
X, pca, ratio = applyPCA(X, numComponents=componentsNum)
# X = infoChange(X,componentsNum) # channel-wise shift
# X = SBNL(X,componentsNum)
X, y = createPatches(X, y, windowSize=windowSize)  # per-pixel spatial patches
Xtrain, Xtest, ytrain, ytest = splitTrainTestSet(X, y, test_ratio)
## Train
# Add the trailing channel axis expected by Conv3D and one-hot the labels.
Xtrain = Xtrain.reshape(-1, windowSize, windowSize, componentsNum, 1)
ytrain = to_categorical(ytrain)
for col in range(ytrain.shape[1]):
    b = Counter(ytrain[:, col])  # diagnostic: per-class sample counts
    print(b)
# Split the held-out portion again into validation and test sets.
Xvalid, Xtest, yvalid, ytest = splitTrainTestSet(Xtest, ytest, (test_ratio - train_ratio / train_val_ratio) / test_ratio)
Xvalid = Xvalid.reshape(-1, windowSize, windowSize, componentsNum, 1)
yvalid = to_categorical(yvalid)
# Number of output classes per dataset.
if dataset == 'UP':
    output_units = 9
elif dataset == 'UH':
    output_units = 20
elif dataset == 'KSC':
    output_units = 13
else:
    output_units = 16
## implementation of covariance pooling layers
def cov_pooling(features):
    """Batched covariance pooling of (batch, N, C) features.

    Returns the C x C sample covariance of each batch element,
    regularised by adding 1e-4 * trace on the diagonal so the matrix
    stays well-conditioned.
    """
    static_shape = features.shape.as_list()
    n_rows, n_cols = static_shape[1], static_shape[2]
    # Per-batch column means broadcast back to (batch, N, C).
    means = tf.reduce_mean(tf.transpose(features, [0, 2, 1]), 2)
    means = tf.tile(tf.reshape(means, [-1, 1, n_cols]), [1, n_rows, 1])
    centered = tf.subtract(features, means)
    centered_t = tf.transpose(centered, [0, 2, 1])
    cov = 1 / tf.cast((n_rows - 1), tf.float32) * tf.matmul(centered_t, centered)
    # Diagonal loading: add 1e-4 * trace(cov) to every diagonal entry.
    ridge = tf.reshape(tf.compat.v1.trace(cov), [-1, 1])
    ridge = 0.0001 * tf.compat.v1.matrix_diag(tf.tile(ridge, [1, n_cols]))
    return tf.add(cov, ridge)
def feature_vector(features):
    """Zero out the strictly lower-triangular part of each matrix,
    keeping only the (symmetric-redundant) upper triangle."""
    num_cols = features.shape.as_list()[2]
    return tf.linalg.band_part(features, 0, num_cols)
def bn_prelu(X):
    """Batch normalisation followed by the globally registered Mish
    activation (see get_custom_objects registration above)."""
    normed = BatchNormalization(epsilon=1e-5)(X)
    return Activation('Mish')(normed)
## input layer
input_layer = Input((windowSize, windowSize, componentsNum, 1))
## convolutional layers
conv_layer1 = Conv3D(filters=8, kernel_size=(3, 3, 3), activation='relu',padding='same')(input_layer)
conv_layer2 = Conv3D(filters=16, kernel_size=(3, 3, 3), activation='relu',padding='same')(conv_layer1)
conv_layer3 = Conv3D(filters=32, kernel_size=(3, 3, 3), activation='relu',padding='same')(conv_layer2)
# print(conv_layer3._keras_shape)
conv3d_shape = conv_layer3.shape
conv_layer3_ = Reshape((conv3d_shape[1], conv3d_shape[2], conv3d_shape[3]*conv3d_shape[4]))(conv_layer3)
conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), padding='same',groups=64)(conv_layer3_)#64*3*3,PaviaU
#conv_layer4 = Conv2D(filters=64, kernel_size=(3,3), padding='same')(conv_layer3_)#Indian Pines,KSC
conv_layer4 = bn_prelu(conv_layer4)
conv_layer5 = Conv2D(filters=64, kernel_size=(1,1))(conv_layer4)
conv_layer5_ = tf.concat([conv_layer5,conv_layer4,conv_layer5],0)
conv_layer5_ = bn_prelu(conv_layer5_)
conv_layer6 = Conv2D(filters=192, kernel_size=(1,3), padding='same',groups=64)(conv_layer5_)#64*3*3,PaviaU
#conv_layer6 = Conv2D(filters=192, kernel_size=(1,3), padding='same')(conv_layer5_)#Indian Pines,KSC
conv_layer6 = bn_prelu(conv_layer6)
conv_layer7 = Conv2D(filters=192, kernel_size=(3,1), padding='same')(conv_layer6)
conv_layer7 = bn_prelu(conv_layer7)
conv_layer8 = Conv2D(filters=192, kernel_size=(3,3), padding='same')(conv_layer7)
conv_layer8 = bn_prelu(conv_layer8)
conv_layer9 = Conv2D(filters=192, kernel_size=(5,5), padding='same')(conv_layer8)
conv_layer9 = bn_prelu(conv_layer9)
conv_layer9_ = Conv2D(filters=192, kernel_size=(7,7), padding='same')(conv_layer8)
conv_layer9_ = bn_prelu(conv_layer9_)
conv_layer10 = Conv2D(filters=64, kernel_size=(1,1))(conv_layer9_)
conv_layer11 = conv_layer10 + conv_layer4
#
conv_layer11 = non_local_block(conv_layer11,compression=1)
conv2d_shape = conv_layer11.shape
conv_layer4 = Reshape((conv2d_shape[1] * conv2d_shape[2], conv2d_shape[3]))(conv_layer4)
conv2d_shape = conv_layer4.shape
cov_pooling_layer1 = Lambda(cov_pooling,output_shape=(conv2d_shape[2],conv2d_shape[2]),mask=None,arguments=None)(conv_layer4)
cov_pooling_layer2 = Lambda(feature_vector,output_shape=(conv2d_shape[2],conv2d_shape[2]),mask=None,arguments=None)(cov_pooling_layer1)
flatten_layer = Flatten()(cov_pooling_layer2)
## fully connected layers
dense_layer1 = Dense(units=256, activation='relu')(flatten_layer)
dense_layer1 = Dropout(0.4)(dense_layer1)
dense_layer2 = Dense(units=128, activation='relu')(dense_layer1)
dense_layer2 = Dropout(0.4)(dense_layer2)
output_layer = Dense(units=output_units, activation='softmax')(dense_layer2)
model = Model(inputs=input_layer, outputs=output_layer)
model.summary()
# compiling the model
adam = Adam(lr=0.001, decay=1e-06)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# checkpoint
filepath = "best-model.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='accuracy', verbose=1, save_best_only=True, mode='max')
callbacks_list = [checkpoint]
start = time.time()
history = model.fit(x=Xtrain, y=ytrain, batch_size=256, epochs=50, validation_data=(Xvalid,yvalid), callbacks=callbacks_list) #,validation_split=(1/3)
end = time.time()
train_time = end - start
print(train_time)
# Plot the training loss curve (legend lists Validation but only the
# training series is plotted).
plt.figure(figsize=(7, 7))
plt.grid()
plt.plot(history.history['loss'])
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.savefig("loss_curve.pdf")
plt.show()
# Plot the training accuracy curve.
plt.figure(figsize=(5, 5))
plt.ylim(0, 1.1)
plt.grid()
plt.plot(history.history['accuracy'])
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['Training', 'Validation'])
plt.savefig("acc_curve.pdf")
plt.show()
## Test
# load best weights saved by the checkpoint callback
model.load_weights("best-model.hdf5")
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
Xtest = Xtest.reshape(-1, windowSize, windowSize, componentsNum, 1)
# Xtest.shape
ytest = to_categorical(ytest)
# ytest.shape
Y_pred_test = model.predict(Xtest)
y_pred_test = np.argmax(Y_pred_test, axis=1)
classification = classification_report(np.argmax(ytest, axis=1), y_pred_test)
print(classification)
def AA_andEachClassAccuracy(confusion_matrix):
    """Per-class accuracy and average accuracy from a confusion matrix.

    :param confusion_matrix: square (num_classes x num_classes) array
    :return: (each_acc, average_acc) where each_acc[i] is the recall of
        class i (0 where the class has no samples) and average_acc is
        their unweighted mean.
    """
    list_diag = np.diag(confusion_matrix)
    list_raw_sum = np.sum(confusion_matrix, axis=1)
    # Classes with no samples yield 0/0 -> nan; map those to 0 without
    # emitting runtime warnings. (Removed the unused `counter` local.)
    with np.errstate(divide='ignore', invalid='ignore'):
        each_acc = np.nan_to_num(np.true_divide(list_diag, list_raw_sum))
    average_acc = np.mean(each_acc)
    return each_acc, average_acc
def reports(X_test, y_test, name):
    """Evaluate the trained global `model` on the test set and collect metrics.

    :param X_test: test patches shaped for the network input
    :param y_test: one-hot encoded test labels
    :param name: dataset key ('IP', 'SA', 'UP', 'UH' or 'KSC'); any other
        value leaves target_names unset and raises at the report call
    :return: (classification report, confusion matrix, test loss %,
        test accuracy %, overall acc %, per-class acc %, average acc %,
        kappa %)
    """
    start = time.time()
    Y_pred = model.predict(X_test)
    y_pred = np.argmax(Y_pred, axis=1)
    end = time.time()
    print(end - start)  # inference time in seconds
    if name == 'IP':
        target_names = ['Alfalfa', 'Corn-notill', 'Corn-mintill', 'Corn',
                        'Grass-pasture', 'Grass-trees', 'Grass-pasture-mowed',
                        'Hay-windrowed', 'Oats', 'Soybean-notill', 'Soybean-mintill',
                        'Soybean-clean', 'Wheat', 'Woods', 'Buildings-Grass-Trees-Drives',
                        'Stone-Steel-Towers']
    elif name == 'SA':
        target_names = ['Brocoli_green_weeds_1', 'Brocoli_green_weeds_2', 'Fallow', 'Fallow_rough_plow', 'Fallow_smooth',
                        'Stubble', 'Celery', 'Grapes_untrained', 'Soil_vinyard_develop', 'Corn_senesced_green_weeds',
                        'Lettuce_romaine_4wk', 'Lettuce_romaine_5wk', 'Lettuce_romaine_6wk', 'Lettuce_romaine_7wk',
                        'Vinyard_untrained', 'Vinyard_vertical_trellis']
    elif name == 'UP':
        target_names = ['Asphalt', 'Meadows', 'Gravel', 'Trees', 'Painted metal sheets', 'Bare Soil', 'Bitumen',
                        'Self-Blocking Bricks', 'Shadows']
    elif name == 'UH':
        target_names = ['Healthy grass', 'Stressed grass', 'Artificial turf', 'Evergreen trees', 'Deciduous trees', 'Bare earth', 'Water',
                        'Residential buildings', 'Non-residential buildings', 'Roads', 'Sidewalks', 'Crosswalks', 'Major thoroughfares', 'Highways',
                        'Railways', 'Paved parking lots', 'Unpaved parking lots', 'Cars', 'Trains', 'Stadium seats']
    elif name == 'KSC':
        target_names = ['Srub', 'Willow swamp', 'CP hammock', 'Slash pine', 'Oak/Broadleaf', 'Hardwood',
                        'Swamp', 'Graminoid', 'Spartina marsh', 'Cattail marsh', 'Salt marsh', 'Mud flats',
                        'Water']
    # NOTE(review): no else branch — an unknown `name` raises
    # UnboundLocalError at the classification_report call below.
    classification = classification_report(np.argmax(y_test, axis=1), y_pred, target_names=target_names)
    oa = accuracy_score(np.argmax(y_test, axis=1), y_pred)
    confusion = confusion_matrix(np.argmax(y_test, axis=1), y_pred)
    each_acc, aa = AA_andEachClassAccuracy(confusion)
    kappa = cohen_kappa_score(np.argmax(y_test, axis=1), y_pred)
    score = model.evaluate(X_test, y_test, batch_size=32)
    Test_Loss = score[0] * 100
    Test_accuracy = score[1] * 100
    # All metrics are scaled to percentages.
    return classification, confusion, Test_Loss, Test_accuracy, oa * 100, each_acc * 100, aa * 100, kappa * 100
# Run the final evaluation and persist the metrics to a text report.
classification, confusion, Test_loss, Test_accuracy, oa, each_acc, aa, kappa = reports(Xtest, ytest, dataset)
classification = str(classification)
confusion1 = str(confusion)
file_name = "classification_report.txt"
with open(file_name, 'w') as x_file:
    x_file.write('{} Test loss (%)'.format(Test_loss))
    x_file.write('\n')
    x_file.write('{} Test accuracy (%)'.format(Test_accuracy))
    x_file.write('\n')
    x_file.write('\n')
    x_file.write('{} Kappa accuracy (%)'.format(kappa))
    x_file.write('\n')
    x_file.write('{} Overall accuracy (%)'.format(oa))
    x_file.write('\n')
    x_file.write('{} Average accuracy (%)'.format(aa))
    x_file.write('\n')
    x_file.write('\n')
    x_file.write('{}'.format(classification))
    x_file.write('\n')
    x_file.write('{}'.format(confusion1))
| henulx/HDECGCN-Framework | HRACPCNN.py | HRACPCNN.py | py | 20,118 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.compat.v1.disable_eager_execution",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.compat.v1.ConfigProto",
"line_number": 19,
"usage_type": "... |
27940252845 | import random
from kfp import components, dsl
from kfp.components import InputPath, OutputPath
from kfp.components import func_to_container_op
from typing import NamedTuple
def train_data_load(
    output_dataset_train_data: OutputPath('Dataset')
):
    """KFP component: download the Fashion-MNIST training split and pickle it.

    Writes a pandas DataFrame with 'image' and 'label' columns to the
    supplied output path. Imports stay inside the body because KFP
    serialises the function for remote execution.
    """
    import tensorflow as tf
    import pandas as pd
    import pickle

    fashion_mnist = tf.keras.datasets.fashion_mnist
    (train_images, train_labels), (_, _) = fashion_mnist.load_data()
    frame = pd.DataFrame(columns=['image', 'label'])
    for idx, img in enumerate(train_images):
        frame.loc[idx] = ({'image': img, 'label': train_labels[idx]})
    with open(output_dataset_train_data, 'wb') as out:
        pickle.dump(frame, out, pickle.HIGHEST_PROTOCOL)
# Wrap the loader as a reusable KFP component; pandas is installed on top
# of the tensorflow base image when the step runs.
train_data_load_op = components.create_component_from_func(
    train_data_load, base_image='tensorflow/tensorflow',
    packages_to_install=['pandas==1.4.2']
)
############################################################################################################################################
############################################################################################################################################
############################################################################################################################################
def preprocess(
    pre_data: InputPath('Dataset'),
    data: OutputPath('Dataset')
):
    """KFP component: scale pickled image data from [0, 255] to [0, 1].

    Reads a DataFrame with 'image'/'label' columns from pre_data and
    writes the normalised DataFrame (same columns) to data. Imports stay
    inside the body because KFP serialises the function.
    """
    import numpy as np
    import pickle
    import pandas as pd

    with open(pre_data, 'rb') as src:
        raw = pickle.load(src)
    image_list = []
    label_list = []
    for idx, img in enumerate(raw['image']):
        image_list.append(img)
        label_list.append(raw['label'][idx])
    scaled = np.array(image_list) / 255.0
    labels = np.array(label_list)
    out_frame = pd.DataFrame(columns=['image', 'label'])
    for idx, img in enumerate(scaled):
        out_frame.loc[idx] = ({'image': img, 'label': labels[idx]})
    with open(data, 'wb') as dst:
        pickle.dump(out_frame, dst, pickle.HIGHEST_PROTOCOL)
# Preprocessing runs on a slim Python image; numpy/pandas are installed
# at step start-up.
preprocess_op = components.create_component_from_func(
    preprocess, base_image='python:3.9',
    packages_to_install=['numpy==1.23.2', 'pandas==1.4.2']
)
############################################################################################################################################
############################################################################################################################################
############################################################################################################################################
def model_generation(pretrain_model: OutputPath('TFModel')):
    """KFP component: build and save an untrained Fashion-MNIST classifier.

    Architecture: Flatten(28x28) -> Dense(128, relu) -> Dense(10 logits),
    compiled with Adam and sparse categorical cross-entropy on logits.
    """
    import tensorflow as tf

    net = tf.keras.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10)
    ])
    net.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'],
    )
    net.save(pretrain_model)
# Model-building component (only needs the tensorflow base image).
load_model_op = components.create_component_from_func(
    model_generation, base_image='tensorflow/tensorflow'
)
############################################################################################################################################
############################################################################################################################################
############################################################################################################################################
def train_op(
    train_dataset: InputPath('Dataset'),
    pre_model: InputPath('TFModel'),
    trained_model: OutputPath('TFModel')
):
    """KFP component: fit the saved model on the pickled training set.

    Loads the DataFrame written by preprocess, trains for 20 epochs and
    saves the fitted model to trained_model.
    """
    import pickle
    import tensorflow as tf
    from tensorflow import keras
    import numpy as np
    import pandas as pd

    with open(train_dataset, 'rb') as src:
        frame = pickle.load(src)
    image_list = []
    label_list = []
    for idx, img in enumerate(frame['image']):
        image_list.append(img)
        label_list.append(frame['label'][idx])
    xs = np.array(image_list)
    ys = np.array(label_list)
    net = keras.models.load_model(pre_model)
    net.fit(xs, ys, epochs=20)
    net.save(trained_model)
# Training component; pandas is needed to unpickle the DataFrame input.
train_result_op = components.create_component_from_func(
    train_op,
    base_image='tensorflow/tensorflow',
    packages_to_install=['pandas==1.4.2']
)
def model_prediction(
    test_dataset: InputPath('Dataset'),
    trained_model: InputPath('TFModel')
) -> NamedTuple('Outputs', [('predict', str), ('label', str)]):
    """KFP component: classify one randomly chosen sample and report it.

    Returns human-readable strings for the predicted class and the true
    label of one randomly selected row of the pickled dataset.
    """
    from tensorflow import keras
    import tensorflow as tf
    import pickle
    import pandas as pd
    import numpy as np
    import random

    # Fashion-MNIST class names indexed by label id.
    class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
                   'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    with open(test_dataset, 'rb') as file:
        tr_data = pickle.load(file)
    images = []
    labels = []
    for i, item in enumerate(tr_data['image']):
        images.append(item)
        labels.append(tr_data['label'][i])
    images = np.array(images)
    labels = np.array(labels)
    # NOTE(review): assumes the dataset holds at least 1000 rows — confirm.
    test_num = random.randrange(1, 1000)
    model = keras.models.load_model(trained_model)
    predic_image = images[test_num]
    predic_label = labels[test_num]
    test = tf.expand_dims(predic_image, 0)  # add the batch axis
    predictions_single = model.predict(test)
    predict_value = tf.math.argmax(tf.nn.softmax(predictions_single[0]))
    predict_value = f'predict result : {class_names[predict_value]}'
    label_value = f'label result: {class_names[predic_label]}'
    return (predict_value, label_value)
# Prediction component; pandas is needed to unpickle the DataFrame input.
model_prediction_op = components.create_component_from_func(
    model_prediction,
    base_image='tensorflow/tensorflow',
    packages_to_install=['pandas==1.4.2']
)
@func_to_container_op
def print_text(text1: str, text2: str):
    """Lightweight component that logs the prediction and the true label."""
    for line in (text1, text2):
        print(line)
@dsl.pipeline(name='tak test fashion mnist pipeline')
def fashion_mnist_pipeline():
    """Wire the components: load data -> preprocess -> build model ->
    train -> predict one sample -> print the result."""
    train_data_load_task = train_data_load_op()
    preprocess_task = preprocess_op(
        train_data_load_task.outputs['output_dataset_train_data']
    )
    model_task = load_model_op()
    train_task = train_result_op(
        preprocess_task.outputs['data'],
        model_task.outputs['pretrain_model']
    )
    predict_task = model_prediction_op(
        preprocess_task.outputs['data'],
        train_task.outputs['trained_model']
    )
    print_task1 = print_text(predict_task.outputs['predict'], predict_task.outputs['label'])
if __name__ == '__main__':
import kfp
kfp.compiler.Compiler().compile(fashion_mnist_pipeline, 'fashion_mnist_pipeline.yaml') | matildalab-private/kfp_example | fashion_mnist/train_prediction.py | train_prediction.py | py | 6,745 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "kfp.components.OutputPath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pick... |
30715904956 | import traceback
from http import HTTPStatus
from flask import request, abort, jsonify
from .constants import REQ_ARG_PAGE, REQ_ARG_PER_PAGE, REQ_ARG_PAGINATION, REQ_ARG_TYPE, ENTITY_TYPE
from .app_cfg import max_items_per_page
def get_request_arg(arg: str, default: int) -> int:
    """
    Read a positive integer from the request query string, aborting with
    400 when the supplied value is below 1.
    :param arg: argument name
    :param default: value used when the argument is absent
    :return:
    """
    value = request.args.get(arg, default, type=int)
    if value < 1:
        abort(HTTPStatus.BAD_REQUEST.value)
    return value
def get_request_page() -> int:
    """
    Read the requested page number (default 1) from the query string.
    :return:
    """
    return get_request_arg(REQ_ARG_PAGE, 1)
def get_request_per_page(default: int) -> int:
    """
    Read the items-per-page value, capped at the configured maximum and
    aborting with 400 for values below 1.
    :param default: value used when the argument is absent
    :return:
    """
    requested = request.args.get(REQ_ARG_PER_PAGE, default, type=int)
    if requested < 1:
        abort(HTTPStatus.BAD_REQUEST.value)
    return min(requested, max_items_per_page())
def get_request_pagination() -> bool:
    """
    Read the pagination flag ('y'/'n', default 'y') from the query string.
    :return:
    """
    flag = request.args.get(REQ_ARG_PAGINATION, 'y', type=str)
    return flag.lower() == 'y'
def get_request_type() -> str:
    """
    Read the entity response type ('entity' or 'map'), lower-cased.
    :return:
    """
    return request.args.get(REQ_ARG_TYPE, ENTITY_TYPE, type=str).lower()
def pagination(page: int, num_per_page: int, total: int) -> (int, int, int, str):
    """
    Compute slice bounds for one page of results.
    :param page: requested page (1-based)
    :param num_per_page: items per page
    :param total: total number of items
    :return: tuple of (offset for start, limit for end, result code,
        error message or None)
    """
    offset = num_per_page * (page - 1)
    limit = num_per_page * page
    if offset > total:
        # Page starts beyond the data set.
        return offset, limit, HTTPStatus.BAD_REQUEST.value, 'Not a valid page number'
    return offset, min(limit, total), HTTPStatus.OK.value, None
def _make_result(success: bool, error: int = None, message: str = None, **kwargs):
    """
    Assemble a json response body.
    :param success: True or False
    :param error: HTTP error code when success is False
    :param message: HTTP error message when success is False
    :param kwargs: extra key/value pairs merged into the body
    :return:
    """
    body = {'success': success}
    if error is not None:
        body["error"] = error
        body["message"] = '' if message is None else message
    # kwargs may intentionally override the standard keys.
    return jsonify({**body, **kwargs})
def success_result(**kwargs):
    """
    Build a success json result carrying the given key/value pairs.
    :param kwargs: result data as key/value pairs
    :return:
    """
    return _make_result(True, **kwargs)
def key_or_alias(key: str, aliases: dict) -> str:
    """
    Resolve a response-body field name through an optional alias map.
    :param key: standard field name
    :param aliases: optional map of standard name -> alias (may be None)
    :return: the alias when one is defined, otherwise the key itself
    """
    # `key in aliases` rather than the redundant `key in aliases.keys()`.
    if aliases is not None and key in aliases:
        return aliases[key]
    return key
def paginated_success_result(data: list, page: int, per_page: int, total: int, offset: int, limit: int,
                             aliases: dict = None, **kwargs):
    """
    Build a paginated json result.
    :param data: result data
    :param page: requested page
    :param per_page: items per page
    :param total: total number of items
    :param offset: offset of start of data
    :param limit: limit of end of data
    :param aliases: optional aliases for the standard body fields
    :param kwargs: optional additional entries to include
    :return:
    """
    num_pages = int(total / per_page)
    if num_pages * per_page < total:
        num_pages += 1  # partial final page
    body = {
        key_or_alias(name, aliases): value
        for name, value in (
            ("data", data),
            ("page", page),
            ("per_page", per_page),
            ("num_pages", num_pages),
            ("total", total),
            ("offset", offset),
            ("limit", limit),
        )
    }
    return success_result(**{**body, **kwargs})
def error_result(error: int, message: str, **kwargs):
    """
    Build a failure json result.
    :param error: HTTP error code
    :param message: HTTP error message
    :param kwargs: extra key/value pairs
    :return:
    """
    return _make_result(False, error=error, message=message, **kwargs)
def http_error_result(http_code: HTTPStatus, error):
    """
    Build a failure (json body, status code) pair from an exception.
    :param http_code: HTTP status code
    :param error: error exception; its instance `data` dict, when
        present, is merged into the response body
    :return:
    """
    extra = error.data if "data" in vars(error) else {}
    return error_result(http_code.value, http_code.phrase, **extra), http_code.value
def print_exc_info():
    """Print the current exception's traceback, line by line, to stdout."""
    print("\n".join(traceback.format_exc().splitlines()))
| ibuttimer/full-stack-trivia | backend/flaskr/util/misc.py | misc.py | py | 5,259 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.args.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.ab... |
34728957249 | from django.shortcuts import render, redirect
from .models import Device, VirtualDevice, Permissions, TrafficData, Device
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.contrib.auth.forms import UserCreationForm
from .forms import DeviceForm, VirtualDeviceForm
from django.contrib.auth.models import User
from django.db.models import Q
def index(request):
    """Render the dashboard with every device, virtual device,
    permission and traffic-data record."""
    context = {
        'devices': Device.objects.all(),
        'virtual_devices': VirtualDevice.objects.all(),
        'permissions': Permissions.objects.all(),
        'traffic_data': TrafficData.objects.all(),
    }
    return render(request, 'traffic/index.html', context)
def device_list(request):
    """List all devices; users holding traffic.add_device also get the
    creation form and may POST a new device."""
    devices = Device.objects.all()
    form = None
    if request.user.has_perm('traffic.add_device'):
        print(f"User {request.user.email} has the 'add_device' permission.")
        if request.method == 'POST':
            form = DeviceForm(request.POST)
            if form.is_valid():
                form.save()
                return redirect('device_list')
        else:
            form = DeviceForm()
    # An invalid POST falls through with the bound form so errors render.
    return render(request, 'traffic/device_list.html', {'devices': devices, 'form': form})
def virtual_device_list(request):
    """List all virtual devices; users holding traffic.add_virtualdevice
    also get the creation form and may POST a new virtual device."""
    virtual_devices = VirtualDevice.objects.all()
    form = None
    if request.user.has_perm('traffic.add_virtualdevice'):
        if request.method == 'POST':
            form = VirtualDeviceForm(request.POST)
            if form.is_valid():
                form.save()
                return redirect('virtual_device_list')
        else:
            form = VirtualDeviceForm()
    # form stays None for users without the permission.
    return render(request, 'traffic/virtual_device_list.html',
                  {'virtual_devices': virtual_devices, 'form': form})
def permissions_list(request):
    """Render the permissions page listing every registered user."""
    return render(request, 'traffic/permissions_list.html',
                  {'users': User.objects.all()})
def traffic_data_list(request):
    """Search devices by (partial) name.

    Query parameters:
        search_device -- substring to match against the device name.
        device_type   -- 'normal' searches traffic_device, 'virtual' searches
                         traffic_virtualdevice; anything else yields no rows.

    SECURITY FIX: the previous implementation concatenated ``search_device``
    straight into the SQL string, allowing SQL injection.  The query is now
    parameterized via the ``params`` argument of ``Manager.raw()``.
    """
    search_device = request.GET.get('search_device')
    device_type = request.GET.get('device_type')
    lista = []
    if search_device and device_type:
        # LIKE wildcard pattern is passed as a bound parameter, never
        # interpolated into the SQL text.
        pattern = '%' + search_device + '%'
        if device_type == 'normal':
            lista = list(Device.objects.raw(
                "SELECT * FROM traffic_device WHERE name LIKE %s", [pattern]))
        elif device_type == 'virtual':
            lista = list(VirtualDevice.objects.raw(
                "SELECT * FROM traffic_virtualdevice WHERE name LIKE %s", [pattern]))
    # Render the page with the filtered results (empty list when no search).
    return render(request, 'traffic/traffic_data_list.html', {'traffic_data_list': lista})
def my_protected_view(request):
    """Render a bare template; placeholder for a view meant to sit behind auth."""
    return render(request, 'my_protected_template.html')
def login_view(request):
    """Authenticate a user from the login form and start a session.

    On success redirects to 'index'; on failure re-renders the login page
    with an error message; on GET simply shows the form.
    """
    if request.method == 'POST':
        # Credentials submitted through the login form.
        username = request.POST.get('username')
        password = request.POST.get('password')
        user = authenticate(request, username=username, password=password)
        if user is None:
            # Authentication failed: show the form again with an error.
            error_message = "Nombre de usuario o contraseña incorrectos."
            return render(request, 'login.html', {'error_message': error_message})
        # Open a session for the authenticated user, then redirect.
        login(request, user)
        return redirect('index')
    # Plain GET: just render the login page.
    return render(request, 'login.html')
def signup_view(request):
if request.method == 'POST':
# Obtener los datos del formulario de registro
form = UserCreationForm(request.POST)
# Verificar si el formulario es válido
if form.is_valid():
# Crear la cuenta de usuario
user = form.save()
# Iniciar sesión para el nuevo usuario
login(request, user)
# Redirigir a una página después del registro (puedes cambiar esto)
return redirect('index')
else:
# Si no es una solicitud POST, simplemente renderizar el formulario de registro
form = UserCreationForm()
return render(request, 'signup.html', {'form': form}) | Lema25/SmartTraffic_Control | smarttraffic/traffic/views.py | views.py | py | 5,227 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "models.Device.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.Device.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.Device",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "mo... |
10425974706 | import logging
import math
import os
import pickle
import random
import signal
import sys
import uuid
from time import sleep
from threading import Thread
import rpyc
from rpyc.utils.server import ThreadedServer
from utils import LOG_DIR
from conf import block_size, replication_factor, minions_conf
# TCP port the master's RPC server listens on (used by ThreadedServer below).
MASTER_PORT = 2131
# Issue: State related functions may not work correctly after the Master
# definition changed.
def get_state():
    """Return a picklable snapshot of the master's metadata tables."""
    master = MasterService.exposed_Master
    return {'file_table': master.file_table,
            'block_mapping': master.block_mapping}
def set_state(state):
    """Restore the master's metadata tables from a get_state() snapshot."""
    master = MasterService.exposed_Master
    master.file_table = state['file_table']
    master.block_mapping = state['block_mapping']
def int_handler(sig, frame):
    """SIGINT handler: persist the master's state to fs.img, then exit.

    FIX: the file is now opened via a context manager so it is flushed and
    closed before the process exits (the old open() handle was never closed).
    """
    with open('fs.img', 'wb') as image:
        pickle.dump(get_state(), image)
    sys.exit(0)
def set_conf():
    """Apply the static configuration from conf.py to the master class and,
    when a previous dump exists, restore the saved file-system state."""
    # load and use conf file, restore from dump if possible.
    master = MasterService.exposed_Master
    master.block_size = block_size
    master.replication_factor = replication_factor
    # Register every configured minion and start it with an empty content list.
    for mid, loc in minions_conf.items():
        host, port = loc.split(":")
        master.minions[mid] = (host, port)
        master.minion_content[mid] = []
    # Each block is replicated on distinct minions, so there must be at least
    # replication_factor of them.
    assert len(minions_conf) >= master.replication_factor,\
        'not enough minions to hold {} replications'.format(\
        master.replication_factor)
    # if found saved image of master, restore master state.
    if os.path.isfile('fs.img'):
        set_state(pickle.load(open('fs.img', 'rb')))
    logging.info("Current Config:")
    logging.info("Block size: %d, replication_faction: %d, minions: %s",
                 master.block_size, master.replication_factor,
                 str(master.minions))
class MasterService(rpyc.Service):
    """RPC service exposing the DFS master (namenode).

    All metadata lives in class attributes of ``exposed_Master`` so every
    rpyc connection shares the same state.
    """

    class exposed_Master(object):
        # Map file_name to block_ids
        # {"file_name": [bid1, bid2, bid3]}
        file_table = {}
        # Map block_id to where it's saved
        # {"bid": [mid1, mid2, mid3]}
        block_mapping = {}
        # Map mid to what's saved on it
        # {"mid": [bid1, bid2, bid3]}
        minion_content = {}
        # Register the information of every minion: {"mid": (host, port)}
        # TODO: Merge 'minions' and 'minion_content'
        minions = {}
        block_size = 0
        replication_factor = 0
        # Set to 1 once the background health-monitor thread has started.
        health_monitoring = 0

        def exposed_read(self, fname):
            """Return [(block_id, [minion_ids])] for fname, or None if unknown."""
            if fname in self.__class__.file_table:
                return [(block_id, self.__class__.block_mapping[block_id])
                        for block_id in self.__class__.file_table[fname]]
            return None

        def exposed_delete(self, fname):
            """Forget all master metadata for fname (minion storage untouched)."""
            for block_id in self.__class__.file_table[fname]:
                for mid in self.__class__.block_mapping[block_id]:
                    self.__class__.minion_content[mid].remove(block_id)
                del self.__class__.block_mapping[block_id]
            del self.__class__.file_table[fname]

        def exposed_write(self, dest, size):
            """Allocate blocks for a file of `size` bytes, overwriting dest."""
            if self.exists(dest):
                # Remove old replicas from the minions, then the metadata.
                self.wipe(dest)
                self.exposed_delete(dest)
            self.__class__.file_table[dest] = []
            num_blocks = self.calc_num_blocks(size)
            blocks = self.alloc_blocks(dest, num_blocks)
            return blocks

        def exposed_get_block_size(self):
            return self.__class__.block_size

        def exposed_get_minions(self):
            return self.__class__.minions

        def exposed_replicate(self, mid):
            """Re-create every block hosted on the lost minion `mid` elsewhere."""
            for block_id in self.__class__.minion_content[mid]:
                locations = self.__class__.block_mapping[block_id]
                # TODO: Change locations to double linked list
                source_mid = random.choice([x for x in locations if x != mid])
                target_mid = random.choice([x for x in self.__class__.minions if
                                            x not in locations])
                # Replicate block from source to target
                self.replicate_block(block_id, source_mid, target_mid)
                # Update information registered on Master
                self.__class__.block_mapping[block_id].append(target_mid)
                self.__class__.minion_content[target_mid].append(block_id)

        def exposed_health_report(self):
            """Start the monitor thread on first call and return minion status."""
            if not self.__class__.health_monitoring:
                Thread(target=self.health_monitor).start()
                self.__class__.health_monitoring = 1
            return self.health_check()

        #######################################################################
        # Private functions
        #######################################################################
        def alloc_blocks(self, dest, num):
            """Create `num` block ids for dest and assign replica minions."""
            blocks = []
            for _ in range(num):
                block_uuid = uuid.uuid1()
                # TODO: Assigning algorithm.
                # FIX: random.sample() requires a sequence -- passing a dict
                # view was deprecated in 3.9 and raises TypeError since 3.11.
                nodes_ids = random.sample(list(self.__class__.minions.keys()),
                                          self.__class__.replication_factor)
                self.__class__.block_mapping[block_uuid] = nodes_ids
                for mid in nodes_ids:
                    self.__class__.minion_content[mid].append(block_uuid)
                blocks.append((block_uuid, nodes_ids))
                self.__class__.file_table[dest].append(block_uuid)
            return blocks

        def calc_num_blocks(self, size):
            """Number of fixed-size blocks needed to hold `size` bytes."""
            return int(math.ceil(float(size)/self.__class__.block_size))

        def minion_lost_handler(self, status):
            # TODO
            logging.info('1 or more minion dead, status: %s', format(status))

        def health_monitor(self):
            """Poll minions forever.

            SIDE EFFECT: calls minion_lost_handler whenever any minion is down.
            """
            while 1:
                minions_status = self.health_check()
                if not all(minions_status.values()):
                    self.minion_lost_handler(minions_status)
                sleep(.1)

        def health_check(self):
            """Ping every known minion.

            RETURN {minion_id -> 1 if alive else 0}
            """
            res = {}
            for m, (host, port) in self.__class__.minions.items():
                try:
                    con = rpyc.connect(host, port=port)
                    minion = con.root.Minion()
                    res[m] = 1 if minion.ping() == 'pong' else 0
                except ConnectionRefusedError:
                    res[m] = 0
            return res

        def exists(self, f):
            return f in self.__class__.file_table

        def replicate_block(self, block_id, source, target):
            """Ask the `source` minion to copy block_id to the `target` minion."""
            source_host, source_port = self.__class__.minions[source]
            target_host, target_port = self.__class__.minions[target]
            con = rpyc.connect(source_host, port=source_port)
            minion = con.root.Minion()
            minion.replicate(block_id, target_host, target_port)

        def wipe(self, fname):
            """Delete every stored replica of fname's blocks on the minions."""
            for block_uuid in self.__class__.file_table[fname]:
                node_ids = self.__class__.block_mapping[block_uuid]
                for m in [self.exposed_get_minions()[_] for _ in node_ids]:
                    host, port = m
                    con = rpyc.connect(host, port=port)
                    minion = con.root.Minion()
                    minion.delete(block_uuid)
            return
if __name__ == "__main__":
    # Log to LOG_DIR/master with timestamped, leveled messages.
    logging.basicConfig(filename=os.path.join(LOG_DIR, 'master'),
                        format='%(asctime)s--%(levelname)s:%(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=logging.DEBUG)
    set_conf()
    # Dump state to disk on Ctrl-C before exiting.
    signal.signal(signal.SIGINT, int_handler)
    # CONSISTENCY FIX: use the module-level MASTER_PORT constant instead of
    # repeating the literal 2131, so the port is defined in one place.
    t = ThreadedServer(MasterService, port=MASTER_PORT)
    t.start()
| lyu-xg/PyDFS | pydfs/master.py | master.py | py | 7,721 | python | en | code | null | github-code | 1 | [
{
"api_name": "pickle.dump",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "conf.block_size",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "conf.replication_factor",
"l... |
18481392221 | """
Define helpful logging functions.
"""
# STD
from collections import defaultdict
from functools import wraps
from genericpath import isfile
from os import listdir
from os.path import join
from typing import Optional, List, Callable
import os
# EXT
import numpy as np
import torch
# PROJECT
from src.utils.types import LogDict, AggregatedLogs, StepSize
class StatsCollector:
    """
    Class that is used to retrieve some interesting information to plot deep from some class, modifying the least
    amount of code from the class as possible. In order to achieve this, we decorate function with desirable arguments
    and return values with class methods of this class. This way the information can stored easily in a static version
    of this class.
    """
    # Class-level store shared by all decorated functions; cleared by wipe().
    _stats = {}

    @classmethod
    def collect_deltas(cls, func: Callable) -> Callable:
        """
        Decorate the compute_recoding_gradient() function of the recoding mechanism and collect information about the
        error signals.
        """
        @wraps(func)
        def wrapper(delta: torch.Tensor, *args) -> None:
            if "deltas" not in cls._stats.keys():
                cls._stats["deltas"] = []
            cls._stats["deltas"].append(delta.detach())  # Don't track gradients
            func(delta, *args)
        return wrapper

    @classmethod
    def collect_recoding_gradients(cls, func: Callable) -> Callable:
        """
        Decorate the recode() function of the recoding mechanism in order to collect data about the recoding gradients.
        """
        @wraps(func)
        def wrapper(self, hidden: torch.Tensor, step_size: StepSize, name: Optional[str]) -> torch.Tensor:
            # NOTE(review): assumes hidden.recoding_grad was populated by the
            # recoding backward pass before recode() runs -- confirm in caller.
            grad_norm = torch.norm(hidden.recoding_grad)
            if name is None:
                # Anonymous stats are kept in flat lists.
                if "recoding_grads" not in cls._stats.keys():
                    cls._stats["recoding_grads"], cls._stats["step_sizes"] = [], []
                cls._stats["recoding_grads"].append(grad_norm)
                cls._stats["step_sizes"].append(step_size)
            else:
                # Named stats are kept per-name in dicts of lists.
                if "recoding_grads" not in cls._stats.keys():
                    cls._stats["recoding_grads"], cls._stats["step_sizes"] = defaultdict(list), defaultdict(list)
                cls._stats["recoding_grads"][name].append(grad_norm)
                cls._stats["step_sizes"][name].append(step_size)
            return func(self, hidden, step_size, name)
        return wrapper

    @classmethod
    def _reduce_deltas(cls, deltas: List[torch.Tensor]):
        """
        Reduce all delta values and return the mean.
        """
        deltas = torch.stack(deltas)
        mean_delta = torch.flatten(deltas).mean()
        return mean_delta.item()

    @classmethod
    def _reduce_recoding_gradients(cls, recoding_gradients: List[torch.Tensor]):
        """
        Reduce all recoding gradients by computing the average norm.
        """
        recoding_gradients = torch.stack(recoding_gradients)
        mean_grad_norm = recoding_gradients.mean()
        return mean_grad_norm.item()

    @classmethod
    def _reduce_step_sizes(cls, step_sizes: List[StepSize]):
        """
        Reduce step sizes by returning the average.
        """
        step_sizes = torch.stack(step_sizes)
        mean_step_size = torch.flatten(step_sizes).mean()
        return mean_step_size.item()

    @classmethod
    def reduce(cls) -> dict:
        """
        Reduce collected data in a way that it can be written easily into a log.
        """
        # Dispatch table: one reduction function per collected statistic.
        reduction_funcs = {
            "deltas": cls._reduce_deltas,
            "recoding_grads": cls._reduce_recoding_gradients,
            "step_sizes": cls._reduce_step_sizes
        }
        reduced_stats = {}
        for stat, data in cls._stats.items():
            if type(data) in (dict, defaultdict):  # Nested stats
                reduced_stats[stat] = {}
                for name in data.keys():
                    reduced_stats[stat][name] = reduction_funcs[stat](cls._stats[stat][name])
            else:
                reduced_stats[stat] = reduction_funcs[stat](cls._stats[stat])
        return reduced_stats

    @classmethod
    def get_stats(cls) -> dict:
        """
        Return all the collected statistics in a way that is easy to write into a log.
        """
        reduced_stats = cls.reduce()
        return reduced_stats

    @classmethod
    def wipe(cls) -> None:
        """
        Release all collected information.
        """
        cls._stats = {}

    @staticmethod
    def flatten_stats(stats: dict) -> dict:
        """
        Flatten a dictionary of stats by removing any nesting.
        """
        flattened_stats = {}
        for key, value in stats.items():
            if type(value) in (dict, defaultdict):
                # Nested entries become "<stat>_<name>" keys.
                for value_name, val in value.items():
                    flattened_stats[f"{key}_{value_name}"] = val
            else:
                flattened_stats[key] = value
        return flattened_stats
def remove_logs(log_dir: str, model_name: str) -> None:
    """
    Delete the train and validation logs left over from a previous run.

    Parameters
    ----------
    log_dir: str
        Logging directory in which to look for out logs.
    model_name: str
        Name of the model the logs belong to.
    """
    if log_dir is None:
        return
    for split in ("train", "val"):
        log_path = f"{log_dir}/{model_name}_{split}.log"
        if os.path.exists(log_path):
            os.remove(log_path)
def log_to_file(data: dict, log_path: Optional[str] = None) -> None:
    """
    Append one row of data to a tab-separated text log.

    The header row (the dict's keys) is written only when the file does not
    exist yet; every call then appends one row of the dict's values.

    Parameters
    ----------
    data: Any
        Data to be logged.
    log_path: str
        File the data is going to be logged to (if given); None is a no-op.
    """
    if log_path is None:
        return
    write_header = not os.path.exists(log_path)
    # Opening in append mode creates the file when it is missing, so a single
    # code path covers both the first and the subsequent writes.
    with open(log_path, "a") as log_file:
        if write_header:
            log_file.write("\t".join(map(str, data.keys())) + "\n")
        log_file.write("\t".join(map(str, data.values())) + "\n")
def read_log(path: str) -> "LogDict":
    """
    Read a tab-separated log file into a dict mapping each header to a numpy
    array of that column's (float) values.
    """
    data = defaultdict(lambda: np.array([]))
    with open(path, "r") as log_file:
        header_row, *rows = log_file.readlines()
    columns = header_row.strip().split()
    for row in rows:
        # zip() truncates to the shorter of the two, like the original loop.
        for column, cell in zip(columns, row.strip().split()):
            data[column] = np.append(data[column], float(cell))
    return data
def merge_logs(log1: "LogDict", log2: "LogDict") -> "LogDict":
    """
    Merge two log dicts by stacking their data columns along a leading
    run axis (1-D columns are promoted to shape (1, n) first).
    """
    assert log1.keys() == log2.keys(), "Logs must have the same headers!"

    def _as_2d(array):
        # Promote a plain column to a single-run matrix.
        return array if len(array.shape) == 2 else array[np.newaxis, ...]

    return {
        header: np.concatenate([_as_2d(log1[header]), _as_2d(log2[header])], axis=0)
        for header in log1.keys()
    }
def aggregate_logs(paths: List[str], name_func: Optional[Callable] = None) -> "AggregatedLogs":
    """
    Aggregate the data from multiple logs into one dict of LogDicts.  Logs
    whose paths map to the same name via name_func are merged column-wise
    (they must share headers and row counts).

    The default naming function strips everything from the last underscore on,
    so "model_train.log" and "model_val.log" both map to "model".
    """
    if name_func is None:
        name_func = lambda path: path[:path.rfind("_")]
    logs = {}
    for path in paths:
        name = name_func(path)
        log = read_log(path)
        # Merge into an existing entry, or start a new one.
        logs[name] = merge_logs(logs[name], log) if name in logs else log
    return logs
def get_logs_in_dir(log_dir: str, selection_func: Callable = lambda path: True) -> List[str]:
    """
    Return the paths of the regular files in log_dir for which selection_func
    returns True (directories are always skipped).
    """
    selected = []
    for entry in listdir(log_dir):
        full_path = join(log_dir, entry)
        if isfile(full_path) and selection_func(full_path):
            selected.append(full_path)
    return selected
| Kaleidophon/tenacious-toucan | src/utils/log.py | log.py | py | 8,806 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Callable",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "functools.wraps",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
... |
4020893724 | from Modulos.Modulo_System import(
Command_Run
)
#from Modulos.Modulo_Text import(
# Text_Read
#)
from Modulos.Modulo_Files import(
Files_List
)
from Modulos import Modulo_Util_Debian as Util_Debian
from Modulos.Modulo_Language import get_text as Lang
from Interface import Modulo_Util_Qt as Util_Qt
from pathlib import Path
import subprocess
import sys
from functools import partial
from PyQt6.QtWidgets import (
QApplication,
QWidget,
QDialog,
QScrollArea,
QLabel,
QPushButton,
QLineEdit,
QCheckBox,
QComboBox,
QVBoxLayout,
QHBoxLayout,
)
from PyQt6.QtCore import Qt
# File the generated shell commands are written to for review/execution.
cfg_file = f'Script_CFG.txt'
# Directory holding the per-application package list files.
cfg_dir = 'Script_Apps/'
def Config_Save(parent=None, cfg=None):
    """Open the command-runner dialog for the given shell command string.

    Parameters
    ----------
    parent : QWidget or None
        Parent widget for the modal dialog.
    cfg : str or None
        Shell command(s) to run/record; None is a no-op.

    FIX: compare against None with ``is`` (identity), not ``==``, and use an
    early return instead of an empty branch.
    """
    if cfg is None:
        return
    Util_Qt.Dialog_Command_Run(
        parent=parent,
        cmd=cfg,
        cfg_file=cfg_file
    ).exec()
class Window_Main(QWidget):
    """Main window: one button per system-preparation task."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setWindowTitle('Preparing OS')
        self.resize(308, 308)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical sections - Options
        vbox_main.addStretch()
        button_auto = QPushButton( Lang('auto') )
        button_auto.clicked.connect(self.evt_automatic)
        vbox_main.addWidget(button_auto)
        button_apps = QPushButton( Lang('app_menu') )
        button_apps.clicked.connect(self.evt_application)
        vbox_main.addWidget(button_apps)
        button_apt = QPushButton( 'Aptitude' )
        button_apt.clicked.connect(self.evt_aptitude)
        vbox_main.addWidget(button_apt)
        button_repo = QPushButton( Lang('repos_nonfree') )
        button_repo.clicked.connect(self.evt_repository)
        vbox_main.addWidget(button_repo)
        button_3_buffer = QPushButton( Lang('on_3_buffer') )
        button_3_buffer.clicked.connect(self.evt_triple_buffer)
        vbox_main.addWidget(button_3_buffer)
        button_mouse_cfg = QPushButton(
            f'{Lang("cfg")} - Mouse'
        )
        button_mouse_cfg.clicked.connect(self.evt_mouse_cfg)
        vbox_main.addWidget(button_mouse_cfg)
        # Vertical section - Run an arbitrary command
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        button_exec_cmd = QPushButton(
            f'{Lang("exec")} {Lang("cmd")}'
        )
        button_exec_cmd.clicked.connect(self.evt_exec_command)
        hbox.addWidget( button_exec_cmd )
        hbox.addStretch()
        self.entry_exec_cmd = QLineEdit(
            placeholderText=Lang('cmd')
        )
        self.entry_exec_cmd.returnPressed.connect(self.evt_exec_command)
        hbox.addWidget( self.entry_exec_cmd )
        # Vertical section - View the generated commands
        vbox_main.addStretch()
        button_view_cfg = QPushButton(
            Lang('view_cfg')
        )
        button_view_cfg.clicked.connect(self.evt_view_cfg)
        vbox_main.addWidget(button_view_cfg)
        # Final vertical section, exit (currently disabled)
        #vbox_main.addStretch()
        #button_exit = QPushButton( Lang('exit') )
        #button_exit.clicked.connect(self.evt_exit)
        #vbox_main.addWidget(button_exit)
        # Done, show the window
        self.show()

    def evt_automatic(self):
        """Open the automatic-mode dialog, hiding this window meanwhile."""
        self.hide()
        Dialog_Automatic(
            parent=self
        ).exec()
        self.show()

    def evt_mouse_cfg(self):
        """Open the mouse configuration dialog."""
        self.hide()
        Dialog_mouse_config(
            parent=self
        ).exec()
        self.show()

    def evt_triple_buffer(self):
        """Open the triple-buffer configuration dialog."""
        self.hide()
        Dialog_TripleBuffer(
            parent=self
        ).exec()
        self.show()

    def evt_application(self):
        """Open the application-groups menu dialog."""
        self.hide()
        Dialog_apps_menu(
            parent=self,
        ).exec()
        self.show()

    def evt_aptitude(self):
        """Open the aptitude operations dialog."""
        self.hide()
        Dialog_Aptitude(
            parent=self
        ).exec()
        self.show()

    def evt_repository(self):
        """Save the command that enables the non-free repositories."""
        Config_Save(
            parent=self,
            cfg=Util_Debian.Repository()
        )

    def evt_exec_command(self):
        """Run the typed command in a new terminal (no-op for empty input)."""
        command = self.entry_exec_cmd.text()
        if command == '':
            pass
        else:
            Command_Run(
                cmd=command,
                open_new_terminal=True,
                text_input=Lang('continue_enter')
            )

    def evt_view_cfg(self):
        """Show the accumulated command file in a read-only text dialog."""
        self.hide()
        Util_Qt.Dialog_TextEdit(
            self,
            text=cfg_file,
            edit=False
        ).exec()
        self.show()

    #def evt_exit(self):
    #    self.close()
class Dialog_Automatic(QDialog):
    """Dialog that assembles one combined command for an unattended setup."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Automatic Mode')
        self.resize(512, 128)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical section - Desktop environment option
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        self.checkbox_app_desktop = QCheckBox(Lang('app_desk'))
        hbox.addWidget(self.checkbox_app_desktop)
        hbox.addStretch()
        self.combobox_app_desktop = QComboBox()
        some_desktops = [
            'Desktop-xfce4',
            'Desktop-kdeplasma',
            'Desktop-gnome3',
            'Desktop-lxde',
            'Desktop-mate'
        ]
        for desktop in some_desktops:
            self.combobox_app_desktop.addItem(desktop)
        hbox.addWidget(self.combobox_app_desktop)
        # Vertical section - Optional apps option
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        self.checkbox_app_optional = QCheckBox(Lang('app_optional'))
        hbox.addWidget(self.checkbox_app_optional)
        hbox.addStretch()
        self.combobox_app_optional = QComboBox()
        self.path_app_optional = f'{cfg_dir}App_Optional/'
        # Generate the default optional-app list files when none exists yet.
        if(
            Path(
                self.path_app_optional +
                'App_Optional-wine.txt'
            ).exists() or
            Path(
                self.path_app_optional +
                'App_Optional-flatpak.txt'
            ).exists() or
            Path(
                self.path_app_optional +
                'App_Optional-woeusb-ng.txt'
            ).exists()
        ):
            pass
        else:
            Util_Debian.App('Optional-wine')
            Util_Debian.App('Optional-flatpak')
            Util_Debian.App('Optional-woeusb-ng')
        archives = Files_List(
            files='App_Optional-*.txt',
            path=self.path_app_optional,
            remove_path=True
        )
        for app_optional in archives:
            self.combobox_app_optional.addItem(app_optional)
        hbox.addWidget(self.combobox_app_optional)
        # Final vertical section - Start
        vbox_main.addStretch()
        button_start = QPushButton(Lang('start'))
        button_start.clicked.connect(self.evt_start_automatic_mode)
        vbox_main.addWidget(button_start)

    def evt_start_automatic_mode(self):
        """Assemble and save the full command according to the checked options."""
        # If the desktop checkbox is ticked, install the selected desktop.
        config_apps = ''
        if self.checkbox_app_desktop.isChecked() == True:
            config_apps += Util_Debian.App(
                self.combobox_app_desktop.currentText(),
                '&&\n\n'
            )
        else:
            pass
        # If the optional-apps checkbox is ticked, append the selected bundle.
        if self.checkbox_app_optional.isChecked() == True:
            config_apps += Util_Debian.App(
                txt='&&\n\n',
                txt_title=(
                    f'{Lang("app")} / '
                    f'{self.combobox_app_optional.currentText()}'
                ),
                txt_add='',
                cfg_dir='./',
                cfg_file=(
                    self.path_app_optional +
                    self.combobox_app_optional.currentText()
                ),
                opc='continue'
            )
        else:
            pass
        #config_apps = config_apps[:-4]
        print(config_apps)
        # Essential, mandatory configuration wraps the optional parts.
        Config_Save(
            parent=self,
            cfg=(
                Util_Debian.Aptitude('update') + '&&\n\n' +
                Util_Debian.App('Essential', '&&\n\n') +
                Util_Debian.App('Dependence', '&&\n\n') +
                Util_Debian.App('Uninstall', '&&\n\n') +
                config_apps +
                Util_Debian.Aptitude('clean')
            )
        )
class Dialog_Aptitude(QDialog):
    """Dialog wrapping common aptitude operations (update/clean/install/purge)."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Aptitude')
        self.resize(308, 256)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical sections - Buttons - Update and clean
        button_update = QPushButton(Lang('upd'))
        button_update.clicked.connect(self.evt_apt_update)
        vbox_main.addWidget(button_update)
        button_clean = QPushButton(Lang('cln'))
        button_clean.clicked.connect(self.evt_apt_clean)
        vbox_main.addWidget(button_clean)
        # Vertical section - Spacer
        vbox_main.addStretch()
        # Vertical section - Install button + package entry
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        button_install = QPushButton(Lang('install'))
        button_install.clicked.connect(self.evt_apt_app_install)
        hbox.addWidget(button_install)
        self.entry_install = QLineEdit(
            placeholderText=Lang('app'),
        )
        #self.entry_install.returnPressed.connect(self.evt_apt_app_install)
        hbox.addWidget(self.entry_install)
        # Vertical section - Purge button + package entry
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        button_purge = QPushButton(Lang('prg'))
        button_purge.clicked.connect(self.evt_apt_app_purge)
        hbox.addWidget(button_purge)
        self.entry_purge = QLineEdit(
            placeholderText=Lang('app'),
        )
        #self.entry_purge.returnPressed.connect(self.evt_apt_app_purge)
        hbox.addWidget(self.entry_purge)

    def evt_apt_update(self):
        """Save the package-index update command."""
        Config_Save(
            self,
            cfg=Util_Debian.Aptitude('update')
        )

    def evt_apt_clean(self):
        """Save the package-cache clean command."""
        Config_Save(
            self,
            cfg=Util_Debian.Aptitude('clean')
        )

    def evt_apt_app_install(self):
        """Save an update+install command for the typed packages (no-op if empty)."""
        if self.entry_install.text() == '':
            pass
        else:
            Config_Save(
                parent=self,
                cfg = (
                    'sudo apt update &&\n\n' +
                    Util_Debian.Aptitude('install') +
                    self.entry_install.text()
                )
            )

    def evt_apt_app_purge(self):
        """Save a purge+autoremove+clean command for the typed packages (no-op if empty)."""
        if self.entry_purge.text() == '':
            pass
        else:
            Config_Save(
                parent=self,
                cfg = (
                    Util_Debian.Aptitude('purge') +
                    self.entry_purge.text() + ' &&\n\n'
                    'sudo apt autoremove ' +
                    self.entry_purge.text() + ' &&\n\n' +
                    Util_Debian.Aptitude('clean')
                )
            )
class Dialog_apps_menu(QDialog):
    """Menu of application groups: essential, dependencies, uninstall,
    desktop environments and optional bundles."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle(Lang('app_menu'))
        self.resize(308, 256)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical sections - Options
        button_app_essential = QPushButton(Lang('essential'))
        button_app_essential.clicked.connect(self.evt_app_essential)
        vbox_main.addWidget(button_app_essential)
        button_app_dependence = QPushButton(Lang('depens'))
        button_app_dependence.clicked.connect(self.evt_app_dependence)
        vbox_main.addWidget(button_app_dependence)
        button_app_uninstall = QPushButton(Lang('utll'))
        button_app_uninstall.clicked.connect(self.evt_app_uninstall)
        vbox_main.addWidget(button_app_uninstall)
        button_app_desktop = QPushButton(Lang('desk'))
        button_app_desktop.clicked.connect(self.evt_app_desktop)
        vbox_main.addWidget(button_app_desktop)
        button_app_optional = QPushButton(Lang('optional'))
        button_app_optional.clicked.connect(self.evt_app_optional)
        vbox_main.addWidget(button_app_optional)

    def evt_app_essential(self):
        """Save the command installing the essential application set."""
        Config_Save(
            parent=self,
            cfg=Util_Debian.App(opc='Essential')
        )

    def evt_app_dependence(self):
        """Save the command installing dependency packages."""
        Config_Save(
            parent=self,
            cfg=Util_Debian.App(opc='Dependence')
        )

    def evt_app_uninstall(self):
        """Save the command removing unwanted packages."""
        Config_Save(
            parent=self,
            cfg=Util_Debian.App(opc='Uninstall')
        )

    def evt_app_desktop(self):
        """Open the desktop-environment chooser dialog."""
        Dialog_app_desktop(parent=self).exec()

    def evt_app_optional(self):
        """Open the optional-apps chooser dialog."""
        Dialog_app_optional(parent=self).exec()
class Dialog_app_desktop(QDialog):
    """Dialog with one button per supported desktop environment."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle(Lang('app_desk'))
        self.resize(308, 128)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical sections - Options
        list_app_desktop = [
            'Desktop-xfce4',
            'Desktop-kdeplasma',
            'Desktop-gnome3',
            'Desktop-lxde',
            'Desktop-mate'
        ]
        for app_desktop in list_app_desktop:
            button_app_desktop = QPushButton(app_desktop)
            # partial() binds this button so the handler knows which one fired.
            button_app_desktop.clicked.connect(
                partial(self.evt_app_desktop, button=button_app_desktop)
            )
            vbox_main.addWidget(button_app_desktop)

    def evt_app_desktop(self, button):
        """Save the install command for the desktop named on the pressed button."""
        Config_Save(
            parent=self,
            cfg=Util_Debian.App(button.text())
        )
class Dialog_app_optional(QDialog):
    """Dialog listing optional application bundles (one button per list file),
    plus a button that installs every bundle at once."""

    def __init__(self, parent):
        super().__init__(parent)
        self.setWindowTitle( Lang('app_optional') )
        self.resize(308, 360)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical sections - Buttons inside a scroll area
        scroll_area = QScrollArea()
        scroll_area.setVerticalScrollBarPolicy(
            Qt.ScrollBarPolicy.ScrollBarAsNeeded
        )
        vbox_main.addWidget(scroll_area)
        # Scroll - Widget container and layout
        widget_buttons = QWidget()
        vbox = QVBoxLayout()
        widget_buttons.setLayout(vbox)
        # Scroll - Layout - One button per bundle file
        self.path_app_optional = f'{cfg_dir}App_Optional/'
        # Seed the default bundle lists when none of them exists yet.
        if (
            Path(
                f'{self.path_app_optional}'
                'App_Optional-wine.txt'
            ).exists() or
            Path(
                f'{self.path_app_optional}'
                'App_Optional-flatpak.txt'
            ).exists() or
            Path(
                f'{self.path_app_optional}'
                'App_Optional-woeusb-ng.txt'
            ).exists()
        ):
            pass
        else:
            Util_Debian.App('Optional-wine')
            Util_Debian.App('Optional-flatpak')
            Util_Debian.App('Optional-woeusb-ng')
        # FIX: always initialize the list so evt_all_app_optional cannot hit an
        # undefined attribute when listing the bundle files fails below.
        self.list_app_optional = []
        try:
            archives_app_optional = Files_List(
                files='App_Optional-*.txt',
                path=self.path_app_optional,
                remove_path=True
            )
            for text_app_optional in archives_app_optional:
                button_app_optional = QPushButton(text_app_optional)
                button_app_optional.clicked.connect(
                    partial(self.evt_app_optional, button=button_app_optional)
                )
                vbox.addWidget(button_app_optional)
                self.list_app_optional.append(text_app_optional)
        except Exception:
            # FIX: was a bare `except:`; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.  A missing or
            # unreadable directory simply leaves the bundle list empty.
            pass
        # Scroll - Attach the populated container
        scroll_area.setWidget(widget_buttons)
        # Vertical section - Button for all bundles at once
        vbox_main.addStretch()
        button_all_app_optional = QPushButton(Lang('all_apps'))
        button_all_app_optional.clicked.connect(
            self.evt_all_app_optional
        )
        vbox_main.addWidget(button_all_app_optional)

    def evt_app_optional(self, button):
        """Save the install command for the bundle named on the pressed button."""
        if button.text() in self.list_app_optional:
            Config_Save(
                parent = self,
                cfg = Util_Debian.App(
                    txt_title = f'{Lang("app")} / {button.text()}',
                    txt_add = '',
                    cfg_dir = './',
                    cfg_file = (
                        self.path_app_optional +
                        button.text()
                    ),
                    opc = 'continue'
                )
            )
        else:
            pass

    def evt_all_app_optional(self):
        """Concatenate the install commands for every known bundle and save them."""
        line_jump = '&&\n\n'
        app_all = ''
        for text_app_optional in self.list_app_optional:
            app_all += Util_Debian.App(
                txt = line_jump,
                txt_title = f'{Lang("app")} / {text_app_optional}',
                txt_add = '',
                cfg_dir = './',
                cfg_file = (
                    self.path_app_optional +
                    text_app_optional
                ),
                opc = 'continue'
            )
        # FIX: drop the trailing command separator with endswith(); the old
        # `app_all[-4]` indexing raised IndexError when the list was empty.
        if app_all.endswith(line_jump):
            app_all = app_all[:-len(line_jump)]
        Config_Save(
            parent=self,
            cfg=app_all
        )
class Dialog_TripleBuffer(QDialog):
    """Dialog to enable Xorg triple buffering for the user's graphics driver."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle( 'Triple Buffer Config' )
        self.resize(308, 256)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical section - Show which video drivers Xorg loaded
        command = 'grep drivers /var/log/Xorg.0.log'
        # shell=True is acceptable here: the command is a fixed literal,
        # not user input.
        command_run = subprocess.check_output(
            command, shell=True, text=True
        )
        label_command = QLabel(
            f'<i>{Lang("cmd")}: "{command}</i>\n'
            '\n'
            f'<b>{command_run}</b>'
        )
        label_command.setTextInteractionFlags(
            Qt.TextInteractionFlag.TextSelectableByMouse |
            Qt.TextInteractionFlag.TextSelectableByKeyboard
        )
        label_command.setWordWrap(True)
        vbox_main.addWidget(label_command)
        # Vertical sections - One button per supported GPU vendor
        list_graphics = [
            Lang('gpc_amd'),
            Lang('gpc_intel')
        ]
        for graphic in list_graphics:
            button_graphic = QPushButton(graphic)
            # partial() binds this button so the handler knows which one fired.
            button_graphic.clicked.connect(
                partial(
                    self.evt_TripleBuffer_graphic, button=button_graphic
                )
            )
            vbox_main.addWidget(button_graphic)

    def evt_TripleBuffer_graphic(self, button):
        """Save the Xorg triple-buffer config matching the pressed button."""
        # Map the translated button label to the Xorg snippet to install.
        dict_graphics = {
            Lang('gpc_amd'): '20-radeon.conf',
            Lang('gpc_intel'): '20-intel-gpu.conf'
        }
        if button.text() in dict_graphics:
            graphic = dict_graphics[button.text()]
            Config_Save(
                parent=self,
                cfg=Util_Debian.TripleBuffer(graphic)
            )
        else:
            pass
        #graphic = button.text()
class Dialog_mouse_config(QDialog):
    """Dialog to toggle mouse acceleration via an Xorg config snippet."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setWindowTitle('Mouse Config')
        self.resize(256, 128)
        # Main container
        vbox_main = QVBoxLayout()
        self.setLayout(vbox_main)
        # Vertical section - Acceleration on/off toggle button
        hbox = QHBoxLayout()
        vbox_main.addLayout(hbox)
        button_acceleration = QPushButton()
        button_acceleration.setCheckable(True)
        button_acceleration.clicked.connect(
            partial(
                self.evt_mouse_acceleration_onoff,
                button=button_acceleration
            )
        )
        hbox.addWidget(button_acceleration)
        # Initial state mirrors whether the acceleration-off snippet exists.
        if Path(
            '/usr/share/X11/xorg.conf.d/'
            '50-mouse-acceleration.conf'
        ).exists():
            button_acceleration.setChecked(False)
            button_acceleration.setText(Lang("acclr_off"))
        else:
            button_acceleration.setChecked(True)
            button_acceleration.setText(Lang("acclr_on"))

    def evt_mouse_acceleration_onoff(self, button):
        """Save the command enabling/disabling acceleration per button state."""
        if button.isChecked() == True:
            option = 'AccelerationON'
            button.setText( Lang('acclr_on') )
        else:
            option = 'AccelerationOFF'
            button.setText( Lang('acclr_off') )
        Config_Save(
            parent=self,
            cfg=Util_Debian.Mouse_Config(option)
        )
# Script entry point: build the Qt application and main window, then run the
# event loop until quit, forwarding its exit code to the shell.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # NOTE(review): the window is never .show()n here — presumably
    # Window_Main shows itself in __init__; confirm against its definition.
    window = Window_Main()
    sys.exit(app.exec())
{
"api_name": "Interface.Modulo_Util_Qt.Dialog_Command_Run",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "Interface.Modulo_Util_Qt",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "PyQt6.QtWidgets.QWidget",
"line_number": 52,
"usage_type": "name"
... |
32179622727 | import datetime
from shifts import models
def prepare_prune_worker_shift(before):
    """Return a queryset of WorkerShift rows strictly older than *before*.

    *before* must be a Monday at least 7 days in the past.  Before
    returning, verify that no pending aggregate-count update refers to a
    week that is about to be pruned.

    Raises:
        AssertionError: if *before* is too recent or not a Monday.
        Exception: if a pending aggregate update targets a pruned week.
    """
    d = (datetime.date.today() - before).days
    assert d >= 7
    beforeisoyear, beforeisoweek, beforeweekday = before.isocalendar()
    # BUG FIX: date.isocalendar() numbers weekdays 1 (Monday) .. 7 (Sunday),
    # so the previous `== 0` check could never pass; Monday is 1.
    assert beforeweekday == 1
    beforeisoyearweek = 100 * beforeisoyear + beforeisoweek
    (
        stat_pos,
        stat_nul,
        stat_neg,
        add_counts,
    ) = models.prepare_update_worker_shift_aggregate_count()
    # Refuse to prune while an aggregate update still references a week
    # earlier than the prune boundary.
    for (worker, isoyearweek, yearmonth), rowid, count in add_counts:
        if isoyearweek < beforeisoyearweek:
            raise Exception(
                "Going to prune everything before week %s but there is a pending update for week %s"
                % (beforeisoyearweek, isoyearweek)
            )
    return models.WorkerShift.objects.filter(shift__date__lt=before)
| Mortal/shiftplanner | shifts/prune.py | prune.py | py | 829 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.date.today",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "shifts.models.prepare_update_worker_shift_aggregate_count",
"line_number": 17,
"usage_type": "call"
... |
29608590768 | import json
def parse(path):
    """Parse a lidar text dump into {rotation_index: list_of_records}.

    Every line holds space-separated "key: value" pairs; each value is
    collected as a float, except that an ``S:`` pair marks the start of a
    rotation and contributes no value.  Returns a dict mapping a 0-based
    rotation index to the per-line float records between two consecutive
    ``S`` markers (content after the final marker is not included).
    """
    with open(path, 'r') as handle:
        rows = [row.strip().split(' ') for row in handle.readlines()]

    records = []
    rotation_starts = []
    for row_index, tokens in enumerate(rows):
        record = []
        for pos in range(0, len(tokens), 2):
            key, value = tokens[pos].strip(':'), tokens[pos + 1]
            if key == 'S':
                # Remember where this rotation begins; no value to store.
                rotation_starts.append(row_index)
                continue
            record.append(float(value))
        records.append(record)

    parsed = {}
    for idx in range(len(rotation_starts) - 1):
        parsed[idx] = records[rotation_starts[idx]:rotation_starts[idx + 1]]
    return parsed
if __name__ == '__main__':
data = parse('data/LidarData2.txt')
with open('data/LidarData.json', 'w') as f:
f.write(json.dumps(data, indent=2)) | rsmit3/hackaton-team-5 | parse_txt.py | parse_txt.py | py | 832 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
}
] |
34145590490 | # Run this script as `root user`. This is the only script that should ever be run as root user.
# Creats the first IAM admin that can add additional IAM users/roles/groups/etc..
try:
import boto3
except ImportError:
print(f'Error: boto3 is required. Please install.')
print(f'Try: pip install boto3')
def createIamMutatorRole():
    """Stub: announce creation of the `iam-mutator` role (no AWS calls yet)."""
    for message in ('Creating `iam-mutator` role...',
                    '`iam-mutator` role created.'):
        print(message)
def createUser(username):
    """Create an IAM user, tolerating the user already existing.

    Args:
        username: name of the IAM user to create.
    """
    print(f'Creating user {username}...')
    iam = boto3.client('iam')
    try:
        iam.create_user(UserName=username)
        print(f'User {username} created.')
    except iam.exceptions.EntityAlreadyExistsException:
        # Narrowed from a bare `except:` so real failures (bad credentials,
        # throttling, network errors) are no longer silently swallowed.
        print(f'User {username} probably already exists, not creating.')
def allowUserAccessToIamMutatorRole(username):
    """Stub: announce that *username* was granted the IAM mutator role."""
    templates = (
        'Adding {} to IAM admin role...',
        'User {} added.',
    )
    for template in templates:
        print(template.format(username))
if __name__ == '__main__':
primaryUserName = 'jim-hill-r'
createIamMutatorRole()
createUser(primaryUserName)
allowUserAccessToIamMutatorRole(primaryUserName) | jim-hill-r/gmby6 | core/infra/provision/start.py | start.py | py | 1,020 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.client",
"line_number": 17,
"usage_type": "call"
}
] |
19031139452 | from abc import ABC
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
from sklearn.utils import indexable
from sklearn.utils.validation import _num_samples
class SlidingTimeSeriesSplit(TimeSeriesSplit, ABC):
    """TimeSeriesSplit variant with a sliding (fixed-size) training window.

    Unlike the parent class, each fold's training window is capped at
    ``n_samples // n_splits``, so the window slides forward rather than
    expanding with every fold.
    """
    def __init__(self, n_splits: int, gap: int = 0):
        super().__init__(n_splits=n_splits, gap=gap)
        # Number of samples; -1 until split() is first called.
        self.n = -1
    def split(self, X, y=None, groups=None):
        """Yield (train_indices, test_indices) pairs over X.

        Side effect: sets ``self.max_train_size`` so every training window
        holds at most ``n_samples // n_splits`` observations — this is what
        makes the split "sliding".
        """
        self.n = X.shape[0]
        test_size = int(self.n // self.n_splits)
        self.max_train_size = test_size
        X, y, groups = indexable(X, y, groups)
        n_samples = _num_samples(X)
        n_splits = self.n_splits
        n_folds = n_splits + 1
        gap = self.gap
        # NOTE(review): this recomputes test_size from self.test_size,
        # overwriting the local computed above; the two can differ when
        # the parent's test_size parameter was set — confirm intent.
        test_size = (
            self.test_size if self.test_size is not None else n_samples // n_folds
        )
        # Make sure we have enough samples for the given split parameters
        if n_folds > n_samples:
            raise ValueError(
                f"Cannot have number of folds={n_folds} greater"
                f" than the number of samples={n_samples}."
            )
        if n_samples - gap - (test_size * n_splits) <= 0:
            raise ValueError(
                f"Too many splits={n_splits} for number of samples"
                f"={n_samples} with test_size={test_size} and gap={gap}."
            )
        indices = np.arange(n_samples)
        test_starts = range(n_samples - n_splits * test_size, n_samples, test_size)
        for test_start in test_starts:
            train_end = test_start - gap
            if self.max_train_size and self.max_train_size < train_end:
                # Enough history: take only the trailing max_train_size points.
                yield (
                    indices[train_end - self.max_train_size: train_end],
                    indices[test_start: test_start + test_size],
                )
            else:
                # Not enough history yet: use everything up to train_end.
                yield (
                    indices[:train_end],
                    indices[test_start: test_start + test_size],
                )
| vcerqueira/blog | src/cv_extensions/sliding_tss.py | sliding_tss.py | py | 1,953 | python | en | code | 15 | github-code | 1 | [
{
"api_name": "sklearn.model_selection.TimeSeriesSplit",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sklearn.utils.indexable",
"line_number": 22,
"usage_type": "call"
},
{
"api_name"... |
15938992699 | #! venv/bin/python3
# -*- coding: UTF-8 -*-
import queue
from multiprocessing.managers import BaseManager
class QueueServer:
    """Serves a shared product queue over the network via BaseManager."""

    def __init__(self, ip='0.0.0.0', port=3000, authkey='lzw520'):
        """Remember the bind address and auth key; nothing is started yet."""
        self._queue = queue.Queue()
        self._ip = ip
        self._port = port
        self._authkey = authkey
        self.manager = None

    def start(self):
        """Register the queue accessor and start the manager server."""
        BaseManager.register('get_product_queue', callable=lambda: self._queue)
        address = (self._ip, self._port)
        key_bytes = self._authkey.encode(encoding='utf-8')
        self.manager = BaseManager(address=address, authkey=key_bytes)
        self.manager.start()
        print('产品队列服务器已启动!')

    def shutdown(self):
        """Stop the manager if it was started, otherwise report failure."""
        if not self.manager:
            print('关闭失败:服务器未启动!')
        else:
            self.manager.shutdown()
            print('服务器已关闭!')
if __name__ == '__main__':
    # Start the queue server and keep the main process alive until Ctrl-C,
    # always shutting the server down on exit.
    server = QueueServer()
    server.start()
    try:
        import time
        while True:
            # Sleep instead of a busy `pass` loop so idling does not peg a
            # CPU core; KeyboardInterrupt still interrupts the sleep.
            time.sleep(1)
    except KeyboardInterrupt:
        print('\n用户指令-退出\n')
    finally:
        server.shutdown()
| dz85/DXDSTest | producer_consumer/queue_server.py | queue_server.py | py | 1,071 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "queue.Queue",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "multiprocessing.managers.BaseManager.register",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "multiprocessing.managers.BaseManager",
"line_number": 18,
"usage_type": "name"
... |
38612986384 | import state
import math
import collections
def label(graph):
    """Heuristic value for a block-stacking goal graph.

    Counts how many more blocks (out of GOAL_NUM) must still be placed on
    a goal table, then returns the summed x-distances of the `needed`
    closest available blocks.  Returns 0. when the goal is already met
    and 1. when not enough blocks remain to meet it.
    """
    GOAL_NUM = 5
    goalTables = [
        (i,obj)
        for i,obj in enumerate(graph.nodes)
        if obj[state.ObjType.Table] > .5
        and obj[len(state.ObjType) + state.ObjAttrs.goal] > .5
    ]
    tables = [
        (i,obj)
        for i,obj in enumerate(graph.nodes)
        if obj[state.ObjType.Table] > .5
    ]
    blocks = [
        (i,obj)
        for i,obj in enumerate(graph.nodes)
        if obj[state.ObjType.Block] > .5
    ]
    # (Removed dead code: the `floors` list and `sigmoid` lambda were
    # computed but never used.)
    # block/node index -> indices of the objects it is on top of
    onTop = collections.defaultdict(list)
    for edge in graph.edges:
        if edge.edgeType == state.RelType.OnTop:
            onTop[edge.a].append(edge.b)
    # BUG FIX: goalTables holds (index, node) tuples, but onTop[...] holds
    # bare node indices, so comparing whole tuples never matched; compare
    # indices instead.
    goalTableIdxs = {i for i, obj in goalTables}
    # blocks not already resting on a goal table
    availableBlocks = {
        i for i,block in blocks
        if i not in onTop or not any(g in onTop[i] for g in goalTableIdxs)
    }
    needed = GOAL_NUM - len({
        i for i,block in blocks
        if i in onTop and any(g in onTop[i] for g in goalTableIdxs)
    })
    if needed < 0:
        return 0.
    if needed > len(availableBlocks):
        return 1.
    # x-coordinate is stored immediately after the one-hot type section
    xKey = len(state.ObjType)
    # NOTE(review): goal x is read from tables[0], not goalTables —
    # presumably the first table is the goal table; confirm.
    goalX = tables[0][1][xKey]
    distances = [abs(graph.nodes[blockIdx][xKey] - goalX) for blockIdx in availableBlocks]
    return sum(sorted(distances)[:needed])
| lukeshimanuki/qqq | heuristic.py | heuristic.py | py | 1,226 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "state.ObjType",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "state.ObjType",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "state.ObjAttrs",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "state.ObjTy... |
36902006495 | '''
Descripttion:
version:
Author: LiQiang
Date: 2021-04-08 21:37:49
LastEditTime: 2021-04-09 19:34:00
'''
"""
主函数,用来提取关键词 、及主题句子
"""
import textRank
##########生成词云所用的库##########
import matplotlib.pyplot as plt
import PIL.Image as Image
import jieba
import numpy as np
import os
from wordcloud import WordCloud,ImageColorGenerator
##################################
# Load the debate transcript and split utterances by speaker side:
# lines starting with '正方' (affirmative) go to Tdata, lines starting
# with '反方' (negative) go to Fdata; the first line is the debate topic.
with open('statics/1.txt','r',encoding='utf-8') as f:
    data=[i for i in f.readlines()]
text=''.join(data)
# print(text)
Tdata=[]    # affirmative-side utterances
Fdata=[]    # negative-side utterances
title=data[0]  # debate topic
for i in data:
    if i.startswith('正方'):
        # keep only the utterance after the full-width colon
        Tdata.append(i.split(':')[1])
    elif i.startswith('反方'):
        Fdata.append(i.split(':')[1])
    else:
        pass
# print(Tdata)
# print(Fdata)
# print(title)
def get_words_and_sentences(text):
    """Run TextRank over the joined *text* list.

    :param text: list of utterances (e.g. Tdata or Fdata)
    :return: (top-80 keywords, top-3 key sentences)
    """
    ranker = textRank.TextRank(''.join(text), pr_config={'alpha': 0.85, 'max_iter': 100})
    keywords = ranker.get_n_keywords(80)    # top 80 keywords, feeds the word cloud
    sentences = ranker.get_n_sentences(3)   # top 3 summary sentences
    return keywords, sentences
def cloud_pic(TList, T=True):
    """Build a word cloud from *TList*.

    :param TList: list of strings to render
    :param T: picks the mask image; True -> './1.jpg', False -> './2.jpg'
    :return: (recolored word cloud, original word cloud)
    """
    tokens = jieba.cut(''.join(TList), cut_all=True)
    corpus = ' '.join(tokens)
    mask_path = './1.jpg' if T else './2.jpg'
    mask_image = np.array(Image.open(mask_path))  # source image used as mask
    cloud = WordCloud(
        background_color="white",                 # background color
        max_words=200,                            # word count cap
        mask=mask_image,                          # shape mask
        max_font_size=150,                        # font size cap
        random_state=50,                          # deterministic layout
        font_path='C:/Windows/Fonts/simhei.ttf'   # Chinese-capable font
    ).generate(corpus)
    recolor_func = ImageColorGenerator(mask_image)
    return cloud.recolor(color_func=recolor_func), cloud
#####绘制词云图
# cloud_pic(''.join([i[0] for i in res]))
def draw_cloud_pic(title,T,F):
    """
    Render both sides' word clouds side by side under the debate title.

    title: debate topic (used as the figure suptitle)
    T: affirmative-side keyword string
    F: negative-side keyword string
    :return:
    """
    Tcloud,Tword=cloud_pic(TList=T,T=True)
    Fcloud,Fword=cloud_pic(TList=F,T=False)
    plt.rcParams['figure.figsize'] = (18, 10)  # canvas size
    # Prevent garbled CJK glyphs and broken minus signs in matplotlib
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # Figure-level title
    plt.suptitle(title, fontsize=30, color='g', fontweight='bold')
    # Affirmative-side cloud
    plt.subplot(1, 2, 1)  # 1 row, 2 cols, slot 1
    plt.title('正方选手', fontsize=20, color='red')  # subplot title
    plt.imshow(Tcloud)
    plt.imshow(Tword)
    plt.axis("off")
    # Negative-side cloud
    plt.subplot(1, 2, 2)  # 1 row, 2 cols, slot 2
    plt.title('反方选手', fontsize=20,color='blue')  # subplot title
    plt.imshow(Fcloud)
    plt.imshow(Fword)
    plt.axis("off")
    plt.show()
if __name__ == '__main__':
# 正方
Twords,Tsentences=get_words_and_sentences(text=Tdata)
print("获取正方选手的关键词(前80个)")
print(Twords)
print("获取正方选手的关键句子(前3个)")
for i in Tsentences:
print(i)
# 反方
Fwords, Fsentences = get_words_and_sentences(text=Fdata)
print("获取正方选手的关键词(前80个)")
print(Fwords)
print("获取正方选手的关键句子(前3个)")
for i in Fsentences:
print(i)
# 绘制词云图
cloudT=''.join([i[0] for i in Twords])
cloudF = ''.join([i[0] for i in Fwords])
draw_cloud_pic(title, T=cloudT, F=cloudF) | MarsCube/TextRank-1 | main_two_cloud.py | main_two_cloud.py | py | 4,182 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "textRank.TextRank",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "jieba.cut",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_nu... |
33330576982 | from flask import Flask, render_template
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from io import BytesIO
import base64
from bs4 import BeautifulSoup
import requests
#don't change this
matplotlib.use('Agg')
app = Flask(__name__) #do not change this
#insert the scrapping here
url_get = requests.get('https://www.coingecko.com/en/coins/ethereum/historical_data/usd?start_date=2020-01-01&end_date=2021-06-30#panel')
soup = BeautifulSoup(url_get.content,"html.parser")
web_address = "https://www.coingecko.com/en/coins/ethereum/historical_data/usd?start_date=2020-01-01&end_date=2021-06-30#panel"
web_content = requests.get(web_address).text
web_content_soup = BeautifulSoup(web_content, "html.parser")
#find your right key here
table = soup.find('table', attrs={'class': 'table table-striped text-sm text-lg-normal'})
cek_row = table.find_all('tr')
row_length = len(cek_row)
table_ethereum = web_content_soup.find('table', attrs={'class': 'table table-striped text-sm text-lg-normal'})
list_ethereum = [] #initiating a list
for table_rows in table_ethereum.find_all('tr'):
# Find Header Date
header_date = table_rows.find('th', attrs={'class': 'font-semibold text-center'})
if(header_date) != None:
periode = header_date.text.replace("-", "/")
# Find record
column_iteration = 0
for table_columns in table_rows.find_all('td'):
if (column_iteration) == 0:
market_cap = table_columns.text.replace("$", "").replace(",", "").strip()
elif (column_iteration) == 1:
volume = table_columns.text.replace("$", "").replace(",", "").strip()
elif (column_iteration) == 2:
open_price = table_columns.text.replace("$", "").replace(",", "").strip()
elif (column_iteration) == 3:
close = table_columns.text.replace("$", "").replace(",", "").strip()
if column_iteration == 3:
list_ethereum.append((periode,market_cap, volume, open_price, close))
column_iteration += 1
#change into dataframe
df_ethereum = pd.DataFrame(list_ethereum, columns = ('Periode','Market Cap','Volume', 'Open Price', 'Close'))
#insert data wrangling here
df_ethereum['Periode'] = df_ethereum['Periode'].astype('datetime64')
df_ethereum['Volume'] = df_ethereum['Volume'].astype('float64')
df_ethereum = df_ethereum.set_index('Periode')
df_ethereum['Volume'] = df_ethereum['Volume']
#end of data wranggling
@app.route("/")
def index():
card_data = f'{df_ethereum["Volume"].mean().round(2)}' #be careful with the " and '
# generate plot
ax = df_ethereum.plot(figsize = (20,9))
# Rendering plot
# Do not change this
figfile = BytesIO()
plt.savefig(figfile, format='png', transparent=True)
figfile.seek(0)
figdata_png = base64.b64encode(figfile.getvalue())
plot_result = str(figdata_png)[2:-1]
# render to html
return render_template('index.html',
card_data = card_data,
plot_result=plot_result
)
if __name__ == "__main__":
app.run(debug=True) | kopikepu/Capstone_Webscrapping | app.py | app.py | py | 3,043 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line... |
71059419553 | from django.contrib import admin
from .models import Service, AppointmentRequest, Appointment, EmailVerificationCode, Config
@admin.register(Service)
class ServiceAdmin(admin.ModelAdmin):
    """Admin list for services: searchable by name, filterable by duration."""
    list_display = ('name', 'duration', 'price', 'created_at', 'updated_at',)
    search_fields = ('name',)
    list_filter = ('duration',)
@admin.register(AppointmentRequest)
class AppointmentRequestAdmin(admin.ModelAdmin):
    """Admin list for appointment requests; searchable by date and service name."""
    list_display = ('date', 'start_time', 'end_time', 'service', 'created_at', 'updated_at',)
    search_fields = ('date', 'service__name',)
    list_filter = ('date', 'service',)
@admin.register(Appointment)
class AppointmentAdmin(admin.ModelAdmin):
    """Admin list for confirmed appointments; searchable across client and service."""
    list_display = ('client', 'appointment_request', 'created_at', 'updated_at',)
    search_fields = ('client__user__username', 'appointment_request__service__name',)
    list_filter = ('client', 'appointment_request__service',)
@admin.register(EmailVerificationCode)
class EmailVerificationCodeAdmin(admin.ModelAdmin):
    """Admin list for per-user email verification codes."""
    list_display = ('user', 'code')
@admin.register(Config)
class ConfigAdmin(admin.ModelAdmin):
    """Admin list for the site-wide scheduling configuration singleton."""
    list_display = ('slot_duration', 'lead_time', 'finish_time', 'appointment_buffer_time', 'website_name')
| adamspd/django-appointment | appointment/admin.py | admin.py | py | 1,201 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.register",
"line_number": 6,
"usage_type": "call"
},
{
... |
17223706930 | import multiprocessing
from multiprocessing import Pool
import time
# def spawn(num):
# print('Spawned!{}'.format(num))
# if __name__ == '__main__':
# s1 = time.time()
# for i in range(500):
# p = multiprocessing.Process(target=spawn,args=(i,))
# p.start()
# #p.join()
# e1 = time.time()
# s2 = time.time()
# for i in range(500):
# p = multiprocessing.Process(target=spawn,args=(i,))
# p.start()
# p.join()
# e2 = time.time()
# print(e1-s1,e2-s2)
def job(num):
    """Return twice *num* (worker function for the process pool)."""
    return num * 2
if __name__ == '__main__':
    # Use the pool as a context manager so workers are always terminated,
    # even on error (the original close() without join() could leave them
    # unreaped).
    with Pool(processes=20) as p:
        # map blocks until all results are back, preserving input order —
        # this is how data is retrieved from the worker processes.
        data = p.map(job, range(20))
    print(data)
{
"api_name": "multiprocessing.Pool",
"line_number": 27,
"usage_type": "call"
}
] |
26635775008 | """backend/main.py."""
from typing import Optional
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from maze_generator import ALPHA, DISCOUNT, EPSILON, NUM_EPISODES, MazeGenerator
from pydantic import BaseModel, validator
# Initialize FastAPI app
app = FastAPI(
title="DungeonMazeGenerator",
description="Generate a maze with a start, treasure and an end point."
'Legend: "x"= starting point, "+": treasure point, "o": end point',
)
class MazeGeneratorRequest(BaseModel):
    """Request body for /generate_maze.

    Defaults come from maze_generator's module constants.  All validators
    run with pre=True, i.e. before pydantic's own type coercion.
    """
    # Side length of the (square) maze grid.
    maze_size: int = 4
    # Q-learning hyper-parameters used by MazeGenerator.
    alpha: Optional[float] = ALPHA
    discount: Optional[float] = DISCOUNT
    epsilon: Optional[float] = EPSILON
    num_episodes: Optional[int] = NUM_EPISODES
    @validator("num_episodes", pre=True)
    def validate_num_episodes(cls, v):
        """Reject non-positive episode counts."""
        if v <= 0:
            raise ValueError("num_episodes should be a positive integer")
        return v
    @validator("epsilon", pre=True)
    def validate_epsilon(cls, v):
        """Epsilon is an exploration probability, so it must lie in [0, 1]."""
        if not (0 <= v <= 1):
            raise ValueError("epsilon should be between 0 and 1")
        return v
    @validator("alpha", pre=True)
    def validate_alpha(cls, v):
        """Learning rate must lie in [0, 1]."""
        if not (0 <= v <= 1):
            raise ValueError("alpha should be between 0 and 1")
        return v
    @validator("maze_size", pre=True)
    def validate_maze_size(cls, v):
        """A maze needs at least a 2x2 grid."""
        if v <= 1:
            raise ValueError("maze_size should be at least 2")
        return v
    @validator("discount", pre=True)
    def validate_discount(cls, v):
        """Discount factor must lie in [0, 1]."""
        if not (0 <= v <= 1):
            raise ValueError("discount should be between 0 and 1")
        return v
# Endpoint that serves generation of a maze prediction
@app.post("/generate_maze", status_code=200, tags=["Generate maze"])
async def generate_maze(maze_generator_request: MazeGeneratorRequest):
"""Generate a maze.
:param maze_generator_request: MazeGeneratorRequest model
:return: StreamingResponse to plot the maze as a response
"""
maze_generator = MazeGenerator(
maze_size=maze_generator_request.maze_size,
alpha=maze_generator_request.alpha,
discount=maze_generator_request.discount,
epsilon=maze_generator_request.epsilon,
num_episodes=maze_generator_request.num_episodes,
)
buffer = maze_generator.generate()
buffer.seek(0) # Return cursor to starting point
return StreamingResponse(buffer, media_type="image/png")
| mariaafara/DungeonMapGenerator | serving/api_main.py | api_main.py | py | 2,456 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "maze_generator.ALP... |
11648173695 | #!/usr/bin/python -tt
""""
created on 17th March 2018
@author: Abhishek Chattopadhyay
FName: addTest
"""
from __future__ import print_function
import os
import sys
import datetime
import xml.etree.ElementTree as ET
BASEDIR = '.'
_template = BASEDIR + '/xml/templates/testtemplate.xml'
optDir = BASEDIR + '/xml/options/'
testDir = BASEDIR + '/Tests/tbd/'
tempDir = BASEDIR + '/Tests/temp/'
runningDir = BASEDIR + '/Tests/running/'
completedDir= BASEDIR + '/Tests/completed/'
scheduledDir= BASEDIR + '/Tests/scheduled/'
debug = False
def Print(level, statement):
    """Print `level: statement` only when the module-wide debug flag is set."""
    global debug
    if not debug:
        return
    print(level + ':', statement)
def inputMsg(msg1, msg2):
    """Format a prompt string as "[msg1] msg2".

    Generalized: any non-string argument (not just int, as before) is
    converted with str(), so floats etc. no longer raise TypeError.
    Kept Python-2 compatible (no f-strings) to match the rest of the file.
    """
    return '[' + str(msg1) + '] ' + str(msg2)
def getInp(item,tag, msg):
    """Prompt with *msg* (showing the current item[tag] value); on non-empty
    input overwrite item[tag] in place, otherwise keep the old value.

    NOTE(review): uses raw_input, so this module targets Python 2.
    """
    inp = raw_input(inputMsg(item[tag],msg))
    if inp != '':
        item[tag] = inp
    else:
        Print('INFO: ', 'using old value:')
class userTest:
def __init__(self,elements):
self.tcEdit = False
self.foundPath = ''
self.user = elements
self.codeLineup = ''
self.getInput()
return
def getInput(self): #function gets inputs from the user and returns a dict of inputs
print ('INFO','provide test case inputs, hit enter to auto-generate')
now = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
self.user['id'] = now
msg = 'Name your test case:(HINT: Must be unique add yyyy-mm-dd at the end): '
inp = raw_input(msg)
if inp != '':
if self.validateName(inp):
self.tcEdit = True
self.user = helper.getXmlElem(self.foundPath+inp+'.xml')
else:
self.user['id'] = inp
else:
self.user['id'] = now
print ('INFO: AUTOGENERATED: Test Name: ', str(now)+'.xml')
getInp (self.user,'SCHEDULE_TIME','Schedule Time: [enter 0 to immediately schedule]')
getInp (self.user,'SCHEDULE_DATE','Schedule Date:(HINT: DD-MM-YYYY): ')
if self.user['SCHEDULE_DATE'] == 'TODAY':
self.user['SCHEDULE_DATE'] = datetime.datetime.now().strftime("%d-%m-%Y")
getInp (self.user,'SCHEDULE_POLICY','Schedule Policy: ')
if int(self.user['SCHEDULE_TIME']) == 0:
self.user['SCHEDULE_POLICY'] = 'IMMEDIATE'
print ('INFO: Test case schedule is: ',self.user['SCHEDULE_POLICY'])
getInp (self.user, 'RECURRENCE','Recurrence:(HINT: [yes/no]): ')
getInp (self.user, 'DURATION_H','Duration:(HINT: in hours): ')
self.user['DURATION_M'] = 0 # not allowed now
self.user['DURATION_S'] = 0 # not allowed now
getInp (self.user,'RMX_IP',"RMX IP: ")
getInp (self.user,'RMX_TYPE',"Rmx Type: ")
getInp (self.user,'TESTTYPE',"Want to upgrade and run load: ")
if (self.user['TESTTYPE']).lower() == 'no':
getInp (self.user,'UPGRADE',"Upgrade the RMX: ")
getInp (self.user,'LOAD',"Execute Load Test: ")
if (self.user['UPGRADE']).lower() == 'yes':
# get the build lineup
#self.codeLineup = raw_input("Pick your code line (8.7.5, 8.7.4, 8.5.13, 8.5.21): ")
getInp (self.user,'RELEASE','(8.7.5, 8.7.4, 8.5.13, 8.5.21)')
self.user['RMX_BUILD'] = helper.getLatestBuild(self.user['RELEASE'])
getInp (self.user,'RMX_BUILD', 'Rmx Build: ')
if (self.user['LOAD']).lower() == 'yes':
getInp (self.user,'DMA_IP', "DMA IP: ")
getInp (self.user,'CPS',"Calls Per Second: ")
getInp (self.user,'PROTOCOL',"Protocol: ")
getInp (self.user,'FR',"Failure Rate:(HINT:% failure to monitor): ")
getInp (self.user,'SIPP_PRIMARY',"primary Sipp IP: ")
getInp (self.user,'SIPP_PRI_USR',"primary Sipp ssh user: ")
getInp (self.user,'SIPP_PRI_PASS',"primary Sipp ssh password: ")
if (self.user['RMX_TYPE']).lower() == 'rmx4000':
getInp (self.user,'SIPP_SECONDARY',"secondary Sipp IP: ")
getInp (self.user,'SIPP_SEC_USR',"secondary Sipp ssh user: ")
getInp (self.user,'SIPP_SEC_PASS',"secondary Sipp ssh passowrd: ")
advancedConfig = False
print ('INFO: Based on your inputs further parameters are autocalculated:')
print ('RMX ssh User: ', self.user['RMX_USER'])
print ('RMX ssh password: ', self.user['RMX_PASS'])
print ('RMX su password: ', self.user['RMX_SU_PASS'])
print ('Video Type: ', self.user['VIDEO_TYPE'])
# calculate rate & loading factor
if self.user['CPS'] == '2':
self.user['RATE'] = 2000
self.user['LOADING'] = 75
elif self.user['CPS'] == '5':
self.user['RATE'] = 5000
self.user['LOADING'] = 60
print ('RATE: ', self.user['RATE'])
print ('LOADING %: ', self.user['LOADING'])
# calculate ports & monitor delay
if (self.user['RMX_TYPE']).lower() == 'rmx4000':
self.user['MAX_PORTS'] = 400
self.user['MONITOR_DELAY'] = 15
elif (self.user['RMX_TYPE']).lower() == 'rmx2000':
self.user['MAX_PORTS'] = 200
self.user['MONITOR_DELAY'] = 15
elif (self.user['RMX_TYPE']).lower() == 'ninja':
self.user['MAX_PORTS'] = 100
self.user['MONITOR_DELAY'] = 10
print ('MAX PORTS: ', self.user['MAX_PORTS'])
print ('Monitor Delay: ', self.user['MONITOR_DELAY'])
# calculate hold time
# Calculation of media quality multiplier (1 for HD, 2 for CIF and SD, 3 for AUDIO ONLY
if self.user['VIDEO_TYPE'] in ['CIF','SD']:
MQFactor = 2
elif self.user['VIDEO_TYPE'] == 'HD':
MQFactor = 1
else:
MQFactor = 3
# calculation of ccvmr = (MQFactor * Loadng Factor * ports )/100
ccvmr = (MQFactor * self.user['LOADING'] * self.user['RATE'])/100
# hold time (in msec) = (ccvmr / (rate/1000))*1000
self.user['HOLDTIME'] = (ccvmr / (self.user['RATE']/1000))*1000 # in mili seconds
# the hold time is derived by ((MQFactor * Loading Factor * Ports)/(Rate))*10 this value would be in mil secs
# rate is in thousand calls factor
print (MQFactor, self.user['LOADING'], self.user['MAX_PORTS'], self.user['RATE'])
#print (type(MQFactor), type(self.user['LOADING']), type(self.user['MAX_PORTS']), type(self.user['RATE']))
#holdTime = (MQFactor * self.user['LOADING'] * self.user['MAX_PORTS'] * 10 )/(self.user['RATE'])
#self.user['HOLDTIME'] = holdTime
print ('HoldTime: ', self.user['HOLDTIME'])
print ('INFO: RMX will upgrade: ', self.user['UPGRADE'])
print ('INFO: RMX will be load tested: ', self.user['LOAD'])
advancedConfig = raw_input ('If you want to Overwrite the autocalculated parameters enter (YES)')
if advancedConfig == 'YES':
getInp (self.user,'RMX_USER','Rmx ssh user name: ')
getInp (self.user,'RMX_PASS','Rmx ssh password: ')
getInp (self.user,'RMX_SU_PASS','Rmx super user password:')
getInp (self.user,'VIDEO_TYPE',"Video Type: ")
getInp (self.user,'LOADING','%age loading of RMX: ')
getInp (self.user,'RATE',"Rate: ")
getInp (self.user,'HOLDTIME',"Hold Time: ")
getInp (self.user,'MONITOR_DELAY',"Monitor delay:(HINT: This is the time in minute the failure rate checked would wait feore starting to monitor your sipp load stats): ")
getInp (self.user,'ON_FAIL_RESTART','On Fail Restart? (yes/ no): ') # not allowed now
getInp (self.user,'EMAILTO','email: ') # not allowed now
Print ('INFO',self.user)
#return self.user
def validateName(self,name):
# Check name of test case for uniqueness
paths = [testDir, scheduledDir, completedDir, tempDir]
for path in paths:
if name+'.xml' in os.listdir(path):
self.foundPath = path
print('INFO: ', 'You are editing an existing tc')
return True
if name in os.listdir(runningDir):
raise ValueError('ERROR: This test case is running, cant edit')
return False
def validate(self): # function validates all the inputs by user
result = True
print ('Let me quickly check the inputs')
print ('INFO: ','Checking build: ', end='')
if (self.user['UPGRADE']).lower() == 'no':
return True
if helper.buildavailable(self.user['RMX_BUILD']):
print ('Build choice is fine')
else:
result = result & False
'''command = 'ping -c 4 '
errors = ['100% packet loss','unknown','unreachable']
print ('INFO: Checking if I can reach RMX IP: ',self.user['RMX_IP'], end='')
output = subprocess.Popen(
[command + self.user['RMX_IP']],
shell=True,
stdout=subprocess.PIPE).communicate()[0]
print (output)
#if ("Destination host unreachable" in output) or ('unknown' in output):
print ([i for i in errors if i in output])
if [i for i in errors if i in output.decode('utf-8')]:
print ("{} is offline".format(self.user['RMX_IP']))
result = result & False
else:
print (' : RMX Rechable')
print ('INFO: Checking if I can reach the SIPP machine: ',self.user['SIPP_PRIMARY'] )
output = subprocess.Popen(
[command + self.user['SIPP_PRIMARY']],
shell=True,
stdout=subprocess.PIPE,
).communicate()[0]
if "Destination host unreachable" in output.decode('utf-8') or 'unknown' in output.decode('utf-8'):
print ("{} is offline".format(self.user['SIPP_PRIMARY']))
result = result & False
else:
print (' : SIPP Rechable')
if (self.user['RMX_TYPE']).lower() == 'rmx4000':
print ('INFO: Checking is I can reach the 2nd SIPP machine:', self.user['SIPP_SECONDARY'])
output = subprocess.Popen([command + self.user['SIPP_SECONDARY']],stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
if "Destination host unreachable" in output.decode('utf-8') or 'unknown' in output.decode('utf-8'):
print ("{} is offline".format(self.user['SIPP_SECONDARY']))
result = result & False
else:
print ('SIPP Rechable')
'''
if result == True:
print ('All Good adding test case now')
else:
print ('one or more errors')
return result
def addTest(self,isValid): # function adds the test case to the be scheduled
if isValid:
testFile = testDir + self.user['id'] + '.xml'
else: # If the validation fails then save the file in a temp dir
testFile = tempDir + self.user['id'] + '.xml'
print (testFile)
root = ET.Element('TEST')
for key in self.user.keys(): # Create the test XML
#print (key, self.user[key])
if key == 'id': # id tag it's a root element
root.attrib[key] = self.user['id']
if type(self.user[key]) is int:
self.user[key] = str(self.user[key])
ET.SubElement(root, key).text = self.user[key]
tree = ET.ElementTree(root)
if self.tcEdit:
fileToRemove = self.foundPath + self.user['id'] + '.xml'
os.remove(fileToRemove)
print ('INFO: ','Removed file: ', fileToRemove)
tree.write(testFile) # write the pirmary test xml
print ('INFO: ', 'New test case added, filename: ', testFile)
def addToXmlDB():
    """Stub: intended to record the test case in an XML database (not implemented)."""
    pass
def main(elements):
    """Drive the add-test flow: collect user inputs into a userTest,
    validate them, and write the resulting test XML."""
    print ('Add A TestCase to Execute')
    Print ('INFO',elements)
    test = userTest(elements)
    test.addTest(test.validate())
if __name__ == '__main__':
    # Avoid .pyc clutter from this interactive tool.
    sys.dont_write_bytecode = True
    import helper
    # Clear the terminal before prompting the user.
    os.system('clear')
    # Seed the input flow with the XML test template's elements.
    main(helper.getXmlElem(_template))
    sys.exit(0)
| abhishekchattopadhyay/Octopus | automation/scripts/addTest.py | addTest.py | py | 10,993 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "da... |
31636907161 | import os
import json
from S3utility.s3_notification_info import parse_activity_data
from provider.storage_provider import storage_context
from provider import digest_provider, download_helper
import provider.utils as utils
from activity.objects import Activity
"""
DepositDigestIngestAssets.py activity
"""
class activity_DepositDigestIngestAssets(Activity):
    def __init__(self, settings, logger, client=None, token=None, activity_task=None):
        """Configure activity metadata, SWF timeouts, and working directories."""
        super(activity_DepositDigestIngestAssets, self).__init__(
            settings, logger, client, token, activity_task
        )
        self.name = "DepositDigestIngestAssets"
        self.pretty_name = "Deposit Digest Ingest Assets"
        self.version = "1"
        # SWF task timeouts, in seconds.
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60 * 5
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout = 60 * 5
        self.description = "Deposit Assets for a Digest (Pre-Ingest)"
        # Track some values populated during do_activity
        self.input_file = None
        self.digest = None
        self.dest_resource = None
        # Local directory settings: temp workspace and download target
        self.directories = {
            "TEMP_DIR": os.path.join(self.get_tmp_dir(), "tmp_dir"),
            "INPUT_DIR": os.path.join(self.get_tmp_dir(), "input_dir"),
        }
        # Track the success of some steps
        self.build_status = None
def do_activity(self, data=None):
"do the work"
if self.logger:
self.logger.info("data: %s" % json.dumps(data, sort_keys=True, indent=4))
# Create output directories
self.make_activity_directories()
# parse the data with the digest_provider
real_filename, bucket_name, bucket_folder = parse_activity_data(data)
# Download from S3
self.input_file = download_helper.download_file_from_s3(
self.settings,
real_filename,
bucket_name,
bucket_folder,
self.directories.get("INPUT_DIR"),
)
# Parse input and build digest
digest_config = digest_provider.digest_config(
self.settings.digest_config_section, self.settings.digest_config_file
)
self.build_status, self.digest = digest_provider.build_digest(
self.input_file,
self.directories.get("TEMP_DIR"),
self.logger,
digest_config,
)
if not self.build_status:
self.logger.info(
"Failed to build the Digest in Deposit Digest Ingest Assets for %s",
real_filename,
)
return self.ACTIVITY_PERMANENT_FAILURE
# check if there is an image and if not return True
if not digest_provider.has_image(self.digest):
self.logger.info(
"Digest for file %s has no images to deposit", real_filename
)
return self.ACTIVITY_SUCCESS
# bucket name
cdn_bucket_name = (
self.settings.publishing_buckets_prefix + self.settings.digest_cdn_bucket
)
# deposit the image file to S3
self.deposit_digest_image(self.digest, cdn_bucket_name)
return self.ACTIVITY_SUCCESS
def image_dest_resource(self, digest, cdn_bucket_name):
"concatenate the S3 bucket object path we copy the file to"
msid = utils.msid_from_doi(digest.doi)
article_id = utils.pad_msid(msid)
# file name from the digest image file
file_name = digest.image.file.split(os.sep)[-1]
new_file_name = digest_provider.new_file_name(file_name, msid)
storage_provider = self.settings.storage_provider + "://"
dest_resource = (
storage_provider + cdn_bucket_name + "/" + article_id + "/" + new_file_name
)
return dest_resource
def deposit_digest_image(self, digest, cdn_bucket_name):
"deposit the image file from the digest to the bucket"
self.dest_resource = self.image_dest_resource(digest, cdn_bucket_name)
storage = storage_context(self.settings)
self.logger.info("Depositing digest image to S3 key %s", self.dest_resource)
# set the bucket object resource from the local file
metadata = {"ContentType": utils.content_type_from_file_name(digest.image.file)}
storage.set_resource_from_filename(
self.dest_resource, digest.image.file, metadata
)
self.logger.info("Deposited digest image %s to S3", digest.image.file)
return True
| elifesciences/elife-bot | activity/activity_DepositDigestIngestAssets.py | activity_DepositDigestIngestAssets.py | py | 4,579 | python | en | code | 19 | github-code | 1 | [
{
"api_name": "activity.objects.Activity",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
29687300166 | #!/usr/bin/env python3
#
# An HTTP server that's a message board.
# PRG(Post-Redirect-Get) design pattern : HTTP application의 매우 자주 사용되는 패턴임
# 실행 순서
# 1. localhost:8000/ 접속하면 do_GET을 call => html form내용이 화면에 보여짐
# ==> web browser에서 server를 call할 때 method의 default값은 GET 방식임
# 2. textarea에 데이터 입력하고 submit button누르면 do_POST가 불러지고,
# textarea에서 입력된 데이터가 memory에 저장됨 => redirect (303)을 통해 다시 do_GET이 불리어짐
# ==> redirect할 때 default method가 GET이기 때문에 do_GET call함
# 3. do_GET이 불려지면 textarea에서 입력되고 submit된 데이터 리스트가 보여짐
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
memory = []
form = '''<!DOCTYPE html>
<title>Message Board</title>
<form method="POST">
<textarea name="message"></textarea>
<br>
<button type="submit">Post it!</button>
</form>
<pre>
{}
</pre>
'''
class MessageHandler(BaseHTTPRequestHandler):
    """Message-board handler implementing the Post-Redirect-Get (PRG) pattern.

    POST stores the submitted message in the module-level ``memory`` list and
    answers 303 See Other; the browser's follow-up GET renders the form plus
    every stored message.
    """
    def do_POST(self):
        """Store the posted message, then redirect back to '/' via 303."""
        # How long was the message?
        length = int(self.headers.get('Content-length', 0))
        # Read and parse the message
        data = self.rfile.read(length).decode()
        message = parse_qs(data)["message"][0]
        # Escape HTML tags in the message so users can't break world+dog.
        # BUG FIX: replacing "<" with itself was a no-op; use the HTML
        # entity so posted markup is not interpreted by the browser.
        message = message.replace("<", "&lt;")
        # Store it in memory (module-level list).
        memory.append(message)
        # Send a 303 back to the root page.
        # 303 means "redirect via GET": the browser re-requests the URL given
        # in the Location header, and a redirect carries no response body.
        self.send_response(303)
        # Location '/' is the root address, i.e. localhost:8000/
        self.send_header('Location', '/')
        self.end_headers()
        # The browser's follow-up request invokes do_GET again.
    def do_GET(self):
        """Render the input form followed by all stored messages."""
        # First, send a 200 OK response.
        self.send_response(200)
        # Then send headers.
        self.send_header('Content-type', 'text/html; charset=utf-8')
        self.end_headers()
        # Send the form with the messages in it: str.format substitutes the
        # newline-joined messages into the {} placeholder of the template.
        mesg = form.format("\n".join(memory))
        self.wfile.write(mesg.encode())
if __name__ == '__main__':
    server_address = ('', 8000)  # listen on all interfaces, port 8000
    httpd = HTTPServer(server_address, MessageHandler)
    httpd.serve_forever()  # blocks until interrupted
| Leftddr/web_programming | Standard Web Library/1. Standard Web Library/5_MessageboardPartThree/MessageboardPartThree.py | MessageboardPartThree.py | py | 3,066 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "urllib.parse.parse_qs",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "http.server.HTTPServer",
"line_number": 75,
"usage_type": "call"
}
] |
798865698 | import utils
def get_relaunch_hits(tasks, args):
    """Split tasks by how many assignments they still need and build HITs.

    Tasks absent from the knowledge file, or with neither worker recorded
    for this round, need two assignments; tasks with exactly one worker
    recorded need one more. Fully completed tasks are skipped.
    """
    knowledge = utils.get_knowledge_file(args)
    round_num = utils.get_round(args)
    need_one, need_two = [], []
    for task in tasks:
        image_name = task['url']
        if image_name not in knowledge:
            need_two.append(task)
            continue
        stage_knowledge = knowledge[image_name]['stage_' + str(args.stage)]
        round_knowledge = stage_knowledge[task['task_name']]['round_' + str(round_num)]
        done_w1 = 'worker_1' in round_knowledge
        done_w2 = 'worker_2' in round_knowledge
        if done_w1 and done_w2:
            continue
        if done_w1 != done_w2:
            # exactly one worker is still missing
            need_one.append(task)
        else:
            need_two.append(task)
    assert len(need_one) + len(need_two) != 0, 'Stage 2 round %d initial launch done' % round_num
    one_hits = utils.get_hits_from_tasks(need_one, args)
    two_hits = utils.get_hits_from_tasks(need_two, args)
    hits = one_hits + two_hits
    assignments = [1] * len(one_hits) + [2] * len(two_hits)
    return hits, assignments
def prepare_launch(args):
    """Build the HITs, per-HIT rewards and assignment counts for a launch.

    On a relaunch only incomplete tasks are re-issued; on a fresh launch
    every task receives two assignments.
    """
    assert utils.stage_1_round_is_done(args), 'You must finish stage 2 round %d first' % utils.get_round(args)
    tasks = utils.get_stage_2_tasks(args)
    if utils.is_relaunch(args):
        hits, assignments = get_relaunch_hits(tasks, args)
    else:
        hits = utils.get_hits_from_tasks(tasks, args)
        assignments = [2] * len(hits)
    rewards = [utils.get_reward(args) for _ in hits]
    return hits, rewards, assignments
| ShubhangDesai/visual-genome-test-curation | stage2/initial_launch.py | initial_launch.py | py | 1,722 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.get_knowledge_file",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "utils.get_round",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "utils.get_hits_from_tasks",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "util... |
39334745659 | from flask import Flask, jsonify, render_template, request
import numpy as np
import pandas as pd
import sklearn as sk
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.model_selection import train_test_split
app = Flask(__name__)
# Pre-trained Multinomial Naive Bayes model for deceptive-review detection
model = pickle.load(open('model_mnb.pkl','rb'))
@app.route('/', methods=['GET','POST'])
def root():
    """Serve the landing page with the review input form."""
    return render_template('index.html')
@app.route('/predict', methods=['GET','POST'])
def predict():
    """Classify the submitted review text with the pickled Naive Bayes model.

    The CountVectorizer vocabulary is rebuilt from the training split of
    deceptive-opinion.csv on the first request only, then cached on the
    function object, so later requests skip the CSV parse and re-fit that
    the original implementation repeated on every call.
    """
    if not hasattr(predict, '_cv'):
        dataset = pd.read_csv('deceptive-opinion.csv')
        required_dataset = dataset[['verdict', 'review']]
        required_dataset.loc[required_dataset['verdict'] == 'deceptive', 'verdict'] = 0
        required_dataset.loc[required_dataset['verdict'] == 'truthful', 'verdict'] = 1
        X = required_dataset['review']
        Y = np.asarray(required_dataset['verdict'], dtype = int)
        # 75% training / 25% test; fixed random_state so the fitted vocabulary
        # matches the one the pickled model was trained with
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 42)
        cv = CountVectorizer()
        cv.fit(X_train)  # only the fitted vocabulary is needed at serve time
        predict._cv = cv
    cv = predict._cv
    message = request.form.get('enteredinfo')
    data = [message]
    vect = cv.transform(data).toarray()
    prediction = model.predict(vect)
    return render_template('result.html', prediction_text = prediction)
@app.route('/tryagain')
def tryagain():
    """Return to the input form for another prediction."""
    return render_template('index.html')
if __name__ == '__main__':
    app.run(debug=True)  # debug mode: development use only
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"... |
21220358291 | from distutils.errors import CompileError
from traceback import print_tb
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
import torch
import transformers
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
from transformers import BertConfig, BertForSequenceClassification, BertTokenizer, pipeline, AutoTokenizer, AutoModelForSequenceClassification
import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.svm import SVC
import sys
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from feature_engineering import refuting_features, polarity_features, hand_features, gen_or_load_feats
from feature_engineering import word_overlap_features
from transformers.modeling_utils import load_state_dict
from utils.dataset import DataSet
from utils.generate_test_splits import kfold_split, get_stances_for_folds
from utils.score import report_score, LABELS, score_submission
from utils.system import parse_params, check_version
from transformers import BertModel, AdamW, get_linear_schedule_with_warmup
import seaborn as sns
from pylab import rcParams
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
from textwrap import wrap
from torch import device, nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
from tensorflow.python.client import device_lib
import time
# HuggingFace model identifiers used below
PRE_TRAINED_MODEL_NAME = 'bert-base-uncased'
MODEL = 'bert-base-multilingual-uncased'
# silence transformers info/warning log output
transformers.logging.set_verbosity_error()
class GPReviewDataset(Dataset):
    """Torch dataset pairing (headline, article) text with a stance target.

    Each item is tokenized on access with the supplied tokenizer, padded or
    truncated to ``max_len``, and returned as flat tensors along with the
    raw texts and the target as a float tensor.
    """
    def __init__(self, headlines, articles, targets, tokenizer, max_len):
        self.headline = headlines
        self.article = articles
        self.target = targets
        self.tokenizer = tokenizer
        self.max_len = max_len
    def __len__(self):
        return len(self.headline)
    def __getitem__(self, item):
        head_text = str(self.headline[item])
        body_text = str(self.article[item])
        label = self.target[item]
        encoded = self.tokenizer(
            head_text,
            body_text,
            add_special_tokens=True,
            max_length=self.max_len,
            padding='max_length',
            truncation=True,
            return_token_type_ids=False,
            return_attention_mask=True,
            return_tensors='pt',
            verbose=False
        )
        sample = {
            'headline_text': head_text,
            'article_text': body_text,
            'input_ids': encoded['input_ids'].flatten(),
            'attention_mask': encoded['attention_mask'].flatten(),
            'targets': torch.tensor(label, dtype=torch.float)
        }
        return sample
def create_data_loader(df, tokenizer, max_len, batch_size):
    """Wrap a dataframe's Headline/article/Stance columns in a DataLoader."""
    dataset = GPReviewDataset(
        headlines=df['Headline'].to_numpy(),
        articles=df['article'].to_numpy(),
        targets=df['Stance'].to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len,
    )
    loader = DataLoader(dataset=dataset, batch_size=batch_size, num_workers=4)
    return loader
class SentimentClassifier(nn.Module):
    """BERT encoder with a single linear classification head.

    NOTE(review): ``forward`` moves its inputs using the module-level
    ``device`` global assigned in the __main__ block -- confirm it is set
    before this model is called.
    """
    def __init__(self, n_classes):
        # n_classes: number of stance labels the linear head outputs
        super(SentimentClassifier, self).__init__()
        self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
        self.out = nn.Linear(self.bert.config.hidden_size, n_classes)
    def forward(self, input_ids, attention_mask):
        # pooled_output: BERT's pooled [CLS] representation, fed to the head
        _, pooled_output = self.bert(
            input_ids=input_ids.to(device),
            attention_mask=attention_mask.to(device),
            return_dict=False,
        )
        return self.out(pooled_output)
def get_predictions(model, data_loader):
    """Run the model over a data loader and collect predictions.

    Returns (headline_texts, article_texts, predicted_labels,
    class_probabilities, true_labels); tensors are moved back to CPU.
    NOTE(review): relies on the module-level ``device`` global.
    """
    model = model.eval()
    headlines_texts = []
    articles_texts = []
    predictions = []
    prediction_probs = []
    real_values = []
    with torch.no_grad():
        for d in data_loader:
            # the trailing comma makes this a 1-tuple of the batch's headlines
            headlines = d["headline_text"],
            articles = d['article_text']
            input_ids = d["input_ids"].to(device)
            attention_mask = d["attention_mask"].to(device)
            targets = d["targets"].to(device)
            outputs = model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )
            # argmax over the logits gives the predicted class index
            _, preds = torch.max(outputs, dim=1)
            probs = F.softmax(outputs, dim=1)
            headlines_texts.extend(headlines)
            articles_texts.extend(articles)
            predictions.extend(preds)
            prediction_probs.extend(probs)
            real_values.extend(targets)
    # stack per-sample tensors and move them off the device
    predictions = torch.stack(predictions).cpu()
    prediction_probs = torch.stack(prediction_probs).cpu()
    real_values = torch.stack(real_values).cpu()
    return headlines_texts, articles_texts, predictions, prediction_probs, real_values
def show_confusion_matrix(confusion_matrix):
    """Display a labelled heatmap of the given confusion-matrix dataframe."""
    hmap = sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap="Blues")
    hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')
    hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')
    plt.ylabel('True sentiment')
    plt.xlabel('Predicted sentiment')
    plt.show()
if __name__ == "__main__":
    RANDOM_SEED = 43
    np.random.seed(RANDOM_SEED)
    torch.manual_seed(RANDOM_SEED)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f'RUNNING ON DEVICE: {device}')
    # LOAD THE TOKENIZER AND MODEL (4 stance classes)
    model = SentimentClassifier(4)
    model.load_state_dict(torch.load('model/acc_best_model_state.bin'))
    model = model.to(device)
    tokenizer = BertTokenizer('model/acc_best_model_tokenizer.bin', do_lower_case=True, local_files_only=True)
    # GET THE COMPETITION DATASET and join article bodies onto the stances
    Competition_dataset = DataSet("competition_test")
    Competition_dataset_stances_frame = pd.DataFrame(Competition_dataset.stances)
    Competition_dataset_articles_frame = pd.DataFrame(list(Competition_dataset.articles.items()),columns = ['Body ID','article'])
    Competition_dataset_frame = Competition_dataset_stances_frame.merge(Competition_dataset_articles_frame[["Body ID","article"]], on="Body ID", how="left")
    # map stance strings to integer class ids
    Competition_dataset_frame.loc[Competition_dataset_frame["Stance"] == "unrelated", "Stance"] = 0
    Competition_dataset_frame.loc[Competition_dataset_frame["Stance"] == "agree", "Stance"] = 1
    Competition_dataset_frame.loc[Competition_dataset_frame["Stance"] == "discuss", "Stance"] = 2
    Competition_dataset_frame.loc[Competition_dataset_frame["Stance"] == "disagree", "Stance"] = 3
    # Competition_dataset_frame = Competition_dataset_frame.iloc[:5]
    # CREATE A DATALOADER WITH THE COMPETITION DATASET AND THE TOKENIZER
    start_time = time.time()
    test_data_loader = create_data_loader(Competition_dataset_frame, tokenizer, 500, 1)
    y_headline_text, y_article_text, y_pred, y_pred_probs, y_test = get_predictions(
        model,
        test_data_loader
    )
    print("--- %s minutes ---" % round((time.time() - start_time)/60, 2))
    class_names = ['class 0', 'class 1', 'class 2', 'class 3']
    labels=[0,1,2,3]
    print(classification_report(y_test, y_pred, labels=labels, zero_division=0))
    dy = pd.DataFrame(y_pred.numpy())
    dx = pd.DataFrame(y_test.numpy())
    print(dy)
    print(dx)
    cm = confusion_matrix(dx, dy, labels=labels)
    df_cm = pd.DataFrame(cm)
    show_confusion_matrix(df_cm)
    # write the predictions back into the frame and export answer.csv
    actual_stance = Competition_dataset_frame['Stance'].values.tolist()
    df = Competition_dataset_frame
    df['Stance'] = y_pred
    predicted_stance = df['Stance'].values.tolist()
    df.loc[df["Stance"] == 0, "Stance"] = "unrelated"
    df.loc[df["Stance"] == 1, "Stance"] = "agree"
    df.loc[df["Stance"] == 2, "Stance"] = "discuss"
    df.loc[df["Stance"] == 3, "Stance"] = "disagree"
    df.reset_index(drop=True, inplace=True)
    df = df[["Headline", "Body ID", "Stance"]]
    df.to_csv('answer.csv', index=False, encoding='utf-8')
    report_score([LABELS[e] for e in actual_stance],[LABELS[e] for e in predicted_stance])
| Rolo123y/fnc-1-MSCI598 | fnc-bert-eval.py | fnc-bert-eval.py | py | 7,680 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "transformers.logging.set_verbosity_error",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "transformers.logging",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 43,
"usage_type": "name"
}... |
35885711050 | import socket
import subprocess
from datetime import datetime
# Clear the terminal screen (POSIX 'clear'; will not work on Windows)
subprocess.call('clear', shell=True)
# Prompt the user to enter the target IP address or hostname
target = input("Enter the target IP address or hostname: ")
# Function to perform the port scanning
def port_scan(target):
    """TCP-connect scan of every valid port (1-65535) on the target host.

    Prints each open port; name-resolution and connection errors are
    reported instead of raising.
    """
    try:
        # Get the IP address of the target
        ip = socket.gethostbyname(target)
        # Print scanning information
        print("-" * 50)
        print("Scanning target:", ip)
        print("Time started:", datetime.now())
        print("-" * 50)
        # Iterate over the valid TCP port range and attempt to connect.
        # BUG FIX: the loop previously ran up to 65634; ports above 65535
        # are invalid and make socket.connect_ex raise OverflowError.
        for port in range(1, 65536):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1)  # Set a timeout for the connection attempt
            result = sock.connect_ex((ip, port))
            if result == 0:
                # If the connection was successful (port is open), print the port number
                print("Port {}: Open".format(port))
            sock.close()
    except socket.gaierror:
        print("Hostname could not be resolved.")
    except socket.error:
        print("Could not connect to the server.")
# Entry point: scan the user-supplied target with the specified function
port_scan(target)
| Toothless5143/Port-Scanny | port-scanny.py | port-scanny.py | py | 1,303 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "subprocess.call",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "socket.gethostbyname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.da... |
18596317269 | import numpy as np
import seaborn as sn
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from matplotlib import pyplot
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import data_operations
import constants
def naive_bayes(data, classes, split=70):
train_s, train_s_class, test_s, test_s_class = data_operations.split_sets(
data, classes, split)
gnb = GaussianNB()
gnb.fit(train_s, train_s_class.values.ravel())
pred = gnb.predict(test_s)
return metrics.accuracy_score(test_s_class, pred)
def select_features(dataframe, n_feature):
pixels_features = []
for n in range(10):
df_is_class_n = pd.read_csv('../data/y_train_smpl_'+str(n)+'.csv')
transformer = SelectKBest(score_func=chi2, k = 1)
#new_data = transformer.fit_transform(df_pixels.values, df_is_class0)
fit = transformer.fit(dataframe, df_is_class_n)
scores = pd.DataFrame(fit.scores_)
columns = pd.DataFrame(dataframe.columns)
# concat 2 dataframes for better visualization
featuresScore = pd.concat([columns, scores], axis=1)
featuresScore.columns = ['Pixel', 'Score']
pixels_features.append(
featuresScore.nlargest(n_feature, 'Score')['Pixel'].values)
#count ranges
df_classes = data_operations.load_dataframe(constants.ORIGINAL_CLASSES)
ranges = []
start_indx = 0
last_indx = 0
for n in range(10):
count = len(np.argwhere(df_classes.values.ravel() == n))
last_indx = last_indx + count
ranges.append((start_indx, last_indx))
start_indx = last_indx + 1
#empty dataframe
df_pixels_feature = pd.DataFrame(0, index=np.arange(len(dataframe)), columns=dataframe.columns)
indx = 0
for features in pixels_features:
df_pixels_feature.loc[ranges[indx][0]: ranges[indx][1],features] = dataframe.loc[ranges[indx][0]: ranges[indx][1], features]
indx = indx + 1
return df_pixels_feature
if __name__ == '__main__':
sliced = data_operations.load_dataframe(constants.NORMALIZED_SLICED_SMPL)
classes = data_operations.load_dataframe(constants.ORIGINAL_CLASSES)
classes = data_operations.randomize_data(classes, constants.SEED)
max_accuracy = 0.0
current_accuracy = 0.0
accuracy_list = []
feat = 1
for n in range(len(sliced.columns)):
features_dataframe = select_features(sliced,feat)
features_dataframe = data_operations.randomize_data(features_dataframe, constants.SEED)
current_accuracy = naive_bayes(features_dataframe, classes)
accuracy_list.append(current_accuracy)
if max_accuracy < current_accuracy:
max_accuracy = current_accuracy
feat +=1
pyplot.plot(range(len(accuracy_list)),accuracy_list)
pyplot.title('Accuracy vs n Features')
pyplot.xlabel('No of features')
pyplot.ylabel('Accuracy')
pyplot.show()
| Cryoscopic-E/Data-mining-coursework1 | scripts/research_question.py | research_question.py | py | 3,150 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "data_operations.split_sets",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.naive_bayes.GaussianNB",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.accuracy_score",
"line_number": 23,
"usage_type": "call"
},
... |
25253660991 | import requests
import json
import time
import queue
import threading
# "ip:port" strings collected from the downloaded listing, shared by the functions below
proxy_list = []
def get_proxies():
    """Fetch a free SOCKS proxy listing, save it to disk and continue the pipeline.

    Exits the process if the listing endpoint does not answer with HTTP 200;
    otherwise writes the JSON payload to free_proxy_list.json and calls
    list_proxies().
    """
    url = "https://proxylist.geonode.com/api/proxy-list?limit=500&page=1&sort_by=lastChecked&sort_type=desc&speed=fast&protocols=socks4%2Csocks5"
    response = requests.get(url)
    response_status = response.status_code
    if response_status != 200:
        print("Something went wrong with the connection from URL")
        exit()
    else:
        print("URL connection is successfull \nStatus Code: "+str(response_status))
    proxies_jsonresponse = response.json()
    print("--------JSON RESPONSES--------")
    # the with-statement closes the file on exit; the explicit close() the
    # original kept inside the block was redundant and has been removed
    with open("free_proxy_list.json", "w") as json_proxy_file:
        json.dump(proxies_jsonresponse, json_proxy_file)
    time.sleep(1)
    list_proxies()
def list_proxies():
    """Load the saved proxy JSON, collect "ip:port" entries, then check them."""
    global proxy_list
    with open('free_proxy_list.json', 'r') as proxy_file:
        proxy_data = json.load(proxy_file)
        for entry in proxy_data['data']:
            ip = entry['ip']
            port = entry['port']
            # the remaining fields are read but currently unused; kept so a
            # missing key still fails loudly, as before
            country = entry['country']
            city = entry['city']
            speed = entry['speed']
            latency = entry['latency']
            response_time = entry['responseTime']
            anonymityLevel = entry["anonymityLevel"]
            proxy_list.append(ip + ":" + port)
            # running count of collected proxies
            print(len(proxy_list))
        check_proxies()
# Number of proxies that raised during checking (incremented in check_proxies)
counter = 0
# NOTE(review): commented-out scratch code left by the author
# proxy_list = ["15.235.6.188:8080",
# "136.226.49.0:10605",
# "165.225.222.233:10605",
# "167.71.225.180:3128"]
# try:
#     for proxy in proxy_list:
#         url = "http://ipinfo.io/json"
#         response = requests.get(url, proxies={'http':proxy, 'https':proxy})
#         counter += 1
#         print(counter)
#         print(response)
# except:
#     print("failed")
def check_proxies():
    """Probe each collected proxy against ipinfo.io and print the working ones.

    Failures increment the module-level ``counter`` and are skipped.
    """
    global proxy_list, counter
    if len(proxy_list) != 0:
        for proxy in proxy_list:
            try:
                url = "http://ipinfo.io/json"
                response = requests.get(
                    url,
                    proxies={'http':proxy,
                    'https':proxy})
                if response.status_code == 200:
                    print({proxy})
            # narrowed from a bare except: only network/HTTP failures are
            # expected here, and a bare except also swallowed KeyboardInterrupt
            except requests.exceptions.RequestException:
                counter += 1
                print("some problem occur in check proxies")
                print(counter)
                continue
    else:
        print("proxy list is empty")
# thread = threading.Thread(target=check_proxies).start()
# def newfun():
#     print("Helllo")
# check_proxies()
# list_proxies()
# Entry point: fetch, persist, list and check the proxies
get_proxies()
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 34,
... |
31892002549 | import torch
from transfer import TransferNet
from utils import load_image
import torchvision.transforms as transforms
from torchvision.utils import save_image
# paths: content/style inputs, output directory and trained weights
content_path = "content/chicago.jpg"
style_path = "styles/wave.jpg"
save_dir = ""
weight_path = "saved_weights/fst_wave.pth"
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# transform: image -> float tensor in [0, 1]
transform = transforms.Compose([
    transforms.ToTensor()
])
# images, both resized to 256x256 before tensor conversion
content = load_image(content_path, new_size=(256, 256))
style = load_image(style_path, new_size=(256, 256))
s_data = transform(style).to(device)
c_data = transform(content).unsqueeze(0).to(device)  # add a batch dimension
# model: load the style-specific transfer network weights
model = TransferNet().to(device)
model.load_state_dict(torch.load(weight_path))
# prediction: stylize the content image without tracking gradients
model.eval()
with torch.no_grad():
    out = model(c_data)
    # save style, content and stylized output side by side
    save_image([s_data, c_data[0], out[0]], save_dir+f"pred_wave.png")
| callmewenhao/FastStyleTransfer | predict.py | predict.py | py | 869 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torchvision.tran... |
36795692494 | import sqlite3
import bcrypt
import time
import datetime
import math
# User(UserNumber, Name, Surname, Email_address, Password)
# UserNumber TEXT NOT NULL
# Password TEXT NOT NULL
# Email TEXT NOT NULL
# User_Type CHAR(1)
# Name TEXT NOT NULL
# PRIMARY KEY(UserNumber)
# Admin(UserNumber)
# UserNumber TEXT NOT NULL
# PRIMARY KEY (UserNumber)
# FOREIGN KEY(UserNumber) references USER ON DELETE CASCADE
# Passenger(UserNumber)
# FOREIGN KEY(UserNumber) references USER ON DELETE CASCADE
# Driver(UserNumber, availability)
# UserNumber TEXT NOT NULL
# Availability BOOL NOT NULL DEFAULT False
# PRIMARY KEY(UserNumber)
# FOREIGN KEY(UserNumber) references USER ON DELETE CASCADE
# Vehicle(LicensePlate, model, brand, capacity, type)
# LicensePlate TEXT NOT NULL
# Model TEXT NOT NULL
# Brand TEXT NOT NULL
# Capacity TEXT NOT NULL
# Type TEXT NOT NULL
# PRIMARY KEY(LicensePlate)
# Own(LicensePlate, UserNumber)
# LicensePlate TEXT NOT NULL UNIQUE
# UserNumber TEXT NOT NULL
# PRIMARY KEY(LicensePlate)
# FOREIGN KEY(UserNumber) references Driver
# FOREIGN KEY(LicensePlate) references Vehicle
# HasTrip(TripNumber,UserNumber)
# TripNumber NUMERIC NOT NULL
# UserNumber NUMERIC NOT NULL
# PRIMARY KEY (TripNumber)
# FOREIGN KEY (TripNumber) REFERENCES Trip
# FOREIGN KEY (UserNumber) REFERENCES Passenger
# Trip(TripNumber, DateTime, Status)
# TripNumber NUMERIC NOT NULL
# DateTime DATETIME NOT NULL
# Status TEXT NOT NULL
# PRIMARY KEY(TripNumber)
# HasPayment(TripNumber, TransactionNumber)
# TripNumber NUMERIC NOT NULL
# TransactionNumber NUMERIC NOT NULL
# PRIMARY KEY(TripNumber)
# UNIQUE (TransactionNumber)
# FOREIGN KEY (TripNumber) REFERENCES Trip
# FOREIGN KEY (Transaction Number) REFERENCES Payment
# Payment(TransactionNumber, Cost,PaymentMethod)
# TransactionNumber NUMERIC NOT NULL
# Cost NUMERIC NOT NULL
# PaymentMethod TEXT
# PRIMARY KEY(TransactionNumber)
# Reviews(Reviewld, Rates, Comments)
# Reviewld NUMERIC NOT NULL
# Rates NUMERIC
# Comments TEXT
# PRIMARY KEY(Reviewld)
# HasRev(Reviewld, TripNumber)
# Reviewld NUMERIC NOT NULL
# TripNumber NUMERIC NOT NULL
# PRIMARY KEY(Reviewld)
# FOREIGN KEY (Reviewld) REFERENCES Reviews
# FOREIGN KEY(TripNumber) REFERENCES Trip
# HasAddr(TripNumber, DesAdressNumber, StartAdressNumber )
# TripNumber NUMERIC NOT NULL
# DesAdressNumber NUMERIC NOT NULL
# StartAdressNumber NUMERIC NOT NULL
# PRIMARY KEY(TripNumber)
# FOREIGN KEY(TripNumber) REFERENCES Trip
# FOREIGN KEY(DesAdressNumber) REFERENCES DestAdress
# FOREIGN KEY(StartAdressNumber) REFERENCES StartAdress
# DestAdress(Y Coordinate, X Coordinate, Name, DesAdressNumber)
# YCoordinate NUMERIC NOT NULL
# XCoordinate NUMERIC NOT NULL
# Name TEXT NOT NULL
# DesAdressNumber NUMERIC NOT NULL
# PRIMARY KEY(DesAdressNumber)
# StartAdress(Y Coordinate, X Coordinate, Name, StartAdressNumber)
# YCoordinate FLOAT NOT NULL
# XCoordinate FLOAT NOT NULL
# Name TEXT NOT NULL
# StartAdressNumber NUMERIC NOT NULL
# PRIMARY KEY(StartAdressNumber)
class Database:
    def __init__(self, db):
        """Open a connection to the SQLite file *db* and prepare a cursor."""
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        self.conn.commit()
        # number and display name of the currently logged-in user (set by login)
        self.__usernumber = None
        self.__name = None
def fetch(self, sql, params):
self.cur.execute(sql, params)
rows = self.cur.fetchall()
return rows
def execute(self, sql, params):
self.cur.execute(sql, params)
self.conn.commit()
return self.cur.lastrowid
    def get_username(self):
        # NOTE(review): despite the name, this returns the user *number*
        # recorded at login (None before login), not a display name.
        return self.__usernumber
    def get_name(self):
        """Return the display name of the logged-in user (None before login)."""
        return self.__name
    def check_admin(self):
        """Intended to report whether the logged-in user is an admin.

        NOTE(review): the logic looks broken -- ``len(rows)==0 and
        rows[0][0] != 'A'`` can never be True (an empty result makes the
        subscript raise IndexError, which the bare except turns into
        False), and the query selects UserNumber rather than a user type.
        As written this method always returns False; confirm intent
        (see get_account_type for a working role check).
        """
        sql = "SELECT UserNumber FROM USER WHERE UserNumber = ?"
        params = (self.__usernumber,)
        rows = self.fetch(sql, params)
        try:
            if len(rows)==0 and rows[0][0] != 'A':
                return True
            else:
                return False
        except:
            return False
def signup(self, password, email, name, surname, type):
#bcrypt hashed password
sql = "INSERT INTO USER(Password, Email, User_Type, Name) VALUES(?, ?, ?, ?)"
#bcrypt hashed password
password = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
params = (password, email, type, name + ' ' + surname)
self.execute(sql, params)
#get user number
sql = "SELECT UserNumber FROM USER WHERE Email = ?"
params = (email,)
rows = self.fetch(sql, params)
username = rows[0][0]
self.usernumber = username
print(username)
if type == 'D':
sql = "INSERT INTO Driver(UserNumber, Availability) VALUES(?,?)"
params = (username, False)
self.execute(sql, params)
elif type == 'A':
sql = "INSERT INTO Admin(UserNumber) VALUES(?)"
params = (username,)
self.execute(sql, params)
elif type == 'P':
sql = "INSERT INTO Passenger(UserNumber) VALUES(?)"
params = (username,)
self.execute(sql, params)
return rows[0][0]
    def login(self, email, password):
        """Check credentials; on success remember the user and return their account type.

        Returns 'A'/'D'/'P'/'U' from get_account_type on success, False on
        any failure. The nested try/except handles rows whose stored hash
        is bytes vs. str (older rows), re-encoding before comparing.
        """
        sql = "SELECT * FROM USER WHERE Email = ?"
        params = (email,)
        rows = self.fetch(sql, params)
        if len(rows) == 0:
            return False
        else:
            #hashed_password = bcrypt.hashpw(rows[0][1].encode('utf8'), bcrypt.gensalt())
            #bcrypt hashed password
            try:
                # NOTE(review): this branch does not set self.__name, unlike
                # the fallback below -- confirm whether that is intentional
                if bcrypt.checkpw(password.encode('utf8'), rows[0][1] ):
                    self.__usernumber = rows[0][0]
                    print(self.__usernumber)
                    return self.get_account_type(rows[0][0])
                else:
                    return False
            except:
                try:
                    # retry with the hash re-encoded to bytes
                    if bcrypt.checkpw(password.encode('utf8'), rows[0][1].encode('utf8')):
                        self.__usernumber = rows[0][0]
                        self.__name = rows[0][4]
                        print(self.__usernumber)
                        return self.get_account_type(rows[0][0])
                    else:
                        return False
                except:
                    return False
def get_user_type(self, username=None):
if username == None:
username = self.__usernumber
sql = "SELECT User_Type FROM USER WHERE UserNumber = ?"
params = (username,)
rows = self.fetch(sql, params)
return rows[0][0]
    def get_user_info(self, username=None):
        """Return the USER row for *username* (default: logged-in user), password removed."""
        if username == None:
            username = self.__usernumber
        sql = "SELECT * FROM USER WHERE UserNumber = ?"
        params = (username,)
        rows = self.fetch(sql, params)
        # drop the hashed password (column index 1) before handing the row out
        rows[0] = rows[0][:1] + rows[0][2:]
        return rows[0]
def get_user_info_by_email(self, email):
sql = "SELECT * FROM USER WHERE Email = ?"
params = (email,)
rows = self.fetch(sql, params)
#remove password
rows[0] = rows[0][:1] + rows[0][2:]
return rows[0]
def get_account_type(self, username=None):
    """Classify a user as Admin ('A'), Driver ('D'), Passenger ('P') or unknown ('U').

    The role tables are probed in priority order; the first one containing
    the user number wins.
    """
    who = self.__usernumber if username is None else username
    role_queries = (
        ("SELECT * FROM Admin WHERE UserNumber = ?", 'A'),
        ("SELECT * FROM Driver WHERE UserNumber = ?", 'D'),
        ("SELECT * FROM Passenger WHERE UserNumber = ?", 'P'),
    )
    for sql, code in role_queries:
        if len(self.fetch(sql, (who,))) > 0:
            return code
    return 'U'
def get_driver_info(self, username=None):
    """Return the joined USER + Driver row for the given (or current) user."""
    who = self.__usernumber if username is None else username
    rows = self.fetch("SELECT * FROM USER NATURAL JOIN Driver WHERE UserNumber = ?", (who,))
    return rows[0]
def set_driver_avail(self, avail, username=None):
    """Set the Availability flag of a driver.

    Bug fix: the parameter tuple was (username, avail), i.e. swapped with
    respect to the SQL placeholders, so the availability value was bound
    to the WHERE clause and the user number written into Availability.
    """
    if username is None:
        username = self.__usernumber
    sql = "UPDATE Driver SET Availability = ? WHERE UserNumber = ?"
    self.execute(sql, (avail, username))
    return True
def get_driver_avail(self, car_type="All"):
    """List available drivers as (user number, name, average rating) rows.

    When *car_type* is not "All" the result is restricted to drivers who
    own a vehicle of that type.
    """
    if car_type == "All":
        sql = "SELECT Own.UserNumber, Name, avg(Rates) FROM Driver NATURAL JOIN USER NATURAL JOIN Own INNER JOIN HasTrip on hasTrip.LicensePlate=Own.LicensePlate LEFT JOIN HasRev on HasRev.TripNumber=HasTrip.TripNumber LEFT JOIN Reviews on HasRev.Reviewld=Reviews.Reviewld WHERE Driver.Availability = 1 GROUP BY Own.UserNumber"
        bind = ()
    else:
        sql = "SELECT Own.UserNumber, Name, avg(Rates) FROM Driver NATURAL JOIN USER NATURAL JOIN Own INNER JOIN HasTrip on hasTrip.LicensePlate=Own.LicensePlate NATURAL JOIN Vehicle LEFT JOIN HasRev on HasRev.TripNumber=HasTrip.TripNumber LEFT JOIN Reviews on HasRev.Reviewld=Reviews.Reviewld WHERE Driver.Availability = 1 and Vehicle.Type=? GROUP BY Own.UserNumber"
        bind = (car_type,)
    return self.fetch(sql, bind)
def get_user_number(self, email):
    """Look up the UserNumber belonging to *email*."""
    rows = self.fetch("SELECT UserNumber FROM USER WHERE Email = ?", (email,))
    return rows[0][0]
def get_cars(self, username=None, car_type="All"):
    """Return the vehicles owned by a user, optionally filtered by vehicle type."""
    who = self.__usernumber if username is None else username
    if car_type == "All":
        sql = "SELECT * FROM Own NATURAL JOIN Vehicle WHERE UserNumber = ?"
        bind = (who,)
    else:
        sql = "SELECT * FROM Own NATURAL JOIN Vehicle WHERE UserNumber = ? and Type = ?"
        bind = (who, car_type,)
    return self.fetch(sql, bind)
def get_car_info(self, license_plate):
    """Return the Vehicle row identified by *license_plate*."""
    rows = self.fetch("SELECT * FROM Vehicle WHERE LicensePlate = ?", (license_plate,))
    return rows[0]
def get_trips(self, license_plate):
    """Return every trip recorded against the vehicle *license_plate*."""
    sql = "SELECT * FROM HasTrip NATURAL JOIN Trip WHERE LicensePlate = ?"
    return self.fetch(sql, (license_plate,))
def get_user_trip(self, username=None):
    """Return the user's trips as comma-separated display strings.

    Each row becomes "number, user, plate, datetime, status, cost" with the
    Unix timestamp rendered as 'YYYY-MM-DD HH:MM:SS'.

    Bug fix: `transaction` could be referenced before assignment when the
    cost lookup failed on the first row (the bare `except: None` swallowed
    the error but left the name unbound).  It now defaults per iteration
    and only the expected lookup errors are caught.
    """
    if username is None:
        username = self.__usernumber
    sql = "SELECT * FROM HasTrip NATURAL JOIN Trip WHERE UserNumber = ?"
    rows = self.fetch(sql, (username,))
    for i in range(len(rows)):
        trip_number = rows[i][0]
        transaction = "Cost is not paid"  # default when no payment row exists
        try:
            cost_row = self.fetch(
                "SELECT Cost FROM Payment NATURAL JOIN HasPayment WHERE TripNumber = ?",
                (trip_number,))
            cost = cost_row[0][0]
            if float(cost) != 0:
                transaction = cost
        except (IndexError, TypeError, ValueError):
            pass
        stamp = datetime.datetime.fromtimestamp(rows[i][3]).strftime('%Y-%m-%d %H:%M:%S')
        rows[i] = rows[i][:3] + (stamp,) + rows[i][4:] + (transaction,)
        rows[i] = ', '.join(map(str, rows[i]))
    return rows
def get_driver_trip(self, username=None):
    """Return formatted trip strings for every vehicle owned by the driver."""
    who = self.__usernumber if username is None else username
    # Collect the trips of each plate registered to this driver.
    plates = self.fetch("SELECT LicensePlate FROM Own WHERE UserNumber = ?", (who,))
    trips = []
    for plate_row in plates:
        trips += self.get_trips(plate_row[0])
    # Render the Unix timestamp and flatten each tuple to one display string.
    for i in range(len(trips)):
        stamp = datetime.datetime.fromtimestamp(trips[i][3]).strftime('%Y-%m-%d %H:%M:%S')
        trips[i] = ', '.join(map(str, trips[i][:3] + (stamp,) + trips[i][4:]))
    return trips
def add_adress(self, x_cor, y_cor, name):
    """Insert a new address row, allocating the next AddrNum.

    (Method name spelling kept for backward compatibility with callers.)
    """
    next_num = self.fetch("SELECT COALESCE(MAX(AddrNum), 0) + 1 FROM Addr", params=())[0][0]
    bind = (next_num, y_cor, x_cor, name)
    print(bind)
    self.execute("INSERT INTO Addr (AddrNum, YCoordinate, XCoordinate, Name) VALUES (?, ?, ?, ?)", bind)
    self.conn.commit()
    return True
def update_driver_availability(self, username, availability):
    """Persist a driver's Availability flag and commit immediately."""
    sql = "UPDATE Driver SET Availability = ? WHERE UserNumber = ?"
    self.execute(sql, (availability, username))
    self.conn.commit()
    return True
def add_trip_review(self, trip_number, rank, comment):
    """Store a review (rating + comment) and link it to *trip_number*."""
    review_id = self.fetch("SELECT COALESCE(MAX(Reviewld), 0) + 1 FROM Reviews", params=())[0][0]
    self.execute("INSERT INTO Reviews (Reviewld, Rates, Comments) VALUES (?, ?, ?)",
                 (review_id, rank, comment))
    self.conn.commit()
    # Attach the new review to the trip via the junction table.
    self.execute("INSERT INTO HasRev (Reviewld, TripNumber) VALUES (?, ?)",
                 (review_id, trip_number))
    self.conn.commit()
    return True
def delete_trip_review(self, review_id):
    """Remove a review and its trip link."""
    bind = (review_id,)
    self.execute("DELETE FROM Reviews WHERE Reviewld = ?", bind)
    self.conn.commit()
    self.execute("DELETE FROM HasRev WHERE Reviewld = ?", bind)
    self.conn.commit()
    return True
def get_all_trips_and_ranks(self):
    """Return every trip joined with its review (if any) as display strings.

    Each row is flattened to "number, user, plate, datetime, status, cost,
    rating, comment" with the Unix timestamp rendered human-readable.

    Bug fix: `transaction` could be referenced before assignment when the
    cost lookup failed on the first row; it now has a per-row default and
    the bare `except` was narrowed to the expected lookup errors.
    """
    sql = "SELECT * FROM HasTrip NATURAL JOIN Trip LEFT JOIN HasRev ON HasTrip.TripNumber = HasRev.TripNumber LEFT JOIN Reviews ON HasRev.Reviewld = Reviews.Reviewld"
    rows = self.fetch(sql, ())
    for i in range(len(rows)):
        trip_number = rows[i][0]
        transaction = "Cost is not paid"  # default when no payment row exists
        try:
            cost_row = self.fetch(
                "SELECT Cost FROM Payment NATURAL JOIN HasPayment WHERE TripNumber = ?",
                (trip_number,))
            cost = cost_row[0][0]
            if float(cost) != 0:
                transaction = cost
        except (IndexError, TypeError, ValueError):
            pass
        stamp = datetime.datetime.fromtimestamp(rows[i][3]).strftime('%Y-%m-%d %H:%M:%S')
        rows[i] = rows[i][:3] + (stamp,) + rows[i][4:5] + (transaction,) + rows[i][7:]
        rows[i] = ', '.join(map(str, rows[i]))
    return rows
def create_trip(self, selected_driver, selected_car, selected_payment, selected_start_address, selected_destination_address):
    """Create a new trip plus its payment and address link rows.

    The trip starts in status 'Waiting for approval' with a zero-cost
    payment stub; the cost is filled in when the trip is delivered.
    """
    now = int(time.time())
    trip_number = self.fetch("SELECT COALESCE(MAX(TripNumber), 0) + 1 FROM Trip", params=())[0][0]
    # Trip record itself.
    self.execute("INSERT INTO Trip (TripNumber, DateTime, Status) VALUES (?, ?, ?)",
                 (trip_number, now, 'Waiting for approval'))
    self.conn.commit()
    # Link the trip to the requesting passenger and the chosen car.
    self.execute("INSERT INTO HasTrip (TripNumber, UserNumber, LicensePlate) VALUES (?, ?, ?)",
                 (trip_number, self.__usernumber, selected_car))
    self.conn.commit()
    # Payment stub at cost 0; the real amount is set on delivery.
    transaction_number = self.fetch("SELECT COALESCE(MAX(TransactionNumber), 0) + 1 FROM Payment", params=())[0][0]
    payment_bind = (transaction_number, 0, selected_payment[0])
    print(payment_bind)
    self.execute("INSERT INTO Payment (TransactionNumber, Cost, PaymentMethod) VALUES (?, ?, ?)", payment_bind)
    self.conn.commit()
    self.execute("INSERT INTO HasPayment (TripNumber,TransactionNumber) VALUES (?, ?)",
                 (trip_number, transaction_number))
    self.conn.commit()
    # Start / destination addresses.
    self.execute("INSERT INTO HasAddr (TripNumber, StartAddrNum, DestAddrNum) VALUES (?, ?, ?)",
                 (trip_number, selected_start_address[0], selected_destination_address[0]))
    self.conn.commit()
    return True
def update_trip_status(self, trip_number, status):
    """Update a trip's status and apply the side effects of that status.

    'Delivered' computes the trip cost from the start/destination
    coordinates (Euclidean distance x 1000, rounded to 2 decimals) and
    stores it on the Payment row.  'Cancelled' removes the Payment and
    HasPayment rows for the trip.

    Bug fix: the cancellation branch previously issued the Payment DELETE
    a second time (redundantly) before cleaning up HasPayment; the
    duplicate statement was removed.
    """
    self.execute("UPDATE Trip SET Status = ? WHERE TripNumber = ?", (status, trip_number))
    self.conn.commit()
    if status == "Delivered":
        rows = self.fetch("SELECT StartAddrNum, DestAddrNum FROM HasAddr WHERE TripNumber = ?",
                          (int(trip_number),))
        print(rows)
        start_addr, dest_addr = rows[0][0], rows[0][1]
        coord_sql = "SELECT YCoordinate, XCoordinate FROM Addr WHERE AddrNum = ?"
        rows = self.fetch(coord_sql, (start_addr,))
        y_start, x_start = rows[0][0], rows[0][1]
        rows = self.fetch(coord_sql, (dest_addr,))
        y_dest, x_dest = rows[0][0], rows[0][1]
        # NOTE(review): Y deltas use float() while X deltas use int(), as in
        # the original code -- confirm X coordinates are meant to be integral.
        cost = round(1000 * math.sqrt((float(y_dest) - float(y_start)) ** 2
                                      + (int(x_dest) - int(x_start)) ** 2), 2)
        print("cost", y_dest, x_dest, y_start, x_start, cost)
        transaction = self.fetch("SELECT TransactionNumber FROM HasPayment WHERE TripNumber = ?",
                                 (trip_number,))
        bind = (cost, transaction[0][0])
        print(bind)
        self.execute("UPDATE Payment SET Cost = ? WHERE TransactionNumber = ?", bind)
        self.conn.commit()
    elif status == "Cancelled":
        transaction = self.fetch("SELECT TransactionNumber FROM HasPayment WHERE TripNumber = ?",
                                 (trip_number,))
        bind = (transaction[0][0],)
        self.execute("DELETE FROM Payment WHERE TransactionNumber = ?", bind)
        self.execute("DELETE FROM HasPayment WHERE TransactionNumber = ?", bind)
        self.conn.commit()
    return True
def get_addresses(self):
    """Return (AddrNum, Name) for every stored address."""
    return self.fetch("SELECT AddrNum, Name FROM Addr", ())
def assign_password(self, password, email):
    """Hash and store a new password for the account matching *email*.

    NOTE(review): the guard is `check_admin() or True`, which always
    passes -- the admin check is effectively disabled and any caller can
    reset any password.  Confirm whether this is a leftover debug
    override before tightening it.
    """
    if self.check_admin() or True:
        hashed = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
        self.execute("UPDATE USER SET Password = ? WHERE Email = ?", (hashed, email))
        return True
    else:
        return False
if __name__ == '__main__':
    # Smoke test: reset the password of the demo account and report the result.
    db = Database('project.sqlite')
    print(db.assign_password('123456', 'test@test.com'))
{
"api_name": "sqlite3.connect",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "bcrypt.hashpw",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "bcrypt.gensalt",
"line_number": 148,
"usage_type": "call"
},
{
"api_name": "bcrypt.checkpw",
... |
24870777060 | #!/usr/bin/env python
"""Script that inserts NIfTI/JSON files into the database"""
import os
import sys
import lib.exitcode
import lib.utilities
from lib.lorisgetopt import LorisGetOpt
from lib.dcm2bids_imaging_pipeline_lib.nifti_insertion_pipeline import NiftiInsertionPipeline
__license__ = "GPLv3"
sys.path.append('/home/user/python')
# to limit the traceback when raising exceptions.
# sys.tracebacklimit = 0
def main():
    """Parse the command line and run the NIfTI/JSON insertion pipeline."""
    usage = (
        "\n"
        "********************************************************************\n"
        " NIfTI/JSON FILE INSERTION SCRIPT\n"
        "********************************************************************\n"
        "The program determines NIfTI file protocol and insert it (along with its"
        " JSON sidecar file) into the files table.\n\n"
        # TODO more description on how the script works
        "usage : run_nifti_insertion.py -p <profile> -n <nifti_path> -j <json_path> ...\n\n"
        "options: \n"
        "\t-p, --profile : Name of the python database config file in dicom-archive/.loris_mri\n"
        "\t-n, --nifti_path : Absolute path to the NIfTI file to insert\n"
        "\t-j, --json_path : Absolute path to the BIDS JSON sidecar file with scan parameters\n"
        "\t-l, --bval_path : Absolute path to the NIfTI BVAL file for DWI acquisitions\n"
        "\t-e, --bvec_path : Absolute path to the NIfTI BVEC file for DWI acquisitions\n"
        "\t-t, --tarchive_path : Absolute path to the DICOM archive linked to the NIfTI file\n"
        "\t-u, --upload_id : ID of the upload (from mri_upload) linked to the NIfTI file\n"
        "\t-s, --loris_scan_type : LORIS scan type from the mri_scan_type table\n"
        "\t-b, --bypass_extra_checks: If set, bypasses the extra protocol validation checks\n"
        "\t-c, --create_pic : If set, creates the pic to be displayed in the imaging browser\n"
        "\t-f, --force : If set, forces the insertion of the NIfTI file\n"
        "\t-v, --verbose : If set, be verbose\n\n"
        "required options are: \n"
        "\t--profile\n"
        "\t--nifti_path\n"
        "\t--json_path OR --loris_scan_type\n"
        "\t--tarchive_path OR --upload_id\n"
        "\tif --force is set, please provide --loris_scan_type as well\n\n"
    )

    # One entry per long option; 'expect_arg' distinguishes flags from
    # value-taking options, 'is_path' triggers path validation.
    options_dict = {
        "profile": {
            "value": None, "required": True, "expect_arg": True, "short_opt": "p", "is_path": False
        },
        "nifti_path": {
            "value": None, "required": True, "expect_arg": True, "short_opt": "n", "is_path": True
        },
        "json_path": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "j", "is_path": True
        },
        "bval_path": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "l", "is_path": True
        },
        "bvec_path": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "e", "is_path": True
        },
        "tarchive_path": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "t", "is_path": True
        },
        "upload_id": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "u", "is_path": False
        },
        "loris_scan_type": {
            "value": None, "required": False, "expect_arg": True, "short_opt": "s", "is_path": False
        },
        "bypass_extra_checks": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "b", "is_path": False
        },
        "create_pic": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "c", "is_path": False
        },
        "force": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "f", "is_path": False
        },
        "verbose": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "v", "is_path": False
        },
        "help": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "h", "is_path": False
        },
    }

    # __file__[:-3] strips the '.py' suffix before taking the basename,
    # yielding the bare script name expected by LorisGetOpt.
    loris_getopt_obj = LorisGetOpt(usage, options_dict, os.path.basename(__file__[:-3]))

    # Validate argument combinations and load the config file.
    input_error_checking(loris_getopt_obj)

    # Run the NIfTI validation and insertion pipeline.
    NiftiInsertionPipeline(loris_getopt_obj, os.path.basename(__file__[:-3]))
def input_error_checking(loris_getopt_obj):
    """Validate mutually-dependent command-line options.

    Verifies that exactly one of tarchive_path/upload_id/force was given,
    and that at least one of json_path / loris_scan_type is present so the
    image protocol can be determined.  Exits with MISSING_ARG otherwise.

    Bug fix: the two halves of the error message concatenated without a
    space, printing "in orderto determine".
    """
    # Check that only one of tarchive_path, upload_id or force has been provided.
    loris_getopt_obj.check_tarchive_path_upload_id_or_force_set()

    # Check that json_path or loris_scan_type has been provided (both is fine).
    json_path = loris_getopt_obj.options_dict["json_path"]["value"]
    scan_type = loris_getopt_obj.options_dict["loris_scan_type"]["value"]
    if not json_path and not scan_type:
        print(
            "[ERROR ] a json_path or a loris_scan_type need to be provided in order"
            " to determine the image file protocol.\n"
        )
        sys.exit(lib.exitcode.MISSING_ARG)
if __name__ == "__main__":
    main()
| aces/Loris-MRI | python/run_nifti_insertion.py | run_nifti_insertion.py | py | 5,231 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "lib.lorisgetopt.LorisGetOpt",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "os.path.ba... |
7214790078 | import sys
sys.stdin = open('불_input.txt')
from collections import deque
dx = [-1,1,0,0]
dy = [0,0,-1,1]
def bfs():
    """Two-phase BFS for the 'Fire!' escape problem.

    Phase 1 floods fire arrival times (1-based minutes) into the global
    `fire` grid from queue q2.  Phase 2 expands the escapee from queue q:
    a cell is enterable only when the fire arrives there strictly later,
    and any step off the board prints the elapsed minutes and returns.
    Prints 'IMPOSSIBLE' when no escape exists.
    """
    # Phase 1: precompute fire spread times.
    while q2:
        x, y = q2.popleft()
        for step in range(4):
            nx, ny = x + dx[step], y + dy[step]
            if 0 <= nx < h and 0 <= ny < w and fire[nx][ny] == 0:
                if arr[nx][ny] == '.':
                    q2.append((nx, ny))
                    fire[nx][ny] = fire[x][y] + 1
    # Phase 2: expand the escapee, checking for an exit off the grid edge.
    while q:
        x, y = q.popleft()
        for step in range(4):
            nx, ny = x + dx[step], y + dy[step]
            if 0 <= nx < h and 0 <= ny < w and visit[nx][ny] == 0:
                if fire[nx][ny] == 0 or fire[nx][ny] > (visit[x][y] + 1):
                    if arr[nx][ny] == '.':
                        q.append((nx, ny))
                        visit[nx][ny] = visit[x][y] + 1
            if nx in (-1, h) or ny in (-1, w):
                print(visit[x][y])
                return
    print('IMPOSSIBLE')
# Read the grid and seed both BFS queues, then solve.
w, h = map(int, input().split())
arr = [list(input()) for _ in range(h)]
visit = [[0] * w for _ in range(h)]   # escapee arrival times (1-based)
fire = [[0] * w for _ in range(h)]    # fire arrival times (1-based)
q = deque()    # escapee ('J') frontier, consumed by bfs() phase 2
q2 = deque()   # fire ('F') frontier, consumed by bfs() phase 1
for i in range(h):
    for j in range(w):
        if arr[i][j] == 'F':
            # Bug fix: fire cells must seed the fire queue/grid (q2/fire);
            # the original seeded them into the escapee queue and vice versa,
            # swapping the roles of the two BFS passes.
            q2.append((i, j))
            fire[i][j] = 1
        elif arr[i][j] == 'J':
            q.append((i, j))
            visit[i][j] = 1
bfs()
| HyunSeok0328/Algo | 불.py | 불.py | py | 1,398 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.stdin",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 39,
"usage_type": "call"
}
] |
28238218682 | # -*- coding: utf-8 -*-
"""
This module contains any functions directly related to ROI operations and queries.
"""
import sys
import os.path
import math
import lib
import roi
import numpy as np
import logging
from HMR_RS_LoggerAdapter import HMR_RS_LoggerAdapter
base_logger = logging.getLogger("hmrlib." + os.path.basename(__file__)[:-3])
""" The basic logger object used for logging in hmrlib.
It was loaded in memory by python-sphinx, which is why you see the object's address!"""
logger = HMR_RS_LoggerAdapter(base_logger)
""" The basic logger object adapted with an HMR_RS_LoggerAdapter.
It was loaded in memory by python-sphinx, which is why you see the object's address!"""
# To allow importation of module by sphinx-python
try:
from connect import *
# import ScriptClient
except Exception as e:
if 'IronPython' in sys.version:
raise
else:
pass
def get_poi(poi_name, examination=None):
    """
    Returns the PoiGeometries entry named *poi_name*.

    Args:
        poi_name (str): the name of the POI
        examination (str, optional): examination holding the POI; defaults to the current one.

    Returns:
        the RayStation POI geometry object

    Raises:
        IndexError (after logging) when no POI with that name exists.
    """
    patient = lib.get_current_patient()
    if examination is None:
        examination = lib.get_current_examination()
    try:
        matches = [g for g in patient.PatientModel.StructureSets[examination.Name].PoiGeometries
                   if g.OfPoi.Name == poi_name]
        found = matches[0]
        # Unsegmented POIs carry float extremes as sentinel coordinates.
        if abs(found.Point.x) in (sys.float_info.max, sys.float_info.min):
            lib.error('POI %s is not segmented on current examination. Select correct CT and try again.' % poi_name)
        return found
    except IndexError as e:
        logger.error('No POI found named %s', poi_name)
        logger.exception(e)
        raise
    except Exception as e:
        logger.exception(e)
        raise
def show_poi_coordinates(poi_name):
    """
    Pops up a log window showing the DICOM coordinates of *poi_name*.

    Args:
        poi_name (str): the name of the POI to display
    """
    import gui
    coords = get_poi_coordinates(poi_name)
    msg = 'Les coordonnées DICOM du point "%s" sont : [x = %.2f cm, y = %.2f cm, z = %.2f cm]' % (poi_name, coords.x, coords.y, coords.z)
    gui.show_log_window(msg)
def show_all_poi_coordinates():
    """
    Builds a newline-separated summary of the DICOM coordinates of every POI
    defined on the current patient model.

    Note: despite its historical name, this function returns the text instead
    of opening a popup (the gui call was already disabled); the unused
    `import gui` was removed.

    Returns:
        str: one "name: [x, y, z]" line per POI.
    """
    patient = lib.get_current_patient()
    msg = ''
    for p in patient.PatientModel.PointsOfInterest:
        coords = get_poi_coordinates(p.Name)
        msg += '%s: [x = %.2f cm, y = %.2f cm, z = %.2f cm]\n' % (p.Name, coords.x, coords.y, coords.z)
    return msg
def identify_isocenter_poi():
    """Guess which POI is the isocenter.

    Preference order among names starting with 'ISO': exact 'ISO', then
    'ISO SCAN', then the shortest candidate.

    Returns:
        str: name of the most probable isocenter POI, or None if no
        candidate exists.
    """
    patient = lib.get_current_patient()
    candidates = [p.Name for p in patient.PatientModel.PointsOfInterest
                  if p.Name.upper().startswith('ISO')]
    if not candidates:
        logger.warning('No isocenter POI could be found.')
        return None
    if len(candidates) == 1:
        logger.info('Isocenter POI identified as "%s"', candidates[0])
        return candidates[0]
    # Several candidates: scan shortest-first so 'ISO' wins over 'ISO SCAN'.
    by_length = sorted(candidates, key=lambda name: len(name))
    for name in by_length:
        if name.upper() == 'ISO' or name.upper() == 'ISO SCAN':
            logger.info('Isocenter POI identified as "%s"', name)
            return name
    guess = by_length[0]
    logger.warning('Best isocenter POI candidate is "%s"', guess)
    return guess
def set_poi_type(poi_name, poi_type='Marker'):
    """
    Changes the type of POI *poi_name*, unless the POI is approved.

    Args:
        poi_name (str): the POI to modify
        poi_type (str, optional): the new type; defaults to 'Marker'.
    """
    if get_poi_approval(poi_name):
        # Approved structures are locked by RayStation.
        logger.warning('POI "%s" is approved and therefore cannot be changed.' % poi_name)
        return
    current = lib.get_current_patient()
    try:
        current.PatientModel.PointsOfInterest[poi_name].Type = poi_type
        logger.info('Type of POI "%s" set as %s', poi_name, poi_type)
    except Exception as e:
        logger.exception(e)
        raise
def auto_assign_poi_types():
    """
    Automatically assigns POI types for the current patient model.

    The POI containing 'SCAN' in its name becomes the localization point
    (falling back to the isocenter if none exists), the isocenter POI gets
    type 'Isocenter', and every remaining POI becomes a 'Marker'.

    Raises:
        SystemError: when more than one 'SCAN' POI exists.
    """
    patient = lib.get_current_patient()
    all_names = [p.Name for p in patient.PatientModel.PointsOfInterest]
    iso_name = identify_isocenter_poi()
    set_poi_type(iso_name, 'Isocenter')
    scan_names = [n for n in all_names if 'SCAN' in n.upper()]
    if len(scan_names) > 1:
        logger.error('More than one POI found for possible localization points. Only one POI should exist with "SCAN" in its name.')
        raise SystemError('More than one POI found for possible localization points. Only one POI should exist with "SCAN" in its name.')
    if not scan_names:
        logger.warning('No localization point could be found. Using the isocenter point for localization.')
        loc_name = iso_name
    else:
        loc_name = scan_names[0]
    set_poi_type(loc_name, 'LocalizationPoint')
    # Everything that is neither the isocenter nor the localization point
    # becomes a plain marker.
    for name in all_names:
        if name not in (iso_name, loc_name):
            set_poi_type(name, 'Marker')
def place_prescription_point(target_fraction_dose, ptv_name, poi_name, beamset=None, exam=None):
    """
    Walks from the PTV surface along the in-slice direction of the PTV
    center until the interpolated per-fraction dose matches
    *target_fraction_dose*, then moves *poi_name* there.

    Pre-requisites: an identifiable PTV contour and a computed dose.
    The search is a bisection-style march: overshooting the target dose
    flips the direction and halves the step, stopping when the dose
    matches to 3 decimals or after 100 iterations.

    Args:
        target_fraction_dose (float): per-fraction dose to locate (cGy)
        ptv_name (str): name of the PTV ROI
        poi_name (str): POI to relocate (must not be approved)
        beamset: RayStation beamset; defaults to the current one
        exam: RayStation examination; defaults to the current one

    Returns:
        lib.RSPoint: the final point coordinates.
    """
    patient = lib.get_current_patient()
    if exam is None:
        exam = lib.get_current_examination()
    if beamset is None:
        beamset = lib.get_current_beamset()
    try:
        geometry = patient.PatientModel.StructureSets[exam.Name].RoiGeometries[ptv_name]
        ptv_center = lib.RSPoint(point=geometry.GetCenterOfRoi())
        # Slice thickness from the CT image stack.
        slice_width = abs(exam.Series[0].ImageStack.SlicePositions[1] - exam.Series[0].ImageStack.SlicePositions[0])
        logger.info('CT slice width = %s cm', slice_width)
        start = None
        # Pick a contour vertex lying on the slice through the PTV center.
        for contour in geometry.PrimaryShape.Contours:
            if ptv_center.z - slice_width < contour[0].z < ptv_center.z + slice_width:
                start = lib.RSPoint(contour[0].x, contour[0].y, contour[0].z)
                break
        if start is None:
            logger.info('Could not find a point on the same slice as the ROi center. Disjoint/noncoplanar PTV?')
            logger.info('Trying with first point in contour shape.')
            contour = geometry.PrimaryShape.Contours[0]
            start = lib.RSPoint(contour[0].x, contour[0].y, contour[0].z)
        logger.info('Initial point = %s cm', start)
        u = lib.RSPoint(point=lib.unit_vector(ptv_center - start))
        logger.info('Unit vector towards PTV center = %s cm', u)
        # Zero the z component so the walk stays on the starting slice.
        u.z = 0
        logger.info('Approximate unit vector towards PTV center on single CT slice = %s cm', u)

        def walk(point, direction, target, previous_dose, step=0.05, iteration=0):
            # One march step; recursion terminates on dose match or 100 iterations.
            point = point + step * direction
            dose = beamset.FractionDose.InterpolateDoseInPoint(Point=point.value)
            logger.info('Dose at %s = %.3f cGy', point, dose)
            if round(dose, 3) == round(target, 3) or iteration > 100:
                return point
            if (previous_dose < target < dose) or (previous_dose > target > dose):
                # Overshot the isodose: reverse direction, halve the step.
                return walk(point, -direction, target, dose, step=0.5 * step, iteration=iteration + 1)
            return walk(point, direction, target, dose, step=step, iteration=iteration + 1)

        dose = beamset.FractionDose.InterpolateDoseInPoint(Point=start.value)
        if math.isnan(dose):
            lib.error('No dose value available. Check if dose is calculated.')
        logger.info('Dose per fraction at initial point = %.3f cGy', dose)
        # Dose is assumed to rise towards the PTV center.
        if dose < target_fraction_dose:
            point = walk(start, u, target_fraction_dose, dose)
        else:
            point = walk(start, -u, target_fraction_dose, dose)
        logger.info('Final point = %s cm', point)
    except Exception as e:
        logger.exception(e)
        raise
    if get_poi_approval(poi_name):
        logger.warning('POI %s is approved; cannot continue.', poi_name)
        lib.error('POI %s exists and is approved; cannot continue. Unapprove %s before running script.' % (poi_name, poi_name))
    set_poi_coordinates(poi_name, point, examination=exam)
    return point
def auto_place_prescription_point():
    """
    Places the 'PT PRESC' POI on the prescription isodose and prescribes to it.

    Pre-requisites: a prescription dose on the current beamset, a manually
    created dose specification point, an identifiable PTV and a computed
    dose.  The point search itself is delegated to
    `place_prescription_point`; afterwards the beamset prescription and the
    dose specification point are moved to the found coordinates.

    Raises:
        SystemError: when no PTV can be identified.
    """
    beamset = lib.get_current_beamset()
    # Create PT PRESC if it does not exist yet (create_poi warns otherwise).
    create_poi({'x': 0, 'y': 0, 'z': 0}, 'PT PRESC')
    ptv_candidates = roi.identify_ptvs2()
    if len(ptv_candidates) == 0:
        logger.error('No PTV could be found. Aborting.')
        raise SystemError('No PTV could be found. Aborting.')
    if len(ptv_candidates) > 1:
        logger.warning('More than one possible PTV found: %s. Trying highest PTV in the list %s.', ptv_candidates, sorted(ptv_candidates, key=lambda x: x[3:], reverse=True)[0])
    ptv = sorted(ptv_candidates, key=lambda x: x[3:], reverse=True)[0]
    logger.info('PTV identified as %s', ptv)
    try:
        presc_dose = lib.get_prescription_dose()
        logger.info('Presc dose = %.3f cGy', presc_dose)
        fractions = lib.get_fractions()
        logger.info('Fractions = %s', fractions)
        target_dose = presc_dose / float(fractions)
        logger.info('Target dose = %.3f cGy', target_dose)
    except Exception as e:
        logger.exception(e)
        raise
    point = place_prescription_point(target_dose, ptv, 'PT PRESC', beamset=beamset)
    # Prescribe the full dose to the freshly placed point.
    try:
        beamset.AddDosePrescriptionToPoi(PoiName='PT PRESC', DoseValue=presc_dose)
    except Exception as e:
        logger.exception(e)
        raise
    # Relocate the (manually created) dose specification point.
    try:
        dsp = [x for x in beamset.DoseSpecificationPoints][0]
        dsp.Coordinates = point.value
    except IndexError as e:
        logger.error('You must create a dose specification point manually before executing this script.')
        logger.exception(e)
        raise
    except Exception as e:
        logger.exception(e)
        raise
    # Beam-level dose specification points are left for manual assignment:
    # SetDoseSpecificationPoint did not reliably update the coordinates.
def create_poi(point, name, color='Green', poi_type='Marker', examination=None):
    """
    Creates a new POI at *point*.

    Args:
        point (dict or RSPoint): DICOM (x, y, z) coordinates
        name (str): name of the new POI
        color (str, optional): POI color, default 'Green'
        poi_type (str, optional): POI type, default 'Marker'
        examination (optional): examination to create on; defaults to the current one.

    Returns:
        the new RayStation POI object, or None (with a warning) when a POI
        of that name already exists.
    """
    # RayStation expects the plain dict form of the coordinates.
    if isinstance(point, lib.RSPoint):
        point = point.value
    patient = lib.get_current_patient()
    if examination is None:
        examination = lib.get_current_examination()
    try:
        return patient.PatientModel.CreatePoi(Examination=examination, Point=point, Volume=0, Name=name, Color=color, Type=poi_type)
    except SystemError as e:
        if str(e) == 'Name not unique':
            logger.warning('A POI named %s already exists.' % name)
    except Exception as e:
        logger.exception(e)
        raise
def poi_exists(poi_name, examination=None):
    """
    Checks whether *poi_name* exists AND is segmented on the examination.

    Args:
        poi_name (str): the POI name
        examination (optional): examination to check; defaults to the current one.

    Returns:
        bool: True when the POI exists and has real coordinates.
    """
    patient = lib.get_current_patient()
    if examination is None:
        examination = lib.get_current_examination()
    try:
        match = [g for g in patient.PatientModel.StructureSets[examination.Name].PoiGeometries
                 if g.OfPoi.Name == poi_name][0]
    except:
        return False
    # Float extremes are the sentinel for "exists but not segmented".
    if abs(match.Point.x) in (sys.float_info.max, sys.float_info.min):
        return False
    return True
def get_poi_approval(poi_name, examination=None):
    """
    Checks if POI named *poi_name* is approved (in any of the existing beamsets).
    TODO: handle the case where no beamsets or no plans exists, implying that
    the structures cannot be approved ?
    Args:
        poi_name (str): the name of the POI
        examination (str, optional): the name of the examination (CT or StructureSet) for which the poi is defined; if left out the currently selected examination is used.
    Returns:
        True if the POI is approved, False if not.
    """
    patient = lib.get_current_patient()
    if examination is None:
        examination = lib.get_current_examination()
    try:
        structure_set = patient.PatientModel.StructureSets[examination.Name]
        # A POI counts as approved when any approved structure set lists it
        for approved_set in structure_set.ApprovedStructureSets:
            if any(approved.OfPoi.Name == poi_name for approved in approved_set.ApprovedPoiStructures):
                logger.info('POI %s is approved.', poi_name)
                return True
        logger.info('POI %s is NOT approved.', poi_name)
        return False
    except Exception as e:
        logger.exception(e)
        raise
def get_poi_coordinates(poi_name, examination=None):
    """
    Returns coordinates of POI poi_name in DICOM coordinate system.
    Args:
        poi_name (str): the name of the POI for which to get the coordinates
        examination (str, optional): the name of the examination (CT or StructureSet) for which the poi is defined; if left out the currently selected examination is used.
    Returns:
        RSPoint: the POI's DICOM coordinates in a RSPoint object
    """
    poi = get_poi(poi_name, examination)
    point = poi.Point  # a RayStation ExpandoObject with x/y/z attributes
    logger.info('POI %s: (x, y, z) = (%s, %s, %s)', poi_name, point.x, point.y, point.z)
    return lib.RSPoint(point=point)
def set_poi_coordinates(poi_name, point, examination=None):
    """
    Sets POI DICOM coordinates for POI poi_name to those specified in point.
    Approved POIs are left untouched (only a warning is logged), because
    changing an approved POI would crash RayStation.
    Args:
        poi_name (str): the name of the POI to move
        point (RSPoint or RayStation ExpandoObject): the new DICOM coordinates of the point
        examination (optional): the examination for which the POI is defined; if left out the currently selected examination is used.
    """
    if examination is None:
        examination = lib.get_current_examination()
    poi = get_poi(poi_name, examination=examination)
    # Need to check if POI is approved. If it is, setting the coordinates
    # will crash RayStation.
    if get_poi_approval(poi_name):
        logger.warning('POI "%s" is approved and therefore cannot be changed.' % poi_name)
        return
    try:
        # ====================================================================
        # For some reason this doesn't update point coords !!
        # but also doesn't produce an error.
        # poi.Point.x = point.x
        # poi.Point.y = point.y
        # poi.Point.z = point.z
        # ====================================================================
        # ... but assigning a whole dict does update the coordinates, silently.
        poi.Point = {'x': point.x, 'y': point.y, 'z': point.z}
        logger.info('Set coordinates of POI "%s" to %s.', poi_name, point)
    except Exception as e:
        logger.exception(e)
        raise
def create_iso(exam=None):
    """
    Checks to see if a point named ISO exists. If not, the script creates a copy of REF SCAN to serve as the isocenter.
    Args:
        exam : RayStation examination object (defaults to the current one)
    """
    if exam is None:
        exam = lib.get_current_examination()
    # Nothing to do when an ISO already exists
    if poi_exists("ISO", exam):
        return
    # Cannot create an ISO without a REF SCAN point to copy
    if not poi_exists("REF SCAN", exam):
        return
    ref_scan_coords = get_poi_coordinates('REF SCAN', exam)
    create_poi(ref_scan_coords, 'ISO', 'Blue', 'Isocenter', exam)
def erase_pois_not_in_list(poi_list=None):
    """
    Deletes every POI whose upper-cased name is not in *poi_list*.
    Args:
        poi_list (list, optional): POI names to keep; defaults to ['ISO', 'REF SCAN'].
    """
    patient = lib.get_current_patient()
    if poi_list is None:
        poi_list = ['ISO', 'REF SCAN']
    # Bug fix: iterate over a snapshot — deleting from the live collection
    # while iterating it can skip entries.
    for poi in list(patient.PatientModel.PointsOfInterest):
        if poi.Name.upper() not in poi_list:
            poi.DeleteRoi()
def get_max_dose_coordinates(dose, grid):
    """
    Finds the voxel receiving the highest dose and its DICOM coordinates.
    Args:
        dose: RayStation dose object; dose.DoseValues.DoseData holds the flat voxel dose values
        grid: RayStation dose grid (NrVoxels, VoxelSize and Corner, each with x/y/z)
    Returns:
        (max_dose, max_coords): the dose (in cGy) and the voxel's DICOM
        coordinates packaged as an RSPoint.
    """
    dose_list = list(dose.DoseValues.DoseData)
    # np.argmax replaces the previous hand-rolled linear scan; the unused
    # nb_vox/res/corner locals of the original were dead code and are removed.
    max_dose_index = int(np.argmax(dose_list))
    max_dose = dose_list[max_dose_index]
    # NOTE that unravel_index takes coordinates in slowest-changing to
    # fastest-changing order, which in our case means z-y-x
    coords = np.unravel_index(max_dose_index, (grid.NrVoxels.z, grid.NrVoxels.y, grid.NrVoxels.x))
    x_coord = grid.Corner.x + grid.VoxelSize.x * coords[2]
    y_coord = grid.Corner.y + grid.VoxelSize.y * coords[1]
    z_coord = grid.Corner.z + grid.VoxelSize.z * coords[0]
    # Package the coordinates as a RSPoint object
    max_coords = lib.RSPoint(x_coord, y_coord, z_coord)
    return max_dose, max_coords
def check_eccentricity(poi_name):
    """
    Visual collision check for using *poi_name* as an isocenter.
    Creates a large cylinder ("verif_excentricite") centered on the point and
    sets the window/level to Lung so the planner can confirm visually that the
    clearance between gantry and patient is adequate. Currently assumes a safe
    radius of 40cm for all types of linac; this number may change if further
    measurements are made.
    """
    patient = lib.get_current_patient()
    exam = lib.get_current_examination()
    # Switch display to a Lung window/level so the body outline is visible
    lung_dict = dict(x=-600,y=1600)
    exam.Series[0].LevelWindow = lung_dict
    # Remove any verification ROI left over from a previous run
    if roi.roi_exists("verif_excentricite",exam):
        patient.PatientModel.RegionsOfInterest["verif_excentricite"].DeleteRoi()
    center = get_poi_coordinates(poi_name,exam)
    # 30cm cylinder plus two successive 5cm A/P/L/R expansions -> 40cm radius
    patient.PatientModel.CreateRoi(Name="verif_ex_temp1", Color="Green", Type="Organ", TissueName=None, RoiMaterial=None)
    patient.PatientModel.RegionsOfInterest["verif_ex_temp1"].CreateCylinderGeometry(Radius=30, Axis={ 'x': 0, 'y': 0, 'z': 1 }, Length=50, Examination=exam, Center={ 'x': center.x, 'y': center.y, 'z': center.z })
    patient.PatientModel.CreateRoi(Name="verif_ex_temp2", Color="Pink", Type="Organ", TissueName=None, RoiMaterial=None)
    patient.PatientModel.RegionsOfInterest["verif_ex_temp2"].SetMarginExpression(SourceRoiName="verif_ex_temp1", MarginSettings={'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 5, 'Posterior': 5, 'Right': 5, 'Left': 5})
    patient.PatientModel.RegionsOfInterest["verif_ex_temp2"].UpdateDerivedGeometry(Examination=exam)
    patient.PatientModel.CreateRoi(Name="verif_excentricite", Color="Red", Type="Organ", TissueName=None, RoiMaterial=None)
    patient.PatientModel.RegionsOfInterest["verif_excentricite"].SetMarginExpression(SourceRoiName="verif_ex_temp2", MarginSettings={'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 5, 'Posterior': 5, 'Right': 5, 'Left': 5})
    patient.PatientModel.RegionsOfInterest["verif_excentricite"].UpdateDerivedGeometry(Examination=exam)
    patient.PatientModel.RegionsOfInterest["verif_ex_temp1"].DeleteRoi()
patient.PatientModel.RegionsOfInterest["verif_ex_temp2"].DeleteRoi() | mcbanjomike/Scripts-RayStation-4.7.2 | hmrlib/poi.py | poi.py | py | 25,576 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.path.basename",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path",
... |
4709157122 | import easydict
from generator import KoGPT2IdeaGenerator
if __name__ == "__main__":
    # Smoke test: load the last checkpoint and generate one idea for a keyword.
    args = easydict.EasyDict(
        {"gpus": 1, "model_params": "model_chp/model_-last.ckpt"}
    )
    idea_generator = KoGPT2IdeaGenerator(args)
    generated = idea_generator.generate("내구성")
    print(generated)
| madcamp-final/KoGPT2_generation | idea_generation/test.py | test.py | py | 304 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "easydict.EasyDict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "generator.KoGPT2IdeaGenerator",
"line_number": 10,
"usage_type": "call"
}
] |
23869569915 | # -*- coding: utf-8 -*-
import logging
from datetime import date
from os import path
import xlsxwriter
from envios import Envio
from helpers import add_util_days, mailer, to_money
from pagamentos import Pagamento
from plataformas import PlataformaABC
# Module-level logger for this command
logger = logging.getLogger(__name__)
# CWD: directory of this file; TMP: sibling "tmp" dir for generated spreadsheets
CWD = path.dirname(path.abspath(__file__))
TMP = path.join(CWD, "..", "tmp")
class PedidosPagos:
    """Collects paid-order updates from the platform for a range of dates,
    builds two Excel reports (detailed orders and order items) and emails them.
    """

    def __init__(self, plataforma: PlataformaABC, datas: list[date], email_to: list):
        self.plataforma = plataforma
        self.datas = datas
        self.email_to = email_to

    def run(self):
        """Entry point: fetch updates, map paid orders and mail the reports."""
        logger.info(f"buscando atualizacoes entre {self.datas[0]} e {self.datas[-1]}")
        atualizacoes = self.plataforma.lista_atualizacoes_por_datas(self.datas)
        logger.info("atualizacoes obtidas com sucesso")
        pedidos_detalhados = []
        pedidos_itens = []
        for data in self.datas:
            logger.info(f"executando para a data {data}")
            atualizacoes_pedidos_pagos = atualizacoes.get(str(data), {}).get(
                "pedido_pago", []
            )
            pedidos = self.get_pedidos(atualizacoes_pedidos_pagos, data)
            pedidos_detalhados += pedidos["detalhado"]
            pedidos_itens += pedidos["itens"]
        # Only build and send the spreadsheets when there is something to report
        if pedidos_detalhados:
            detalhado_xlsx_path = self.to_excel(
                pedidos_detalhados, path.join(TMP, "pedidosDetalhados.xlsx")
            )
            itens_xlsx_path = self.to_excel(
                pedidos_itens, path.join(TMP, "pedidosItens.xlsx")
            )
            self.send_mail(detalhado_xlsx_path, itens_xlsx_path)

    def get_pedidos(self, atualizacoes_pedidos_pagos, data: date):
        """Fetch full order info for each paid-order update of *data*.

        Returns:
            dict with two parallel lists: 'detalhado' (one row per order) and
            'itens' (one row per order, with a nested list of its items).
        """
        logger.info(f"{len(atualizacoes_pedidos_pagos)} pedidos pagos")
        pedidos_detalhados = []
        pedidos_itens = []
        total = 0
        for atualizacao in sorted(
            atualizacoes_pedidos_pagos, key=lambda at: at["numero"]
        ):
            pedido = self.plataforma.get_pedido_info(atualizacao["numero"])
            total += float(pedido["valor_total"])
            pedido["data_leitura"] = data
            pedido["detalhe_pagamento"] = Pagamento.consulta_detalhe_transacao(
                pagamento=pedido["pagamentos"][0], data=data
            )
            logger.debug(
                f"Pedido {pedido['numero']} => {to_money(pedido['valor_total'])}"
            )
            pedidos_detalhados.append(self.pedido_detalhado_mapper(pedido))
            pedidos_itens.append(self.pedido_itens_mapper(pedido))
        logger.info(f"Total {to_money(total)}")
        return {"detalhado": pedidos_detalhados, "itens": pedidos_itens}

    def pedido_itens_mapper(self, pedido):
        """Map one order into the row layout of the items spreadsheet."""
        cliente = pedido["cliente"]
        itens = pedido["itens"]
        # The shipping deadline is driven by the slowest item
        disponibilidade = max(
            map(lambda item: int(item["disponibilidade"]), pedido["itens"])
        )
        return {
            "Data": pedido["data_leitura"].strftime("%d/%m/%Y"),
            "Cliente": cliente["nome"],
            "Pedido": pedido["numero"],
            "Prazo de Envio": add_util_days(
                pedido["data_leitura"], disponibilidade
            ).strftime("%d/%m/%Y"),
            "Itens": [
                {
                    "SKU": it["sku"],
                    "Itens": it["nome"],
                    "QTD": float(it["quantidade"]),
                    "Fornecedor": "",
                    "Custo Real": "",
                    "Custo Site": to_money(
                        float(it["quantidade"]) * float(it["preco_custo"] or "0")
                    ),
                    "Preço Vendido": to_money(
                        float(it["quantidade"]) * float(it["preco_venda"])
                    ),
                }
                for it in itens
            ],
        }

    def pedido_detalhado_mapper(self, pedido):
        """Map one order into the row layout of the detailed spreadsheet."""
        cliente = pedido["cliente"]
        envio = pedido["envios"][0]
        pagamento = pedido["pagamentos"][0]
        detalhe_pagamento = pedido["detalhe_pagamento"]
        disponibilidade = max(
            map(lambda item: int(item["disponibilidade"]), pedido["itens"])
        )
        rastreio = envio["objeto"]
        data_envio = ""
        # When a tracking code exists, look up the posting date of the parcel
        if rastreio:
            track_json = Envio.track(rastreio)
            data_envio = (
                track_json.get("objeto", [{}])[0]
                .get("evento", [{}])[0]
                .get("dataPostagem", "")
            )
        cupom = ""
        if pedido["cupom_desconto"] is not None:
            cupom = pedido["cupom_desconto"].get("codigo", "")
        cep = pedido["endereco_entrega"]["cep"]
        # Format the CEP as 99999-999
        if cep:
            cep = f"{cep[:5]}-{cep[5:]}"
        total_liquido = detalhe_pagamento.get("total_liquido", "")
        return {
            "Data": pedido["data_leitura"].strftime("%d/%m/%Y"),
            "Cliente": cliente["nome"],
            "Pedido": pedido["numero"],
            "Liberação do Pagamento": detalhe_pagamento.get("liberacao_pagamento", ""),
            "Pagamento": (
                f"{pagamento['forma_pagamento']['codigo']} - "
                f"{pagamento['forma_pagamento']['nome']}"
            ),
            "Código": pagamento["transacao_id"],
            "Parcelas": pagamento["parcelamento"].get("numero_parcelas", 0),
            "Cupom": cupom,
            "Data Envio": data_envio,
            "Prazo de Envio": add_util_days(
                pedido["data_leitura"], disponibilidade
            ).strftime("%d/%m/%Y"),
            "Rastreio": rastreio,
            "Frete Real": "",
            "Frete": to_money(pedido["valor_envio"]),
            "Prazo de Frete": int(envio.get("prazo") or disponibilidade)
            - disponibilidade,
            "Envio": f"{envio['forma_envio']['nome']} - {envio['forma_envio']['tipo']}",
            "CEP": cep,
            "Estado": pedido["endereco_entrega"]["estado"],
            "Subtotal": to_money(pedido["valor_subtotal"]),
            "Desconto": to_money(pedido["valor_desconto"]),
            "Total": to_money(pedido["valor_total"]),
            "Taxas": to_money(detalhe_pagamento.get("taxas", "")),
            "Total Líquido": to_money(total_liquido),
            "Situação": pedido["situacao"]["nome"],
        }

    def send_mail(self, detalhado_file_path: str, itens_file_path: str):
        """Email both spreadsheets to the configured recipients (no-op
        with an error log when no recipients are configured)."""
        logger.info("Enviando email")
        if not self.email_to:
            logger.error("Email não enviado")
            return
        br_date_i = self.datas[0].strftime("%d/%m/%Y")
        br_date_f = self.datas[-1].strftime("%d/%m/%Y")
        subject = f"Dados do dia {br_date_i}"
        body = f"Bom dia \n\nsegue em anexo os pedidos pagos do dia {br_date_i}"
        files_to_send = [
            {"name": "Site Detalhado", "file": detalhado_file_path},
            {"name": "Pedidos Site", "file": itens_file_path},
        ]
        # Mention the end date only when the range spans more than one day
        if self.datas[0] != self.datas[-1]:
            subject += f" ao dia {br_date_f}"
            body += f" ao dia {br_date_f}"
        mailer.send(
            email_to=self.email_to,
            subject=subject,
            body=body,
            files_to_send=files_to_send,
        )
        logger.info("Email enviado com sucesso!")

    def to_excel(self, pedidos, file_path):
        """Write the mapped rows to *file_path* and return that path."""
        workbook = xlsxwriter.Workbook(file_path)
        worksheet = workbook.add_worksheet()
        row = 1
        self._write_excel_header(workbook, worksheet, pedidos)
        for pedido in pedidos:
            # A row without "Itens" still occupies one spreadsheet row
            # (the [0] default has length 1).
            itens_length = len(pedido.get("Itens", [0]))
            self._write_excel_row(
                workbook, worksheet, row, row + itens_length - 1, pedido
            )
            row += itens_length
        workbook.close()
        return file_path

    def _write_excel_row(self, workbook, worksheet, first_row, last_row, row_data):
        """Write one mapped order; list values fan out into one row per item,
        scalar values are merged vertically across the item rows."""
        col = -1
        body_format = workbook.add_format(
            {
                "font_name": "Arial",
                "font_size": 9,
                "align": "center",
                "valign": "vcenter",
            }
        )
        for _, value in row_data.items():
            col += 1
            # Idiom fix: isinstance instead of type(...) == list
            if isinstance(value, list):
                for i, list_item in enumerate(value):
                    sub_col = col - 1
                    for item in list_item.values():
                        sub_col += 1
                        worksheet.write(first_row + i, sub_col, item, body_format)
                    col = sub_col
            else:
                if first_row == last_row:
                    worksheet.write(first_row, col, value, body_format)
                else:
                    worksheet.merge_range(
                        first_row, col, last_row, col, value, body_format
                    )

    def _write_excel_header(self, workbook, worksheet, row_data):
        """Write the header row, expanding nested item dicts into their keys."""
        header_format = workbook.add_format(
            {
                "bold": True,
                "font_name": "Arial",
                "font_size": 9,
                "bg_color": "#b7e1cd",
                "align": "center",
                "valign": "vcenter",
            }
        )
        row = 0
        headers = []
        for k, v in row_data[0].items():
            # Idiom fix: isinstance instead of type(...) == list
            headers += v[0].keys() if isinstance(v, list) else [k]
        for i, header in enumerate(headers):
            worksheet.write(row, i, header, header_format)
| rennancockles/LojaIntegrada_Scripts | lojaintegrada_scripts/commands/pedidos_pagos.py | pedidos_pagos.py | py | 9,362 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "os.path.abspath",
"line... |
8967238816 | import warnings
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from urllib.parse import urlparse
import mlflow.sklearn
from mlflow.models.signature import infer_signature
import logging
import click
from shippedbrain import shippedbrain
logging.basicConfig(level=logging.WARN)
logger = logging.getLogger(__name__)
# NOTE(review): hard-coded credential placeholders — replace via environment
# variables or a secrets store before real use; never commit real credentials.
SHIPPED_BRAIN_EMAIL = "your_email@mail.com"
SHIPPED_BRAIN_PASSWORD = "your_shippedbrain_password"
MODEL_NAME = "ElasticWine"
def eval_metrics(actual, pred):
    """Return the (RMSE, MAE, R^2) regression metrics for actual vs. pred."""
    return (
        np.sqrt(mean_squared_error(actual, pred)),
        mean_absolute_error(actual, pred),
        r2_score(actual, pred),
    )
@click.command()
@click.option("--publish", is_flag=True)
def main(publish):
    """Train an ElasticNet model on the wine-quality dataset, log params,
    metrics and the model itself to MLflow, and optionally publish the run
    to shippedbrain.

    Args:
        publish (bool): when --publish is passed, upload the finished run.

    Returns:
        the finished MLflow run object.
    """
    warnings.filterwarnings("ignore")
    np.random.seed(46)
    print("MLflow Tracking URI:", mlflow.get_tracking_uri())
    # Read the wine-quality csv file from the URL
    csv_url = (
        "http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
    )
    try:
        data = pd.read_csv(csv_url, sep=";")
    except Exception as e:
        logger.exception(
            "Unable to download training & test CSV, check your internet connection. Error: %s", e
        )
        # Bug fix: without re-raising, execution continued and crashed below
        # with a NameError because 'data' was never assigned.
        raise
    # Split the data into training and test sets. (0.75, 0.25) split.
    train, test = train_test_split(data)
    # The predicted column is "quality" which is a scalar from [3, 9]
    train_x = train.drop(["quality"], axis=1)
    test_x = test.drop(["quality"], axis=1)
    train_y = train[["quality"]]
    test_y = test[["quality"]]
    alpha = 0.5
    l1_ratio = 0.5
    with mlflow.start_run() as run:
        print("[INFO] Starting run with id:", run.info.run_id)
        lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=46)
        print("[INFO] Training...")
        lr.fit(train_x, train_y)
        predicted_qualities = lr.predict(test_x)
        (rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
        print("[INFO]Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
        print("[INFO]\tRMSE: %s" % rmse)
        print("[INFO]\tMAE: %s" % mae)
        print("[INFO]\tR2: %s" % r2)
        # Record hyper-parameters and evaluation metrics on the run
        mlflow.log_param("alpha", alpha)
        mlflow.log_param("l1_ratio", l1_ratio)
        mlflow.log_metric("rmse", rmse)
        mlflow.log_metric("r2", r2)
        mlflow.log_metric("mae", mae)
        tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
        # Infer model signature from the evaluation data
        signature = infer_signature(test_x, predicted_qualities)
        # Model registry does not work with file store
        if tracking_url_type_store != "file":
            mlflow.sklearn.log_model(lr, "model", registered_model_name=MODEL_NAME, signature=signature,
                                     input_example=test_x.iloc[0:2])
        else:
            mlflow.sklearn.log_model(lr, "model", signature=signature, input_example=test_x.iloc[0:2])
        print(f"[INFO] Model run_id='{run.info.run_id}'")
        if publish:
            print("Publishing model to app.shippedbrain.com")
            res = shippedbrain.upload_run(email=SHIPPED_BRAIN_EMAIL,
                                          password=SHIPPED_BRAIN_PASSWORD,
                                          run_id=run.info.run_id,
                                          model_name=MODEL_NAME)
            print(res.status_code)
            print(res.text)
        return run
if __name__ == "__main__":
    # click parses CLI flags (e.g. --publish) from sys.argv
    main()
| shippedbrain/shipped-brain-api | examples/elastic_net/train_and_log.py | train_and_log.py | py | 3,589 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.WARN",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
... |
17957990942 | """
Module for parsing segmented HamNoSys transcriptions.
"""
from pysign.data import HAMNOSYS
import attr
from tabulate import tabulate
def ascify(text, sep='.'):
    """Translate a HamNoSys transcription into readable glyph names.

    Each character is mapped to its HamNoSys name (unknown characters are
    rendered as ``<char>``), the names are joined with *sep*, and literal
    ``asciispace`` markers are turned back into real spaces.
    """
    names = sep.join(
        HAMNOSYS.get(char, {"Name": '<' + char + '>'})["Name"] for char in text
    )
    # Bug fix: the join and the replace previously hard-coded '.', silently
    # ignoring the *sep* argument. Behavior is unchanged for the default sep.
    return names.replace(sep + 'asciispace' + sep, ' ')
def parse_hamnosys(text,
h=True,
o=True,
c=True,
l=True,
m=True,
ascify_text=False,
):
# define character types
symmetry_base=""
handshape_base=""
handshape_diacritic=""
orientation_base=""
orientation_diacritic=""
location_base=""
location_diacritic=""
contact_base=""
brush=""
movement_base=""
movement_diacritic=""
repetition=""
hand_internal_mov=""
ambiguous_diacritic=""
ambiguous_location = ""
open_bracket=""
open_par=""
open_fuse=""
close_par=""
close_bracket=""
close_fuse=""
dominance_meta=""
# set up environments and variables
in_symmetry, symmetry = False, []
in_handshape, handshape, handshapes_meta = False, [], []
in_orientation, orientation, orientation_meta = False, [], []
in_location, location, location_meta = False, [], []
in_initial, initial_position = False, []
in_brush = False
in_contact, contact, contact_meta = False, [], []
in_movement, movement, movement_meta = False, [], []
in_fusion, in_simultaneous, in_grouped_movement = False, False, False
in_repetition, in_special_repetition, repeat = False, False, []
in_hand_internal = False
rest = ''
for i, char in enumerate(text):
# turn off all environments after space
if char == ' ':
in_symmetry = False
in_handshape = False
in_orientation = False
in_contact = False
in_brush = False
in_location = False
in_initial = False
in_movement = False
in_fusion = False
in_simultaneous = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
rest += char # not strictly necessary, but just to parse everything
# characters unique to symmetry
elif char in symmetry_base:
# turn on symmetry environment
in_symmetry = True
symmetry += [char]
# characters unique to handshape
elif char in handshape_base:
in_handshape = True
handshape += [char]
# turn off other environments
in_symmetry = False
in_orientation = False
in_contact = False
in_brush = False
in_location = False
in_initial = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
# leave in_fusion on
# leave in_simultaneous on
# leave in_movement on
elif char in handshape_diacritic:
handshape[-1] += char
# characters unique to orientation
elif char in orientation_base:
# two base characters in sequence
if in_orientation:
orientation[-1] += char
else:
in_orientation = True
orientation += [char]
# turn off other environments
in_symmetry = False
in_handshape = False
in_contact = False
in_brush = False
in_location = False
in_initial = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
# leave in_fusion on
# leave in_simultaneous on
# leave in_movement on
elif char == orientation_diacritic:
orientation[-1] += char
# join brush and contact symbols
elif char == brush:
in_brush = True
contact += [char]
# turn off other environments
in_symmetry = False
in_handshape = False
in_orientation = False
in_contact = False
in_location = False
in_initial = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
# leave in_fusion on
# leave in_simultaneous on
# leave in_movement on
# characters unique to location
elif char in location_base:
# two location base characters in sequence
if in_location:
location[-1] += char
# more detailed transcription for initial position
elif in_initial:
initial_position[-1] += char # follows another symbol
elif in_contact:
contact[-1] += char
else:
in_location = True
location += [char]
in_symmetry = False
in_handshape = False
in_orientation = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
# leave in_contact on
# leave in_brush on
# leave in_fusion on
# leave in_simultaneous on
# leave in_movement on
elif char in location_diacritic:
location[-1] += char
# characters unique to contact
elif char in contact_base:
# add contact to brush symbol if present
if in_brush:
in_contact = True
contact[-1] += char
in_brush = False
# more detailed transcription for initial position
elif text[i-1] == close_bracket:
in_initial = True
initial_position += [char]
elif in_initial: # end of contact, location, contact sequence
initial_position[-1] += char
in_initial = False
else:
in_contact = True
contact += [char]
in_symmetry = False
in_handshape = False
in_orientation = False
in_location = False
in_repetition = False
in_special_repetition = False
in_hand_internal = False
# leave in_fusion on
# leave in_simultaneous on
# leave in_movement on
# characters unique to movement
elif char in movement_base:
if in_movement:
if in_simultaneous:
movement[-1] += char
elif in_fusion:
movement[-1] += char
elif in_special_repetition:
movement[-1] += char
else:
movement += [char]
# for repeated movements
elif text[i-1] in repetition:
in_movement = True
movement[-1] += char
# beginning of simple movement
else:
in_movement = True
movement += [char]
in_symmetry = False
in_handshape = False
in_orientation = False
in_brush = False
in_contact = False
in_location = False
in_initial = False
in_hand_internal = False
elif char in movement_diacritic:
if in_repetition:
repeat[-1] += char
else:
movement[-1] += char
# keep fused movements together and parse at end
elif char == open_fuse:
movement += [char]
in_movement = True
in_fusion = True
in_symmetry = False
in_handshape = False
in_orientation = False
in_brush = False
in_contact = False
in_location = False
in_initial = False
in_repetition = False
in_special_repetition = False
in_simultaneous = False
in_hand_internal = False
elif char == close_fuse:
if in_fusion:
movement[-1] += char
in_fusion = False
in_handshape = False # these sometimes occur in movement segment
in_orientation = False # these sometimes occur in movement segment
else:
movement_meta += char # unparsed
in_fusion = False
elif char in repetition:
# special type of repetition relating two movements
if in_special_repetition:
movement[-1] += char
# multiple normal repetition
elif in_repetition:
repeat[-1] += char
# normal repetition
else:
in_repetition = True
repeat += [char]
# some diacritics can occur in most environments
elif char in ambiguous_diacritic:
if in_symmetry:
symmetry[-1] += char
elif in_handshape:
handshape[-1] += char
elif in_orientation:
orientation[-1] += char
elif in_contact:
contact[-1] += char
elif in_location:
location[-1] += char
elif in_movement:
if char in ambiguous_location: # can appear in movement segment
if in_hand_internal:
movement[-1] += char
elif text[i-1] == hand_internal_mov:
in_hand_internal = True
movement[-1] += char
else:
in_location = True
location += [char]
else:
movement[-1] += char
# must be location
else:
in_location = True
location += [char]
# check the next character after open bracket
elif char == open_bracket:
if text[i+1] in ambiguous_diacritic: # I think this must be location
location_meta += char
elif text[i+1] in handshape_base:
handshapes_meta += char
elif text[i+1] in orientation_base: # seems unlikely
orientation_meta += char
elif text[i+1] in contact_base: # seems unlikely
location_meta += char
elif text[i+1] in location_base:
location_meta += char
elif text[i+1] in movement_base:
# three options: (a) no dominance symbol, thus simultaneous
if dominance_meta not in text[i:]: # this may be a problem for compounds
in_simultaneous = True
in_movement = True
movement += [char]
# (b) simultaneous and dominance symbol
elif dominance_meta in text[i:]: # this may be a problem for compounds
if text[i-1] == open_bracket:
in_simultaneous = True
in_movement = True
movement += [char]
else:
movement_meta += char
# or (c) nondominant, no simultaneity
else:
movement_meta += char
elif text[i+1] == open_fuse:
movement_meta += char
elif text[i+1] == open_bracket: # two open brackets, the second is movement
movement_meta += char
# grouping symbol; I think this must be movement
elif text[i+1] == open_par:
movement_meta += char
else:
rest += char # unparsed
# check the next character after open paragraph
elif char == open_par:
if text[i+1] in ambiguous_diacritic: # I think this must be location
location_meta += char
elif text[i+1] == handshape_base: # rare
handshapes_meta += char
elif text[i+1] == orientation_base: # rare
orientation_meta += char
elif text[i+1] == brush:
contact_meta += char
elif text[i+1] in contact_base:
contact_meta += char
elif text[i+1] in location_base:
# for locations below the waist; turn on location environment
if text[i-1] in location_base:
in_location = True
location_meta += char
else:
location_meta += char
elif text[i+1] in movement_base:
in_grouped_movement = True # to do: for groups of movement symbols
movement_meta += char
elif text[i+1] in repetition:
in_movement = True
in_special_repetition = True
movement[-1] += char
else:
rest += char # unparsed characters
# marker for 2-handed sign
elif char == dominance_meta:
if in_handshape:
# handshape change in movement environment
if dominance_meta not in handshapes_meta:
handshapes_meta += char
else:
movement_meta += char
elif in_orientation:
# orientation change in movement environment
if dominance_meta not in orientation_meta:
orientation_meta += char
else:
movement_meta += char
elif in_contact:
# may occur in movement environment
if dominance_meta not in location_meta:
location_meta += char
else:
movement_meta += char
elif in_location:
# may occur in movement environment
if dominance_meta not in location_meta:
location_meta += char
else:
movement_meta += char
elif in_movement:
movement_meta += char
else:
rest += char
# turn everything off, so next symbol starts new segment
in_symmetry = False
in_handshape = False
in_orientation = False
in_contact = False
in_brush = False
in_location = False
in_initial = False
in_movement = False
in_simultaneous = False
in_fusion = False
in_repetition = False
in_special_repetition = False
# assign close brackets
elif char == close_bracket:
# close simlutaneous movement if open bracket in movement
if in_simultaneous:
movement[-1] += char
in_movement = True # leave on to parse dominance_meta in 2-handed signs, with simultaneous
in_handshape = False
in_orientation = False
in_contact = False
in_location = False
in_simultaneous = False
elif in_handshape:
handshapes_meta += char
in_handshape = False
elif in_orientation:
orientation_meta += char
in_orientation = False
elif in_location:
location_meta += char
in_location = False
elif in_contact:
location_meta += char
in_location = False
elif in_movement:
movement_meta += char
in_movement= False
else:
rest += char # unparsed
# assign close paragaph
elif char == close_par:
if in_grouped_movement: # to do
movement_meta += char
elif in_contact:
contact_meta += char
elif in_handshape:
handshapes_meta += char
elif in_orientation:
orientation_meta += char
elif in_location:
location_meta += char
elif in_movement:
if in_special_repetition:
movement[-1] += char
else:
movement_meta += char
else:
rest += char # unparsed
# handshapes
if h:
# determine dominant hand, no symmetry
if dominance_meta in handshapes_meta:
# first in list is dominant
dominant_hand = handshape.pop(0)
# second in list is nondominant
nondominant_hand = handshape.pop(0)
# all others
if len(handshape) > 0:
if len (handshape) > 1:
handshape_change = handshape
else:
handshape_change = ''.join(handshape)
else:
handshape_change = ''
# one hand, no symmetry
else:
# first in list is dominant
dominant_hand = handshape.pop(0)
# none
nondominant_hand = ''
# all others
if len(handshape) > 0:
if len(handshape) > 1:
handshape_change = handshape
else:
handshape_change = ''.join(handshape)
else:
handshape_change = ''
else:
dominant_hand = ''
nondominant_hand = ''
handshape_change = ''
# orientation
if o:
# determine dominant orientation
if dominance_meta in orientation_meta:
dominant_orientation = orientation.pop(0)
nondominant_orientation = orientation.pop(0)
if len(orientation) > 0:
if len(orientation) > 1:
orientation_change = orientation
else:
orientation_change = ''.join(orientation)
else:
orientation_change = ''
# only one hand
else:
dominant_orientation = orientation.pop(0)
nondominant_orientation = ''
if len(orientation) > 0:
if len(orientation) > 1:
orientation_change = orientation
else:
orientation_change = ''.join(orientation)
else:
orientation_change = ''
else:
dominant_orientation = ''
orientation_change = ''
nondominant_orientation = ''
# location
if l:
# check if normal or detailed transcription style
if initial_position != []:
dominant_location = location.pop(0)
nondominant_location = location.pop(0)
if len(location) > 0:
if len(location) > 1:
location_change = location
else:
location_change = ''.join(location)
else:
location_change = ''
# determine dominant location
elif dominance_meta in location_meta:
dominant_location = location.pop(0)
nondominant_location = location.pop(0)
if len(location) > 0:
if len(location) > 1:
location_change = location
else:
location_change = ''.join(location)
else:
location_change = ''
# only one hand
else:
dominant_location = location.pop(0)
nondominant_location = ''
if len(location) > 0:
if len(location) > 1:
location_change = location
else:
location_change = ''.join(location)
else:
location_change = ''
else:
dominant_location = ''
location_change = ''
nondominant_location = ''
initial_position = ''
# determine dominant contact: unsure if nondominant contact is possible transcription
# may not occur in some signs
if len(contact) > 0:
if dominance_meta in location_meta:
dominant_contact = contact.pop(0)
if len(contact) > 0:
if len(contact) > 1:
contact_change = contact
else:
contact_change = ''.join(contact)
else:
contact_change = ''
# only one hand
else:
dominant_contact = contact.pop(0)
if len(contact) > 0:
if len(contact) > 1:
contact_change = contact
else:
contact_change = ''.join(contact)
else:
contact_change = ''
else:
dominant_contact = ''
contact_change = ''
# movement
if m:
# parse contents of simultaneous, fused, and special-repetition movements
movement_updated = []
for item in movement:
if open_bracket in item and close_bracket in item:
simul_mov = []
stripped_movement = item.strip('')
for i, char in enumerate(stripped_movement):
if char in movement_base:
simul_mov += [char]
elif char in movement_diacritic:
simul_mov[-1] += char
elif char in ambiguous_location: # finger internal movement
simul_mov[-1] += char
simul_mov.append('simultaneous')
movement_updated.append(simul_mov)
elif open_fuse in item and close_fuse in item:
fused_mov = []
stripped_movement = item.strip('')
for i, char in enumerate(stripped_movement):
if char in movement_base:
fused_mov += [char]
elif char in movement_diacritic:
fused_mov[-1] += char
elif char in ambiguous_location: # finger internal movement
fused_mov[-1] += char
fused_mov.append('fused')
movement_updated.append(fused_mov)
elif open_par in item and close_par in item:
repeated_mov = []
repeating = ''
stripped_movement = item.replace('', '').replace('', '')
for i, char in enumerate(stripped_movement):
if char in movement_base:
repeated_mov += [char]
elif char in movement_diacritic:
repeated_mov[-1] += char
elif char in ambiguous_location: # finger internal movement
repeated_mov[-1] += char
elif char in repetition:
repeating += char
repeated_mov.append(repeating)
movement_updated.append(repeated_mov)
else:
movement_updated.append(item)
# determine dominant movement
if dominance_meta in movement_meta:
dominant_movement = movement_updated.pop(0)
nondominant_movement = movement_updated.pop(0)
if len(movement_updated) > 0:
if len(movement_updated) > 1:
movement_change = movement_updated
else:
movement_change = ''.join(movement_updated)
else:
movement_change = ''
# only one hand
else:
dominant_movement = movement_updated.pop(0)
nondominant_movement = ''
if len(movement_updated) > 0:
if type(movement_updated) == list:
movement_change = movement_updated
else:
movement_change = ''.join(movement_updated)
else:
movement_change = ''
else:
dominant_movement = ''
nondominant_movement = ''
movement_change = ''
# repetition symbols
if repeat != []:
if len(repeat) > 1:
repeat = repeat
else:
repeat = repeat.pop(0)
else:
repeat = ''
data = {
'symmetry': symmetry,
'initial position': initial_position,
'dominant': {
'shape': [dominant_hand, handshape_change],
'orientation': [dominant_orientation, orientation_change],
'location': [dominant_location, location_change],
'contact': [dominant_contact, contact_change],
'movement': [dominant_movement, movement_change],
'repetition': [repeat],
'is_dominant': True
},
'nondominant': {
'shape': [nondominant_hand, ''],
'orientation': [nondominant_orientation, ''],
'location': [nondominant_location, ''],
'contact': [],
'movement': [nondominant_movement, ''],
'is_dominant': False
},
# can be removed later, but keep now to check parsing
'meta': {
'handshape': handshapes_meta,
'orientation': orientation_meta,
'contact': contact_meta,
'location': location_meta,
'movement': movement_meta,
'rest': rest
}
}
return data
@attr.s
class Hand(object):
    """One hand of a sign, described by its phonological features."""
    shape = attr.ib(default='')
    orientation = attr.ib(default='')
    location = attr.ib(default='')
    movement = attr.ib(default='')
    is_dominant = attr.ib(default='')
    contact = attr.ib(default='')
    repetition = attr.ib(default='')
    def distance(self, other, weights=None, compare=None):
        """
        Compare one hand with another when comparing a sign.

        Notes
        -----
        `weights` maps the characteristics to be compared to a numeric
        weight.  `compare` is a function that yields a score between one
        and zero when comparing strings; the default simply yields 1 in
        case of difference and 0 in case of identity.
        """
        weights = weights or {
            'shape': 5,
            'orientation': 3,
            'location': 2,
            'movement': 1,
            'contact': 2,
            'repetition': 2
        }
        # default comparison: 0 when identical, 1 when different
        compare = compare or (lambda first, second: int(first != second))
        # weighted average over all compared characteristics
        total_weight = sum(weights.values())
        weighted_scores = [
            compare(getattr(self, feature), getattr(other, feature)) * weight
            for feature, weight in sorted(weights.items())
        ]
        return sum(weighted_scores) / total_weight
@attr.s
class Sign(object):
    """A full sign: raw HamNoSys text plus a dominant and nondominant Hand."""
    text = attr.ib(default='')
    dominant = attr.ib(default='')
    nondominant = attr.ib(default='')
    # Bug fix: a plain dict default is a single shared (mutable) object for
    # all instances; attr.Factory gives every Sign its own meta dict.
    meta = attr.ib(default=attr.Factory(lambda: {
        'handshape': '', 'orientation': '',
        'location': '', 'movement': '', 'rest': ''}))
    @classmethod
    def from_text(cls, text):
        """Parse a HamNoSys string and build a Sign with both hands filled in."""
        data = parse_hamnosys(text)
        dominant = Hand(**data['dominant'])
        nondominant = Hand(**data['nondominant'])
        meta = data['meta']
        return cls(
                text=text,
                dominant=dominant,
                nondominant=nondominant,
                meta=meta
                )
    def pprint(self, as_ascii=True):
        """Print the sign text and a per-category table of both hands.

        With `as_ascii=True` (the default) every cell is passed through
        `ascify`; otherwise the raw symbols are shown.
        """
        if not as_ascii:
            modify = lambda x: x
        else:
            modify = ascify
        table = [['Category', 'Dominant', 'Change', 'Nondominant']]
        # each Hand attribute holds [value, change]; nondominant change is
        # never annotated, so only the dominant change column is shown
        for category in ['shape', 'orientation', 'location', 'movement']:
            table += [[
                category,
                modify(getattr(self.dominant, category)[0]),
                modify(getattr(self.dominant, category)[1]),
                modify(getattr(self.nondominant, category)[0])
                ]]
        print(self.text)
        print(tabulate(table, headers='firstrow', tablefmt='pipe'))
# old data, to be deleted
# data = {
# 'symmetry': symmetry,
# 'handshape': {
# 'dominant': {
# 'shape': dominant_hand,
# 'change': handshape_change
# },
# 'nondominant': {
# 'shape': nondominant_hand,
# 'change': '' # is this never annotated?
# },
# },
# 'orientation': {
# 'dominant': {
# 'orientation': dominant_orientation,
# 'change': orientation_change
# },
# 'nondominant': {
# 'orientation': nondominant_orientation,
# 'change': '' # is this never annotated?
# },
# },
# 'contact': contact,
# 'location': {
# 'dominant': {
# 'location': dominant_location,
# 'change': location_change
# },
# 'nondominant': {
# 'location': nondominant_location
# },
# },
# 'movement': {
# 'dominant': {
# 'movement': dominant_movement,
# 'change': movement_change
# },
# 'nondominant': {
# 'movement': nondominant_movement
# },
# },
## 'errors': unparsable,
# 'handshape metasymbols': handshapes_meta,
# 'orientation metasymbols': orientation_meta,
# 'location metasymbols': location_meta,
# 'movement metasymbols': movement_meta,
# 'rest': rest
# }
# else:
# data = {
# 'symmetry': ascify(symmetry),
# 'handshape': {
# 'dominant': {
# 'shape': ascify(dominant_hand),
# 'change': ascify(handshape_change)
# },
# 'nondominant': {
# 'shape': ascify(nondominant_hand),
# 'change': '' # is this never annotated?
# },
# },
# 'orientation': {
# 'dominant': {
# 'orientation': ascify(dominant_orientation),
# 'change': ascify(orientation_change)
# },
# 'nondominant': {
# 'orientation': ascify(nondominant_orientation),
# 'change': '' # is this never annotated?
# },
# },
# 'contact': ascify(contact),
# 'location': {
# 'dominant': {
# 'location': ascify(dominant_location),
# 'change': ascify(location_change)
# },
# 'nondominant': {
# 'location': ascify(nondominant_location)
# },
# },
# 'movement': {
# 'dominant': {
# 'movement': ascify(dominant_movement),
# 'change': ascify(movement_change)
# },
# 'nondominant': {
# 'movement': ascify(nondominant_movement)
# },
# },
## 'errors': ascify(unparsable),
# 'handshape_metasymbols': ascify(handshapes_meta),
# 'orientation_metasymbols': ascify(orientation_meta),
# 'location_metasymbols': ascify(location_meta),
# 'movement_metasymbols': ascify(movement_meta),
# 'rest': ascify(rest)
# }
| lingpy/pysign | src/pysign/parse.py | parse.py | py | 34,106 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pysign.data.HAMNOSYS.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pysign.data.HAMNOSYS",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "attr.ib",
"line_number": 744,
"usage_type": "call"
},
{
"api_name": "attr.ib",
... |
import requests
from datetime import datetime
import os  # NOTE(review): imported but never used in this script
# User profile sent to the Nutritionix natural-language exercise parser.
GENDER = "male"
WEIGHT_KG = 70
# NOTE(review): 5.1 looks like a height in feet, not centimetres — the field
# is named height_cm, so calorie estimates are likely off. Confirm and fix.
HEIGHT_CM = 5.1
AGE = 20
# NOTE(review): credentials are hard-coded in source (also the Sheety
# basic-auth password and bearer token below). Move them to environment
# variables and rotate the exposed secrets.
APP_ID = "a824f715"
API_KEY = "e448d4180759e718cbd58d59be69abc2"
exercise_endpoint = "https://trackapi.nutritionix.com/v2/natural/exercise"
sheet_endpoint = "https://api.sheety.co/4bbf6252f12ebc8107aab6bbea8b8b11/myWorkouts/workouts"
exercise_text = input("Tell me which exercises you did: ")
# Nutritionix authenticates via app-id/app-key headers.
headers = {
    "x-app-id": APP_ID,
    "x-app-key": API_KEY,
}
parameters = {
    "query": exercise_text,
    "gender": GENDER,
    "weight_kg": WEIGHT_KG,
    "height_cm": HEIGHT_CM,
    "age": AGE
}
# Parse the free-text exercise description into structured exercises.
response = requests.post(exercise_endpoint, json=parameters, headers=headers)
result = response.json()
print(result)
today_date = datetime.now().strftime("%d/%m/%Y")
now_time = datetime.now().strftime("%X")
# Log each parsed exercise as a row in the spreadsheet via Sheety.
# NOTE(review): every exercise is POSTed three times (no-auth, basic-auth and
# bearer-token variants kept from a course exercise), creating duplicate rows;
# only the last response is printed below.
for exercise in result["exercises"]:
    sheet_inputs = {
        "workout": {
            "date": today_date,
            "time": now_time,
            "exercise": exercise["name"].title(),
            "duration": exercise["duration_min"],
            "calories": exercise["nf_calories"]
        }
    }
    #No Auth
    sheet_response = requests.post(sheet_endpoint, json=sheet_inputs)
    #Basic Auth
    sheet_response = requests.post(
        sheet_endpoint,
        json=sheet_inputs,
        auth=("fidelis", "Felix2003!")
    )
    #Bearer Token
    bearer_headers = {
        "Authorization": f"Bearer {'97656'}"
    }
    sheet_response = requests.post(
        sheet_endpoint,
        json=sheet_inputs,
        headers=bearer_headers
    )
print(sheet_response.text)
| Fidelis-7/100-days-of-coding-in-python | 100-Days/Day_38/main.py | main.py | py | 1,611 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "requests.post",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "datetime.dateti... |
5182513730 | import numpy
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
# 'th' = Theano-style channels-first ordering (samples, channels, width,
# height), matching the (N, 1, 28, 28) reshape below.
# NOTE(review): set_image_dim_ordering is an old Keras 1.x API — confirm the
# pinned Keras version still provides it.
K.set_image_dim_ordering('th')
# Fix random seed for reproducibility.
seed = 7
numpy.random.seed(seed)
# Load dataset (MNIST handwritten digits: 60k train / 10k test images).
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Training dataset is structured as a 3D array of instance, image width, and image height.
# For the convolutional model we keep the 2D structure but add a channel axis.
# Reshape to be [samples],[channels],[width],[height].
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
# Pixel values are gray scale between 0 and 255. Good idea to perform some scaling of input values when using NN models.
# Normalise inputs from 0-255 to 0-1.
X_train = X_train / 255
X_test = X_test / 255
# Finally, output variable is an integer from 0 to 9. This is a multi-class classification problem.
# It is good practise to use a one hot encoding of the class values, transforming the vector of class integers into a binary matrix.
# One hot encode outputs.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]
# Define the larger model
def larger_model():
    """Build and compile the larger CNN for MNIST (channels-first 1x28x28 input).

    Two conv+pool stages, dropout, then three dense layers ending in a
    softmax over the module-level global ``num_classes``.
    """
    # Assemble the ordered layer stack, then add the layers one by one.
    layer_stack = [
        Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'),  # 30 feature maps, 5x5 kernels
        MaxPooling2D(pool_size=(2, 2)),                                  # max over 2x2 patches
        Conv2D(15, (3, 3), activation='relu'),                           # 15 feature maps, 3x3 kernels
        MaxPooling2D(pool_size=(2, 2)),                                  # max over 2x2 patches
        Dropout(0.2),                                                    # 20% dropout
        Flatten(),                                                       # to a flat feature vector
        Dense(128, activation='relu'),                                   # fully connected, 128 units
        Dense(50, activation='relu'),                                    # fully connected, 50 units
        Dense(num_classes, activation='softmax'),                        # one output per class
    ]
    network = Sequential()
    for layer in layer_stack:
        network.add(layer)
    # Compile model
    network.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return network
# Build the model
model = larger_model()
# Fit the model, validating on the test split after each of the 10 epochs.
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=200)
# Final evaluation of the model; scores = [loss, accuracy]
scores = model.evaluate(X_test, y_test, verbose=0)
print("Large CNN Error: %.2f%%" % (100-scores[1]*100))
| olliesguy/Machine-Learning | Neural Networks/Convolutional NN/largerConvolutionalNNwithKeras.py | largerConvolutionalNNwithKeras.py | py | 2,867 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "keras.backend.set_image_dim_ordering",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "... |
# WLAN helper for MicroPython boards: loads the hostname and known networks
# from wlan.json ({"HOSTNAME": ..., "WLAN_CREDENTIALS": {ssid: password}})
# and sets up the station interface used by connect()/reconnect().
import json
import time
import network
try:
    with open('wlan.json') as f:
        wlan_config = json.load(f)
    hostname = wlan_config['HOSTNAME']
    wlan_credentials = wlan_config['WLAN_CREDENTIALS']
    assert len(wlan_credentials) > 0
except OSError:
    raise Exception('wlan config does not exist, but is mandatory')
except ValueError:
    raise Exception('wlan config is no valid json')
except KeyError:
    raise Exception('wlan config must contain a HOSTNAME and non empty WLAN_CREDENTIALS')
except AssertionError:
    raise Exception('WLAN_CREDENTIALS may not be empty')
# Station-mode interface, announced via DHCP under the configured hostname.
wlan = network.WLAN(network.STA_IF)
wlan.active(True)
time.sleep_us(100) # bug in micropython 19.1, hostname may not be set directly after wlan activation
wlan.config(dhcp_hostname=hostname)
# SSID of the last successful connection; reconnect() relies on it.
latest_connected_network = None
def connect():
    """Block until connected to any known network from wlan_credentials.

    Scans repeatedly, tries every known SSID (5 s timeout each) and records
    the successful SSID in ``latest_connected_network`` for reconnect().
    """
    global latest_connected_network
    if not wlan.active():
        wlan.active(True)
    while not wlan.isconnected():
        wlan_list = wlan.scan()
        if not wlan_list:
            # empty list on active wlan bug -> must restart wlan interface
            wlan.active(False)
            wlan.active(True)
            continue
        last_ssid = None
        for ssid, _, _, _, _, _ in wlan_list:
            ssid = ssid.decode()
            if ssid in wlan_credentials:
                last_ssid = ssid
                wlan.connect(ssid, wlan_credentials[ssid])
                # poll for up to 5 seconds for the connection to come up
                start_time = time.ticks_ms()
                while (not wlan.isconnected()) and (time.ticks_diff(time.ticks_ms(), start_time) < 5000):
                    time.sleep_ms(200)
                if wlan.isconnected():
                    print('some buffer for {} bug'.format('print'))
                    print('connected to wlan: {} as {}'.format(ssid, hostname))
                    latest_connected_network = ssid
                    break
        if not wlan.isconnected():
            print('no connectable network found. retrying...')
            time.sleep_ms(200)
            # sometimes it does connect for some reason
            if wlan.isconnected():
                latest_connected_network = last_ssid
def reconnect():
    """Re-establish the connection to the last successful network.

    Must be called after connect() (raises otherwise). Retries forever on
    timeout; after three OSErrors the error is re-raised.
    """
    attempts = 0
    while not wlan.isconnected():
        try:
            print('reconnecting wifi...')
            if wlan.isconnected():
                return
            if latest_connected_network is None:
                raise Exception('reconnect is called before connect')
            wlan.active(True)
            ssid = latest_connected_network
            wlan.connect(ssid, wlan_credentials[ssid])
            # busy-wait for up to 5 seconds for the link to come up
            start_time = time.ticks_ms()
            while (not wlan.isconnected()) and (time.ticks_diff(time.ticks_ms(), start_time) < 5000):
                pass
        except OSError:
            if attempts >= 2:
                raise
            # restart the interface and try again
            wlan.active(False)
            wlan.active(True)
            attempts += 1
        # NOTE(review): this prints after every attempt, including failed
        # ones — presumably meant to print only once connected; confirm.
        print('done')
def isconnected():
    """Return True if the station interface currently has a connection."""
    return wlan.isconnected()
def ifconfig():
    """Return the interface's (ip, subnet, gateway, dns) tuple."""
    return wlan.ifconfig()
| dtn7/dtn7zero | micropython-lib/wlan.py | wlan.py | py | 3,021 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "network.WLAN",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "network.STA_IF",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "time.sleep_us",
"line_n... |
13857013396 | from inspect import _empty
from discord import player
from discord.ext import commands
import random
import traceback
class Connectx(commands.Cog):
    """Discord cog for playing Connect-N games on square boards.

    All state lives in memory: ``game_list`` maps a stringified incrementing
    game id to a game dict with keys ``ID``, ``grid_size``, ``players``
    (name -> {name, emoji, turnID}) and ``board``.  The board is indexed
    column-first (board[x][y]); an empty cell holds ``blank_space`` ("0"),
    a played cell holds the owning player's integer turnID.
    """
    def __init__(self, config, bot: commands.Bot):
        self.bot = bot
        self.config = config
        self.game_list = {}          # game id (str) -> game dict
        self.predefined_emoji_list = ["🟥","🟧","🟨","🟩","🟦","🟪","🟫"]
        self.blank_space = "0"       # marker for an empty board cell
        self.game_counter = 0        # last assigned game id

    @commands.command()
    async def connectx(self, ctx, game_id, position:int = -1, emoji:str = ""):
        """Bot command entry point: !connectx <id|new|list> [column|size] [emoji]."""
        await ctx.send(self.do_game(ctx, game_id, position, emoji))

    def new_game(self, ctx, game_id, position = -1, emoji = ""):
        """Create a new game; ``position`` is the grid size of the new board."""
        if position < 1:
            return "Need a grid-size (second param)"
        if position > 20:
            return f"Easy there cowboy, you really want a Connect {position} game? Well i won't let you."
        # allocate the next game ID and register the game
        self.game_counter = self.game_counter + 1
        game = {
            "ID": str(self.game_counter),
            "grid_size": position,
            "players": {},
            "board": [[self.blank_space for _ in range(position)] for _ in range(position)],
        }
        self.game_list[game["ID"]] = game
        # register the creator as the first player (turnID 1); give them a
        # random piece emoji unless they supplied one
        player_info = {
            "name": ctx.message.author.name,
            "emoji": emoji if emoji != "" else random.choice(self.predefined_emoji_list),
            "turnID": 1,  # first player, also used as ID
        }
        game["players"][player_info["name"]] = player_info
        return f"Game made with ID: {game['ID']} size: {game['grid_size']}\n"+self.print_board(ctx, game["ID"])

    def print_board(self, ctx, game_id):
        """Render the board of ``game_id`` as a monospaced Discord code block."""
        t = "\n```\n"
        current_game = self.game_list[game_id]
        grid_size = current_game["grid_size"]
        for y in range(grid_size):
            # horizontal separator above each row
            t += ("----"*grid_size)
            t += ("-\n")
            for x in range(grid_size):
                current_point = current_game["board"][x][y]
                # look up the emoji of whoever owns this cell (blank otherwise)
                emoji = "  "
                for player_name in current_game["players"]:
                    if current_game["players"][player_name]["turnID"] == int(current_point):
                        emoji = current_game["players"][player_name]["emoji"]
                t += (f"| {emoji} ")
            t += ("|\n")
        t += ("----"*grid_size+"-")+'\n```'
        return t

    def play_game(self, ctx, game_id, position = -1, emoji = ""):
        """Drop the caller's piece into 1-based column ``position``."""
        position = position-1  # convert to a 0-based column index
        current_game = self.game_list[game_id]
        # find the lowest empty cell: scan from the top until a filled cell
        landing_row = current_game["grid_size"]-1
        for row, cell in enumerate(current_game["board"][position]):
            if cell != self.blank_space:
                landing_row = row-1
                break
        if landing_row < 0:
            return "Column is full, try again."
        current_game["board"][position][landing_row] = \
            current_game["players"][ctx.message.author.name]["turnID"]
        return self.print_board(ctx, game_id)

    def do_game(self, ctx, game_id, position = -1, emoji = ""):
        """Dispatch a command: 'new', 'list', or a move in an existing game.

        Bug fixes vs. the original: the game lookup now happens *after* the
        existence check (unknown numeric ids raised KeyError before), and
        column ``grid_size`` itself is accepted (was rejected off-by-one).
        """
        if game_id.lower() == "new":
            return self.new_game(ctx, game_id, position, emoji)
        elif game_id.lower() == "list":
            response = "Game ID List:\n "+",".join([str(t) for t in self.game_list])
            return response
        elif game_id not in self.game_list:
            return f"No such game as {game_id}, Mi'lord.\n Please make a new or pick from list: "+",".join([str(t) for t in self.game_list])
        current_game = self.game_list[game_id]
        if position > current_game["grid_size"] or position < 1:
            return f"No such column as {position}, game is {current_game['grid_size']} big."
        self.update_player(ctx, game_id, emoji)
        return self.play_game(ctx, game_id, position, emoji)

    def update_player(self, ctx, game_id, emoji):
        """Register the caller in the game, or update their emoji."""
        players = self.game_list[game_id]["players"]
        name = ctx.message.author.name
        if name in players:
            if emoji != "":
                players[name]["emoji"] = emoji
        else:
            if emoji == "":
                # bug fix: joiners without an emoji used to get "", which
                # rendered as an invisible piece; match new_game's behaviour
                emoji = random.choice(self.predefined_emoji_list)
            players[name] = {
                "name": name,
                "emoji": emoji,
                "turnID": len(players)+1
            }

    def get_user_emoji(self, ctx, game_id):
        """Return the emoji registered for the calling player in this game."""
        return self.game_list[game_id]["players"][ctx.message.author.name]["emoji"]

    def test_game(self, ctx, game_id, position = -1, emoji = ""):
        """Debug helper replaying a fixed sequence of moves on game "1".

        Bug fix: the original passed an undefined global ``bot`` instead of
        ``ctx``, so every scripted call raised NameError.
        """
        self.do_game(ctx, game_id, position, emoji)
        self.do_game(ctx, "1", 4, "�")
        self.do_game(ctx, "1", 2, "�")
        self.do_game(ctx, "1", 3, "�")
        self.do_game(ctx, "1", 4, "�")
        self.do_game(ctx, "1", 4, "�")
        self.do_game(ctx, "1", 4, "�")
        self.do_game(ctx, "1", 4, "�")
        print(self.do_game(ctx, "1", 4, "�"))
if __name__ == "__main__":
    # Manual smoke-test entry point: build the cog from config.ini.
    from configparser import ConfigParser
    config = ConfigParser()
    config.read("config.ini")
    #config.read("template.ini")
    bot = commands.Bot(command_prefix="!")
    m = Connectx(bot=bot, config=config["CONNECTX"])
    #m.test_game(bot,"new",6,"P")
| JeppeLovstad/Discord-Meme-Delivery-Bot | BotModules/connectx.py | connectx.py | py | 6,037 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 8,
"usage_type": "attribute"
},
{
"api... |
43641338998 | #########################
# data_processing.py #
#########################
# Implements DataGenerator and batch sampling
# functionality for MAML. Originally based on
# functionality in CS330 Homework 2, Fall 2020.
# Written by Will Geoghegan for CS330
# final project, Fall 2020. Based on work by
# CS330 course staff.
import numpy as np
import pandas as pd
import tensorflow as tf
import pyarrow.feather as feather
import os
import random
from functools import partial
# A DataGenerator samples batches of tasks from our task distribution,
# with each task containing K samples. We experimented with N-way
# classification based on the size and direction of the price change
# before settling on regression, but N-way specification remains for
# easy extension back to classification
class DataGenerator(object):
    """Samples batches of per-ticker tasks for MAML-style meta-learning.

    Each task is one ticker; a batch holds `batch_size` tasks with
    `N` classes x `K` samples each.  N-way classification support remains
    from earlier experiments even though the project settled on regression.
    """
    def __init__(self, N, K, test_N, test_K, demo):
        self.N = N
        self.K = K
        self.test_N = test_N
        self.test_K = test_K
        self.c_length = 6   # time dimension (lookback window)
        self.c_dim = 145    # feature dimension per timestep
        self.tolerance = 0.075  # price-change threshold used by get_subset
        self.dim_output = self.N
        data_path = '../data/'
        assert(os.path.isdir(data_path))
        print('Loading data...', end='', flush=True)
        #self.daily = feather.read_feather(data_path + 'daily.dat')
        #self.quarterly = feather.read_feather(data_path + 'quarterly.dat')
        if not demo:
            self.combined = feather.read_feather(data_path + 'combined.dat')
            self.labels = feather.read_feather(data_path + 'labels.dat')
        else:
            self.combined = feather.read_feather(data_path + 'combined_demo.dat')
            self.labels = feather.read_feather(data_path + 'labels_demo.dat')
        print('done.')
        tickers = list(self.combined['ticker'].unique())
        print('Removing tickers with insufficient data for K=' + str(self.K) + '...', end='', flush=True)
        # Bug fix: the original wrote `random.seed = 0`, which rebinds the
        # module's seed *function* to the int 0 instead of seeding the RNG.
        random.seed(0)
        np.random.seed(0)
        np.random.shuffle(tickers)
        copy = tickers.copy()
        for ticker in copy:
            if len(self.combined[self.combined['ticker']==ticker]) < (self.c_length + self.K * self.N):
                tickers.remove(ticker)
        self.labels['0'] = self.labels['0'].fillna(value=0)
        print('done.')
        # Validation and test sets are drawn from large companies (S&P 500
        # members) that are highly liquid and desirable to trade in
        table = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')
        df = table[0]
        symbols = df['Symbol']
        sp = list(symbols[symbols.isin(tickers)])
        self.num_test = int(len(sp) / 2)
        self.num_val = len(sp) - self.num_test
        random.seed(0)  # bug fix: was `random.seed = 0` (see above)
        np.random.seed(0)
        np.random.shuffle(sp)
        self.metatrain_tickers = [t for t in tickers if t not in sp]
        self.metatest_tickers = sp[:self.num_test]
        self.metaval_tickers = sp[self.num_test:]
    # This function was written for the classification setting.
    def get_subset(self, ticker, which):
        """Return the ticker's rows whose label is above (+1), below (-1)
        or within (0) the +/- tolerance band; skips the first c_length rows."""
        t = self.combined[self.combined['ticker'] == ticker].iloc[self.c_length:]
        l = self.labels[self.combined['ticker'] == ticker].iloc[self.c_length:]
        if which == 1:
            return t[l['0'] > self.tolerance]
        elif which == -1:
            return t[l['0'] < -self.tolerance]
        else:
            return t[(l['0'] <= self.tolerance) & (l['0'] >= -self.tolerance)]
    # This function samples a batch of the specified size from the specified task
    # partition. Also normalizes the batch by input dimension. Based on
    # the equivalent function in CS330 Homework 2.
    def sample_batch(self, batch_type, batch_size, shuffle=True, swap=False):
        """Sample `batch_size` tasks from the 'train'/'val'/'test' partition.

        Returns (data, labels) with shapes
        (batch, classes, samples, c_length, c_dim) and
        (batch, classes, samples, 1); data is mean/std normalized along the
        feature axis.
        """
        # Select the correct partition.
        if batch_type == 'train':
            tickers = self.metatrain_tickers
            num_classes = self.N
            num_samples_per_class = self.K
        elif batch_type == 'val':
            tickers = self.metaval_tickers
            num_classes = self.N
            num_samples_per_class = self.K
        else:
            tickers = self.metatest_tickers
            num_classes = self.test_N
            num_samples_per_class = self.test_K
        sampled_tickers = random.sample(tickers, batch_size)
        data_partial = partial(self.get_datapoints, n_classes = num_classes, n_samples = num_samples_per_class)
        data = np.zeros((batch_size, num_classes, num_samples_per_class, self.c_length, self.c_dim))
        labels = np.zeros((batch_size, num_classes, num_samples_per_class))
        for i in range(batch_size):
            (data[i, :, :, :, :], labels[i, :, :]) = data_partial(sampled_tickers[i])
        labels = labels.reshape((batch_size, num_classes, num_samples_per_class, 1))
        # Normalize each feature vector to zero mean / unit variance.
        data = np.apply_along_axis(lambda x: x - np.mean(x), -1, data)
        data = np.apply_along_axis(lambda x: x / np.std(x), -1, data)
        return (data, labels)
    # This function selects the appropriate datapoints for a single task in a batch.
    def get_datapoints(self, ticker, n_classes, n_samples, shuffle=True, test=False):
        """Sample `n_classes` x `n_samples` lookback windows for one ticker.

        NOTE(review): with test=True, n_samples is reset to len(curr) inside
        the loop while the arrays were preallocated with the original
        n_samples — this would overflow for longer series; confirm intent.
        """
        t = self.combined[self.combined['ticker'] == ticker]
        l = self.labels[self.combined['ticker'] == ticker]
        curr = t.iloc[self.c_length:]
        curr_labels = l.iloc[self.c_length:]
        data = np.zeros((n_classes, n_samples, self.c_length, self.c_dim))
        labels = np.zeros((n_classes, n_samples))
        for i in range(n_classes):
            if (test):
                n_samples = len(curr)
            idxs = random.sample(range(len(curr)), n_samples)
            idxs.sort()
            for j in range(n_samples):
                k = idxs[j]
                index = int(curr.iloc[k].name)
                # slice the c_length-row window ending at this row and drop
                # the non-feature columns
                start = index - self.c_length + 1
                point = (t.loc[start:index]).drop(['ticker', 'date', 'calendardate', 'datekey', 'reportperiod'], axis=1)
                data[i, j, :, :] = np.array(point.fillna(value=0))
                labels[i, j] = float(l.loc[index]['0'])
        return (data, labels)
| wdg3/regularized-meta-learning | src/data_processing.py | data_processing.py | py | 5,371 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.isdir",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "pyarrow.feather.read_feather",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "pyarrow.feath... |
41331723652 | # coding=utf-8
import random
import re
from django.db import models, transaction
from django.db.models import Count, Q
from django.contrib.auth.models import User
from django.conf import settings
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
import django.utils.http
# Enumeration type: an int restricted to the three guess outcomes.
class Guess(int):
    """Int subclass allowing only the values 0, 1 and 2 (see the
    CORRECT/HUMAN/COMPUTER constants below)."""
    def __new__(cls, value):
        # Bug fix: the original validated in __init__ and assigned to the
        # local name `self`, which is a no-op on an immutable int subclass
        # (and made Guess(None) fail inconsistently). Validate in __new__.
        value = int(value)
        assert 0 <= value < 3, "value %s out of range" % value
        return super(Guess, cls).__new__(cls, value)
    # NOTE: defining __unicode__ with human-readable labels here breaks the
    # Django admin form; the labels live in GuessField's choices instead.
# Constants: the three possible guess outcomes.
CORRECT = Guess(0)   # the guesser picked the real definition
HUMAN = Guess(1)     # the guesser attributed the explanation to a human
COMPUTER = Guess(2)  # the guesser attributed the explanation to a bot
# Custom fields
class GuessField(models.PositiveSmallIntegerField):
    """Model field storing a Guess (Correct/Human/Computer) as a small int;
    NULL means no guess has been made yet."""
    description = "Choice: Correct, Human or Bot"
    # SubfieldBase makes to_python() run on attribute assignment — a
    # Python-2-era Django mechanism (removed in later Django versions).
    __metaclass__ = models.SubfieldBase
    def __init__(self, *args, **kwargs):
        # Fixed choice set and NULL always allowed.
        kwargs['choices'] = [
              (CORRECT, 'Richtig')
            , (HUMAN, 'Mensch')
            , (COMPUTER, 'Computer')
            ]
        kwargs['null'] = True
        super(GuessField, self).__init__(*args, **kwargs)
    def to_python(self, value):
        """Convert DB/serialized values into Guess; None stays None."""
        if value is None:
            return None
        if isinstance(value, Guess):
            return value
        return Guess(value)
    def formfield(self, **kwargs):
        """Coerce submitted form values back into Guess instances."""
        defaults = {'coerce': lambda x: Guess(x)}
        defaults.update(kwargs)
        return super(GuessField, self).formfield(**defaults)
# Register GuessField with South (pre-Django-1.7 migrations) so schema
# migrations can introspect the custom field.
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^nbip\.models\.GuessField"])
# Exceptions
class NotEnoughWordsException(Exception):
    """Raised when no unseen words are available for the current player/bot."""
    pass
class NotEnoughExplanationsException(Exception):
    """Raised when no word has enough explanations to build a game round.

    Constructed with two arguments: the number of missing human
    explanations and the number of missing bot explanations.
    """
    def missing_human_explanations(self):
        """Number of human explanations still needed (first ctor argument)."""
        return self.args[0]
    def missing_bot_explanations(self):
        """Number of bot explanations still needed (second ctor argument)."""
        return self.args[1]
# Models
class Bot(models.Model):
    """A user-owned explanation bot, authenticated by its unique API key."""
    class Meta:
        verbose_name = "Bot"
        verbose_name_plural = u"Bots"
    owner = models.ForeignKey(User, verbose_name="Benutzer", related_name = "bots")
    name = models.CharField(max_length=200, verbose_name="Bot-Name")
    apikey = models.CharField(max_length=100, unique=True, verbose_name="API-Key")
    # the word currently assigned to this bot for explaining, if any
    explaining = models.ForeignKey('Word', verbose_name="Erklärt gerade", null=True)
    def __unicode__(self):
        return "%s by %s" % (self.name, self.owner)
    def word_to_explain(self):
        """Return the word assigned to this bot, drawing and persisting a
        new random one if no assignment exists."""
        if not self.explaining:
            word = Word.random_for_bot(bot = self)
            self.explaining = word
            self.save()
        return self.explaining
    @transaction.atomic
    def explain_word(self, expl):
        """Store `expl` as this bot's explanation of its current word and
        clear the assignment (both steps in one transaction)."""
        expl = Explanation(
                word = self.explaining,
                explanation = expl,
                bot = self);
        expl.save()
        self.explaining = None
        self.save()
class Word(models.Model):
    """A word (lemma) with its correct explanation, used in guessing rounds."""
    class Meta:
        verbose_name = "Wort"
        verbose_name_plural = u"Wörter"
    lemma = models.CharField(max_length=200)
    correct_explanation = models.CharField(max_length=1000,
            verbose_name = "Korrekte Erklärung",
            help_text= u"<i>Wort</i> ist ein/eine <i>Erklärung</i>")
    reference = models.URLField(blank=True,
            verbose_name = "Referenz",
            help_text = u"URL zu Wikipedia o.ä.")
    created = models.DateTimeField(auto_now_add=True)
    author = models.ForeignKey(User, verbose_name="Autor", related_name="submitted_words")
    # denormalized explanation counters, kept current by update_cached_fields()
    n_human_explanations = models.PositiveIntegerField(
            verbose_name = "Anzahl menschliche Erklärungen",
            default = 0)
    n_bot_explanations = models.PositiveIntegerField(
            verbose_name = "Anzahl Computer-Erklärungen",
            default = 0)
    # query components (Q objects)
    @classmethod
    def q_owner(cls, player):
        """Q: words submitted by `player`."""
        return Q(author__exact = player.id)
    @classmethod
    def q_explained(cls, player):
        """Q: words `player` has already explained."""
        explained = Explanation.objects.filter(author__exact = player).values("word")
        return Q(id__in = explained)
    @classmethod
    def q_bot_explained(cls, bot):
        """Q: words `bot` has already explained."""
        explained = Explanation.objects.filter(bot__exact = bot).values("word")
        return Q(id__in = explained)
    @classmethod
    def q_guessed(cls, player):
        """Q: words `player` has already played a game round on."""
        guessed = GameRound.objects.filter(player__exact = player).values("word")
        return Q(id__in = guessed)
    @classmethod
    def q_unseen(cls, player):
        """Q: words `player` neither submitted, explained, nor guessed."""
        return Q(~cls.q_owner(player) & ~cls.q_explained(player) & ~cls.q_guessed(player))
    @classmethod
    def q_answer_unseen(cls, player):
        """Q: words whose answer `player` has not seen (not owner, not guessed)."""
        return Q(~cls.q_owner(player) & ~cls.q_guessed(player))
    @classmethod
    def q_possibly_complete(cls):
        """Q: words whose counters suggest enough human and bot explanations."""
        return Q(n_human_explanations__gte = settings.HUMAN_EXPLANATIONS) & \
               Q(n_bot_explanations__gte = settings.BOT_EXPLANATIONS)
    @classmethod
    def random(cls, player):
        """Fetch a random unseen word for `player` to explain, preferring
        words that are closest to having enough explanations."""
        words = cls.objects \
            .filter(cls.q_unseen(player))
        if len(words) < 1:
            raise NotEnoughWordsException()
        needy_words = words \
            .filter(n_human_explanations__lte = settings.HUMAN_EXPLANATIONS) \
            .order_by('-n_human_explanations','-n_bot_explanations')
        # If there are words with insufficient answers, return the one that is closest
        # to having sufficient
        if needy_words:
            return needy_words.first()
        # Otherwise return a random word
        return random.choice(words)
    @classmethod
    def random_for_bot(cls, bot):
        """Fetch a random word for `bot` to explain (same strategy as random())."""
        words = cls.objects \
            .exclude(cls.q_bot_explained(bot))
        if len(words) < 1:
            raise NotEnoughWordsException()
        needy_words = words \
            .filter(n_bot_explanations__lte = settings.BOT_EXPLANATIONS) \
            .order_by('-n_bot_explanations','-n_human_explanations')
        # If there are words with insufficient answers, return the one that is closest
        # to having sufficient
        if needy_words:
            return needy_words.first()
        # Otherwise return a random word
        return random.choice(words)
    def usable_for(self, player):
        """True if this word has enough explanations not involving `player`
        (neither authored by them nor by one of their bots)."""
        usable_human_explanations = \
            self.explanation_set \
                .filter(author__isnull = False) \
                .exclude(author = player) \
                .count()
        usable_bot_explanations = \
            self.explanation_set \
                .filter(bot__isnull = False) \
                .exclude(bot__in = player.bots.all()) \
                .count()
        return (usable_human_explanations >= settings.HUMAN_EXPLANATIONS and
                usable_bot_explanations >= settings.BOT_EXPLANATIONS)
    @classmethod
    # similar to random, but only consider words with enough explanations
    # NOTE(review): relies on Python-2 `filter` returning a list; under
    # Python 3 `words` would be a lazy (always truthy) filter object.
    def random_explained(cls, player):
        candidates = cls.objects \
            .filter(cls.q_answer_unseen(player))
        # This could possibly be rewritten with SQL, but seems to be tricky
        words = filter(lambda w: w.usable_for(player),
                candidates.filter(cls.q_possibly_complete()))
        # fetches everything; be smarter if required
        if not words:
            best = candidates \
                .order_by('-n_human_explanations', '-n_bot_explanations') \
                .first()
            if best:
                # This is only an approximation, if the current user has explained that
                # word already.
                raise NotEnoughExplanationsException(
                    settings.HUMAN_EXPLANATIONS - best.n_human_explanations,
                    settings.BOT_EXPLANATIONS - best.n_bot_explanations
                    )
            else:
                raise NotEnoughWordsException()
        return random.choice(words)
    def update_cached_fields(self):
        """Recount the human/bot explanation counters from the DB and save."""
        self.n_human_explanations = \
            Explanation.objects.filter(author__isnull = False, word__exact = self.id).count()
        self.n_bot_explanations = \
            Explanation.objects.filter(bot__isnull = False, word__exact = self.id).count()
        self.save()
    def clean_an_explanation(self, e):
        """Strip a leading 'ein/eine <lemma> ist ein/eine' boilerplate from `e`."""
        r = r"(eine?)? ?(" + re.escape(self.lemma) + \
            r")? ?(ist)? ?(eine?) ?"
        m = re.match(r, e, re.IGNORECASE)
        if m:
            return e[m.end(0):]
        else:
            return e
    def clean_explanation(self):
        """The correct explanation with the boilerplate prefix stripped."""
        return self.clean_an_explanation(self.correct_explanation)
    def link(self):
        """Reference URL of this word, or a DuckDuckGo search as a fallback."""
        if self.reference:
            return self.reference
        else:
            return "https://duckduckgo.com/?q=%s" % django.utils.http.urlquote(self.lemma)
    def __unicode__(self):
        return self.lemma
class Explanation(models.Model):
    """An explanation for a Word, authored by exactly one of: a human or a bot."""
    class Meta:
        verbose_name = u"Erklärung"
        verbose_name_plural = u"Erklärungen"
    word = models.ForeignKey(Word, verbose_name="Wort")
    explanation = models.CharField(max_length=1000,
            verbose_name= u"Erklärung",
            help_text= u"<i>Wort</i> ist ein/eine <i>Erklärung</i>")
    # exactly one of these should be not null
    author = models.ForeignKey(User, verbose_name="Autor", blank=True, null=True, related_name="submitted_explanations")
    bot = models.ForeignKey(Bot, verbose_name="Bot", blank=True, null=True, related_name="submitted_explanations")
    def clean(self):
        """Validate the author-XOR-bot invariant."""
        # Bug fix: ValidationError was referenced without being imported
        # anywhere in this module, so clean() raised NameError instead.
        from django.core.exceptions import ValidationError
        if self.author is None and self.bot is None:
            raise ValidationError('Autor oder Bot müssen gesetzt sein.')
        if self.author is not None and self.bot is not None:
            raise ValidationError('Autor und Bot dürfen nicht beide gesetzt sein.')
    def clean_explanation(self):
        """This explanation's text with the boilerplate prefix stripped."""
        return self.word.clean_an_explanation(self.explanation)
    def type(self):
        '''HUMAN or COMPUTER, depending on who authored this explanation.'''
        if self.author is not None:
            return HUMAN
        else:
            return COMPUTER
    def author_name(self):
        """Display name: the User for human explanations, otherwise
        a '"bot" by owner' string."""
        if self.author is not None:
            return self.author
        else:
            return u"„%s“ by %s" % (self.bot.name, self.bot.owner)
    def __unicode__(self):
        return "%s ist ein/eine %s" % (self.word.lemma, self.explanation)
# Keep Word.n_*_explanations up-to-date whenever an Explanation is saved
# or deleted.
@receiver(post_save, sender=Explanation)
@receiver(post_delete, sender=Explanation)
def update_word(sender, instance, **kwargs):
    """Signal handler: recount the affected word's cached explanation counters."""
    instance.word.update_cached_fields()
class GameRound(models.Model):
class Meta:
verbose_name = u"Spielrunde"
verbose_name_plural = u"Spielrunde"
word = models.ForeignKey(Word, verbose_name="Wort")
explanations = models.ManyToManyField(Explanation, related_name='explanation+', through='GameRoundEntry')
pos = models.PositiveSmallIntegerField()
# What the user guessed for the correct result
guess = GuessField()
player = models.ForeignKey(User, verbose_name="Spieler", related_name="gamerounds")
def __unicode__(self):
return "%s (%d)" % (self.word.lemma, self.id)
@classmethod
@transaction.atomic
def start_new_round(cls, player):
# pick a valid word where the user has not seen the answer before
word = Word.random_explained(player=player)
# fetch all possible explanations
# (excludes those by the current player, or his bots)
human_expls = word.explanation_set \
.filter(author__isnull = False) \
.exclude(author = player)
bot_expls = word.explanation_set.filter(bot__isnull = False) \
.exclude(bot__in = player.bots.all())
assert len(human_expls) >= settings.HUMAN_EXPLANATIONS, \
"n_human_explanations was not up to date? Bug in Word.useable_for()?"
assert len(bot_expls) >= settings.BOT_EXPLANATIONS, \
"n_bot_explanations was not up to date? Bug in Word.useable_for()?"
expl = random.sample(human_expls, settings.HUMAN_EXPLANATIONS) + \
random.sample(bot_expls, settings.BOT_EXPLANATIONS)
poss = range(1 + len(expl))
random.shuffle(poss)
round = GameRound(
word = word,
guess = None,
pos = poss.pop(),
player = player,
)
round.save()
for e in expl:
GameRoundEntry(
gameround = round,
explanation = e,
pos = poss.pop(),
guess = None,
).save()
return round
def get_explanations(self):
entries = self.entries.select_related('explanation', 'explanation__author', 'explanation__word','explanation__bot','explanation__bot__owner').all()
expls = [None] * (1 + len(entries))
expls[self.pos] = {
'text': self.word.clean_explanation(),
'author' : self.word.author,
'guess': self.guess,
'actual': CORRECT,
}
for e in entries:
expls[e.pos] = {
'text': e.explanation.clean_explanation(),
'guess': e.guess,
'author': e.explanation.author_name(),
'actual': e.explanation.type(),
}
return expls
def get_counts(self):
counts = {
CORRECT: 1,
HUMAN: 0,
COMPUTER: 0,
}
for e in GameRoundEntry.objects.filter(gameround=self):
counts[e.explanation.type()] += 1
return counts
@transaction.atomic
def set_guesses(self, guesses):
# TODO: These assertions should be enforced by the client code (JS)
assert self.guess is None
entries = GameRoundEntry.objects.filter(gameround=self)
required = [CORRECT] + [e.explanation.type() for e in entries]
assert len(guesses) == len(required), \
"Wrong number of answers"
assert guesses.count(CORRECT) == required.count(CORRECT), \
"Wrong number of answers 'correct'"
assert guesses.count(HUMAN) == required.count(HUMAN), \
"Wrong number of answers 'human'"
assert guesses.count(COMPUTER) == required.count(COMPUTER), \
"Wrong number of answers 'computer'"
self.guess = guesses[self.pos]
for e in entries:
assert e.pos < len(guesses)
e.guess = guesses[e.pos]
e.save()
self.save()
class GameRoundEntry(models.Model):
class Meta:
verbose_name = u"Spielrunden-Erkärung"
verbose_name_plural = u"Spielrunden-Erkärung"
ordering = ['pos']
unique_together = ('gameround','explanation','pos')
gameround = models.ForeignKey(GameRound, related_name="entries")
explanation = models.ForeignKey(Explanation)
pos = models.PositiveSmallIntegerField()
guess = GuessField()
class Stats(models.Model):
user = models.OneToOneField(User, primary_key=True)
n_words = models.PositiveIntegerField(
verbose_name = "Eingereichte Wörter",
default = 0)
n_explanations = models.PositiveIntegerField(
verbose_name = "Eingereichte Erklärungen",
default = 0)
n_games = models.PositiveIntegerField(
verbose_name = "Spielrunden",
default = 0)
n_correct = models.PositiveIntegerField(
verbose_name = "Korrekt geraten",
default = 0)
n_wrong = models.PositiveIntegerField(
verbose_name = "Falsch geraten",
default = 0)
n_detected_human = models.PositiveIntegerField(
verbose_name = "Mensch erkannt",
default = 0)
n_detected_bot = models.PositiveIntegerField(
verbose_name = "Computer erkannt",
default = 0)
n_tricked = models.PositiveIntegerField(
verbose_name = "Andere reingelegt",
default = 0)
n_not_tricked = models.PositiveIntegerField(
verbose_name = "Andere nicht reingelegt",
default = 0)
def attrs(self):
for field in self._meta.fields:
if type(field) == models.PositiveIntegerField:
yield field.verbose_name, getattr(self, field.name)
def update(self):
self.n_words = \
self.user.submitted_words.count()
self.n_explanations = \
self.user.submitted_explanations.count()
self.n_games = \
self.user.gamerounds.exclude(guess__exact = None).count()
self.n_correct = \
self.user.gamerounds.exclude(guess__exact = None).filter(guess = CORRECT).count()
self.n_wrong = \
self.user.gamerounds.exclude(guess__exact = None).exclude(guess = CORRECT).count()
self.n_detected_human = \
GameRoundEntry.objects \
.filter(explanation__bot__exact = None) \
.filter(gameround__player = self.user) \
.filter(guess = HUMAN) \
.count()
self.n_detected_bot = \
GameRoundEntry.objects \
.filter(explanation__author__exact = None) \
.filter(gameround__player = self.user) \
.filter(guess = COMPUTER) \
.count()
self.n_tricked = \
GameRoundEntry.objects \
.filter(explanation__author = self.user) \
.exclude(guess__exact = None) \
.filter(guess = CORRECT) \
.count()
self.n_not_tricked = \
GameRoundEntry.objects \
.filter(explanation__author = self.user) \
.exclude(guess__exact = None) \
.exclude(guess = CORRECT) \
.count()
self.save()
class BotStats(models.Model):
bot = models.OneToOneField(Bot, primary_key=True)
n_tricked = models.PositiveIntegerField(
verbose_name = "Andere reingelegt",
default = 0)
n_not_tricked = models.PositiveIntegerField(
verbose_name = "Andere nicht reingelegt",
default = 0)
def attrs(self):
for field in self._meta.fields:
if type(field) == models.PositiveIntegerField:
yield field.verbose_name, getattr(self, field.name)
def update(self):
self.n_tricked = \
GameRoundEntry.objects \
.filter(explanation__bot = self.bot) \
.exclude(guess__exact = None) \
.exclude(guess = COMPUTER) \
.count()
self.n_not_tricked = \
GameRoundEntry.objects \
.filter(explanation__bot = self.bot) \
.exclude(guess__exact = None) \
.filter(guess = COMPUTER) \
.count()
self.save()
# Keep Stats up-to-date
@receiver(post_save, dispatch_uid="stats update")
@receiver(post_delete, dispatch_uid="stats update 2")
def update_stats(sender, instance, **kwargs):
affected_users = set()
affected_bots = set()
if type(instance) == Word:
affected_users.add(instance.author)
elif type(instance) == Explanation:
affected_users.add(instance.author)
elif type(instance) == GameRound:
affected_users.add(instance.player)
affected_users.add(instance.word.author)
for e in instance.entries.all():
if e.explanation.type() == HUMAN:
affected_users.add(e.explanation.author)
else:
affected_bots.add(e.explanation.bot)
affected_users.add(e.explanation.bot.owner)
for u in affected_users:
if u:
# Create stats object
if not(hasattr(u, 'stats')):
u.stats = Stats(user=u)
u.stats.save()
u.stats.update()
for b in affected_bots:
if b:
# Create stats object
if not(hasattr(b, 'stats')):
b.stats = BotStats(bot=b)
b.stats.save()
b.stats.update()
| entropia/no-bot-is-perfect | nbip_server/nbip/models.py | models.py | py | 20,182 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.PositiveSmallIntegerField",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "django.db.models.SubfieldBase",
"line_number": 45,
"usage_type": "attrib... |
73539569312 | import argparse
import csv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("firstCol", help = "Number of the first column to match", type=int)
parser.add_argument("secondCol", help = "Number of the second column to match, multiple content", type=int)
parser.add_argument("-b", "--fromBeginning", help="Whether full csv", action="store_false")
parser.add_argument("path", help="Path to the file")
parser.add_argument("name", help="Name of the file to save")
args = parser.parse_args()
return args.firstCol, args.secondCol, args.fromBeginning, args.path, args.name
def open_csv(pathname, fromBeginning):
with open(pathname, newline = '') as csfile:
reader = csv.reader(csfile, delimiter=',')
index = 1 if fromBeginning else 0
result = [row for row in list(reader)[index:]]
return result
def create_list_for_table_with_duplicates(csfile, col1, col2):
result = []
for row in csfile:
temp = [row[col1], row[col2]]
result.append(temp)
doubled = []
for row in result:
res_col_2 = row[1].split(", ")
for tinyRow in res_col_2:
doubled.append([row[0], tinyRow])
return doubled
def save_csv(result, name):
with open(name + ".csv", 'w') as opcsv:
writer = csv.writer(opcsv)
writer.writerows(result)
if __name__ == '__main__':
first, second, begin, path, name = parse_args()
res = open_csv(path, begin)
result = create_list_for_table_with_duplicates(res, first, second)
print(result)
save_csv(result, name)
| EuphoricThinking/encyklopedia_lekow | encyklopedia_leków/rodzielCsv.py | rodzielCsv.py | py | 1,603 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 42,
"usage_type": "call"
}
] |
2616990888 | import json
import logging
from twitterauth.session import Session
from twitterauth.test import APITestCase
from twitterauth.configs import settings
from twitterauth.utils import helper
from twitterauth.utils import payload
LOGGER = logging.getLogger('twitter')
grant_type_missing_err_msg = 'Missing required parameter: grant_type'
invalid_creds_err_msg = 'Unable to verify your credentials'
invalid_grant_type_err_msg = 'invalid_grant_type_value parameter is invalid'
class TestTwitterAuthAPIInputValidation(APITestCase):
@classmethod
def setUpClass(self):
self.session = Session().get_session()
self.base_url = settings.api.url
def test_oauth_without_grant_type(self):
"""
Verify twitter oauth without grant_type
"""
headers = payload.get_oauth_headers(helper.getBase64Value())
# Get bearer token using /oauth2/token
response = self.session.post(self.base_url + "/oauth2/token",
headers=headers)
# Verify error status code and error message
assert response.status_code == 403
LOGGER.info(response.status_code)
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
grant_type_missing_err_msg
def test_oauth_with_invalid_grant_type(self):
"""
Verify twitter oauth with invalid grant_type
"""
headers = payload.get_oauth_headers(helper.getBase64Value())
data = payload.get_oauth_data(grant_type="invalid_grant_type_value")
# Get bearer token using /oauth2/token
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
# Verify error status code and error message
assert response.status_code == 400
LOGGER.info(response.status_code)
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_grant_type_err_msg
def test_oauth_without_content_type(self):
"""
Verify twitter oauth without content_type
"""
headers = payload.get_oauth_headers(helper.getBase64Value(),
content_type="")
# Get bearer token using /oauth2/token
response = self.session.post(self.base_url + "/oauth2/token",
headers=headers)
# Verify error status code
assert response.status_code == 403
LOGGER.info(response.status_code)
def test_oauth_without_authorization(self):
"""
Verify twitter oauth without authorization
"""
headers = payload.get_oauth_headers(helper.getBase64Value())
data = payload.get_oauth_data()
headers.pop('Authorization', None)
# Get bearer token using /oauth2/token
response = self.session.post(self.base_url + "/oauth2/token",
data=data,
headers=headers)
assert response.status_code == 403
LOGGER.info(response.text)
# Verify error message in response
parsed_response = json.loads(response.text)
assert parsed_response["errors"][0]["message"] == \
invalid_creds_err_msg
def test_oauth_invalid_url(self):
"""
Verify twitter oauth with invalid oauth url
"""
headers = payload.get_oauth_headers(helper.getBase64Value())
data = payload.get_oauth_data()
# Get bearer token using /oauth2/token
response = self.session.post(self.base_url + "/oauth2",
data=data,
headers=headers)
assert response.status_code == 404
| rohitkadam19/API-Automation | twitter_app_auth/tests/api/test_input_validation.py | test_input_validation.py | py | 3,892 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "twitterauth.test.APITestCase",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "twitterauth.session.Session",
"line_number": 20,
"usage_type": "call"
},
{
"api_na... |
6950578278 | import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch._jit_internal import Optional, Tuple
from torch.nn import grad # noqa: F401
from torch.nn.functional import linear
from torch.nn.modules.linear import _LinearWithBias
from torch.nn.parameter import Parameter
from torch.overrides import has_torch_function, handle_torch_function
def multi_head_attention_weights(query: Tensor,
key: Tensor,
embed_dim_to_check: int,
num_heads: int,
in_proj_weight: Tensor,
in_proj_bias: Tensor,
dropout_p: float,
training: bool = True,
key_padding_mask: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor]]:
r"""
Args:
query, key, value: map a query and a set of key-value pairs to an output.
See "Attention Is All You Need" for more details.
embed_dim_to_check: total dimension of the model.
num_heads: parallel attention heads.
in_proj_weight, in_proj_bias: input projection weight and bias.
dropout_p: probability of an element to be zeroed.
out_proj_weight, out_proj_bias: the output projection weight and bias.
training: apply dropout if is ``True``.
key_padding_mask: if provided, specified padding elements in the key will
be ignored by the attention. This is an binary mask. When the value is True,
the corresponding value on the attention layer will be filled with -inf.
attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all
the batches while a 3D mask allows to specify a different mask for the entries of each batch.
q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
static_k, static_v: static key and value used for attention operators.
Shape:
Inputs:
- query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
the embedding dimension.
- key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
the embedding dimension.
- key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
will be unchanged. If a BoolTensor is provided, the positions with the
value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.
- attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
is provided, it will be added to the attention weight.
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
- static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
Outputs:
- attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
E is the embedding dimension.
- attn_output_weights: :math:`(N, L, S)` where N is the batch size,
L is the target sequence length, S is the source sequence length.
"""
if not torch.jit.is_scripting():
tens_ops = (query, key, in_proj_weight, in_proj_bias)
if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):
return handle_torch_function(
multi_head_attention_weights, tens_ops, query, key,
embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,
dropout_p, training=training, key_padding_mask=key_padding_mask,
attn_mask=attn_mask)
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == embed_dim_to_check
# allow MHA to have different sizes for the feature dimension
# assert key.size(0) == value.size(0) and key.size(1) == value.size(1)
head_dim = embed_dim // num_heads
assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
scaling = float(head_dim) ** -0.5
if torch.equal(query, key):
q, k = linear(query, in_proj_weight, in_proj_bias).chunk(2, dim=-1)
else:
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = 0
_end = embed_dim
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
q = linear(query, _w, _b)
# This is inline in_proj function with in_proj_weight and in_proj_bias
_b = in_proj_bias
_start = embed_dim
_end = embed_dim * 2
_w = in_proj_weight[_start:_end, :]
if _b is not None:
_b = _b[_start:_end]
k = linear(key, _w, _b)
q = q * scaling
if attn_mask is not None:
assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \
attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \
'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)
if attn_mask.dtype == torch.uint8:
warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
attn_mask = attn_mask.to(torch.bool)
if attn_mask.dim() == 2:
attn_mask = attn_mask.unsqueeze(0)
if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 2D attn_mask is not correct.')
elif attn_mask.dim() == 3:
if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
raise RuntimeError('The size of the 3D attn_mask is not correct.')
else:
raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
# attn_mask's dim is 3 now.
# convert ByteTensor key_padding_mask to bool
if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
warnings.warn(
"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
key_padding_mask = key_padding_mask.to(torch.bool)
q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
src_len = k.size(1)
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
attn_output_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
attn_output_weights.masked_fill_(attn_mask, float('-inf'))
else:
attn_output_weights += attn_mask
if key_padding_mask is not None:
attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
attn_output_weights = attn_output_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
float('-inf'),
)
attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
return attn_output_weights
class SpatialTemporalTensorAttention(nn.Module):
"""A custom class to implement the multi-dimension Spatial-Temporal MultiheadAttention based on the
`torch.nn.MultiheadAttention` class.
Args:
d_model: total dimension of the model.
num_head: parallel attention heads.
dropout: a Dropout layer on attn_output_weights. Default: 0.1.
"""
def __init__(self,
d_model: int,
num_head: int,
dropout: float = 0.1,
bias: bool = True):
super().__init__()
# assert mode in ['spatial', 'temporal']
# self.mode = mode
self.d_model = d_model
self.num_head = num_head
self.dropout = dropout
self.head_dim = d_model // num_head
assert self.head_dim * num_head == self.d_model, "embed_dim must be divisible by num_heads"
self.in_temporal_proj_weight = Parameter(torch.empty(3 * d_model, d_model))
self.in_spatial_proj_weight = Parameter(torch.empty(2 * d_model, d_model))
if bias:
self.in_temporal_proj_bias = Parameter(torch.empty(3 * d_model))
self.in_spatial_proj_bias = Parameter(torch.empty(2 * d_model))
else:
self.register_parameter('in_temporal_proj_bias', None)
self.register_parameter('in_spatial_proj_bias', None)
self.out_proj = _LinearWithBias(d_model, d_model)
self._reset_parameters()
def _reset_parameters(self):
torch.nn.init.xavier_uniform_(self.in_temporal_proj_weight)
torch.nn.init.xavier_uniform_(self.in_spatial_proj_weight)
if self.in_temporal_proj_bias is not None:
torch.nn.init.constant_(self.in_temporal_proj_bias, 0.)
if self.in_spatial_proj_bias is not None:
torch.nn.init.constant_(self.in_spatial_proj_bias, 0.)
# self.attn = MultiheadAttention(d_model, num_head, dropout)
def __setstate__(self, state):
# Support loading old MultiheadAttention checkpoints generated by v1.1.0
if '_qkv_same_embed_dim' not in state:
state['_qkv_same_embed_dim'] = True
super(SpatialTemporalTensorAttention, self).__setstate__(state)
def forward(self,
query: Tensor,
key: Tensor,
value: Tensor,
key_padding_mask: Optional[Tensor] = None,
attn_mask: Optional[Tensor] = None) \
-> Tuple[Tensor, Tensor]:
batch_size, seq_len, obj_len, d_model = query.shape
v = value.permute(0, 2, 1, 3)
v = v.reshape(-1, seq_len, d_model)
v = F.linear(v, self.in_temporal_proj_weight[self.d_model * 2:],
self.in_temporal_proj_bias[self.d_model * 2:])
v = v.contiguous().view(-1, batch_size * obj_len * self.num_head, self.head_dim).transpose(0, 1)
# for temporal attention
temporal_attn, no_zero_mask = self._compute_attn(query, key, self.d_model, self.num_head,
self.in_temporal_proj_weight[:self.d_model * 2],
self.in_temporal_proj_bias[:self.d_model * 2],
self.dropout, self.training,
key_padding_mask, attn_mask, attn_dim=1)
# for spatial attention
spatial_attn, _ = self._compute_attn(query, key, self.d_model, self.num_head,
self.in_spatial_proj_weight,
self.in_spatial_proj_bias[:self.d_model * 2],
self.dropout, self.training,
key_padding_mask, attn_mask=None, attn_dim=2)
temporal_attn = temporal_attn.reshape(batch_size, obj_len, self.num_head, seq_len, seq_len
).transpose(1, 2).reshape(-1, obj_len, seq_len, seq_len)
spatial_attn = spatial_attn.reshape(batch_size, seq_len, self.num_head, obj_len, obj_len
).transpose(1, 2).reshape(-1, seq_len, obj_len, obj_len)
# import pdb; pdb.set_trace()
spatial_temporal_attn = torch.bmm(spatial_attn.reshape(-1, obj_len, obj_len),
temporal_attn.transpose(1, 2).reshape(-1, obj_len, seq_len))
spatial_temporal_attn = spatial_temporal_attn.reshape(-1, seq_len, obj_len, seq_len).transpose(1, 2).reshape(
-1, self.num_head, obj_len, seq_len, seq_len).transpose(1, 2).reshape(-1, seq_len, seq_len)
# attn_weights = torch.zeros_like(spatial_temporal_attn)
# attn_weights[no_zero_mask] = F.softmax(spatial_temporal_attn[no_zero_mask], dim=-1)
attn_weights = F.softmax(spatial_temporal_attn, dim=-1)
# spatial_temporal_attn = F.tanh(spatial_temporal_attn)
attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_weights, v)
attn_output = attn_output.reshape(batch_size, obj_len, seq_len, d_model).transpose(1, 2)
attn_output = F.linear(attn_output, self.out_proj.weight, self.out_proj.bias)
return attn_output, attn_weights
@staticmethod
def _compute_attn(query, key, d_model, num_heads, in_proj_weight, in_proj_bias, dropout_p, training,
key_padding_mask=None, attn_mask=None, attn_dim=1):
target_len = query.shape[attn_dim]
source_len = key.shape[attn_dim]
reserve_size = [query.shape[idx] for idx in range(len(query.shape) - 1) if idx != attn_dim]
if key_padding_mask is not None:
kp_mask = ~key_padding_mask.transpose(1, 2) if attn_dim == 1 else ~key_padding_mask
kp_mask = kp_mask.reshape(-1, source_len)
else:
kp_mask = None
a_mask = ~attn_mask if attn_mask is not None else None
if a_mask is not None and attn_mask.shape[1] > 2:
no_zero_mask = ~(torch.bmm((~kp_mask).unsqueeze(2).float(), (~kp_mask).unsqueeze(1).float()).bool())
no_zero_mask += a_mask.unsqueeze(0)
no_zero_mask = (no_zero_mask.reshape(kp_mask.size(0), -1).sum(dim=1)) < target_len * source_len
else:
no_zero_mask = kp_mask.sum(dim=1) < source_len
q = query if attn_dim == 2 else query.permute(0, 2, 1, 3)
k = key if attn_dim == 2 else key.permute(0, 2, 1, 3)
q = q.reshape(-1, target_len, d_model)[no_zero_mask].permute(1, 0, 2)
k = k.reshape(-1, source_len, d_model)[no_zero_mask].permute(1, 0, 2)
# print(q.size(), k.size())
# assert k.size(0) == v.size(0) and k.size(1) == v.size(1)
no_zero_attn_weights = multi_head_attention_weights(q, k, d_model, num_heads,
in_proj_weight, in_proj_bias,
dropout_p, training,
kp_mask[no_zero_mask], a_mask)
no_zero_attn_weights = F.softmax(no_zero_attn_weights, dim=-1)
# no_zero_attn_weights = F.tanh(no_zero_attn_weights)
attn_weights = no_zero_attn_weights.new_zeros((kp_mask.size(0) * num_heads, target_len, source_len))
# print(no_zero_attn_weights.size(), attn_weights.size())
# import pdb; pdb.set_trace()
no_zero_mask = torch.cat([no_zero_mask.unsqueeze(1) for _ in range(num_heads)], dim=1).reshape(-1) # TODO: uncertain
attn_weights[no_zero_mask] += no_zero_attn_weights
return attn_weights, no_zero_mask #.reshape(reserve_size + list(attn_weights.size()[-2:]))
if __name__ == '__main__':
stta = SpatialTemporalTensorAttention(64, 4, 0.1)
inp = torch.rand(16, 15, 9, 64)
key_padding_mask = inp.new_ones(16, 15, 9, 1).bool()
out = stta(inp, inp, inp, key_padding_mask, attn_mask=None)
print(out[0].size())
| Flawless1202/Non-AR-Spatial-Temporal-Transformer | nast/models/utils/spatial_temporal_tensor_attention.py | spatial_temporal_tensor_attention.py | py | 16,739 | python | en | code | 73 | github-code | 1 | [
{
"api_name": "torch.Tensor",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_numbe... |
6403461688 | from traceback import print_tb
import telebot
from telebot import types
import MySQLdb
from datetime import datetime
from telebot.types import ReplyKeyboardRemove
# DB & Bot connection
db = MySQLdb.connect("localhost", "root", "1", "db_bikes") or die(
"could not connect to database")
bot = telebot.TeleBot('5304555854:AAEuWKOhEklQASHjkG7oU0F8AV4jwukkk5Q')
# dependencies
show_bike = ''
free_bike = ()
NAME = []
# Start command
@bot.message_handler(commands=['start'])
def start_message(message):
global NAME
bot.send_message(message.chat.id, 'Welcome, please Enter your name')
NAME = []
# Show all bikes
@bot.message_handler(content_types='text')
def message_reply(message):
global show_bike
free_bike = ()
NAME.append(message.text)
if len(NAME) > 0:
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
item1 = types.KeyboardButton("Show Bikes")
markup.add(item1)
bot.send_message(message.chat.id, 'Select an option',
reply_markup=markup)
if message.text == "Show Bikes":
cursor = db.cursor()
cursor.execute(
"SELECT * FROM bike_monitor")
bike_list = cursor.fetchall()
db.commit()
# Printing list of bikes
for bike in bike_list:
if bike[4] == 0 or bike[3] == 0:
show_bike += ">> Bike #" + \
str(bike[0]) + " taken at: " + str(bike[2]) + "\n\n"
else:
show_bike += (">> Bike #" +
str(bike[0]) + " is free") + "\n\n"
free_bike = (bike,) + (free_bike)
bot.send_message(message.chat.id, show_bike,
parse_mode='Markdown')
# Adding buttons
markup = types.ReplyKeyboardMarkup(resize_keyboard=True)
for bike in free_bike:
markup.add(types.KeyboardButton(
"bike #" + str(bike[0])))
bot.send_message(
message.chat.id, 'Please, select a bike', reply_markup=markup)
# cleaning bike list
show_bike = ''
# Selecting bike
elif message.text[:4] == "bike":
cursor = db.cursor()
# take user_id
cursor.execute(
"SELECT card_id FROM users where name = '%s'" % NAME[0])
user_list = cursor.fetchall()
user_id = str(user_list[0][0])
# take bike info about user
cursor.execute(
f"SELECT * FROM bike_monitor where user_id = \"{user_id}\"")
userExsist = cursor.fetchall()
if not userExsist:
bot.send_message(
message.chat.id, 'Please, enter your name', reply_markup=ReplyKeyboardRemove())
now = datetime.now()
current_time = now.strftime("%H:%M")
cursor.execute(
f"UPDATE bike_monitor SET user_id = \"{user_id}\", take = \"{current_time}\", reserved = '0' where reserved = '1' and id = \"{message.text[6:]}\" limit 1")
print("User ", user_id, " was added")
bot.send_message(
message.chat.id, 'You have reserved bike #'+str(message.text[6:]))
else:
bot.send_message(
message.chat.id, 'You have already reserved bike')
db.commit()
print(NAME)
bot.infinity_polling()
| oneku16/Digital_Campus | bot/bot.py | bot.py | py | 3,542 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "MySQLdb.connect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "telebot.TeleBot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "telebot.types.ReplyKeyboardMarkup",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "tel... |
4743048609 | import requests
from twilio.rest import Client
import os
import time
url = 'http://api.vk.com/method/'
token = os.environ['sms_token']
account_sid = os.environ['account_sid']
auth_token = os.environ['auth_token']
def get_json(user_id):
method = url + 'users.get'
data = {
'user_ids': user_id,
'fields': 'online',
'access_token' : token,
'v' : '5.103',
}
r = requests.get(method, data)
return r.json()
def sms_sender(mesage):
client = Client(account_sid, auth_token)
message = client.messages.create(
body= mesage,
from_='+19367553922',
to='+79298405593'
)
return message.sid
def get_status():
r = get_json('zyoma')
status = r['response'][0]['online']
return status
def main():
while True:
status = get_status()
if status == 1:
sms_sender(mesage='User online!')
break
time.sleep(5)
if __name__ == '__main__':
main()
| zYoma/api_01_sms | main.py | main.py | py | 1,002 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line... |
28410999980 | from django.conf import settings
from django.conf.urls import include, url # noqa
from django.contrib import admin
from django.views.generic import TemplateView
import django_js_reverse.views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^jsreverse/$', django_js_reverse.views.urls_js, name='js_reverse'),
url(r'^$', TemplateView.as_view(template_name='base.html'), name='home'),
url(r'', include('battles.urls', namespace='battles')),
url(r'', include('users.urls')),
url(r'social/', include('social_django.urls', namespace='social')),
url(r'^api-auth/', include('rest_framework.urls')),
url(r'api/', include('battles.endpoints_urls', namespace='api_battles')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
| pamella/pokebattle | pokebattle/urls.py | urls.py | py | 857 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name... |
12780521041 | import logging
from fastapi import FastAPI, UploadFile
from fastapi.middleware.cors import CORSMiddleware
import pandas as pd
from .convert import xlsx_to_jvfdtm
# Verbose logging is helpful while developing the conversion service.
logging.basicConfig(level=logging.DEBUG)

app = FastAPI()

# Allow the local front-end dev server to call this API from the browser.
app.add_middleware(
    CORSMiddleware,
    allow_origins=[
        "http://localhost:8080"
    ],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"]
)
@app.post("/convert")
async def convert(file: UploadFile):
    """Accept an uploaded spreadsheet and convert it to the JVFDTM format.

    Reads the upload fully into memory, parses it with pandas (no header
    row assumed) and hands the resulting DataFrame to the converter.
    """
    import io  # local import: wrap the raw upload bytes for pandas

    contents = await file.read()
    # pd.read_excel expects a path or a file-like object; passing raw bytes
    # directly is deprecated in recent pandas, so wrap them in BytesIO.
    df = pd.read_excel(io.BytesIO(contents), header=None)
    xlsx_to_jvfdtm(df)
    return {"ok": "ok"}
| luminousai/rosetta | server/src/app.py | app.py | py | 591 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "fastapi.middle... |
71395832033 | from datetime import datetime
from ninja import FilterSchema, Schema
from pydantic import Field, validator
class CourseIn(Schema):
    """Request payload for creating a course."""

    title: str
    description: str
    slug: str
    language: str
    requirements: str
    what_you_will_learn: str
    level: str
    # Optional lists of related primary keys; presumably validated by the
    # endpoint against existing rows -- confirm against the view code.
    categories: list[int] | None
    instructors: list[int] | None
    is_published: bool = False
class CourseOut(Schema):
    """Serialized representation of a course returned by the API."""

    id: int
    title: str
    description: str
    slug: str
    language: str
    requirements: str
    what_you_will_learn: str
    level: str
    categories: list[int]
    instructors: list[int]
    is_published: bool = False
    created: datetime
    modified: datetime

    @validator('created', 'modified', allow_reuse=True)
    def convert_datetime(cls, value: datetime):
        # Render timestamps as ISO-8601 strings in the response body.
        return value.isoformat()

    @staticmethod
    def resolve_categories(obj):
        # Flatten the many-to-many relation into a list of category ids.
        return [category.id for category in obj.categories.all()]

    @staticmethod
    def resolve_instructors(obj):
        # Flatten the many-to-many relation into a list of instructor ids.
        return [instructor.id for instructor in obj.instructors.all()]
class CourseFilter(FilterSchema):
    """Query-string filters; each field takes a comma-separated value list."""

    categories: str | None = Field(q='categories__in')  # e.g. "1,2,3"
    language: str | None = Field(q='language__in')
    level: str | None = Field(q='level__in')

    @validator('categories', 'language', 'level', allow_reuse=True)
    def split_testing(cls, value):
        # Split the comma-separated string into a list for the __in lookups.
        return value.split(',')
class CourseUpdate(Schema):
    """Partial-update payload: every field is optional."""

    title: str | None
    description: str | None
    slug: str | None
    language: str | None
    requirements: str | None
    what_you_will_learn: str | None
    level: str | None
    categories: list[int] | None
    instructors: list[int] | None
    is_published: bool | None
class InvalidCategoriesOrInstructors(Schema):
    """Error body returned when referenced related ids are invalid."""

    detail: str = 'invalid instructors or categories'
class CourseRelationIn(Schema):
    """Payload for creating a user/course relation."""

    course_id: int
class CourseRelationOut(Schema):
    """Serialized user/course relation returned by the API."""

    id: int
    creator_id: int
    course_id: int
    done: bool
    created: datetime
    modified: datetime

    @validator('created', 'modified', allow_reuse=True)
    def convert_datetime(cls, value: datetime):
        # Render timestamps as ISO-8601 strings in the response body.
        return value.isoformat()
class CourseRelationUpdate(Schema):
    """Payload for toggling the completion flag of a relation."""

    done: bool
| gabrielustosa/educa | educa/apps/course/schema.py | schema.py | py | 2,173 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ninja.Schema",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "ninja.Schema",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"l... |
73374112673 | from http import HTTPStatus
import django.test
import django.urls
__all__ = []
class StaticURLTests(django.test.TestCase):
    """Smoke tests for the homepage app endpoints."""

    def test_homepage_endpoint(self):
        """The home page responds with HTTP 200."""
        client = django.test.Client()
        home_url = django.urls.reverse("homepage:home")
        response = client.get(home_url)
        self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_coffee_endpoint(self):
        """The coffee page answers 418 and identifies itself as a teapot."""
        client = django.test.Client()
        coffee_url = django.urls.reverse("homepage:coffee")
        response = client.get(coffee_url)
        self.assertIn("Я чайник", response.content.decode("utf-8"))
        self.assertEqual(response.status_code, HTTPStatus.IM_A_TEAPOT)
| xtern0o/educational_django_project_yandex | lyceum/homepage/tests.py | tests.py | py | 623 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.test.test",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.test",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.test.test.Client",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.test... |
25744704351 | import datetime
import os
import platform
class Clock:
    """Wall clock with an alarm, cross-platform beep and a simple stopwatch."""

    def __init__(self):
        # Stopwatch counter in epoch seconds; 10800 (= 3 h) presumably renders
        # as 00:00:00 in a UTC+3 locale via fromtimestamp -- TODO confirm intent.
        self._stopwatch_counter_num = 10800
        self._running = None
        self._alarm_minute = None
        self._alarm_hour = None
        self._seconds = None
        self._minutes = None
        self._hour = None
        self._alarm_time = None
        self._time = None
        self._date = None

    def get_time(self):
        """Refresh the cached time fields and return 'HH:MM:SS'."""
        self._time = datetime.datetime.now().strftime("%H:%M:%S")
        self._hour, self._minutes, self._seconds = self._time.split(':')
        return self._time

    def get_date(self):
        """Refresh the cached date and return 'DD/MM/YYYY'."""
        self._date = datetime.datetime.now().strftime("%d/%m/%Y")
        return self._date

    def clock_loop(self):
        """Continuously refresh the time (blocking; never returns)."""
        while True:
            self.get_time()

    def print_time(self):
        """Print the current time and date."""
        self.get_time()
        self.get_date()
        print(self._time, self._date)

    def alarm(self, alarm_time):
        """Arm an alarm at *alarm_time* ('HH:MM') and block until it fires."""
        self._alarm_time = alarm_time
        self._alarm_hour, self._alarm_minute = self._alarm_time.split(':')
        self.alarm_loop()

    def alarm_check(self):
        """Return True (and beep) when the cached time matches the alarm."""
        if self._hour == self._alarm_hour and self._minutes == self._alarm_minute:
            self.beep()
            return True
        else:
            return False

    @staticmethod
    def beep():
        """Emit three audible notifications using a per-platform mechanism."""
        for i in range(3):
            if platform.system() == 'Windows':
                # Bug fix: winsound was referenced without ever being imported,
                # raising NameError on Windows.  Import lazily here because the
                # module only exists on Windows installations.
                import winsound
                winsound.Beep(5000, 1000)
            elif platform.system() == 'Darwin':
                os.system('say Time is Up')
            elif platform.system() == 'Linux':
                os.system('beep -f 5000')

    def alarm_loop(self):
        """Poll the clock until the alarm condition is met."""
        while self.alarm_check() is False:
            self.get_time()
            self.alarm_check()

    def stopwatch(self, running):
        """Advance the stopwatch one second and return it as 'HH:MM:SS'.

        Returns None when *running* is falsy; the loop body runs at most once
        per call (it returns on the first iteration).
        """
        self._running = running
        while self._running:
            tt = datetime.datetime.fromtimestamp(self._stopwatch_counter_num)
            self._stopwatch_counter_num += 1
            return tt.strftime("%H:%M:%S")

    def reset_stopwatch(self):
        """Restore the stopwatch to its initial counter value."""
        self._stopwatch_counter_num = 10800
| bailerG/clock_python_app | main.py | main.py | py | 2,054 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "da... |
43816202446 | import numpy as np
from collections import defaultdict
from scipy import sparse
def trans_matrice(L, D, vois):
    """Build the sparse transition matrix of a random walk on the cluster.

    L is the list of occupied sites, D maps a site to its index in L (-1 for
    sites outside the cluster) and vois maps a site to its neighbour list.
    Entry (D[neighbour], i) receives 1/deg(L[i]) for every valid neighbour.
    """
    rows, cols, vals = [], [], []
    for col, site in enumerate(L):
        neighbours = vois[site]
        if not neighbours:
            continue
        weight = 1. / len(neighbours)
        for neighbour in neighbours:
            row = D[neighbour]
            if row >= 0:
                rows.append(row)
                cols.append(col)
                vals.append(weight)
    size = len(L)
    coords = (np.array(rows), np.array(cols))
    return sparse.csc_matrix((np.array(vals), coords), shape=(size, size))
import sys

# Command-line arguments: n = cluster size, K = percolation parameter
# used in the data-file names.
n=int(sys.argv[1])
K=int(sys.argv[2])
# vois maps each lattice site to the list of its neighbouring sites.
vois=defaultdict(list)
filename = 'percLeo1000/cl_sel1000/perc_voisins_L_1000_'+str(K)+'.dat'
Tm = np.loadtxt(filename)[:]
print(Tm)
# Each row of the neighbour file is a (site, neighbour) pair; cast to int.
for k in range(len(Tm)):
    Tm[k]=[int(Tm[k][i]) for i in range(2)]
for i in range(len(Tm)):
    vois[Tm[i][0]].append(Tm[i][1])
# t = number of walk steps evaluated per realisation.
t=100000
L=np.zeros(t)
Err=np.zeros(t)
# Accumulate statistics over 10^4 percolation realisations.
for r in range(1,10**4+1):
    p_k=[]
    pos_list=[]
    filename = 'percLeo1000/perc/data_n'+str(n)+'_'+str(K)+'/sites'+str(r)+'.dat'
    Tm = np.loadtxt(filename)[:]
    for a in Tm:
        pos_list.append(a)
    filename = 'percLeo1000/perc/data_n'+str(n)+'_'+str(K)+'/end'+str(r)+'.dat'
    c1 = np.loadtxt(filename)
    # Index of the walk's starting site within this realisation.
    i0=pos_list.index(c1)
    # Default factory returning -1 marks sites absent from the cluster.
    def moins_un():
        return(-1)
    dic=defaultdict(moins_un)
    for i in range(n):
        dic[pos_list[i]]=i
    M=trans_matrice(pos_list,dic,vois)
    # P: walker's probability distribution, initially a delta at i0.
    P=np.zeros(n)
    P[i0]=1
    p_k=[1]
    S=0
    for k in range(t):
        P=M.dot(P)
        p_k.append(np.sum(P))
    # Per-step probability loss: difference of successive survival sums.
    p_k=np.array(p_k)
    p_k=p_k[:-1]-p_k[1:]
    L+=p_k
    Err+=p_k**2
    # Checkpoint the running sums every 100 realisations.
    if r%10**2==0:
        np.save('percolation/'+str(n)+'/Probabilities_n'+str(n)+', '+str(K),[L,Err])
| LeoReg/UniversalExplorationDynamics | Fig3/Percolation_exact_enum.py | Percolation_exact_enum.py | py | 1,605 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "scipy.sparse.csc_matrix",
"li... |
19815836754 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.timezone import get_current_timezone_name
from cms.cache import _get_cache_version, _set_cache_version, _clean_key, _get_cache_key
from cms.utils import get_cms_setting
def _placeholder_cache_key(placeholder, lang):
    """Build the cache key for a rendered placeholder in a given language."""
    key = '{0}render_placeholder:{1}.{2}'.format(
        get_cms_setting("CACHE_PREFIX"), placeholder.pk, str(lang))
    if settings.USE_TZ:
        # Keys must stay ASCII-safe, so strip exotic characters and spaces.
        tz = force_text(get_current_timezone_name(), errors='ignore')
        key += '.{0}'.format(
            tz.encode('ascii', 'ignore').decode('ascii').replace(' ', '_'))
    return key
def set_placeholder_cache(placeholder, lang, content):
    """Store the rendered *content* of a placeholder in the cache."""
    from django.core.cache import cache
    key = _placeholder_cache_key(placeholder, lang)
    duration = get_cms_setting('CACHE_DURATIONS')['content']
    cache.set(key, content, duration, version=_get_cache_version())
    # Refresh the shared cache-version marker.
    _set_cache_version(_get_cache_version())
def get_placeholder_cache(placeholder, lang):
    """Return the cached rendering of a placeholder (None when absent)."""
    from django.core.cache import cache
    key = _placeholder_cache_key(placeholder, lang)
    return cache.get(key, version=_get_cache_version())
def clear_placeholder_cache(placeholder, lang):
    """Drop the cached rendering of a placeholder."""
    from django.core.cache import cache
    key = _placeholder_cache_key(placeholder, lang)
    cache.delete(key, version=_get_cache_version())
def _placeholder_page_cache_key(page_lookup, lang, site_id, placeholder_name):
    """Build the cache key for a placeholder shown via page lookup."""
    prefix = _get_cache_key('_show_placeholder_for_page',
                            page_lookup, lang, site_id)
    return _clean_key('{0}_placeholder:{1}'.format(prefix, placeholder_name))
def get_placeholder_page_cache(page_lookup, lang, site_id, placeholder_name):
    """Return the cached page-level placeholder content (None when absent)."""
    from django.core.cache import cache
    key = _placeholder_page_cache_key(page_lookup, lang, site_id,
                                      placeholder_name)
    return cache.get(key, version=_get_cache_version())
def set_placeholder_page_cache(page_lookup, lang, site_id, placeholder_name, content):
    """Store page-level placeholder *content* in the cache."""
    from django.core.cache import cache
    key = _placeholder_page_cache_key(page_lookup, lang, site_id,
                                      placeholder_name)
    duration = get_cms_setting('CACHE_DURATIONS')['content']
    cache.set(key, content, duration, version=_get_cache_version())
    # Refresh the shared cache-version marker.
    _set_cache_version(_get_cache_version())
| farhan711/DjangoCMS | cms/cache/placeholder.py | placeholder.py | py | 2,388 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "cms.utils.get_cms_setting",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.USE_TZ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 11,
"usage_type": "name"
},
{
"a... |
74359950112 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/11/9 0:05
# @Author : SELF-T-YY
# @Site :
# @File : classical_sample_ambiguous_shortestPath.py
# @Software: PyCharm
import json
import networkx as nx
import sys
import numpy
fileWritePath = r'../data_forSystem/ieee_visC/IV_ambiguousBetweennessData.json'
fileOriDataPath = r'../data_forSystem/ieee_visC/IVxy.json'
# Sampling algorithms and sampling rates (percent) to evaluate.
sampleNameList = ['ISRW', 'TIES', 'SRW', 'RES', 'RJ', 'RNS']
sampleRateList = ['5', '10', '15', '20', '25', '30', '35', '40']
with open(fileOriDataPath) as f:
    oriData = json.load(f)
tempEdgesList = oriData['edges']
edgesList = [[_['source'], _['target']] for _ in tempEdgesList]
tempNodesList = oriData['nodes']
nodesList = [_['id'] for _ in tempNodesList]
G = nx.Graph()
G.add_edges_from(edgesList)
oriShortestPathData = {}
# Compute the average shortest-path length of the original data.
for node in nodesList:
    length = dict(nx.single_target_shortest_path_length(G, node))
    sum = 0
    for _ in length:
        sum += length[_]
    oriShortestPathData[node] = sum / len(length)
ambiguousData = {}
for sampleName in sampleNameList:
    tempAmbiguousData_addName_addRate = {}
    for rate in sampleRateList:
        filePath = r'../data_forSystem/ieee_visC/forceData/IV_forceData_' + sampleName + '_rate_' + rate + '.json'
        print(filePath)
        sampleRate = 'rate-' + rate
        with open(filePath) as f1:
            sampleData = json.load(f1)
        tempEdgesList = sampleData['edges']
        edgesList = [[_['source'], _['target']] for _ in tempEdgesList]
        tempNodesList = sampleData['nodes']
        nodesList = [_['id'] for _ in tempNodesList]
        G = nx.Graph()
        G.add_edges_from(edgesList)
        shortestPath = {}
        for node in nodesList:
            if not G.has_node(node):
                continue
            length = dict(nx.single_target_shortest_path_length(G, node))
            sum = 0
            for _ in length:
                sum += length[_]
            shortestPath[node] = sum/len(length)
        # Compute the ambiguity (difference to the original values).
        tempAmbiguousData = {_: oriShortestPathData[_] - shortestPath[_] for _ in
                             shortestPath
                             if _ in oriShortestPathData}
        # Normalisation to [0, 1].
        maxVal = 0
        minVal = 100000000
        for _ in tempAmbiguousData:
            maxVal = max(maxVal, tempAmbiguousData[_])
            minVal = min(minVal, tempAmbiguousData[_])
        tempAmbiguousData = {_: (tempAmbiguousData[_] - minVal) / (maxVal - minVal) for _ in tempAmbiguousData}
        tempAmbiguousData_addName_addRate[sampleRate] = tempAmbiguousData
    ambiguousData[sampleName] = tempAmbiguousData_addName_addRate
fw = open(fileWritePath, 'w+')
fw.write(json.dumps(ambiguousData))
fw.close() | ShenXilong2000/newSystem | python/classical_sample_ambiguous_shortestPath.py | classical_sample_ambiguous_shortestPath.py | py | 3,102 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "networkx.Graph",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "networkx.single_target_shortest_path_length",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "... |
# Import the libraries that are used.
from keras.models import load_model
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np

# Read the images using the PIL library; readers may also use other
# libraries, such as OpenCV.
gambar1= Image.open("dataset/mnist/testing/0/img_108.jpg")
gambar2= Image.open("dataset/mnist/testing/1/img_0.jpg")

# Convert to numpy ndarrays.
gambar1 = np.asarray(gambar1)
gambar2 = np.asarray(gambar2)

# Load the model that was trained previously.
model = load_model('./modelLeNet5.h5')

# Predict the images with the model.
# Remember: the intensities must be divided by 255 because they were also
# divided by 255 during training; the input format must be
# (batch size, image height, width, depth), hence the reshape below.
pred1 = model.predict_classes((gambar1/255).reshape((1,28,28,1)))
pred2 = model.predict_classes((gambar2/255).reshape((1,28,28,1)))

# Plot the images together with the prediction results.
plt.figure('gambar1')
plt.imshow(gambar1,cmap='gray')
plt.title('pred:'+str(pred1[0]), fontsize=22)
print("prediksi gambar1:", pred1[0])
plt.figure('gambar2')
plt.imshow(gambar2,cmap='gray')
plt.title('pred:'+str(pred2[0]), fontsize=22)
print("prediksi gambar2:", pred2[0])
plt.show()
| ardianumam/Data-Mining-and-Big-Data-Analytics-Book | edisi2/10.5.2 Menggunakan Model yang telah ditraining.py | 10.5.2 Menggunakan Model yang telah ditraining.py | py | 1,272 | python | id | code | 26 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": ... |
6933911036 | import os
import json
import pytz
import logging
from flask_socketio import SocketIO, emit
from flask import (Flask, render_template, request, jsonify)
from pymongo import MongoClient
from threading import Thread, Event
from datetime import datetime
from convxai.utils import *
logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)

app = Flask(__name__)
app.config["SECRET_KEY"] = "liaopi6u123sdfakjb23sd"
# Threading async mode lets socket handlers run alongside the Mongo monitor.
socketio = SocketIO(app, logger=True, engineio_logger=True,
                    async_mode="threading")

mongo = get_mongo_connection()

# Background thread watching MongoDB for agent replies.
thread = Thread()
thread_stop_event = Event()
# Maps inserted message ids to the socket session id that sent them.
task_mapping = {}
# ########################################
# # Set up paths to save log files
# ########################################
# system_config = parse_system_config_file()
# logFileName = "log_" + datetime.now().astimezone(pytz.timezone('US/Eastern')
# ).strftime("%m%d%Y_%H%M%S") + ".txt"
# logFile = open(os.path.join(
# system_config['system']['logfilePath'], logFileName), "a")
########################################
# Threading and MongoDB
########################################
def init_threading() -> None:
    """Start the MongoDB monitoring thread unless it is already running."""
    global thread
    if thread.is_alive():
        return
    thread = Thread(
        name='mongo-monitoring',
        target=mongo_monitoring,
    )
    thread.start()
    logging.info("Starting Mongo Monitoring Thread")
def mongo_monitoring():
    """
    Watch the message collection for agent replies and forward each reply
    to the originating socket session via socketio.

    Blocks on a MongoDB change stream; intended to run in a daemon thread.
    """
    global task_mapping
    # Only react to newly inserted agent messages that answer a user message.
    pipeline = [{
        "$match": {
            "$and": [
                {"operationType": "insert"},
                {"fullDocument.role": "agent"},
                {"fullDocument.reply_to": {"$exists": True}}
            ]
        }
    }]
    with mongo.message.watch(pipeline) as stream:
        for insert_change in stream:
            # reply_to holds the _id of the user message being answered;
            # task_mapping maps it back to the requesting socket session.
            source_message_id = insert_change["fullDocument"]["reply_to"]
            response_text = insert_change["fullDocument"]["text"]
            message_type = insert_change["fullDocument"]["payload"]["message_type"]
            if message_type == "task":
                socketio.emit(
                    "task_response",
                    {"text": response_text},
                    to=task_mapping[source_message_id],
                    namespace="/connection"
                )
            elif message_type == "conv":
                writingIndex = insert_change["fullDocument"]["payload"]["writingIndex"]
                socketio.emit(
                    "conv_response",
                    {"text": response_text, "writingIndex": writingIndex},
                    to=task_mapping[source_message_id],
                    namespace="/connection"
                )
                # Forget the mapping once the reply has been routed
                # (trailing comma is harmless: it makes a one-element del list).
                del task_mapping[source_message_id],
########################################
# Flask Implementation
########################################
def get_data():
    """Decode the JSON body of the current Flask request."""
    raw_body = str(request.data, encoding='utf-8')
    return json.loads(raw_body)
@app.route("/")
def index():
    """Serve the main user-interface page."""
    return render_template("user_interface.html")
@app.route("/init_conversation", methods=["POST"])
def init_conversation():
    """
    Initiate the conversation.

    Inserts the opening user message and returns the new conversation id
    (the inserted document's _id) to the client.
    """
    body = get_data()
    res = mongo.message.insert_one({
        "text": body["text"],
        "role": "user",
        "init": True,  # marks the conversation-opening message
        "time": datetime.now(),
        "conversation_id": body.get("conversation_id"),
    })
    return jsonify({"text": "hello world", "conversation_id": str(res.inserted_id)})
@app.route("/reset")
def reset():
    """
    Reset the interface.

    Returns a JSON acknowledgement; the client performs the actual UI reset.
    """
    # The previous implementation built an unused json.dumps payload here;
    # that dead local has been removed.
    return jsonify({"text": "RESET"})
########################################
# SocketIO Implementation
########################################
@socketio.on('connect', namespace="/connection")
def test_connect():
    """Acknowledge a new socket connection and ensure the monitor runs."""
    emit('connected', {'data': 'Connected'})
    init_threading()
@socketio.on('disconnect', namespace="/connection")
def test_disconnect():
    """Log client disconnections."""
    logging.info('Client disconnected')
@socketio.on("interact", namespace="/connection")
def interact_socket(body):
    """
    Interaction between the web server and interface via socketIO.

    Persists the user message and an editing-log entry, then records which
    socket session sent it so the Mongo monitor can route the agent's reply.
    """
    result = mongo.message.insert_one({
        "text": body["text"],
        "role": "user",
        "init": False,
        "time": datetime.now(),
        "conversation_id": body["conversation_id"],
        "payload": {
            "message_type": body["message_type"],
            "writing_model": body.get("writing_model", None),
        },
        "note": "socket",
        "user_id": body.get("user_id", ""),
    })
    # log text editing results into mongo
    mongo.log.insert_one({
        "user_id": body.get("user_id", ""),
        "time": datetime.now(),
        "text": body["text"],
        "type": body["message_type"],
        "writing_model": body.get("writing_model", None),
        "conversation_id": body["conversation_id"],
    })
    ### Write logs into files ###
    # Remember the requesting session; mongo_monitoring uses this mapping
    # to emit the agent's reply back to the right client.
    task_mapping[result.inserted_id] = request.sid
    # The response* locals below only feed the (currently disabled)
    # file-logging block that follows.
    responseTime = datetime.now().astimezone(
        pytz.timezone('US/Eastern')).strftime("%m-%d-%Y %H:%M:%S")
    responseText = body["text"]
    responseMessageType = body["message_type"]
    responseWritingModel = body.get("writing_model", None)
    # logEntry = responseTime + " Text: " + responseText + " \n\t\tMessageType:" + \
    # responseMessageType + " \n\t\tWritingModel: " + responseWritingModel + "\n"
    # logFile.write(logEntry+"\n")
    # logFile.close()
    # logging.info("Written to file" + logEntry)
@socketio.on("save", namespace="/connection")
def save(body):
    """Persist an editing snapshot to the log collection and acknowledge it."""
    save_mode = body.get("mode", "auto-save")
    res = mongo.log.insert_one({
        "user_id": body.get("user_id", ""),
        "time": datetime.now(),
        "text": body["text"],
        "event_type": save_mode,
    })
    if res:
        emit('save', {"status": "success", "mode": save_mode})
########################################
# Run Flask and SocketIO.
########################################
def run_flask_socketio():
    """
    Run Flask and Websocket.

    Binds on all interfaces, port 8080; blocks until the server stops.
    """
    socketio.run(app, host="0.0.0.0", port=8080, debug=False)
    # for debugging
    # socketio.run(app, host="0.0.0.0", port=8001, debug=True)


if __name__ == "__main__":
    init_threading()
    run_flask_socketio()
| huashen218/convxai | convxai/services/web_service/web_server.py | web_server.py | py | 6,524 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_socketio.Sock... |
19471220856 | """
@file
@brief Data aggregation for timeseries.
"""
import datetime
import pandas
from pandas.tseries.frequencies import to_offset
def _get_column_name(df, name='agg'):
"""
Returns a unique column name not in the existing dataframe.
@param df dataframe
@param name prefix
@return new column name
"""
while name in df.columns:
name += '_'
return name
def aggregate_timeseries(df, index='time', values='y',
                         unit='half-hour', agg='sum',
                         per=None):
    """
    Aggregates timeseries assuming the data is in a dataframe.

    @param      df      dataframe (None to build one from *index*/*values* arrays)
    @param      index   time column
    @param      values  value or values column
    @param      unit    aggregate over a specific period
    @param      agg     kind of aggregation ('sum' or 'norm')
    @param      per     second aggregation, per week...
    @return             aggregated values
    """
    if df is None:
        # Build a dataframe from raw arrays when none is given.
        if len(values.shape) == 1:
            df = pandas.DataFrame(dict(time=index, y=values))
            values = 'y'
        else:
            df = pandas.DataFrame(dict(time=index))
            for i in range(values.shape[1]):
                df['y%d' % i] = values[:, i]
            values = list(df.columns)[1:]
        index = 'time'

    def round_(serie, freq, per):
        # Floor timestamps onto the *freq* grid; optionally re-express them
        # as offsets within a week or a month (as timedeltas).
        fr = to_offset(freq)
        res = pandas.DatetimeIndex(serie).floor(fr)  # pylint: disable=E1101
        if per is None:
            return res
        if per == 'week':
            pyres = res.to_pydatetime()
            return pandas.to_timedelta(
                map(
                    lambda t: datetime.timedelta(
                        days=t.weekday(), hours=t.hour, minutes=t.minute),
                    pyres))
        if per == 'month':
            pyres = res.to_pydatetime()
            return pandas.to_timedelta(
                map(
                    lambda t: datetime.timedelta(
                        days=t.day, hours=t.hour, minutes=t.minute),
                    pyres))
        raise ValueError(  # pragma: no cover
            f"Unknown frequency '{per}'.")

    agg_name = _get_column_name(df)
    df = df.copy()
    if unit == 'half-hour':
        freq = datetime.timedelta(minutes=30)
        df[agg_name] = round_(df[index], freq, per)
    else:
        raise ValueError(  # pragma: no cover
            f"Unknown time unit '{unit}'.")
    if not isinstance(values, list):
        values = [values]
    if agg == 'sum':
        gr = df[[agg_name] + values].groupby(agg_name, as_index=False).sum()
        agg_name = _get_column_name(gr, 'week' + index)
        gr.columns = [agg_name] + list(gr.columns[1:])
    elif agg == 'norm':
        gr = df[[agg_name] + values].groupby(agg_name, as_index=False).sum()
        agg_name = _get_column_name(gr, 'week' + index)
        agg_cols = list(gr.columns[1:])
        gr.columns = [agg_name] + agg_cols
        # Normalise each aggregated column so its values sum to 1.
        for c in agg_cols:
            su = gr[c].sum()
            if su != 0:
                gr[c] /= su
    else:
        raise ValueError(  # pragma: no cover
            f"Unknown aggregation '{agg}'.")
    return gr.sort_values(agg_name).reset_index(drop=True)
| sdpython/mlinsights | mlinsights/timeseries/agg.py | agg.py | py | 3,224 | python | en | code | 65 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.tseries.frequencies.to_offset",
"line_number": 49,
"usage_type": "call"
},
{
"api_name":... |
640941004 | import json
import pytz
import datetime
def lambda_handler(event, context):
    """AWS Lambda entry point: return the US timezones known to pytz."""
    # Restrict to the common (not all) timezones whose name starts with 'US'.
    us_zones = [zone for zone in pytz.common_timezones if zone[:2] == 'US']
    return {
        'statusCode': 200,
        'body': {"TimeZones": us_zones}
    }
| yogeshturerao/deploymentsapis | timezone/timezone.py | timezone.py | py | 382 | python | fa | code | 0 | github-code | 1 | [
{
"api_name": "pytz.common_timezones",
"line_number": 7,
"usage_type": "attribute"
}
] |
72383077154 | import torch
import torch.nn as nn
import torch.nn.functional as F
# Parts of these codes are from: https://github.com/Linfeng-Tang/SeAFusion
class Sobelxy(nn.Module):
    """Per-channel Sobel gradient magnitude |Gx| + |Gy| of an image batch."""

    def __init__(self):
        super(Sobelxy, self).__init__()
        # Horizontal (x) and vertical (y) 3x3 Sobel kernels.
        kernelx = [[-1, 0, 1],
                   [-2, 0, 2],
                   [-1, 0, 1]]
        kernely = [[1, 2, 1],
                   [0, 0, 0],
                   [-1, -2, -1]]
        # Reshape to conv2d weight layout (out_ch=1, in_ch=1, 3, 3).
        kernelx = torch.FloatTensor(kernelx).unsqueeze(0).unsqueeze(0)
        kernely = torch.FloatTensor(kernely).unsqueeze(0).unsqueeze(0)
        # Fixed (non-trainable) weights, placed on the GPU.
        self.weightx = nn.Parameter(data=kernelx, requires_grad=False).cuda()
        self.weighty = nn.Parameter(data=kernely, requires_grad=False).cuda()

    def forward(self, x):
        # x: 4-D batch (b, c, w, h); each channel is convolved independently
        # with both kernels, then |Gx| + |Gy| is accumulated.
        b, c, w, h = x.shape
        batch_list = []
        for i in range(b):
            tensor_list = []
            for j in range(c):
                sobelx_0 = F.conv2d(torch.unsqueeze(torch.unsqueeze(x[i, j, :, :], 0), 0), self.weightx, padding=1)
                sobely_0 = F.conv2d(torch.unsqueeze(torch.unsqueeze(x[i, j, :, :], 0), 0), self.weighty, padding=1)
                add_0 = torch.abs(sobelx_0) + torch.abs(sobely_0)
                tensor_list.append(add_0)
            # Stack channels back together for this batch element.
            batch_list.append(torch.stack(tensor_list, dim=1))
        return torch.cat(batch_list, dim=0)
class Fusionloss(nn.Module):
    """Intensity and gradient losses for infrared/visible image fusion."""

    def __init__(self):
        super(Fusionloss, self).__init__()
        self.sobelconv = Sobelxy()
        # NOTE(review): mse_criterion is defined but not used in forward().
        self.mse_criterion = torch.nn.MSELoss()

    def forward(self, image_vis, image_ir, generate_img):
        # image_vis is treated as the luminance (Y) channel of the visible image.
        image_y = image_vis
        B, C, W, H = image_vis.shape
        image_ir = image_ir.expand(B, C, W, H)
        # Intensity term: fused image should match the element-wise max of inputs.
        x_in_max = torch.max(image_y, image_ir)
        loss_in = F.l1_loss(generate_img, x_in_max)
        # Gradient
        y_grad = self.sobelconv(image_y)
        ir_grad = self.sobelconv(image_ir)
        B, C, K, W, H = y_grad.shape
        ir_grad = ir_grad.expand(B, C, K, W, H)
        generate_img_grad = self.sobelconv(generate_img)
        # Gradient term: fused gradients should match the stronger of the two.
        x_grad_joint = torch.maximum(y_grad, ir_grad)
        loss_grad = F.l1_loss(generate_img_grad, x_grad_joint)
        return loss_in, loss_grad
| GeoVectorMatrix/Dif-Fusion | models/fs_loss.py | fs_loss.py | py | 2,182 | python | en | code | 33 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.FloatTensor",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
... |
20939299113 | # ========================
# Panda3d - panda3d_gpu.py
# ========================
# Panda3d imports.
from panda3d.core import NodePath, ClockObject, Filename, Texture
from panda3d.core import Shader, ShaderAttrib, PNMImage
from panda3d.core import LVector3i
'''# Local imports.
from etc import _path'''
# Basic Timer object for local script invocation.
class TimeIt:
    """Context manager that times a block using Panda3D's ClockObject."""

    def __init__(self, msg=""):
        # msg: optional label; when non-empty a timing report is printed on exit.
        self.__msg = msg
        self.__clock = ClockObject()

    def __enter__(self):
        self.__start_dt = self.__clock.getRealTime()
        return self

    def __exit__(self, *e_info):
        self.__dur = round(self.__clock.getRealTime()-self.__start_dt, 3)
        if self.__msg:
            print()
            print("{}: {}".format(self.__msg, self.__dur, 3))
            # Also report any extra numeric attributes the caller attached,
            # skipping this class's own name-mangled ("_TimeIt__*") fields.
            for attr in self.__dict__:
                if attr.startswith("_TimeIt__"): continue
                print("  {}: {}".format(attr, round(self.__dict__[attr], 3)))
        # Expose the measured duration to callers after the with-block.
        self.total_time = self.__dur
class GPU_Image:
    """Runs GLSL compute-shader image operations from a shared shader library.

    Each '#ifdef <name>' block in the library file becomes a callable method
    on this object (see __Setup); calling it dispatches a compute shader over
    the reference image and returns the modified Texture.
    """

    # Path to the GLSL library containing one #ifdef/#endif block per function.
    lib_path = "/e/dev/solex/gpu/gpu_image_lib.glsl"

    def __init__(self, ref_img,
                 workgroup_size = LVector3i(32,32,1),
                 img_format = Texture.FRgba8,
                 print_times = False):
        self.workgroup_size = workgroup_size
        self.img_format = img_format
        self.x_size = ref_img.getXSize()
        self.y_size = ref_img.getYSize()
        self.z_size = 1
        # Accumulated timings (seconds) for the three processing phases.
        self.prepare_time = 0
        self.process_time = 0
        self.extract_time = 0
        self.__NP = NodePath("gpu")
        # NOTE(review): relies on the ShowBase global 'base' being available.
        self.__gsg = base.win.getGsg()
        self.__ref_tex = self.__get_Texture(ref_img)
        self.__LINES = self.__Setup()
        self.__print_times = print_times

    def __enter__(self):
        self.process_time = 0
        self.extract_time = 0
        return self

    def __exit__(self, *e_info):
        # On exit, optionally print a breakdown of where time was spent.
        if self.__print_times:
            total_time = round(self.prepare_time+self.process_time+self.extract_time, 3)
            prep_time = round(self.prepare_time, 3)
            proc_time = round(self.process_time, 3)
            extr_time = round(self.extract_time, 3)
            print()
            print("GPU_Image total time: {}".format(total_time))
            print("  prepare: {} ({}%)".format(prep_time, round(prep_time/total_time*100),2))
            print("  process: {} ({}%)".format(proc_time, round(proc_time/total_time*100),2))
            print("  extract: {} ({}%)".format(extr_time, round(extr_time/total_time*100),2))

    def __get_Texture(self, ref_img):
        # Convert ref_img into texture.
        with TimeIt() as prep_timer:
            ref_tex = Texture()
            # Ensure ref image has an alpha channel.
            if not ref_img.hasAlpha():
                ref_img.addAlpha()
                ref_img.alphaFill(1.0)
            # Load tex and set format
            ref_tex.load(ref_img)
            ref_tex.setFormat(self.img_format)
        self.prepare_time += round(prep_timer.total_time, 3)
        return ref_tex

    def __Setup(self):
        """Prepares GPU_Image obj to receive python calls to
        the shader library."""
        # Open the shader as a file and get lines so we
        # can extract some setup info from it.
        shader_os_path = Filename(self.lib_path).toOsLongName()
        with open(shader_os_path, "r") as shader_file:
            lines = list(shader_file.readlines())
        # Extract lib function names.
        for line in lines:
            # Each function within the image_lib is defined within the confines
            # of an "#ifdef/#endif" block; the name we need immediately follows
            # the "#ifdef" keyword. This name gets mapped directly to "self"
            # as an alias for "self.__Call" so that the user can simply call
            # the shader function as though it were a regular method of "self".
            if line.startswith("#ifdef"):
                func_name = line.split(" ")[1].strip()
                # Setup callback that redirects to self.__Call each time
                # "self.<func_name> is called, passing along arguments;
                # return the modified image (as a Texture object).
                def call(func_name=func_name, **kwargs):
                    mod_tex = self.__Call(str(func_name), **kwargs)
                    return mod_tex
                # Map "func_name" directly to "self".
                self.__dict__[func_name] = call
        # Add workgroup size layout declaration.
        wg = self.workgroup_size
        wg_str = "layout (local_size_x={}, local_size_y={}) in;\n"
        wg_line = wg_str.format(wg.x, wg.y)
        lines.insert(8, wg_line)
        return lines

    def __Call(self, func_name, **kwargs):
        """Receive python call and redirect request to relevant
        function in image shader library; return modified image."""
        # Copy self.__Lines (need to keep orig for further calls) and
        # add "#define" statement to top to trigger compilation of
        # relevant "#ifdef/def" function block in shader.
        lines = list(self.__LINES)
        lines.insert(2, "#define {}".format(func_name))
        # Assemble lines into shader str and compile.
        shader_str = "".join(lines)
        self.__NP.setShader(Shader.makeCompute(Shader.SL_GLSL, shader_str))
        # Set block size from workgroup size.
        block_x = int(self.x_size/self.workgroup_size.x)
        block_y = int(self.y_size/self.workgroup_size.y)
        block_z = int(self.z_size/self.workgroup_size.z)
        block_size = LVector3i(block_x,block_y,block_z)
        # Create mod_tex for GPU.
        with TimeIt() as prep_timer:
            mod_img = PNMImage(self.x_size, self.y_size, 4)
            mod_tex = Texture()
            mod_tex.load(mod_img)
            mod_tex.setMinfilter(Texture.FTLinear)
            mod_tex.setFormat(self.img_format)
        self.prepare_time += prep_timer.total_time
        # Pass textures to shader.
        self.__NP.setShaderInput("ref_tex", self.__ref_tex)
        self.__NP.setShaderInput("mod_tex", mod_tex)
        # Set any additional required inputs for this function.
        for input_name, input_val in list(kwargs.items()):
            if type(input_val) == PNMImage:
                input_val = self.__get_Texture(input_val)
            self.__NP.setShaderInput(input_name, input_val)
        # Call function in shader library.
        shader_attrib = self.__NP.getAttrib(ShaderAttrib)
        with TimeIt() as proc_timer:
            base.graphicsEngine.dispatch_compute(block_size, shader_attrib, self.__gsg)
        self.process_time += proc_timer.total_time
        # Extract modified texture from GPU.
        with TimeIt() as extract_timer:
            base.graphicsEngine.extractTextureData(mod_tex, self.__gsg)
        self.extract_time += extract_timer.total_time
        return mod_tex
| svfgit/solex | gpu/panda3d_gpu.py | panda3d_gpu.py | py | 7,117 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "panda3d.core.ClockObject",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "panda3d.core.LVector3i",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "panda3d.core.Texture.FRgba8",
"line_number": 40,
"usage_type": "attribute"
},
{
"... |
24965986916 | import io
import pandas as pd
import requests
from mage_ai.data_preparation.shared.secrets import get_secret_value
if 'data_loader' not in globals():
from mage_ai.data_preparation.decorators import data_loader
if 'test' not in globals():
from mage_ai.data_preparation.decorators import test
@data_loader
def load_data_from_api(*args, **kwargs):
    """
    Load Premier League (league 39, England) team ids from api-sports
    for the 2021 and 2022 seasons.

    Returns:
        pd.DataFrame with columns ['Team ID', 'Year'], one row per team
        per season.
    """
    api_key = get_secret_value('API_Key')
    # api-sports authenticates via this request header.
    headers = {'x-apisports-key': api_key}

    years = ['2021', '2022']
    team_ids = []  # renamed from `id`, which shadowed the builtin
    for year in years:
        url = f'https://v3.football.api-sports.io/teams?country=England&league=39&season={year}'
        response = requests.get(url, headers=headers)
        # Fail fast on HTTP errors instead of parsing an error payload.
        response.raise_for_status()
        data = response.json()
        for entry in data['response']:
            # Each entry is {'team': {...}, 'venue': {...}}; we only need
            # the team id. The original walked every key with nested loops.
            team = entry.get('team', {})
            if 'id' in team:
                team_ids.append([team['id'], year])
    return pd.DataFrame(team_ids, columns=['Team ID', 'Year'])
@test
def test_output(output, *args) -> None:
    """Sanity-check the block's result: it must not be None."""
    failure_message = 'The output is undefined'
    assert output is not None, failure_message
| WillowyBoat2388/football-analytics | data_loaders/teams_id.py | teams_id.py | py | 1,368 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "mage_ai.data_preparation.shared.secrets.get_secret_value",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "call"
},
{... |
36865815414 | #!/usr/bin/env python
# coding: utf-8
# In[174]:
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from itertools import combinations
# ### Part 1: Get the wine dataset¶
# Describe the dataset
#
# Load in the training and test splits of the dataset
# In[2]:
df_train = pd.read_csv('/Users/eiwi/Practical-ML-DS/Chapter 1. Machine Learning Foundations/DATA/winequality-red-train.csv',index_col=0)
df_test = pd.read_csv('/Users/eiwi/Practical-ML-DS/Chapter 1. Machine Learning Foundations/DATA/winequality-red-test.csv', index_col=0)
# In[3]:
# The charts showing the relationship between each feature and the target
fig, ax = plt.subplots(len(df_train.columns),1, sharey = True, figsize = (18,18))
for i in range(len(df_train.columns)):
current_col = df_train[df_train.columns[i]]
ax[i].barh(df_train['quality'], current_col)
ax[i].set_xlabel(str(df_train.columns[i]))
ax[i].set_ylabel('quality')
plt.show()
# In[4]:
f, ax = plt.subplots(figsize=(12, 10))
ax = sns.heatmap(df_train.corr(), annot=True)
# In[5]:
print("All the features given in this dataset are numeric.")
print('The most correlated features are: fixed acidity, density and citric acid, total sulfur dioxide and free sulfur dioxide, negatively correlated: fixed acidity and pH, citric and volatile acidity.')
# In[6]:
print('The Quality paremeter is mostly correlated to alcohol (positively) and volatile acidity (negatively).')
# In[7]:
# Separate target and predictors:
# In[8]:
target_train = df_train.pop('quality')
target_test = df_test.pop('quality')
# In[9]:
## Further split train dataset into train and validation sets
X_train, X_validation, y_train, y_validation = train_test_split(df_train, target_train, test_size=0.33, random_state=42)
# In[10]:
# Set the indices in the new datasets as the sequential range of numbers
def reset_indices(df, target):
    """Reset both the feature frame and its target to a fresh 0..n-1 index.

    The original built an explicit position list, wrote it into a temporary
    'index' column, set/dropped it, and mutated the inputs in place. pandas
    provides this directly via reset_index(drop=True), which also avoids
    mutating the caller's objects.

    Parameters:
        df: pd.DataFrame of features.
        target: pd.Series (or DataFrame) of labels aligned with df.

    Returns:
        (df, target) re-indexed with a clean RangeIndex.
    """
    df = df.reset_index(drop=True)
    target = target.reset_index(drop=True)
    return df, target
# In[11]:
X_train, y_train = reset_indices(X_train, y_train)
# In[14]:
X_validation, y_validation = reset_indices(X_validation, y_validation)
# In[15]:
X_test, y_test = reset_indices(df_test, target_test)
# In[16]:
print(len(X_train))
print(len(y_train))
print(len(X_validation))
print(len(y_validation))
print(len(X_test))
print(len(y_test))
# In[17]:
print('The data: train set of {} rows, validation set of {} rows, test set of {} rows.'.format(len(X_train), len(X_validation), len(df_test)), end = '\n')
print('Number of predictors: {}. List of predictors: {}. Target: {}. Possible classes : {}.'.format(len(X_train.columns), ', '.join(X_train.columns), 'column "quality"', str(set(target_train))), end = '\n')
print('Missing values for train set: {}, for validation set: {}, for test set: {}.'.format(X_train.isna().sum().sum(), X_validation.isna().sum().sum(), df_test.isna().sum().sum()))
# In[18]:
# The data requires scaling due to the difference in units:
X_train.describe()
# In[19]:
# Data scaling
scaler = StandardScaler()
X_train_std = pd.DataFrame(scaler.fit_transform(X_train), columns = X_train.columns)
X_validation_std = pd.DataFrame(scaler.fit_transform(X_validation), columns = X_validation.columns)
X_test_std = pd.DataFrame(scaler.fit_transform(df_test), columns = df_test.columns)
# In[20]:
fig, ax = plt.subplots(1,2, figsize = (12,8))
ax[0].plot(X_train)
ax[0].set_title('Features before scaling')
ax[1].plot(X_train_std)
ax[1].set_title('Features after scaling')
plt.legend(X_train.columns, loc="lower center", bbox_to_anchor=(0, -0.3), ncol= 3)
plt.show()
# In[21]:
# Now, the data is ready to be fed to the model.
# I will treat this as a classification problem and will try several basic models to
# get an idea of the most suitable algorithm and hyper parameters.
# ### Part 2: Fit models to the wine dataset and test performance
# Make sure you are comfortable with passing the data through a model.
#
# Evaluate the performance of the model.
#
# Make sure you are testing it on the right set of data.
#
# In[23]:
results_dict = {'model':[],
'params':[],
'train_score':[],
'validation_score':[]
}
# In[24]:
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
models_lst = [
LogisticRegression(random_state=42),
SVC(gamma=2, random_state=42),
MLPClassifier(max_iter=1000, random_state=42),
KNeighborsClassifier(n_jobs=-1),
DecisionTreeClassifier(random_state=42),
]
# In[25]:
params = [
{'penalty':('l1','l2', 'elasticnet', 'none'), 'C':(0.025, 0.01,1,10), 'solver': ('newton-cg', 'lbfgs', 'liblinear','sag','saga')},
{'C':(0.025, 0.01,1,10), 'kernel':('linear', 'rbf'), 'decision_function_shape':('ovr','ovo')},
{'activation':('relu', 'logistic'), 'solver':('sgd','lbfgs'), 'alpha':(0.001,0.01,0.1,1), 'learning_rate': ('adaptive','constant')},
{'n_neighbors':(3,5,10), 'weights':('distance','uniform'), 'algorithm':('auto','ball_tree','kd_tree')},
{'criterion':('gini','entropy'), 'splitter':('best','random'),'max_depth':(5,10,25), 'min_samples_split':(2,3,5)}
]
# In[26]:
for i in range(len(models_lst)):
model = models_lst[i]
parameters = params[i]
grid_search_model = GridSearchCV(model, parameters)
grid_search_model.fit(X_train,y_train)
train_score = grid_search_model.best_score_
validation_score = grid_search_model.best_estimator_.score(X_validation,y_validation)
results_dict['model'].append(grid_search_model.best_estimator_)
results_dict['params'].append(grid_search_model.best_params_)
results_dict['train_score'].append(train_score)
results_dict['validation_score'].append(validation_score)
# In[27]:
results_dict
# In[28]:
classifiers_comparison = pd.DataFrame.from_dict(results_dict)
# In[29]:
classifiers_comparison.to_csv('/Users/eiwi/Practical-ML-DS/Chapter 1. Machine Learning Foundations/PROJECT/classifiers_comparison.csv')
# In[30]:
classifiers_comparison
# In[31]:
best_train_score = classifiers_comparison['train_score'].max()
print('best_train_score: ', best_train_score)
print('best_train_score_model: ', classifiers_comparison['model'][classifiers_comparison['train_score']==best_train_score], end = '\n')
print('\n')
second_best_train_score = list(classifiers_comparison['train_score'].sort_values(ascending = False))[1]
print('second_best_train_score: ', second_best_train_score)
print('second_best_train_score_model: ', classifiers_comparison['model'][classifiers_comparison['train_score']==second_best_train_score], end = '\n')
print('\n')
best_validation_score = classifiers_comparison['validation_score'].max()
print('best_validation_score: ', best_validation_score)
print('best_validation_score_model: ', classifiers_comparison['model'][classifiers_comparison['validation_score']==best_validation_score], end = '\n')
print('\n')
second_best_validation_score = list(classifiers_comparison['validation_score'].sort_values(ascending = False))[1]
print('second_best_validation_score: ', second_best_validation_score)
print('second_best_validation_score_model: ', classifiers_comparison['model'][classifiers_comparison['validation_score']==second_best_validation_score], end = '\n')
# In[32]:
print('So far the best models were {} and {}, so I will try to combine them into an ensemble.'.format('DecisionTreeClassifier', 'KNeighborsClassifier' ))
# ### Part 3: Improve your performance by ensembling some of your models
# Combine the results of more than one model
#
# Evaluate the performance of the ensemble
# In[138]:
from sklearn.ensemble import VotingClassifier, StackingClassifier, AdaBoostClassifier, BaggingClassifier, RandomForestClassifier
# In[139]:
ensembles_results = {
'ensemble':[],
'train_score':[],
'validation_score':[]
}
# In[140]:
# First, I will combine DecisionTreeClassifier + KNeighborsClassifier
# using ensemble called VotingClassifier a) with hard vote b) with soft vote
# In[141]:
# a) VotingClassifier with a hard vote: the classifier uses
# predicted class labels for majority rule voting.
# In[142]:
knn_estimator = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=-1, n_neighbors=10, p=2)
dtc_estimator = DecisionTreeClassifier(ccp_alpha=0.0, class_weight=None, criterion='entropy',
max_depth=25, max_features=None, max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=1, min_samples_split=3,
min_weight_fraction_leaf=0.0,splitter='best')
# In[143]:
hard_voting_ensemble = VotingClassifier(estimators=[('knn_estimator',knn_estimator), ('dtc_estimator',dtc_estimator)],
voting='hard')
# In[144]:
hard_voting_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('VotingClassifier_hard')
ensembles_results['train_score'].append(hard_voting_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(hard_voting_ensemble.score(X_validation,y_validation))
# In[145]:
# b) VotingClassifier with a soft vote: predicts the class label based on the argmax
# of the sums of the predicted probabilities
# In[146]:
soft_voting_ensemble = VotingClassifier(estimators=[('knn_estimator',knn_estimator), ('dtc_estimator',dtc_estimator)],
voting='soft')
# In[147]:
soft_voting_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('VotingClassifier_soft')
ensembles_results['train_score'].append(soft_voting_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(soft_voting_ensemble.score(X_validation,y_validation))
# In[148]:
# Secondly, I will use StackingClassifier with the same DecisionTreeClassifier + KNeighborsClassifier
stacking_ensemble = StackingClassifier(estimators=[('knn_estimator',knn_estimator), ('dtc_estimator',dtc_estimator)])
stacking_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('StackingClassifier')
ensembles_results['train_score'].append(stacking_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(stacking_ensemble.score(X_validation,y_validation))
# In[149]:
# Compare StackingClassifier with final AdaBoostClassifier
ada_stacking_ensemble = StackingClassifier(estimators=[('knn_estimator',knn_estimator),
('dtc_estimator',dtc_estimator)],
final_estimator=None,
stack_method='auto',
n_jobs=-1)
ada_stacking_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('StackingClassifier_AdaBoost')
ensembles_results['train_score'].append(ada_stacking_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(ada_stacking_ensemble.score(X_validation,y_validation))
# In[150]:
# Then compare ensemble perfomance with a BaggingClassifier
# using multiple KNeighborsClassifiers OR DecisionTreeClassifiers
# In[151]:
# a) BaggingClassifier with DecisionTreeClassifier
# In[152]:
dtc_bagging_ensemble = BaggingClassifier(base_estimator = dtc_estimator,
n_estimators=100,
random_state=42)
dtc_bagging_ensemble.fit(X_train, y_train)
# In[153]:
ensembles_results['ensemble'].append('DTC_BaggingClassifier')
ensembles_results['train_score'].append(dtc_bagging_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(dtc_bagging_ensemble.score(X_validation,y_validation))
# In[154]:
# b) BaggingClassifier with KNeighborsClassifiers
# In[155]:
knn_bagging_ensemble = BaggingClassifier(base_estimator=knn_estimator,
n_estimators=100,
random_state=42)
# In[156]:
knn_bagging_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('KNN_BaggingClassifier')
ensembles_results['train_score'].append(knn_bagging_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(knn_bagging_ensemble.score(X_validation,y_validation))
# In[157]:
# As DecisionTreeClassifier was the most successful in previous ensembles,
# now I will try out RandomForestClassifier
# In[158]:
rfc_ensemble = RandomForestClassifier(n_estimators=100, random_state=0)
rfc_ensemble.base_estimator = dtc_estimator
# In[159]:
rfc_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('RandomForestClassifier')
ensembles_results['train_score'].append(rfc_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(rfc_ensemble.score(X_validation,y_validation))
# In[160]:
#Compare with AdaBoostClassifier based on DecisionTreeClassifiers only
# (as KNeighborsClassifier doesn't support sample_weight)
# In[161]:
dtc_ada_ensemble = AdaBoostClassifier(base_estimator= dtc_estimator, n_estimators=50,
learning_rate=1.0, algorithm='SAMME.R',
random_state=42)
dtc_ada_ensemble.fit(X_train, y_train)
ensembles_results['ensemble'].append('AdaBoost_dtc_ensemble')
ensembles_results['train_score'].append(dtc_ada_ensemble.score(X_train, y_train))
ensembles_results['validation_score'].append(dtc_ada_ensemble.score(X_validation, y_validation))
# In[162]:
# Now let's compare these ensembles
# In[163]:
ensembles_results
# In[164]:
ensembles_comparison = pd.DataFrame.from_dict(ensembles_results)
# In[165]:
ensembles_comparison
# In[166]:
# Conclusion: the highest validation score of 0.822695 was achieved with DTC_BaggingClassifier
# The second best score of 0.817967 was achieved with RandomForestClassifier
# Surprisingly, VotingClassifier and StackingClassifier combining different base models
# have worse performance than ensembles with 1 estimator (RandomForestClassifier, DTC_BaggingClassifier)
# In[362]:
dtc_ada_ensemble.__class__.__name__
# In[324]:
lst = list(combinations([3,6,7], 2))
for t in lst:
print(list(t))
(1,2)
# ### Part 4: Write an algorithm to find the best combination of models and hyperparameters.
#
# There are an infinite number of ways that you could combine different models with different hyperparameters, but some will perform better than others.
#
# List the different parameters which you test over, as well as the ranges which you test
#
# Describe the search procedure which your algorithm implements
# In[484]:
from sklearn.model_selection import cross_val_score
# In[533]:
def algo_name(est):
    """Return the class name of an estimator instance (e.g. 'SVC')."""
    return type(est).__name__
def algo_voting_soft_classifier(estimators_list, param_grid_list, N, X, y, scoring='f1'):
    """
    Search for the best soft-voting ensemble of N tuned estimators.

    Each estimator is first tuned independently with GridSearchCV; every
    combination of N tuned estimators is then cross-validated as a soft
    VotingClassifier and the best-scoring (mean CV score) ensemble is
    returned, unfitted.

    estimators_list -- list of non-fitted estimators
    param_grid_list -- list of param grids for GridSearchCV, must be the
                       same size as estimators_list
    N -- number of models to use in the ensemble
    X -- features
    y -- target
    scoring -- scoring function name (string)
    """
    # The two lists must pair up one-to-one.
    assert len(estimators_list) == len(param_grid_list)
    # Can't combine more estimators than we have.
    assert N <= len(estimators_list)

    # Tune every candidate estimator independently.
    best_estimators = []
    for estimator, param_grid in zip(estimators_list, param_grid_list):
        print("Searching best params for " + algo_name(estimator))
        gs = GridSearchCV(estimator, param_grid, scoring=scoring)
        gs.fit(X, y)
        best_estimators.append(gs.best_estimator_)
        print("Best score = " + str(gs.best_score_))

    # Evaluate every combination of N tuned estimators.
    # Start at -inf (was 0): a 0 floor would reject every ensemble for
    # scoring metrics that can be zero or negative.
    top_score = float('-inf')
    top_ensemble = None
    for combo in combinations(best_estimators, N):
        voting_estimators = [(algo_name(est), est) for est in combo]
        names = [name for name, _ in voting_estimators]
        print(f"\n\nChecking combination of {N} estimators:")
        print("\t\n".join(names))
        soft_voting_ensemble = VotingClassifier(estimators=voting_estimators, voting='soft')
        cross_vals = cross_val_score(soft_voting_ensemble, X, y, scoring=scoring)
        # Mean over CV folds is the standard generalization estimate; the
        # original used max(), which is optimistically biased toward the
        # luckiest fold.
        current_score = cross_vals.mean()
        print(f"Score ({scoring}) of this combination is {current_score}")
        if current_score > top_score:
            top_score = current_score
            top_ensemble = soft_voting_ensemble
    return top_ensemble
# In[534]:
estimators_list = [
LogisticRegression(random_state=42, n_jobs=-1),
SVC(gamma=2, random_state=42, probability=True),
# MLPClassifier(max_iter=1000, random_state=42),
# KNeighborsClassifier(n_jobs=-1),
DecisionTreeClassifier(random_state=42)
]
param_grids_list = [
{'penalty':('l1','l2', 'elasticnet', 'none'), 'C':(0.025, 0.01,1,10), 'solver': ('newton-cg', 'lbfgs', 'liblinear','sag','saga')},
{'C':(0.025, 0.01,1,10), 'kernel':('linear', 'rbf'), 'decision_function_shape':('ovr','ovo')},
# {'activation':('relu', 'logistic'), 'solver':('sgd','lbfgs'), 'alpha':(0.001,0.01,0.1,1), 'learning_rate': ('adaptive','constant')},
# {'n_neighbors':(3,5,10), 'weights':('distance','uniform'), 'algorithm':('auto','ball_tree','kd_tree')},
{'criterion':('gini','entropy'), 'splitter':('best','random'),'max_depth':(5,10,25,50), 'min_samples_split':(2,3,5,10)}
]
# In[535]:
demo_X = X_train[:100]
demo_y = y_train[:100]
# In[536]:
import warnings
warnings.filterwarnings("ignore")
best_voter = algo_voting_soft_classifier(estimators_list, param_grids_list, 2, demo_X, demo_y, scoring='accuracy')
warnings.filterwarnings("default")
# In[518]:
best_voter
# In[519]:
warnings.filterwarnings("ignore")
best_voter_3 = algo_voting_soft_classifier(estimators_list, param_grids_list, 3, demo_X, demo_y, scoring='accuracy')
warnings.filterwarnings("default")
# In[520]:
best_voter_3
# ### Part 5: Present your results
#
# The final part of the project requires you to attempt to summarise your work. It should be clear how we could replicate the results by implementing exactly the same ensemble (models and hyperparameters).
#
# Please try to communicate and display your results with any graphs or charts.
# If you have any insights into why certain ensembles or models perform better of worse than others, and would like to write a paragraph to explain this, we'd love to read it!
# Please also write a summary paragraph that describes the best permutation that you found.
# In[ ]:
# Based on my observation, the individual models show lower score than ensembles
# In[676]:
print( 'The average score among individual models is equal to {}, maximum validation score - {} (provided by {}).'.format(round(classifiers_comparison['validation_score'].mean(),3), round(classifiers_comparison['validation_score'],3).max(), str(classifiers_comparison[classifiers_comparison['validation_score'] == classifiers_comparison['validation_score'].max()]['model'])[5:27]))
# In[710]:
print('The average score among ensembles is equal to {}, maximum validation score - {} (provided by {}).'.format(round(ensembles_comparison['validation_score'].mean(),3),
round(ensembles_comparison['validation_score'].max(),3),
str(ensembles_comparison[ensembles_comparison['validation_score'] == ensembles_comparison['validation_score'].max()]['ensemble'])[5:27]))
# In[ ]:
# The most powerful individual models are DecisionTreeClassifier and KNeighborsClassifier.
# The most powerful ensembles are DTC_BaggingClassifier and RandomForestClassifier (both are based on DecisionTreeClassifier)
# In[ ]:
# Single models VS ensembles
# In[718]:
fig, ax = plt.subplots(1, 2, figsize = (16,6), sharey = True)
ax[0].bar(classifiers_comparison.index, classifiers_comparison['train_score'], width = -0.3, align = 'edge', color='navy')
ax[0].bar(classifiers_comparison.index, classifiers_comparison['validation_score'], width = 0.3, align = 'edge', color='orange')
ax[0].set_title('Score of the individual models')
ax[1].bar(ensembles_comparison.index, ensembles_comparison['train_score'], width = -0.3, align = 'edge', color='navy')
ax[1].bar(ensembles_comparison.index, ensembles_comparison['validation_score'], width = 0.3, align = 'edge' , color='orange' )
ax[1].set_title('Score of the ensembles')
plt.title(label = 'Difference in score between individual models and ensembles', loc='left')
plt.show()
# In[719]:
# Interestingly, while individual models tend to show slightly higher score on the validation set,
# while ensembles tend to do the opposite: their train score is higher than validation one.
# In[720]:
# It is also interesting to compare the ensembles combining the different models VS homogeneous ones
# In[727]:
ensembles_comparison
# In[725]:
fig, ax = plt.subplots()
ax.set_title('Ensembles score')
ax.bar(ensembles_comparison.index, ensembles_comparison['train_score'], width = -0.3, align = 'edge', color='navy')
ax.bar(ensembles_comparison.index, ensembles_comparison['validation_score'], width = 0.3, align = 'edge' , color='orange')
ax.set_ylabel('Score')
ax.set_xlabel('Ensemble')
# In[ ]:
# After performing a series of tests, I made a conclusion that the best possible score can be achieved with
# the ensemble methods that combine multiple versions of the same estimator (BaggingClassifier, RandomForestClassifier) rather than
# the ones combining a list of different estimators (VotingClassifier and StackingClassifier).
# In[ ]:
# So for the further iterations I would use BaggingClassifier or RandomForestClassifier.
# The key for reproducibility would be to stick to a repeatable method for every step of data analysis:
# so it is reasonable to create the standard functions for data processing and cleaning, scaling and modelling.
# Also, in order to replicate the results I would pass an argument 'random_state = 42' to the models
# which allows to get the same results while running the model again.
# ### Part 6:
# A stakeholder asks you which features most affect the response variable (output). Describe how you would organise a test to determine this.
# In[521]:
# I would consider the influence of each feature to the output as a change that occurs in target value
# with change of the output with one unit
# In[525]:
get_ipython().system("['Approach for features importance calculation']('Change_in_var.png')")
# In[745]:
# I would test this approach on a decision-tree-based model in order to compare the results with the built-in function:
feature_importances = list(dtc_estimator.fit(demo_X, demo_y).feature_importances_)
features = demo_X.columns
# In[757]:
feat_imp = pd.DataFrame()
feat_imp['features'] = features
feat_imp['feature_importances'] = feature_importances
# In[762]:
feat_imp.sort_values(by = 'feature_importances', ascending = False)
# In[ ]:
# The inbuilt function suggests that alcohol has the strongest effect on the target (quality of wine).
| TataAndBigData/AICORE_project_1_submission | Wine_project_Tanya.py | Wine_project_Tanya.py | py | 24,410 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
from rest_framework import viewsets, mixins
from rest_framework.exceptions import NotFound
from rest_framework.response import Response

from .models import AssignmentGroup, Assignment, StudentAssignment
from .policies import (AssignmentGroupAccessPolicy,
                       AssignmentAccessPolicy, StudentAssignmentAccessPolicy)
from .serializers import (AssignmentGroupSerializer,
                          AssignmentSerializer, StudentAssignmentSerializer)
class AssignmentGroupViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for assignment groups.

    ``retrieve`` also embeds the group's assignments in the response;
    ``get_queryset`` optionally filters by a ``course`` query parameter.
    """

    serializer_class = AssignmentGroupSerializer
    permission_classes = [AssignmentGroupAccessPolicy]

    def retrieve(self, request, *args, **kwargs):
        group = self.get_object()
        payload = {
            'assignment_group': self.get_serializer(group).data,
            'assignments': AssignmentSerializer(
                group.assignments.all(), many=True).data,
        }
        return Response(payload)

    def get_queryset(self):
        base = AssignmentGroup.objects.all()
        course = self.request.query_params.get('course')
        return base.filter(course=course) if course else base
class AssignmentViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for assignments.

    ``retrieve`` also embeds the per-student assignment rows; ``get_queryset``
    optionally filters by ``course`` and/or ``group`` query parameters.
    """

    serializer_class = AssignmentSerializer
    permission_classes = [AssignmentAccessPolicy]

    def retrieve(self, request, *args, **kwargs):
        assignment = self.get_object()
        payload = {
            'assignment': self.get_serializer(assignment).data,
            'student_assignments': StudentAssignmentSerializer(
                assignment.student_assignments.all(), many=True).data,
        }
        return Response(payload)

    def get_queryset(self):
        # Collect the optional filters, then apply them in one call.
        filters = {}
        for param in ('course', 'group'):
            value = self.request.query_params.get(param)
            if value:
                filters[param] = value
        queryset = Assignment.objects.all()
        return queryset.filter(**filters) if filters else queryset
class StudentAssignmentViewSet(mixins.RetrieveModelMixin,
                               mixins.UpdateModelMixin,
                               viewsets.GenericViewSet):
    """Retrieve/update a single student's assignment.

    The object is looked up from the ``student`` and ``assignment`` query
    parameters rather than a URL pk; ``retrieve`` also embeds the parent
    assignment in the response.
    """

    serializer_class = StudentAssignmentSerializer
    permission_classes = [StudentAssignmentAccessPolicy]

    def retrieve(self, request, *args, **kwargs):
        instance = self.get_object()
        return Response({
            'student_assignment': self.get_serializer(instance).data,
            'assignment': AssignmentSerializer(instance.assignment).data
        })

    def get_object(self):
        student = self.request.query_params.get('student')
        assignment = self.request.query_params.get('assignment')
        try:
            return StudentAssignment.objects.get(
                student=student, assignment=assignment)
        except StudentAssignment.DoesNotExist:
            # Previously the uncaught DoesNotExist surfaced as an HTTP 500;
            # DRF translates NotFound into a proper 404 response.
            raise NotFound(
                'No student assignment matches the given student/assignment.')
| benhchoi/coursemanager | coursemanager/assignments/api.py | api.py | py | 2,830 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "serializers.AssignmentGroupSerializer",
"line_number": 11,
"usage_type"... |
72382523234 | from django.db import models
import auto_prefetch
from django_resized import ResizedImageField
from folio.utils.media import MediaHelper
from folio.utils.choices import PortfolioChoices
from folio.utils.models import NameBaseModel, ExperienceAndSchoolModel
from django.utils.text import slugify
from django.urls import reverse
from ckeditor.fields import RichTextField
from taggit.managers import TaggableManager
from django.utils.html import mark_safe
from user.models import UserModel
# Create your models here.
class Experiences(ExperienceAndSchoolModel):
    """A work-experience entry shown on the portfolio."""

    name_of_place = models.CharField(max_length=200)  # workplace/employer name
    work_as = models.CharField(max_length=200)  # role held there

    class Meta:
        # Fixed typo: was 'Exprience' (user-visible in the Django admin).
        verbose_name = 'Experience'
        verbose_name_plural = 'Experiences'
class Schools(ExperienceAndSchoolModel):
    """An education entry shown on the portfolio."""

    name_of_school = models.CharField(max_length=200)
    course = models.CharField(max_length=200)  # course / degree studied
    place = models.CharField(max_length=200)  # location of the school

    class Meta:
        verbose_name = 'School'
        verbose_name_plural = 'Schools'
class AboutMeModel(NameBaseModel):
    """Contact details and profile images for the 'about me' section."""

    email = models.EmailField(null=True)
    phone = models.BigIntegerField(null=True, blank=True)
    location = models.CharField(max_length=200, null=True)
    # Profile picture; resized on upload by django-resized.
    thumbnail = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image")
    # Optional image used on the resume/CV section.
    resume_photo = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image", null=True, blank=True)
class ServicesModel(NameBaseModel):
    """A service offered; detail pages are addressed by an auto-generated slug."""

    # presumably a CSS icon class used by the template -- confirm against templates
    i_class = models.CharField(max_length=200)
    description2 = models.TextField(null=True)  # secondary/long description
    image = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image", null=True, blank=True)

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the name on first save.
        if not self.slug and self.name:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)

    def get_absolute_url(self):
        """Canonical URL of this service's detail page."""
        return reverse('servicedetail', kwargs={'slug': self.slug})

    class Meta:
        verbose_name = 'Service'
        verbose_name_plural = 'Services'
class ServiceRatingModel(NameBaseModel):
    """A named skill/service rating (integer value)."""

    # NOTE(review): 'pesentage' is a typo for 'percentage'; renaming would
    # require a migration and template updates, so it is left as-is here.
    pesentage = models.IntegerField()
class NewsModel(NameBaseModel):
    """A news item with a thumbnail; slug is auto-generated from the name."""

    thumbnail = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image")

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the name on first save.
        if not self.slug and self.name:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)
class Tags(models.Model):
    """Simple named tag (separate from the taggit tags used on other models)."""

    name = models.CharField(max_length=50)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Tag'
        verbose_name_plural = 'Tags'
class Images(models.Model):
    """A file/image attachment linked to a portfolio item, news entry or blog post."""

    files = models.FileField(upload_to=MediaHelper.get_image_upload_path)
    # The three optional FKs below indicate which parent this image belongs to.
    potfolio = auto_prefetch.ForeignKey("PotfolioModel", on_delete=models.CASCADE, null=True, blank=True)
    news = auto_prefetch.ForeignKey(NewsModel, on_delete=models.CASCADE, null=True, blank=True)
    blogs = auto_prefetch.ForeignKey('PortfolioBlogModel', on_delete=models.CASCADE, null=True, blank=True)

    def image_tag(self):
        """Thumbnail preview for the Django admin.

        Fixed: the original interpolated the literal text
        'MediaHelper.get_image_upload_path' into the src attribute, producing
        a broken URL. Use the storage-backed URL of the uploaded file instead.
        """
        return mark_safe('<img src="%s" width="50" height="50" />' % self.files.url)

    image_tag.short_description = 'Image'

    class Meta:
        verbose_name = 'Image'
        verbose_name_plural = 'Images'
class PotfolioModel(NameBaseModel):
    """A portfolio/project entry ('Potfolio' spelling kept; renaming would break migrations)."""

    client = models.CharField(max_length=200)  # client the work was done for
    firm_name = models.CharField(max_length=200)
    image = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image", null=True, blank=True)
    category = TaggableManager()  # free-form category tags (django-taggit)
    potfoliochoices = models.CharField(choices=PortfolioChoices.choices, max_length=20, default=PortfolioChoices.Backend)
    live_website = models.CharField(max_length=200, null=True, blank=True)
    date_created = models.DateTimeField(null=True, blank=True)

    def get_potfolio_url(self):
        # NOTE(review): this reverses 'servicedetail' -- the same route as
        # ServicesModel.get_absolute_url. Looks like a copy/paste; confirm
        # whether a portfolio-detail URL name was intended.
        return reverse('servicedetail', kwargs={'slug': self.slug})

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the name on first save.
        if not self.slug and self.name:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)

    class Meta:
        verbose_name = 'Portfolio'
        verbose_name_plural = 'Portfolios'
class PotfolioQuestionModel(models.Model):
    """FAQ-style question/answer pair."""

    questions = models.CharField(max_length=200, null=True)
    answer = RichTextField(null=True)  # rich-text (CKEditor) answer body
class CategoryModel(models.Model):
    """Blog post category."""

    name = models.CharField(max_length=200)
    # NOTE(review): unlike the other models, slug is never populated
    # automatically (no save() override) -- confirm it is set manually.
    slug = models.SlugField(null=True, blank=True)

    def __str__(self):
        return self.name
class PortfolioBlogModel(NameBaseModel):
    """A blog post belonging to a category; slug auto-generated from the name."""

    category = models.ForeignKey(CategoryModel, on_delete=models.CASCADE)
    image = ResizedImageField(upload_to=MediaHelper.get_image_upload_path, verbose_name="Image", null=True, blank=True)
    tags = TaggableManager()  # free-form tags (django-taggit)

    def save(self, *args, **kwargs):
        # Auto-generate the slug from the name on first save.
        if not self.slug and self.name:
            self.slug = slugify(self.name)
        return super().save(*args, **kwargs)
class CommentsModel(models.Model):
    """A user comment on a blog post; ``active`` gates public display."""
    author = models.ForeignKey(UserModel, on_delete=models.CASCADE)
    post = models.ForeignKey(PortfolioBlogModel, on_delete=models.CASCADE)
    body = models.TextField(help_text='Add a comment')
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    active = models.BooleanField(default=True)
    def __str__(self):
        return f'Comment by {self.author} on {self.post}'
| Gentility01/my-folio1 | core/models.py | models.py | py | 5,589 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "folio.utils.models.ExperienceAndSchoolModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.db.models",
"line_number": 17,
"usage_type": "name"
},
... |
12888400882 | import asyncio
import websockets
import soundfile as sf
import numpy as np
# Monotonically increasing suffix for saved WAV files (received_audio_<n>.wav).
file_counter = 0
# Every currently open client connection; used to fan received audio back out.
connected_clients = set()
async def register_client(websocket):
    """Track a newly connected client so broadcasts reach it."""
    connected_clients.add(websocket)
async def unregister_client(websocket):
    """Forget a disconnected client.

    Uses ``discard`` rather than ``remove`` so the ``finally`` block in
    ``audio_handler`` cannot raise KeyError if the handler failed before
    ``register_client`` ever ran (remove() raises on a missing element).
    """
    connected_clients.discard(websocket)
async def send_to_all_clients(message, sender):
    """Relay *message* to every connected client except *sender*."""
    for receiver in connected_clients:
        if receiver == sender:
            continue
        await receiver.send(message)
async def audio_handler(websocket, path):
    """Per-connection loop: receive audio frames, archive each to a WAV file,
    and rebroadcast the raw samples to all other connected clients.

    ``path`` is required by the websockets<11 handler signature but unused.
    """
    global file_counter  # Use the global counter
    await register_client(websocket)
    try:
        while True:
            audio_data = await websocket.recv()
            # Process audio_data as needed, e.g., save it to a file, perform further analysis, etc.
            print("Received audio data:", len(audio_data), "bytes")
            # Reinterpret the received bytes as 32-bit float PCM samples.
            # NOTE(review): assumes the client sends mono float32 at 16 kHz —
            # nothing here validates that; confirm against the sender.
            audio_data = np.frombuffer(audio_data, dtype='float32')
            # Generate a new file name with a consecutive number.
            # NOTE(review): file_counter is shared across all connections, so
            # concurrent handlers interleave their numbering.
            file_name = f"received_audio_{file_counter}.wav"
            # Save the audio data to the new file
            with sf.SoundFile(file_name, mode="w", samplerate=16000, channels=1, subtype="FLOAT") as f:
                f.write(audio_data)
            file_counter += 1  # Increment the counter
            # Broadcast the received audio to all other clients
            await send_to_all_clients(audio_data.tobytes(), websocket)
    except websockets.ConnectionClosedOK:
        print(f"WebSocket connection closed by the client: {websocket.remote_address}")
    except websockets.ConnectionClosedError:
        print(f"WebSocket connection closed unexpectedly: {websocket.remote_address}")
    finally:
        await unregister_client(websocket)
start_server = websockets.serve(audio_handler, "localhost", 8765)  # WebSocket server listens on localhost:8765
async def main():
    # Awaiting the serve() object starts accepting connections.
    await start_server
    await asyncio.Future()  # Keep the server running indefinitely
# NOTE(review): asyncio.get_event_loop() is deprecated for this use since
# Python 3.10 — asyncio.run(main()) is the modern equivalent; confirm the
# deployment's Python version before changing.
asyncio.get_event_loop().run_until_complete(main())
| Tuzteno/Ozzu | ws/ws.py | ws.py | py | 2,072 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.frombuffer",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "soundfile.SoundFile",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "websockets.ConnectionClosedOK",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name"... |
34139755887 | import os
import sys
sys.path.append("Mask_RCNN")
from mrcnn.model import MaskRCNN
from mrcnn import utils
from data import Data
from waldo_config import Waldoconfig
if __name__ == '__main__':
    # Configure and instantiate a Mask R-CNN model in training mode.
    config = Waldoconfig()
    config.display()
    model = MaskRCNN(mode="training", config=config,
                     model_dir=config.MODEL_DIR)
    weights_path = config.COCO_WEIGHTS_PATH
    # Download weights file
    if not os.path.exists(weights_path):
        utils.download_trained_weights(weights_path)
    # don't load last layers because we are going to train it
    model.load_weights(weights_path, by_name=True, exclude=[
        "mrcnn_class_logits", "mrcnn_bbox_fc",
        "mrcnn_bbox", "mrcnn_mask"])
    # Load the train and validation splits from DATA_DIR.
    dataset_train = Data()
    dataset_train.load(config.DATA_DIR, "train")
    dataset_train.prepare()
    dataset_val = Data()
    dataset_val.load(config.DATA_DIR, "val")
    dataset_val.prepare()
    # training just heads layer is enough (backbone keeps COCO features)
    model.train(dataset_train, dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=30,
                layers='heads')
| alseambusher/deepwaldo | train.py | train.py | py | 1,130 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "waldo_config.Waldoconfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mrcnn.model.Mask... |
42014273667 |
import zipfile
from functools import partial
import re
from collections import OrderedDict, Counter, defaultdict
import shutil
import os
from pprint import pprint
import datetime
import subprocess
import sys
from pathlib import Path
import mistune
from book_section import (CHAPTER, PART, SUBCHAPTER, TOC,
_parse_header_line,
BookSectionWithNoFiles)
#from citations import create_citation_note, create_bibliography_citation
from references import process_citations
this_module_dir = Path(os.path.dirname(os.path.abspath(__file__)))
# Later assignments deliberately override earlier ones: the last value wins
# (epubcheck 4.2.2), resolved relative to this module's directory.
EPUBCHECK_JAR = 'epubcheck-4.0.2/epubcheck.jar'
EPUBCHECK_JAR = 'epubcheck-4.2.2/epubcheck.jar'
EPUBCHECK_JAR = this_module_dir / EPUBCHECK_JAR
# Mandatory META-INF/container.xml pointing readers at the OPF package file.
CONTAINER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<container xmlns="urn:oasis:names:tc:opendocument:xmlns:container" version="1.0">
<rootfiles>
<rootfile media-type="application/oebps-package+xml" full-path="EPUB/content.opf"/>
</rootfiles>
</container>'''
# Head of the EPUB3 nav document; {title} is filled per book.
NAV_HEADER_XML = '''<?xml version='1.0' encoding='utf-8'?><!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" lang="en" xml:lang="en">
<head>
<title>{title}</title>
</head>
<body>
'''
# Second assignment wins. NOTE(review): both values render as spaces here —
# likely an entity ('&nbsp;') or U+00A0 lost in transit; confirm encoding.
NBSP = ' '
NBSP = ' '
EPUB_VERSION = 'old'
# epub:type vocabulary changed between EPUB versions.
ENDNOTES_EPUB_TYPE = {'old': 'rearnotes',
                      '3.1': 'endnotes'}
EPUB_META_DIR = 'META-INF'
EPUB_DIR = 'EPUB'
XHTML_FILES_EXTENSION = 'xhtml'
CONTAINER_XML_FPATH = EPUB_META_DIR + '/container.xml'
# IDs/titles for the generated appendix chapters, per book language.
ENDNOTE_CHAPTER_ID = 'endnotes'
ENDNOTE_CHAPTER_TITLE = {'es': 'Notas',
                         'en': 'Notes'}
BIBLIOGRAPHY_CHAPTER_ID = 'bibliography'
BIBLIOGRAPHY_CHAPTER_TITLE = {'es': 'Bibliografía',
                              'en': 'Bibliography'}
BACK_MATTER_PART_ID = 'back_matter_part'
BACK_MATTER_PART_FPATH = EPUB_DIR + f'/appendices.{XHTML_FILES_EXTENSION}'
APPENDICES_PART_TITLE = {'es': 'Apéndices',
                         'en': 'Appendices'}
TOC_CHAPTER_TITLE = {'es': 'Índice',
                     'en': 'Table of contents'}
TOC_CHAPTER_ID = 'toc'
NAV_FPATH = EPUB_DIR + f'/nav.{XHTML_FILES_EXTENSION}'
# Head of every generated chapter xhtml file; {title} filled per section.
CHAPTER_HEADER_HTML = '''<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops" xml:lang="en" lang="en">
<head>
<title>{title}</title>
</head>\n'''
# Opens a <section>; the empty span is the anchor NCX/nav entries point at.
CHAPTER_SECTION_LINE = '''<section epub:type="{epub_type}" id="{section_id}" class="{epub_type}">
<span id="nav_span_{section_id}"> </span>
'''
NCX_HEADER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/" version="2005-1">'''
NCX_FPATH = os.path.join(EPUB_DIR, 'toc.ncx')
OPF_HEADER_XML = '''<?xml version='1.0' encoding='utf-8'?>
<package unique-identifier="id" version="3.0" xmlns="http://www.idpf.org/2007/opf" prefix="rendition: http://www.idpf.org/vocab/rendition/#">
<metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">
'''
OPF_FPATH = os.path.join(EPUB_DIR, 'content.opf')
def _get_epub_fpath_for_endnote_chapter():
    """Path (inside the zip) of the generated endnotes chapter."""
    return f'{EPUB_DIR}/endnotes.{XHTML_FILES_EXTENSION}'
def _get_epub_fpath_for_bibliography_chapter():
    """Path (inside the zip) of the generated bibliography chapter."""
    return f'{EPUB_DIR}/bibliography.{XHTML_FILES_EXTENSION}'
def _get_epub_fpath_for_toc_chapter():
    """Path (inside the zip) of the generated in-book table of contents."""
    return f'{EPUB_DIR}/toc.{XHTML_FILES_EXTENSION}'
def _create_epub_fpath_for_section(section):
    """Map a book section to the xhtml file path it is rendered into.

    Special chapters (endnotes, bibliography, toc) have fixed names;
    subchapters share their parent chapter's file.
    """
    if section.kind == CHAPTER:
        special_getters = {ENDNOTE_CHAPTER_ID: _get_epub_fpath_for_endnote_chapter,
                           BIBLIOGRAPHY_CHAPTER_ID: _get_epub_fpath_for_bibliography_chapter,
                           TOC_CHAPTER_ID: _get_epub_fpath_for_toc_chapter}
        getter = special_getters.get(section.id)
        if getter is not None:
            return getter()
        return EPUB_DIR + f'/chapter_{section.idx}.{XHTML_FILES_EXTENSION}'
    if section.kind == PART:
        return EPUB_DIR + f'/part_{section.idx}.{XHTML_FILES_EXTENSION}'
    if section.kind == SUBCHAPTER:
        return EPUB_DIR + f'/chapter_{section.parent.idx}.{XHTML_FILES_EXTENSION}'
    raise ValueError(f'No fpath defined for this kind of section: {section.kind}')
# Per-book running state, keyed by id(book), so several books can be
# converted in one process without their numbering interfering.
_NUM_FOOTNOTES_AND_CITATIONS_SEEN = Counter()
_FOOTNOTE_IDS_SEEN = defaultdict(set)
# NOTE(review): _CITATION_COUNTS is never read anywhere in this module;
# _process_citations_and_footnotes passes _FOOTNOTE_DEFINITION_ID_COUNTS to
# _CitationProcessor instead — confirm whether that sharing is intended.
_CITATION_COUNTS = defaultdict(Counter)
_FOOTNOTE_DEFINITION_ID_COUNTS = defaultdict(Counter)
def _create_html_for_numbered_footnote(number):
html = f'<sup>{number}</sup>'
return html
def _footnote_processor(footnote, endnote_chapter_fpath, book):
    """Turn an inline footnote marker into a numbered noteref <a> link.

    Raises RuntimeError on a footnote ID reused within the same book.
    """
    footnote_id = footnote['match'].group('id')
    book_key = id(book)
    if footnote_id in _FOOTNOTE_IDS_SEEN[book_key]:
        raise RuntimeError('Repeated footnote ID: ' + footnote_id)
    _FOOTNOTE_IDS_SEEN[book_key].add(footnote_id)
    _NUM_FOOTNOTES_AND_CITATIONS_SEEN[book_key] += 1
    definition_fpath = os.path.join('..', endnote_chapter_fpath)
    href_to_footnote_definition = f'{definition_fpath}#ftd_{footnote_id}'
    anchor_id = f'ft_{footnote_id}'
    marker_html = _create_html_for_numbered_footnote(_NUM_FOOTNOTES_AND_CITATIONS_SEEN[book_key])
    text = f'<a id="{anchor_id}" href="{href_to_footnote_definition}" role="doc-noteref" epub:type="noteref">{marker_html}</a>'
    return {'processed_text': text,
            'match_location': footnote['match'].span()[0],
            'footnote_id': footnote_id}
def _footnote_definition_processor(footnote_definition,
                                   fpath_for_section_in_epub, book):
    """Turn a '[^id]: text' definition into an endnote <li> element.

    Raises RuntimeError if the same footnote ID is defined twice in a book.
    """
    footnote_id = footnote_definition['match'].group('id')
    book_key = id(book)
    _FOOTNOTE_DEFINITION_ID_COUNTS[book_key][footnote_id] += 1
    if _FOOTNOTE_DEFINITION_ID_COUNTS[book_key][footnote_id] > 1:
        raise RuntimeError('More than one footnote definition for footnote ID: ' + footnote_id)
    li_id = f'ftd_{footnote_id}'
    content = footnote_definition['match'].group('content').strip()
    li_html = f'<li id= "{li_id}" role="doc-endnote"><p>{content}</p></li>'
    return {'footnote_definition_li': li_html,
            'footnote_id': footnote_id}
class _CitationProcessor:
    """Callable that expands one '[@key ...]' citation into a numbered
    noteref link plus a matching endnote <li>, via pandoc-style
    ``process_citations``.

    Keeps per-section state so consecutive citations of the same key in a
    section can be processed together (ibid-style shortening).
    """
    def __init__(self, bibliography_chapter_fpath,
                 endnote_chapter_fpath,
                 bibliography_entries_seen,
                 references_not_found,
                 citation_counts,
                 book):
        self.bibliography_chapter_fpath = bibliography_chapter_fpath
        self.endnote_chapter_fpath = endnote_chapter_fpath
        self.book = book
        self.bibliography_path = book.bibliography_path
        # Shared accumulators owned by the caller (mutated in place).
        self.bibliography_entries_seen = bibliography_entries_seen
        self.references_not_found = references_not_found
        self.last_citation_id_processed_for_section = {}
        self.last_citation_texts = []
        self._citation_counts = citation_counts

    def __call__(self, citation, fpath_for_section_in_epub):
        # Leftover debug hook: second assignment disables it.
        debug_notes = ['Mimir2019', 'whatisreal']
        debug_notes = []
        bibliography_path = self.bibliography_path
        if bibliography_path is None:
            msg = 'No bibliography defined in metadata, but citations are used'
            raise ValueError(msg)
        book_id = id(self.book)
        citation_id = citation['match'].group('id')
        citation_counts = self._citation_counts
        citation_counts[citation_id] += 1
        # Unique per-occurrence id: "<key>_<occurrence number>".
        footnote_id = f'{citation_id}_{citation_counts[citation_id]}'
        _NUM_FOOTNOTES_AND_CITATIONS_SEEN[book_id] += 1
        if citation_id in debug_notes:
            print('citation_id', citation_id)
        fpath = os.path.join('..', self.endnote_chapter_fpath)
        href_to_footnote_definition = f'{fpath}#ftd_{footnote_id}'
        a_id = f'ft_{footnote_id}'
        # Track whether this citation repeats the previous one in the section.
        # NOTE(review): nesting below reconstructed from unindented source —
        # confirm against the original file.
        if citation_id != self.last_citation_id_processed_for_section.get(fpath_for_section_in_epub):
            self.last_citation_id_processed_for_section[fpath_for_section_in_epub] = citation_id
        citation_texts_to_process = []
        if self.last_citation_id_processed_for_section.get(fpath_for_section_in_epub):
            citation_texts_to_process = self.last_citation_texts[:]
        citation_text = citation['text'].strip()
        citation_texts_to_process.append(citation_text)
        results = process_citations(citation_texts_to_process,
                                    libray_csl_json_path=bibliography_path)
        if citation_id in debug_notes and False:
            pprint(results)
        # Normalize the markdown key to the bare citation key.
        strip_citation_id = citation_id.strip().split(' ')[0].split('#')[0].strip('/:')
        if strip_citation_id in results['references_not_found']:
            self.references_not_found.update(results['references_not_found'])
            citation_result = None
            #citation_texts_to_process = citation_texts_to_process[:-1]
        else:
            citation_result = results['citations'][-1]
            if False:
                pprint(results)
                print('citation_id', citation_id)
                print('strip_id', strip_citation_id)
                print('pandoc_key', citation_result['citation_key'])
            assert citation_result['citation_key'].strip('/') == strip_citation_id
        if self.last_citation_id_processed_for_section.get(fpath_for_section_in_epub):
            self.last_citation_texts = citation_texts_to_process
        else:
            self.last_citation_texts = []
        if citation_id in debug_notes:
            print('citation_result')
            pprint(citation_result)
        if citation_result:
            html = _create_html_for_numbered_footnote(_NUM_FOOTNOTES_AND_CITATIONS_SEEN[book_id])
            text = f'<a id="{a_id}" href="{href_to_footnote_definition}" role="doc-noteref" epub:type="noteref">{html}</a>'
            li_id = f'ftd_{footnote_id}'
            back_href_to_footnote = f'{fpath_for_section_in_epub}#ft_{footnote_id}'
            footnote_definition_text = f'<li id= "{li_id}" role="doc-endnote">{citation_result["footnote_html_text"]}</li>'
            self.bibliography_entries_seen.update(results['references'])
        else:
            # Reference unknown: leave the citation text untouched in the body.
            text = citation['text']
            footnote_definition_text = None
        res = {'footnote_definition_li': footnote_definition_text,
               'processed_text': text,
               'match_location': citation['match'].span()[0]}
        if citation_id in debug_notes:
            print('result')
            pprint(res)
        return res
def _internal_link_processor(internal_link, book):
    """Rewrite a '[text](#section_id)' markdown link into an <a> pointing at
    the xhtml file the target section is rendered into."""
    text = internal_link['match'].group('text')
    link_id = internal_link['match'].group('link_id')
    section = book.get_section_by_id(link_id)
    fname = Path(_create_epub_fpath_for_section(section)).name
    link = fname
    # no_id=True suppresses the "#nav_span_<id>" anchor (plain file link).
    link = _build_link(link, text, id_=section.id, no_id=True)
    return {'processed_text': link}
def _get_citation_location_in_text(footnote_definition, footnote_locations):
if 'match_location' in footnote_definition:
return footnote_definition['match_location']
else:
return footnote_locations[footnote_definition['footnote_id']]
def _split_md_text_in_items(md_text):
# This algorithm has one limitation, it does not allow to have trees
# it can only yield a stream of items, but not items within an item
footnote_re = re.compile(r' *\[\^(?P<id>[^\]]+)\]')
footnote_definition_re = re.compile(r'\[\^(?P<id>[^\]]*)\]:(?P<content>[^\n]+)')
citation_re = re.compile(r' *\[@(?P<id>[^ \],]+),? *(?P<locator_term>[\w]*):? *(?P<locator_positions>[0-9]*)\]', re.UNICODE)
internal_link_re = re.compile(r'\[(?P<text>[^\]]+)\]\(#(?P<link_id>[^\)]+)\)')
item_kinds = OrderedDict([('footnote_definition', {'re': footnote_definition_re}),
('footnote', {'re': footnote_re}),
('citation', {'re': citation_re}),
('internal_link', {'re': internal_link_re}),
])
re_idx = {item_kind: idx for idx, item_kind in enumerate(item_kinds.keys())}
debug_text = '@whatisreal'
debug_text = None
if debug_text and debug_text in md_text:
print('md_text')
pprint(md_text)
start_pos_to_search = 0
while True:
matches = []
for kind, item_def in item_kinds.items():
match = item_def['re'].search(md_text, start_pos_to_search)
if match is None:
continue
matches.append({'kind': kind, 'match': match})
if matches:
matches.sort(key=lambda match: re_idx[match['kind']])
matches.sort(key=lambda match: match['match'].start())
next_match = matches[0]['match']
kind = matches[0]['kind']
if debug_text:
print('matches')
pprint(matches)
else:
yield {'kind': 'std_md',
'text': md_text[start_pos_to_search:]}
break
if start_pos_to_search < next_match.start():
res = {'kind': 'std_md',
'text': md_text[start_pos_to_search:next_match.start()]}
if debug_text and debug_text in res['text']:
print('start_pos_to_search < next_match.start()', start_pos_to_search, next_match.start())
print('yield result')
pprint(res)
yield res
res = {'kind': kind,
'text': md_text[next_match.start():next_match.end()],
'match': next_match}
if debug_text and debug_text in res['text']:
print('yielding in last yield')
yield res
start_pos_to_search = next_match.end()
if start_pos_to_search >= len(md_text):
break
def _process_citations_and_footnotes(md_text,
                                     section,
                                     bibliography_entries_seen,
                                     references_not_found,
                                     endnote_definitions):
    """Expand citations, footnotes and internal links in *md_text*.

    Returns {'rendered_text': str} and appends the endnote <li> definitions
    produced along the way to *endnote_definitions* (sorted by where their
    marker appears in the body text).
    """
    items = _split_md_text_in_items(md_text)
    footnote_definitions = []
    fpath_for_section_in_epub = _create_epub_fpath_for_section(section)
    #split_text_in_items, item kinds: std_markdown, citation, footnote, footnote_definition,
    # NOTE(review): the citation processor is handed
    # _FOOTNOTE_DEFINITION_ID_COUNTS rather than the (otherwise unused)
    # _CITATION_COUNTS — confirm that sharing one counter map is intended.
    citation_processor = _CitationProcessor(bibliography_chapter_fpath=_get_epub_fpath_for_bibliography_chapter(),
                                            endnote_chapter_fpath=_get_epub_fpath_for_endnote_chapter(),
                                            bibliography_entries_seen=bibliography_entries_seen,
                                            references_not_found=references_not_found,
                                            citation_counts=_FOOTNOTE_DEFINITION_ID_COUNTS[id(section.book)],
                                            book=section.book)
    item_processors = {'footnote': partial(_footnote_processor,
                                           endnote_chapter_fpath=_get_epub_fpath_for_endnote_chapter(),
                                           book=section.book),
                       'footnote_definition': partial(_footnote_definition_processor,
                                                      fpath_for_section_in_epub=fpath_for_section_in_epub,
                                                      book=section.book),
                       'citation': partial(citation_processor,
                                           fpath_for_section_in_epub=fpath_for_section_in_epub),
                       'internal_link': partial(_internal_link_processor,
                                                book=section.book)
                       }
    # Leftover debug hooks; final assignment disables them.
    debug_item = 'citation'
    debug_text_in_citation = 'what'
    debug_item = None
    processed_text = []
    footnote_locations = {}
    for item in items:
        # 'std_md' items have no processor and pass through verbatim.
        processor = item_processors.get(item['kind'], None)
        if debug_item and item['kind'] == debug_item:
            if debug_text_in_citation:
                if debug_text_in_citation in item['text']:
                    pprint(item)
            else:
                pprint(item)
        if processor:
            processed_item = processor(item)
            if 'processed_text' in processed_item:
                text = processed_item['processed_text']
            else:
                text = None
        else:
            processed_item = None
            text = item['text']
        if debug_item and item['kind'] == debug_item:
            if debug_text_in_citation:
                if debug_text_in_citation in item['text']:
                    print('text', text)
            else:
                print('text', text)
        # Collect endnote <li> definitions (from citations and definitions).
        if (processed_item and 'footnote_definition_li' in processed_item and
                processed_item['footnote_definition_li']):
            definition = {'footnote_definition_li': processed_item['footnote_definition_li']}
            if 'match_location' in processed_item:
                definition['match_location'] = processed_item['match_location']
            if 'footnote_id' in processed_item:
                definition['footnote_id'] = processed_item['footnote_id']
            footnote_definitions.append(definition)
        # Remember where each footnote marker sits, to sort definitions later.
        if item['kind'] == 'footnote':
            footnote_locations[processed_item['footnote_id']] = processed_item['match_location']
        if text is not None:
            processed_text.append(text)
    get_citation_location_in_text = partial(_get_citation_location_in_text,
                                            footnote_locations=footnote_locations)
    footnote_definitions.sort(key=get_citation_location_in_text)
    endnote_definitions.extend(footnote_definitions)
    return {'rendered_text': ''.join(processed_text)}
def _process_basic_markdown(md_text):
    """Render plain markdown to XHTML and unescape angle brackets that were
    escaped upstream by the markdown renderer.

    The previous version called ``replace('<', '<')`` / ``replace('>', '>')``
    — self-identical no-ops (almost certainly '&lt;'/'&gt;' entity text lost
    in transit); restore the intended unescaping.  Headers are rendered
    separately, hence the assert that none survive here.
    """
    renderer = mistune.Renderer(use_xhtml=True)
    render_markdown = mistune.Markdown(renderer)
    xhtml_text = render_markdown(md_text)
    xhtml_text = xhtml_text.replace('&lt;', '<').replace('&gt;', '>')
    assert '<h' not in xhtml_text
    return xhtml_text
def _process_md_text(md_text, section, bibliography_entries_seen,
                     references_not_found,
                     endnote_definitions):
    """Join markdown *lines*, expand citations/footnotes, then render XHTML."""
    joined_text = '\n'.join(md_text)
    result = _process_citations_and_footnotes(
        md_text=joined_text,
        section=section,
        bibliography_entries_seen=bibliography_entries_seen,
        references_not_found=references_not_found,
        endnote_definitions=endnote_definitions)
    result['rendered_lines'] = _process_basic_markdown(result['rendered_text'])
    return result
def _split_section_in_fragments(lines):
fragment_lines = []
for line in lines:
if line.startswith('#'):
if fragment_lines:
yield {'kind': 'fragment',
'lines': fragment_lines}
fragment_lines = []
yield {'kind': 'header',
'text': line}
else:
fragment_lines.append(line)
if fragment_lines:
yield {'kind': 'fragment',
'lines': fragment_lines}
def _create_html_for_md_text_in_section(section, bibliography_entries_seen,
                                        references_not_found):
    """Render a section's markdown to HTML chunks.

    Headers are rendered directly to <hN> tags; the text between headers is
    run through the citation/footnote pipeline.  Returns
    {'rendered_lines': [html, ...], 'footnote_definitions': [...]}.
    """
    md_text = section.md_text
    rendered_lines = []
    footnote_definitions = []
    for fragment in _split_section_in_fragments(md_text):
        if fragment['kind'] == 'header':
            text = fragment['text']
            res = _parse_header_line(text)
            header = f'<h{res["level"]}>{res["text"]}</h{res["level"]}>\n'
            rendered_lines.append(header)
        elif fragment['kind'] == 'fragment':
            result = _process_md_text(fragment['lines'], section=section,
                                      bibliography_entries_seen=bibliography_entries_seen,
                                      references_not_found=references_not_found,
                                      endnote_definitions=footnote_definitions)
            rendered_lines.append(result['rendered_lines'])
    result = {'rendered_lines': rendered_lines,
              'footnote_definitions': footnote_definitions}
    return result
def _write_html_in_zip_file(epub_zip, fpath, html):
#soup = BeautifulSoup(html, features='lxml')
#pretty_html = soup.prettify(formatter=None)
epub_zip.writestr(fpath, html)
def _create_chapter(chapter, epub_zip, bibliography_entries_seen,
                    references_not_found):
    """Render a chapter (and its subchapters, inlined in the same file) to
    one xhtml file inside the epub.

    Returns {'footnote_definitions': [...]} collected from the whole chapter.
    """
    footnote_definitions = []
    res = _create_html_for_md_text_in_section(chapter,
                                              bibliography_entries_seen=bibliography_entries_seen,
                                              references_not_found=references_not_found)
    html = '\n'.join(res['rendered_lines'])
    footnote_definitions.extend(res['footnote_definitions'])
    for subchapter in chapter.subsections:
        # Each subchapter gets its own <section> + nav anchor span.
        html += CHAPTER_SECTION_LINE.format(epub_type='subchapter',
                                            section_id=subchapter.id)
        res = _create_html_for_md_text_in_section(subchapter,
                                                  bibliography_entries_seen=bibliography_entries_seen,
                                                  references_not_found=references_not_found)
        footnote_definitions.extend(res['footnote_definitions'])
        html += '\n'.join(res['rendered_lines'])
        html += '</section>\n'
    _create_section_xhtml_file(title=chapter.title,
                               id_=chapter.id,
                               section_html=html,
                               fpath=_create_epub_fpath_for_section(chapter),
                               epub_type='chapter',
                               epub_zip=epub_zip)
    return {'footnote_definitions': footnote_definitions}
def _create_section_xhtml_file(title, id_, section_html, fpath, epub_type,
                               epub_zip):
    """Wrap *section_html* in a complete XHTML document and store it at *fpath*."""
    document_parts = [CHAPTER_HEADER_HTML.format(title=title),
                      '<body>\n',
                      CHAPTER_SECTION_LINE.format(epub_type=epub_type,
                                                  section_id=id_),
                      section_html,
                      '</section>\n',
                      '</body>\n',
                      '</html>\n']
    _write_html_in_zip_file(epub_zip, fpath, ''.join(document_parts))
def _create_part(part, epub_zip, bibliography_entries_seen,
                 references_not_found,
                 endnote_definitions):
    """Render a part's own page plus every chapter it contains.

    Endnote definitions from the part text and its chapters are appended to
    *endnote_definitions* (mutated in place).
    """
    title = part.title
    part_id = part.id
    fpath = _create_epub_fpath_for_section(part)
    result = _create_html_for_md_text_in_section(part,
                                                 bibliography_entries_seen=bibliography_entries_seen,
                                                 references_not_found=references_not_found)
    section_html = '\n'.join(result['rendered_lines'])
    endnote_definitions.extend(result['footnote_definitions'])
    _create_section_xhtml_file(title=title,
                               id_=part_id,
                               section_html=section_html,
                               fpath=fpath,
                               epub_type='part',
                               epub_zip=epub_zip)
    footnote_definitions = []
    for chapter in part.subsections:
        if chapter.kind == CHAPTER:
            res = _create_chapter(chapter, epub_zip,
                                  bibliography_entries_seen=bibliography_entries_seen,
                                  references_not_found=references_not_found)
            footnote_definitions.extend(res['footnote_definitions'])
        else:
            raise RuntimeError('A part should only have chapters as subparts.')
    endnote_definitions.extend(footnote_definitions)
def _create_endnotes_section_html(endnote_definitions):
html = '<section role="doc-endnotes">\n<ol>\n'
for endnote_definition in endnote_definitions:
if endnote_definition['footnote_definition_li']:
html += endnote_definition['footnote_definition_li']
html += '\n'
html += '</ol>\n</section>\n'
return html
def _create_endnotes_chapter(chapter, endnote_definitions, header_level,
                             epub_zip):
    """Build and write the endnotes appendix chapter."""
    heading = f' <h{header_level}>{chapter.title}</h{header_level}>\n'
    chapter_html = heading + _create_endnotes_section_html(endnote_definitions)
    _create_section_xhtml_file(title=chapter.title,
                               id_=chapter.id,
                               section_html=chapter_html,
                               fpath=_create_epub_fpath_for_section(chapter),
                               epub_type=chapter.kind,
                               epub_zip=epub_zip)
def _create_bibliography_section_html(bibliography_entries, book):
htmls = list(bibliography_entries.values())
htmls.sort()
html = '<section role="doc-bibliography">\n<ul>\n'
for html_li in htmls:
html += f'<li>{html_li}</li>\n'
html += '</ul>\n</section>\n'
return html
def _create_bibliography_chapter(chapter, bibliography_entries, header_level,
                                 epub_zip):
    """Build and write the bibliography appendix chapter."""
    heading = f' <h{header_level}>{chapter.title}</h{header_level}>\n'
    chapter_html = heading + _create_bibliography_section_html(bibliography_entries,
                                                              chapter.book)
    _create_section_xhtml_file(title=chapter.title,
                               id_=chapter.id,
                               section_html=chapter_html,
                               fpath=_create_epub_fpath_for_section(chapter),
                               epub_type=chapter.kind,
                               epub_zip=epub_zip)
def _build_link(link, text, id_=None, for_nav=False):
if id_ and not for_nav:
li = f'<a href="{link}#nav_span_{id_}">{text}</a>'
#li = f'<a href="{link}">{text}</a>'
else:
li = f'<a href="{link}">{text}</a>'
return li
def _build_nav_li_to_section(section, for_nav):
    """Return the inner HTML of a nav list item for *section*.

    Parts flagged ``has_no_html`` (synthetic back-matter parts) get a plain
    <span> instead of a link, since they have no file of their own.
    """
    try:
        has_no_html = section.has_no_html
    except AttributeError:
        # Regular sections never set this attribute.
        has_no_html = False
    if section.kind == PART and has_no_html:
        section_fpath = None
    else:
        section_fpath = os.path.join('..', _create_epub_fpath_for_section(section))
        #section_fpath = os.path.join(_create_epub_fpath_for_section(section))
    section_fname = os.path.basename(_create_epub_fpath_for_section(section))
    if section_fpath:
        li = _build_link(section_fname,
                         text=section.title,
                         id_=section.id,
                         for_nav=for_nav)
    else:
        li = f'<span>{section.title}</span>'
    return li
def _build_nav_for_chapter(chapter, for_nav):
    """Return an <li> for *chapter*, with a nested <ol> for its subchapters."""
    chapter_link = _build_nav_li_to_section(chapter, for_nav=for_nav)
    subchapters = list(chapter.subsections)
    if not subchapters:
        return f'<li>{chapter_link}</li>\n'
    html = f'<li>{chapter_link}\n'
    html += '<ol>\n'
    for subchapter in subchapters:
        sub_link = _build_nav_li_to_section(subchapter, for_nav=False)
        html += f'<li>{sub_link}</li>\n'
    html += '</ol></li>\n'
    return html
def _create_toc_section_html(book, for_nav=False):
    """Build the <nav epub:type="toc"> markup shared by the nav document
    (*for_nav*=True) and the human-readable TOC chapter."""
    html = '<nav epub:type="toc">\n'
    html += f'<h1>{TOC_CHAPTER_TITLE[book.lang]}</h1>\n'
    html += '<ol>\n'
    for section in book.subsections:
        if section.kind == PART:
            li = _build_nav_li_to_section(section, for_nav=for_nav)
            html += f'<li>{li}\n'
            html += '<ol>\n'
            for chapter in section.subsections:
                html += _build_nav_for_chapter(chapter, for_nav=for_nav)
            html += '</ol></li>\n'
        elif section.kind == CHAPTER:
            html += _build_nav_for_chapter(section, for_nav=for_nav)
        # Other kinds (e.g. the TOC chapter itself) are silently skipped.
    html += '</ol>\n'
    html += '</nav>\n'
    return html
def _create_toc_chapter(toc_chapter, header_level, epub_zip):
    """Write the in-book table-of-contents chapter (*header_level* unused)."""
    toc_html = _create_toc_section_html(toc_chapter.book)
    _create_section_xhtml_file(title=toc_chapter.title,
                               id_=toc_chapter.id,
                               section_html=toc_html,
                               fpath=_get_epub_fpath_for_toc_chapter(),
                               epub_type=toc_chapter.kind,
                               epub_zip=epub_zip)
def _creata_nav(book, epub_zip):
    """Write the EPUB3 nav document.

    (The function name keeps its historic typo; callers depend on it.)
    """
    pieces = [NAV_HEADER_XML.format(title=book.title),
              '<section>\n',
              _create_toc_section_html(book, for_nav=True),
              '</section>\n',
              '</body>\n',
              '</html>\n']
    _write_html_in_zip_file(epub_zip, NAV_FPATH, ''.join(pieces))
def _build_nav_point_xml_for_section(section, play_order, level):
    """Return one NCX <navPoint> for *section*.

    Nesting is faked with leading NBSPs (*level* - 1 of them) because some
    readers ignore truly nested navPoints.  Parts/chapters link to the file
    itself; subchapters link to their in-file anchor span.
    """
    fpath = _create_epub_fpath_for_section(section)
    fname = os.path.basename(fpath)
    span_id = f'nav_span_{section.id}'
    nbsps_for_nested = NBSP * (level - 1)
    if section.kind in [PART, CHAPTER]:
        return f'''<navPoint class="{section.kind}" id="{section.id}" playOrder="{play_order}">
<navLabel>
<text>{nbsps_for_nested}{section.title}</text>
</navLabel>
<content src="{fname}" />
</navPoint>'''
    else:
        return f'''<navPoint id="{section.id}" playOrder="{play_order}">
<navLabel>
<text>{nbsps_for_nested}{section.title}</text>
</navLabel>
<content src="{fname}#{span_id}" />
</navPoint>'''
def _create_ncx(book, epub_zip):
    """Write the legacy NCX table of contents (EPUB2 compatibility)."""
    html = NCX_HEADER_XML
    html += '<head>\n'
    html += f'<meta content="{book.metadata["uid"]}" name="dtb:uid"/>\n'
    html += '</head>\n'
    html += f'<docTitle> <text>{book.title}</text> </docTitle>\n'
    html += '<navMap>\n'
    # Do not use nested navPoints because some ebook do not support them
    # Use to simulate nesting
    play_order = 1
    for section in book.subsections:
        if section.kind == PART:
            # Parts synthesised without a file (back matter) get no navPoint.
            if not section.has_no_html:
                html += _build_nav_point_xml_for_section(section, play_order, 1)
                play_order += 1
            for chapter in section.subsections:
                html += _build_nav_point_xml_for_section(chapter, play_order, 2)
                play_order += 1
                for subchapter in chapter.subsections:
                    html += _build_nav_point_xml_for_section(subchapter, play_order, 3)
                    play_order += 1
        elif section.kind == CHAPTER:
            html += _build_nav_point_xml_for_section(section, play_order, 1)
            play_order += 1
            for subchapter in section.subsections:
                html += _build_nav_point_xml_for_section(subchapter, play_order, 2)
                play_order += 1
    html += '</navMap>'
    html += '</ncx>'
    _write_html_in_zip_file(epub_zip, NCX_FPATH, html)
def _create_epub_backbone(epub_zip):
    """Write META-INF/container.xml pointing readers at the OPF package file."""
    epub_zip.writestr(CONTAINER_XML_FPATH, CONTAINER_XML)
def _create_mimetype_file(epub_zip):
epub_zip.writestr('mimetype', b'application/epub+zip')
def _create_opf(book, epub_zip):
    """Write the OPF package document: metadata, manifest, spine, and guide."""
    xml = OPF_HEADER_XML
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12 —
    # datetime.now(timezone.utc) is the modern equivalent; confirm target
    # Python before changing.
    now = datetime.datetime.utcnow().isoformat(timespec='seconds')
    xml += f'<meta property="dcterms:modified">{now}Z</meta>\n'
    xml += f'<dc:identifier id="id">{book.metadata["uid"]}</dc:identifier>\n'
    xml += f'<dc:title>{book.title}</dc:title>\n'
    xml += f'<dc:language>{book.lang}</dc:language>\n'
    for author in book.metadata['author']:
        xml += f'<dc:creator id="creator">{author}</dc:creator>\n'
    xml += '</metadata>\n'
    xml += '<manifest>\n'
    item_xml = '<item href="{fname}" id="{id}" media-type="application/xhtml+xml" />\n'
    # Reading order: TOC chapter first, then parts/chapters as declared.
    spine = [TOC_CHAPTER_ID]
    xml += item_xml.format(fname=os.path.basename(_get_epub_fpath_for_toc_chapter()),
                           id='toc')
    for section in book.subsections:
        if section.kind == PART:
            # File-less back-matter parts appear only via their chapters.
            if not section.has_no_html:
                spine.append(section.id)
                fname = os.path.basename(_create_epub_fpath_for_section(section))
                xml += item_xml.format(fname=fname, id=section.id)
            for chapter in section.subsections:
                spine.append(chapter.id)
                fname = os.path.basename(_create_epub_fpath_for_section(chapter))
                xml += item_xml.format(fname=fname, id=chapter.id)
        elif section.kind == CHAPTER:
            spine.append(section.id)
            fname = os.path.basename(_create_epub_fpath_for_section(section))
            xml += item_xml.format(fname=fname, id=section.id)
    fname = os.path.basename(NCX_FPATH)
    xml += f'<item href="{fname}" id="ncx" media-type="application/x-dtbncx+xml" />\n'
    fname = os.path.basename(NAV_FPATH)
    xml += f'<item href="{fname}" id="nav" media-type="application/xhtml+xml" properties="nav" />\n'
    xml += '</manifest>\n'
    xml += '<spine toc="ncx">\n'
    for chapter_id in spine:
        xml += f'<itemref idref="{chapter_id}"/>\n'
    xml += '</spine>\n'
    xml += '<guide>\n'
    title = TOC_CHAPTER_TITLE[book.lang]
    fname = os.path.basename(_get_epub_fpath_for_toc_chapter())
    xml += f'<reference type="toc" title="{title}" href="{fname}" />\n'
    xml += '</guide>\n'
    xml += '</package>\n'
    _write_html_in_zip_file(epub_zip, OPF_FPATH, xml)
def create_epub(book, epub_path):
    """Render *book* into an epub file at *epub_path* and validate it.

    Writes mimetype/container/content files, every part and chapter, the
    generated appendices (endnotes, bibliography) and the TOC/nav/ncx/opf
    documents, then runs epubcheck on the result.

    Raises RuntimeError after the build if any citation referenced a key
    missing from the bibliography database, and ValueError for section kinds
    that cannot appear directly under a book.

    Fix: the old top-level dispatch had ``elif section.kind == BOOK:`` but
    ``BOOK`` is never imported in this module, so reaching that comparison
    raised NameError instead of the intended ValueError; the invalid-kind
    branches are restructured below.
    """
    references_not_found = set()
    with zipfile.ZipFile(epub_path, 'w') as epub_zip:
        _create_mimetype_file(epub_zip)
        _create_epub_backbone(epub_zip)
        endnote_definitions = []
        bibliography_entries_seen = OrderedDict()
        # Render the book's declared content sections.
        for section in book.subsections:
            if section.kind == CHAPTER:
                res = _create_chapter(section, epub_zip,
                                      bibliography_entries_seen=bibliography_entries_seen,
                                      references_not_found=references_not_found)
                endnote_definitions.extend(res['footnote_definitions'])
            elif section.kind == PART:
                _create_part(section, epub_zip,
                             bibliography_entries_seen=bibliography_entries_seen,
                             references_not_found=references_not_found,
                             endnote_definitions=endnote_definitions)
            elif section.kind == SUBCHAPTER:
                raise ValueError('A book should not include a subsection of kind SUBCHAPTER')
            else:
                # Covers BOOK and any other unexpected kind (the old code
                # compared against the undefined name ``BOOK`` here).
                raise ValueError(f'Unexpected section kind below a book: {section.kind}')
        # Appendices go inside a synthetic back-matter part when the book
        # has parts, so the TOC nesting stays consistent.
        if (endnote_definitions or bibliography_entries_seen) and book.has_parts():
            back_matter_part = BookSectionWithNoFiles(parent=book,
                                                      id_=BACK_MATTER_PART_ID,
                                                      title=APPENDICES_PART_TITLE[book.lang],
                                                      kind=PART)
            back_matter_part.has_no_html = True
            book.subsections.append(back_matter_part)
            parent = back_matter_part
        else:
            back_matter_part = None
            parent = book
        toc_level_for_appendix_chapters = 2 if book.has_parts() else 1
        if endnote_definitions:
            endnotes_chapter = BookSectionWithNoFiles(parent=parent,
                                                      id_=ENDNOTE_CHAPTER_ID,
                                                      title=ENDNOTE_CHAPTER_TITLE[book.lang],
                                                      kind=CHAPTER)
            parent.subsections.append(endnotes_chapter)
            _create_endnotes_chapter(endnotes_chapter,
                                     endnote_definitions,
                                     header_level=toc_level_for_appendix_chapters,
                                     epub_zip=epub_zip)
        if bibliography_entries_seen:
            chapter = BookSectionWithNoFiles(parent=parent,
                                             id_=BIBLIOGRAPHY_CHAPTER_ID,
                                             title=BIBLIOGRAPHY_CHAPTER_TITLE[book.lang],
                                             kind=CHAPTER)
            parent.subsections.append(chapter)
            _create_bibliography_chapter(chapter,
                                         bibliography_entries_seen,
                                         header_level=toc_level_for_appendix_chapters,
                                         epub_zip=epub_zip)
        # The TOC chapter is created last (it must list the appendices) but
        # inserted first so it opens the reading order.
        toc_chapter = BookSectionWithNoFiles(parent=book,
                                             id_=TOC_CHAPTER_ID,
                                             title=TOC_CHAPTER_TITLE[book.lang],
                                             kind=TOC)
        _create_toc_chapter(toc_chapter,
                            header_level=1,
                            epub_zip=epub_zip)
        book.subsections.insert(0, toc_chapter)
        _creata_nav(book, epub_zip=epub_zip)
        _create_ncx(book, epub_zip=epub_zip)
        _create_opf(book, epub_zip=epub_zip)
    # Validate after the zip is closed so epubcheck sees a complete file.
    check_epub(epub_path)
    if references_not_found:
        msg = 'Some references were not found in the bibliography database'
        print('References not found:')
        print(','.join(references_not_found))
        raise RuntimeError(msg)
def unzip_epub(ebook_path, out_dir):
    """Extract the epub archive at *ebook_path* into *out_dir*.

    Any previous contents of *out_dir* are removed first so stale files
    from an earlier extraction cannot linger.
    """
    if out_dir.exists():
        shutil.rmtree(out_dir)
    archive = zipfile.ZipFile(ebook_path)
    try:
        archive.extractall(path=out_dir)
    finally:
        archive.close()
def check_epub(ebook_path):
    """Validate *ebook_path* with the external epubcheck tool.

    On a non-zero exit status, forward epubcheck's stdout and stderr to
    our own streams so the validation errors are visible.
    """
    command = ['java', '-jar', str(EPUBCHECK_JAR), str(ebook_path)]
    result = subprocess.run(command, capture_output=True)
    if result.returncode != 0:
        sys.stdout.write(result.stdout.decode())
        sys.stderr.write(result.stderr.decode())
| JoseBlanca/md2epub | epub_creation.py | epub_creation.py | py | 37,754 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line... |
17414099922 | from PySide2.QtWidgets import QAction, QApplication, QMessageBox, QInputDialog
from PySide2.QtCore import Qt
from PySide2.QtGui import QIcon
from PySide2.QtWidgets import QWidget, QMainWindow, QLabel, QApplication, QFileDialog
from model.Workspace import Workspace
from model.Chapter import Chapter
from model.Book import Book
from model.Page import Page
from model.Text import Text
from model.Picture import Picture
from PySide2.QtGui import QIcon
class NewBookAction(QAction):
    """
    Action that adds a new Book node to the workspace.
    """
    def __init__(self):
        """
        Constructor: sets the action label and icon and wires the trigger handler.
        """
        super(NewBookAction, self).__init__("New Book")
        self.setIcon(QIcon("src/new.png"))
        self.triggered.connect(self.actionCalled)
    def actionCalled(self):
        """
        Shows a dialog asking for the new book's name and, when the name is
        valid, creates a Book with that name, opens it in a new tab and
        attaches it to the workspace model.  On an invalid name the prompt
        is repeated until the user enters a valid name or cancels.
        """
        parent = QApplication.instance().model
        newName, ok = QInputDialog.getText(None, "New Book name", "Enter desired new name")
        if ok:
            if parent.isValidName(newName):
                newBook = Book(newName)
                # NOTE(review): newTab() is given the Book but its return value is
                # what gets attached to the model — confirm this is intentional.
                book = QApplication.instance().mainWindow.newTab(newBook)
                parent.addChild(book)
            else:
                # Keep re-prompting until the name validates or the user cancels.
                while not parent.isValidName(newName):
                    dialog = QMessageBox()
                    dialog.setWindowTitle("Error")
                    dialog.setText("That name is not valid")
                    dialog.setWindowIcon(QIcon("src/notification.png"))
                    dialog.setModal(True)
                    dialog.exec_()
                    # Second return value is the dialog's OK flag ("cancel" is a misnomer:
                    # it is False when the user pressed Cancel).
                    newName, cancel = QInputDialog.getText(None, "New Book name", "Enter desired new name")
                    if not cancel:
                        break
                    else:
                        if parent.isValidName(newName):
                            newBook = Book(newName)
                            book = QApplication.instance().mainWindow.newTab(newBook)
                            parent.addChild(book)
                            break
| dovvla/multimedia-book | MuMijA/actions/NewBookAction.py | NewBookAction.py | py | 2,130 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PySide2.QtWidgets.QAction",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "PySide2.QtGui.QIcon",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PySide2.QtWidgets.QApplication.instance",
"line_number": 31,
"usage_type": "call"
},
{
... |
761243358 | import allure
import pytest
from pages.cart_page import CartPage
from pages.home_page import HomePage
from pages.login_page import LoginPage
from utils.locators import LoginPageLocators, CartPageLocators, HomePageLocators
from utils.logger import _step
@pytest.mark.usefixtures('setup', 'website_setup')
class TestHomePage:
    """Browser tests for the home page (WebDriver supplied by the 'setup' fixture)."""
    # Retry flaky browser interactions twice, waiting two seconds between attempts.
    reruns = 2
    reruns_delay = 2
    @pytest.mark.dependency(name="open login page", scope="session")
    @pytest.mark.flaky(reruns=reruns, reruns_delay=reruns_delay)
    @_step
    @allure.title('Open login page test')
    @allure.description('This is test of open login page on home page')
    def test_login_page_opened(self, config):
        """Navigate home -> login page and verify the login title is shown."""
        home_page = HomePage(self.driver, config)
        home_page.open_page(f"cn/zh/home.html?cid={config['cid']}")
        home_page.go_to_login_page()
        login_page = LoginPage(self.driver, config)
        # Expected page title (Chinese: "Log in to your account").
        login_title = '登录您的账户'
        assert login_title in login_page.find_element(*LoginPageLocators.login_title).text
@pytest.mark.usefixtures('setup', 'website_setup')
class TestLoginPage:
    """Login-flow tests; both depend on the "open login page" session dependency."""
    # Retry flaky browser interactions twice, waiting two seconds between attempts.
    reruns = 2
    reruns_delay = 2
    @pytest.mark.dependency(depends=["open login page"], scope="session")
    @pytest.mark.flaky(reruns=reruns, reruns_delay=reruns_delay)
    @_step
    @allure.title('Login with invalid user test')
    @allure.description('This is test of login with invalid user')
    def test_login_with_invalid_user(self, config):
        """An invalid user must produce the login error message."""
        home_page = HomePage(self.driver, config)
        home_page.open_page(f"cn/zh/home.html?cid={config['cid']}")
        home_page.go_to_login_page()
        login_page = LoginPage(self.driver, config)
        login_page.login('test', is_valid=False)
        # Expected error (Chinese: "Incorrect username or password").
        error_msg = '用户名称或密码不正确'
        assert error_msg in login_page.find_element(*LoginPageLocators.login_error_message).text
    @pytest.mark.dependency(name="login", depends=["open login page"], scope="session")
    @pytest.mark.flaky(reruns=reruns, reruns_delay=reruns_delay)
    @_step
    @allure.title('Login with valid user test')
    @allure.description('This is test of login with valid user')
    def test_login_with_valid_user(self, config):
        """A valid user must see the logged-in account menu; logs out afterwards."""
        home_page = HomePage(self.driver, config)
        home_page.open_page(f"cn/zh/home.html?cid={config['cid']}")
        home_page.go_to_login_page()
        login_page = LoginPage(self.driver, config)
        # save_cookie=True lets later tests (e.g. checkout) skip the login UI.
        login_page.login('boxing', is_valid=True, save_cookie=True)
        login_page.redirect_to_home()
        # Expected menu label (Chinese: "Account").
        profile_msg = '账户'
        assert profile_msg in login_page.find_element(*HomePageLocators.logged_in_menu).text
        login_page.logout()
@pytest.mark.usefixtures('setup', 'website_setup')
class TestCartPage:
    """Checkout-flow test; depends on the "login" session dependency."""
    # Retry flaky browser interactions twice, waiting two seconds between attempts.
    reruns = 2
    reruns_delay = 2
    @pytest.mark.dependency(depends=["login"], scope="session")
    @pytest.mark.flaky(reruns=reruns, reruns_delay=reruns_delay)
    @_step
    @allure.title('Checkout order test')
    @allure.description('This is test of checkout order')
    def test_checkout_order(self, config, product):
        """Add *product* to the cart, walk the order flow up to (not including) submit."""
        home_page = HomePage(self.driver, config)
        home_page.open_page(f"cn/zh/home.html?cid={config['cid']}")
        # Reuse the saved session cookie when available; otherwise log in via the UI.
        if not home_page.load_cookie('boxing'):
            home_page.go_to_login_page()
            login_page = LoginPage(self.driver, config)
            login_page.login('boxing', is_valid=True, save_cookie=True)
        home_page.go_to_cart_page()
        cart_page = CartPage(self.driver, config)
        cart_page.add_forgot_item_to_cart(catalog_number=product['sku'], quantity=product['quantity'])
        cart_page.go_to_order_details_page()
        cart_page.fill_order_entry(ship_to='test', bill_to='test', order_number='NA')
        cart_page.go_to_review_submit_page()
        # is_submit=False: stop at the review step so no real order is placed.
        cart_page.submit_order(is_submit=False)
        assert product['name'] in cart_page.find_element(*CartPageLocators.added_item_name_field).text
        cart_page.empty_cart()
| BoxingP/selenium-auto-test | lambda/test_website/tests/test_scenarios_with_login.py | test_scenarios_with_login.py | py | 4,013 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pages.home_page.HomePage",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pages.login_page.LoginPage",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.locators.LoginPageLocators.login_title",
"line_number": 27,
"usage_type": "attr... |
26725677323 | from typing import List
import random
from . import aiplayer, game_config, move, randomai, ship, player
class SearchDestroyAi(aiplayer.AIPlayer):
    """Battleship AI using the classic two-phase "search and destroy" strategy.

    In "search" mode the AI fires at random cells it has not tried yet.  When
    a shot matches one of the enemy's possible hit cells, it switches to
    "destroy" mode: the hit's orthogonal neighbours are queued on a frontier
    (self.list) and fired through until the frontier is exhausted, at which
    point the AI resumes searching.
    """

    def __init__(self, player_num: int, config: game_config.GameConfig, other_players: List["Player"], type):
        super().__init__(player_num, config, other_players, type)
        # Frontier of cells queued for "destroy" mode, consumed front-to-back.
        self.list = []
        # Hits already expanded by scan(), so they are not queued twice.
        self.already_done = []
        # Current phase: None before the first move, then "search" or "destroy".
        self.mode = None

    def _move_from_coords(self, row, col):
        """Build a validated Move for (row, col), retrying while from_str rejects it."""
        text = str(row) + "," + str(col)
        while True:
            try:
                return move.Move.from_str(self, text)
            except ValueError as error:
                print(error)

    def destroy_mode(self):
        """Fire at the head of the frontier; fall back to searching when it is empty."""
        self.mode = "destroy"
        if len(self.list) == 0:
            # Nothing left to investigate around previous hits.
            self.mode = "search"
            return self.search_mode()
        coords = self.list[0]
        if coords in self.enemy.possible_hits:
            # Still a live lead: queue its neighbours for follow-up shots.
            self.mode = "destroy"
            self.add_coordinates(coords)
            self.list.remove(coords)
            return self._move_from_coords(coords[0], coords[1])
        # NOTE(review): matching the original logic, a frontier cell that is not
        # in possible_hits yields no move (None) and stays queued — this looks
        # like a latent bug; confirm how the game loop handles a None move.
        return None

    def search_mode(self):
        """Fire at a random untried cell; switch to destroy mode when it is a lead."""
        coord = random.choice(self.coordinates)
        if coord in self.enemy.possible_hits:
            self.mode = "destroy"
            self.add_coordinates(coord)
            return self.destroy_mode()
        self.coordinates.remove(coord)
        return self._move_from_coords(coord[0], coord[1])

    def add_coordinates(self, coords):
        """Queue *coords* and its four orthogonal neighbours on the frontier.

        Cells moved onto the frontier are removed from self.coordinates so
        search mode never targets them again.
        """
        cell = (coords[0], coords[1])
        if cell not in self.list:
            self.list.append(cell)
            self.coordinates.remove(cell)
        x, y = cell
        # Order (left, up, right, down) matters: the frontier is consumed FIFO.
        for neighbour in ((x, y - 1), (x - 1, y), (x, y + 1), (x + 1, y)):
            if neighbour in self.coordinates:
                self.coordinates.remove(neighbour)
            # NOTE(review): neighbours outside self.coordinates (off-board or
            # already fired at) are still queued, matching the original logic;
            # confirm move.Move.from_str tolerates such targets.
            if neighbour not in self.list:
                self.list.append(neighbour)

    def pick_destroy(self):
        """Legacy helper: return the next destroy target as a "row,col" string."""
        if len(self.list) == 0:
            self.mode = "search"
            return self.search_mode()
        next_fire = self.list[0]
        if next_fire in self.enemy.possible_hits:
            self.mode = "destroy"
            self.add_coordinates(next_fire)
        self.list.remove(next_fire)
        return str(next_fire[0]) + "," + str(next_fire[1])

    def pick_search(self):
        """Legacy helper: return a random search target as a "row,col" string."""
        coord = random.choice(self.coordinates)
        if coord in self.enemy.possible_hits:
            self.mode = "destroy"
            self.add_coordinates(coord)
            return self.destroy_mode()
        self.coordinates.remove(coord)
        return str(coord[0]) + "," + str(coord[1])

    def get_move(self):
        """Entry point called once per turn: return the next firing location."""
        self.enemy = self.opponents[0]
        if self.mode is None:
            self.mode = "search"
        if self.mode == "search":
            return self.search_mode()
        return self.destroy_mode()

    def scan(self):
        """Look for a fresh hit on the enemy board; return True when one is found.

        Bug fixes versus the original: add_coordinates() was called with two
        positional arguments instead of a single (row, col) tuple (a TypeError
        whenever a hit was found), and the mode flag was assigned to a local
        variable instead of self.mode, so it never took effect.
        """
        self.enemy = self.opponents[0]
        for cell in self.enemy.possible_hits:
            row, col = cell
            if self.enemy.board.contents[row][col] == "X":
                self.mode = "destroy"
                self.enemy.possible_hits.remove(cell)
                self.add_coordinates((row, col))
                self.already_done.append((row, col))
                return True
        self.mode = "search"
        return False
| ChoBro1/BattleShip | BattleShip/src/searchdestroyai.py | searchdestroyai.py | py | 5,246 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 116,
"usage_type": "call"
}
] |
42599399772 | #!/usr/bin/env python
# encoding: utf-8
import os
"""
@author: wenjiaGuo
@version: ??
@contact: 601152819@qq.com
@software: PyCharm
@file: 8.收集整个网站数据.py
@time: 2017/10/5 20:50
"""
# 如何创建一个爬虫来收集页面标题、正文的第一个段落,
# 以及编辑页面的链接(如果有的话)这些信息。
from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
# Global registry of wiki paths already visited, so each article is crawled once.
page = set()
def getLinks(pageUrl):
    """Recursively crawl Wikipedia article links starting at *pageUrl*.

    For each page, print its title, the first paragraph of the article body
    and the edit-page link (when present), then recurse into every /wiki/
    link that has not been visited yet.
    """
    global page
    html = urlopen('http://en.wikipedia.org'+pageUrl)
    bsObj = BeautifulSoup(html, 'lxml')
    try:
        print(bsObj.h1.get_text())
        print(bsObj.find(id='mw-content-text').findAll('p')[0])
        print(bsObj.find(id="ca-edit").find("span").find("a").attrs['href'])
    except AttributeError:
        # Some pages lack one of these elements — skip them gracefully.
        print('缺少一些属性,不过不用担心')
    for link in bsObj.findAll('a', href=re.compile('^(/wiki/)')):
        href = link.attrs.get('href')
        if href is not None and href not in page:
            print('---------------\n'+href)
            page.add(href)
            getLinks(href)
# Kick off the crawl from the Wikipedia front page (empty relative path).
getLinks("")
| guowenjia/scrapingAndClear | 8.收集整个网站数据.py | 8.收集整个网站数据.py | py | 1,153 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 31,
"usage_type": "call"
}
] |
24657826075 | from enum import IntEnum
from .graph import Edge, Graph, Node
from .heap import EdgeHeap
class Color(IntEnum):
    """Node colouring used by the depth-first search in KruskalsAlgorithm."""
    WHITE = 0  # not yet visited
    GRAY = 1   # discovered, currently being expanded
    BLACK = 2  # fully processed
class KruskalsAlgorithm:
    """Kruskal's algorithm with union-find, used to find an s-t path and its
    bottleneck bandwidth through the resulting spanning tree.

    NOTE(review): whether this yields the *maximum*-bandwidth path depends on
    EdgeHeap.remove() returning edges heaviest-first — confirm in heap.py.
    """
    # get all the edges of a graph
    # sort them
    # check if they do not make a cycle
    #
    def __init__(self, G: Graph, s: int, t: int) -> None:
        """Store graph G and endpoints s, t; allocate union-find arrays."""
        self.G = G
        self.s = s
        self.t = t
        # TODO: move makeset here?
        # p = union-find parent per vertex, h = rank (tree height) per vertex.
        self.p = [None for i in range(self.G.n)]
        self.h = [None for i in range(self.G.n)]
    def run(self):
        """Build the spanning tree edge by edge, then compute the s-t answer."""
        edges = self.G.get_edges()
        # edges = self.heapsort(edges)
        # TODO: replace it with heapsort
        h = EdgeHeap()
        h.insert_all(edges)
        # edges = h.heapsort()
        # edges = sorted(edges, key= lambda x: x[0], reverse=True)
        # TODO: can i put this inside the initialization?
        for i in range(self.G.n):
            self.makeset(i)
        edge_count = 0
        i = 0
        self.result = []
        # Accept an edge only when its endpoints lie in different components.
        # NOTE(review): loops forever if the heap empties before n-1 edges
        # are accepted (disconnected graph) — confirm inputs are connected.
        while(edge_count < self.G.n - 1):
            edge = h.remove()
            i = i+1
            weight, u, v = edge
            r1 = self.find(u)
            r2 = self.find(v)
            if r1 != r2:
                self.result.append(edge)
                edge_count += 1
                self.union(r1, r2)
        return self.output()
    def output(self):
        """Return (bottleneck bandwidth, s..t path) along the tree path found by dfs()."""
        # print(self.result)
        self.create_mst()
        self.dfs()
        max_bw = -1
        path = []
        if self.reached == True:
            # max_bw = min(self.result, key=lambda x: x[0])
            # print("bandwidth: ", max_bw[0])
            # Walk parent pointers t -> s, tracking the minimum edge weight.
            x = self.t
            max_bw = float("inf")
            while(x != self.s):
                max_bw = min(max_bw, self.temp_wt[(x, self.dad[x])])
                # print(x)
                path.append(x)
                x = self.dad[x]
            # print(x)
            path.append(x)
            # print("bandwidth: ", max_bw)
        else:
            print("no s-t path found")
        path.reverse()  # just to reverse so that it shows s - t in O(n) time
        return max_bw, path
    def dfs(self):
        """Depth-first search of the tree from s; records parents and edge weights."""
        self.temp_wt = {}
        self.reached = False
        self.color = [Color.WHITE for i in range(self.G.n)]
        self.dad = [-1 for i in range(self.G.n)]
        self.dfs_recursive(self.s)
    def dfs_recursive(self, v):
        """Recursive DFS step; stops early once t has been reached."""
        self.color[v] = Color.GRAY
        for edge in self.T[v]:
            w, weight = edge
            if self.color[w] == Color.WHITE:
                self.dad[w] = v
                # Store the weight symmetrically so output() can look it up either way.
                self.temp_wt[(v, w)] = self.temp_wt[(w, v)] = weight
                if w == self.t:
                    self.reached = True
                    return
                self.dfs_recursive(w)
        self.color[v] = Color.BLACK
    def create_mst(self):
        """Convert the accepted edge list into an adjacency list self.T."""
        T = [[] for i in range(self.G.n)]
        for edge in self.result:
            w, a, b = edge
            T[a].append((b, w))
            T[b].append((a, w))
        self.T = T
    def makeset(self, v):
        """Initialise vertex v as a singleton union-find set."""
        self.p[v] = -1
        self.h[v] = 0
    def find(self, v):
        """Return v's set representative, compressing the path along the way."""
        w = v
        S = []
        while(self.p[w] != -1):
            S.append(w)
            w = self.p[w]
        # Path compression: point every traversed vertex directly at the root.
        while(S):
            u = S.pop()
            self.p[u] = w
        return w
    def union(self, r1, r2):
        """Union by rank of the two set roots r1 and r2."""
        if self.h[r1] > self.h[r2]:
            self.p[r2] = r1
        elif self.h[r2] > self.h[r1]:
            self.p[r1] = r2
        else:  # h[r2] == h[r1]
            self.p[r2] = r1
            self.h[r1] = self.h[r1] + 1
| Hemal-Mamtora/CSCE629_algo_project | src/kruskals.py | kruskals.py | py | 3,570 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.IntEnum",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "graph.Graph",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "heap.EdgeHeap",
"line_number": 28,
"usage_type": "call"
}
] |
13829989656 | import collections
import itertools
import time
import random
from typing import Optional
import streamlit as st
import numpy as np
import pyaudio
from pydub import AudioSegment, silence
from audio_io import AudioIO
from audioplots import *
from containers import *
from layout import *
# PyAudio stream configuration; keys are named after pyaudio's own parameters.
ctx = {
    # names match pyaudio names
    "frames_per_buffer": 1024,  # Record in chunks of 1024 samples
    "format": pyaudio.paInt16,
    "channels": 2,
    "rate": 44100,  # Record at 44100 samples per second
}
def send_balloons_if_lucky():
    """With probability 1/101, celebrate by releasing Streamlit balloons."""
    if random.randint(0, 100):
        return
    st.balloons()
def get_audio_handle() -> AudioIO:
    """Return the session's AudioIO singleton.

    Initializing audio is expensive, so the handle is created once per
    Streamlit session and cached in session_state.
    """
    state = st.session_state
    if 'aio' not in state:
        state['aio'] = AudioIO(ctx)
    return state['aio']
def add_sound_to_clips(sound: AudioSegment):
    """Split *sound* on silence and prepend the resulting clips to the session's clip list."""
    if not isinstance(sound, AudioSegment):
        st.warning('display_nonsilence called with non-AudioSegment!')
        return
    with st.spinner('Splitting on silence...'):
        start = time.time()
        current_segments = silence.split_on_silence(sound,
                                                    min_silence_len=2000, # TODO: make user-configurable (see sidebar form)
                                                    silence_thresh=sound.dBFS-16, # FIXME: does this work?
                                                    keep_silence=100,
                                                    seek_step=1)
        end = time.time()
    # Show recording length, split time and clip count side by side.
    cols = st.columns(3)
    with cols[0]:
        st.caption('Recording time')
        st.markdown(f'{len(sound) / 1000}s')
    with cols[1]:
        st.caption('Processing Time')
        st.markdown(f'{end - start:.3f}s')
    with cols[2]:
        st.caption('New Clips Found')
        st.markdown(len(current_segments))
    all_clips = st.session_state['all_clips']
    with st.spinner('Creating clips...'):
        # Only the most recent clip is created selected; earlier ones are not.
        if len(current_segments) > 1:
            for segment in current_segments[:-1]:
                all_clips.appendleft(AudioClip(audio_segment=segment, selected=False))
        if len(current_segments) > 0:
            all_clips.appendleft(AudioClip(audio_segment=current_segments[-1], selected=True))
def draw_sidebar_with_preferences():
    """Render the (currently unused) sidebar form with recording preferences.

    NOTE(review): the entered values are only echoed back with st.write and
    never applied to add_sound_to_clips — presumably work in progress.
    """
    with st.sidebar:
        st.write('Recording preferences:')
        form = st.form(key='Submit')
        with form:
            min_silence_len_s = st.number_input(
                label="Minimum Silence Length (s)",
                min_value=0,
                value=2,
            )
            silence_thresh_dbfs = st.number_input(
                label="Silence Threshold (dBFS)",
                min_value=-200,
                max_value=3,
                value=-80,
            )
            approximate_bpm = st.number_input(
                label="Approximate Tempo (BPM; for tempo estimation)",
                min_value=0,
                max_value=300,
                value=100,
            )
            submitted = st.form_submit_button('Submit')
        # Debug output: show the submitted flag and entered values.
        st.write(submitted)
        st.write(min_silence_len_s)
        st.write(silence_thresh_dbfs)
# --- Top-level Streamlit script: builds the UI and reacts to the toggle button ---
# Persistent clip storage shared across reruns of this script.
if 'all_clips' not in st.session_state:
    st.session_state['all_clips'] = collections.deque(maxlen=100)  # TODO: remove maxLen?
st.set_page_config(
    page_title=None,
    page_icon=None,
    layout='wide',
    initial_sidebar_state='auto',
    menu_items=None
)
st.title('Muesli Practice Helper')
#draw_sidebar_with_preferences()
with st.spinner('Initializing Audio...'):
    aio = get_audio_handle()
toggled = st.button('Toggle Recording...')
sound_status = st.markdown('Sample Text')  # TODO: refactor this
# The button toggles between starting a recording and finishing/processing one.
if toggled:
    if aio.is_recording():
        # Button pressed while recording: stop, split on silence, show clips.
        sound_status.markdown('Checking most recent recording...')
        sound: Optional[AudioSegment] = aio.finish_recording()
        if sound:
            sound_status.markdown('Splitting most recent recording on silence...')
            add_sound_to_clips(sound)
            sound_status.markdown('Recording is ready!')
            send_balloons_if_lucky()
            draw_audio_clips(st.session_state['all_clips'])
        else:
            sound_status.markdown('How are you able to see this?')
    else:
        # Button pressed while idle: start a new recording.
        sound_status.markdown('Recording started...')
        aio.start_recording()
else:
    # Normal rerun without a button press: just report the current state.
    if aio.is_recording():
        sound_status.markdown('Recording in progress..')
    else:
        sound_status.markdown('Not recording')
        draw_audio_clips(st.session_state['all_clips'])
| phoneticsushi/muesli | streamlit_app.py | streamlit_app.py | py | 4,603 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyaudio.paInt16",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "streamlit.balloons",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "streamlit.sess... |
41179644496 | from binance.client import Client
from yaspin import yaspin
import os.path
from datetime import datetime
import pickle
import pandas as pd
import backtrader as bt
class databutler():
    """Loads market data from several sources and returns backtrader data feeds.

    Binance downloads are cached on disk as pickled backtrader feeds under
    ``<directory>/Binance_API/`` so repeated runs load instantly.
    """

    def __init__(self, directory):
        # Key-less Binance client: public endpoints suffice for historical klines.
        self.binanceClient = Client("", "")
        # Project-local base directory for cached data files.
        self.directory = directory

    def get_data(self, data_params):
        """Dispatch to the loader selected by data_params['data_source'].

        Supported sources: 'BINANCE', 'YAHOO', 'CSV'.  Raises ValueError for
        anything else (previously an unknown source crashed later with
        UnboundLocalError on the return).
        """
        source = data_params['data_source']
        if source == 'BINANCE':
            return self.get_data_BINANCEAPI(data_params=data_params)
        if source == 'YAHOO':
            return self.get_data_Yahoo(data_params=data_params)
        if source == 'CSV':
            return self.get_data_CSV(data_params=data_params)
        raise ValueError('Unknown data_source: %r' % (source,))

    def get_data_BINANCEAPI(self, data_params):
        """Return a backtrader feed of Binance klines, downloading and caching on first use."""
        spinner = yaspin()  # terminal loading animation while downloading
        data_details = [data_params['asset'], data_params['interval'],
                        str(data_params['fromdate'].timestamp()), str(data_params['todate'].timestamp())]
        # Cache key: asset, interval and the ISO dates of the requested range.
        from_day = str(datetime.fromtimestamp(float(data_details[2])))[0:10]
        to_day = str(datetime.fromtimestamp(float(data_details[3])))[0:10]
        file_ID = data_details[0] + '_' + data_details[1] + '_' + from_day + '_' + to_day
        print(f'Data: {file_ID}')
        filepath = self.directory + 'Binance_API/' + file_ID + '.dat'
        if os.path.isfile(filepath):
            print('Data already downloaded. Loading it.')
            with open(filepath, 'rb') as f:
                data = pickle.load(f)
        else:
            spinner.text = 'Downloading data from Binance...'
            spinner.start()
            klines = self.binanceClient.get_historical_klines(*data_details)
            klines_df = pd.DataFrame(klines)
            col_names = ['open time', 'open', 'high', 'low', 'close', 'volume', 'close time', 'quote asset volume',
                         'number of trades', 'taker buy base asset volume', 'taker buy quote asset volume',
                         'Can be ignored(see docu)']
            klines_df.columns = col_names
            spinner.stop()
            for col in col_names:
                klines_df[col] = klines_df[col].astype(float)
            # Binance 'open time' is in milliseconds; *1_000_000 yields nanoseconds for pandas.
            klines_df['datetime'] = pd.to_datetime(klines_df['open time'] * 1000000, infer_datetime_format=True)
            klines_df = klines_df.drop(
                ['open time', 'close time', 'quote asset volume', 'number of trades', 'taker buy base asset volume',
                 'taker buy quote asset volume', 'Can be ignored(see docu)'], axis=1, errors='ignore')
            klines_df = klines_df.set_index('datetime')
            # Optional scaling of all OHLC columns (e.g. to shift the price's decimal point).
            klines_df['open'] = klines_df['open'] * data_params['price_multiplier']
            klines_df['high'] = klines_df['high'] * data_params['price_multiplier']
            klines_df['low'] = klines_df['low'] * data_params['price_multiplier']
            klines_df['close'] = klines_df['close'] * data_params['price_multiplier']
            # Wrap the frame as a backtrader data feed and cache it.
            data = bt.feeds.PandasData(dataname=klines_df)
            print('Saving data.')
            with open(filepath, 'wb') as f:
                pickle.dump(data, f)
        return data

    def get_data_CSV(self, data_params):
        """CSV loading is not available yet; fail loudly instead of returning None."""
        raise NotImplementedError('CSV data source is not implemented yet.')

    def get_data_Yahoo(self, data_params):
        """Return a backtrader YahooFinanceData feed for the requested asset and range."""
        return bt.feeds.YahooFinanceData(dataname=data_params['asset'],
                                         fromdate=data_params['fromdate'],
                                         todate=data_params['todate'])
| webclinic017/coni_standard_backtest | Databutler.py | Databutler.py | py | 3,689 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "binance.client.Client",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "yaspin.yaspin",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.fromtimestamp",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "d... |
21141530709 | """
Player for solitaire.
Created on 19.10.2018
@author: Ruslan Dolovanyuk
"""
import enum
import checker
from constants import Colors
import pygame
# All player inputs understood by Player.actions(); built with the functional
# Enum API, so members are auto-numbered 1..8 in declaration order.
Actions = enum.Enum('Actions', 'ChangeZoneUp ChangeZoneDown ChangeRowUp ChangeRowDown ChangeCardUp ChangeCardDown Take Drop')
class Player:
    """Player class for solitaire: cursor drawing, speech feedback and actions.

    Zone indices used throughout: 0 recall, 1 deck, 2 open deck cards,
    3 house rows, 4 columns — NOTE(review): inferred from the zone NAME
    checks below; confirm against the board module.
    """

    def __init__(self, board, speech, phrases):
        """Initialize player class with the board, a speech engine and a phrase table."""
        self.board = board
        self.speech = speech
        self.phrases = phrases
        # Colour of the selection rectangle drawn around the current card.
        self.color = Colors.BLUE
        # Handlers tried in order for every incoming action.
        self.__actions = [self.__change_zone, self.__change_row, self.__change_card, self.__take, self.__drop]

    def reset(self):
        """Reset current variables at the start of a game."""
        self.current_zone = 0
        self.__took = False
        self.take_card = None

    def draw(self):
        """Draw the selection rectangle around the current cell or empty slot."""
        zone = self.board.zones[self.current_zone]
        if zone.if_empty():
            left, top = zone.get_coord_zero(zone.current_row)
        else:
            left, top = zone.get_coord_card(zone.current_row, zone.current_card)
        pygame.draw.rect(zone.zone, self.color, (left, top, self.board.card_x, self.board.card_y), 1)

    def speak(self, changed_zone=False):
        """Announce the current card; also the zone/row names when the zone changed."""
        name = self.phrases[self.board.zones[self.current_zone].NAME]
        row = ''
        if self.board.zones[self.current_zone].if_rows:
            row = self.phrases['column'] + ' ' + str(self.board.zones[self.current_zone].current_row + 1)
        self.__speak_card()
        if changed_zone:
            self.speech.speak(' '.join((name, row)))

    def __speak_card(self, card=None):
        """Speak additional information for *card* (default: the current card)."""
        if card is None:
            if not self.board.zones[self.current_zone].if_empty():
                card = self.board.zones[self.current_zone].get_card(self.board.zones[self.current_zone].current_card)
        if self.board.zones[self.current_zone].if_empty() and card is None:
            self.speech.speak(self.phrases['empty'])
        else:
            if card.status:
                # Numeric ranks are spoken as digits; face cards (and, with a
                # full 52-card deck, the ace) use the phrase table.
                if 52 == self.board.deck_count:
                    rate = str(card.rate_index) if 1 < card.rate_index < 11 else self.phrases[card.rate]
                else:
                    rate = str(card.rate_index) if card.rate_index < 11 else self.phrases[card.rate]
                if 'ace' == card.rate:
                    rate = self.phrases[card.rate]
                self.speech.speak(' '.join((rate, self.phrases[card.suit])))
            else:
                # Face-down card.
                self.speech.speak(self.phrases['close'])

    def actions(self, action):
        """Dispatch *action* to every handler; each handler filters on the Actions value."""
        zone = self.board.zones[self.current_zone]
        for method_action in self.__actions:
            method_action(action, zone)

    def __change_zone(self, action, zone):
        """Cycle the current zone up or down (wraps around 0..4)."""
        if Actions.ChangeZoneUp == action:
            if 4 == self.current_zone:
                self.current_zone = 0
            else:
                self.current_zone += 1
            self.speak(True)
        elif Actions.ChangeZoneDown == action:
            if 0 == self.current_zone:
                self.current_zone = 4
            else:
                self.current_zone -= 1
            self.speak(True)

    def __change_row(self, action, zone):
        """Move between rows of a multi-row zone; speaks 'border' at the edges."""
        if Actions.ChangeRowUp == action:
            if zone.if_rows:
                if len(zone.rows) == zone.current_row + 1:
                    self.speech.speak(self.phrases['border'])
                    self.__speak_card()
                else:
                    zone.current_row += 1
                    # Reset to the last (top-most) card of the new row.
                    zone.current_card = -1
                    self.speak()
        elif Actions.ChangeRowDown == action:
            if zone.if_rows:
                if 0 == zone.current_row:
                    self.speech.speak(self.phrases['border'])
                    self.__speak_card()
                else:
                    zone.current_row -= 1
                    zone.current_card = -1
                    self.speak()

    def __change_card(self, action, zone):
        """Move within a row of zone 4 (columns); only open cards are reachable upwards."""
        if Actions.ChangeCardUp == action and 4 == self.current_zone:
            if not zone.if_empty():
                if zone.get_card(0) == zone.get_card(zone.current_card):
                    self.speech.speak(self.phrases['border'])
                    self.__speak_card()
                elif zone.get_card(zone.current_card - 1).status:
                    zone.current_card -= 1
                    self.speak()
                else:
                    # The previous card is face down: cannot move onto it.
                    self.speech.speak(self.phrases['close'])
                    self.__speak_card()
        elif Actions.ChangeCardDown == action and 4 == self.current_zone:
            if not zone.if_empty():
                if zone.get_card(-1) == zone.get_card(zone.current_card):
                    self.speech.speak(self.phrases['border'])
                    self.__speak_card()
                else:
                    zone.current_card += 1
                    self.speak()

    def __take(self, action, zone):
        """Pick up a run of cards, or put a previously taken run down."""
        if Actions.Take == action:
            # Zones 0/1 interpret Take as redealing — handled by the drop helpers.
            result_recall = self.__drop_recall(action, zone)
            result_deck = self.__drop_deck(action, zone)
            if not result_recall and not result_deck:
                if zone.if_empty() and not self.__took:
                    return
                if self.__took:
                    # Second press: try to place the taken run in the target zone.
                    if 'house' == zone.NAME:
                        result = self.board.zones[3].take(self.__take_cards, self.__take_cards_list)
                        if result and len(self.__take_cards_list) > 0:
                            self.__open_card(self.__take_cards_list[-1])
                        self.__took = False
                        self.take_card.take = False
                    elif 'columns' == zone.NAME:
                        result = self.board.zones[4].take(self.__take_cards, self.__take_cards_list)
                        if result and len(self.__take_cards_list) > 0:
                            self.__open_card(self.__take_cards_list[-1])
                        self.__took = False
                        self.take_card.take = False
                    else:
                        # Invalid target: just cancel the take.
                        self.__took = False
                        self.take_card.take = False
                else:
                    # First press: take the run from the current card to the row end,
                    # but only if it alternates suit colours and descends in rank.
                    cards = zone.rows[zone.current_row] if zone.if_rows else zone.cards
                    card = zone.get_card(zone.current_card)
                    self.__take_cards = cards[cards.index(card):]
                    if checker.change_suits(self.__take_cards) and checker.rate_down(self.__take_cards):
                        self.__take_cards_list = cards
                        self.__took = True
                        self.take_card = card
                        self.take_card.take = True
                        self.board.sounds.play('take')
                    self.__speak_card()

    def __drop(self, action, zone):
        """Send the current (top) card to a matching house row, if legal."""
        if Actions.Drop == action:
            if zone.if_empty():
                return
            cards = zone.rows[zone.current_row] if zone.if_rows else zone.cards
            card = zone.get_card(zone.current_card)
            rate_index = card.rate_index
            # Only the last card of the row/pile can be dropped.
            if card == zone.get_card(-1):
                if 'ace' == card.rate:
                    # Aces start a new (empty) house row.
                    for row in range(len(self.board.zones[3].rows)):
                        if not self.board.zones[3].rows[row]:
                            self.board.zones[3].rows[row].append(cards.pop())
                            if cards:
                                self.__open_card(cards[-1])
                            else:
                                self.__speak_card()
                            return
                # Otherwise the card must continue a house row of the same suit,
                # one rank above the row's current top card.
                for row in range(len(self.board.zones[3].rows)):
                    row_cards = self.board.zones[3].rows[row]
                    if row_cards:
                        if card.suit == row_cards[-1].suit and rate_index - 1 == row_cards[-1].rate_index:
                            row_cards.append(cards.pop())
                            if cards:
                                self.__open_card(cards[-1])
                            else:
                                self.__speak_card()
                            return

    def __drop_recall(self, action, zone):
        """Handle Take in the recall zone: recycle the open cards back into the deck."""
        if 0 == self.current_zone and self.board.zones[1].if_empty():
            if zone.if_empty():
                self.__speak_card()
                return True
            # Move all open cards (zone 2) face down back under the recall pile,
            # then refill the deck (zone 1) from the recall pile.
            while not self.board.zones[2].if_empty():
                card = self.board.zones[2].cards.pop(0)
                self.board.zones[0].cards.append(card)
                self.__open_card(card, False)
            while not zone.if_empty():
                self.board.zones[1].cards.append(zone.cards.pop())
            self.board.sounds.play('deal')
            self.__speak_card()
            return True
        return False

    def __drop_deck(self, action, zone):
        """Handle Take in the deck zone: deal the next batch of open cards."""
        if 1 == self.current_zone:
            if zone.if_empty():
                self.__speak_card()
                return True
            # Park the currently open cards face down on the recall pile first.
            while not self.board.zones[2].if_empty():
                card = self.board.zones[2].cards.pop(0)
                self.board.zones[0].cards.append(card)
                self.__open_card(card, False)
            # Deal up to board.delivery cards from the deck, face up.
            for _ in range(self.board.delivery):
                if not zone.if_empty():
                    card = zone.cards.pop()
                    self.board.zones[2].cards.append(card)
                    self.__open_card(card)
            self.__speak_card()
            return True
        return False

    def __open_card(self, card, open_flag=True):
        """Flip *card* face up (or face down when open_flag is False), with sound."""
        card.status = open_flag
        self.board.sounds.play('open')
        if open_flag:
            self.__speak_card(card)
| DollaR84/solitaire | player.py | player.py | py | 10,168 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "enum.Enum",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "constants.Colors.BLUE",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "constants.Colors",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "pygame.draw.rec... |
22088012589 | import setuptools
# Read the long description from the README; pin the encoding so the build
# does not depend on the platform's default locale (e.g. cp1252 on Windows
# would fail on non-ASCII characters in the README).
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name='BraketLab',
    version='1.8.0',
    author="Audun Skau Hansen",
    author_email="audunsh4@gmail.com",
    description="Educational tool for learning quantum theory with Jupyter Notebooks",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.uio.no/audunsh/braketlab",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=["sympy", "numba", "evince", "bubblebox"],
)
| audunsh/braketlab | setup.py | setup.py | py | 742 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
22385044991 | ######################################################################
# Author: IK3D -- Issanou Kamardine
# License: GPL v3
######################################################################
# Add-on registration metadata read by Blender's add-on manager.
bl_info = {
    "name": "Scripts Management",
    "author": "IK3D",
    "version": (0, 2),
    "location": "Properties panel > Scene",
    "description": "Pin scripts as favourites",
    "category": "Favourites"}
import bpy
from bpy.types import Header, Menu, Panel
from bpy.app.translations import pgettext_iface as iface_
from bpy.app.translations import contexts as i18n_contexts
import fileinput
import sys
import requests
#Operator Add Favourites
class cheminOperator(bpy.types.Operator):
    """Rewrites an add-on script so its bl_info category becomes "Favourites".

    The operator edits the file at self.chemin in place: it replaces the
    "category" line inside bl_info and prepends a FAVOURIT = '<old category>'
    back-up line so the original category can be restored later.
    """
    bl_idname = "wm.get_chemin"
    bl_label = "Gert addons path"
    # Path of the add-on script to edit, filled in by the caller.
    chemin = bpy.props.StringProperty()
    def execute(self, context):
        if self.chemin == '':
            print("Hello world!")
        else:
            print("Hello world from %s!" % self.chemin)
        dynamic_path = self.chemin
        # New and old category holders.
        oldcatReaded = ""
        oldcat =""
        indexe_ligne = 0
        # Open the script file for read/write.
        # NOTE(review): this handle is never closed before the file is
        # reopened below — confirm and consider a with-block.
        fichier = open(dynamic_path, "r+")
        # First line tells us whether a FAVOURIT back-up already exists.
        lingne = fichier.readline()
        if "FAVOURIT" not in lingne:
            # Find the current category line inside bl_info.
            for ligne in fichier:
                indexe_ligne +=1
                if "category" in ligne:
                    oldcatReaded = ligne
                    # NOTE(review): the second replace() starts again from
                    # oldcatReaded, discarding the "}" removal above — the
                    # saved category keeps a trailing "}"; looks like a bug.
                    oldcat = oldcatReaded.replace("}", "")
                    oldcat = oldcatReaded.replace('\n', "")
                    # Remember whether the dict is closed on this same line.
                    if "}" in ligne:
                        fermer = 1
                    else:
                        fermer = 0
                    break
            # Helper: overwrite one line of a file by index.
            def replace_cat(file_name, line_num, text):
                lines = open(file_name, 'r').readlines()
                lines[line_num] = text
                out = open(file_name, 'w')
                out.writelines(lines)
                out.close()
            # Swap the category line, keeping the closing brace when needed.
            if fermer == False:
                replace_cat(dynamic_path, indexe_ligne,'    "category": "Favourites",\n')
            else:
                replace_cat(dynamic_path, indexe_ligne,'    "category": "Favourites"}\n')
            # Prepend the FAVOURIT back-up line unless it is already there.
            with open(dynamic_path, "r+") as fichier:
                first_line = fichier.readline()
                if first_line != ("FAVOURIT = "+"'" + oldcat + " '" + "\n" + "\n"):
                    lines = fichier.readlines()
                    fichier.seek(0)
                    fichier.write("FAVOURIT = "+"'" + oldcat + " '" + "\n" + "\n" )
                    fichier.write(first_line)
                    fichier.writelines(lines)
                    fichier.close()
        return{'FINISHED'}
# Module-level placeholder for the picked-up script path. The operators
# shadow it with their own locals; kept for backward compatibility.
dynamic_path = ""
#Operator remove from Favourites
class unchemOperator(bpy.types.Operator):
    """Remove the add-on whose file is at ``chemin`` from the favourites.

    Deletes the ``FAVOURIT = '...'`` backup line that ``wm.get_chemin``
    prepended to the file and writes the saved category back into the
    add-on's ``bl_info`` dict.
    """
    bl_idname = "wm.un_chemin"
    bl_label = "Remove from favourite"  # fixed typo ("favourit")
    chemin = bpy.props.StringProperty()  # absolute path of the add-on file

    def execute(self, context):
        if self.chemin == '':
            print("Hello world!")
        else:
            print("Hello world from %s!" % self.chemin)
        dynamic_path = self.chemin
        # Open the add-on script; the first line should be the backup
        # written by wm.get_chemin.
        fichier = open(dynamic_path, "r+")
        lingne = fichier.readline()

        def replace_efa(file_name, line_num, text):
            """Overwrite line ``line_num`` of ``file_name`` with ``text``."""
            lines = open(file_name, 'r').readlines()
            lines[line_num] = text
            out = open(file_name, 'w')
            out.writelines(lines)
            out.close()

        def replace_cat(file_name, line_num, text):
            """Overwrite line ``line_num`` of ``file_name`` with ``text``."""
            lines = open(file_name, 'r').readlines()
            lines[line_num] = text
            out = open(file_name, 'w')
            out.writelines(lines)
            out.close()

        indexe_ligne = 0
        # Whether the category line closes the bl_info dict; default to the
        # "closed" branch if no category line is found (avoids NameError).
        fermer = 1
        # The backup line embeds the original '"category": ...' text.
        if "category" in lingne:
            # Strip the backup-line decoration down to the raw category text.
            FAVOURITrezOld = lingne
            FAVOURITA = FAVOURITrezOld.replace("FAVOURIT = ", "")
            FAVOURITB = FAVOURITA.replace("'\n'", "")
            FAVOURIT = FAVOURITB.replace("'", "")
            # Remove the backup line ...
            replace_efa(dynamic_path, 0, '')
            # ... and the blank separator wm.get_chemin always writes after
            # it.  BUGFIX: the original guard
            # ``lingne == '\n'or '\n' + '\n'`` was constant-true, so the
            # removal was in fact unconditional; make that explicit.
            replace_efa(dynamic_path, 0, '')
            # Find the (now "Favourites") category line.
            for ligne in fichier:
                indexe_ligne += 1
                if "category" in ligne:
                    # BUGFIX: the original test ``"}" or " }" ... in ligne``
                    # was constant-true; actually check for a closing brace.
                    if "}" in ligne:
                        fermer = 1
                    else:
                        fermer = 0
                    break
            # Write the saved category back, re-adding the brace when the
            # line did not close the dict.
            if fermer == 0:
                replace_cat(dynamic_path, indexe_ligne - 2, FAVOURIT + "}")
            else:
                replace_cat(dynamic_path, indexe_ligne - 2, FAVOURIT)
        fichier.close()
        print("Vous avez enlevé un Favorit !")
        return {'FINISHED'}
# Operator: show only enabled add-ons.
class EnadonOperator(bpy.types.Operator):
    """Switch the add-on list to the 'Enabled' filter."""
    bl_idname = "wm.ena_adon"
    bl_label = "View Enabled add-on"

    def execute(self, context):
        wm = bpy.data.window_managers["WinMan"]
        wm.addon_support = {'OFFICIAL', 'COMMUNITY', 'TESTING'}
        wm.addon_filter = 'Enabled'
        return {'FINISHED'}
# Operator: show every add-on.
class AlladonOperator(bpy.types.Operator):
    """Switch the add-on list to the 'All' filter."""
    bl_idname = "wm.all_adon"
    bl_label = "View all add-on"

    def execute(self, context):
        wm = bpy.data.window_managers["WinMan"]
        wm.addon_support = {'OFFICIAL', 'COMMUNITY', 'TESTING'}
        wm.addon_filter = 'All'
        return {'FINISHED'}
class CheckeurdOperator(bpy.types.Operator):
    """Switch the add-on list to the 'Favourites' filter."""
    bl_idname = "wm.check_fav"
    bl_label = "check add-on"

    def execute(self, context):
        wm = bpy.data.window_managers["WinMan"]
        wm.addon_support = {'OFFICIAL', 'COMMUNITY', 'TESTING'}
        wm.addon_filter = 'Favourites'
        return {'FINISHED'}
class userOperator(bpy.types.Operator):
    """Switch the add-on list to the 'User' filter."""
    bl_idname = "wm.user_adon"
    bl_label = "View user add-on"

    def execute(self, context):
        wm = bpy.data.window_managers["WinMan"]
        wm.addon_support = {'OFFICIAL', 'COMMUNITY', 'TESTING'}
        wm.addon_filter = 'User'
        return {'FINISHED'}
# UI: panel shown in Properties > Scene that lists add-ons with pin buttons.
class ScriptsManagementPanel(bpy.types.Panel):
    """Creates a Panel in the scene context of the properties editor"""
    bl_label = "Scripts Management"
    bl_idname = "SCENE_PT_layout"
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "scene"

    # Nested menu kept from Blender's user-preferences code; its draw does
    # nothing beyond grabbing the layout.
    class USERPREF_MT_addons_dev_guides(Menu):
        bl_label = "Development Guides"

        # menu to open web-pages with addons development guides
        def draw(self, context):
            layout = self.layout

    # Map each add-on support level to the icon used for it.
    _support_icon_mapping = {
        'OFFICIAL': 'FILE_BLEND',
        'COMMUNITY': 'POSE_DATA',
        'TESTING': 'MOD_EXPLODE',
    }

    @staticmethod
    def is_user_addon(mod, user_addon_paths):
        # Return True when the module file lives under a user add-on
        # directory; user_addon_paths is filled lazily on first call.
        import os
        if not user_addon_paths:
            for path in (bpy.utils.script_path_user(),
                         bpy.utils.script_path_pref()):
                if path is not None:
                    user_addon_paths.append(os.path.join(path, "addons"))
        for path in user_addon_paths:
            if bpy.path.is_subdir(mod.__file__, path):
                return True
        return False

    # Add-on list layout.
    def draw(self, context):
        import os
        import addon_utils
        userpref = context.user_preferences
        # Modules of the currently enabled add-ons.
        used_ext = {ext.module for ext in userpref.addons}
        userpref_addons_folder = os.path.join(userpref.filepaths.script_directory, "addons")
        scripts_addons_folder = bpy.utils.user_resource('SCRIPTS', "addons")
        # Collect every known add-on module with its bl_info dict.
        addons = [(mod, addon_utils.module_bl_info(mod)) for mod in addon_utils.modules(refresh=True)]
        # Header row: filter buttons.
        layout = self.layout
        layout.label("Run Blender as admin for full permission",icon='LAYER_USED')
        split = layout.split(percentage=0.60, align=True,)
        # Highlight the Favourites button when that filter is active.
        if bpy.data.window_managers["WinMan"].addon_filter == 'Favourites':
            split.operator("wm.check_fav", icon='SPACE2',text="Favourites")
        else:
            split.operator("wm.check_fav", icon='SPACE3',text="Favourites")
        split.operator("wm.user_adon", text="User")
        split.operator("wm.ena_adon", icon='SAVE_AS',text="")
        split.operator("wm.all_adon", icon='BOOKMARKS',text="")
        # Search box.
        layout = self.layout
        layout.prop(context.window_manager, "addon_search", text="", icon='VIEWZOOM')
        row = layout.row()
        split = layout.split()
        col = split.column()
        # NOTE(review): `filter` shadows the builtin; left unchanged here.
        filter = context.window_manager.addon_filter
        search = context.window_manager.addon_search.lower()
        support = context.window_manager.addon_support
        # Initialized on demand by is_user_addon().
        user_addon_paths = []
        # Counter used to cap how many add-ons the 'All' view shows.
        addon_numb = 0
        for mod, info in addons:
            module_name = mod.__name__
            module_realpath = mod.__file__
            is_enabled = module_name in used_ext
            if info["support"] not in support:
                continue
            # Search matches against the add-on name or its author.
            if search and search not in info["name"].lower():
                if info["author"]:
                    if search not in info["author"].lower():
                        continue
                else:
                    continue
            # Check if the add-on should be visible with the current filter.
            if ((filter == "All") or
                    (filter == info["category"]) or
                    (filter == "Enabled" and is_enabled) or
                    (filter == "Disabled" and not is_enabled) or
                    (filter == "User" and (mod.__file__.startswith((scripts_addons_folder, userpref_addons_folder))))
                    ):
                # Limit the 'All' view to the first 10 visible add-ons.
                if bpy.data.window_managers["WinMan"].addon_filter == 'All' and addon_numb < 10:
                    # One boxed row per add-on: pin/unpin, name, enable toggle.
                    col_box = col.column()
                    box = col_box.box()
                    colsub = box.column()
                    row = colsub.row()
                    if info["category"] == "Favourites":
                        # Pinned: clicking unpins via wm.un_chemin.
                        row.operator("wm.un_chemin", icon='PINNED',emboss=False,text="").chemin = module_realpath
                        sub = row.row()
                        sub.label(info["name"], icon='SMALL_TRI_RIGHT_VEC')
                    else:
                        # Unpinned: clicking pins via wm.get_chemin.
                        row.operator("wm.get_chemin", icon='UNPINNED',emboss=False,text="").chemin = module_realpath
                        sub = row.row()
                        sub.label(info["name"],)
                        sub.operator("wm.addon_remove", text="", icon='PANEL_CLOSE',emboss=False).module = mod.__name__
                    if is_enabled:
                        row.operator("wm.addon_disable", icon='FILE_TICK', text="", emboss=False).module = module_name
                    else:
                        row.operator("wm.addon_enable", icon='CHECKBOX_DEHLT', text="", emboss=False).module = module_name
                    # Count toward the 'All'-view limit.
                    addon_numb +=1
                # Every other filter shows all matching add-ons, uncapped.
                if bpy.data.window_managers["WinMan"].addon_filter != 'All':
                    # Same boxed row as above.
                    col_box = col.column()
                    box = col_box.box()
                    colsub = box.column()
                    row = colsub.row()
                    if info["category"] == "Favourites":
                        row.operator("wm.un_chemin", icon='PINNED',emboss=False,text="").chemin = module_realpath
                        sub = row.row()
                        sub.label(info["name"], icon='SMALL_TRI_RIGHT_VEC')
                    else:
                        row.operator("wm.get_chemin", icon='UNPINNED',emboss=False,text="").chemin = module_realpath
                        sub = row.row()
                        sub.label(info["name"],)
                        sub.operator("wm.addon_remove", text="", icon='PANEL_CLOSE',emboss=False).module = mod.__name__
                    if is_enabled:
                        row.operator("wm.addon_disable", icon='FILE_TICK', text="", emboss=False).module = module_name
                    else:
                        row.operator("wm.addon_enable", icon='CHECKBOX_DEHLT', text="", emboss=False).module = module_name
def register():
    """Register the panel and every operator with Blender."""
    classes = (
        ScriptsManagementPanel,
        userOperator,       # view user add-ons
        CheckeurdOperator,  # show favourites
        AlladonOperator,    # show all add-ons
        unchemOperator,     # remove from favourites
        cheminOperator,     # add to favourites
        EnadonOperator,     # show enabled add-ons
    )
    for cls in classes:
        bpy.utils.register_class(cls)
def unregister():
    """Unregister everything registered by register()."""
    classes = (
        ScriptsManagementPanel,
        userOperator,
        CheckeurdOperator,
        AlladonOperator,
        unchemOperator,
        cheminOperator,
        EnadonOperator,
    )
    for cls in classes:
        bpy.utils.unregister_class(cls)
# Allow running the add-on directly from Blender's text editor.
if __name__ == "__main__":
    register()
| IIK3D/Blender-Scriptes-management | Scripts_Management.py | Scripts_Management.py | py | 15,086 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "bpy.types",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "bpy.props.StringProperty",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "bpy.props",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "bpy.types",
... |
72116070115 | """
Class for computing various metrics on a data set with a BayesNet Node object.
"""
import logging
import numpy as np
from .cpd.ogive import OgiveCPD
# Small numeric floor (kept for callers; not used directly in this module).
EPSILON = 1e-16

# Keys naming the metrics this module can compute via compute_metric().
MAP_ACCURACY_KEY = 'map_accuracy'
AUC_KEY = 'auc'
LOGLI_KEY = 'logli'
D_PRIME_KEY = 'd_prime'
NAIVE_KEY = 'naive'
METRICS_KEYS = {NAIVE_KEY, LOGLI_KEY, MAP_ACCURACY_KEY, AUC_KEY, D_PRIME_KEY}

LOGGER = logging.getLogger(__name__)
class Metrics(object):
    """ Class for computing various performance metrics based on the data and the parameters in a
    Node object. """

    def __init__(self, node):
        """ Initialize this object with a reference to a BayesNet Node object that holds the
        CPD, the data, and the parameters."""
        self.node = node

    def _check_binary_cpd(self, func_name):
        """ Raise a TypeError unless the node's CPD is an OgiveCPD (binary). """
        if not isinstance(self.node.cpd, OgiveCPD):
            # BUGFIX: the original mixed str.format with a literal %s
            # placeholder, leaving "%s" unformatted and silently dropping
            # the class argument.
            raise TypeError("{} only defined for OgiveCPDs not {}".format(
                func_name, self.node.cpd.__class__))

    @classmethod
    def _check_finite(cls, prob_true, *args):
        """ Check that all probabilities are finite; if not, remove those elements and corresponding
        elements from other positional args.

        :param np.ndarray prob_true: array to check for finiteness
        :param args: optional arguments to subselect based on isfinite(prob_true)
        :return: np.ndarray|tuple[np.ndarray]
        """
        if not np.all(np.isfinite(prob_true)):
            valid_idx = np.isfinite(prob_true)
            # BUGFIX: Logger.warn is a deprecated alias of Logger.warning.
            LOGGER.warning("%d non-finite prob corrects found; ignoring these interactions",
                           np.sum(~valid_idx))
            prob_true = prob_true[valid_idx]
            args = tuple([arg[valid_idx] for arg in args])
        if not len(args):
            return prob_true
        else:
            return (prob_true,) + args

    @staticmethod
    def compute_per_student_naive(reg_ids, corrects, is_held_out):
        """ Compute the per-student naive metrics on the training and test sets, based on predicting
        correct if the student had more corrects in the training set.  If no data in the training
        set exist for the student, predict correct.

        :param np.ndarray reg_ids: unique student identifier for each interaction
        :param np.ndarray[bool] corrects: correctness values for each interaction
        :param np.ndarray[bool] is_held_out: indicator whether an interaction is in the test set
        :return: per student naive on the training and test sets
        :rtype: float, float
        """
        if len(corrects) != len(reg_ids) or len(is_held_out) != len(reg_ids):
            # BUGFIX: ValueError does not %-format its arguments; build the
            # message explicitly.
            raise ValueError(
                "reg_ids ({}), corrects ({}), is_held_out ({}) must have same length".format(
                    len(reg_ids), len(corrects), len(is_held_out)))
        uniq_regids, reg_idxs = np.unique(reg_ids, return_inverse=True)
        num_reg_ids = len(uniq_regids)
        train_reg_idxs = reg_idxs[~is_held_out]
        test_reg_idxs = reg_idxs[is_held_out]
        train_corrects = corrects[~is_held_out]
        test_corrects = corrects[is_held_out]
        # Per-student counts of correct responses / total responses (train set).
        per_student_num_correct = np.bincount(train_reg_idxs, weights=train_corrects,
                                              minlength=num_reg_ids)
        per_student_num_responses = np.bincount(train_reg_idxs, minlength=num_reg_ids)
        # Predict correct iff at least half of the student's training
        # responses were correct (students with no training data get >= 0,
        # i.e. predict correct).
        pred_correct = (2 * per_student_num_correct >= per_student_num_responses)
        train_per_student_naive = np.mean(pred_correct[train_reg_idxs] == train_corrects)
        test_per_student_naive = np.mean(pred_correct[test_reg_idxs] == test_corrects)
        return train_per_student_naive, test_per_student_naive

    def compute_metric(self, metric_key, *args, **kwargs):
        """ Compute metric specified by the supplied key.

        :param str metric_key: key specifying the metric (see METRICS_KEYS)
        :return: the value of the metric
        :rtype: float
        """
        return getattr(self, 'compute_' + metric_key)(*args, **kwargs)

    def compute_naive(self):
        """ Compute the accuracy of predicting always correct or always incorrect,
        whichever is higher.  Defined for binary CPDs only.

        :return: a number between 0 and 1 specifying prediction accuracy
        :rtype: float
        """
        self._check_binary_cpd("Naive metric")
        fraction_correct = np.mean(np.array(self.node.data, dtype=float))
        return max(fraction_correct, 1. - fraction_correct)

    def compute_logli(self, avg=False):
        """ Compute the response log-likelihood (the value of the node's CPD given the stored data
        and parameters).

        :param bool avg: whether to normalize the log-likelihood by the size of the node's data
        :return: the sum of the log-likelihoods over the data points
        :rtype: float
        """
        log_li = self.node.compute_log_prob()
        if avg:
            log_li /= self.node.data.size
        return log_li

    def compute_map_accuracy(self):
        """ Compute the MAP accuracy (fraction of data points predicted correctly at the maximum
        of the binary probability distribution).  Defined for binary CPDs only.

        :return: MAP accuracy
        :rtype: float
        """
        self._check_binary_cpd("MAP accuracy")
        prob_true = self.node.cpd.compute_prob_true(**self.node.param_data)
        prob_true, data = self._check_finite(prob_true, self.node.data)
        return np.mean((prob_true > 0.5) == data)

    def compute_d_prime(self):
        """ Compute the d-prime statistic measuring separation between response probabilities
        conditioned on a true (positive) and false (negative) data points.
        Defined for binary CPDs only.

        :return: the d-prime statistic of distribution separation
        :rtype: float
        """
        self._check_binary_cpd("D prime")
        prob_true = self.node.cpd.compute_prob_true(**self.node.param_data)
        return self.d_prime_helper(self.node.data, prob_true)

    def compute_auc(self):
        """ Compute the area under curve (AUC) for the task of predicting binary labels
        based on the probabilities computed by some model.  The curve is the Receiver Operator
        Characteristic (ROC) curve, which plots the true positive rate vs. the false positive rate
        as one varies the threshold on the probabilities given by the model.  AUC is also equal to
        the probability that the model will yield a higher probability for a randomly chosen
        positive data point than for a randomly chosen negative data point.  Defined for binary
        CPDs only.

        NOTE: this assumes at least one positive and one negative data point (otherwise
        the notions of true positive rate and false positive rate do not make sense).

        :return: a number between 0 and 1 specifying area under the ROC curve
        :rtype: float
        """
        self._check_binary_cpd("AUC")
        prob_true = self.node.cpd.compute_prob_true(**self.node.param_data)
        return self.auc_helper(self.node.data, prob_true)

    @staticmethod
    def d_prime_helper(data, prob_true):
        """ Compute the d-prime metric (of the separation of probabilities associated with positive
        data labels and negative data labels).

        :param np.ndarray[bool] data: binary data values (positive/negative class labels)
        :param np.ndarray[float] prob_true: probability of positive label
        :return: d-prime metric
        :rtype: float
        """
        if len(prob_true) != len(data):
            raise ValueError('prob_true and data must have the same length')
        prob_true, data = Metrics._check_finite(prob_true, data)
        pc_correct = prob_true[data]
        pc_incorrect = prob_true[np.logical_not(data)]
        # Separation of the means, normalized by the pooled std deviation.
        mean_sep = np.mean(pc_correct) - np.mean(pc_incorrect)
        norm_const = np.sqrt(0.5 * (np.var(pc_correct) + np.var(pc_incorrect)))
        return mean_sep / norm_const

    @staticmethod
    def auc_helper(data, prob_true):
        """ Compute AUC (area under ROC curve) as a function of binary data values and predicted
        probabilities.  If data includes only positive or only negative labels, returns np.nan.

        :param np.ndarray[bool] data: binary data values (positive/negative class labels)
        :param np.ndarray[float] prob_true: probability of positive label
        :return: area under ROC curve
        :rtype: float
        """
        if len(prob_true) != len(data):
            raise ValueError('prob_true and data must have the same length')
        prob_true, data = Metrics._check_finite(prob_true, data)
        sorted_idx = np.argsort(prob_true)[::-1]
        sorted_prob_true = prob_true[sorted_idx]
        # Index of the last occurrence of each distinct probability value.
        unique_prob_true_idx = np.append(np.flatnonzero(np.diff(sorted_prob_true)),
                                         len(sorted_prob_true) - 1)
        x = data[sorted_idx]
        not_x = np.logical_not(x)
        # Compute cumulative sums of true positives and false positives.
        tp = np.cumsum(x)[unique_prob_true_idx].astype(float)
        fp = np.cumsum(not_x)[unique_prob_true_idx].astype(float)
        # The i'th element of tp (fp) is the number of true (false) positives
        # resulting from using the i'th largest rp as a threshold.  That is,
        # we predict correct if a response's rp is >= sorted_prob_true[i].
        # We want the first element to correspond to a threshold sufficiently
        # high to yield no predictions of correct.  The highest rp qualifies
        # as this highest threshold if its corresponding response is incorrect.
        # Otherwise, we need to add an artificial "highest threshold" at the
        # beginning that yields 0 true positives and 0 false positives.
        if tp[0] != 0.0:
            tp = np.append(0.0, tp)
            fp = np.append(0.0, fp)
        # Calculate true positive rate and false positive rate.
        # This requires at least 1 correct and 1 incorrect response.
        if not tp[-1]:
            return np.nan
        tpr = tp / tp[-1]
        if not fp[-1]:
            return np.nan
        fpr = fp / fp[-1]
        # BUGFIX/compat: np.trapz was removed in NumPy 2.0 (renamed to
        # np.trapezoid); support both.
        trapezoid = getattr(np, "trapezoid", None)
        if trapezoid is None:
            trapezoid = np.trapz
        return trapezoid(tpr, fpr)

    @staticmethod
    def online_perc_correct(correct, student_idx):
        """ For each interaction, compute the percent correct for the student's previous
        interactions.  The returned array will contain NaNs for each student's first interaction.

        :param np.ndarray[bool] correct: correctness of each interaction
        :param np.ndarray[int] student_idx: student index of each interaction
        :return: percent correct on previous interactions for this student
        :rtype: np.ndarray[float]
        """
        student_num_correct = np.zeros(np.max(student_idx) + 1)
        student_num_answered = np.zeros(np.max(student_idx) + 1)
        online_pc = np.nan * np.empty_like(correct, dtype=float)
        for i, c in enumerate(correct):
            j = student_idx[i]
            if student_num_answered[j]:
                online_pc[i] = student_num_correct[j] / float(student_num_answered[j])
            student_num_answered[j] += 1
            student_num_correct[j] += int(c)
        return online_pc
| Knewton/edm2016 | rnn_prof/irt/metrics.py | metrics.py | py | 11,126 | python | en | code | 58 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cpd.ogive.OgiveCPD",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "numpy.all",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.isfinite",
... |
71912287073 | import argparse
import subprocess
import decode
import os
import sys
def full_executable_path(invoked):
# From https://bugs.python.org/issue8557
# https://bugs.python.org/issue8557
# with subprocess.open, c:\windows\system32\curl.exe has precedence on the PATH environment variable for Windows 10
# which discards the path to the thirdparty directory
explicit_dir = os.path.dirname(invoked)
if explicit_dir:
path = [ explicit_dir ]
else:
path = os.environ.get('PATH').split(os.path.pathsep)
for dir in path:
full_path = os.path.join(dir, invoked)
if os.path.exists( full_path ):
return full_path
return invoked # Not found; invoking it will likely fail
def main():
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description="""Run CURL to fetch data from a Web Server using the environment variable 'CREDENTIALS' or %USERPROFILE%\_netrc
For Windows OS only""")
parser.add_argument("mediatype", action="store", help="Media type. Typically 'application/json' or 'text/csv'")
parser.add_argument("url", action="store", help="URL to fetch")
parser.add_argument("-o", "--output", dest="output", action="store", help="Output file path")
args = parser.parse_args()
curl_args = [full_executable_path("curl.exe"), '-c', 'cookies.txt', '-b', 'cookies.txt', '--retry', '5', '--no-buffer', '-f', '-k', '-H', 'Accept: ' + args.mediatype, '-H', 'X-Client: Datamart']
credentials=os.getenv("CREDENTIALS")
apikey=os.getenv("APIKEY")
if credentials:
curl_args += ['-u', decode.decode(credentials)]
elif apikey:
curl_args += ['-H', 'X-API-USER: ' + decode.decode(os.getenv("APIUSER")), '-H', 'X-API-KEY: ' + decode.decode(apikey)]
else:
curl_args += ['--netrc-file', os.getenv("USERPROFILE") + '\_netrc ']
output=args.output
if output:
curl_args += ['-o', output]
curl_args += [args.url]
# print(curl_args)
exit_code = subprocess.run(curl_args).returncode
if exit_code == 22:
print("on error (470/401): check the credentials", file=sys.stderr)
print("on error (400): check the REST API version", file=sys.stderr)
print("on error (404): check the domain name", file=sys.stderr)
print("Requested URL: "+ args.url, file=sys.stderr)
sys.exit(exit_code)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly with a non-zero status on Ctrl-C.
        sys.exit(1)
| CAST-Extend/com.castsoftware.aip.datamart | utilities/curl.py | curl.py | py | 2,519 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_nu... |
43032086095 | """
The Python script to test if the web page displays "Hello World"
"""
import urllib3
from bs4 import BeautifulSoup
def test_hello_world():
    """Fetch the local web server and verify the <h1> reads 'Hello World'."""
    pool = urllib3.PoolManager()
    page = pool.request("GET", "http://localhost")
    parsed = BeautifulSoup(page.data, "html.parser")
    heading = parsed.h1.text.strip()
    assert heading == "Hello World"
# Allow running this check directly, outside of pytest.
if __name__ == '__main__':
    test_hello_world()
| CSEC380-Group16/csec380-project | tests/test_act2.py | test_act2.py | py | 388 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib3.PoolManager",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
15461259378 | """ Debug GAN, generator and discriminator and save the models """
import os
import sys
import logging
import matplotlib.pyplot as plt
from constants import default_list
# Logger import
from dataset import logData
from make_models import logMod
from class_GAN import logGAN
# Debug import
from unbiased_metrics import shower_depth_lateral_width
from dataset import debug_data_pull, debug_shower
from make_models import debug_generator, debug_discriminator, compute_energy
from make_models import make_generator_model, make_discriminator_model
from class_GAN import test_noise, ConditionalGAN
#-------------------------------------------------------------------------------
# Toggle DEBUG-level logging for the whole debug run.
VERBOSE = False

# Dataset paths, relative to this folder.
path_list = [os.path.join('..', path) for path in default_list]

# Number of examples to show.
EXAMPLES = 8

# Shared stream handler so this module's logger and the imported module
# loggers all print to the console with the same format.
ch = logging.StreamHandler()
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger = logging.getLogger("DEBUGLogger")
logger.addHandler(ch)
logData.addHandler(ch)
logMod.addHandler(ch)
logGAN.addHandler(ch)
#-------------------------------------------------------------------------------
def debug(path_list, num_examples=EXAMPLES, verbose=False):
    """Debug subroutines for the training of the cGAN with dataset in path."""
    # Logging verbosity follows the verbose flag.
    if verbose :
        logger.setLevel(logging.DEBUG)
        logger.info('Logging level set on DEBUG.')
    else:
        logger.setLevel(logging.WARNING)
        logger.info('Logging level set on WARNING.')
    # Load a small sample of the dataset, aborting on a malformed dataset.
    try:
        train_data = debug_data_pull(path_list, num_examples, verbose=verbose)
    except AssertionError as error:
        print(f"An error occurred while loading the dataset: \n{error}")
        sys.exit()
    train_images = train_data[0]
    # Print the unbiased shower metrics, then run each debug subroutine.
    metrics = shower_depth_lateral_width(train_images)
    for metric_name in metrics:
        print(f"{metric_name} = {metrics[metric_name]}")
    debug_shower(train_images, verbose)
    debug_generator(test_noise, verbose=verbose)
    debug_discriminator(train_images, verbose)
def debug_cgan(gan, path_list, num_examples=EXAMPLES):
    """Debug of the cGAN methods.

    Restores the trained generator/discriminator, then displays and
    prints statistics for generated (fake) showers and dataset (real)
    showers.
    """
    logger.info("Testing the cGAN methods on noise and real samples.")
    noise = gan.generate_noise(num_examples)
    gan.generate_and_save_images(noise)
    gener, discr = gan.restore()
    # Fake showers: generate from noise and run the discriminator on them.
    predictions = gener(noise, training=False)
    decisions = discr(predictions, training=False)
    energies = compute_energy(predictions)
    k = 0
    num_examples = predictions.shape[0]
    side = predictions.shape[1]
    fig = plt.figure("Fake generated showers", figsize=(20,10))
    for i in range(num_examples):
        # noise[1] / noise[2] hold the conditioning energy / particle id;
        # decisions unpack as (real/fake score, energy, particle).
        print(f"Example {i+1}\t"
              +f"Primary particle = {int(noise[2][i][0])}\t"
              +f"Predicted particle = {decisions[2][i][0]}\n"
              +f"Initial energy = {noise[1][i][0]}\t"
              +f"Generated energy = {energies[i][0]}\t"
              +f"Predicted energy = {decisions[1][i][0]}\t"
              +f"Decision = {decisions[0][i][0]}\n")
        for j in range(side):
            k=k+1
            plt.subplot(num_examples, side, k)
            plt.imshow(predictions[i,j,:,:,0])
            plt.axis("off")
    plt.show()
    # True showers: pull real samples and run the discriminator on them.
    predictions = debug_data_pull(path_list, num_examples)
    images = predictions[0]
    decisions = discr(images, training=False)
    energies = compute_energy(images)
    k = 0
    fig = plt.figure("Real generated showers", figsize=(20,10))
    for i in range(num_examples):
        # NOTE(review): this section still prints noise[1]/noise[2] from the
        # fake-shower block as "Initial energy"/"Primary particle" — they do
        # not describe the real samples; confirm intent.
        print(f"Example {i+1}\t"
              +f"Primary particle = {int(noise[2][i][0])}\t"
              +f"Predicted particle = {decisions[2][i][0]}\n"
              +f"Initial energy = {noise[1][i][0]}\t"
              +f"Generated energy = {energies[i][0]}\t"
              +f"Predicted energy = {decisions[1][i][0]}\t"
              +f"Decision = {decisions[0][i][0]}\n")
        for j in range(side):
            k=k+1
            plt.subplot(num_examples, side, k)
            plt.imshow(images[i,j,:,:,0])
            plt.axis("off")
    plt.show()
    logger.info("Debug of the cGAN methods finished.")
if __name__=="__main__":
    # Run the dataset/model debug checks, then build and exercise the cGAN.
    debug(path_list, verbose=VERBOSE)
    generator = make_generator_model()
    discriminator = make_discriminator_model()
    cond_gan = ConditionalGAN(generator, discriminator)
    logger.info("The cGAN model has been built correctly.")
    cond_gan.summary()
    cond_gan.plot_model()
    logger.info("The cGAN model has been plotted correctly.")
    try:
        debug_cgan(cond_gan, path_list)
        logger.info("The work is done.")
    except Exception as error:
        print(error)
    # Detach handlers so repeated runs don't duplicate console output.
    logger.handlers.clear()
| Dario-Maglio/EM-shower-simulator-with-NN | em_shower_simulator/debug.py | debug.py | py | 4,726 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "constants.default_list",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "logging.StreamHandle... |
23808525747 | import sys
import json
import urllib.error
import argparse
from datetime import datetime as dt
from urllib.request import urlopen, urlretrieve
from threading import Thread
from cuter import *
sys.path.append('..')
from Database import *
class DownloadImage:
    """Download one reddit image, record it in the database, and crop it."""

    def __init__(self, image_info):
        # image_info: one submission dict from the pushshift API, with the
        # 'before' day-offset injected by ScrapReddit.
        self.id = image_info['id']
        # Destination paths for the original and the cropped copy.
        self.image_path = '/hologram/datasets/prophecy_apparatus/originals/{}.jpg'.format(self.id)
        self.cropped_image_path = self.image_path.replace('originals', 'cropped')
        self.url = image_info['url']
        self.author = image_info['author']
        self.sub_reddit = image_info['subreddit']
        self.before = image_info['before']

    def download_image(self):
        """Fetch the image from its URL; on success register and crop it.

        Network/encoding failures are reported to stdout and swallowed so
        one bad submission does not stop the scrape.
        """
        try:
            # Use a browser-like User-Agent: some hosts reject the default
            # urllib agent.
            opener = urllib.request.build_opener()
            opener.addheaders = [('User-Agent',
                                  'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]
            urllib.request.install_opener(opener)
            urllib.request.urlretrieve(self.url, self.image_path)
            print('Downloaded: {}'.format(self.url))
            self.add_to_database()
        except urllib.error.URLError as e:
            print('Error {} downloading {}'.format(e, self.url))
        except ConnectionResetError as e:
            print('Error {} downloading {}'.format(e, self.url))
        except UnicodeEncodeError as e:
            print('Error {} downloading {}'.format(e, self.url))

    def add_to_database(self):
        """Insert the image record, update the subreddit's resume position,
        and produce the 1080x1920 centre-cropped copy."""
        data_to_insert = {}
        data_to_insert['_id'] = self.id
        data_to_insert['sub_reddit'] = self.sub_reddit
        data_to_insert['url'] = self.url
        data_to_insert['author'] = self.author
        data_to_insert['date'] = dt.today()
        database = Database()
        database.insert_database(data_to_insert)
        # Remember how far back in time this subreddit has been scraped.
        data_to_update = {}
        data_to_update['_id'] = self.sub_reddit
        data_to_update['date'] = dt.today()
        data_to_update['before'] = self.before
        database = Database()
        database.update_last_download(data_to_update)
        # resize_and_crop comes from the cuter module (star import).
        resize_and_crop(self.image_path, self.cropped_image_path, (1080, 1920), 'middle')
class ScrapReddit:
    """Walk a subreddit backwards in time via the pushshift API and download
    every new ``.jpg`` submission.

    NOTE(review): ``scrap()`` loops forever; the process is expected to be
    stopped externally.
    """

    def __init__(self, sub_reddit, position):
        database = Database()
        self.sub_reddit = sub_reddit
        # Resume from the stored day offset when position is truthy,
        # otherwise start from today (0 days back).
        self.before = database.return_last_download(self.sub_reddit) if position else 0
        print(self.before)
        self.URL = 'https://api.pushshift.io/reddit/submission/search/?subreddit={}&sort=desc&size=1000'.format(
            self.sub_reddit)
        self.scrap()

    def scrap(self):
        """Fetch one-day windows of submissions, moving one day further back
        each pass, and download unseen .jpg images on worker threads."""
        while True:
            try:
                url_to_open = self.URL + '&before={}d&after={}d'.format(self.before, self.before + 1)
                url = urlopen(url_to_open)
                responses = json.loads(url.read())
                if len(responses['data']) > 0:
                    for resp in responses['data']:
                        database = Database()
                        if not database.check_if_image_exists(resp['id']) and '.jpg' in resp['url']:
                            resp['before'] = self.before
                            download_image = DownloadImage(resp)
                            # BUGFIX: pass the bound method itself as target.
                            # The original called it immediately and handed
                            # Thread the None result, so the thread never
                            # performed any work.
                            t = Thread(target=download_image.download_image)
                            t.start()
                self.before += 1
            except urllib.error.HTTPError as e:
                print('Error {}'.format(e))
if __name__ == '__main__':
    # Command-line entry point: choose the subreddit and whether to resume
    # from the last saved scrape position.
    parser = argparse.ArgumentParser(description='Scrap images from subreddits')
    parser.add_argument('-s', '--subreddit',
                        dest='subreddit',
                        action='store',
                        default='futureporn',
                        help='subreddit to scrap')
    parser.add_argument('-p', '--position',
                        dest='position',
                        action='store_true',
                        help='take the former position')
    args = parser.parse_args()
    scrap_reddit = ScrapReddit(args.subreddit, args.position)
| sandbenders/ProphecyApparatus | scrap_reddit/scrap_reddit.py | scrap_reddit.py | py | 4,101 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "urllib.error.request.build_opener",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "urlli... |
26575927806 | import sys
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.multioutput import MultiOutputClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
def load_data(database_filepath):
    """Read the messages table from an SQLite database.

    Returns the message texts (X), the label matrix (Y) and the label
    column names, where labels are all columns from the fifth onward.
    """
    engine = create_engine(f'sqlite:///{database_filepath}')
    df = pd.read_sql("SELECT * FROM data", engine)
    labels = df.iloc[:, 4:]
    X = df['message'].values
    Y = labels.values
    category_names = labels.columns.values
    return X, Y, category_names
def tokenize(text):
    '''
    INPUT:
    text - string to be tokenized
    OUTPUT:
    clean_tokens - list of tokens
    Splits the string with word_tokenize, then lemmatizes, lower-cases
    and strips each token.
    '''
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
def build_model():
    '''
    OUTPUT:
    cv - GridSearchCV wrapping a CountVectorizer -> TF-IDF ->
         multi-output random forest pipeline
    Builds the ML pipeline and the grid search over its parameters.
    '''
    steps = [
        ('vect', CountVectorizer(tokenizer=tokenize)),
        ('tfidf', TfidfTransformer()),
        ('clf', MultiOutputClassifier(RandomForestClassifier())),
    ]
    param_grid = {
        'tfidf__use_idf': (True, False),
        'clf__estimator__n_estimators': [50, 60, 70]
    }
    return GridSearchCV(Pipeline(steps), param_grid=param_grid)
def evaluate_model(model, X_test, Y_test, category_names):
    '''
    INPUT:
    model - trained model exposing .predict()
    X_test - input matrix of test dataset
    Y_test - results matrix of test dataset (binary, one column per category)
    category_names - category names (Y_test column headers)
    OUTPUT:
    df - per-category precision / recall / f1 DataFrame (also printed)
    Predicts the answers to X_test and compares the prediction to
    Y_test.  Generalized to any number of categories (previously
    hard-coded to 36) and guarded against division by zero when a
    category has no actual or predicted positives.
    '''
    Y_pred = np.asarray(model.predict(X_test))
    Y_true = np.asarray(Y_test)
    # Vectorized per-column counts replace the original O(rows*cols)
    # Python loop.
    tp = np.sum((Y_true == 1) & (Y_pred == 1), axis=0).astype(float)
    fp = np.sum((Y_true == 0) & (Y_pred == 1), axis=0).astype(float)
    fn = np.sum((Y_true == 1) & (Y_pred == 0), axis=0).astype(float)

    def _safe_div(num, den):
        # Return 0 where the denominator is zero instead of raising.
        return np.divide(num, den, out=np.zeros_like(num), where=den != 0)

    precision = _safe_div(tp, tp + fp)
    recall = _safe_div(tp, tp + fn)
    f1_score = _safe_div(2.0 * precision * recall, precision + recall)
    df = pd.DataFrame(
        {'precision': precision, 'recall': recall, 'f1_score': f1_score},
        index=category_names)
    print(df)
    return df
def save_model(model, model_filepath):
    '''
    INPUT:
    model - trained model
    model_filepath - path where model will be saved
    Pickles the model and stores it.  Uses a context manager so the
    file handle is closed deterministically (the original left the
    handle open for the garbage collector).
    '''
    with open(model_filepath, 'wb') as f:
        pickle.dump(model, f)
def main():
    '''
    Command-line entry point: load the data, train, evaluate and
    pickle the classifier.  Expects two arguments: the database path
    and the output pickle path.
    '''
    # Guard clause: bail out with usage help unless both paths given.
    if len(sys.argv) != 3:
        print('Please provide the filepath of the disaster messages database '\
              'as the first argument and the filepath of the pickle file to '\
              'save the model to as the second argument. \n\nExample: python '\
              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
        return

    database_filepath, model_filepath = sys.argv[1:]
    print('Loading data...\n DATABASE: {}'.format(database_filepath))
    X, Y, category_names = load_data(database_filepath)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)

    print('Building model...')
    model = build_model()

    print('Training model...')
    model.fit(X_train, Y_train)

    print('Evaluating model...')
    evaluate_model(model, X_test, Y_test, category_names)

    print('Saving model...\n MODEL: {}'.format(model_filepath))
    save_model(model, model_filepath)
    print('Trained model saved!')


if __name__ == '__main__':
    main()
| atomopa/udacity_data_scientist_project2 | models/train_classifier.py | train_classifier.py | py | 4,960 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlalchemy.create_engine",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "nltk.tokenize.word_tokenize",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "... |
20850779363 | import numpy as np
import cv2
import mss
import os
LABELS_PATH = os.path.dirname(__file__) + '/coco.names'
LABELS = open(LABELS_PATH).read().strip().split("\n")
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3), dtype="uint8")
class Frame:
# Wraps an image source — the default webcam (via cv2) or a screen
# region (via mss) — behind a single get_frame() accessor.
def __init__(self, source='webcam', screen_size=(800, 640)):
# source: 'webcam' or 'screen'.
# screen_size: (width, height) tuple, or a 'WIDTHxHEIGHT' string.
self.source_name = source
# Accept the screen size either as a 'WxH' string or as a tuple.
self.screen_size = (int(screen_size.split('x')[0]),
int(screen_size.split('x')[1])) if isinstance(screen_size, str) else screen_size
# cv2 capture device for the webcam, mss screen grabber otherwise.
self.source = cv2.VideoCapture(0) if source == 'webcam' else mss.mss()
def get_frame(self):
# Return one frame (numpy array) from the configured source.
if self.source_name == 'webcam':
_, image = self.source.read()
elif self.source_name == 'screen':
monitor = {"top": 0, "left": 0,
"width": self.screen_size[0], "height": self.screen_size[1]}
image = np.array(self.source.grab(monitor))
# NOTE(review): this conversion appears intended for the screen branch
# (mss grabs), yet uses COLOR_RGB2BGR — confirm branch scope and code.
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
return image
| SarperYurttas/objectDetectionWithYOLO | object_detection/utils.py | utils.py | py | 1,013 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"li... |
23353872700 | # -*-coding:utf-8 -*-
"""
Created on 2016-7-5
@author: Danny
DannyWork Project
"""
import socket
import threading
import time
import logging
import argparse
from utils import close_socket, parse_ping_data, reply_ping_data, get_python_version, start_data_transfer, \
PING_SENDING_START, TRANSFER_PREPARE, TRANSFER_READY
if get_python_version() == '2':
from exceptions import *
class ConnectionHold(threading.Thread):
"""
Keep one reverse connection alive and watch it for transfer requests.
"""
# Class-level defaults; real values are set in __init__.  The attribute
# named `socket` shares its name with the stdlib module, but methods
# still resolve the bare name `socket` to the module.
socket = None
secret = ''
target_ip = None
target_port = None
# NOTE(review): socket_timeout is not referenced anywhere in this class.
socket_timeout = 120
logger = None
def __init__(self, socket, secret, target_ip, target_port, log_level=logging.INFO):
super(ConnectionHold, self).__init__()
self.socket = socket
self.secret = secret
self.target_ip = target_ip
self.target_port = target_port
self.logger = logging.getLogger('Connection Holder')
self.logger.setLevel(log_level)
def create_target_socket(self):
# Open a TCP connection to the local forwarding target.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.target_ip, self.target_port))
return sock
def run(self):
# Allow up to 20 empty reads before giving up on the connection.
count = 20
try:
while count:
data = self.socket.recv(1024)
if not data:
count -= 1
continue
if data.startswith(PING_SENDING_START.encode('utf8')):
# Decrypt the ping payload
plain = parse_ping_data(data, self.secret)
# Echo the decrypted ping data back
self.socket.sendall(reply_ping_data(plain))
self.logger.debug('Received ping at {0}.'.format(self.socket))
elif data.startswith(TRANSFER_PREPARE.encode('utf8')):
# Transfer requested: connect to the local target, confirm
# readiness, and hand both sockets to the relay.
local_sock = self.create_target_socket()
self.socket.sendall(TRANSFER_READY.encode('utf8'))
tl, tr = start_data_transfer(local_sock, self.socket, self.secret)
self.logger.info('Ready to transfer data between {0} and {1}.'.format(self.socket, local_sock))
# tl.join()
break
else:
# Invalid data received; close the socket.
self.logger.warning('Invalid data received in {0}, closed.'.format(self.socket))
break
except Exception as e:
self.logger.warning('Error in ConnectionHold[{0}]: {1}, closed.'.format(self, e))
close_socket(self.socket)
self.logger.info('ConnectionHold thread for {0} quit.'.format(self.socket))
class RemoteConnect(threading.Thread):
"""
Open and maintain a pool of reverse connections to the remote endpoint.
NOTE(review): the original docstring mentioned a `pool` (deque) argument
whose maxlen caps the pool size, but __init__ takes no such parameter —
the limit is `max_connections`; the docstring appears outdated.
"""
# Remote endpoint configuration
ip = None
port = None
# Local forwarding target
target_ip = None
target_port = None
# Default maximum number of connections to maintain
default_max_connections = 5
# Connection-hold / transfer-watch threads
holding_threads = None
secret = ''
log_level = None
logger = None
def __init__(self, ip, port, target_ip, target_port, secret='', max_connections=None, log_level=logging.INFO):
super(RemoteConnect, self).__init__()
self.ip = ip
self.port = port
self.target_ip = target_ip
self.target_port = target_port
self.default_max_connections = max_connections or self.default_max_connections
self.holding_threads = []
self.secret = secret
self.log_level = log_level
self.logger = logging.getLogger('Remote Connector')
self.logger.setLevel(log_level)
def create_new_socket(self):
# Connect to the remote side and perform the ping handshake; returns
# the socket on success, None otherwise.
new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
new_sock.settimeout(1)
try:
new_sock.connect((self.ip, self.port))
# "Handshake" sequence:
# receive and decrypt the ping data
plain = parse_ping_data(new_sock.recv(1024), self.secret)
if not plain:
raise ConnectionError('No data received, closed.')
# send the decrypted ping data back
new_sock.sendall(reply_ping_data(plain))
# NOTE(review): unlike the call above, self.secret is not passed here —
# confirm parse_ping_data's default secret makes this intentional.
plain = parse_ping_data(new_sock.recv(1024))
except Exception as e:
self.logger.warning('Socket create error: {0}'.format(e))
else:
if plain == 'READY':
# Handshake complete; restore the process-wide default timeout.
new_sock.settimeout(socket.getdefaulttimeout())
return new_sock
self.logger.warning('No reply in {0}, give up.'.format(new_sock))
def start_holding_thread(self, socket):
holding_thread = ConnectionHold(socket, self.secret, self.target_ip, self.target_port, log_level=self.log_level)
holding_thread.start()
return holding_thread
def run(self):
while True:
# Prune holder threads that have finished.
for thread in self.holding_threads:
if not thread.is_alive():
self.holding_threads.remove(thread)
# Top up the pool until the configured maximum is reached.
if len(self.holding_threads) < self.default_max_connections:
new_sock = self.create_new_socket()
if new_sock:
self.logger.info('Connection ready in {0}.'.format(new_sock))
self.holding_threads.append(self.start_holding_thread(new_sock))
continue
time.sleep(5)
if __name__ == '__main__':
# Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--local',
default='127.0.0.1:22',
type=str,
help="Local address to be connected. Default is 127.0.0.1:22.")
parser.add_argument('-e', '--remote',
default='127.0.0.1:50067',
type=str,
help="Remote address to communicate with. Default is 127.0.0.1:50067.")
parser.add_argument('--log-level',
default='INFO',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
type=str,
help="Log level. Default is DEBUG.")
parser.add_argument('-s', '--secret',
default='nN31mnOq0ek4UBXxecl4WnLeCoYOfTQJ',
type=str,
help="Secret key for encryption.")
parser.add_argument('-t', '--timeout',
default=120,
type=int,
help="Socket timeout, default is 120.")
args = parser.parse_args()
# Set the process-wide default socket timeout.
socket.setdefaulttimeout(args.timeout)
# Resolve the log level name to a logging constant.
log_level = getattr(logging, args.log_level)
# Configure the log level and output format.
logging.basicConfig(level=log_level,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
remote_ip, remote_port = args.remote.split(':')
local_ip, local_port = args.local.split(':')
# Start the connection-pool thread and block until it exits.
t = RemoteConnect(remote_ip, int(remote_port), local_ip, int(local_port), secret=args.secret, log_level=log_level)
t.start()
t.join()
| manyunkai/dreverse | slave.py | slave.py | py | 7,337 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "utils.get_python_version",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "logging.INFO",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "logg... |
31900102139 | # -*- coding: utf-8 -*-
"""
@File : pathSum.py
@Author : wenhao
@Time : 2023/2/1 11:58
@LC :
"""
from Tree import TreeNode
from typing import List
class Solution:
    def pathSum(self, root: TreeNode, target: int) -> List[List[int]]:
        """Collect every root-to-leaf path whose node values sum to target."""
        result = []
        if root is None:
            return result
        trail = []

        def walk(node: TreeNode, running: int):
            # Extend the running sum and the current trail with this node.
            running += node.val
            trail.append(node.val)
            if running == target and node.left is None and node.right is None:
                # Matched at a leaf: snapshot the trail.
                result.append(list(trail))
            else:
                if node.left:
                    walk(node.left, running)
                if node.right:
                    walk(node.right, running)
            # Backtrack before returning to the parent.
            trail.pop()

        walk(root, 0)
        return result
| callmewenhao/leetcode | offer/二叉树/pathSum.py | pathSum.py | py | 840 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Tree.TreeNode",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "Tree.TreeNode",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 14,
"usage_type": "name"
}
] |
71377869154 | import wx
from wx.lib.newevent import NewCommandEvent
import wx.lib.agw.cubecolourdialog as colordialog
from .constants import TEXT_COLOR
from .icons import ICON_BRUSH_CHECKERBOARD
button_cmd_event, EVT_COLORPICKER_BUTTON = NewCommandEvent()
class ColorPickerButton(wx.Control):
"""
Color picker widget for selecting an RGBA color.
:param wx.Window `parent`: parent window. Must not be ``None``.
:param integer `id`: window identifier. A value of -1 indicates a default value.
:param string `label`: the label displayed beside the color select button.
:param tuple `default`: tuple of the default RGBA color.
"""
def __init__(self, parent, id=wx.ID_ANY, label="", default=(213, 219, 213, 177),
pos=wx.DefaultPosition, size=wx.Size(400, -1), style=wx.NO_BORDER,
*args, **kwargs):
wx.Control.__init__(self, parent, id, pos, size, style, *args, **kwargs)
self.parent = parent
self.cur_color = default
self.label = label
self.padding = (5, 10, 5, 10)
self.buffer = None
self.size = None
self.mouse_in = False
self.mouse_down = False
self.focused = False
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_ERASE_BACKGROUND, lambda x: None)
self.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus)
self.Bind(wx.EVT_KILL_FOCUS, self.OnKillFocus)
self.Bind(wx.EVT_LEAVE_WINDOW, self.OnMouseLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self.OnMouseEnter)
self.Bind(wx.EVT_LEFT_DOWN, self.OnMouseDown)
self.Bind(wx.EVT_LEFT_UP, self.OnMouseUp)
self.Bind(wx.EVT_SIZE, self.OnSize)
def OnPaint(self, event):
wx.BufferedPaintDC(self, self.buffer)
def OnSize(self, event):
size = self.GetClientSize()
# Make sure size is at least 1px to avoid
# strange "invalid bitmap size" errors.
if size[0] < 1:
size = (1, 1)
self.buffer = wx.Bitmap(*size)
self.UpdateDrawing()
def UpdateDrawing(self):
dc = wx.MemoryDC()
dc.SelectObject(self.buffer)
dc = wx.GCDC(dc)
self.OnDrawBackground(dc)
self.OnDrawWidget(dc)
del dc # need to get rid of the MemoryDC before Update() is called.
self.Refresh()
self.Update()
def OnDrawBackground(self, dc):
dc.SetBackground(wx.Brush(self.parent.GetBackgroundColour()))
dc.Clear()
def OnDrawWidget(self, dc):
fnt = self.parent.GetFont()
dc.SetFont(fnt)
dc.SetPen(wx.TRANSPARENT_PEN)
w, h = self.GetSize()
txt_w, txt_h = dc.GetTextExtent(self.label)
txt_x = self.padding[3]
txt_y = self.padding[0]
txt_w = txt_w + self.padding[1] + self.padding[3]
dc.SetBrush(wx.Brush(ICON_BRUSH_CHECKERBOARD.GetBitmap()))
dc.DrawRoundedRectangle(txt_w, 0, w-txt_w, h, 4)
dc.SetBrush(wx.Brush(wx.Colour(self.cur_color)))
dc.DrawRoundedRectangle(txt_w, 0, w-txt_w, h, 4)
# Draw text
if self.mouse_down or self.focused or self.mouse_in:
color = wx.Colour(TEXT_COLOR).ChangeLightness(120)
else:
color = wx.Colour(TEXT_COLOR)
dc.SetTextForeground(color)
dc.DrawText(self.label, int(txt_x), int(txt_y))
def OnSetFocus(self, event):
self.focused = True
self.Refresh()
def OnKillFocus(self, event):
self.focused = False
self.Refresh()
def OnMouseEnter(self, event):
self.mouse_in = True
self.UpdateDrawing()
def OnMouseLeave(self, event):
self.mouse_in = False
self.UpdateDrawing()
def OnMouseDown(self, event):
self.mouse_down = True
self.SetFocus()
self.UpdateDrawing()
def OnMouseUp(self, event):
self.mouse_down = False
self.ShowDialog()
self.SendButtonEvent()
self.UpdateDrawing()
def SendButtonEvent(self):
wx.PostEvent(self, button_cmd_event(id=self.GetId(), value=self.cur_color))
def ShowDialog(self):
self.color_data = wx.ColourData()
self.color_data.SetColour(self.cur_color)
self.color_dialog = colordialog.CubeColourDialog(None, self.color_data)
if self.color_dialog.ShowModal() == wx.ID_OK:
self.color_data = self.color_dialog.GetColourData()
self.cur_color = self.color_data.GetColour()
self.color_dialog.Destroy()
def DoGetBestSize(self):
font = wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc = wx.ClientDC(self)
dc.SetFont(font)
txt_w, txt_h = dc.GetTextExtent(self.label)
size = (self.padding[3] + txt_w + self.padding[1],
self.padding[0] + txt_h + self.padding[2])
return wx.Size(size)
| GimelStudio/gswidgetkit | gswidgetkit/color_picker.py | color_picker.py | py | 4,863 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "wx.lib.newevent.NewCommandEvent",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "wx.Control",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "wx.ID_ANY",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "wx.Defa... |
13140445812 | #!/usr/bin/env python3
import scapy.all as scapy
import time
import sys
import argparse
def get_arguments():
    """Parse the target and spoof IPs from the command line; both required."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '-target', dest='target_ip', help='Target IP')
    parser.add_argument('-s', '-spoof', dest='spoof_ip', help='Spoof IP')
    options = parser.parse_args()
    # parser.error() exits, so these guards are mutually exclusive anyway.
    if not options.target_ip:
        parser.error("[-] Please specify a target IP.")
    if not options.spoof_ip:
        parser.error("[-] Please specify the Spoof IP.")
    return options
def get_mac(ip):
    """Resolve *ip* to a MAC address via a broadcast ARP request."""
    query = scapy.Ether(dst="ff:ff:ff:ff:ff:ff") / scapy.ARP(pdst=ip)
    answered = scapy.srp(query, timeout=1, verbose=False)[0]
    # First answered pair -> received packet -> hardware source address.
    return answered[0][1].hwsrc
def spoof(target_ip, spoof_ip):
    """Send a forged ARP reply telling *target_ip* that we own *spoof_ip*."""
    forged = scapy.ARP(op=2, pdst=target_ip,
                       hwdst=get_mac(target_ip), psrc=spoof_ip)
    scapy.send(forged, verbose=False)
def restore(dest_ip, src_ip):
    """Re-announce the genuine MAC of *src_ip* to *dest_ip* (undo spoofing)."""
    packet = scapy.ARP(op=2,
                       pdst=dest_ip, hwdst=get_mac(dest_ip),
                       psrc=src_ip, hwsrc=get_mac(src_ip))
    scapy.send(packet, verbose=False)
# print(packet.show())
# print(packet.summary())
sent_packets_count = 0
options = get_arguments()
try:
while True:
spoof(options.target_ip, options.spoof_ip)
spoof(options.spoof_ip, options.target_ip)
sent_packets_count = sent_packets_count + 2
print('\r[+] Packets sent:' + str(sent_packets_count), end="")
sys.stdout.flush()
time.sleep(2)
except KeyboardInterrupt:
print("\n[-] Detected CTR + C ...... Quitting.")
restore(options.target_ip, options.spoof_ip)
restore(options.spoof_ip, options.target_ip) | userbarbu/hk4ing-tools | arp_spoof.py | arp_spoof.py | py | 1,851 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "scapy.all.ARP",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scapy.all",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "scapy.all.Ether",
... |
21143693469 | """
Watch global hotkeys.
Created on 15.01.2017
@author: Ruslan Dolovanyuk
"""
import logging
import keyboard
class Hotkeys:
    """Register system-wide hotkeys and report which one was pressed."""

    def __init__(self, config, generals):
        """Bind one global hotkey per attribute of *config*."""
        self.log = logging.getLogger()
        self.log.info('initialize hotkeys...')
        self.config = config
        self.generals = generals
        # Each config attribute maps an action name to a key combination.
        for name, combo in self.config.__dict__.items():
            keyboard.add_hotkey(combo, self.check, (name,))

    def clear(self):
        """Unregister every hotkey from the system."""
        self.log.info('clear all hotkeys...')
        keyboard.clear_all_hotkeys()

    def check(self, key):
        """Callback fired when the hotkey bound to *key* was pressed."""
        self.log.info('catch hotkey %s: %s' % (key, self.config.__dict__[key]))
        if 'quit' == key:
            self.generals[key] = True
        else:
            self.generals['text'] = key
| DollaR84/SARA | hotkeys.py | hotkeys.py | py | 936 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "keyboard.add_hotkey",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "keyboard.clear_all_hotkeys",
"line_number": 33,
"usage_type": "call"
}
] |
19466941066 | # -*- coding: utf-8 -*-
"""
@file
@brief Module *code_beatrix*.
.. faqref::
:title: Pourquoi Python?
`Python <https://www.python.org/>`_
est un langage de programmation très répandu aujourd'hui
qui fut choisi à l'`ENSAE <http://www.ensae.fr/ensae/fr/>`_ en
2005 pour remplacer le `C++ <https://fr.wikipedia.org/wiki/C%2B%2B>`_.
Dès la première année, il est apparu que ce nouveau langage permettait
aux étudiants de mettre leurs idées plus rapidement en forme.
Les opinions ont commencé alors un peu à changer à propos de la programmation.
Il est très rare maintenant qu'un étudiant quitte une grande école
d'ingénieurs sans programmer.
Il a été choisi pour trois raisons. La première est sa syntaxe
car il oblige les dévelopeurs à aligner leurs instructions
ce qui rend les programmes plus lisibles.
La seconde parce que sa `grammaire <https://docs.python.org/3/reference/grammar.html>`_
est une des plus courte (voir aussi
`The Python Language Reference <https://docs.python.org/3/reference/>`_).
Enfin, beaucoup de librairies existantes mais codées en C++ étaient déjà
disponibles à l'époque. 10 ans plus tard, le langage est quasi incontournable
dès qu'on touche au traitement de données.
"""
import os
__version__ = "0.6.674"
__author__ = "Xavier Dupré"
__github__ = "https://github.com/sdpython/code_beatrix"
__url__ = "http://www.xavierdupre.fr/app/code_beatrix/helpsphinx/"
__license__ = "MIT License"
__blog__ = os.path.abspath(
os.path.join(os.path.dirname(__file__), "rss_blog_list.xml"))
def _setup_hook(add_print=False, unit_test=False):
"""
if this function is added to the module,
the help automation and unit tests call it first before
anything goes on as an initialization step.
It should be run in a separate process.
@param add_print print *Success: _setup_hook*
@param unit_test used only for unit testing purpose
"""
# we can check many things, needed module
# any others things before unit tests are started
if add_print:
print("Success: _setup_hook")
def check(log=False, kind=None, fLOG=None):
    """
    Checks the library is working; raises an exception on failure.

    @param log if True, display information, otherwise
    @param kind None or ``'scratch'`` or ``'video'``
    @param fLOG logging function
    @return True when every requested check passes
    """
    ok = True
    if kind in (None, "scratch"):
        from .scratchs import check as check_sc
        ok &= check_sc()
    if kind in (None, "video"):
        from .art.video import check as check_vid
        ok &= check_vid(fLOG=fLOG)
    return ok
def load_ipython_extension(ip):
    """
    to allow the call ``%load_ext code_beatrix``

    @param ip from ``get_ipython()``
    """
    from .ipythonhelper.magic_scratch import register_scratch_magics as _register
    _register(ip)
| sdpython/code_beatrix | src/code_beatrix/__init__.py | __init__.py | py | 3,006 | python | fr | code | 1 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
20522260254 | import pytest
def test_ctypes_cdll_unknown_dll(pyi_builder, capfd):
# Freezing a program that loads a non-existent DLL must fail at run
# time, and the bootloader must report the failed load on stderr.
with pytest.raises(pytest.fail.Exception, match="Running exe .* failed"):
pyi_builder.test_source(
"""
import ctypes
ctypes.cdll.LoadLibrary('non-existing-2017')
"""
)
out, err = capfd.readouterr()
# The loader diagnostic is printed to stderr, not raised in Python.
assert "Failed to load dynlib/dll" in err
| pyinstaller/pyinstaller | tests/functional/test_runtime.py | test_runtime.py | py | 386 | python | en | code | 10,769 | github-code | 1 | [
{
"api_name": "pytest.raises",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pytest.fail",
"line_number": 5,
"usage_type": "attribute"
}
] |
5411922401 | import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from baselines.common.schedules import LinearSchedule
from baselines import logger
from model import NoisyDistDuelingConv, NoisyDistDuelingMLP, NoisyDuelingConv, NoisyDuelingMLP
from replay_buffer import ReplayBuffer_NStep, PrioritizedReplayBuffer_NStep, PrioritizedReplayBuffer
class TestAgent(object):
# Double-DQN agent built on a noisy dueling MLP, with online and target
# networks and per-sample losses for prioritized replay.
def __init__(self, ob_shape, num_action, args):
self.args = args
self.num_action = num_action
self.ob_shape = ob_shape
# Tensor constructors; switched to their CUDA variants when requested.
self.dtype = torch.FloatTensor
self.atype = torch.LongTensor
if self.args.cuda:
self.dtype = torch.cuda.FloatTensor
self.atype = torch.cuda.LongTensor
# self.model = NoisyDistDuelingConv(self.nb_atoms, ob_shape[0], num_action, self.dtype, args.sigma_init)
# self.target_model = NoisyDistDuelingConv(self.nb_atoms, ob_shape[0], num_action, self.dtype, args.sigma_init)
# self.model = NoisyDuelingConv(ob_shape[0], num_action, self.dtype, args.sigma_init)
# self.target_model = NoisyDuelingConv(ob_shape[0], num_action, self.dtype, args.sigma_init)
# Online and target networks; sigma_init is hard-coded to 0.17 here
# instead of args.sigma_init used by the commented variants above.
self.model = NoisyDuelingMLP(ob_shape[0], num_action, self.dtype, 0.17)
self.target_model = NoisyDuelingMLP(ob_shape[0], num_action, self.dtype, 0.17)
if self.args.cuda:
self.model.cuda()
self.target_model.cuda()
self.optimizer = optim.Adam(self.model.parameters(), lr=self.args.lr, eps=self.args.adam_eps)
# reduce=False keeps per-sample losses so replay priorities can be set.
self.criterion = nn.MSELoss(reduce=False)
self.huber_loss = nn.SmoothL1Loss(reduce=False)
def sample_noise(self):
# Resample the noisy-layer parameters of both networks.
self.model.sample_noise()
self.target_model.sample_noise()
def update_target(self):
# Hard-copy the online weights into the target network.
self.target_model.load_state_dict(self.model.state_dict())
def act(self, ob):
# Greedy (argmax-Q) action for a single observation.
ob_var = Variable(torch.from_numpy(ob).contiguous().type(self.dtype)).view(-1, *self.ob_shape)
q_out = self.model(ob_var)
_, deterministic_actions = q_out.data.max(1)
out = deterministic_actions.cpu().numpy().astype(np.int32).reshape(-1)
return out[0]
def update(self, obs, actions, rewards, next_obs, dones, weights):
# Double-DQN MSE update; `weights` is accepted but unused here.
# NOTE(review): builds plain CPU FloatTensors and hard-codes
# view(-1, 4) (CartPole-like shapes) even when CUDA dtypes are
# configured — confirm intended only for the CPU test setting.
obs = Variable(torch.from_numpy(obs).type(torch.FloatTensor)).view(-1, 4)
next_obs = Variable(torch.from_numpy(next_obs).type(torch.FloatTensor)).view(-1, 4)
dones = Variable(torch.from_numpy(dones.astype(float)).type(torch.FloatTensor)).view(-1, 1)
rewards = Variable(torch.from_numpy(rewards).type(torch.FloatTensor)).view(-1, 1)
actions = Variable(torch.from_numpy(actions.astype(int)).type(torch.LongTensor)).view(-1, 1)
# Compute Bellman loss -> DDQN
q_next = self.target_model(next_obs).detach()
_, best_actions = self.model(next_obs).detach().max(1)
q_next_best = q_next.gather(1, best_actions.view(-1, 1))
q_next_best_rhs = rewards + self.args.gamma * q_next_best * (1 - dones)
q = self.model(obs)
q = q.gather(1, actions).squeeze(1)
# NOTE(review): q is (N,) while q_next_best_rhs is (N, 1) — MSELoss
# will broadcast to (N, N); verify the shapes are as intended.
loss = self.criterion(q, q_next_best_rhs)
# Step optimizer
self.optimizer.zero_grad()
loss.mean().backward()
self.optimizer.step()
return loss.data.numpy().flatten()
def test_update(self, obs, actions, rewards, obs_next, dones, weights):
# Alternative DDQN update: Huber loss, importance weights, and
# gradient-norm clipping; returns per-sample TD errors.
obs = Variable(torch.from_numpy(obs).type(self.dtype)).view(-1, *self.ob_shape)
obs_next = Variable(torch.from_numpy(obs_next).type(self.dtype)).view(-1, *self.ob_shape)
weights = Variable(torch.from_numpy(weights).type(self.dtype)).view(-1, 1)
actions = Variable(torch.from_numpy(actions.astype(int)).type(self.atype)).view(-1, 1)
rewards = torch.from_numpy(rewards).type(self.dtype).view(-1, 1)
dones = torch.from_numpy(dones.astype(float)).type(self.dtype).view(-1, 1)
#
online_q = self.model(obs)
online_q_selected = online_q.gather(1, actions)
# DDQN
next_online_q = self.model(obs_next)
_, next_online_action = next_online_q.data.max(1)
next_target_q = self.target_model(obs_next).data
next_target_best = next_target_q.gather(1, next_online_action.view(-1, 1))
targets = rewards + (1.0 - dones) * self.args.gamma * next_target_best
# Error
td_error = online_q_selected.data - targets
errors = F.smooth_l1_loss(online_q_selected, Variable(targets), reduce=False)
weighted_error = (errors * weights).mean()
#
self.optimizer.zero_grad()
weighted_error.backward()
nn.utils.clip_grad_norm(self.model.parameters(), self.args.grad_norm_clipping)
self.optimizer.step()
return td_error.cpu().numpy().flatten()
def learn(env, args):
ob = env.reset()
ob_shape = ob.shape
num_action = int(env.action_space.n)
agent = TestAgent(ob_shape, num_action, args)
replay_buffer = PrioritizedReplayBuffer(args.buffer_size, alpha=args.prioritized_replay_alpha)
args.prioritized_replay_beta_iters = args.max_timesteps
beta_schedule = LinearSchedule(args.prioritized_replay_beta_iters,
initial_p=args.prioritized_replay_beta0,
final_p=1.0)
episode_rewards = [0.0]
saved_mean_reward = None
n_step_seq = []
agent.sample_noise()
agent.update_target()
for t in range(args.max_timesteps):
action = agent.act(ob)
new_ob, rew, done, _ = env.step(action)
replay_buffer.add(ob, action, rew, new_ob, float(done))
ob = new_ob
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > args.learning_starts and t % args.replay_period == 0:
experience = replay_buffer.sample(args.batch_size, beta=beta_schedule.value(t))
(obs, actions, rewards, obs_next, dones, weights, batch_idxes) = experience
agent.sample_noise()
kl_errors = agent.update(obs, actions, rewards, obs_next, dones, weights)
replay_buffer.update_priorities(batch_idxes, np.abs(kl_errors) + 1e-6)
if t > args.learning_starts and t % args.target_network_update_freq == 0:
agent.update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and args.print_freq is not None and len(episode_rewards) % args.print_freq == 0:
print('steps {} episodes {} mean reward {}'.format(t, num_episodes, mean_100ep_reward)) | dai-dao/Rainbow-Net-Pytorch | test_atari.py | test_atari.py | py | 6,772 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "torch.FloatTensor",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "torch.LongTensor",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.cuda",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.cu... |
21003159623 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
# Adds the nullable `image_url` URLField (max 255 chars) to Organization.
dependencies = [
('organizations', '0008_address_timezone'),
]
operations = [
migrations.AddField(
model_name='organization',
name='image_url',
field=models.URLField(max_length=255, null=True),
),
]
| getcircle/services | organizations/migrations/0009_organization_image_url.py | 0009_organization_image_url.py | py | 424 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AddField",
"line_number": 14,
"usage_type": "call"
},
{
... |
21521899393 | from typing import List
class Solution:
    def containsDuplicate(self, nums: List[int]) -> bool:
        """Return True if any value appears at least twice in *nums*."""
        seen = set()
        for value in nums:
            # Early exit on the first repeated value.
            if value in seen:
                return True
            seen.add(value)
        return False
sol = Solution()
result = sol.containsDuplicate([1, 2, 3])
print(result)
| yihsuanhung/leetcode | 217. Contains Duplicate/main.py | main.py | py | 373 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
73710667555 | #!/usr/bin/env python
# Run as:
# python plot_TC_vmax.py $TC_name/ID/year
# python plot_TC_vmax.py FlorenceAL062018
#
import numpy as np
import os, sys, datetime, time, subprocess
import re, csv, glob
import multiprocessing, itertools, collections
import scipy, ncepy
import matplotlib
import matplotlib.image as image
from matplotlib.gridspec import GridSpec
import matplotlib.ticker as mticker
import matplotlib.pyplot as plt
from matplotlib import colors as c
matplotlib.use('Agg')
import cartopy
# Get TC name and number
# The single CLI argument is "<StormName><ATCFid><Year>", e.g.
# "FlorenceAL062018"; fall back to an interactive prompt if it is missing.
try:
    TC = str(sys.argv[1])
except IndexError:
    TC = None
if TC is None:
    print('Enter TC name, number, and year as one string')
    print('Example: FlorenceAL062018')
    TC = input('Enter TC name/number/year: ')
# Slice the combined string: last 4 chars = year, the 4 before = ATCF id
# (e.g. AL06), everything before that = storm name.
TC_name = TC[:-8]
TC_number = TC[-8:-4]
YYYY = TC[-4:]
print(TC_name, TC_number, YYYY)
# Option to make special HAFS comparison graphics
# Any second CLI argument (its value is ignored) switches on HAFS mode.
try:
    dummy = str(sys.argv[2])
    do_hafs = True
    print('Plotting HAFS comparison images')
except IndexError:
    dummy = None
    do_hafs = False
# Set path and create graphx directory (if not already created)
MEG_DIR = os.getcwd()
# b-deck (best track) files live under the operational dcom tree.
BDECK_DIR = '/lfs/h1/ops/prod/dcom/nhc/atcf-noaa/btk'
if do_hafs:
    DATA_DIR = os.path.join('/lfs/h2/emc/vpppg/noscrub/',os.environ['USER'],'MEG', TC_name, 'data','hafs')
    GRAPHX_DIR = os.path.join('/lfs/h2/emc/ptmp',os.environ['USER'],'MEG', TC_name, 'graphx','hafs')
else:
    DATA_DIR = os.path.join('/lfs/h2/emc/vpppg/noscrub/',os.environ['USER'],'MEG', TC_name, 'data')
    GRAPHX_DIR = os.path.join('/lfs/h2/emc/ptmp',os.environ['USER'],'MEG', TC_name, 'graphx')
if os.path.exists(DATA_DIR):
    if not os.path.exists(GRAPHX_DIR):
        os.makedirs(GRAPHX_DIR)
else:
    # Without pre-staged data for this storm there is nothing to plot.
    raise NameError('data for '+TC_name+' not found')
#OUT_DIR = os.path.join(GRAPHX_DIR, cycle)
OUT_DIR = GRAPHX_DIR
if not os.path.exists(OUT_DIR):
    os.makedirs(OUT_DIR)
# Define valid_time only if it has not been set by an enclosing driver.
try:
    valid_time
except NameError:
    valid_time = None
# Get list of GFS cycles to get max number of cycles
# GFS is assumed to have the most complete set of cycles; file names end in
# "<YYYYMMDDHH>.csv", so characters [-14:-4] are the 10-digit cycle stamp.
filelist1 = [f for f in glob.glob(DATA_DIR+'/'+str.lower(TC_name)+'_gfs_'+str(YYYY)+'*csv')]
max_cycles_unordered = [filelist1[x][-14:-4] for x in range(len(filelist1))]
# Sort chronologically by converting to int, then back to strings for keying.
max_cycles_int = [int(x) for x in max_cycles_unordered]
max_cycles_int.sort()
max_cycles = [str(x) for x in max_cycles_int]
max_ncycles = len(max_cycles)
# Get list of cycles based on matching files in DATA_DIR
filelist = [f for f in glob.glob(DATA_DIR+'/'+str.lower(TC_name)+'_*_vmaxlist.csv')]
# Per-model accumulators, filled by the CSV read loop below.
mlats=[]
mlons=[]
mpres=[]
mvmax=[]
#mrmw=[]
mcycles=[]
mnames=[]
mcolors=[]
# NOTE: two earlier unconditional `models = [...]` assignments were removed as
# dead code -- the if/else below always reassigns both `models` and
# `model_strings` before either is used.
if do_hafs:
    models=['HWRF','HMON','HF3A','HF3S']
    model_strings=['HWRF','HMON','HAFS-A','HAFS-S']
else:
    models=['HWRF','HMON','GFS','ECMO','UK']
    model_strings=['HWRF','HMON','GFS','EC','UKM']
#max_ncycles = 0
# Read each model's vmax CSV: row 0 holds the cycle stamps, row 1 the vmax
# values (kt).  k counts models even when a file is missing so ordering
# stays aligned with models[].
k = 0
for model_str in models:
    vmax_file = DATA_DIR+'/'+str.lower(TC_name)+'_'+str.lower(model_str)+'_vmaxlist.csv'
    color_list=[]
    if os.path.exists(vmax_file):
        with open(vmax_file,'r') as f:
            reader=csv.reader(f)
            i = 0
            for row in reader:
                if i == 0:
                    # First row: initialization cycles as YYYYMMDDHH stamps.
                    cycle_list = [datetime.datetime.strptime(x,"%Y%m%d%H") for x in row]
                    # Hard-coded day-of-month -> color mapping (legacy; the
                    # plot itself uses color_dict2, not these colors).
                    for cycle in cycle_list:
                        if cycle.strftime("%d") == '05' or cycle.strftime("%d") == '06':
                            color_list.append('blue')
                        elif cycle.strftime("%d") == '07':
                            color_list.append('green')
                        elif cycle.strftime("%d") == '08':
                            color_list.append('yellow')
                        elif cycle.strftime("%d") == '09':
                            color_list.append('orange')
                        elif cycle.strftime("%d") == '10':
                            color_list.append('red')
                elif i == 1:
                    # Second row: one vmax (kt) per cycle.
                    vmax_list = [float(x) for x in row]
                i += 1
#        if len(cycle_list) > max_ncycles:
#            max_ncycles = len(cycle_list)
#            max_ncycles_ind = k
        # NOTE(review): if a CSV has fewer than two rows, vmax_list/cycle_list
        # left over from the previous model would be appended again -- verify
        # the input files always contain both rows.
        mvmax.append(vmax_list)
        mcycles.append(cycle_list)
        mcolors.append(color_list)
    k += 1
#print(mvmax[0])
#print(mcycles[0])
# Build one sequential-colormap entry per available cycle and key it by the
# 10-digit cycle stamp; color_dict2 is what plot_vmax() actually uses.
cmap=matplotlib.cm.get_cmap('YlGnBu')
values = []
print(max_ncycles)
for i in range(max_ncycles):
    # Sample the colormap away from its white end by offsetting by 1.
    values.append(cmap(float(i+1)/float(max_ncycles+1)))
color_dict2 = dict(zip(max_cycles,values))
#color_dict2 = dict(zip(mcycles[max_ncycles_ind],values))
# Legacy hand-picked colors for the 2018 and 2019 cases; kept for reference
# but superseded by color_dict2 above.
color_dict = {
    "2018100518": "powderblue",
    "2018100600": "skyblue",
    "2018100606": "dodgerblue",
    "2018100612": "blue",
    "2018100618": "navy",
    "2018100700": "lawngreen",
    "2018100706": "limegreen",
    "2018100712": "forestgreen",
    "2018100718": "darkgreen",
    "2018100800": "khaki",
    "2018100806": "yellow",
    "2018100812": "gold",
    "2018100818": "orange",
    "2018100900": "lightsalmon",
    "2018100906": "red",
    "2018100912": "firebrick",
    "2018100918": "maroon",
    "2018101000": "pink",
    "2018101006": "hotpink",
    "2018101012": "magenta",
    "2018101018": "darkmagenta",
    "2019071000": "skyblue",
    "2019071006": "dodgerblue",
    "2019071012": "blue",
    "2019071018": "navy",
    "2019071100": "lawngreen",
    "2019071106": "limegreen",
    "2019071112": "forestgreen",
    "2019071118": "darkgreen",
    "2019071200": "khaki",
    "2019071206": "yellow",
    "2019071212": "gold",
    "2019071218": "orange",
    "2019071300": "lightsalmon",
    "2019071306": "red",
    "2019071312": "firebrick",
    "2019071318": "maroon",
}
# Scan the best-track (b-deck) file for the observed peak intensity (ovmax).
# ATCF b-deck columns used here: col 10 = storm type, col 11 = wind-radii
# threshold ('34'/'50'/'64', or '0' when no radii line), col 8 = max
# sustained wind (kt).
ovmax=0.
with open(BDECK_DIR+'/b'+str.lower(TC_number)+str(YYYY)+'.dat','r') as f:
    reader = csv.reader(f)
    for row in reader:
        # Include all tracked stages of the system (disturbance, low,
        # tropical depression/storm, hurricane, extratropical).
        if row[10].replace(" ","") in ('DB', 'LO', 'TD', 'TS', 'HU', 'EX'):
            # Use only the 34-kt (or radii-free '0') line so each time is
            # considered once rather than once per wind-radii threshold.
            if row[11].replace(" ","") in ('34', '0'):
                if float(row[8]) > ovmax:
                    ovmax = float(row[8])
def plot_vmax():
    """Scatter-plot every model's forecast peak intensities, one x column per
    model, colored by initialization cycle, with intensity-category bands and
    the observed best-track peak (ovmax) overlaid.

    Reads module-level state (models, model_strings, mvmax, mcycles,
    color_dict2, ovmax, do_hafs, OUT_DIR, TC_name, YYYY) and writes
    <tcname>_vmax.png into OUT_DIR.
    """
    print('plotting vmax scatter')
    fig = plt.figure(figsize=(9,8))
    for i in range(len(models)):
#        label_str = str.upper(models[i])
        # NOTE(review): this renames entries of the module-level `models`
        # list in place; appears harmless since xticks use model_strings.
        if str.upper(models[i]) == 'GFS':
            models[i] = 'GFS'
        elif str.upper(models[i]) == 'GFSO':
            models[i] = 'GFSv14'
        # One x column per model; y values are the per-cycle vmax forecasts,
        # each dot colored by its initialization cycle.
        x = [i+1 for x in range(len(mvmax[i]))]
        y = mvmax[i]
        colors_list = [color_dict2[cycle.strftime("%Y%m%d%H")] for cycle in mcycles[i]]
#        colors_list = [color_dict2[cycle] for cycle in mcycles[i]]
#        plt.scatter(x, y, s=50 , c='k', alpha=0.5)
#        plt.scatter(x, y, s=50 , c=mcolors[i])
        plt.scatter(x, y, s=150 , c=colors_list, edgecolors='k',zorder=20)
#    plt.plot(plot_ovmax, '-', color='black', label='BEST', linewidth=2.)
    xlen = len(models)
#    x = np.arange(0,xlen+1,1)
    if do_hafs:
        plt.axis([0,xlen+1,55,165])
    else:
        plt.axis([0,xlen+1,5,160])
    plt.axhline(y=ovmax,xmin=0,xmax=xlen+1.5,color='magenta',linewidth=2,linestyle='--') # observed vmax line
    # Horizontal lines/bands at the standard intensity thresholds (kt):
    # TS/34, Cat1/64, Cat2/83, Cat3/96, Cat4/113, Cat5/137.
    plt.axhline(y=34,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhline(y=64,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhline(y=83,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhline(y=96,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhline(y=113,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhline(y=137,xmin=0,xmax=xlen+1.5,color='k',linewidth=2)
    plt.axhspan(137, 200, facecolor='0.1', alpha=0.5)
    plt.axhspan(113, 137, facecolor='0.15', alpha=0.5)
    plt.axhspan(96, 113, facecolor='0.2', alpha=0.5)
    plt.axhspan(83, 96, facecolor='0.25', alpha=0.5)
    plt.axhspan(64, 83, facecolor='0.3', alpha=0.5)
    plt.axhspan(34, 64, facecolor='0.4', alpha=0.5)
    plt.axhspan(0, 34, facecolor='0.5', alpha=0.5)
    plt.xticks(np.arange(1,xlen+1,1),model_strings,weight='bold')
    plt.ylabel('Maximum Sustained 10-m Wind (kts)',fontweight='bold')
    plt.grid(True)
#    plt.legend(loc="upper right", ncol=5)
#    titlestr = 'Hurricane '+TC_name+' Maximum 10-m Wind Forecast by Initialization \n'+ \
#               cycle_date.strftime('%HZ %d %b Initializations')+' valid through '+final_valid_date.strftime('%HZ %d %b %Y')
    titlestr = 'Hurricane '+TC_name+' ('+YYYY+') - Maximum Intensity Forecast by Initialization'
    plt.title(titlestr, fontweight='bold')
    fname = str.lower(TC_name)+'_vmax'
    plt.savefig(OUT_DIR+'/'+fname+'.png',bbox_inches='tight')
    plt.close()
plot_vmax()
| LoganDawson-NOAA/MEG | TC_plotting/plot_TC_vmax.py | plot_TC_vmax.py | py | 8,760 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"line_numb... |
32805233512 | # *** coding: utf-8 ***
#@Time : 2020/11/28 10:03
#@Author : xueqing.wu
#@Email : wuxueqing@126.com
#@File : apiOrder.py
import settings
from settings import IP,HEADERS
from tools.logger import GetLogger
logger = GetLogger().get_logger()
class ApiOrder():
    """Wrapper around the shop's order-placement endpoint."""

    def __init__(self):
        # Resolve the full order URL once, at construction time.
        logger.info('开始获取下单接口的URL...')
        endpoint = '/mtx/index.php?s=/index/buy/add.html'
        self.url = IP + endpoint
        logger.info('下单接口的URL:{}'.format(self.url))

    def order(self, session):
        """Place an order through the given logged-in session.

        :param session: requests.Session carrying the login cookies
        :return: the raw response of the order request
        """
        # Fixed demo payload: one unit of goods_id 1, shipped to address
        # 1290, paid with payment method 1, no spec selected.
        payload = {
            'goods_id': 1,
            'stock': 1,
            'buy_type': 'goods',
            'address_id': 1290,
            'payment_id': 1,
            'spec': '',
        }
        logger.info('开始发起下单请求,请求参数是:{},请求头是:{}'.format(payload, HEADERS))
        response = session.post(self.url, data=payload, headers=HEADERS)
        # Stash the follow-up (payment) URL for later steps in the flow.
        settings.JUMP_URL = response.json().get('data').get('jump_url')
        logger.info('响应结果是:{}'.format(response.json()))
        return response
{
"api_name": "tools.logger.GetLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "settings.IP",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "settings.HEADERS",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "settings.HEAD... |
10556332205 | """
History
AUTHOR: JOE ROCCA
"""
from __future__ import print_function
import httplib
import json
from random import randint
from datetime import datetime
import calendar
# --------------- MAIN FUNCTIONS ----------------------
def lambda_handler(event, context):
    """Lambda entry point: route the request by its type.

    The JSON body of the Alexa request is provided in `event`.
    """
    session = event['session']
    request = event['request']
    print("event.session.application.applicationId=" +
          session['application']['applicationId'])
    # Reject requests coming from any other skill configuration.
    if (session['application']['applicationId'] !=
            "REPLACE WITH APP ID"):
        raise ValueError("Invalid Application ID")
    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)
    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def on_session_started(session_started_request, session):
    """Log that a new session has begun."""
    print("on_session_started requestId={}, sessionId={}".format(
        session_started_request['requestId'], session['sessionId']))
def on_launch(launch_request, session):
    """Handle a launch with no intent by speaking the welcome message."""
    print("on_launch requestId={}, sessionId={}".format(
        launch_request['requestId'], session['sessionId']))
    # Dispatch to the skill's launch handler.
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to the matching intent handler."""
    print("on_intent requestId={}, sessionId={}".format(
        intent_request['requestId'], session['sessionId']))
    intent = intent_request['intent']
    intent_name = intent['name']
    if intent_name == "TodayInHistoryIntent":
        return get_today_in_history()
    if intent_name == "HistoryWithDateIntent":
        return get_today_in_history_for_date(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_help_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not called when the skill itself returns should_end_session=true.
    """
    print("on_session_ended requestId={}, sessionId={}".format(
        session_ended_request['requestId'], session['sessionId']))
    # add cleanup logic here
# --------------- FUNCTIONS THAT CONTROL THE SKILLS BEHAVIOR ------------------
def get_welcome_response():
    """Build the greeting spoken when the skill is opened without an intent."""
    card_title = "Welcome"
    speech_output = ("Welcome to History. "
                     "Ask me about history on any date.")
    # Spoken again if the user stays silent or is not understood.
    reprompt_text = ("Ask for a historical fact by saying, "
                     "What happened today in history.")
    speechlet = build_speechlet_response(card_title, speech_output,
                                         reprompt_text, False)
    return build_response({}, speechlet)
def get_help_response():
    """Build the response for AMAZON.HelpIntent, listing example commands."""
    speech_output = ("Here are some things you can say: "
                     "What happened today, "
                     "What happened on May 16th. "
                     "You can also say, stop, if you're done. "
                     "So, how can I help?")
    # The help text doubles as the reprompt.
    speechlet = build_speechlet_response("Help", speech_output,
                                         speech_output, False)
    return build_response({}, speechlet)
def handle_session_end_request():
    """Say goodbye and end the session."""
    speech_output = ("Thank you for trying history. "
                     "Have a nice day! ")
    # should_end_session=True closes the session and exits the skill.
    return build_response({}, build_speechlet_response(
        "Session Ended", speech_output, None, True))
# --------------- HANDLE HISTORY INTENTS ----------------------
def get_today_in_history():
    """Speak a random historical fact for today's date and end the session."""
    fact = fetchFactForToday()
    return build_response({}, build_speechlet_response(
        'Today in History', fact, None, True))
def get_today_in_history_for_date(intent, session):
    """Handle HistoryWithDateIntent: speak a fact for the requested date.

    The DATE slot arrives as an ISO date string (YYYY-MM-DD).  If the slot
    cannot be parsed or the fact service fails, fall back to a help prompt
    and keep the session open.
    """
    date = intent['slots']['DATE']['value']
    try:
        dt = datetime.strptime(date, '%Y-%m-%d')
        month = dt.month
        day = dt.day
        session_attributes = {}
        reprompt_text = None
        fact = fetchFactForDay(month, day)
        should_end_session = True
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; Exception still covers both slot-parse failures
    # (ValueError/KeyError) and fact-service errors.
    except Exception:
        session_attributes = {}
        reprompt_text = None
        fact = "I am having a hard time understanding you. " \
               "Here are some things you can say: " \
               "What happened today, " \
               "What happened on May 16th. " \
               "You can also say, stop, if you're done. " \
               "So, how can I help?"
        should_end_session = False
    return build_response(session_attributes, build_speechlet_response(
        'History', fact, reprompt_text, should_end_session))
# --------------- HELPERS THAT BUILD ALL RESPONSES ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble the 'speechlet' portion of an Alexa response.

    `output` is both spoken and shown on the companion-app card.
    """
    speech = {'type': 'PlainText', 'text': output}
    reprompt_speech = {'type': 'PlainText', 'text': reprompt_text}
    card = {'type': 'Simple', 'title': title, 'content': output}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': {'outputSpeech': reprompt_speech},
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet in the top-level Alexa response envelope."""
    envelope = {'version': '1.0'}
    envelope['sessionAttributes'] = session_attributes
    envelope['response'] = speechlet_response
    return envelope
# --------------- HISTORY FACT FETCHERS ----------------------
def fetchFactForToday():
    """Fetch a random event for today's date from history.muffinlabs.com.

    Returns a sentence of the form 'Today in <year>, <text>'.
    """
    conn = httplib.HTTPConnection('history.muffinlabs.com')
    conn.request("GET", "/date")
    r1 = conn.getresponse()
    facts = json.loads(r1.read())['data']['Events']
    count = len(facts)
    # BUG FIX: randint() is inclusive on both ends, so the upper bound must
    # be count - 1; the old randint(0, count) raised IndexError roughly one
    # time in count + 1 (fetchFactForDay below already did this correctly).
    i = randint(0, count - 1)
    factObject = facts[i]
    factText = factObject['text']
    factYear = factObject['year']
    formattedFact = 'Today in ' + factYear + ', ' + factText
    print (factYear)
    print (factText)
    print (formattedFact)
    return formattedFact
def fetchFactForDay(month, day):
    """Fetch a random event for the given month/day from history.muffinlabs.com.

    Returns a sentence of the form 'On <Month> <day> in <year>, <text>'.
    """
    conn = httplib.HTTPConnection('history.muffinlabs.com')
    conn.request("GET", "/date/" + str(month) + "/" + str(day))
    response = conn.getresponse()
    events = json.loads(response.read())['data']['Events']
    # randint is inclusive on both ends, so pick from [0, len - 1].
    pick = randint(0, len(events) - 1)
    chosen = events[pick]
    factText = chosen['text']
    factYear = chosen['year']
    formattedFact = 'On {} {} in {}, {}'.format(
        calendar.month_name[month], day, factYear, factText)
    print (factYear)
    print (factText)
    print (formattedFact)
    return formattedFact
| joerocca/AmazonEchoHistorySkill | HistoryAlexaSkill.py | HistoryAlexaSkill.py | py | 8,308 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 161,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 161,
"usage_type": "name"
},
{
"api_name": "httplib.HTTPConnection",
"line_number": 216,
"usage_type": "call"
},
{
"api_name":... |
# Import the modules
import numpy as np
import matplotlib.pyplot as plt
import pickle
from DDPG import DDPG
from sklearn.ensemble import GradientBoostingRegressor
from env import Environment
# NOTE(review): duplicate "import numpy as np" (already imported above).
import numpy as np
import scipy.stats as stats
# Define the parameters
MAX_EPISODES = 1000 # The maximum number of episodes for training the DDPG agent
MAX_EP_STEPS = 200 # The maximum number of steps for each episode
REWARD_FACTOR = 1.0 # The factor to scale the reward
PENALTY_FACTOR = 1.0 # The factor to scale the penalty
# Create an instance of the DDPG agent
# 10-dim state, 5-dim action bounded by action_bound.
agent = DDPG(state_dim=10, action_dim=5, action_bound=1.0, lr_a=0.001, lr_c=0.002, gamma=0.9, batch_size=32, memory_size=10000)
# Edge-computing environment: 10 users, 5 servers, log(1+x) utility.
env = Environment(n_users=10, n_servers=5, bandwidth=100, latency=0.1, utility=lambda x: np.log(1 + x))
# Initialize an empty list for the reward history
reward_history = []
# Start the training loop
for i in range(MAX_EPISODES):
    # Reset the environment and get the initial state
    state = env.reset()
    # Initialize the episode reward
    ep_reward = 0
    # Start the episode loop
    for j in range(MAX_EP_STEPS):
        # Choose an action based on the state
        action = agent.choose_action(state)
        # Execute the action and get the next state, reward, and done flag
        next_state, reward, done, info = env.step(action)
        # Scale the reward and the penalty
        reward = reward * REWARD_FACTOR
        penalty = info['penalty'] * PENALTY_FACTOR
        # Add the reward and the penalty to the episode reward
        ep_reward += (reward - penalty)
        # Store the transition in the replay buffer
        # NOTE(review): the stored transition carries the scaled reward but
        # not the penalty, while ep_reward subtracts it -- confirm intended.
        agent.replay_buffer.append((state, action, reward, next_state, done))
        # Learn from the replay buffer
        agent.learn()
        # Update the state
        state = next_state
        # Check if the episode is done
        if done:
            break
    # Print the episode reward
    print('Episode: {}, Reward: {}'.format(i, ep_reward))
    # Append the episode reward to the reward history
    reward_history.append(ep_reward)
# Save the reward history to a file
np.save('reward.npy', reward_history)
# Save the DDPG agent to a file
agent.save('model.pkl')
# Plot the reward history
plt.plot(reward_history)
plt.xlabel('Episode')
plt.ylabel('Reward')
plt.title('DDPG Learning Curve')
plt.show()
# --- Distill the DDPG policy into a GBDT model, then evaluate it ---
# Access the replay buffer of the DDPG agent
replay_buffer = agent.replay_buffer
# Initialize an empty list for the dataset
dataset = []
# Iterate over the replay buffer
for state, action, reward, next_state, done in replay_buffer:
    # Append the state and action to the dataset
    dataset.append((state, action))
# Save the dataset to a file
with open("dataset.pkl", "wb") as f:
    pickle.dump(dataset, f)
# Load the dataset from the file
with open("dataset.pkl", "rb") as f:
    dataset = pickle.load(f)
# Split the dataset into features and labels
X = np.array([state for state, action in dataset]) # The features are the states
y = np.array([action for state, action in dataset]) # The labels are the actions
# Create an instance of the GBDT model
gbdt = GradientBoostingRegressor()
# Train the GBDT model on the dataset
# NOTE(review): GradientBoostingRegressor expects a 1-D target; y here holds
# 5-dim action vectors -- confirm this fits, or wrap in MultiOutputRegressor.
gbdt.fit(X, y)
# Save the GBDT model to a file
with open("gbdt.pkl", "wb") as f:
    pickle.dump(gbdt, f)
# Load the GBDT model from the file
with open("gbdt.pkl", "rb") as f:
    gbdt = pickle.load(f)
# Evaluate the GBDT model on the resource allocation problem
# You can use any metric you want, such as the reward, the utility, or the QoS
# Here is an example of using the reward as the metric
# Initialize an empty list for the evaluation reward history
eval_reward_history = []
# Start the evaluation loop
for i in range(100):
    # Reset the environment and get the initial state
    state = env.reset()
    # Initialize the evaluation episode reward
    eval_ep_reward = 0
    # Start the evaluation episode loop
    for j in range(MAX_EP_STEPS):
        # Predict an action based on the state using the GBDT model
        action = gbdt.predict(state.reshape(1, -1))
        # Execute the action and get the next state, reward, and done flag
        next_state, reward, done, info = env.step(action)
        # Scale the reward and the penalty
        reward = reward * REWARD_FACTOR
        penalty = info['penalty'] * PENALTY_FACTOR
        # Add the reward and the penalty to the evaluation episode reward
        eval_ep_reward += (reward - penalty)
        # Update the state
        state = next_state
        # Check if the episode is done
        if done:
            break
    # Print the evaluation episode reward
    print('Evaluation Episode: {}, Reward: {}'.format(i, eval_ep_reward))
    # Append the evaluation episode reward to the evaluation reward history
    eval_reward_history.append(eval_ep_reward)
# Save the evaluation reward history to a file
np.save('eval_reward.npy', eval_reward_history)
# Plot the evaluation reward history
plt.plot(eval_reward_history)
plt.xlabel('Evaluation Episode')
plt.ylabel('Reward')
plt.title('GBDT Evaluation Curve')
plt.show()
# Load the reward history of the DDPG agent from the file
reward_history = np.load('reward.npy')
# Load the evaluation reward history of the GBDT model from the file
eval_reward_history = np.load('eval_reward.npy')
# Calculate the mean and the standard deviation of the reward for each model
ddpg_mean = np.mean(reward_history)
ddpg_std = np.std(reward_history)
gbdt_mean = np.mean(eval_reward_history)
gbdt_std = np.std(eval_reward_history)
# Print the mean and the standard deviation of the reward for each model
print('DDPG Mean Reward: {:.2f}, DDPG Standard Deviation: {:.2f}'.format(ddpg_mean, ddpg_std))
print('GBDT Mean Reward: {:.2f}, GBDT Standard Deviation: {:.2f}'.format(gbdt_mean, gbdt_std))
# Perform a t-test to compare the reward of the two models
# NOTE(review): the two samples have different sizes (1000 vs 100 episodes);
# ttest_ind assumes equal variances by default -- consider equal_var=False.
t, p = stats.ttest_ind(reward_history, eval_reward_history)
# Print the t-statistic and the p-value
print('T-statistic: {:.2f}, P-value: {:.2f}'.format(t, p))
# Interpret the result
if p < 0.05:
    print('The difference in reward between the two models is statistically significant.')
else:
    print('The difference in reward between the two models is not statistically significant.')
{
"api_name": "DDPG.DDPG",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "env.Environment",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "env.reset",
"line_number": 28... |
29166855011 | import numpy as np
import pyqtgraph
from pyqtgraph.Qt import QtGui
from app.misc._miss_plot import MissPlotItem
from osu_analysis import StdScoreData
class HitOffsetGraph(QtGui.QWidget):
    """Scatter graph of tap timing offsets across a play.

    X axis is map time (ms); y axis is the hit offset, computed as
    map_t - replay_t for each scored press/release.  Misses are drawn by a
    MissPlotItem, and horizontal lines mark the average offset and a
    +/- 2*std band.
    """

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)

        # Main graph
        self.__graph = pyqtgraph.PlotWidget(title='Hit offset graph')
        self.__graph.getPlotItem().getAxis('left').enableAutoSIPrefix(False)
        self.__graph.getPlotItem().getAxis('bottom').enableAutoSIPrefix(False)
        self.__graph.enableAutoRange(axis='x', enable=False)
        self.__graph.enableAutoRange(axis='y', enable=False)
        self.__graph.setLimits(yMin=-200, yMax=200)
        self.__graph.setRange(xRange=[-10, 10000], yRange=[-250, 250])
        self.__graph.setLabel('left', 't-offset', units='ms', unitPrefix='')
        self.__graph.setLabel('bottom', 'time', units='ms', unitPrefix='')
        self.__graph.addLegend()

        # Scatter series for presses and releases.
        self.__plot_hits = self.__graph.plot()
        self.__plot_rels = self.__graph.plot()

        # Miss markers.
        self.__miss_plot = MissPlotItem()
        self.__graph.addItem(self.__miss_plot)

        # Zero-offset reference line.
        self.__graph.addLine(x=None, y=0, pen=pyqtgraph.mkPen((0, 150, 0, 255), width=1))

        # Hit-window boundary lines (created but currently not shown).
        self.__offset_miss_pos_line = pyqtgraph.InfiniteLine(angle=0, pen=pyqtgraph.mkPen((255, 0, 0, 50), width=1))
        #self.__graph.addItem(self.__offset_miss_pos_line)
        self.__offset_miss_neg_line = pyqtgraph.InfiniteLine(angle=0, pen=pyqtgraph.mkPen((255, 0, 0, 50), width=1))
        #self.__graph.addItem(self.__offset_miss_neg_line)

        # Mean offset line and the +/- 2*std band lines.
        self.__offset_avg_line = pyqtgraph.InfiniteLine(angle=0, pen=pyqtgraph.mkPen((255, 255, 0, 150), width=1))
        self.__graph.addItem(self.__offset_avg_line)
        self.__offset_std_line_pos = pyqtgraph.InfiniteLine(angle=0, pen=pyqtgraph.mkPen((255, 150, 0, 150), width=1))
        self.__offset_std_line_neg = pyqtgraph.InfiniteLine(angle=0, pen=pyqtgraph.mkPen((255, 150, 0, 150), width=1))
        self.__graph.addItem(self.__offset_std_line_pos)
        self.__graph.addItem(self.__offset_std_line_neg)

        # Hit stats
        self.hit_metrics = pyqtgraph.TextItem('', anchor=(0, 0), )
        self.__graph.addItem(self.hit_metrics)

        # Put it all together
        self.__layout = QtGui.QHBoxLayout(self)
        self.__layout.setContentsMargins(0, 0, 0, 0)
        self.__layout.setSpacing(2)
        self.__layout.addWidget(self.__graph)

        # Keep the stats text pinned to the visible corner of the view.
        self.__graph.sigRangeChanged.connect(self.__on_view_range_changed)
        self.__on_view_range_changed()

    # NOTE: an earlier duplicate `plot_data(self, score_data)` definition was
    # removed as dead code; it was silently shadowed by the `plot_data` below.

    def set_window(self, neg_miss_win, pos_miss_win):
        """Position the (currently hidden) early/late hit-window lines."""
        self.__offset_miss_neg_line.setValue(neg_miss_win)
        self.__offset_miss_pos_line.setValue(pos_miss_win)

    def plot_data(self, play_data):
        """Redraw every series from a scored play.

        `play_data` is a DataFrame-like table with columns 'type', 'action',
        'map_t', and 'replay_t' (StdScoreData layout) -- TODO confirm.
        """
        if play_data.shape[0] == 0:
            return

        self.__plot_misses(play_data)
        self.__plot_hit_offsets(play_data)
        self.__plot_rel_offsets(play_data)
        self.__plot_avg_global(play_data)
        self.__update_hit_stats(play_data)

    def __plot_hit_offsets(self, data):
        """Scatter the time offsets of non-miss presses."""
        # Extract timings and hit_offsets
        miss_filter = data['type'] != StdScoreData.TYPE_MISS
        prs_select = data['action'] == StdScoreData.ACTION_PRESS
        select = prs_select & miss_filter

        if np.count_nonzero(select) == 0:
            # Nothing to draw; clear the series.
            self.__plot_hits.setData([], [], pen=None, symbol='o', symbolPen=None, symbolSize=2, symbolBrush=(100, 100, 255, 200))
            return

        hit_timings = data['map_t'].values[select]
        hit_offsets = data['map_t'].values[select] - data['replay_t'].values[select]

        # Calculate view
        xMin = min(hit_timings) - 100
        xMax = max(hit_timings) + 100

        # Set plot data
        self.__plot_hits.setData(hit_timings, hit_offsets, pen=None, symbol='o', symbolPen=None, symbolSize=2, symbolBrush=(100, 100, 255, 200))
        self.__graph.setLimits(xMin=xMin - 100, xMax=xMax + 100)
        self.__graph.setRange(xRange=[ xMin - 100, xMax + 100 ])

    def __plot_rel_offsets(self, data):
        """Scatter the time offsets of non-miss releases."""
        # Extract timings and hit_offsets
        miss_filter = data['type'] != StdScoreData.TYPE_MISS
        rel_select = data['action'] == StdScoreData.ACTION_RELEASE
        select = rel_select & miss_filter

        if np.count_nonzero(select) == 0:
            # Nothing to draw; clear the series.
            self.__plot_rels.setData([], [], pen=None, symbol='o', symbolPen=None, symbolSize=2, symbolBrush=(100, 100, 255, 200))
            return

        hit_timings = data['map_t'].values[select]
        hit_offsets = data['map_t'].values[select] - data['replay_t'].values[select]

        # Calculate view
        xMin = min(hit_timings) - 100
        xMax = max(hit_timings) + 100

        # Set plot data
        self.__plot_rels.setData(hit_timings, hit_offsets, pen=None, symbol='o', symbolPen=None, symbolSize=2, symbolBrush=(105, 217, 255, 200))
        self.__graph.setLimits(xMin=xMin - 100, xMax=xMax + 100)
        self.__graph.setRange(xRange=[ xMin - 100, xMax + 100 ])

    def __plot_misses(self, data):
        """Mark the map times of missed notes."""
        # Extract data and plot
        miss_select = data['type'] == StdScoreData.TYPE_MISS
        hit_timings = data['map_t'].values[miss_select]
        self.__miss_plot.setData(hit_timings)

    def __plot_avg_global(self, data):
        """Position the mean-offset line and the +/- 2*std band lines."""
        # Extract timings and hit_offsets
        miss_filter = data['type'] != StdScoreData.TYPE_MISS
        hit_offsets = data['map_t'].values[miss_filter] - data['replay_t'].values[miss_filter]

        mean_offset = np.mean(hit_offsets)
        std_offset = np.std(hit_offsets)

        # Set plot data
        self.__offset_avg_line.setValue(mean_offset)
        self.__offset_std_line_pos.setValue(std_offset*2 + mean_offset)
        self.__offset_std_line_neg.setValue(-std_offset*2 + mean_offset)

        print(f'mean = {mean_offset:.2f} ms std = {std_offset:.2f} ms')

    def __update_hit_stats(self, data):
        """Refresh the on-graph text readout of miss counts and offset stats."""
        free_misses = \
            (data['type'] == StdScoreData.TYPE_MISS) & \
            (data['action'] == StdScoreData.ACTION_FREE)
        num_free_misses = np.count_nonzero(free_misses)

        press_misses = \
            (data['type'] == StdScoreData.TYPE_MISS) & \
            (data['action'] == StdScoreData.ACTION_PRESS)
        num_press_misses = np.count_nonzero(press_misses)

        release_misses = \
            (data['type'] == StdScoreData.TYPE_MISS) & \
            (data['action'] == StdScoreData.ACTION_RELEASE)
        num_release_misses = np.count_nonzero(release_misses)

        hold_misses = \
            (data['type'] == StdScoreData.TYPE_MISS) & \
            (data['action'] == StdScoreData.ACTION_HOLD)
        num_hold_misses = np.count_nonzero(hold_misses)

        hits = \
            (data['type'] == StdScoreData.TYPE_HITP)
        data = data[hits]

        # Read mean/deviation back from the lines __plot_avg_global set.
        avg = self.__offset_avg_line.getPos()[1]
        dev = self.__offset_std_line_pos.getPos()[1] - avg

        self.hit_metrics.setText(
            f'''
            num free misses: {num_free_misses}
            num press misses: {num_press_misses}
            num release misses: {num_release_misses}
            num hold misses: {num_hold_misses}
            µ: {avg:.2f} ms
            2σ: ±{dev:.2f} ms ({10*dev/2:.2f} UR)
            '''
        )

    def __on_view_range_changed(self, _=None):
        """Anchor the stats text just inside the bottom-left of the view."""
        view = self.__graph.viewRect()
        pos_x = view.left()
        pos_y = view.bottom()

        margin_x = 0.001*(view.right() - view.left())
        margin_y = 0.001*(view.top() - view.bottom())

        self.hit_metrics.setPos(pos_x + margin_x, pos_y + margin_y)
| abraker-osu/osu_aim_tool | app/views/_offset_graph.py | _offset_graph.py | py | 7,745 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "pyqtgraph.Qt.QtGui.QWidget",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pyqtgraph.Qt.QtGui",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pyqtgraph.Qt.QtGui.QWidget.__init__",
"line_number": 13,
"usage_type": "call"
},
{... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.